Dataset schema (column name, type, value range):

    repo              stringlengths   7 .. 54
    path              stringlengths   4 .. 192
    url               stringlengths   87 .. 284
    code              stringlengths   78 .. 104k
    code_tokens       list
    docstring         stringlengths   1 .. 46.9k
    docstring_tokens  list
    language          stringclasses   1 value
    partition         stringclasses   3 values
sorgerlab/indra
indra/databases/hgnc_client.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/hgnc_client.py#L83-L107
def get_hgnc_name(hgnc_id):
    """Return the HGNC symbol corresponding to the given HGNC ID.

    Parameters
    ----------
    hgnc_id : str
        The HGNC ID to be converted.

    Returns
    -------
    hgnc_name : str
        The HGNC symbol corresponding to the given HGNC ID.
    """
    try:
        hgnc_name = hgnc_names[hgnc_id]
    except KeyError:
        xml_tree = get_hgnc_entry(hgnc_id)
        if xml_tree is None:
            return None
        hgnc_name_tag = \
            xml_tree.find("result/doc/str[@name='symbol']")
        if hgnc_name_tag is None:
            return None
        hgnc_name = hgnc_name_tag.text.strip()
    return hgnc_name
Return the HGNC symbol corresponding to the given HGNC ID.

Parameters
----------
hgnc_id : str
    The HGNC ID to be converted.

Returns
-------
hgnc_name : str
    The HGNC symbol corresponding to the given HGNC ID.
python
train
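A minimal usage sketch for the record above, assuming the indra package is installed; the ID and expected symbol are illustrative (HGNC ID '5' should correspond to 'A1BG').

from indra.databases import hgnc_client

# Known ID: resolved from the bundled HGNC table (or a web lookup on a miss).
print(hgnc_client.get_hgnc_name('5'))            # expected: 'A1BG'

# Unknown ID: both lookups miss, so the function is expected to return None.
print(hgnc_client.get_hgnc_name('not-an-id'))    # expected: None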
ganguli-lab/proxalgs
proxalgs/tensor.py
https://github.com/ganguli-lab/proxalgs/blob/74f54467ad072d3229edea93fa84ddd98dd77c67/proxalgs/tensor.py#L69-L96
def susvd(x, x_obs, rho, penalties):
    """
    Sequential unfolding SVD

    Parameters
    ----------
    x : Tensor
    x_obs : array_like
    rho : float
    penalties : array_like
        penalty for each unfolding of the input tensor
    """
    assert type(x) == Tensor, "Input array must be a Tensor"

    while True:
        # proximal operator for the Fro. norm
        x = squared_error(x, rho, x_obs)

        # sequential singular value thresholding
        for ix, penalty in enumerate(penalties):
            x = x.unfold(ix).svt(penalty / rho).fold()

        yield x
Sequential unfolding SVD

Parameters
----------
x : Tensor
x_obs : array_like
rho : float
penalties : array_like
    penalty for each unfolding of the input tensor
python
train
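A driving-loop sketch for susvd above. Since the function yields forever, the caller chooses the stopping rule. Assumptions: Tensor accepts an ndarray directly and the import path matches proxalgs/tensor.py.

import itertools
import numpy as np
from proxalgs.tensor import Tensor, susvd

x_obs = np.random.randn(5, 5, 5)          # illustrative observed tensor
x0 = Tensor(x_obs)                        # assumed constructor; see note above
solver = susvd(x0, x_obs, rho=1.0, penalties=[0.1, 0.1, 0.1])

# Run a fixed number of proximal iterations; x_hat holds the final iterate.
for x_hat in itertools.islice(solver, 25):
    pass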
mathandy/svgpathtools
svgpathtools/document.py
https://github.com/mathandy/svgpathtools/blob/fd7348a1dfd88b65ea61da02325c6605aedf8c4f/svgpathtools/document.py#L324-L338
def add_group(self, group_attribs=None, parent=None):
    """Add an empty group element to the SVG."""
    if parent is None:
        parent = self.tree.getroot()
    elif not self.contains_group(parent):
        warnings.warn('The requested group {0} does not belong to '
                      'this Document'.format(parent))

    if group_attribs is None:
        group_attribs = {}
    else:
        group_attribs = group_attribs.copy()

    return SubElement(parent, '{{{0}}}g'.format(
        SVG_NAMESPACE['svg']), group_attribs)
Add an empty group element to the SVG.
python
train
edx/edx-django-utils
edx_django_utils/monitoring/middleware.py
https://github.com/edx/edx-django-utils/blob/16cb4ac617e53c572bf68ccd19d24afeff1ca769/edx_django_utils/monitoring/middleware.py#L115-L122
def process_request(self, request):
    """
    Store memory data to log later.
    """
    if self._is_enabled():
        self._cache.set(self.guid_key, six.text_type(uuid4()))
        log_prefix = self._log_prefix(u"Before", request)
        self._cache.set(self.memory_data_key, self._memory_data(log_prefix))
Store memory data to log later.
python
train
psd-tools/psd-tools
src/psd_tools/compression.py
https://github.com/psd-tools/psd-tools/blob/4952b57bcf1cf2c1f16fd9d6d51d4fa0b53bce4e/src/psd_tools/compression.py#L154-L173
def _shuffled_order(w, h):
    """
    Generator for the order of 4-byte values.

    32bit channels are also encoded using delta encoding, but it makes
    no sense to apply delta compression to bytes. It is possible to
    apply delta compression to 2-byte or 4-byte words, but it seems it
    is not the best way either.

    In PSD, each 4-byte item is split into 4 bytes and these bytes are
    packed together: "123412341234" becomes "111222333444"; delta
    compression is applied to the packed data.

    So we have to (a) decompress data from the delta compression and
    (b) recombine data back to 4-byte values.
    """
    rowsize = 4 * w
    for row in range(0, rowsize * h, rowsize):
        for offset in range(row, row + w):
            for x in range(offset, offset + rowsize, w):
                yield x
Generator for the order of 4-byte values.

32bit channels are also encoded using delta encoding, but it makes no sense to apply delta compression to bytes. It is possible to apply delta compression to 2-byte or 4-byte words, but it seems it is not the best way either.

In PSD, each 4-byte item is split into 4 bytes and these bytes are packed together: "123412341234" becomes "111222333444"; delta compression is applied to the packed data. So we have to (a) decompress data from the delta compression and (b) recombine data back to 4-byte values.
python
train
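A worked example of the shuffling order the docstring describes, assuming _shuffled_order from the record above is in scope. For a 3-value row (w=3, h=1), reading the packed bytes in the yielded order recovers the interleaved layout:

order = list(_shuffled_order(3, 1))
print(order)                                  # [0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11]

packed = "111222333444"                       # the packed layout from the docstring
print("".join(packed[i] for i in order))      # "123412341234"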
ev3dev/ev3dev-lang-python
ev3dev2/sensor/lego.py
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/sensor/lego.py#L791-L797
def heading(self, channel=1):
    """
    Returns heading (-25, 25) to the beacon on the given channel.
    """
    self._ensure_mode(self.MODE_IR_SEEK)
    channel = self._normalize_channel(channel)
    return self.value(channel * 2)
Returns heading (-25, 25) to the beacon on the given channel.
python
train
google/openhtf
openhtf/output/servers/station_server.py
https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/servers/station_server.py#L202-L210
def _to_dict_with_event(cls, test_state):
    """Process a test state into the format we want to send to the frontend."""
    original_dict, event = test_state.asdict_with_event()

    # This line may produce a 'dictionary changed size during iteration' error.
    test_state_dict = data.convert_to_base_types(original_dict)

    test_state_dict['execution_uid'] = test_state.execution_uid

    return test_state_dict, event
Process a test state into the format we want to send to the frontend.
python
train
SheffieldML/GPy
GPy/util/cluster_with_offset.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/util/cluster_with_offset.py#L91-L179
def cluster(data, inputs, verbose=False):
    """Clusters data

    Using the new offset model, this method uses a greedy algorithm to
    cluster the data. It starts with all the data points in separate
    clusters and tests whether combining them increases the overall
    log-likelihood (LL). It then iteratively joins pairs of clusters
    which cause the greatest increase in the LL, until no join increases
    the LL.

    arguments:
    data -- the 'Y's in a list, one item per cluster
    inputs -- the 'X's in a list, one item per cluster

    returns a list of the clusters.
    """
    N = len(data)

    # Define a set of N active clusters
    active = []
    for p in range(0, N):
        active.append([p])

    loglikes = np.zeros(len(active))
    loglikes[:] = None
    pairloglikes = np.zeros([len(active), len(active)])
    pairloglikes[:] = None
    pairoffset = np.zeros([len(active), len(active)])
    it = 0
    while True:
        if verbose:
            it += 1
            print("Iteration %d" % it)

        # Compute the log-likelihood of each cluster (add them together)
        for clusti in range(len(active)):
            if verbose:
                sys.stdout.write('.')
                sys.stdout.flush()
            if np.isnan(loglikes[clusti]):
                loglikes[clusti], unused_offset = get_log_likelihood_offset(inputs, data, [clusti])

            # try combining with each other cluster...
            for clustj in range(clusti):  # count from 0 to clusti-1
                temp = [clusti, clustj]
                if np.isnan(pairloglikes[clusti, clustj]):
                    pairloglikes[clusti, clustj], pairoffset[clusti, clustj] = \
                        get_log_likelihood_offset(inputs, data, temp)

        seploglikes = np.repeat(loglikes[:, None].T, len(loglikes), 0) + \
            np.repeat(loglikes[:, None], len(loglikes), 1)
        loglikeimprovement = pairloglikes - seploglikes  # how much likelihood improves with clustering
        top = np.unravel_index(np.nanargmax(pairloglikes - seploglikes), pairloglikes.shape)

        # if loglikeimprovement.shape[0] < 3:
        #     # no more clustering to do - this shouldn't happen really unless
        #     # we've set the threshold to apply clustering to less than 0
        #     break

        # if there's further clustering to be done...
        if loglikeimprovement[top[0], top[1]] > 0:
            active[top[0]].extend(active[top[1]])
            offset = pairoffset[top[0], top[1]]
            inputs[top[0]] = np.vstack([inputs[top[0]], inputs[top[1]] - offset])
            data[top[0]] = np.hstack([data[top[0]], data[top[1]]])
            del inputs[top[1]]
            del data[top[1]]
            del active[top[1]]

            # None = message to say we need to recalculate
            pairloglikes[:, top[0]] = None
            pairloglikes[top[0], :] = None
            pairloglikes = np.delete(pairloglikes, top[1], 0)
            pairloglikes = np.delete(pairloglikes, top[1], 1)
            loglikes[top[0]] = None
            loglikes = np.delete(loglikes, top[1])
        else:
            break

    # if loglikeimprovement[top[0], top[1]] > 0:
    #     print "joined"
    #     print top
    #     print offset
    #     print offsets
    #     print offsets[top[1]] - offsets[top[0]]

    # TODO Add a way to return the offsets applied to all the time series
    return active
Clusters data

Using the new offset model, this method uses a greedy algorithm to cluster the data. It starts with all the data points in separate clusters and tests whether combining them increases the overall log-likelihood (LL). It then iteratively joins pairs of clusters which cause the greatest increase in the LL, until no join increases the LL.

arguments:
data -- the 'Y's in a list, one item per cluster
inputs -- the 'X's in a list, one item per cluster

returns a list of the clusters.
python
train
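A hedged calling sketch for cluster above; the toy series are illustrative, and actually running it requires the GPy machinery behind get_log_likelihood_offset, so treat this as shape documentation rather than a recipe.

import numpy as np
from GPy.util.cluster_with_offset import cluster

# Three short series; the second is a shifted copy of the first, so an
# offset model could plausibly merge them.
X = [np.arange(10.0)[:, None] for _ in range(3)]
Y = [np.sin(x).ravel() for x in X]
Y[1] = np.sin(X[1] + 0.5).ravel()

groups = cluster(Y, X)     # note the (data, inputs) argument order
print(groups)              # e.g. [[0, 1], [2]] -- indices of merged series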
basecrm/basecrm-python
basecrm/services.py
https://github.com/basecrm/basecrm-python/blob/7c1cf97dbaba8aeb9ff89f8a54f945a8702349f6/basecrm/services.py#L1323-L1337
def retrieve(self, id):
    """
    Retrieve a single order

    Returns a single order available to the user, according to the unique
    order ID provided. If the specified order does not exist, the request
    will return an error.

    :calls: ``get /orders/{id}``
    :param int id: Unique identifier of an Order.
    :return: Dictionary that supports attribute-style access and represents an Order resource.
    :rtype: dict
    """
    _, _, order = self.http_client.get("/orders/{id}".format(id=id))
    return order
Retrieve a single order

Returns a single order available to the user, according to the unique order ID provided. If the specified order does not exist, the request will return an error.

:calls: ``get /orders/{id}``
:param int id: Unique identifier of an Order.
:return: Dictionary that supports attribute-style access and represents an Order resource.
:rtype: dict
python
train
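Typical usage, following the client pattern from the basecrm-python README; the token and order ID below are placeholders.

import basecrm

client = basecrm.Client(access_token='<YOUR_PERSONAL_ACCESS_TOKEN>')
order = client.orders.retrieve(id=1)    # issues GET /orders/1
print(order)                            # dict with attribute-style access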
iamteem/redisco
redisco/containers.py
https://github.com/iamteem/redisco/blob/a7ba19ff3c38061d6d8bc0c10fa754baadcfeb91/redisco/containers.py#L138-L143
def difference(self, key, *others):
    """Return a new set with elements in the set that are not in the others."""
    if not isinstance(key, str):
        raise ValueError("String expected.")
    self.db.sdiffstore(key, [self.key] + [o.key for o in others])
    return Set(key)
Return a new set with elements in the set that are not in the others.
python
train
databio/pypiper
pypiper/ngstk.py
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L922-L945
def fastqc_rename(self, input_bam, output_dir, sample_name):
    """
    Create pair of commands to run fastqc and organize files.

    The first command returned is the one that actually runs fastqc when
    it's executed; the second moves the output files to the output
    folder for the sample indicated.

    :param str input_bam: Path to file for which to run fastqc.
    :param str output_dir: Path to folder in which fastqc output will be
        written, and within which the sample's output folder lives.
    :param str sample_name: Sample name, which determines subfolder within
        output_dir for the fastqc files.
    :return list[str]: Pair of commands, to run fastqc and then move the
        files to their intended destination based on sample name.
    """
    cmds = list()
    initial = os.path.splitext(os.path.basename(input_bam))[0]
    cmd1 = self.fastqc(input_bam, output_dir)
    cmds.append(cmd1)
    cmd2 = "if [[ ! -s {1}_fastqc.html ]]; then mv {0}_fastqc.html {1}_fastqc.html; mv {0}_fastqc.zip {1}_fastqc.zip; fi".format(
        os.path.join(output_dir, initial), os.path.join(output_dir, sample_name))
    cmds.append(cmd2)
    return cmds
Create pair of commands to run fastqc and organize files.

The first command returned is the one that actually runs fastqc when it's executed; the second moves the output files to the output folder for the sample indicated.

:param str input_bam: Path to file for which to run fastqc.
:param str output_dir: Path to folder in which fastqc output will be written, and within which the sample's output folder lives.
:param str sample_name: Sample name, which determines subfolder within output_dir for the fastqc files.
:return list[str]: Pair of commands, to run fastqc and then move the files to their intended destination based on sample name.
python
train
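A sketch of the command pair, assuming tk is an NGSTk instance as constructed in pypiper pipelines; the paths are placeholders. Nothing runs here: the method only builds shell strings for the pipeline to execute later.

cmd_run, cmd_move = tk.fastqc_rename(
    input_bam="data/sampleA.raw.bam",
    output_dir="results/fastqc",
    sample_name="sampleA",
)
print(cmd_run)    # the fastqc invocation
print(cmd_move)   # shell guard that renames *_fastqc.html/.zip to the sample name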
treethought/flask-assistant
api_ai/api.py
https://github.com/treethought/flask-assistant/blob/9331b9796644dfa987bcd97a13e78e9ab62923d3/api_ai/api.py#L77-L84
def agent_intents(self):
    """Returns a list of intent json objects"""
    endpoint = self._intent_uri()
    intents = self._get(endpoint)  # should be list of dicts
    if isinstance(intents, dict):  # if error: intents = {status: {error}}
        raise Exception(intents["status"])
    return [Intent(intent_json=i) for i in intents]
Returns a list of intent json objects
python
train
SecurityInnovation/PGPy
pgpy/pgp.py
https://github.com/SecurityInnovation/PGPy/blob/f1c3d68e32c334f5aa14c34580925e97f17f4fde/pgpy/pgp.py#L1795-L1835
def sign(self, subject, **prefs):
    """
    Sign text, a message, or a timestamp using this key.

    :param subject: The text to be signed
    :type subject: ``str``, :py:obj:`~pgpy.PGPMessage`, ``None``
    :raises: :py:exc:`~pgpy.errors.PGPError` if the key is passphrase-protected and has not been unlocked
    :raises: :py:exc:`~pgpy.errors.PGPError` if the key is public
    :returns: :py:obj:`PGPSignature`

    The following optional keyword arguments can be used with :py:meth:`PGPKey.sign`, as well as
    :py:meth:`PGPKey.certify`, :py:meth:`PGPKey.revoke`, and :py:meth:`PGPKey.bind`:

    :keyword expires: Set an expiration date for this signature
    :type expires: :py:obj:`~datetime.datetime`, :py:obj:`~datetime.timedelta`
    :keyword notation: Add arbitrary notation data to this signature.
    :type notation: ``dict``
    :keyword policy_uri: Add a URI to the signature that should describe the policy under which the signature
                         was issued.
    :type policy_uri: ``str``
    :keyword revocable: If ``False``, this signature will be marked non-revocable
    :type revocable: ``bool``
    :keyword user: Specify which User ID to use when creating this signature. Also adds a "Signer's User ID"
                   to the signature.
    :type user: ``str``
    """
    sig_type = SignatureType.BinaryDocument
    hash_algo = prefs.pop('hash', None)

    if subject is None:
        sig_type = SignatureType.Timestamp

    if isinstance(subject, PGPMessage):
        if subject.type == 'cleartext':
            sig_type = SignatureType.CanonicalDocument
            subject = subject.message

    sig = PGPSignature.new(sig_type, self.key_algorithm, hash_algo, self.fingerprint.keyid)
    return self._sign(subject, sig, **prefs)
Sign text, a message, or a timestamp using this key.

:param subject: The text to be signed
:type subject: ``str``, :py:obj:`~pgpy.PGPMessage`, ``None``
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is passphrase-protected and has not been unlocked
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is public
:returns: :py:obj:`PGPSignature`

The following optional keyword arguments can be used with :py:meth:`PGPKey.sign`, as well as :py:meth:`PGPKey.certify`, :py:meth:`PGPKey.revoke`, and :py:meth:`PGPKey.bind`:

:keyword expires: Set an expiration date for this signature
:type expires: :py:obj:`~datetime.datetime`, :py:obj:`~datetime.timedelta`
:keyword notation: Add arbitrary notation data to this signature.
:type notation: ``dict``
:keyword policy_uri: Add a URI to the signature that should describe the policy under which the signature was issued.
:type policy_uri: ``str``
:keyword revocable: If ``False``, this signature will be marked non-revocable
:type revocable: ``bool``
:keyword user: Specify which User ID to use when creating this signature. Also adds a "Signer's User ID" to the signature.
:type user: ``str``
python
train
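A short signing sketch using PGPy's documented API; the key path and passphrase are placeholders, and a passphrase-protected key must be unlocked before signing (otherwise sign raises PGPError, per the docstring above).

import pgpy

key, _ = pgpy.PGPKey.from_file('privkey.asc')      # placeholder path

with key.unlock('my passphrase'):                  # required for protected keys
    sig = key.sign("message body", user="Alice <alice@example.com>")

print(str(sig))                                    # ASCII-armored signature block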
joke2k/faker
faker/providers/date_time/__init__.py
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/providers/date_time/__init__.py#L1550-L1572
def date_time_between(self, start_date='-30y', end_date='now', tzinfo=None):
    """
    Get a DateTime object based on a random date between two given dates.
    Accepts date strings that can be recognized by strtotime().

    :param start_date: Defaults to 30 years ago
    :param end_date: Defaults to "now"
    :param tzinfo: timezone, instance of datetime.tzinfo subclass

    :example: DateTime('1999-02-02 11:42:52')
    :return: DateTime
    """
    start_date = self._parse_date_time(start_date, tzinfo=tzinfo)
    end_date = self._parse_date_time(end_date, tzinfo=tzinfo)
    if end_date - start_date <= 1:
        ts = start_date + self.generator.random.random()
    else:
        ts = self.generator.random.randint(start_date, end_date)
    if tzinfo is None:
        return datetime(1970, 1, 1, tzinfo=tzinfo) + timedelta(seconds=ts)
    else:
        return (
            datetime(1970, 1, 1, tzinfo=tzutc()) + timedelta(seconds=ts)
        ).astimezone(tzinfo)
Get a DateTime object based on a random date between two given dates. Accepts date strings that can be recognized by strtotime().

:param start_date: Defaults to 30 years ago
:param end_date: Defaults to "now"
:param tzinfo: timezone, instance of datetime.tzinfo subclass

:example: DateTime('1999-02-02 11:42:52')
:return: DateTime
python
train
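Standard Faker usage for this provider method; the exact value is random per call.

from faker import Faker
from dateutil.tz import gettz

fake = Faker()

print(fake.date_time_between())                     # within the last 30 years (defaults)
print(fake.date_time_between(start_date='-1y',      # constrain the window
                             end_date='now',
                             tzinfo=gettz('UTC')))  # timezone-aware result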
evyatarmeged/Raccoon
raccoon_src/lib/host.py
https://github.com/evyatarmeged/Raccoon/blob/985797f73329976ec9c3fefbe4bbb3c74096ca51/raccoon_src/lib/host.py#L88-L137
def parse(self):
    """
    Try to extract domain (full, naked, sub-domain), IP and port.
    """
    if self.target.endswith("/"):
        self.target = self.target[:-1]

    if self._is_proto(self.target):
        try:
            self.protocol, self.target = self.target.split("://")
            self.logger.info("{} Protocol detected: {}".format(COLORED_COMBOS.NOTIFY, self.protocol))
            if self.protocol.lower() == "https" and self.port == 80:
                self.port = 443
        except ValueError:
            raise HostHandlerException("Could not make domain and protocol from host")

    if ":" in self.target:
        self._extract_port(self.target)

    if self.validate_ip(self.target):
        self.logger.info("{} Detected {} as an IP address.".format(COLORED_COMBOS.NOTIFY, self.target))
        self.is_ip = True
    else:
        domains = []
        if self.target.startswith("www."):
            # Obviously an FQDN
            domains.extend((self.target, self.target.split("www.")[1]))
            self.fqdn = self.target
            self.naked = ".".join(self.fqdn.split('.')[1:])
        else:
            domains.append(self.target)
            domain_levels = self.target.split(".")
            if len(domain_levels) == 2 or (len(domain_levels) == 3 and domain_levels[1] == "co"):
                self.logger.info("{} Found {} to be a naked domain".format(COLORED_COMBOS.NOTIFY, self.target))
                self.naked = self.target

        try:
            self.dns_results = DNSHandler.query_dns(domains, self.dns_records)
        except Timeout:
            raise HostHandlerException("DNS Query timed out. Maybe target has DNS protection ?")

        if self.dns_results.get("CNAME"):
            # Naked domains shouldn't hold CNAME records according to RFC regulations
            self.logger.info("{} Found {} to be an FQDN by CNAME presence in DNS records".format(
                COLORED_COMBOS.NOTIFY, self.target))
            self.fqdn = self.target
            self.naked = ".".join(self.fqdn.split('.')[1:])

    self.create_host_dir_and_set_file_logger()
    self.write_up()
Try to extract domain (full, naked, sub-domain), IP and port.
python
train
standage/tag
tag/feature.py
https://github.com/standage/tag/blob/94686adf57115cea1c5235e99299e691f80ba10b/tag/feature.py#L525-L550
def attribute_crawl(self, key):
    """
    Grab all attribute values associated with the given feature.

    Traverse the given feature (and all of its descendants) to find all
    values associated with the given attribute key.

    >>> import tag
    >>> reader = tag.GFF3Reader(tag.pkgdata('otau-no-seqreg.gff3'))
    >>> features = tag.select.features(reader)
    >>> for feature in features:
    ...     names = feature.attribute_crawl('Name')
    ...     print(sorted(list(names)))
    ['Ot01g00060', 'XM_003074019.1', 'XP_003074065.1']
    ['Ot01g00070', 'XM_003074020.1', 'XP_003074066.1']
    ['Ot01g00080', 'XM_003074021.1', 'XP_003074067.1']
    ['Ot01g00090', 'XM_003074022.1', 'XP_003074068.1']
    ['Ot01g00100', 'XM_003074023.1', 'XP_003074069.1']
    ['Ot01g00110', 'XM_003074024.1', 'XP_003074070.1']
    """
    union = set()
    for feature in self:
        values = feature.get_attribute(key, as_list=True)
        if values is not None:
            union.update(set(values))
    return union
Grab all attribute values associated with the given feature.

Traverse the given feature (and all of its descendants) to find all values associated with the given attribute key.

>>> import tag
>>> reader = tag.GFF3Reader(tag.pkgdata('otau-no-seqreg.gff3'))
>>> features = tag.select.features(reader)
>>> for feature in features:
...     names = feature.attribute_crawl('Name')
...     print(sorted(list(names)))
['Ot01g00060', 'XM_003074019.1', 'XP_003074065.1']
['Ot01g00070', 'XM_003074020.1', 'XP_003074066.1']
['Ot01g00080', 'XM_003074021.1', 'XP_003074067.1']
['Ot01g00090', 'XM_003074022.1', 'XP_003074068.1']
['Ot01g00100', 'XM_003074023.1', 'XP_003074069.1']
['Ot01g00110', 'XM_003074024.1', 'XP_003074070.1']
python
train
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/feature.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/feature.py#L637-L644
def __is_subproperty_of(parent_property, p):
    """ As is_subfeature_of, for subproperties. """
    if __debug__:
        from .property import Property
        assert isinstance(parent_property, Property)
        assert isinstance(p, Property)
    return is_subfeature_of(parent_property, p.feature)
As is_subfeature_of, for subproperties.
python
train
tanghaibao/goatools
goatools/anno/init/reader_genetogo.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/anno/init/reader_genetogo.py#L114-L118
def _prt_line_detail(self, prt, line, lnum=""):
    """Print each field and its value."""
    data = zip(self.flds, line.split('\t'))
    txt = ["{:2}) {:13} {}".format(i, hdr, val) for i, (hdr, val) in enumerate(data)]
    prt.write("{LNUM}\n{TXT}\n".format(LNUM=lnum, TXT='\n'.join(txt)))
Print each field and its value.
python
train
UCL-INGI/INGInious
inginious/frontend/pages/course_admin/classroom_edit.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/pages/course_admin/classroom_edit.py#L21-L64
def get_user_lists(self, course, classroomid):
    """ Get the available student and tutor lists for editing a classroom """
    tutor_list = course.get_staff()

    # Determine if user is grouped or not in the classroom
    student_list = list(self.database.classrooms.aggregate([
        {"$match": {"_id": ObjectId(classroomid)}},
        {"$unwind": "$students"},
        {"$project": {
            "students": 1,
            "grouped": {
                "$anyElementTrue": {
                    "$map": {
                        "input": "$groups.students",
                        "as": "group",
                        "in": {
                            "$anyElementTrue": {
                                "$map": {
                                    "input": "$$group",
                                    "as": "groupmember",
                                    "in": {"$eq": ["$$groupmember", "$students"]}
                                }
                            }
                        }
                    }
                }
            }
        }}
    ]))

    student_list = dict([(student["students"], student) for student in student_list])

    other_students = [entry['students'] for entry in list(self.database.classrooms.aggregate([
        {"$match": {"courseid": course.get_id(), "_id": {"$ne": ObjectId(classroomid)}}},
        {"$unwind": "$students"},
        {"$project": {"_id": 0, "students": 1}}
    ]))]

    users_info = self.user_manager.get_users_info(other_students + list(student_list.keys()) + tutor_list)

    # Order the non-registered students
    other_students = sorted(other_students,
                            key=lambda val: (("0" + users_info[val][0]) if users_info[val] else ("1" + val)))

    return student_list, tutor_list, other_students, users_info
Get the available student and tutor lists for editing a classroom
python
train
CellProfiler/centrosome
centrosome/otsu.py
https://github.com/CellProfiler/centrosome/blob/7bd9350a2d4ae1b215b81eabcecfe560bbb1f32a/centrosome/otsu.py#L205-L217
def entropy_score(var, bins, w=None, decimate=True):
    '''Compute entropy scores, given a variance and # of bins
    '''
    if w is None:
        n = len(var)
        w = np.arange(0, n, n // bins) / float(n)
    if decimate:
        n = len(var)
        var = var[0:n:n // bins]
    score = w * np.log(var * w * np.sqrt(2 * np.pi * np.exp(1)))
    score[np.isnan(score)] = np.Inf
    return score
Compute entropy scores, given a variance and # of bins
python
train
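A runnable sketch of the scoring math on synthetic variances (values are illustrative). Note the weight vector starts at zero, so the first score is NaN and gets remapped to infinity.

import numpy as np
from centrosome.otsu import entropy_score

var = np.linspace(0.5, 2.0, 128)        # synthetic per-threshold variances
scores = entropy_score(var, bins=8)     # 8 weighted, decimated scores
print(scores)                           # scores[0] == inf; the rest are finite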
rootpy/rootpy
rootpy/collection.py
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/collection.py#L34-L48
def as_list_with_options(self):
    """
    Similar to list(self) except elements which have an option
    associated with them are returned as a ``TListItemWithOption``
    """
    it = ROOT.TIter(self)
    elem = it.Next()
    result = []
    while elem:
        if it.GetOption():
            result.append(TListItemWithOption(elem, it.GetOption()))
        else:
            result.append(elem)
        elem = it.Next()
    return result
Similar to list(self) except elements which have an option associated with them are returned as a ``TListItemWithOption``
python
train
juju/charm-helpers
charmhelpers/fetch/python/debug.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/fetch/python/debug.py#L41-L54
def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT):
    """
    Set a trace point using the remote debugger
    """
    atexit.register(close_port, port)
    try:
        log("Starting a remote python debugger session on %s:%s" % (addr, port))
        open_port(port)
        debugger = Rpdb(addr=addr, port=port)
        debugger.set_trace(sys._getframe().f_back)
    except Exception:
        _error("Cannot start a remote debug session on %s:%s" % (addr, port))
Set a trace point using the remote debugger
python
train
modin-project/modin
modin/pandas/io.py
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/io.py#L35-L101
def _make_parser_func(sep):
    """Creates a parser function from the given sep.

    Args:
        sep: The separator default to use for the parser.

    Returns:
        A function object.
    """

    def parser_func(
        filepath_or_buffer,
        sep=sep,
        delimiter=None,
        header="infer",
        names=None,
        index_col=None,
        usecols=None,
        squeeze=False,
        prefix=None,
        mangle_dupe_cols=True,
        dtype=None,
        engine=None,
        converters=None,
        true_values=None,
        false_values=None,
        skipinitialspace=False,
        skiprows=None,
        nrows=None,
        na_values=None,
        keep_default_na=True,
        na_filter=True,
        verbose=False,
        skip_blank_lines=True,
        parse_dates=False,
        infer_datetime_format=False,
        keep_date_col=False,
        date_parser=None,
        dayfirst=False,
        iterator=False,
        chunksize=None,
        compression="infer",
        thousands=None,
        decimal=b".",
        lineterminator=None,
        quotechar='"',
        quoting=0,
        escapechar=None,
        comment=None,
        encoding=None,
        dialect=None,
        tupleize_cols=None,
        error_bad_lines=True,
        warn_bad_lines=True,
        skipfooter=0,
        doublequote=True,
        delim_whitespace=False,
        low_memory=True,
        memory_map=False,
        float_precision=None,
    ):
        _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
        if not kwargs.get("sep", sep):
            kwargs["sep"] = "\t"
        return _read(**kwargs)

    return parser_func
Creates a parser function from the given sep.

Args:
    sep: The separator default to use for the parser.

Returns:
    A function object.
python
train
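A sketch of how the factory is meant to be used; in modin the real products are read_csv and read_table, but the bindings below are illustrative and the file paths are placeholders.

read_csv = _make_parser_func(sep=",")
read_table = _make_parser_func(sep="\t")

df = read_csv("data.csv")
df2 = read_table("data.tsv", skiprows=1)   # keyword args flow through to _read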
ryanmcgrath/twython-django
twython_django_oauth/views.py
https://github.com/ryanmcgrath/twython-django/blob/e49e3ccba94939187378993269eff19e198e5f64/twython_django_oauth/views.py#L27-L44
def begin_auth(request):
    """The view function that initiates the entire handshake.

    For the most part, this is 100% drag and drop.
    """
    # Instantiate Twython with the first leg of our trip.
    twitter = Twython(settings.TWITTER_KEY, settings.TWITTER_SECRET)

    # Request an authorization url to send the user to...
    callback_url = request.build_absolute_uri(reverse('twython_django_oauth.views.thanks'))
    auth_props = twitter.get_authentication_tokens(callback_url)

    # Then send them over there, durh.
    request.session['request_token'] = auth_props
    request.session['next_url'] = request.GET.get('next', None)
    return HttpResponseRedirect(auth_props['auth_url'])
The view function that initiates the entire handshake. For the most part, this is 100% drag and drop.
python
train
maxzheng/bumper-lib
bumper/cars.py
https://github.com/maxzheng/bumper-lib/blob/32a9dec5448673825bb2d7d92fa68882b597f794/bumper/cars.py#L33-L41
def parse(cls, s, required=False):
    """ Parse string to create an instance

    :param str s: String with requirement to parse
    :param bool required: Is this requirement required to be fulfilled? If not, then it is a filter.
    """
    req = pkg_resources.Requirement.parse(s)
    return cls(req, required=required)
Parse string to create an instance

:param str s: String with requirement to parse
:param bool required: Is this requirement required to be fulfilled? If not, then it is a filter.
python
valid
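The classmethod above is a thin wrapper over pkg_resources requirement parsing; a sketch of what the wrapped object carries (the requirement string is illustrative):

import pkg_resources

r = pkg_resources.Requirement.parse('requests>=2.20,<3')
print(r.project_name)    # 'requests'
print(r.specs)           # e.g. [('>=', '2.20'), ('<', '3')]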
dwavesystems/dwave_embedding_utilities
dwave_embedding_utilities.py
https://github.com/dwavesystems/dwave_embedding_utilities/blob/2e485e0ae89d96f3c0005f144bab4b465a3039a3/dwave_embedding_utilities.py#L549-L553
def _all_equal(iterable):
    """True if all values in `iterable` are equal, else False."""
    iterator = iter(iterable)
    first = next(iterator)
    return all(first == rest for rest in iterator)
True if all values in `iterable` are equal, else False.
python
train
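A behavior sketch, assuming _all_equal from the record above is in scope. The empty-input edge case is worth noting: next() on an exhausted iterator raises StopIteration, so an empty iterable does not simply return True.

print(_all_equal([3, 3, 3]))       # True
print(_all_equal([3, 3, 4]))       # False
print(_all_equal([42]))            # True: a single element is trivially equal

try:
    _all_equal([])                 # next() immediately raises here
except StopIteration:
    print("empty input raises StopIteration")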
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/gloo/texture.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/gloo/texture.py#L226-L277
def _resize(self, shape, format=None, internalformat=None):
    """Internal method for resize.
    """
    shape = self._normalize_shape(shape)

    # Check
    if not self._resizable:
        raise RuntimeError("Texture is not resizable")

    # Determine format
    if format is None:
        format = self._formats[shape[-1]]
        # Keep current format if channels match
        if self._format and \
                self._inv_formats[self._format] == self._inv_formats[format]:
            format = self._format
    else:
        format = check_enum(format)

    if internalformat is None:
        # Keep current internalformat if channels match
        if self._internalformat and \
                self._inv_internalformats[self._internalformat] == shape[-1]:
            internalformat = self._internalformat
    else:
        internalformat = check_enum(internalformat)

    # Check
    if format not in self._inv_formats:
        raise ValueError('Invalid texture format: %r.' % format)
    elif shape[-1] != self._inv_formats[format]:
        raise ValueError('Format does not match with given shape. '
                         '(format expects %d elements, data has %d)' %
                         (self._inv_formats[format], shape[-1]))

    if internalformat is None:
        pass
    elif internalformat not in self._inv_internalformats:
        raise ValueError(
            'Invalid texture internalformat: %r. Allowed formats: %r'
            % (internalformat, self._inv_internalformats)
        )
    elif shape[-1] != self._inv_internalformats[internalformat]:
        raise ValueError('Internalformat does not match with given shape.')

    # Store and send GLIR command
    self._shape = shape
    self._format = format
    self._internalformat = internalformat
    self._glir.command('SIZE', self._id, self._shape, self._format,
                       self._internalformat)
Internal method for resize.
[ "Internal", "method", "for", "resize", "." ]
python
train
rm-hull/luma.emulator
luma/emulator/device.py
https://github.com/rm-hull/luma.emulator/blob/ca3db028b33d17cda9247ea5189873ff0408d013/luma/emulator/device.py#L262-L273
def _generate_art(self, image, width, height):
        """
        Return an iterator that produces the ascii art.
        """
        # Characters aren't square, so scale the output by the aspect ratio of a character
        height = int(height * self._char_width / float(self._char_height))
        image = image.resize((width, height), Image.ANTIALIAS).convert("RGB")

        for (r, g, b) in image.getdata():
            greyscale = int(0.299 * r + 0.587 * g + 0.114 * b)
            ch = self._chars[int(greyscale / 255. * (len(self._chars) - 1) + 0.5)]
            yield (ch, rgb2short(r, g, b))
[ "def", "_generate_art", "(", "self", ",", "image", ",", "width", ",", "height", ")", ":", "# Characters aren't square, so scale the output by the aspect ratio of a character", "height", "=", "int", "(", "height", "*", "self", ".", "_char_width", "/", "float", "(", "self", ".", "_char_height", ")", ")", "image", "=", "image", ".", "resize", "(", "(", "width", ",", "height", ")", ",", "Image", ".", "ANTIALIAS", ")", ".", "convert", "(", "\"RGB\"", ")", "for", "(", "r", ",", "g", ",", "b", ")", "in", "image", ".", "getdata", "(", ")", ":", "greyscale", "=", "int", "(", "0.299", "*", "r", "+", "0.587", "*", "g", "+", "0.114", "*", "b", ")", "ch", "=", "self", ".", "_chars", "[", "int", "(", "greyscale", "/", "255.", "*", "(", "len", "(", "self", ".", "_chars", ")", "-", "1", ")", "+", "0.5", ")", "]", "yield", "(", "ch", ",", "rgb2short", "(", "r", ",", "g", ",", "b", ")", ")" ]
Return an iterator that produces the ascii art.
[ "Return", "an", "iterator", "that", "produces", "the", "ascii", "art", "." ]
python
train
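A self-contained sketch of the same luminance-to-character mapping, for trying the idea outside the device class. The character ramp, the aspect-ratio factor and the file name are assumptions (luma.emulator keeps its own ramp and the rgb2short colour helper in its modules), and Image.LANCZOS stands in for the Image.ANTIALIAS alias that newer Pillow releases removed:

from PIL import Image

CHARS = " .:-=+*#%@"  # hypothetical ramp, darkest to brightest


def ascii_art(path, width=80, char_aspect=0.5):
    # Terminal cells are taller than wide, so shrink the row count.
    img = Image.open(path)
    height = int(width * img.height / img.width * char_aspect)
    img = img.resize((width, height), Image.LANCZOS).convert("RGB")
    rows = []
    for y in range(height):
        row = ""
        for x in range(width):
            r, g, b = img.getpixel((x, y))
            grey = int(0.299 * r + 0.587 * g + 0.114 * b)  # ITU-R BT.601 luma
            row += CHARS[int(grey / 255.0 * (len(CHARS) - 1) + 0.5)]
        rows.append(row)
    return "\n".join(rows)


print(ascii_art("photo.jpg"))  # path is a placeholder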
OpenKMIP/PyKMIP
kmip/services/server/crypto/engine.py
https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/services/server/crypto/engine.py#L290-L377
def encrypt(self, encryption_algorithm, encryption_key, plain_text, cipher_mode=None, padding_method=None, iv_nonce=None, hashing_algorithm=None): """ Encrypt data using symmetric or asymmetric encryption. Args: encryption_algorithm (CryptographicAlgorithm): An enumeration specifying the encryption algorithm to use for encryption. encryption_key (bytes): The bytes of the encryption key to use for encryption. plain_text (bytes): The bytes to be encrypted. cipher_mode (BlockCipherMode): An enumeration specifying the block cipher mode to use with the encryption algorithm. Required in the general case. Optional if the encryption algorithm is RC4 (aka ARC4). If optional, defaults to None. padding_method (PaddingMethod): An enumeration specifying the padding method to use on the data before encryption. Required if the cipher mode is for block ciphers (e.g., CBC, ECB). Optional otherwise, defaults to None. iv_nonce (bytes): The IV/nonce value to use to initialize the mode of the encryption algorithm. Optional, defaults to None. If required and not provided, it will be autogenerated and returned with the cipher text. hashing_algorithm (HashingAlgorithm): An enumeration specifying the hashing algorithm to use with the encryption algorithm, if needed. Required for OAEP-based asymmetric encryption. Optional, defaults to None. Returns: dict: A dictionary containing the encrypted data, with at least the following key/value fields: * cipher_text - the bytes of the encrypted data * iv_nonce - the bytes of the IV/counter/nonce used if it was needed by the encryption scheme and if it was automatically generated for the encryption Raises: InvalidField: Raised when the algorithm is unsupported or the length is incompatible with the algorithm. CryptographicFailure: Raised when the key generation process fails. Example: >>> engine = CryptographyEngine() >>> result = engine.encrypt( ... encryption_algorithm=CryptographicAlgorithm.AES, ... encryption_key=( ... b'\xF3\x96\xE7\x1C\xCF\xCD\xEC\x1F' ... b'\xFC\xE2\x8E\xA6\xF8\x74\x28\xB0' ... ), ... plain_text=( ... b'\x00\x01\x02\x03\x04\x05\x06\x07' ... b'\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F' ... ), ... cipher_mode=BlockCipherMode.CBC, ... padding_method=PaddingMethod.ANSI_X923, ... ) >>> result.get('cipher_text') b'\x18[\xb9y\x1bL\xd1\x8f\x9a\xa0e\x02b\xa3=c' >>> result.iv_counter_nonce b'8qA\x05\xc4\x86\x03\xd9=\xef\xdf\xb8ke\x9a\xa2' """ if encryption_algorithm is None: raise exceptions.InvalidField("Encryption algorithm is required.") if encryption_algorithm == enums.CryptographicAlgorithm.RSA: return self._encrypt_asymmetric( encryption_algorithm, encryption_key, plain_text, padding_method, hashing_algorithm=hashing_algorithm ) else: return self._encrypt_symmetric( encryption_algorithm, encryption_key, plain_text, cipher_mode=cipher_mode, padding_method=padding_method, iv_nonce=iv_nonce )
[ "def", "encrypt", "(", "self", ",", "encryption_algorithm", ",", "encryption_key", ",", "plain_text", ",", "cipher_mode", "=", "None", ",", "padding_method", "=", "None", ",", "iv_nonce", "=", "None", ",", "hashing_algorithm", "=", "None", ")", ":", "if", "encryption_algorithm", "is", "None", ":", "raise", "exceptions", ".", "InvalidField", "(", "\"Encryption algorithm is required.\"", ")", "if", "encryption_algorithm", "==", "enums", ".", "CryptographicAlgorithm", ".", "RSA", ":", "return", "self", ".", "_encrypt_asymmetric", "(", "encryption_algorithm", ",", "encryption_key", ",", "plain_text", ",", "padding_method", ",", "hashing_algorithm", "=", "hashing_algorithm", ")", "else", ":", "return", "self", ".", "_encrypt_symmetric", "(", "encryption_algorithm", ",", "encryption_key", ",", "plain_text", ",", "cipher_mode", "=", "cipher_mode", ",", "padding_method", "=", "padding_method", ",", "iv_nonce", "=", "iv_nonce", ")" ]
Encrypt data using symmetric or asymmetric encryption. Args: encryption_algorithm (CryptographicAlgorithm): An enumeration specifying the encryption algorithm to use for encryption. encryption_key (bytes): The bytes of the encryption key to use for encryption. plain_text (bytes): The bytes to be encrypted. cipher_mode (BlockCipherMode): An enumeration specifying the block cipher mode to use with the encryption algorithm. Required in the general case. Optional if the encryption algorithm is RC4 (aka ARC4). If optional, defaults to None. padding_method (PaddingMethod): An enumeration specifying the padding method to use on the data before encryption. Required if the cipher mode is for block ciphers (e.g., CBC, ECB). Optional otherwise, defaults to None. iv_nonce (bytes): The IV/nonce value to use to initialize the mode of the encryption algorithm. Optional, defaults to None. If required and not provided, it will be autogenerated and returned with the cipher text. hashing_algorithm (HashingAlgorithm): An enumeration specifying the hashing algorithm to use with the encryption algorithm, if needed. Required for OAEP-based asymmetric encryption. Optional, defaults to None. Returns: dict: A dictionary containing the encrypted data, with at least the following key/value fields: * cipher_text - the bytes of the encrypted data * iv_nonce - the bytes of the IV/counter/nonce used if it was needed by the encryption scheme and if it was automatically generated for the encryption Raises: InvalidField: Raised when the algorithm is unsupported or the length is incompatible with the algorithm. CryptographicFailure: Raised when the key generation process fails. Example: >>> engine = CryptographyEngine() >>> result = engine.encrypt( ... encryption_algorithm=CryptographicAlgorithm.AES, ... encryption_key=( ... b'\xF3\x96\xE7\x1C\xCF\xCD\xEC\x1F' ... b'\xFC\xE2\x8E\xA6\xF8\x74\x28\xB0' ... ), ... plain_text=( ... b'\x00\x01\x02\x03\x04\x05\x06\x07' ... b'\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F' ... ), ... cipher_mode=BlockCipherMode.CBC, ... padding_method=PaddingMethod.ANSI_X923, ... ) >>> result.get('cipher_text') b'\x18[\xb9y\x1bL\xd1\x8f\x9a\xa0e\x02b\xa3=c' >>> result.iv_counter_nonce b'8qA\x05\xc4\x86\x03\xd9=\xef\xdf\xb8ke\x9a\xa2'
[ "Encrypt", "data", "using", "symmetric", "or", "asymmetric", "encryption", "." ]
python
test
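The docstring example exercises the symmetric AES/CBC branch; for completeness, a hedged sketch of the RSA dispatch into _encrypt_asymmetric. The enum members are standard KMIP ones, but the exact key encoding the asymmetric path expects (typically DER bytes) is an assumption here:

from kmip.core import enums

der_encoded_rsa_public_key = b"..."  # placeholder: DER-encoded RSA public key bytes

engine = CryptographyEngine()
result = engine.encrypt(
    encryption_algorithm=enums.CryptographicAlgorithm.RSA,
    encryption_key=der_encoded_rsa_public_key,
    plain_text=b"secret",
    padding_method=enums.PaddingMethod.OAEP,
    hashing_algorithm=enums.HashingAlgorithm.SHA_256,
)
cipher_text = result.get("cipher_text")  # per the return contract in the docstring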
mvn23/pyotgw
pyotgw/pyotgw.py
https://github.com/mvn23/pyotgw/blob/7612378ef4332b250176505af33e7536d6c9da78/pyotgw/pyotgw.py#L218-L278
async def get_reports(self): """ Update the pyotgw object with the information from all of the PR commands and return the updated status dict. This method is a coroutine """ cmd = OTGW_CMD_REPORT reports = {} for value in OTGW_REPORTS.keys(): ret = await self._wait_for_cmd(cmd, value) if ret is None: reports[value] = None continue reports[value] = ret[2:] status = { OTGW_ABOUT: reports.get(OTGW_REPORT_ABOUT), OTGW_BUILD: reports.get(OTGW_REPORT_BUILDDATE), OTGW_CLOCKMHZ: reports.get(OTGW_REPORT_CLOCKMHZ), OTGW_MODE: reports.get(OTGW_REPORT_GW_MODE), OTGW_SMART_PWR: reports.get(OTGW_REPORT_SMART_PWR), OTGW_THRM_DETECT: reports.get(OTGW_REPORT_THERMOSTAT_DETECT), OTGW_DHW_OVRD: reports.get(OTGW_REPORT_DHW_SETTING), } ovrd_mode = reports.get(OTGW_REPORT_SETPOINT_OVRD) if ovrd_mode is not None: ovrd_mode = str.upper(ovrd_mode[0]) status.update({OTGW_SETP_OVRD_MODE: ovrd_mode}) gpio_funcs = reports.get(OTGW_REPORT_GPIO_FUNCS) if gpio_funcs is not None: status.update({ OTGW_GPIO_A: int(gpio_funcs[0]), OTGW_GPIO_B: int(gpio_funcs[1]), }) led_funcs = reports.get(OTGW_REPORT_LED_FUNCS) if led_funcs is not None: status.update({ OTGW_LED_A: led_funcs[0], OTGW_LED_B: led_funcs[1], OTGW_LED_C: led_funcs[2], OTGW_LED_D: led_funcs[3], OTGW_LED_E: led_funcs[4], OTGW_LED_F: led_funcs[5], }) tweaks = reports.get(OTGW_REPORT_TWEAKS) if tweaks is not None: status.update({ OTGW_IGNORE_TRANSITIONS: int(tweaks[0]), OTGW_OVRD_HB: int(tweaks[1]), }) sb_temp = reports.get(OTGW_REPORT_SETBACK_TEMP) if sb_temp is not None: status.update({OTGW_SB_TEMP: float(sb_temp)}) vref = reports.get(OTGW_REPORT_VREF) if vref is not None: status.update({OTGW_VREF: int(vref)}) if (ovrd_mode is not None and ovrd_mode != OTGW_SETP_OVRD_DISABLED): status[DATA_ROOM_SETPOINT_OVRD] = float( reports[OTGW_REPORT_SETPOINT_OVRD][1:]) self._update_status(status) return dict(self._protocol.status)
[ "async", "def", "get_reports", "(", "self", ")", ":", "cmd", "=", "OTGW_CMD_REPORT", "reports", "=", "{", "}", "for", "value", "in", "OTGW_REPORTS", ".", "keys", "(", ")", ":", "ret", "=", "await", "self", ".", "_wait_for_cmd", "(", "cmd", ",", "value", ")", "if", "ret", "is", "None", ":", "reports", "[", "value", "]", "=", "None", "continue", "reports", "[", "value", "]", "=", "ret", "[", "2", ":", "]", "status", "=", "{", "OTGW_ABOUT", ":", "reports", ".", "get", "(", "OTGW_REPORT_ABOUT", ")", ",", "OTGW_BUILD", ":", "reports", ".", "get", "(", "OTGW_REPORT_BUILDDATE", ")", ",", "OTGW_CLOCKMHZ", ":", "reports", ".", "get", "(", "OTGW_REPORT_CLOCKMHZ", ")", ",", "OTGW_MODE", ":", "reports", ".", "get", "(", "OTGW_REPORT_GW_MODE", ")", ",", "OTGW_SMART_PWR", ":", "reports", ".", "get", "(", "OTGW_REPORT_SMART_PWR", ")", ",", "OTGW_THRM_DETECT", ":", "reports", ".", "get", "(", "OTGW_REPORT_THERMOSTAT_DETECT", ")", ",", "OTGW_DHW_OVRD", ":", "reports", ".", "get", "(", "OTGW_REPORT_DHW_SETTING", ")", ",", "}", "ovrd_mode", "=", "reports", ".", "get", "(", "OTGW_REPORT_SETPOINT_OVRD", ")", "if", "ovrd_mode", "is", "not", "None", ":", "ovrd_mode", "=", "str", ".", "upper", "(", "ovrd_mode", "[", "0", "]", ")", "status", ".", "update", "(", "{", "OTGW_SETP_OVRD_MODE", ":", "ovrd_mode", "}", ")", "gpio_funcs", "=", "reports", ".", "get", "(", "OTGW_REPORT_GPIO_FUNCS", ")", "if", "gpio_funcs", "is", "not", "None", ":", "status", ".", "update", "(", "{", "OTGW_GPIO_A", ":", "int", "(", "gpio_funcs", "[", "0", "]", ")", ",", "OTGW_GPIO_B", ":", "int", "(", "gpio_funcs", "[", "1", "]", ")", ",", "}", ")", "led_funcs", "=", "reports", ".", "get", "(", "OTGW_REPORT_LED_FUNCS", ")", "if", "led_funcs", "is", "not", "None", ":", "status", ".", "update", "(", "{", "OTGW_LED_A", ":", "led_funcs", "[", "0", "]", ",", "OTGW_LED_B", ":", "led_funcs", "[", "1", "]", ",", "OTGW_LED_C", ":", "led_funcs", "[", "2", "]", ",", "OTGW_LED_D", ":", "led_funcs", "[", "3", "]", ",", "OTGW_LED_E", ":", "led_funcs", "[", "4", "]", ",", "OTGW_LED_F", ":", "led_funcs", "[", "5", "]", ",", "}", ")", "tweaks", "=", "reports", ".", "get", "(", "OTGW_REPORT_TWEAKS", ")", "if", "tweaks", "is", "not", "None", ":", "status", ".", "update", "(", "{", "OTGW_IGNORE_TRANSITIONS", ":", "int", "(", "tweaks", "[", "0", "]", ")", ",", "OTGW_OVRD_HB", ":", "int", "(", "tweaks", "[", "1", "]", ")", ",", "}", ")", "sb_temp", "=", "reports", ".", "get", "(", "OTGW_REPORT_SETBACK_TEMP", ")", "if", "sb_temp", "is", "not", "None", ":", "status", ".", "update", "(", "{", "OTGW_SB_TEMP", ":", "float", "(", "sb_temp", ")", "}", ")", "vref", "=", "reports", ".", "get", "(", "OTGW_REPORT_VREF", ")", "if", "vref", "is", "not", "None", ":", "status", ".", "update", "(", "{", "OTGW_VREF", ":", "int", "(", "vref", ")", "}", ")", "if", "(", "ovrd_mode", "is", "not", "None", "and", "ovrd_mode", "!=", "OTGW_SETP_OVRD_DISABLED", ")", ":", "status", "[", "DATA_ROOM_SETPOINT_OVRD", "]", "=", "float", "(", "reports", "[", "OTGW_REPORT_SETPOINT_OVRD", "]", "[", "1", ":", "]", ")", "self", ".", "_update_status", "(", "status", ")", "return", "dict", "(", "self", ".", "_protocol", ".", "status", ")" ]
Update the pyotgw object with the information from all of the PR commands and return the updated status dict. This method is a coroutine
[ "Update", "the", "pyotgw", "object", "with", "the", "information", "from", "all", "of", "the", "PR", "commands", "and", "return", "the", "updated", "status", "dict", "." ]
python
train
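A usage sketch, assuming the connect() coroutine of pyotgw from this era takes the event loop and a serial port name (the port is a placeholder; check the project's README for the exact signature of your version):

import asyncio

import pyotgw


async def main(loop):
    gw = pyotgw.pyotgw()
    await gw.connect(loop, "/dev/ttyUSB0")  # port name is an assumption
    status = await gw.get_reports()         # one PR=x round-trip per report
    print(status)


loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop))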
stevepeak/dictime
dictime/dictime.py
https://github.com/stevepeak/dictime/blob/6d8724bed5a7844e47a9c16a233f8db494c98c61/dictime/dictime.py#L123-L135
def has_key(self, key): """Does the key exist? This method will check to see if it has expired too. """ if key in self._dict: try: self[key] return True except ValueError: return False except KeyError: return False return False
[ "def", "has_key", "(", "self", ",", "key", ")", ":", "if", "key", "in", "self", ".", "_dict", ":", "try", ":", "self", "[", "key", "]", "return", "True", "except", "ValueError", ":", "return", "False", "except", "KeyError", ":", "return", "False", "return", "False" ]
Does the key exist? This method will check to see if it has expired too.
[ "Does", "the", "key", "exist?", "This", "method", "will", "check", "to", "see", "if", "it", "has", "expired", "too", "." ]
python
train
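The method leans on __getitem__ raising ValueError for expired entries; below is a minimal self-contained sketch of that contract (not dictime's actual implementation, whose setter API is not shown in this record):

import time


class ExpiringDict:
    def __init__(self):
        self._dict = {}

    def set(self, key, value, ttl):
        # Store the value next to its expiry deadline.
        self._dict[key] = (value, time.time() + ttl)

    def __getitem__(self, key):
        value, deadline = self._dict[key]   # KeyError if the key is absent
        if time.time() > deadline:
            raise ValueError("expired")     # mirrors the ValueError caught by has_key
        return value

    def has_key(self, key):
        if key in self._dict:
            try:
                self[key]
                return True
            except (ValueError, KeyError):
                return False
        return False


d = ExpiringDict()
d.set("token", "abc123", ttl=30)
print(d.has_key("token"))  # True until the 30 seconds elapse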
pymupdf/PyMuPDF
fitz/fitz.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L2121-L2141
def save(self, filename, garbage=0, clean=0, deflate=0, incremental=0, ascii=0, expand=0, linear=0, pretty=0, decrypt=1): """save(self, filename, garbage=0, clean=0, deflate=0, incremental=0, ascii=0, expand=0, linear=0, pretty=0, decrypt=1) -> PyObject *""" if self.isClosed or self.isEncrypted: raise ValueError("operation illegal for closed / encrypted doc") if type(filename) == str: pass elif type(filename) == unicode: filename = filename.encode('utf8') else: raise TypeError("filename must be a string") if filename == self.name and not incremental: raise ValueError("save to original must be incremental") if self.pageCount < 1: raise ValueError("cannot save with zero pages") if incremental: if self.name != filename or self.stream: raise ValueError("incremental needs original file") return _fitz.Document_save(self, filename, garbage, clean, deflate, incremental, ascii, expand, linear, pretty, decrypt)
[ "def", "save", "(", "self", ",", "filename", ",", "garbage", "=", "0", ",", "clean", "=", "0", ",", "deflate", "=", "0", ",", "incremental", "=", "0", ",", "ascii", "=", "0", ",", "expand", "=", "0", ",", "linear", "=", "0", ",", "pretty", "=", "0", ",", "decrypt", "=", "1", ")", ":", "if", "self", ".", "isClosed", "or", "self", ".", "isEncrypted", ":", "raise", "ValueError", "(", "\"operation illegal for closed / encrypted doc\"", ")", "if", "type", "(", "filename", ")", "==", "str", ":", "pass", "elif", "type", "(", "filename", ")", "==", "unicode", ":", "filename", "=", "filename", ".", "encode", "(", "'utf8'", ")", "else", ":", "raise", "TypeError", "(", "\"filename must be a string\"", ")", "if", "filename", "==", "self", ".", "name", "and", "not", "incremental", ":", "raise", "ValueError", "(", "\"save to original must be incremental\"", ")", "if", "self", ".", "pageCount", "<", "1", ":", "raise", "ValueError", "(", "\"cannot save with zero pages\"", ")", "if", "incremental", ":", "if", "self", ".", "name", "!=", "filename", "or", "self", ".", "stream", ":", "raise", "ValueError", "(", "\"incremental needs original file\"", ")", "return", "_fitz", ".", "Document_save", "(", "self", ",", "filename", ",", "garbage", ",", "clean", ",", "deflate", ",", "incremental", ",", "ascii", ",", "expand", ",", "linear", ",", "pretty", ",", "decrypt", ")" ]
save(self, filename, garbage=0, clean=0, deflate=0, incremental=0, ascii=0, expand=0, linear=0, pretty=0, decrypt=1) -> PyObject *
[ "save", "(", "self", "filename", "garbage", "=", "0", "clean", "=", "0", "deflate", "=", "0", "incremental", "=", "0", "ascii", "=", "0", "expand", "=", "0", "linear", "=", "0", "pretty", "=", "0", "decrypt", "=", "1", ")", "-", ">", "PyObject", "*" ]
python
train
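A usage sketch illustrating the guards above; file names are placeholders, and the integer-valued flags match this PyMuPDF generation (newer releases also accept booleans):

import fitz  # PyMuPDF

doc = fitz.open("input.pdf")
# A full save must go to a different file name...
doc.save("output.pdf", garbage=3, deflate=1)
# ...while saving back to the original file requires incremental mode,
# which in turn only works for documents opened from a file.
doc.save("input.pdf", incremental=1)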
BlockHub/blockhubdpostools
dpostools/api.py
https://github.com/BlockHub/blockhubdpostools/blob/27712cd97cd3658ee54a4330ff3135b51a01d7d1/dpostools/api.py#L36-L48
def add_peer(self, peer):
        """
        Add a peer or multiple peers to the PEERS variable; takes a single string or a list.

        :param peer: a single peer URL (string) or a list of peer URLs
        """
        if type(peer) == list:
            for i in peer:
                check_url(i)
            self.PEERS.extend(peer)
        elif type(peer) == str:
            check_url(peer)
            self.PEERS.append(peer)
[ "def", "add_peer", "(", "self", ",", "peer", ")", ":", "if", "type", "(", "peer", ")", "==", "list", ":", "for", "i", "in", "peer", ":", "check_url", "(", "i", ")", "self", ".", "PEERS", ".", "extend", "(", "peer", ")", "elif", "type", "(", "peer", ")", "==", "str", ":", "check_url", "(", "peer", ")", "self", ".", "PEERS", ".", "append", "(", "peer", ")" ]
Add a peer or multiple peers to the PEERS variable; takes a single string or a list.

:param peer: a single peer URL (string) or a list of peer URLs
[ "Add", "a", "peer", "or", "multiple", "peers", "to", "the", "PEERS", "variable", "takes", "a", "single", "string", "or", "a", "list", "." ]
python
valid
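Usage sketch; the class name and constructor are assumptions, since only the method appears in this record:

api = Api()  # hypothetical constructor for the class holding PEERS
api.add_peer("http://peer1.example.org:4001")
api.add_peer([
    "http://peer2.example.org:4001",
    "http://peer3.example.org:4001",
])
print(api.PEERS)  # all three URLs, each validated by check_url first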
piface/pifacecommon
pifacecommon/interrupts.py
https://github.com/piface/pifacecommon/blob/006bca14c18d43ba2d9eafaa84ef83b512c51cf6/pifacecommon/interrupts.py#L381-L392
def bring_gpio_interrupt_into_userspace(): # activate gpio interrupt """Bring the interrupt pin on the GPIO into Linux userspace.""" try: # is it already there? with open(GPIO_INTERRUPT_DEVICE_VALUE): return except IOError: # no, bring it into userspace with open(GPIO_EXPORT_FILE, 'w') as export_file: export_file.write(str(GPIO_INTERRUPT_PIN)) wait_until_file_exists(GPIO_INTERRUPT_DEVICE_VALUE)
[ "def", "bring_gpio_interrupt_into_userspace", "(", ")", ":", "# activate gpio interrupt", "try", ":", "# is it already there?", "with", "open", "(", "GPIO_INTERRUPT_DEVICE_VALUE", ")", ":", "return", "except", "IOError", ":", "# no, bring it into userspace", "with", "open", "(", "GPIO_EXPORT_FILE", ",", "'w'", ")", "as", "export_file", ":", "export_file", ".", "write", "(", "str", "(", "GPIO_INTERRUPT_PIN", ")", ")", "wait_until_file_exists", "(", "GPIO_INTERRUPT_DEVICE_VALUE", ")" ]
Bring the interrupt pin on the GPIO into Linux userspace.
[ "Bring", "the", "interrupt", "pin", "on", "the", "GPIO", "into", "Linux", "userspace", "." ]
python
test
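The same sysfs-export idiom, written with literal paths for clarity; the pin number is an assumption (pifacecommon derives its paths from the GPIO_* constants referenced above):

GPIO_PIN = 25
VALUE_FILE = "/sys/class/gpio/gpio%d/value" % GPIO_PIN

try:
    # Opening the value file succeeds only if the pin is already exported.
    with open(VALUE_FILE):
        pass
except IOError:
    # Otherwise ask the kernel to export the pin into userspace.
    with open("/sys/class/gpio/export", "w") as export_file:
        export_file.write(str(GPIO_PIN))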
horazont/aioxmpp
aioxmpp/utils.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/utils.py#L269-L318
def gather_reraise_multi(*fut_or_coros, message="gather_reraise_multi"):
    """
    Wrap all the arguments `fut_or_coros` in futures with
    :func:`asyncio.ensure_future` and wait until all of them finish or
    fail.

    :param fut_or_coros: the futures or coroutines to wait for
    :type fut_or_coros: future or coroutine
    :param message: the message included with the raised
        :class:`aioxmpp.errors.GatherError` in the case of failure.
    :type message: :class:`str`
    :returns: the list of the results of the arguments.
    :raises aioxmpp.errors.GatherError: if any of the futures
        or coroutines fail.

    If an exception was raised, reraise all exceptions wrapped in a
    :class:`aioxmpp.errors.GatherError` with the message set to
    `message`.

    .. note::

       This is similar to the standard function
       :func:`asyncio.gather`, but avoids the in-band signalling of
       raised exceptions as return values, by raising exceptions bundled
       as a :class:`aioxmpp.errors.GatherError`.

    .. note::

       Use this function only if you are either

       a) not interested in the return values, or

       b) only interested in the return values if all futures are
          successful.
    """
    todo = [asyncio.ensure_future(fut_or_coro)
            for fut_or_coro in fut_or_coros]
    if not todo:
        return []

    yield from asyncio.wait(todo)

    results = []
    exceptions = []
    for fut in todo:
        if fut.exception() is not None:
            exceptions.append(fut.exception())
        else:
            results.append(fut.result())
    if exceptions:
        raise aioxmpp.errors.GatherError(message, exceptions)
    return results
[ "def", "gather_reraise_multi", "(", "*", "fut_or_coros", ",", "message", "=", "\"gather_reraise_multi\"", ")", ":", "todo", "=", "[", "asyncio", ".", "ensure_future", "(", "fut_or_coro", ")", "for", "fut_or_coro", "in", "fut_or_coros", "]", "if", "not", "todo", ":", "return", "[", "]", "yield", "from", "asyncio", ".", "wait", "(", "todo", ")", "results", "=", "[", "]", "exceptions", "=", "[", "]", "for", "fut", "in", "todo", ":", "if", "fut", ".", "exception", "(", ")", "is", "not", "None", ":", "exceptions", ".", "append", "(", "fut", ".", "exception", "(", ")", ")", "else", ":", "results", ".", "append", "(", "fut", ".", "result", "(", ")", ")", "if", "exceptions", ":", "raise", "aioxmpp", ".", "errors", ".", "GatherError", "(", "message", ",", "exceptions", ")", "return", "results" ]
Wrap all the arguments `fut_or_coros` in futures with :func:`asyncio.ensure_future` and wait until all of them finish or fail.

:param fut_or_coros: the futures or coroutines to wait for
:type fut_or_coros: future or coroutine
:param message: the message included with the raised :class:`aioxmpp.errors.GatherError` in the case of failure.
:type message: :class:`str`
:returns: the list of the results of the arguments.
:raises aioxmpp.errors.GatherError: if any of the futures or coroutines fail.

If an exception was raised, reraise all exceptions wrapped in a :class:`aioxmpp.errors.GatherError` with the message set to `message`.

.. note::

   This is similar to the standard function :func:`asyncio.gather`, but avoids the in-band signalling of raised exceptions as return values, by raising exceptions bundled as a :class:`aioxmpp.errors.GatherError`.

.. note::

   Use this function only if you are either

   a) not interested in the return values, or

   b) only interested in the return values if all futures are successful.
[ "Wrap", "all", "the", "arguments", "fut_or_coros", "in", "futures", "with", ":", "func", ":", "asyncio", ".", "ensure_future", "and", "wait", "until", "all", "of", "them", "finish", "or", "fail", "." ]
python
train
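Usage sketch; the import path matches this record, and the example assumes the usual @asyncio.coroutine decoration that generator-based coroutines of this era carry, so the call can be awaited:

import asyncio

import aioxmpp.errors
from aioxmpp.utils import gather_reraise_multi


async def ok():
    return "done"


async def boom():
    raise RuntimeError("nope")


async def main():
    try:
        print(await gather_reraise_multi(ok(), boom(), message="demo"))
    except aioxmpp.errors.GatherError as exc:
        # All failures arrive bundled, never as in-band return values.
        print("at least one task failed:", exc)


asyncio.get_event_loop().run_until_complete(main())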
fracpete/python-weka-wrapper3
python/weka/core/classes.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/core/classes.py#L90-L105
def get_classname(obj): """ Returns the classname of the JB_Object, Python class or object. :param obj: the java object or Python class/object to get the classname for :type obj: object :return: the classname :rtype: str """ if isinstance(obj, javabridge.JB_Object): cls = javabridge.call(obj, "getClass", "()Ljava/lang/Class;") return javabridge.call(cls, "getName", "()Ljava/lang/String;") elif inspect.isclass(obj): return obj.__module__ + "." + obj.__name__ else: return get_classname(obj.__class__)
[ "def", "get_classname", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "javabridge", ".", "JB_Object", ")", ":", "cls", "=", "javabridge", ".", "call", "(", "obj", ",", "\"getClass\"", ",", "\"()Ljava/lang/Class;\"", ")", "return", "javabridge", ".", "call", "(", "cls", ",", "\"getName\"", ",", "\"()Ljava/lang/String;\"", ")", "elif", "inspect", ".", "isclass", "(", "obj", ")", ":", "return", "obj", ".", "__module__", "+", "\".\"", "+", "obj", ".", "__name__", "else", ":", "return", "get_classname", "(", "obj", ".", "__class__", ")" ]
Returns the classname of the JB_Object, Python class or object. :param obj: the java object or Python class/object to get the classname for :type obj: object :return: the classname :rtype: str
[ "Returns", "the", "classname", "of", "the", "JB_Object", "Python", "class", "or", "object", "." ]
python
train
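Quick examples of the pure-Python branches (the javabridge branch needs a running JVM, and importing the module at all assumes javabridge is installed); the import path matches this record:

from weka.core.classes import get_classname

print(get_classname(dict))     # class object   -> builtins.dict
print(get_classname("hello"))  # plain instance -> builtins.str (via its class)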
cosven/feeluown-core
fuocore/netease/api.py
https://github.com/cosven/feeluown-core/blob/62dc64638f62971b16be0a75c0b8c7ae2999869e/fuocore/netease/api.py#L116-L129
def search(self, s, stype=1, offset=0, total='true', limit=60):
        """get songs list from search keywords"""
        action = uri + '/search/get'
        data = {
            's': s,
            'type': stype,
            'offset': offset,
            'total': total,
            'limit': limit
        }
        resp = self.request('POST', action, data)
        if resp['code'] == 200:
            return resp['result']['songs']
        return []
[ "def", "search", "(", "self", ",", "s", ",", "stype", "=", "1", ",", "offset", "=", "0", ",", "total", "=", "'true'", ",", "limit", "=", "60", ")", ":", "action", "=", "uri", "+", "'/search/get'", "data", "=", "{", "'s'", ":", "s", ",", "'type'", ":", "stype", ",", "'offset'", ":", "offset", ",", "'total'", ":", "total", ",", "'limit'", ":", "limit", "}", "resp", "=", "self", ".", "request", "(", "'POST'", ",", "action", ",", "data", ")", "if", "resp", "[", "'code'", "]", "==", "200", ":", "return", "resp", "[", "'result'", "]", "[", "'songs'", "]", "return", "[", "]" ]
get songs list from search keywords
[ "get", "songs", "list", "from", "search", "keywords" ]
python
train
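Usage sketch once the limit argument is honoured; the client class name and the fields on each song dict are assumptions:

api = API()  # hypothetical name of the class this method lives on
songs = api.search("Beatles", stype=1, offset=0, limit=30)
for song in songs[:5]:
    print(song.get("id"), song.get("name"))  # field names are assumptions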
mwouts/jupytext
jupytext/cell_metadata.py
https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/cell_metadata.py#L314-L330
def try_eval_metadata(metadata, name): """Evaluate given metadata to a python object, if possible""" value = metadata[name] if not isinstance(value, (str, unicode)): return if (value.startswith('"') and value.endswith('"')) or (value.startswith("'") and value.endswith("'")): if name in ['active', 'magic_args', 'language']: metadata[name] = value[1:-1] return if value.startswith('c(') and value.endswith(')'): value = '[' + value[2:-1] + ']' elif value.startswith('list(') and value.endswith(')'): value = '[' + value[5:-1] + ']' try: metadata[name] = ast.literal_eval(value) except (SyntaxError, ValueError): return
[ "def", "try_eval_metadata", "(", "metadata", ",", "name", ")", ":", "value", "=", "metadata", "[", "name", "]", "if", "not", "isinstance", "(", "value", ",", "(", "str", ",", "unicode", ")", ")", ":", "return", "if", "(", "value", ".", "startswith", "(", "'\"'", ")", "and", "value", ".", "endswith", "(", "'\"'", ")", ")", "or", "(", "value", ".", "startswith", "(", "\"'\"", ")", "and", "value", ".", "endswith", "(", "\"'\"", ")", ")", ":", "if", "name", "in", "[", "'active'", ",", "'magic_args'", ",", "'language'", "]", ":", "metadata", "[", "name", "]", "=", "value", "[", "1", ":", "-", "1", "]", "return", "if", "value", ".", "startswith", "(", "'c('", ")", "and", "value", ".", "endswith", "(", "')'", ")", ":", "value", "=", "'['", "+", "value", "[", "2", ":", "-", "1", "]", "+", "']'", "elif", "value", ".", "startswith", "(", "'list('", ")", "and", "value", ".", "endswith", "(", "')'", ")", ":", "value", "=", "'['", "+", "value", "[", "5", ":", "-", "1", "]", "+", "']'", "try", ":", "metadata", "[", "name", "]", "=", "ast", ".", "literal_eval", "(", "value", ")", "except", "(", "SyntaxError", ",", "ValueError", ")", ":", "return" ]
Evaluate given metadata to a python object, if possible
[ "Evaluate", "given", "metadata", "to", "a", "python", "object", "if", "possible" ]
python
train
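Worked examples of each branch — plain literals, R-style c(...) vectors, and the quoted keys that are deliberately stripped rather than evaluated:

metadata = {"fig.width": "10", "tags": 'c("a", "b")', "language": '"python"'}

try_eval_metadata(metadata, "fig.width")  # literal_eval     -> 10
try_eval_metadata(metadata, "tags")       # c("a", "b")      -> ["a", "b"]
try_eval_metadata(metadata, "language")   # quotes stripped  -> "python"

print(metadata)  # {'fig.width': 10, 'tags': ['a', 'b'], 'language': 'python'}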
rm-hull/luma.core
luma/core/mixin.py
https://github.com/rm-hull/luma.core/blob/034b628fb304a01e77732a299c0b42e94d6443db/luma/core/mixin.py#L13-L40
def capabilities(self, width, height, rotate, mode="1"): """ Assigns attributes such as ``width``, ``height``, ``size`` and ``bounding_box`` correctly oriented from the supplied parameters. :param width: The device width. :type width: int :param height: The device height. :type height: int :param rotate: An integer value of 0 (default), 1, 2 or 3 only, where 0 is no rotation, 1 is rotate 90° clockwise, 2 is 180° rotation and 3 represents 270° rotation. :type rotate: int :param mode: The supported color model, one of ``"1"``, ``"RGB"`` or ``"RGBA"`` only. :type mode: str """ assert mode in ("1", "RGB", "RGBA") assert rotate in (0, 1, 2, 3) self._w = width self._h = height self.width = width if rotate % 2 == 0 else height self.height = height if rotate % 2 == 0 else width self.size = (self.width, self.height) self.bounding_box = (0, 0, self.width - 1, self.height - 1) self.rotate = rotate self.mode = mode self.persist = False
[ "def", "capabilities", "(", "self", ",", "width", ",", "height", ",", "rotate", ",", "mode", "=", "\"1\"", ")", ":", "assert", "mode", "in", "(", "\"1\"", ",", "\"RGB\"", ",", "\"RGBA\"", ")", "assert", "rotate", "in", "(", "0", ",", "1", ",", "2", ",", "3", ")", "self", ".", "_w", "=", "width", "self", ".", "_h", "=", "height", "self", ".", "width", "=", "width", "if", "rotate", "%", "2", "==", "0", "else", "height", "self", ".", "height", "=", "height", "if", "rotate", "%", "2", "==", "0", "else", "width", "self", ".", "size", "=", "(", "self", ".", "width", ",", "self", ".", "height", ")", "self", ".", "bounding_box", "=", "(", "0", ",", "0", ",", "self", ".", "width", "-", "1", ",", "self", ".", "height", "-", "1", ")", "self", ".", "rotate", "=", "rotate", "self", ".", "mode", "=", "mode", "self", ".", "persist", "=", "False" ]
Assigns attributes such as ``width``, ``height``, ``size`` and ``bounding_box`` correctly oriented from the supplied parameters. :param width: The device width. :type width: int :param height: The device height. :type height: int :param rotate: An integer value of 0 (default), 1, 2 or 3 only, where 0 is no rotation, 1 is rotate 90° clockwise, 2 is 180° rotation and 3 represents 270° rotation. :type rotate: int :param mode: The supported color model, one of ``"1"``, ``"RGB"`` or ``"RGBA"`` only. :type mode: str
[ "Assigns", "attributes", "such", "as", "width", "height", "size", "and", "bounding_box", "correctly", "oriented", "from", "the", "supplied", "parameters", "." ]
python
train
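A small demonstration of the rotation-aware bookkeeping, assuming the mixin is the lowercase `capabilities` class in luma.core.mixin (consistent with this record's path):

from luma.core.mixin import capabilities


class FakeDevice(capabilities):
    def __init__(self):
        self.capabilities(128, 64, rotate=1)


d = FakeDevice()
print(d.width, d.height)  # 64 128 -- odd rotations swap the dimensions
print(d.bounding_box)     # (0, 0, 63, 127)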
MediaFire/mediafire-python-open-sdk
mediafire/api.py
https://github.com/MediaFire/mediafire-python-open-sdk/blob/8f1f23db1b16f16e026f5c6777aec32d00baa05f/mediafire/api.py#L430-L439
def user_set_avatar(self, action=None, quick_key=None, url=None): """user/set_avatar http://www.mediafire.com/developers/core_api/1.3/user/#set_avatar """ return self.request("user/set_avatar", QueryParams({ "action": action, "quick_key": quick_key, "url": url }))
[ "def", "user_set_avatar", "(", "self", ",", "action", "=", "None", ",", "quick_key", "=", "None", ",", "url", "=", "None", ")", ":", "return", "self", ".", "request", "(", "\"user/set_avatar\"", ",", "QueryParams", "(", "{", "\"action\"", ":", "action", ",", "\"quick_key\"", ":", "quick_key", ",", "\"url\"", ":", "url", "}", ")", ")" ]
user/set_avatar http://www.mediafire.com/developers/core_api/1.3/user/#set_avatar
[ "user", "/", "set_avatar" ]
python
train
PmagPy/PmagPy
pmagpy/pmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L4554-L4578
def b_vdm(B, lat):
    """
    Converts a magnetic field value (input in units of tesla) to a
    virtual dipole moment (VDM) or a virtual axial dipole moment
    (VADM); output in units of Am^2

    Parameters
    ----------
    B: local magnetic field strength in tesla
    lat: latitude of site in degrees

    Returns
    ----------
    V(A)DM in units of Am^2

    Examples
    --------
    >>> pmag.b_vdm(33e-6,22)*1e-21
    71.58815974511788
    """
    # changed radius of the earth from 3.367e6 3/12/2010
    fact = ((6.371e6)**3) * 1e7
    colat = np.radians(90. - lat)
    return fact * B / (np.sqrt(1 + 3 * (np.cos(colat)**2)))
[ "def", "b_vdm", "(", "B", ",", "lat", ")", ":", "# changed radius of the earth from 3.367e6 3/12/2010", "fact", "=", "(", "(", "6.371e6", ")", "**", "3", ")", "*", "1e7", "colat", "=", "np", ".", "radians", "(", "90.", "-", "lat", ")", "return", "fact", "*", "B", "/", "(", "np", ".", "sqrt", "(", "1", "+", "3", "*", "(", "np", ".", "cos", "(", "colat", ")", "**", "2", ")", ")", ")" ]
Converts a magnetic field value (input in units of tesla) to a virtual dipole moment (VDM) or a virtual axial dipole moment (VADM); output in units of Am^2

Parameters
----------
B: local magnetic field strength in tesla
lat: latitude of site in degrees

Returns
----------
V(A)DM in units of Am^2

Examples
--------
>>> pmag.b_vdm(33e-6,22)*1e-21
71.58815974511788
[ "Converts", "a", "magnetic", "field", "value", "(", "input", "in", "units", "of", "tesla", ")", "to", "a", "virtual", "dipole", "moment", "(", "VDM", ")", "or", "a", "virtual", "axial", "dipole", "moment", "(", "VADM", ")", ";", "output", "in", "units", "of", "Am^2", ")" ]
python
train
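For reference, the dipole formula the function implements; the 1e7 in `fact` is 4π/μ0, since μ0 = 4π × 10⁻⁷ T·m/A:

    \mathrm{V(A)DM} = \frac{4\pi R_E^{3}}{\mu_0}\,\frac{B}{\sqrt{1 + 3\cos^{2}\theta_c}},
    \qquad \frac{4\pi}{\mu_0} = 10^{7}, \quad R_E = 6.371\times 10^{6}\ \mathrm{m},

where θ_c = 90° − lat is the colatitude, so `fact` = R_E³ × 10⁷. Plugging in B = 33 μT and lat = 22° reproduces the 7.1588 × 10²² Am² of the docstring example.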
saltstack/salt
salt/roster/terraform.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/roster/terraform.py#L128-L159
def _parse_state_file(state_file_path='terraform.tfstate'): ''' Parses the terraform state file passing different resource types to the right handler ''' ret = {} with salt.utils.files.fopen(state_file_path, 'r') as fh_: tfstate = salt.utils.json.load(fh_) modules = tfstate.get('modules') if not modules: log.error('Malformed tfstate file. No modules found') return ret for module in modules: resources = module.get('resources', []) for resource_name, resource in salt.ext.six.iteritems(resources): roster_entry = None if resource['type'] == 'salt_host': roster_entry = _handle_salt_host_resource(resource) if not roster_entry: continue minion_id = roster_entry.get(MINION_ID, resource.get('id')) if not minion_id: continue if MINION_ID in roster_entry: del roster_entry[MINION_ID] _add_ssh_key(roster_entry) ret[minion_id] = roster_entry return ret
[ "def", "_parse_state_file", "(", "state_file_path", "=", "'terraform.tfstate'", ")", ":", "ret", "=", "{", "}", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "state_file_path", ",", "'r'", ")", "as", "fh_", ":", "tfstate", "=", "salt", ".", "utils", ".", "json", ".", "load", "(", "fh_", ")", "modules", "=", "tfstate", ".", "get", "(", "'modules'", ")", "if", "not", "modules", ":", "log", ".", "error", "(", "'Malformed tfstate file. No modules found'", ")", "return", "ret", "for", "module", "in", "modules", ":", "resources", "=", "module", ".", "get", "(", "'resources'", ",", "[", "]", ")", "for", "resource_name", ",", "resource", "in", "salt", ".", "ext", ".", "six", ".", "iteritems", "(", "resources", ")", ":", "roster_entry", "=", "None", "if", "resource", "[", "'type'", "]", "==", "'salt_host'", ":", "roster_entry", "=", "_handle_salt_host_resource", "(", "resource", ")", "if", "not", "roster_entry", ":", "continue", "minion_id", "=", "roster_entry", ".", "get", "(", "MINION_ID", ",", "resource", ".", "get", "(", "'id'", ")", ")", "if", "not", "minion_id", ":", "continue", "if", "MINION_ID", "in", "roster_entry", ":", "del", "roster_entry", "[", "MINION_ID", "]", "_add_ssh_key", "(", "roster_entry", ")", "ret", "[", "minion_id", "]", "=", "roster_entry", "return", "ret" ]
Parses the terraform state file passing different resource types to the right handler
[ "Parses", "the", "terraform", "state", "file", "passing", "different", "resource", "types", "to", "the", "right", "handler" ]
python
train
aws/aws-encryption-sdk-python
src/aws_encryption_sdk/streaming_client.py
https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/src/aws_encryption_sdk/streaming_client.py#L859-L899
def _read_bytes_from_framed_body(self, b): """Reads the requested number of bytes from a streaming framed message body. :param int b: Number of bytes to read :returns: Bytes read from source stream and decrypted :rtype: bytes """ plaintext = b"" final_frame = False _LOGGER.debug("collecting %d bytes", b) while len(plaintext) < b and not final_frame: _LOGGER.debug("Reading frame") frame_data, final_frame = deserialize_frame( stream=self.source_stream, header=self._header, verifier=self.verifier ) _LOGGER.debug("Read complete for frame %d", frame_data.sequence_number) if frame_data.sequence_number != self.last_sequence_number + 1: raise SerializationError("Malformed message: frames out of order") self.last_sequence_number += 1 aad_content_string = aws_encryption_sdk.internal.utils.get_aad_content_string( content_type=self._header.content_type, is_final_frame=frame_data.final_frame ) associated_data = assemble_content_aad( message_id=self._header.message_id, aad_content_string=aad_content_string, seq_num=frame_data.sequence_number, length=len(frame_data.ciphertext), ) plaintext += decrypt( algorithm=self._header.algorithm, key=self._derived_data_key, encrypted_data=frame_data, associated_data=associated_data, ) plaintext_length = len(plaintext) _LOGGER.debug("bytes collected: %d", plaintext_length) if final_frame: _LOGGER.debug("Reading footer") self.footer = deserialize_footer(stream=self.source_stream, verifier=self.verifier) return plaintext
[ "def", "_read_bytes_from_framed_body", "(", "self", ",", "b", ")", ":", "plaintext", "=", "b\"\"", "final_frame", "=", "False", "_LOGGER", ".", "debug", "(", "\"collecting %d bytes\"", ",", "b", ")", "while", "len", "(", "plaintext", ")", "<", "b", "and", "not", "final_frame", ":", "_LOGGER", ".", "debug", "(", "\"Reading frame\"", ")", "frame_data", ",", "final_frame", "=", "deserialize_frame", "(", "stream", "=", "self", ".", "source_stream", ",", "header", "=", "self", ".", "_header", ",", "verifier", "=", "self", ".", "verifier", ")", "_LOGGER", ".", "debug", "(", "\"Read complete for frame %d\"", ",", "frame_data", ".", "sequence_number", ")", "if", "frame_data", ".", "sequence_number", "!=", "self", ".", "last_sequence_number", "+", "1", ":", "raise", "SerializationError", "(", "\"Malformed message: frames out of order\"", ")", "self", ".", "last_sequence_number", "+=", "1", "aad_content_string", "=", "aws_encryption_sdk", ".", "internal", ".", "utils", ".", "get_aad_content_string", "(", "content_type", "=", "self", ".", "_header", ".", "content_type", ",", "is_final_frame", "=", "frame_data", ".", "final_frame", ")", "associated_data", "=", "assemble_content_aad", "(", "message_id", "=", "self", ".", "_header", ".", "message_id", ",", "aad_content_string", "=", "aad_content_string", ",", "seq_num", "=", "frame_data", ".", "sequence_number", ",", "length", "=", "len", "(", "frame_data", ".", "ciphertext", ")", ",", ")", "plaintext", "+=", "decrypt", "(", "algorithm", "=", "self", ".", "_header", ".", "algorithm", ",", "key", "=", "self", ".", "_derived_data_key", ",", "encrypted_data", "=", "frame_data", ",", "associated_data", "=", "associated_data", ",", ")", "plaintext_length", "=", "len", "(", "plaintext", ")", "_LOGGER", ".", "debug", "(", "\"bytes collected: %d\"", ",", "plaintext_length", ")", "if", "final_frame", ":", "_LOGGER", ".", "debug", "(", "\"Reading footer\"", ")", "self", ".", "footer", "=", "deserialize_footer", "(", "stream", "=", "self", ".", "source_stream", ",", "verifier", "=", "self", ".", "verifier", ")", "return", "plaintext" ]
Reads the requested number of bytes from a streaming framed message body. :param int b: Number of bytes to read :returns: Bytes read from source stream and decrypted :rtype: bytes
[ "Reads", "the", "requested", "number", "of", "bytes", "from", "a", "streaming", "framed", "message", "body", "." ]
python
train
saimn/sigal
sigal/video.py
https://github.com/saimn/sigal/blob/912ca39991355d358dc85fd55c7aeabdd7acc386/sigal/video.py#L149-L192
def process_video(filepath, outpath, settings): """Process a video: resize, create thumbnail.""" logger = logging.getLogger(__name__) filename = os.path.split(filepath)[1] basename, ext = splitext(filename) try: if settings['use_orig'] and is_valid_html5_video(ext): outname = os.path.join(outpath, filename) utils.copy(filepath, outname, symlink=settings['orig_link']) else: valid_formats = ['mp4', 'webm'] video_format = settings['video_format'] if video_format not in valid_formats: logger.error('Invalid video_format. Please choose one of: %s', valid_formats) raise ValueError outname = os.path.join(outpath, basename + '.' + video_format) generate_video(filepath, outname, settings, options=settings.get(video_format + '_options')) except Exception: if logger.getEffectiveLevel() == logging.DEBUG: raise else: return Status.FAILURE if settings['make_thumbs']: thumb_name = os.path.join(outpath, get_thumb(settings, filename)) try: generate_thumbnail( outname, thumb_name, settings['thumb_size'], settings['thumb_video_delay'], fit=settings['thumb_fit'], options=settings['jpg_options'], converter=settings['video_converter']) except Exception: if logger.getEffectiveLevel() == logging.DEBUG: raise else: return Status.FAILURE return Status.SUCCESS
[ "def", "process_video", "(", "filepath", ",", "outpath", ",", "settings", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "filename", "=", "os", ".", "path", ".", "split", "(", "filepath", ")", "[", "1", "]", "basename", ",", "ext", "=", "splitext", "(", "filename", ")", "try", ":", "if", "settings", "[", "'use_orig'", "]", "and", "is_valid_html5_video", "(", "ext", ")", ":", "outname", "=", "os", ".", "path", ".", "join", "(", "outpath", ",", "filename", ")", "utils", ".", "copy", "(", "filepath", ",", "outname", ",", "symlink", "=", "settings", "[", "'orig_link'", "]", ")", "else", ":", "valid_formats", "=", "[", "'mp4'", ",", "'webm'", "]", "video_format", "=", "settings", "[", "'video_format'", "]", "if", "video_format", "not", "in", "valid_formats", ":", "logger", ".", "error", "(", "'Invalid video_format. Please choose one of: %s'", ",", "valid_formats", ")", "raise", "ValueError", "outname", "=", "os", ".", "path", ".", "join", "(", "outpath", ",", "basename", "+", "'.'", "+", "video_format", ")", "generate_video", "(", "filepath", ",", "outname", ",", "settings", ",", "options", "=", "settings", ".", "get", "(", "video_format", "+", "'_options'", ")", ")", "except", "Exception", ":", "if", "logger", ".", "getEffectiveLevel", "(", ")", "==", "logging", ".", "DEBUG", ":", "raise", "else", ":", "return", "Status", ".", "FAILURE", "if", "settings", "[", "'make_thumbs'", "]", ":", "thumb_name", "=", "os", ".", "path", ".", "join", "(", "outpath", ",", "get_thumb", "(", "settings", ",", "filename", ")", ")", "try", ":", "generate_thumbnail", "(", "outname", ",", "thumb_name", ",", "settings", "[", "'thumb_size'", "]", ",", "settings", "[", "'thumb_video_delay'", "]", ",", "fit", "=", "settings", "[", "'thumb_fit'", "]", ",", "options", "=", "settings", "[", "'jpg_options'", "]", ",", "converter", "=", "settings", "[", "'video_converter'", "]", ")", "except", "Exception", ":", "if", "logger", ".", "getEffectiveLevel", "(", ")", "==", "logging", ".", "DEBUG", ":", "raise", "else", ":", "return", "Status", ".", "FAILURE", "return", "Status", ".", "SUCCESS" ]
Process a video: resize, create thumbnail.
[ "Process", "a", "video", ":", "resize", "create", "thumbnail", "." ]
python
valid
RudolfCardinal/pythonlib
cardinal_pythonlib/interval.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/interval.py#L412-L431
def dayspan(startdate: datetime.date, enddate: datetime.date, include_end: bool = True) -> Optional["Interval"]: """ Returns an :class:`Interval` representing the date range given, from midnight at the start of the first day to midnight at the end of the last (i.e. at the start of the next day after the last), or if include_end is False, 24h before that. If the parameters are invalid, returns ``None``. """ if enddate < startdate: return None if enddate == startdate and include_end: return None start_dt = datetime.datetime.combine(startdate, datetime.time()) end_dt = datetime.datetime.combine(enddate, datetime.time()) if include_end: end_dt += datetime.timedelta(days=1) return Interval(start_dt, end_dt)
[ "def", "dayspan", "(", "startdate", ":", "datetime", ".", "date", ",", "enddate", ":", "datetime", ".", "date", ",", "include_end", ":", "bool", "=", "True", ")", "->", "Optional", "[", "\"Interval\"", "]", ":", "if", "enddate", "<", "startdate", ":", "return", "None", "if", "enddate", "==", "startdate", "and", "include_end", ":", "return", "None", "start_dt", "=", "datetime", ".", "datetime", ".", "combine", "(", "startdate", ",", "datetime", ".", "time", "(", ")", ")", "end_dt", "=", "datetime", ".", "datetime", ".", "combine", "(", "enddate", ",", "datetime", ".", "time", "(", ")", ")", "if", "include_end", ":", "end_dt", "+=", "datetime", ".", "timedelta", "(", "days", "=", "1", ")", "return", "Interval", "(", "start_dt", ",", "end_dt", ")" ]
Returns an :class:`Interval` representing the date range given, from midnight at the start of the first day to midnight at the end of the last (i.e. at the start of the next day after the last), or if include_end is False, 24h before that. If the parameters are invalid, returns ``None``.
[ "Returns", "an", ":", "class", ":", "Interval", "representing", "the", "date", "range", "given", "from", "midnight", "at", "the", "start", "of", "the", "first", "day", "to", "midnight", "at", "the", "end", "of", "the", "last", "(", "i", ".", "e", ".", "at", "the", "start", "of", "the", "next", "day", "after", "the", "last", ")", "or", "if", "include_end", "is", "False", "24h", "before", "that", "." ]
python
train
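Worked examples of the boundary behaviour; the import path matches this record:

import datetime

from cardinal_pythonlib.interval import dayspan

d1, d3 = datetime.date(2020, 1, 1), datetime.date(2020, 1, 3)

dayspan(d1, d3)                     # 2020-01-01 00:00 .. 2020-01-04 00:00
dayspan(d1, d3, include_end=False)  # 2020-01-01 00:00 .. 2020-01-03 00:00
dayspan(d3, d1)                     # None: end before start
dayspan(d1, d1)                     # None: zero-length span when the end day is included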
jobovy/galpy
galpy/util/bovy_plot.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/util/bovy_plot.py#L374-L458
def bovy_plot3d(*args,**kwargs):
    """
    NAME:

       bovy_plot3d

    PURPOSE:

       plot in 3d much as in 2d

    INPUT:

       see http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.plot

       xlabel - (raw string!) x-axis label, LaTeX math mode, no $s needed

       ylabel - (raw string!) y-axis label, LaTeX math mode, no $s needed

       xrange

       yrange

       overplot=True does not start a new figure

    OUTPUT:

    HISTORY:

       2011-01-08 - Written - Bovy (NYU)

    """
    overplot= kwargs.pop('overplot',False)
    if not overplot:
        pyplot.figure()
    ax=pyplot.gca(projection='3d')
    ax.set_autoscale_on(False)
    xlabel= kwargs.pop('xlabel',None)
    ylabel= kwargs.pop('ylabel',None)
    zlabel= kwargs.pop('zlabel',None)
    if 'xrange' in kwargs:
        xlimits= kwargs.pop('xrange')
    else:
        if isinstance(args[0],list):
            xlimits=(sc.array(args[0]).min(),sc.array(args[0]).max())
        else:
            xlimits=(args[0].min(),args[0].max())
    if 'yrange' in kwargs:
        ylimits= kwargs.pop('yrange')
    else:
        if isinstance(args[1],list):
            ylimits=(sc.array(args[1]).min(),sc.array(args[1]).max())
        else:
            ylimits=(args[1].min(),args[1].max())
    if 'zrange' in kwargs:
        zlimits= kwargs.pop('zrange')
    else:
        if isinstance(args[2],list):
            zlimits=(sc.array(args[2]).min(),sc.array(args[2]).max())
        else:
            zlimits=(args[2].min(),args[2].max())
    out= pyplot.plot(*args,**kwargs)
    if overplot:
        pass
    else:
        if xlabel != None:
            if xlabel[0] != '$':
                thisxlabel=r'$'+xlabel+'$'
            else:
                thisxlabel=xlabel
            ax.set_xlabel(thisxlabel)
        if ylabel != None:
            if ylabel[0] != '$':
                thisylabel=r'$'+ylabel+'$'
            else:
                thisylabel=ylabel
            ax.set_ylabel(thisylabel)
        if zlabel != None:
            if zlabel[0] != '$':
                thiszlabel=r'$'+zlabel+'$'
            else:
                thiszlabel=zlabel
            ax.set_zlabel(thiszlabel)
        ax.set_xlim3d(*xlimits)
        ax.set_ylim3d(*ylimits)
        ax.set_zlim3d(*zlimits)
    return out
[ "def", "bovy_plot3d", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "overplot", "=", "kwargs", ".", "pop", "(", "'overplot'", ",", "False", ")", "if", "not", "overplot", ":", "pyplot", ".", "figure", "(", ")", "ax", "=", "pyplot", ".", "gca", "(", "projection", "=", "'3d'", ")", "ax", ".", "set_autoscale_on", "(", "False", ")", "xlabel", "=", "kwargs", ".", "pop", "(", "'xlabel'", ",", "None", ")", "ylabel", "=", "kwargs", ".", "pop", "(", "'ylabel'", ",", "None", ")", "zlabel", "=", "kwargs", ".", "pop", "(", "'zlabel'", ",", "None", ")", "if", "'xrange'", "in", "kwargs", ":", "xlimits", "=", "kwargs", ".", "pop", "(", "'xrange'", ")", "else", ":", "if", "isinstance", "(", "args", "[", "0", "]", ",", "list", ")", ":", "xlimits", "=", "(", "sc", ".", "array", "(", "args", "[", "0", "]", ")", ".", "min", "(", ")", ",", "sc", ".", "array", "(", "args", "[", "0", "]", ")", ".", "max", "(", ")", ")", "else", ":", "xlimits", "=", "(", "args", "[", "0", "]", ".", "min", "(", ")", ",", "args", "[", "0", "]", ".", "max", "(", ")", ")", "if", "'yrange'", "in", "kwargs", ":", "ylimits", "=", "kwargs", ".", "pop", "(", "'yrange'", ")", "else", ":", "if", "isinstance", "(", "args", "[", "1", "]", ",", "list", ")", ":", "ylimits", "=", "(", "sc", ".", "array", "(", "args", "[", "1", "]", ")", ".", "min", "(", ")", ",", "sc", ".", "array", "(", "args", "[", "1", "]", ")", ".", "max", "(", ")", ")", "else", ":", "ylimits", "=", "(", "args", "[", "1", "]", ".", "min", "(", ")", ",", "args", "[", "1", "]", ".", "max", "(", ")", ")", "if", "'zrange'", "in", "kwargs", ":", "zlimits", "=", "kwargs", ".", "pop", "(", "'zrange'", ")", "else", ":", "if", "isinstance", "(", "args", "[", "2", "]", ",", "list", ")", ":", "zlimits", "=", "(", "sc", ".", "array", "(", "args", "[", "2", "]", ")", ".", "min", "(", ")", ",", "sc", ".", "array", "(", "args", "[", "2", "]", ")", ".", "max", "(", ")", ")", "else", ":", "zlimits", "=", "(", "args", "[", "2", "]", ".", "min", "(", ")", ",", "args", "[", "2", "]", ".", "max", "(", ")", ")", "out", "=", "pyplot", ".", "plot", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "overplot", ":", "pass", "else", ":", "if", "xlabel", "!=", "None", ":", "if", "xlabel", "[", "0", "]", "!=", "'$'", ":", "thisxlabel", "=", "r'$'", "+", "xlabel", "+", "'$'", "else", ":", "thisxlabel", "=", "xlabel", "ax", ".", "set_xlabel", "(", "thisxlabel", ")", "if", "ylabel", "!=", "None", ":", "if", "ylabel", "[", "0", "]", "!=", "'$'", ":", "thisylabel", "=", "r'$'", "+", "ylabel", "+", "'$'", "else", ":", "thisylabel", "=", "ylabel", "ax", ".", "set_ylabel", "(", "thisylabel", ")", "if", "zlabel", "!=", "None", ":", "if", "zlabel", "[", "0", "]", "!=", "'$'", ":", "thiszlabel", "=", "r'$'", "+", "zlabel", "+", "'$'", "else", ":", "thiszlabel", "=", "zlabel", "ax", ".", "set_zlabel", "(", "thiszlabel", ")", "ax", ".", "set_xlim3d", "(", "*", "xlimits", ")", "ax", ".", "set_ylim3d", "(", "*", "ylimits", ")", "ax", ".", "set_zlim3d", "(", "*", "zlimits", ")", "return", "out" ]
NAME: bovy_plot3d PURPOSE: plot in 3d much as in 2d INPUT: see http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.plot xlabel - (raw string!) x-axis label, LaTeX math mode, no $s needed ylabel - (raw string!) y-axis label, LaTeX math mode, no $s needed xrange yrange overplot=True does not start a new figure OUTPUT: HISTORY: 2011-01-08 - Written - Bovy (NYU)
[ "NAME", ":" ]
python
train
stephanepechard/projy
projy/cmdline.py
https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/projy/cmdline.py#L94-L110
def template_class_from_name(name):
    """ Return the template class object from a given name. """
    # import the right template module
    term = TerminalView()
    template_name = name + 'Template'
    try:
        __import__('projy.templates.' + template_name)
        template_mod = sys.modules['projy.templates.' + template_name]
    except ImportError:
        term.print_error_and_exit("Unable to find {}".format(name))
    # import the class from the module
    try:
        template_class = getattr(template_mod, template_name)
    except AttributeError:
        term.print_error_and_exit("Unable to create a template {}".format(name))
    return template_class()
[ "def", "template_class_from_name", "(", "name", ")", ":", "# import the right template module", "term", "=", "TerminalView", "(", ")", "template_name", "=", "name", "+", "'Template'", "try", ":", "__import__", "(", "'projy.templates.'", "+", "template_name", ")", "template_mod", "=", "sys", ".", "modules", "[", "'projy.templates.'", "+", "template_name", "]", "except", "ImportError", ":", "term", ".", "print_error_and_exit", "(", "\"Unable to find {}\"", ".", "format", "(", "name", ")", ")", "# import the class from the module", "try", ":", "template_class", "=", "getattr", "(", "template_mod", ",", "template_name", ")", "except", "AttributeError", ":", "term", ".", "print_error_and_exit", "(", "\"Unable to create a template {}\"", ".", "format", "(", "name", ")", ")", "return", "template_class", "(", ")" ]
Return the template class object from a given name.
[ "Return", "the", "template", "class", "object", "from", "a", "given", "name", "." ]
python
train
galaxyproject/pulsar
pulsar/client/action_mapper.py
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/action_mapper.py#L235-L242
def __process_action(self, action, file_type): """ Extension point to populate extra action information after an action has been created. """ if getattr(action, "inject_url", False): self.__inject_url(action, file_type) if getattr(action, "inject_ssh_properties", False): self.__inject_ssh_properties(action)
[ "def", "__process_action", "(", "self", ",", "action", ",", "file_type", ")", ":", "if", "getattr", "(", "action", ",", "\"inject_url\"", ",", "False", ")", ":", "self", ".", "__inject_url", "(", "action", ",", "file_type", ")", "if", "getattr", "(", "action", ",", "\"inject_ssh_properties\"", ",", "False", ")", ":", "self", ".", "__inject_ssh_properties", "(", "action", ")" ]
Extension point to populate extra action information after an action has been created.
[ "Extension", "point", "to", "populate", "extra", "action", "information", "after", "an", "action", "has", "been", "created", "." ]
python
train
aegirhall/console-menu
consolemenu/menu_formatter.py
https://github.com/aegirhall/console-menu/blob/1a28959d6f1dd6ac79c87b11efd8529d05532422/consolemenu/menu_formatter.py#L256-L288
def format(self, title=None, subtitle=None, prologue_text=None, epilogue_text=None, items=None): """ Format the menu and return as a string. :return: a string representation of the formatted menu. """ self.clear_data() content = '' # Header Section if title is not None: self.__header.title = title if subtitle is not None: self.__header.subtitle = subtitle sections = [self.__header] # Prologue Section if prologue_text is not None: self.__prologue.text = prologue_text sections.append(self.__prologue) # Items Section if items is not None: self.__items_section.items = items sections.append(self.__items_section) # Epilogue Section if epilogue_text is not None: self.__epilogue.text = epilogue_text sections.append(self.__epilogue) sections.append(self.__footer) sections.append(self.__prompt) for sect in sections: content += "\n".join(sect.generate()) # Don't add newline to prompt so input is on same line as prompt if not isinstance(sect, MenuPrompt): content += "\n" return content
[ "def", "format", "(", "self", ",", "title", "=", "None", ",", "subtitle", "=", "None", ",", "prologue_text", "=", "None", ",", "epilogue_text", "=", "None", ",", "items", "=", "None", ")", ":", "self", ".", "clear_data", "(", ")", "content", "=", "''", "# Header Section", "if", "title", "is", "not", "None", ":", "self", ".", "__header", ".", "title", "=", "title", "if", "subtitle", "is", "not", "None", ":", "self", ".", "__header", ".", "subtitle", "=", "subtitle", "sections", "=", "[", "self", ".", "__header", "]", "# Prologue Section", "if", "prologue_text", "is", "not", "None", ":", "self", ".", "__prologue", ".", "text", "=", "prologue_text", "sections", ".", "append", "(", "self", ".", "__prologue", ")", "# Items Section", "if", "items", "is", "not", "None", ":", "self", ".", "__items_section", ".", "items", "=", "items", "sections", ".", "append", "(", "self", ".", "__items_section", ")", "# Epilogue Section", "if", "epilogue_text", "is", "not", "None", ":", "self", ".", "__epilogue", ".", "text", "=", "epilogue_text", "sections", ".", "append", "(", "self", ".", "__epilogue", ")", "sections", ".", "append", "(", "self", ".", "__footer", ")", "sections", ".", "append", "(", "self", ".", "__prompt", ")", "for", "sect", "in", "sections", ":", "content", "+=", "\"\\n\"", ".", "join", "(", "sect", ".", "generate", "(", ")", ")", "# Don't add newline to prompt so input is on same line as prompt", "if", "not", "isinstance", "(", "sect", ",", "MenuPrompt", ")", ":", "content", "+=", "\"\\n\"", "return", "content" ]
Format the menu and return as a string. :return: a string representation of the formatted menu.
[ "Format", "the", "menu", "and", "return", "as", "a", "string", ".", ":", "return", ":", "a", "string", "representation", "of", "the", "formatted", "menu", "." ]
python
train
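A hedged usage sketch for the format() record above; the MenuFormatBuilder class name and its no-argument constructor are assumptions inferred from the module path, not stated in the record.

from consolemenu.menu_formatter import MenuFormatBuilder  # assumed import path

formatter = MenuFormatBuilder()
text = formatter.format(
    title="Main Menu",
    subtitle="Example",
    prologue_text="Pick an option below.",
    items=[],                     # normally a list of menu item objects
    epilogue_text="Done.",
)
print(text)                       # the prompt line carries no trailing newline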
saltstack/salt
salt/utils/cloud.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/cloud.py#L3097-L3158
def diff_node_cache(prov_dir, node, new_data, opts): ''' Check new node data against current cache. If data differ, fire an event which consists of the new node data. This function will only run if configured to do so in the main Salt Cloud configuration file (normally /etc/salt/cloud). .. code-block:: yaml diff_cache_events: True .. versionadded:: 2014.7.0 ''' if 'diff_cache_events' not in opts or not opts['diff_cache_events']: return if node is None: return path = '{0}.p'.format(os.path.join(prov_dir, node)) if not os.path.exists(path): event_data = _strip_cache_events(new_data, opts) fire_event( 'event', 'new node found', 'salt/cloud/{0}/cache_node_new'.format(node), args={'new_data': event_data}, sock_dir=opts.get( 'sock_dir', os.path.join(__opts__['sock_dir'], 'master')), transport=opts.get('transport', 'zeromq') ) return with salt.utils.files.fopen(path, 'r') as fh_: try: cache_data = salt.utils.data.decode( salt.utils.msgpack.load(fh_, encoding=MSGPACK_ENCODING)) except ValueError: log.warning('Cache for %s was corrupt: Deleting', node) cache_data = {} # Perform a simple diff between the old and the new data, and if it differs, # return both dicts. # TODO: Return an actual diff diff = salt.utils.compat.cmp(new_data, cache_data) if diff != 0: fire_event( 'event', 'node data differs', 'salt/cloud/{0}/cache_node_diff'.format(node), args={ 'new_data': _strip_cache_events(new_data, opts), 'cache_data': _strip_cache_events(cache_data, opts), }, sock_dir=opts.get( 'sock_dir', os.path.join(__opts__['sock_dir'], 'master')), transport=opts.get('transport', 'zeromq') )
[ "def", "diff_node_cache", "(", "prov_dir", ",", "node", ",", "new_data", ",", "opts", ")", ":", "if", "'diff_cache_events'", "not", "in", "opts", "or", "not", "opts", "[", "'diff_cache_events'", "]", ":", "return", "if", "node", "is", "None", ":", "return", "path", "=", "'{0}.p'", ".", "format", "(", "os", ".", "path", ".", "join", "(", "prov_dir", ",", "node", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "event_data", "=", "_strip_cache_events", "(", "new_data", ",", "opts", ")", "fire_event", "(", "'event'", ",", "'new node found'", ",", "'salt/cloud/{0}/cache_node_new'", ".", "format", "(", "node", ")", ",", "args", "=", "{", "'new_data'", ":", "event_data", "}", ",", "sock_dir", "=", "opts", ".", "get", "(", "'sock_dir'", ",", "os", ".", "path", ".", "join", "(", "__opts__", "[", "'sock_dir'", "]", ",", "'master'", ")", ")", ",", "transport", "=", "opts", ".", "get", "(", "'transport'", ",", "'zeromq'", ")", ")", "return", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "path", ",", "'r'", ")", "as", "fh_", ":", "try", ":", "cache_data", "=", "salt", ".", "utils", ".", "data", ".", "decode", "(", "salt", ".", "utils", ".", "msgpack", ".", "load", "(", "fh_", ",", "encoding", "=", "MSGPACK_ENCODING", ")", ")", "except", "ValueError", ":", "log", ".", "warning", "(", "'Cache for %s was corrupt: Deleting'", ",", "node", ")", "cache_data", "=", "{", "}", "# Perform a simple diff between the old and the new data, and if it differs,", "# return both dicts.", "# TODO: Return an actual diff", "diff", "=", "salt", ".", "utils", ".", "compat", ".", "cmp", "(", "new_data", ",", "cache_data", ")", "if", "diff", "!=", "0", ":", "fire_event", "(", "'event'", ",", "'node data differs'", ",", "'salt/cloud/{0}/cache_node_diff'", ".", "format", "(", "node", ")", ",", "args", "=", "{", "'new_data'", ":", "_strip_cache_events", "(", "new_data", ",", "opts", ")", ",", "'cache_data'", ":", "_strip_cache_events", "(", "cache_data", ",", "opts", ")", ",", "}", ",", "sock_dir", "=", "opts", ".", "get", "(", "'sock_dir'", ",", "os", ".", "path", ".", "join", "(", "__opts__", "[", "'sock_dir'", "]", ",", "'master'", ")", ")", ",", "transport", "=", "opts", ".", "get", "(", "'transport'", ",", "'zeromq'", ")", ")" ]
Check new node data against current cache. If data differ, fire an event which consists of the new node data. This function will only run if configured to do so in the main Salt Cloud configuration file (normally /etc/salt/cloud). .. code-block:: yaml diff_cache_events: True .. versionadded:: 2014.7.0
[ "Check", "new", "node", "data", "against", "current", "cache", ".", "If", "data", "differ", "fire", "an", "event", "which", "consists", "of", "the", "new", "node", "data", "." ]
python
train
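A minimal call-shape sketch for diff_node_cache(); every path, node name, and data value below is a placeholder.

opts = {
    'diff_cache_events': True,                 # the /etc/salt/cloud toggle
    'sock_dir': '/var/run/salt/master',
    'transport': 'zeromq',
}
new_data = {'id': 'web01', 'size': 'standard_large'}
# Fires salt/cloud/web01/cache_node_new for an unseen node, or
# salt/cloud/web01/cache_node_diff when the cached data differs.
diff_node_cache('/var/cache/salt/cloud/active/ec2', 'web01', new_data, opts)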
ronaldguillen/wave
wave/utils/breadcrumbs.py
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/utils/breadcrumbs.py#L6-L58
def get_breadcrumbs(url, request=None): """ Given a url returns a list of breadcrumbs, which are each a tuple of (name, url). """ from wave.reverse import preserve_builtin_query_params from wave.settings import api_settings from wave.views import APIView view_name_func = api_settings.VIEW_NAME_FUNCTION def breadcrumbs_recursive(url, breadcrumbs_list, prefix, seen): """ Add tuples of (name, url) to the breadcrumbs list, progressively chomping off parts of the url. """ try: (view, unused_args, unused_kwargs) = resolve(url) except Exception: pass else: # Check if this is a REST framework view, # and if so add it to the breadcrumbs cls = getattr(view, 'cls', None) if cls is not None and issubclass(cls, APIView): # Don't list the same view twice in a row. # Probably an optional trailing slash. if not seen or seen[-1] != view: suffix = getattr(view, 'suffix', None) name = view_name_func(cls, suffix) insert_url = preserve_builtin_query_params(prefix + url, request) breadcrumbs_list.insert(0, (name, insert_url)) seen.append(view) if url == '': # All done return breadcrumbs_list elif url.endswith('/'): # Drop trailing slash off the end and continue to try to # resolve more breadcrumbs url = url.rstrip('/') return breadcrumbs_recursive(url, breadcrumbs_list, prefix, seen) # Drop trailing non-slash off the end and continue to try to # resolve more breadcrumbs url = url[:url.rfind('/') + 1] return breadcrumbs_recursive(url, breadcrumbs_list, prefix, seen) prefix = get_script_prefix().rstrip('/') url = url[len(prefix):] return breadcrumbs_recursive(url, [], prefix, [])
[ "def", "get_breadcrumbs", "(", "url", ",", "request", "=", "None", ")", ":", "from", "wave", ".", "reverse", "import", "preserve_builtin_query_params", "from", "wave", ".", "settings", "import", "api_settings", "from", "wave", ".", "views", "import", "APIView", "view_name_func", "=", "api_settings", ".", "VIEW_NAME_FUNCTION", "def", "breadcrumbs_recursive", "(", "url", ",", "breadcrumbs_list", ",", "prefix", ",", "seen", ")", ":", "\"\"\"\n Add tuples of (name, url) to the breadcrumbs list,\n progressively chomping off parts of the url.\n \"\"\"", "try", ":", "(", "view", ",", "unused_args", ",", "unused_kwargs", ")", "=", "resolve", "(", "url", ")", "except", "Exception", ":", "pass", "else", ":", "# Check if this is a REST framework view,", "# and if so add it to the breadcrumbs", "cls", "=", "getattr", "(", "view", ",", "'cls'", ",", "None", ")", "if", "cls", "is", "not", "None", "and", "issubclass", "(", "cls", ",", "APIView", ")", ":", "# Don't list the same view twice in a row.", "# Probably an optional trailing slash.", "if", "not", "seen", "or", "seen", "[", "-", "1", "]", "!=", "view", ":", "suffix", "=", "getattr", "(", "view", ",", "'suffix'", ",", "None", ")", "name", "=", "view_name_func", "(", "cls", ",", "suffix", ")", "insert_url", "=", "preserve_builtin_query_params", "(", "prefix", "+", "url", ",", "request", ")", "breadcrumbs_list", ".", "insert", "(", "0", ",", "(", "name", ",", "insert_url", ")", ")", "seen", ".", "append", "(", "view", ")", "if", "url", "==", "''", ":", "# All done", "return", "breadcrumbs_list", "elif", "url", ".", "endswith", "(", "'/'", ")", ":", "# Drop trailing slash off the end and continue to try to", "# resolve more breadcrumbs", "url", "=", "url", ".", "rstrip", "(", "'/'", ")", "return", "breadcrumbs_recursive", "(", "url", ",", "breadcrumbs_list", ",", "prefix", ",", "seen", ")", "# Drop trailing non-slash off the end and continue to try to", "# resolve more breadcrumbs", "url", "=", "url", "[", ":", "url", ".", "rfind", "(", "'/'", ")", "+", "1", "]", "return", "breadcrumbs_recursive", "(", "url", ",", "breadcrumbs_list", ",", "prefix", ",", "seen", ")", "prefix", "=", "get_script_prefix", "(", ")", ".", "rstrip", "(", "'/'", ")", "url", "=", "url", "[", "len", "(", "prefix", ")", ":", "]", "return", "breadcrumbs_recursive", "(", "url", ",", "[", "]", ",", "prefix", ",", "[", "]", ")" ]
Given a url returns a list of breadcrumbs, which are each a tuple of (name, url).
[ "Given", "a", "url", "returns", "a", "list", "of", "breadcrumbs", "which", "are", "each", "a", "tuple", "of", "(", "name", "url", ")", "." ]
python
train
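A hedged sketch of calling get_breadcrumbs() from a Django view; the names in the example output are illustrative.

from wave.utils.breadcrumbs import get_breadcrumbs

def breadcrumb_context(request):
    # e.g. [('Api Root', '/api/'), ('User List', '/api/users/'),
    #       ('User Detail', '/api/users/1/')]
    return {'breadcrumbs': get_breadcrumbs(request.path, request)}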
michal-stuglik/django-blastplus
blastplus/utils.py
https://github.com/michal-stuglik/django-blastplus/blob/4f5e15fb9f8069c3bed5f8fd941c4b9891daad4b/blastplus/utils.py#L71-L96
def run_blast_commands(ncbicommandline_method, **keywords):
    """Runs a blastplus/tblastn search, collects the result and passes it as an xml temporary file. """

    # temporary files for output
    blast_out_tmp = tempfile.NamedTemporaryFile(mode="w+",delete=False)
    keywords['out'] = blast_out_tmp.name

    # unpack query temp file object
    query_file_object_tmp = keywords['query']
    keywords['query'] = query_file_object_tmp.name

    stderr = ''
    error_string = ''
    try:
        # formating blastplus command
        blastplusx_cline = ncbicommandline_method(**keywords)
        stdout, stderr = blastplusx_cline()
    except ApplicationError as e:
        error_string = "Runtime error: " + stderr + "\n" + e.cmd

    # remove query temp file
    os.unlink(query_file_object_tmp.name)
    # os.remove(query_file_object_tmp.name)

    return blast_out_tmp, error_string
[ "def", "run_blast_commands", "(", "ncbicommandline_method", ",", "*", "*", "keywords", ")", ":", "# temporary files for output", "blast_out_tmp", "=", "tempfile", ".", "NamedTemporaryFile", "(", "mode", "=", "\"w+\"", ",", "delete", "=", "False", ")", "keywords", "[", "'out'", "]", "=", "blast_out_tmp", ".", "name", "# unpack query temp file object", "query_file_object_tmp", "=", "keywords", "[", "'query'", "]", "keywords", "[", "'query'", "]", "=", "query_file_object_tmp", ".", "name", "stderr", "=", "''", "error_string", "=", "''", "try", ":", "# formating blastplus command", "blastplusx_cline", "=", "ncbicommandline_method", "(", "*", "*", "keywords", ")", "stdout", ",", "stderr", "=", "blastplusx_cline", "(", ")", "except", "ApplicationError", "as", "e", ":", "error_string", "=", "\"Runtime error: \"", "+", "stderr", "+", "\"\\n\"", "+", "e", ".", "cmd", "# remove query temp file", "os", ".", "unlink", "(", "query_file_object_tmp", ".", "name", ")", "# os.remove(query_file_object_tmp.name)", "return", "blast_out_tmp", ",", "error_string" ]
Runs a blastplus/tblastn search, collects the result and passes it as an xml temporary file.
[ "Runs", "blastplus", "/", "tblastn", "search", "collects", "result", "and", "pass", "as", "a", "xml", "temporary", "file", "." ]
python
train
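A hedged sketch using Biopython's NCBI command-line wrappers; the database name and sequence are placeholders. Note the query is passed as the temp-file object itself, which run_blast_commands() unpacks via its .name attribute.

import tempfile
from Bio.Blast.Applications import NcbiblastnCommandline

query_tmp = tempfile.NamedTemporaryFile(mode='w+', delete=False)
query_tmp.write('>q1\nACGTACGTACGT\n')
query_tmp.flush()

blast_out_tmp, error_string = run_blast_commands(
    NcbiblastnCommandline,
    query=query_tmp,      # file object, not a path
    db='my_blast_db',     # placeholder database
    outfmt=5,             # XML output, matching the docstring
)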
acorg/dark-matter
dark/sam.py
https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/sam.py#L272-L299
def parseFilteringOptions(cls, args, filterRead=None, storeQueryIds=False):
        """
        Parse command line options (added in C{addSAMFilteringOptions}).

        @param args: The command line arguments, as returned by
            C{argparse.parse_args}.
        @param filterRead: A one-argument function that accepts a read
            and returns C{None} if the read should be omitted in filtering
            or else a C{Read} instance.
        @param storeQueryIds: If C{True}, query ids will be stored as the
            SAM/BAM file is read.
        @return: A C{SAMFilter} instance.
        """
        referenceIds = (set(chain.from_iterable(args.referenceId))
                        if args.referenceId else None)

        return cls(
            args.samfile,
            filterRead=filterRead,
            referenceIds=referenceIds,
            storeQueryIds=storeQueryIds,
            dropUnmapped=args.dropUnmapped,
            dropSecondary=args.dropSecondary,
            dropSupplementary=args.dropSupplementary,
            dropDuplicates=args.dropDuplicates,
            keepQCFailures=args.keepQCFailures,
            minScore=args.minScore,
            maxScore=args.maxScore)
[ "def", "parseFilteringOptions", "(", "cls", ",", "args", ",", "filterRead", "=", "None", ",", "storeQueryIds", "=", "False", ")", ":", "referenceIds", "=", "(", "set", "(", "chain", ".", "from_iterable", "(", "args", ".", "referenceId", ")", ")", "if", "args", ".", "referenceId", "else", "None", ")", "return", "cls", "(", "args", ".", "samfile", ",", "filterRead", "=", "filterRead", ",", "referenceIds", "=", "referenceIds", ",", "storeQueryIds", "=", "storeQueryIds", ",", "dropUnmapped", "=", "args", ".", "dropUnmapped", ",", "dropSecondary", "=", "args", ".", "dropSecondary", ",", "dropSupplementary", "=", "args", ".", "dropSupplementary", ",", "dropDuplicates", "=", "args", ".", "dropDuplicates", ",", "keepQCFailures", "=", "args", ".", "keepQCFailures", ",", "minScore", "=", "args", ".", "minScore", ",", "maxScore", "=", "args", ".", "maxScore", ")" ]
Parse command line options (added in C{addSAMFilteringOptions}).

@param args: The command line arguments, as returned by
    C{argparse.parse_args}.
@param filterRead: A one-argument function that accepts a read
    and returns C{None} if the read should be omitted in filtering
    or else a C{Read} instance.
@param storeQueryIds: If C{True}, query ids will be stored as the
    SAM/BAM file is read.
@return: A C{SAMFilter} instance.
[ "Parse", "command", "line", "options", "(", "added", "in", "C", "{", "addSAMFilteringOptions", "}", "." ]
python
train
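A hedged sketch of the intended flow; the docstring mentions addSAMFilteringOptions, but its exact location on SAMFilter and the option names are assumptions.

import argparse
from dark.sam import SAMFilter

parser = argparse.ArgumentParser()
SAMFilter.addSAMFilteringOptions(parser)    # assumed companion helper
args = parser.parse_args(['--samfile', 'reads.bam', '--dropUnmapped'])
samFilter = SAMFilter.parseFilteringOptions(args)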
tensorflow/tensor2tensor
tensor2tensor/models/research/glow_ops.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L226-L261
def actnorm(name, x, logscale_factor=3., reverse=False, init=False, trainable=True): """x_{ij} = s x x_{ij} + b. Per-channel scaling and bias. If init is set to True, the scaling and bias are initialized such that the mean and variance of the output activations of the first minibatch are zero and one respectively. Args: name: variable scope. x: input logscale_factor: Used in actnorm_scale. Optimizes f(ls*s') instead of f(s) where s' = s / ls. Helps in faster convergence. reverse: forward or reverse operation. init: Whether or not to do data-dependent initialization. trainable: Returns: x: output after adding bias and scaling. objective: log(sum(s)) """ var_arg_scope = arg_scope([get_variable_ddi], trainable=trainable) var_scope = tf.variable_scope(name, reuse=tf.AUTO_REUSE) with var_scope, var_arg_scope: if not reverse: x = actnorm_center(name + "_center", x, reverse, init=init) x, objective = actnorm_scale( name + "_scale", x, logscale_factor=logscale_factor, reverse=reverse, init=init) else: x, objective = actnorm_scale( name + "_scale", x, logscale_factor=logscale_factor, reverse=reverse, init=init) x = actnorm_center(name + "_center", x, reverse, init=init) return x, objective
[ "def", "actnorm", "(", "name", ",", "x", ",", "logscale_factor", "=", "3.", ",", "reverse", "=", "False", ",", "init", "=", "False", ",", "trainable", "=", "True", ")", ":", "var_arg_scope", "=", "arg_scope", "(", "[", "get_variable_ddi", "]", ",", "trainable", "=", "trainable", ")", "var_scope", "=", "tf", ".", "variable_scope", "(", "name", ",", "reuse", "=", "tf", ".", "AUTO_REUSE", ")", "with", "var_scope", ",", "var_arg_scope", ":", "if", "not", "reverse", ":", "x", "=", "actnorm_center", "(", "name", "+", "\"_center\"", ",", "x", ",", "reverse", ",", "init", "=", "init", ")", "x", ",", "objective", "=", "actnorm_scale", "(", "name", "+", "\"_scale\"", ",", "x", ",", "logscale_factor", "=", "logscale_factor", ",", "reverse", "=", "reverse", ",", "init", "=", "init", ")", "else", ":", "x", ",", "objective", "=", "actnorm_scale", "(", "name", "+", "\"_scale\"", ",", "x", ",", "logscale_factor", "=", "logscale_factor", ",", "reverse", "=", "reverse", ",", "init", "=", "init", ")", "x", "=", "actnorm_center", "(", "name", "+", "\"_center\"", ",", "x", ",", "reverse", ",", "init", "=", "init", ")", "return", "x", ",", "objective" ]
x_{ij} = s x x_{ij} + b. Per-channel scaling and bias. If init is set to True, the scaling and bias are initialized such that the mean and variance of the output activations of the first minibatch are zero and one respectively. Args: name: variable scope. x: input logscale_factor: Used in actnorm_scale. Optimizes f(ls*s') instead of f(s) where s' = s / ls. Helps in faster convergence. reverse: forward or reverse operation. init: Whether or not to do data-dependent initialization. trainable: Returns: x: output after adding bias and scaling. objective: log(sum(s))
[ "x_", "{", "ij", "}", "=", "s", "x", "x_", "{", "ij", "}", "+", "b", ".", "Per", "-", "channel", "scaling", "and", "bias", "." ]
python
train
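A NumPy sketch of the per-channel affine map the docstring describes (x_ij = s * x_ij + b) and its log-determinant objective; this mirrors the math only, not the TensorFlow variable handling or the data-dependent init.

import numpy as np

def actnorm_forward(x, s, b):
    # x: (batch, height, width, channels); s, b: (channels,)
    y = x * s + b
    # log|det| accumulates sum(log|s|) once per spatial position
    objective = x.shape[1] * x.shape[2] * np.sum(np.log(np.abs(s)))
    return y, objective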
sebp/scikit-survival
sksurv/svm/survival_svm.py
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/svm/survival_svm.py#L825-L851
def predict(self, X): """Rank samples according to survival times Lower ranks indicate shorter survival, higher ranks longer survival. Parameters ---------- X : array-like, shape = (n_samples, n_features) The input samples. Returns ------- y : ndarray, shape = (n_samples,) Predicted ranks. """ val = numpy.dot(X, self.coef_) if hasattr(self, "intercept_"): val += self.intercept_ # Order by increasing survival time if objective is pure ranking if self.rank_ratio == 1: val *= -1 else: # model was fitted on log(time), transform to original scale val = numpy.exp(val) return val
[ "def", "predict", "(", "self", ",", "X", ")", ":", "val", "=", "numpy", ".", "dot", "(", "X", ",", "self", ".", "coef_", ")", "if", "hasattr", "(", "self", ",", "\"intercept_\"", ")", ":", "val", "+=", "self", ".", "intercept_", "# Order by increasing survival time if objective is pure ranking", "if", "self", ".", "rank_ratio", "==", "1", ":", "val", "*=", "-", "1", "else", ":", "# model was fitted on log(time), transform to original scale", "val", "=", "numpy", ".", "exp", "(", "val", ")", "return", "val" ]
Rank samples according to survival times Lower ranks indicate shorter survival, higher ranks longer survival. Parameters ---------- X : array-like, shape = (n_samples, n_features) The input samples. Returns ------- y : ndarray, shape = (n_samples,) Predicted ranks.
[ "Rank", "samples", "according", "to", "survival", "times" ]
python
train
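A hedged usage sketch with scikit-survival's FastSurvivalSVM; numeric columns only, for brevity (real use would encode the categorical ones first).

from sksurv.datasets import load_veterans_lung_cancer
from sksurv.svm import FastSurvivalSVM

X, y = load_veterans_lung_cancer()
X_num = X.select_dtypes('number')
est = FastSurvivalSVM(rank_ratio=1.0)   # pure ranking objective
est.fit(X_num, y)
risk_ranks = est.predict(X_num)         # higher rank = longer predicted survival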
Metatab/metapack
metapack/jupyter/exporters.py
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/exporters.py#L292-L347
def from_notebook_node(self, nb, resources=None, **kw): """Create a Metatab package from a notebook node """ nb_copy = copy.deepcopy(nb) # The the package name and directory, either from the inlined Metatab doc, # or from the config try: self.output_dir = self.get_output_dir(nb) except NotebookError as e: # Notebook probably lacks a metatab doc. self.log.fatal(e) sys.exit(1) assert self.output_dir resources = self._init_resources(resources) resources['outputs'] = {} if 'language' in nb['metadata']: resources['language'] = nb['metadata']['language'].lower() # Do any other configured preprocessing nb_copy, resources = self._preprocess(nb_copy, resources) # The Notebook can set some terms with tags self.extra_terms = self.extract_terms(nb_copy) # Clear the output before executing self.clear_output(nb_copy) nb_copy, resources = self.exec_notebook(nb_copy, resources, self.notebook_dir) eld = ExtractLibDirs() eld.preprocess(nb_copy, {}) self.lib_dirs = eld.lib_dirs efm = ExtractFinalMetatabDoc() efm.preprocess(nb_copy, {}) if not efm.doc: raise MetapackError("No metatab doc") self.doc = efm.doc for section, term, value in self.extra_terms: self.doc[section].get_or_new_term(term, value) nb, _ = RemoveMetatab().preprocess(nb, {}) resources['outputs']['notebooks/{}.ipynb'.format(self.package_name)] = nbformat.writes(nb).encode('utf-8') return efm.doc.as_csv(), resources
[ "def", "from_notebook_node", "(", "self", ",", "nb", ",", "resources", "=", "None", ",", "*", "*", "kw", ")", ":", "nb_copy", "=", "copy", ".", "deepcopy", "(", "nb", ")", "# The the package name and directory, either from the inlined Metatab doc,", "# or from the config", "try", ":", "self", ".", "output_dir", "=", "self", ".", "get_output_dir", "(", "nb", ")", "except", "NotebookError", "as", "e", ":", "# Notebook probably lacks a metatab doc.", "self", ".", "log", ".", "fatal", "(", "e", ")", "sys", ".", "exit", "(", "1", ")", "assert", "self", ".", "output_dir", "resources", "=", "self", ".", "_init_resources", "(", "resources", ")", "resources", "[", "'outputs'", "]", "=", "{", "}", "if", "'language'", "in", "nb", "[", "'metadata'", "]", ":", "resources", "[", "'language'", "]", "=", "nb", "[", "'metadata'", "]", "[", "'language'", "]", ".", "lower", "(", ")", "# Do any other configured preprocessing", "nb_copy", ",", "resources", "=", "self", ".", "_preprocess", "(", "nb_copy", ",", "resources", ")", "# The Notebook can set some terms with tags", "self", ".", "extra_terms", "=", "self", ".", "extract_terms", "(", "nb_copy", ")", "# Clear the output before executing", "self", ".", "clear_output", "(", "nb_copy", ")", "nb_copy", ",", "resources", "=", "self", ".", "exec_notebook", "(", "nb_copy", ",", "resources", ",", "self", ".", "notebook_dir", ")", "eld", "=", "ExtractLibDirs", "(", ")", "eld", ".", "preprocess", "(", "nb_copy", ",", "{", "}", ")", "self", ".", "lib_dirs", "=", "eld", ".", "lib_dirs", "efm", "=", "ExtractFinalMetatabDoc", "(", ")", "efm", ".", "preprocess", "(", "nb_copy", ",", "{", "}", ")", "if", "not", "efm", ".", "doc", ":", "raise", "MetapackError", "(", "\"No metatab doc\"", ")", "self", ".", "doc", "=", "efm", ".", "doc", "for", "section", ",", "term", ",", "value", "in", "self", ".", "extra_terms", ":", "self", ".", "doc", "[", "section", "]", ".", "get_or_new_term", "(", "term", ",", "value", ")", "nb", ",", "_", "=", "RemoveMetatab", "(", ")", ".", "preprocess", "(", "nb", ",", "{", "}", ")", "resources", "[", "'outputs'", "]", "[", "'notebooks/{}.ipynb'", ".", "format", "(", "self", ".", "package_name", ")", "]", "=", "nbformat", ".", "writes", "(", "nb", ")", ".", "encode", "(", "'utf-8'", ")", "return", "efm", ".", "doc", ".", "as_csv", "(", ")", ",", "resources" ]
Create a Metatab package from a notebook node
[ "Create", "a", "Metatab", "package", "from", "a", "notebook", "node" ]
python
train
pymupdf/PyMuPDF
fitz/fitz.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L2563-L2574
def addLineAnnot(self, p1, p2): """Add 'Line' annot for points p1 and p2.""" CheckParent(self) val = _fitz.Page_addLineAnnot(self, p1, p2) if not val: return val.thisown = True val.parent = weakref.proxy(self) self._annot_refs[id(val)] = val return val
[ "def", "addLineAnnot", "(", "self", ",", "p1", ",", "p2", ")", ":", "CheckParent", "(", "self", ")", "val", "=", "_fitz", ".", "Page_addLineAnnot", "(", "self", ",", "p1", ",", "p2", ")", "if", "not", "val", ":", "return", "val", ".", "thisown", "=", "True", "val", ".", "parent", "=", "weakref", ".", "proxy", "(", "self", ")", "self", ".", "_annot_refs", "[", "id", "(", "val", ")", "]", "=", "val", "return", "val" ]
Add 'Line' annot for points p1 and p2.
[ "Add", "Line", "annot", "for", "points", "p1", "and", "p2", "." ]
python
train
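A minimal PyMuPDF sketch; the output path is illustrative, and these camelCase names were later renamed (add_line_annot, new_page) in newer releases.

import fitz  # PyMuPDF

doc = fitz.open()                                      # new, empty PDF
page = doc.newPage()                                   # default A4 page
annot = page.addLineAnnot(fitz.Point(72, 72), fitz.Point(300, 72))
doc.save('line_annot.pdf')                             # placeholder path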
ejeschke/ginga
ginga/util/io_rgb.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/util/io_rgb.py#L125-L211
def _imload(self, filepath, kwds): """Load an image file, guessing the format, and return a numpy array containing an RGB image. If EXIF keywords can be read they are returned in the dict _kwds_. """ start_time = time.time() typ, enc = mimetypes.guess_type(filepath) if not typ: typ = 'image/jpeg' typ, subtyp = typ.split('/') self.logger.debug("MIME type is %s/%s" % (typ, subtyp)) data_loaded = False if have_opencv and subtyp not in ['gif']: # First choice is OpenCv, because it supports high-bit depth # multiband images means = 'opencv' data_np = cv2.imread(filepath, cv2.IMREAD_ANYDEPTH + cv2.IMREAD_ANYCOLOR) if data_np is not None: data_loaded = True # funky indexing because opencv returns BGR images, # whereas PIL and others return RGB if len(data_np.shape) >= 3 and data_np.shape[2] >= 3: data_np = data_np[..., :: -1] # OpenCv doesn't "do" image metadata, so we punt to piexif # library (if installed) self.piexif_getexif(filepath, kwds) # OpenCv added a feature to do auto-orientation when loading # (see https://github.com/opencv/opencv/issues/4344) # So reset these values to prevent auto-orientation from # happening later kwds['Orientation'] = 1 kwds['Image Orientation'] = 1 # convert to working color profile, if can if self.clr_mgr.can_profile(): data_np = self.clr_mgr.profile_to_working_numpy(data_np, kwds) if not data_loaded and have_pil: means = 'PIL' image = PILimage.open(filepath) try: if hasattr(image, '_getexif'): info = image._getexif() if info is not None: for tag, value in info.items(): kwd = TAGS.get(tag, tag) kwds[kwd] = value elif have_exif: self.piexif_getexif(image.info["exif"], kwds) else: self.logger.warning("Please install 'piexif' module to get image metadata") except Exception as e: self.logger.warning("Failed to get image metadata: %s" % (str(e))) # convert to working color profile, if can if self.clr_mgr.can_profile(): image = self.clr_mgr.profile_to_working_pil(image, kwds) # convert from PIL to numpy data_np = np.array(image) if data_np is not None: data_loaded = True if (not data_loaded and (typ == 'image') and (subtyp in ('x-portable-pixmap', 'x-portable-greymap'))): # Special opener for PPM files, preserves high bit depth means = 'built-in' data_np = open_ppm(filepath) if data_np is not None: data_loaded = True if not data_loaded: raise ImageError("No way to load image format '%s/%s'" % ( typ, subtyp)) end_time = time.time() self.logger.debug("loading (%s) time %.4f sec" % ( means, end_time - start_time)) return data_np
[ "def", "_imload", "(", "self", ",", "filepath", ",", "kwds", ")", ":", "start_time", "=", "time", ".", "time", "(", ")", "typ", ",", "enc", "=", "mimetypes", ".", "guess_type", "(", "filepath", ")", "if", "not", "typ", ":", "typ", "=", "'image/jpeg'", "typ", ",", "subtyp", "=", "typ", ".", "split", "(", "'/'", ")", "self", ".", "logger", ".", "debug", "(", "\"MIME type is %s/%s\"", "%", "(", "typ", ",", "subtyp", ")", ")", "data_loaded", "=", "False", "if", "have_opencv", "and", "subtyp", "not", "in", "[", "'gif'", "]", ":", "# First choice is OpenCv, because it supports high-bit depth", "# multiband images", "means", "=", "'opencv'", "data_np", "=", "cv2", ".", "imread", "(", "filepath", ",", "cv2", ".", "IMREAD_ANYDEPTH", "+", "cv2", ".", "IMREAD_ANYCOLOR", ")", "if", "data_np", "is", "not", "None", ":", "data_loaded", "=", "True", "# funky indexing because opencv returns BGR images,", "# whereas PIL and others return RGB", "if", "len", "(", "data_np", ".", "shape", ")", ">=", "3", "and", "data_np", ".", "shape", "[", "2", "]", ">=", "3", ":", "data_np", "=", "data_np", "[", "...", ",", ":", ":", "-", "1", "]", "# OpenCv doesn't \"do\" image metadata, so we punt to piexif", "# library (if installed)", "self", ".", "piexif_getexif", "(", "filepath", ",", "kwds", ")", "# OpenCv added a feature to do auto-orientation when loading", "# (see https://github.com/opencv/opencv/issues/4344)", "# So reset these values to prevent auto-orientation from", "# happening later", "kwds", "[", "'Orientation'", "]", "=", "1", "kwds", "[", "'Image Orientation'", "]", "=", "1", "# convert to working color profile, if can", "if", "self", ".", "clr_mgr", ".", "can_profile", "(", ")", ":", "data_np", "=", "self", ".", "clr_mgr", ".", "profile_to_working_numpy", "(", "data_np", ",", "kwds", ")", "if", "not", "data_loaded", "and", "have_pil", ":", "means", "=", "'PIL'", "image", "=", "PILimage", ".", "open", "(", "filepath", ")", "try", ":", "if", "hasattr", "(", "image", ",", "'_getexif'", ")", ":", "info", "=", "image", ".", "_getexif", "(", ")", "if", "info", "is", "not", "None", ":", "for", "tag", ",", "value", "in", "info", ".", "items", "(", ")", ":", "kwd", "=", "TAGS", ".", "get", "(", "tag", ",", "tag", ")", "kwds", "[", "kwd", "]", "=", "value", "elif", "have_exif", ":", "self", ".", "piexif_getexif", "(", "image", ".", "info", "[", "\"exif\"", "]", ",", "kwds", ")", "else", ":", "self", ".", "logger", ".", "warning", "(", "\"Please install 'piexif' module to get image metadata\"", ")", "except", "Exception", "as", "e", ":", "self", ".", "logger", ".", "warning", "(", "\"Failed to get image metadata: %s\"", "%", "(", "str", "(", "e", ")", ")", ")", "# convert to working color profile, if can", "if", "self", ".", "clr_mgr", ".", "can_profile", "(", ")", ":", "image", "=", "self", ".", "clr_mgr", ".", "profile_to_working_pil", "(", "image", ",", "kwds", ")", "# convert from PIL to numpy", "data_np", "=", "np", ".", "array", "(", "image", ")", "if", "data_np", "is", "not", "None", ":", "data_loaded", "=", "True", "if", "(", "not", "data_loaded", "and", "(", "typ", "==", "'image'", ")", "and", "(", "subtyp", "in", "(", "'x-portable-pixmap'", ",", "'x-portable-greymap'", ")", ")", ")", ":", "# Special opener for PPM files, preserves high bit depth", "means", "=", "'built-in'", "data_np", "=", "open_ppm", "(", "filepath", ")", "if", "data_np", "is", "not", "None", ":", "data_loaded", "=", "True", "if", "not", "data_loaded", ":", "raise", "ImageError", "(", "\"No way to load image format '%s/%s'\"", "%", 
"(", "typ", ",", "subtyp", ")", ")", "end_time", "=", "time", ".", "time", "(", ")", "self", ".", "logger", ".", "debug", "(", "\"loading (%s) time %.4f sec\"", "%", "(", "means", ",", "end_time", "-", "start_time", ")", ")", "return", "data_np" ]
Load an image file, guessing the format, and return a numpy array containing an RGB image. If EXIF keywords can be read they are returned in the dict _kwds_.
[ "Load", "an", "image", "file", "guessing", "the", "format", "and", "return", "a", "numpy", "array", "containing", "an", "RGB", "image", ".", "If", "EXIF", "keywords", "can", "be", "read", "they", "are", "returned", "in", "the", "dict", "_kwds_", "." ]
python
train
django-treebeard/django-treebeard
treebeard/models.py
https://github.com/django-treebeard/django-treebeard/blob/8042ee939cb45394909237da447f8925e3cc6aa3/treebeard/models.py#L43-L57
def get_foreign_keys(cls): """Get foreign keys and models they refer to, so we can pre-process the data for load_bulk """ foreign_keys = {} for field in cls._meta.fields: if ( field.get_internal_type() == 'ForeignKey' and field.name != 'parent' ): if django.VERSION >= (1, 9): foreign_keys[field.name] = field.remote_field.model else: foreign_keys[field.name] = field.rel.to return foreign_keys
[ "def", "get_foreign_keys", "(", "cls", ")", ":", "foreign_keys", "=", "{", "}", "for", "field", "in", "cls", ".", "_meta", ".", "fields", ":", "if", "(", "field", ".", "get_internal_type", "(", ")", "==", "'ForeignKey'", "and", "field", ".", "name", "!=", "'parent'", ")", ":", "if", "django", ".", "VERSION", ">=", "(", "1", ",", "9", ")", ":", "foreign_keys", "[", "field", ".", "name", "]", "=", "field", ".", "remote_field", ".", "model", "else", ":", "foreign_keys", "[", "field", ".", "name", "]", "=", "field", ".", "rel", ".", "to", "return", "foreign_keys" ]
Get foreign keys and models they refer to, so we can pre-process the data for load_bulk
[ "Get", "foreign", "keys", "and", "models", "they", "refer", "to", "so", "we", "can", "pre", "-", "process", "the", "data", "for", "load_bulk" ]
python
train
pygobject/pgi
pgi/importer.py
https://github.com/pygobject/pgi/blob/2090435df6241a15ec2a78379a36b738b728652c/pgi/importer.py#L117-L136
def get_import_stacklevel(import_hook): """Returns the stacklevel value for warnings.warn() for when the warning gets emitted by an imported module, but the warning should point at the code doing the import. Pass import_hook=True if the warning gets generated by an import hook (warn() gets called in load_module(), see PEP302) """ py_version = sys.version_info[:2] if py_version <= (3, 2): # 2.7 included return 4 if import_hook else 2 elif py_version == (3, 3): return 8 if import_hook else 10 elif py_version == (3, 4): return 10 if import_hook else 8 else: # fixed again in 3.5+, see https://bugs.python.org/issue24305 return 4 if import_hook else 2
[ "def", "get_import_stacklevel", "(", "import_hook", ")", ":", "py_version", "=", "sys", ".", "version_info", "[", ":", "2", "]", "if", "py_version", "<=", "(", "3", ",", "2", ")", ":", "# 2.7 included", "return", "4", "if", "import_hook", "else", "2", "elif", "py_version", "==", "(", "3", ",", "3", ")", ":", "return", "8", "if", "import_hook", "else", "10", "elif", "py_version", "==", "(", "3", ",", "4", ")", ":", "return", "10", "if", "import_hook", "else", "8", "else", ":", "# fixed again in 3.5+, see https://bugs.python.org/issue24305", "return", "4", "if", "import_hook", "else", "2" ]
Returns the stacklevel value for warnings.warn() for when the warning gets emitted by an imported module, but the warning should point at the code doing the import. Pass import_hook=True if the warning gets generated by an import hook (warn() gets called in load_module(), see PEP302)
[ "Returns", "the", "stacklevel", "value", "for", "warnings", ".", "warn", "()", "for", "when", "the", "warning", "gets", "emitted", "by", "an", "imported", "module", "but", "the", "warning", "should", "point", "at", "the", "code", "doing", "the", "import", "." ]
python
train
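A sketch of the intended call site: a deprecation warning emitted from a PEP 302 import hook, pointed back at the importing code.

import warnings

class DeprecationImporter:                  # illustrative hook class
    def load_module(self, fullname):
        warnings.warn(
            "%s is deprecated" % fullname,
            DeprecationWarning,
            stacklevel=get_import_stacklevel(import_hook=True),
        )
        # ... actual module loading would follow here ...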
molmod/molmod
molmod/binning.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/binning.py#L94-L102
def iter_surrounding(self, center_key): """Iterate over all bins surrounding the given bin""" for shift in self.neighbor_indexes: key = tuple(np.add(center_key, shift).astype(int)) if self.integer_cell is not None: key = self.wrap_key(key) bin = self._bins.get(key) if bin is not None: yield key, bin
[ "def", "iter_surrounding", "(", "self", ",", "center_key", ")", ":", "for", "shift", "in", "self", ".", "neighbor_indexes", ":", "key", "=", "tuple", "(", "np", ".", "add", "(", "center_key", ",", "shift", ")", ".", "astype", "(", "int", ")", ")", "if", "self", ".", "integer_cell", "is", "not", "None", ":", "key", "=", "self", ".", "wrap_key", "(", "key", ")", "bin", "=", "self", ".", "_bins", ".", "get", "(", "key", ")", "if", "bin", "is", "not", "None", ":", "yield", "key", ",", "bin" ]
Iterate over all bins surrounding the given bin
[ "Iterate", "over", "all", "bins", "surrounding", "the", "given", "bin" ]
python
train
saltstack/salt
salt/modules/boto_datapipeline.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_datapipeline.py#L222-L231
def _get_client(region, key, keyid, profile): ''' Get a boto connection to Data Pipeline. ''' session = _get_session(region, key, keyid, profile) if not session: log.error("Failed to get datapipeline client.") return None return session.client('datapipeline')
[ "def", "_get_client", "(", "region", ",", "key", ",", "keyid", ",", "profile", ")", ":", "session", "=", "_get_session", "(", "region", ",", "key", ",", "keyid", ",", "profile", ")", "if", "not", "session", ":", "log", ".", "error", "(", "\"Failed to get datapipeline client.\"", ")", "return", "None", "return", "session", ".", "client", "(", "'datapipeline'", ")" ]
Get a boto connection to Data Pipeline.
[ "Get", "a", "boto", "connection", "to", "Data", "Pipeline", "." ]
python
train
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/mds/apis/endpoints_api.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/mds/apis/endpoints_api.py#L127-L147
def get_endpoint_resources(self, device_id, **kwargs): # noqa: E501 """List the resources on an endpoint # noqa: E501 The list of resources is cached by Device Management Connect, so this call does not create a message to the device. **Example usage:** curl -X GET https://api.us-east-1.mbedcloud.com/v2/endpoints/{device-id} -H 'authorization: Bearer {api-key}' # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_endpoint_resources(device_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str device_id: A unique device ID for an endpoint. Note that the ID needs to be an exact match. You cannot use wildcards here. (required) :return: list[Resource] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.get_endpoint_resources_with_http_info(device_id, **kwargs) # noqa: E501 else: (data) = self.get_endpoint_resources_with_http_info(device_id, **kwargs) # noqa: E501 return data
[ "def", "get_endpoint_resources", "(", "self", ",", "device_id", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'asynchronous'", ")", ":", "return", "self", ".", "get_endpoint_resources_with_http_info", "(", "device_id", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "get_endpoint_resources_with_http_info", "(", "device_id", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
List the resources on an endpoint # noqa: E501 The list of resources is cached by Device Management Connect, so this call does not create a message to the device. **Example usage:** curl -X GET https://api.us-east-1.mbedcloud.com/v2/endpoints/{device-id} -H 'authorization: Bearer {api-key}' # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_endpoint_resources(device_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str device_id: A unique device ID for an endpoint. Note that the ID needs to be an exact match. You cannot use wildcards here. (required) :return: list[Resource] If the method is called asynchronously, returns the request thread.
[ "List", "the", "resources", "on", "an", "endpoint", "#", "noqa", ":", "E501" ]
python
train
nerdvegas/rez
src/rez/vendor/version/version.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/version/version.py#L297-L302
def copy(self): """Returns a copy of the version.""" other = Version(None) other.tokens = self.tokens[:] other.seps = self.seps[:] return other
[ "def", "copy", "(", "self", ")", ":", "other", "=", "Version", "(", "None", ")", "other", ".", "tokens", "=", "self", ".", "tokens", "[", ":", "]", "other", ".", "seps", "=", "self", ".", "seps", "[", ":", "]", "return", "other" ]
Returns a copy of the version.
[ "Returns", "a", "copy", "of", "the", "version", "." ]
python
train
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/extensions_v1beta1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/extensions_v1beta1_api.py#L7051-L7075
def replace_namespaced_daemon_set_status(self, name, namespace, body, **kwargs): # noqa: E501 """replace_namespaced_daemon_set_status # noqa: E501 replace status of the specified DaemonSet # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_daemon_set_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the DaemonSet (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1beta1DaemonSet body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1beta1DaemonSet If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_daemon_set_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501 else: (data) = self.replace_namespaced_daemon_set_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501 return data
[ "def", "replace_namespaced_daemon_set_status", "(", "self", ",", "name", ",", "namespace", ",", "body", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "replace_namespaced_daemon_set_status_with_http_info", "(", "name", ",", "namespace", ",", "body", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "replace_namespaced_daemon_set_status_with_http_info", "(", "name", ",", "namespace", ",", "body", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
replace_namespaced_daemon_set_status # noqa: E501 replace status of the specified DaemonSet # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_daemon_set_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the DaemonSet (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1beta1DaemonSet body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1beta1DaemonSet If the method is called asynchronously, returns the request thread.
[ "replace_namespaced_daemon_set_status", "#", "noqa", ":", "E501" ]
python
train
slackapi/python-slackclient
slack/web/client.py
https://github.com/slackapi/python-slackclient/blob/901341c0284fd81e6d2719d6a0502308760d83e4/slack/web/client.py#L547-L556
def files_comments_add(self, *, comment: str, file: str, **kwargs) -> SlackResponse: """Add a comment to an existing file. Args: comment (str): The body of the comment. e.g. 'Everyone should take a moment to read this file.' file (str): The file id. e.g. 'F1234467890' """ kwargs.update({"comment": comment, "file": file}) return self.api_call("files.comments.add", json=kwargs)
[ "def", "files_comments_add", "(", "self", ",", "*", ",", "comment", ":", "str", ",", "file", ":", "str", ",", "*", "*", "kwargs", ")", "->", "SlackResponse", ":", "kwargs", ".", "update", "(", "{", "\"comment\"", ":", "comment", ",", "\"file\"", ":", "file", "}", ")", "return", "self", ".", "api_call", "(", "\"files.comments.add\"", ",", "json", "=", "kwargs", ")" ]
Add a comment to an existing file. Args: comment (str): The body of the comment. e.g. 'Everyone should take a moment to read this file.' file (str): The file id. e.g. 'F1234467890'
[ "Add", "a", "comment", "to", "an", "existing", "file", "." ]
python
train
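A hedged usage sketch; the token and file id are placeholders (Slack has since retired the files.comments.add method upstream).

import slack

client = slack.WebClient(token="xoxb-...")   # placeholder token
response = client.files_comments_add(
    comment="Everyone should take a moment to read this file.",
    file="F1234467890",
)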
playpauseandstop/rororo
rororo/settings.py
https://github.com/playpauseandstop/rororo/blob/28a04e8028c29647941e727116335e9d6fd64c27/rororo/settings.py#L37-L67
def immutable_settings(defaults: Settings, **optionals: Any) -> types.MappingProxyType: r"""Initialize and return immutable Settings dictionary. Settings dictionary allows you to setup settings values from multiple sources and make sure that values cannot be changed, updated by anyone else after initialization. This helps keep things clear and not worry about hidden settings change somewhere around your web application. :param defaults: Read settings values from module or dict-like instance. :param \*\*optionals: Update base settings with optional values. In common additional values shouldn't be passed, if settings values already populated from local settings or environment. But in case of using application factories this makes sense:: from . import settings def create_app(**options): app = ... app.settings = immutable_settings(settings, **options) return app And yes each additional key overwrite default setting value. """ settings = {key: value for key, value in iter_settings(defaults)} for key, value in iter_settings(optionals): settings[key] = value return types.MappingProxyType(settings)
[ "def", "immutable_settings", "(", "defaults", ":", "Settings", ",", "*", "*", "optionals", ":", "Any", ")", "->", "types", ".", "MappingProxyType", ":", "settings", "=", "{", "key", ":", "value", "for", "key", ",", "value", "in", "iter_settings", "(", "defaults", ")", "}", "for", "key", ",", "value", "in", "iter_settings", "(", "optionals", ")", ":", "settings", "[", "key", "]", "=", "value", "return", "types", ".", "MappingProxyType", "(", "settings", ")" ]
r"""Initialize and return immutable Settings dictionary. Settings dictionary allows you to setup settings values from multiple sources and make sure that values cannot be changed, updated by anyone else after initialization. This helps keep things clear and not worry about hidden settings change somewhere around your web application. :param defaults: Read settings values from module or dict-like instance. :param \*\*optionals: Update base settings with optional values. In common additional values shouldn't be passed, if settings values already populated from local settings or environment. But in case of using application factories this makes sense:: from . import settings def create_app(**options): app = ... app.settings = immutable_settings(settings, **options) return app And yes each additional key overwrite default setting value.
[ "r", "Initialize", "and", "return", "immutable", "Settings", "dictionary", "." ]
python
train
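A sketch of the immutability guarantee; it assumes iter_settings() accepts a plain dict of UPPER_CASE keys as the defaults source.

from rororo.settings import immutable_settings

settings = immutable_settings({'DEBUG': False}, DEBUG=True)
assert settings['DEBUG'] is True         # optional kwargs override defaults
try:
    settings['DEBUG'] = False            # MappingProxyType rejects writes
except TypeError:
    pass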
awslabs/aws-sam-cli
samcli/local/apigw/local_apigw_service.py
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/local/apigw/local_apigw_service.py#L92-L102
def _generate_route_keys(self, methods, path): """ Generates the key to the _dict_of_routes based on the list of methods and path supplied :param list(str) methods: List of HTTP Methods :param str path: Path off the base url :return: str of Path:Method """ for method in methods: yield self._route_key(method, path)
[ "def", "_generate_route_keys", "(", "self", ",", "methods", ",", "path", ")", ":", "for", "method", "in", "methods", ":", "yield", "self", ".", "_route_key", "(", "method", ",", "path", ")" ]
Generates the key to the _dict_of_routes based on the list of methods and path supplied :param list(str) methods: List of HTTP Methods :param str path: Path off the base url :return: str of Path:Method
[ "Generates", "the", "key", "to", "the", "_dict_of_routes", "based", "on", "the", "list", "of", "methods", "and", "path", "supplied" ]
python
train
drericstrong/pyedna
pyedna/ezdna.py
https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/ezdna.py#L458-L508
def GetPoints(edna_service): """ Obtains all the points in the edna_service, including real-time values. :param edna_service: The full Site.Service name of the eDNA service. :return: A pandas DataFrame of points in the form [Tag, Value, Time, Description, Units] """ # Define all required variables in the correct ctypes format szServiceName = c_char_p(edna_service.encode('utf-8')) nStarting, pulKey, pdValue = c_ushort(0), c_ulong(0), c_double(-9999) szPoint, szTime = create_string_buffer(30), create_string_buffer(30) szStatus, szDesc = create_string_buffer(20), create_string_buffer(90) szUnits = create_string_buffer(20) szPoint2, szTime2 = create_string_buffer(30), create_string_buffer(30) szStatus2, szDesc2 = create_string_buffer(20), create_string_buffer(90) szUnits2, pdValue2 = create_string_buffer(20), c_double(-9999) nPoint, nTime, nStatus = c_ushort(30), c_ushort(30), c_ushort(20) nDesc, nUnits = c_ushort(90), c_ushort(20) # Call the eDNA function. nRet is zero if the function is successful. points = [] nRet = dna_dll.DnaGetPointEntry(szServiceName, nStarting, byref(pulKey), byref(szPoint), nPoint, byref(pdValue), byref(szTime), nTime, byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits) tag = _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits) if tag: points.append(tag) # Iterate across all the returned services while nRet == 0: nRet = dna_dll.DnaGetNextPointEntry(pulKey, byref(szPoint2), nPoint, byref(pdValue2), byref(szTime2), nTime, byref(szStatus2), nStatus, byref(szDesc2), nDesc, byref(szUnits2), nUnits) # We want to ensure only UTF-8 characters are returned. Ignoring # characters is slightly unsafe, but they should only occur in the # units or description, so it's not a huge issue. tag = _FormatPoints(szPoint2, pdValue2, szTime2, szStatus2, szDesc2, szUnits2) if tag: points.append(tag) # If no results were returned, raise a warning df = pd.DataFrame() if points: df = pd.DataFrame(points, columns=["Tag", "Value", "Time", "Status", "Description", "Units"]) else: warnings.warn("WARNING- No points were returned. Check that the " + "service exists and contains points.") return df
[ "def", "GetPoints", "(", "edna_service", ")", ":", "# Define all required variables in the correct ctypes format\r", "szServiceName", "=", "c_char_p", "(", "edna_service", ".", "encode", "(", "'utf-8'", ")", ")", "nStarting", ",", "pulKey", ",", "pdValue", "=", "c_ushort", "(", "0", ")", ",", "c_ulong", "(", "0", ")", ",", "c_double", "(", "-", "9999", ")", "szPoint", ",", "szTime", "=", "create_string_buffer", "(", "30", ")", ",", "create_string_buffer", "(", "30", ")", "szStatus", ",", "szDesc", "=", "create_string_buffer", "(", "20", ")", ",", "create_string_buffer", "(", "90", ")", "szUnits", "=", "create_string_buffer", "(", "20", ")", "szPoint2", ",", "szTime2", "=", "create_string_buffer", "(", "30", ")", ",", "create_string_buffer", "(", "30", ")", "szStatus2", ",", "szDesc2", "=", "create_string_buffer", "(", "20", ")", ",", "create_string_buffer", "(", "90", ")", "szUnits2", ",", "pdValue2", "=", "create_string_buffer", "(", "20", ")", ",", "c_double", "(", "-", "9999", ")", "nPoint", ",", "nTime", ",", "nStatus", "=", "c_ushort", "(", "30", ")", ",", "c_ushort", "(", "30", ")", ",", "c_ushort", "(", "20", ")", "nDesc", ",", "nUnits", "=", "c_ushort", "(", "90", ")", ",", "c_ushort", "(", "20", ")", "# Call the eDNA function. nRet is zero if the function is successful.\r", "points", "=", "[", "]", "nRet", "=", "dna_dll", ".", "DnaGetPointEntry", "(", "szServiceName", ",", "nStarting", ",", "byref", "(", "pulKey", ")", ",", "byref", "(", "szPoint", ")", ",", "nPoint", ",", "byref", "(", "pdValue", ")", ",", "byref", "(", "szTime", ")", ",", "nTime", ",", "byref", "(", "szStatus", ")", ",", "nStatus", ",", "byref", "(", "szDesc", ")", ",", "nDesc", ",", "byref", "(", "szUnits", ")", ",", "nUnits", ")", "tag", "=", "_FormatPoints", "(", "szPoint", ",", "pdValue", ",", "szTime", ",", "szStatus", ",", "szDesc", ",", "szUnits", ")", "if", "tag", ":", "points", ".", "append", "(", "tag", ")", "# Iterate across all the returned services\r", "while", "nRet", "==", "0", ":", "nRet", "=", "dna_dll", ".", "DnaGetNextPointEntry", "(", "pulKey", ",", "byref", "(", "szPoint2", ")", ",", "nPoint", ",", "byref", "(", "pdValue2", ")", ",", "byref", "(", "szTime2", ")", ",", "nTime", ",", "byref", "(", "szStatus2", ")", ",", "nStatus", ",", "byref", "(", "szDesc2", ")", ",", "nDesc", ",", "byref", "(", "szUnits2", ")", ",", "nUnits", ")", "# We want to ensure only UTF-8 characters are returned. Ignoring\r", "# characters is slightly unsafe, but they should only occur in the\r", "# units or description, so it's not a huge issue.\r", "tag", "=", "_FormatPoints", "(", "szPoint2", ",", "pdValue2", ",", "szTime2", ",", "szStatus2", ",", "szDesc2", ",", "szUnits2", ")", "if", "tag", ":", "points", ".", "append", "(", "tag", ")", "# If no results were returned, raise a warning\r", "df", "=", "pd", ".", "DataFrame", "(", ")", "if", "points", ":", "df", "=", "pd", ".", "DataFrame", "(", "points", ",", "columns", "=", "[", "\"Tag\"", ",", "\"Value\"", ",", "\"Time\"", ",", "\"Status\"", ",", "\"Description\"", ",", "\"Units\"", "]", ")", "else", ":", "warnings", ".", "warn", "(", "\"WARNING- No points were returned. Check that the \"", "+", "\"service exists and contains points.\"", ")", "return", "df" ]
Obtains all the points in the edna_service, including real-time values. :param edna_service: The full Site.Service name of the eDNA service. :return: A pandas DataFrame of points in the form [Tag, Value, Time, Description, Units]
[ "Obtains", "all", "the", "points", "in", "the", "edna_service", "including", "real", "-", "time", "values", ".", ":", "param", "edna_service", ":", "The", "full", "Site", ".", "Service", "name", "of", "the", "eDNA", "service", ".", ":", "return", ":", "A", "pandas", "DataFrame", "of", "points", "in", "the", "form", "[", "Tag", "Value", "Time", "Description", "Units", "]" ]
python
train
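A hedged usage sketch; the Site.Service name is a placeholder, and the call only works on machines where the eDNA client DLLs are installed.

import pyedna.ezdna as dna

points_df = dna.GetPoints("MYSITE.MYSERVICE")
if not points_df.empty:
    print(points_df[["Tag", "Value", "Time"]].head())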
spencerahill/aospy
aospy/calc.py
https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L331-L335
def _get_all_data(self, start_date, end_date): """Get the needed data from all of the vars in the calculation.""" return [self._get_input_data(var, start_date, end_date) for var in _replace_pressure(self.variables, self.dtype_in_vert)]
[ "def", "_get_all_data", "(", "self", ",", "start_date", ",", "end_date", ")", ":", "return", "[", "self", ".", "_get_input_data", "(", "var", ",", "start_date", ",", "end_date", ")", "for", "var", "in", "_replace_pressure", "(", "self", ".", "variables", ",", "self", ".", "dtype_in_vert", ")", "]" ]
Get the needed data from all of the vars in the calculation.
[ "Get", "the", "needed", "data", "from", "all", "of", "the", "vars", "in", "the", "calculation", "." ]
python
train
pypa/setuptools
setuptools/package_index.py
https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/package_index.py#L667-L678
def fetch(self, requirement, tmpdir, force_scan=False, source=False): """Obtain a file suitable for fulfilling `requirement` DEPRECATED; use the ``fetch_distribution()`` method now instead. For backward compatibility, this routine is identical but returns the ``location`` of the downloaded distribution instead of a distribution object. """ dist = self.fetch_distribution(requirement, tmpdir, force_scan, source) if dist is not None: return dist.location return None
[ "def", "fetch", "(", "self", ",", "requirement", ",", "tmpdir", ",", "force_scan", "=", "False", ",", "source", "=", "False", ")", ":", "dist", "=", "self", ".", "fetch_distribution", "(", "requirement", ",", "tmpdir", ",", "force_scan", ",", "source", ")", "if", "dist", "is", "not", "None", ":", "return", "dist", ".", "location", "return", "None" ]
Obtain a file suitable for fulfilling `requirement` DEPRECATED; use the ``fetch_distribution()`` method now instead. For backward compatibility, this routine is identical but returns the ``location`` of the downloaded distribution instead of a distribution object.
[ "Obtain", "a", "file", "suitable", "for", "fulfilling", "requirement" ]
python
train
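The deprecation note points at fetch_distribution(); a hedged sketch of the replacement call, which yields a Distribution object rather than a bare path.

from pkg_resources import Requirement
from setuptools.package_index import PackageIndex

index = PackageIndex()
dist = index.fetch_distribution(Requirement.parse('example-pkg'), '/tmp/dl')
location = dist.location if dist is not None else None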
clchiou/startup
startup.py
https://github.com/clchiou/startup/blob/13cbf3ce1deffbc10d33a5f64c396a73129a5929/startup.py#L350-L356
def write(self, value): """Write a (new) value to this variable.""" assert self.num_write_waits > 0, self self.num_write_waits -= 1 self.values.append(value) if self.readable: LOG.debug('%s is now readable', self.name)
[ "def", "write", "(", "self", ",", "value", ")", ":", "assert", "self", ".", "num_write_waits", ">", "0", ",", "self", "self", ".", "num_write_waits", "-=", "1", "self", ".", "values", ".", "append", "(", "value", ")", "if", "self", ".", "readable", ":", "LOG", ".", "debug", "(", "'%s is now readable'", ",", "self", ".", "name", ")" ]
Write a (new) value to this variable.
[ "Write", "a", "(", "new", ")", "value", "to", "this", "variable", "." ]
python
train
OpenHumans/open-humans-api
ohapi/command_line.py
https://github.com/OpenHumans/open-humans-api/blob/ca2a28cf5d55cfdae13dd222ba58c25565bdb86e/ohapi/command_line.py#L444-L453
def oauth2_auth_url_cli(redirect_uri=None, client_id=None, base_url=OH_BASE_URL): """ Command line function for obtaining the Oauth2 url. For more information visit :func:`oauth2_auth_url<ohapi.api.oauth2_auth_url>`. """ result = oauth2_auth_url(redirect_uri, client_id, base_url) print('The requested URL is : \r') print(result)
[ "def", "oauth2_auth_url_cli", "(", "redirect_uri", "=", "None", ",", "client_id", "=", "None", ",", "base_url", "=", "OH_BASE_URL", ")", ":", "result", "=", "oauth2_auth_url", "(", "redirect_uri", ",", "client_id", ",", "base_url", ")", "print", "(", "'The requested URL is : \\r'", ")", "print", "(", "result", ")" ]
Command line function for obtaining the Oauth2 url. For more information visit :func:`oauth2_auth_url<ohapi.api.oauth2_auth_url>`.
[ "Command", "line", "function", "for", "obtaining", "the", "Oauth2", "url", ".", "For", "more", "information", "visit", ":", "func", ":", "oauth2_auth_url<ohapi", ".", "api", ".", "oauth2_auth_url", ">", "." ]
python
train
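A hedged sketch of calling the helper above directly as a plain Python function (the packaged CLI may wrap it in an argument parser); both values are placeholders for a real OAuth2 project:

    from ohapi.command_line import oauth2_auth_url_cli

    oauth2_auth_url_cli(redirect_uri='http://127.0.0.1:5000/complete',
                        client_id='MY_CLIENT_ID')  # prints the requested URL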
ergoithz/browsepy
browsepy/manager.py
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/manager.py#L385-L393
def clear(self): ''' Clear plugin manager state. Registered mimetype functions will be disposed after calling this method. ''' self._mimetype_functions = list(self._default_mimetype_functions) super(MimetypePluginManager, self).clear()
[ "def", "clear", "(", "self", ")", ":", "self", ".", "_mimetype_functions", "=", "list", "(", "self", ".", "_default_mimetype_functions", ")", "super", "(", "MimetypePluginManager", ",", "self", ")", ".", "clear", "(", ")" ]
Clear plugin manager state. Registered mimetype functions will be disposed after calling this method.
[ "Clear", "plugin", "manager", "state", "." ]
python
train
kmmbvnr/django-any
django_any/forms.py
https://github.com/kmmbvnr/django-any/blob/6f64ebd05476e2149e2e71deeefbb10f8edfc412/django_any/forms.py#L375-L383
def model_choice_field_data(field, **kwargs): """ Return one of first ten items for field queryset """ data = list(field.queryset[:10]) if data: return random.choice(data) else: raise TypeError('No %s available in queryset' % field.queryset.model)
[ "def", "model_choice_field_data", "(", "field", ",", "*", "*", "kwargs", ")", ":", "data", "=", "list", "(", "field", ".", "queryset", "[", ":", "10", "]", ")", "if", "data", ":", "return", "random", ".", "choice", "(", "data", ")", "else", ":", "raise", "TypeError", "(", "'No %s available in queryset'", "%", "field", ".", "queryset", ".", "model", ")" ]
Return one of first ten items for field queryset
[ "Return", "one", "of", "first", "ten", "items", "for", "field", "queryset" ]
python
test
nerdvegas/rez
src/rezgui/objects/Config.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rezgui/objects/Config.py#L15-L33
def value(self, key, type_=None): """Get the value of a setting. If `type` is not provided, the key must be for a known setting, present in `self.default_settings`. Conversely if `type` IS provided, the key must be for an unknown setting. """ if type_ is None: default = self._default_value(key) val = self._value(key, default) if type(val) == type(default): return val else: return self._convert_value(val, type(default)) else: val = self._value(key, None) if val is None: return None return self._convert_value(val, type_)
[ "def", "value", "(", "self", ",", "key", ",", "type_", "=", "None", ")", ":", "if", "type_", "is", "None", ":", "default", "=", "self", ".", "_default_value", "(", "key", ")", "val", "=", "self", ".", "_value", "(", "key", ",", "default", ")", "if", "type", "(", "val", ")", "==", "type", "(", "default", ")", ":", "return", "val", "else", ":", "return", "self", ".", "_convert_value", "(", "val", ",", "type", "(", "default", ")", ")", "else", ":", "val", "=", "self", ".", "_value", "(", "key", ",", "None", ")", "if", "val", "is", "None", ":", "return", "None", "return", "self", ".", "_convert_value", "(", "val", ",", "type_", ")" ]
Get the value of a setting. If `type` is not provided, the key must be for a known setting, present in `self.default_settings`. Conversely if `type` IS provided, the key must be for an unknown setting.
[ "Get", "the", "value", "of", "a", "setting", "." ]
python
train
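A minimal sketch of the two lookup modes, assuming config is an instance of the class above; both key names are hypothetical:

    size = config.value('font_size')              # key present in default_settings, type inferred
    path = config.value('plugins/last_dir', str)  # unknown key, so the type must be given

Per the docstring, passing type_ for a known key (or omitting it for an unknown one) violates the documented contract.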
cisco-sas/kitty
kitty/model/low_level/container.py
https://github.com/cisco-sas/kitty/blob/cb0760989dcdfe079e43ac574d872d0b18953a32/kitty/model/low_level/container.py#L135-L147
def set_offset(self, offset): ''' Set the absolute offset of current field, if the field should have default value, set the offset of the sub fields as well. :param offset: absolute offset of this field (in bits) ''' super(Container, self).set_offset(offset) if self.is_default(): for field in self._fields: field.set_offset(offset) offset += len(field._current_rendered)
[ "def", "set_offset", "(", "self", ",", "offset", ")", ":", "super", "(", "Container", ",", "self", ")", ".", "set_offset", "(", "offset", ")", "if", "self", ".", "is_default", "(", ")", ":", "for", "field", "in", "self", ".", "_fields", ":", "field", ".", "set_offset", "(", "offset", ")", "offset", "+=", "len", "(", "field", ".", "_current_rendered", ")" ]
Set the absolute offset of current field, if the field should have default value, set the offset of the sub fields as well. :param offset: absolute offset of this field (in bits)
[ "Set", "the", "absolute", "offset", "of", "current", "field", "if", "the", "field", "should", "have", "default", "value", "set", "the", "offset", "of", "the", "sub", "fields", "as", "well", "." ]
python
train
Clinical-Genomics/scout
scout/adapter/mongo/variant_loader.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/variant_loader.py#L290-L304
def load_variant(self, variant_obj): """Load a variant object Args: variant_obj(dict) Returns: inserted_id """ # LOG.debug("Loading variant %s", variant_obj['_id']) try: result = self.variant_collection.insert_one(variant_obj) except DuplicateKeyError as err: raise IntegrityError("Variant %s already exists in database" % variant_obj['_id']) return result
[ "def", "load_variant", "(", "self", ",", "variant_obj", ")", ":", "# LOG.debug(\"Loading variant %s\", variant_obj['_id'])", "try", ":", "result", "=", "self", ".", "variant_collection", ".", "insert_one", "(", "variant_obj", ")", "except", "DuplicateKeyError", "as", "err", ":", "raise", "IntegrityError", "(", "\"Variant %s already exists in database\"", ",", "variant_obj", "[", "'_id'", "]", ")", "return", "result" ]
Load a variant object Args: variant_obj(dict) Returns: inserted_id
[ "Load", "a", "variant", "object" ]
python
test
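A hedged sketch of loading one variant, assuming adapter is an initialized adapter exposing the method above; the _id is made up, and the exception is caught broadly to keep the sketch import-free (the library raises IntegrityError on duplicates):

    variant = {'_id': '1_880086_T_C_clinical'}
    try:
        result = adapter.load_variant(variant)
        print(result.inserted_id)  # pymongo InsertOneResult
    except Exception:
        pass  # variant was already loaded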
iotile/typedargs
typedargs/typeinfo.py
https://github.com/iotile/typedargs/blob/0a5091a664b9b4d836e091e9ba583e944f438fd8/typedargs/typeinfo.py#L350-L366
def load_external_types(self, path): """ Given a path to a python package or module, load that module, search for all defined variables inside of it that do not start with _ or __ and inject them into the type system. If any of the types cannot be injected, silently ignore them unless verbose is True. If path points to a module it should not contain the trailing .py since this is added automatically by the python import system """ folder, filename = os.path.split(path) try: fileobj, pathname, description = imp.find_module(filename, [folder]) mod = imp.load_module(filename, fileobj, pathname, description) except ImportError as exc: raise ArgumentError("could not import module in order to load external types", module_path=path, parent_directory=folder, module_name=filename, error=str(exc)) self.load_type_module(mod)
[ "def", "load_external_types", "(", "self", ",", "path", ")", ":", "folder", ",", "filename", "=", "os", ".", "path", ".", "split", "(", "path", ")", "try", ":", "fileobj", ",", "pathname", ",", "description", "=", "imp", ".", "find_module", "(", "filename", ",", "[", "folder", "]", ")", "mod", "=", "imp", ".", "load_module", "(", "filename", ",", "fileobj", ",", "pathname", ",", "description", ")", "except", "ImportError", "as", "exc", ":", "raise", "ArgumentError", "(", "\"could not import module in order to load external types\"", ",", "module_path", "=", "path", ",", "parent_directory", "=", "folder", ",", "module_name", "=", "filename", ",", "error", "=", "str", "(", "exc", ")", ")", "self", ".", "load_type_module", "(", "mod", ")" ]
Given a path to a python package or module, load that module, search for all defined variables inside of it that do not start with _ or __ and inject them into the type system. If any of the types cannot be injected, silently ignore them unless verbose is True. If path points to a module it should not contain the trailing .py since this is added automatically by the python import system
[ "Given", "a", "path", "to", "a", "python", "package", "or", "module", "load", "that", "module", "search", "for", "all", "defined", "variables", "inside", "of", "it", "that", "do", "not", "start", "with", "_", "or", "__", "and", "inject", "them", "into", "the", "type", "system", ".", "If", "any", "of", "the", "types", "cannot", "be", "injected", "silently", "ignore", "them", "unless", "verbose", "is", "True", ".", "If", "path", "points", "to", "a", "module", "it", "should", "not", "contain", "the", "trailing", ".", "py", "since", "this", "is", "added", "automatically", "by", "the", "python", "import", "system" ]
python
test
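A short usage sketch, assuming type_system is an instance of the class above; the path is a placeholder and, per the docstring, omits the trailing .py:

    type_system.load_external_types('/opt/my_project/custom_types')

On failure the call raises ArgumentError carrying the module path and the underlying import error.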
JoelBender/bacpypes
py25/bacpypes/iocb.py
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/iocb.py#L415-L430
def abort(self, err): """Called by a client to abort all of the member transactions. When the last pending member is aborted the group callback function will be called.""" if _debug: IOGroup._debug("abort %r", err) # change the state to reflect that it was killed self.ioState = ABORTED self.ioError = err # abort all the members for iocb in self.ioMembers: iocb.abort(err) # notify the client self.trigger()
[ "def", "abort", "(", "self", ",", "err", ")", ":", "if", "_debug", ":", "IOGroup", ".", "_debug", "(", "\"abort %r\"", ",", "err", ")", "# change the state to reflect that it was killed", "self", ".", "ioState", "=", "ABORTED", "self", ".", "ioError", "=", "err", "# abort all the members", "for", "iocb", "in", "self", ".", "ioMembers", ":", "iocb", ".", "abort", "(", "err", ")", "# notify the client", "self", ".", "trigger", "(", ")" ]
Called by a client to abort all of the member transactions. When the last pending member is aborted the group callback function will be called.
[ "Called", "by", "a", "client", "to", "abort", "all", "of", "the", "member", "transactions", ".", "When", "the", "last", "pending", "member", "is", "aborted", "the", "group", "callback", "function", "will", "be", "called", "." ]
python
train
brutasse/graphite-api
graphite_api/functions.py
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L1464-L1492
def consolidateBy(requestContext, seriesList, consolidationFunc): """ Takes one metric or a wildcard seriesList and a consolidation function name. Valid function names are 'sum', 'average', 'min', and 'max'. When a graph is drawn where width of the graph size in pixels is smaller than the number of datapoints to be graphed, Graphite consolidates the values to to prevent line overlap. The consolidateBy() function changes the consolidation function from the default of 'average' to one of 'sum', 'max', or 'min'. This is especially useful in sales graphs, where fractional values make no sense and a 'sum' of consolidated values is appropriate. Example:: &target=consolidateBy(Sales.widgets.largeBlue, 'sum') &target=consolidateBy(Servers.web01.sda1.free_space, 'max') """ for series in seriesList: # datalib will throw an exception, so it's not necessary to validate # here series.consolidationFunc = consolidationFunc series.name = 'consolidateBy(%s,"%s")' % (series.name, series.consolidationFunc) series.pathExpression = series.name return seriesList
[ "def", "consolidateBy", "(", "requestContext", ",", "seriesList", ",", "consolidationFunc", ")", ":", "for", "series", "in", "seriesList", ":", "# datalib will throw an exception, so it's not necessary to validate", "# here", "series", ".", "consolidationFunc", "=", "consolidationFunc", "series", ".", "name", "=", "'consolidateBy(%s,\"%s\")'", "%", "(", "series", ".", "name", ",", "series", ".", "consolidationFunc", ")", "series", ".", "pathExpression", "=", "series", ".", "name", "return", "seriesList" ]
Takes one metric or a wildcard seriesList and a consolidation function name. Valid function names are 'sum', 'average', 'min', and 'max'. When a graph is drawn where width of the graph size in pixels is smaller than the number of datapoints to be graphed, Graphite consolidates the values to to prevent line overlap. The consolidateBy() function changes the consolidation function from the default of 'average' to one of 'sum', 'max', or 'min'. This is especially useful in sales graphs, where fractional values make no sense and a 'sum' of consolidated values is appropriate. Example:: &target=consolidateBy(Sales.widgets.largeBlue, 'sum') &target=consolidateBy(Servers.web01.sda1.free_space, 'max')
[ "Takes", "one", "metric", "or", "a", "wildcard", "seriesList", "and", "a", "consolidation", "function", "name", "." ]
python
train
MultipedRobotics/pyxl320
pyxl320/Packet.py
https://github.com/MultipedRobotics/pyxl320/blob/1a56540e208b028ee47d5fa0a7c7babcee0d9214/pyxl320/Packet.py#L474-L490
def packetToDict(pkt): """ Given a packet, this turns it into a dictionary ... is this useful? in: packet, array of numbers out: dictionary (key, value) """ d = { 'id': pkt[4], 'instruction': xl320.InstrToStr[pkt[7]], 'length': (pkt[6] << 8) + pkt[5], 'params': pkt[8:-2], 'crc': pkt[-2:] } return d
[ "def", "packetToDict", "(", "pkt", ")", ":", "d", "=", "{", "'id'", ":", "pkt", "[", "4", "]", ",", "'instruction'", ":", "xl320", ".", "InstrToStr", "[", "pkt", "[", "7", "]", "]", ",", "'length'", ":", "(", "pkt", "[", "6", "]", "<<", "8", ")", "+", "pkt", "[", "5", "]", ",", "'params'", ":", "pkt", "[", "8", ":", "-", "2", "]", ",", "'crc'", ":", "pkt", "[", "-", "2", ":", "]", "}", "return", "d" ]
Given a packet, this turns it into a dictionary ... is this useful? in: packet, array of numbers out: dictionary (key, value)
[ "Given", "a", "packet", "this", "turns", "it", "into", "a", "dictionary", "...", "is", "this", "useful?" ]
python
train
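The function depends on xl320.InstrToStr, so a fully self-contained illustration of the same field layout needs a stand-in lookup table; the packet bytes below are fabricated:

    pkt = [255, 255, 253, 0, 1, 7, 0, 2, 36, 0, 202, 137]  # made-up frame
    instr_names = {2: 'Read'}  # stand-in for xl320.InstrToStr

    d = {
        'id': pkt[4],
        'instruction': instr_names[pkt[7]],
        'length': (pkt[6] << 8) + pkt[5],  # little-endian 16-bit length
        'params': pkt[8:-2],
        'crc': pkt[-2:],
    }
    print(d)  # {'id': 1, 'instruction': 'Read', 'length': 7, 'params': [36, 0], 'crc': [202, 137]}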
cloudendpoints/endpoints-python
endpoints/apiserving.py
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/apiserving.py#L469-L494
def protorpc_to_endpoints_error(self, status, body): """Convert a ProtoRPC error to the format expected by Google Endpoints. If the body does not contain a ProtoRPC message in state APPLICATION_ERROR the status and body will be returned unchanged. Args: status: HTTP status of the response from the backend body: JSON-encoded error in format expected by Endpoints frontend. Returns: Tuple of (http status, body) """ try: rpc_error = self.__PROTOJSON.decode_message(remote.RpcStatus, body) except (ValueError, messages.ValidationError): rpc_error = remote.RpcStatus() if rpc_error.state == remote.RpcStatus.State.APPLICATION_ERROR: # Try to map to HTTP error code. error_class = _ERROR_NAME_MAP.get(rpc_error.error_name) if error_class: status, body = self.__write_error(error_class.http_status, rpc_error.error_message) return status, body
[ "def", "protorpc_to_endpoints_error", "(", "self", ",", "status", ",", "body", ")", ":", "try", ":", "rpc_error", "=", "self", ".", "__PROTOJSON", ".", "decode_message", "(", "remote", ".", "RpcStatus", ",", "body", ")", "except", "(", "ValueError", ",", "messages", ".", "ValidationError", ")", ":", "rpc_error", "=", "remote", ".", "RpcStatus", "(", ")", "if", "rpc_error", ".", "state", "==", "remote", ".", "RpcStatus", ".", "State", ".", "APPLICATION_ERROR", ":", "# Try to map to HTTP error code.", "error_class", "=", "_ERROR_NAME_MAP", ".", "get", "(", "rpc_error", ".", "error_name", ")", "if", "error_class", ":", "status", ",", "body", "=", "self", ".", "__write_error", "(", "error_class", ".", "http_status", ",", "rpc_error", ".", "error_message", ")", "return", "status", ",", "body" ]
Convert a ProtoRPC error to the format expected by Google Endpoints. If the body does not contain a ProtoRPC message in state APPLICATION_ERROR the status and body will be returned unchanged. Args: status: HTTP status of the response from the backend body: JSON-encoded error in format expected by Endpoints frontend. Returns: Tuple of (http status, body)
[ "Convert", "a", "ProtoRPC", "error", "to", "the", "format", "expected", "by", "Google", "Endpoints", "." ]
python
train
DAI-Lab/Copulas
copulas/multivariate/tree.py
https://github.com/DAI-Lab/Copulas/blob/821df61c3d36a6b81ef2883935f935c2eaaa862c/copulas/multivariate/tree.py#L140-L163
def get_tau_matrix(self): """Get tau matrix for adjacent pairs. Returns: :param tau: tau matrix for the current tree :type tau: np.ndarray """ num_edges = len(self.edges) tau = np.empty([num_edges, num_edges]) for i in range(num_edges): edge = self.edges[i] for j in edge.neighbors: if self.level == 1: left_u = self.u_matrix[:, edge.L] right_u = self.u_matrix[:, edge.R] else: left_parent, right_parent = edge.parents left_u, right_u = Edge.get_conditional_uni(left_parent, right_parent) tau[i, j], pvalue = scipy.stats.kendalltau(left_u, right_u) return tau
[ "def", "get_tau_matrix", "(", "self", ")", ":", "num_edges", "=", "len", "(", "self", ".", "edges", ")", "tau", "=", "np", ".", "empty", "(", "[", "num_edges", ",", "num_edges", "]", ")", "for", "i", "in", "range", "(", "num_edges", ")", ":", "edge", "=", "self", ".", "edges", "[", "i", "]", "for", "j", "in", "edge", ".", "neighbors", ":", "if", "self", ".", "level", "==", "1", ":", "left_u", "=", "self", ".", "u_matrix", "[", ":", ",", "edge", ".", "L", "]", "right_u", "=", "self", ".", "u_matrix", "[", ":", ",", "edge", ".", "R", "]", "else", ":", "left_parent", ",", "right_parent", "=", "edge", ".", "parents", "left_u", ",", "right_u", "=", "Edge", ".", "get_conditional_uni", "(", "left_parent", ",", "right_parent", ")", "tau", "[", "i", ",", "j", "]", ",", "pvalue", "=", "scipy", ".", "stats", ".", "kendalltau", "(", "left_u", ",", "right_u", ")", "return", "tau" ]
Get tau matrix for adjacent pairs. Returns: :param tau: tau matrix for the current tree :type tau: np.ndarray
[ "Get", "tau", "matrix", "for", "adjacent", "pairs", "." ]
python
train
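The per-pair statistic is plain SciPy; a self-contained sketch of the inner kendalltau call on two synthetic pseudo-observation columns:

    import numpy as np
    import scipy.stats

    rng = np.random.default_rng(0)
    u = rng.uniform(size=200)
    v = 0.7 * u + 0.3 * rng.uniform(size=200)  # dependent margin

    tau, pvalue = scipy.stats.kendalltau(u, v)
    print(tau)  # clearly positive for these dependent columns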
inspirehep/plotextractor
plotextractor/extractor.py
https://github.com/inspirehep/plotextractor/blob/12a65350fb9f32d496f9ea57908d9a2771b20474/plotextractor/extractor.py#L570-L771
def put_it_together(cur_image, caption, context, extracted_image_data, line_index, lines): """Put it together. Takes the current image(s) and caption(s) and assembles them into something useful in the extracted_image_data list. :param: cur_image (string || list): the image currently being dealt with, or the list of images, in the case of subimages :param: caption (string || list): the caption or captions currently in scope :param: extracted_image_data ([(string, string), (string, string), ...]): a list of tuples of images matched to captions from this document. :param: line_index (int): the index where we are in the lines (for searchback and searchforward purposes) :param: lines ([string, string, ...]): the lines in the TeX :return: (cur_image, caption, extracted_image_data): the same arguments it was sent, processed appropriately """ if type(cur_image) == list: if cur_image[MAIN_CAPTION_OR_IMAGE] == 'ERROR': cur_image[MAIN_CAPTION_OR_IMAGE] = '' for image in cur_image[SUB_CAPTION_OR_IMAGE]: if image == 'ERROR': cur_image[SUB_CAPTION_OR_IMAGE].remove(image) if cur_image != '' and caption != '': if type(cur_image) == list and type(caption) == list: if cur_image[MAIN_CAPTION_OR_IMAGE] != '' and\ caption[MAIN_CAPTION_OR_IMAGE] != '': extracted_image_data.append( (cur_image[MAIN_CAPTION_OR_IMAGE], caption[MAIN_CAPTION_OR_IMAGE], context)) if type(cur_image[MAIN_CAPTION_OR_IMAGE]) == list: # why is the main image a list? # it's a good idea to attach the main caption to other # things, but the main image can only be used once cur_image[MAIN_CAPTION_OR_IMAGE] = '' if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list: if type(caption[SUB_CAPTION_OR_IMAGE]) == list: for index in \ range(len(cur_image[SUB_CAPTION_OR_IMAGE])): if index < len(caption[SUB_CAPTION_OR_IMAGE]): long_caption = \ caption[MAIN_CAPTION_OR_IMAGE] + ' : ' + \ caption[SUB_CAPTION_OR_IMAGE][index] else: long_caption = \ caption[MAIN_CAPTION_OR_IMAGE] + ' : ' + \ 'Caption not extracted' extracted_image_data.append( (cur_image[SUB_CAPTION_OR_IMAGE][index], long_caption, context)) else: long_caption = caption[MAIN_CAPTION_OR_IMAGE] + \ ' : ' + caption[SUB_CAPTION_OR_IMAGE] for sub_image in cur_image[SUB_CAPTION_OR_IMAGE]: extracted_image_data.append( (sub_image, long_caption, context)) else: if type(caption[SUB_CAPTION_OR_IMAGE]) == list: long_caption = caption[MAIN_CAPTION_OR_IMAGE] for sub_cap in caption[SUB_CAPTION_OR_IMAGE]: long_caption = long_caption + ' : ' + sub_cap extracted_image_data.append( (cur_image[SUB_CAPTION_OR_IMAGE], long_caption, context)) else: # wtf are they lists for? extracted_image_data.append( (cur_image[SUB_CAPTION_OR_IMAGE], caption[SUB_CAPTION_OR_IMAGE], context)) elif type(cur_image) == list: if cur_image[MAIN_CAPTION_OR_IMAGE] != '': extracted_image_data.append( (cur_image[MAIN_CAPTION_OR_IMAGE], caption, context)) if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list: for image in cur_image[SUB_CAPTION_OR_IMAGE]: extracted_image_data.append((image, caption, context)) else: extracted_image_data.append( (cur_image[SUB_CAPTION_OR_IMAGE], caption, context)) elif type(caption) == list: if caption[MAIN_CAPTION_OR_IMAGE] != '': extracted_image_data.append( (cur_image, caption[MAIN_CAPTION_OR_IMAGE], context)) if type(caption[SUB_CAPTION_OR_IMAGE]) == list: # multiple caps for one image: long_caption = caption[MAIN_CAPTION_OR_IMAGE] for subcap in caption[SUB_CAPTION_OR_IMAGE]: if long_caption != '': long_caption += ' : ' long_caption += subcap extracted_image_data.append((cur_image, long_caption, context)) else: extracted_image_data.append( (cur_image, caption[SUB_CAPTION_OR_IMAGE], context)) else: extracted_image_data.append((cur_image, caption, context)) elif cur_image != '' and caption == '': # we may have missed the caption somewhere. REASONABLE_SEARCHBACK = 25 REASONABLE_SEARCHFORWARD = 5 curly_no_tag_preceding = '(?<!\\w){' for searchback in range(REASONABLE_SEARCHBACK): if line_index - searchback < 0: continue back_line = lines[line_index - searchback] m = re.search(curly_no_tag_preceding, back_line) if m: open_curly = m.start() open_curly, open_curly_line, close_curly, \ close_curly_line = find_open_and_close_braces( line_index - searchback, open_curly, '{', lines) cap_begin = open_curly + 1 caption = assemble_caption(open_curly_line, cap_begin, close_curly_line, close_curly, lines) if type(cur_image) == list: extracted_image_data.append( (cur_image[MAIN_CAPTION_OR_IMAGE], caption, context)) for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]: extracted_image_data.append( (sub_img, caption, context)) else: extracted_image_data.append((cur_image, caption, context)) break if caption == '': for searchforward in range(REASONABLE_SEARCHFORWARD): if line_index + searchforward >= len(lines): break fwd_line = lines[line_index + searchforward] m = re.search(curly_no_tag_preceding, fwd_line) if m: open_curly = m.start() open_curly, open_curly_line, close_curly,\ close_curly_line = find_open_and_close_braces( line_index + searchforward, open_curly, '{', lines) cap_begin = open_curly + 1 caption = assemble_caption(open_curly_line, cap_begin, close_curly_line, close_curly, lines) if type(cur_image) == list: extracted_image_data.append( (cur_image[MAIN_CAPTION_OR_IMAGE], caption, context)) for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]: extracted_image_data.append( (sub_img, caption, context)) else: extracted_image_data.append( (cur_image, caption, context)) break if caption == '': if type(cur_image) == list: extracted_image_data.append( (cur_image[MAIN_CAPTION_OR_IMAGE], 'No caption found', context)) for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]: extracted_image_data.append( (sub_img, 'No caption', context)) else: extracted_image_data.append( (cur_image, 'No caption found', context)) elif caption != '' and cur_image == '': if type(caption) == list: long_caption = caption[MAIN_CAPTION_OR_IMAGE] for subcap in caption[SUB_CAPTION_OR_IMAGE]: long_caption = long_caption + ': ' + subcap else: long_caption = caption extracted_image_data.append(('', 'noimg' + long_caption, context)) # if we're leaving the figure, no sense keeping the data cur_image = '' caption = '' return cur_image, caption, extracted_image_data
[ "def", "put_it_together", "(", "cur_image", ",", "caption", ",", "context", ",", "extracted_image_data", ",", "line_index", ",", "lines", ")", ":", "if", "type", "(", "cur_image", ")", "==", "list", ":", "if", "cur_image", "[", "MAIN_CAPTION_OR_IMAGE", "]", "==", "'ERROR'", ":", "cur_image", "[", "MAIN_CAPTION_OR_IMAGE", "]", "=", "''", "for", "image", "in", "cur_image", "[", "SUB_CAPTION_OR_IMAGE", "]", ":", "if", "image", "==", "'ERROR'", ":", "cur_image", "[", "SUB_CAPTION_OR_IMAGE", "]", ".", "remove", "(", "image", ")", "if", "cur_image", "!=", "''", "and", "caption", "!=", "''", ":", "if", "type", "(", "cur_image", ")", "==", "list", "and", "type", "(", "caption", ")", "==", "list", ":", "if", "cur_image", "[", "MAIN_CAPTION_OR_IMAGE", "]", "!=", "''", "and", "caption", "[", "MAIN_CAPTION_OR_IMAGE", "]", "!=", "''", ":", "extracted_image_data", ".", "append", "(", "(", "cur_image", "[", "MAIN_CAPTION_OR_IMAGE", "]", ",", "caption", "[", "MAIN_CAPTION_OR_IMAGE", "]", ",", "context", ")", ")", "if", "type", "(", "cur_image", "[", "MAIN_CAPTION_OR_IMAGE", "]", ")", "==", "list", ":", "# why is the main image a list?", "# it's a good idea to attach the main caption to other", "# things, but the main image can only be used once", "cur_image", "[", "MAIN_CAPTION_OR_IMAGE", "]", "=", "''", "if", "type", "(", "cur_image", "[", "SUB_CAPTION_OR_IMAGE", "]", ")", "==", "list", ":", "if", "type", "(", "caption", "[", "SUB_CAPTION_OR_IMAGE", "]", ")", "==", "list", ":", "for", "index", "in", "range", "(", "len", "(", "cur_image", "[", "SUB_CAPTION_OR_IMAGE", "]", ")", ")", ":", "if", "index", "<", "len", "(", "caption", "[", "SUB_CAPTION_OR_IMAGE", "]", ")", ":", "long_caption", "=", "caption", "[", "MAIN_CAPTION_OR_IMAGE", "]", "+", "' : '", "+", "caption", "[", "SUB_CAPTION_OR_IMAGE", "]", "[", "index", "]", "else", ":", "long_caption", "=", "caption", "[", "MAIN_CAPTION_OR_IMAGE", "]", "+", "' : '", "+", "'Caption not extracted'", "extracted_image_data", ".", "append", "(", "(", "cur_image", "[", "SUB_CAPTION_OR_IMAGE", "]", "[", "index", "]", ",", "long_caption", ",", "context", ")", ")", "else", ":", "long_caption", "=", "caption", "[", "MAIN_CAPTION_OR_IMAGE", "]", "+", "' : '", "+", "caption", "[", "SUB_CAPTION_OR_IMAGE", "]", "for", "sub_image", "in", "cur_image", "[", "SUB_CAPTION_OR_IMAGE", "]", ":", "extracted_image_data", ".", "append", "(", "(", "sub_image", ",", "long_caption", ",", "context", ")", ")", "else", ":", "if", "type", "(", "caption", "[", "SUB_CAPTION_OR_IMAGE", "]", ")", "==", "list", ":", "long_caption", "=", "caption", "[", "MAIN_CAPTION_OR_IMAGE", "]", "for", "sub_cap", "in", "caption", "[", "SUB_CAPTION_OR_IMAGE", "]", ":", "long_caption", "=", "long_caption", "+", "' : '", "+", "sub_cap", "extracted_image_data", ".", "append", "(", "(", "cur_image", "[", "SUB_CAPTION_OR_IMAGE", "]", ",", "long_caption", ",", "context", ")", ")", "else", ":", "# wtf are they lists for?", "extracted_image_data", ".", "append", "(", "(", "cur_image", "[", "SUB_CAPTION_OR_IMAGE", "]", ",", "caption", "[", "SUB_CAPTION_OR_IMAGE", "]", ",", "context", ")", ")", "elif", "type", "(", "cur_image", ")", "==", "list", ":", "if", "cur_image", "[", "MAIN_CAPTION_OR_IMAGE", "]", "!=", "''", ":", "extracted_image_data", ".", "append", "(", "(", "cur_image", "[", "MAIN_CAPTION_OR_IMAGE", "]", ",", "caption", ",", "context", ")", ")", "if", "type", "(", "cur_image", "[", "SUB_CAPTION_OR_IMAGE", "]", ")", "==", "list", ":", "for", "image", "in", "cur_image", "[", "SUB_CAPTION_OR_IMAGE", "]", ":", 
"extracted_image_data", ".", "append", "(", "(", "image", ",", "caption", ",", "context", ")", ")", "else", ":", "extracted_image_data", ".", "append", "(", "(", "cur_image", "[", "SUB_CAPTION_OR_IMAGE", "]", ",", "caption", ",", "context", ")", ")", "elif", "type", "(", "caption", ")", "==", "list", ":", "if", "caption", "[", "MAIN_CAPTION_OR_IMAGE", "]", "!=", "''", ":", "extracted_image_data", ".", "append", "(", "(", "cur_image", ",", "caption", "[", "MAIN_CAPTION_OR_IMAGE", "]", ",", "context", ")", ")", "if", "type", "(", "caption", "[", "SUB_CAPTION_OR_IMAGE", "]", ")", "==", "list", ":", "# multiple caps for one image:", "long_caption", "=", "caption", "[", "MAIN_CAPTION_OR_IMAGE", "]", "for", "subcap", "in", "caption", "[", "SUB_CAPTION_OR_IMAGE", "]", ":", "if", "long_caption", "!=", "''", ":", "long_caption", "+=", "' : '", "long_caption", "+=", "subcap", "extracted_image_data", ".", "append", "(", "(", "cur_image", ",", "long_caption", ",", "context", ")", ")", "else", ":", "extracted_image_data", ".", "append", "(", "(", "cur_image", ",", "caption", "[", "SUB_CAPTION_OR_IMAGE", "]", ".", "context", ")", ")", "else", ":", "extracted_image_data", ".", "append", "(", "(", "cur_image", ",", "caption", ",", "context", ")", ")", "elif", "cur_image", "!=", "''", "and", "caption", "==", "''", ":", "# we may have missed the caption somewhere.", "REASONABLE_SEARCHBACK", "=", "25", "REASONABLE_SEARCHFORWARD", "=", "5", "curly_no_tag_preceding", "=", "'(?<!\\\\w){'", "for", "searchback", "in", "range", "(", "REASONABLE_SEARCHBACK", ")", ":", "if", "line_index", "-", "searchback", "<", "0", ":", "continue", "back_line", "=", "lines", "[", "line_index", "-", "searchback", "]", "m", "=", "re", ".", "search", "(", "curly_no_tag_preceding", ",", "back_line", ")", "if", "m", ":", "open_curly", "=", "m", ".", "start", "(", ")", "open_curly", ",", "open_curly_line", ",", "close_curly", ",", "close_curly_line", "=", "find_open_and_close_braces", "(", "line_index", "-", "searchback", ",", "open_curly", ",", "'{'", ",", "lines", ")", "cap_begin", "=", "open_curly", "+", "1", "caption", "=", "assemble_caption", "(", "open_curly_line", ",", "cap_begin", ",", "close_curly_line", ",", "close_curly", ",", "lines", ")", "if", "type", "(", "cur_image", ")", "==", "list", ":", "extracted_image_data", ".", "append", "(", "(", "cur_image", "[", "MAIN_CAPTION_OR_IMAGE", "]", ",", "caption", ",", "context", ")", ")", "for", "sub_img", "in", "cur_image", "[", "SUB_CAPTION_OR_IMAGE", "]", ":", "extracted_image_data", ".", "append", "(", "(", "sub_img", ",", "caption", ",", "context", ")", ")", "else", ":", "extracted_image_data", ".", "append", "(", "(", "cur_image", ",", "caption", ",", "context", ")", ")", "break", "if", "caption", "==", "''", ":", "for", "searchforward", "in", "range", "(", "REASONABLE_SEARCHFORWARD", ")", ":", "if", "line_index", "+", "searchforward", ">=", "len", "(", "lines", ")", ":", "break", "fwd_line", "=", "lines", "[", "line_index", "+", "searchforward", "]", "m", "=", "re", ".", "search", "(", "curly_no_tag_preceding", ",", "fwd_line", ")", "if", "m", ":", "open_curly", "=", "m", ".", "start", "(", ")", "open_curly", ",", "open_curly_line", ",", "close_curly", ",", "close_curly_line", "=", "find_open_and_close_braces", "(", "line_index", "+", "searchforward", ",", "open_curly", ",", "'{'", ",", "lines", ")", "cap_begin", "=", "open_curly", "+", "1", "caption", "=", "assemble_caption", "(", "open_curly_line", ",", "cap_begin", ",", "close_curly_line", ",", "close_curly", ",", "lines", 
")", "if", "type", "(", "cur_image", ")", "==", "list", ":", "extracted_image_data", ".", "append", "(", "(", "cur_image", "[", "MAIN_CAPTION_OR_IMAGE", "]", ",", "caption", ",", "context", ")", ")", "for", "sub_img", "in", "cur_image", "[", "SUB_CAPTION_OR_IMAGE", "]", ":", "extracted_image_data", ".", "append", "(", "(", "sub_img", ",", "caption", ",", "context", ")", ")", "else", ":", "extracted_image_data", ".", "append", "(", "(", "cur_image", ",", "caption", ",", "context", ")", ")", "break", "if", "caption", "==", "''", ":", "if", "type", "(", "cur_image", ")", "==", "list", ":", "extracted_image_data", ".", "append", "(", "(", "cur_image", "[", "MAIN_CAPTION_OR_IMAGE", "]", ",", "'No caption found'", ",", "context", ")", ")", "for", "sub_img", "in", "cur_image", "[", "SUB_CAPTION_OR_IMAGE", "]", ":", "extracted_image_data", ".", "append", "(", "(", "sub_img", ",", "'No caption'", ",", "context", ")", ")", "else", ":", "extracted_image_data", ".", "append", "(", "(", "cur_image", ",", "'No caption found'", ",", "context", ")", ")", "elif", "caption", "!=", "''", "and", "cur_image", "==", "''", ":", "if", "type", "(", "caption", ")", "==", "list", ":", "long_caption", "=", "caption", "[", "MAIN_CAPTION_OR_IMAGE", "]", "for", "subcap", "in", "caption", "[", "SUB_CAPTION_OR_IMAGE", "]", ":", "long_caption", "=", "long_caption", "+", "': '", "+", "subcap", "else", ":", "long_caption", "=", "caption", "extracted_image_data", ".", "append", "(", "(", "''", ",", "'noimg'", "+", "long_caption", ",", "context", ")", ")", "# if we're leaving the figure, no sense keeping the data", "cur_image", "=", "''", "caption", "=", "''", "return", "cur_image", ",", "caption", ",", "extracted_image_data" ]
Put it together. Takes the current image(s) and caption(s) and assembles them into something useful in the extracted_image_data list. :param: cur_image (string || list): the image currently being dealt with, or the list of images, in the case of subimages :param: caption (string || list): the caption or captions currently in scope :param: extracted_image_data ([(string, string), (string, string), ...]): a list of tuples of images matched to captions from this document. :param: line_index (int): the index where we are in the lines (for searchback and searchforward purposes) :param: lines ([string, string, ...]): the lines in the TeX :return: (cur_image, caption, extracted_image_data): the same arguments it was sent, processed appropriately
[ "Put", "it", "together", "." ]
python
train
pantsbuild/pants
src/python/pants/backend/python/subsystems/python_native_code.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/python/subsystems/python_native_code.py#L77-L91
def _get_targets_by_declared_platform_with_placeholders(self, targets_by_platform): """ Aggregates a dict that maps a platform string to a list of targets that specify the platform. If no targets have platforms arguments, return a dict containing platforms inherited from the PythonSetup object. :param tgts: a list of :class:`Target` objects. :returns: a dict mapping a platform string to a list of targets that specify the platform. """ if not targets_by_platform: for platform in self._python_setup.platforms: targets_by_platform[platform] = ['(No target) Platform inherited from either the ' '--platforms option or a pants.ini file.'] return targets_by_platform
[ "def", "_get_targets_by_declared_platform_with_placeholders", "(", "self", ",", "targets_by_platform", ")", ":", "if", "not", "targets_by_platform", ":", "for", "platform", "in", "self", ".", "_python_setup", ".", "platforms", ":", "targets_by_platform", "[", "platform", "]", "=", "[", "'(No target) Platform inherited from either the '", "'--platforms option or a pants.ini file.'", "]", "return", "targets_by_platform" ]
Aggregates a dict that maps a platform string to a list of targets that specify the platform. If no targets have platforms arguments, return a dict containing platforms inherited from the PythonSetup object. :param tgts: a list of :class:`Target` objects. :returns: a dict mapping a platform string to a list of targets that specify the platform.
[ "Aggregates", "a", "dict", "that", "maps", "a", "platform", "string", "to", "a", "list", "of", "targets", "that", "specify", "the", "platform", ".", "If", "no", "targets", "have", "platforms", "arguments", "return", "a", "dict", "containing", "platforms", "inherited", "from", "the", "PythonSetup", "object", "." ]
python
train
ioos/compliance-checker
compliance_checker/ioos.py
https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/ioos.py#L163-L177
def check_altitude_units(self, ds): """ If there's a variable named z, it must have units. @TODO: this is duplicated with check_variable_units :param netCDF4.Dataset ds: An open netCDF dataset """ if 'z' in ds.variables: msgs = [] val = 'units' in ds.variables['z'].ncattrs() if not val: msgs.append("Variable 'z' has no units attr") return Result(BaseCheck.LOW, val, 'Altitude Units', msgs) return Result(BaseCheck.LOW, (0, 0), 'Altitude Units', ["Dataset has no 'z' variable"])
[ "def", "check_altitude_units", "(", "self", ",", "ds", ")", ":", "if", "'z'", "in", "ds", ".", "variables", ":", "msgs", "=", "[", "]", "val", "=", "'units'", "in", "ds", ".", "variables", "[", "'z'", "]", ".", "ncattrs", "(", ")", "if", "not", "val", ":", "msgs", ".", "append", "(", "\"Variable 'z' has no units attr\"", ")", "return", "Result", "(", "BaseCheck", ".", "LOW", ",", "val", ",", "'Altitude Units'", ",", "msgs", ")", "return", "Result", "(", "BaseCheck", ".", "LOW", ",", "(", "0", ",", "0", ")", ",", "'Altitude Units'", ",", "[", "\"Dataset has no 'z' variable\"", "]", ")" ]
If there's a variable named z, it must have units. @TODO: this is duplicated with check_variable_units :param netCDF4.Dataset ds: An open netCDF dataset
[ "If", "there", "s", "a", "variable", "named", "z", "it", "must", "have", "units", "." ]
python
train
ladybug-tools/ladybug
ladybug/designday.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/designday.py#L1230-L1239
def _get_datetimes(self, timestep=1): """List of datetimes based on design day date and timestep.""" start_moy = DateTime(self._month, self._day_of_month).moy if timestep == 1: start_moy = start_moy + 30 num_moys = 24 * timestep return tuple( DateTime.from_moy(start_moy + (i * (1 / timestep) * 60)) for i in xrange(num_moys) )
[ "def", "_get_datetimes", "(", "self", ",", "timestep", "=", "1", ")", ":", "start_moy", "=", "DateTime", "(", "self", ".", "_month", ",", "self", ".", "_day_of_month", ")", ".", "moy", "if", "timestep", "==", "1", ":", "start_moy", "=", "start_moy", "+", "30", "num_moys", "=", "24", "*", "timestep", "return", "tuple", "(", "DateTime", ".", "from_moy", "(", "start_moy", "+", "(", "i", "*", "(", "1", "/", "timestep", ")", "*", "60", ")", ")", "for", "i", "in", "xrange", "(", "num_moys", ")", ")" ]
List of datetimes based on design day date and timestep.
[ "List", "of", "datetimes", "based", "on", "design", "day", "date", "and", "timestep", "." ]
python
train
agabrown/PyGaia
pygaia/errors/astrometric.py
https://github.com/agabrown/PyGaia/blob/ae972b0622a15f713ffae471f925eac25ccdae47/pygaia/errors/astrometric.py#L230-L256
def positionMinError(G, vmini, extension=0.0): """ Calculate the minimum position errors from G and (V-I). These correspond to the sky regions with the smallest astrometric errors. NOTE! THE ERRORS ARE FOR SKY POSITIONS IN THE ICRS (I.E., RIGHT ASCENSION, DECLINATION). MAKE SURE YOUR SIMULATED ASTROMETRY IS ALSO ON THE ICRS. Parameters ---------- G - Value(s) of G-band magnitude. vmini - Value(s) of (V-I) colour. Keywords -------- extension - Add this amount of years to the mission lifetime and scale the errors accordingly. Returns ------- The minimum error in alpha* and the error in delta, in that order, in micro-arcsecond. """ parallaxError = parallaxErrorSkyAvg(G, vmini, extension=extension) return _astrometricErrorFactors['alphaStar'].min()*parallaxError, \ _astrometricErrorFactors['delta'].min()*parallaxError
[ "def", "positionMinError", "(", "G", ",", "vmini", ",", "extension", "=", "0.0", ")", ":", "parallaxError", "=", "parallaxErrorSkyAvg", "(", "G", ",", "vmini", ",", "extension", "=", "extension", ")", "return", "_astrometricErrorFactors", "[", "'alphaStar'", "]", ".", "min", "(", ")", "*", "parallaxError", ",", "_astrometricErrorFactors", "[", "'delta'", "]", ".", "min", "(", ")", "*", "parallaxError" ]
Calculate the minimum position errors from G and (V-I). These correspond to the sky regions with the smallest astrometric errors. NOTE! THE ERRORS ARE FOR SKY POSITIONS IN THE ICRS (I.E., RIGHT ASCENSION, DECLINATION). MAKE SURE YOUR SIMULATED ASTROMETRY IS ALSO ON THE ICRS. Parameters ---------- G - Value(s) of G-band magnitude. vmini - Value(s) of (V-I) colour. Keywords -------- extension - Add this amount of years to the mission lifetime and scale the errors accordingly. Returns ------- The minimum error in alpha* and the error in delta, in that order, in micro-arcsecond.
[ "Calculate", "the", "minimum", "position", "errors", "from", "G", "and", "(", "V", "-", "I", ")", ".", "These", "correspond", "to", "the", "sky", "regions", "with", "the", "smallest", "astrometric", "errors", "." ]
python
test
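A usage sketch for the record above; the magnitudes and colours are arbitrary sample values:

    import numpy as np
    from pygaia.errors.astrometric import positionMinError

    g_mags = np.array([12.0, 15.0, 18.0])
    v_min_i = np.array([0.5, 0.5, 0.5])

    # Best-case sky-region errors in micro-arcseconds, ICRS alpha* and delta.
    sigma_alpha_star, sigma_delta = positionMinError(g_mags, v_min_i)

    # Same, with the nominal mission lifetime stretched by two years.
    sigma_alpha_star2, sigma_delta2 = positionMinError(g_mags, v_min_i, extension=2.0)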
jmcarp/betfair.py
betfair/betfair.py
https://github.com/jmcarp/betfair.py/blob/116df2fdc512575d1b4c4f1749d4a5bf98e519ff/betfair/betfair.py#L460-L477
def get_account_statement( self, locale=None, from_record=None, record_count=None, item_date_range=None, include_item=None, wallet=None): """Get account statement. :param str locale: The language to be used where applicable :param int from_record: Specifies the first record that will be returned :param int record_count: Specifies the maximum number of records to be returned :param TimeRange item_date_range: Return items with an itemDate within this date range :param IncludeItem include_item: Which items to include :param Wallet wallte: Which wallet to return statementItems for """ return self.make_api_request( 'Account', 'getAccountStatement', utils.get_kwargs(locals()), model=models.AccountStatementReport, )
[ "def", "get_account_statement", "(", "self", ",", "locale", "=", "None", ",", "from_record", "=", "None", ",", "record_count", "=", "None", ",", "item_date_range", "=", "None", ",", "include_item", "=", "None", ",", "wallet", "=", "None", ")", ":", "return", "self", ".", "make_api_request", "(", "'Account'", ",", "'getAccountStatement'", ",", "utils", ".", "get_kwargs", "(", "locals", "(", ")", ")", ",", "model", "=", "models", ".", "AccountStatementReport", ",", ")" ]
Get account statement. :param str locale: The language to be used where applicable :param int from_record: Specifies the first record that will be returned :param int record_count: Specifies the maximum number of records to be returned :param TimeRange item_date_range: Return items with an itemDate within this date range :param IncludeItem include_item: Which items to include :param Wallet wallet: Which wallet to return statementItems for
[ "Get", "account", "statement", "." ]
python
train
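A hedged sketch, assuming client is an already-logged-in API client exposing the method above; the paging values are arbitrary:

    report = client.get_account_statement(from_record=0, record_count=100)

The call returns an AccountStatementReport model, per the make_api_request invocation in the record.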
lemieuxl/pyplink
pyplink/pyplink.py
https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L204-L220
def seek(self, n): """Gets to a certain marker position in the BED file. Args: n (int): The index of the marker to seek to. """ if self._mode != "r": raise UnsupportedOperation("not available in 'w' mode") if 0 <= n < self._nb_markers: self._n = n self._bed.seek(self._get_seek_position(n)) else: # Invalid seek value raise ValueError("invalid position in BED: {}".format(n))
[ "def", "seek", "(", "self", ",", "n", ")", ":", "if", "self", ".", "_mode", "!=", "\"r\"", ":", "raise", "UnsupportedOperation", "(", "\"not available in 'w' mode\"", ")", "if", "0", "<=", "n", "<", "self", ".", "_nb_markers", ":", "self", ".", "_n", "=", "n", "self", ".", "_bed", ".", "seek", "(", "self", ".", "_get_seek_position", "(", "n", ")", ")", "else", ":", "# Invalid seek value", "raise", "ValueError", "(", "\"invalid position in BED: {}\"", ".", "format", "(", "n", ")", ")" ]
Gets to a certain marker position in the BED file. Args: n (int): The index of the marker to seek to.
[ "Gets", "to", "a", "certain", "marker", "position", "in", "the", "BED", "file", "." ]
python
train
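A usage sketch; 'mystudy' is a placeholder prefix for mystudy.bed, mystudy.bim and mystudy.fam:

    from pyplink import PyPlink

    with PyPlink('mystudy') as bed:
        bed.seek(10)                   # jump to the marker at 0-based index 10
        marker, genotypes = next(bed)  # iteration resumes from that marker
        print(marker, genotypes[:5])

Seeking past the last marker, or calling seek in 'w' mode, raises, as the guards in the record show.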
mzucker/noteshrink
noteshrink.py
https://github.com/mzucker/noteshrink/blob/7d876e5b43923c6bf8d64b7ef18f6855bfb30ce3/noteshrink.py#L141-L182
def postprocess(output_filename, options): '''Runs the postprocessing command on the file provided.''' assert options.postprocess_cmd base, _ = os.path.splitext(output_filename) post_filename = base + options.postprocess_ext cmd = options.postprocess_cmd cmd = cmd.replace('%i', output_filename) cmd = cmd.replace('%o', post_filename) cmd = cmd.replace('%e', options.postprocess_ext) subprocess_args = shlex.split(cmd) if os.path.exists(post_filename): os.unlink(post_filename) if not options.quiet: print(' running "{}"...'.format(cmd), end=' ') sys.stdout.flush() try: result = subprocess.call(subprocess_args) before = os.stat(output_filename).st_size after = os.stat(post_filename).st_size except OSError: result = -1 if result == 0: if not options.quiet: print('{:.1f}% reduction'.format( 100*(1.0-float(after)/before))) return post_filename else: sys.stderr.write('warning: postprocessing failed!\n') return None
[ "def", "postprocess", "(", "output_filename", ",", "options", ")", ":", "assert", "options", ".", "postprocess_cmd", "base", ",", "_", "=", "os", ".", "path", ".", "splitext", "(", "output_filename", ")", "post_filename", "=", "base", "+", "options", ".", "postprocess_ext", "cmd", "=", "options", ".", "postprocess_cmd", "cmd", "=", "cmd", ".", "replace", "(", "'%i'", ",", "output_filename", ")", "cmd", "=", "cmd", ".", "replace", "(", "'%o'", ",", "post_filename", ")", "cmd", "=", "cmd", ".", "replace", "(", "'%e'", ",", "options", ".", "postprocess_ext", ")", "subprocess_args", "=", "shlex", ".", "split", "(", "cmd", ")", "if", "os", ".", "path", ".", "exists", "(", "post_filename", ")", ":", "os", ".", "unlink", "(", "post_filename", ")", "if", "not", "options", ".", "quiet", ":", "print", "(", "' running \"{}\"...'", ".", "format", "(", "cmd", ")", ",", "end", "=", "' '", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "try", ":", "result", "=", "subprocess", ".", "call", "(", "subprocess_args", ")", "before", "=", "os", ".", "stat", "(", "output_filename", ")", ".", "st_size", "after", "=", "os", ".", "stat", "(", "post_filename", ")", ".", "st_size", "except", "OSError", ":", "result", "=", "-", "1", "if", "result", "==", "0", ":", "if", "not", "options", ".", "quiet", ":", "print", "(", "'{:.1f}% reduction'", ".", "format", "(", "100", "*", "(", "1.0", "-", "float", "(", "after", ")", "/", "before", ")", ")", ")", "return", "post_filename", "else", ":", "sys", ".", "stderr", ".", "write", "(", "'warning: postprocessing failed!\\n'", ")", "return", "None" ]
Runs the postprocessing command on the file provided.
[ "Runs", "the", "postprocessing", "command", "on", "the", "file", "provided", "." ]
python
train
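The core of the function is the %i/%o/%e substitution; a self-contained sketch of that step with a sample command (pngcrush here is only an example postprocessor):

    import os
    import shlex

    output_filename = 'page01.png'
    postprocess_ext = '_post.png'

    base, _ = os.path.splitext(output_filename)
    post_filename = base + postprocess_ext  # 'page01_post.png'

    cmd = 'pngcrush -q %i %o'
    cmd = cmd.replace('%i', output_filename)
    cmd = cmd.replace('%o', post_filename)
    cmd = cmd.replace('%e', postprocess_ext)

    print(shlex.split(cmd))  # ['pngcrush', '-q', 'page01.png', 'page01_post.png']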
pymc-devs/pymc
pymc/Model.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Model.py#L248-L256
def _finalize(self): """Reset the status and tell the database to finalize the traces.""" if self.status in ['running', 'halt']: if self.verbose > 0: print_('\nSampling finished normally.') self.status = 'ready' self.save_state() self.db._finalize()
[ "def", "_finalize", "(", "self", ")", ":", "if", "self", ".", "status", "in", "[", "'running'", ",", "'halt'", "]", ":", "if", "self", ".", "verbose", ">", "0", ":", "print_", "(", "'\\nSampling finished normally.'", ")", "self", ".", "status", "=", "'ready'", "self", ".", "save_state", "(", ")", "self", ".", "db", ".", "_finalize", "(", ")" ]
Reset the status and tell the database to finalize the traces.
[ "Reset", "the", "status", "and", "tell", "the", "database", "to", "finalize", "the", "traces", "." ]
python
train
jaraco/irc
irc/client.py
https://github.com/jaraco/irc/blob/571c1f448d5d5bb92bbe2605c33148bf6e698413/irc/client.py#L1186-L1192
def dcc_listen(self, dcctype="chat"): """Listen for connections from a DCC peer. Returns a DCCConnection instance. """ warnings.warn("Use self.dcc(type).listen()", DeprecationWarning) return self.dcc(dcctype).listen()
[ "def", "dcc_listen", "(", "self", ",", "dcctype", "=", "\"chat\"", ")", ":", "warnings", ".", "warn", "(", "\"Use self.dcc(type).listen()\"", ",", "DeprecationWarning", ")", "return", "self", ".", "dcc", "(", "dcctype", ")", ".", "listen", "(", ")" ]
Listen for connections from a DCC peer. Returns a DCCConnection instance.
[ "Listen", "for", "connections", "from", "a", "DCC", "peer", "." ]
python
train
BDNYC/astrodbkit
astrodbkit/astrocat.py
https://github.com/BDNYC/astrodbkit/blob/02c03c5e91aa7c7b0f3b5fa95bcf71e33ffcee09/astrodbkit/astrocat.py#L468-L483
def drop_catalog(self, cat_name): """ Remove an imported catalog from the Dataset object Parameters ---------- cat_name: str The name given to the catalog """ # Delete the name and data self.catalogs.pop(cat_name) delattr(self, cat_name) # Update history print("Deleted {} catalog.".format(cat_name)) self.history += "\n{}: Deleted {} catalog.".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), cat_name)
[ "def", "drop_catalog", "(", "self", ",", "cat_name", ")", ":", "# Delete the name and data", "self", ".", "catalogs", ".", "pop", "(", "cat_name", ")", "delattr", "(", "self", ",", "cat_name", ")", "# Update history", "print", "(", "\"Deleted {} catalog.\"", ".", "format", "(", "cat_name", ")", ")", "self", ".", "history", "+=", "\"\\n{}: Deleted {} catalog.\"", ".", "format", "(", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:%S\"", ")", ",", "cat_name", ")" ]
Remove an imported catalog from the Dataset object Parameters ---------- cat_name: str The name given to the catalog
[ "Remove", "an", "imported", "catalog", "from", "the", "Dataset", "object", "Parameters", "----------", "cat_name", ":", "str", "The", "name", "given", "to", "the", "catalog" ]
python
train
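A short sketch, assuming dataset is a populated Dataset instance and '2MASS' the name the catalog was imported under (both hypothetical here):

    dataset.drop_catalog('2MASS')
    print(dataset.history)  # the deletion is appended to the history log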
Feneric/doxypypy
doxypypy/doxypypy.py
https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L25-L32
def coroutine(func): """Basic decorator to implement the coroutine pattern.""" def __start(*args, **kwargs): """Automatically calls next() on the internal generator function.""" __cr = func(*args, **kwargs) next(__cr) return __cr return __start
[ "def", "coroutine", "(", "func", ")", ":", "def", "__start", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Automatically calls next() on the internal generator function.\"\"\"", "__cr", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "next", "(", "__cr", ")", "return", "__cr", "return", "__start" ]
Basic decorator to implement the coroutine pattern.
[ "Basic", "decorator", "to", "implement", "the", "coroutine", "pattern", "." ]
python
train
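A self-contained demonstration of the decorator above; the consumer below receives values with send() and needs no explicit priming call:

    @coroutine
    def running_total():
        """Keeps a running total of the numbers sent in."""
        total = 0
        while True:
            value = (yield)
            total += value
            print(total)

    acc = running_total()  # next() already called by the decorator
    acc.send(3)   # prints 3
    acc.send(4)   # prints 7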
gwastro/pycbc
pycbc/workflow/datafind.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/datafind.py#L835-L887
def get_segment_summary_times(scienceFile, segmentName): """ This function will find the times for which the segment_summary is set for the flag given by segmentName. Parameters ----------- scienceFile : SegFile The segment file that we want to use to determine this. segmentName : string The DQ flag to search for times in the segment_summary table. Returns --------- summSegList : ligo.segments.segmentlist The times that are covered in the segment summary table. """ # Parse the segmentName segmentName = segmentName.split(':') if not len(segmentName) in [2,3]: raise ValueError("Invalid channel name %s." %(segmentName)) ifo = segmentName[0] channel = segmentName[1] version = '' if len(segmentName) == 3: version = int(segmentName[2]) # Load the filename xmldoc = utils.load_filename(scienceFile.cache_entry.path, gz=scienceFile.cache_entry.path.endswith("gz"), contenthandler=ContentHandler) # Get the segment_def_id for the segmentName segmentDefTable = table.get_table(xmldoc, "segment_definer") for entry in segmentDefTable: if (entry.ifos == ifo) and (entry.name == channel): if len(segmentName) == 2 or (entry.version==version): segDefID = entry.segment_def_id break else: raise ValueError("Cannot find channel %s in segment_definer table."\ %(segmentName)) # Get the segmentlist corresponding to this segmentName in segment_summary segmentSummTable = table.get_table(xmldoc, "segment_summary") summSegList = segments.segmentlist([]) for entry in segmentSummTable: if entry.segment_def_id == segDefID: segment = segments.segment(entry.start_time, entry.end_time) summSegList.append(segment) summSegList.coalesce() return summSegList
[ "def", "get_segment_summary_times", "(", "scienceFile", ",", "segmentName", ")", ":", "# Parse the segmentName", "segmentName", "=", "segmentName", ".", "split", "(", "':'", ")", "if", "not", "len", "(", "segmentName", ")", "in", "[", "2", ",", "3", "]", ":", "raise", "ValueError", "(", "\"Invalid channel name %s.\"", "%", "(", "segmentName", ")", ")", "ifo", "=", "segmentName", "[", "0", "]", "channel", "=", "segmentName", "[", "1", "]", "version", "=", "''", "if", "len", "(", "segmentName", ")", "==", "3", ":", "version", "=", "int", "(", "segmentName", "[", "2", "]", ")", "# Load the filename", "xmldoc", "=", "utils", ".", "load_filename", "(", "scienceFile", ".", "cache_entry", ".", "path", ",", "gz", "=", "scienceFile", ".", "cache_entry", ".", "path", ".", "endswith", "(", "\"gz\"", ")", ",", "contenthandler", "=", "ContentHandler", ")", "# Get the segment_def_id for the segmentName", "segmentDefTable", "=", "table", ".", "get_table", "(", "xmldoc", ",", "\"segment_definer\"", ")", "for", "entry", "in", "segmentDefTable", ":", "if", "(", "entry", ".", "ifos", "==", "ifo", ")", "and", "(", "entry", ".", "name", "==", "channel", ")", ":", "if", "len", "(", "segmentName", ")", "==", "2", "or", "(", "entry", ".", "version", "==", "version", ")", ":", "segDefID", "=", "entry", ".", "segment_def_id", "break", "else", ":", "raise", "ValueError", "(", "\"Cannot find channel %s in segment_definer table.\"", "%", "(", "segmentName", ")", ")", "# Get the segmentlist corresponding to this segmentName in segment_summary", "segmentSummTable", "=", "table", ".", "get_table", "(", "xmldoc", ",", "\"segment_summary\"", ")", "summSegList", "=", "segments", ".", "segmentlist", "(", "[", "]", ")", "for", "entry", "in", "segmentSummTable", ":", "if", "entry", ".", "segment_def_id", "==", "segDefID", ":", "segment", "=", "segments", ".", "segment", "(", "entry", ".", "start_time", ",", "entry", ".", "end_time", ")", "summSegList", ".", "append", "(", "segment", ")", "summSegList", ".", "coalesce", "(", ")", "return", "summSegList" ]
This function will find the times for which the segment_summary is set for the flag given by segmentName. Parameters ----------- scienceFile : SegFile The segment file that we want to use to determine this. segmentName : string The DQ flag to search for times in the segment_summary table. Returns --------- summSegList : ligo.segments.segmentlist The times that are covered in the segment summary table.
[ "This", "function", "will", "find", "the", "times", "for", "which", "the", "segment_summary", "is", "set", "for", "the", "flag", "given", "by", "segmentName", "." ]
python
train
Nike-Inc/cerberus-python-client
cerberus/client.py
https://github.com/Nike-Inc/cerberus-python-client/blob/ef38356822e722fcb6a6ed4a1b38a5b493e753ae/cerberus/client.py#L523-L539
def _get_secrets(self, secure_data_path, version=None): """ Return full json secrets based on the secure data path Keyword arguments: secure_data_path (string) -- full path in the secret deposit box that contains the key /shared/sdb-path/secret """ if not version: version = "CURRENT" payload = {'versionId': str(version)} secret_resp = get_with_retry(str.join('', [self.cerberus_url, '/v1/secret/', secure_data_path]), params=payload, headers=self.HEADERS) throw_if_bad_response(secret_resp) return secret_resp.json()
[ "def", "_get_secrets", "(", "self", ",", "secure_data_path", ",", "version", "=", "None", ")", ":", "if", "not", "version", ":", "version", "=", "\"CURRENT\"", "payload", "=", "{", "'versionId'", ":", "str", "(", "version", ")", "}", "secret_resp", "=", "get_with_retry", "(", "str", ".", "join", "(", "''", ",", "[", "self", ".", "cerberus_url", ",", "'/v1/secret/'", ",", "secure_data_path", "]", ")", ",", "params", "=", "payload", ",", "headers", "=", "self", ".", "HEADERS", ")", "throw_if_bad_response", "(", "secret_resp", ")", "return", "secret_resp", ".", "json", "(", ")" ]
Return full json secrets based on the secure data path Keyword arguments: secure_data_path (string) -- full path in the secret deposit box that contains the key /shared/sdb-path/secret
[ "Return", "full", "json", "secrets", "based", "on", "the", "secure", "data", "path", "Keyword", "arguments", ":" ]
python
train
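A short sketch of how _get_secrets is invoked, with the caveat that the client construction and the secure data path here are placeholders invented for illustration; as a private helper, it would normally be reached through the client's public read methods.

# Hypothetical call; the Cerberus URL and path are assumptions.
client = CerberusClient("https://cerberus.example.com")
# With no version argument, the method itself substitutes "CURRENT".
secret_json = client._get_secrets("shared/example-sdb/example-secret")
print(secret_json)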
RedHatInsights/insights-core
insights/core/__init__.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/__init__.py#L638-L680
def get_elements(self, element, xmlns=None):
        """
        Return a list of elements that match the search condition.
        If the XML input has namespaces, elements and attributes with prefixes
        in the form prefix:sometag get expanded to {namespace}element where the
        prefix is replaced by the full URI. Also, if there is a default
        namespace, that full URI gets prepended to all of the non-prefixed tags.
        Element names can contain letters, digits, hyphens, underscores, and
        periods. But element names must start with a letter or underscore.
        Here the while-clause is to set the search condition from
        `/element1/element2` to `/{namespace}element1/{namespace}element2`

        Parameters:
            element: Search condition used to search for certain elements in an
                XML file. For more details about how to set the search
                condition, refer to section `19.7.2.1. Example` and
                `19.7.2.2. Supported XPath syntax` in
                https://docs.python.org/2/library/xml.etree.elementtree.html
            xmlns: XML namespace; defaults to None. None means that xmlns falls
                back to `self.xmlns` (the default namespace) instead of "" all
                the time. Only a string-type parameter (including "") will be
                regarded as a valid XML namespace.

        Returns:
            (list): List of elements that match the search condition
        """
        real_element = ""
        real_xmlns = ""

        if xmlns is None:
            real_xmlns = "{" + self.xmlns + "}" if self.xmlns else ""
        else:
            real_xmlns = "{" + xmlns + "}"

        while "/" in element:
            l = element.split("/", 1)
            element = l[1]
            real_element += l[0] + "/"
            if element[0].isalpha() or element[0] == "_":
                real_element += real_xmlns

        real_element += element

        return self.dom.findall(real_element)
[ "def", "get_elements", "(", "self", ",", "element", ",", "xmlns", "=", "None", ")", ":", "real_element", "=", "\"\"", "real_xmlns", "=", "\"\"", "if", "xmlns", "is", "None", ":", "real_xmlns", "=", "\"{\"", "+", "self", ".", "xmlns", "+", "\"}\"", "if", "self", ".", "xmlns", "else", "\"\"", "else", ":", "real_xmlns", "=", "\"{\"", "+", "xmlns", "+", "\"}\"", "while", "\"/\"", "in", "element", ":", "l", "=", "element", ".", "split", "(", "\"/\"", ",", "1", ")", "element", "=", "l", "[", "1", "]", "real_element", "+=", "l", "[", "0", "]", "+", "\"/\"", "if", "element", "[", "0", "]", ".", "isalpha", "(", ")", "or", "element", "[", "0", "]", "==", "\"_\"", ":", "real_element", "+=", "real_xmlns", "real_element", "+=", "element", "return", "self", ".", "dom", ".", "findall", "(", "real_element", ")" ]
Return a list of elements that match the search condition. If the XML input has namespaces, elements and attributes with prefixes in the form prefix:sometag get expanded to {namespace}element where the prefix is replaced by the full URI. Also, if there is a default namespace, that full URI gets prepended to all of the non-prefixed tags. Element names can contain letters, digits, hyphens, underscores, and periods. But element names must start with a letter or underscore. Here the while-clause is to set the search condition from `/element1/element2` to `/{namespace}element1/{namespace}element2`

Parameters:
    element: Search condition used to search for certain elements in an XML file. For more details about how to set the search condition, refer to section `19.7.2.1. Example` and `19.7.2.2. Supported XPath syntax` in https://docs.python.org/2/library/xml.etree.elementtree.html
    xmlns: XML namespace; defaults to None. None means that xmlns falls back to `self.xmlns` (the default namespace) instead of "" all the time. Only a string-type parameter (including "") will be regarded as a valid XML namespace.

Returns:
    (list): List of elements that match the search condition
[ "Return", "a", "list", "of", "elements", "those", "match", "the", "searching", "condition", ".", "If", "the", "XML", "input", "has", "namespaces", "elements", "and", "attributes", "with", "prefixes", "in", "the", "form", "prefix", ":", "sometag", "get", "expanded", "to", "{", "namespace", "}", "element", "where", "the", "prefix", "is", "replaced", "by", "the", "full", "URI", ".", "Also", "if", "there", "is", "a", "default", "namespace", "that", "full", "URI", "gets", "prepended", "to", "all", "of", "the", "non", "-", "prefixed", "tags", ".", "Element", "names", "can", "contain", "letters", "digits", "hyphens", "underscores", "and", "periods", ".", "But", "element", "names", "must", "start", "with", "a", "letter", "or", "underscore", ".", "Here", "the", "while", "-", "clause", "is", "to", "set", "searching", "condition", "from", "/", "element1", "/", "element2", "to", "/", "{", "namespace", "}", "element1", "/", "{", "namespace", "}", "/", "element2" ]
python
train
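The heart of get_elements is its path-expansion loop, so a standalone replay of it may help. The namespace URI and the search condition below are made up for the example, but the loop body is copied from the record above.

# Replays the expansion loop from get_elements with hypothetical inputs.
real_xmlns = "{urn:example}"   # stands in for "{" + self.xmlns + "}"
element = "/outer/inner"       # search condition to expand
real_element = ""
while "/" in element:
    head, element = element.split("/", 1)
    real_element += head + "/"
    if element[0].isalpha() or element[0] == "_":
        real_element += real_xmlns
real_element += element
print(real_element)   # prints /{urn:example}outer/{urn:example}inner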
Jaymon/captain
captain/echo.py
https://github.com/Jaymon/captain/blob/4297f32961d423a10d0f053bc252e29fbe939a47/captain/echo.py#L101-L122
def progress(length, **kwargs):
    """display a progress bar that can update in place

    example --
        total_length = 1000
        with echo.progress(total_length) as p:
            for x in range(total_length):
                # do something crazy
                p.update(x)

    length -- int -- the total size of what you will be updating progress on
    """
    progress_class = kwargs.pop("progress_class", Progress)
    kwargs["write_method"] = istdout.info
    kwargs["width"] = kwargs.get("width", globals()["WIDTH"])
    kwargs["length"] = length
    pbar = progress_class(**kwargs)
    pbar.update(0)
    yield pbar
    pbar.update(length)
    br()
[ "def", "progress", "(", "length", ",", "*", "*", "kwargs", ")", ":", "quiet", "=", "False", "progress_class", "=", "kwargs", ".", "pop", "(", "\"progress_class\"", ",", "Progress", ")", "kwargs", "[", "\"write_method\"", "]", "=", "istdout", ".", "info", "kwargs", "[", "\"width\"", "]", "=", "kwargs", ".", "get", "(", "\"width\"", ",", "globals", "(", ")", "[", "\"WIDTH\"", "]", ")", "kwargs", "[", "\"length\"", "]", "=", "length", "pbar", "=", "progress_class", "(", "*", "*", "kwargs", ")", "pbar", ".", "update", "(", "0", ")", "yield", "pbar", "pbar", ".", "update", "(", "length", ")", "br", "(", ")" ]
display a progress bar that can update in place

example --
    total_length = 1000
    with echo.progress(total_length) as p:
        for x in range(total_length):
            # do something crazy
            p.update(x)

length -- int -- the total size of what you will be updating progress on
[ "display", "a", "progress", "that", "can", "update", "in", "place" ]
python
valid
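A usage sketch for progress that mirrors the docstring's own example. The import path and the context-manager form are assumptions: the record starts at the def line, so any decorator (e.g. contextlib.contextmanager) that enables the with-statement is not shown.

# Hypothetical usage based on the docstring example.
from captain import echo

total_length = 1000
with echo.progress(total_length, width=80) as p:
    for x in range(total_length):
        p.update(x)   # redraw the bar in place at x of total_length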