Dataset schema:
repo: string (length 7 to 54)
path: string (length 4 to 192)
url: string (length 87 to 284)
code: string (length 78 to 104k)
code_tokens: sequence
docstring: string (length 1 to 46.9k)
docstring_tokens: sequence
language: string (1 class)
partition: string (3 classes)
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/submissions.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/submissions.py#L121-L134
def init_from_storage_write_to_datastore(self): """Init list of sumibssions from Storage and saves them to Datastore. Should be called only once (typically by master) during evaluation of the competition. """ # Load submissions self._attacks = self._load_submissions_from_datastore_dir( ATTACK_SUBDIR, ATTACK_ID_PATTERN) self._targeted_attacks = self._load_submissions_from_datastore_dir( TARGETED_ATTACK_SUBDIR, TARGETED_ATTACK_ID_PATTERN) self._defenses = self._load_submissions_from_datastore_dir( DEFENSE_SUBDIR, DEFENSE_ID_PATTERN) self._write_to_datastore()
[ "def", "init_from_storage_write_to_datastore", "(", "self", ")", ":", "# Load submissions", "self", ".", "_attacks", "=", "self", ".", "_load_submissions_from_datastore_dir", "(", "ATTACK_SUBDIR", ",", "ATTACK_ID_PATTERN", ")", "self", ".", "_targeted_attacks", "=", "self", ".", "_load_submissions_from_datastore_dir", "(", "TARGETED_ATTACK_SUBDIR", ",", "TARGETED_ATTACK_ID_PATTERN", ")", "self", ".", "_defenses", "=", "self", ".", "_load_submissions_from_datastore_dir", "(", "DEFENSE_SUBDIR", ",", "DEFENSE_ID_PATTERN", ")", "self", ".", "_write_to_datastore", "(", ")" ]
Init list of submissions from Storage and saves them to Datastore. Should be called only once (typically by master) during evaluation of the competition.
[ "Init", "list", "of", "submissions", "from", "Storage", "and", "saves", "them", "to", "Datastore", "." ]
python
train
cmutel/constructive_geometries
constructive_geometries/geomatcher.py
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/geomatcher.py#L104-L132
def _finish_filter(self, lst, key, include_self, exclusive, biggest_first): """Finish filtering a GIS operation. Can optionally exclude the input key, sort results, and exclude overlapping results. Internal function, not normally called directly.""" key = self._actual_key(key) locations = [x[0] for x in lst] if not include_self and key in locations: lst.pop(locations.index(key)) lst.sort(key=lambda x: x[1], reverse=biggest_first) lst = [x for x, y in lst] # RoW in both key and lst, but not defined; only RoW remains if exclusive if key == 'RoW' and 'RoW' not in self and exclusive: return ['RoW'] if 'RoW' in lst else [] elif exclusive: removed, remaining = set(), [] while lst: current = lst.pop(0) faces = self[current] if not faces.intersection(removed): removed.update(faces) remaining.append(current) lst = remaining # If RoW not resolved, make it the smallest if 'RoW' not in self and 'RoW' in lst: lst[-1 if biggest_first else 0] = lst.pop(lst.index('RoW')) return lst
[ "def", "_finish_filter", "(", "self", ",", "lst", ",", "key", ",", "include_self", ",", "exclusive", ",", "biggest_first", ")", ":", "key", "=", "self", ".", "_actual_key", "(", "key", ")", "locations", "=", "[", "x", "[", "0", "]", "for", "x", "in", "lst", "]", "if", "not", "include_self", "and", "key", "in", "locations", ":", "lst", ".", "pop", "(", "locations", ".", "index", "(", "key", ")", ")", "lst", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ",", "reverse", "=", "biggest_first", ")", "lst", "=", "[", "x", "for", "x", ",", "y", "in", "lst", "]", "# RoW in both key and lst, but not defined; only RoW remains if exclusive", "if", "key", "==", "'RoW'", "and", "'RoW'", "not", "in", "self", "and", "exclusive", ":", "return", "[", "'RoW'", "]", "if", "'RoW'", "in", "lst", "else", "[", "]", "elif", "exclusive", ":", "removed", ",", "remaining", "=", "set", "(", ")", ",", "[", "]", "while", "lst", ":", "current", "=", "lst", ".", "pop", "(", "0", ")", "faces", "=", "self", "[", "current", "]", "if", "not", "faces", ".", "intersection", "(", "removed", ")", ":", "removed", ".", "update", "(", "faces", ")", "remaining", ".", "append", "(", "current", ")", "lst", "=", "remaining", "# If RoW not resolved, make it the smallest", "if", "'RoW'", "not", "in", "self", "and", "'RoW'", "in", "lst", ":", "lst", "[", "-", "1", "if", "biggest_first", "else", "0", "]", "=", "lst", ".", "pop", "(", "lst", ".", "index", "(", "'RoW'", ")", ")", "return", "lst" ]
Finish filtering a GIS operation. Can optionally exclude the input key, sort results, and exclude overlapping results. Internal function, not normally called directly.
[ "Finish", "filtering", "a", "GIS", "operation", ".", "Can", "optionally", "exclude", "the", "input", "key", "sort", "results", "and", "exclude", "overlapping", "results", ".", "Internal", "function", "not", "normally", "called", "directly", "." ]
python
train
mattloper/chumpy
chumpy/utils.py
https://github.com/mattloper/chumpy/blob/a3cfdb1be3c8265c369c507b22f6f3f89414c772/chumpy/utils.py#L44-L78
def sparse_is_desireable(lhs, rhs): ''' Examines a pair of matrices and determines if the result of their multiplication should be sparse or not. ''' return False if len(lhs.shape) == 1: return False else: lhs_rows, lhs_cols = lhs.shape if len(rhs.shape) == 1: rhs_rows = 1 rhs_cols = rhs.size else: rhs_rows, rhs_cols = rhs.shape result_size = lhs_rows * rhs_cols if sp.issparse(lhs) and sp.issparse(rhs): return True elif sp.issparse(lhs): lhs_zero_rows = lhs_rows - np.unique(lhs.nonzero()[0]).size rhs_zero_cols = np.all(rhs==0, axis=0).sum() elif sp.issparse(rhs): lhs_zero_rows = np.all(lhs==0, axis=1).sum() rhs_zero_cols = rhs_cols- np.unique(rhs.nonzero()[1]).size else: lhs_zero_rows = np.all(lhs==0, axis=1).sum() rhs_zero_cols = np.all(rhs==0, axis=0).sum() num_zeros = lhs_zero_rows * rhs_cols + rhs_zero_cols * lhs_rows - lhs_zero_rows * rhs_zero_cols # A sparse matrix uses roughly 16 bytes per nonzero element (8 + 2 4-byte inds), while a dense matrix uses 8 bytes per element. So the break even point for sparsity is 50% nonzero. But in practice, it seems to be that the compression in a csc or csr matrix gets us break even at ~65% nonzero, which lets us say 50% is a conservative, worst cases cutoff. return (float(num_zeros) / float(size)) >= 0.5
[ "def", "sparse_is_desireable", "(", "lhs", ",", "rhs", ")", ":", "return", "False", "if", "len", "(", "lhs", ".", "shape", ")", "==", "1", ":", "return", "False", "else", ":", "lhs_rows", ",", "lhs_cols", "=", "lhs", ".", "shape", "if", "len", "(", "rhs", ".", "shape", ")", "==", "1", ":", "rhs_rows", "=", "1", "rhs_cols", "=", "rhs", ".", "size", "else", ":", "rhs_rows", ",", "rhs_cols", "=", "rhs", ".", "shape", "result_size", "=", "lhs_rows", "*", "rhs_cols", "if", "sp", ".", "issparse", "(", "lhs", ")", "and", "sp", ".", "issparse", "(", "rhs", ")", ":", "return", "True", "elif", "sp", ".", "issparse", "(", "lhs", ")", ":", "lhs_zero_rows", "=", "lhs_rows", "-", "np", ".", "unique", "(", "lhs", ".", "nonzero", "(", ")", "[", "0", "]", ")", ".", "size", "rhs_zero_cols", "=", "np", ".", "all", "(", "rhs", "==", "0", ",", "axis", "=", "0", ")", ".", "sum", "(", ")", "elif", "sp", ".", "issparse", "(", "rhs", ")", ":", "lhs_zero_rows", "=", "np", ".", "all", "(", "lhs", "==", "0", ",", "axis", "=", "1", ")", ".", "sum", "(", ")", "rhs_zero_cols", "=", "rhs_cols", "-", "np", ".", "unique", "(", "rhs", ".", "nonzero", "(", ")", "[", "1", "]", ")", ".", "size", "else", ":", "lhs_zero_rows", "=", "np", ".", "all", "(", "lhs", "==", "0", ",", "axis", "=", "1", ")", ".", "sum", "(", ")", "rhs_zero_cols", "=", "np", ".", "all", "(", "rhs", "==", "0", ",", "axis", "=", "0", ")", ".", "sum", "(", ")", "num_zeros", "=", "lhs_zero_rows", "*", "rhs_cols", "+", "rhs_zero_cols", "*", "lhs_rows", "-", "lhs_zero_rows", "*", "rhs_zero_cols", "# A sparse matrix uses roughly 16 bytes per nonzero element (8 + 2 4-byte inds), while a dense matrix uses 8 bytes per element. So the break even point for sparsity is 50% nonzero. But in practice, it seems to be that the compression in a csc or csr matrix gets us break even at ~65% nonzero, which lets us say 50% is a conservative, worst cases cutoff.", "return", "(", "float", "(", "num_zeros", ")", "/", "float", "(", "size", ")", ")", ">=", "0.5" ]
Examines a pair of matrices and determines if the result of their multiplication should be sparse or not.
[ "Examines", "a", "pair", "of", "matrices", "and", "determines", "if", "the", "result", "of", "their", "multiplication", "should", "be", "sparse", "or", "not", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/core/script.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/script.py#L107-L117
def _load_script(self): """Loads the script from the filesystem :raises exceptions.IOError: if the script file could not be opened """ script_text = filesystem.read_file(self.path, self.filename) if not script_text: raise IOError("Script file could not be opened or was empty: {0}" "".format(os.path.join(self.path, self.filename))) self.script = script_text
[ "def", "_load_script", "(", "self", ")", ":", "script_text", "=", "filesystem", ".", "read_file", "(", "self", ".", "path", ",", "self", ".", "filename", ")", "if", "not", "script_text", ":", "raise", "IOError", "(", "\"Script file could not be opened or was empty: {0}\"", "\"\"", ".", "format", "(", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "self", ".", "filename", ")", ")", ")", "self", ".", "script", "=", "script_text" ]
Loads the script from the filesystem :raises exceptions.IOError: if the script file could not be opened
[ "Loads", "the", "script", "from", "the", "filesystem" ]
python
train
paramiko/paramiko
paramiko/kex_gss.py
https://github.com/paramiko/paramiko/blob/cf7d49d66f3b1fbc8b0853518a54050182b3b5eb/paramiko/kex_gss.py#L91-L115
def start_kex(self): """ Start the GSS-API / SSPI Authenticated Diffie-Hellman Key Exchange. """ self._generate_x() if self.transport.server_mode: # compute f = g^x mod p, but don't send it yet self.f = pow(self.G, self.x, self.P) self.transport._expect_packet(MSG_KEXGSS_INIT) return # compute e = g^x mod p (where g=2), and send it self.e = pow(self.G, self.x, self.P) # Initialize GSS-API Key Exchange self.gss_host = self.transport.gss_host m = Message() m.add_byte(c_MSG_KEXGSS_INIT) m.add_string(self.kexgss.ssh_init_sec_context(target=self.gss_host)) m.add_mpint(self.e) self.transport._send_message(m) self.transport._expect_packet( MSG_KEXGSS_HOSTKEY, MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE, MSG_KEXGSS_ERROR, )
[ "def", "start_kex", "(", "self", ")", ":", "self", ".", "_generate_x", "(", ")", "if", "self", ".", "transport", ".", "server_mode", ":", "# compute f = g^x mod p, but don't send it yet", "self", ".", "f", "=", "pow", "(", "self", ".", "G", ",", "self", ".", "x", ",", "self", ".", "P", ")", "self", ".", "transport", ".", "_expect_packet", "(", "MSG_KEXGSS_INIT", ")", "return", "# compute e = g^x mod p (where g=2), and send it", "self", ".", "e", "=", "pow", "(", "self", ".", "G", ",", "self", ".", "x", ",", "self", ".", "P", ")", "# Initialize GSS-API Key Exchange", "self", ".", "gss_host", "=", "self", ".", "transport", ".", "gss_host", "m", "=", "Message", "(", ")", "m", ".", "add_byte", "(", "c_MSG_KEXGSS_INIT", ")", "m", ".", "add_string", "(", "self", ".", "kexgss", ".", "ssh_init_sec_context", "(", "target", "=", "self", ".", "gss_host", ")", ")", "m", ".", "add_mpint", "(", "self", ".", "e", ")", "self", ".", "transport", ".", "_send_message", "(", "m", ")", "self", ".", "transport", ".", "_expect_packet", "(", "MSG_KEXGSS_HOSTKEY", ",", "MSG_KEXGSS_CONTINUE", ",", "MSG_KEXGSS_COMPLETE", ",", "MSG_KEXGSS_ERROR", ",", ")" ]
Start the GSS-API / SSPI Authenticated Diffie-Hellman Key Exchange.
[ "Start", "the", "GSS", "-", "API", "/", "SSPI", "Authenticated", "Diffie", "-", "Hellman", "Key", "Exchange", "." ]
python
train
estnltk/estnltk
estnltk/database/elastic/__init__.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/database/elastic/__init__.py#L16-L32
def create_index(index_name, **kwargs): """ Parameters ---------- index_name : str Name of the index to be created **kwargs Arguments to pass to Elasticsearch instance. Returns ------- Index """ es = elasticsearch.Elasticsearch(**kwargs) es.indices.create(index=index_name, body=mapping) return connect(index_name, **kwargs)
[ "def", "create_index", "(", "index_name", ",", "*", "*", "kwargs", ")", ":", "es", "=", "elasticsearch", ".", "Elasticsearch", "(", "*", "*", "kwargs", ")", "es", ".", "indices", ".", "create", "(", "index", "=", "index_name", ",", "body", "=", "mapping", ")", "return", "connect", "(", "index_name", ",", "*", "*", "kwargs", ")" ]
Parameters ---------- index_name : str Name of the index to be created **kwargs Arguments to pass to Elasticsearch instance. Returns ------- Index
[ "Parameters", "----------", "index_name", ":", "str", "Name", "of", "the", "index", "to", "be", "created", "**", "kwargs", "Arguments", "to", "pass", "to", "Elasticsearch", "instance", "." ]
python
train
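For illustration of the create_index helper in the estnltk record above, a minimal usage sketch; the index name and connection options are assumptions, and **kwargs is forwarded to elasticsearch.Elasticsearch exactly as the docstring describes:

from estnltk.database.elastic import create_index

# Hypothetical index name and host settings; **kwargs goes straight to
# elasticsearch.Elasticsearch, and the body uses the module-level `mapping`.
index = create_index('estnltk_texts', hosts=[{'host': 'localhost', 'port': 9200}])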
OSSOS/MOP
src/ossos/core/ossos/gui/errorhandling.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/gui/errorhandling.py#L19-L32
def handle_error(self, error, download_request): """ Checks what error occured and looks for an appropriate solution. Args: error: Exception The error that has occured. download_request: The request which resulted in the error. """ if hasattr(error, "errno") and error.errno == errno.EACCES: self.handle_certificate_problem(str(error)) else: self.handle_general_download_error(str(error), download_request)
[ "def", "handle_error", "(", "self", ",", "error", ",", "download_request", ")", ":", "if", "hasattr", "(", "error", ",", "\"errno\"", ")", "and", "error", ".", "errno", "==", "errno", ".", "EACCES", ":", "self", ".", "handle_certificate_problem", "(", "str", "(", "error", ")", ")", "else", ":", "self", ".", "handle_general_download_error", "(", "str", "(", "error", ")", ",", "download_request", ")" ]
Checks what error occurred and looks for an appropriate solution. Args: error: Exception The error that has occurred. download_request: The request which resulted in the error.
[ "Checks", "what", "error", "occurred", "and", "looks", "for", "an", "appropriate", "solution", "." ]
python
train
apache/incubator-mxnet
example/gluon/house_prices/kaggle_k_fold_cross_validation.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/house_prices/kaggle_k_fold_cross_validation.py#L82-L102
def train(net, X_train, y_train, epochs, verbose_epoch, learning_rate, weight_decay, batch_size): """Trains the model.""" dataset_train = gluon.data.ArrayDataset(X_train, y_train) data_iter_train = gluon.data.DataLoader(dataset_train, batch_size, shuffle=True) trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': learning_rate, 'wd': weight_decay}) net.initialize(force_reinit=True) for epoch in range(epochs): for data, label in data_iter_train: with autograd.record(): output = net(data) loss = square_loss(output, label) loss.backward() trainer.step(batch_size) avg_loss = get_rmse_log(net, X_train, y_train) if epoch > verbose_epoch: print("Epoch %d, train loss: %f" % (epoch, avg_loss)) return avg_loss
[ "def", "train", "(", "net", ",", "X_train", ",", "y_train", ",", "epochs", ",", "verbose_epoch", ",", "learning_rate", ",", "weight_decay", ",", "batch_size", ")", ":", "dataset_train", "=", "gluon", ".", "data", ".", "ArrayDataset", "(", "X_train", ",", "y_train", ")", "data_iter_train", "=", "gluon", ".", "data", ".", "DataLoader", "(", "dataset_train", ",", "batch_size", ",", "shuffle", "=", "True", ")", "trainer", "=", "gluon", ".", "Trainer", "(", "net", ".", "collect_params", "(", ")", ",", "'adam'", ",", "{", "'learning_rate'", ":", "learning_rate", ",", "'wd'", ":", "weight_decay", "}", ")", "net", ".", "initialize", "(", "force_reinit", "=", "True", ")", "for", "epoch", "in", "range", "(", "epochs", ")", ":", "for", "data", ",", "label", "in", "data_iter_train", ":", "with", "autograd", ".", "record", "(", ")", ":", "output", "=", "net", "(", "data", ")", "loss", "=", "square_loss", "(", "output", ",", "label", ")", "loss", ".", "backward", "(", ")", "trainer", ".", "step", "(", "batch_size", ")", "avg_loss", "=", "get_rmse_log", "(", "net", ",", "X_train", ",", "y_train", ")", "if", "epoch", ">", "verbose_epoch", ":", "print", "(", "\"Epoch %d, train loss: %f\"", "%", "(", "epoch", ",", "avg_loss", ")", ")", "return", "avg_loss" ]
Trains the model.
[ "Trains", "the", "model", "." ]
python
train
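A hedged call sketch for the train function in the mxnet record above; the hyperparameter values are invented, and net, X_train, y_train, square_loss, and get_rmse_log are assumed to come from the same Kaggle k-fold example script:

# Hypothetical hyperparameters; `net` and the NDArrays X_train / y_train are
# assumed to be prepared earlier in the same example script.
final_train_loss = train(net, X_train, y_train, epochs=100, verbose_epoch=95,
                         learning_rate=0.01, weight_decay=0.1, batch_size=64)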
wummel/linkchecker
linkcheck/ansicolor.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/ansicolor.py#L195-L211
def has_colors (fp): """Test if given file is an ANSI color enabled tty.""" # The is_tty() function ensures that we do not colorize # redirected streams, as this is almost never what we want if not is_tty(fp): return False if os.name == 'nt': return True elif has_curses: import curses try: curses.setupterm(os.environ.get("TERM"), fp.fileno()) # More than 8 colors are good enough. return curses.tigetnum("colors") >= 8 except curses.error: return False return False
[ "def", "has_colors", "(", "fp", ")", ":", "# The is_tty() function ensures that we do not colorize", "# redirected streams, as this is almost never what we want", "if", "not", "is_tty", "(", "fp", ")", ":", "return", "False", "if", "os", ".", "name", "==", "'nt'", ":", "return", "True", "elif", "has_curses", ":", "import", "curses", "try", ":", "curses", ".", "setupterm", "(", "os", ".", "environ", ".", "get", "(", "\"TERM\"", ")", ",", "fp", ".", "fileno", "(", ")", ")", "# More than 8 colors are good enough.", "return", "curses", ".", "tigetnum", "(", "\"colors\"", ")", ">=", "8", "except", "curses", ".", "error", ":", "return", "False", "return", "False" ]
Test if given file is an ANSI color enabled tty.
[ "Test", "if", "given", "file", "is", "an", "ANSI", "color", "enabled", "tty", "." ]
python
train
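A small usage sketch for has_colors in the linkchecker record above, gating ANSI output on the check; the escape sequences are ordinary green/reset codes and the function is assumed to be in scope:

import sys

# Only emit ANSI escape codes when stdout is a color-capable terminal.
if has_colors(sys.stdout):
    print("\x1b[32mOK\x1b[0m")
else:
    print("OK")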
gtalarico/airtable-python-wrapper
airtable/airtable.py
https://github.com/gtalarico/airtable-python-wrapper/blob/48b2d806178085b52a31817571e5a1fc3dce4045/airtable/airtable.py#L217-L256
def get_iter(self, **options): """ Record Retriever Iterator Returns iterator with lists in batches according to pageSize. To get all records at once use :any:`get_all` >>> for page in airtable.get_iter(): ... for record in page: ... print(record) [{'fields': ... }, ...] Keyword Args: max_records (``int``, optional): The maximum total number of records that will be returned. See :any:`MaxRecordsParam` view (``str``, optional): The name or ID of a view. See :any:`ViewParam`. page_size (``int``, optional ): The number of records returned in each request. Must be less than or equal to 100. Default is 100. See :any:`PageSizeParam`. fields (``str``, ``list``, optional): Name of field or fields to be retrieved. Default is all fields. See :any:`FieldsParam`. sort (``list``, optional): List of fields to sort by. Default order is ascending. See :any:`SortParam`. formula (``str``, optional): Airtable formula. See :any:`FormulaParam`. Returns: iterator (``list``): List of Records, grouped by pageSize """ offset = None while True: data = self._get(self.url_table, offset=offset, **options) records = data.get('records', []) time.sleep(self.API_LIMIT) yield records offset = data.get('offset') if not offset: break
[ "def", "get_iter", "(", "self", ",", "*", "*", "options", ")", ":", "offset", "=", "None", "while", "True", ":", "data", "=", "self", ".", "_get", "(", "self", ".", "url_table", ",", "offset", "=", "offset", ",", "*", "*", "options", ")", "records", "=", "data", ".", "get", "(", "'records'", ",", "[", "]", ")", "time", ".", "sleep", "(", "self", ".", "API_LIMIT", ")", "yield", "records", "offset", "=", "data", ".", "get", "(", "'offset'", ")", "if", "not", "offset", ":", "break" ]
Record Retriever Iterator Returns iterator with lists in batches according to pageSize. To get all records at once use :any:`get_all` >>> for page in airtable.get_iter(): ... for record in page: ... print(record) [{'fields': ... }, ...] Keyword Args: max_records (``int``, optional): The maximum total number of records that will be returned. See :any:`MaxRecordsParam` view (``str``, optional): The name or ID of a view. See :any:`ViewParam`. page_size (``int``, optional ): The number of records returned in each request. Must be less than or equal to 100. Default is 100. See :any:`PageSizeParam`. fields (``str``, ``list``, optional): Name of field or fields to be retrieved. Default is all fields. See :any:`FieldsParam`. sort (``list``, optional): List of fields to sort by. Default order is ascending. See :any:`SortParam`. formula (``str``, optional): Airtable formula. See :any:`FormulaParam`. Returns: iterator (``list``): List of Records, grouped by pageSize
[ "Record", "Retriever", "Iterator", "Returns", "iterator", "with", "lists", "in", "batches", "according", "to", "pageSize", ".", "To", "get", "all", "records", "at", "once", "use", ":", "any", ":", "get_all", ">>>", "for", "page", "in", "airtable", ".", "get_iter", "()", ":", "...", "for", "record", "in", "page", ":", "...", "print", "(", "record", ")", "[", "{", "fields", ":", "...", "}", "...", "]", "Keyword", "Args", ":", "max_records", "(", "int", "optional", ")", ":", "The", "maximum", "total", "number", "of", "records", "that", "will", "be", "returned", ".", "See", ":", "any", ":", "MaxRecordsParam", "view", "(", "str", "optional", ")", ":", "The", "name", "or", "ID", "of", "a", "view", ".", "See", ":", "any", ":", "ViewParam", ".", "page_size", "(", "int", "optional", ")", ":", "The", "number", "of", "records", "returned", "in", "each", "request", ".", "Must", "be", "less", "than", "or", "equal", "to", "100", ".", "Default", "is", "100", ".", "See", ":", "any", ":", "PageSizeParam", ".", "fields", "(", "str", "list", "optional", ")", ":", "Name", "of", "field", "or", "fields", "to", "be", "retrieved", ".", "Default", "is", "all", "fields", ".", "See", ":", "any", ":", "FieldsParam", ".", "sort", "(", "list", "optional", ")", ":", "List", "of", "fields", "to", "sort", "by", ".", "Default", "order", "is", "ascending", ".", "See", ":", "any", ":", "SortParam", ".", "formula", "(", "str", "optional", ")", ":", "Airtable", "formula", ".", "See", ":", "any", ":", "FormulaParam", ".", "Returns", ":", "iterator", "(", "list", ")", ":", "List", "of", "Records", "grouped", "by", "pageSize" ]
python
train
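A usage sketch for get_iter in the airtable record above, following the doctest in its own docstring; the base ID, table name, API key, and view name are placeholders, not real values:

from airtable import Airtable

# Placeholder credentials and identifiers.
airtable = Airtable('appXXXXXXXXXXXXXX', 'Table Name', api_key='keyXXXXXXXXXXXXXX')

# Records arrive in pages of page_size, stopping after max_records in total.
for page in airtable.get_iter(view='Main View', page_size=50, max_records=200):
    for record in page:
        print(record['fields'])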
geophysics-ubonn/crtomo_tools
src/td_correct_temperature.py
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/src/td_correct_temperature.py#L176-L210
def save_mag_to_file(mag, filename, rhofile): """Save the values in rho- or mag-format. """ if rhofile: # bring data in shape null = np.zeros(len(mag)) if mag.shape[1] == 3: null = np.column_stack((null, null, null, null)) result = np.column_stack((mag, null)) # save datapoints with open(filename, 'w') as fid: fid.write('{0}\n'.format(mag.shape[0])) with open(filename, 'ab') as fid: np.savetxt(fid, np.array(result), fmt='%f') else: # bring data in shape with open('inv/rho00.mag', 'r') as fid: coor = np.loadtxt(fid, skiprows=1, usecols=[0, 1]) # calculated back to log if mag.shape[1] == 3: logx = [math.log(d, 10) for d in mag[:, 0]] logy = [math.log(d, 10) for d in mag[:, 1]] logz = [math.log(d, 10) for d in mag[:, 2]] mag_log = np.column_stack((logx, logy, logz)) else: mag_log = [math.log(d, 10) for d in mag] content = np.column_stack((coor[:, 0], coor[:, 1], mag_log)) # save datapoints with open(filename, 'w') as fid: fid.write('{0}\n'.format(content.shape[0])) with open(filename, 'ab') as fid: np.savetxt(fid, np.array(content), fmt='%f')
[ "def", "save_mag_to_file", "(", "mag", ",", "filename", ",", "rhofile", ")", ":", "if", "rhofile", ":", "# bring data in shape", "null", "=", "np", ".", "zeros", "(", "len", "(", "mag", ")", ")", "if", "mag", ".", "shape", "[", "1", "]", "==", "3", ":", "null", "=", "np", ".", "column_stack", "(", "(", "null", ",", "null", ",", "null", ",", "null", ")", ")", "result", "=", "np", ".", "column_stack", "(", "(", "mag", ",", "null", ")", ")", "# save datapoints", "with", "open", "(", "filename", ",", "'w'", ")", "as", "fid", ":", "fid", ".", "write", "(", "'{0}\\n'", ".", "format", "(", "mag", ".", "shape", "[", "0", "]", ")", ")", "with", "open", "(", "filename", ",", "'ab'", ")", "as", "fid", ":", "np", ".", "savetxt", "(", "fid", ",", "np", ".", "array", "(", "result", ")", ",", "fmt", "=", "'%f'", ")", "else", ":", "# bring data in shape", "with", "open", "(", "'inv/rho00.mag'", ",", "'r'", ")", "as", "fid", ":", "coor", "=", "np", ".", "loadtxt", "(", "fid", ",", "skiprows", "=", "1", ",", "usecols", "=", "[", "0", ",", "1", "]", ")", "# calculated back to log", "if", "mag", ".", "shape", "[", "1", "]", "==", "3", ":", "logx", "=", "[", "math", ".", "log", "(", "d", ",", "10", ")", "for", "d", "in", "mag", "[", ":", ",", "0", "]", "]", "logy", "=", "[", "math", ".", "log", "(", "d", ",", "10", ")", "for", "d", "in", "mag", "[", ":", ",", "1", "]", "]", "logz", "=", "[", "math", ".", "log", "(", "d", ",", "10", ")", "for", "d", "in", "mag", "[", ":", ",", "2", "]", "]", "mag_log", "=", "np", ".", "column_stack", "(", "(", "logx", ",", "logy", ",", "logz", ")", ")", "else", ":", "mag_log", "=", "[", "math", ".", "log", "(", "d", ",", "10", ")", "for", "d", "in", "mag", "]", "content", "=", "np", ".", "column_stack", "(", "(", "coor", "[", ":", ",", "0", "]", ",", "coor", "[", ":", ",", "1", "]", ",", "mag_log", ")", ")", "# save datapoints", "with", "open", "(", "filename", ",", "'w'", ")", "as", "fid", ":", "fid", ".", "write", "(", "'{0}\\n'", ".", "format", "(", "content", ".", "shape", "[", "0", "]", ")", ")", "with", "open", "(", "filename", ",", "'ab'", ")", "as", "fid", ":", "np", ".", "savetxt", "(", "fid", ",", "np", ".", "array", "(", "content", ")", ",", "fmt", "=", "'%f'", ")" ]
Save the values in rho- or mag-format.
[ "Save", "the", "values", "in", "rho", "-", "or", "mag", "-", "format", "." ]
python
train
psss/did
did/plugins/bugzilla.py
https://github.com/psss/did/blob/04e4ee6f1aa14c0cae3ba9f9803871f3f98279cb/did/plugins/bugzilla.py#L148-L156
def summary(self): """ Bug summary including resolution if enabled """ if not self.bug.resolution: return self.bug.summary if (self.bug.resolution.lower() in self.parent.resolutions or "all" in self.parent.resolutions): return "{0} [{1}]".format( self.bug.summary, self.bug.resolution.lower()) return self.bug.summary
[ "def", "summary", "(", "self", ")", ":", "if", "not", "self", ".", "bug", ".", "resolution", ":", "return", "self", ".", "bug", ".", "summary", "if", "(", "self", ".", "bug", ".", "resolution", ".", "lower", "(", ")", "in", "self", ".", "parent", ".", "resolutions", "or", "\"all\"", "in", "self", ".", "parent", ".", "resolutions", ")", ":", "return", "\"{0} [{1}]\"", ".", "format", "(", "self", ".", "bug", ".", "summary", ",", "self", ".", "bug", ".", "resolution", ".", "lower", "(", ")", ")", "return", "self", ".", "bug", ".", "summary" ]
Bug summary including resolution if enabled
[ "Bug", "summary", "including", "resolution", "if", "enabled" ]
python
train
Yubico/python-yubico
yubico/yubikey_config.py
https://github.com/Yubico/python-yubico/blob/a72e8eddb90da6ee96e29f60912ca1f2872c9aea/yubico/yubikey_config.py#L550-L557
def _get_flag(which, flags): """ Find 'which' entry in 'flags'. """ res = [this for this in flags if this.is_equal(which)] if len(res) == 0: return None if len(res) == 1: return res[0] assert()
[ "def", "_get_flag", "(", "which", ",", "flags", ")", ":", "res", "=", "[", "this", "for", "this", "in", "flags", "if", "this", ".", "is_equal", "(", "which", ")", "]", "if", "len", "(", "res", ")", "==", "0", ":", "return", "None", "if", "len", "(", "res", ")", "==", "1", ":", "return", "res", "[", "0", "]", "assert", "(", ")" ]
Find 'which' entry in 'flags'.
[ "Find", "which", "entry", "in", "flags", "." ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/rnc_db.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_db.py#L2334-L2343
def fetch_fieldnames(self, sql: str, *args) -> List[str]: """Executes SQL; returns just the output fieldnames.""" self.ensure_db_open() cursor = self.db.cursor() self.db_exec_with_cursor(cursor, sql, *args) try: return [i[0] for i in cursor.description] except: # nopep8 log.exception("fetch_fieldnames: SQL was: " + sql) raise
[ "def", "fetch_fieldnames", "(", "self", ",", "sql", ":", "str", ",", "*", "args", ")", "->", "List", "[", "str", "]", ":", "self", ".", "ensure_db_open", "(", ")", "cursor", "=", "self", ".", "db", ".", "cursor", "(", ")", "self", ".", "db_exec_with_cursor", "(", "cursor", ",", "sql", ",", "*", "args", ")", "try", ":", "return", "[", "i", "[", "0", "]", "for", "i", "in", "cursor", ".", "description", "]", "except", ":", "# nopep8", "log", ".", "exception", "(", "\"fetch_fieldnames: SQL was: \"", "+", "sql", ")", "raise" ]
Executes SQL; returns just the output fieldnames.
[ "Executes", "SQL", ";", "returns", "just", "the", "output", "fieldnames", "." ]
python
train
ssato/python-anytemplate
anytemplate/utils.py
https://github.com/ssato/python-anytemplate/blob/3e56baa914bd47f044083b20e33100f836443596/anytemplate/utils.py#L195-L205
def _write_to_filepath(content, output): """ :param content: Content string to write to :param output: Output file path """ outdir = os.path.dirname(output) if outdir and not os.path.exists(outdir): os.makedirs(outdir) with anytemplate.compat.copen(output, 'w') as out: out.write(content)
[ "def", "_write_to_filepath", "(", "content", ",", "output", ")", ":", "outdir", "=", "os", ".", "path", ".", "dirname", "(", "output", ")", "if", "outdir", "and", "not", "os", ".", "path", ".", "exists", "(", "outdir", ")", ":", "os", ".", "makedirs", "(", "outdir", ")", "with", "anytemplate", ".", "compat", ".", "copen", "(", "output", ",", "'w'", ")", "as", "out", ":", "out", ".", "write", "(", "content", ")" ]
:param content: Content string to write to :param output: Output file path
[ ":", "param", "content", ":", "Content", "string", "to", "write", "to", ":", "param", "output", ":", "Output", "file", "path" ]
python
train
pkkid/python-plexapi
plexapi/myplex.py
https://github.com/pkkid/python-plexapi/blob/9efbde96441c2bfbf410eacfb46e811e108e8bbc/plexapi/myplex.py#L401-L418
def syncItems(self, client=None, clientId=None): """ Returns an instance of :class:`plexapi.sync.SyncList` for specified client. Parameters: client (:class:`~plexapi.myplex.MyPlexDevice`): a client to query SyncItems for. clientId (str): an identifier of a client to query SyncItems for. If both `client` and `clientId` provided the client would be preferred. If neither `client` nor `clientId` provided the clientId would be set to current clients`s identifier. """ if client: clientId = client.clientIdentifier elif clientId is None: clientId = X_PLEX_IDENTIFIER data = self.query(SyncList.key.format(clientId=clientId)) return SyncList(self, data)
[ "def", "syncItems", "(", "self", ",", "client", "=", "None", ",", "clientId", "=", "None", ")", ":", "if", "client", ":", "clientId", "=", "client", ".", "clientIdentifier", "elif", "clientId", "is", "None", ":", "clientId", "=", "X_PLEX_IDENTIFIER", "data", "=", "self", ".", "query", "(", "SyncList", ".", "key", ".", "format", "(", "clientId", "=", "clientId", ")", ")", "return", "SyncList", "(", "self", ",", "data", ")" ]
Returns an instance of :class:`plexapi.sync.SyncList` for specified client. Parameters: client (:class:`~plexapi.myplex.MyPlexDevice`): a client to query SyncItems for. clientId (str): an identifier of a client to query SyncItems for. If both `client` and `clientId` provided the client would be preferred. If neither `client` nor `clientId` provided the clientId would be set to current client's identifier.
[ "Returns", "an", "instance", "of", ":", "class", ":", "plexapi", ".", "sync", ".", "SyncList", "for", "specified", "client", "." ]
python
train
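A short sketch of reaching syncItems in the plexapi record above through a MyPlexAccount; the credentials and client identifier are placeholders, and the return value is the SyncList the docstring describes:

from plexapi.myplex import MyPlexAccount

# Placeholder credentials; when clientId is omitted it falls back to X_PLEX_IDENTIFIER.
account = MyPlexAccount('<username>', '<password>')
sync_list = account.syncItems(clientId='<client-identifier>')
print(sync_list)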
nwilming/ocupy
ocupy/saccade_geometry.py
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/saccade_geometry.py#L93-L122
def predict_fixation_duration( durations, angles, length_diffs, dataset=None, params=None): """ Fits a non-linear piecewise regression to fixtaion durations for a fixmat. Returns corrected fixation durations. """ if dataset is None: dataset = np.ones(durations.shape) corrected_durations = np.nan * np.ones(durations.shape) for i, ds in enumerate(np.unique(dataset)): e = lambda v, x, y, z: (leastsq_dual_model(x, z, *v) - y) v0 = [120, 220.0, -.1, 0.5, .1, .1] id_ds = dataset == ds idnan = ( ~np.isnan(angles)) & ( ~np.isnan(durations)) & ( ~np.isnan(length_diffs)) v, s = leastsq( e, v0, args=( angles[ idnan & id_ds], durations[ idnan & id_ds], length_diffs[ idnan & id_ds]), maxfev=10000) corrected_durations[id_ds] = (durations[id_ds] - (leastsq_dual_model(angles[id_ds], length_diffs[id_ds], *v))) if params is not None: params['v' + str(i)] = v params['s' + str(i)] = s return corrected_durations
[ "def", "predict_fixation_duration", "(", "durations", ",", "angles", ",", "length_diffs", ",", "dataset", "=", "None", ",", "params", "=", "None", ")", ":", "if", "dataset", "is", "None", ":", "dataset", "=", "np", ".", "ones", "(", "durations", ".", "shape", ")", "corrected_durations", "=", "np", ".", "nan", "*", "np", ".", "ones", "(", "durations", ".", "shape", ")", "for", "i", ",", "ds", "in", "enumerate", "(", "np", ".", "unique", "(", "dataset", ")", ")", ":", "e", "=", "lambda", "v", ",", "x", ",", "y", ",", "z", ":", "(", "leastsq_dual_model", "(", "x", ",", "z", ",", "*", "v", ")", "-", "y", ")", "v0", "=", "[", "120", ",", "220.0", ",", "-", ".1", ",", "0.5", ",", ".1", ",", ".1", "]", "id_ds", "=", "dataset", "==", "ds", "idnan", "=", "(", "~", "np", ".", "isnan", "(", "angles", ")", ")", "&", "(", "~", "np", ".", "isnan", "(", "durations", ")", ")", "&", "(", "~", "np", ".", "isnan", "(", "length_diffs", ")", ")", "v", ",", "s", "=", "leastsq", "(", "e", ",", "v0", ",", "args", "=", "(", "angles", "[", "idnan", "&", "id_ds", "]", ",", "durations", "[", "idnan", "&", "id_ds", "]", ",", "length_diffs", "[", "idnan", "&", "id_ds", "]", ")", ",", "maxfev", "=", "10000", ")", "corrected_durations", "[", "id_ds", "]", "=", "(", "durations", "[", "id_ds", "]", "-", "(", "leastsq_dual_model", "(", "angles", "[", "id_ds", "]", ",", "length_diffs", "[", "id_ds", "]", ",", "*", "v", ")", ")", ")", "if", "params", "is", "not", "None", ":", "params", "[", "'v'", "+", "str", "(", "i", ")", "]", "=", "v", "params", "[", "'s'", "+", "str", "(", "i", ")", "]", "=", "s", "return", "corrected_durations" ]
Fits a non-linear piecewise regression to fixation durations for a fixmat. Returns corrected fixation durations.
[ "Fits", "a", "non", "-", "linear", "piecewise", "regression", "to", "fixation", "durations", "for", "a", "fixmat", "." ]
python
train
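An illustrative call of predict_fixation_duration from the ocupy record above; the synthetic numpy arrays below stand in for fixmat-derived durations, angles, and length differences, purely to show the calling convention:

import numpy as np

# Synthetic stand-in data; real callers pass arrays extracted from a fixmat.
durations = np.random.gamma(2.0, 100.0, size=1000)
angles = np.random.uniform(-180.0, 180.0, size=1000)
length_diffs = np.random.randn(1000)

corrected = predict_fixation_duration(durations, angles, length_diffs)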
Archived-Object/ligament
ligament/buildcontext.py
https://github.com/Archived-Object/ligament/blob/ff3d78130522676a20dc64086dc8a27b197cc20f/ligament/buildcontext.py#L94-L110
def verify_valid_dependencies(self): """ Checks if the assigned dependencies are valid valid dependency graphs are: - noncyclic (i.e. no `A -> B -> ... -> A`) - Contain no undefined dependencies (dependencies referencing undefined tasks) """ unobserved_dependencies = set(self.tasks.keys()) target_queue = [] while len(unobserved_dependencies) > 0: target_queue = [unobserved_dependencies.pop()] while target_queue is not []: target_queue += unobserved_dependencies
[ "def", "verify_valid_dependencies", "(", "self", ")", ":", "unobserved_dependencies", "=", "set", "(", "self", ".", "tasks", ".", "keys", "(", ")", ")", "target_queue", "=", "[", "]", "while", "len", "(", "unobserved_dependencies", ")", ">", "0", ":", "target_queue", "=", "[", "unobserved_dependencies", ".", "pop", "(", ")", "]", "while", "target_queue", "is", "not", "[", "]", ":", "target_queue", "+=", "unobserved_dependencies" ]
Checks if the assigned dependencies are valid valid dependency graphs are: - noncyclic (i.e. no `A -> B -> ... -> A`) - Contain no undefined dependencies (dependencies referencing undefined tasks)
[ "Checks", "if", "the", "assigned", "dependencies", "are", "valid", "valid", "dependency", "graphs", "are", ":" ]
python
train
treethought/flask-assistant
flask_assistant/response.py
https://github.com/treethought/flask-assistant/blob/9331b9796644dfa987bcd97a13e78e9ab62923d3/flask_assistant/response.py#L99-L121
def suggest(self, *replies): """Use suggestion chips to hint at responses to continue or pivot the conversation""" chips = [] for r in replies: chips.append({"title": r}) # NOTE: both of these formats work in the dialogflow console, # but only the first (suggestions) appears in actual Google Assistant # native chips for GA self._messages.append( {"platform": "ACTIONS_ON_GOOGLE", "suggestions": {"suggestions": chips}} ) # # quick replies for other platforms # self._messages.append( # { # "platform": "ACTIONS_ON_GOOGLE", # "quickReplies": {"title": None, "quickReplies": replies}, # } # ) return self
[ "def", "suggest", "(", "self", ",", "*", "replies", ")", ":", "chips", "=", "[", "]", "for", "r", "in", "replies", ":", "chips", ".", "append", "(", "{", "\"title\"", ":", "r", "}", ")", "# NOTE: both of these formats work in the dialogflow console,", "# but only the first (suggestions) appears in actual Google Assistant", "# native chips for GA", "self", ".", "_messages", ".", "append", "(", "{", "\"platform\"", ":", "\"ACTIONS_ON_GOOGLE\"", ",", "\"suggestions\"", ":", "{", "\"suggestions\"", ":", "chips", "}", "}", ")", "# # quick replies for other platforms", "# self._messages.append(", "# {", "# \"platform\": \"ACTIONS_ON_GOOGLE\",", "# \"quickReplies\": {\"title\": None, \"quickReplies\": replies},", "# }", "# )", "return", "self" ]
Use suggestion chips to hint at responses to continue or pivot the conversation
[ "Use", "suggestion", "chips", "to", "hint", "at", "responses", "to", "continue", "or", "pivot", "the", "conversation" ]
python
train
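A hedged usage sketch for suggest in the flask-assistant record above; because the method returns self it chains onto other response builders such as ask, and the intent name and chip titles here are made up:

from flask import Flask
from flask_assistant import Assistant, ask

app = Flask(__name__)
assist = Assistant(app, route='/')

@assist.action('greeting')
def greet():
    # suggest() appends suggestion chips and returns the response object,
    # so it chains directly onto ask().
    return ask('What would you like to do?').suggest('Check status', 'Get help')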
raiden-network/raiden
raiden/transfer/views.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/transfer/views.py#L427-L438
def get_channelstate_settled( chain_state: ChainState, payment_network_id: PaymentNetworkID, token_address: TokenAddress, ) -> List[NettingChannelState]: """Return the state of settled channels in a token network.""" return get_channelstate_filter( chain_state, payment_network_id, token_address, lambda channel_state: channel.get_status(channel_state) == CHANNEL_STATE_SETTLED, )
[ "def", "get_channelstate_settled", "(", "chain_state", ":", "ChainState", ",", "payment_network_id", ":", "PaymentNetworkID", ",", "token_address", ":", "TokenAddress", ",", ")", "->", "List", "[", "NettingChannelState", "]", ":", "return", "get_channelstate_filter", "(", "chain_state", ",", "payment_network_id", ",", "token_address", ",", "lambda", "channel_state", ":", "channel", ".", "get_status", "(", "channel_state", ")", "==", "CHANNEL_STATE_SETTLED", ",", ")" ]
Return the state of settled channels in a token network.
[ "Return", "the", "state", "of", "settled", "channels", "in", "a", "token", "network", "." ]
python
train
deepmind/pysc2
pysc2/lib/actions.py
https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/actions.py#L96-L101
def select_unit(action, action_space, select_unit_act, select_unit_id): """Select a specific unit from the multi-unit selection.""" del action_space select = action.action_ui.multi_panel select.type = select_unit_act select.unit_index = select_unit_id
[ "def", "select_unit", "(", "action", ",", "action_space", ",", "select_unit_act", ",", "select_unit_id", ")", ":", "del", "action_space", "select", "=", "action", ".", "action_ui", ".", "multi_panel", "select", ".", "type", "=", "select_unit_act", "select", ".", "unit_index", "=", "select_unit_id" ]
Select a specific unit from the multi-unit selection.
[ "Select", "a", "specific", "unit", "from", "the", "multi", "-", "unit", "selection", "." ]
python
train
fermiPy/fermipy
fermipy/jobs/job_archive.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/job_archive.py#L433-L439
def update_table_row(self, table, row_idx): """Add this instance as a row on a `astropy.table.Table` """ try: table[row_idx]['timestamp'] = self.timestamp table[row_idx]['status'] = self.status except IndexError: print("Index error", len(table), row_idx)
[ "def", "update_table_row", "(", "self", ",", "table", ",", "row_idx", ")", ":", "try", ":", "table", "[", "row_idx", "]", "[", "'timestamp'", "]", "=", "self", ".", "timestamp", "table", "[", "row_idx", "]", "[", "'status'", "]", "=", "self", ".", "status", "except", "IndexError", ":", "print", "(", "\"Index error\"", ",", "len", "(", "table", ")", ",", "row_idx", ")" ]
Add this instance as a row on a `astropy.table.Table`
[ "Add", "this", "instance", "as", "a", "row", "on", "a", "astropy", ".", "table", ".", "Table" ]
python
train
proycon/clam
clam/clamservice.py
https://github.com/proycon/clam/blob/09d15cfc26d7cbe0f5976cdd5424dc446d10dbf3/clam/clamservice.py#L869-L879
def inputindexbytemplate(project, user, inputtemplate): """Retrieve sorted index for the specified input template""" index = [] #pylint: disable=redefined-outer-name prefix = Project.path(project, user) + 'input/' for linkf, f in globsymlinks(prefix + '.*.INPUTTEMPLATE.' + inputtemplate.id + '.*'): seq = int(linkf.split('.')[-1]) index.append( (seq,f) ) #yield CLAMFile objects in proper sequence for seq, f in sorted(index): yield seq, clam.common.data.CLAMInputFile(Project.path(project, user), f[len(prefix):])
[ "def", "inputindexbytemplate", "(", "project", ",", "user", ",", "inputtemplate", ")", ":", "index", "=", "[", "]", "#pylint: disable=redefined-outer-name", "prefix", "=", "Project", ".", "path", "(", "project", ",", "user", ")", "+", "'input/'", "for", "linkf", ",", "f", "in", "globsymlinks", "(", "prefix", "+", "'.*.INPUTTEMPLATE.'", "+", "inputtemplate", ".", "id", "+", "'.*'", ")", ":", "seq", "=", "int", "(", "linkf", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", ")", "index", ".", "append", "(", "(", "seq", ",", "f", ")", ")", "#yield CLAMFile objects in proper sequence", "for", "seq", ",", "f", "in", "sorted", "(", "index", ")", ":", "yield", "seq", ",", "clam", ".", "common", ".", "data", ".", "CLAMInputFile", "(", "Project", ".", "path", "(", "project", ",", "user", ")", ",", "f", "[", "len", "(", "prefix", ")", ":", "]", ")" ]
Retrieve sorted index for the specified input template
[ "Retrieve", "sorted", "index", "for", "the", "specified", "input", "template" ]
python
train
4Kaylum/Brickfront
brickfront/client.py
https://github.com/4Kaylum/Brickfront/blob/9545f2183249862b077677d48fcfb9b4bfe1f87d/brickfront/client.py#L30-L39
def checkResponse(request): ''' Returns if a request has an okay error code, otherwise raises InvalidRequest. ''' # Check the status code of the returned request if str(request.status_code)[0] not in ['2', '3']: w = str(request.text).split('\\r')[0][2:] raise InvalidRequest(w) return
[ "def", "checkResponse", "(", "request", ")", ":", "# Check the status code of the returned request", "if", "str", "(", "request", ".", "status_code", ")", "[", "0", "]", "not", "in", "[", "'2'", ",", "'3'", "]", ":", "w", "=", "str", "(", "request", ".", "text", ")", ".", "split", "(", "'\\\\r'", ")", "[", "0", "]", "[", "2", ":", "]", "raise", "InvalidRequest", "(", "w", ")", "return" ]
Returns if a request has an okay error code, otherwise raises InvalidRequest.
[ "Returns", "if", "a", "request", "has", "an", "okay", "error", "code", "otherwise", "raises", "InvalidRequest", "." ]
python
train
Bogdanp/anom-py
anom/transaction.py
https://github.com/Bogdanp/anom-py/blob/519078b6d1570fa63c5f17cf98817c7bb5588136/anom/transaction.py#L82-L131
def transactional(*, adapter=None, retries=3, propagation=Transaction.Propagation.Nested): """Decorates functions so that all of their operations (except for queries) run inside a Datastore transaction. Parameters: adapter(Adapter, optional): The Adapter to use when running the transaction. Defaults to the current adapter. retries(int, optional): The number of times to retry the transaction if it couldn't be committed. propagation(Transaction.Propagation, optional): The propagation strategy to use. By default, transactions are nested, but you can force certain transactions to always run independently. Raises: anom.RetriesExceeded: When the decorator runbs out of retries while trying to commit the transaction. Returns: callable: The decorated function. """ def decorator(fn): @wraps(fn) def inner(*args, **kwargs): nonlocal adapter adapter = adapter or get_adapter() attempts, cause = 0, None while attempts <= retries: attempts += 1 transaction = adapter.transaction(propagation) try: transaction.begin() res = fn(*args, **kwargs) transaction.commit() return res except TransactionFailed as e: cause = e continue except Exception as e: transaction.rollback() raise e finally: transaction.end() raise RetriesExceeded(cause) return inner return decorator
[ "def", "transactional", "(", "*", ",", "adapter", "=", "None", ",", "retries", "=", "3", ",", "propagation", "=", "Transaction", ".", "Propagation", ".", "Nested", ")", ":", "def", "decorator", "(", "fn", ")", ":", "@", "wraps", "(", "fn", ")", "def", "inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "nonlocal", "adapter", "adapter", "=", "adapter", "or", "get_adapter", "(", ")", "attempts", ",", "cause", "=", "0", ",", "None", "while", "attempts", "<=", "retries", ":", "attempts", "+=", "1", "transaction", "=", "adapter", ".", "transaction", "(", "propagation", ")", "try", ":", "transaction", ".", "begin", "(", ")", "res", "=", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "transaction", ".", "commit", "(", ")", "return", "res", "except", "TransactionFailed", "as", "e", ":", "cause", "=", "e", "continue", "except", "Exception", "as", "e", ":", "transaction", ".", "rollback", "(", ")", "raise", "e", "finally", ":", "transaction", ".", "end", "(", ")", "raise", "RetriesExceeded", "(", "cause", ")", "return", "inner", "return", "decorator" ]
Decorates functions so that all of their operations (except for queries) run inside a Datastore transaction. Parameters: adapter(Adapter, optional): The Adapter to use when running the transaction. Defaults to the current adapter. retries(int, optional): The number of times to retry the transaction if it couldn't be committed. propagation(Transaction.Propagation, optional): The propagation strategy to use. By default, transactions are nested, but you can force certain transactions to always run independently. Raises: anom.RetriesExceeded: When the decorator runs out of retries while trying to commit the transaction. Returns: callable: The decorated function.
[ "Decorates", "functions", "so", "that", "all", "of", "their", "operations", "(", "except", "for", "queries", ")", "run", "inside", "a", "Datastore", "transaction", "." ]
python
train
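A minimal sketch of applying the transactional decorator from the anom record above; the account entities and the transfer logic are invented for illustration, but the retries parameter matches the docstring:

@transactional(retries=5)
def transfer_funds(source_account, dest_account, amount):
    # Every datastore operation in here runs inside one transaction, and the
    # whole function is retried up to 5 times if the commit fails.
    source_account.balance -= amount
    dest_account.balance += amount
    source_account.put()
    dest_account.put()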
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAFetch/QAQuery.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAFetch/QAQuery.py#L291-L334
def QA_fetch_index_min( code, start, end, format='numpy', frequence='1min', collections=DATABASE.index_min): '获取股票分钟线' if frequence in ['1min', '1m']: frequence = '1min' elif frequence in ['5min', '5m']: frequence = '5min' elif frequence in ['15min', '15m']: frequence = '15min' elif frequence in ['30min', '30m']: frequence = '30min' elif frequence in ['60min', '60m']: frequence = '60min' __data = [] code = QA_util_code_tolist(code) cursor = collections.find({ 'code': {'$in': code}, "time_stamp": { "$gte": QA_util_time_stamp(start), "$lte": QA_util_time_stamp(end) }, 'type': frequence }, {"_id": 0}, batch_size=10000) if format in ['dict', 'json']: return [data for data in cursor] # for item in cursor: __data = pd.DataFrame([item for item in cursor]) __data = __data.assign(datetime=pd.to_datetime(__data['datetime'])) # __data.append([str(item['code']), float(item['open']), float(item['high']), float( # item['low']), float(item['close']), int(item['up_count']), int(item['down_count']), float(item['vol']), float(item['amount']), item['datetime'], item['time_stamp'], item['date'], item['type']]) # __data = DataFrame(__data, columns=[ # 'code', 'open', 'high', 'low', 'close', 'up_count', 'down_count', 'volume', 'amount', 'datetime', 'time_stamp', 'date', 'type']) # __data['datetime'] = pd.to_datetime(__data['datetime']) __data = __data.set_index('datetime', drop=False) if format in ['numpy', 'np', 'n']: return numpy.asarray(__data) elif format in ['list', 'l', 'L']: return numpy.asarray(__data).tolist() elif format in ['P', 'p', 'pandas', 'pd']: return __data
[ "def", "QA_fetch_index_min", "(", "code", ",", "start", ",", "end", ",", "format", "=", "'numpy'", ",", "frequence", "=", "'1min'", ",", "collections", "=", "DATABASE", ".", "index_min", ")", ":", "if", "frequence", "in", "[", "'1min'", ",", "'1m'", "]", ":", "frequence", "=", "'1min'", "elif", "frequence", "in", "[", "'5min'", ",", "'5m'", "]", ":", "frequence", "=", "'5min'", "elif", "frequence", "in", "[", "'15min'", ",", "'15m'", "]", ":", "frequence", "=", "'15min'", "elif", "frequence", "in", "[", "'30min'", ",", "'30m'", "]", ":", "frequence", "=", "'30min'", "elif", "frequence", "in", "[", "'60min'", ",", "'60m'", "]", ":", "frequence", "=", "'60min'", "__data", "=", "[", "]", "code", "=", "QA_util_code_tolist", "(", "code", ")", "cursor", "=", "collections", ".", "find", "(", "{", "'code'", ":", "{", "'$in'", ":", "code", "}", ",", "\"time_stamp\"", ":", "{", "\"$gte\"", ":", "QA_util_time_stamp", "(", "start", ")", ",", "\"$lte\"", ":", "QA_util_time_stamp", "(", "end", ")", "}", ",", "'type'", ":", "frequence", "}", ",", "{", "\"_id\"", ":", "0", "}", ",", "batch_size", "=", "10000", ")", "if", "format", "in", "[", "'dict'", ",", "'json'", "]", ":", "return", "[", "data", "for", "data", "in", "cursor", "]", "# for item in cursor:", "__data", "=", "pd", ".", "DataFrame", "(", "[", "item", "for", "item", "in", "cursor", "]", ")", "__data", "=", "__data", ".", "assign", "(", "datetime", "=", "pd", ".", "to_datetime", "(", "__data", "[", "'datetime'", "]", ")", ")", "# __data.append([str(item['code']), float(item['open']), float(item['high']), float(", "# item['low']), float(item['close']), int(item['up_count']), int(item['down_count']), float(item['vol']), float(item['amount']), item['datetime'], item['time_stamp'], item['date'], item['type']])", "# __data = DataFrame(__data, columns=[", "# 'code', 'open', 'high', 'low', 'close', 'up_count', 'down_count', 'volume', 'amount', 'datetime', 'time_stamp', 'date', 'type'])", "# __data['datetime'] = pd.to_datetime(__data['datetime'])", "__data", "=", "__data", ".", "set_index", "(", "'datetime'", ",", "drop", "=", "False", ")", "if", "format", "in", "[", "'numpy'", ",", "'np'", ",", "'n'", "]", ":", "return", "numpy", ".", "asarray", "(", "__data", ")", "elif", "format", "in", "[", "'list'", ",", "'l'", ",", "'L'", "]", ":", "return", "numpy", ".", "asarray", "(", "__data", ")", ".", "tolist", "(", ")", "elif", "format", "in", "[", "'P'", ",", "'p'", ",", "'pandas'", ",", "'pd'", "]", ":", "return", "__data" ]
获取股票分钟线 (fetch stock minute-bar data)
[ "获取股票分钟线" ]
python
train
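An illustrative call of QA_fetch_index_min from the QUANTAXIS record above; the index code and date range are placeholders, and format='pd' selects the pandas DataFrame branch at the end of the function:

# Placeholder index code and dates; frequence accepts the aliases handled above.
df = QA_fetch_index_min('000300', '2019-01-02', '2019-01-04',
                        format='pd', frequence='5min')
print(df.head())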
tensorflow/mesh
mesh_tensorflow/layers.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/layers.py#L27-L82
def dense(x, output_dim, reduced_dims=None, expert_dims=None, use_bias=True, activation=None, master_dtype=tf.float32, slice_dtype=tf.float32, variable_dtype=None, name=None): """Dense layer doing (kernel*x + bias) computation. Args: x: a mtf.Tensor of shape [..., reduced_dims]. output_dim: a mtf.Dimension reduced_dims: an optional list of mtf.Dimensions of x to be reduced. If omitted, we reduce the last dimension. expert_dims: an optional list of mtf.Dimension which represent different experts. Different experts get different weights. use_bias: a boolean, whether to add bias. activation: an optional function from mtf.Tensor to mtf.Tensor master_dtype: a tf.dtype (deprecated - use variable_dtype) slice_dtype: a tf.dtype (deprecated - use variable_dtype) variable_dtype: a mtf.VariableDType name: a string. variable scope. Returns: a mtf.Tensor of shape [..., output_dim]. """ if variable_dtype is None: variable_dtype = mtf.VariableDType(master_dtype, slice_dtype, x.dtype) if expert_dims is None: expert_dims = [] if reduced_dims is None: reduced_dims = x.shape.dims[-1:] w_shape = mtf.Shape(expert_dims + reduced_dims + [output_dim]) output_shape = mtf.Shape( [d for d in x.shape.dims if d not in reduced_dims] + [output_dim]) with tf.variable_scope(name, default_name="dense"): stddev = mtf.list_product(d.size for d in reduced_dims) ** -0.5 w = mtf.get_variable( x.mesh, "kernel", w_shape, initializer=tf.random_normal_initializer(stddev=stddev), dtype=variable_dtype) w = mtf.cast(w, x.dtype) y = mtf.einsum([x, w], output_shape) if use_bias: b = mtf.get_variable( x.mesh, "bias", mtf.Shape(expert_dims + [output_dim]), initializer=tf.zeros_initializer(), dtype=variable_dtype) y += b if activation is not None: y = activation(y) return y
[ "def", "dense", "(", "x", ",", "output_dim", ",", "reduced_dims", "=", "None", ",", "expert_dims", "=", "None", ",", "use_bias", "=", "True", ",", "activation", "=", "None", ",", "master_dtype", "=", "tf", ".", "float32", ",", "slice_dtype", "=", "tf", ".", "float32", ",", "variable_dtype", "=", "None", ",", "name", "=", "None", ")", ":", "if", "variable_dtype", "is", "None", ":", "variable_dtype", "=", "mtf", ".", "VariableDType", "(", "master_dtype", ",", "slice_dtype", ",", "x", ".", "dtype", ")", "if", "expert_dims", "is", "None", ":", "expert_dims", "=", "[", "]", "if", "reduced_dims", "is", "None", ":", "reduced_dims", "=", "x", ".", "shape", ".", "dims", "[", "-", "1", ":", "]", "w_shape", "=", "mtf", ".", "Shape", "(", "expert_dims", "+", "reduced_dims", "+", "[", "output_dim", "]", ")", "output_shape", "=", "mtf", ".", "Shape", "(", "[", "d", "for", "d", "in", "x", ".", "shape", ".", "dims", "if", "d", "not", "in", "reduced_dims", "]", "+", "[", "output_dim", "]", ")", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"dense\"", ")", ":", "stddev", "=", "mtf", ".", "list_product", "(", "d", ".", "size", "for", "d", "in", "reduced_dims", ")", "**", "-", "0.5", "w", "=", "mtf", ".", "get_variable", "(", "x", ".", "mesh", ",", "\"kernel\"", ",", "w_shape", ",", "initializer", "=", "tf", ".", "random_normal_initializer", "(", "stddev", "=", "stddev", ")", ",", "dtype", "=", "variable_dtype", ")", "w", "=", "mtf", ".", "cast", "(", "w", ",", "x", ".", "dtype", ")", "y", "=", "mtf", ".", "einsum", "(", "[", "x", ",", "w", "]", ",", "output_shape", ")", "if", "use_bias", ":", "b", "=", "mtf", ".", "get_variable", "(", "x", ".", "mesh", ",", "\"bias\"", ",", "mtf", ".", "Shape", "(", "expert_dims", "+", "[", "output_dim", "]", ")", ",", "initializer", "=", "tf", ".", "zeros_initializer", "(", ")", ",", "dtype", "=", "variable_dtype", ")", "y", "+=", "b", "if", "activation", "is", "not", "None", ":", "y", "=", "activation", "(", "y", ")", "return", "y" ]
Dense layer doing (kernel*x + bias) computation. Args: x: a mtf.Tensor of shape [..., reduced_dims]. output_dim: a mtf.Dimension reduced_dims: an optional list of mtf.Dimensions of x to be reduced. If omitted, we reduce the last dimension. expert_dims: an optional list of mtf.Dimension which represent different experts. Different experts get different weights. use_bias: a boolean, whether to add bias. activation: an optional function from mtf.Tensor to mtf.Tensor master_dtype: a tf.dtype (deprecated - use variable_dtype) slice_dtype: a tf.dtype (deprecated - use variable_dtype) variable_dtype: a mtf.VariableDType name: a string. variable scope. Returns: a mtf.Tensor of shape [..., output_dim].
[ "Dense", "layer", "doing", "(", "kernel", "*", "x", "+", "bias", ")", "computation", "." ]
python
train
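A small sketch of calling the mesh-tensorflow dense layer from the record above; the dimension names and sizes are arbitrary, and x is assumed to be an existing mtf.Tensor of shape [batch, d_model] built on some mesh:

import mesh_tensorflow as mtf

batch = mtf.Dimension("batch", 32)
d_model = mtf.Dimension("d_model", 512)
d_ff = mtf.Dimension("d_ff", 2048)

# Reduce over d_model and produce d_ff, with a relu nonlinearity.
y = dense(x, d_ff, reduced_dims=[d_model], activation=mtf.relu, name="ffn_in")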
pyopenapi/pyswagger
pyswagger/core.py
https://github.com/pyopenapi/pyswagger/blob/333c4ca08e758cd2194943d9904a3eda3fe43977/pyswagger/core.py#L194-L232
def prepare_obj(self, obj, jref): """ basic preparation of an object(those in sepc._version_.objects), and cache the 'prepared' object. """ if not obj: raise Exception('unexpected, passing {0}:{1} to prepare'.format(obj, jref)) s = Scanner(self) if self.version == '1.2': # upgrade from 1.2 to 2.0 converter = Upgrade(self.__sep) s.scan(root=obj, route=[converter]) obj = converter.swagger if not obj: raise Exception('unable to upgrade from 1.2: {0}'.format(jref)) s.scan(root=obj, route=[AssignParent()]) # fix for yaml that treat response code as number s.scan(root=obj, route=[YamlFixer()], leaves=[Operation]) # normalize $ref url, jp = utils.jr_split(jref) s.scan(root=obj, route=[NormalizeRef(url)]) # cache this object if url not in self.__objs: if jp == '#': self.__objs[url] = obj else: self.__objs[url] = {jp: obj} else: if not isinstance(self.__objs[url], dict): raise Exception('it should be able to resolve with BaseObj') self.__objs[url].update({jp: obj}) # pre resolve Schema Object # note: make sure this object is cached before using 'Resolve' scanner s.scan(root=obj, route=[Resolve()]) return obj
[ "def", "prepare_obj", "(", "self", ",", "obj", ",", "jref", ")", ":", "if", "not", "obj", ":", "raise", "Exception", "(", "'unexpected, passing {0}:{1} to prepare'", ".", "format", "(", "obj", ",", "jref", ")", ")", "s", "=", "Scanner", "(", "self", ")", "if", "self", ".", "version", "==", "'1.2'", ":", "# upgrade from 1.2 to 2.0", "converter", "=", "Upgrade", "(", "self", ".", "__sep", ")", "s", ".", "scan", "(", "root", "=", "obj", ",", "route", "=", "[", "converter", "]", ")", "obj", "=", "converter", ".", "swagger", "if", "not", "obj", ":", "raise", "Exception", "(", "'unable to upgrade from 1.2: {0}'", ".", "format", "(", "jref", ")", ")", "s", ".", "scan", "(", "root", "=", "obj", ",", "route", "=", "[", "AssignParent", "(", ")", "]", ")", "# fix for yaml that treat response code as number", "s", ".", "scan", "(", "root", "=", "obj", ",", "route", "=", "[", "YamlFixer", "(", ")", "]", ",", "leaves", "=", "[", "Operation", "]", ")", "# normalize $ref", "url", ",", "jp", "=", "utils", ".", "jr_split", "(", "jref", ")", "s", ".", "scan", "(", "root", "=", "obj", ",", "route", "=", "[", "NormalizeRef", "(", "url", ")", "]", ")", "# cache this object", "if", "url", "not", "in", "self", ".", "__objs", ":", "if", "jp", "==", "'#'", ":", "self", ".", "__objs", "[", "url", "]", "=", "obj", "else", ":", "self", ".", "__objs", "[", "url", "]", "=", "{", "jp", ":", "obj", "}", "else", ":", "if", "not", "isinstance", "(", "self", ".", "__objs", "[", "url", "]", ",", "dict", ")", ":", "raise", "Exception", "(", "'it should be able to resolve with BaseObj'", ")", "self", ".", "__objs", "[", "url", "]", ".", "update", "(", "{", "jp", ":", "obj", "}", ")", "# pre resolve Schema Object", "# note: make sure this object is cached before using 'Resolve' scanner", "s", ".", "scan", "(", "root", "=", "obj", ",", "route", "=", "[", "Resolve", "(", ")", "]", ")", "return", "obj" ]
basic preparation of an object (those in spec._version_.objects), and cache the 'prepared' object.
[ "basic", "preparation", "of", "an", "object", "(", "those", "in", "spec", ".", "_version_", ".", "objects", ")", "and", "cache", "the", "prepared", "object", "." ]
python
train
Cognexa/cxflow
cxflow/cli/resume.py
https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/resume.py#L11-L35
def resume(config_path: str, restore_from: Optional[str], cl_arguments: Iterable[str], output_root: str) -> None: """ Load config from the directory specified and start the training. :param config_path: path to the config file or the directory in which it is stored :param restore_from: backend-specific path to the already trained model to be restored from. If ``None`` is passed, it is inferred from the configuration file location as the directory it is located in. :param cl_arguments: additional command line arguments which will update the configuration :param output_root: output root in which the training directory will be created """ config = None try: config_path = find_config(config_path) restore_from = restore_from or path.dirname(config_path) config = load_config(config_file=config_path, additional_args=cl_arguments) validate_config(config) logging.debug('\tLoaded config: %s', config) except Exception as ex: # pylint: disable=broad-except fallback('Loading config failed', ex) run(config=config, output_root=output_root, restore_from=restore_from)
[ "def", "resume", "(", "config_path", ":", "str", ",", "restore_from", ":", "Optional", "[", "str", "]", ",", "cl_arguments", ":", "Iterable", "[", "str", "]", ",", "output_root", ":", "str", ")", "->", "None", ":", "config", "=", "None", "try", ":", "config_path", "=", "find_config", "(", "config_path", ")", "restore_from", "=", "restore_from", "or", "path", ".", "dirname", "(", "config_path", ")", "config", "=", "load_config", "(", "config_file", "=", "config_path", ",", "additional_args", "=", "cl_arguments", ")", "validate_config", "(", "config", ")", "logging", ".", "debug", "(", "'\\tLoaded config: %s'", ",", "config", ")", "except", "Exception", "as", "ex", ":", "# pylint: disable=broad-except", "fallback", "(", "'Loading config failed'", ",", "ex", ")", "run", "(", "config", "=", "config", ",", "output_root", "=", "output_root", ",", "restore_from", "=", "restore_from", ")" ]
Load config from the directory specified and start the training. :param config_path: path to the config file or the directory in which it is stored :param restore_from: backend-specific path to the already trained model to be restored from. If ``None`` is passed, it is inferred from the configuration file location as the directory it is located in. :param cl_arguments: additional command line arguments which will update the configuration :param output_root: output root in which the training directory will be created
[ "Load", "config", "from", "the", "directory", "specified", "and", "start", "the", "training", "." ]
python
train
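A hedged usage sketch for resume(); the import follows the record's module path, while the config path, override argument, and output root below are hypothetical:

from cxflow.cli.resume import resume

# Resume training from an earlier cxflow output directory (hypothetical paths).
# restore_from=None lets resume() infer the model location from the config file's directory.
resume(config_path='log/my_net_2019-01-01/config.yaml',
       restore_from=None,
       cl_arguments=['model.learning_rate=0.0001'],  # extra overrides; format assumed
       output_root='log')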
obriencj/python-javatools
javatools/opcodes.py
https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/opcodes.py#L162-L168
def _unpack(struct, bc, offset=0): """ returns the unpacked data tuple, and the next offset past the unpacked data """ return struct.unpack_from(bc, offset), offset + struct.size
[ "def", "_unpack", "(", "struct", ",", "bc", ",", "offset", "=", "0", ")", ":", "return", "struct", ".", "unpack_from", "(", "bc", ",", "offset", ")", ",", "offset", "+", "struct", ".", "size" ]
returns the unpacked data tuple, and the next offset past the unpacked data
[ "returns", "the", "unpacked", "data", "tuple", "and", "the", "next", "offset", "past", "the", "unpacked", "data" ]
python
train
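A self-contained illustration of the _unpack helper pattern with Python's struct module; the format string and byte string are made up, and the parameter is renamed so it does not shadow the imported module:

import struct

def _unpack(fmt, bc, offset=0):
    # returns the unpacked data tuple, and the next offset past the unpacked data
    return fmt.unpack_from(bc, offset), offset + fmt.size

_H = struct.Struct(">H")           # big-endian unsigned short, as used for bytecode operands
data = b"\x00\x2a\x00\x07"         # two shorts: 42 and 7 (made-up bytes)

(first,), offset = _unpack(_H, data)            # value 42, offset becomes 2
(second,), offset = _unpack(_H, data, offset)   # value 7, offset becomes 4
print(first, second, offset)                    # 42 7 4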
hawkular/hawkular-client-python
hawkular/metrics.py
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L244-L253
def query_tag_values(self, metric_type=None, **tags): """ Query for possible tag values. :param metric_type: A MetricType to be queried. If left to None, matches all the MetricTypes :param tags: A dict of tag key/value pairs. Uses Hawkular-Metrics tag query language for syntax """ tagql = self._transform_tags(**tags) return self._get(self._get_metrics_tags_url(self._get_url(metric_type)) + '/{}'.format(tagql))
[ "def", "query_tag_values", "(", "self", ",", "metric_type", "=", "None", ",", "*", "*", "tags", ")", ":", "tagql", "=", "self", ".", "_transform_tags", "(", "*", "*", "tags", ")", "return", "self", ".", "_get", "(", "self", ".", "_get_metrics_tags_url", "(", "self", ".", "_get_url", "(", "metric_type", ")", ")", "+", "'/{}'", ".", "format", "(", "tagql", ")", ")" ]
Query for possible tag values. :param metric_type: A MetricType to be queried. If left to None, matches all the MetricTypes :param tags: A dict of tag key/value pairs. Uses Hawkular-Metrics tag query language for syntax
[ "Query", "for", "possible", "tag", "values", "." ]
python
train
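A heavily hedged usage sketch for query_tag_values(); the client construction is only indicated in a comment because its arguments are not shown in the record, MetricType.Gauge is assumed to be the library's gauge metric type, and the tag name and pattern are invented:

# client = HawkularMetricsClient(tenant_id='my-tenant')   # constructor arguments are assumptions

# List the values observed for the 'hostname' tag on gauge metrics; the value
# pattern follows the Hawkular-Metrics tag query syntax mentioned in the docstring.
values = client.query_tag_values(MetricType.Gauge, hostname='web-.*')
print(values)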
saltstack/salt
salt/output/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/output/__init__.py#L69-L79
def update_progress(opts, progress, progress_iter, out): ''' Update the progress iterator for the given outputter ''' # Look up the outputter try: progress_outputter = salt.loader.outputters(opts)[out] except KeyError: # Outputter is not loaded log.warning('Progress outputter not available.') return False progress_outputter(progress, progress_iter)
[ "def", "update_progress", "(", "opts", ",", "progress", ",", "progress_iter", ",", "out", ")", ":", "# Look up the outputter", "try", ":", "progress_outputter", "=", "salt", ".", "loader", ".", "outputters", "(", "opts", ")", "[", "out", "]", "except", "KeyError", ":", "# Outputter is not loaded", "log", ".", "warning", "(", "'Progress outputter not available.'", ")", "return", "False", "progress_outputter", "(", "progress", ",", "progress_iter", ")" ]
Update the progress iterator for the given outputter
[ "Update", "the", "progress", "iterator", "for", "the", "given", "outputter" ]
python
train
Shapeways/coyote_framework
coyote_framework/drivers/coyote_driver.py
https://github.com/Shapeways/coyote_framework/blob/cb29899b984a21d56bf65d0b1d907073948fe16c/coyote_framework/drivers/coyote_driver.py#L8-L16
def visit(self, url=''): """Visit the url, checking for rr errors in the response @param url: URL @return: Visit result """ result = super(CoyoteDriver, self).visit(url) source = self.page_source() return result
[ "def", "visit", "(", "self", ",", "url", "=", "''", ")", ":", "result", "=", "super", "(", "CoyoteDriver", ",", "self", ")", ".", "visit", "(", "url", ")", "source", "=", "self", ".", "page_source", "(", ")", "return", "result" ]
Visit the url, checking for rr errors in the response @param url: URL @return: Visit result
[ "Visit", "the", "url", "checking", "for", "rr", "errors", "in", "the", "response" ]
python
train
ngmiller/mipsy
mipsy/util.py
https://github.com/ngmiller/mipsy/blob/78c058f44685765193acd386e81fada3b4187b95/mipsy/util.py#L51-L60
def query(self, label): """ Returns (hit, index) tuple. hit is a boolean, signifying label presence in the cache index is an integer, the instruction index for the label entry """ try: return True, self.cache[label] except KeyError, e: return False, 0
[ "def", "query", "(", "self", ",", "label", ")", ":", "try", ":", "return", "True", ",", "self", ".", "cache", "[", "label", "]", "except", "KeyError", ",", "e", ":", "return", "False", ",", "0" ]
Returns (hit, index) tuple. hit is a boolean, signifying label presence in the cache index is an integer, the instruction index for the label entry
[ "Returns", "(", "hit", "index", ")", "tuple", ".", "hit", "is", "a", "boolean", "signifying", "label", "presence", "in", "the", "cache", "index", "is", "an", "integer", "the", "instruction", "index", "for", "the", "label", "entry" ]
python
train
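A self-contained sketch of the label-cache lookup pattern behind query(); the class name and labels are made up, and the Python 2 except syntax is written in the Python 3 form:

class LabelCache(object):
    """Maps assembly labels to instruction indices (illustrative stand-in)."""
    def __init__(self):
        self.cache = {}

    def write(self, label, index):
        self.cache[label] = index

    def query(self, label):
        # Returns (hit, index): hit is True when the label is cached.
        try:
            return True, self.cache[label]
        except KeyError:
            return False, 0

labels = LabelCache()
labels.write('loop_start', 4)
print(labels.query('loop_start'))  # (True, 4)
print(labels.query('exit'))        # (False, 0)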
wavefrontHQ/python-client
wavefront_api_client/api/search_api.py
https://github.com/wavefrontHQ/python-client/blob/b0f1046a8f68c2c7d69e395f7167241f224c738a/wavefront_api_client/api/search_api.py#L424-L445
def search_alert_for_facet(self, facet, **kwargs): # noqa: E501 """Lists the values of a specific facet over the customer's non-deleted alerts # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.search_alert_for_facet(facet, async_req=True) >>> result = thread.get() :param async_req bool :param str facet: (required) :param FacetSearchRequestContainer body: :return: ResponseContainerFacetResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.search_alert_for_facet_with_http_info(facet, **kwargs) # noqa: E501 else: (data) = self.search_alert_for_facet_with_http_info(facet, **kwargs) # noqa: E501 return data
[ "def", "search_alert_for_facet", "(", "self", ",", "facet", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "search_alert_for_facet_with_http_info", "(", "facet", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "search_alert_for_facet_with_http_info", "(", "facet", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
Lists the values of a specific facet over the customer's non-deleted alerts # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.search_alert_for_facet(facet, async_req=True) >>> result = thread.get() :param async_req bool :param str facet: (required) :param FacetSearchRequestContainer body: :return: ResponseContainerFacetResponse If the method is called asynchronously, returns the request thread.
[ "Lists", "the", "values", "of", "a", "specific", "facet", "over", "the", "customer", "s", "non", "-", "deleted", "alerts", "#", "noqa", ":", "E501" ]
python
train
bkosciow/python_iot-1
iot_message/message.py
https://github.com/bkosciow/python_iot-1/blob/32880760e0d218a686ebdb0b6ee3ce07e5cbf018/iot_message/message.py#L44-L65
def prepare_message(self, data=None): """ Return message as dict :return dict """ message = { 'protocol': self.protocol, 'node': self._node, 'chip_id': self._chip_id, 'event': '', 'parameters': {}, 'response': '', 'targets': [ 'ALL' ] } if type(data) is dict: for k, v in data.items(): if k in message: message[k] = v return message
[ "def", "prepare_message", "(", "self", ",", "data", "=", "None", ")", ":", "message", "=", "{", "'protocol'", ":", "self", ".", "protocol", ",", "'node'", ":", "self", ".", "_node", ",", "'chip_id'", ":", "self", ".", "_chip_id", ",", "'event'", ":", "''", ",", "'parameters'", ":", "{", "}", ",", "'response'", ":", "''", ",", "'targets'", ":", "[", "'ALL'", "]", "}", "if", "type", "(", "data", ")", "is", "dict", ":", "for", "k", ",", "v", "in", "data", ".", "items", "(", ")", ":", "if", "k", "in", "message", ":", "message", "[", "k", "]", "=", "v", "return", "message" ]
Return message as dict :return dict
[ "Return", "message", "as", "dict", ":", "return", "dict" ]
python
test
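A hedged usage sketch for prepare_message(); the Message construction is an assumption, while the merge behaviour (only keys already present in the skeleton dict are copied) follows the record's code:

# msg = Message('node-kitchen')   # hypothetical instantiation; real constructor may differ

body = msg.prepare_message({
    'event': 'channel.on',
    'parameters': {'channel': 0},
    'ignored_key': 'dropped',   # not a key of the skeleton dict, so it is not copied
})
# body['event'] == 'channel.on'; body['targets'] == ['ALL']; 'ignored_key' is absent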
oauthlib/oauthlib
oauthlib/oauth1/rfc5849/signature.py
https://github.com/oauthlib/oauthlib/blob/30321dd3c0ca784d3508a1970cf90d9f76835c79/oauthlib/oauth1/rfc5849/signature.py#L109-L205
def base_string_uri(uri, host=None): """**Base String URI** Per `section 3.4.1.2`_ of RFC 5849. For example, the HTTP request:: GET /r%20v/X?id=123 HTTP/1.1 Host: EXAMPLE.COM:80 is represented by the base string URI: "http://example.com/r%20v/X". In another example, the HTTPS request:: GET /?q=1 HTTP/1.1 Host: www.example.net:8080 is represented by the base string URI: "https://www.example.net:8080/". .. _`section 3.4.1.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.2 The host argument overrides the netloc part of the uri argument. """ if not isinstance(uri, unicode_type): raise ValueError('uri must be a unicode object.') # FIXME: urlparse does not support unicode scheme, netloc, path, params, query, fragment = urlparse.urlparse(uri) # The scheme, authority, and path of the request resource URI `RFC3986` # are included by constructing an "http" or "https" URI representing # the request resource (without the query or fragment) as follows: # # .. _`RFC3986`: https://tools.ietf.org/html/rfc3986 if not scheme or not netloc: raise ValueError('uri must include a scheme and netloc') # Per `RFC 2616 section 5.1.2`_: # # Note that the absolute path cannot be empty; if none is present in # the original URI, it MUST be given as "/" (the server root). # # .. _`RFC 2616 section 5.1.2`: https://tools.ietf.org/html/rfc2616#section-5.1.2 if not path: path = '/' # 1. The scheme and host MUST be in lowercase. scheme = scheme.lower() netloc = netloc.lower() # 2. The host and port values MUST match the content of the HTTP # request "Host" header field. if host is not None: netloc = host.lower() # 3. The port MUST be included if it is not the default port for the # scheme, and MUST be excluded if it is the default. Specifically, # the port MUST be excluded when making an HTTP request `RFC2616`_ # to port 80 or when making an HTTPS request `RFC2818`_ to port 443. # All other non-default port numbers MUST be included. # # .. _`RFC2616`: https://tools.ietf.org/html/rfc2616 # .. _`RFC2818`: https://tools.ietf.org/html/rfc2818 default_ports = ( ('http', '80'), ('https', '443'), ) if ':' in netloc: host, port = netloc.split(':', 1) if (scheme, port) in default_ports: netloc = host v = urlparse.urlunparse((scheme, netloc, path, params, '', '')) # RFC 5849 does not specify which characters are encoded in the # "base string URI", nor how they are encoded - which is very bad, since # the signatures won't match if there are any differences. Fortunately, # most URIs only use characters that are clearly not encoded (e.g. digits # and A-Z, a-z), so have avoided any differences between implementations. # # The example from its section 3.4.1.2 illustrates that spaces in # the path are percent encoded. But it provides no guidance as to what other # characters (if any) must be encoded (nor how); nor if characters in the # other components are to be encoded or not. # # This implementation **assumes** that **only** the space is percent-encoded # and it is done to the entire value (not just to spaces in the path). # # This code may need to be changed if it is discovered that other characters # are expected to be encoded. # # Note: the "base string URI" returned by this function will be encoded # again before being concatenated into the "signature base string". So any # spaces in the URI will actually appear in the "signature base string" # as "%2520" (the "%20" further encoded according to section 3.6). return v.replace(' ', '%20')
[ "def", "base_string_uri", "(", "uri", ",", "host", "=", "None", ")", ":", "if", "not", "isinstance", "(", "uri", ",", "unicode_type", ")", ":", "raise", "ValueError", "(", "'uri must be a unicode object.'", ")", "# FIXME: urlparse does not support unicode", "scheme", ",", "netloc", ",", "path", ",", "params", ",", "query", ",", "fragment", "=", "urlparse", ".", "urlparse", "(", "uri", ")", "# The scheme, authority, and path of the request resource URI `RFC3986`", "# are included by constructing an \"http\" or \"https\" URI representing", "# the request resource (without the query or fragment) as follows:", "#", "# .. _`RFC3986`: https://tools.ietf.org/html/rfc3986", "if", "not", "scheme", "or", "not", "netloc", ":", "raise", "ValueError", "(", "'uri must include a scheme and netloc'", ")", "# Per `RFC 2616 section 5.1.2`_:", "#", "# Note that the absolute path cannot be empty; if none is present in", "# the original URI, it MUST be given as \"/\" (the server root).", "#", "# .. _`RFC 2616 section 5.1.2`: https://tools.ietf.org/html/rfc2616#section-5.1.2", "if", "not", "path", ":", "path", "=", "'/'", "# 1. The scheme and host MUST be in lowercase.", "scheme", "=", "scheme", ".", "lower", "(", ")", "netloc", "=", "netloc", ".", "lower", "(", ")", "# 2. The host and port values MUST match the content of the HTTP", "# request \"Host\" header field.", "if", "host", "is", "not", "None", ":", "netloc", "=", "host", ".", "lower", "(", ")", "# 3. The port MUST be included if it is not the default port for the", "# scheme, and MUST be excluded if it is the default. Specifically,", "# the port MUST be excluded when making an HTTP request `RFC2616`_", "# to port 80 or when making an HTTPS request `RFC2818`_ to port 443.", "# All other non-default port numbers MUST be included.", "#", "# .. _`RFC2616`: https://tools.ietf.org/html/rfc2616", "# .. _`RFC2818`: https://tools.ietf.org/html/rfc2818", "default_ports", "=", "(", "(", "'http'", ",", "'80'", ")", ",", "(", "'https'", ",", "'443'", ")", ",", ")", "if", "':'", "in", "netloc", ":", "host", ",", "port", "=", "netloc", ".", "split", "(", "':'", ",", "1", ")", "if", "(", "scheme", ",", "port", ")", "in", "default_ports", ":", "netloc", "=", "host", "v", "=", "urlparse", ".", "urlunparse", "(", "(", "scheme", ",", "netloc", ",", "path", ",", "params", ",", "''", ",", "''", ")", ")", "# RFC 5849 does not specify which characters are encoded in the", "# \"base string URI\", nor how they are encoded - which is very bad, since", "# the signatures won't match if there are any differences. Fortunately,", "# most URIs only use characters that are clearly not encoded (e.g. digits", "# and A-Z, a-z), so have avoided any differences between implementations.", "#", "# The example from its section 3.4.1.2 illustrates that spaces in", "# the path are percent encoded. But it provides no guidance as to what other", "# characters (if any) must be encoded (nor how); nor if characters in the", "# other components are to be encoded or not.", "#", "# This implementation **assumes** that **only** the space is percent-encoded", "# and it is done to the entire value (not just to spaces in the path).", "#", "# This code may need to be changed if it is discovered that other characters", "# are expected to be encoded.", "#", "# Note: the \"base string URI\" returned by this function will be encoded", "# again before being concatenated into the \"signature base string\". 
So any", "# spaces in the URI will actually appear in the \"signature base string\"", "# as \"%2520\" (the \"%20\" further encoded according to section 3.6).", "return", "v", ".", "replace", "(", "' '", ",", "'%20'", ")" ]
**Base String URI** Per `section 3.4.1.2`_ of RFC 5849. For example, the HTTP request:: GET /r%20v/X?id=123 HTTP/1.1 Host: EXAMPLE.COM:80 is represented by the base string URI: "http://example.com/r%20v/X". In another example, the HTTPS request:: GET /?q=1 HTTP/1.1 Host: www.example.net:8080 is represented by the base string URI: "https://www.example.net:8080/". .. _`section 3.4.1.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.2 The host argument overrides the netloc part of the uri argument.
[ "**", "Base", "String", "URI", "**", "Per", "section", "3", ".", "4", ".", "1", ".", "2", "_", "of", "RFC", "5849", "." ]
python
train
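A usage sketch for base_string_uri(), reusing the two requests from its docstring and assuming the import path matches the record's module path:

from oauthlib.oauth1.rfc5849.signature import base_string_uri

# Scheme and host are lowercased, the default port 80 is dropped, the path is kept as-is.
print(base_string_uri(u'http://EXAMPLE.COM:80/r%20v/X?id=123'))
# -> http://example.com/r%20v/X

# Non-default port 8080 is kept; an empty path becomes "/"; the query is discarded.
print(base_string_uri(u'https://www.example.net:8080/?q=1'))
# -> https://www.example.net:8080/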
sosy-lab/benchexec
benchexec/runexecutor.py
https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/runexecutor.py#L434-L450
def _kill_process0(self, pid, sig=signal.SIGKILL): """ Send signal to given process, either directly or with sudo. If the target is the sudo process itself, the signal will be lost, because we do not have the rights to send signals to sudo. Use _kill_process() because of this. """ if self._user is None: super(RunExecutor, self)._kill_process(pid, sig) else: logging.debug('Sending signal %s to %s with sudo.', sig, pid) try: # Cast sig to int, under Python 3.5 the signal.SIG* constants are nums, not ints. subprocess.check_call(args=self._build_cmdline(['kill', '-'+str(int(sig)), str(pid)])) except subprocess.CalledProcessError as e: # may happen for example if process no longer exists logging.debug(e)
[ "def", "_kill_process0", "(", "self", ",", "pid", ",", "sig", "=", "signal", ".", "SIGKILL", ")", ":", "if", "self", ".", "_user", "is", "None", ":", "super", "(", "RunExecutor", ",", "self", ")", ".", "_kill_process", "(", "pid", ",", "sig", ")", "else", ":", "logging", ".", "debug", "(", "'Sending signal %s to %s with sudo.'", ",", "sig", ",", "pid", ")", "try", ":", "# Cast sig to int, under Python 3.5 the signal.SIG* constants are nums, not ints.", "subprocess", ".", "check_call", "(", "args", "=", "self", ".", "_build_cmdline", "(", "[", "'kill'", ",", "'-'", "+", "str", "(", "int", "(", "sig", ")", ")", ",", "str", "(", "pid", ")", "]", ")", ")", "except", "subprocess", ".", "CalledProcessError", "as", "e", ":", "# may happen for example if process no longer exists", "logging", ".", "debug", "(", "e", ")" ]
Send signal to given process, either directly or with sudo. If the target is the sudo process itself, the signal will be lost, because we do not have the rights to send signals to sudo. Use _kill_process() because of this.
[ "Send", "signal", "to", "given", "process", "either", "directly", "or", "with", "sudo", ".", "If", "the", "target", "is", "the", "sudo", "process", "itself", "the", "signal", "will", "be", "lost", "because", "we", "do", "not", "have", "the", "rights", "to", "send", "signals", "to", "sudo", ".", "Use", "_kill_process", "()", "because", "of", "this", "." ]
python
train
HDI-Project/ballet
ballet/project.py
https://github.com/HDI-Project/ballet/blob/6f4d4b87b8234cb6bb38b9e9484a58ef8fe8fdb2/ballet/project.py#L103-L115
def relative_to_contrib(diff, project): """Compute relative path of changed file to contrib dir Args: diff (git.diff.Diff): file diff project (Project): project Returns: Path """ path = pathlib.Path(diff.b_path) contrib_path = project.contrib_module_path return path.relative_to(contrib_path)
[ "def", "relative_to_contrib", "(", "diff", ",", "project", ")", ":", "path", "=", "pathlib", ".", "Path", "(", "diff", ".", "b_path", ")", "contrib_path", "=", "project", ".", "contrib_module_path", "return", "path", ".", "relative_to", "(", "contrib_path", ")" ]
Compute relative path of changed file to contrib dir Args: diff (git.diff.Diff): file diff project (Project): project Returns: Path
[ "Compute", "relative", "path", "of", "changed", "file", "to", "contrib", "dir" ]
python
train
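A self-contained illustration of the path arithmetic in relative_to_contrib(), using plain pathlib with made-up paths in place of the git diff and Project objects:

import pathlib

# Hypothetical changed file (diff.b_path) and contrib dir (project.contrib_module_path).
changed_file = pathlib.Path('src/my_project/features/contrib/user_bob/feature_price.py')
contrib_path = pathlib.Path('src/my_project/features/contrib')

print(changed_file.relative_to(contrib_path))  # user_bob/feature_price.py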
desbma/sacad
sacad/cover.py
https://github.com/desbma/sacad/blob/a7a010c4d9618a0c90927f1acb530101ca05fac4/sacad/cover.py#L510-L525
async def guessImageMetadataFromHttpData(response): """ Identify an image format and size from the beginning of its HTTP data. """ metadata = None img_data = bytearray() while len(img_data) < CoverSourceResult.MAX_FILE_METADATA_PEEK_SIZE: new_img_data = await response.content.read(__class__.METADATA_PEEK_SIZE_INCREMENT) if not new_img_data: break img_data.extend(new_img_data) metadata = __class__.guessImageMetadataFromData(img_data) if (metadata is not None) and all(metadata): return metadata return metadata
[ "async", "def", "guessImageMetadataFromHttpData", "(", "response", ")", ":", "metadata", "=", "None", "img_data", "=", "bytearray", "(", ")", "while", "len", "(", "img_data", ")", "<", "CoverSourceResult", ".", "MAX_FILE_METADATA_PEEK_SIZE", ":", "new_img_data", "=", "await", "response", ".", "content", ".", "read", "(", "__class__", ".", "METADATA_PEEK_SIZE_INCREMENT", ")", "if", "not", "new_img_data", ":", "break", "img_data", ".", "extend", "(", "new_img_data", ")", "metadata", "=", "__class__", ".", "guessImageMetadataFromData", "(", "img_data", ")", "if", "(", "metadata", "is", "not", "None", ")", "and", "all", "(", "metadata", ")", ":", "return", "metadata", "return", "metadata" ]
Identify an image format and size from the beginning of its HTTP data.
[ "Identify", "an", "image", "format", "and", "size", "from", "the", "beginning", "of", "its", "HTTP", "data", "." ]
python
train
tensorflow/probability
tensorflow_probability/python/distributions/wishart.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/wishart.py#L416-L423
def _multi_gamma_sequence(self, a, p, name="multi_gamma_sequence"): """Creates sequence used in multivariate (di)gamma; shape = shape(a)+[p].""" with self._name_scope(name): # Linspace only takes scalars, so we'll add in the offset afterwards. seq = tf.linspace( tf.constant(0., dtype=self.dtype), 0.5 - 0.5 * p, tf.cast( p, tf.int32)) return seq + tf.expand_dims(a, [-1])
[ "def", "_multi_gamma_sequence", "(", "self", ",", "a", ",", "p", ",", "name", "=", "\"multi_gamma_sequence\"", ")", ":", "with", "self", ".", "_name_scope", "(", "name", ")", ":", "# Linspace only takes scalars, so we'll add in the offset afterwards.", "seq", "=", "tf", ".", "linspace", "(", "tf", ".", "constant", "(", "0.", ",", "dtype", "=", "self", ".", "dtype", ")", ",", "0.5", "-", "0.5", "*", "p", ",", "tf", ".", "cast", "(", "p", ",", "tf", ".", "int32", ")", ")", "return", "seq", "+", "tf", ".", "expand_dims", "(", "a", ",", "[", "-", "1", "]", ")" ]
Creates sequence used in multivariate (di)gamma; shape = shape(a)+[p].
[ "Creates", "sequence", "used", "in", "multivariate", "(", "di", ")", "gamma", ";", "shape", "=", "shape", "(", "a", ")", "+", "[", "p", "]", "." ]
python
test
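A NumPy sketch of the sequence built by _multi_gamma_sequence (a linspace from 0 to 0.5 - 0.5*p with p points, broadcast against a over a trailing axis); the input values are illustrative:

import numpy as np

def multi_gamma_sequence(a, p):
    # seq = [0, -0.5, ..., 0.5 - 0.5*p], i.e. linspace(0, 0.5 - 0.5*p, p)
    seq = np.linspace(0., 0.5 - 0.5 * p, int(p))
    return seq + np.expand_dims(a, -1)

print(multi_gamma_sequence(np.array([3.0]), 4))
# [[3.  2.5 2.  1.5]]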
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L1323-L1353
def select_good_pixel_region(hits, col_span, row_span, min_cut_threshold=0.2, max_cut_threshold=2.0): '''Takes the hit array and masks all pixels with a certain occupancy. Parameters ---------- hits : array like If dim > 2 the additional dimensions are summed up. min_cut_threshold : float A number to specify the minimum threshold, which pixel to take. Pixels are masked if occupancy < min_cut_threshold * np.ma.median(occupancy) 0 means that no pixels are masked max_cut_threshold : float A number to specify the maximum threshold, which pixel to take. Pixels are masked if occupancy > max_cut_threshold * np.ma.median(occupancy) Can be set to None that no pixels are masked by max_cut_threshold Returns ------- numpy.ma.array, shape=(80,336) The hits array with masked pixels. ''' hits = np.sum(hits, axis=(-1)).astype('u8') mask = np.ones(shape=(80, 336), dtype=np.uint8) mask[min(col_span):max(col_span) + 1, min(row_span):max(row_span) + 1] = 0 ma = np.ma.masked_where(mask, hits) if max_cut_threshold is not None: return np.ma.masked_where(np.logical_or(ma < min_cut_threshold * np.ma.median(ma), ma > max_cut_threshold * np.ma.median(ma)), ma) else: return np.ma.masked_where(ma < min_cut_threshold * np.ma.median(ma), ma)
[ "def", "select_good_pixel_region", "(", "hits", ",", "col_span", ",", "row_span", ",", "min_cut_threshold", "=", "0.2", ",", "max_cut_threshold", "=", "2.0", ")", ":", "hits", "=", "np", ".", "sum", "(", "hits", ",", "axis", "=", "(", "-", "1", ")", ")", ".", "astype", "(", "'u8'", ")", "mask", "=", "np", ".", "ones", "(", "shape", "=", "(", "80", ",", "336", ")", ",", "dtype", "=", "np", ".", "uint8", ")", "mask", "[", "min", "(", "col_span", ")", ":", "max", "(", "col_span", ")", "+", "1", ",", "min", "(", "row_span", ")", ":", "max", "(", "row_span", ")", "+", "1", "]", "=", "0", "ma", "=", "np", ".", "ma", ".", "masked_where", "(", "mask", ",", "hits", ")", "if", "max_cut_threshold", "is", "not", "None", ":", "return", "np", ".", "ma", ".", "masked_where", "(", "np", ".", "logical_or", "(", "ma", "<", "min_cut_threshold", "*", "np", ".", "ma", ".", "median", "(", "ma", ")", ",", "ma", ">", "max_cut_threshold", "*", "np", ".", "ma", ".", "median", "(", "ma", ")", ")", ",", "ma", ")", "else", ":", "return", "np", ".", "ma", ".", "masked_where", "(", "ma", "<", "min_cut_threshold", "*", "np", ".", "ma", ".", "median", "(", "ma", ")", ",", "ma", ")" ]
Takes the hit array and masks all pixels with a certain occupancy. Parameters ---------- hits : array like If dim > 2 the additional dimensions are summed up. min_cut_threshold : float A number to specify the minimum threshold, which pixel to take. Pixels are masked if occupancy < min_cut_threshold * np.ma.median(occupancy) 0 means that no pixels are masked max_cut_threshold : float A number to specify the maximum threshold, which pixel to take. Pixels are masked if occupancy > max_cut_threshold * np.ma.median(occupancy) Can be set to None that no pixels are masked by max_cut_threshold Returns ------- numpy.ma.array, shape=(80,336) The hits array with masked pixels.
[ "Takes", "the", "hit", "array", "and", "masks", "all", "pixels", "with", "a", "certain", "occupancy", "." ]
python
train
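A hedged usage sketch for select_good_pixel_region() with synthetic occupancy data; the (80, 336, n) shape matches the pixel matrix hard-coded in the function, and the import is assumed to follow the record's module path:

import numpy as np
from pybar.analysis.analysis_utils import select_good_pixel_region  # assumed import path

# Synthetic occupancy: 80 x 336 pixels with one extra axis that the function sums away.
hits = np.random.poisson(lam=100, size=(80, 336, 1))
hits[40, 100, 0] = 10000   # a noisy pixel inside the selected region, masked by max_cut_threshold

masked = select_good_pixel_region(hits, col_span=[10, 70], row_span=[20, 300],
                                  min_cut_threshold=0.2, max_cut_threshold=2.0)
print(masked.count(), 'pixels kept after region and occupancy cuts')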
log2timeline/plaso
plaso/output/shared_elastic.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/output/shared_elastic.py#L198-L220
def _InsertEvent(self, event, force_flush=False): """Inserts an event. Events are buffered in the form of documents and inserted to Elasticsearch when either forced to flush or when the flush interval (threshold) has been reached. Args: event (EventObject): event. force_flush (bool): True if buffered event documents should be inserted into Elasticsearch. """ if event: event_document = {'index': { '_index': self._index_name, '_type': self._document_type}} event_values = self._GetSanitizedEventValues(event) self._event_documents.append(event_document) self._event_documents.append(event_values) self._number_of_buffered_events += 1 if force_flush or self._number_of_buffered_events > self._flush_interval: self._FlushEvents()
[ "def", "_InsertEvent", "(", "self", ",", "event", ",", "force_flush", "=", "False", ")", ":", "if", "event", ":", "event_document", "=", "{", "'index'", ":", "{", "'_index'", ":", "self", ".", "_index_name", ",", "'_type'", ":", "self", ".", "_document_type", "}", "}", "event_values", "=", "self", ".", "_GetSanitizedEventValues", "(", "event", ")", "self", ".", "_event_documents", ".", "append", "(", "event_document", ")", "self", ".", "_event_documents", ".", "append", "(", "event_values", ")", "self", ".", "_number_of_buffered_events", "+=", "1", "if", "force_flush", "or", "self", ".", "_number_of_buffered_events", ">", "self", ".", "_flush_interval", ":", "self", ".", "_FlushEvents", "(", ")" ]
Inserts an event. Events are buffered in the form of documents and inserted to Elasticsearch when either forced to flush or when the flush interval (threshold) has been reached. Args: event (EventObject): event. force_flush (bool): True if buffered event documents should be inserted into Elasticsearch.
[ "Inserts", "an", "event", "." ]
python
train
maaku/python-bitcoin
bitcoin/hash.py
https://github.com/maaku/python-bitcoin/blob/1b80c284170fd3f547cc45f4700ce169f3f99641/bitcoin/hash.py#L146-L156
def new(self, string=None, *args, **kwargs): """Returns a `_ChainedHashAlgorithm` if the underlying tuple (specifying the list of algorithms) is not empty, otherwise a `_NopHashAlgorithm` instance is returned.""" if len(self): hobj = _ChainedHashAlgorithm(self, *args, **kwargs) else: hobj = _NopHashAlgorithm(*args, **kwargs) if string is not None: hobj.update(string) return hobj
[ "def", "new", "(", "self", ",", "string", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "self", ")", ":", "hobj", "=", "_ChainedHashAlgorithm", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "hobj", "=", "_NopHashAlgorithm", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "string", "is", "not", "None", ":", "hobj", ".", "update", "(", "string", ")", "return", "hobj" ]
Returns a `_ChainedHashAlgorithm` if the underlying tuple (specifying the list of algorithms) is not empty, otherwise a `_NopHashAlgorithm` instance is returned.
[ "Returns", "a", "_ChainedHashAlgorithm", "if", "the", "underlying", "tuple", "(", "specifying", "the", "list", "of", "algorithms", ")", "is", "not", "empty", "otherwise", "a", "_NopHashAlgorithm", "instance", "is", "returned", "." ]
python
train
kgori/treeCl
treeCl/bootstrap.py
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/bootstrap.py#L143-L160
def optimise_levenberg_marquardt(x, a, c, damping=0.001, tolerance=0.001): """ Optimise value of x using levenberg-marquardt """ x_new = x x_old = x-1 # dummy value f_old = f(x_new, a, c) while np.abs(x_new - x_old).sum() > tolerance: x_old = x_new x_tmp = levenberg_marquardt_update(x_old, a, c, damping) f_new = f(x_tmp, a, c) if f_new < f_old: damping = np.max(damping/10., 1e-20) x_new = x_tmp f_old = f_new else: damping *= 10. return x_new
[ "def", "optimise_levenberg_marquardt", "(", "x", ",", "a", ",", "c", ",", "damping", "=", "0.001", ",", "tolerance", "=", "0.001", ")", ":", "x_new", "=", "x", "x_old", "=", "x", "-", "1", "# dummy value", "f_old", "=", "f", "(", "x_new", ",", "a", ",", "c", ")", "while", "np", ".", "abs", "(", "x_new", "-", "x_old", ")", ".", "sum", "(", ")", ">", "tolerance", ":", "x_old", "=", "x_new", "x_tmp", "=", "levenberg_marquardt_update", "(", "x_old", ",", "a", ",", "c", ",", "damping", ")", "f_new", "=", "f", "(", "x_tmp", ",", "a", ",", "c", ")", "if", "f_new", "<", "f_old", ":", "damping", "=", "np", ".", "max", "(", "damping", "/", "10.", ",", "1e-20", ")", "x_new", "=", "x_tmp", "f_old", "=", "f_new", "else", ":", "damping", "*=", "10.", "return", "x_new" ]
Optimise value of x using levenberg-marquardt
[ "Optimise", "value", "of", "x", "using", "levenberg", "-", "marquardt" ]
python
train
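A small standalone sketch of the damping schedule used inside optimise_levenberg_marquardt; the two-argument elementwise maximum is np.maximum (np.max treats a second positional argument as an axis), so the sketch uses np.maximum for the lower bound:

import numpy as np

def update_damping(damping, improved):
    # Shrink damping (towards Gauss-Newton) on improvement, grow it otherwise.
    if improved:
        return np.maximum(damping / 10., 1e-20)
    return damping * 10.

d = 0.001
d = update_damping(d, improved=True)    # 0.0001
d = update_damping(d, improved=False)   # back to 0.001
print(d)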
quantmind/dynts
dynts/api/timeseries.py
https://github.com/quantmind/dynts/blob/21ac57c648bfec402fa6b1fe569496cf098fb5e8/dynts/api/timeseries.py#L512-L518
def rollapply(self, func, window=20, **kwargs): '''A generic :ref:`rolling function <rolling-function>` for function *func*. Same construct as :meth:`dynts.TimeSeries.apply` but with default ``window`` set to ``20``. ''' return self.apply(func, window=window, **kwargs)
[ "def", "rollapply", "(", "self", ",", "func", ",", "window", "=", "20", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "apply", "(", "func", ",", "window", "=", "window", ",", "*", "*", "kwargs", ")" ]
A generic :ref:`rolling function <rolling-function>` for function *func*. Same construct as :meth:`dynts.TimeSeries.apply` but with default ``window`` set to ``20``.
[ "A", "generic", ":", "ref", ":", "rolling", "function", "<rolling", "-", "function", ">", "for", "function", "*", "func", "*", ".", "Same", "construct", "as", ":", "meth", ":", "dynts", ".", "TimeSeries", ".", "apply", "but", "with", "default", "window", "set", "to", "20", "." ]
python
train
mpenning/polymer
polymer/Polymer.py
https://github.com/mpenning/polymer/blob/1cdf4ed2573c894bde9d398fa173816b6b47e9f3/polymer/Polymer.py#L380-L515
def supervise(self): """If not in a hot_loop, call supervise() to start the tasks""" self.retval = set([]) stats = TaskMgrStats( worker_count=self.worker_count, log_interval=self.log_interval, hot_loop=self.hot_loop, ) hot_loop = self.hot_loop if self.log_level >= 2: logmsg = "TaskMgr.supervise() started {0} workers".format(self.worker_count) self.log.info(logmsg) self.workers = self.spawn_workers() ## Add work self.num_tasks = 0 if not hot_loop: if self.log_level >= 2: logmsg = "TaskMgr.supervise() received {0} tasks".format( len(self.work_todo) ) self.log.info(logmsg) for task in self.work_todo: self.num_tasks += 1 if self.log_level >= 2: logmsg = "TaskMgr.supervise() queued task: {0}".format(task) self.log.info(logmsg) self.queue_task(task) finished = False while not finished: try: if hot_loop: # Calculate the adaptive loop delay delay = self.calc_wait_time(stats.exec_times) self.queue_tasks_from_controller(delay=delay) # queue tasks time.sleep(delay) r_msg = self.r_q.get_nowait() # __ACK__ or __FINISHED__ task = r_msg.get("task") w_id = r_msg.get("w_id") state = r_msg.get("state", "") if state == "__ACK__": self.worker_assignments[w_id] = task self.work_todo.remove(task) if self.log_level >= 3: self.log.debug("r_msg: {0}".format(r_msg)) if self.log_level >= 3: self.log.debug("w_id={0} received task={1}".format(w_id, task)) elif state == "__FINISHED__": now = time.time() task_exec_time = task.task_stop - task.task_start task_queue_time = now - task.queue_time - task_exec_time stats.exec_times.append(task_exec_time) stats.queue_times.append(task_queue_time) if self.log_level >= 1: self.log.debug( "TaskMgr.work_todo: {0} tasks left".format( len(self.work_todo) ) ) if self.log_level >= 3: self.log.debug("TaskMgr.work_todo: {0}".format(self.work_todo)) self.log.debug("r_msg: {0}".format(r_msg)) if not hot_loop: self.retval.add(task) # Add result to retval self.worker_assignments.pop(w_id) # Delete the key finished = self.is_finished() else: self.controller.to_q.put(task) # Send to the controller self.worker_assignments.pop(w_id) # Delete the key elif state == "__ERROR__": now = time.time() task_exec_time = task.task_stop - task.task_start task_queue_time = now - task.queue_time - task_exec_time stats.exec_times.append(task_exec_time) stats.queue_times.append(task_queue_time) if self.log_level >= 1: self.log.error("r_msg: {0}".format(r_msg)) self.log.error("".join(r_msg.get("error"))) self.log.debug( "TaskMgr.work_todo: {0} tasks left".format( len(self.work_todo) ) ) if self.log_level >= 3: self.log.debug("TaskMgr.work_todo: {0}".format(self.work_todo)) if not hot_loop: if not self.resubmit_on_error: # If task is in work_todo, delete it for tt in self.work_todo: if tt == task: self.work_todo.remove(task) # Remove task... try: # Delete the worker assignment... self.worker_assignments.pop(w_id) except: pass self.retval.add(task) # Add result to retval self.respawn_dead_workers() except Empty: state = "__EMPTY__" except Exception as e: tb_str = "".join(tb.format_exception(*(sys.exc_info()))) raise e(tb_str) if stats.log_time: if self.log_level >= 0: self.log.info(stats.log_message) # Adaptive loop delay delay = self.calc_wait_time(stats.exec_times) time.sleep(delay) self.respawn_dead_workers() finished = self.is_finished() if not hot_loop: self.kill_workers() for w_id, p in self.workers.items(): p.join() ## Log a final stats summary... if self.log_level > 0: self.log.info(stats.log_message) return self.retval
[ "def", "supervise", "(", "self", ")", ":", "self", ".", "retval", "=", "set", "(", "[", "]", ")", "stats", "=", "TaskMgrStats", "(", "worker_count", "=", "self", ".", "worker_count", ",", "log_interval", "=", "self", ".", "log_interval", ",", "hot_loop", "=", "self", ".", "hot_loop", ",", ")", "hot_loop", "=", "self", ".", "hot_loop", "if", "self", ".", "log_level", ">=", "2", ":", "logmsg", "=", "\"TaskMgr.supervise() started {0} workers\"", ".", "format", "(", "self", ".", "worker_count", ")", "self", ".", "log", ".", "info", "(", "logmsg", ")", "self", ".", "workers", "=", "self", ".", "spawn_workers", "(", ")", "## Add work", "self", ".", "num_tasks", "=", "0", "if", "not", "hot_loop", ":", "if", "self", ".", "log_level", ">=", "2", ":", "logmsg", "=", "\"TaskMgr.supervise() received {0} tasks\"", ".", "format", "(", "len", "(", "self", ".", "work_todo", ")", ")", "self", ".", "log", ".", "info", "(", "logmsg", ")", "for", "task", "in", "self", ".", "work_todo", ":", "self", ".", "num_tasks", "+=", "1", "if", "self", ".", "log_level", ">=", "2", ":", "logmsg", "=", "\"TaskMgr.supervise() queued task: {0}\"", ".", "format", "(", "task", ")", "self", ".", "log", ".", "info", "(", "logmsg", ")", "self", ".", "queue_task", "(", "task", ")", "finished", "=", "False", "while", "not", "finished", ":", "try", ":", "if", "hot_loop", ":", "# Calculate the adaptive loop delay", "delay", "=", "self", ".", "calc_wait_time", "(", "stats", ".", "exec_times", ")", "self", ".", "queue_tasks_from_controller", "(", "delay", "=", "delay", ")", "# queue tasks", "time", ".", "sleep", "(", "delay", ")", "r_msg", "=", "self", ".", "r_q", ".", "get_nowait", "(", ")", "# __ACK__ or __FINISHED__", "task", "=", "r_msg", ".", "get", "(", "\"task\"", ")", "w_id", "=", "r_msg", ".", "get", "(", "\"w_id\"", ")", "state", "=", "r_msg", ".", "get", "(", "\"state\"", ",", "\"\"", ")", "if", "state", "==", "\"__ACK__\"", ":", "self", ".", "worker_assignments", "[", "w_id", "]", "=", "task", "self", ".", "work_todo", ".", "remove", "(", "task", ")", "if", "self", ".", "log_level", ">=", "3", ":", "self", ".", "log", ".", "debug", "(", "\"r_msg: {0}\"", ".", "format", "(", "r_msg", ")", ")", "if", "self", ".", "log_level", ">=", "3", ":", "self", ".", "log", ".", "debug", "(", "\"w_id={0} received task={1}\"", ".", "format", "(", "w_id", ",", "task", ")", ")", "elif", "state", "==", "\"__FINISHED__\"", ":", "now", "=", "time", ".", "time", "(", ")", "task_exec_time", "=", "task", ".", "task_stop", "-", "task", ".", "task_start", "task_queue_time", "=", "now", "-", "task", ".", "queue_time", "-", "task_exec_time", "stats", ".", "exec_times", ".", "append", "(", "task_exec_time", ")", "stats", ".", "queue_times", ".", "append", "(", "task_queue_time", ")", "if", "self", ".", "log_level", ">=", "1", ":", "self", ".", "log", ".", "debug", "(", "\"TaskMgr.work_todo: {0} tasks left\"", ".", "format", "(", "len", "(", "self", ".", "work_todo", ")", ")", ")", "if", "self", ".", "log_level", ">=", "3", ":", "self", ".", "log", ".", "debug", "(", "\"TaskMgr.work_todo: {0}\"", ".", "format", "(", "self", ".", "work_todo", ")", ")", "self", ".", "log", ".", "debug", "(", "\"r_msg: {0}\"", ".", "format", "(", "r_msg", ")", ")", "if", "not", "hot_loop", ":", "self", ".", "retval", ".", "add", "(", "task", ")", "# Add result to retval", "self", ".", "worker_assignments", ".", "pop", "(", "w_id", ")", "# Delete the key", "finished", "=", "self", ".", "is_finished", "(", ")", "else", ":", "self", ".", "controller", ".", "to_q", ".", 
"put", "(", "task", ")", "# Send to the controller", "self", ".", "worker_assignments", ".", "pop", "(", "w_id", ")", "# Delete the key", "elif", "state", "==", "\"__ERROR__\"", ":", "now", "=", "time", ".", "time", "(", ")", "task_exec_time", "=", "task", ".", "task_stop", "-", "task", ".", "task_start", "task_queue_time", "=", "now", "-", "task", ".", "queue_time", "-", "task_exec_time", "stats", ".", "exec_times", ".", "append", "(", "task_exec_time", ")", "stats", ".", "queue_times", ".", "append", "(", "task_queue_time", ")", "if", "self", ".", "log_level", ">=", "1", ":", "self", ".", "log", ".", "error", "(", "\"r_msg: {0}\"", ".", "format", "(", "r_msg", ")", ")", "self", ".", "log", ".", "error", "(", "\"\"", ".", "join", "(", "r_msg", ".", "get", "(", "\"error\"", ")", ")", ")", "self", ".", "log", ".", "debug", "(", "\"TaskMgr.work_todo: {0} tasks left\"", ".", "format", "(", "len", "(", "self", ".", "work_todo", ")", ")", ")", "if", "self", ".", "log_level", ">=", "3", ":", "self", ".", "log", ".", "debug", "(", "\"TaskMgr.work_todo: {0}\"", ".", "format", "(", "self", ".", "work_todo", ")", ")", "if", "not", "hot_loop", ":", "if", "not", "self", ".", "resubmit_on_error", ":", "# If task is in work_todo, delete it", "for", "tt", "in", "self", ".", "work_todo", ":", "if", "tt", "==", "task", ":", "self", ".", "work_todo", ".", "remove", "(", "task", ")", "# Remove task...", "try", ":", "# Delete the worker assignment...", "self", ".", "worker_assignments", ".", "pop", "(", "w_id", ")", "except", ":", "pass", "self", ".", "retval", ".", "add", "(", "task", ")", "# Add result to retval", "self", ".", "respawn_dead_workers", "(", ")", "except", "Empty", ":", "state", "=", "\"__EMPTY__\"", "except", "Exception", "as", "e", ":", "tb_str", "=", "\"\"", ".", "join", "(", "tb", ".", "format_exception", "(", "*", "(", "sys", ".", "exc_info", "(", ")", ")", ")", ")", "raise", "e", "(", "tb_str", ")", "if", "stats", ".", "log_time", ":", "if", "self", ".", "log_level", ">=", "0", ":", "self", ".", "log", ".", "info", "(", "stats", ".", "log_message", ")", "# Adaptive loop delay", "delay", "=", "self", ".", "calc_wait_time", "(", "stats", ".", "exec_times", ")", "time", ".", "sleep", "(", "delay", ")", "self", ".", "respawn_dead_workers", "(", ")", "finished", "=", "self", ".", "is_finished", "(", ")", "if", "not", "hot_loop", ":", "self", ".", "kill_workers", "(", ")", "for", "w_id", ",", "p", "in", "self", ".", "workers", ".", "items", "(", ")", ":", "p", ".", "join", "(", ")", "## Log a final stats summary...", "if", "self", ".", "log_level", ">", "0", ":", "self", ".", "log", ".", "info", "(", "stats", ".", "log_message", ")", "return", "self", ".", "retval" ]
If not in a hot_loop, call supervise() to start the tasks
[ "If", "not", "in", "a", "hot_loop", "call", "supervise", "()", "to", "start", "the", "tasks" ]
python
test
bokeh/bokeh
bokeh/plotting/helpers.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/plotting/helpers.py#L286-L315
def _pop_colors_and_alpha(glyphclass, kwargs, prefix="", default_alpha=1.0): """ Given a kwargs dict, a prefix, and a default value, looks for different color and alpha fields of the given prefix, and fills in the default value if it doesn't exist. """ result = dict() # TODO: The need to do this and the complexity of managing this kind of # thing throughout the codebase really suggests that we need to have # a real stylesheet class, where defaults and Types can declaratively # substitute for this kind of imperative logic. color = kwargs.pop(prefix + "color", get_default_color()) for argname in ("fill_color", "line_color"): if argname not in glyphclass.properties(): continue result[argname] = kwargs.pop(prefix + argname, color) # NOTE: text fill color should really always default to black, hard coding # this here now until the stylesheet solution exists if "text_color" in glyphclass.properties(): result["text_color"] = kwargs.pop(prefix + "text_color", "black") alpha = kwargs.pop(prefix + "alpha", default_alpha) for argname in ("fill_alpha", "line_alpha", "text_alpha"): if argname not in glyphclass.properties(): continue result[argname] = kwargs.pop(prefix + argname, alpha) return result
[ "def", "_pop_colors_and_alpha", "(", "glyphclass", ",", "kwargs", ",", "prefix", "=", "\"\"", ",", "default_alpha", "=", "1.0", ")", ":", "result", "=", "dict", "(", ")", "# TODO: The need to do this and the complexity of managing this kind of", "# thing throughout the codebase really suggests that we need to have", "# a real stylesheet class, where defaults and Types can declaratively", "# substitute for this kind of imperative logic.", "color", "=", "kwargs", ".", "pop", "(", "prefix", "+", "\"color\"", ",", "get_default_color", "(", ")", ")", "for", "argname", "in", "(", "\"fill_color\"", ",", "\"line_color\"", ")", ":", "if", "argname", "not", "in", "glyphclass", ".", "properties", "(", ")", ":", "continue", "result", "[", "argname", "]", "=", "kwargs", ".", "pop", "(", "prefix", "+", "argname", ",", "color", ")", "# NOTE: text fill color should really always default to black, hard coding", "# this here now until the stylesheet solution exists", "if", "\"text_color\"", "in", "glyphclass", ".", "properties", "(", ")", ":", "result", "[", "\"text_color\"", "]", "=", "kwargs", ".", "pop", "(", "prefix", "+", "\"text_color\"", ",", "\"black\"", ")", "alpha", "=", "kwargs", ".", "pop", "(", "prefix", "+", "\"alpha\"", ",", "default_alpha", ")", "for", "argname", "in", "(", "\"fill_alpha\"", ",", "\"line_alpha\"", ",", "\"text_alpha\"", ")", ":", "if", "argname", "not", "in", "glyphclass", ".", "properties", "(", ")", ":", "continue", "result", "[", "argname", "]", "=", "kwargs", ".", "pop", "(", "prefix", "+", "argname", ",", "alpha", ")", "return", "result" ]
Given a kwargs dict, a prefix, and a default value, looks for different color and alpha fields of the given prefix, and fills in the default value if it doesn't exist.
[ "Given", "a", "kwargs", "dict", "a", "prefix", "and", "a", "default", "value", "looks", "for", "different", "color", "and", "alpha", "fields", "of", "the", "given", "prefix", "and", "fills", "in", "the", "default", "value", "if", "it", "doesn", "t", "exist", "." ]
python
train
hozn/coilmq
coilmq/config/__init__.py
https://github.com/hozn/coilmq/blob/76b7fcf347144b3a5746423a228bed121dc564b5/coilmq/config/__init__.py#L69-L110
def init_logging(logfile=None, loglevel=logging.INFO, configfile=None): """ Configures the logging using either basic filename + loglevel or passed config file path. This is performed separately from L{init_config()} in order to support the case where logging should happen independent of (usu. *after*) other aspects of the configuration initialization. For example, if logging may need to be initialized within a daemon context. @param logfile: An explicitly specified logfile destination. If this is specified in addition to default logging, a warning will be issued. @type logfile: C{str} @param loglevel: Which level to use when logging to explicitly specified file or stdout. @type loglevel: C{int} @param configfile: The path to a configuration file. This takes precedence over any explicitly specified logfile/loglevel (but a warning will be logged if both are specified). If the file is not specified or does not exist annd no logfile was specified, then the default.cfg configuration file will be used to initialize logging. @type configfile: C{str} """ # If a config file was specified, we will use that in place of the # explicitly use_configfile = False if configfile and os.path.exists(configfile): testcfg = ConfigParser() read = testcfg.read(configfile) use_configfile = (read and testcfg.has_section('loggers')) if use_configfile: logging.config.fileConfig(configfile) if logfile: msg = "Config file conflicts with explicitly specified logfile; config file takes precedence." logging.warn(msg) else: format = '%(asctime)s [%(threadName)s] %(name)s - %(levelname)s - %(message)s' if logfile: logging.basicConfig( filename=logfile, level=loglevel, format=format) else: logging.basicConfig(level=loglevel, format=format)
[ "def", "init_logging", "(", "logfile", "=", "None", ",", "loglevel", "=", "logging", ".", "INFO", ",", "configfile", "=", "None", ")", ":", "# If a config file was specified, we will use that in place of the", "# explicitly", "use_configfile", "=", "False", "if", "configfile", "and", "os", ".", "path", ".", "exists", "(", "configfile", ")", ":", "testcfg", "=", "ConfigParser", "(", ")", "read", "=", "testcfg", ".", "read", "(", "configfile", ")", "use_configfile", "=", "(", "read", "and", "testcfg", ".", "has_section", "(", "'loggers'", ")", ")", "if", "use_configfile", ":", "logging", ".", "config", ".", "fileConfig", "(", "configfile", ")", "if", "logfile", ":", "msg", "=", "\"Config file conflicts with explicitly specified logfile; config file takes precedence.\"", "logging", ".", "warn", "(", "msg", ")", "else", ":", "format", "=", "'%(asctime)s [%(threadName)s] %(name)s - %(levelname)s - %(message)s'", "if", "logfile", ":", "logging", ".", "basicConfig", "(", "filename", "=", "logfile", ",", "level", "=", "loglevel", ",", "format", "=", "format", ")", "else", ":", "logging", ".", "basicConfig", "(", "level", "=", "loglevel", ",", "format", "=", "format", ")" ]
Configures the logging using either basic filename + loglevel or passed config file path. This is performed separately from L{init_config()} in order to support the case where logging should happen independent of (usu. *after*) other aspects of the configuration initialization. For example, if logging may need to be initialized within a daemon context. @param logfile: An explicitly specified logfile destination. If this is specified in addition to default logging, a warning will be issued. @type logfile: C{str} @param loglevel: Which level to use when logging to explicitly specified file or stdout. @type loglevel: C{int} @param configfile: The path to a configuration file. This takes precedence over any explicitly specified logfile/loglevel (but a warning will be logged if both are specified). If the file is not specified or does not exist annd no logfile was specified, then the default.cfg configuration file will be used to initialize logging. @type configfile: C{str}
[ "Configures", "the", "logging", "using", "either", "basic", "filename", "+", "loglevel", "or", "passed", "config", "file", "path", "." ]
python
train
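A hedged usage sketch for init_logging(); the file names are hypothetical, and per the docstring a config file with a loggers section takes precedence over an explicit logfile:

import logging
from coilmq.config import init_logging

# Simple case: basic logging to a file at DEBUG level (hypothetical filename).
init_logging(logfile='coilmq.log', loglevel=logging.DEBUG)

# Alternative: hand over a logging config file; if it defines a [loggers] section
# it wins, and a simultaneously passed logfile only triggers a warning.
# init_logging(logfile='coilmq.log', configfile='etc/logging.cfg')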
aewallin/allantools
allantools/ci.py
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/ci.py#L693-L708
def edf_totdev(N, m, alpha): """ Equivalent degrees of freedom for Total Deviation FIXME: what is the right behavior for alpha outside 0,-1,-2? NIST SP1065 page 41, Table 7 """ alpha = int(alpha) if alpha in [0, -1, -2]: # alpha 0 WFM # alpha -1 FFM # alpha -2 RWFM NIST_SP1065_table7 = [(1.50, 0.0), (1.17, 0.22), (0.93, 0.36)] (b, c) = NIST_SP1065_table7[int(abs(alpha))] return b*(float(N)/float(m))-c else: return edf_simple(N, m, alpha)
[ "def", "edf_totdev", "(", "N", ",", "m", ",", "alpha", ")", ":", "alpha", "=", "int", "(", "alpha", ")", "if", "alpha", "in", "[", "0", ",", "-", "1", ",", "-", "2", "]", ":", "# alpha 0 WFM", "# alpha -1 FFM", "# alpha -2 RWFM", "NIST_SP1065_table7", "=", "[", "(", "1.50", ",", "0.0", ")", ",", "(", "1.17", ",", "0.22", ")", ",", "(", "0.93", ",", "0.36", ")", "]", "(", "b", ",", "c", ")", "=", "NIST_SP1065_table7", "[", "int", "(", "abs", "(", "alpha", ")", ")", "]", "return", "b", "*", "(", "float", "(", "N", ")", "/", "float", "(", "m", ")", ")", "-", "c", "else", ":", "return", "edf_simple", "(", "N", ",", "m", ",", "alpha", ")" ]
Equivalent degrees of freedom for Total Deviation FIXME: what is the right behavior for alpha outside 0,-1,-2? NIST SP1065 page 41, Table 7
[ "Equivalent", "degrees", "of", "freedom", "for", "Total", "Deviation", "FIXME", ":", "what", "is", "the", "right", "behavior", "for", "alpha", "outside", "0", "-", "1", "-", "2?", "NIST", "SP1065", "page", "41", "Table", "7" ]
python
train
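A worked example of the Table 7 formula edf = b*(N/m) - c applied by edf_totdev; the sample count and averaging factor are chosen for illustration:

# alpha = 0 (white FM): b, c = 1.50, 0.0
N, m = 1024, 32
edf_wfm = 1.50 * (float(N) / float(m)) - 0.0    # 1.50 * 32 = 48.0

# alpha = -2 (random walk FM): b, c = 0.93, 0.36
edf_rwfm = 0.93 * (float(N) / float(m)) - 0.36  # 0.93 * 32 - 0.36 = 29.4

print(edf_wfm, edf_rwfm)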
molmod/molmod
molmod/graphs.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1339-L1381
def get_closed_cycles(self): """Return the closed cycles corresponding to this permutation The cycle will be normalized to facilitate the elimination of duplicates. The following is guaranteed: 1) If this permutation is represented by disconnected cycles, the cycles will be sorted by the lowest index they contain. 2) Each cycle starts with its lowest index. (unique starting point) 3) Singletons are discarded. (because they are boring) """ # A) construct all the cycles closed_cycles = [] todo = set(self.forward.keys()) if todo != set(self.forward.values()): raise GraphError("The subject and pattern graph must have the same " "numbering.") current_vertex = None while len(todo) > 0: if current_vertex == None: current_vertex = todo.pop() current_cycle = [] else: todo.discard(current_vertex) current_cycle.append(current_vertex) next_vertex = self.get_destination(current_vertex) if next_vertex == current_cycle[0]: if len(current_cycle) > 1: # bring the lowest element in front pivot = np.argmin(current_cycle) current_cycle = current_cycle[pivot:] + \ current_cycle[:pivot] closed_cycles.append(current_cycle) current_vertex = None else: current_vertex = next_vertex # B) normalize the cycle representation closed_cycles.sort() # a normal sort is sufficient because only the # first item of each cycle is considered # transform the structure into a tuple of tuples closed_cycles = tuple(tuple(cycle) for cycle in closed_cycles) return closed_cycles
[ "def", "get_closed_cycles", "(", "self", ")", ":", "# A) construct all the cycles", "closed_cycles", "=", "[", "]", "todo", "=", "set", "(", "self", ".", "forward", ".", "keys", "(", ")", ")", "if", "todo", "!=", "set", "(", "self", ".", "forward", ".", "values", "(", ")", ")", ":", "raise", "GraphError", "(", "\"The subject and pattern graph must have the same \"", "\"numbering.\"", ")", "current_vertex", "=", "None", "while", "len", "(", "todo", ")", ">", "0", ":", "if", "current_vertex", "==", "None", ":", "current_vertex", "=", "todo", ".", "pop", "(", ")", "current_cycle", "=", "[", "]", "else", ":", "todo", ".", "discard", "(", "current_vertex", ")", "current_cycle", ".", "append", "(", "current_vertex", ")", "next_vertex", "=", "self", ".", "get_destination", "(", "current_vertex", ")", "if", "next_vertex", "==", "current_cycle", "[", "0", "]", ":", "if", "len", "(", "current_cycle", ")", ">", "1", ":", "# bring the lowest element in front", "pivot", "=", "np", ".", "argmin", "(", "current_cycle", ")", "current_cycle", "=", "current_cycle", "[", "pivot", ":", "]", "+", "current_cycle", "[", ":", "pivot", "]", "closed_cycles", ".", "append", "(", "current_cycle", ")", "current_vertex", "=", "None", "else", ":", "current_vertex", "=", "next_vertex", "# B) normalize the cycle representation", "closed_cycles", ".", "sort", "(", ")", "# a normal sort is sufficient because only the", "# first item of each cycle is considered", "# transform the structure into a tuple of tuples", "closed_cycles", "=", "tuple", "(", "tuple", "(", "cycle", ")", "for", "cycle", "in", "closed_cycles", ")", "return", "closed_cycles" ]
Return the closed cycles corresponding to this permutation The cycle will be normalized to facilitate the elimination of duplicates. The following is guaranteed: 1) If this permutation is represented by disconnected cycles, the cycles will be sorted by the lowest index they contain. 2) Each cycle starts with its lowest index. (unique starting point) 3) Singletons are discarded. (because they are boring)
[ "Return", "the", "closed", "cycles", "corresponding", "to", "this", "permutation" ]
python
train
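To make the three normalization guarantees above concrete without constructing a molmod match object, here is a small standalone sketch (a hypothetical helper, not part of molmod) that applies the same rules to a plain forward-mapping dict:

def closed_cycles(forward):
    # forward maps each vertex to its image under the permutation.
    todo = set(forward)
    cycles = []
    while todo:
        start = todo.pop()
        cycle, v = [start], forward[start]
        while v != start:
            todo.discard(v)
            cycle.append(v)
            v = forward[v]
        if len(cycle) > 1:                   # rule 3: singletons are dropped
            pivot = cycle.index(min(cycle))  # rule 2: each cycle starts at its lowest index
            cycles.append(tuple(cycle[pivot:] + cycle[:pivot]))
    return tuple(sorted(cycles))             # rule 1: cycles sorted by lowest contained index

print(closed_cycles({0: 1, 1: 0, 2: 2, 3: 4, 4: 5, 5: 3}))
# ((0, 1), (3, 4, 5))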
ros-infrastructure/ros_buildfarm
ros_buildfarm/doc_job.py
https://github.com/ros-infrastructure/ros_buildfarm/blob/c63ad85b21470f3262086fcd987528a0efc0cf6d/ros_buildfarm/doc_job.py#L159-L249
def configure_doc_job( config_url, rosdistro_name, doc_build_name, repo_name, os_name, os_code_name, arch, config=None, build_file=None, index=None, dist_file=None, dist_cache=None, jenkins=None, views=None, is_disabled=False, groovy_script=None, doc_repository=None, dry_run=False): """ Configure a single Jenkins doc job. This includes the following steps: - clone the doc repository to use - clone the ros_buildfarm repository - write the distribution repository keys into files - invoke the run_doc_job.py script """ if config is None: config = get_config_index(config_url) if build_file is None: build_files = get_doc_build_files(config, rosdistro_name) build_file = build_files[doc_build_name] if index is None: index = get_index(config.rosdistro_index_url) if dist_file is None: dist_file = get_distribution_file(index, rosdistro_name, build_file) if not dist_file: raise JobValidationError( 'No distribution file matches the build file') repo_names = dist_file.repositories.keys() if repo_name is not None: if repo_name not in repo_names: raise JobValidationError( "Invalid repository name '%s' " % repo_name + 'choose one of the following: %s' % ', '.join(sorted(repo_names))) repo = dist_file.repositories[repo_name] if not repo.doc_repository: raise JobValidationError( "Repository '%s' has no doc section" % repo_name) if not repo.doc_repository.version: raise JobValidationError( "Repository '%s' has no doc version" % repo_name) doc_repository = repo.doc_repository if os_name not in build_file.targets.keys(): raise JobValidationError( "Invalid OS name '%s' " % os_name + 'choose one of the following: ' + ', '.join(sorted(build_file.targets.keys()))) if os_code_name not in build_file.targets[os_name].keys(): raise JobValidationError( "Invalid OS code name '%s' " % os_code_name + 'choose one of the following: ' + ', '.join(sorted(build_file.targets[os_name].keys()))) if arch not in build_file.targets[os_name][os_code_name]: raise JobValidationError( "Invalid architecture '%s' " % arch + 'choose one of the following: %s' % ', '.join(sorted( build_file.targets[os_name][os_code_name]))) if dist_cache is None and build_file.notify_maintainers: dist_cache = get_distribution_cache(index, rosdistro_name) if jenkins is None: from ros_buildfarm.jenkins import connect jenkins = connect(config.jenkins_url) if views is None: view_name = get_doc_view_name( rosdistro_name, doc_build_name) configure_doc_view(jenkins, view_name, dry_run=dry_run) job_name = get_doc_job_name( rosdistro_name, doc_build_name, repo_name, os_name, os_code_name, arch) job_config = _get_doc_job_config( config, config_url, rosdistro_name, doc_build_name, build_file, os_name, os_code_name, arch, doc_repository, repo_name, dist_cache=dist_cache, is_disabled=is_disabled) # jenkinsapi.jenkins.Jenkins evaluates to false if job count is zero if isinstance(jenkins, object) and jenkins is not False: from ros_buildfarm.jenkins import configure_job configure_job(jenkins, job_name, job_config, dry_run=dry_run) return job_name, job_config
[ "def", "configure_doc_job", "(", "config_url", ",", "rosdistro_name", ",", "doc_build_name", ",", "repo_name", ",", "os_name", ",", "os_code_name", ",", "arch", ",", "config", "=", "None", ",", "build_file", "=", "None", ",", "index", "=", "None", ",", "dist_file", "=", "None", ",", "dist_cache", "=", "None", ",", "jenkins", "=", "None", ",", "views", "=", "None", ",", "is_disabled", "=", "False", ",", "groovy_script", "=", "None", ",", "doc_repository", "=", "None", ",", "dry_run", "=", "False", ")", ":", "if", "config", "is", "None", ":", "config", "=", "get_config_index", "(", "config_url", ")", "if", "build_file", "is", "None", ":", "build_files", "=", "get_doc_build_files", "(", "config", ",", "rosdistro_name", ")", "build_file", "=", "build_files", "[", "doc_build_name", "]", "if", "index", "is", "None", ":", "index", "=", "get_index", "(", "config", ".", "rosdistro_index_url", ")", "if", "dist_file", "is", "None", ":", "dist_file", "=", "get_distribution_file", "(", "index", ",", "rosdistro_name", ",", "build_file", ")", "if", "not", "dist_file", ":", "raise", "JobValidationError", "(", "'No distribution file matches the build file'", ")", "repo_names", "=", "dist_file", ".", "repositories", ".", "keys", "(", ")", "if", "repo_name", "is", "not", "None", ":", "if", "repo_name", "not", "in", "repo_names", ":", "raise", "JobValidationError", "(", "\"Invalid repository name '%s' \"", "%", "repo_name", "+", "'choose one of the following: %s'", "%", "', '", ".", "join", "(", "sorted", "(", "repo_names", ")", ")", ")", "repo", "=", "dist_file", ".", "repositories", "[", "repo_name", "]", "if", "not", "repo", ".", "doc_repository", ":", "raise", "JobValidationError", "(", "\"Repository '%s' has no doc section\"", "%", "repo_name", ")", "if", "not", "repo", ".", "doc_repository", ".", "version", ":", "raise", "JobValidationError", "(", "\"Repository '%s' has no doc version\"", "%", "repo_name", ")", "doc_repository", "=", "repo", ".", "doc_repository", "if", "os_name", "not", "in", "build_file", ".", "targets", ".", "keys", "(", ")", ":", "raise", "JobValidationError", "(", "\"Invalid OS name '%s' \"", "%", "os_name", "+", "'choose one of the following: '", "+", "', '", ".", "join", "(", "sorted", "(", "build_file", ".", "targets", ".", "keys", "(", ")", ")", ")", ")", "if", "os_code_name", "not", "in", "build_file", ".", "targets", "[", "os_name", "]", ".", "keys", "(", ")", ":", "raise", "JobValidationError", "(", "\"Invalid OS code name '%s' \"", "%", "os_code_name", "+", "'choose one of the following: '", "+", "', '", ".", "join", "(", "sorted", "(", "build_file", ".", "targets", "[", "os_name", "]", ".", "keys", "(", ")", ")", ")", ")", "if", "arch", "not", "in", "build_file", ".", "targets", "[", "os_name", "]", "[", "os_code_name", "]", ":", "raise", "JobValidationError", "(", "\"Invalid architecture '%s' \"", "%", "arch", "+", "'choose one of the following: %s'", "%", "', '", ".", "join", "(", "sorted", "(", "build_file", ".", "targets", "[", "os_name", "]", "[", "os_code_name", "]", ")", ")", ")", "if", "dist_cache", "is", "None", "and", "build_file", ".", "notify_maintainers", ":", "dist_cache", "=", "get_distribution_cache", "(", "index", ",", "rosdistro_name", ")", "if", "jenkins", "is", "None", ":", "from", "ros_buildfarm", ".", "jenkins", "import", "connect", "jenkins", "=", "connect", "(", "config", ".", "jenkins_url", ")", "if", "views", "is", "None", ":", "view_name", "=", "get_doc_view_name", "(", "rosdistro_name", ",", "doc_build_name", ")", "configure_doc_view", "(", 
"jenkins", ",", "view_name", ",", "dry_run", "=", "dry_run", ")", "job_name", "=", "get_doc_job_name", "(", "rosdistro_name", ",", "doc_build_name", ",", "repo_name", ",", "os_name", ",", "os_code_name", ",", "arch", ")", "job_config", "=", "_get_doc_job_config", "(", "config", ",", "config_url", ",", "rosdistro_name", ",", "doc_build_name", ",", "build_file", ",", "os_name", ",", "os_code_name", ",", "arch", ",", "doc_repository", ",", "repo_name", ",", "dist_cache", "=", "dist_cache", ",", "is_disabled", "=", "is_disabled", ")", "# jenkinsapi.jenkins.Jenkins evaluates to false if job count is zero", "if", "isinstance", "(", "jenkins", ",", "object", ")", "and", "jenkins", "is", "not", "False", ":", "from", "ros_buildfarm", ".", "jenkins", "import", "configure_job", "configure_job", "(", "jenkins", ",", "job_name", ",", "job_config", ",", "dry_run", "=", "dry_run", ")", "return", "job_name", ",", "job_config" ]
Configure a single Jenkins doc job. This includes the following steps: - clone the doc repository to use - clone the ros_buildfarm repository - write the distribution repository keys into files - invoke the run_doc_job.py script
[ "Configure", "a", "single", "Jenkins", "doc", "job", "." ]
python
valid
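A dry-run usage sketch for the job configuration above, assuming the module path ros_buildfarm.doc_job matches the file location; the config URL, distro, and repository names are placeholders rather than real build-farm values:

from ros_buildfarm.doc_job import configure_doc_job

job_name, job_config = configure_doc_job(
    config_url='https://example.org/ros_buildfarm_config/index.yaml',  # placeholder
    rosdistro_name='melodic',
    doc_build_name='default',
    repo_name='some_repo',          # must have a doc section in the distribution file
    os_name='ubuntu',
    os_code_name='bionic',
    arch='amd64',
    dry_run=True,                   # do not modify the Jenkins master
)
print(job_name)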
rsalmei/clearly
clearly/server.py
https://github.com/rsalmei/clearly/blob/fd784843d13f0fed28fc192565bec3668f1363f4/clearly/server.py#L169-L178
def get_stats(self, request, context): """Returns the server statistics.""" _log_request(request, context) m = self.listener.memory return clearly_pb2.StatsMessage( task_count=m.task_count, event_count=m.event_count, len_tasks=len(m.tasks), len_workers=len(m.workers) )
[ "def", "get_stats", "(", "self", ",", "request", ",", "context", ")", ":", "_log_request", "(", "request", ",", "context", ")", "m", "=", "self", ".", "listener", ".", "memory", "return", "clearly_pb2", ".", "StatsMessage", "(", "task_count", "=", "m", ".", "task_count", ",", "event_count", "=", "m", ".", "event_count", ",", "len_tasks", "=", "len", "(", "m", ".", "tasks", ")", ",", "len_workers", "=", "len", "(", "m", ".", "workers", ")", ")" ]
Returns the server statistics.
[ "Returns", "the", "server", "statistics", "." ]
python
train
asphalt-framework/asphalt
asphalt/core/utils.py
https://github.com/asphalt-framework/asphalt/blob/4114b3ac9743cbd9facb374a3f53e19d3afef22d/asphalt/core/utils.py#L62-L67
def callable_name(func: Callable) -> str: """Return the qualified name (e.g. package.module.func) for the given callable.""" if func.__module__ == 'builtins': return func.__name__ else: return '{}.{}'.format(func.__module__, func.__qualname__)
[ "def", "callable_name", "(", "func", ":", "Callable", ")", "->", "str", ":", "if", "func", ".", "__module__", "==", "'builtins'", ":", "return", "func", ".", "__name__", "else", ":", "return", "'{}.{}'", ".", "format", "(", "func", ".", "__module__", ",", "func", ".", "__qualname__", ")" ]
Return the qualified name (e.g. package.module.func) for the given callable.
[ "Return", "the", "qualified", "name", "(", "e", ".", "g", ".", "package", ".", "module", ".", "func", ")", "for", "the", "given", "callable", "." ]
python
train
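Two quick examples of the qualified-name behaviour, assuming the helper is importable from asphalt.core.utils as the path suggests:

import json
from asphalt.core.utils import callable_name

print(callable_name(len))         # 'len' (builtins are returned without a module prefix)
print(callable_name(json.dumps))  # 'json.dumps'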
david-caro/python-autosemver
autosemver/packaging.py
https://github.com/david-caro/python-autosemver/blob/3bc0adb70c33e4bd3623ae4c1944d5ee37f4303d/autosemver/packaging.py#L118-L146
def get_changelog(project_dir=os.curdir, bugtracker_url='', rpm_format=False): """ Retrieves the changelog, from the CHANGELOG file (if in a package) or generates it from the git history. Optionally in rpm-compatible format. :param project_dir: Path to the git repo of the project. :type project_dir: str :param bugtracker_url: Url to the bug tracker for the issues. :type bugtracker_url: str :param rpm_format: if set to True, will make the changelog rpm-compatible :returns: changelog :rtype: str :rises RuntimeError: If the changelog could not be retrieved """ changelog = '' pkg_info_file = os.path.join(project_dir, 'PKG-INFO') changelog_file = os.path.join(project_dir, 'CHANGELOG') if os.path.exists(pkg_info_file) and os.path.exists(changelog_file): with open(changelog_file) as changelog_fd: changelog = changelog_fd.read() else: changelog = api.get_changelog( repo_path=project_dir, bugtracker_url=bugtracker_url, rpm_format=rpm_format, ) return changelog
[ "def", "get_changelog", "(", "project_dir", "=", "os", ".", "curdir", ",", "bugtracker_url", "=", "''", ",", "rpm_format", "=", "False", ")", ":", "changelog", "=", "''", "pkg_info_file", "=", "os", ".", "path", ".", "join", "(", "project_dir", ",", "'PKG-INFO'", ")", "changelog_file", "=", "os", ".", "path", ".", "join", "(", "project_dir", ",", "'CHANGELOG'", ")", "if", "os", ".", "path", ".", "exists", "(", "pkg_info_file", ")", "and", "os", ".", "path", ".", "exists", "(", "changelog_file", ")", ":", "with", "open", "(", "changelog_file", ")", "as", "changelog_fd", ":", "changelog", "=", "changelog_fd", ".", "read", "(", ")", "else", ":", "changelog", "=", "api", ".", "get_changelog", "(", "repo_path", "=", "project_dir", ",", "bugtracker_url", "=", "bugtracker_url", ",", "rpm_format", "=", "rpm_format", ",", ")", "return", "changelog" ]
Retrieves the changelog from the CHANGELOG file (if in a package) or generates it from the git history. Optionally in rpm-compatible format. :param project_dir: Path to the git repo of the project. :type project_dir: str :param bugtracker_url: URL to the bug tracker for the issues. :type bugtracker_url: str :param rpm_format: if set to True, will make the changelog rpm-compatible :returns: changelog :rtype: str :raises RuntimeError: If the changelog could not be retrieved
[ "Retrieves", "the", "changelog", "from", "the", "CHANGELOG", "file", "(", "if", "in", "a", "package", ")", "or", "generates", "it", "from", "the", "git", "history", ".", "Optionally", "in", "rpm", "-", "compatible", "format", "." ]
python
train
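A minimal usage sketch, assuming autosemver.packaging is importable and the working directory is a git checkout; the bug tracker URL is a placeholder:

from autosemver.packaging import get_changelog

text = get_changelog(
    project_dir='.',
    bugtracker_url='https://example.org/issues',  # placeholder URL
    rpm_format=False,
)
print('\n'.join(text.splitlines()[:10]))  # show the first few changelog lines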
django-danceschool/django-danceschool
danceschool/core/templatetags/danceschool_tags.py
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/core/templatetags/danceschool_tags.py#L164-L172
def getReturnPage(context, prior=False): ''' This tag makes it easy to get return links from within a template without requiring custom logic inside the view. Just include {% getReturnPage as returnPage %} and then reference {{ returnPage.url }} and {{ returnPage.title }} as needed. ''' siteHistory = getattr(context.get('request',None),'session',{}).get('SITE_HISTORY',{}) return returnPageHelper(siteHistory,prior=prior)
[ "def", "getReturnPage", "(", "context", ",", "prior", "=", "False", ")", ":", "siteHistory", "=", "getattr", "(", "context", ".", "get", "(", "'request'", ",", "None", ")", ",", "'session'", ",", "{", "}", ")", ".", "get", "(", "'SITE_HISTORY'", ",", "{", "}", ")", "return", "returnPageHelper", "(", "siteHistory", ",", "prior", "=", "prior", ")" ]
This tag makes it easy to get return links from within a template without requiring custom logic inside the view. Just include {% getReturnPage as returnPage %} and then reference {{ returnPage.url }} and {{ returnPage.title }} as needed.
[ "This", "tag", "makes", "it", "easy", "to", "get", "return", "links", "from", "within", "a", "template", "without", "requiring", "custom", "logic", "inside", "the", "view", ".", "Just", "include", "{", "%", "getReturnPage", "as", "returnPage", "%", "}", "and", "then", "reference", "{{", "returnPage", ".", "url", "}}", "and", "{{", "returnPage", ".", "title", "}}", "as", "needed", "." ]
python
train
iKevinY/EulerPy
EulerPy/euler.py
https://github.com/iKevinY/EulerPy/blob/739c1c67fa7b32af9140ca51e4b4a07733e057a6/EulerPy/euler.py#L25-L57
def generate(num, prompt_default=True): """Generates Python file for a problem.""" p = Problem(num) problem_text = p.text msg = "Generate file for problem %i?" % num click.confirm(msg, default=prompt_default, abort=True) # Allow skipped problem files to be recreated if p.glob: filename = str(p.file) msg = '"{}" already exists. Overwrite?'.format(filename) click.confirm(click.style(msg, fg='red'), abort=True) else: # Try to keep prefix consistent with existing files previous_file = Problem(num - 1).file prefix = previous_file.prefix if previous_file else '' filename = p.filename(prefix=prefix) header = 'Project Euler Problem %i' % num divider = '=' * len(header) text = '\n'.join([header, divider, '', problem_text]) content = '\n'.join(['"""', text, '"""']) with open(filename, 'w') as f: f.write(content + '\n\n\n') click.secho('Successfully created "{}".'.format(filename), fg='green') # Copy over problem resources if required if p.resources: p.copy_resources()
[ "def", "generate", "(", "num", ",", "prompt_default", "=", "True", ")", ":", "p", "=", "Problem", "(", "num", ")", "problem_text", "=", "p", ".", "text", "msg", "=", "\"Generate file for problem %i?\"", "%", "num", "click", ".", "confirm", "(", "msg", ",", "default", "=", "prompt_default", ",", "abort", "=", "True", ")", "# Allow skipped problem files to be recreated", "if", "p", ".", "glob", ":", "filename", "=", "str", "(", "p", ".", "file", ")", "msg", "=", "'\"{}\" already exists. Overwrite?'", ".", "format", "(", "filename", ")", "click", ".", "confirm", "(", "click", ".", "style", "(", "msg", ",", "fg", "=", "'red'", ")", ",", "abort", "=", "True", ")", "else", ":", "# Try to keep prefix consistent with existing files", "previous_file", "=", "Problem", "(", "num", "-", "1", ")", ".", "file", "prefix", "=", "previous_file", ".", "prefix", "if", "previous_file", "else", "''", "filename", "=", "p", ".", "filename", "(", "prefix", "=", "prefix", ")", "header", "=", "'Project Euler Problem %i'", "%", "num", "divider", "=", "'='", "*", "len", "(", "header", ")", "text", "=", "'\\n'", ".", "join", "(", "[", "header", ",", "divider", ",", "''", ",", "problem_text", "]", ")", "content", "=", "'\\n'", ".", "join", "(", "[", "'\"\"\"'", ",", "text", ",", "'\"\"\"'", "]", ")", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "content", "+", "'\\n\\n\\n'", ")", "click", ".", "secho", "(", "'Successfully created \"{}\".'", ".", "format", "(", "filename", ")", ",", "fg", "=", "'green'", ")", "# Copy over problem resources if required", "if", "p", ".", "resources", ":", "p", ".", "copy_resources", "(", ")" ]
Generates Python file for a problem.
[ "Generates", "Python", "file", "for", "a", "problem", "." ]
python
train
jordanncg/Bison
bison/cli.py
https://github.com/jordanncg/Bison/blob/c7f04fd67d141fe26cd29db3c3fb3fc0fd0c45df/bison/cli.py#L31-L50
def main(): """A""" parser = argparse.ArgumentParser(description='A site preprocessor based \ on Jinja2, a templating engine for Python') subparsers = parser.add_subparsers(description='The following options \ are available:') # 'create' command parser_create = subparsers.add_parser('create', help='Create a new \ project') parser_create.add_argument('--path', help='The path where the project \ will be created') parser_create.set_defaults(target=create) args = parser.parse_args() arg_values = {key : value for key, value in vars(args).items() \ if key != 'target'} args.target(**arg_values)
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'A site preprocessor based \\\n on Jinja2, a templating engine for Python'", ")", "subparsers", "=", "parser", ".", "add_subparsers", "(", "description", "=", "'The following options \\\n are available:'", ")", "# 'create' command", "parser_create", "=", "subparsers", ".", "add_parser", "(", "'create'", ",", "help", "=", "'Create a new \\\n project'", ")", "parser_create", ".", "add_argument", "(", "'--path'", ",", "help", "=", "'The path where the project \\\n will be created'", ")", "parser_create", ".", "set_defaults", "(", "target", "=", "create", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "arg_values", "=", "{", "key", ":", "value", "for", "key", ",", "value", "in", "vars", "(", "args", ")", ".", "items", "(", ")", "if", "key", "!=", "'target'", "}", "args", ".", "target", "(", "*", "*", "arg_values", ")" ]
A
[ "A" ]
python
train
SwissDataScienceCenter/renku-python
renku/models/cwl/_ascwl.py
https://github.com/SwissDataScienceCenter/renku-python/blob/691644d695b055a01e0ca22b2620e55bbd928c0d/renku/models/cwl/_ascwl.py#L111-L197
def ascwl( inst, recurse=True, filter=None, dict_factory=dict, retain_collection_types=False, basedir=None, ): """Return the ``attrs`` attribute values of *inst* as a dict. Support ``jsonldPredicate`` in a field metadata for generating mappings from lists. Adapted from ``attr._funcs``. """ attrs = fields(inst.__class__) rv = dict_factory() def convert_value(v): """Convert special types.""" if isinstance(v, Path): v = str(v) return os.path.relpath(v, str(basedir)) if basedir else v return v for a in attrs: if a.name.startswith('__'): continue a_name = a.name.rstrip('_') v = getattr(inst, a.name) if filter is not None and not filter(a, v): continue if recurse is True: if has(v.__class__): rv[a_name] = ascwl( v, recurse=True, filter=filter, dict_factory=dict_factory, basedir=basedir, ) elif isinstance(v, (tuple, list, set)): cf = v.__class__ if retain_collection_types is True else list rv[a_name] = cf([ ascwl( i, recurse=True, filter=filter, dict_factory=dict_factory, basedir=basedir, ) if has(i.__class__) else i for i in v ]) if 'jsonldPredicate' in a.metadata: k = a.metadata['jsonldPredicate'].get('mapSubject') if k: vv = dict_factory() for i in rv[a_name]: kk = i.pop(k) vv[kk] = i rv[a_name] = vv elif isinstance(v, dict): df = dict_factory rv[a_name] = df(( ascwl( kk, dict_factory=df, basedir=basedir, ) if has(kk.__class__) else convert_value(kk), ascwl( vv, dict_factory=df, basedir=basedir, ) if has(vv.__class__) else vv ) for kk, vv in iteritems(v)) else: rv[a_name] = convert_value(v) else: rv[a_name] = convert_value(v) if isinstance(inst, CWLClass): rv['class'] = inst.__class__.__name__ return rv
[ "def", "ascwl", "(", "inst", ",", "recurse", "=", "True", ",", "filter", "=", "None", ",", "dict_factory", "=", "dict", ",", "retain_collection_types", "=", "False", ",", "basedir", "=", "None", ",", ")", ":", "attrs", "=", "fields", "(", "inst", ".", "__class__", ")", "rv", "=", "dict_factory", "(", ")", "def", "convert_value", "(", "v", ")", ":", "\"\"\"Convert special types.\"\"\"", "if", "isinstance", "(", "v", ",", "Path", ")", ":", "v", "=", "str", "(", "v", ")", "return", "os", ".", "path", ".", "relpath", "(", "v", ",", "str", "(", "basedir", ")", ")", "if", "basedir", "else", "v", "return", "v", "for", "a", "in", "attrs", ":", "if", "a", ".", "name", ".", "startswith", "(", "'__'", ")", ":", "continue", "a_name", "=", "a", ".", "name", ".", "rstrip", "(", "'_'", ")", "v", "=", "getattr", "(", "inst", ",", "a", ".", "name", ")", "if", "filter", "is", "not", "None", "and", "not", "filter", "(", "a", ",", "v", ")", ":", "continue", "if", "recurse", "is", "True", ":", "if", "has", "(", "v", ".", "__class__", ")", ":", "rv", "[", "a_name", "]", "=", "ascwl", "(", "v", ",", "recurse", "=", "True", ",", "filter", "=", "filter", ",", "dict_factory", "=", "dict_factory", ",", "basedir", "=", "basedir", ",", ")", "elif", "isinstance", "(", "v", ",", "(", "tuple", ",", "list", ",", "set", ")", ")", ":", "cf", "=", "v", ".", "__class__", "if", "retain_collection_types", "is", "True", "else", "list", "rv", "[", "a_name", "]", "=", "cf", "(", "[", "ascwl", "(", "i", ",", "recurse", "=", "True", ",", "filter", "=", "filter", ",", "dict_factory", "=", "dict_factory", ",", "basedir", "=", "basedir", ",", ")", "if", "has", "(", "i", ".", "__class__", ")", "else", "i", "for", "i", "in", "v", "]", ")", "if", "'jsonldPredicate'", "in", "a", ".", "metadata", ":", "k", "=", "a", ".", "metadata", "[", "'jsonldPredicate'", "]", ".", "get", "(", "'mapSubject'", ")", "if", "k", ":", "vv", "=", "dict_factory", "(", ")", "for", "i", "in", "rv", "[", "a_name", "]", ":", "kk", "=", "i", ".", "pop", "(", "k", ")", "vv", "[", "kk", "]", "=", "i", "rv", "[", "a_name", "]", "=", "vv", "elif", "isinstance", "(", "v", ",", "dict", ")", ":", "df", "=", "dict_factory", "rv", "[", "a_name", "]", "=", "df", "(", "(", "ascwl", "(", "kk", ",", "dict_factory", "=", "df", ",", "basedir", "=", "basedir", ",", ")", "if", "has", "(", "kk", ".", "__class__", ")", "else", "convert_value", "(", "kk", ")", ",", "ascwl", "(", "vv", ",", "dict_factory", "=", "df", ",", "basedir", "=", "basedir", ",", ")", "if", "has", "(", "vv", ".", "__class__", ")", "else", "vv", ")", "for", "kk", ",", "vv", "in", "iteritems", "(", "v", ")", ")", "else", ":", "rv", "[", "a_name", "]", "=", "convert_value", "(", "v", ")", "else", ":", "rv", "[", "a_name", "]", "=", "convert_value", "(", "v", ")", "if", "isinstance", "(", "inst", ",", "CWLClass", ")", ":", "rv", "[", "'class'", "]", "=", "inst", ".", "__class__", ".", "__name__", "return", "rv" ]
Return the ``attrs`` attribute values of *inst* as a dict. Support ``jsonldPredicate`` in a field metadata for generating mappings from lists. Adapted from ``attr._funcs``.
[ "Return", "the", "attrs", "attribute", "values", "of", "*", "inst", "*", "as", "a", "dict", "." ]
python
train
lsst-sqre/documenteer
documenteer/sphinxconfig/stackconf.py
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxconfig/stackconf.py#L220-L254
def _insert_automodapi_configs(c): """Add configurations related to automodapi, autodoc, and numpydoc to the state. """ # Don't show summaries of the members in each class along with the # class' docstring c['numpydoc_show_class_members'] = False c['autosummary_generate'] = True c['automodapi_toctreedirnm'] = 'py-api' c['automodsumm_inherited_members'] = True # Docstrings for classes and methods are inherited from parents. c['autodoc_inherit_docstrings'] = True # Class documentation should only contain the class docstring and # ignore the __init__ docstring, account to LSST coding standards. # c['autoclass_content'] = "both" c['autoclass_content'] = "class" # Default flags for automodapi directives. Special members are dunder # methods. # NOTE: We want to used `inherited-members`, but it seems to be causing # documentation duplication in the automodapi listings. We're leaving # this out for now. See https://jira.lsstcorp.org/browse/DM-14782 for # additional notes. # NOTE: Without inherited members set, special-members doesn't need seem # to have an effect (even for special members where the docstrings are # directly written in the class, not inherited. # c['autodoc_default_flags'] = ['inherited-members'] c['autodoc_default_flags'] = ['show-inheritance', 'special-members'] return c
[ "def", "_insert_automodapi_configs", "(", "c", ")", ":", "# Don't show summaries of the members in each class along with the", "# class' docstring", "c", "[", "'numpydoc_show_class_members'", "]", "=", "False", "c", "[", "'autosummary_generate'", "]", "=", "True", "c", "[", "'automodapi_toctreedirnm'", "]", "=", "'py-api'", "c", "[", "'automodsumm_inherited_members'", "]", "=", "True", "# Docstrings for classes and methods are inherited from parents.", "c", "[", "'autodoc_inherit_docstrings'", "]", "=", "True", "# Class documentation should only contain the class docstring and", "# ignore the __init__ docstring, account to LSST coding standards.", "# c['autoclass_content'] = \"both\"", "c", "[", "'autoclass_content'", "]", "=", "\"class\"", "# Default flags for automodapi directives. Special members are dunder", "# methods.", "# NOTE: We want to used `inherited-members`, but it seems to be causing", "# documentation duplication in the automodapi listings. We're leaving", "# this out for now. See https://jira.lsstcorp.org/browse/DM-14782 for", "# additional notes.", "# NOTE: Without inherited members set, special-members doesn't need seem", "# to have an effect (even for special members where the docstrings are", "# directly written in the class, not inherited.", "# c['autodoc_default_flags'] = ['inherited-members']", "c", "[", "'autodoc_default_flags'", "]", "=", "[", "'show-inheritance'", ",", "'special-members'", "]", "return", "c" ]
Add configurations related to automodapi, autodoc, and numpydoc to the state.
[ "Add", "configurations", "related", "to", "automodapi", "autodoc", "and", "numpydoc", "to", "the", "state", "." ]
python
train
benhoff/pluginmanager
pluginmanager/plugin_interface.py
https://github.com/benhoff/pluginmanager/blob/a8a184f9ebfbb521703492cb88c1dbda4cd04c06/pluginmanager/plugin_interface.py#L244-L256
def set_plugin_filepaths(self, filepaths, except_blacklisted=True): """ Sets internal state to `filepaths`. Recommend passing in absolute filepaths. Method will attempt to convert to absolute paths if they are not already. `filepaths` can be a single object or an iterable. If `except_blacklisted` is `True`, all `filepaths` that have been blacklisted will not be set. """ self.file_manager.set_plugin_filepaths(filepaths, except_blacklisted)
[ "def", "set_plugin_filepaths", "(", "self", ",", "filepaths", ",", "except_blacklisted", "=", "True", ")", ":", "self", ".", "file_manager", ".", "set_plugin_filepaths", "(", "filepaths", ",", "except_blacklisted", ")" ]
Sets internal state to `filepaths`. Recommend passing in absolute filepaths. Method will attempt to convert to absolute paths if they are not already. `filepaths` can be a single object or an iterable. If `except_blacklisted` is `True`, all `filepaths` that have been blacklisted will not be set.
[ "Sets", "internal", "state", "to", "filepaths", ".", "Recommend", "passing", "in", "absolute", "filepaths", ".", "Method", "will", "attempt", "to", "convert", "to", "absolute", "paths", "if", "they", "are", "not", "already", "." ]
python
train
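A short usage sketch for the method above. The top-level PluginInterface import and the plugin path are assumptions made for illustration:

import os
from pluginmanager import PluginInterface  # assumed top-level export

interface = PluginInterface()
interface.set_plugin_filepaths(
    [os.path.abspath('plugins/example_plugin.py')],  # placeholder path
    except_blacklisted=True,
)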
ARMmbed/yotta
yotta/lib/target.py
https://github.com/ARMmbed/yotta/blob/56bc1e56c602fa20307b23fe27518e9cd6c11af1/yotta/lib/target.py#L311-L325
def _loadConfig(self): ''' load the configuration information from the target hierarchy ''' config_dicts = [self.additional_config, self.app_config] + [t.getConfig() for t in self.hierarchy] # create an identical set of dictionaries, but with the names of the # sources in place of the values. When these are merged they will show # where each merged property came from: config_blame = [ _mirrorStructure(self.additional_config, 'command-line config'), _mirrorStructure(self.app_config, 'application\'s config.json'), ] + [ _mirrorStructure(t.getConfig(), t.getName()) for t in self.hierarchy ] self.config = _mergeDictionaries(*config_dicts) self.config_blame = _mergeDictionaries(*config_blame)
[ "def", "_loadConfig", "(", "self", ")", ":", "config_dicts", "=", "[", "self", ".", "additional_config", ",", "self", ".", "app_config", "]", "+", "[", "t", ".", "getConfig", "(", ")", "for", "t", "in", "self", ".", "hierarchy", "]", "# create an identical set of dictionaries, but with the names of the", "# sources in place of the values. When these are merged they will show", "# where each merged property came from:", "config_blame", "=", "[", "_mirrorStructure", "(", "self", ".", "additional_config", ",", "'command-line config'", ")", ",", "_mirrorStructure", "(", "self", ".", "app_config", ",", "'application\\'s config.json'", ")", ",", "]", "+", "[", "_mirrorStructure", "(", "t", ".", "getConfig", "(", ")", ",", "t", ".", "getName", "(", ")", ")", "for", "t", "in", "self", ".", "hierarchy", "]", "self", ".", "config", "=", "_mergeDictionaries", "(", "*", "config_dicts", ")", "self", ".", "config_blame", "=", "_mergeDictionaries", "(", "*", "config_blame", ")" ]
load the configuration information from the target hierarchy
[ "load", "the", "configuration", "information", "from", "the", "target", "hierarchy" ]
python
valid
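The "blame" trick in the comments above (an identically shaped dictionary whose leaves name the config source) can be illustrated without yotta itself; the two helpers below are hypothetical stand-ins for _mirrorStructure and _mergeDictionaries, with earlier arguments taking precedence:

def mirror_structure(d, label):
    # Same nesting as d, but every leaf value is replaced by the source label.
    return {k: mirror_structure(v, label) if isinstance(v, dict) else label
            for k, v in d.items()}

def merge(*dicts):
    out = {}
    for d in reversed(dicts):  # apply lowest priority first, so earlier dicts win
        for k, v in d.items():
            if isinstance(v, dict) and isinstance(out.get(k), dict):
                out[k] = merge(v, out[k])
            else:
                out[k] = v
    return out

cmdline = {'debug': {'level': 3}}
target = {'debug': {'level': 1, 'uart': 0}}
print(merge(cmdline, target))
# {'debug': {'level': 3, 'uart': 0}}
print(merge(mirror_structure(cmdline, 'command-line config'),
            mirror_structure(target, 'target')))
# {'debug': {'level': 'command-line config', 'uart': 'target'}}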
nickmckay/LiPD-utilities
Python/lipd/timeseries.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/timeseries.py#L252-L295
def _extract_special(current, table_data): """ Extract year, age, and depth column from table data :param dict table_data: Data at the table level :param dict current: Current data :return dict current: """ logger_ts.info("enter extract_special") try: # Add age, year, and depth columns to ts_root where possible for k, v in table_data['columns'].items(): s = "" # special case for year bp, or any variation of it. Translate key to "age"" if "bp" in k.lower(): s = "age" # all other normal cases. clean key and set key. elif any(x in k.lower() for x in ('age', 'depth', 'year', "yr", "distance_from_top", "distance")): # Some keys have units hanging on them (i.e. 'year_ad', 'depth_cm'). We don't want units on the keys if re_pandas_x_und.match(k): s = k.split('_')[0] elif "distance" in k: s = "depth" else: s = k # create the entry in ts_root. if s: try: current[s] = v['values'] except KeyError as e: # Values key was not found. logger_ts.warn("extract_special: KeyError: 'values' not found, {}".format(e)) try: current[s + 'Units'] = v['units'] except KeyError as e: # Values key was not found. logger_ts.warn("extract_special: KeyError: 'units' not found, {}".format(e)) except Exception as e: logger_ts.error("extract_special: {}".format(e)) return current
[ "def", "_extract_special", "(", "current", ",", "table_data", ")", ":", "logger_ts", ".", "info", "(", "\"enter extract_special\"", ")", "try", ":", "# Add age, year, and depth columns to ts_root where possible", "for", "k", ",", "v", "in", "table_data", "[", "'columns'", "]", ".", "items", "(", ")", ":", "s", "=", "\"\"", "# special case for year bp, or any variation of it. Translate key to \"age\"\"", "if", "\"bp\"", "in", "k", ".", "lower", "(", ")", ":", "s", "=", "\"age\"", "# all other normal cases. clean key and set key.", "elif", "any", "(", "x", "in", "k", ".", "lower", "(", ")", "for", "x", "in", "(", "'age'", ",", "'depth'", ",", "'year'", ",", "\"yr\"", ",", "\"distance_from_top\"", ",", "\"distance\"", ")", ")", ":", "# Some keys have units hanging on them (i.e. 'year_ad', 'depth_cm'). We don't want units on the keys", "if", "re_pandas_x_und", ".", "match", "(", "k", ")", ":", "s", "=", "k", ".", "split", "(", "'_'", ")", "[", "0", "]", "elif", "\"distance\"", "in", "k", ":", "s", "=", "\"depth\"", "else", ":", "s", "=", "k", "# create the entry in ts_root.", "if", "s", ":", "try", ":", "current", "[", "s", "]", "=", "v", "[", "'values'", "]", "except", "KeyError", "as", "e", ":", "# Values key was not found.", "logger_ts", ".", "warn", "(", "\"extract_special: KeyError: 'values' not found, {}\"", ".", "format", "(", "e", ")", ")", "try", ":", "current", "[", "s", "+", "'Units'", "]", "=", "v", "[", "'units'", "]", "except", "KeyError", "as", "e", ":", "# Values key was not found.", "logger_ts", ".", "warn", "(", "\"extract_special: KeyError: 'units' not found, {}\"", ".", "format", "(", "e", ")", ")", "except", "Exception", "as", "e", ":", "logger_ts", ".", "error", "(", "\"extract_special: {}\"", ".", "format", "(", "e", ")", ")", "return", "current" ]
Extract year, age, and depth column from table data :param dict table_data: Data at the table level :param dict current: Current data :return dict current:
[ "Extract", "year", "age", "and", "depth", "column", "from", "table", "data", ":", "param", "dict", "table_data", ":", "Data", "at", "the", "table", "level", ":", "param", "dict", "current", ":", "Current", "data", ":", "return", "dict", "current", ":" ]
python
train
apache/incubator-heron
third_party/python/cpplint/cpplint.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/third_party/python/cpplint/cpplint.py#L4607-L4669
def _ClassifyInclude(fileinfo, include, is_system): """Figures out what kind of header 'include' is. Args: fileinfo: The current file cpplint is running over. A FileInfo instance. include: The path to a #included file. is_system: True if the #include used <> rather than "". Returns: One of the _XXX_HEADER constants. For example: >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True) _C_SYS_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True) _CPP_SYS_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False) _LIKELY_MY_HEADER >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'), ... 'bar/foo_other_ext.h', False) _POSSIBLE_MY_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False) _OTHER_HEADER """ # This is a list of all standard c++ header files, except # those already checked for above. is_cpp_h = include in _CPP_HEADERS # Headers with C++ extensions shouldn't be considered C system headers if is_system and os.path.splitext(include)[1] in ['.hpp', '.hxx', '.h++']: is_system = False if is_system: if is_cpp_h: return _CPP_SYS_HEADER else: return _C_SYS_HEADER # If the target file and the include we're checking share a # basename when we drop common extensions, and the include # lives in . , then it's likely to be owned by the target file. target_dir, target_base = ( os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName()))) include_dir, include_base = os.path.split(_DropCommonSuffixes(include)) target_dir_pub = os.path.normpath(target_dir + '/../public') target_dir_pub = target_dir_pub.replace('\\', '/') if target_base == include_base and ( include_dir == target_dir or include_dir == target_dir_pub): return _LIKELY_MY_HEADER # If the target and include share some initial basename # component, it's possible the target is implementing the # include, so it's allowed to be first, but we'll never # complain if it's not there. target_first_component = _RE_FIRST_COMPONENT.match(target_base) include_first_component = _RE_FIRST_COMPONENT.match(include_base) if (target_first_component and include_first_component and target_first_component.group(0) == include_first_component.group(0)): return _POSSIBLE_MY_HEADER return _OTHER_HEADER
[ "def", "_ClassifyInclude", "(", "fileinfo", ",", "include", ",", "is_system", ")", ":", "# This is a list of all standard c++ header files, except", "# those already checked for above.", "is_cpp_h", "=", "include", "in", "_CPP_HEADERS", "# Headers with C++ extensions shouldn't be considered C system headers", "if", "is_system", "and", "os", ".", "path", ".", "splitext", "(", "include", ")", "[", "1", "]", "in", "[", "'.hpp'", ",", "'.hxx'", ",", "'.h++'", "]", ":", "is_system", "=", "False", "if", "is_system", ":", "if", "is_cpp_h", ":", "return", "_CPP_SYS_HEADER", "else", ":", "return", "_C_SYS_HEADER", "# If the target file and the include we're checking share a", "# basename when we drop common extensions, and the include", "# lives in . , then it's likely to be owned by the target file.", "target_dir", ",", "target_base", "=", "(", "os", ".", "path", ".", "split", "(", "_DropCommonSuffixes", "(", "fileinfo", ".", "RepositoryName", "(", ")", ")", ")", ")", "include_dir", ",", "include_base", "=", "os", ".", "path", ".", "split", "(", "_DropCommonSuffixes", "(", "include", ")", ")", "target_dir_pub", "=", "os", ".", "path", ".", "normpath", "(", "target_dir", "+", "'/../public'", ")", "target_dir_pub", "=", "target_dir_pub", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "if", "target_base", "==", "include_base", "and", "(", "include_dir", "==", "target_dir", "or", "include_dir", "==", "target_dir_pub", ")", ":", "return", "_LIKELY_MY_HEADER", "# If the target and include share some initial basename", "# component, it's possible the target is implementing the", "# include, so it's allowed to be first, but we'll never", "# complain if it's not there.", "target_first_component", "=", "_RE_FIRST_COMPONENT", ".", "match", "(", "target_base", ")", "include_first_component", "=", "_RE_FIRST_COMPONENT", ".", "match", "(", "include_base", ")", "if", "(", "target_first_component", "and", "include_first_component", "and", "target_first_component", ".", "group", "(", "0", ")", "==", "include_first_component", ".", "group", "(", "0", ")", ")", ":", "return", "_POSSIBLE_MY_HEADER", "return", "_OTHER_HEADER" ]
Figures out what kind of header 'include' is. Args: fileinfo: The current file cpplint is running over. A FileInfo instance. include: The path to a #included file. is_system: True if the #include used <> rather than "". Returns: One of the _XXX_HEADER constants. For example: >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True) _C_SYS_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True) _CPP_SYS_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False) _LIKELY_MY_HEADER >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'), ... 'bar/foo_other_ext.h', False) _POSSIBLE_MY_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False) _OTHER_HEADER
[ "Figures", "out", "what", "kind", "of", "header", "include", "is", "." ]
python
valid
openxc/openxc-python
openxc/sources/trace.py
https://github.com/openxc/openxc-python/blob/4becb4a6310bd658c125195ef6ffea4deaf7d7e7/openxc/sources/trace.py#L45-L52
def _store_timestamp(self, timestamp): """If not already saved, cache the first timestamp in the active trace file on the instance. """ if getattr(self, 'first_timestamp', None) is None: self.first_timestamp = timestamp LOG.debug("Storing %d as the first timestamp of the trace file %s", self.first_timestamp, self.filename)
[ "def", "_store_timestamp", "(", "self", ",", "timestamp", ")", ":", "if", "getattr", "(", "self", ",", "'first_timestamp'", ",", "None", ")", "is", "None", ":", "self", ".", "first_timestamp", "=", "timestamp", "LOG", ".", "debug", "(", "\"Storing %d as the first timestamp of the trace file %s\"", ",", "self", ".", "first_timestamp", ",", "self", ".", "filename", ")" ]
If not already saved, cache the first timestamp in the active trace file on the instance.
[ "If", "not", "already", "saved", "cache", "the", "first", "timestamp", "in", "the", "active", "trace", "file", "on", "the", "instance", "." ]
python
train
mikedh/trimesh
trimesh/path/exchange/svg_io.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/exchange/svg_io.py#L25-L76
def svg_to_path(file_obj, file_type=None): """ Load an SVG file into a Path2D object. Parameters ----------- file_obj : open file object Contains SVG data file_type: None Not used Returns ----------- loaded : dict With kwargs for Path2D constructor """ def element_transform(e, max_depth=100): """ Find a transformation matrix for an XML element. """ matrices = [] current = e for i in range(max_depth): if 'transform' in current.attrib: mat = transform_to_matrices(current.attrib['transform']) matrices.extend(mat) # cached[current] = mat current = current.getparent() if current is None: break if len(matrices) == 0: return np.eye(3) elif len(matrices) == 1: return matrices[0] else: return util.multi_dot(matrices[::-1]) # first parse the XML xml = etree.fromstring(file_obj.read()) # store paths and transforms as # (path string, 3x3 matrix) paths = [] # store every path element for element in xml.iter('{*}path'): paths.append((element.attrib['d'], element_transform(element))) return _svg_path_convert(paths)
[ "def", "svg_to_path", "(", "file_obj", ",", "file_type", "=", "None", ")", ":", "def", "element_transform", "(", "e", ",", "max_depth", "=", "100", ")", ":", "\"\"\"\n Find a transformation matrix for an XML element.\n \"\"\"", "matrices", "=", "[", "]", "current", "=", "e", "for", "i", "in", "range", "(", "max_depth", ")", ":", "if", "'transform'", "in", "current", ".", "attrib", ":", "mat", "=", "transform_to_matrices", "(", "current", ".", "attrib", "[", "'transform'", "]", ")", "matrices", ".", "extend", "(", "mat", ")", "# cached[current] = mat", "current", "=", "current", ".", "getparent", "(", ")", "if", "current", "is", "None", ":", "break", "if", "len", "(", "matrices", ")", "==", "0", ":", "return", "np", ".", "eye", "(", "3", ")", "elif", "len", "(", "matrices", ")", "==", "1", ":", "return", "matrices", "[", "0", "]", "else", ":", "return", "util", ".", "multi_dot", "(", "matrices", "[", ":", ":", "-", "1", "]", ")", "# first parse the XML", "xml", "=", "etree", ".", "fromstring", "(", "file_obj", ".", "read", "(", ")", ")", "# store paths and transforms as", "# (path string, 3x3 matrix)", "paths", "=", "[", "]", "# store every path element", "for", "element", "in", "xml", ".", "iter", "(", "'{*}path'", ")", ":", "paths", ".", "append", "(", "(", "element", ".", "attrib", "[", "'d'", "]", ",", "element_transform", "(", "element", ")", ")", ")", "return", "_svg_path_convert", "(", "paths", ")" ]
Load an SVG file into a Path2D object. Parameters ----------- file_obj : open file object Contains SVG data file_type: None Not used Returns ----------- loaded : dict With kwargs for Path2D constructor
[ "Load", "an", "SVG", "file", "into", "a", "Path2D", "object", "." ]
python
train
opencobra/cobrapy
cobra/core/dictlist.py
https://github.com/opencobra/cobrapy/blob/9d1987cdb3a395cf4125a3439c3b002ff2be2009/cobra/core/dictlist.py#L334-L343
def insert(self, index, object): """insert object before index""" self._check(object.id) list.insert(self, index, object) # all subsequent entries now have been shifted up by 1 _dict = self._dict for i, j in iteritems(_dict): if j >= index: _dict[i] = j + 1 _dict[object.id] = index
[ "def", "insert", "(", "self", ",", "index", ",", "object", ")", ":", "self", ".", "_check", "(", "object", ".", "id", ")", "list", ".", "insert", "(", "self", ",", "index", ",", "object", ")", "# all subsequent entries now have been shifted up by 1", "_dict", "=", "self", ".", "_dict", "for", "i", ",", "j", "in", "iteritems", "(", "_dict", ")", ":", "if", "j", ">=", "index", ":", "_dict", "[", "i", "]", "=", "j", "+", "1", "_dict", "[", "object", ".", "id", "]", "=", "index" ]
insert object before index
[ "insert", "object", "before", "index" ]
python
valid
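A small usage example, assuming DictList and Metabolite are importable from cobrapy as in the linked source; it shows that the cached id-to-index mapping stays consistent after the insert:

from cobra import Metabolite
from cobra.core.dictlist import DictList

dl = DictList()
dl.extend([Metabolite('a'), Metabolite('c')])
dl.insert(1, Metabolite('b'))
print([m.id for m in dl])  # ['a', 'b', 'c']
print(dl.index('c'))       # 2 -- the entry after the insert point was shifted up by one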
Yelp/kafka-utils
kafka_utils/kafka_check/commands/replication_factor.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_check/commands/replication_factor.py#L42-L55
def run_command(self): """Replication factor command, checks replication factor settings and compare it with min.isr in the cluster.""" topics = get_topic_partition_metadata(self.cluster_config.broker_list) topics_with_wrong_rf = _find_topics_with_wrong_rp( topics, self.zk, self.args.default_min_isr, ) errcode = status_code.OK if not topics_with_wrong_rf else status_code.CRITICAL out = _prepare_output(topics_with_wrong_rf, self.args.verbose) return errcode, out
[ "def", "run_command", "(", "self", ")", ":", "topics", "=", "get_topic_partition_metadata", "(", "self", ".", "cluster_config", ".", "broker_list", ")", "topics_with_wrong_rf", "=", "_find_topics_with_wrong_rp", "(", "topics", ",", "self", ".", "zk", ",", "self", ".", "args", ".", "default_min_isr", ",", ")", "errcode", "=", "status_code", ".", "OK", "if", "not", "topics_with_wrong_rf", "else", "status_code", ".", "CRITICAL", "out", "=", "_prepare_output", "(", "topics_with_wrong_rf", ",", "self", ".", "args", ".", "verbose", ")", "return", "errcode", ",", "out" ]
Replication factor command: checks replication factor settings and compares them with min.isr in the cluster.
[ "Replication", "factor", "command", "checks", "replication", "factor", "settings", "and", "compare", "it", "with", "min", ".", "isr", "in", "the", "cluster", "." ]
python
train
gwastro/pycbc
pycbc/tmpltbank/bank_output_utils.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/tmpltbank/bank_output_utils.py#L50-L100
def return_search_summary(start_time=0, end_time=0, nevents=0, ifos=None, **kwargs): """ Function to create a SearchSummary object where all columns are populated but all are set to values that test False (ie. strings to '', floats/ints to 0, ...). This avoids errors when you try to create a table containing columns you don't care about, but which still need populating. NOTE: This will also produce a process_id with 0 values. For most applications these should be set to their correct values. It then populates columns if given them as options. Returns -------- lsctables.SeachSummary The "empty" SearchSummary object. """ if ifos is None: ifos = [] # create an empty search summary search_summary = lsctables.SearchSummary() cols = lsctables.SearchSummaryTable.validcolumns for entry in cols.keys(): if cols[entry] in ['real_4','real_8']: setattr(search_summary,entry,0.) elif cols[entry] == 'int_4s': setattr(search_summary,entry,0) elif cols[entry] == 'lstring': setattr(search_summary,entry,'') elif entry == 'process_id': search_summary.process_id = ilwd.ilwdchar("process:process_id:0") else: raise ValueError("Column %s not recognized" %(entry) ) # fill in columns if len(ifos): search_summary.ifos = ','.join(ifos) if nevents: search_summary.nevents = nevents if start_time and end_time: search_summary.in_start_time = int(start_time) search_summary.in_start_time_ns = int(start_time % 1 * 1e9) search_summary.in_end_time = int(end_time) search_summary.in_end_time_ns = int(end_time % 1 * 1e9) search_summary.out_start_time = int(start_time) search_summary.out_start_time_ns = int(start_time % 1 * 1e9) search_summary.out_end_time = int(end_time) search_summary.out_end_time_ns = int(end_time % 1 * 1e9) return search_summary
[ "def", "return_search_summary", "(", "start_time", "=", "0", ",", "end_time", "=", "0", ",", "nevents", "=", "0", ",", "ifos", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "ifos", "is", "None", ":", "ifos", "=", "[", "]", "# create an empty search summary", "search_summary", "=", "lsctables", ".", "SearchSummary", "(", ")", "cols", "=", "lsctables", ".", "SearchSummaryTable", ".", "validcolumns", "for", "entry", "in", "cols", ".", "keys", "(", ")", ":", "if", "cols", "[", "entry", "]", "in", "[", "'real_4'", ",", "'real_8'", "]", ":", "setattr", "(", "search_summary", ",", "entry", ",", "0.", ")", "elif", "cols", "[", "entry", "]", "==", "'int_4s'", ":", "setattr", "(", "search_summary", ",", "entry", ",", "0", ")", "elif", "cols", "[", "entry", "]", "==", "'lstring'", ":", "setattr", "(", "search_summary", ",", "entry", ",", "''", ")", "elif", "entry", "==", "'process_id'", ":", "search_summary", ".", "process_id", "=", "ilwd", ".", "ilwdchar", "(", "\"process:process_id:0\"", ")", "else", ":", "raise", "ValueError", "(", "\"Column %s not recognized\"", "%", "(", "entry", ")", ")", "# fill in columns", "if", "len", "(", "ifos", ")", ":", "search_summary", ".", "ifos", "=", "','", ".", "join", "(", "ifos", ")", "if", "nevents", ":", "search_summary", ".", "nevents", "=", "nevents", "if", "start_time", "and", "end_time", ":", "search_summary", ".", "in_start_time", "=", "int", "(", "start_time", ")", "search_summary", ".", "in_start_time_ns", "=", "int", "(", "start_time", "%", "1", "*", "1e9", ")", "search_summary", ".", "in_end_time", "=", "int", "(", "end_time", ")", "search_summary", ".", "in_end_time_ns", "=", "int", "(", "end_time", "%", "1", "*", "1e9", ")", "search_summary", ".", "out_start_time", "=", "int", "(", "start_time", ")", "search_summary", ".", "out_start_time_ns", "=", "int", "(", "start_time", "%", "1", "*", "1e9", ")", "search_summary", ".", "out_end_time", "=", "int", "(", "end_time", ")", "search_summary", ".", "out_end_time_ns", "=", "int", "(", "end_time", "%", "1", "*", "1e9", ")", "return", "search_summary" ]
Function to create a SearchSummary object where all columns are populated but all are set to values that test False (i.e. strings to '', floats/ints to 0, ...). This avoids errors when you try to create a table containing columns you don't care about, but which still need populating. NOTE: This will also produce a process_id with 0 values. For most applications these should be set to their correct values. It then populates columns if they are given as options. Returns -------- lsctables.SearchSummary The "empty" SearchSummary object.
[ "Function", "to", "create", "a", "SearchSummary", "object", "where", "all", "columns", "are", "populated", "but", "all", "are", "set", "to", "values", "that", "test", "False", "(", "ie", ".", "strings", "to", "floats", "/", "ints", "to", "0", "...", ")", ".", "This", "avoids", "errors", "when", "you", "try", "to", "create", "a", "table", "containing", "columns", "you", "don", "t", "care", "about", "but", "which", "still", "need", "populating", ".", "NOTE", ":", "This", "will", "also", "produce", "a", "process_id", "with", "0", "values", ".", "For", "most", "applications", "these", "should", "be", "set", "to", "their", "correct", "values", "." ]
python
train
tensorflow/probability
tensorflow_probability/python/layers/initializers.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/initializers.py#L106-L116
def get_config(self): """Returns initializer configuration as a JSON-serializable dict.""" return { 'initializers': [ tf.compat.v2.initializers.serialize( tf.keras.initializers.get(init)) for init in self.initializers ], 'sizes': self.sizes, 'validate_args': self.validate_args, }
[ "def", "get_config", "(", "self", ")", ":", "return", "{", "'initializers'", ":", "[", "tf", ".", "compat", ".", "v2", ".", "initializers", ".", "serialize", "(", "tf", ".", "keras", ".", "initializers", ".", "get", "(", "init", ")", ")", "for", "init", "in", "self", ".", "initializers", "]", ",", "'sizes'", ":", "self", ".", "sizes", ",", "'validate_args'", ":", "self", ".", "validate_args", ",", "}" ]
Returns initializer configuration as a JSON-serializable dict.
[ "Returns", "initializer", "configuration", "as", "a", "JSON", "-", "serializable", "dict", "." ]
python
test
elastic/elasticsearch-py
elasticsearch/client/cluster.py
https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/cluster.py#L101-L121
def reroute(self, body=None, params=None): """ Explicitly execute a cluster reroute allocation command including specific commands. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-reroute.html>`_ :arg body: The definition of `commands` to perform (`move`, `cancel`, `allocate`) :arg dry_run: Simulate the operation only and return the resulting state :arg explain: Return an explanation of why the commands can or cannot be executed :arg master_timeout: Explicit operation timeout for connection to master node :arg metric: Limit the information returned to the specified metrics. Defaults to all but metadata, valid choices are: '_all', 'blocks', 'metadata', 'nodes', 'routing_table', 'master_node', 'version' :arg retry_failed: Retries allocation of shards that are blocked due to too many subsequent allocation failures :arg timeout: Explicit operation timeout """ return self.transport.perform_request('POST', '/_cluster/reroute', params=params, body=body)
[ "def", "reroute", "(", "self", ",", "body", "=", "None", ",", "params", "=", "None", ")", ":", "return", "self", ".", "transport", ".", "perform_request", "(", "'POST'", ",", "'/_cluster/reroute'", ",", "params", "=", "params", ",", "body", "=", "body", ")" ]
Explicitly execute a cluster reroute allocation command including specific commands. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-reroute.html>`_ :arg body: The definition of `commands` to perform (`move`, `cancel`, `allocate`) :arg dry_run: Simulate the operation only and return the resulting state :arg explain: Return an explanation of why the commands can or cannot be executed :arg master_timeout: Explicit operation timeout for connection to master node :arg metric: Limit the information returned to the specified metrics. Defaults to all but metadata, valid choices are: '_all', 'blocks', 'metadata', 'nodes', 'routing_table', 'master_node', 'version' :arg retry_failed: Retries allocation of shards that are blocked due to too many subsequent allocation failures :arg timeout: Explicit operation timeout
[ "Explicitly", "execute", "a", "cluster", "reroute", "allocation", "command", "including", "specific", "commands", ".", "<http", ":", "//", "www", ".", "elastic", ".", "co", "/", "guide", "/", "en", "/", "elasticsearch", "/", "reference", "/", "current", "/", "cluster", "-", "reroute", ".", "html", ">", "_" ]
python
train
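A minimal usage sketch for the `cluster.reroute` call catalogued in the record above. It assumes a reachable cluster on the default localhost:9200 address; the index and node names in the command body are hypothetical and only illustrate the documented `commands`, `dry_run`, and `explain` parameters.

from elasticsearch import Elasticsearch

es = Elasticsearch()  # defaults to localhost:9200; adjust hosts for a real cluster

# Hypothetical reroute command: move shard 0 of an index between two nodes.
body = {
    "commands": [
        {"move": {"index": "logs-2019", "shard": 0,
                  "from_node": "node-1", "to_node": "node-2"}}
    ]
}

# dry_run simulates the operation; explain reports why it can or cannot run.
response = es.cluster.reroute(body=body, dry_run=True, explain=True)
print(response.get("explanations", []))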
nickmckay/LiPD-utilities
Python/lipd/lpd_noaa.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/lpd_noaa.py#L421-L465
def __reorganize(self): """ Reorganize the keys into their proper section order for the NOAA output file DO NOT parse data tables (paleoData or chronData). We will do those separately. :param str key: :param any value: :return none: """ logger_lpd_noaa.info("enter reorganize") # NOAA files are organized in sections differently than NOAA. try to translate these sections. for key, value in self.lipd_data.items(): # if this key has a noaa match, it'll be returned. otherwise, empty string for no match noaa_key = self.__get_noaa_key(key) # check if this lipd key is in the NOAA_KEYS conversion dictionary. # if it's not, then stash it in our ignore list. if key not in LIPD_NOAA_MAP_FLAT: self.noaa_data_sorted["Ignore"][noaa_key] = value # studyName is placed two times in file. Line #1, and under the 'title' section elif noaa_key == "Study_Name": # study name gets put in two locations self.noaa_data_sorted["Top"][noaa_key] = value self.noaa_data_sorted["Title"][noaa_key] = value # put archiveType in self, because we'll reuse it later for the 9-part-variables as well elif noaa_key == "Archive": self.lsts_tmp["archive"].append(value) # Dataset_DOI is a repeatable element. the key could be a single DOI, or a list of DOIs. elif noaa_key == "Dataset_DOI": self.__parse_dois(value) # all other keys. determine which noaa section they belong in. else: # noaa keys are sorted by section. for header, content in NOAA_KEYS_BY_SECTION.items(): try: # if our key is a noaa header key, then that means it's the ONLY key in the section. # set value directly if noaa_key == header: self.noaa_data_sorted[header] = value # all other cases, the key is part of the section elif noaa_key in content: self.noaa_data_sorted[header][noaa_key] = value except KeyError: # this shouldn't ever really happen, but just in case logger_lpd_noaa.warn("lpd_noaa: reorganize: KeyError: {}".format(noaa_key)) return
[ "def", "__reorganize", "(", "self", ")", ":", "logger_lpd_noaa", ".", "info", "(", "\"enter reorganize\"", ")", "# NOAA files are organized in sections differently than NOAA. try to translate these sections.", "for", "key", ",", "value", "in", "self", ".", "lipd_data", ".", "items", "(", ")", ":", "# if this key has a noaa match, it'll be returned. otherwise, empty string for no match", "noaa_key", "=", "self", ".", "__get_noaa_key", "(", "key", ")", "# check if this lipd key is in the NOAA_KEYS conversion dictionary.", "# if it's not, then stash it in our ignore list.", "if", "key", "not", "in", "LIPD_NOAA_MAP_FLAT", ":", "self", ".", "noaa_data_sorted", "[", "\"Ignore\"", "]", "[", "noaa_key", "]", "=", "value", "# studyName is placed two times in file. Line #1, and under the 'title' section", "elif", "noaa_key", "==", "\"Study_Name\"", ":", "# study name gets put in two locations", "self", ".", "noaa_data_sorted", "[", "\"Top\"", "]", "[", "noaa_key", "]", "=", "value", "self", ".", "noaa_data_sorted", "[", "\"Title\"", "]", "[", "noaa_key", "]", "=", "value", "# put archiveType in self, because we'll reuse it later for the 9-part-variables as well", "elif", "noaa_key", "==", "\"Archive\"", ":", "self", ".", "lsts_tmp", "[", "\"archive\"", "]", ".", "append", "(", "value", ")", "# Dataset_DOI is a repeatable element. the key could be a single DOI, or a list of DOIs.", "elif", "noaa_key", "==", "\"Dataset_DOI\"", ":", "self", ".", "__parse_dois", "(", "value", ")", "# all other keys. determine which noaa section they belong in.", "else", ":", "# noaa keys are sorted by section.", "for", "header", ",", "content", "in", "NOAA_KEYS_BY_SECTION", ".", "items", "(", ")", ":", "try", ":", "# if our key is a noaa header key, then that means it's the ONLY key in the section.", "# set value directly", "if", "noaa_key", "==", "header", ":", "self", ".", "noaa_data_sorted", "[", "header", "]", "=", "value", "# all other cases, the key is part of the section", "elif", "noaa_key", "in", "content", ":", "self", ".", "noaa_data_sorted", "[", "header", "]", "[", "noaa_key", "]", "=", "value", "except", "KeyError", ":", "# this shouldn't ever really happen, but just in case", "logger_lpd_noaa", ".", "warn", "(", "\"lpd_noaa: reorganize: KeyError: {}\"", ".", "format", "(", "noaa_key", ")", ")", "return" ]
Reorganize the keys into their proper section order for the NOAA output file DO NOT parse data tables (paleoData or chronData). We will do those separately. :param str key: :param any value: :return none:
[ "Reorganize", "the", "keys", "into", "their", "proper", "section", "order", "for", "the", "NOAA", "output", "file", "DO", "NOT", "parse", "data", "tables", "(", "paleoData", "or", "chronData", ")", ".", "We", "will", "do", "those", "separately", ".", ":", "param", "str", "key", ":", ":", "param", "any", "value", ":", ":", "return", "none", ":" ]
python
train
googleapis/google-cloud-python
logging/google/cloud/logging/_gapic.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/logging/google/cloud/logging/_gapic.py#L499-L515
def _item_to_sink(iterator, log_sink_pb): """Convert a sink protobuf to the native object. :type iterator: :class:`~google.api_core.page_iterator.Iterator` :param iterator: The iterator that is currently in use. :type log_sink_pb: :class:`.logging_config_pb2.LogSink` :param log_sink_pb: Sink protobuf returned from the API. :rtype: :class:`~google.cloud.logging.sink.Sink` :returns: The next sink in the page. """ # NOTE: LogSink message type does not have an ``Any`` field # so `MessageToDict`` can safely be used. resource = MessageToDict(log_sink_pb) return Sink.from_api_repr(resource, iterator.client)
[ "def", "_item_to_sink", "(", "iterator", ",", "log_sink_pb", ")", ":", "# NOTE: LogSink message type does not have an ``Any`` field", "# so `MessageToDict`` can safely be used.", "resource", "=", "MessageToDict", "(", "log_sink_pb", ")", "return", "Sink", ".", "from_api_repr", "(", "resource", ",", "iterator", ".", "client", ")" ]
Convert a sink protobuf to the native object. :type iterator: :class:`~google.api_core.page_iterator.Iterator` :param iterator: The iterator that is currently in use. :type log_sink_pb: :class:`.logging_config_pb2.LogSink` :param log_sink_pb: Sink protobuf returned from the API. :rtype: :class:`~google.cloud.logging.sink.Sink` :returns: The next sink in the page.
[ "Convert", "a", "sink", "protobuf", "to", "the", "native", "object", "." ]
python
train
moonlitesolutions/SolrClient
SolrClient/helpers/reindexer.py
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/helpers/reindexer.py#L153-L165
def _get_query(self, cursor):
        '''
        Query template for source Solr, sorts by id by default.
        '''
        query = {'q': '*:*',
                 'sort': 'id desc',
                 'rows': self._rows,
                 'cursorMark': cursor}
        if self._date_field:
            query['sort'] = "{} asc, id desc".format(self._date_field)
        if self._per_shard:
            query['distrib'] = 'false'
        return query
[ "def", "_get_query", "(", "self", ",", "cursor", ")", ":", "query", "=", "{", "'q'", ":", "'*:*'", ",", "'sort'", ":", "'id desc'", ",", "'rows'", ":", "self", ".", "_rows", ",", "'cursorMark'", ":", "cursor", "}", "if", "self", ".", "_date_field", ":", "query", "[", "'sort'", "]", "=", "\"{} asc, id desc\"", ".", "format", "(", "self", ".", "_date_field", ")", "if", "self", ".", "_per_shard", ":", "query", "[", "'distrib'", "]", "=", "'false'", "return", "query" ]
Query template for source Solr, sorts by id by default.
[ "Query", "template", "for", "source", "Solr", "sorts", "by", "id", "by", "default", "." ]
python
train
Shopify/shopify_python_api
shopify/base.py
https://github.com/Shopify/shopify_python_api/blob/88d3ba332fb2cd331f87517a16f2c2d4296cee90/shopify/base.py#L33-L50
def connection(cls): """HTTP connection for the current thread""" local = cls._threadlocal if not getattr(local, 'connection', None): # Make sure these variables are no longer affected by other threads. local.user = cls.user local.password = cls.password local.site = cls.site local.timeout = cls.timeout local.headers = cls.headers local.format = cls.format local.version = cls.version local.url = cls.url if cls.site is None: raise ValueError("No shopify session is active") local.connection = ShopifyConnection( cls.site, cls.user, cls.password, cls.timeout, cls.format) return local.connection
[ "def", "connection", "(", "cls", ")", ":", "local", "=", "cls", ".", "_threadlocal", "if", "not", "getattr", "(", "local", ",", "'connection'", ",", "None", ")", ":", "# Make sure these variables are no longer affected by other threads.", "local", ".", "user", "=", "cls", ".", "user", "local", ".", "password", "=", "cls", ".", "password", "local", ".", "site", "=", "cls", ".", "site", "local", ".", "timeout", "=", "cls", ".", "timeout", "local", ".", "headers", "=", "cls", ".", "headers", "local", ".", "format", "=", "cls", ".", "format", "local", ".", "version", "=", "cls", ".", "version", "local", ".", "url", "=", "cls", ".", "url", "if", "cls", ".", "site", "is", "None", ":", "raise", "ValueError", "(", "\"No shopify session is active\"", ")", "local", ".", "connection", "=", "ShopifyConnection", "(", "cls", ".", "site", ",", "cls", ".", "user", ",", "cls", ".", "password", ",", "cls", ".", "timeout", ",", "cls", ".", "format", ")", "return", "local", ".", "connection" ]
HTTP connection for the current thread
[ "HTTP", "connection", "for", "the", "current", "thread" ]
python
train
datastax/python-driver
cassandra/query.py
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/query.py#L972-L1026
def populate(self, max_wait=2.0, wait_for_complete=True, query_cl=None): """ Retrieves the actual tracing details from Cassandra and populates the attributes of this instance. Because tracing details are stored asynchronously by Cassandra, this may need to retry the session detail fetch. If the trace is still not available after `max_wait` seconds, :exc:`.TraceUnavailable` will be raised; if `max_wait` is :const:`None`, this will retry forever. `wait_for_complete=False` bypasses the wait for duration to be populated. This can be used to query events from partial sessions. `query_cl` specifies a consistency level to use for polling the trace tables, if it should be different than the session default. """ attempt = 0 start = time.time() while True: time_spent = time.time() - start if max_wait is not None and time_spent >= max_wait: raise TraceUnavailable( "Trace information was not available within %f seconds. Consider raising Session.max_trace_wait." % (max_wait,)) log.debug("Attempting to fetch trace info for trace ID: %s", self.trace_id) session_results = self._execute( SimpleStatement(self._SELECT_SESSIONS_FORMAT, consistency_level=query_cl), (self.trace_id,), time_spent, max_wait) # PYTHON-730: There is race condition that the duration mutation is written before started_at the for fast queries is_complete = session_results and session_results[0].duration is not None and session_results[0].started_at is not None if not session_results or (wait_for_complete and not is_complete): time.sleep(self._BASE_RETRY_SLEEP * (2 ** attempt)) attempt += 1 continue if is_complete: log.debug("Fetched trace info for trace ID: %s", self.trace_id) else: log.debug("Fetching parital trace info for trace ID: %s", self.trace_id) session_row = session_results[0] self.request_type = session_row.request self.duration = timedelta(microseconds=session_row.duration) if is_complete else None self.started_at = session_row.started_at self.coordinator = session_row.coordinator self.parameters = session_row.parameters # since C* 2.2 self.client = getattr(session_row, 'client', None) log.debug("Attempting to fetch trace events for trace ID: %s", self.trace_id) time_spent = time.time() - start event_results = self._execute( SimpleStatement(self._SELECT_EVENTS_FORMAT, consistency_level=query_cl), (self.trace_id,), time_spent, max_wait) log.debug("Fetched trace events for trace ID: %s", self.trace_id) self.events = tuple(TraceEvent(r.activity, r.event_id, r.source, r.source_elapsed, r.thread) for r in event_results) break
[ "def", "populate", "(", "self", ",", "max_wait", "=", "2.0", ",", "wait_for_complete", "=", "True", ",", "query_cl", "=", "None", ")", ":", "attempt", "=", "0", "start", "=", "time", ".", "time", "(", ")", "while", "True", ":", "time_spent", "=", "time", ".", "time", "(", ")", "-", "start", "if", "max_wait", "is", "not", "None", "and", "time_spent", ">=", "max_wait", ":", "raise", "TraceUnavailable", "(", "\"Trace information was not available within %f seconds. Consider raising Session.max_trace_wait.\"", "%", "(", "max_wait", ",", ")", ")", "log", ".", "debug", "(", "\"Attempting to fetch trace info for trace ID: %s\"", ",", "self", ".", "trace_id", ")", "session_results", "=", "self", ".", "_execute", "(", "SimpleStatement", "(", "self", ".", "_SELECT_SESSIONS_FORMAT", ",", "consistency_level", "=", "query_cl", ")", ",", "(", "self", ".", "trace_id", ",", ")", ",", "time_spent", ",", "max_wait", ")", "# PYTHON-730: There is race condition that the duration mutation is written before started_at the for fast queries", "is_complete", "=", "session_results", "and", "session_results", "[", "0", "]", ".", "duration", "is", "not", "None", "and", "session_results", "[", "0", "]", ".", "started_at", "is", "not", "None", "if", "not", "session_results", "or", "(", "wait_for_complete", "and", "not", "is_complete", ")", ":", "time", ".", "sleep", "(", "self", ".", "_BASE_RETRY_SLEEP", "*", "(", "2", "**", "attempt", ")", ")", "attempt", "+=", "1", "continue", "if", "is_complete", ":", "log", ".", "debug", "(", "\"Fetched trace info for trace ID: %s\"", ",", "self", ".", "trace_id", ")", "else", ":", "log", ".", "debug", "(", "\"Fetching parital trace info for trace ID: %s\"", ",", "self", ".", "trace_id", ")", "session_row", "=", "session_results", "[", "0", "]", "self", ".", "request_type", "=", "session_row", ".", "request", "self", ".", "duration", "=", "timedelta", "(", "microseconds", "=", "session_row", ".", "duration", ")", "if", "is_complete", "else", "None", "self", ".", "started_at", "=", "session_row", ".", "started_at", "self", ".", "coordinator", "=", "session_row", ".", "coordinator", "self", ".", "parameters", "=", "session_row", ".", "parameters", "# since C* 2.2", "self", ".", "client", "=", "getattr", "(", "session_row", ",", "'client'", ",", "None", ")", "log", ".", "debug", "(", "\"Attempting to fetch trace events for trace ID: %s\"", ",", "self", ".", "trace_id", ")", "time_spent", "=", "time", ".", "time", "(", ")", "-", "start", "event_results", "=", "self", ".", "_execute", "(", "SimpleStatement", "(", "self", ".", "_SELECT_EVENTS_FORMAT", ",", "consistency_level", "=", "query_cl", ")", ",", "(", "self", ".", "trace_id", ",", ")", ",", "time_spent", ",", "max_wait", ")", "log", ".", "debug", "(", "\"Fetched trace events for trace ID: %s\"", ",", "self", ".", "trace_id", ")", "self", ".", "events", "=", "tuple", "(", "TraceEvent", "(", "r", ".", "activity", ",", "r", ".", "event_id", ",", "r", ".", "source", ",", "r", ".", "source_elapsed", ",", "r", ".", "thread", ")", "for", "r", "in", "event_results", ")", "break" ]
Retrieves the actual tracing details from Cassandra and populates the attributes of this instance. Because tracing details are stored asynchronously by Cassandra, this may need to retry the session detail fetch. If the trace is still not available after `max_wait` seconds, :exc:`.TraceUnavailable` will be raised; if `max_wait` is :const:`None`, this will retry forever. `wait_for_complete=False` bypasses the wait for duration to be populated. This can be used to query events from partial sessions. `query_cl` specifies a consistency level to use for polling the trace tables, if it should be different than the session default.
[ "Retrieves", "the", "actual", "tracing", "details", "from", "Cassandra", "and", "populates", "the", "attributes", "of", "this", "instance", ".", "Because", "tracing", "details", "are", "stored", "asynchronously", "by", "Cassandra", "this", "may", "need", "to", "retry", "the", "session", "detail", "fetch", ".", "If", "the", "trace", "is", "still", "not", "available", "after", "max_wait", "seconds", ":", "exc", ":", ".", "TraceUnavailable", "will", "be", "raised", ";", "if", "max_wait", "is", ":", "const", ":", "None", "this", "will", "retry", "forever", "." ]
python
train
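A short sketch of how `QueryTrace.populate` fits into normal driver usage. The contact point is hypothetical, and retrieving the trace via `ResultSet.get_query_trace()` is an assumption about the surrounding driver API; `populate` itself is called with the arguments documented above.

from cassandra.cluster import Cluster

cluster = Cluster(["127.0.0.1"])          # hypothetical contact point
session = cluster.connect()

result = session.execute("SELECT release_version FROM system.local", trace=True)
trace = result.get_query_trace()          # assumed helper returning a QueryTrace

# Explicitly (re)fetch trace rows, waiting up to 5 seconds for Cassandra to store them.
trace.populate(max_wait=5.0)
print(trace.request_type, trace.duration, trace.coordinator)
for event in trace.events:
    print(event)

cluster.shutdown()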
ff0000/scarlet
scarlet/cms/sites.py
https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/cms/sites.py#L205-L219
def password_change(self, request): """ Handles the "change password" task -- both form display and validation. Uses the default auth views. """ from django.contrib.auth.views import password_change url = reverse('admin:cms_password_change_done') defaults = { 'post_change_redirect': url, 'template_name': 'cms/password_change_form.html', } if self.password_change_template is not None: defaults['template_name'] = self.password_change_template return password_change(request, **defaults)
[ "def", "password_change", "(", "self", ",", "request", ")", ":", "from", "django", ".", "contrib", ".", "auth", ".", "views", "import", "password_change", "url", "=", "reverse", "(", "'admin:cms_password_change_done'", ")", "defaults", "=", "{", "'post_change_redirect'", ":", "url", ",", "'template_name'", ":", "'cms/password_change_form.html'", ",", "}", "if", "self", ".", "password_change_template", "is", "not", "None", ":", "defaults", "[", "'template_name'", "]", "=", "self", ".", "password_change_template", "return", "password_change", "(", "request", ",", "*", "*", "defaults", ")" ]
Handles the "change password" task -- both form display and validation. Uses the default auth views.
[ "Handles", "the", "change", "password", "task", "--", "both", "form", "display", "and", "validation", "." ]
python
train
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/salt/nodes.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/salt/nodes.py#L107-L117
def from_etree(cls, etree_element): """ creates a ``PrimaryTextNode`` instance from the etree representation of a <nodes> element from a SaltXMI file. """ ins = SaltNode.from_etree(etree_element) # TODO: this looks dangerous, ask Stackoverflow about it! # convert SaltNode into PrimaryTextNode ins.__class__ = PrimaryTextNode.mro()[0] ins.text = extract_primary_text(etree_element) return ins
[ "def", "from_etree", "(", "cls", ",", "etree_element", ")", ":", "ins", "=", "SaltNode", ".", "from_etree", "(", "etree_element", ")", "# TODO: this looks dangerous, ask Stackoverflow about it!", "# convert SaltNode into PrimaryTextNode", "ins", ".", "__class__", "=", "PrimaryTextNode", ".", "mro", "(", ")", "[", "0", "]", "ins", ".", "text", "=", "extract_primary_text", "(", "etree_element", ")", "return", "ins" ]
creates a ``PrimaryTextNode`` instance from the etree representation of a <nodes> element from a SaltXMI file.
[ "creates", "a", "PrimaryTextNode", "instance", "from", "the", "etree", "representation", "of", "a", "<nodes", ">", "element", "from", "a", "SaltXMI", "file", "." ]
python
train
mathandy/svgpathtools
svgpathtools/path.py
https://github.com/mathandy/svgpathtools/blob/fd7348a1dfd88b65ea61da02325c6605aedf8c4f/svgpathtools/path.py#L285-L320
def bezier_unit_tangent(seg, t): """Returns the unit tangent of the segment at t. Notes ----- If you receive a RuntimeWarning, try the following: >>> import numpy >>> old_numpy_error_settings = numpy.seterr(invalid='raise') This can be undone with: >>> numpy.seterr(**old_numpy_error_settings) """ assert 0 <= t <= 1 dseg = seg.derivative(t) # Note: dseg might be numpy value, use np.seterr(invalid='raise') try: unit_tangent = dseg/abs(dseg) except (ZeroDivisionError, FloatingPointError): # This may be a removable singularity, if so we just need to compute # the limit. # Note: limit{{dseg / abs(dseg)} = sqrt(limit{dseg**2 / abs(dseg)**2}) dseg_poly = seg.poly().deriv() dseg_abs_squared_poly = (real(dseg_poly) ** 2 + imag(dseg_poly) ** 2) try: unit_tangent = csqrt(rational_limit(dseg_poly**2, dseg_abs_squared_poly, t)) except ValueError: bef = seg.poly().deriv()(t - 1e-4) aft = seg.poly().deriv()(t + 1e-4) mes = ("Unit tangent appears to not be well-defined at " "t = {}, \n".format(t) + "seg.poly().deriv()(t - 1e-4) = {}\n".format(bef) + "seg.poly().deriv()(t + 1e-4) = {}".format(aft)) raise ValueError(mes) return unit_tangent
[ "def", "bezier_unit_tangent", "(", "seg", ",", "t", ")", ":", "assert", "0", "<=", "t", "<=", "1", "dseg", "=", "seg", ".", "derivative", "(", "t", ")", "# Note: dseg might be numpy value, use np.seterr(invalid='raise')", "try", ":", "unit_tangent", "=", "dseg", "/", "abs", "(", "dseg", ")", "except", "(", "ZeroDivisionError", ",", "FloatingPointError", ")", ":", "# This may be a removable singularity, if so we just need to compute", "# the limit.", "# Note: limit{{dseg / abs(dseg)} = sqrt(limit{dseg**2 / abs(dseg)**2})", "dseg_poly", "=", "seg", ".", "poly", "(", ")", ".", "deriv", "(", ")", "dseg_abs_squared_poly", "=", "(", "real", "(", "dseg_poly", ")", "**", "2", "+", "imag", "(", "dseg_poly", ")", "**", "2", ")", "try", ":", "unit_tangent", "=", "csqrt", "(", "rational_limit", "(", "dseg_poly", "**", "2", ",", "dseg_abs_squared_poly", ",", "t", ")", ")", "except", "ValueError", ":", "bef", "=", "seg", ".", "poly", "(", ")", ".", "deriv", "(", ")", "(", "t", "-", "1e-4", ")", "aft", "=", "seg", ".", "poly", "(", ")", ".", "deriv", "(", ")", "(", "t", "+", "1e-4", ")", "mes", "=", "(", "\"Unit tangent appears to not be well-defined at \"", "\"t = {}, \\n\"", ".", "format", "(", "t", ")", "+", "\"seg.poly().deriv()(t - 1e-4) = {}\\n\"", ".", "format", "(", "bef", ")", "+", "\"seg.poly().deriv()(t + 1e-4) = {}\"", ".", "format", "(", "aft", ")", ")", "raise", "ValueError", "(", "mes", ")", "return", "unit_tangent" ]
Returns the unit tangent of the segment at t. Notes ----- If you receive a RuntimeWarning, try the following: >>> import numpy >>> old_numpy_error_settings = numpy.seterr(invalid='raise') This can be undone with: >>> numpy.seterr(**old_numpy_error_settings)
[ "Returns", "the", "unit", "tangent", "of", "the", "segment", "at", "t", "." ]
python
train
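A small, self-contained sketch exercising `bezier_unit_tangent` on a cubic segment, following the docstring's advice to raise on invalid floating-point operations so the removable-singularity branch can be reached when needed. The control points are arbitrary.

import numpy as np
from svgpathtools import CubicBezier
from svgpathtools.path import bezier_unit_tangent

old_settings = np.seterr(invalid="raise")      # as suggested in the docstring notes

seg = CubicBezier(0 + 0j, 1 + 1j, 2 + 1j, 3 + 0j)
tangent = bezier_unit_tangent(seg, 0.5)        # unit-modulus complex number
print(tangent, abs(tangent))

np.seterr(**old_settings)                      # restore previous error handling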
refinery29/chassis
chassis/services/dependency_injection/__init__.py
https://github.com/refinery29/chassis/blob/1238d5214cbb8f3e1fe7c0dc2fa72f45bf085192/chassis/services/dependency_injection/__init__.py#L27-L32
def _check_type(name, obj, expected_type):
    """ Raise a TypeError if object is not of expected type """
    if not isinstance(obj, expected_type):
        raise TypeError(
            '"%s" must be a %s' % (name, expected_type.__name__)
        )
[ "def", "_check_type", "(", "name", ",", "obj", ",", "expected_type", ")", ":", "if", "not", "isinstance", "(", "obj", ",", "expected_type", ")", ":", "raise", "TypeError", "(", "'\"%s\" must be a %s'", "%", "(", "name", ",", "expected_type", ".", "__name__", ")", ")" ]
Raise a TypeError if object is not of expected type
[ "Raise", "a", "TypeError", "if", "object", "is", "not", "of", "expected", "type" ]
python
train
PMEAL/OpenPNM
openpnm/topotools/topotools.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/topotools/topotools.py#L1471-L1498
def find_pore_to_pore_distance(network, pores1=None, pores2=None): r''' Find the distance between all pores on set one to each pore in set 2 Parameters ---------- network : OpenPNM Network Object The network object containing the pore coordinates pores1 : array_like The pore indices of the first set pores2 : array_Like The pore indices of the second set. It's OK if these indices are partially or completely duplicating ``pores``. Returns ------- A distance matrix with ``len(pores1)`` rows and ``len(pores2)`` columns. The distance between pore *i* in ``pores1`` and *j* in ``pores2`` is located at *(i, j)* and *(j, i)* in the distance matrix. ''' from scipy.spatial.distance import cdist p1 = sp.array(pores1, ndmin=1) p2 = sp.array(pores2, ndmin=1) coords = network['pore.coords'] return cdist(coords[p1], coords[p2])
[ "def", "find_pore_to_pore_distance", "(", "network", ",", "pores1", "=", "None", ",", "pores2", "=", "None", ")", ":", "from", "scipy", ".", "spatial", ".", "distance", "import", "cdist", "p1", "=", "sp", ".", "array", "(", "pores1", ",", "ndmin", "=", "1", ")", "p2", "=", "sp", ".", "array", "(", "pores2", ",", "ndmin", "=", "1", ")", "coords", "=", "network", "[", "'pore.coords'", "]", "return", "cdist", "(", "coords", "[", "p1", "]", ",", "coords", "[", "p2", "]", ")" ]
r''' Find the distance between all pores on set one to each pore in set 2 Parameters ---------- network : OpenPNM Network Object The network object containing the pore coordinates pores1 : array_like The pore indices of the first set pores2 : array_Like The pore indices of the second set. It's OK if these indices are partially or completely duplicating ``pores``. Returns ------- A distance matrix with ``len(pores1)`` rows and ``len(pores2)`` columns. The distance between pore *i* in ``pores1`` and *j* in ``pores2`` is located at *(i, j)* and *(j, i)* in the distance matrix.
[ "r", "Find", "the", "distance", "between", "all", "pores", "on", "set", "one", "to", "each", "pore", "in", "set", "2" ]
python
train
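An illustrative call on a small cubic network, assuming the OpenPNM 2.x style `openpnm` import used by this module; the network shape and pore indices are arbitrary.

import openpnm as op

pn = op.network.Cubic(shape=[3, 3, 3], spacing=1e-4)
D = op.topotools.find_pore_to_pore_distance(network=pn,
                                            pores1=[0, 1, 2],
                                            pores2=[3, 4])
print(D.shape)   # (3, 2): rows follow pores1, columns follow pores2
print(D[0, 0])   # distance between pore 0 and pore 3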
aichaos/rivescript-python
rivescript/rivescript.py
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L771-L787
def get_uservar(self, user, name): """Get a variable about a user. :param str user: The user ID to look up a variable for. :param str name: The name of the variable to get. :return: The user variable, or ``None`` or ``"undefined"``: * If the user has no data at all, this returns ``None``. * If the user doesn't have this variable set, this returns the string ``"undefined"``. * Otherwise this returns the string value of the variable. """ if name == '__lastmatch__': # Treat var `__lastmatch__` since it can't receive "undefined" value return self.last_match(user) else: return self._session.get(user, name)
[ "def", "get_uservar", "(", "self", ",", "user", ",", "name", ")", ":", "if", "name", "==", "'__lastmatch__'", ":", "# Treat var `__lastmatch__` since it can't receive \"undefined\" value", "return", "self", ".", "last_match", "(", "user", ")", "else", ":", "return", "self", ".", "_session", ".", "get", "(", "user", ",", "name", ")" ]
Get a variable about a user. :param str user: The user ID to look up a variable for. :param str name: The name of the variable to get. :return: The user variable, or ``None`` or ``"undefined"``: * If the user has no data at all, this returns ``None``. * If the user doesn't have this variable set, this returns the string ``"undefined"``. * Otherwise this returns the string value of the variable.
[ "Get", "a", "variable", "about", "a", "user", "." ]
python
train
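A quick sketch of the three documented return cases of `get_uservar`; the user IDs and variable names are made up, and loading a reply brain is omitted because it is not needed to exercise user variables.

from rivescript import RiveScript

bot = RiveScript()
bot.set_uservar("alice", "name", "Alice")

print(bot.get_uservar("alice", "name"))      # "Alice"
print(bot.get_uservar("alice", "missing"))   # "undefined": user exists, variable doesn't
print(bot.get_uservar("bob", "name"))        # None: no data for this user at all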
pyviz/holoviews
holoviews/core/dimension.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/dimension.py#L51-L68
def asdim(dimension): """Convert the input to a Dimension. Args: dimension: tuple, dict or string type to convert to Dimension Returns: A Dimension object constructed from the dimension spec. No copy is performed if the input is already a Dimension. """ if isinstance(dimension, Dimension): return dimension elif isinstance(dimension, (tuple, dict, basestring)): return Dimension(dimension) else: raise ValueError('%s type could not be interpreted as Dimension. ' 'Dimensions must be declared as a string, tuple, ' 'dictionary or Dimension type.')
[ "def", "asdim", "(", "dimension", ")", ":", "if", "isinstance", "(", "dimension", ",", "Dimension", ")", ":", "return", "dimension", "elif", "isinstance", "(", "dimension", ",", "(", "tuple", ",", "dict", ",", "basestring", ")", ")", ":", "return", "Dimension", "(", "dimension", ")", "else", ":", "raise", "ValueError", "(", "'%s type could not be interpreted as Dimension. '", "'Dimensions must be declared as a string, tuple, '", "'dictionary or Dimension type.'", ")" ]
Convert the input to a Dimension. Args: dimension: tuple, dict or string type to convert to Dimension Returns: A Dimension object constructed from the dimension spec. No copy is performed if the input is already a Dimension.
[ "Convert", "the", "input", "to", "a", "Dimension", "." ]
python
train
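A few representative conversions with `asdim`, using only the specs mentioned in the docstring (a string, a tuple, and an existing `Dimension`, which is returned without copying).

from holoviews.core.dimension import Dimension, asdim

print(asdim("time"))                      # Dimension built from a plain string
print(asdim(("temp", "Temperature")))     # (name, label) tuple spec

d = Dimension("amplitude")
print(asdim(d) is d)                      # True: existing Dimensions are not copied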
mezz64/pyEight
pyeight/user.py
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L581-L594
async def update_trend_data(self, startdate, enddate): """Update trends data json for specified time period.""" url = '{}/users/{}/trends'.format(API_URL, self.userid) params = { 'tz': self.device.tzone, 'from': startdate, 'to': enddate } trends = await self.device.api_get(url, params) if trends is None: _LOGGER.error('Unable to fetch eight trend data.') else: self.trends = trends['days']
[ "async", "def", "update_trend_data", "(", "self", ",", "startdate", ",", "enddate", ")", ":", "url", "=", "'{}/users/{}/trends'", ".", "format", "(", "API_URL", ",", "self", ".", "userid", ")", "params", "=", "{", "'tz'", ":", "self", ".", "device", ".", "tzone", ",", "'from'", ":", "startdate", ",", "'to'", ":", "enddate", "}", "trends", "=", "await", "self", ".", "device", ".", "api_get", "(", "url", ",", "params", ")", "if", "trends", "is", "None", ":", "_LOGGER", ".", "error", "(", "'Unable to fetch eight trend data.'", ")", "else", ":", "self", ".", "trends", "=", "trends", "[", "'days'", "]" ]
Update trends data json for specified time period.
[ "Update", "trends", "data", "json", "for", "specified", "time", "period", "." ]
python
train
programa-stic/barf-project
barf/core/reil/emulator/tainter.py
https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/core/reil/emulator/tainter.py#L195-L202
def __taint_move(self, instr): """Taint registers move instruction. """ # Get taint information. op0_taint = self.get_operand_taint(instr.operands[0]) # Propagate taint. self.set_operand_taint(instr.operands[2], op0_taint)
[ "def", "__taint_move", "(", "self", ",", "instr", ")", ":", "# Get taint information.", "op0_taint", "=", "self", ".", "get_operand_taint", "(", "instr", ".", "operands", "[", "0", "]", ")", "# Propagate taint.", "self", ".", "set_operand_taint", "(", "instr", ".", "operands", "[", "2", "]", ",", "op0_taint", ")" ]
Taint registers move instruction.
[ "Taint", "registers", "move", "instruction", "." ]
python
train
senaite/senaite.core
bika/lims/browser/dashboard/dashboard.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/dashboard/dashboard.py#L205-L228
def check_dashboard_cookie(self): """ Check if the dashboard cookie should exist through bikasetup configuration. If it should exist but doesn't exist yet, the function creates it with all values as default. If it should exist and already exists, it returns the value. Otherwise, the function returns None. :return: a dictionary of strings """ # Getting cookie cookie_raw = self.request.get(DASHBOARD_FILTER_COOKIE, None) # If it doesn't exist, create it with default values if cookie_raw is None: cookie_raw = self._create_raw_data() self.request.response.setCookie( DASHBOARD_FILTER_COOKIE, json.dumps(cookie_raw), quoted=False, path='/') return cookie_raw return get_strings(json.loads(cookie_raw))
[ "def", "check_dashboard_cookie", "(", "self", ")", ":", "# Getting cookie", "cookie_raw", "=", "self", ".", "request", ".", "get", "(", "DASHBOARD_FILTER_COOKIE", ",", "None", ")", "# If it doesn't exist, create it with default values", "if", "cookie_raw", "is", "None", ":", "cookie_raw", "=", "self", ".", "_create_raw_data", "(", ")", "self", ".", "request", ".", "response", ".", "setCookie", "(", "DASHBOARD_FILTER_COOKIE", ",", "json", ".", "dumps", "(", "cookie_raw", ")", ",", "quoted", "=", "False", ",", "path", "=", "'/'", ")", "return", "cookie_raw", "return", "get_strings", "(", "json", ".", "loads", "(", "cookie_raw", ")", ")" ]
Check if the dashboard cookie should exist through bikasetup configuration. If it should exist but doesn't exist yet, the function creates it with all values as default. If it should exist and already exists, it returns the value. Otherwise, the function returns None. :return: a dictionary of strings
[ "Check", "if", "the", "dashboard", "cookie", "should", "exist", "through", "bikasetup", "configuration", "." ]
python
train
openstax/cnx-epub
cnxepub/models.py
https://github.com/openstax/cnx-epub/blob/f648a309eff551b0a68a115a98ddf7858149a2ea/cnxepub/models.py#L263-L270
def bind(self, model, template="{}"): """Bind the ``model`` to the reference. This uses the model's ``id`` attribute and the given ``template`` to dynamically produce a uri when accessed. """ self._bound_model = model self._uri_template = template self._set_uri_from_bound_model()
[ "def", "bind", "(", "self", ",", "model", ",", "template", "=", "\"{}\"", ")", ":", "self", ".", "_bound_model", "=", "model", "self", ".", "_uri_template", "=", "template", "self", ".", "_set_uri_from_bound_model", "(", ")" ]
Bind the ``model`` to the reference. This uses the model's ``id`` attribute and the given ``template`` to dynamically produce a uri when accessed.
[ "Bind", "the", "model", "to", "the", "reference", ".", "This", "uses", "the", "model", "s", "id", "attribute", "and", "the", "given", "template", "to", "dynamically", "produce", "a", "uri", "when", "accessed", "." ]
python
train
labstreaminglayer/liblsl-Python
pylsl/pylsl.py
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L457-L495
def push_chunk(self, x, timestamp=0.0, pushthrough=True): """Push a list of samples into the outlet. samples -- A list of samples, either as a list of lists or a list of multiplexed values. timestamp -- Optionally the capture time of the most recent sample, in agreement with local_clock(); if omitted, the current time is used. The time stamps of other samples are automatically derived according to the sampling rate of the stream. (default 0.0) pushthrough Whether to push the chunk through to the receivers instead of buffering it with subsequent samples. Note that the chunk_size, if specified at outlet construction, takes precedence over the pushthrough flag. (default True) """ try: n_values = self.channel_count * len(x) data_buff = (self.value_type * n_values).from_buffer(x) handle_error(self.do_push_chunk(self.obj, data_buff, c_long(n_values), c_double(timestamp), c_int(pushthrough))) except TypeError: if len(x): if type(x[0]) is list: x = [v for sample in x for v in sample] if self.channel_format == cf_string: x = [v.encode('utf-8') for v in x] if len(x) % self.channel_count == 0: constructor = self.value_type*len(x) # noinspection PyCallingNonCallable handle_error(self.do_push_chunk(self.obj, constructor(*x), c_long(len(x)), c_double(timestamp), c_int(pushthrough))) else: raise ValueError("each sample must have the same number of " "channels.")
[ "def", "push_chunk", "(", "self", ",", "x", ",", "timestamp", "=", "0.0", ",", "pushthrough", "=", "True", ")", ":", "try", ":", "n_values", "=", "self", ".", "channel_count", "*", "len", "(", "x", ")", "data_buff", "=", "(", "self", ".", "value_type", "*", "n_values", ")", ".", "from_buffer", "(", "x", ")", "handle_error", "(", "self", ".", "do_push_chunk", "(", "self", ".", "obj", ",", "data_buff", ",", "c_long", "(", "n_values", ")", ",", "c_double", "(", "timestamp", ")", ",", "c_int", "(", "pushthrough", ")", ")", ")", "except", "TypeError", ":", "if", "len", "(", "x", ")", ":", "if", "type", "(", "x", "[", "0", "]", ")", "is", "list", ":", "x", "=", "[", "v", "for", "sample", "in", "x", "for", "v", "in", "sample", "]", "if", "self", ".", "channel_format", "==", "cf_string", ":", "x", "=", "[", "v", ".", "encode", "(", "'utf-8'", ")", "for", "v", "in", "x", "]", "if", "len", "(", "x", ")", "%", "self", ".", "channel_count", "==", "0", ":", "constructor", "=", "self", ".", "value_type", "*", "len", "(", "x", ")", "# noinspection PyCallingNonCallable", "handle_error", "(", "self", ".", "do_push_chunk", "(", "self", ".", "obj", ",", "constructor", "(", "*", "x", ")", ",", "c_long", "(", "len", "(", "x", ")", ")", ",", "c_double", "(", "timestamp", ")", ",", "c_int", "(", "pushthrough", ")", ")", ")", "else", ":", "raise", "ValueError", "(", "\"each sample must have the same number of \"", "\"channels.\"", ")" ]
Push a list of samples into the outlet. samples -- A list of samples, either as a list of lists or a list of multiplexed values. timestamp -- Optionally the capture time of the most recent sample, in agreement with local_clock(); if omitted, the current time is used. The time stamps of other samples are automatically derived according to the sampling rate of the stream. (default 0.0) pushthrough Whether to push the chunk through to the receivers instead of buffering it with subsequent samples. Note that the chunk_size, if specified at outlet construction, takes precedence over the pushthrough flag. (default True)
[ "Push", "a", "list", "of", "samples", "into", "the", "outlet", "." ]
python
test
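A sketch of pushing a chunk as a list of lists, the layout handled by the fallback branch above; the stream metadata values are arbitrary, and the timestamp uses `local_clock()` as the docstring requires.

from pylsl import StreamInfo, StreamOutlet, local_clock

info = StreamInfo(name="demo", type="EEG", channel_count=8,
                  nominal_srate=100, channel_format="float32",
                  source_id="demo-uid")          # arbitrary example metadata
outlet = StreamOutlet(info)

# 32 samples of 8 channels each, multiplexed per sample as a list of lists.
chunk = [[0.1 * ch for ch in range(8)] for _ in range(32)]
outlet.push_chunk(chunk, timestamp=local_clock())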
econ-ark/HARK
HARK/ConsumptionSaving/ConsMarkovModel.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/ConsumptionSaving/ConsMarkovModel.py#L502-L519
def makeLinearcFunc(self,mNrm,cNrm): ''' Make a linear interpolation to represent the (unconstrained) consumption function conditional on the current period state. Parameters ---------- mNrm : np.array Array of normalized market resource values for interpolation. cNrm : np.array Array of normalized consumption values for interpolation. Returns ------- cFuncUnc: an instance of HARK.interpolation.LinearInterp ''' cFuncUnc = LinearInterp(mNrm,cNrm,self.MPCminNow_j*self.hNrmNow_j,self.MPCminNow_j) return cFuncUnc
[ "def", "makeLinearcFunc", "(", "self", ",", "mNrm", ",", "cNrm", ")", ":", "cFuncUnc", "=", "LinearInterp", "(", "mNrm", ",", "cNrm", ",", "self", ".", "MPCminNow_j", "*", "self", ".", "hNrmNow_j", ",", "self", ".", "MPCminNow_j", ")", "return", "cFuncUnc" ]
Make a linear interpolation to represent the (unconstrained) consumption function conditional on the current period state. Parameters ---------- mNrm : np.array Array of normalized market resource values for interpolation. cNrm : np.array Array of normalized consumption values for interpolation. Returns ------- cFuncUnc: an instance of HARK.interpolation.LinearInterp
[ "Make", "a", "linear", "interpolation", "to", "represent", "the", "(", "unconstrained", ")", "consumption", "function", "conditional", "on", "the", "current", "period", "state", "." ]
python
train
kennethreitz/records
records.py
https://github.com/kennethreitz/records/blob/ecd857266c5e7830d657cbe0196816314790563b/records.py#L207-L226
def first(self, default=None, as_dict=False, as_ordereddict=False): """Returns a single record for the RecordCollection, or `default`. If `default` is an instance or subclass of Exception, then raise it instead of returning it.""" # Try to get a record, or return/raise default. try: record = self[0] except IndexError: if isexception(default): raise default return default # Cast and return. if as_dict: return record.as_dict() elif as_ordereddict: return record.as_dict(ordered=True) else: return record
[ "def", "first", "(", "self", ",", "default", "=", "None", ",", "as_dict", "=", "False", ",", "as_ordereddict", "=", "False", ")", ":", "# Try to get a record, or return/raise default.", "try", ":", "record", "=", "self", "[", "0", "]", "except", "IndexError", ":", "if", "isexception", "(", "default", ")", ":", "raise", "default", "return", "default", "# Cast and return.", "if", "as_dict", ":", "return", "record", ".", "as_dict", "(", ")", "elif", "as_ordereddict", ":", "return", "record", ".", "as_dict", "(", "ordered", "=", "True", ")", "else", ":", "return", "record" ]
Returns a single record for the RecordCollection, or `default`. If `default` is an instance or subclass of Exception, then raise it instead of returning it.
[ "Returns", "a", "single", "record", "for", "the", "RecordCollection", "or", "default", ".", "If", "default", "is", "an", "instance", "or", "subclass", "of", "Exception", "then", "raise", "it", "instead", "of", "returning", "it", "." ]
python
train
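A compact example of `first` against an in-memory SQLite database; the connection string and table are only for illustration.

import records

db = records.Database("sqlite:///:memory:")
db.query("CREATE TABLE users (name TEXT)")
db.query("INSERT INTO users (name) VALUES ('Ada')")

rows = db.query("SELECT name FROM users")
print(rows.first())                      # first Record in the collection
print(rows.first(as_dict=True))          # same row as a plain dict

empty = db.query("SELECT name FROM users WHERE name = 'nobody'")
print(empty.first(default="no match"))   # default returned when nothing matched
# Passing an Exception instance or subclass as `default` would raise it instead.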
alfred82santa/dirty-models
dirty_models/models.py
https://github.com/alfred82santa/dirty-models/blob/354becdb751b21f673515eae928c256c7e923c50/dirty_models/models.py#L650-L666
def delete_attr_by_path(self, field_path): """ It deletes fields looked up by field path. Field path is dot-formatted string path: ``parent_field.child_field``. :param field_path: field path. It allows ``*`` as wildcard. :type field_path: str """ fields, next_field = self._get_fields_by_path(field_path) for field in fields: if next_field: try: self.get_field_value(field).delete_attr_by_path(next_field) except AttributeError: pass else: self.delete_field_value(field)
[ "def", "delete_attr_by_path", "(", "self", ",", "field_path", ")", ":", "fields", ",", "next_field", "=", "self", ".", "_get_fields_by_path", "(", "field_path", ")", "for", "field", "in", "fields", ":", "if", "next_field", ":", "try", ":", "self", ".", "get_field_value", "(", "field", ")", ".", "delete_attr_by_path", "(", "next_field", ")", "except", "AttributeError", ":", "pass", "else", ":", "self", ".", "delete_field_value", "(", "field", ")" ]
It deletes fields looked up by field path. Field path is dot-formatted string path: ``parent_field.child_field``. :param field_path: field path. It allows ``*`` as wildcard. :type field_path: str
[ "It", "deletes", "fields", "looked", "up", "by", "field", "path", ".", "Field", "path", "is", "dot", "-", "formatted", "string", "path", ":", "parent_field", ".", "child_field", "." ]
python
train
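A hedged sketch of `delete_attr_by_path` on a small nested model; the model classes and field names are invented for the example, and the field imports reflect the usual dirty-models layout.

from dirty_models.models import BaseModel
from dirty_models.fields import ModelField, StringField

class Child(BaseModel):
    name = StringField()

class Parent(BaseModel):
    child = ModelField(model_class=Child)

p = Parent(child={"name": "inner"})
p.delete_attr_by_path("child.name")      # dot-formatted path into the nested model
print(p.export_data())                   # the nested 'name' field is gone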
artefactual-labs/agentarchives
agentarchives/archivesspace/client.py
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/archivesspace/client.py#L287-L352
def edit_record(self, new_record): """ Update a record in ArchivesSpace using the provided new_record. The format of new_record is identical to the format returned by get_resource_component_and_children and related methods; consult the documentation for that method in ArchivistsToolkitClient to see the format. This means it's possible, for example, to request a record, modify the returned dict, and pass that dict to this method to update the server. Currently supported fields are: * title * targetfield * notes * start_date * end_date * date_expression :raises ValueError: if the 'id' field isn't specified, or no fields to edit were specified. """ try: record_id = new_record["id"] except KeyError: raise ValueError("No record ID provided!") record = self.get_record(record_id) # TODO: add more fields? field_map = {"title": "title", "level": "levelOfDescription"} fields_updated = False for field, targetfield in field_map.items(): try: record[targetfield] = new_record[field] fields_updated = True except KeyError: continue if self._process_notes(record, new_record): fields_updated = True # Create dates object if any of the date fields is populated if ( "start_date" in new_record or "end_date" in new_record or "date_expression" in new_record ): date = { "jsonmodel_type": "date", "date_type": "inclusive", "label": "creation", } if "date_expression" in new_record: date["expression"] = new_record["date_expression"] if "start_date" in new_record: date["begin"] = new_record["start_date"] if "end_date" in new_record: date["end"] = new_record["end_date"] if len(record["dates"]) == 0: record["dates"] = [date] else: record["dates"][0] = date fields_updated = True if not fields_updated: raise ValueError("No fields to update specified!") self._post(record_id, data=json.dumps(record))
[ "def", "edit_record", "(", "self", ",", "new_record", ")", ":", "try", ":", "record_id", "=", "new_record", "[", "\"id\"", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "\"No record ID provided!\"", ")", "record", "=", "self", ".", "get_record", "(", "record_id", ")", "# TODO: add more fields?", "field_map", "=", "{", "\"title\"", ":", "\"title\"", ",", "\"level\"", ":", "\"levelOfDescription\"", "}", "fields_updated", "=", "False", "for", "field", ",", "targetfield", "in", "field_map", ".", "items", "(", ")", ":", "try", ":", "record", "[", "targetfield", "]", "=", "new_record", "[", "field", "]", "fields_updated", "=", "True", "except", "KeyError", ":", "continue", "if", "self", ".", "_process_notes", "(", "record", ",", "new_record", ")", ":", "fields_updated", "=", "True", "# Create dates object if any of the date fields is populated", "if", "(", "\"start_date\"", "in", "new_record", "or", "\"end_date\"", "in", "new_record", "or", "\"date_expression\"", "in", "new_record", ")", ":", "date", "=", "{", "\"jsonmodel_type\"", ":", "\"date\"", ",", "\"date_type\"", ":", "\"inclusive\"", ",", "\"label\"", ":", "\"creation\"", ",", "}", "if", "\"date_expression\"", "in", "new_record", ":", "date", "[", "\"expression\"", "]", "=", "new_record", "[", "\"date_expression\"", "]", "if", "\"start_date\"", "in", "new_record", ":", "date", "[", "\"begin\"", "]", "=", "new_record", "[", "\"start_date\"", "]", "if", "\"end_date\"", "in", "new_record", ":", "date", "[", "\"end\"", "]", "=", "new_record", "[", "\"end_date\"", "]", "if", "len", "(", "record", "[", "\"dates\"", "]", ")", "==", "0", ":", "record", "[", "\"dates\"", "]", "=", "[", "date", "]", "else", ":", "record", "[", "\"dates\"", "]", "[", "0", "]", "=", "date", "fields_updated", "=", "True", "if", "not", "fields_updated", ":", "raise", "ValueError", "(", "\"No fields to update specified!\"", ")", "self", ".", "_post", "(", "record_id", ",", "data", "=", "json", ".", "dumps", "(", "record", ")", ")" ]
Update a record in ArchivesSpace using the provided new_record. The format of new_record is identical to the format returned by get_resource_component_and_children and related methods; consult the documentation for that method in ArchivistsToolkitClient to see the format. This means it's possible, for example, to request a record, modify the returned dict, and pass that dict to this method to update the server. Currently supported fields are: * title * targetfield * notes * start_date * end_date * date_expression :raises ValueError: if the 'id' field isn't specified, or no fields to edit were specified.
[ "Update", "a", "record", "in", "ArchivesSpace", "using", "the", "provided", "new_record", "." ]
python
train
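A sketch of the `new_record` shape accepted by `edit_record`, using only keys handled in the code above. The client import path, constructor arguments, connection details, and the record reference are all assumptions made for illustration.

from agentarchives.archivesspace import ArchivesSpaceClient

client = ArchivesSpaceClient("http://localhost", "admin", "admin")  # hypothetical credentials

new_record = {
    "id": "/repositories/2/archival_objects/42",   # hypothetical record reference
    "title": "Corrected title",
    "start_date": "2000-01-01",
    "end_date": "2000-12-31",
    "date_expression": "2000",
}
client.edit_record(new_record)   # raises ValueError if 'id' or editable fields are missing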
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/tailf_webui.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/tailf_webui.py#L420-L431
def webui_data_stores_data_store_key(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") webui = ET.SubElement(config, "webui", xmlns="http://tail-f.com/ns/webui") data_stores = ET.SubElement(webui, "data-stores") data_store = ET.SubElement(data_stores, "data-store") key = ET.SubElement(data_store, "key") key.text = kwargs.pop('key') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "webui_data_stores_data_store_key", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "webui", "=", "ET", ".", "SubElement", "(", "config", ",", "\"webui\"", ",", "xmlns", "=", "\"http://tail-f.com/ns/webui\"", ")", "data_stores", "=", "ET", ".", "SubElement", "(", "webui", ",", "\"data-stores\"", ")", "data_store", "=", "ET", ".", "SubElement", "(", "data_stores", ",", "\"data-store\"", ")", "key", "=", "ET", ".", "SubElement", "(", "data_store", ",", "\"key\"", ")", "key", ".", "text", "=", "kwargs", ".", "pop", "(", "'key'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
night-crawler/django-docker-helpers
django_docker_helpers/config/__init__.py
https://github.com/night-crawler/django-docker-helpers/blob/b64f8009101a8eb61d3841124ba19e3ab881aa2f/django_docker_helpers/config/__init__.py#L118-L169
def get(self, variable_path: str, default: t.Optional[t.Any] = None, coerce_type: t.Optional[t.Type] = None, coercer: t.Optional[t.Callable] = None, required: bool = False, **kwargs): """ Tries to read a ``variable_path`` from each of the passed parsers. It stops if read was successful and returns a retrieved value. If none of the parsers contain a value for the specified path it returns ``default``. :param variable_path: a path to variable in config :param default: a default value if ``variable_path`` is not present anywhere :param coerce_type: cast a result to a specified type :param coercer: perform the type casting with specified callback :param required: raise ``RequiredValueIsEmpty`` if no ``default`` and no result :param kwargs: additional options to all parsers :return: **the first successfully read** value from the list of parser instances or ``default`` :raises config.exceptions.RequiredValueIsEmpty: if nothing is read,``required`` flag is set, and there's no ``default`` specified """ for p in self.parsers: try: val = p.get( variable_path, default=self.sentinel, coerce_type=coerce_type, coercer=coercer, **kwargs ) if val != self.sentinel: self.enqueue(variable_path, p, val) return val except Exception as e: if not self.silent: raise if self.suppress_logs: continue self.logger.error('Parser {0} cannot get key `{1}`: {2}'.format( p.__class__.__name__, variable_path, str(e) )) self.enqueue(variable_path, value=default) if not default and required: raise exceptions.RequiredValueIsEmpty( 'No default provided and no value read for `{0}`'.format(variable_path)) return default
[ "def", "get", "(", "self", ",", "variable_path", ":", "str", ",", "default", ":", "t", ".", "Optional", "[", "t", ".", "Any", "]", "=", "None", ",", "coerce_type", ":", "t", ".", "Optional", "[", "t", ".", "Type", "]", "=", "None", ",", "coercer", ":", "t", ".", "Optional", "[", "t", ".", "Callable", "]", "=", "None", ",", "required", ":", "bool", "=", "False", ",", "*", "*", "kwargs", ")", ":", "for", "p", "in", "self", ".", "parsers", ":", "try", ":", "val", "=", "p", ".", "get", "(", "variable_path", ",", "default", "=", "self", ".", "sentinel", ",", "coerce_type", "=", "coerce_type", ",", "coercer", "=", "coercer", ",", "*", "*", "kwargs", ")", "if", "val", "!=", "self", ".", "sentinel", ":", "self", ".", "enqueue", "(", "variable_path", ",", "p", ",", "val", ")", "return", "val", "except", "Exception", "as", "e", ":", "if", "not", "self", ".", "silent", ":", "raise", "if", "self", ".", "suppress_logs", ":", "continue", "self", ".", "logger", ".", "error", "(", "'Parser {0} cannot get key `{1}`: {2}'", ".", "format", "(", "p", ".", "__class__", ".", "__name__", ",", "variable_path", ",", "str", "(", "e", ")", ")", ")", "self", ".", "enqueue", "(", "variable_path", ",", "value", "=", "default", ")", "if", "not", "default", "and", "required", ":", "raise", "exceptions", ".", "RequiredValueIsEmpty", "(", "'No default provided and no value read for `{0}`'", ".", "format", "(", "variable_path", ")", ")", "return", "default" ]
Tries to read a ``variable_path`` from each of the passed parsers. It stops if read was successful and returns a retrieved value. If none of the parsers contain a value for the specified path it returns ``default``. :param variable_path: a path to variable in config :param default: a default value if ``variable_path`` is not present anywhere :param coerce_type: cast a result to a specified type :param coercer: perform the type casting with specified callback :param required: raise ``RequiredValueIsEmpty`` if no ``default`` and no result :param kwargs: additional options to all parsers :return: **the first successfully read** value from the list of parser instances or ``default`` :raises config.exceptions.RequiredValueIsEmpty: if nothing is read,``required`` flag is set, and there's no ``default`` specified
[ "Tries", "to", "read", "a", "variable_path", "from", "each", "of", "the", "passed", "parsers", ".", "It", "stops", "if", "read", "was", "successful", "and", "returns", "a", "retrieved", "value", ".", "If", "none", "of", "the", "parsers", "contain", "a", "value", "for", "the", "specified", "path", "it", "returns", "default", "." ]
python
train
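A hedged sketch of the fallback behaviour of `get` across an ordered list of parsers. The `ConfigLoader` name, the `EnvironmentParser` backend, its `scope` argument, and the environment-variable naming are assumptions about the package layout; only the `get` signature itself comes from the record above.

import os
from django_docker_helpers.config import ConfigLoader
from django_docker_helpers.config.backends import EnvironmentParser

os.environ["MYPROJECT__SERVER__PORT"] = "8000"     # assumed variable layout for the scope

configure = ConfigLoader(parsers=[EnvironmentParser(scope="myproject")])

port = configure.get("server.port", default=8000, coerce_type=int)
debug = configure.get("debug", default=False, coerce_type=bool)
# configure.get("secret_key", required=True) would raise RequiredValueIsEmpty if unset.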
vtraag/louvain-igraph
src/functions.py
https://github.com/vtraag/louvain-igraph/blob/8de2c3bad736a9deea90b80f104d8444769d331f/src/functions.py#L81-L136
def find_partition_multiplex(graphs, partition_type, **kwargs): """ Detect communities for multiplex graphs. Each graph should be defined on the same set of vertices, only the edges may differ for different graphs. See :func:`Optimiser.optimise_partition_multiplex` for a more detailed explanation. Parameters ---------- graphs : list of :class:`ig.Graph` List of :class:`louvain.VertexPartition` layers to optimise. partition_type : type of :class:`MutableVertexPartition` The type of partition to use for optimisation (identical for all graphs). **kwargs Remaining keyword arguments, passed on to constructor of ``partition_type``. Returns ------- list of int membership of nodes. float Improvement in quality of combined partitions, see :func:`Optimiser.optimise_partition_multiplex`. Notes ----- We don't return a partition in this case because a partition is always defined on a single graph. We therefore simply return the membership (which is the same for all layers). See Also -------- :func:`Optimiser.optimise_partition_multiplex` :func:`slices_to_layers` Examples -------- >>> n = 100 >>> G_1 = ig.Graph.Lattice([n], 1) >>> G_2 = ig.Graph.Lattice([n], 1) >>> membership, improvement = louvain.find_partition_multiplex([G_1, G_2], ... louvain.ModularityVertexPartition) """ n_layers = len(graphs) partitions = [] layer_weights = [1]*n_layers for graph in graphs: partitions.append(partition_type(graph, **kwargs)) optimiser = Optimiser() improvement = optimiser.optimise_partition_multiplex(partitions, layer_weights) return partitions[0].membership, improvement
[ "def", "find_partition_multiplex", "(", "graphs", ",", "partition_type", ",", "*", "*", "kwargs", ")", ":", "n_layers", "=", "len", "(", "graphs", ")", "partitions", "=", "[", "]", "layer_weights", "=", "[", "1", "]", "*", "n_layers", "for", "graph", "in", "graphs", ":", "partitions", ".", "append", "(", "partition_type", "(", "graph", ",", "*", "*", "kwargs", ")", ")", "optimiser", "=", "Optimiser", "(", ")", "improvement", "=", "optimiser", ".", "optimise_partition_multiplex", "(", "partitions", ",", "layer_weights", ")", "return", "partitions", "[", "0", "]", ".", "membership", ",", "improvement" ]
Detect communities for multiplex graphs. Each graph should be defined on the same set of vertices, only the edges may differ for different graphs. See :func:`Optimiser.optimise_partition_multiplex` for a more detailed explanation. Parameters ---------- graphs : list of :class:`ig.Graph` List of :class:`louvain.VertexPartition` layers to optimise. partition_type : type of :class:`MutableVertexPartition` The type of partition to use for optimisation (identical for all graphs). **kwargs Remaining keyword arguments, passed on to constructor of ``partition_type``. Returns ------- list of int membership of nodes. float Improvement in quality of combined partitions, see :func:`Optimiser.optimise_partition_multiplex`. Notes ----- We don't return a partition in this case because a partition is always defined on a single graph. We therefore simply return the membership (which is the same for all layers). See Also -------- :func:`Optimiser.optimise_partition_multiplex` :func:`slices_to_layers` Examples -------- >>> n = 100 >>> G_1 = ig.Graph.Lattice([n], 1) >>> G_2 = ig.Graph.Lattice([n], 1) >>> membership, improvement = louvain.find_partition_multiplex([G_1, G_2], ... louvain.ModularityVertexPartition)
[ "Detect", "communities", "for", "multiplex", "graphs", "." ]
python
train
glitchassassin/lackey
lackey/RegionMatching.py
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L222-L231
def setLocation(self, location): """ Change the upper left-hand corner to a new ``Location`` Doesn't change width or height """ if not location or not isinstance(location, Location): raise ValueError("setLocation expected a Location object") self.x = location.x self.y = location.y return self
[ "def", "setLocation", "(", "self", ",", "location", ")", ":", "if", "not", "location", "or", "not", "isinstance", "(", "location", ",", "Location", ")", ":", "raise", "ValueError", "(", "\"setLocation expected a Location object\"", ")", "self", ".", "x", "=", "location", ".", "x", "self", ".", "y", "=", "location", ".", "y", "return", "self" ]
Change the upper left-hand corner to a new ``Location``

Doesn't change width or height
[ "Change", "the", "upper", "left", "-", "hand", "corner", "to", "a", "new", "Location" ]
python
train
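A small, hedged sketch of how setLocation might be called. The Region(x, y, w, h) and Location(x, y) constructors are assumptions drawn from the Sikuli-style API this file mirrors; they are not shown in this entry.

import lackey  # assumption: lackey installed and a display available

region = lackey.Region(0, 0, 200, 100)       # assumed constructor: x, y, w, h
region.setLocation(lackey.Location(50, 75))  # moves the top-left corner only
print(region.x, region.y)                    # -> 50 75; width/height unchanged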
adamrehn/slidingwindow
slidingwindow/SlidingWindow.py
https://github.com/adamrehn/slidingwindow/blob/17ea9395b48671e8cb7321b9510c6b25fec5e45f/slidingwindow/SlidingWindow.py#L40-L44
def setRect(self, rect):
    """
    Sets the window bounds from a tuple of (x,y,w,h)
    """
    self.x, self.y, self.w, self.h = rect
[ "def", "setRect", "(", "self", ",", "rect", ")", ":", "self", ".", "x", ",", "self", ".", "y", ",", "self", ".", "w", ",", "self", ".", "h", "=", "rect" ]
Sets the window bounds from a tuple of (x,y,w,h)
[ "Sets", "the", "window", "bounds", "from", "a", "tuple", "of", "(", "x", "y", "w", "h", ")" ]
python
train
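The method is a one-line tuple unpacking. The stand-in class below is hypothetical and only illustrates the (x, y, w, h) convention, since constructing a real SlidingWindow needs arguments not shown in this entry.

class DemoWindow:
    # Hypothetical stand-in mirroring setRect above; not the real SlidingWindow.
    def setRect(self, rect):
        self.x, self.y, self.w, self.h = rect

win = DemoWindow()
win.setRect((10, 20, 64, 64))
print(win.x, win.y, win.w, win.h)  # 10 20 64 64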
Ezhil-Language-Foundation/open-tamil
tamil/utf8.py
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/utf8.py#L265-L270
def uyirmei_constructed(mei_idx, uyir_idx):
    """ construct uyirmei letter given mei index and uyir index """
    idx, idy = mei_idx, uyir_idx
    assert (idy >= 0 and idy < uyir_len())
    assert (idx >= 0 and idx < 6 + mei_len())
    return grantha_agaram_letters[mei_idx] + accent_symbols[uyir_idx]
[ "def", "uyirmei_constructed", "(", "mei_idx", ",", "uyir_idx", ")", ":", "idx", ",", "idy", "=", "mei_idx", ",", "uyir_idx", "assert", "(", "idy", ">=", "0", "and", "idy", "<", "uyir_len", "(", ")", ")", "assert", "(", "idx", ">=", "0", "and", "idx", "<", "6", "+", "mei_len", "(", ")", ")", "return", "grantha_agaram_letters", "[", "mei_idx", "]", "+", "accent_symbols", "[", "uyir_idx", "]" ]
construct uyirmei letter given mei index and uyir index
[ "construct", "uyirmei", "letter", "given", "mei", "index", "and", "uyir", "index" ]
python
train
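A hedged sketch of calling the helper through the tamil.utf8 module named in the path above. The concrete indices are arbitrary examples chosen to satisfy the function's assertions, not values taken from this entry.

from tamil import utf8  # assumption: open-tamil installed

# Indices must satisfy the assertions: 0 <= uyir_idx < utf8.uyir_len()
# and 0 <= mei_idx < 6 + utf8.mei_len().
letter = utf8.uyirmei_constructed(6, 1)
print(letter)  # a consonant+vowel (uyirmei) combination letter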
tensorflow/probability
tensorflow_probability/python/sts/semilocal_linear_trend.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/semilocal_linear_trend.py#L266-L296
def semilocal_linear_trend_transition_noise(level_scale, slope_mean,
                                            slope_scale, autoregressive_coef):
  """Build the transition noise model for a semi-local linear trend model."""

  # At each timestep, the stochasticity of `level` and `slope` are given
  # by `level_scale` and `slope_scale` respectively.
  broadcast_batch_shape = dist_util.get_broadcast_shape(
      level_scale, slope_mean, slope_scale, autoregressive_coef)
  broadcast_ones = tf.ones(broadcast_batch_shape, dtype=level_scale.dtype)
  scale_diag = tf.stack([level_scale * broadcast_ones,
                         slope_scale * broadcast_ones],
                        axis=-1)

  # We additionally fold in a bias term implementing the nonzero `slope_mean`.
  # The overall `slope` update is (from `SemiLocalLinearTrend` docstring)
  #   slope[t] = (slope_mean +
  #               autoregressive_coef * (slope[t-1] - slope_mean) +
  #               Normal(0., slope_scale))
  # which we rewrite as
  #   slope[t] = (
  #     autoregressive_coef * slope[t-1] +                          # linear transition
  #     Normal(loc=slope_mean - autoregressive_coef * slope_mean,   # noise bias
  #            scale=slope_scale))                                  # noise scale
  bias = tf.stack([tf.zeros_like(broadcast_ones),
                   slope_mean * (1 - autoregressive_coef) * broadcast_ones],
                  axis=-1)
  return tfd.MultivariateNormalDiag(
      loc=bias,
      scale_diag=scale_diag)
[ "def", "semilocal_linear_trend_transition_noise", "(", "level_scale", ",", "slope_mean", ",", "slope_scale", ",", "autoregressive_coef", ")", ":", "# At each timestep, the stochasticity of `level` and `slope` are given", "# by `level_scale` and `slope_scale` respectively.", "broadcast_batch_shape", "=", "dist_util", ".", "get_broadcast_shape", "(", "level_scale", ",", "slope_mean", ",", "slope_scale", ",", "autoregressive_coef", ")", "broadcast_ones", "=", "tf", ".", "ones", "(", "broadcast_batch_shape", ",", "dtype", "=", "level_scale", ".", "dtype", ")", "scale_diag", "=", "tf", ".", "stack", "(", "[", "level_scale", "*", "broadcast_ones", ",", "slope_scale", "*", "broadcast_ones", "]", ",", "axis", "=", "-", "1", ")", "# We additionally fold in a bias term implementing the nonzero `slope_mean`.", "# The overall `slope` update is (from `SemiLocalLinearTrend` docstring)", "# slope[t] = (slope_mean +", "# autoregressive_coef * (slope[t-1] - slope_mean) +", "# Normal(0., slope_scale))", "# which we rewrite as", "# slope[t] = (", "# autoregressive_coef * slope[t-1] + # linear transition", "# Normal(loc=slope_mean - autoregressive_coef * slope_mean, # noise bias", "# scale=slope_scale)) # noise scale", "bias", "=", "tf", ".", "stack", "(", "[", "tf", ".", "zeros_like", "(", "broadcast_ones", ")", ",", "slope_mean", "*", "(", "1", "-", "autoregressive_coef", ")", "*", "broadcast_ones", "]", ",", "axis", "=", "-", "1", ")", "return", "tfd", ".", "MultivariateNormalDiag", "(", "loc", "=", "bias", ",", "scale_diag", "=", "scale_diag", ")" ]
Build the transition noise model for a semi-local linear trend model.
[ "Build", "the", "transition", "noise", "model", "for", "a", "semi", "-", "local", "linear", "trend", "model", "." ]
python
test
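A hedged sketch of exercising the helper directly with scalar parameters. In practice it is built internally by tfp.sts.SemiLocalLinearTrend; the import path below is an assumption based on the file path in this entry, and the parameter values are illustrative only.

import tensorflow as tf
from tensorflow_probability.python.sts.semilocal_linear_trend import (
    semilocal_linear_trend_transition_noise)  # assumed private import path

noise = semilocal_linear_trend_transition_noise(
    level_scale=tf.constant(0.1),
    slope_mean=tf.constant(0.05),
    slope_scale=tf.constant(0.02),
    autoregressive_coef=tf.constant(0.9))

# The noise is a 2-D diagonal Gaussian over [level, slope]; the slope
# component carries the bias slope_mean * (1 - autoregressive_coef) = 0.005.
print(noise.event_shape)  # (2,)
print(noise.mean())       # ~[0.    0.005]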
aetros/aetros-cli
aetros/utils/__init__.py
https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/utils/__init__.py#L877-L900
def human_size(size_bytes, precision=0):
    """
    Format a size in bytes into a 'human' file size, e.g. bytes, KB, MB, GB, TB, PB
    Note that bytes/KB will be reported in whole numbers but MB and above will have greater precision
    e.g. 1 byte, 43 bytes, 443 KB, 4.3 MB, 4.43 GB, etc
    """
    if size_bytes == 1:  # because I really hate unnecessary plurals
        return "1 byte"

    suffixes_table = [('bytes', 0), ('KB', 0), ('MB', 1), ('GB', 2), ('TB', 2), ('PB', 2)]

    num = float(size_bytes)
    for suffix, precision in suffixes_table:
        if num < 1024.0:
            break
        num /= 1024.0

    if precision == 0:
        formatted_size = "%d" % num
    else:
        formatted_size = str(round(num, ndigits=precision))

    return "%s %s" % (formatted_size, suffix)
[ "def", "human_size", "(", "size_bytes", ",", "precision", "=", "0", ")", ":", "if", "size_bytes", "==", "1", ":", "# because I really hate unnecessary plurals", "return", "\"1 byte\"", "suffixes_table", "=", "[", "(", "'bytes'", ",", "0", ")", ",", "(", "'KB'", ",", "0", ")", ",", "(", "'MB'", ",", "1", ")", ",", "(", "'GB'", ",", "2", ")", ",", "(", "'TB'", ",", "2", ")", ",", "(", "'PB'", ",", "2", ")", "]", "num", "=", "float", "(", "size_bytes", ")", "for", "suffix", ",", "precision", "in", "suffixes_table", ":", "if", "num", "<", "1024.0", ":", "break", "num", "/=", "1024.0", "if", "precision", "==", "0", ":", "formatted_size", "=", "\"%d\"", "%", "num", "else", ":", "formatted_size", "=", "str", "(", "round", "(", "num", ",", "ndigits", "=", "precision", ")", ")", "return", "\"%s %s\"", "%", "(", "formatted_size", ",", "suffix", ")" ]
Format a size in bytes into a 'human' file size, e.g. bytes, KB, MB, GB, TB, PB
Note that bytes/KB will be reported in whole numbers but MB and above will have greater precision
e.g. 1 byte, 43 bytes, 443 KB, 4.3 MB, 4.43 GB, etc
[ "Format", "a", "size", "in", "bytes", "into", "a", "human", "file", "size", "e", ".", "g", ".", "bytes", "KB", "MB", "GB", "TB", "PB", "Note", "that", "bytes", "/", "KB", "will", "be", "reported", "in", "whole", "numbers", "but", "MB", "and", "above", "will", "have", "greater", "precision", "e", ".", "g", ".", "1", "byte", "43", "bytes", "443", "KB", "4", ".", "3", "MB", "4", ".", "43", "GB", "etc" ]
python
train
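A quick usage sketch of human_size as defined above; the expected outputs follow directly from the suffix table and its per-suffix precision.

print(human_size(1))           # '1 byte'
print(human_size(43))          # '43 bytes'
print(human_size(443 * 1024))  # '443 KB'
print(human_size(4508876))     # roughly '4.3 MB' (MB uses one decimal place)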