Dataset columns (name: type, observed range):
repo: string (lengths 7-55)
path: string (lengths 4-223)
url: string (lengths 87-315)
code: string (lengths 75-104k)
code_tokens: list
docstring: string (lengths 1-46.9k)
docstring_tokens: list
language: string (1 class)
partition: string (3 classes)
avg_line_len: float64 (7.91-980)
Staffjoy/client_python
staffjoy/resource.py
https://github.com/Staffjoy/client_python/blob/e8811b0c06651a15e691c96cbfd41e7da4f7f213/staffjoy/resource.py#L180-L204
def create(cls, parent=None, **kwargs):
    """Create an object and return it"""
    if parent is None:
        raise Exception("Parent class is required")

    route = copy(parent.route)
    if cls.ID_NAME is not None:
        route[cls.ID_NAME] = ""

    obj = cls(key=parent.key, route=route, config=parent.config)

    start = datetime.now()
    response = requests.post(obj._url(), auth=(obj.key, ""), data=kwargs)
    cls._delay_for_ratelimits(start)

    if response.status_code not in cls.TRUTHY_CODES:
        return cls._handle_request_exception(response)

    # No envelope on post requests
    data = response.json()
    obj.route[obj.ID_NAME] = data.get("id", data.get(obj.ID_NAME))
    obj.data = data
    return obj
Create an object and return it
python
train
31.04
insilichem/ommprotocol
ommprotocol/io.py
https://github.com/insilichem/ommprotocol/blob/7283fddba7203e5ac3542fdab41fc1279d3b444e/ommprotocol/io.py#L582-L613
def from_csv(cls, path):
    """
    Get box vectors from comma-separated values in file `path`.

    The csv file must contain only one line, which in turn can contain
    three values (orthogonal vectors) or nine values (triclinic box).

    The values should be in nanometers.

    Parameters
    ----------
    path : str
        Path to CSV file

    Returns
    -------
    vectors : simtk.unit.Quantity([3, 3], unit=nanometers)
    """
    with open(path) as f:
        # list() is needed so `fields` can be measured and indexed below;
        # under Python 3, map() returns a one-shot iterator and len() fails.
        fields = list(map(float, next(f).split(',')))
    if len(fields) == 3:
        return u.Quantity([[fields[0], 0, 0],
                           [0, fields[1], 0],
                           [0, 0, fields[2]]], unit=u.nanometers)
    elif len(fields) == 9:
        return u.Quantity([fields[0:3],
                           fields[3:6],
                           fields[6:9]], unit=u.nanometers)
    else:
        raise ValueError('This type of CSV is not supported. Please '
                         'provide a comma-separated list of three or nine '
                         'floats in a single-line file.')
Get box vectors from comma-separated values in file `path`.

The csv file must contain only one line, which in turn can contain three values (orthogonal vectors) or nine values (triclinic box).

The values should be in nanometers.

Parameters
----------
path : str
    Path to CSV file

Returns
-------
vectors : simtk.unit.Quantity([3, 3], unit=nanometers)
python
train
36.15625
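A quick usage sketch for `from_csv` above. It assumes `simtk.unit` is importable as `u` (as in the snippet); since the body never touches `cls`, the classmethod can be exercised as a plain function here:

with open('box.csv', 'w') as f:
    f.write('4.0,4.0,4.0\n')   # hypothetical orthogonal box, 4 nm per side

vectors = from_csv(None, 'box.csv')   # cls is unused by the body, so None suffices
# -> a 3x3 simtk.unit.Quantity in nanometers with 4.0 on the diagonal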
Locu/chronology
pykronos/pykronos/utils/cache.py
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/pykronos/pykronos/utils/cache.py#L132-L149
def _bucket_events(self, event_iterable):
    """
    Convert an iterable of events into an iterable of lists of events per
    bucket.
    """
    current_bucket_time = None
    current_bucket_events = None
    for event in event_iterable:
        event_bucket_time = self._bucket_time(event[TIMESTAMP_FIELD])
        if current_bucket_time is None or current_bucket_time < event_bucket_time:
            if current_bucket_events is not None:
                yield current_bucket_events
            current_bucket_time = event_bucket_time
            current_bucket_events = []
        current_bucket_events.append(event)
    if current_bucket_events is not None and current_bucket_events != []:
        yield current_bucket_events
Convert an iterable of events into an iterable of lists of events per bucket.
python
train
38.333333
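The bucketing logic above depends on its host class for `_bucket_time` and `TIMESTAMP_FIELD`. A minimal self-contained sketch of the same idea, assuming whole-minute buckets and events shaped like `{'@time': <epoch seconds>}` (both are illustrative assumptions, not pykronos specifics):

TIMESTAMP_FIELD = '@time'

def bucket_time(t, width=60):
    # floor the timestamp to its bucket start
    return t - (t % width)

def bucket_events(events, width=60):
    current_time, current = None, None
    for event in sorted(events, key=lambda e: e[TIMESTAMP_FIELD]):
        t = bucket_time(event[TIMESTAMP_FIELD], width)
        if current_time is None or current_time < t:
            if current is not None:
                yield current            # flush the finished bucket
            current_time, current = t, []
        current.append(event)
    if current:
        yield current                    # flush the trailing bucket

print(list(bucket_events([{'@time': 3}, {'@time': 59}, {'@time': 61}])))
# [[{'@time': 3}, {'@time': 59}], [{'@time': 61}]]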
davidmiller/letter
letter/__init__.py
https://github.com/davidmiller/letter/blob/c0c66ae2c6a792106e9a8374a01421817c8a8ae0/letter/__init__.py#L228-L244
def deliver(self, message, to):
    """
    Deliver our message

    Arguments:
    - `message`: MIMEMultipart

    Return: None
    Exceptions: None
    """
    # Send the message via local SMTP server.
    s = smtplib.SMTP(self.host, self.port)
    # sendmail function takes 3 arguments: sender's address, recipient's address
    # and message to send - here it is sent as one string.
    s.sendmail(message['From'], to, message.as_string())
    s.quit()
    return
Deliver our message

Arguments:
- `message`: MIMEMultipart

Return: None
Exceptions: None
python
train
29.823529
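A sketch of how `deliver` could be driven locally, assuming the function above (with its smtplib import) is in scope. The `Postman` class is a hypothetical stand-in supplying the `host`/`port` attributes the method expects; a debugging SMTP server (e.g. `python -m aiosmtpd -n -l localhost:1025`) can receive the message:

from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

class Postman:
    # hypothetical host object; the real class presumably sets these in __init__
    host, port = 'localhost', 1025
    deliver = deliver   # reuse the function above as a method

msg = MIMEMultipart()
msg['From'] = 'me@example.com'
msg['Subject'] = 'hello'
msg.attach(MIMEText('body text'))
Postman().deliver(msg, ['you@example.com'])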
Microsoft/LightGBM
python-package/lightgbm/basic.py
https://github.com/Microsoft/LightGBM/blob/8d2ec69f4f685b0ab1c4624d59ee2d3287bb3147/python-package/lightgbm/basic.py#L95-L100
def cfloat64_array_to_numpy(cptr, length):
    """Convert a ctypes double pointer array to a numpy array."""
    if isinstance(cptr, ctypes.POINTER(ctypes.c_double)):
        return np.fromiter(cptr, dtype=np.float64, count=length)
    else:
        raise RuntimeError('Expected double pointer')
Convert a ctypes double pointer array to a numpy array.
python
train
48.333333
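Since this helper needs only ctypes and numpy, its behaviour is easy to check in isolation (assuming `cfloat64_array_to_numpy` above is in scope):

import ctypes
import numpy as np

values = (ctypes.c_double * 3)(1.0, 2.5, -4.0)              # a C array of 3 doubles
ptr = ctypes.cast(values, ctypes.POINTER(ctypes.c_double))  # decay to double*
print(cfloat64_array_to_numpy(ptr, 3))                      # [ 1.   2.5 -4. ]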
lionheart/django-pyodbc
django_pyodbc/compiler.py
https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/compiler.py#L346-L393
def _fix_slicing_order(self, outer_fields, inner_select, order, inner_table_name):
    """
    Apply any necessary fixes to the outer_fields, inner_select, and order
    strings due to slicing.
    """
    # Using ROW_NUMBER requires an ordering
    if order is None:
        meta = self.query.get_meta()
        column = meta.pk.db_column or meta.pk.get_attname()
        order = '{0}.{1} ASC'.format(
            inner_table_name,
            self.connection.ops.quote_name(column),
        )
    else:
        alias_id = 0
        # remap order for injected subselect
        new_order = []
        for x in order.split(','):
            # find the ordering direction
            m = _re_find_order_direction.search(x)
            if m:
                direction = m.groups()[0]
            else:
                direction = 'ASC'
            # remove the ordering direction
            x = _re_find_order_direction.sub('', x)
            # remove any namespacing or table name from the column name
            col = x.rsplit('.', 1)[-1]
            # Is the ordering column missing from the inner select?
            # 'inner_select' contains the full query without the leading
            # 'SELECT '. It's possible that this can get a false hit if the
            # ordering column is used in the WHERE while not being in the
            # SELECT. It's not worth the complexity to properly handle that
            # edge case.
            if x not in inner_select:
                # Ordering requires the column to be selected by the inner select
                alias_id += 1
                # alias column name
                col = '{left_sql_quote}{0}___o{1}{right_sql_quote}'.format(
                    col.strip(self.connection.ops.left_sql_quote + self.connection.ops.right_sql_quote),
                    alias_id,
                    left_sql_quote=self.connection.ops.left_sql_quote,
                    right_sql_quote=self.connection.ops.right_sql_quote,
                )
                # add alias to inner_select
                inner_select = '({0}) AS {1}, {2}'.format(x, col, inner_select)
            new_order.append('{0}.{1} {2}'.format(inner_table_name, col, direction))
        order = ', '.join(new_order)
    return outer_fields, inner_select, order
Apply any necessary fixes to the outer_fields, inner_select, and order strings due to slicing.
python
train
50.145833
GuyAllard/markov_clustering
markov_clustering/modularity.py
https://github.com/GuyAllard/markov_clustering/blob/28787cf64ef06bf024ff915246008c767ea830cf/markov_clustering/modularity.py#L25-L42
def convert_to_adjacency_matrix(matrix):
    """
    Converts transition matrix into adjacency matrix

    :param matrix: The matrix to be converted
    :returns: adjacency matrix
    """
    for i in range(matrix.shape[0]):
        if isspmatrix(matrix):
            col = find(matrix[:, i])[2]
        else:
            col = matrix[:, i].T.tolist()[0]

        coeff = max(Fraction(c).limit_denominator().denominator for c in col)
        matrix[:, i] *= coeff

    return matrix
Converts transition matrix into adjacency matrix

:param matrix: The matrix to be converted
:returns: adjacency matrix
python
train
26.277778
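A small dense check of the column scaling (assuming the function above and its imports, `fractions.Fraction` plus scipy's `isspmatrix`/`find`, are in scope; `np.matrix` is used because the dense branch indexes columns with `.T.tolist()[0]`):

import numpy as np

T = np.matrix([[0.5, 0.25],
               [0.5, 0.75]])   # column-stochastic transition matrix
print(convert_to_adjacency_matrix(T))
# [[1. 1.]
#  [1. 3.]]   # each column scaled by its largest denominator (2 and 4)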
tanghaibao/jcvi
jcvi/utils/orderedcollections.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/orderedcollections.py#L184-L197
def parse_qs(qs, keep_blank_values=0, strict_parsing=0, keep_attr_order=True):
    """
    Kind of like urlparse.parse_qs, except returns an ordered dict.
    Also avoids replicating that function's bad habit of overriding the
    built-in 'dict' type.

    Taken from below with modification:
    <https://bitbucket.org/btubbs/thumpy/raw/8cdece404f15/thumpy.py>
    """
    od = DefaultOrderedDict(list) if keep_attr_order else defaultdict(list)
    for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
        od[name].append(value)
    return od
Kind of like urlparse.parse_qs, except returns an ordered dict. Also avoids replicating that function's bad habit of overriding the built-in 'dict' type. Taken from below with modification: <https://bitbucket.org/btubbs/thumpy/raw/8cdece404f15/thumpy.py>
python
train
39.428571
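The ordered container (`DefaultOrderedDict`) is jcvi-specific, but the `keep_attr_order=False` path needs only the standard library; a reduced sketch of the same behaviour:

from collections import defaultdict
from urllib.parse import parse_qsl   # the original imports this in a py2/py3-compatible way

def parse_qs_plain(qs, keep_blank_values=0, strict_parsing=0):
    od = defaultdict(list)
    for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
        od[name].append(value)
    return od

print(dict(parse_qs_plain("name=Ada&lang=py&lang=ml")))
# {'name': ['Ada'], 'lang': ['py', 'ml']}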
SKA-ScienceDataProcessor/integration-prototype
sip/examples/flask_processing_controller/app/old.db/client.py
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/examples/flask_processing_controller/app/old.db/client.py#L95-L108
def get_processing_block_ids(self):
    """Get list of processing block ids using the processing block id"""
    # Initialise empty list
    _processing_block_ids = []

    # Pattern used to search processing block ids
    pattern = '*:processing_block:*'
    block_ids = self._db.get_ids(pattern)

    for block_id in block_ids:
        id_split = block_id.split(':')[-1]
        _processing_block_ids.append(id_split)
    return sorted(_processing_block_ids)
Get list of processing block ids using the processing block id
python
train
34.857143
oceanprotocol/osmosis-azure-driver
osmosis_azure_driver/computing_plugin.py
https://github.com/oceanprotocol/osmosis-azure-driver/blob/36bcfa96547fb6117346b02b0ac6a74345c59695/osmosis_azure_driver/computing_plugin.py#L135-L184
def exec_container(self, asset_url, algorithm_url, resource_group_name,
                   account_name, account_key, location,
                   share_name_input='compute', share_name_output='output',
                   docker_image='python:3.6-alpine', memory=1.5, cpu=1):
    """Prepare a docker image that will run in the cloud, mounting the
    asset and executing the algorithm.

    :param asset_url:
    :param algorithm_url:
    :param resource_group_name:
    :param account_name:
    :param account_key:
    :param share_name_input:
    :param share_name_output:
    :param location:
    """
    try:
        container_group_name = 'compute' + str(int(time.time()))
        result_file = self._create_container_group(
            resource_group_name=resource_group_name,
            name=container_group_name,
            image=docker_image,
            location=location,
            memory=memory,
            cpu=cpu,
            algorithm=algorithm_url,
            asset=asset_url,
            input_mount_point='/input',
            output_moint_point='/output',
            account_name=account_name,
            account_key=account_key,
            share_name_input=share_name_input,
            share_name_output=share_name_output,
        )
        # Poll until the container group is provisioned ...
        while self.client.container_groups.get(
                resource_group_name,
                container_group_name).provisioning_state != 'Succeeded':
            logging.info("Waiting for resources")
        # ... and until its (single) container has terminated.
        while self.client.container_groups.get(
                resource_group_name, container_group_name). \
                containers[0].instance_view.current_state.state != 'Terminated':
            logging.info("Waiting to terminate")
        self.delete_vm(container_group_name, resource_group_name)
        return result_file
    except Exception:
        logging.error("There was a problem executing your container")
        # re-raise the original exception instead of a bare `raise Exception`,
        # which would discard the error message
        raise
Prepare a docker image that will run in the cloud, mounting the asset and executing the algorithm.

:param asset_url:
:param algorithm_url:
:param resource_group_name:
:param account_name:
:param account_key:
:param share_name_input:
:param share_name_output:
:param location:
python
train
54.88
mlperf/training
image_classification/tensorflow/official/resnet/resnet_model.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/image_classification/tensorflow/official/resnet/resnet_model.py#L328-L367
def _custom_dtype_getter(self, getter, name, shape=None, dtype=DEFAULT_DTYPE,
                         *args, **kwargs):
    """Creates variables in fp32, then casts to fp16 if necessary.

    This function is a custom getter. A custom getter is a function with the
    same signature as tf.get_variable, except it has an additional getter
    parameter. Custom getters can be passed as the `custom_getter` parameter of
    tf.variable_scope. Then, tf.get_variable will call the custom getter,
    instead of directly getting a variable itself. This can be used to change
    the types of variables that are retrieved with tf.get_variable.
    The `getter` parameter is the underlying variable getter, that would have
    been called if no custom getter was used. Custom getters typically get a
    variable with `getter`, then modify it in some way.

    This custom getter will create an fp32 variable. If a low precision
    (e.g. float16) variable was requested it will then cast the variable to
    the requested dtype. The reason we do not directly create variables in
    low precision dtypes is that applying small gradients to such variables
    may cause the variable not to change.

    Args:
      getter: The underlying variable getter, that has the same signature as
        tf.get_variable and returns a variable.
      name: The name of the variable to get.
      shape: The shape of the variable to get.
      dtype: The dtype of the variable to get. Note that if this is a low
        precision dtype, the variable will be created as a tf.float32
        variable, then cast to the appropriate dtype
      *args: Additional arguments to pass unmodified to getter.
      **kwargs: Additional keyword arguments to pass unmodified to getter.

    Returns:
      A variable which is cast to fp16 if necessary.
    """
    if dtype in CASTABLE_TYPES:
        var = getter(name, shape, tf.float32, *args, **kwargs)
        return tf.cast(var, dtype=dtype, name=name + '_cast')
    else:
        return getter(name, shape, dtype, *args, **kwargs)
Creates variables in fp32, then casts to fp16 if necessary.

This function is a custom getter. A custom getter is a function with the same signature as tf.get_variable, except it has an additional getter parameter. Custom getters can be passed as the `custom_getter` parameter of tf.variable_scope. Then, tf.get_variable will call the custom getter, instead of directly getting a variable itself. This can be used to change the types of variables that are retrieved with tf.get_variable. The `getter` parameter is the underlying variable getter, that would have been called if no custom getter was used. Custom getters typically get a variable with `getter`, then modify it in some way.

This custom getter will create an fp32 variable. If a low precision (e.g. float16) variable was requested it will then cast the variable to the requested dtype. The reason we do not directly create variables in low precision dtypes is that applying small gradients to such variables may cause the variable not to change.

Args:
  getter: The underlying variable getter, that has the same signature as tf.get_variable and returns a variable.
  name: The name of the variable to get.
  shape: The shape of the variable to get.
  dtype: The dtype of the variable to get. Note that if this is a low precision dtype, the variable will be created as a tf.float32 variable, then cast to the appropriate dtype
  *args: Additional arguments to pass unmodified to getter.
  **kwargs: Additional keyword arguments to pass unmodified to getter.

Returns:
  A variable which is cast to fp16 if necessary.
python
train
50.375
SoCo/SoCo
soco/events.py
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/events.py#L234-L270
def do_NOTIFY(self):  # pylint: disable=invalid-name
    """Serve a ``NOTIFY`` request.

    A ``NOTIFY`` request will be sent by a Sonos device when a state
    variable changes. See the `UPnP Spec §4.3 [pdf]
    <http://upnp.org/specs/arch/UPnP-arch-DeviceArchitecture-v1.1.pdf>`_
    for details.
    """
    timestamp = time.time()
    headers = requests.structures.CaseInsensitiveDict(self.headers)
    seq = headers['seq']  # Event sequence number
    sid = headers['sid']  # Event Subscription Identifier
    content_length = int(headers['content-length'])
    content = self.rfile.read(content_length)
    # Find the relevant service and queue from the sid
    with _subscriptions_lock:
        subscription = _subscriptions.get(sid)
    # It might have been removed by another thread
    if subscription:
        service = subscription.service
        log.info(
            "Event %s received for %s service on thread %s at %s", seq,
            service.service_id, threading.current_thread(), timestamp)
        log.debug("Event content: %s", content)
        variables = parse_event_xml(content)
        # Build the Event object
        event = Event(sid, seq, service, timestamp, variables)
        # pass the event details on to the service so it can update its
        # cache.
        # pylint: disable=protected-access
        service._update_cache_on_event(event)
        # Put the event on the queue
        subscription.events.put(event)
    else:
        log.info("No service registered for %s", sid)
    self.send_response(200)
    self.end_headers()
Serve a ``NOTIFY`` request.

A ``NOTIFY`` request will be sent by a Sonos device when a state variable changes. See the `UPnP Spec §4.3 [pdf] <http://upnp.org/specs/arch/UPnP-arch-DeviceArchitecture-v1.1.pdf>`_ for details.
python
train
45.324324
saltstack/salt
salt/client/ssh/wrapper/state.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/ssh/wrapper/state.py#L108-L118
def _check_pillar(kwargs, pillar=None):
    '''
    Check the pillar for errors, refuse to run the state if there are
    errors in the pillar and return the pillar errors
    '''
    if kwargs.get('force'):
        return True
    pillar_dict = pillar if pillar is not None else __pillar__
    if '_errors' in pillar_dict:
        return False
    return True
Check the pillar for errors, refuse to run the state if there are errors in the pillar and return the pillar errors
python
train
31.818182
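Because `pillar` can be passed explicitly, the function is easy to exercise without a Salt runtime (the `__pillar__` global is consulted only when `pillar` is None); assuming `_check_pillar` above is in scope:

assert _check_pillar({'force': True}, pillar={'_errors': ['boom']}) is True   # force wins
assert _check_pillar({}, pillar={'_errors': ['boom']}) is False               # pillar errors block the run
assert _check_pillar({}, pillar={'role': 'web'}) is True                      # clean pillar passes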
closeio/quotequail
quotequail/_html.py
https://github.com/closeio/quotequail/blob/8a3960c033d595b25a8bbc2c340be898e3065b5f/quotequail/_html.py#L281-L372
def tree_line_generator(el, max_lines=None):
    """
    Internal generator that iterates through an LXML tree and yields a
    tuple per line. In this context, lines are blocks of text separated
    by <br> tags or by block elements. The tuples contain the following
    elements:

    - A tuple with the element reference (element, position) for the
      start of the line. The tuple consists of:
      - The LXML HTML element which references the line
      - Whether the text starts at the beginning of the referenced
        element, or after the closing tag
    - A similar tuple indicating the ending of the line.
    - The email indentation level, if detected.
    - The plain (non-HTML) text of the line

    If max_lines is specified, the generator stops after yielding the
    given amount of lines.

    For example, the HTML tree "<div>foo <span>bar</span><br>baz</div>"
    yields:

    - ((<Element div>, 'begin'), (<Element br>, 'begin'), 0, 'foo bar')
    - ((<Element br>, 'end'), (<Element div>, 'end'), 0, 'baz').

    To illustrate the indentation level, the HTML tree
    '<div><blockquote>hi</blockquote>world</div>' yields:

    - ((<Element blockquote>, 'begin'), (<Element blockquote>, 'end'),
      1, 'hi')
    - ((<Element blockquote>, 'end'), (<Element div>, 'end'), 0, 'world')
    """
    def _trim_spaces(text):
        return MULTIPLE_WHITESPACE_RE.sub(' ', text).strip()

    counter = 1
    if max_lines != None and counter > max_lines:
        return

    # Buffer for the current line.
    line = ''

    # The reference tuple (element, position) for the start of the line.
    start_ref = None

    # The indentation level at the start of the line.
    start_indentation_level = None

    for token in tree_token_generator(el):
        if token is None:
            continue

        elif isinstance(token, tuple):
            el, state, indentation_level = token

            tag_name = el.tag.lower()

            line_break = (tag_name == 'br' and state == BEGIN)
            is_block = (tag_name not in INLINE_TAGS)
            is_forward = (is_block and state == BEGIN and
                          el.attrib.get('style') in FORWARD_STYLES)

            if is_block or line_break:
                line = _trim_spaces(line)
                if line or line_break or is_forward:
                    end_ref = (el, state)
                    yield start_ref, end_ref, start_indentation_level, line
                    counter += 1
                    if max_lines != None and counter > max_lines:
                        return
                    line = ''

                    if is_forward:
                        # Simulate forward
                        yield (end_ref, end_ref,
                               start_indentation_level, FORWARD_LINE)
                        counter += 1
                        if max_lines != None and counter > max_lines:
                            return

                if not line:
                    start_ref = (el, state)
                    start_indentation_level = indentation_level

        elif isinstance(token, string_class):
            line += token

        else:
            raise RuntimeError('invalid token: {}'.format(token))

    line = _trim_spaces(line)
    if line:
        yield line
Internal generator that iterates through an LXML tree and yields a tuple per line. In this context, lines are blocks of text separated by <br> tags or by block elements. The tuples contain the following elements:

- A tuple with the element reference (element, position) for the start of the line. The tuple consists of:
  - The LXML HTML element which references the line
  - Whether the text starts at the beginning of the referenced element, or after the closing tag
- A similar tuple indicating the ending of the line.
- The email indentation level, if detected.
- The plain (non-HTML) text of the line

If max_lines is specified, the generator stops after yielding the given amount of lines.

For example, the HTML tree "<div>foo <span>bar</span><br>baz</div>" yields:

- ((<Element div>, 'begin'), (<Element br>, 'begin'), 0, 'foo bar')
- ((<Element br>, 'end'), (<Element div>, 'end'), 0, 'baz').

To illustrate the indentation level, the HTML tree '<div><blockquote>hi</blockquote>world</div>' yields:

- ((<Element blockquote>, 'begin'), (<Element blockquote>, 'end'), 1, 'hi')
- ((<Element blockquote>, 'end'), (<Element div>, 'end'), 0, 'world')
python
train
34.73913
ValvePython/steam
steam/webauth.py
https://github.com/ValvePython/steam/blob/2de1364c47598410b572114e6129eab8fff71d5b/steam/webauth.py#L155-L232
def login(self, password='', captcha='', email_code='',
          twofactor_code='', language='english'):
    """Attempts web login and returns on a session with cookies set

    :param password: password, if it wasn't provided on instance init
    :type password: :class:`str`
    :param captcha: text response for captcha challenge
    :type captcha: :class:`str`
    :param email_code: email code for steam guard
    :type email_code: :class:`str`
    :param twofactor_code: 2FA code for steam guard
    :type twofactor_code: :class:`str`
    :param language: select language for steam web pages (sets language cookie)
    :type language: :class:`str`
    :return: a session on success and :class:`None` otherwise
    :rtype: :class:`requests.Session`, :class:`None`
    :raises HTTPError: any problem with http request, timeouts, 5xx, 4xx etc
    :raises LoginIncorrect: wrong username or password
    :raises CaptchaRequired: when captcha is needed
    :raises CaptchaRequiredLoginIncorrect: when captcha is needed and login is incorrect
    :raises EmailCodeRequired: when email is needed
    :raises TwoFactorCodeRequired: when 2FA is needed
    """
    if self.logged_on:
        return self.session

    if password:
        self.password = password
    else:
        if self.password:
            password = self.password
        else:
            raise LoginIncorrect("password is not specified")

    if not captcha and self.captcha_code:
        captcha = self.captcha_code

    self._load_key()
    resp = self._send_login(password=password, captcha=captcha,
                            email_code=email_code, twofactor_code=twofactor_code)

    if resp['success'] and resp['login_complete']:
        self.logged_on = True
        self.password = self.captcha_code = ''
        self.captcha_gid = -1

        for cookie in list(self.session.cookies):
            for domain in ['store.steampowered.com', 'help.steampowered.com', 'steamcommunity.com']:
                self.session.cookies.set(cookie.name, cookie.value,
                                         domain=domain, secure=cookie.secure)

        self.session_id = generate_session_id()

        for domain in ['store.steampowered.com', 'help.steampowered.com', 'steamcommunity.com']:
            self.session.cookies.set('Steam_Language', language, domain=domain)
            self.session.cookies.set('birthtime', '-3333', domain=domain)
            self.session.cookies.set('sessionid', self.session_id, domain=domain)

        self._finalize_login(resp)
        return self.session
    else:
        if resp.get('captcha_needed', False):
            self.captcha_gid = resp['captcha_gid']
            self.captcha_code = ''

            if resp.get('clear_password_field', False):
                self.password = ''
                raise CaptchaRequiredLoginIncorrect(resp['message'])
            else:
                raise CaptchaRequired(resp['message'])
        elif resp.get('emailauth_needed', False):
            self.steam_id = SteamID(resp['emailsteamid'])
            raise EmailCodeRequired(resp['message'])
        elif resp.get('requires_twofactor', False):
            raise TwoFactorCodeRequired(resp['message'])
        else:
            self.password = ''
            raise LoginIncorrect(resp['message'])

    return None
Attempts web login and returns on a session with cookies set

:param password: password, if it wasn't provided on instance init
:type password: :class:`str`
:param captcha: text response for captcha challenge
:type captcha: :class:`str`
:param email_code: email code for steam guard
:type email_code: :class:`str`
:param twofactor_code: 2FA code for steam guard
:type twofactor_code: :class:`str`
:param language: select language for steam web pages (sets language cookie)
:type language: :class:`str`
:return: a session on success and :class:`None` otherwise
:rtype: :class:`requests.Session`, :class:`None`
:raises HTTPError: any problem with http request, timeouts, 5xx, 4xx etc
:raises LoginIncorrect: wrong username or password
:raises CaptchaRequired: when captcha is needed
:raises CaptchaRequiredLoginIncorrect: when captcha is needed and login is incorrect
:raises EmailCodeRequired: when email is needed
:raises TwoFactorCodeRequired: when 2FA is needed
python
train
43.910256
lambdamusic/Ontospy
ontospy/core/utils.py
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/utils.py#L195-L219
def pprint2columns(llist, max_length=60):
    """
    llist = a list of strings
    max_length = if a word is longer than that, for single col display

    > prints a list in two columns, taking care of alignment too
    """
    if len(llist) == 0:
        return None
    col_width = max(len(word) for word in llist) + 2  # padding

    # llist length must be even, otherwise splitting fails
    if not len(llist) % 2 == 0:
        llist += [' ']  # add a fake element

    if col_width > max_length:
        for el in llist:
            print(el)
    else:
        column1 = llist[:int(len(llist) / 2)]
        column2 = llist[int(len(llist) / 2):]
        for c1, c2 in zip(column1, column2):
            space = " " * (col_width - len(c1))
            print("%s%s%s" % (c1, space, c2))
llist = a list of strings
max_length = if a word is longer than that, for single col display

> prints a list in two columns, taking care of alignment too
python
train
30.64
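For example (assuming `pprint2columns` above is in scope), five words are padded to an even six entries and printed in two aligned columns:

pprint2columns(['alpha', 'beta', 'gamma', 'delta', 'epsilon'])
# alpha    delta
# beta     epsilon
# gamma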
apache/airflow
airflow/contrib/operators/gcp_container_operator.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/gcp_container_operator.py#L310-L322
def _get_field(self, extras, field, default=None):
    """
    Fetches a field from extras, and returns it. This is some Airflow
    magic. The google_cloud_platform hook type adds custom UI elements
    to the hook page, which allow admins to specify service_account,
    key_path, etc. They get formatted as shown below.
    """
    long_f = 'extra__google_cloud_platform__{}'.format(field)
    if long_f in extras:
        return extras[long_f]
    else:
        self.log.info('Field %s not found in extras.', field)
        return default
Fetches a field from extras, and returns it. This is some Airflow magic. The google_cloud_platform hook type adds custom UI elements to the hook page, which allow admins to specify service_account, key_path, etc. They get formatted as shown below.
python
test
44.461538
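The lookup is plain dict access under a fixed key prefix; a tiny stub shows both branches (the values and the `_Stub` class are hypothetical, standing in for the real hook object, and assume `_get_field` above is defined at module level):

import logging

class _Stub:
    log = logging.getLogger('demo')
    _get_field = _get_field   # reuse the method defined above

extras = {'extra__google_cloud_platform__key_path': '/tmp/key.json'}
print(_Stub()._get_field(extras, 'key_path'))           # /tmp/key.json
print(_Stub()._get_field(extras, 'scopes', 'missing'))  # falls back to 'missing' (and logs at INFO)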
Clinical-Genomics/scout
scout/adapter/mongo/variant_loader.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/variant_loader.py#L198-L288
def update_case_compounds(self, case_obj, build='37'):
    """Update the compounds for a case

    Loop over all coding intervals to get coordinates for all potential
    compound positions. Update all variants within a gene with a bulk
    operation.
    """
    case_id = case_obj['_id']
    # Possible categories 'snv', 'sv', 'str', 'cancer':
    categories = set()
    # Possible variant types 'clinical', 'research':
    variant_types = set()

    for file_type in FILE_TYPE_MAP:
        if case_obj.get('vcf_files', {}).get(file_type):
            categories.add(FILE_TYPE_MAP[file_type]['category'])
            variant_types.add(FILE_TYPE_MAP[file_type]['variant_type'])

    coding_intervals = self.get_coding_intervals(build=build)
    # Loop over all intervals
    for chrom in CHROMOSOMES:
        intervals = coding_intervals.get(chrom, IntervalTree())
        for var_type in variant_types:
            for category in categories:
                LOG.info(
                    "Updating compounds on chromosome:{0}, type:{1}, category:{2} for case:{3}".format(
                        chrom, var_type, category, case_id))

                # Fetch all variants from a chromosome
                query = {
                    'variant_type': var_type,
                    'chrom': chrom,
                }

                # Get all variants from the database of the specific type
                variant_objs = self.variants(
                    case_id=case_id,
                    query=query,
                    category=category,
                    nr_of_variants=-1,
                    sort_key='position'
                )

                # Initiate a bulk
                bulk = {}
                current_region = None
                special = False

                # Loop over the variants and check if they are in a coding region
                for var_obj in variant_objs:
                    var_id = var_obj['_id']
                    var_chrom = var_obj['chromosome']
                    var_start = var_obj['position']
                    var_end = var_obj['end'] + 1

                    update_bulk = True
                    new_region = None

                    # Check if the variant is in a coding region
                    genomic_regions = coding_intervals.get(
                        var_chrom, IntervalTree()).search(var_start, var_end)

                    # If the variant is in a coding region
                    if genomic_regions:
                        # We know there is data here so get the interval id
                        new_region = genomic_regions.pop().data

                    if new_region and (new_region == current_region):
                        # If the variant is in the same region as previous
                        # we add it to the same bulk
                        update_bulk = False
                    current_region = new_region

                    # If the variant is not in a current region we update the
                    # compounds from the previous region, if any. Otherwise continue
                    if update_bulk and bulk:
                        self.update_compounds(bulk)
                        self.update_mongo_compound_variants(bulk)
                        bulk = {}

                    if new_region:
                        bulk[var_id] = var_obj

                if not bulk:
                    continue

                self.update_compounds(bulk)
                self.update_mongo_compound_variants(bulk)

    LOG.info("All compounds updated")
    return
Update the compounds for a case

Loop over all coding intervals to get coordinates for all potential compound positions. Update all variants within a gene with a bulk operation.
python
test
40.868132
gorakhargosh/pepe
pepe/content_types.py
https://github.com/gorakhargosh/pepe/blob/1e40853378d515c99f03b3f59efa9b943d26eb62/pepe/content_types.py#L97-L151
def get_comment_group_for_path(self, pathname, default_content_type=None):
    """
    Obtains the comment group for a specified pathname.

    :param pathname:
        The path for which the comment group will be obtained.
    :return:
        Returns the comment group for the specified pathname or raises a
        ``ValueError`` if a content type is not found or raises a
        ``KeyError`` if a comment group is not found.

    Usage:

    >>> db = ContentTypesDatabase()
    >>> db.add_config(db._test_config, 'test_config.yaml')
    >>> g = db.get_comment_group_for_path

    >>> g("foobar.py")
    [['#', '']]
    >>> g("foobar.js")
    [['/*', '*/'], ['//', '']]
    >>> g('foobar.rst')
    Traceback (most recent call last):
        ...
    KeyError: 'No comment groups for content type `structured-text` for file `foobar.rst` found'

    # If the content type cannot be determined, we assume the content
    # type to be ``python`` in this case.
    >>> g('foobar.f37993ajdha73', default_content_type='python')
    [['#', '']]

    >>> g("foobar.f37993ajdha73")
    Traceback (most recent call last):
        ...
    ValueError: No content type defined for file path: foobar.f37993ajdha73
    >>> g("foobar.f37993ajdha73", default_content_type=None)
    Traceback (most recent call last):
        ...
    ValueError: No content type defined for file path: foobar.f37993ajdha73
    """
    content_type = self.guess_content_type(pathname)
    if not content_type:
        # Content type is not found.
        if default_content_type:
            content_type = default_content_type
            return self.get_comment_group(content_type)
        else:
            raise ValueError(
                "No content type defined for file path: %s" % pathname)
    else:
        try:
            return self.get_comment_group(content_type)
        except KeyError:
            raise KeyError(
                "No comment groups for content type `%s` for file `%s` found" % (
                    content_type, pathname))
Obtains the comment group for a specified pathname. :param pathname: The path for which the comment group will be obtained. :return: Returns the comment group for the specified pathname or raises a ``ValueError`` if a content type is not found or raises a ``KeyError`` if a comment group is not found. Usage: >>> db = ContentTypesDatabase() >>> db.add_config(db._test_config, 'test_config.yaml') >>> g = db.get_comment_group_for_path >>> g("foobar.py") [['#', '']] >>> g("foobar.js") [['/*', '*/'], ['//', '']] >>> g('foobar.rst') Traceback (most recent call last): ... KeyError: 'No comment groups for content type `structured-text` for file `foobar.rst` found' # If the content type cannot be determined, we assume the content # type to be ``python`` in this case. >>> g('foobar.f37993ajdha73', default_content_type='python') [['#', '']] >>> g("foobar.f37993ajdha73") Traceback (most recent call last): ... ValueError: No content type defined for file path: foobar.f37993ajdha73 >>> g("foobar.f37993ajdha73", default_content_type=None) Traceback (most recent call last): ... ValueError: No content type defined for file path: foobar.f37993ajdha73
[ "Obtains", "the", "comment", "group", "for", "a", "specified", "pathname", "." ]
python
train
41.036364
IBM/pyxcli
pyxcli/response.py
https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/response.py#L75-L87
def as_single_element(self): """ Processes the response as a single-element response, like config_get or system_counters_get. If there is more then one element in the response or no elements this raises a ResponseError """ if self.as_return_etree is None: return None if len(self.as_return_etree.getchildren()) == 1: return _populate_bunch_with_element(self.as_return_etree. getchildren()[0]) return _populate_bunch_with_element(self.as_return_etree)
[ "def", "as_single_element", "(", "self", ")", ":", "if", "self", ".", "as_return_etree", "is", "None", ":", "return", "None", "if", "len", "(", "self", ".", "as_return_etree", ".", "getchildren", "(", ")", ")", "==", "1", ":", "return", "_populate_bunch_with_element", "(", "self", ".", "as_return_etree", ".", "getchildren", "(", ")", "[", "0", "]", ")", "return", "_populate_bunch_with_element", "(", "self", ".", "as_return_etree", ")" ]
Processes the response as a single-element response, like config_get or system_counters_get. If there is more than one element in the response or no elements this raises a ResponseError
[ "Processes", "the", "response", "as", "a", "single", "-", "element", "response", "like", "config_get", "or", "system_counters_get", ".", "If", "there", "is", "more", "than", "one", "element", "in", "the", "response", "or", "no", "elements", "this", "raises", "a", "ResponseError" ]
python
train
44.769231
rosenbrockc/fortpy
fortpy/isense/evaluator.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/isense/evaluator.py#L435-L474
def _complete_values(self, symbol = ""): """Compiles a list of possible symbols that can hold a value in place. These consist of local vars, global vars, and functions.""" result = {} #Also add the subroutines from the module and its dependencies. moddict = self._generic_filter_execs(self.context.module) self._cond_update(result, moddict, symbol) self._cond_update(result, self.context.module.interfaces, symbol) for depend in self.context.module.dependencies: if depend in self.context.module.parent.modules: #We don't want to display executables that are part of an interface, or that are embedded in #a derived type, since those will be called through the type or interface filtdict = self._generic_filter_execs(self.context.module.parent.modules[depend]) self._cond_update(result, filtdict, symbol) self._cond_update(result, self.context.module.parent.modules[depend].interfaces, symbol) #Add all the local vars if we are in an executable if (isinstance(self.context.element, Function) or isinstance(self.context.element, Subroutine)): self._cond_update(result, self.element.members, symbol) #Next add the global variables from the module if self.context.module is not None: self._cond_update(result, self.context.module.members, symbol) #Next add user defined functions to the mix for execkey in self.context.module.executables: iexec = self.context.module.executables[execkey] if isinstance(iexec, Function) and self._symbol_in(symbol, iexec.name): result[iexec.name] = iexec #Finally add the builtin functions to the mix. We need to add support #for these in a separate file so we have their call signatures. if symbol == "": #Use the abbreviated list of most common fortran builtins self._cond_update(result, cache.common_builtin, symbol) else: #we can use the full list as there will probably not be that #many left over. self._cond_update(result, cache.builtin, symbol) return result
[ "def", "_complete_values", "(", "self", ",", "symbol", "=", "\"\"", ")", ":", "result", "=", "{", "}", "#Also add the subroutines from the module and its dependencies.", "moddict", "=", "self", ".", "_generic_filter_execs", "(", "self", ".", "context", ".", "module", ")", "self", ".", "_cond_update", "(", "result", ",", "moddict", ",", "symbol", ")", "self", ".", "_cond_update", "(", "result", ",", "self", ".", "context", ".", "module", ".", "interfaces", ",", "symbol", ")", "for", "depend", "in", "self", ".", "context", ".", "module", ".", "dependencies", ":", "if", "depend", "in", "self", ".", "context", ".", "module", ".", "parent", ".", "modules", ":", "#We don't want to display executables that are part of an interface, or that are embedded in", "#a derived type, since those will be called through the type or interface", "filtdict", "=", "self", ".", "_generic_filter_execs", "(", "self", ".", "context", ".", "module", ".", "parent", ".", "modules", "[", "depend", "]", ")", "self", ".", "_cond_update", "(", "result", ",", "filtdict", ",", "symbol", ")", "self", ".", "_cond_update", "(", "result", ",", "self", ".", "context", ".", "module", ".", "parent", ".", "modules", "[", "depend", "]", ".", "interfaces", ",", "symbol", ")", "#Add all the local vars if we are in an executable", "if", "(", "isinstance", "(", "self", ".", "context", ".", "element", ",", "Function", ")", "or", "isinstance", "(", "self", ".", "context", ".", "element", ",", "Subroutine", ")", ")", ":", "self", ".", "_cond_update", "(", "result", ",", "self", ".", "element", ".", "members", ",", "symbol", ")", "#Next add the global variables from the module", "if", "self", ".", "context", ".", "module", "is", "not", "None", ":", "self", ".", "_cond_update", "(", "result", ",", "self", ".", "context", ".", "module", ".", "members", ",", "symbol", ")", "#Next add user defined functions to the mix", "for", "execkey", "in", "self", ".", "context", ".", "module", ".", "executables", ":", "iexec", "=", "self", ".", "context", ".", "module", ".", "executables", "[", "execkey", "]", "if", "isinstance", "(", "iexec", ",", "Function", ")", "and", "self", ".", "_symbol_in", "(", "symbol", ",", "iexec", ".", "name", ")", ":", "result", "[", "iexec", ".", "name", "]", "=", "iexec", "#Finally add the builtin functions to the mix. We need to add support", "#for these in a separate file so we have their call signatures.", "if", "symbol", "==", "\"\"", ":", "#Use the abbreviated list of most common fortran builtins", "self", ".", "_cond_update", "(", "result", ",", "cache", ".", "common_builtin", ",", "symbol", ")", "else", ":", "#we can use the full list as there will probably not be that", "#many left over.", "self", ".", "_cond_update", "(", "result", ",", "cache", ".", "builtin", ",", "symbol", ")", "return", "result" ]
Compiles a list of possible symbols that can hold a value in place. These consist of local vars, global vars, and functions.
[ "Compiles", "a", "list", "of", "possible", "symbols", "that", "can", "hold", "a", "value", "in", "place", ".", "These", "consist", "of", "local", "vars", "global", "vars", "and", "functions", "." ]
python
train
56.975
pandas-dev/pandas
pandas/core/generic.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L274-L278
def _construct_axes_dict(self, axes=None, **kwargs): """Return an axes dictionary for myself.""" d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)} d.update(kwargs) return d
[ "def", "_construct_axes_dict", "(", "self", ",", "axes", "=", "None", ",", "*", "*", "kwargs", ")", ":", "d", "=", "{", "a", ":", "self", ".", "_get_axis", "(", "a", ")", "for", "a", "in", "(", "axes", "or", "self", ".", "_AXIS_ORDERS", ")", "}", "d", ".", "update", "(", "kwargs", ")", "return", "d" ]
Return an axes dictionary for myself.
[ "Return", "an", "axes", "dictionary", "for", "myself", "." ]
python
train
42.8
CivicSpleen/ambry
ambry/orm/partition.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/partition.py#L1022-L1040
def shapes(self, simplify=None, predicate=None): """ Return geodata as a list of Shapely shapes :param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry. :param predicate: A single-argument function to select which records to include in the output. :return: A list of Shapely objects """ from shapely.wkt import loads if not predicate: predicate = lambda row: True if simplify: return [loads(row.geometry).simplify(simplify) for row in self if predicate(row)] else: return [loads(row.geometry) for row in self if predicate(row)]
[ "def", "shapes", "(", "self", ",", "simplify", "=", "None", ",", "predicate", "=", "None", ")", ":", "from", "shapely", ".", "wkt", "import", "loads", "if", "not", "predicate", ":", "predicate", "=", "lambda", "row", ":", "True", "if", "simplify", ":", "return", "[", "loads", "(", "row", ".", "geometry", ")", ".", "simplify", "(", "simplify", ")", "for", "row", "in", "self", "if", "predicate", "(", "row", ")", "]", "else", ":", "return", "[", "loads", "(", "row", ".", "geometry", ")", "for", "row", "in", "self", "if", "predicate", "(", "row", ")", "]" ]
Return geodata as a list of Shapely shapes :param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry. :param predicate: A single-argument function to select which records to include in the output. :return: A list of Shapely objects
[ "Return", "geodata", "as", "a", "list", "of", "Shapely", "shapes" ]
python
train
35.526316
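A short, hedged usage sketch for the shapes method above; `partition` is a stand-in for an instance of this class whose rows carry a WKT `geometry` column, and the `state` attribute in the predicate is illustrative only:

# Simplify each geometry to a 0.001-unit tolerance and keep only CA rows
shapes = partition.shapes(simplify=0.001,
                          predicate=lambda row: row.state == 'CA')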
rigetti/pyquil
pyquil/gates.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/gates.py#L379-L398
def CSWAP(control, target_1, target_2): """Produces a controlled-SWAP gate. This gate conditionally swaps the state of two qubits:: CSWAP = [[1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1]] :param control: The control qubit. :param target-1: The first target qubit. :param target-2: The second target qubit. The two target states are swapped if the control is in the ``|1>`` state. """ qubits = [unpack_qubit(q) for q in (control, target_1, target_2)] return Gate(name="CSWAP", params=[], qubits=qubits)
[ "def", "CSWAP", "(", "control", ",", "target_1", ",", "target_2", ")", ":", "qubits", "=", "[", "unpack_qubit", "(", "q", ")", "for", "q", "in", "(", "control", ",", "target_1", ",", "target_2", ")", "]", "return", "Gate", "(", "name", "=", "\"CSWAP\"", ",", "params", "=", "[", "]", ",", "qubits", "=", "qubits", ")" ]
Produces a controlled-SWAP gate. This gate conditionally swaps the state of two qubits:: CSWAP = [[1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1]] :param control: The control qubit. :param target-1: The first target qubit. :param target-2: The second target qubit. The two target states are swapped if the control is in the ``|1>`` state.
[ "Produces", "a", "controlled", "-", "SWAP", "gate", ".", "This", "gate", "conditionally", "swaps", "the", "state", "of", "two", "qubits", "::" ]
python
train
40.45
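A minimal sketch of using the CSWAP constructor above in a program; this assumes pyquil's Program is importable from pyquil.quil, as in this version of the library:

from pyquil.quil import Program
from pyquil.gates import CSWAP

# Swap qubits 1 and 2 if and only if control qubit 0 is |1>
prog = Program(CSWAP(0, 1, 2))
print(prog)  # CSWAP 0 1 2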
neuropsychology/NeuroKit.py
neurokit/eeg/eeg_complexity.py
https://github.com/neuropsychology/NeuroKit.py/blob/c9589348fbbde0fa7e986048c48f38e6b488adfe/neurokit/eeg/eeg_complexity.py#L20-L92
def eeg_complexity(eeg, sampling_rate, times=None, index=None, include="all", exclude=None, hemisphere="both", central=True, verbose=True, shannon=True, sampen=True, multiscale=True, spectral=True, svd=True, correlation=True, higushi=True, petrosian=True, fisher=True, hurst=True, dfa=True, lyap_r=False, lyap_e=False, names="Complexity"): """ Compute complexity indices of epochs or raw object. DOCS INCOMPLETE :( """ data = eeg_to_df(eeg, index=index, include=include, exclude=exclude, hemisphere=hemisphere, central=central) # if data was Raw, make as if it was an Epoch so the following routine is only written once if isinstance(data, dict) is False: data = {0: data} # Create time windows if isinstance(times, tuple): times = list(times) if isinstance(times, list): if isinstance(times[0], list) is False: times = [times] else: times = [[0, None]] # Deal with names if isinstance(names, str): prefix = [names] * len(times) if len(times) > 1: for time_index, time_window in enumerate(times): prefix[time_index] = prefix[time_index] + "_%.2f_%.2f" %(time_window[0], time_window[1]) else: prefix = names # Iterate complexity_all = pd.DataFrame() for time_index, time_window in enumerate(times): if len(times) > 1 and verbose is True: print("Computing complexity features... window " + str(time_window) + "/" + str(len(times))) complexity_features = {} # Compute complexity for each channel for each epoch index = 0 for epoch_index, epoch in data.items(): if len(times) == 1 and verbose is True: print("Computing complexity features... " + str(round(index/len(data.items())*100, 2)) + "%") index +=1 df = epoch[time_window[0]:time_window[1]].copy() complexity_features[epoch_index] = {} for channel in df: signal = df[channel].values features = complexity(signal, sampling_rate=sampling_rate, shannon=shannon, sampen=sampen, multiscale=multiscale, spectral=spectral, svd=svd, correlation=correlation, higushi=higushi, petrosian=petrosian, fisher=fisher, hurst=hurst, dfa=dfa, lyap_r=lyap_r, lyap_e=lyap_e) for key, feature in features.items(): if key in complexity_features[epoch_index].keys(): complexity_features[epoch_index][key].append(feature) else: complexity_features[epoch_index][key] = [feature] for epoch_index, epoch in complexity_features.items(): for feature in epoch: complexity_features[epoch_index][feature] = pd.Series(complexity_features[epoch_index][feature]).mean() # Convert to dataframe complexity_features = pd.DataFrame.from_dict(complexity_features, orient="index") complexity_features.columns = [prefix[time_index] + "_" + s for s in complexity_features.columns] complexity_all = pd.concat([complexity_all, complexity_features], axis=1) return(complexity_all)
[ "def", "eeg_complexity", "(", "eeg", ",", "sampling_rate", ",", "times", "=", "None", ",", "index", "=", "None", ",", "include", "=", "\"all\"", ",", "exclude", "=", "None", ",", "hemisphere", "=", "\"both\"", ",", "central", "=", "True", ",", "verbose", "=", "True", ",", "shannon", "=", "True", ",", "sampen", "=", "True", ",", "multiscale", "=", "True", ",", "spectral", "=", "True", ",", "svd", "=", "True", ",", "correlation", "=", "True", ",", "higushi", "=", "True", ",", "petrosian", "=", "True", ",", "fisher", "=", "True", ",", "hurst", "=", "True", ",", "dfa", "=", "True", ",", "lyap_r", "=", "False", ",", "lyap_e", "=", "False", ",", "names", "=", "\"Complexity\"", ")", ":", "data", "=", "eeg_to_df", "(", "eeg", ",", "index", "=", "index", ",", "include", "=", "include", ",", "exclude", "=", "exclude", ",", "hemisphere", "=", "hemisphere", ",", "central", "=", "central", ")", "# if data was Raw, make as if it was an Epoch so the following routine is only written once", "if", "isinstance", "(", "data", ",", "dict", ")", "is", "False", ":", "data", "=", "{", "0", ":", "data", "}", "# Create time windows", "if", "isinstance", "(", "times", ",", "tuple", ")", ":", "times", "=", "list", "(", "times", ")", "if", "isinstance", "(", "times", ",", "list", ")", ":", "if", "isinstance", "(", "times", "[", "0", "]", ",", "list", ")", "is", "False", ":", "times", "=", "[", "times", "]", "else", ":", "times", "=", "[", "[", "0", ",", "None", "]", "]", "# Deal with names", "if", "isinstance", "(", "names", ",", "str", ")", ":", "prefix", "=", "[", "names", "]", "*", "len", "(", "times", ")", "if", "len", "(", "times", ")", ">", "1", ":", "for", "time_index", ",", "time_window", "in", "enumerate", "(", "times", ")", ":", "prefix", "[", "time_index", "]", "=", "prefix", "[", "time_index", "]", "+", "\"_%.2f_%.2f\"", "%", "(", "time_window", "[", "0", "]", ",", "time_window", "[", "1", "]", ")", "else", ":", "prefix", "=", "names", "# Iterate", "complexity_all", "=", "pd", ".", "DataFrame", "(", ")", "for", "time_index", ",", "time_window", "in", "enumerate", "(", "times", ")", ":", "if", "len", "(", "times", ")", ">", "1", "and", "verbose", "is", "True", ":", "print", "(", "\"Computing complexity features... window \"", "+", "str", "(", "time_window", ")", "+", "\"/\"", "+", "str", "(", "len", "(", "times", ")", ")", ")", "complexity_features", "=", "{", "}", "# Compute complexity for each channel for each epoch", "index", "=", "0", "for", "epoch_index", ",", "epoch", "in", "data", ".", "items", "(", ")", ":", "if", "len", "(", "times", ")", "==", "1", "and", "verbose", "is", "True", ":", "print", "(", "\"Computing complexity features... 
\"", "+", "str", "(", "round", "(", "index", "/", "len", "(", "data", ".", "items", "(", ")", ")", "*", "100", ",", "2", ")", ")", "+", "\"%\"", ")", "index", "+=", "1", "df", "=", "epoch", "[", "time_window", "[", "0", "]", ":", "time_window", "[", "1", "]", "]", ".", "copy", "(", ")", "complexity_features", "[", "epoch_index", "]", "=", "{", "}", "for", "channel", "in", "df", ":", "signal", "=", "df", "[", "channel", "]", ".", "values", "features", "=", "complexity", "(", "signal", ",", "sampling_rate", "=", "sampling_rate", ",", "shannon", "=", "shannon", ",", "sampen", "=", "sampen", ",", "multiscale", "=", "multiscale", ",", "spectral", "=", "spectral", ",", "svd", "=", "svd", ",", "correlation", "=", "correlation", ",", "higushi", "=", "higushi", ",", "petrosian", "=", "petrosian", ",", "fisher", "=", "fisher", ",", "hurst", "=", "hurst", ",", "dfa", "=", "dfa", ",", "lyap_r", "=", "lyap_r", ",", "lyap_e", "=", "lyap_e", ")", "for", "key", ",", "feature", "in", "features", ".", "items", "(", ")", ":", "if", "key", "in", "complexity_features", "[", "epoch_index", "]", ".", "keys", "(", ")", ":", "complexity_features", "[", "epoch_index", "]", "[", "key", "]", ".", "append", "(", "feature", ")", "else", ":", "complexity_features", "[", "epoch_index", "]", "[", "key", "]", "=", "[", "feature", "]", "for", "epoch_index", ",", "epoch", "in", "complexity_features", ".", "items", "(", ")", ":", "for", "feature", "in", "epoch", ":", "complexity_features", "[", "epoch_index", "]", "[", "feature", "]", "=", "pd", ".", "Series", "(", "complexity_features", "[", "epoch_index", "]", "[", "feature", "]", ")", ".", "mean", "(", ")", "# Convert to dataframe", "complexity_features", "=", "pd", ".", "DataFrame", ".", "from_dict", "(", "complexity_features", ",", "orient", "=", "\"index\"", ")", "complexity_features", ".", "columns", "=", "[", "prefix", "[", "time_index", "]", "+", "\"_\"", "+", "s", "for", "s", "in", "complexity_features", ".", "columns", "]", "complexity_all", "=", "pd", ".", "concat", "(", "[", "complexity_all", ",", "complexity_features", "]", ",", "axis", "=", "1", ")", "return", "(", "complexity_all", ")" ]
Compute complexity indices of epochs or raw object. DOCS INCOMPLETE :(
[ "Compute", "complexity", "indices", "of", "epochs", "or", "raw", "object", "." ]
python
train
42.917808
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L972-L983
def respond_redirect(self, location='/'): """ Respond to the client with a 301 message and redirect them with a Location header. :param str location: The new location to redirect the client to. """ self.send_response(301) self.send_header('Content-Length', 0) self.send_header('Location', location) self.end_headers() return
[ "def", "respond_redirect", "(", "self", ",", "location", "=", "'/'", ")", ":", "self", ".", "send_response", "(", "301", ")", "self", ".", "send_header", "(", "'Content-Length'", ",", "0", ")", "self", ".", "send_header", "(", "'Location'", ",", "location", ")", "self", ".", "end_headers", "(", ")", "return" ]
Respond to the client with a 301 message and redirect them with a Location header. :param str location: The new location to redirect the client to.
[ "Respond", "to", "the", "client", "with", "a", "301", "message", "and", "redirect", "them", "with", "a", "Location", "header", "." ]
python
train
27.833333
Duke-GCB/DukeDSClient
ddsc/sdk/client.py
https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/sdk/client.py#L102-L111
def create_project(self, name, description): """ Create a new project with the specified name and description :param name: str: name of the project to create :param description: str: description of the project to create :return: Project """ return self._create_item_response( self.data_service.create_project(name, description), Project)
[ "def", "create_project", "(", "self", ",", "name", ",", "description", ")", ":", "return", "self", ".", "_create_item_response", "(", "self", ".", "data_service", ".", "create_project", "(", "name", ",", "description", ")", ",", "Project", ")" ]
Create a new project with the specified name and description :param name: str: name of the project to create :param description: str: description of the project to create :return: Project
[ "Create", "a", "new", "project", "with", "the", "specified", "name", "and", "description", ":", "param", "name", ":", "str", ":", "name", "of", "the", "project", "to", "create", ":", "param", "description", ":", "str", ":", "description", "of", "the", "project", "to", "create", ":", "return", ":", "Project" ]
python
train
40.8
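A hedged sketch of calling create_project; the assumption is that Client in the same module is the public entry point and that its no-argument constructor reads the local DukeDS configuration:

from ddsc.sdk.client import Client

client = Client()  # assumed: picks up local DukeDS credentials/config
project = client.create_project("Mouse RNA-Seq",
                                "Sequencing run 42, mouse liver samples")
print(project)     # a Project wrapping the service response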
jeremymcrae/denovonear
scripts/run_batch.py
https://github.com/jeremymcrae/denovonear/blob/feaab0fc77e89d70b31e8092899e4f0e68bac9fe/scripts/run_batch.py#L96-L112
def get_random_string(): """ make a random string, which we can use for bsub job IDs, so that different jobs do not have the same job IDs. """ # set up a random string to associate with the run hash_string = "%8x" % random.getrandbits(32) hash_string = hash_string.strip() # don't allow the random strings to be equivalent to a number, since # the LSF cluster interprets those differently from letter-containing # strings while is_number(hash_string): hash_string = "%8x" % random.getrandbits(32) hash_string = hash_string.strip() return hash_string
[ "def", "get_random_string", "(", ")", ":", "# set up a random string to associate with the run", "hash_string", "=", "\"%8x\"", "%", "random", ".", "getrandbits", "(", "32", ")", "hash_string", "=", "hash_string", ".", "strip", "(", ")", "# don't allow the random strings to be equivalent to a number, since", "# the LSF cluster interprets those differently from letter-containing", "# strings", "while", "is_number", "(", "hash_string", ")", ":", "hash_string", "=", "\"%8x\"", "%", "random", ".", "getrandbits", "(", "32", ")", "hash_string", "=", "hash_string", ".", "strip", "(", ")", "return", "hash_string" ]
make a random string, which we can use for bsub job IDs, so that different jobs do not have the same job IDs.
[ "make", "a", "random", "string", "which", "we", "can", "use", "for", "bsub", "job", "IDs", "so", "that", "different", "jobs", "do", "not", "have", "the", "same", "job", "IDs", "." ]
python
train
35.823529
gem/oq-engine
openquake/server/db/upgrade_manager.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/server/db/upgrade_manager.py#L291-L299
def extract_upgrade_scripts(self): """ Extract the OpenQuake upgrade scripts from the links in the GitHub page """ link_pattern = '>\s*{0}\s*<'.format(self.pattern[1:-1]) page = urllib.request.urlopen(self.upgrades_url).read() for mo in re.finditer(link_pattern, page): scriptname = mo.group(0)[1:-1].strip() yield self.parse_script_name(scriptname)
[ "def", "extract_upgrade_scripts", "(", "self", ")", ":", "link_pattern", "=", "'>\\s*{0}\\s*<'", ".", "format", "(", "self", ".", "pattern", "[", "1", ":", "-", "1", "]", ")", "page", "=", "urllib", ".", "request", ".", "urlopen", "(", "self", ".", "upgrades_url", ")", ".", "read", "(", ")", "for", "mo", "in", "re", ".", "finditer", "(", "link_pattern", ",", "page", ")", ":", "scriptname", "=", "mo", ".", "group", "(", "0", ")", "[", "1", ":", "-", "1", "]", ".", "strip", "(", ")", "yield", "self", ".", "parse_script_name", "(", "scriptname", ")" ]
Extract the OpenQuake upgrade scripts from the links in the GitHub page
[ "Extract", "the", "OpenQuake", "upgrade", "scripts", "from", "the", "links", "in", "the", "GitHub", "page" ]
python
train
45.888889
playpauseandstop/rororo
rororo/timedelta.py
https://github.com/playpauseandstop/rororo/blob/28a04e8028c29647941e727116335e9d6fd64c27/rororo/timedelta.py#L181-L188
def timedelta_seconds(value: datetime.timedelta) -> int: """Return full number of seconds from timedelta. By default, Python returns only one day seconds, not all timedelta seconds. :param value: Timedelta instance. """ return SECONDS_PER_DAY * value.days + value.seconds
[ "def", "timedelta_seconds", "(", "value", ":", "datetime", ".", "timedelta", ")", "->", "int", ":", "return", "SECONDS_PER_DAY", "*", "value", ".", "days", "+", "value", ".", "seconds" ]
Return full number of seconds from timedelta. By default, Python's ``timedelta.seconds`` covers only the seconds within the current day, not the full span of the timedelta. :param value: Timedelta instance.
[ "Return", "full", "number", "of", "seconds", "from", "timedelta", "." ]
python
train
35.75
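The point of timedelta_seconds above is that datetime.timedelta.seconds only counts the within-day remainder. A self-contained check (with SECONDS_PER_DAY assumed to be 86400, matching the module constant):

import datetime

SECONDS_PER_DAY = 86400  # assumed value of the module-level constant

def timedelta_seconds(value: datetime.timedelta) -> int:
    return SECONDS_PER_DAY * value.days + value.seconds

delta = datetime.timedelta(days=2, seconds=5)
print(delta.seconds)             # 5 -- only the within-day part
print(timedelta_seconds(delta))  # 172805 = 2 * 86400 + 5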
mitsei/dlkit
dlkit/primordium/locale/types/heading.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/primordium/locale/types/heading.py#L15-L34
def get_type_data(name): """Return dictionary representation of type. Can be used to initialize primordium.type.primitives.Type """ name = name.upper() try: return { 'authority': 'okapia.net', 'namespace': 'heading', 'identifier': name, 'domain': 'Headings', 'display_name': HEADING_TYPES[name] + ' Heading Type', 'display_label': HEADING_TYPES[name], 'description': ('The heading type for the ' + HEADING_TYPES[name] + ' heading.') } except KeyError: raise NotFound('Heading Type:' + name)
[ "def", "get_type_data", "(", "name", ")", ":", "name", "=", "name", ".", "upper", "(", ")", "try", ":", "return", "{", "'authority'", ":", "'okapia.net'", ",", "'namespace'", ":", "'heading'", ",", "'identifier'", ":", "name", ",", "'domain'", ":", "'Headings'", ",", "'display_name'", ":", "HEADING_TYPES", "[", "name", "]", "+", "' Heading Type'", ",", "'display_label'", ":", "HEADING_TYPES", "[", "name", "]", ",", "'description'", ":", "(", "'The heading type for the '", "+", "HEADING_TYPES", "[", "name", "]", "+", "' heading.'", ")", "}", "except", "KeyError", ":", "raise", "NotFound", "(", "'Heading Type:'", "+", "name", ")" ]
Return dictionary representation of type. Can be used to initialize primordium.type.primitives.Type
[ "Return", "dictionary", "representation", "of", "type", "." ]
python
train
31.65
fastai/fastai
fastai/script.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/script.py#L25-L33
def anno_parser(func): "Look at params (annotated with `Param`) in func and return an `ArgumentParser`" p = ArgumentParser(description=func.__doc__) for k,v in inspect.signature(func).parameters.items(): param = func.__annotations__.get(k, Param()) kwargs = param.kwargs if v.default != inspect.Parameter.empty: kwargs['default'] = v.default p.add_argument(f"{param.pre}{k}", **kwargs) return p
[ "def", "anno_parser", "(", "func", ")", ":", "p", "=", "ArgumentParser", "(", "description", "=", "func", ".", "__doc__", ")", "for", "k", ",", "v", "in", "inspect", ".", "signature", "(", "func", ")", ".", "parameters", ".", "items", "(", ")", ":", "param", "=", "func", ".", "__annotations__", ".", "get", "(", "k", ",", "Param", "(", ")", ")", "kwargs", "=", "param", ".", "kwargs", "if", "v", ".", "default", "!=", "inspect", ".", "Parameter", ".", "empty", ":", "kwargs", "[", "'default'", "]", "=", "v", ".", "default", "p", ".", "add_argument", "(", "f\"{param.pre}{k}\"", ",", "*", "*", "kwargs", ")", "return", "p" ]
Look at params (annotated with `Param`) in func and return an `ArgumentParser`
[ "Look", "at", "params", "(", "annotated", "with", "Param", ")", "in", "func", "and", "return", "an", "ArgumentParser" ]
python
train
48.222222
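A sketch of driving anno_parser above; the Param("help text", type) call signature is assumed from fastai's script module and may differ between versions:

from fastai.script import anno_parser, Param

def train(lr: Param("learning rate", float) = 0.1,
          epochs: Param("epoch count", int) = 1):
    "Train a toy model"
    print(lr, epochs)

args = anno_parser(train).parse_args(["--lr", "0.01"])
print(args.lr, args.epochs)  # 0.01 1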
EventTeam/beliefs
src/beliefs/beliefstate.py
https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/beliefstate.py#L96-L101
def set_environment_variable(self, key, val): """ Sets a variable if that variable is not already set """ if self.get_environment_variable(key) in [None, val]: self.__dict__['environment_variables'][key] = val else: raise Contradiction("Could not set environment variable %s" % (key))
[ "def", "set_environment_variable", "(", "self", ",", "key", ",", "val", ")", ":", "if", "self", ".", "get_environment_variable", "(", "key", ")", "in", "[", "None", ",", "val", "]", ":", "self", ".", "__dict__", "[", "'environment_variables'", "]", "[", "key", "]", "=", "val", "else", ":", "raise", "Contradiction", "(", "\"Could not set environment variable %s\"", "%", "(", "key", ")", ")" ]
Sets a variable if that variable is not already set
[ "Sets", "a", "variable", "if", "that", "variable", "is", "not", "already", "set" ]
python
train
54.5
junzis/pyModeS
pyModeS/decoder/bds/__init__.py
https://github.com/junzis/pyModeS/blob/8cd5655a04b08171a9ad5f1ffd232b7e0178ea53/pyModeS/decoder/bds/__init__.py#L30-L89
def is50or60(msg, spd_ref, trk_ref, alt_ref): """Use reference ground speed and trk to determine BDS50 and DBS60. Args: msg (String): 28 bytes hexadecimal message string spd_ref (float): reference speed (ADS-B ground speed), kts trk_ref (float): reference track (ADS-B track angle), deg alt_ref (float): reference altitude (ADS-B altitude), ft Returns: String or None: BDS version, or possible versions, or None if nothing matches. """ def vxy(v, angle): vx = v * np.sin(np.radians(angle)) vy = v * np.cos(np.radians(angle)) return vx, vy if not (bds50.is50(msg) and bds60.is60(msg)): return None h50 = bds50.trk50(msg) v50 = bds50.gs50(msg) if h50 is None or v50 is None: return 'BDS50,BDS60' h60 = bds60.hdg60(msg) m60 = bds60.mach60(msg) i60 = bds60.ias60(msg) if h60 is None or (m60 is None and i60 is None): return 'BDS50,BDS60' m60 = np.nan if m60 is None else m60 i60 = np.nan if i60 is None else i60 XY5 = vxy(v50*aero.kts, h50) XY6m = vxy(aero.mach2tas(m60, alt_ref*aero.ft), h60) XY6i = vxy(aero.cas2tas(i60*aero.kts, alt_ref*aero.ft), h60) allbds = ['BDS50', 'BDS60', 'BDS60'] X = np.array([XY5, XY6m, XY6i]) Mu = np.array(vxy(spd_ref*aero.kts, trk_ref)) # compute Mahalanobis distance matrix # Cov = [[20**2, 0], [0, 20**2]] # mmatrix = np.sqrt(np.dot(np.dot(X-Mu, np.linalg.inv(Cov)), (X-Mu).T)) # dist = np.diag(mmatrix) # since the covariance matrix is identity matrix, # M-dist is same as eculidian distance try: dist = np.linalg.norm(X-Mu, axis=1) BDS = allbds[np.nanargmin(dist)] except ValueError: return 'BDS50,BDS60' return BDS
[ "def", "is50or60", "(", "msg", ",", "spd_ref", ",", "trk_ref", ",", "alt_ref", ")", ":", "def", "vxy", "(", "v", ",", "angle", ")", ":", "vx", "=", "v", "*", "np", ".", "sin", "(", "np", ".", "radians", "(", "angle", ")", ")", "vy", "=", "v", "*", "np", ".", "cos", "(", "np", ".", "radians", "(", "angle", ")", ")", "return", "vx", ",", "vy", "if", "not", "(", "bds50", ".", "is50", "(", "msg", ")", "and", "bds60", ".", "is60", "(", "msg", ")", ")", ":", "return", "None", "h50", "=", "bds50", ".", "trk50", "(", "msg", ")", "v50", "=", "bds50", ".", "gs50", "(", "msg", ")", "if", "h50", "is", "None", "or", "v50", "is", "None", ":", "return", "'BDS50,BDS60'", "h60", "=", "bds60", ".", "hdg60", "(", "msg", ")", "m60", "=", "bds60", ".", "mach60", "(", "msg", ")", "i60", "=", "bds60", ".", "ias60", "(", "msg", ")", "if", "h60", "is", "None", "or", "(", "m60", "is", "None", "and", "i60", "is", "None", ")", ":", "return", "'BDS50,BDS60'", "m60", "=", "np", ".", "nan", "if", "m60", "is", "None", "else", "m60", "i60", "=", "np", ".", "nan", "if", "i60", "is", "None", "else", "i60", "XY5", "=", "vxy", "(", "v50", "*", "aero", ".", "kts", ",", "h50", ")", "XY6m", "=", "vxy", "(", "aero", ".", "mach2tas", "(", "m60", ",", "alt_ref", "*", "aero", ".", "ft", ")", ",", "h60", ")", "XY6i", "=", "vxy", "(", "aero", ".", "cas2tas", "(", "i60", "*", "aero", ".", "kts", ",", "alt_ref", "*", "aero", ".", "ft", ")", ",", "h60", ")", "allbds", "=", "[", "'BDS50'", ",", "'BDS60'", ",", "'BDS60'", "]", "X", "=", "np", ".", "array", "(", "[", "XY5", ",", "XY6m", ",", "XY6i", "]", ")", "Mu", "=", "np", ".", "array", "(", "vxy", "(", "spd_ref", "*", "aero", ".", "kts", ",", "trk_ref", ")", ")", "# compute Mahalanobis distance matrix", "# Cov = [[20**2, 0], [0, 20**2]]", "# mmatrix = np.sqrt(np.dot(np.dot(X-Mu, np.linalg.inv(Cov)), (X-Mu).T))", "# dist = np.diag(mmatrix)", "# since the covariance matrix is identity matrix,", "# M-dist is same as eculidian distance", "try", ":", "dist", "=", "np", ".", "linalg", ".", "norm", "(", "X", "-", "Mu", ",", "axis", "=", "1", ")", "BDS", "=", "allbds", "[", "np", ".", "nanargmin", "(", "dist", ")", "]", "except", "ValueError", ":", "return", "'BDS50,BDS60'", "return", "BDS" ]
Use reference ground speed and trk to determine BDS50 and BDS60. Args: msg (String): 28 bytes hexadecimal message string spd_ref (float): reference speed (ADS-B ground speed), kts trk_ref (float): reference track (ADS-B track angle), deg alt_ref (float): reference altitude (ADS-B altitude), ft Returns: String or None: BDS version, or possible versions, or None if nothing matches.
[ "Use", "reference", "ground", "speed", "and", "trk", "to", "determine", "BDS50", "and", "BDS60", "." ]
python
train
29
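A hedged call sketch for is50or60 above; the message below is a placeholder 28-character hex string for illustration, not a decoded real transmission:

from pyModeS.decoder.bds import is50or60

msg = "a8001ebcfffbff286504a8dc8717"  # placeholder, illustrative only
bds = is50or60(msg, spd_ref=450.0, trk_ref=180.0, alt_ref=35000.0)
print(bds)  # one of 'BDS50', 'BDS60', 'BDS50,BDS60', or None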
urbn/Caesium
caesium/handler.py
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L294-L317
def get(self, id): """ Get an by object by unique identifier :id string id: the bson id of an object :rtype: JSON """ try: if self.request.headers.get("Id"): object_ = yield self.client.find_one({self.request.headers.get("Id"): id}) else: object_ = yield self.client.find_one_by_id(id) if object_: self.write(object_) return self.raise_error(404, "%s/%s not found" % (self.object_name, id)) except InvalidId as ex: self.raise_error(400, message="Your ID is malformed: %s" % id) except Exception as ex: self.logger.error(ex) self.raise_error()
[ "def", "get", "(", "self", ",", "id", ")", ":", "try", ":", "if", "self", ".", "request", ".", "headers", ".", "get", "(", "\"Id\"", ")", ":", "object_", "=", "yield", "self", ".", "client", ".", "find_one", "(", "{", "self", ".", "request", ".", "headers", ".", "get", "(", "\"Id\"", ")", ":", "id", "}", ")", "else", ":", "object_", "=", "yield", "self", ".", "client", ".", "find_one_by_id", "(", "id", ")", "if", "object_", ":", "self", ".", "write", "(", "object_", ")", "return", "self", ".", "raise_error", "(", "404", ",", "\"%s/%s not found\"", "%", "(", "self", ".", "object_name", ",", "id", ")", ")", "except", "InvalidId", "as", "ex", ":", "self", ".", "raise_error", "(", "400", ",", "message", "=", "\"Your ID is malformed: %s\"", "%", "id", ")", "except", "Exception", "as", "ex", ":", "self", ".", "logger", ".", "error", "(", "ex", ")", "self", ".", "raise_error", "(", ")" ]
Get an object by unique identifier :id string id: the bson id of an object :rtype: JSON
[ "Get", "an", "object", "by", "unique", "identifier" ]
python
train
30.625
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L2512-L2516
def organization_field_show(self, id, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/organization_fields#show-organization-field" api_path = "/api/v2/organization_fields/{id}.json" api_path = api_path.format(id=id) return self.call(api_path, **kwargs)
[ "def", "organization_field_show", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/organization_fields/{id}.json\"", "api_path", "=", "api_path", ".", "format", "(", "id", "=", "id", ")", "return", "self", ".", "call", "(", "api_path", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/core/organization_fields#show-organization-field
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "core", "/", "organization_fields#show", "-", "organization", "-", "field" ]
python
train
58.6
LordDarkula/chess_py
chess_py/core/algebraic/location.py
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/core/algebraic/location.py#L201-L210
def shift_right(self, times=1): """ Finds Location shifted right by 1 :rtype: Location """ try: return Location(self._rank, self._file + times) except IndexError as e: raise IndexError(e)
[ "def", "shift_right", "(", "self", ",", "times", "=", "1", ")", ":", "try", ":", "return", "Location", "(", "self", ".", "_rank", ",", "self", ".", "_file", "+", "times", ")", "except", "IndexError", "as", "e", ":", "raise", "IndexError", "(", "e", ")" ]
Finds Location shifted right by 1 :rtype: Location
[ "Finds", "Location", "shifted", "right", "by", "1" ]
python
train
25.1
CivicSpleen/ambry
ambry/util/sortedcollection.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/util/sortedcollection.py#L193-L198
def find_le_index(self, k): 'Return last item with a key <= k. Raise ValueError if not found.' i = bisect_right(self._keys, k) if i: return i - 1 raise ValueError('No item found with key at or below: %r' % (k,))
[ "def", "find_le_index", "(", "self", ",", "k", ")", ":", "i", "=", "bisect_right", "(", "self", ".", "_keys", ",", "k", ")", "if", "i", ":", "return", "i", "-", "1", "raise", "ValueError", "(", "'No item found with key at or below: %r'", "%", "(", "k", ",", ")", ")" ]
Return last item with a key <= k. Raise ValueError if not found.
[ "Return", "last", "item", "with", "a", "key", "<", "=", "k", ".", "Raise", "ValueError", "if", "not", "found", "." ]
python
train
41.833333
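The bisect arithmetic in find_le_index is easy to verify in isolation; this standalone sketch mirrors the method body with plain stdlib calls:

from bisect import bisect_right

keys = [1, 4, 4, 9]
i = bisect_right(keys, 5)  # 3: insertion point after every key <= 5
print(i - 1, keys[i - 1])  # 2 4 -> index and key of the last item <= 5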
gagneurlab/concise
concise/data/encode.py
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/data/encode.py#L14-L31
def get_metadata(): """Get pandas.DataFrame with metadata about the PWM's. Columns: - PWM_id (id of the PWM - pass to get_pwm_list() for getting the pwm - info1 - additional information about the motifs - info2 - consensus: PWM consensus sequence """ motifs = _load_motifs() motif_names = sorted(list(motifs.keys())) df = pd.Series(motif_names).str.split(expand=True) df.rename(columns={0: "PWM_id", 1: "info1", 2: "info2"}, inplace=True) # compute the consensus consensus = pd.Series([PWM(motifs[m]).get_consensus() for m in motif_names]) df["consensus"] = consensus return df
[ "def", "get_metadata", "(", ")", ":", "motifs", "=", "_load_motifs", "(", ")", "motif_names", "=", "sorted", "(", "list", "(", "motifs", ".", "keys", "(", ")", ")", ")", "df", "=", "pd", ".", "Series", "(", "motif_names", ")", ".", "str", ".", "split", "(", "expand", "=", "True", ")", "df", ".", "rename", "(", "columns", "=", "{", "0", ":", "\"PWM_id\"", ",", "1", ":", "\"info1\"", ",", "2", ":", "\"info2\"", "}", ",", "inplace", "=", "True", ")", "# compute the consensus", "consensus", "=", "pd", ".", "Series", "(", "[", "PWM", "(", "motifs", "[", "m", "]", ")", ".", "get_consensus", "(", ")", "for", "m", "in", "motif_names", "]", ")", "df", "[", "\"consensus\"", "]", "=", "consensus", "return", "df" ]
Get pandas.DataFrame with metadata about the PWM's. Columns: - PWM_id (id of the PWM - pass to get_pwm_list() for getting the pwm - info1 - additional information about the motifs - info2 - consensus: PWM consensus sequence
[ "Get", "pandas", ".", "DataFrame", "with", "metadata", "about", "the", "PWM", "s", ".", "Columns", ":" ]
python
train
34.388889
alex-kostirin/pyatomac
atomac/Clipboard.py
https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/Clipboard.py#L133-L144
def clearContents(cls): """Clear contents of general pasteboard. Future enhancement can include specifying which clipboard to clear Returns: True on success; caller should expect to catch exceptions, probably from AppKit (ValueError) """ log_msg = 'Request to clear contents of pasteboard: general' logging.debug(log_msg) pb = AppKit.NSPasteboard.generalPasteboard() pb.clearContents() return True
[ "def", "clearContents", "(", "cls", ")", ":", "log_msg", "=", "'Request to clear contents of pasteboard: general'", "logging", ".", "debug", "(", "log_msg", ")", "pb", "=", "AppKit", ".", "NSPasteboard", ".", "generalPasteboard", "(", ")", "pb", ".", "clearContents", "(", ")", "return", "True" ]
Clear contents of general pasteboard. Future enhancement can include specifying which clipboard to clear Returns: True on success; caller should expect to catch exceptions, probably from AppKit (ValueError)
[ "Clear", "contents", "of", "general", "pasteboard", "." ]
python
valid
39.666667
TkTech/Jawa
jawa/attribute.py
https://github.com/TkTech/Jawa/blob/94c8424e699029ac33fbc0e866fff0ecb2742289/jawa/attribute.py#L67-L82
def unpack(self, source: IO): """ Read the ConstantPool from the file-like object `source`. .. note:: Advanced usage only. You will typically never need to call this method as it will be called for you when loading a ClassFile. :param source: Any file-like object providing `read()` """ count = unpack('>H', source.read(2))[0] for _ in repeat(None, count): name_index, length = unpack('>HI', source.read(6)) info_blob = source.read(length) self._table.append((name_index, info_blob))
[ "def", "unpack", "(", "self", ",", "source", ":", "IO", ")", ":", "count", "=", "unpack", "(", "'>H'", ",", "source", ".", "read", "(", "2", ")", ")", "[", "0", "]", "for", "_", "in", "repeat", "(", "None", ",", "count", ")", ":", "name_index", ",", "length", "=", "unpack", "(", "'>HI'", ",", "source", ".", "read", "(", "6", ")", ")", "info_blob", "=", "source", ".", "read", "(", "length", ")", "self", ".", "_table", ".", "append", "(", "(", "name_index", ",", "info_blob", ")", ")" ]
Read the ConstantPool from the file-like object `source`. .. note:: Advanced usage only. You will typically never need to call this method as it will be called for you when loading a ClassFile. :param source: Any file-like object providing `read()`
[ "Read", "the", "ConstantPool", "from", "the", "file", "-", "like", "object", "source", "." ]
python
train
36.6875
tylerbutler/engineer
engineer/devtools/theme_tools.py
https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/devtools/theme_tools.py#L20-L42
def compile_theme(theme_id=None): """Compiles a theme.""" from engineer.processors import convert_less from engineer.themes import ThemeManager if theme_id is None: themes = ThemeManager.themes().values() else: themes = [ThemeManager.theme(theme_id)] with(indent(2)): puts(colored.yellow("Compiling %s themes." % len(themes))) for theme in themes: theme_output_path = (theme.static_root / ('stylesheets/%s_precompiled.css' % theme.id)).normpath() puts(colored.cyan("Compiling theme %s to %s" % (theme.id, theme_output_path))) with indent(4): puts("Compiling...") convert_less(theme.static_root / ('stylesheets/%s.less' % theme.id), theme_output_path, minify=True) puts(colored.green("Done.", bold=True))
[ "def", "compile_theme", "(", "theme_id", "=", "None", ")", ":", "from", "engineer", ".", "processors", "import", "convert_less", "from", "engineer", ".", "themes", "import", "ThemeManager", "if", "theme_id", "is", "None", ":", "themes", "=", "ThemeManager", ".", "themes", "(", ")", ".", "values", "(", ")", "else", ":", "themes", "=", "[", "ThemeManager", ".", "theme", "(", "theme_id", ")", "]", "with", "(", "indent", "(", "2", ")", ")", ":", "puts", "(", "colored", ".", "yellow", "(", "\"Compiling %s themes.\"", "%", "len", "(", "themes", ")", ")", ")", "for", "theme", "in", "themes", ":", "theme_output_path", "=", "(", "theme", ".", "static_root", "/", "(", "'stylesheets/%s_precompiled.css'", "%", "theme", ".", "id", ")", ")", ".", "normpath", "(", ")", "puts", "(", "colored", ".", "cyan", "(", "\"Compiling theme %s to %s\"", "%", "(", "theme", ".", "id", ",", "theme_output_path", ")", ")", ")", "with", "indent", "(", "4", ")", ":", "puts", "(", "\"Compiling...\"", ")", "convert_less", "(", "theme", ".", "static_root", "/", "(", "'stylesheets/%s.less'", "%", "theme", ".", "id", ")", ",", "theme_output_path", ",", "minify", "=", "True", ")", "puts", "(", "colored", ".", "green", "(", "\"Done.\"", ",", "bold", "=", "True", ")", ")" ]
Compiles a theme.
[ "Compiles", "a", "theme", "." ]
python
train
38.391304
PyCQA/astroid
astroid/rebuilder.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/rebuilder.py#L353-L386
def visit_call(self, node, parent): """visit a CallFunc node by returning a fresh instance of it""" newnode = nodes.Call(node.lineno, node.col_offset, parent) starargs = _visit_or_none(node, "starargs", self, newnode) kwargs = _visit_or_none(node, "kwargs", self, newnode) args = [self.visit(child, newnode) for child in node.args] if node.keywords: keywords = [self.visit(child, newnode) for child in node.keywords] else: keywords = None if starargs: new_starargs = nodes.Starred( col_offset=starargs.col_offset, lineno=starargs.lineno, parent=starargs.parent, ) new_starargs.postinit(value=starargs) args.append(new_starargs) if kwargs: new_kwargs = nodes.Keyword( arg=None, col_offset=kwargs.col_offset, lineno=kwargs.lineno, parent=kwargs.parent, ) new_kwargs.postinit(value=kwargs) if keywords: keywords.append(new_kwargs) else: keywords = [new_kwargs] newnode.postinit(self.visit(node.func, newnode), args, keywords) return newnode
[ "def", "visit_call", "(", "self", ",", "node", ",", "parent", ")", ":", "newnode", "=", "nodes", ".", "Call", "(", "node", ".", "lineno", ",", "node", ".", "col_offset", ",", "parent", ")", "starargs", "=", "_visit_or_none", "(", "node", ",", "\"starargs\"", ",", "self", ",", "newnode", ")", "kwargs", "=", "_visit_or_none", "(", "node", ",", "\"kwargs\"", ",", "self", ",", "newnode", ")", "args", "=", "[", "self", ".", "visit", "(", "child", ",", "newnode", ")", "for", "child", "in", "node", ".", "args", "]", "if", "node", ".", "keywords", ":", "keywords", "=", "[", "self", ".", "visit", "(", "child", ",", "newnode", ")", "for", "child", "in", "node", ".", "keywords", "]", "else", ":", "keywords", "=", "None", "if", "starargs", ":", "new_starargs", "=", "nodes", ".", "Starred", "(", "col_offset", "=", "starargs", ".", "col_offset", ",", "lineno", "=", "starargs", ".", "lineno", ",", "parent", "=", "starargs", ".", "parent", ",", ")", "new_starargs", ".", "postinit", "(", "value", "=", "starargs", ")", "args", ".", "append", "(", "new_starargs", ")", "if", "kwargs", ":", "new_kwargs", "=", "nodes", ".", "Keyword", "(", "arg", "=", "None", ",", "col_offset", "=", "kwargs", ".", "col_offset", ",", "lineno", "=", "kwargs", ".", "lineno", ",", "parent", "=", "kwargs", ".", "parent", ",", ")", "new_kwargs", ".", "postinit", "(", "value", "=", "kwargs", ")", "if", "keywords", ":", "keywords", ".", "append", "(", "new_kwargs", ")", "else", ":", "keywords", "=", "[", "new_kwargs", "]", "newnode", ".", "postinit", "(", "self", ".", "visit", "(", "node", ".", "func", ",", "newnode", ")", ",", "args", ",", "keywords", ")", "return", "newnode" ]
visit a CallFunc node by returning a fresh instance of it
[ "visit", "a", "CallFunc", "node", "by", "returning", "a", "fresh", "instance", "of", "it" ]
python
train
37.352941
PaulHancock/Aegean
AegeanTools/fits_image.py
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fits_image.py#L63-L102
def get_beam(header): """ Create a :class:`AegeanTools.fits_image.Beam` object from a fits header. BPA may be missing but will be assumed to be zero. if BMAJ or BMIN are missing then return None instead of a beam object. Parameters ---------- header : HDUHeader The fits header. Returns ------- beam : :class:`AegeanTools.fits_image.Beam` Beam object, with a, b, and pa in degrees. """ if "BPA" not in header: log.warning("BPA not present in fits header, using 0") bpa = 0 else: bpa = header["BPA"] if "BMAJ" not in header: log.warning("BMAJ not present in fits header.") bmaj = None else: bmaj = header["BMAJ"] if "BMIN" not in header: log.warning("BMIN not present in fits header.") bmin = None else: bmin = header["BMIN"] if None in [bmaj, bmin, bpa]: return None beam = Beam(bmaj, bmin, bpa) return beam
[ "def", "get_beam", "(", "header", ")", ":", "if", "\"BPA\"", "not", "in", "header", ":", "log", ".", "warning", "(", "\"BPA not present in fits header, using 0\"", ")", "bpa", "=", "0", "else", ":", "bpa", "=", "header", "[", "\"BPA\"", "]", "if", "\"BMAJ\"", "not", "in", "header", ":", "log", ".", "warning", "(", "\"BMAJ not present in fits header.\"", ")", "bmaj", "=", "None", "else", ":", "bmaj", "=", "header", "[", "\"BMAJ\"", "]", "if", "\"BMIN\"", "not", "in", "header", ":", "log", ".", "warning", "(", "\"BMIN not present in fits header.\"", ")", "bmin", "=", "None", "else", ":", "bmin", "=", "header", "[", "\"BMIN\"", "]", "if", "None", "in", "[", "bmaj", ",", "bmin", ",", "bpa", "]", ":", "return", "None", "beam", "=", "Beam", "(", "bmaj", ",", "bmin", ",", "bpa", ")", "return", "beam" ]
Create a :class:`AegeanTools.fits_image.Beam` object from a fits header. BPA may be missing but will be assumed to be zero. If BMAJ or BMIN are missing then return None instead of a beam object. Parameters ---------- header : HDUHeader The fits header. Returns ------- beam : :class:`AegeanTools.fits_image.Beam` Beam object, with a, b, and pa in degrees.
[ "Create", "a", ":", "class", ":", "AegeanTools", ".", "fits_image", ".", "Beam", "object", "from", "a", "fits", "header", "." ]
python
train
23.775
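A minimal sketch of feeding get_beam a header; it assumes astropy.io.fits.Header (the header type Aegean works with) and that get_beam is imported from AegeanTools.fits_image:

from astropy.io import fits
from AegeanTools.fits_image import get_beam

hdr = fits.Header()
hdr["BMAJ"], hdr["BMIN"], hdr["BPA"] = 0.01, 0.008, 45.0  # degrees
beam = get_beam(hdr)
print(beam.a, beam.b, beam.pa)   # 0.01 0.008 45.0
print(get_beam(fits.Header()))   # None, with BMAJ/BMIN warnings logged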
dstufft/storages
storages/utils.py
https://github.com/dstufft/storages/blob/0d893afc1db32cd83eaf8e2ad4ed51b37933d5f0/storages/utils.py#L48-L70
def safe_join(base, *paths): """ Joins one or more path components to the base path component intelligently. Returns a normalized, absolute version of the final path. The final path must be located inside of the base path component (otherwise a ValueError is raised). """ base = base paths = [p for p in paths] final_path = abspath(os.path.join(base, *paths)) base_path = abspath(base) base_path_len = len(base_path) # Ensure final_path starts with base_path (using normcase to ensure we # don't false-negative on case insensitive operating systems like Windows) # and that the next character after the final path is os.sep (or nothing, # in which case final_path must be equal to base_path). if not os.path.normcase(final_path).startswith(os.path.normcase(base_path)) \ or final_path[base_path_len:base_path_len + 1] not in ("", os.path.sep): raise ValueError("The joined path (%s) is located outside of the base " "path component (%s)" % (final_path, base_path)) return final_path
[ "def", "safe_join", "(", "base", ",", "*", "paths", ")", ":", "base", "=", "base", "paths", "=", "[", "p", "for", "p", "in", "paths", "]", "final_path", "=", "abspath", "(", "os", ".", "path", ".", "join", "(", "base", ",", "*", "paths", ")", ")", "base_path", "=", "abspath", "(", "base", ")", "base_path_len", "=", "len", "(", "base_path", ")", "# Ensure final_path starts with base_path (using normcase to ensure we", "# don't false-negative on case insensitive operating systems like Windows)", "# and that the next character after the final path is os.sep (or nothing,", "# in which case final_path must be equal to base_path).", "if", "not", "os", ".", "path", ".", "normcase", "(", "final_path", ")", ".", "startswith", "(", "os", ".", "path", ".", "normcase", "(", "base_path", ")", ")", "or", "final_path", "[", "base_path_len", ":", "base_path_len", "+", "1", "]", "not", "in", "(", "\"\"", ",", "os", ".", "path", ".", "sep", ")", ":", "raise", "ValueError", "(", "\"The joined path (%s) is located outside of the base \"", "\"path component (%s)\"", "%", "(", "final_path", ",", "base_path", ")", ")", "return", "final_path" ]
Joins one or more path components to the base path component intelligently. Returns a normalized, absolute version of the final path. The final path must be located inside of the base path component (otherwise a ValueError is raised).
[ "Joins", "one", "or", "more", "path", "components", "to", "the", "base", "path", "component", "intelligently", ".", "Returns", "a", "normalized", "absolute", "version", "of", "the", "final", "path", "." ]
python
train
46.521739
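A worked check of the traversal guard in safe_join; the import path follows the record above, and the printed results assume a POSIX filesystem:

from storages.utils import safe_join

print(safe_join("/srv/media", "avatars", "me.png"))
# -> /srv/media/avatars/me.png (normalized absolute path)

try:
    safe_join("/srv/media", "..", "etc", "passwd")
except ValueError as exc:
    print(exc)  # joined path is located outside of the base path component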
frawau/aiolifx
aiolifx/aiolifx.py
https://github.com/frawau/aiolifx/blob/9bd8c5e6d291f4c79314989402f7e2c6476d5851/aiolifx/aiolifx.py#L687-L697
def device_characteristics_str(self, indent): """Convenience to string method. """ s = "{}\n".format(self.label) s += indent + "MAC Address: {}\n".format(self.mac_addr) s += indent + "IP Address: {}\n".format(self.ip_addr) s += indent + "Port: {}\n".format(self.port) s += indent + "Power: {}\n".format(str_map(self.power_level)) s += indent + "Location: {}\n".format(self.location) s += indent + "Group: {}\n".format(self.group) return s
[ "def", "device_characteristics_str", "(", "self", ",", "indent", ")", ":", "s", "=", "\"{}\\n\"", ".", "format", "(", "self", ".", "label", ")", "s", "+=", "indent", "+", "\"MAC Address: {}\\n\"", ".", "format", "(", "self", ".", "mac_addr", ")", "s", "+=", "indent", "+", "\"IP Address: {}\\n\"", ".", "format", "(", "self", ".", "ip_addr", ")", "s", "+=", "indent", "+", "\"Port: {}\\n\"", ".", "format", "(", "self", ".", "port", ")", "s", "+=", "indent", "+", "\"Power: {}\\n\"", ".", "format", "(", "str_map", "(", "self", ".", "power_level", ")", ")", "s", "+=", "indent", "+", "\"Location: {}\\n\"", ".", "format", "(", "self", ".", "location", ")", "s", "+=", "indent", "+", "\"Group: {}\\n\"", ".", "format", "(", "self", ".", "group", ")", "return", "s" ]
Convenience to string method.
[ "Convenience", "to", "string", "method", "." ]
python
train
46.181818
crs4/hl7apy
hl7apy/core.py
https://github.com/crs4/hl7apy/blob/91be488e9274f6ec975519a1d9c17045bc91bf74/hl7apy/core.py#L285-L329
def set(self, name, value, index=-1): """ Assign the ``value`` to the child having the given ``name`` at the ``index`` position :type name: ``str`` :param name: the child name (e.g. PID) :type value: an instance of :class:`Element <hl7apy.core.Element>`, a `str` or an instance of :class:`ElementProxy <hl7apy.core.ElementProxy>` :param value: the child value :type index: ``int`` :param index: the child position (e.g. 1) """ # just copy the first element of the ElementProxy (e.g. message.pid = message2.pid) if isinstance(value, ElementProxy): value = value[0].to_er7() name = name.upper() reference = None if name is None else self.element.find_child_reference(name) child_ref, child_name = (None, None) if reference is None else (reference['ref'], reference['name']) if isinstance(value, basestring): # if the value is a basestring, parse it child = self.element.parse_child(value, child_name=child_name, reference=child_ref) elif isinstance(value, Element): # it is already an instance of Element child = value elif isinstance(value, BaseDataType): child = self.create_element(name, False, reference) child.value = value else: raise ChildNotValid(value, child_name) if child.name != child_name: # e.g. message.pid = Segment('SPM') is forbidden raise ChildNotValid(value, child_name) child_to_remove = self.child_at_index(child_name, index) if child_to_remove is None: self.append(child) else: self.replace_child(child_to_remove, child) # a set has been called, change the temporary parent to be the actual one self.element.set_parent_to_traversal()
[ "def", "set", "(", "self", ",", "name", ",", "value", ",", "index", "=", "-", "1", ")", ":", "# just copy the first element of the ElementProxy (e.g. message.pid = message2.pid)", "if", "isinstance", "(", "value", ",", "ElementProxy", ")", ":", "value", "=", "value", "[", "0", "]", ".", "to_er7", "(", ")", "name", "=", "name", ".", "upper", "(", ")", "reference", "=", "None", "if", "name", "is", "None", "else", "self", ".", "element", ".", "find_child_reference", "(", "name", ")", "child_ref", ",", "child_name", "=", "(", "None", ",", "None", ")", "if", "reference", "is", "None", "else", "(", "reference", "[", "'ref'", "]", ",", "reference", "[", "'name'", "]", ")", "if", "isinstance", "(", "value", ",", "basestring", ")", ":", "# if the value is a basestring, parse it", "child", "=", "self", ".", "element", ".", "parse_child", "(", "value", ",", "child_name", "=", "child_name", ",", "reference", "=", "child_ref", ")", "elif", "isinstance", "(", "value", ",", "Element", ")", ":", "# it is already an instance of Element", "child", "=", "value", "elif", "isinstance", "(", "value", ",", "BaseDataType", ")", ":", "child", "=", "self", ".", "create_element", "(", "name", ",", "False", ",", "reference", ")", "child", ".", "value", "=", "value", "else", ":", "raise", "ChildNotValid", "(", "value", ",", "child_name", ")", "if", "child", ".", "name", "!=", "child_name", ":", "# e.g. message.pid = Segment('SPM') is forbidden", "raise", "ChildNotValid", "(", "value", ",", "child_name", ")", "child_to_remove", "=", "self", ".", "child_at_index", "(", "child_name", ",", "index", ")", "if", "child_to_remove", "is", "None", ":", "self", ".", "append", "(", "child", ")", "else", ":", "self", ".", "replace_child", "(", "child_to_remove", ",", "child", ")", "# a set has been called, change the temporary parent to be the actual one", "self", ".", "element", ".", "set_parent_to_traversal", "(", ")" ]
Assign the ``value`` to the child having the given ``name`` at the ``index`` position :type name: ``str`` :param name: the child name (e.g. PID) :type value: an instance of :class:`Element <hl7apy.core.Element>`, a `str` or an instance of :class:`ElementProxy <hl7apy.core.ElementProxy>` :param value: the child value :type index: ``int`` :param index: the child position (e.g. 1)
[ "Assign", "the", "value", "to", "the", "child", "having", "the", "given", "name", "at", "the", "index", "position" ]
python
train
40.777778
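The `set` method above is essentially a type dispatch: raw ER7 strings get parsed, `Element` instances are taken as-is, base datatypes get wrapped, and anything else is rejected, with a final name check so that e.g. an SPM segment cannot be assigned to `.pid`. A minimal standalone sketch of that pattern, using toy container/element stand-ins rather than the real hl7apy classes:

```python
class Element:
    """Toy stand-in for hl7apy's Element."""
    def __init__(self, name):
        self.name = name

class Container:
    def __init__(self):
        self.children = []

    def parse_child(self, er7_string, child_name):
        # Hypothetical parser: a real one would build the segment tree.
        return Element(child_name)

    def set(self, name, value):
        # Dispatch on the value's type, mirroring ElementProxy.set()
        if isinstance(value, str):
            child = self.parse_child(value, child_name=name)
        elif isinstance(value, Element):
            child = value
        else:
            raise TypeError('{!r} is not a valid child'.format(value))
        if child.name != name:  # e.g. assigning an SPM segment to .pid
            raise ValueError('{} does not fit slot {}'.format(child.name, name))
        self.children.append(child)

c = Container()
c.set('PID', 'PID|1||12345')  # parsed from an ER7 string
c.set('PID', Element('PID'))  # used as-is
```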
msmbuilder/msmbuilder
msmbuilder/msm/validation/transmat_errorbar.py
https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/msm/validation/transmat_errorbar.py#L4-L34
def create_perturb_params(countsmat, transmat=None): ''' Computes transition probabilities and standard errors of the transition probabilities due to finite sampling using the MSM counts matrix. First, the transition probabilities are computed by dividing the each element c_ij by the row-sumemd counts of row i. THe standard errors are then computed by first computing the standard deviation of the transition probability, treating each count as a Bernoulli process with p = t_ij (std = (t_ij - t_ij ^2)^0.5). This is then divided by the square root of the row-summed counts of row i to obtain the standard error. Parameters: ---------- countsmat: np.ndarray The msm counts matrix transmat: np.ndarray If you have a transition matrix you want to use (e.g. MLE symmetrized), you can supply that here. This function will use the transition probabilities from this matrix to calculate the Bernoulli standard deviations, which will be divided by the row-summed counts in the original supplied counts matrix. Returns: ----------- transmat, np.ndarray: The MSM transition matrix scale, np.ndarray: The matrix of standard errors for each transition probability ''' norm = np.sum(countsmat, axis=1) if not transmat: transmat = (countsmat.transpose() / norm).transpose() counts = (np.ones((len(transmat), len(transmat))) * norm).transpose() scale = ((transmat - transmat ** 2) ** 0.5 / counts ** 0.5) + 10 ** -15 return transmat, scale
[ "def", "create_perturb_params", "(", "countsmat", ",", "transmat", "=", "None", ")", ":", "norm", "=", "np", ".", "sum", "(", "countsmat", ",", "axis", "=", "1", ")", "if", "not", "transmat", ":", "transmat", "=", "(", "countsmat", ".", "transpose", "(", ")", "/", "norm", ")", ".", "transpose", "(", ")", "counts", "=", "(", "np", ".", "ones", "(", "(", "len", "(", "transmat", ")", ",", "len", "(", "transmat", ")", ")", ")", "*", "norm", ")", ".", "transpose", "(", ")", "scale", "=", "(", "(", "transmat", "-", "transmat", "**", "2", ")", "**", "0.5", "/", "counts", "**", "0.5", ")", "+", "10", "**", "-", "15", "return", "transmat", ",", "scale" ]
Computes transition probabilities and standard errors of the transition probabilities due to finite sampling using the MSM counts matrix. First, the transition probabilities are computed by dividing each element c_ij by the row-summed counts of row i. The standard errors are then computed by first computing the standard deviation of the transition probability, treating each count as a Bernoulli process with p = t_ij (std = (t_ij - t_ij^2)^0.5). This is then divided by the square root of the row-summed counts of row i to obtain the standard error. Parameters: ---------- countsmat: np.ndarray The MSM counts matrix transmat: np.ndarray If you have a transition matrix you want to use (e.g. MLE symmetrized), you can supply that here. This function will use the transition probabilities from this matrix to calculate the Bernoulli standard deviations, which will be divided by the row-summed counts in the original supplied counts matrix. Returns: ----------- transmat, np.ndarray: The MSM transition matrix scale, np.ndarray: The matrix of standard errors for each transition probability
[ "Computes", "transition", "probabilities", "and", "standard", "errors", "of", "the", "transition", "probabilities", "due", "to", "finite", "sampling", "using", "the", "MSM", "counts", "matrix", ".", "First", "the", "transition", "probabilities", "are", "computed", "by", "dividing", "the", "each", "element", "c_ij", "by", "the", "row", "-", "sumemd", "counts", "of", "row", "i", ".", "THe", "standard", "errors", "are", "then", "computed", "by", "first", "computing", "the", "standard", "deviation", "of", "the", "transition", "probability", "treating", "each", "count", "as", "a", "Bernoulli", "process", "with", "p", "=", "t_ij", "(", "std", "=", "(", "t_ij", "-", "t_ij", "^2", ")", "^0", ".", "5", ")", ".", "This", "is", "then", "divided", "by", "the", "square", "root", "of", "the", "row", "-", "summed", "counts", "of", "row", "i", "to", "obtain", "the", "standard", "error", ".", "Parameters", ":", "----------", "countsmat", ":", "np", ".", "ndarray", "The", "msm", "counts", "matrix", "transmat", ":", "np", ".", "ndarray", "If", "you", "have", "a", "transition", "matrix", "you", "want", "to", "use", "(", "e", ".", "g", ".", "MLE", "symmetrized", ")", "you", "can", "supply", "that", "here", ".", "This", "function", "will", "use", "the", "transition", "probabilities", "from", "this", "matrix", "to", "calculate", "the", "Bernoulli", "standard", "deviations", "which", "will", "be", "divided", "by", "the", "row", "-", "summed", "counts", "in", "the", "original", "supplied", "counts", "matrix", "." ]
python
train
49.935484
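The arithmetic in `create_perturb_params` is easy to sanity-check by hand. A small sketch in plain numpy (not msmbuilder itself) reproducing the formulas on a toy 2-state counts matrix:

```python
import numpy as np

# Toy 2-state counts matrix: row i holds the transition counts out of state i.
countsmat = np.array([[90., 10.],
                      [20., 80.]])

norm = countsmat.sum(axis=1)       # row-summed counts N_i
transmat = (countsmat.T / norm).T  # t_ij = c_ij / N_i

# Bernoulli std of each t_ij, divided by sqrt(N_i) -> standard error
scale = np.sqrt(transmat - transmat ** 2) / np.sqrt(norm[:, None]) + 1e-15

print(transmat)  # [[0.9 0.1] [0.2 0.8]]
print(scale)     # ~[[0.03 0.03] [0.04 0.04]]
```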
tensorflow/probability
tensorflow_probability/python/distributions/joint_distribution_sequential.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/joint_distribution_sequential.py#L276-L326
def _resolve_graph(self, distribution_names=None, leaf_name='x'): """Creates a `tuple` of `tuple`s of dependencies. This function is **experimental**. That said, we encourage its use and ask that you report problems to `[email protected]`. Args: distribution_names: `list` of `str` or `None` names corresponding to each of `model` elements. (`None`s are expanding into the appropriate `str`.) leaf_name: `str` used when no maker depends on a particular `model` element. Returns: graph: `tuple` of `(str tuple)` pairs representing the name of each distribution (maker) and the names of its dependencies. #### Example ```python d = tfd.JointDistributionSequential([ tfd.Independent(tfd.Exponential(rate=[100, 120]), 1), lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]), tfd.Normal(loc=0, scale=2.), lambda n, g: tfd.Normal(loc=n, scale=g), ]) d._resolve_graph() # ==> ( # ('e', ()), # ('g', ('e',)), # ('n', ()), # ('x', ('n', 'g')), # ) ``` """ # This function additionally depends on: # self._dist_fn_args # self._dist_fn_wrapped # TODO(b/129008220): Robustify this procedure. Eg, handle collisions better, # ignore args prefixed with `_`. if distribution_names is None or any(self._dist_fn_args): distribution_names = _resolve_distribution_names( self._dist_fn_args, distribution_names, leaf_name) if len(set(distribution_names)) != len(distribution_names): raise ValueError('Distribution names must be unique: {}'.format( distribution_names)) if len(distribution_names) != len(self._dist_fn_wrapped): raise ValueError('Distribution names must be 1:1 with `rvs`.') return tuple(zip(distribution_names, tuple(() if a is None else a for a in self._dist_fn_args)))
[ "def", "_resolve_graph", "(", "self", ",", "distribution_names", "=", "None", ",", "leaf_name", "=", "'x'", ")", ":", "# This function additionally depends on:", "# self._dist_fn_args", "# self._dist_fn_wrapped", "# TODO(b/129008220): Robustify this procedure. Eg, handle collisions better,", "# ignore args prefixed with `_`.", "if", "distribution_names", "is", "None", "or", "any", "(", "self", ".", "_dist_fn_args", ")", ":", "distribution_names", "=", "_resolve_distribution_names", "(", "self", ".", "_dist_fn_args", ",", "distribution_names", ",", "leaf_name", ")", "if", "len", "(", "set", "(", "distribution_names", ")", ")", "!=", "len", "(", "distribution_names", ")", ":", "raise", "ValueError", "(", "'Distribution names must be unique: {}'", ".", "format", "(", "distribution_names", ")", ")", "if", "len", "(", "distribution_names", ")", "!=", "len", "(", "self", ".", "_dist_fn_wrapped", ")", ":", "raise", "ValueError", "(", "'Distribution names must be 1:1 with `rvs`.'", ")", "return", "tuple", "(", "zip", "(", "distribution_names", ",", "tuple", "(", "(", ")", "if", "a", "is", "None", "else", "a", "for", "a", "in", "self", ".", "_dist_fn_args", ")", ")", ")" ]
Creates a `tuple` of `tuple`s of dependencies. This function is **experimental**. That said, we encourage its use and ask that you report problems to `[email protected]`. Args: distribution_names: `list` of `str` or `None` names corresponding to each of `model` elements. (`None`s are expanded into the appropriate `str`.) leaf_name: `str` used when no maker depends on a particular `model` element. Returns: graph: `tuple` of `(str tuple)` pairs representing the name of each distribution (maker) and the names of its dependencies. #### Example ```python d = tfd.JointDistributionSequential([ tfd.Independent(tfd.Exponential(rate=[100, 120]), 1), lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]), tfd.Normal(loc=0, scale=2.), lambda n, g: tfd.Normal(loc=n, scale=g), ]) d._resolve_graph() # ==> ( # ('e', ()), # ('g', ('e',)), # ('n', ()), # ('x', ('n', 'g')), # ) ```
[ "Creates", "a", "tuple", "of", "tuple", "s", "of", "dependencies", "." ]
python
test
38.254902
RiotGames/cloud-inquisitor
plugins/public/cinq-auditor-required-tags/cinq_auditor_required_tags/__init__.py
https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/plugins/public/cinq-auditor-required-tags/cinq_auditor_required_tags/__init__.py#L351-L427
def process_actions(self, actions): """Process the actions we want to take Args: actions (`list`): List of actions we want to take Returns: `list` of notifications """ notices = {} notification_contacts = {} for action in actions: resource = action['resource'] action_status = ActionStatus.SUCCEED try: if action['action'] == AuditActions.REMOVE: action_status = self.process_action( resource, AuditActions.REMOVE ) if action_status == ActionStatus.SUCCEED: db.session.delete(action['issue'].issue) elif action['action'] == AuditActions.STOP: action_status = self.process_action( resource, AuditActions.STOP ) if action_status == ActionStatus.SUCCEED: action['issue'].update({ 'missing_tags': action['missing_tags'], 'notes': action['notes'], 'last_alert': action['last_alert'], 'state': action['action'] }) elif action['action'] == AuditActions.FIXED: db.session.delete(action['issue'].issue) elif action['action'] == AuditActions.ALERT: action['issue'].update({ 'missing_tags': action['missing_tags'], 'notes': action['notes'], 'last_alert': action['last_alert'], 'state': action['action'] }) db.session.commit() if action_status == ActionStatus.SUCCEED: for owner in [ dict(t) for t in {tuple(d.items()) for d in (action['owners'] + self.permanent_emails)} ]: if owner['value'] not in notification_contacts: contact = NotificationContact(type=owner['type'], value=owner['value']) notification_contacts[owner['value']] = contact notices[contact] = { 'fixed': [], 'not_fixed': [] } else: contact = notification_contacts[owner['value']] if action['action'] == AuditActions.FIXED: notices[contact]['fixed'].append(action) else: notices[contact]['not_fixed'].append(action) except Exception as ex: self.log.exception('Unexpected error while processing resource {}/{}/{}/{}'.format( action['resource'].account.account_name, action['resource'].id, action['resource'], ex )) return notices
[ "def", "process_actions", "(", "self", ",", "actions", ")", ":", "notices", "=", "{", "}", "notification_contacts", "=", "{", "}", "for", "action", "in", "actions", ":", "resource", "=", "action", "[", "'resource'", "]", "action_status", "=", "ActionStatus", ".", "SUCCEED", "try", ":", "if", "action", "[", "'action'", "]", "==", "AuditActions", ".", "REMOVE", ":", "action_status", "=", "self", ".", "process_action", "(", "resource", ",", "AuditActions", ".", "REMOVE", ")", "if", "action_status", "==", "ActionStatus", ".", "SUCCEED", ":", "db", ".", "session", ".", "delete", "(", "action", "[", "'issue'", "]", ".", "issue", ")", "elif", "action", "[", "'action'", "]", "==", "AuditActions", ".", "STOP", ":", "action_status", "=", "self", ".", "process_action", "(", "resource", ",", "AuditActions", ".", "STOP", ")", "if", "action_status", "==", "ActionStatus", ".", "SUCCEED", ":", "action", "[", "'issue'", "]", ".", "update", "(", "{", "'missing_tags'", ":", "action", "[", "'missing_tags'", "]", ",", "'notes'", ":", "action", "[", "'notes'", "]", ",", "'last_alert'", ":", "action", "[", "'last_alert'", "]", ",", "'state'", ":", "action", "[", "'action'", "]", "}", ")", "elif", "action", "[", "'action'", "]", "==", "AuditActions", ".", "FIXED", ":", "db", ".", "session", ".", "delete", "(", "action", "[", "'issue'", "]", ".", "issue", ")", "elif", "action", "[", "'action'", "]", "==", "AuditActions", ".", "ALERT", ":", "action", "[", "'issue'", "]", ".", "update", "(", "{", "'missing_tags'", ":", "action", "[", "'missing_tags'", "]", ",", "'notes'", ":", "action", "[", "'notes'", "]", ",", "'last_alert'", ":", "action", "[", "'last_alert'", "]", ",", "'state'", ":", "action", "[", "'action'", "]", "}", ")", "db", ".", "session", ".", "commit", "(", ")", "if", "action_status", "==", "ActionStatus", ".", "SUCCEED", ":", "for", "owner", "in", "[", "dict", "(", "t", ")", "for", "t", "in", "{", "tuple", "(", "d", ".", "items", "(", ")", ")", "for", "d", "in", "(", "action", "[", "'owners'", "]", "+", "self", ".", "permanent_emails", ")", "}", "]", ":", "if", "owner", "[", "'value'", "]", "not", "in", "notification_contacts", ":", "contact", "=", "NotificationContact", "(", "type", "=", "owner", "[", "'type'", "]", ",", "value", "=", "owner", "[", "'value'", "]", ")", "notification_contacts", "[", "owner", "[", "'value'", "]", "]", "=", "contact", "notices", "[", "contact", "]", "=", "{", "'fixed'", ":", "[", "]", ",", "'not_fixed'", ":", "[", "]", "}", "else", ":", "contact", "=", "notification_contacts", "[", "owner", "[", "'value'", "]", "]", "if", "action", "[", "'action'", "]", "==", "AuditActions", ".", "FIXED", ":", "notices", "[", "contact", "]", "[", "'fixed'", "]", ".", "append", "(", "action", ")", "else", ":", "notices", "[", "contact", "]", "[", "'not_fixed'", "]", ".", "append", "(", "action", ")", "except", "Exception", "as", "ex", ":", "self", ".", "log", ".", "exception", "(", "'Unexpected error while processing resource {}/{}/{}/{}'", ".", "format", "(", "action", "[", "'resource'", "]", ".", "account", ".", "account_name", ",", "action", "[", "'resource'", "]", ".", "id", ",", "action", "[", "'resource'", "]", ",", "ex", ")", ")", "return", "notices" ]
Process the actions we want to take Args: actions (`list`): List of actions we want to take Returns: `dict` of notifications, keyed by notification contact
[ "Process", "the", "actions", "we", "want", "to", "take" ]
python
train
40.753247
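The one non-obvious line in `process_actions` is the owner de-duplication: dicts are unhashable, so each one is converted to a tuple of its items, collected in a set, and converted back. The trick in isolation, with obviously toy addresses:

```python
owners = [{'type': 'email', 'value': '[email protected]'},
          {'type': 'email', 'value': '[email protected]'},
          {'type': 'email', 'value': '[email protected]'}]  # duplicate entry

# Hash each dict as a tuple of its items, then rebuild the dicts.
unique = [dict(t) for t in {tuple(d.items()) for d in owners}]
print(len(unique))  # 2
```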
T-002/pycast
pycast/methods/regression.py
https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/methods/regression.py#L130-L151
def predict(self, timeseriesX, n, m): """ Calculates the dependent timeseries Y for the given parameters and independent timeseries. (y=m*x + n) :param TimeSeries timeseriesX: the independent Timeseries. :param float n: The interception with the x access that has been calculated during regression :param float m: The slope of the function that has been calculated during regression :return TimeSeries timeseries_y: the predicted values for the dependent TimeSeries. Its length and first dimension will equal to timeseriesX. """ new_entries = [] for entry in timeseriesX: predicted_value = m * entry[1] + n new_entries.append([entry[0], predicted_value]) return TimeSeries.from_twodim_list(new_entries)
[ "def", "predict", "(", "self", ",", "timeseriesX", ",", "n", ",", "m", ")", ":", "new_entries", "=", "[", "]", "for", "entry", "in", "timeseriesX", ":", "predicted_value", "=", "m", "*", "entry", "[", "1", "]", "+", "n", "new_entries", ".", "append", "(", "[", "entry", "[", "0", "]", ",", "predicted_value", "]", ")", "return", "TimeSeries", ".", "from_twodim_list", "(", "new_entries", ")" ]
Calculates the dependent timeseries Y for the given parameters and independent timeseries. (y=m*x + n) :param TimeSeries timeseriesX: the independent Timeseries. :param float n: The intercept (where the function crosses the y-axis) that has been calculated during regression :param float m: The slope of the function that has been calculated during regression :return TimeSeries timeseries_y: the predicted values for the dependent TimeSeries. Its length and first dimension will be equal to those of timeseriesX.
[ "Calculates", "the", "dependent", "timeseries", "Y", "for", "the", "given", "parameters", "and", "independent", "timeseries", ".", "(", "y", "=", "m", "*", "x", "+", "n", ")" ]
python
train
38.409091
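Since `predict` just applies y = m*x + n to the value column of `[timestamp, value]` pairs, its core fits in a list comprehension. A plain-Python sketch without the TimeSeries wrapper:

```python
timeseries_x = [[0, 1.0], [1, 2.0], [2, 3.0]]  # [timestamp, value] pairs
m, n = 2.0, 0.5

predicted = [[timestamp, m * value + n] for timestamp, value in timeseries_x]
print(predicted)  # [[0, 2.5], [1, 4.5], [2, 6.5]]
```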
ARMmbed/icetea
icetea_lib/tools/tools.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/tools/tools.py#L147-L161
def _is_pid_running_on_windows(pid): """ Check if PID is running for Windows systems """ import ctypes.wintypes kernel32 = ctypes.windll.kernel32 handle = kernel32.OpenProcess(1, 0, pid) if handle == 0: return False exit_code = ctypes.wintypes.DWORD() ret = kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code)) is_alive = (ret == 0 or exit_code.value == _STILL_ALIVE) # pylint: disable=undefined-variable kernel32.CloseHandle(handle) return is_alive
[ "def", "_is_pid_running_on_windows", "(", "pid", ")", ":", "import", "ctypes", ".", "wintypes", "kernel32", "=", "ctypes", ".", "windll", ".", "kernel32", "handle", "=", "kernel32", ".", "OpenProcess", "(", "1", ",", "0", ",", "pid", ")", "if", "handle", "==", "0", ":", "return", "False", "exit_code", "=", "ctypes", ".", "wintypes", ".", "DWORD", "(", ")", "ret", "=", "kernel32", ".", "GetExitCodeProcess", "(", "handle", ",", "ctypes", ".", "byref", "(", "exit_code", ")", ")", "is_alive", "=", "(", "ret", "==", "0", "or", "exit_code", ".", "value", "==", "_STILL_ALIVE", ")", "# pylint: disable=undefined-variable", "kernel32", ".", "CloseHandle", "(", "handle", ")", "return", "is_alive" ]
Check if PID is running for Windows systems
[ "Check", "if", "PID", "is", "running", "for", "Windows", "systems" ]
python
train
33.4
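The ctypes dance above is Windows-specific. For contrast, the conventional POSIX equivalent (shown as a sketch, not part of icetea) sends signal 0, which performs existence and permission checks without actually delivering a signal:

```python
import errno
import os

def is_pid_running_posix(pid):
    """Return True if a process with the given PID exists (POSIX only)."""
    try:
        os.kill(pid, 0)  # signal 0: error checking only, nothing is sent
    except OSError as err:
        if err.errno == errno.ESRCH:  # no such process
            return False
        if err.errno == errno.EPERM:  # exists, but owned by another user
            return True
        raise
    return True
```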
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_common_def.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_common_def.py#L1192-L1204
def router_fabric_virtual_gateway_address_family_ipv6_gateway_mac_address(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") router = ET.SubElement(config, "router", xmlns="urn:brocade.com:mgmt:brocade-common-def") fabric_virtual_gateway = ET.SubElement(router, "fabric-virtual-gateway", xmlns="urn:brocade.com:mgmt:brocade-anycast-gateway") address_family = ET.SubElement(fabric_virtual_gateway, "address-family") ipv6 = ET.SubElement(address_family, "ipv6") gateway_mac_address = ET.SubElement(ipv6, "gateway-mac-address") gateway_mac_address.text = kwargs.pop('gateway_mac_address') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "router_fabric_virtual_gateway_address_family_ipv6_gateway_mac_address", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "router", "=", "ET", ".", "SubElement", "(", "config", ",", "\"router\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-common-def\"", ")", "fabric_virtual_gateway", "=", "ET", ".", "SubElement", "(", "router", ",", "\"fabric-virtual-gateway\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-anycast-gateway\"", ")", "address_family", "=", "ET", ".", "SubElement", "(", "fabric_virtual_gateway", ",", "\"address-family\"", ")", "ipv6", "=", "ET", ".", "SubElement", "(", "address_family", ",", "\"ipv6\"", ")", "gateway_mac_address", "=", "ET", ".", "SubElement", "(", "ipv6", ",", "\"gateway-mac-address\"", ")", "gateway_mac_address", ".", "text", "=", "kwargs", ".", "pop", "(", "'gateway_mac_address'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
58.384615
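These auto-generated pynos methods all follow the same ElementTree recipe: build nested `SubElement`s, set the leaf's text, and hand the tree to a callback. A minimal sketch of the pattern, serialized with `ET.tostring` instead of a callback, with a hypothetical MAC value:

```python
import xml.etree.ElementTree as ET

config = ET.Element('config')
router = ET.SubElement(config, 'router',
                       xmlns='urn:brocade.com:mgmt:brocade-common-def')
gateway = ET.SubElement(router, 'fabric-virtual-gateway')
ipv6 = ET.SubElement(ET.SubElement(gateway, 'address-family'), 'ipv6')
mac = ET.SubElement(ipv6, 'gateway-mac-address')
mac.text = '00:11:22:33:44:55'  # hypothetical value

print(ET.tostring(config).decode())
```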
dlintott/gns3-converter
gns3converter/topology.py
https://github.com/dlintott/gns3-converter/blob/acbc55da51de86388dc5b5f6da55809b3c86b7ca/gns3converter/topology.py#L469-L498
def get_topology(self): """ Get the converted topology ready for JSON encoding :return: converted topology assembled into a single dict :rtype: dict """ topology = {'name': self._name, 'resources_type': 'local', 'topology': {}, 'type': 'topology', 'version': '1.0'} if self._links: topology['topology']['links'] = self._links if self._nodes: topology['topology']['nodes'] = self._nodes if self._servers: topology['topology']['servers'] = self._servers if self._notes: topology['topology']['notes'] = self._notes if self._shapes['ellipse']: topology['topology']['ellipses'] = self._shapes['ellipse'] if self._shapes['rectangle']: topology['topology']['rectangles'] = \ self._shapes['rectangle'] if self._images: topology['topology']['images'] = self._images return topology
[ "def", "get_topology", "(", "self", ")", ":", "topology", "=", "{", "'name'", ":", "self", ".", "_name", ",", "'resources_type'", ":", "'local'", ",", "'topology'", ":", "{", "}", ",", "'type'", ":", "'topology'", ",", "'version'", ":", "'1.0'", "}", "if", "self", ".", "_links", ":", "topology", "[", "'topology'", "]", "[", "'links'", "]", "=", "self", ".", "_links", "if", "self", ".", "_nodes", ":", "topology", "[", "'topology'", "]", "[", "'nodes'", "]", "=", "self", ".", "_nodes", "if", "self", ".", "_servers", ":", "topology", "[", "'topology'", "]", "[", "'servers'", "]", "=", "self", ".", "_servers", "if", "self", ".", "_notes", ":", "topology", "[", "'topology'", "]", "[", "'notes'", "]", "=", "self", ".", "_notes", "if", "self", ".", "_shapes", "[", "'ellipse'", "]", ":", "topology", "[", "'topology'", "]", "[", "'ellipses'", "]", "=", "self", ".", "_shapes", "[", "'ellipse'", "]", "if", "self", ".", "_shapes", "[", "'rectangle'", "]", ":", "topology", "[", "'topology'", "]", "[", "'rectangles'", "]", "=", "self", ".", "_shapes", "[", "'rectangle'", "]", "if", "self", ".", "_images", ":", "topology", "[", "'topology'", "]", "[", "'images'", "]", "=", "self", ".", "_images", "return", "topology" ]
Get the converted topology ready for JSON encoding :return: converted topology assembled into a single dict :rtype: dict
[ "Get", "the", "converted", "topology", "ready", "for", "JSON", "encoding" ]
python
train
34.6
ThreatConnect-Inc/tcex
tcex/tcex.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex.py#L404-L419
def datastore(self, domain, data_type, mapping=None): """Get instance of the DataStore module. Args: domain (str): The domain can be either "system", "organization", or "local". When using "organization" the data store can be accessed by any Application in the entire org, while "local" access is restricted to the App writing the data. The "system" option should not be used in almost all cases. data_type (str): The data type descriptor (e.g., tc:whois:cache). Returns: object: An instance of the DataStore Class. """ from .tcex_datastore import TcExDataStore return TcExDataStore(self, domain, data_type, mapping)
[ "def", "datastore", "(", "self", ",", "domain", ",", "data_type", ",", "mapping", "=", "None", ")", ":", "from", ".", "tcex_datastore", "import", "TcExDataStore", "return", "TcExDataStore", "(", "self", ",", "domain", ",", "data_type", ",", "mapping", ")" ]
Get instance of the DataStore module. Args: domain (str): The domain can be either "system", "organization", or "local". When using "organization" the data store can be accessed by any Application in the entire org, while "local" access is restricted to the App writing the data. The "system" option should not be used in almost all cases. data_type (str): The data type descriptor (e.g., tc:whois:cache). Returns: object: An instance of the DataStore Class.
[ "Get", "instance", "of", "the", "DataStore", "module", "." ]
python
train
46
XuShaohua/bcloud
bcloud/App.py
https://github.com/XuShaohua/bcloud/blob/4b54e0fdccf2b3013285fef05c97354cfa31697b/bcloud/App.py#L313-L331
def update_avatar(self): '''更新用户头像''' def do_update_avatar(info, error=None): if error or not info: logger.error('Failed to get user avatar: %s, %s' % (info, error)) else: uk, uname, img_path = info self.img_avatar.set_from_file(img_path) self.img_avatar.props.tooltip_text = '\n'.join([ self.profile['username'], uname, ]) if not self.profile['display-avatar']: return self.img_avatar.props.tooltip_text = '' cache_path = Config.get_cache_path(self.profile['username']) gutil.async_call(gutil.update_avatar, self.cookie, self.tokens, cache_path, callback=do_update_avatar)
[ "def", "update_avatar", "(", "self", ")", ":", "def", "do_update_avatar", "(", "info", ",", "error", "=", "None", ")", ":", "if", "error", "or", "not", "info", ":", "logger", ".", "error", "(", "'Failed to get user avatar: %s, %s'", "%", "(", "info", ",", "error", ")", ")", "else", ":", "uk", ",", "uname", ",", "img_path", "=", "info", "self", ".", "img_avatar", ".", "set_from_file", "(", "img_path", ")", "self", ".", "img_avatar", ".", "props", ".", "tooltip_text", "=", "'\\n'", ".", "join", "(", "[", "self", ".", "profile", "[", "'username'", "]", ",", "uname", ",", "]", ")", "if", "not", "self", ".", "profile", "[", "'display-avatar'", "]", ":", "return", "self", ".", "img_avatar", ".", "props", ".", "tooltip_text", "=", "''", "cache_path", "=", "Config", ".", "get_cache_path", "(", "self", ".", "profile", "[", "'username'", "]", ")", "gutil", ".", "async_call", "(", "gutil", ".", "update_avatar", ",", "self", ".", "cookie", ",", "self", ".", "tokens", ",", "cache_path", ",", "callback", "=", "do_update_avatar", ")" ]
Update the user's avatar
[ "更新用户头像" ]
python
train
42.789474
NoneGG/aredis
aredis/utils.py
https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/utils.py#L151-L176
def clusterdown_wrapper(func): """ Wrapper for CLUSTERDOWN error handling. If the cluster reports it is down it is assumed that: - connection_pool was disconnected - connection_pool was reseted - refereh_table_asap set to True It will try 3 times to rerun the command and raises ClusterDownException if it continues to fail. """ @wraps(func) async def inner(*args, **kwargs): for _ in range(0, 3): try: return await func(*args, **kwargs) except ClusterDownError: # Try again with the new cluster setup. All other errors # should be raised. pass # If it fails 3 times then raise exception back to caller raise ClusterDownError("CLUSTERDOWN error. Unable to rebuild the cluster") return inner
[ "def", "clusterdown_wrapper", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "async", "def", "inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "_", "in", "range", "(", "0", ",", "3", ")", ":", "try", ":", "return", "await", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "ClusterDownError", ":", "# Try again with the new cluster setup. All other errors", "# should be raised.", "pass", "# If it fails 3 times then raise exception back to caller", "raise", "ClusterDownError", "(", "\"CLUSTERDOWN error. Unable to rebuild the cluster\"", ")", "return", "inner" ]
Wrapper for CLUSTERDOWN error handling. If the cluster reports it is down, it is assumed that: - connection_pool was disconnected - connection_pool was reset - refresh_table_asap was set to True It will try 3 times to rerun the command and raises ClusterDownError if it continues to fail.
[ "Wrapper", "for", "CLUSTERDOWN", "error", "handling", "." ]
python
train
31.884615
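Stripped of the redis specifics, `clusterdown_wrapper` is a fixed-count retry decorator for coroutines. A generic sketch of the same shape, with a stand-in exception rather than aredis itself, and the retry count made a parameter:

```python
from functools import wraps

class ClusterDownError(Exception):
    """Stand-in for the aredis exception."""

def retry_on_cluster_down(retries=3):
    def decorator(func):
        @wraps(func)
        async def inner(*args, **kwargs):
            for _ in range(retries):
                try:
                    return await func(*args, **kwargs)
                except ClusterDownError:
                    pass  # cluster layout may have been rebuilt; try again
            raise ClusterDownError('CLUSTERDOWN persisted after '
                                   '%d attempts' % retries)
        return inner
    return decorator
```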
scopus-api/scopus
scopus/deprecated_/scopus_author.py
https://github.com/scopus-api/scopus/blob/27ce02dd3095bfdab9d3e8475543d7c17767d1ab/scopus/deprecated_/scopus_author.py#L267-L271
def get_journal_abstracts(self, refresh=True): """Return a list of ScopusAbstract objects using ScopusSearch, but only if belonging to a Journal.""" return [abstract for abstract in self.get_abstracts(refresh=refresh) if abstract.aggregationType == 'Journal']
[ "def", "get_journal_abstracts", "(", "self", ",", "refresh", "=", "True", ")", ":", "return", "[", "abstract", "for", "abstract", "in", "self", ".", "get_abstracts", "(", "refresh", "=", "refresh", ")", "if", "abstract", ".", "aggregationType", "==", "'Journal'", "]" ]
Return a list of ScopusAbstract objects using ScopusSearch, but only if belonging to a Journal.
[ "Return", "a", "list", "of", "ScopusAbstract", "objects", "using", "ScopusSearch", "but", "only", "if", "belonging", "to", "a", "Journal", "." ]
python
train
59.6
SiLab-Bonn/pixel_clusterizer
pixel_clusterizer/cluster_functions.py
https://github.com/SiLab-Bonn/pixel_clusterizer/blob/d2c8c3072fb03ebb7c6a3e8c57350fbbe38efd4d/pixel_clusterizer/cluster_functions.py#L137-L143
def _is_in_max_difference(value_1, value_2, max_difference): ''' Helper function to determine the difference of two values that can be np.uints. Works in python and numba mode. Circumvents numba bug #1653 ''' if value_1 <= value_2: return value_2 - value_1 <= max_difference return value_1 - value_2 <= max_difference
[ "def", "_is_in_max_difference", "(", "value_1", ",", "value_2", ",", "max_difference", ")", ":", "if", "value_1", "<=", "value_2", ":", "return", "value_2", "-", "value_1", "<=", "max_difference", "return", "value_1", "-", "value_2", "<=", "max_difference" ]
Helper function to determine the difference of two values that can be np.uints. Works in python and numba mode. Circumvents numba bug #1653
[ "Helper", "function", "to", "determine", "the", "difference", "of", "two", "values", "that", "can", "be", "np", ".", "uints", ".", "Works", "in", "python", "and", "numba", "mode", ".", "Circumvents", "numba", "bug", "#1653" ]
python
test
48.428571
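The reason for the branch is unsigned underflow: computing the "wrong-way" difference of two unsigned integers wraps around instead of going negative. A quick numpy demonstration:

```python
import numpy as np

a = np.array([3], dtype=np.uint16)
b = np.array([5], dtype=np.uint16)

print(a - b)       # [65534]: the subtraction wrapped around

# Ordering the operands first keeps the difference meaningful.
print(b - a <= 2)  # [ True], as intended
```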
apache/incubator-heron
heron/instance/src/python/network/heron_client.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/instance/src/python/network/heron_client.py#L186-L197
def start_connect(self): """Tries to connect to the Heron Server ``loop()`` method needs to be called after this. """ Log.debug("In start_connect() of %s" % self._get_classname()) # TODO: specify buffer size, exception handling self.create_socket(socket.AF_INET, socket.SOCK_STREAM) # when ready, handle_connect is called self._connecting = True self.connect(self.endpoint)
[ "def", "start_connect", "(", "self", ")", ":", "Log", ".", "debug", "(", "\"In start_connect() of %s\"", "%", "self", ".", "_get_classname", "(", ")", ")", "# TODO: specify buffer size, exception handling", "self", ".", "create_socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "# when ready, handle_connect is called", "self", ".", "_connecting", "=", "True", "self", ".", "connect", "(", "self", ".", "endpoint", ")" ]
Tries to connect to the Heron Server ``loop()`` method needs to be called after this.
[ "Tries", "to", "connect", "to", "the", "Heron", "Server" ]
python
valid
33.333333
AdvancedClimateSystems/uModbus
umodbus/functions.py
https://github.com/AdvancedClimateSystems/uModbus/blob/0560a42308003f4072d988f28042b8d55b694ad4/umodbus/functions.py#L99-L113
def pdu_to_function_code_or_raise_error(resp_pdu): """ Parse response PDU and return of :class:`ModbusFunction` or raise error. :param resp_pdu: PDU of response. :return: Subclass of :class:`ModbusFunction` matching the response. :raises ModbusError: When response contains error code. """ function_code = struct.unpack('>B', resp_pdu[0:1])[0] if function_code not in function_code_to_function_map.keys(): error_code = struct.unpack('>B', resp_pdu[1:2])[0] raise error_code_to_exception_map[error_code] return function_code
[ "def", "pdu_to_function_code_or_raise_error", "(", "resp_pdu", ")", ":", "function_code", "=", "struct", ".", "unpack", "(", "'>B'", ",", "resp_pdu", "[", "0", ":", "1", "]", ")", "[", "0", "]", "if", "function_code", "not", "in", "function_code_to_function_map", ".", "keys", "(", ")", ":", "error_code", "=", "struct", ".", "unpack", "(", "'>B'", ",", "resp_pdu", "[", "1", ":", "2", "]", ")", "[", "0", "]", "raise", "error_code_to_exception_map", "[", "error_code", "]", "return", "function_code" ]
Parse response PDU and return its function code, or raise an error. :param resp_pdu: PDU of response. :return: Function code of the response. :raises ModbusError: When the response contains an error code.
[ "Parse", "response", "PDU", "and", "return", "of", ":", "class", ":", "ModbusFunction", "or", "raise", "error", "." ]
python
train
37.6
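The byte-level layout this relies on: byte 0 of a Modbus response PDU is the function code, and exception responses set its high bit, putting the exception code in byte 1. A sketch with hand-built PDUs:

```python
import struct

# Normal "read holding registers" response: function code 0x03
resp_pdu = bytes([0x03, 0x02, 0x00, 0x2A])
print(struct.unpack('>B', resp_pdu[0:1])[0])  # 3

# Exception response: 0x83 = 0x03 with the high bit set,
# followed by exception code 0x02 ("illegal data address")
err_pdu = bytes([0x83, 0x02])
print(struct.unpack('>B', err_pdu[1:2])[0])   # 2
```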
neurosynth/neurosynth
neurosynth/analysis/classify.py
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/classify.py#L291-L306
def fit(self, X, y, cv=None, class_weight='auto'): """ Fits X to outcomes y, using clf """ # Incorporate error checking such as: # if isinstance(self.classifier, ScikitClassifier): # do one thing # otherwise ... self.X = X self.y = y self.set_class_weight(class_weight=class_weight, y=y) self.clf = self.clf.fit(X, y) return self.clf
[ "def", "fit", "(", "self", ",", "X", ",", "y", ",", "cv", "=", "None", ",", "class_weight", "=", "'auto'", ")", ":", "# Incorporate error checking such as :", "# if isinstance(self.classifier, ScikitClassifier):", "# do one thingNone", "# otherwiseNone.", "self", ".", "X", "=", "X", "self", ".", "y", "=", "y", "self", ".", "set_class_weight", "(", "class_weight", "=", "class_weight", ",", "y", "=", "y", ")", "self", ".", "clf", "=", "self", ".", "clf", ".", "fit", "(", "X", ",", "y", ")", "return", "self", ".", "clf" ]
Fits X to outcomes y, using clf
[ "Fits", "X", "to", "outcomes", "y", "using", "clf" ]
python
test
25.8125
danielfrg/datasciencebox
datasciencebox/core/cloud/instance.py
https://github.com/danielfrg/datasciencebox/blob/6b7aa642c6616a46547035fcb815acc1de605a6f/datasciencebox/core/cloud/instance.py#L36-L50
def new(cls, settings, *args, **kwargs): """ Create a new Cloud instance based on the Settings """ logger.debug('Initializing new "%s" Instance object' % settings['CLOUD']) cloud = settings['CLOUD'] if cloud == 'bare': self = BareInstance(settings=settings, *args, **kwargs) elif cloud == 'aws': self = AWSInstance(settings=settings, *args, **kwargs) elif cloud == 'gcp': self = GCPInstance(settings=settings, *args, **kwargs) else: raise DSBException('Cloud "%s" not supported' % cloud) return self
[ "def", "new", "(", "cls", ",", "settings", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "debug", "(", "'Initializing new \"%s\" Instance object'", "%", "settings", "[", "'CLOUD'", "]", ")", "cloud", "=", "settings", "[", "'CLOUD'", "]", "if", "cloud", "==", "'bare'", ":", "self", "=", "BareInstance", "(", "settings", "=", "settings", ",", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "cloud", "==", "'aws'", ":", "self", "=", "AWSInstance", "(", "settings", "=", "settings", ",", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "cloud", "==", "'gcp'", ":", "self", "=", "GCPInstance", "(", "settings", "=", "settings", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "raise", "DSBException", "(", "'Cloud \"%s\" not supported'", "%", "cloud", ")", "return", "self" ]
Create a new Cloud instance based on the Settings
[ "Create", "a", "new", "Cloud", "instance", "based", "on", "the", "Settings" ]
python
train
40.866667
happyleavesaoc/python-limitlessled
limitlessled/group/commands/legacy.py
https://github.com/happyleavesaoc/python-limitlessled/blob/70307c2bf8c91430a99579d2ad18b228ec7a8488/limitlessled/group/commands/legacy.py#L72-L84
def _build_command(self, cmd_1, cmd_2=None, select=False, select_command=None): """ Constructs the complete command. :param cmd_1: Light command 1. :param cmd_2: Light command 2. :param select: If command requires selection. :param select_command: Selection command bytes. :return: The complete command. """ return CommandLegacy(cmd_1, cmd_2, self._group_number, select, select_command)
[ "def", "_build_command", "(", "self", ",", "cmd_1", ",", "cmd_2", "=", "None", ",", "select", "=", "False", ",", "select_command", "=", "None", ")", ":", "return", "CommandLegacy", "(", "cmd_1", ",", "cmd_2", ",", "self", ".", "_group_number", ",", "select", ",", "select_command", ")" ]
Constructs the complete command. :param cmd_1: Light command 1. :param cmd_2: Light command 2. :param select: If command requires selection. :param select_command: Selection command bytes. :return: The complete command.
[ "Constructs", "the", "complete", "command", ".", ":", "param", "cmd_1", ":", "Light", "command", "1", ".", ":", "param", "cmd_2", ":", "Light", "command", "2", ".", ":", "param", "select", ":", "If", "command", "requires", "selection", ".", ":", "param", "select_command", ":", "Selection", "command", "bytes", ".", ":", "return", ":", "The", "complete", "command", "." ]
python
train
38.384615
KimiNewt/pyshark
src/pyshark/tshark/tshark.py
https://github.com/KimiNewt/pyshark/blob/089ea6208c4321f03bc548f491e00a053285918f/src/pyshark/tshark/tshark.py#L96-L105
def get_tshark_interfaces(tshark_path=None): """ Returns a list of interface numbers from the output tshark -D. Used internally to capture on multiple interfaces. """ parameters = [get_process_path(tshark_path), '-D'] with open(os.devnull, 'w') as null: tshark_interfaces = subprocess.check_output(parameters, stderr=null).decode("utf-8") return [line.split('.')[0] for line in tshark_interfaces.splitlines()]
[ "def", "get_tshark_interfaces", "(", "tshark_path", "=", "None", ")", ":", "parameters", "=", "[", "get_process_path", "(", "tshark_path", ")", ",", "'-D'", "]", "with", "open", "(", "os", ".", "devnull", ",", "'w'", ")", "as", "null", ":", "tshark_interfaces", "=", "subprocess", ".", "check_output", "(", "parameters", ",", "stderr", "=", "null", ")", ".", "decode", "(", "\"utf-8\"", ")", "return", "[", "line", ".", "split", "(", "'.'", ")", "[", "0", "]", "for", "line", "in", "tshark_interfaces", ".", "splitlines", "(", ")", "]" ]
Returns a list of interface numbers from the output tshark -D. Used internally to capture on multiple interfaces.
[ "Returns", "a", "list", "of", "interface", "numbers", "from", "the", "output", "tshark", "-", "D", ".", "Used", "internally", "to", "capture", "on", "multiple", "interfaces", "." ]
python
train
43.7
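`tshark -D` prints one interface per line, e.g. `1. eth0`, and the code above keeps only the number before the first dot. The parsing step in isolation, on made-up output:

```python
sample_output = "1. eth0\n2. lo (Loopback)\n3. any\n"
interfaces = [line.split('.')[0] for line in sample_output.splitlines()]
print(interfaces)  # ['1', '2', '3']
```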
zsimic/runez
src/runez/base.py
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/base.py#L187-L197
def remove_threadlocal(self, name): """ Args: name (str | unicode): Remove entry with `name` from current thread's context """ with self._lock: if self._tpayload is not None: if name in self._tpayload.context: del self._tpayload.context[name] if not self._tpayload.context: self._tpayload = None
[ "def", "remove_threadlocal", "(", "self", ",", "name", ")", ":", "with", "self", ".", "_lock", ":", "if", "self", ".", "_tpayload", "is", "not", "None", ":", "if", "name", "in", "self", ".", "_tpayload", ".", "context", ":", "del", "self", ".", "_tpayload", ".", "context", "[", "name", "]", "if", "not", "self", ".", "_tpayload", ".", "context", ":", "self", ".", "_tpayload", "=", "None" ]
Args: name (str | unicode): Remove entry with `name` from current thread's context
[ "Args", ":", "name", "(", "str", "|", "unicode", ")", ":", "Remove", "entry", "with", "name", "from", "current", "thread", "s", "context" ]
python
train
37.545455
O365/python-o365
O365/utils/attachment.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/utils/attachment.py#L315-L320
def _update_parent_attachments(self): """ Tries to update the parent property 'has_attachments' """ try: self._parent.has_attachments = bool(len(self.__attachments)) except AttributeError: pass
[ "def", "_update_parent_attachments", "(", "self", ")", ":", "try", ":", "self", ".", "_parent", ".", "has_attachments", "=", "bool", "(", "len", "(", "self", ".", "__attachments", ")", ")", "except", "AttributeError", ":", "pass" ]
Tries to update the parent property 'has_attachments'
[ "Tries", "to", "update", "the", "parent", "property", "has_attachments" ]
python
train
39.333333
mlperf/training
reinforcement/tensorflow/minigo/rl_loop/update_resign_threshold.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/rl_loop/update_resign_threshold.py#L56-L79
def update_flagfile(flags_path, new_threshold): """Updates the flagfile at `flags_path`, changing the value for `resign_threshold` to `new_threshold` """ if abs(new_threshold) > 1: raise ValueError("Invalid new percentile for resign threshold") with tf.gfile.GFile(flags_path) as f: lines = f.read() if new_threshold > 0: new_threshold *= -1 if not RESIGN_FLAG_REGEX.search(lines): print("Resign threshold flag not found in flagfile {}! Aborting.".format(flags_path)) sys.exit(1) old_threshold = RESIGN_FLAG_REGEX.search(lines).groups(1) lines = re.sub(RESIGN_FLAG_REGEX, "--resign_threshold={:.3f}".format(new_threshold), lines) if abs(float(old_threshold[0]) - new_threshold) < 0.001: print("Not updating percentiles; {} ~= {:.3f}".format( old_threshold[0], new_threshold), flush=True) else: print("Updated percentile from {} to {:.3f}".format( old_threshold[0], new_threshold), flush=True) with tf.gfile.GFile(flags_path, 'w') as f: f.write(lines)
[ "def", "update_flagfile", "(", "flags_path", ",", "new_threshold", ")", ":", "if", "abs", "(", "new_threshold", ")", ">", "1", ":", "raise", "ValueError", "(", "\"Invalid new percentile for resign threshold\"", ")", "with", "tf", ".", "gfile", ".", "GFile", "(", "flags_path", ")", "as", "f", ":", "lines", "=", "f", ".", "read", "(", ")", "if", "new_threshold", ">", "0", ":", "new_threshold", "*=", "-", "1", "if", "not", "RESIGN_FLAG_REGEX", ".", "search", "(", "lines", ")", ":", "print", "(", "\"Resign threshold flag not found in flagfile {}! Aborting.\"", ".", "format", "(", "flags_path", ")", ")", "sys", ".", "exit", "(", "1", ")", "old_threshold", "=", "RESIGN_FLAG_REGEX", ".", "search", "(", "lines", ")", ".", "groups", "(", "1", ")", "lines", "=", "re", ".", "sub", "(", "RESIGN_FLAG_REGEX", ",", "\"--resign_threshold={:.3f}\"", ".", "format", "(", "new_threshold", ")", ",", "lines", ")", "if", "abs", "(", "float", "(", "old_threshold", "[", "0", "]", ")", "-", "new_threshold", ")", "<", "0.001", ":", "print", "(", "\"Not updating percentiles; {} ~= {:.3f}\"", ".", "format", "(", "old_threshold", "[", "0", "]", ",", "new_threshold", ")", ",", "flush", "=", "True", ")", "else", ":", "print", "(", "\"Updated percentile from {} to {:.3f}\"", ".", "format", "(", "old_threshold", "[", "0", "]", ",", "new_threshold", ")", ",", "flush", "=", "True", ")", "with", "tf", ".", "gfile", ".", "GFile", "(", "flags_path", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "lines", ")" ]
Updates the flagfile at `flags_path`, changing the value for `resign_threshold` to `new_threshold`
[ "Updates", "the", "flagfile", "at", "flags_path", "changing", "the", "value", "for", "resign_threshold", "to", "new_threshold" ]
python
train
45.041667
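`RESIGN_FLAG_REGEX` is defined elsewhere in the module and not shown above; assuming it captures the current value of the `--resign_threshold` flag, the search/substitute cycle looks like this (the regex below is a guess at its shape, not the real definition):

```python
import re

RESIGN_FLAG_REGEX = re.compile(r'--resign_threshold=(-?\d+\.\d+)')  # assumed

lines = '--num_readouts=400\n--resign_threshold=-0.900\n'
old_threshold = RESIGN_FLAG_REGEX.search(lines).group(1)
lines = RESIGN_FLAG_REGEX.sub('--resign_threshold={:.3f}'.format(-0.95), lines)

print(old_threshold)  # -0.900
print(lines)          # the flag line now reads --resign_threshold=-0.950
```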
jldantas/libmft
libmft/attribute.py
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/attribute.py#L1599-L1630
def _from_binary_reparse(cls, binary_stream): """See base class.""" ''' Reparse type flags - 4 Reparse tag - 4 bits Reserved - 12 bits Reparse type - 2 bits Reparse data length - 2 Padding - 2 ''' #content = cls._REPR.unpack(binary_view[:cls._REPR.size]) reparse_tag, data_len = cls._REPR.unpack(binary_stream[:cls._REPR.size]) #reparse_tag (type, flags) data_len, guid, data reparse_type = ReparseType(reparse_tag & 0x0000FFFF) reparse_flags = ReparseFlags((reparse_tag & 0xF0000000) >> 28) guid = None #guid exists only in third party reparse points if reparse_flags & ReparseFlags.IS_MICROSOFT:#a microsoft tag if reparse_type is ReparseType.SYMLINK: data = SymbolicLink.create_from_binary(binary_stream[cls._REPR.size:]) elif reparse_type is ReparseType.MOUNT_POINT: data = JunctionOrMount.create_from_binary(binary_stream[cls._REPR.size:]) else: data = binary_stream[cls._REPR.size:].tobytes() else: guid = UUID(bytes_le=binary_stream[cls._REPR.size:cls._REPR.size+16].tobytes()) data = binary_stream[cls._REPR.size+16:].tobytes() nw_obj = cls((reparse_type, reparse_flags, data_len, guid, data)) _MOD_LOGGER.debug("Attempted to unpack REPARSE_POINT from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj) return nw_obj
[ "def", "_from_binary_reparse", "(", "cls", ",", "binary_stream", ")", ":", "''' Reparse type flags - 4\n Reparse tag - 4 bits\n Reserved - 12 bits\n Reparse type - 2 bits\n Reparse data length - 2\n Padding - 2\n '''", "#content = cls._REPR.unpack(binary_view[:cls._REPR.size])", "reparse_tag", ",", "data_len", "=", "cls", ".", "_REPR", ".", "unpack", "(", "binary_stream", "[", ":", "cls", ".", "_REPR", ".", "size", "]", ")", "#reparse_tag (type, flags) data_len, guid, data", "reparse_type", "=", "ReparseType", "(", "reparse_tag", "&", "0x0000FFFF", ")", "reparse_flags", "=", "ReparseFlags", "(", "(", "reparse_tag", "&", "0xF0000000", ")", ">>", "28", ")", "guid", "=", "None", "#guid exists only in third party reparse points", "if", "reparse_flags", "&", "ReparseFlags", ".", "IS_MICROSOFT", ":", "#a microsoft tag", "if", "reparse_type", "is", "ReparseType", ".", "SYMLINK", ":", "data", "=", "SymbolicLink", ".", "create_from_binary", "(", "binary_stream", "[", "cls", ".", "_REPR", ".", "size", ":", "]", ")", "elif", "reparse_type", "is", "ReparseType", ".", "MOUNT_POINT", ":", "data", "=", "JunctionOrMount", ".", "create_from_binary", "(", "binary_stream", "[", "cls", ".", "_REPR", ".", "size", ":", "]", ")", "else", ":", "data", "=", "binary_stream", "[", "cls", ".", "_REPR", ".", "size", ":", "]", ".", "tobytes", "(", ")", "else", ":", "guid", "=", "UUID", "(", "bytes_le", "=", "binary_stream", "[", "cls", ".", "_REPR", ".", "size", ":", "cls", ".", "_REPR", ".", "size", "+", "16", "]", ".", "tobytes", "(", ")", ")", "data", "=", "binary_stream", "[", "cls", ".", "_REPR", ".", "size", "+", "16", ":", "]", ".", "tobytes", "(", ")", "nw_obj", "=", "cls", "(", "(", "reparse_type", ",", "reparse_flags", ",", "data_len", ",", "guid", ",", "data", ")", ")", "_MOD_LOGGER", ".", "debug", "(", "\"Attempted to unpack REPARSE_POINT from \\\"%s\\\"\\nResult: %s\"", ",", "binary_stream", ".", "tobytes", "(", ")", ",", "nw_obj", ")", "return", "nw_obj" ]
See base class.
[ "See", "base", "class", "." ]
python
train
43.25
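The tag decoding is plain bit masking: the low 16 bits of the 32-bit reparse tag hold the type, and the top nibble holds the flags. Using the well-known symlink tag as an example:

```python
reparse_tag = 0xA000000C  # IO_REPARSE_TAG_SYMLINK

reparse_type = reparse_tag & 0x0000FFFF           # 0x000C
reparse_flags = (reparse_tag & 0xF0000000) >> 28  # 0b1010: Microsoft bit set

print(hex(reparse_type), bin(reparse_flags))      # 0xc 0b1010
```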
nyrkovalex/httpsrv
httpsrv/httpsrv.py
https://github.com/nyrkovalex/httpsrv/blob/0acc3298be56856f73bda1ed10c9ab5153894b01/httpsrv/httpsrv.py#L263-L270
def stop(self): ''' Shuts the server down and waits for server thread to join ''' self._server.shutdown() self._server.server_close() self._thread.join() self.running = False
[ "def", "stop", "(", "self", ")", ":", "self", ".", "_server", ".", "shutdown", "(", ")", "self", ".", "_server", ".", "server_close", "(", ")", "self", ".", "_thread", ".", "join", "(", ")", "self", ".", "running", "=", "False" ]
Shuts the server down and waits for server thread to join
[ "Shuts", "the", "server", "down", "and", "waits", "for", "server", "thread", "to", "join" ]
python
train
27.875
ConsenSys/mythril-classic
mythril/analysis/report.py
https://github.com/ConsenSys/mythril-classic/blob/27af71c34b2ce94f4fae5613ec457f93df1a8f56/mythril/analysis/report.py#L97-L106
def _set_internal_compiler_error(self): """ Adds the false positive to description and changes severity to low """ self.severity = "Low" self.description_tail += ( " This issue is reported for internal compiler generated code." ) self.description = "%s\n%s" % (self.description_head, self.description_tail) self.code = ""
[ "def", "_set_internal_compiler_error", "(", "self", ")", ":", "self", ".", "severity", "=", "\"Low\"", "self", ".", "description_tail", "+=", "(", "\" This issue is reported for internal compiler generated code.\"", ")", "self", ".", "description", "=", "\"%s\\n%s\"", "%", "(", "self", ".", "description_head", ",", "self", ".", "description_tail", ")", "self", ".", "code", "=", "\"\"" ]
Adds the false positive to description and changes severity to low
[ "Adds", "the", "false", "positive", "to", "description", "and", "changes", "severity", "to", "low" ]
python
train
38.8
agile4you/bottle-neck
bottle_neck/response.py
https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/response.py#L198-L211
def unauthorized(cls, errors=None): """Shortcut API for HTTP 401 `Unauthorized` response. Args: errors (list): Response key/value data. Returns: WSResponse Instance. """ if cls.expose_status: # pragma: no cover cls.response.content_type = 'application/json' cls.response._status_line = '401 Unauthorized' return cls(401, errors=errors).to_json
[ "def", "unauthorized", "(", "cls", ",", "errors", "=", "None", ")", ":", "if", "cls", ".", "expose_status", ":", "# pragma: no cover", "cls", ".", "response", ".", "content_type", "=", "'application/json'", "cls", ".", "response", ".", "_status_line", "=", "'401 Unauthorized'", "return", "cls", "(", "401", ",", "errors", "=", "errors", ")", ".", "to_json" ]
Shortcut API for HTTP 401 `Unauthorized` response. Args: errors (list): Response key/value data. Returns: WSResponse Instance.
[ "Shortcut", "API", "for", "HTTP", "401", "Unauthorized", "response", "." ]
python
train
30.714286
tensorflow/tensorboard
tensorboard/plugins/interactive_inference/interactive_inference_plugin.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/interactive_inference_plugin.py#L190-L208
def _duplicate_example(self, request): """Duplicates the specified example. Args: request: A request that should contain 'index'. Returns: An empty response. """ index = int(request.args.get('index')) if index >= len(self.examples): return http_util.Respond(request, {'error': 'invalid index provided'}, 'application/json', code=400) new_example = self.example_class() new_example.CopyFrom(self.examples[index]) self.examples.append(new_example) self.updated_example_indices.add(len(self.examples) - 1) self.generate_sprite([ex.SerializeToString() for ex in self.examples]) return http_util.Respond(request, {}, 'application/json')
[ "def", "_duplicate_example", "(", "self", ",", "request", ")", ":", "index", "=", "int", "(", "request", ".", "args", ".", "get", "(", "'index'", ")", ")", "if", "index", ">=", "len", "(", "self", ".", "examples", ")", ":", "return", "http_util", ".", "Respond", "(", "request", ",", "{", "'error'", ":", "'invalid index provided'", "}", ",", "'application/json'", ",", "code", "=", "400", ")", "new_example", "=", "self", ".", "example_class", "(", ")", "new_example", ".", "CopyFrom", "(", "self", ".", "examples", "[", "index", "]", ")", "self", ".", "examples", ".", "append", "(", "new_example", ")", "self", ".", "updated_example_indices", ".", "add", "(", "len", "(", "self", ".", "examples", ")", "-", "1", ")", "self", ".", "generate_sprite", "(", "[", "ex", ".", "SerializeToString", "(", ")", "for", "ex", "in", "self", ".", "examples", "]", ")", "return", "http_util", ".", "Respond", "(", "request", ",", "{", "}", ",", "'application/json'", ")" ]
Duplicates the specified example. Args: request: A request that should contain 'index'. Returns: An empty response.
[ "Duplicates", "the", "specified", "example", "." ]
python
train
37.473684
fermiPy/fermipy
fermipy/jobs/file_archive.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/file_archive.py#L766-L775
def write_table_file(self, table_file=None): """Write the table to self._table_file""" if self._table is None: raise RuntimeError("No table to write") if table_file is not None: self._table_file = table_file if self._table_file is None: raise RuntimeError("No output file specified for table") write_tables_to_fits(self._table_file, [self._table], clobber=True, namelist=['FILE_ARCHIVE'])
[ "def", "write_table_file", "(", "self", ",", "table_file", "=", "None", ")", ":", "if", "self", ".", "_table", "is", "None", ":", "raise", "RuntimeError", "(", "\"No table to write\"", ")", "if", "table_file", "is", "not", "None", ":", "self", ".", "_table_file", "=", "table_file", "if", "self", ".", "_table_file", "is", "None", ":", "raise", "RuntimeError", "(", "\"No output file specified for table\"", ")", "write_tables_to_fits", "(", "self", ".", "_table_file", ",", "[", "self", ".", "_table", "]", ",", "clobber", "=", "True", ",", "namelist", "=", "[", "'FILE_ARCHIVE'", "]", ")" ]
Write the table to self._table_file
[ "Write", "the", "table", "to", "self", ".", "_table_file" ]
python
train
48.4
bpannier/simpletr64
simpletr64/actions/wan.py
https://github.com/bpannier/simpletr64/blob/31081139f4e6c85084a56de1617df73927135466/simpletr64/actions/wan.py#L187-L200
def getConnectionInfo(self, wanInterfaceId=1, timeout=1): """Execute GetInfo action to get WAN connection information's. :param int wanInterfaceId: the id of the WAN device :param float timeout: the timeout to wait for the action to be executed :return: WAN connection information's. :rtype: ConnectionInfo """ namespace = Wan.getServiceType("getConnectionInfo") + str(wanInterfaceId) uri = self.getControlURL(namespace) results = self.execute(uri, namespace, "GetInfo", timeout=timeout) return ConnectionInfo(results)
[ "def", "getConnectionInfo", "(", "self", ",", "wanInterfaceId", "=", "1", ",", "timeout", "=", "1", ")", ":", "namespace", "=", "Wan", ".", "getServiceType", "(", "\"getConnectionInfo\"", ")", "+", "str", "(", "wanInterfaceId", ")", "uri", "=", "self", ".", "getControlURL", "(", "namespace", ")", "results", "=", "self", ".", "execute", "(", "uri", ",", "namespace", ",", "\"GetInfo\"", ",", "timeout", "=", "timeout", ")", "return", "ConnectionInfo", "(", "results", ")" ]
Execute GetInfo action to get WAN connection information. :param int wanInterfaceId: the id of the WAN device :param float timeout: the timeout to wait for the action to be executed :return: WAN connection information. :rtype: ConnectionInfo
[ "Execute", "GetInfo", "action", "to", "get", "WAN", "connection", "information", "s", "." ]
python
train
42
Chilipp/psyplot
psyplot/config/rcsetup.py
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/config/rcsetup.py#L350-L355
def validate(self): """Dictionary with validation methods as values""" depr = self._all_deprecated return dict((key, val[1]) for key, val in six.iteritems(self.defaultParams) if key not in depr)
[ "def", "validate", "(", "self", ")", ":", "depr", "=", "self", ".", "_all_deprecated", "return", "dict", "(", "(", "key", ",", "val", "[", "1", "]", ")", "for", "key", ",", "val", "in", "six", ".", "iteritems", "(", "self", ".", "defaultParams", ")", "if", "key", "not", "in", "depr", ")" ]
Dictionary with validation methods as values
[ "Dictionary", "with", "validation", "methods", "as", "values" ]
python
train
42.166667
mitsei/dlkit
dlkit/aws_adapter/osid/queries.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/aws_adapter/osid/queries.py#L68-L77
def _get_string_match_value(self, string, string_match_type): """Gets the match value""" if string_match_type == Type(**get_type_data('EXACT')): return string elif string_match_type == Type(**get_type_data('IGNORECASE')): return re.compile('^' + string, re.I) elif string_match_type == Type(**get_type_data('WORD')): return re.compile('.*' + string + '.*') elif string_match_type == Type(**get_type_data('WORDIGNORECASE')): return re.compile('.*' + string + '.*', re.I)
[ "def", "_get_string_match_value", "(", "self", ",", "string", ",", "string_match_type", ")", ":", "if", "string_match_type", "==", "Type", "(", "*", "*", "get_type_data", "(", "'EXACT'", ")", ")", ":", "return", "string", "elif", "string_match_type", "==", "Type", "(", "*", "*", "get_type_data", "(", "'IGNORECASE'", ")", ")", ":", "return", "re", ".", "compile", "(", "'^'", "+", "string", ",", "re", ".", "I", ")", "elif", "string_match_type", "==", "Type", "(", "*", "*", "get_type_data", "(", "'WORD'", ")", ")", ":", "return", "re", ".", "compile", "(", "'.*'", "+", "string", "+", "'.*'", ")", "elif", "string_match_type", "==", "Type", "(", "*", "*", "get_type_data", "(", "'WORDIGNORECASE'", ")", ")", ":", "return", "re", ".", "compile", "(", "'.*'", "+", "string", "+", "'.*'", ",", "re", ".", "I", ")" ]
Gets the match value
[ "Gets", "the", "match", "value" ]
python
train
54.8
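A minimal standalone sketch of the dispatch above, with the osid Type/get_type_data machinery replaced by plain strings purely for illustration (the real match types are Type objects, not strings):

import re

def get_string_match_value(string, string_match_type):
    # Mirrors the branches above: exact string, anchored case-insensitive
    # prefix, substring "word" match, or case-insensitive substring match.
    if string_match_type == 'EXACT':
        return string
    elif string_match_type == 'IGNORECASE':
        return re.compile('^' + string, re.I)
    elif string_match_type == 'WORD':
        return re.compile('.*' + string + '.*')
    elif string_match_type == 'WORDIGNORECASE':
        return re.compile('.*' + string + '.*', re.I)

print(get_string_match_value('Physics', 'EXACT'))                           # Physics
print(bool(get_string_match_value('phys', 'IGNORECASE').match('Physics')))  # True
print(bool(get_string_match_value('ysi', 'WORD').match('Physics')))         # True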
klavinslab/coral
coral/design/_oligo_synthesis/oligo_assembly.py
https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/design/_oligo_synthesis/oligo_assembly.py#L215-L347
def _grow_overlaps(dna, melting_temp, require_even, length_max, overlap_min, min_exception): '''Grows equidistant overlaps until they meet specified constraints. :param dna: Input sequence. :type dna: coral.DNA :param melting_temp: Ideal Tm of the overlaps, in degrees C. :type melting_temp: float :param require_even: Require that the number of oligonucleotides is even. :type require_even: bool :param length_max: Maximum oligo size (e.g. 60bp price point cutoff). :type length_max: int :param overlap_min: Minimum overlap size. :type overlap_min: int :param min_exception: In order to meet melting_temp and overlap_min settings, allow overlaps less than overlap_min to continue growing above melting_temp. :type min_exception: bool :returns: Oligos, their overlapping regions, overlap Tms, and overlap indices. :rtype: tuple ''' # TODO: prevent growing overlaps from bumping into each other - # should halt when it happens, give warning, let user decide if they still # want the current construct # Another option would be to start over, moving the starting positions # near the problem region a little farther from each other - this would # put the AT-rich region in the middle of the spanning oligo # Try bare minimum number of oligos oligo_n = len(dna) // length_max + 1 # Adjust number of oligos if even number required if require_even: oligo_increment = 2 if oligo_n % 2 == 1: oligo_n += 1 else: oligo_increment = 1 # Increase oligo number until the minimum oligo_len is less than length_max while float(len(dna)) / oligo_n > length_max: oligo_n += oligo_increment # Loop until all overlaps meet minimum Tm and length tm_met = False len_met = False while(not tm_met or not len_met): # Calculate initial number of overlaps overlap_n = oligo_n - 1 # Place overlaps approximately equidistant over sequence length overlap_interval = float(len(dna)) / oligo_n starts = [int(overlap_interval * (i + 1)) for i in range(overlap_n)] ends = [index + 1 for index in starts] # Fencepost for while loop # Initial overlaps (1 base) and their tms overlaps = [dna[start:end] for start, end in zip(starts, ends)] overlap_tms = [coral.analysis.tm(overlap) for overlap in overlaps] index = overlap_tms.index(min(overlap_tms)) # Initial oligos - includes the 1 base overlaps. # All the oligos are in the same direction - reverse # complementation of every other one happens later oligo_starts = [0] + starts oligo_ends = ends + [len(dna)] oligo_indices = [oligo_starts, oligo_ends] oligos = [dna[start:end] for start, end in zip(*oligo_indices)] # Oligo won't be maxed in first pass. tm_met and len_met will be false maxed = False while not (tm_met and len_met) and not maxed: # Recalculate overlaps and their Tms overlaps = _recalculate_overlaps(dna, overlaps, oligo_indices) # Tm calculation is bottleneck - only recalculate changed overlap overlap_tms[index] = coral.analysis.tm(overlaps[index]) # Find lowest-Tm overlap and its index.
index = overlap_tms.index(min(overlap_tms)) # Move overlap at that index oligos = _expand_overlap(dna, oligo_indices, index, oligos, length_max) # Regenerate conditions maxed = any([len(x) == length_max for x in oligos]) tm_met = all([x >= melting_temp for x in overlap_tms]) if min_exception: len_met = True else: len_met = all([len(x) >= overlap_min for x in overlaps]) # TODO: add test for min_exception case (use rob's sequence from # 20130624 with 65C Tm) if min_exception: len_met = all([len(x) >= overlap_min for x in overlaps]) # See if len_met is true - if so do nothing if len_met: break else: while not len_met and not maxed: # Recalculate overlaps and their Tms overlaps = _recalculate_overlaps(dna, overlaps, oligo_indices) # Overlap to increase is the shortest one overlap_lens = [len(overlap) for overlap in overlaps] index = overlap_lens.index(min(overlap_lens)) # Increase left or right oligo oligos = _expand_overlap(dna, oligo_indices, index, oligos, length_max) # Recalculate conditions maxed = any([len(x) == length_max for x in oligos]) len_met = all([len(x) >= overlap_min for x in overlaps]) # Recalculate tms to reflect any changes (some are redundant) overlap_tms[index] = coral.analysis.tm(overlaps[index]) # Outcome could be that len_met happened *or* maxed out # length of one of the oligos. If len_met happened, should be # done so long as tm_met has been satisfied. If maxed happened, # len_met will not have been met, even if tm_met is satisfied, # and script will reattempt with more oligos oligo_n += oligo_increment # Calculate location of overlaps overlap_indices = [(oligo_indices[0][x + 1], oligo_indices[1][x]) for x in range(overlap_n)] return oligos, overlaps, overlap_tms, overlap_indices
[ "def", "_grow_overlaps", "(", "dna", ",", "melting_temp", ",", "require_even", ",", "length_max", ",", "overlap_min", ",", "min_exception", ")", ":", "# TODO: prevent growing overlaps from bumping into each other -", "# should halt when it happens, give warning, let user decide if they still", "# want the current construct", "# Another option would be to start over, moving the starting positions", "# near the problem region a little farther from each other - this would", "# put the AT-rich region in the middle of the spanning oligo", "# Try bare minimum number of oligos", "oligo_n", "=", "len", "(", "dna", ")", "//", "length_max", "+", "1", "# Adjust number of oligos if even number required", "if", "require_even", ":", "oligo_increment", "=", "2", "if", "oligo_n", "%", "2", "==", "1", ":", "oligo_n", "+=", "1", "else", ":", "oligo_increment", "=", "1", "# Increase oligo number until the minimum oligo_len is less than length_max", "while", "float", "(", "len", "(", "dna", ")", ")", "/", "oligo_n", ">", "length_max", ":", "oligo_n", "+=", "oligo_increment", "# Loop until all overlaps meet minimum Tm and length", "tm_met", "=", "False", "len_met", "=", "False", "while", "(", "not", "tm_met", "or", "not", "len_met", ")", ":", "# Calculate initial number of overlaps", "overlap_n", "=", "oligo_n", "-", "1", "# Place overlaps approximately equidistant over sequence length", "overlap_interval", "=", "float", "(", "len", "(", "dna", ")", ")", "/", "oligo_n", "starts", "=", "[", "int", "(", "overlap_interval", "*", "(", "i", "+", "1", ")", ")", "for", "i", "in", "range", "(", "overlap_n", ")", "]", "ends", "=", "[", "index", "+", "1", "for", "index", "in", "starts", "]", "# Fencepost for while loop", "# Initial overlaps (1 base) and their tms", "overlaps", "=", "[", "dna", "[", "start", ":", "end", "]", "for", "start", ",", "end", "in", "zip", "(", "starts", ",", "ends", ")", "]", "overlap_tms", "=", "[", "coral", ".", "analysis", ".", "tm", "(", "overlap", ")", "for", "overlap", "in", "overlaps", "]", "index", "=", "overlap_tms", ".", "index", "(", "min", "(", "overlap_tms", ")", ")", "# Initial oligos - includes the 1 base overlaps.", "# All the oligos are in the same direction - reverse", "# complementation of every other one happens later", "oligo_starts", "=", "[", "0", "]", "+", "starts", "oligo_ends", "=", "ends", "+", "[", "len", "(", "dna", ")", "]", "oligo_indices", "=", "[", "oligo_starts", ",", "oligo_ends", "]", "oligos", "=", "[", "dna", "[", "start", ":", "end", "]", "for", "start", ",", "end", "in", "zip", "(", "*", "oligo_indices", ")", "]", "# Oligo won't be maxed in first pass. 
tm_met and len_met will be false", "maxed", "=", "False", "while", "not", "(", "tm_met", "and", "len_met", ")", "and", "not", "maxed", ":", "# Recalculate overlaps and their Tms", "overlaps", "=", "_recalculate_overlaps", "(", "dna", ",", "overlaps", ",", "oligo_indices", ")", "# Tm calculation is bottleneck - only recalculate changed overlap", "overlap_tms", "[", "index", "]", "=", "coral", ".", "analysis", ".", "tm", "(", "overlaps", "[", "index", "]", ")", "# Find lowest-Tm overlap and its index.", "index", "=", "overlap_tms", ".", "index", "(", "min", "(", "overlap_tms", ")", ")", "# Move overlap at that index", "oligos", "=", "_expand_overlap", "(", "dna", ",", "oligo_indices", ",", "index", ",", "oligos", ",", "length_max", ")", "# Regenerate conditions", "maxed", "=", "any", "(", "[", "len", "(", "x", ")", "==", "length_max", "for", "x", "in", "oligos", "]", ")", "tm_met", "=", "all", "(", "[", "x", ">=", "melting_temp", "for", "x", "in", "overlap_tms", "]", ")", "if", "min_exception", ":", "len_met", "=", "True", "else", ":", "len_met", "=", "all", "(", "[", "len", "(", "x", ")", ">=", "overlap_min", "for", "x", "in", "overlaps", "]", ")", "# TODO: add test for min_exception case (use rob's sequence from", "# 20130624 with 65C Tm)", "if", "min_exception", ":", "len_met", "=", "all", "(", "[", "len", "(", "x", ")", ">=", "overlap_min", "for", "x", "in", "overlaps", "]", ")", "# See if len_met is true - if so do nothing", "if", "len_met", ":", "break", "else", ":", "while", "not", "len_met", "and", "not", "maxed", ":", "# Recalculate overlaps and their Tms", "overlaps", "=", "_recalculate_overlaps", "(", "dna", ",", "overlaps", ",", "oligo_indices", ")", "# Overlap to increase is the shortest one", "overlap_lens", "=", "[", "len", "(", "overlap", ")", "for", "overlap", "in", "overlaps", "]", "index", "=", "overlap_lens", ".", "index", "(", "min", "(", "overlap_lens", ")", ")", "# Increase left or right oligo", "oligos", "=", "_expand_overlap", "(", "dna", ",", "oligo_indices", ",", "index", ",", "oligos", ",", "length_max", ")", "# Recalculate conditions", "maxed", "=", "any", "(", "[", "len", "(", "x", ")", "==", "length_max", "for", "x", "in", "oligos", "]", ")", "len_met", "=", "all", "(", "[", "len", "(", "x", ")", ">=", "overlap_min", "for", "x", "in", "overlaps", "]", ")", "# Recalculate tms to reflect any changes (some are redundant)", "overlap_tms", "[", "index", "]", "=", "coral", ".", "analysis", ".", "tm", "(", "overlaps", "[", "index", "]", ")", "# Outcome could be that len_met happened *or* maxed out", "# length of one of the oligos. If len_met happened, should be", "# done so long as tm_met has been satisfied. If maxed happened,", "# len_met will not have been met, even if tm_met is satisfied,", "# and script will reattempt with more oligos", "oligo_n", "+=", "oligo_increment", "# Calculate location of overlaps", "overlap_indices", "=", "[", "(", "oligo_indices", "[", "0", "]", "[", "x", "+", "1", "]", ",", "oligo_indices", "[", "1", "]", "[", "x", "]", ")", "for", "x", "in", "range", "(", "overlap_n", ")", "]", "return", "oligos", ",", "overlaps", ",", "overlap_tms", ",", "overlap_indices" ]
Grows equidistant overlaps until they meet specified constraints. :param dna: Input sequence. :type dna: coral.DNA :param melting_temp: Ideal Tm of the overlaps, in degrees C. :type melting_temp: float :param require_even: Require that the number of oligonucleotides is even. :type require_even: bool :param length_max: Maximum oligo size (e.g. 60bp price point cutoff). :type length_max: int :param overlap_min: Minimum overlap size. :type overlap_min: int :param min_exception: In order to meet melting_temp and overlap_min settings, allow overlaps less than overlap_min to continue growing above melting_temp. :type min_exception: bool :returns: Oligos, their overlapping regions, overlap Tms, and overlap indices. :rtype: tuple
[ "Grows", "equidistant", "overlaps", "until", "they", "meet", "specified", "constraints", "." ]
python
train
43.503759
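The seeding step of the algorithm above drops one-base overlaps at equidistant positions before growing them; a self-contained sketch of just that arithmetic (the helper name is illustrative, not part of the module):

def equidistant_overlap_starts(seq_len, oligo_n):
    # One overlap sits between each pair of adjacent oligos.
    overlap_n = oligo_n - 1
    overlap_interval = float(seq_len) / oligo_n
    return [int(overlap_interval * (i + 1)) for i in range(overlap_n)]

# A 100 bp sequence split across 4 oligos seeds 3 one-base overlaps.
print(equidistant_overlap_starts(100, 4))  # [25, 50, 75]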
datosgobar/pydatajson
pydatajson/core.py
https://github.com/datosgobar/pydatajson/blob/3141082ffbaa295e2deaf6ffbbc5a59f5859960e/pydatajson/core.py#L902-L945
def _extract_datasets_to_harvest(cls, report): """Extracts from a report the data needed to recognize which datasets to mark for harvesting in any generator. Args: report (str or list): Report (list of dicts) or path to one. Returns: list: List of tuples with the catalog and dataset titles of each extracted report. """ assert isinstance(report, string_types + (list,)) # Si `report` es una lista de tuplas con longitud 2, asumimos que es un # reporte procesado para extraer los datasets a harvestear. Se devuelve # intacta. if (isinstance(report, list) and all([isinstance(x, tuple) and len(x) == 2 for x in report])): return report table = readers.read_table(report) table_keys = table[0].keys() expected_keys = ["catalog_metadata_url", "dataset_title", "dataset_accrualPeriodicity"] # Verifico la presencia de las claves básicas de un config de harvester for key in expected_keys: if key not in table_keys: raise KeyError(""" El reporte no contiene la clave obligatoria {}. Pruebe con otro archivo. """.format(key)) if "harvest" in table_keys: # El archivo es un reporte de datasets. datasets_to_harvest = [ (row["catalog_metadata_url"], row["dataset_title"]) for row in table if int(row["harvest"])] else: # El archivo es un config de harvester. datasets_to_harvest = [ (row["catalog_metadata_url"], row["dataset_title"]) for row in table] return datasets_to_harvest
[ "def", "_extract_datasets_to_harvest", "(", "cls", ",", "report", ")", ":", "assert", "isinstance", "(", "report", ",", "string_types", "+", "(", "list", ",", ")", ")", "# Si `report` es una lista de tuplas con longitud 2, asumimos que es un", "# reporte procesado para extraer los datasets a harvestear. Se devuelve", "# intacta.", "if", "(", "isinstance", "(", "report", ",", "list", ")", "and", "all", "(", "[", "isinstance", "(", "x", ",", "tuple", ")", "and", "len", "(", "x", ")", "==", "2", "for", "x", "in", "report", "]", ")", ")", ":", "return", "report", "table", "=", "readers", ".", "read_table", "(", "report", ")", "table_keys", "=", "table", "[", "0", "]", ".", "keys", "(", ")", "expected_keys", "=", "[", "\"catalog_metadata_url\"", ",", "\"dataset_title\"", ",", "\"dataset_accrualPeriodicity\"", "]", "# Verifico la presencia de las claves básicas de un config de harvester", "for", "key", "in", "expected_keys", ":", "if", "key", "not", "in", "table_keys", ":", "raise", "KeyError", "(", "\"\"\"\nEl reporte no contiene la clave obligatoria {}. Pruebe con otro archivo.\n\"\"\"", ".", "format", "(", "key", ")", ")", "if", "\"harvest\"", "in", "table_keys", ":", "# El archivo es un reporte de datasets.", "datasets_to_harvest", "=", "[", "(", "row", "[", "\"catalog_metadata_url\"", "]", ",", "row", "[", "\"dataset_title\"", "]", ")", "for", "row", "in", "table", "if", "int", "(", "row", "[", "\"harvest\"", "]", ")", "]", "else", ":", "# El archivo es un config de harvester.", "datasets_to_harvest", "=", "[", "(", "row", "[", "\"catalog_metadata_url\"", "]", ",", "row", "[", "\"dataset_title\"", "]", ")", "for", "row", "in", "table", "]", "return", "datasets_to_harvest" ]
Extracts from a report the data needed to recognize which datasets to mark for harvesting in any generator. Args: report (str or list): Report (list of dicts) or path to one. Returns: list: List of tuples with the catalog and dataset titles of each extracted report.
[ "Extracts", "from", "a", "report", "the", "data", "needed", "to", "recognize", "which", "datasets", "to", "mark", "for", "harvesting", "in", "any", "generator", "." ]
python
train
39.681818
ace0/pyrelic
pyrelic/ec.py
https://github.com/ace0/pyrelic/blob/f23d4e6586674675f72304d5938548267d6413bf/pyrelic/ec.py#L231-L242
def _getCachedValue(obj, relicFunc, resultType): """ Retrieves a value from obj.cached (if not None) or calls @relicFunc and caches the result (of @resultType) into obj.cached. This is a common implementation for orderG1/G2/Gt and generatorG1/G2/Gt """ # If the value has not been previously cached, fetch if not obj.cached: obj.cached = resultType() relicFunc(byref(obj.cached)) return obj.cached
[ "def", "_getCachedValue", "(", "obj", ",", "relicFunc", ",", "resultType", ")", ":", "# If the value has not been previously cached, fetch ", "if", "not", "obj", ".", "cached", ":", "obj", ".", "cached", "=", "resultType", "(", ")", "relicFunc", "(", "byref", "(", "obj", ".", "cached", ")", ")", "return", "obj", ".", "cached" ]
Retrieves a value from obj.cached (if not None) or calls @relicFunc and caches the result (of @resultType) into obj.cached. This is a common implementation for orderG1/G2/Gt and generatorG1/G2/Gt
[ "Retrieves", "a", "value", "from", "obj", ".", "cached", "(", "if", "not", "None", ")", "or", "calls", "@relicFunc", "and", "caches", "the", "result", "(", "of", "@resultType", ")", "into", "obj", ".", "cached", "." ]
python
train
36.333333
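The same compute-once pattern, sketched without the ctypes/relic dependency (all names below are stand-ins). Note that, as in the original, the guard is a truthiness test, so a falsy cached value would be recomputed:

class Holder(object):
    def __init__(self):
        self.cached = None

calls = []

def get_cached_value(holder):
    # Stand-in for the FFI call: populate holder.cached exactly once.
    if not holder.cached:
        calls.append(1)
        holder.cached = 42
    return holder.cached

obj = Holder()
print(get_cached_value(obj), get_cached_value(obj), len(calls))  # 42 42 1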
pandas-dev/pandas
pandas/core/groupby/groupby.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1224-L1236
def sem(self, ddof=1): """ Compute standard error of the mean of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. Parameters ---------- ddof : integer, default 1 degrees of freedom """ return self.std(ddof=ddof) / np.sqrt(self.count())
[ "def", "sem", "(", "self", ",", "ddof", "=", "1", ")", ":", "return", "self", ".", "std", "(", "ddof", "=", "ddof", ")", "/", "np", ".", "sqrt", "(", "self", ".", "count", "(", ")", ")" ]
Compute standard error of the mean of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. Parameters ---------- ddof : integer, default 1 degrees of freedom
[ "Compute", "standard", "error", "of", "the", "mean", "of", "groups", "excluding", "missing", "values", "." ]
python
train
26.923077
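A worked example of the formula above, sem = std / sqrt(n) with ddof=1, using only the standard library (the sample values are arbitrary):

import math
import statistics

group = [2.0, 4.0, 6.0, 8.0]
sem = statistics.stdev(group) / math.sqrt(len(group))  # stdev uses ddof=1
print(round(sem, 3))  # 1.291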
CityOfZion/neo-python
neo/Core/State/UnspentCoinState.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/State/UnspentCoinState.py#L55-L65
def IsAllSpent(self): """ Flag indicating if all balance is spent. Returns: bool: """ for item in self.Items: if item == CoinState.Confirmed: return False return True
[ "def", "IsAllSpent", "(", "self", ")", ":", "for", "item", "in", "self", ".", "Items", ":", "if", "item", "==", "CoinState", ".", "Confirmed", ":", "return", "False", "return", "True" ]
Flag indicating if all balance is spent. Returns: bool:
[ "Flag", "indicating", "if", "all", "balance", "is", "spent", "." ]
python
train
22.272727
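The loop above is an all() over the items; a runnable sketch with a stand-in CoinState enum:

class CoinState:
    Confirmed = 1
    Spent = 2

items = [CoinState.Spent, CoinState.Spent]
print(all(item != CoinState.Confirmed for item in items))  # True: nothing left unspent
items.append(CoinState.Confirmed)
print(all(item != CoinState.Confirmed for item in items))  # False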
ultrabug/py3status
py3status/docstrings.py
https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/docstrings.py#L257-L289
def _from_docstring_rst(doc): """ format from docstring to ReStructured Text """ def format_fn(line, status): """ format function """ if re_from_data.match(line): line = re_from_data.sub(r"**\1** ", line) status["add_line"] = True line = re_from_defaults.sub(r"*\1*", line) if status["listing"]: # parameters if re_from_param.match(line): m = re_from_param.match(line) line = " - ``{}`` {}".format(m.group(1), m.group(3)) # status items elif re_from_status.match(line): m = re_from_status.match(line) line = " - ``{}`` {}".format(m.group(1), m.group(3)) # bullets elif re_from_item.match(line): line = re_from_item.sub(r" -", line) # is continuation line else: line = " " * 4 + line.lstrip() # in .rst format code samples use double backticks vs single ones for # .md This converts them. line = re_lone_backtick.sub("``", line) return line return _reformat_docstring(doc, format_fn, code_newline="\n")
[ "def", "_from_docstring_rst", "(", "doc", ")", ":", "def", "format_fn", "(", "line", ",", "status", ")", ":", "\"\"\" format function \"\"\"", "if", "re_from_data", ".", "match", "(", "line", ")", ":", "line", "=", "re_from_data", ".", "sub", "(", "r\"**\\1** \"", ",", "line", ")", "status", "[", "\"add_line\"", "]", "=", "True", "line", "=", "re_from_defaults", ".", "sub", "(", "r\"*\\1*\"", ",", "line", ")", "if", "status", "[", "\"listing\"", "]", ":", "# parameters", "if", "re_from_param", ".", "match", "(", "line", ")", ":", "m", "=", "re_from_param", ".", "match", "(", "line", ")", "line", "=", "\" - ``{}`` {}\"", ".", "format", "(", "m", ".", "group", "(", "1", ")", ",", "m", ".", "group", "(", "3", ")", ")", "# status items", "elif", "re_from_status", ".", "match", "(", "line", ")", ":", "m", "=", "re_from_status", ".", "match", "(", "line", ")", "line", "=", "\" - ``{}`` {}\"", ".", "format", "(", "m", ".", "group", "(", "1", ")", ",", "m", ".", "group", "(", "3", ")", ")", "# bullets", "elif", "re_from_item", ".", "match", "(", "line", ")", ":", "line", "=", "re_from_item", ".", "sub", "(", "r\" -\"", ",", "line", ")", "# is continuation line", "else", ":", "line", "=", "\" \"", "*", "4", "+", "line", ".", "lstrip", "(", ")", "# in .rst format code samples use double backticks vs single ones for", "# .md This converts them.", "line", "=", "re_lone_backtick", ".", "sub", "(", "\"``\"", ",", "line", ")", "return", "line", "return", "_reformat_docstring", "(", "doc", ",", "format_fn", ",", "code_newline", "=", "\"\\n\"", ")" ]
format from docstring to ReStructured Text
[ "format", "from", "docstring", "to", "ReStructured", "Text" ]
python
train
35.606061
IdentityPython/SATOSA
src/satosa/util.py
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/util.py#L15-L32
def hash_data(salt, value, hash_alg=None): """ Hashes a value together with a salt with the given hash algorithm. :type salt: str :type hash_alg: str :type value: str :param salt: hash salt :param hash_alg: the hash algorithm to use (default: SHA512) :param value: value to hash together with the salt :return: hashed value """ hash_alg = hash_alg or 'sha512' hasher = hashlib.new(hash_alg) hasher.update(value.encode('utf-8')) hasher.update(salt.encode('utf-8')) value_hashed = hasher.hexdigest() return value_hashed
[ "def", "hash_data", "(", "salt", ",", "value", ",", "hash_alg", "=", "None", ")", ":", "hash_alg", "=", "hash_alg", "or", "'sha512'", "hasher", "=", "hashlib", ".", "new", "(", "hash_alg", ")", "hasher", ".", "update", "(", "value", ".", "encode", "(", "'utf-8'", ")", ")", "hasher", ".", "update", "(", "salt", ".", "encode", "(", "'utf-8'", ")", ")", "value_hashed", "=", "hasher", ".", "hexdigest", "(", ")", "return", "value_hashed" ]
Hashes a value together with a salt with the given hash algorithm. :type salt: str :type hash_alg: str :type value: str :param salt: hash salt :param hash_alg: the hash algorithm to use (default: SHA512) :param value: value to hash together with the salt :return: hashed value
[ "Hashes", "a", "value", "together", "with", "a", "salt", "with", "the", "given", "hash", "algorithm", "." ]
python
train
31.388889
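A runnable sketch of the salted-hash recipe above. Because sequential hashlib update() calls are equivalent to hashing the concatenated bytes, the two updates hash value + salt as one byte string (inputs are arbitrary):

import hashlib

def hash_data(salt, value, hash_alg=None):
    hasher = hashlib.new(hash_alg or 'sha512')
    hasher.update(value.encode('utf-8'))
    hasher.update(salt.encode('utf-8'))
    return hasher.hexdigest()

digest = hash_data('pepper', '[email protected]', 'sha256')
print(digest == hashlib.sha256(b'[email protected]' + b'pepper').hexdigest())  # True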
spacetelescope/stsci.tools
lib/stsci/tools/cfgpars.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/cfgpars.py#L1206-L1238
def listTheExtras(self, deleteAlso): """ Use ConfigObj's get_extra_values() call to find any extra/unknown parameters we may have loaded. Return a string similar to findTheLost. If deleteAlso is True, this will also delete any extra/unknown items. """ # get list of extras extras = configobj.get_extra_values(self) # extras is in format: [(sections, key), (sections, key), ] # but we need: [(sections, key, result), ...] - set all results to # a bool just to make it the right shape. BUT, since we are in # here anyway, make that bool mean something - hide info in it about # whether that extra item is a section (1) or just a single par (0) # # simplified, this is: expanded = [ (x+(abool,)) for x in extras] expanded = [ (x+ \ ( bool(len(x[0])<1 and hasattr(self[x[1]], 'keys')), ) \ ) for x in extras] retval = '' if expanded: retval = flattened2str(expanded, extra=1) # but before we return, delete them (from ourself!) if requested to if deleteAlso: for tup_to_del in extras: target = self # descend the tree to the dict where this item is located. # (this works because target is not a copy (because the dict # type is mutable)) location = tup_to_del[0] for subdict in location: target = target[subdict] # delete it target.pop(tup_to_del[1]) return retval
[ "def", "listTheExtras", "(", "self", ",", "deleteAlso", ")", ":", "# get list of extras", "extras", "=", "configobj", ".", "get_extra_values", "(", "self", ")", "# extras is in format: [(sections, key), (sections, key), ]", "# but we need: [(sections, key, result), ...] - set all results to", "# a bool just to make it the right shape. BUT, since we are in", "# here anyway, make that bool mean something - hide info in it about", "# whether that extra item is a section (1) or just a single par (0)", "#", "# simplified, this is: expanded = [ (x+(abool,)) for x in extras]", "expanded", "=", "[", "(", "x", "+", "(", "bool", "(", "len", "(", "x", "[", "0", "]", ")", "<", "1", "and", "hasattr", "(", "self", "[", "x", "[", "1", "]", "]", ",", "'keys'", ")", ")", ",", ")", ")", "for", "x", "in", "extras", "]", "retval", "=", "''", "if", "expanded", ":", "retval", "=", "flattened2str", "(", "expanded", ",", "extra", "=", "1", ")", "# but before we return, delete them (from ourself!) if requested to", "if", "deleteAlso", ":", "for", "tup_to_del", "in", "extras", ":", "target", "=", "self", "# descend the tree to the dict where this item is located.", "# (this works because target is not a copy (because the dict", "# type is mutable))", "location", "=", "tup_to_del", "[", "0", "]", "for", "subdict", "in", "location", ":", "target", "=", "target", "[", "subdict", "]", "# delete it", "target", ".", "pop", "(", "tup_to_del", "[", "1", "]", ")", "return", "retval" ]
Use ConfigObj's get_extra_values() call to find any extra/unknown parameters we may have loaded. Return a string similar to findTheLost. If deleteAlso is True, this will also delete any extra/unknown items.
[ "Use", "ConfigObj", "s", "get_extra_values", "()", "call", "to", "find", "any", "extra", "/", "unknown", "parameters", "we", "may", "have", "loaded", ".", "Return", "a", "string", "similar", "to", "findTheLost", ".", "If", "deleteAlso", "is", "True", "this", "will", "also", "delete", "any", "extra", "/", "unknown", "items", "." ]
python
train
47.969697
gem/oq-engine
openquake/hazardlib/geo/surface/base.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/geo/surface/base.py#L342-L375
def get_resampled_top_edge(self, angle_var=0.1): """ This methods computes a simplified representation of a fault top edge by removing the points that are not describing a change of direction, provided a certain tolerance angle. :param float angle_var: Number representing the maximum deviation (in degrees) admitted without the creation of a new segment :returns: A :class:`~openquake.hazardlib.geo.line.Line` representing the rupture surface's top edge. """ mesh = self.mesh top_edge = [Point(mesh.lons[0][0], mesh.lats[0][0], mesh.depths[0][0])] for i in range(len(mesh.triangulate()[1][0]) - 1): v1 = numpy.asarray(mesh.triangulate()[1][0][i]) v2 = numpy.asarray(mesh.triangulate()[1][0][i + 1]) cosang = numpy.dot(v1, v2) sinang = numpy.linalg.norm(numpy.cross(v1, v2)) angle = math.degrees(numpy.arctan2(sinang, cosang)) if abs(angle) > angle_var: top_edge.append(Point(mesh.lons[0][i + 1], mesh.lats[0][i + 1], mesh.depths[0][i + 1])) top_edge.append(Point(mesh.lons[0][-1], mesh.lats[0][-1], mesh.depths[0][-1])) line_top_edge = Line(top_edge) return line_top_edge
[ "def", "get_resampled_top_edge", "(", "self", ",", "angle_var", "=", "0.1", ")", ":", "mesh", "=", "self", ".", "mesh", "top_edge", "=", "[", "Point", "(", "mesh", ".", "lons", "[", "0", "]", "[", "0", "]", ",", "mesh", ".", "lats", "[", "0", "]", "[", "0", "]", ",", "mesh", ".", "depths", "[", "0", "]", "[", "0", "]", ")", "]", "for", "i", "in", "range", "(", "len", "(", "mesh", ".", "triangulate", "(", ")", "[", "1", "]", "[", "0", "]", ")", "-", "1", ")", ":", "v1", "=", "numpy", ".", "asarray", "(", "mesh", ".", "triangulate", "(", ")", "[", "1", "]", "[", "0", "]", "[", "i", "]", ")", "v2", "=", "numpy", ".", "asarray", "(", "mesh", ".", "triangulate", "(", ")", "[", "1", "]", "[", "0", "]", "[", "i", "+", "1", "]", ")", "cosang", "=", "numpy", ".", "dot", "(", "v1", ",", "v2", ")", "sinang", "=", "numpy", ".", "linalg", ".", "norm", "(", "numpy", ".", "cross", "(", "v1", ",", "v2", ")", ")", "angle", "=", "math", ".", "degrees", "(", "numpy", ".", "arctan2", "(", "sinang", ",", "cosang", ")", ")", "if", "abs", "(", "angle", ")", ">", "angle_var", ":", "top_edge", ".", "append", "(", "Point", "(", "mesh", ".", "lons", "[", "0", "]", "[", "i", "+", "1", "]", ",", "mesh", ".", "lats", "[", "0", "]", "[", "i", "+", "1", "]", ",", "mesh", ".", "depths", "[", "0", "]", "[", "i", "+", "1", "]", ")", ")", "top_edge", ".", "append", "(", "Point", "(", "mesh", ".", "lons", "[", "0", "]", "[", "-", "1", "]", ",", "mesh", ".", "lats", "[", "0", "]", "[", "-", "1", "]", ",", "mesh", ".", "depths", "[", "0", "]", "[", "-", "1", "]", ")", ")", "line_top_edge", "=", "Line", "(", "top_edge", ")", "return", "line_top_edge" ]
This methods computes a simplified representation of a fault top edge by removing the points that are not describing a change of direction, provided a certain tolerance angle. :param float angle_var: Number representing the maximum deviation (in degrees) admitted without the creation of a new segment :returns: A :class:`~openquake.hazardlib.geo.line.Line` representing the rupture surface's top edge.
[ "This", "methods", "computes", "a", "simplified", "representation", "of", "a", "fault", "top", "edge", "by", "removing", "the", "points", "that", "are", "not", "describing", "a", "change", "of", "direction", "provided", "a", "certain", "tolerance", "angle", "." ]
python
train
40.911765
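The direction-change test above is the standard angle-between-vectors formula, angle = atan2(|v1 x v2|, v1 . v2); a stdlib-only sketch of that computation for 3-component vectors:

import math

def angle_between(v1, v2):
    # Cross product magnitude gives the sine term, dot product the cosine term.
    cross = (v1[1] * v2[2] - v1[2] * v2[1],
             v1[2] * v2[0] - v1[0] * v2[2],
             v1[0] * v2[1] - v1[1] * v2[0])
    sinang = math.sqrt(sum(c * c for c in cross))
    cosang = sum(a * b for a, b in zip(v1, v2))
    return math.degrees(math.atan2(sinang, cosang))

print(round(angle_between((1, 0, 0), (1, 1, 0)), 1))  # 45.0 -> direction change, point kept
print(angle_between((1, 0, 0), (2, 0, 0)))            # 0.0 -> collinear, point dropped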
7sDream/zhihu-py3
zhihu/answer.py
https://github.com/7sDream/zhihu-py3/blob/bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc/zhihu/answer.py#L169-L181
def collect_num(self): """Get the collection count of the answer. :return: number of times the answer has been collected :rtype: int """ element = self.soup.find("a", { "data-za-a": "click_answer_collected_count" }) if element is None: return 0 else: return int(element.get_text())
[ "def", "collect_num", "(", "self", ")", ":", "element", "=", "self", ".", "soup", ".", "find", "(", "\"a\"", ",", "{", "\"data-za-a\"", ":", "\"click_answer_collected_count\"", "}", ")", "if", "element", "is", "None", ":", "return", "0", "else", ":", "return", "int", "(", "element", ".", "get_text", "(", ")", ")" ]
Get the collection count of the answer. :return: number of times the answer has been collected :rtype: int
[ "Get", "the", "collection", "count", "of", "the", "answer" ]
python
train
23.076923
saltstack/salt
salt/modules/mysql.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mysql.py#L1710-L1732
def db_optimize(name, table=None, **connection_args): ''' Optimizes the full database or just a given table CLI Example: .. code-block:: bash salt '*' mysql.db_optimize dbname ''' ret = [] if table is None: # we need to optimize all tables tables = db_tables(name, **connection_args) for table in tables: log.info('Optimizing table \'%s\' in db \'%s\'..', table, name) ret.append(__optimize_table(name, table, **connection_args)) else: log.info('Optimizing table \'%s\' in db \'%s\'..', table, name) ret = __optimize_table(name, table, **connection_args) return ret
[ "def", "db_optimize", "(", "name", ",", "table", "=", "None", ",", "*", "*", "connection_args", ")", ":", "ret", "=", "[", "]", "if", "table", "is", "None", ":", "# we need to optimize all tables", "tables", "=", "db_tables", "(", "name", ",", "*", "*", "connection_args", ")", "for", "table", "in", "tables", ":", "log", ".", "info", "(", "'Optimizing table \\'%s\\' in db \\'%s\\'..'", ",", "table", ",", "name", ")", "ret", ".", "append", "(", "__optimize_table", "(", "name", ",", "table", ",", "*", "*", "connection_args", ")", ")", "else", ":", "log", ".", "info", "(", "'Optimizing table \\'%s\\' in db \\'%s\\'..'", ",", "table", ",", "name", ")", "ret", "=", "__optimize_table", "(", "name", ",", "table", ",", "*", "*", "connection_args", ")", "return", "ret" ]
Optimizes the full database or just a given table CLI Example: .. code-block:: bash salt '*' mysql.db_optimize dbname
[ "Optimizes", "the", "full", "database", "or", "just", "a", "given", "table" ]
python
train
29.652174
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/utils/ipstruct.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/utils/ipstruct.py#L219-L231
def __dict_invert(self, data): """Helper function for merge. Takes a dictionary whose values are lists and returns a dict with the elements of each list as keys and the original keys as values. """ outdict = {} for k,lst in data.items(): if isinstance(lst, str): lst = lst.split() for entry in lst: outdict[entry] = k return outdict
[ "def", "__dict_invert", "(", "self", ",", "data", ")", ":", "outdict", "=", "{", "}", "for", "k", ",", "lst", "in", "data", ".", "items", "(", ")", ":", "if", "isinstance", "(", "lst", ",", "str", ")", ":", "lst", "=", "lst", ".", "split", "(", ")", "for", "entry", "in", "lst", ":", "outdict", "[", "entry", "]", "=", "k", "return", "outdict" ]
Helper function for merge. Takes a dictionary whose values are lists and returns a dict with the elements of each list as keys and the original keys as values.
[ "Helper", "function", "for", "merge", "." ]
python
test
33.307692
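A runnable sketch of the inversion helper above, showing both the list branch and the string-splitting branch (sample data is arbitrary):

def dict_invert(data):
    outdict = {}
    for k, lst in data.items():
        if isinstance(lst, str):
            lst = lst.split()
        for entry in lst:
            outdict[entry] = k
    return outdict

print(dict_invert({'one': ['a', 'b'], 'two': 'c d'}))
# {'a': 'one', 'b': 'one', 'c': 'two', 'd': 'two'}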
sosy-lab/benchexec
benchexec/cgroups.py
https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/cgroups.py#L119-L130
def _find_own_cgroups(): """ For all subsystems, return the information in which (sub-)cgroup this process is in. (Each process is in exactly one cgroup in each hierarchy.) @return a generator of tuples (subsystem, cgroup) """ try: with open('/proc/self/cgroup', 'rt') as ownCgroupsFile: for cgroup in _parse_proc_pid_cgroup(ownCgroupsFile): yield cgroup except IOError: logging.exception('Cannot read /proc/self/cgroup')
[ "def", "_find_own_cgroups", "(", ")", ":", "try", ":", "with", "open", "(", "'/proc/self/cgroup'", ",", "'rt'", ")", "as", "ownCgroupsFile", ":", "for", "cgroup", "in", "_parse_proc_pid_cgroup", "(", "ownCgroupsFile", ")", ":", "yield", "cgroup", "except", "IOError", ":", "logging", ".", "exception", "(", "'Cannot read /proc/self/cgroup'", ")" ]
For all subsystems, return the information in which (sub-)cgroup this process is in. (Each process is in exactly one cgroup in each hierarchy.) @return a generator of tuples (subsystem, cgroup)
[ "For", "all", "subsystems", "return", "the", "information", "in", "which", "(", "sub", "-", ")", "cgroup", "this", "process", "is", "in", ".", "(", "Each", "process", "is", "in", "exactly", "one", "cgroup", "in", "each", "hierarchy", ".", ")" ]
python
train
39.833333
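The _parse_proc_pid_cgroup helper is not shown in this record; a rough sketch of parsing the documented /proc/<pid>/cgroup line format ("hierarchy-id:controller,controller:/path"), fed from an in-memory sample rather than the real file:

import io

def parse_proc_pid_cgroup(cgroup_file):
    # Each line: hierarchy id, comma-separated controllers, cgroup path.
    for line in cgroup_file:
        _, subsystems, path = line.strip().split(':', 2)
        for subsystem in subsystems.split(','):
            yield subsystem, path

sample = io.StringIO('4:cpu,cpuacct:/user.slice\n2:memory:/user.slice\n')
print(list(parse_proc_pid_cgroup(sample)))
# [('cpu', '/user.slice'), ('cpuacct', '/user.slice'), ('memory', '/user.slice')]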
ooici/elasticpy
elasticpy/sort.py
https://github.com/ooici/elasticpy/blob/ec221800a80c39e80d8c31667c5b138da39219f2/elasticpy/sort.py#L16-L32
def missing(self, field, last=True): ''' Numeric fields support specific handling for missing fields in a doc. The missing value can be _last, _first, or a custom value (that will be used for missing docs as the sort value). missing('price') > {"price" : {"missing": "_last" } } missing('price',False) > {"price" : {"missing": "_first"} } ''' if last: self.append({field: {'missing': '_last'}}) else: self.append({field: {'missing': '_first'}}) return self
[ "def", "missing", "(", "self", ",", "field", ",", "last", "=", "True", ")", ":", "if", "last", ":", "self", ".", "append", "(", "{", "field", ":", "{", "'missing'", ":", "'_last'", "}", "}", ")", "else", ":", "self", ".", "append", "(", "{", "field", ":", "{", "'missing'", ":", "'_first'", "}", "}", ")", "return", "self" ]
Numeric fields support specific handling for missing fields in a doc. The missing value can be _last, _first, or a custom value (that will be used for missing docs as the sort value). missing('price') > {"price" : {"missing": "_last" } } missing('price',False) > {"price" : {"missing": "_first"} }
[ "Numeric", "fields", "support", "specific", "handling", "for", "missing", "fields", "in", "a", "doc", ".", "The", "missing", "value", "can", "be", "_last", "_first", "or", "a", "custom", "value", "(", "that", "will", "be", "used", "for", "missing", "docs", "as", "the", "sort", "value", ")", "." ]
python
train
33.058824
ForensicArtifacts/artifacts
utils/dependencies.py
https://github.com/ForensicArtifacts/artifacts/blob/044a63bfb4448af33d085c69066c80f9505ae7ca/utils/dependencies.py#L88-L108
def Read(self, file_object): """Reads dependency definitions. Args: file_object (file): file-like object to read from. Yields: DependencyDefinition: dependency definition. """ config_parser = configparser.RawConfigParser() # pylint: disable=deprecated-method # TODO: replace readfp by read_file, check if Python 2 compatible config_parser.readfp(file_object) for section_name in config_parser.sections(): dependency_definition = DependencyDefinition(section_name) for value_name in self._VALUE_NAMES: value = self._GetConfigValue(config_parser, section_name, value_name) setattr(dependency_definition, value_name, value) yield dependency_definition
[ "def", "Read", "(", "self", ",", "file_object", ")", ":", "config_parser", "=", "configparser", ".", "RawConfigParser", "(", ")", "# pylint: disable=deprecated-method", "# TODO: replace readfp by read_file, check if Python 2 compatible", "config_parser", ".", "readfp", "(", "file_object", ")", "for", "section_name", "in", "config_parser", ".", "sections", "(", ")", ":", "dependency_definition", "=", "DependencyDefinition", "(", "section_name", ")", "for", "value_name", "in", "self", ".", "_VALUE_NAMES", ":", "value", "=", "self", ".", "_GetConfigValue", "(", "config_parser", ",", "section_name", ",", "value_name", ")", "setattr", "(", "dependency_definition", ",", "value_name", ",", "value", ")", "yield", "dependency_definition" ]
Reads dependency definitions. Args: file_object (file): file-like object to read from. Yields: DependencyDefinition: dependency definition.
[ "Reads", "dependency", "definitions", "." ]
python
train
34
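A small self-contained sketch of the same configparser pattern on Python 3, using read_file (the non-deprecated successor to readfp flagged in the TODO above) against an in-memory INI; the section and option names here are made up:

import configparser
import io

ini = u"[artifacts]\nminimum_version: 20170818\nversion_property: __version__\n"
parser = configparser.RawConfigParser()
parser.read_file(io.StringIO(ini))
for section in parser.sections():
    print(section, dict(parser.items(section)))
# artifacts {'minimum_version': '20170818', 'version_property': '__version__'}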
briandilley/ebs-deploy
ebs_deploy/__init__.py
https://github.com/briandilley/ebs-deploy/blob/4178c9c1282a9025fb987dab3470bea28c202e10/ebs_deploy/__init__.py#L309-L324
def create_environment(self, env_name, version_label=None, solution_stack_name=None, cname_prefix=None, description=None, option_settings=None, tier_name='WebServer', tier_type='Standard', tier_version='1.1'): """ Creates a new environment """ out("Creating environment: " + str(env_name) + ", tier_name:" + str(tier_name) + ", tier_type:" + str(tier_type)) self.ebs.create_environment(self.app_name, env_name, version_label=version_label, solution_stack_name=solution_stack_name, cname_prefix=cname_prefix, description=description, option_settings=option_settings, tier_type=tier_type, tier_name=tier_name, tier_version=tier_version)
[ "def", "create_environment", "(", "self", ",", "env_name", ",", "version_label", "=", "None", ",", "solution_stack_name", "=", "None", ",", "cname_prefix", "=", "None", ",", "description", "=", "None", ",", "option_settings", "=", "None", ",", "tier_name", "=", "'WebServer'", ",", "tier_type", "=", "'Standard'", ",", "tier_version", "=", "'1.1'", ")", ":", "out", "(", "\"Creating environment: \"", "+", "str", "(", "env_name", ")", "+", "\", tier_name:\"", "+", "str", "(", "tier_name", ")", "+", "\", tier_type:\"", "+", "str", "(", "tier_type", ")", ")", "self", ".", "ebs", ".", "create_environment", "(", "self", ".", "app_name", ",", "env_name", ",", "version_label", "=", "version_label", ",", "solution_stack_name", "=", "solution_stack_name", ",", "cname_prefix", "=", "cname_prefix", ",", "description", "=", "description", ",", "option_settings", "=", "option_settings", ",", "tier_type", "=", "tier_type", ",", "tier_name", "=", "tier_name", ",", "tier_version", "=", "tier_version", ")" ]
Creates a new environment
[ "Creates", "a", "new", "environment" ]
python
valid
62.5625
spyder-ide/spyder
spyder/plugins/editor/extensions/closequotes.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/extensions/closequotes.py#L61-L113
def _autoinsert_quotes(self, key): """Control how to automatically insert quotes in various situations.""" char = {Qt.Key_QuoteDbl: '"', Qt.Key_Apostrophe: '\''}[key] line_text = self.editor.get_text('sol', 'eol') line_to_cursor = self.editor.get_text('sol', 'cursor') cursor = self.editor.textCursor() last_three = self.editor.get_text('sol', 'cursor')[-3:] last_two = self.editor.get_text('sol', 'cursor')[-2:] trailing_text = self.editor.get_text('cursor', 'eol').strip() if self.editor.has_selected_text(): text = self.editor.get_selected_text() self.editor.insert_text("{0}{1}{0}".format(char, text)) # keep text selected, for inserting multiple quotes cursor.movePosition(QTextCursor.Left, QTextCursor.MoveAnchor, 1) cursor.movePosition(QTextCursor.Left, QTextCursor.KeepAnchor, len(text)) self.editor.setTextCursor(cursor) elif self.editor.in_comment(): self.editor.insert_text(char) elif (len(trailing_text) > 0 and not unmatched_quotes_in_line(line_to_cursor) == char and not trailing_text[0] in (',', ':', ';', ')', ']', '}')): self.editor.insert_text(char) elif (unmatched_quotes_in_line(line_text) and (not last_three == 3*char)): self.editor.insert_text(char) # Move to the right if we are before a quote elif self.editor.next_char() == char: cursor.movePosition(QTextCursor.NextCharacter, QTextCursor.KeepAnchor, 1) cursor.clearSelection() self.editor.setTextCursor(cursor) # Automatic insertion of triple double quotes (for docstrings) elif last_three == 3*char: self.editor.insert_text(3*char) cursor = self.editor.textCursor() cursor.movePosition(QTextCursor.PreviousCharacter, QTextCursor.KeepAnchor, 3) cursor.clearSelection() self.editor.setTextCursor(cursor) # If last two chars are quotes, just insert one more because most # probably the user wants to write a docstring elif last_two == 2*char: self.editor.insert_text(char) self.editor.delayed_popup_docstring() # Automatic insertion of quotes else: self.editor.insert_text(2*char) cursor = self.editor.textCursor() cursor.movePosition(QTextCursor.PreviousCharacter) self.editor.setTextCursor(cursor)
[ "def", "_autoinsert_quotes", "(", "self", ",", "key", ")", ":", "char", "=", "{", "Qt", ".", "Key_QuoteDbl", ":", "'\"'", ",", "Qt", ".", "Key_Apostrophe", ":", "'\\''", "}", "[", "key", "]", "line_text", "=", "self", ".", "editor", ".", "get_text", "(", "'sol'", ",", "'eol'", ")", "line_to_cursor", "=", "self", ".", "editor", ".", "get_text", "(", "'sol'", ",", "'cursor'", ")", "cursor", "=", "self", ".", "editor", ".", "textCursor", "(", ")", "last_three", "=", "self", ".", "editor", ".", "get_text", "(", "'sol'", ",", "'cursor'", ")", "[", "-", "3", ":", "]", "last_two", "=", "self", ".", "editor", ".", "get_text", "(", "'sol'", ",", "'cursor'", ")", "[", "-", "2", ":", "]", "trailing_text", "=", "self", ".", "editor", ".", "get_text", "(", "'cursor'", ",", "'eol'", ")", ".", "strip", "(", ")", "if", "self", ".", "editor", ".", "has_selected_text", "(", ")", ":", "text", "=", "self", ".", "editor", ".", "get_selected_text", "(", ")", "self", ".", "editor", ".", "insert_text", "(", "\"{0}{1}{0}\"", ".", "format", "(", "char", ",", "text", ")", ")", "# keep text selected, for inserting multiple quotes", "cursor", ".", "movePosition", "(", "QTextCursor", ".", "Left", ",", "QTextCursor", ".", "MoveAnchor", ",", "1", ")", "cursor", ".", "movePosition", "(", "QTextCursor", ".", "Left", ",", "QTextCursor", ".", "KeepAnchor", ",", "len", "(", "text", ")", ")", "self", ".", "editor", ".", "setTextCursor", "(", "cursor", ")", "elif", "self", ".", "editor", ".", "in_comment", "(", ")", ":", "self", ".", "editor", ".", "insert_text", "(", "char", ")", "elif", "(", "len", "(", "trailing_text", ")", ">", "0", "and", "not", "unmatched_quotes_in_line", "(", "line_to_cursor", ")", "==", "char", "and", "not", "trailing_text", "[", "0", "]", "in", "(", "','", ",", "':'", ",", "';'", ",", "')'", ",", "']'", ",", "'}'", ")", ")", ":", "self", ".", "editor", ".", "insert_text", "(", "char", ")", "elif", "(", "unmatched_quotes_in_line", "(", "line_text", ")", "and", "(", "not", "last_three", "==", "3", "*", "char", ")", ")", ":", "self", ".", "editor", ".", "insert_text", "(", "char", ")", "# Move to the right if we are before a quote", "elif", "self", ".", "editor", ".", "next_char", "(", ")", "==", "char", ":", "cursor", ".", "movePosition", "(", "QTextCursor", ".", "NextCharacter", ",", "QTextCursor", ".", "KeepAnchor", ",", "1", ")", "cursor", ".", "clearSelection", "(", ")", "self", ".", "editor", ".", "setTextCursor", "(", "cursor", ")", "# Automatic insertion of triple double quotes (for docstrings)", "elif", "last_three", "==", "3", "*", "char", ":", "self", ".", "editor", ".", "insert_text", "(", "3", "*", "char", ")", "cursor", "=", "self", ".", "editor", ".", "textCursor", "(", ")", "cursor", ".", "movePosition", "(", "QTextCursor", ".", "PreviousCharacter", ",", "QTextCursor", ".", "KeepAnchor", ",", "3", ")", "cursor", ".", "clearSelection", "(", ")", "self", ".", "editor", ".", "setTextCursor", "(", "cursor", ")", "# If last two chars are quotes, just insert one more because most", "# probably the user wants to write a docstring", "elif", "last_two", "==", "2", "*", "char", ":", "self", ".", "editor", ".", "insert_text", "(", "char", ")", "self", ".", "editor", ".", "delayed_popup_docstring", "(", ")", "# Automatic insertion of quotes", "else", ":", "self", ".", "editor", ".", "insert_text", "(", "2", "*", "char", ")", "cursor", "=", "self", ".", "editor", ".", "textCursor", "(", ")", "cursor", ".", "movePosition", "(", "QTextCursor", ".", "PreviousCharacter", ")", "self", ".", "editor", ".", 
"setTextCursor", "(", "cursor", ")" ]
Control how to automatically insert quotes in various situations.
[ "Control", "how", "to", "automatically", "insert", "quotes", "in", "various", "situations", "." ]
python
train
49.283019
saltstack/salt
salt/modules/boto_kms.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_kms.py#L180-L200
def key_exists(key_id, region=None, key=None, keyid=None, profile=None): ''' Check for the existence of a key. CLI example:: salt myminion boto_kms.key_exists 'alias/mykey' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) r = {} try: key = conn.describe_key(key_id) # TODO: add to context cache r['result'] = True except boto.exception.BotoServerError as e: if isinstance(e, boto.kms.exceptions.NotFoundException): r['result'] = False return r r['error'] = __utils__['boto.get_error'](e) return r
[ "def", "key_exists", "(", "key_id", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "r", "=", "{", "}", "try", ":", "key", "=", "conn", ".", "describe_key", "(", "key_id", ")", "# TODO: add to context cache", "r", "[", "'result'", "]", "=", "True", "except", "boto", ".", "exception", ".", "BotoServerError", "as", "e", ":", "if", "isinstance", "(", "e", ",", "boto", ".", "kms", ".", "exceptions", ".", "NotFoundException", ")", ":", "r", "[", "'result'", "]", "=", "False", "return", "r", "r", "[", "'error'", "]", "=", "__utils__", "[", "'boto.get_error'", "]", "(", "e", ")", "return", "r" ]
Check for the existence of a key. CLI example:: salt myminion boto_kms.key_exists 'alias/mykey'
[ "Check", "for", "the", "existence", "of", "a", "key", "." ]
python
train
29.190476
dereneaton/ipyrad
ipyrad/analysis/tree.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tree.py#L176-L199
def _collapse_outgroup(tree, taxdicts): """ collapse outgroup in ete Tree for easier viewing """ ## check that all tests have the same outgroup outg = taxdicts[0]["p4"] if not all([i["p4"] == outg for i in taxdicts]): raise Exception("no good") ## prune tree, keep only one sample from outgroup tre = ete.Tree(tree.write(format=1)) #tree.copy(method="deepcopy") alltax = [i for i in tre.get_leaf_names() if i not in outg] alltax += [outg[0]] tre.prune(alltax) tre.search_nodes(name=outg[0])[0].name = "outgroup" tre.ladderize() ## remove other ougroups from taxdicts taxd = copy.deepcopy(taxdicts) newtaxdicts = [] for test in taxd: #test["p4"] = [outg[0]] test["p4"] = ["outgroup"] newtaxdicts.append(test) return tre, newtaxdicts
[ "def", "_collapse_outgroup", "(", "tree", ",", "taxdicts", ")", ":", "## check that all tests have the same outgroup", "outg", "=", "taxdicts", "[", "0", "]", "[", "\"p4\"", "]", "if", "not", "all", "(", "[", "i", "[", "\"p4\"", "]", "==", "outg", "for", "i", "in", "taxdicts", "]", ")", ":", "raise", "Exception", "(", "\"no good\"", ")", "## prune tree, keep only one sample from outgroup", "tre", "=", "ete", ".", "Tree", "(", "tree", ".", "write", "(", "format", "=", "1", ")", ")", "#tree.copy(method=\"deepcopy\")", "alltax", "=", "[", "i", "for", "i", "in", "tre", ".", "get_leaf_names", "(", ")", "if", "i", "not", "in", "outg", "]", "alltax", "+=", "[", "outg", "[", "0", "]", "]", "tre", ".", "prune", "(", "alltax", ")", "tre", ".", "search_nodes", "(", "name", "=", "outg", "[", "0", "]", ")", "[", "0", "]", ".", "name", "=", "\"outgroup\"", "tre", ".", "ladderize", "(", ")", "## remove other ougroups from taxdicts", "taxd", "=", "copy", ".", "deepcopy", "(", "taxdicts", ")", "newtaxdicts", "=", "[", "]", "for", "test", "in", "taxd", ":", "#test[\"p4\"] = [outg[0]]", "test", "[", "\"p4\"", "]", "=", "[", "\"outgroup\"", "]", "newtaxdicts", ".", "append", "(", "test", ")", "return", "tre", ",", "newtaxdicts" ]
collapse outgroup in ete Tree for easier viewing
[ "collapse", "outgroup", "in", "ete", "Tree", "for", "easier", "viewing" ]
python
valid
33.75
pre-commit/pre-commit
pre_commit/languages/helpers.py
https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/languages/helpers.py#L73-L83
def _shuffled(seq): """Deterministically shuffle identically under both py2 + py3.""" fixed_random = random.Random() if six.PY2: # pragma: no cover (py2) fixed_random.seed(FIXED_RANDOM_SEED) else: # pragma: no cover (py3) fixed_random.seed(FIXED_RANDOM_SEED, version=1) seq = list(seq) random.shuffle(seq, random=fixed_random.random) return seq
[ "def", "_shuffled", "(", "seq", ")", ":", "fixed_random", "=", "random", ".", "Random", "(", ")", "if", "six", ".", "PY2", ":", "# pragma: no cover (py2)", "fixed_random", ".", "seed", "(", "FIXED_RANDOM_SEED", ")", "else", ":", "# pragma: no cover (py3)", "fixed_random", ".", "seed", "(", "FIXED_RANDOM_SEED", ",", "version", "=", "1", ")", "seq", "=", "list", "(", "seq", ")", "random", ".", "shuffle", "(", "seq", ",", "random", "=", "fixed_random", ".", "random", ")", "return", "seq" ]
Deterministically shuffle identically under both py2 + py3.
[ "Deterministically", "shuffle", "identically", "under", "both", "py2", "+", "py3", "." ]
python
train
34.636364
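A runnable sketch of the determinism this buys. FIXED_RANDOM_SEED is defined elsewhere in pre_commit, so an arbitrary stand-in is used here, and the seeded generator's own shuffle() replaces the random= keyword (which newer Pythons removed):

import random

FIXED_RANDOM_SEED = 1542676187  # stand-in; the real constant lives in the module

def shuffled(seq):
    fixed_random = random.Random()
    fixed_random.seed(FIXED_RANDOM_SEED, version=1)
    seq = list(seq)
    fixed_random.shuffle(seq)
    return seq

print(shuffled(range(5)) == shuffled(range(5)))  # True: same seed, same order every run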
dbrattli/OSlash
oslash/cont.py
https://github.com/dbrattli/OSlash/blob/ffdc714c5d454f7519f740254de89f70850929eb/oslash/cont.py#L45-L50
def bind(self, fn: Callable[[Any], 'Cont']) -> 'Cont': r"""Chain continuation passing functions. Haskell: m >>= k = Cont $ \c -> runCont m $ \a -> runCont (k a) c """ return Cont(lambda c: self.run(lambda a: fn(a).run(c)))
[ "def", "bind", "(", "self", ",", "fn", ":", "Callable", "[", "[", "Any", "]", ",", "'Cont'", "]", ")", "->", "'Cont'", ":", "return", "Cont", "(", "lambda", "c", ":", "self", ".", "run", "(", "lambda", "a", ":", "fn", "(", "a", ")", ".", "run", "(", "c", ")", ")", ")" ]
r"""Chain continuation passing functions. Haskell: m >>= k = Cont $ \c -> runCont m $ \a -> runCont (k a) c
[ "r", "Chain", "continuation", "passing", "functions", "." ]
python
train
41.666667
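A minimal self-contained Cont with the same bind, plus a unit constructor (a stand-in for the library's return/unit) to make the chaining runnable:

class Cont(object):
    def __init__(self, fn):
        self.run = fn  # run :: (a -> r) -> r

    def bind(self, fn):
        # Same shape as above: feed each intermediate value to the next step.
        return Cont(lambda c: self.run(lambda a: fn(a).run(c)))

def unit(value):
    return Cont(lambda c: c(value))

result = unit(20).bind(lambda x: unit(x + 1)).bind(lambda x: unit(x * 2))
print(result.run(lambda x: x))  # 42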
locationlabs/mockredis
mockredis/client.py
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L782-L790
def brpoplpush(self, source, destination, timeout=0): """Emulate brpoplpush""" transfer_item = self.brpop(source, timeout) if transfer_item is None: return None key, val = transfer_item self.lpush(destination, val) return val
[ "def", "brpoplpush", "(", "self", ",", "source", ",", "destination", ",", "timeout", "=", "0", ")", ":", "transfer_item", "=", "self", ".", "brpop", "(", "source", ",", "timeout", ")", "if", "transfer_item", "is", "None", ":", "return", "None", "key", ",", "val", "=", "transfer_item", "self", ".", "lpush", "(", "destination", ",", "val", ")", "return", "val" ]
Emulate brpoplpush
[ "Emulate", "brpoplpush" ]
python
train
30.888889
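The RPOPLPUSH semantics being emulated above, sketched with plain deques and no redis or mock involved: take from the tail of source, push onto the head of destination:

from collections import deque

def rpoplpush(source, destination):
    if not source:
        return None  # the real brpop would block for up to `timeout` seconds
    val = source.pop()           # RPOP: remove from the tail of source
    destination.appendleft(val)  # LPUSH: insert at the head of destination
    return val

source, destination = deque(['a', 'b', 'c']), deque()
print(rpoplpush(source, destination))   # c
print(list(source), list(destination))  # ['a', 'b'] ['c']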
atlassian-api/atlassian-python-api
atlassian/jira.py
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/jira.py#L1131-L1138
def get_agile_board(self, board_id): """ Get agile board info by id :param board_id: :return: """ url = 'rest/agile/1.0/board/{}'.format(str(board_id)) return self.get(url)
[ "def", "get_agile_board", "(", "self", ",", "board_id", ")", ":", "url", "=", "'rest/agile/1.0/board/{}'", ".", "format", "(", "str", "(", "board_id", ")", ")", "return", "self", ".", "get", "(", "url", ")" ]
Get agile board info by id :param board_id: :return:
[ "Get", "agile", "board", "info", "by", "id", ":", "param", "board_id", ":", ":", "return", ":" ]
python
train
27.625