Dataset columns:
  id                int32      values 0 to 252k
  repo              string     lengths 7 to 55
  path              string     lengths 4 to 127
  func_name         string     lengths 1 to 88
  original_string   string     lengths 75 to 19.8k
  language          string     1 distinct value
  code              string     lengths 51 to 19.8k
  code_tokens       sequence
  docstring         string     lengths 3 to 17.3k
  docstring_tokens  sequence
  sha               string     lengths 40 to 40
  url               string     lengths 87 to 242
250,700
closeio/tasktiger
tasktiger/__init__.py
TaskTiger.get_queue_sizes
def get_queue_sizes(self, queue): """ Get the queue's number of tasks in each state. Returns dict with queue size for the QUEUED, SCHEDULED, and ACTIVE states. Does not include size of error queue. """ states = [QUEUED, SCHEDULED, ACTIVE] pipeline = self.connection.pipeline() for state in states: pipeline.zcard(self._key(state, queue)) results = pipeline.execute() return dict(zip(states, results))
python
def get_queue_sizes(self, queue): states = [QUEUED, SCHEDULED, ACTIVE] pipeline = self.connection.pipeline() for state in states: pipeline.zcard(self._key(state, queue)) results = pipeline.execute() return dict(zip(states, results))
[ "def", "get_queue_sizes", "(", "self", ",", "queue", ")", ":", "states", "=", "[", "QUEUED", ",", "SCHEDULED", ",", "ACTIVE", "]", "pipeline", "=", "self", ".", "connection", ".", "pipeline", "(", ")", "for", "state", "in", "states", ":", "pipeline", ".", "zcard", "(", "self", ".", "_key", "(", "state", ",", "queue", ")", ")", "results", "=", "pipeline", ".", "execute", "(", ")", "return", "dict", "(", "zip", "(", "states", ",", "results", ")", ")" ]
Get the queue's number of tasks in each state. Returns dict with queue size for the QUEUED, SCHEDULED, and ACTIVE states. Does not include size of error queue.
[ "Get", "the", "queue", "s", "number", "of", "tasks", "in", "each", "state", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/__init__.py#L350-L363
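A hypothetical usage sketch for the get_queue_sizes record above; the Redis connection, TaskTiger setup, and queue name are illustrative assumptions, not part of the dataset row.

```python
# Illustrative sketch only: assumes a local Redis server and a queue named
# "default"; the TaskTiger constructor arguments follow the project's README.
import redis
from tasktiger import TaskTiger

tiger = TaskTiger(connection=redis.Redis(decode_responses=True))

# Per the docstring above, this returns a dict keyed by state (QUEUED,
# SCHEDULED, ACTIVE) and does not include the error queue.
sizes = tiger.get_queue_sizes('default')
print(sizes)  # e.g. {'queued': 3, 'scheduled': 0, 'active': 1}
```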
250,701
closeio/tasktiger
tasktiger/__init__.py
TaskTiger.get_queue_system_lock
def get_queue_system_lock(self, queue): """ Get system lock timeout Returns time system lock expires or None if lock does not exist """ key = self._key(LOCK_REDIS_KEY, queue) return Semaphore.get_system_lock(self.connection, key)
python
def get_queue_system_lock(self, queue): key = self._key(LOCK_REDIS_KEY, queue) return Semaphore.get_system_lock(self.connection, key)
[ "def", "get_queue_system_lock", "(", "self", ",", "queue", ")", ":", "key", "=", "self", ".", "_key", "(", "LOCK_REDIS_KEY", ",", "queue", ")", "return", "Semaphore", ".", "get_system_lock", "(", "self", ".", "connection", ",", "key", ")" ]
Get system lock timeout Returns time system lock expires or None if lock does not exist
[ "Get", "system", "lock", "timeout" ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/__init__.py#L370-L378
250,702
closeio/tasktiger
tasktiger/__init__.py
TaskTiger.set_queue_system_lock
def set_queue_system_lock(self, queue, timeout): """ Set system lock on a queue. Max workers for this queue must be used for this to have any effect. This will keep workers from processing tasks for this queue until the timeout has expired. Active tasks will continue processing their current task. timeout is number of seconds to hold the lock """ key = self._key(LOCK_REDIS_KEY, queue) Semaphore.set_system_lock(self.connection, key, timeout)
python
def set_queue_system_lock(self, queue, timeout): key = self._key(LOCK_REDIS_KEY, queue) Semaphore.set_system_lock(self.connection, key, timeout)
[ "def", "set_queue_system_lock", "(", "self", ",", "queue", ",", "timeout", ")", ":", "key", "=", "self", ".", "_key", "(", "LOCK_REDIS_KEY", ",", "queue", ")", "Semaphore", ".", "set_system_lock", "(", "self", ".", "connection", ",", "key", ",", "timeout", ")" ]
Set system lock on a queue. Max workers for this queue must be used for this to have any effect. This will keep workers from processing tasks for this queue until the timeout has expired. Active tasks will continue processing their current task. timeout is number of seconds to hold the lock
[ "Set", "system", "lock", "on", "a", "queue", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/__init__.py#L380-L394
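A hypothetical sketch combining the two system-lock records above (get_queue_system_lock and set_queue_system_lock); the queue name and timeout are made up for illustration.

```python
# Illustrative sketch only: pause the hypothetical "reports" queue for 5 minutes.
# Per the docstring above, this only has an effect if max workers are configured
# for the queue; already-active tasks finish their current work.
import redis
from tasktiger import TaskTiger

tiger = TaskTiger(connection=redis.Redis(decode_responses=True))

tiger.set_queue_system_lock('reports', 300)          # hold the lock for 300 seconds
expires_at = tiger.get_queue_system_lock('reports')  # expiry timestamp, or None
print('queue locked until', expires_at)
```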
250,703
closeio/tasktiger
tasktiger/worker.py
Worker._install_signal_handlers
def _install_signal_handlers(self): """ Sets up signal handlers for safely stopping the worker. """ def request_stop(signum, frame): self._stop_requested = True self.log.info('stop requested, waiting for task to finish') signal.signal(signal.SIGINT, request_stop) signal.signal(signal.SIGTERM, request_stop)
python
def _install_signal_handlers(self): def request_stop(signum, frame): self._stop_requested = True self.log.info('stop requested, waiting for task to finish') signal.signal(signal.SIGINT, request_stop) signal.signal(signal.SIGTERM, request_stop)
[ "def", "_install_signal_handlers", "(", "self", ")", ":", "def", "request_stop", "(", "signum", ",", "frame", ")", ":", "self", ".", "_stop_requested", "=", "True", "self", ".", "log", ".", "info", "(", "'stop requested, waiting for task to finish'", ")", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "request_stop", ")", "signal", ".", "signal", "(", "signal", ".", "SIGTERM", ",", "request_stop", ")" ]
Sets up signal handlers for safely stopping the worker.
[ "Sets", "up", "signal", "handlers", "for", "safely", "stopping", "the", "worker", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L105-L113
250,704
closeio/tasktiger
tasktiger/worker.py
Worker._uninstall_signal_handlers
def _uninstall_signal_handlers(self): """ Restores default signal handlers. """ signal.signal(signal.SIGINT, signal.SIG_DFL) signal.signal(signal.SIGTERM, signal.SIG_DFL)
python
def _uninstall_signal_handlers(self): signal.signal(signal.SIGINT, signal.SIG_DFL) signal.signal(signal.SIGTERM, signal.SIG_DFL)
[ "def", "_uninstall_signal_handlers", "(", "self", ")", ":", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "signal", ".", "SIG_DFL", ")", "signal", ".", "signal", "(", "signal", ".", "SIGTERM", ",", "signal", ".", "SIG_DFL", ")" ]
Restores default signal handlers.
[ "Restores", "default", "signal", "handlers", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L115-L120
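A self-contained sketch of the graceful-stop pattern shown in the two signal-handler records above; this is not tasktiger code, just the same idea in miniature.

```python
# Sketch of the pattern: SIGINT/SIGTERM set a flag instead of killing the process,
# the work loop checks the flag between tasks, and the default handlers are
# restored afterwards (mirroring _install_/_uninstall_signal_handlers).
import signal
import time

stop_requested = False

def request_stop(signum, frame):
    global stop_requested
    stop_requested = True
    print('stop requested, waiting for task to finish')

signal.signal(signal.SIGINT, request_stop)
signal.signal(signal.SIGTERM, request_stop)

while not stop_requested:
    time.sleep(0.1)  # stand-in for processing one task

signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
```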
250,705
closeio/tasktiger
tasktiger/worker.py
Worker._filter_queues
def _filter_queues(self, queues): """ Applies the queue filter to the given list of queues and returns the queues that match. Note that a queue name matches any subqueues starting with the name, followed by a date. For example, "foo" will match both "foo" and "foo.bar". """ def match(queue): """ Returns whether the given queue should be included by checking each part of the queue name. """ for part in reversed_dotted_parts(queue): if part in self.exclude_queues: return False if part in self.only_queues: return True return not self.only_queues return [q for q in queues if match(q)]
python
def _filter_queues(self, queues): def match(queue): """ Returns whether the given queue should be included by checking each part of the queue name. """ for part in reversed_dotted_parts(queue): if part in self.exclude_queues: return False if part in self.only_queues: return True return not self.only_queues return [q for q in queues if match(q)]
[ "def", "_filter_queues", "(", "self", ",", "queues", ")", ":", "def", "match", "(", "queue", ")", ":", "\"\"\"\n Returns whether the given queue should be included by checking each\n part of the queue name.\n \"\"\"", "for", "part", "in", "reversed_dotted_parts", "(", "queue", ")", ":", "if", "part", "in", "self", ".", "exclude_queues", ":", "return", "False", "if", "part", "in", "self", ".", "only_queues", ":", "return", "True", "return", "not", "self", ".", "only_queues", "return", "[", "q", "for", "q", "in", "queues", "if", "match", "(", "q", ")", "]" ]
Applies the queue filter to the given list of queues and returns the queues that match. Note that a queue name matches any subqueues starting with the name, followed by a date. For example, "foo" will match both "foo" and "foo.bar".
[ "Applies", "the", "queue", "filter", "to", "the", "given", "list", "of", "queues", "and", "returns", "the", "queues", "that", "match", ".", "Note", "that", "a", "queue", "name", "matches", "any", "subqueues", "starting", "with", "the", "name", "followed", "by", "a", "date", ".", "For", "example", "foo", "will", "match", "both", "foo", "and", "foo", ".", "bar", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L122-L142
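A self-contained sketch of the queue-filter semantics described in the _filter_queues record above (not the tasktiger helpers themselves): a queue matches itself and any of its dotted prefixes.

```python
# Sketch only: reimplements the matching rule for illustration. Prefixes are
# checked from most to least specific, and at each level exclusion is checked
# before inclusion.
def reversed_dotted_parts(name):
    parts = name.split('.')
    return ['.'.join(parts[:i]) for i in range(len(parts), 0, -1)]  # 'a.b' -> ['a.b', 'a']

def filter_queues(queues, only_queues=(), exclude_queues=()):
    def match(queue):
        for part in reversed_dotted_parts(queue):
            if part in exclude_queues:
                return False
            if part in only_queues:
                return True
        return not only_queues
    return [q for q in queues if match(q)]

# "foo" matches both "foo" and "foo.bar":
print(filter_queues(['foo', 'foo.bar', 'baz'], only_queues={'foo'}))  # ['foo', 'foo.bar']
```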
250,706
closeio/tasktiger
tasktiger/worker.py
Worker._worker_queue_scheduled_tasks
def _worker_queue_scheduled_tasks(self): """ Helper method that takes due tasks from the SCHEDULED queue and puts them in the QUEUED queue for execution. This should be called periodically. """ queues = set(self._filter_queues(self.connection.smembers( self._key(SCHEDULED)))) now = time.time() for queue in queues: # Move due items from the SCHEDULED queue to the QUEUED queue. If # items were moved, remove the queue from the scheduled set if it # is empty, and add it to the queued set so the task gets picked # up. If any unique tasks are already queued, don't update their # queue time (because the new queue time would be later). result = self.scripts.zpoppush( self._key(SCHEDULED, queue), self._key(QUEUED, queue), self.config['SCHEDULED_TASK_BATCH_SIZE'], now, now, if_exists=('noupdate',), on_success=('update_sets', queue, self._key(SCHEDULED), self._key(QUEUED)), ) self.log.debug('scheduled tasks', queue=queue, qty=len(result)) # XXX: ideally this would be in the same pipeline, but we only want # to announce if there was a result. if result: self.connection.publish(self._key('activity'), queue) self._did_work = True
python
def _worker_queue_scheduled_tasks(self): queues = set(self._filter_queues(self.connection.smembers( self._key(SCHEDULED)))) now = time.time() for queue in queues: # Move due items from the SCHEDULED queue to the QUEUED queue. If # items were moved, remove the queue from the scheduled set if it # is empty, and add it to the queued set so the task gets picked # up. If any unique tasks are already queued, don't update their # queue time (because the new queue time would be later). result = self.scripts.zpoppush( self._key(SCHEDULED, queue), self._key(QUEUED, queue), self.config['SCHEDULED_TASK_BATCH_SIZE'], now, now, if_exists=('noupdate',), on_success=('update_sets', queue, self._key(SCHEDULED), self._key(QUEUED)), ) self.log.debug('scheduled tasks', queue=queue, qty=len(result)) # XXX: ideally this would be in the same pipeline, but we only want # to announce if there was a result. if result: self.connection.publish(self._key('activity'), queue) self._did_work = True
[ "def", "_worker_queue_scheduled_tasks", "(", "self", ")", ":", "queues", "=", "set", "(", "self", ".", "_filter_queues", "(", "self", ".", "connection", ".", "smembers", "(", "self", ".", "_key", "(", "SCHEDULED", ")", ")", ")", ")", "now", "=", "time", ".", "time", "(", ")", "for", "queue", "in", "queues", ":", "# Move due items from the SCHEDULED queue to the QUEUED queue. If", "# items were moved, remove the queue from the scheduled set if it", "# is empty, and add it to the queued set so the task gets picked", "# up. If any unique tasks are already queued, don't update their", "# queue time (because the new queue time would be later).", "result", "=", "self", ".", "scripts", ".", "zpoppush", "(", "self", ".", "_key", "(", "SCHEDULED", ",", "queue", ")", ",", "self", ".", "_key", "(", "QUEUED", ",", "queue", ")", ",", "self", ".", "config", "[", "'SCHEDULED_TASK_BATCH_SIZE'", "]", ",", "now", ",", "now", ",", "if_exists", "=", "(", "'noupdate'", ",", ")", ",", "on_success", "=", "(", "'update_sets'", ",", "queue", ",", "self", ".", "_key", "(", "SCHEDULED", ")", ",", "self", ".", "_key", "(", "QUEUED", ")", ")", ",", ")", "self", ".", "log", ".", "debug", "(", "'scheduled tasks'", ",", "queue", "=", "queue", ",", "qty", "=", "len", "(", "result", ")", ")", "# XXX: ideally this would be in the same pipeline, but we only want", "# to announce if there was a result.", "if", "result", ":", "self", ".", "connection", ".", "publish", "(", "self", ".", "_key", "(", "'activity'", ")", ",", "queue", ")", "self", ".", "_did_work", "=", "True" ]
Helper method that takes due tasks from the SCHEDULED queue and puts them in the QUEUED queue for execution. This should be called periodically.
[ "Helper", "method", "that", "takes", "due", "tasks", "from", "the", "SCHEDULED", "queue", "and", "puts", "them", "in", "the", "QUEUED", "queue", "for", "execution", ".", "This", "should", "be", "called", "periodically", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L144-L175
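A conceptual sketch of what the scripts.zpoppush call in the record above accomplishes for the SCHEDULED-to-QUEUED move. The real implementation runs as a server-side (Lua) script, so the pop and push are atomic, and it also honors the if_exists/on_success options; this pipeline version and the key names are only illustrative.

```python
# Conceptual sketch, not the actual script: pop members whose score (scheduled
# time) is <= now from the source zset and add them to the destination zset with
# a new score. Key names below follow a hypothetical prefix:state:queue pattern.
import time
import redis

def zpoppush_sketch(conn, source, destination, count, now, new_score):
    members = conn.zrangebyscore(source, '-inf', now, start=0, num=count)
    if members:
        pipe = conn.pipeline()
        pipe.zrem(source, *members)
        pipe.zadd(destination, {member: new_score for member in members})
        pipe.execute()
    return members

conn = redis.Redis(decode_responses=True)
now = time.time()
moved = zpoppush_sketch(conn, 't:scheduled:default', 't:queued:default', 100, now, now)
print('moved %d due tasks' % len(moved))
```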
250,707
closeio/tasktiger
tasktiger/worker.py
Worker._wait_for_new_tasks
def _wait_for_new_tasks(self, timeout=0, batch_timeout=0): """ Check activity channel and wait as necessary. This method is also used to slow down the main processing loop to reduce the effects of rapidly sending Redis commands. This method will exit for any of these conditions: 1. _did_work is True, suggests there could be more work pending 2. Found new queue and after batch timeout. Note batch timeout can be zero so it will exit immediately. 3. Timeout seconds have passed, this is the maximum time to stay in this method """ new_queue_found = False start_time = batch_exit = time.time() while True: # Check to see if batch_exit has been updated if batch_exit > start_time: pubsub_sleep = batch_exit - time.time() else: pubsub_sleep = start_time + timeout - time.time() message = self._pubsub.get_message(timeout=0 if pubsub_sleep < 0 or self._did_work else pubsub_sleep) # Pull remaining messages off of channel while message: if message['type'] == 'message': new_queue_found, batch_exit = self._process_queue_message( message['data'], new_queue_found, batch_exit, start_time, timeout, batch_timeout ) message = self._pubsub.get_message() if self._did_work: break # Exit immediately if we did work during the last # execution loop because there might be more work to do elif time.time() >= batch_exit and new_queue_found: break # After finding a new queue we can wait until the # batch timeout expires elif time.time() - start_time > timeout: break
python
def _wait_for_new_tasks(self, timeout=0, batch_timeout=0): new_queue_found = False start_time = batch_exit = time.time() while True: # Check to see if batch_exit has been updated if batch_exit > start_time: pubsub_sleep = batch_exit - time.time() else: pubsub_sleep = start_time + timeout - time.time() message = self._pubsub.get_message(timeout=0 if pubsub_sleep < 0 or self._did_work else pubsub_sleep) # Pull remaining messages off of channel while message: if message['type'] == 'message': new_queue_found, batch_exit = self._process_queue_message( message['data'], new_queue_found, batch_exit, start_time, timeout, batch_timeout ) message = self._pubsub.get_message() if self._did_work: break # Exit immediately if we did work during the last # execution loop because there might be more work to do elif time.time() >= batch_exit and new_queue_found: break # After finding a new queue we can wait until the # batch timeout expires elif time.time() - start_time > timeout: break
[ "def", "_wait_for_new_tasks", "(", "self", ",", "timeout", "=", "0", ",", "batch_timeout", "=", "0", ")", ":", "new_queue_found", "=", "False", "start_time", "=", "batch_exit", "=", "time", ".", "time", "(", ")", "while", "True", ":", "# Check to see if batch_exit has been updated", "if", "batch_exit", ">", "start_time", ":", "pubsub_sleep", "=", "batch_exit", "-", "time", ".", "time", "(", ")", "else", ":", "pubsub_sleep", "=", "start_time", "+", "timeout", "-", "time", ".", "time", "(", ")", "message", "=", "self", ".", "_pubsub", ".", "get_message", "(", "timeout", "=", "0", "if", "pubsub_sleep", "<", "0", "or", "self", ".", "_did_work", "else", "pubsub_sleep", ")", "# Pull remaining messages off of channel", "while", "message", ":", "if", "message", "[", "'type'", "]", "==", "'message'", ":", "new_queue_found", ",", "batch_exit", "=", "self", ".", "_process_queue_message", "(", "message", "[", "'data'", "]", ",", "new_queue_found", ",", "batch_exit", ",", "start_time", ",", "timeout", ",", "batch_timeout", ")", "message", "=", "self", ".", "_pubsub", ".", "get_message", "(", ")", "if", "self", ".", "_did_work", ":", "break", "# Exit immediately if we did work during the last", "# execution loop because there might be more work to do", "elif", "time", ".", "time", "(", ")", ">=", "batch_exit", "and", "new_queue_found", ":", "break", "# After finding a new queue we can wait until the", "# batch timeout expires", "elif", "time", ".", "time", "(", ")", "-", "start_time", ">", "timeout", ":", "break" ]
Check activity channel and wait as necessary. This method is also used to slow down the main processing loop to reduce the effects of rapidly sending Redis commands. This method will exit for any of these conditions: 1. _did_work is True, suggests there could be more work pending 2. Found new queue and after batch timeout. Note batch timeout can be zero so it will exit immediately. 3. Timeout seconds have passed, this is the maximum time to stay in this method
[ "Check", "activity", "channel", "and", "wait", "as", "necessary", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L177-L220
250,708
closeio/tasktiger
tasktiger/worker.py
Worker._execute_forked
def _execute_forked(self, tasks, log): """ Executes the tasks in the forked process. Multiple tasks can be passed for batch processing. However, they must all use the same function and will share the execution entry. """ success = False execution = {} assert len(tasks) task_func = tasks[0].serialized_func assert all([task_func == task.serialized_func for task in tasks[1:]]) execution['time_started'] = time.time() exc = None exc_info = None try: func = tasks[0].func is_batch_func = getattr(func, '_task_batch', False) g['current_task_is_batch'] = is_batch_func if is_batch_func: # Batch process if the task supports it. params = [{ 'args': task.args, 'kwargs': task.kwargs, } for task in tasks] task_timeouts = [task.hard_timeout for task in tasks if task.hard_timeout is not None] hard_timeout = ((max(task_timeouts) if task_timeouts else None) or getattr(func, '_task_hard_timeout', None) or self.config['DEFAULT_HARD_TIMEOUT']) g['current_tasks'] = tasks with UnixSignalDeathPenalty(hard_timeout): func(params) else: # Process sequentially. for task in tasks: hard_timeout = (task.hard_timeout or getattr(func, '_task_hard_timeout', None) or self.config['DEFAULT_HARD_TIMEOUT']) g['current_tasks'] = [task] with UnixSignalDeathPenalty(hard_timeout): func(*task.args, **task.kwargs) except RetryException as exc: execution['retry'] = True if exc.method: execution['retry_method'] = serialize_retry_method(exc.method) execution['log_error'] = exc.log_error execution['exception_name'] = serialize_func_name(exc.__class__) exc_info = exc.exc_info or sys.exc_info() except (JobTimeoutException, Exception) as exc: execution['exception_name'] = serialize_func_name(exc.__class__) exc_info = sys.exc_info() else: success = True if not success: execution['time_failed'] = time.time() if self.store_tracebacks: # Currently we only log failed task executions to Redis. execution['traceback'] = \ ''.join(traceback.format_exception(*exc_info)) execution['success'] = success execution['host'] = socket.gethostname() serialized_execution = json.dumps(execution) for task in tasks: self.connection.rpush(self._key('task', task.id, 'executions'), serialized_execution) return success
python
def _execute_forked(self, tasks, log): success = False execution = {} assert len(tasks) task_func = tasks[0].serialized_func assert all([task_func == task.serialized_func for task in tasks[1:]]) execution['time_started'] = time.time() exc = None exc_info = None try: func = tasks[0].func is_batch_func = getattr(func, '_task_batch', False) g['current_task_is_batch'] = is_batch_func if is_batch_func: # Batch process if the task supports it. params = [{ 'args': task.args, 'kwargs': task.kwargs, } for task in tasks] task_timeouts = [task.hard_timeout for task in tasks if task.hard_timeout is not None] hard_timeout = ((max(task_timeouts) if task_timeouts else None) or getattr(func, '_task_hard_timeout', None) or self.config['DEFAULT_HARD_TIMEOUT']) g['current_tasks'] = tasks with UnixSignalDeathPenalty(hard_timeout): func(params) else: # Process sequentially. for task in tasks: hard_timeout = (task.hard_timeout or getattr(func, '_task_hard_timeout', None) or self.config['DEFAULT_HARD_TIMEOUT']) g['current_tasks'] = [task] with UnixSignalDeathPenalty(hard_timeout): func(*task.args, **task.kwargs) except RetryException as exc: execution['retry'] = True if exc.method: execution['retry_method'] = serialize_retry_method(exc.method) execution['log_error'] = exc.log_error execution['exception_name'] = serialize_func_name(exc.__class__) exc_info = exc.exc_info or sys.exc_info() except (JobTimeoutException, Exception) as exc: execution['exception_name'] = serialize_func_name(exc.__class__) exc_info = sys.exc_info() else: success = True if not success: execution['time_failed'] = time.time() if self.store_tracebacks: # Currently we only log failed task executions to Redis. execution['traceback'] = \ ''.join(traceback.format_exception(*exc_info)) execution['success'] = success execution['host'] = socket.gethostname() serialized_execution = json.dumps(execution) for task in tasks: self.connection.rpush(self._key('task', task.id, 'executions'), serialized_execution) return success
[ "def", "_execute_forked", "(", "self", ",", "tasks", ",", "log", ")", ":", "success", "=", "False", "execution", "=", "{", "}", "assert", "len", "(", "tasks", ")", "task_func", "=", "tasks", "[", "0", "]", ".", "serialized_func", "assert", "all", "(", "[", "task_func", "==", "task", ".", "serialized_func", "for", "task", "in", "tasks", "[", "1", ":", "]", "]", ")", "execution", "[", "'time_started'", "]", "=", "time", ".", "time", "(", ")", "exc", "=", "None", "exc_info", "=", "None", "try", ":", "func", "=", "tasks", "[", "0", "]", ".", "func", "is_batch_func", "=", "getattr", "(", "func", ",", "'_task_batch'", ",", "False", ")", "g", "[", "'current_task_is_batch'", "]", "=", "is_batch_func", "if", "is_batch_func", ":", "# Batch process if the task supports it.", "params", "=", "[", "{", "'args'", ":", "task", ".", "args", ",", "'kwargs'", ":", "task", ".", "kwargs", ",", "}", "for", "task", "in", "tasks", "]", "task_timeouts", "=", "[", "task", ".", "hard_timeout", "for", "task", "in", "tasks", "if", "task", ".", "hard_timeout", "is", "not", "None", "]", "hard_timeout", "=", "(", "(", "max", "(", "task_timeouts", ")", "if", "task_timeouts", "else", "None", ")", "or", "getattr", "(", "func", ",", "'_task_hard_timeout'", ",", "None", ")", "or", "self", ".", "config", "[", "'DEFAULT_HARD_TIMEOUT'", "]", ")", "g", "[", "'current_tasks'", "]", "=", "tasks", "with", "UnixSignalDeathPenalty", "(", "hard_timeout", ")", ":", "func", "(", "params", ")", "else", ":", "# Process sequentially.", "for", "task", "in", "tasks", ":", "hard_timeout", "=", "(", "task", ".", "hard_timeout", "or", "getattr", "(", "func", ",", "'_task_hard_timeout'", ",", "None", ")", "or", "self", ".", "config", "[", "'DEFAULT_HARD_TIMEOUT'", "]", ")", "g", "[", "'current_tasks'", "]", "=", "[", "task", "]", "with", "UnixSignalDeathPenalty", "(", "hard_timeout", ")", ":", "func", "(", "*", "task", ".", "args", ",", "*", "*", "task", ".", "kwargs", ")", "except", "RetryException", "as", "exc", ":", "execution", "[", "'retry'", "]", "=", "True", "if", "exc", ".", "method", ":", "execution", "[", "'retry_method'", "]", "=", "serialize_retry_method", "(", "exc", ".", "method", ")", "execution", "[", "'log_error'", "]", "=", "exc", ".", "log_error", "execution", "[", "'exception_name'", "]", "=", "serialize_func_name", "(", "exc", ".", "__class__", ")", "exc_info", "=", "exc", ".", "exc_info", "or", "sys", ".", "exc_info", "(", ")", "except", "(", "JobTimeoutException", ",", "Exception", ")", "as", "exc", ":", "execution", "[", "'exception_name'", "]", "=", "serialize_func_name", "(", "exc", ".", "__class__", ")", "exc_info", "=", "sys", ".", "exc_info", "(", ")", "else", ":", "success", "=", "True", "if", "not", "success", ":", "execution", "[", "'time_failed'", "]", "=", "time", ".", "time", "(", ")", "if", "self", ".", "store_tracebacks", ":", "# Currently we only log failed task executions to Redis.", "execution", "[", "'traceback'", "]", "=", "''", ".", "join", "(", "traceback", ".", "format_exception", "(", "*", "exc_info", ")", ")", "execution", "[", "'success'", "]", "=", "success", "execution", "[", "'host'", "]", "=", "socket", ".", "gethostname", "(", ")", "serialized_execution", "=", "json", ".", "dumps", "(", "execution", ")", "for", "task", "in", "tasks", ":", "self", ".", "connection", ".", "rpush", "(", "self", ".", "_key", "(", "'task'", ",", "task", ".", "id", ",", "'executions'", ")", ",", "serialized_execution", ")", "return", "success" ]
Executes the tasks in the forked process. Multiple tasks can be passed for batch processing. However, they must all use the same function and will share the execution entry.
[ "Executes", "the", "tasks", "in", "the", "forked", "process", ".", "Multiple", "tasks", "can", "be", "passed", "for", "batch", "processing", ".", "However", "they", "must", "all", "use", "the", "same", "function", "and", "will", "share", "the", "execution", "entry", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L292-L370
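The _execute_forked record above branches on whether the task function was registered as a batch task. A hypothetical sketch of declaring such a task follows; the queue name and timeout are made up, and the decorator usage follows the project's documented API.

```python
# Illustrative sketch only. A batch task receives a list of {'args': ..., 'kwargs': ...}
# dicts in a single call, which is the `params` list built in _execute_forked above.
import redis
from tasktiger import TaskTiger

tiger = TaskTiger(connection=redis.Redis(decode_responses=True))

@tiger.task(batch=True, queue='email', hard_timeout=60)
def send_emails(params):
    for p in params:
        print('sending email with', p['args'], p['kwargs'])
```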
250,709
closeio/tasktiger
tasktiger/worker.py
Worker._get_queue_batch_size
def _get_queue_batch_size(self, queue): """Get queue batch size.""" # Fetch one item unless this is a batch queue. # XXX: It would be more efficient to loop in reverse order and break. batch_queues = self.config['BATCH_QUEUES'] batch_size = 1 for part in dotted_parts(queue): if part in batch_queues: batch_size = batch_queues[part] return batch_size
python
def _get_queue_batch_size(self, queue): # Fetch one item unless this is a batch queue. # XXX: It would be more efficient to loop in reverse order and break. batch_queues = self.config['BATCH_QUEUES'] batch_size = 1 for part in dotted_parts(queue): if part in batch_queues: batch_size = batch_queues[part] return batch_size
[ "def", "_get_queue_batch_size", "(", "self", ",", "queue", ")", ":", "# Fetch one item unless this is a batch queue.", "# XXX: It would be more efficient to loop in reverse order and break.", "batch_queues", "=", "self", ".", "config", "[", "'BATCH_QUEUES'", "]", "batch_size", "=", "1", "for", "part", "in", "dotted_parts", "(", "queue", ")", ":", "if", "part", "in", "batch_queues", ":", "batch_size", "=", "batch_queues", "[", "part", "]", "return", "batch_size" ]
Get queue batch size.
[ "Get", "queue", "batch", "size", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L372-L383
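A self-contained sketch of the batch-size lookup in the record above (not the tasktiger helpers): the BATCH_QUEUES config is walked over the dotted prefixes of the queue name, so the most specific configured prefix wins.

```python
# Sketch only: later (more specific) prefixes overwrite earlier matches, defaulting to 1.
def dotted_parts(name):
    parts = name.split('.')
    return ['.'.join(parts[:i]) for i in range(1, len(parts) + 1)]  # 'a.b' -> ['a', 'a.b']

def get_queue_batch_size(queue, batch_queues):
    batch_size = 1
    for part in dotted_parts(queue):
        if part in batch_queues:
            batch_size = batch_queues[part]
    return batch_size

# Hypothetical config: BATCH_QUEUES = {'email': 10, 'email.bulk': 50}
print(get_queue_batch_size('email.bulk.newsletter', {'email': 10, 'email.bulk': 50}))  # 50
print(get_queue_batch_size('payments', {'email': 10, 'email.bulk': 50}))               # 1
```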
250,710
closeio/tasktiger
tasktiger/worker.py
Worker._get_queue_lock
def _get_queue_lock(self, queue, log): """Get queue lock for max worker queues. For max worker queues it returns a Lock if acquired and whether it failed to acquire the lock. """ max_workers = self.max_workers_per_queue # Check if this is single worker queue for part in dotted_parts(queue): if part in self.single_worker_queues: log.debug('single worker queue') max_workers = 1 break # Max worker queues require us to get a queue lock before # moving tasks if max_workers: queue_lock = Semaphore(self.connection, self._key(LOCK_REDIS_KEY, queue), self.id, max_locks=max_workers, timeout=self.config['ACTIVE_TASK_UPDATE_TIMEOUT']) acquired, locks = queue_lock.acquire() if not acquired: return None, True log.debug('acquired queue lock', locks=locks) else: queue_lock = None return queue_lock, False
python
def _get_queue_lock(self, queue, log): max_workers = self.max_workers_per_queue # Check if this is single worker queue for part in dotted_parts(queue): if part in self.single_worker_queues: log.debug('single worker queue') max_workers = 1 break # Max worker queues require us to get a queue lock before # moving tasks if max_workers: queue_lock = Semaphore(self.connection, self._key(LOCK_REDIS_KEY, queue), self.id, max_locks=max_workers, timeout=self.config['ACTIVE_TASK_UPDATE_TIMEOUT']) acquired, locks = queue_lock.acquire() if not acquired: return None, True log.debug('acquired queue lock', locks=locks) else: queue_lock = None return queue_lock, False
[ "def", "_get_queue_lock", "(", "self", ",", "queue", ",", "log", ")", ":", "max_workers", "=", "self", ".", "max_workers_per_queue", "# Check if this is single worker queue", "for", "part", "in", "dotted_parts", "(", "queue", ")", ":", "if", "part", "in", "self", ".", "single_worker_queues", ":", "log", ".", "debug", "(", "'single worker queue'", ")", "max_workers", "=", "1", "break", "# Max worker queues require us to get a queue lock before", "# moving tasks", "if", "max_workers", ":", "queue_lock", "=", "Semaphore", "(", "self", ".", "connection", ",", "self", ".", "_key", "(", "LOCK_REDIS_KEY", ",", "queue", ")", ",", "self", ".", "id", ",", "max_locks", "=", "max_workers", ",", "timeout", "=", "self", ".", "config", "[", "'ACTIVE_TASK_UPDATE_TIMEOUT'", "]", ")", "acquired", ",", "locks", "=", "queue_lock", ".", "acquire", "(", ")", "if", "not", "acquired", ":", "return", "None", ",", "True", "log", ".", "debug", "(", "'acquired queue lock'", ",", "locks", "=", "locks", ")", "else", ":", "queue_lock", "=", "None", "return", "queue_lock", ",", "False" ]
Get queue lock for max worker queues. For max worker queues it returns a Lock if acquired and whether it failed to acquire the lock.
[ "Get", "queue", "lock", "for", "max", "worker", "queues", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L385-L414
250,711
closeio/tasktiger
tasktiger/worker.py
Worker._heartbeat
def _heartbeat(self, queue, task_ids): """ Updates the heartbeat for the given task IDs to prevent them from timing out and being requeued. """ now = time.time() self.connection.zadd(self._key(ACTIVE, queue), **{task_id: now for task_id in task_ids})
python
def _heartbeat(self, queue, task_ids): now = time.time() self.connection.zadd(self._key(ACTIVE, queue), **{task_id: now for task_id in task_ids})
[ "def", "_heartbeat", "(", "self", ",", "queue", ",", "task_ids", ")", ":", "now", "=", "time", ".", "time", "(", ")", "self", ".", "connection", ".", "zadd", "(", "self", ".", "_key", "(", "ACTIVE", ",", "queue", ")", ",", "*", "*", "{", "task_id", ":", "now", "for", "task_id", "in", "task_ids", "}", ")" ]
Updates the heartbeat for the given task IDs to prevent them from timing out and being requeued.
[ "Updates", "the", "heartbeat", "for", "the", "given", "task", "IDs", "to", "prevent", "them", "from", "timing", "out", "and", "being", "requeued", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L416-L423
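A hypothetical sketch of the heartbeat write in the record above. The keyword-argument form of zadd used in the source is the redis-py 2.x API; redis-py 3+ takes a mapping, as shown here. The key name and task IDs are made up.

```python
# Sketch only: refresh the ACTIVE-set score of each task so it is not treated as
# stale and requeued. Written with the redis-py 3+ mapping form of zadd.
import time
import redis

conn = redis.Redis(decode_responses=True)
now = time.time()
task_ids = ['task-1', 'task-2']  # hypothetical task IDs

conn.zadd('t:active:default', {task_id: now for task_id in task_ids})
```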
250,712
closeio/tasktiger
tasktiger/worker.py
Worker._execute
def _execute(self, queue, tasks, log, locks, queue_lock, all_task_ids): """ Executes the given tasks. Returns a boolean indicating whether the tasks were executed successfully. """ # The tasks must use the same function. assert len(tasks) task_func = tasks[0].serialized_func assert all([task_func == task.serialized_func for task in tasks[1:]]) # Before executing periodic tasks, queue them for the next period. if task_func in self.tiger.periodic_task_funcs: tasks[0]._queue_for_next_period() with g_fork_lock: child_pid = os.fork() if child_pid == 0: # Child process log = log.bind(child_pid=os.getpid()) # Disconnect the Redis connection inherited from the main process. # Note that this doesn't disconnect the socket in the main process. self.connection.connection_pool.disconnect() random.seed() # Ignore Ctrl+C in the child so we don't abort the job -- the main # process already takes care of a graceful shutdown. signal.signal(signal.SIGINT, signal.SIG_IGN) with WorkerContextManagerStack(self.config['CHILD_CONTEXT_MANAGERS']): success = self._execute_forked(tasks, log) # Wait for any threads that might be running in the child, just # like sys.exit() would. Note we don't call sys.exit() directly # because it would perform additional cleanup (e.g. calling atexit # handlers twice). See also: https://bugs.python.org/issue18966 threading._shutdown() os._exit(int(not success)) else: # Main process log = log.bind(child_pid=child_pid) for task in tasks: log.info('processing', func=task_func, task_id=task.id, params={'args': task.args, 'kwargs': task.kwargs}) # Attach a signal handler to SIGCHLD (sent when the child process # exits) so we can capture it. signal.signal(signal.SIGCHLD, sigchld_handler) # Since newer Python versions retry interrupted system calls we can't # rely on the fact that select() is interrupted with EINTR. Instead, # we'll set up a wake-up file descriptor below. # Create a new pipe and apply the non-blocking flag (required for # set_wakeup_fd). pipe_r, pipe_w = os.pipe() flags = fcntl.fcntl(pipe_w, fcntl.F_GETFL, 0) flags = flags | os.O_NONBLOCK fcntl.fcntl(pipe_w, fcntl.F_SETFL, flags) # A byte will be written to pipe_w if a signal occurs (and can be # read from pipe_r). old_wakeup_fd = signal.set_wakeup_fd(pipe_w) def check_child_exit(): """ Do a non-blocking check to see if the child process exited. Returns None if the process is still running, or the exit code value of the child process. """ try: pid, return_code = os.waitpid(child_pid, os.WNOHANG) if pid != 0: # The child process is done. return return_code except OSError as e: # Of course EINTR can happen if the child process exits # while we're checking whether it exited. In this case it # should be safe to retry. if e.errno == errno.EINTR: return check_child_exit() else: raise # Wait for the child to exit and perform a periodic heartbeat. # We check for the child twice in this loop so that we avoid # unnecessary waiting if the child exited just before entering # the while loop or while renewing heartbeat/locks. while True: return_code = check_child_exit() if return_code is not None: break # Wait until the timeout or a signal / child exit occurs. try: select.select([pipe_r], [], [], self.config['ACTIVE_TASK_UPDATE_TIMER']) except select.error as e: if e.args[0] != errno.EINTR: raise return_code = check_child_exit() if return_code is not None: break try: self._heartbeat(queue, all_task_ids) for lock in locks: lock.renew(self.config['ACTIVE_TASK_UPDATE_TIMEOUT']) if queue_lock: acquired, current_locks = queue_lock.renew() if not acquired: log.debug('queue lock renew failure') except OSError as e: # EINTR happens if the task completed. Since we're just # renewing locks/heartbeat it's okay if we get interrupted. if e.errno != errno.EINTR: raise # Restore signals / clean up signal.signal(signal.SIGCHLD, signal.SIG_DFL) signal.set_wakeup_fd(old_wakeup_fd) os.close(pipe_r) os.close(pipe_w) success = (return_code == 0) return success
python
def _execute(self, queue, tasks, log, locks, queue_lock, all_task_ids): # The tasks must use the same function. assert len(tasks) task_func = tasks[0].serialized_func assert all([task_func == task.serialized_func for task in tasks[1:]]) # Before executing periodic tasks, queue them for the next period. if task_func in self.tiger.periodic_task_funcs: tasks[0]._queue_for_next_period() with g_fork_lock: child_pid = os.fork() if child_pid == 0: # Child process log = log.bind(child_pid=os.getpid()) # Disconnect the Redis connection inherited from the main process. # Note that this doesn't disconnect the socket in the main process. self.connection.connection_pool.disconnect() random.seed() # Ignore Ctrl+C in the child so we don't abort the job -- the main # process already takes care of a graceful shutdown. signal.signal(signal.SIGINT, signal.SIG_IGN) with WorkerContextManagerStack(self.config['CHILD_CONTEXT_MANAGERS']): success = self._execute_forked(tasks, log) # Wait for any threads that might be running in the child, just # like sys.exit() would. Note we don't call sys.exit() directly # because it would perform additional cleanup (e.g. calling atexit # handlers twice). See also: https://bugs.python.org/issue18966 threading._shutdown() os._exit(int(not success)) else: # Main process log = log.bind(child_pid=child_pid) for task in tasks: log.info('processing', func=task_func, task_id=task.id, params={'args': task.args, 'kwargs': task.kwargs}) # Attach a signal handler to SIGCHLD (sent when the child process # exits) so we can capture it. signal.signal(signal.SIGCHLD, sigchld_handler) # Since newer Python versions retry interrupted system calls we can't # rely on the fact that select() is interrupted with EINTR. Instead, # we'll set up a wake-up file descriptor below. # Create a new pipe and apply the non-blocking flag (required for # set_wakeup_fd). pipe_r, pipe_w = os.pipe() flags = fcntl.fcntl(pipe_w, fcntl.F_GETFL, 0) flags = flags | os.O_NONBLOCK fcntl.fcntl(pipe_w, fcntl.F_SETFL, flags) # A byte will be written to pipe_w if a signal occurs (and can be # read from pipe_r). old_wakeup_fd = signal.set_wakeup_fd(pipe_w) def check_child_exit(): """ Do a non-blocking check to see if the child process exited. Returns None if the process is still running, or the exit code value of the child process. """ try: pid, return_code = os.waitpid(child_pid, os.WNOHANG) if pid != 0: # The child process is done. return return_code except OSError as e: # Of course EINTR can happen if the child process exits # while we're checking whether it exited. In this case it # should be safe to retry. if e.errno == errno.EINTR: return check_child_exit() else: raise # Wait for the child to exit and perform a periodic heartbeat. # We check for the child twice in this loop so that we avoid # unnecessary waiting if the child exited just before entering # the while loop or while renewing heartbeat/locks. while True: return_code = check_child_exit() if return_code is not None: break # Wait until the timeout or a signal / child exit occurs. try: select.select([pipe_r], [], [], self.config['ACTIVE_TASK_UPDATE_TIMER']) except select.error as e: if e.args[0] != errno.EINTR: raise return_code = check_child_exit() if return_code is not None: break try: self._heartbeat(queue, all_task_ids) for lock in locks: lock.renew(self.config['ACTIVE_TASK_UPDATE_TIMEOUT']) if queue_lock: acquired, current_locks = queue_lock.renew() if not acquired: log.debug('queue lock renew failure') except OSError as e: # EINTR happens if the task completed. Since we're just # renewing locks/heartbeat it's okay if we get interrupted. if e.errno != errno.EINTR: raise # Restore signals / clean up signal.signal(signal.SIGCHLD, signal.SIG_DFL) signal.set_wakeup_fd(old_wakeup_fd) os.close(pipe_r) os.close(pipe_w) success = (return_code == 0) return success
[ "def", "_execute", "(", "self", ",", "queue", ",", "tasks", ",", "log", ",", "locks", ",", "queue_lock", ",", "all_task_ids", ")", ":", "# The tasks must use the same function.", "assert", "len", "(", "tasks", ")", "task_func", "=", "tasks", "[", "0", "]", ".", "serialized_func", "assert", "all", "(", "[", "task_func", "==", "task", ".", "serialized_func", "for", "task", "in", "tasks", "[", "1", ":", "]", "]", ")", "# Before executing periodic tasks, queue them for the next period.", "if", "task_func", "in", "self", ".", "tiger", ".", "periodic_task_funcs", ":", "tasks", "[", "0", "]", ".", "_queue_for_next_period", "(", ")", "with", "g_fork_lock", ":", "child_pid", "=", "os", ".", "fork", "(", ")", "if", "child_pid", "==", "0", ":", "# Child process", "log", "=", "log", ".", "bind", "(", "child_pid", "=", "os", ".", "getpid", "(", ")", ")", "# Disconnect the Redis connection inherited from the main process.", "# Note that this doesn't disconnect the socket in the main process.", "self", ".", "connection", ".", "connection_pool", ".", "disconnect", "(", ")", "random", ".", "seed", "(", ")", "# Ignore Ctrl+C in the child so we don't abort the job -- the main", "# process already takes care of a graceful shutdown.", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "signal", ".", "SIG_IGN", ")", "with", "WorkerContextManagerStack", "(", "self", ".", "config", "[", "'CHILD_CONTEXT_MANAGERS'", "]", ")", ":", "success", "=", "self", ".", "_execute_forked", "(", "tasks", ",", "log", ")", "# Wait for any threads that might be running in the child, just", "# like sys.exit() would. Note we don't call sys.exit() directly", "# because it would perform additional cleanup (e.g. calling atexit", "# handlers twice). See also: https://bugs.python.org/issue18966", "threading", ".", "_shutdown", "(", ")", "os", ".", "_exit", "(", "int", "(", "not", "success", ")", ")", "else", ":", "# Main process", "log", "=", "log", ".", "bind", "(", "child_pid", "=", "child_pid", ")", "for", "task", "in", "tasks", ":", "log", ".", "info", "(", "'processing'", ",", "func", "=", "task_func", ",", "task_id", "=", "task", ".", "id", ",", "params", "=", "{", "'args'", ":", "task", ".", "args", ",", "'kwargs'", ":", "task", ".", "kwargs", "}", ")", "# Attach a signal handler to SIGCHLD (sent when the child process", "# exits) so we can capture it.", "signal", ".", "signal", "(", "signal", ".", "SIGCHLD", ",", "sigchld_handler", ")", "# Since newer Python versions retry interrupted system calls we can't", "# rely on the fact that select() is interrupted with EINTR. 
Instead,", "# we'll set up a wake-up file descriptor below.", "# Create a new pipe and apply the non-blocking flag (required for", "# set_wakeup_fd).", "pipe_r", ",", "pipe_w", "=", "os", ".", "pipe", "(", ")", "flags", "=", "fcntl", ".", "fcntl", "(", "pipe_w", ",", "fcntl", ".", "F_GETFL", ",", "0", ")", "flags", "=", "flags", "|", "os", ".", "O_NONBLOCK", "fcntl", ".", "fcntl", "(", "pipe_w", ",", "fcntl", ".", "F_SETFL", ",", "flags", ")", "# A byte will be written to pipe_w if a signal occurs (and can be", "# read from pipe_r).", "old_wakeup_fd", "=", "signal", ".", "set_wakeup_fd", "(", "pipe_w", ")", "def", "check_child_exit", "(", ")", ":", "\"\"\"\n Do a non-blocking check to see if the child process exited.\n Returns None if the process is still running, or the exit code\n value of the child process.\n \"\"\"", "try", ":", "pid", ",", "return_code", "=", "os", ".", "waitpid", "(", "child_pid", ",", "os", ".", "WNOHANG", ")", "if", "pid", "!=", "0", ":", "# The child process is done.", "return", "return_code", "except", "OSError", "as", "e", ":", "# Of course EINTR can happen if the child process exits", "# while we're checking whether it exited. In this case it", "# should be safe to retry.", "if", "e", ".", "errno", "==", "errno", ".", "EINTR", ":", "return", "check_child_exit", "(", ")", "else", ":", "raise", "# Wait for the child to exit and perform a periodic heartbeat.", "# We check for the child twice in this loop so that we avoid", "# unnecessary waiting if the child exited just before entering", "# the while loop or while renewing heartbeat/locks.", "while", "True", ":", "return_code", "=", "check_child_exit", "(", ")", "if", "return_code", "is", "not", "None", ":", "break", "# Wait until the timeout or a signal / child exit occurs.", "try", ":", "select", ".", "select", "(", "[", "pipe_r", "]", ",", "[", "]", ",", "[", "]", ",", "self", ".", "config", "[", "'ACTIVE_TASK_UPDATE_TIMER'", "]", ")", "except", "select", ".", "error", "as", "e", ":", "if", "e", ".", "args", "[", "0", "]", "!=", "errno", ".", "EINTR", ":", "raise", "return_code", "=", "check_child_exit", "(", ")", "if", "return_code", "is", "not", "None", ":", "break", "try", ":", "self", ".", "_heartbeat", "(", "queue", ",", "all_task_ids", ")", "for", "lock", "in", "locks", ":", "lock", ".", "renew", "(", "self", ".", "config", "[", "'ACTIVE_TASK_UPDATE_TIMEOUT'", "]", ")", "if", "queue_lock", ":", "acquired", ",", "current_locks", "=", "queue_lock", ".", "renew", "(", ")", "if", "not", "acquired", ":", "log", ".", "debug", "(", "'queue lock renew failure'", ")", "except", "OSError", "as", "e", ":", "# EINTR happens if the task completed. Since we're just", "# renewing locks/heartbeat it's okay if we get interrupted.", "if", "e", ".", "errno", "!=", "errno", ".", "EINTR", ":", "raise", "# Restore signals / clean up", "signal", ".", "signal", "(", "signal", ".", "SIGCHLD", ",", "signal", ".", "SIG_DFL", ")", "signal", ".", "set_wakeup_fd", "(", "old_wakeup_fd", ")", "os", ".", "close", "(", "pipe_r", ")", "os", ".", "close", "(", "pipe_w", ")", "success", "=", "(", "return_code", "==", "0", ")", "return", "success" ]
Executes the given tasks. Returns a boolean indicating whether the tasks were executed successfully.
[ "Executes", "the", "given", "tasks", ".", "Returns", "a", "boolean", "indicating", "whether", "the", "tasks", "were", "executed", "successfully", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L425-L554
250,713
closeio/tasktiger
tasktiger/worker.py
Worker._process_queue_message
def _process_queue_message(self, message_queue, new_queue_found, batch_exit, start_time, timeout, batch_timeout): """Process a queue message from activity channel.""" for queue in self._filter_queues([message_queue]): if queue not in self._queue_set: if not new_queue_found: new_queue_found = True batch_exit = time.time() + batch_timeout # Limit batch_exit to max timeout if batch_exit > start_time + timeout: batch_exit = start_time + timeout self._queue_set.add(queue) self.log.debug('new queue', queue=queue) return new_queue_found, batch_exit
python
def _process_queue_message(self, message_queue, new_queue_found, batch_exit, start_time, timeout, batch_timeout): for queue in self._filter_queues([message_queue]): if queue not in self._queue_set: if not new_queue_found: new_queue_found = True batch_exit = time.time() + batch_timeout # Limit batch_exit to max timeout if batch_exit > start_time + timeout: batch_exit = start_time + timeout self._queue_set.add(queue) self.log.debug('new queue', queue=queue) return new_queue_found, batch_exit
[ "def", "_process_queue_message", "(", "self", ",", "message_queue", ",", "new_queue_found", ",", "batch_exit", ",", "start_time", ",", "timeout", ",", "batch_timeout", ")", ":", "for", "queue", "in", "self", ".", "_filter_queues", "(", "[", "message_queue", "]", ")", ":", "if", "queue", "not", "in", "self", ".", "_queue_set", ":", "if", "not", "new_queue_found", ":", "new_queue_found", "=", "True", "batch_exit", "=", "time", ".", "time", "(", ")", "+", "batch_timeout", "# Limit batch_exit to max timeout", "if", "batch_exit", ">", "start_time", "+", "timeout", ":", "batch_exit", "=", "start_time", "+", "timeout", "self", ".", "_queue_set", ".", "add", "(", "queue", ")", "self", ".", "log", ".", "debug", "(", "'new queue'", ",", "queue", "=", "queue", ")", "return", "new_queue_found", ",", "batch_exit" ]
Process a queue message from activity channel.
[ "Process", "a", "queue", "message", "from", "activity", "channel", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L556-L571
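A small worked sketch of the batch-exit clamping in the record above (not tasktiger code): when a new queue appears, the worker waits up to batch_timeout for more activity, but never beyond the overall timeout.

```python
# Sketch only: batch_exit is capped at start_time + timeout.
def batch_exit_after_new_queue(now, start_time, timeout, batch_timeout):
    batch_exit = now + batch_timeout
    if batch_exit > start_time + timeout:
        batch_exit = start_time + timeout
    return batch_exit

# New queue found 9s into a 10s wait, with a 2s batch timeout:
print(batch_exit_after_new_queue(now=9.0, start_time=0.0, timeout=10.0, batch_timeout=2.0))  # 10.0
```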
250,714
closeio/tasktiger
tasktiger/worker.py
Worker._process_queue_tasks
def _process_queue_tasks(self, queue, queue_lock, task_ids, now, log): """Process tasks in queue.""" processed_count = 0 # Get all tasks serialized_tasks = self.connection.mget([ self._key('task', task_id) for task_id in task_ids ]) # Parse tasks tasks = [] for task_id, serialized_task in zip(task_ids, serialized_tasks): if serialized_task: task_data = json.loads(serialized_task) else: # In the rare case where we don't find the task which is # queued (see ReliabilityTestCase.test_task_disappears), # we log an error and remove the task below. We need to # at least initialize the Task object with an ID so we can # remove it. task_data = {'id': task_id} task = Task(self.tiger, queue=queue, _data=task_data, _state=ACTIVE, _ts=now) if not serialized_task: # Remove task as per comment above log.error('not found', task_id=task_id) task._move() elif task.id != task_id: log.error('task ID mismatch', task_id=task_id) # Remove task task._move() else: tasks.append(task) # List of task IDs that exist and we will update the heartbeat on. valid_task_ids = set(task.id for task in tasks) # Group by task func tasks_by_func = OrderedDict() for task in tasks: func = task.serialized_func if func in tasks_by_func: tasks_by_func[func].append(task) else: tasks_by_func[func] = [task] # Execute tasks for each task func for tasks in tasks_by_func.values(): success, processed_tasks = self._execute_task_group(queue, tasks, valid_task_ids, queue_lock) processed_count = processed_count + len(processed_tasks) log.debug('processed', attempted=len(tasks), processed=processed_count) for task in processed_tasks: self._finish_task_processing(queue, task, success) return processed_count
python
def _process_queue_tasks(self, queue, queue_lock, task_ids, now, log): processed_count = 0 # Get all tasks serialized_tasks = self.connection.mget([ self._key('task', task_id) for task_id in task_ids ]) # Parse tasks tasks = [] for task_id, serialized_task in zip(task_ids, serialized_tasks): if serialized_task: task_data = json.loads(serialized_task) else: # In the rare case where we don't find the task which is # queued (see ReliabilityTestCase.test_task_disappears), # we log an error and remove the task below. We need to # at least initialize the Task object with an ID so we can # remove it. task_data = {'id': task_id} task = Task(self.tiger, queue=queue, _data=task_data, _state=ACTIVE, _ts=now) if not serialized_task: # Remove task as per comment above log.error('not found', task_id=task_id) task._move() elif task.id != task_id: log.error('task ID mismatch', task_id=task_id) # Remove task task._move() else: tasks.append(task) # List of task IDs that exist and we will update the heartbeat on. valid_task_ids = set(task.id for task in tasks) # Group by task func tasks_by_func = OrderedDict() for task in tasks: func = task.serialized_func if func in tasks_by_func: tasks_by_func[func].append(task) else: tasks_by_func[func] = [task] # Execute tasks for each task func for tasks in tasks_by_func.values(): success, processed_tasks = self._execute_task_group(queue, tasks, valid_task_ids, queue_lock) processed_count = processed_count + len(processed_tasks) log.debug('processed', attempted=len(tasks), processed=processed_count) for task in processed_tasks: self._finish_task_processing(queue, task, success) return processed_count
[ "def", "_process_queue_tasks", "(", "self", ",", "queue", ",", "queue_lock", ",", "task_ids", ",", "now", ",", "log", ")", ":", "processed_count", "=", "0", "# Get all tasks", "serialized_tasks", "=", "self", ".", "connection", ".", "mget", "(", "[", "self", ".", "_key", "(", "'task'", ",", "task_id", ")", "for", "task_id", "in", "task_ids", "]", ")", "# Parse tasks", "tasks", "=", "[", "]", "for", "task_id", ",", "serialized_task", "in", "zip", "(", "task_ids", ",", "serialized_tasks", ")", ":", "if", "serialized_task", ":", "task_data", "=", "json", ".", "loads", "(", "serialized_task", ")", "else", ":", "# In the rare case where we don't find the task which is", "# queued (see ReliabilityTestCase.test_task_disappears),", "# we log an error and remove the task below. We need to", "# at least initialize the Task object with an ID so we can", "# remove it.", "task_data", "=", "{", "'id'", ":", "task_id", "}", "task", "=", "Task", "(", "self", ".", "tiger", ",", "queue", "=", "queue", ",", "_data", "=", "task_data", ",", "_state", "=", "ACTIVE", ",", "_ts", "=", "now", ")", "if", "not", "serialized_task", ":", "# Remove task as per comment above", "log", ".", "error", "(", "'not found'", ",", "task_id", "=", "task_id", ")", "task", ".", "_move", "(", ")", "elif", "task", ".", "id", "!=", "task_id", ":", "log", ".", "error", "(", "'task ID mismatch'", ",", "task_id", "=", "task_id", ")", "# Remove task", "task", ".", "_move", "(", ")", "else", ":", "tasks", ".", "append", "(", "task", ")", "# List of task IDs that exist and we will update the heartbeat on.", "valid_task_ids", "=", "set", "(", "task", ".", "id", "for", "task", "in", "tasks", ")", "# Group by task func", "tasks_by_func", "=", "OrderedDict", "(", ")", "for", "task", "in", "tasks", ":", "func", "=", "task", ".", "serialized_func", "if", "func", "in", "tasks_by_func", ":", "tasks_by_func", "[", "func", "]", ".", "append", "(", "task", ")", "else", ":", "tasks_by_func", "[", "func", "]", "=", "[", "task", "]", "# Execute tasks for each task func", "for", "tasks", "in", "tasks_by_func", ".", "values", "(", ")", ":", "success", ",", "processed_tasks", "=", "self", ".", "_execute_task_group", "(", "queue", ",", "tasks", ",", "valid_task_ids", ",", "queue_lock", ")", "processed_count", "=", "processed_count", "+", "len", "(", "processed_tasks", ")", "log", ".", "debug", "(", "'processed'", ",", "attempted", "=", "len", "(", "tasks", ")", ",", "processed", "=", "processed_count", ")", "for", "task", "in", "processed_tasks", ":", "self", ".", "_finish_task_processing", "(", "queue", ",", "task", ",", "success", ")", "return", "processed_count" ]
Process tasks in queue.
[ "Process", "tasks", "in", "queue", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L573-L632
250,715
closeio/tasktiger
tasktiger/worker.py
Worker._process_from_queue
def _process_from_queue(self, queue): """ Internal method to process a task batch from the given queue. Args: queue: Queue name to be processed Returns: Task IDs: List of tasks that were processed (even if there was an error so that client code can assume the queue is empty if nothing was returned) Count: The number of tasks that were attempted to be executed or -1 if the queue lock couldn't be acquired. """ now = time.time() log = self.log.bind(queue=queue) batch_size = self._get_queue_batch_size(queue) queue_lock, failed_to_acquire = self._get_queue_lock(queue, log) if failed_to_acquire: return [], -1 # Move an item to the active queue, if available. # We need to be careful when moving unique tasks: We currently don't # support concurrent processing of multiple unique tasks. If the task # is already in the ACTIVE queue, we need to execute the queued task # later, i.e. move it to the SCHEDULED queue (prefer the earliest # time if it's already scheduled). We want to make sure that the last # queued instance of the task always gets executed no earlier than it # was queued. later = time.time() + self.config['LOCK_RETRY'] task_ids = self.scripts.zpoppush( self._key(QUEUED, queue), self._key(ACTIVE, queue), batch_size, None, now, if_exists=('add', self._key(SCHEDULED, queue), later, 'min'), on_success=('update_sets', queue, self._key(QUEUED), self._key(ACTIVE), self._key(SCHEDULED)) ) log.debug('moved tasks', src_queue=QUEUED, dest_queue=ACTIVE, qty=len(task_ids)) processed_count = 0 if task_ids: processed_count = self._process_queue_tasks(queue, queue_lock, task_ids, now, log) if queue_lock: queue_lock.release() log.debug('released swq lock') return task_ids, processed_count
python
def _process_from_queue(self, queue): now = time.time() log = self.log.bind(queue=queue) batch_size = self._get_queue_batch_size(queue) queue_lock, failed_to_acquire = self._get_queue_lock(queue, log) if failed_to_acquire: return [], -1 # Move an item to the active queue, if available. # We need to be careful when moving unique tasks: We currently don't # support concurrent processing of multiple unique tasks. If the task # is already in the ACTIVE queue, we need to execute the queued task # later, i.e. move it to the SCHEDULED queue (prefer the earliest # time if it's already scheduled). We want to make sure that the last # queued instance of the task always gets executed no earlier than it # was queued. later = time.time() + self.config['LOCK_RETRY'] task_ids = self.scripts.zpoppush( self._key(QUEUED, queue), self._key(ACTIVE, queue), batch_size, None, now, if_exists=('add', self._key(SCHEDULED, queue), later, 'min'), on_success=('update_sets', queue, self._key(QUEUED), self._key(ACTIVE), self._key(SCHEDULED)) ) log.debug('moved tasks', src_queue=QUEUED, dest_queue=ACTIVE, qty=len(task_ids)) processed_count = 0 if task_ids: processed_count = self._process_queue_tasks(queue, queue_lock, task_ids, now, log) if queue_lock: queue_lock.release() log.debug('released swq lock') return task_ids, processed_count
[ "def", "_process_from_queue", "(", "self", ",", "queue", ")", ":", "now", "=", "time", ".", "time", "(", ")", "log", "=", "self", ".", "log", ".", "bind", "(", "queue", "=", "queue", ")", "batch_size", "=", "self", ".", "_get_queue_batch_size", "(", "queue", ")", "queue_lock", ",", "failed_to_acquire", "=", "self", ".", "_get_queue_lock", "(", "queue", ",", "log", ")", "if", "failed_to_acquire", ":", "return", "[", "]", ",", "-", "1", "# Move an item to the active queue, if available.", "# We need to be careful when moving unique tasks: We currently don't", "# support concurrent processing of multiple unique tasks. If the task", "# is already in the ACTIVE queue, we need to execute the queued task", "# later, i.e. move it to the SCHEDULED queue (prefer the earliest", "# time if it's already scheduled). We want to make sure that the last", "# queued instance of the task always gets executed no earlier than it", "# was queued.", "later", "=", "time", ".", "time", "(", ")", "+", "self", ".", "config", "[", "'LOCK_RETRY'", "]", "task_ids", "=", "self", ".", "scripts", ".", "zpoppush", "(", "self", ".", "_key", "(", "QUEUED", ",", "queue", ")", ",", "self", ".", "_key", "(", "ACTIVE", ",", "queue", ")", ",", "batch_size", ",", "None", ",", "now", ",", "if_exists", "=", "(", "'add'", ",", "self", ".", "_key", "(", "SCHEDULED", ",", "queue", ")", ",", "later", ",", "'min'", ")", ",", "on_success", "=", "(", "'update_sets'", ",", "queue", ",", "self", ".", "_key", "(", "QUEUED", ")", ",", "self", ".", "_key", "(", "ACTIVE", ")", ",", "self", ".", "_key", "(", "SCHEDULED", ")", ")", ")", "log", ".", "debug", "(", "'moved tasks'", ",", "src_queue", "=", "QUEUED", ",", "dest_queue", "=", "ACTIVE", ",", "qty", "=", "len", "(", "task_ids", ")", ")", "processed_count", "=", "0", "if", "task_ids", ":", "processed_count", "=", "self", ".", "_process_queue_tasks", "(", "queue", ",", "queue_lock", ",", "task_ids", ",", "now", ",", "log", ")", "if", "queue_lock", ":", "queue_lock", ".", "release", "(", ")", "log", ".", "debug", "(", "'released swq lock'", ")", "return", "task_ids", ",", "processed_count" ]
Internal method to process a task batch from the given queue.

        Args:
            queue: Queue name to be processed

        Returns:
            Task IDs: List of tasks that were processed (even if there was an
                error so that client code can assume the queue is empty if
                nothing was returned)
            Count: The number of tasks that were attempted to be executed or
                -1 if the queue lock couldn't be acquired.
[ "Internal", "method", "to", "process", "a", "task", "batch", "from", "the", "given", "queue", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L634-L690
250,716
closeio/tasktiger
tasktiger/worker.py
Worker._execute_task_group
def _execute_task_group(self, queue, tasks, all_task_ids, queue_lock): """ Executes the given tasks in the queue. Updates the heartbeat for task IDs passed in all_task_ids. This internal method is only meant to be called from within _process_from_queue. """ log = self.log.bind(queue=queue) locks = [] # Keep track of the acquired locks: If two tasks in the list require # the same lock we only acquire it once. lock_ids = set() ready_tasks = [] for task in tasks: if task.lock: if task.lock_key: kwargs = task.kwargs lock_id = gen_unique_id( task.serialized_func, None, {key: kwargs.get(key) for key in task.lock_key}, ) else: lock_id = gen_unique_id( task.serialized_func, task.args, task.kwargs, ) if lock_id not in lock_ids: lock = Lock(self.connection, self._key('lock', lock_id), timeout=self.config['ACTIVE_TASK_UPDATE_TIMEOUT']) acquired = lock.acquire(blocking=False) if acquired: lock_ids.add(lock_id) locks.append(lock) else: log.info('could not acquire lock', task_id=task.id) # Reschedule the task (but if the task is already # scheduled in case of a unique task, don't prolong # the schedule date). when = time.time() + self.config['LOCK_RETRY'] task._move(from_state=ACTIVE, to_state=SCHEDULED, when=when, mode='min') # Make sure to remove it from this list so we don't # re-add to the ACTIVE queue by updating the heartbeat. all_task_ids.remove(task.id) continue ready_tasks.append(task) if not ready_tasks: return True, [] if self.stats_thread: self.stats_thread.report_task_start() success = self._execute(queue, ready_tasks, log, locks, queue_lock, all_task_ids) if self.stats_thread: self.stats_thread.report_task_end() for lock in locks: lock.release() return success, ready_tasks
python
def _execute_task_group(self, queue, tasks, all_task_ids, queue_lock): log = self.log.bind(queue=queue) locks = [] # Keep track of the acquired locks: If two tasks in the list require # the same lock we only acquire it once. lock_ids = set() ready_tasks = [] for task in tasks: if task.lock: if task.lock_key: kwargs = task.kwargs lock_id = gen_unique_id( task.serialized_func, None, {key: kwargs.get(key) for key in task.lock_key}, ) else: lock_id = gen_unique_id( task.serialized_func, task.args, task.kwargs, ) if lock_id not in lock_ids: lock = Lock(self.connection, self._key('lock', lock_id), timeout=self.config['ACTIVE_TASK_UPDATE_TIMEOUT']) acquired = lock.acquire(blocking=False) if acquired: lock_ids.add(lock_id) locks.append(lock) else: log.info('could not acquire lock', task_id=task.id) # Reschedule the task (but if the task is already # scheduled in case of a unique task, don't prolong # the schedule date). when = time.time() + self.config['LOCK_RETRY'] task._move(from_state=ACTIVE, to_state=SCHEDULED, when=when, mode='min') # Make sure to remove it from this list so we don't # re-add to the ACTIVE queue by updating the heartbeat. all_task_ids.remove(task.id) continue ready_tasks.append(task) if not ready_tasks: return True, [] if self.stats_thread: self.stats_thread.report_task_start() success = self._execute(queue, ready_tasks, log, locks, queue_lock, all_task_ids) if self.stats_thread: self.stats_thread.report_task_end() for lock in locks: lock.release() return success, ready_tasks
[ "def", "_execute_task_group", "(", "self", ",", "queue", ",", "tasks", ",", "all_task_ids", ",", "queue_lock", ")", ":", "log", "=", "self", ".", "log", ".", "bind", "(", "queue", "=", "queue", ")", "locks", "=", "[", "]", "# Keep track of the acquired locks: If two tasks in the list require", "# the same lock we only acquire it once.", "lock_ids", "=", "set", "(", ")", "ready_tasks", "=", "[", "]", "for", "task", "in", "tasks", ":", "if", "task", ".", "lock", ":", "if", "task", ".", "lock_key", ":", "kwargs", "=", "task", ".", "kwargs", "lock_id", "=", "gen_unique_id", "(", "task", ".", "serialized_func", ",", "None", ",", "{", "key", ":", "kwargs", ".", "get", "(", "key", ")", "for", "key", "in", "task", ".", "lock_key", "}", ",", ")", "else", ":", "lock_id", "=", "gen_unique_id", "(", "task", ".", "serialized_func", ",", "task", ".", "args", ",", "task", ".", "kwargs", ",", ")", "if", "lock_id", "not", "in", "lock_ids", ":", "lock", "=", "Lock", "(", "self", ".", "connection", ",", "self", ".", "_key", "(", "'lock'", ",", "lock_id", ")", ",", "timeout", "=", "self", ".", "config", "[", "'ACTIVE_TASK_UPDATE_TIMEOUT'", "]", ")", "acquired", "=", "lock", ".", "acquire", "(", "blocking", "=", "False", ")", "if", "acquired", ":", "lock_ids", ".", "add", "(", "lock_id", ")", "locks", ".", "append", "(", "lock", ")", "else", ":", "log", ".", "info", "(", "'could not acquire lock'", ",", "task_id", "=", "task", ".", "id", ")", "# Reschedule the task (but if the task is already", "# scheduled in case of a unique task, don't prolong", "# the schedule date).", "when", "=", "time", ".", "time", "(", ")", "+", "self", ".", "config", "[", "'LOCK_RETRY'", "]", "task", ".", "_move", "(", "from_state", "=", "ACTIVE", ",", "to_state", "=", "SCHEDULED", ",", "when", "=", "when", ",", "mode", "=", "'min'", ")", "# Make sure to remove it from this list so we don't", "# re-add to the ACTIVE queue by updating the heartbeat.", "all_task_ids", ".", "remove", "(", "task", ".", "id", ")", "continue", "ready_tasks", ".", "append", "(", "task", ")", "if", "not", "ready_tasks", ":", "return", "True", ",", "[", "]", "if", "self", ".", "stats_thread", ":", "self", ".", "stats_thread", ".", "report_task_start", "(", ")", "success", "=", "self", ".", "_execute", "(", "queue", ",", "ready_tasks", ",", "log", ",", "locks", ",", "queue_lock", ",", "all_task_ids", ")", "if", "self", ".", "stats_thread", ":", "self", ".", "stats_thread", ".", "report_task_end", "(", ")", "for", "lock", "in", "locks", ":", "lock", ".", "release", "(", ")", "return", "success", ",", "ready_tasks" ]
Executes the given tasks in the queue. Updates the heartbeat for task IDs passed in all_task_ids. This internal method is only meant to be called from within _process_from_queue.
[ "Executes", "the", "given", "tasks", "in", "the", "queue", ".", "Updates", "the", "heartbeat", "for", "task", "IDs", "passed", "in", "all_task_ids", ".", "This", "internal", "method", "is", "only", "meant", "to", "be", "called", "from", "within", "_process_from_queue", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L692-L757
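An added illustration of the lock identity computed in _execute_task_group above: when a task declares lock_key, only those kwargs feed gen_unique_id, so tasks that differ in other kwargs still contend for the same lock. The function name and kwargs below are made up for illustration.

# Added sketch, not part of the dataset record. Only the lock_key kwargs are
# hashed, mirroring the dict comprehension in the code above.
from tasktiger._internal import gen_unique_id

kwargs_a = {'org_id': 42, 'force': True}
kwargs_b = {'org_id': 42, 'force': False}
lock_key = ('org_id',)

lock_a = gen_unique_id('myapp.tasks:sync_org', None,
                       {key: kwargs_a.get(key) for key in lock_key})
lock_b = gen_unique_id('myapp.tasks:sync_org', None,
                       {key: kwargs_b.get(key) for key in lock_key})
assert lock_a == lock_b  # same lock despite the differing 'force' kwarg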
250,717
closeio/tasktiger
tasktiger/worker.py
Worker._finish_task_processing
def _finish_task_processing(self, queue, task, success): """ After a task is executed, this method is called and ensures that the task gets properly removed from the ACTIVE queue and, in case of an error, retried or marked as failed. """ log = self.log.bind(queue=queue, task_id=task.id) def _mark_done(): # Remove the task from active queue task._move(from_state=ACTIVE) log.info('done') if success: _mark_done() else: should_retry = False should_log_error = True # Get execution info (for logging and retry purposes) execution = self.connection.lindex( self._key('task', task.id, 'executions'), -1) if execution: execution = json.loads(execution) if execution and execution.get('retry'): if 'retry_method' in execution: retry_func, retry_args = execution['retry_method'] else: # We expect the serialized method here. retry_func, retry_args = serialize_retry_method( \ self.config['DEFAULT_RETRY_METHOD']) should_log_error = execution['log_error'] should_retry = True if task.retry_method and not should_retry: retry_func, retry_args = task.retry_method if task.retry_on: if execution: exception_name = execution.get('exception_name') try: exception_class = import_attribute(exception_name) except TaskImportError: log.error('could not import exception', exception_name=exception_name) else: if task.should_retry_on(exception_class, logger=log): should_retry = True else: should_retry = True state = ERROR when = time.time() log_context = { 'func': task.serialized_func } if should_retry: retry_num = task.n_executions() log_context['retry_func'] = retry_func log_context['retry_num'] = retry_num try: func = import_attribute(retry_func) except TaskImportError: log.error('could not import retry function', func=retry_func) else: try: retry_delay = func(retry_num, *retry_args) log_context['retry_delay'] = retry_delay when += retry_delay except StopRetry: pass else: state = SCHEDULED if execution: if state == ERROR and should_log_error: log_func = log.error else: log_func = log.warning log_context.update({ 'time_failed': execution.get('time_failed'), 'traceback': execution.get('traceback'), 'exception_name': execution.get('exception_name'), }) log_func('task error', **log_context) else: log.error('execution not found', **log_context) # Move task to the scheduled queue for retry, or move to error # queue if we don't want to retry. if state == ERROR and not should_log_error: _mark_done() else: task._move(from_state=ACTIVE, to_state=state, when=when)
python
def _finish_task_processing(self, queue, task, success): log = self.log.bind(queue=queue, task_id=task.id) def _mark_done(): # Remove the task from active queue task._move(from_state=ACTIVE) log.info('done') if success: _mark_done() else: should_retry = False should_log_error = True # Get execution info (for logging and retry purposes) execution = self.connection.lindex( self._key('task', task.id, 'executions'), -1) if execution: execution = json.loads(execution) if execution and execution.get('retry'): if 'retry_method' in execution: retry_func, retry_args = execution['retry_method'] else: # We expect the serialized method here. retry_func, retry_args = serialize_retry_method( \ self.config['DEFAULT_RETRY_METHOD']) should_log_error = execution['log_error'] should_retry = True if task.retry_method and not should_retry: retry_func, retry_args = task.retry_method if task.retry_on: if execution: exception_name = execution.get('exception_name') try: exception_class = import_attribute(exception_name) except TaskImportError: log.error('could not import exception', exception_name=exception_name) else: if task.should_retry_on(exception_class, logger=log): should_retry = True else: should_retry = True state = ERROR when = time.time() log_context = { 'func': task.serialized_func } if should_retry: retry_num = task.n_executions() log_context['retry_func'] = retry_func log_context['retry_num'] = retry_num try: func = import_attribute(retry_func) except TaskImportError: log.error('could not import retry function', func=retry_func) else: try: retry_delay = func(retry_num, *retry_args) log_context['retry_delay'] = retry_delay when += retry_delay except StopRetry: pass else: state = SCHEDULED if execution: if state == ERROR and should_log_error: log_func = log.error else: log_func = log.warning log_context.update({ 'time_failed': execution.get('time_failed'), 'traceback': execution.get('traceback'), 'exception_name': execution.get('exception_name'), }) log_func('task error', **log_context) else: log.error('execution not found', **log_context) # Move task to the scheduled queue for retry, or move to error # queue if we don't want to retry. if state == ERROR and not should_log_error: _mark_done() else: task._move(from_state=ACTIVE, to_state=state, when=when)
[ "def", "_finish_task_processing", "(", "self", ",", "queue", ",", "task", ",", "success", ")", ":", "log", "=", "self", ".", "log", ".", "bind", "(", "queue", "=", "queue", ",", "task_id", "=", "task", ".", "id", ")", "def", "_mark_done", "(", ")", ":", "# Remove the task from active queue", "task", ".", "_move", "(", "from_state", "=", "ACTIVE", ")", "log", ".", "info", "(", "'done'", ")", "if", "success", ":", "_mark_done", "(", ")", "else", ":", "should_retry", "=", "False", "should_log_error", "=", "True", "# Get execution info (for logging and retry purposes)", "execution", "=", "self", ".", "connection", ".", "lindex", "(", "self", ".", "_key", "(", "'task'", ",", "task", ".", "id", ",", "'executions'", ")", ",", "-", "1", ")", "if", "execution", ":", "execution", "=", "json", ".", "loads", "(", "execution", ")", "if", "execution", "and", "execution", ".", "get", "(", "'retry'", ")", ":", "if", "'retry_method'", "in", "execution", ":", "retry_func", ",", "retry_args", "=", "execution", "[", "'retry_method'", "]", "else", ":", "# We expect the serialized method here.", "retry_func", ",", "retry_args", "=", "serialize_retry_method", "(", "self", ".", "config", "[", "'DEFAULT_RETRY_METHOD'", "]", ")", "should_log_error", "=", "execution", "[", "'log_error'", "]", "should_retry", "=", "True", "if", "task", ".", "retry_method", "and", "not", "should_retry", ":", "retry_func", ",", "retry_args", "=", "task", ".", "retry_method", "if", "task", ".", "retry_on", ":", "if", "execution", ":", "exception_name", "=", "execution", ".", "get", "(", "'exception_name'", ")", "try", ":", "exception_class", "=", "import_attribute", "(", "exception_name", ")", "except", "TaskImportError", ":", "log", ".", "error", "(", "'could not import exception'", ",", "exception_name", "=", "exception_name", ")", "else", ":", "if", "task", ".", "should_retry_on", "(", "exception_class", ",", "logger", "=", "log", ")", ":", "should_retry", "=", "True", "else", ":", "should_retry", "=", "True", "state", "=", "ERROR", "when", "=", "time", ".", "time", "(", ")", "log_context", "=", "{", "'func'", ":", "task", ".", "serialized_func", "}", "if", "should_retry", ":", "retry_num", "=", "task", ".", "n_executions", "(", ")", "log_context", "[", "'retry_func'", "]", "=", "retry_func", "log_context", "[", "'retry_num'", "]", "=", "retry_num", "try", ":", "func", "=", "import_attribute", "(", "retry_func", ")", "except", "TaskImportError", ":", "log", ".", "error", "(", "'could not import retry function'", ",", "func", "=", "retry_func", ")", "else", ":", "try", ":", "retry_delay", "=", "func", "(", "retry_num", ",", "*", "retry_args", ")", "log_context", "[", "'retry_delay'", "]", "=", "retry_delay", "when", "+=", "retry_delay", "except", "StopRetry", ":", "pass", "else", ":", "state", "=", "SCHEDULED", "if", "execution", ":", "if", "state", "==", "ERROR", "and", "should_log_error", ":", "log_func", "=", "log", ".", "error", "else", ":", "log_func", "=", "log", ".", "warning", "log_context", ".", "update", "(", "{", "'time_failed'", ":", "execution", ".", "get", "(", "'time_failed'", ")", ",", "'traceback'", ":", "execution", ".", "get", "(", "'traceback'", ")", ",", "'exception_name'", ":", "execution", ".", "get", "(", "'exception_name'", ")", ",", "}", ")", "log_func", "(", "'task error'", ",", "*", "*", "log_context", ")", "else", ":", "log", ".", "error", "(", "'execution not found'", ",", "*", "*", "log_context", ")", "# Move task to the scheduled queue for retry, or move to error", "# queue if we don't want to 
retry.", "if", "state", "==", "ERROR", "and", "not", "should_log_error", ":", "_mark_done", "(", ")", "else", ":", "task", ".", "_move", "(", "from_state", "=", "ACTIVE", ",", "to_state", "=", "state", ",", "when", "=", "when", ")" ]
After a task is executed, this method is called and ensures that the task gets properly removed from the ACTIVE queue and, in case of an error, retried or marked as failed.
[ "After", "a", "task", "is", "executed", "this", "method", "is", "called", "and", "ensures", "that", "the", "task", "gets", "properly", "removed", "from", "the", "ACTIVE", "queue", "and", "in", "case", "of", "an", "error", "retried", "or", "marked", "as", "failed", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L759-L859
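A hedged sketch of a retry callback compatible with the handling above: the worker calls it as func(retry_num, *retry_args) and expects a delay in seconds back, or StopRetry to stop retrying. The helper name and the StopRetry import path are assumptions.

from tasktiger.exceptions import StopRetry  # import path assumed

def linear_backoff(n_executions, delay, max_retries):
    # Give up once the task has been attempted more than max_retries times;
    # the worker then leaves the task in the ERROR state.
    if n_executions > max_retries:
        raise StopRetry()
    # Otherwise the worker schedules the next attempt this many seconds out.
    return delay * n_executions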
250,718
closeio/tasktiger
tasktiger/worker.py
Worker.run
def run(self, once=False, force_once=False): """ Main loop of the worker. Use once=True to execute any queued tasks and then exit. Use force_once=True with once=True to always exit after one processing loop even if tasks remain queued. """ self.log.info('ready', id=self.id, queues=sorted(self.only_queues), exclude_queues=sorted(self.exclude_queues), single_worker_queues=sorted(self.single_worker_queues), max_workers=self.max_workers_per_queue) if not self.scripts.can_replicate_commands: # Older Redis versions may create additional overhead when # executing pipelines. self.log.warn('using old Redis version') if self.config['STATS_INTERVAL']: self.stats_thread = StatsThread(self) self.stats_thread.start() # Queue any periodic tasks that are not queued yet. self._queue_periodic_tasks() # First scan all the available queues for new items until they're empty. # Then, listen to the activity channel. # XXX: This can get inefficient when having lots of queues. self._pubsub = self.connection.pubsub() self._pubsub.subscribe(self._key('activity')) self._queue_set = set(self._filter_queues( self.connection.smembers(self._key(QUEUED)))) try: while True: # Update the queue set on every iteration so we don't get stuck # on processing a specific queue. self._wait_for_new_tasks(timeout=self.config['SELECT_TIMEOUT'], batch_timeout=self.config['SELECT_BATCH_TIMEOUT']) self._install_signal_handlers() self._did_work = False self._worker_run() self._uninstall_signal_handlers() if once and (not self._queue_set or force_once): break if self._stop_requested: raise KeyboardInterrupt() except KeyboardInterrupt: pass except Exception as e: self.log.exception(event='exception') raise finally: if self.stats_thread: self.stats_thread.stop() self.stats_thread = None # Free up Redis connection self._pubsub.reset() self.log.info('done')
python
def run(self, once=False, force_once=False): self.log.info('ready', id=self.id, queues=sorted(self.only_queues), exclude_queues=sorted(self.exclude_queues), single_worker_queues=sorted(self.single_worker_queues), max_workers=self.max_workers_per_queue) if not self.scripts.can_replicate_commands: # Older Redis versions may create additional overhead when # executing pipelines. self.log.warn('using old Redis version') if self.config['STATS_INTERVAL']: self.stats_thread = StatsThread(self) self.stats_thread.start() # Queue any periodic tasks that are not queued yet. self._queue_periodic_tasks() # First scan all the available queues for new items until they're empty. # Then, listen to the activity channel. # XXX: This can get inefficient when having lots of queues. self._pubsub = self.connection.pubsub() self._pubsub.subscribe(self._key('activity')) self._queue_set = set(self._filter_queues( self.connection.smembers(self._key(QUEUED)))) try: while True: # Update the queue set on every iteration so we don't get stuck # on processing a specific queue. self._wait_for_new_tasks(timeout=self.config['SELECT_TIMEOUT'], batch_timeout=self.config['SELECT_BATCH_TIMEOUT']) self._install_signal_handlers() self._did_work = False self._worker_run() self._uninstall_signal_handlers() if once and (not self._queue_set or force_once): break if self._stop_requested: raise KeyboardInterrupt() except KeyboardInterrupt: pass except Exception as e: self.log.exception(event='exception') raise finally: if self.stats_thread: self.stats_thread.stop() self.stats_thread = None # Free up Redis connection self._pubsub.reset() self.log.info('done')
[ "def", "run", "(", "self", ",", "once", "=", "False", ",", "force_once", "=", "False", ")", ":", "self", ".", "log", ".", "info", "(", "'ready'", ",", "id", "=", "self", ".", "id", ",", "queues", "=", "sorted", "(", "self", ".", "only_queues", ")", ",", "exclude_queues", "=", "sorted", "(", "self", ".", "exclude_queues", ")", ",", "single_worker_queues", "=", "sorted", "(", "self", ".", "single_worker_queues", ")", ",", "max_workers", "=", "self", ".", "max_workers_per_queue", ")", "if", "not", "self", ".", "scripts", ".", "can_replicate_commands", ":", "# Older Redis versions may create additional overhead when", "# executing pipelines.", "self", ".", "log", ".", "warn", "(", "'using old Redis version'", ")", "if", "self", ".", "config", "[", "'STATS_INTERVAL'", "]", ":", "self", ".", "stats_thread", "=", "StatsThread", "(", "self", ")", "self", ".", "stats_thread", ".", "start", "(", ")", "# Queue any periodic tasks that are not queued yet.", "self", ".", "_queue_periodic_tasks", "(", ")", "# First scan all the available queues for new items until they're empty.", "# Then, listen to the activity channel.", "# XXX: This can get inefficient when having lots of queues.", "self", ".", "_pubsub", "=", "self", ".", "connection", ".", "pubsub", "(", ")", "self", ".", "_pubsub", ".", "subscribe", "(", "self", ".", "_key", "(", "'activity'", ")", ")", "self", ".", "_queue_set", "=", "set", "(", "self", ".", "_filter_queues", "(", "self", ".", "connection", ".", "smembers", "(", "self", ".", "_key", "(", "QUEUED", ")", ")", ")", ")", "try", ":", "while", "True", ":", "# Update the queue set on every iteration so we don't get stuck", "# on processing a specific queue.", "self", ".", "_wait_for_new_tasks", "(", "timeout", "=", "self", ".", "config", "[", "'SELECT_TIMEOUT'", "]", ",", "batch_timeout", "=", "self", ".", "config", "[", "'SELECT_BATCH_TIMEOUT'", "]", ")", "self", ".", "_install_signal_handlers", "(", ")", "self", ".", "_did_work", "=", "False", "self", ".", "_worker_run", "(", ")", "self", ".", "_uninstall_signal_handlers", "(", ")", "if", "once", "and", "(", "not", "self", ".", "_queue_set", "or", "force_once", ")", ":", "break", "if", "self", ".", "_stop_requested", ":", "raise", "KeyboardInterrupt", "(", ")", "except", "KeyboardInterrupt", ":", "pass", "except", "Exception", "as", "e", ":", "self", ".", "log", ".", "exception", "(", "event", "=", "'exception'", ")", "raise", "finally", ":", "if", "self", ".", "stats_thread", ":", "self", ".", "stats_thread", ".", "stop", "(", ")", "self", ".", "stats_thread", "=", "None", "# Free up Redis connection", "self", ".", "_pubsub", ".", "reset", "(", ")", "self", ".", "log", ".", "info", "(", "'done'", ")" ]
Main loop of the worker. Use once=True to execute any queued tasks and then exit. Use force_once=True with once=True to always exit after one processing loop even if tasks remain queued.
[ "Main", "loop", "of", "the", "worker", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L938-L1004
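A hedged sketch of driving this loop from client code; only run(once=...) comes from the record above, the TaskTiger/Worker construction is an assumption.

from tasktiger import TaskTiger
from tasktiger.worker import Worker

tiger = TaskTiger()      # assumes a local Redis with default settings
worker = Worker(tiger)   # constructor arguments are an assumption
worker.run(once=True)    # drain the currently queued tasks, then exit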
250,719
closeio/tasktiger
tasktiger/redis_scripts.py
RedisScripts.can_replicate_commands
def can_replicate_commands(self):
        """
        Whether Redis supports single command replication.
        """
        if not hasattr(self, '_can_replicate_commands'):
            info = self.redis.info('server')
            version_info = info['redis_version'].split('.')
            major, minor = int(version_info[0]), int(version_info[1])
            result = major > 3 or major == 3 and minor >= 2
            self._can_replicate_commands = result
        return self._can_replicate_commands
python
def can_replicate_commands(self):
        if not hasattr(self, '_can_replicate_commands'):
            info = self.redis.info('server')
            version_info = info['redis_version'].split('.')
            major, minor = int(version_info[0]), int(version_info[1])
            result = major > 3 or major == 3 and minor >= 2
            self._can_replicate_commands = result
        return self._can_replicate_commands
[ "def", "can_replicate_commands", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_can_replicate_commands'", ")", ":", "info", "=", "self", ".", "redis", ".", "info", "(", "'server'", ")", "version_info", "=", "info", "[", "'redis_version'", "]", ".", "split", "(", "'.'", ")", "major", ",", "minor", "=", "int", "(", "version_info", "[", "0", "]", ")", ",", "int", "(", "version_info", "[", "1", "]", ")", "result", "=", "major", ">", "3", "or", "major", "==", "3", "and", "minor", ">=", "2", "self", ".", "_can_replicate_commands", "=", "result", "return", "self", ".", "_can_replicate_commands" ]
Whether Redis supports single command replication.
[ "Whether", "Redis", "supports", "single", "command", "replication", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/redis_scripts.py#L301-L311
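The check above boils down to parsing INFO's redis_version and requiring Redis 3.2 or newer; a minimal standalone sketch of the same comparison (the helper name is made up).

def supports_command_replication(redis_version):
    # e.g. "3.2.12" -> (3, 2); effect replication needs Redis >= 3.2
    major, minor = (int(part) for part in redis_version.split('.')[:2])
    return major > 3 or (major == 3 and minor >= 2)

assert supports_command_replication('3.2.12') is True
assert supports_command_replication('2.8.24') is False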
250,720
closeio/tasktiger
tasktiger/redis_scripts.py
RedisScripts.zpoppush
def zpoppush(self, source, destination, count, score, new_score, client=None, withscores=False, on_success=None, if_exists=None): """ Pops the first ``count`` members from the ZSET ``source`` and adds them to the ZSET ``destination`` with a score of ``new_score``. If ``score`` is not None, only members up to a score of ``score`` are used. Returns the members that were moved and, if ``withscores`` is True, their original scores. If items were moved, the action defined in ``on_success`` is executed. The only implemented option is a tuple in the form ('update_sets', ``set_value``, ``remove_from_set``, ``add_to_set`` [, ``add_to_set_if_exists``]). If no items are left in the ``source`` ZSET, the ``set_value`` is removed from ``remove_from_set``. If any items were moved to the ``destination`` ZSET, the ``set_value`` is added to ``add_to_set``. If any items were moved to the ``if_exists_key`` ZSET (see below), the ``set_value`` is added to the ``add_to_set_if_exists`` set. If ``if_exists`` is specified as a tuple ('add', if_exists_key, if_exists_score, if_exists_mode), then members that are already in the ``destination`` set will not be returned or updated, but they will be added to a ZSET ``if_exists_key`` with a score of ``if_exists_score`` and the given behavior specified in ``if_exists_mode`` for members that already exist in the ``if_exists_key`` ZSET. ``if_exists_mode`` can be one of the following: - "nx": Don't update the score - "min": Use the smaller of the given and existing score - "max": Use the larger of the given and existing score If ``if_exists`` is specified as a tuple ('noupdate',), then no action will be taken for members that are already in the ``destination`` ZSET (their score will not be updated). """ if score is None: score = '+inf' # Include all elements. if withscores: if on_success: raise NotImplementedError() return self._zpoppush_withscores( keys=[source, destination], args=[score, count, new_score], client=client) else: if if_exists and if_exists[0] == 'add': _, if_exists_key, if_exists_score, if_exists_mode = if_exists if if_exists_mode != 'min': raise NotImplementedError() if not on_success or on_success[0] != 'update_sets': raise NotImplementedError() set_value, remove_from_set, add_to_set, add_to_set_if_exists \ = on_success[1:] return self._zpoppush_exists_min_update_sets( keys=[source, destination, remove_from_set, add_to_set, add_to_set_if_exists, if_exists_key], args=[score, count, new_score, set_value, if_exists_score], ) elif if_exists and if_exists[0] == 'noupdate': if not on_success or on_success[0] != 'update_sets': raise NotImplementedError() set_value, remove_from_set, add_to_set \ = on_success[1:] return self._zpoppush_exists_ignore_update_sets( keys=[source, destination, remove_from_set, add_to_set], args=[score, count, new_score, set_value], ) if on_success: if on_success[0] != 'update_sets': raise NotImplementedError() else: set_value, remove_from_set, add_to_set = on_success[1:] return self._zpoppush_update_sets( keys=[source, destination, remove_from_set, add_to_set], args=[score, count, new_score, set_value], client=client) else: return self._zpoppush( keys=[source, destination], args=[score, count, new_score], client=client)
python
def zpoppush(self, source, destination, count, score, new_score, client=None, withscores=False, on_success=None, if_exists=None): if score is None: score = '+inf' # Include all elements. if withscores: if on_success: raise NotImplementedError() return self._zpoppush_withscores( keys=[source, destination], args=[score, count, new_score], client=client) else: if if_exists and if_exists[0] == 'add': _, if_exists_key, if_exists_score, if_exists_mode = if_exists if if_exists_mode != 'min': raise NotImplementedError() if not on_success or on_success[0] != 'update_sets': raise NotImplementedError() set_value, remove_from_set, add_to_set, add_to_set_if_exists \ = on_success[1:] return self._zpoppush_exists_min_update_sets( keys=[source, destination, remove_from_set, add_to_set, add_to_set_if_exists, if_exists_key], args=[score, count, new_score, set_value, if_exists_score], ) elif if_exists and if_exists[0] == 'noupdate': if not on_success or on_success[0] != 'update_sets': raise NotImplementedError() set_value, remove_from_set, add_to_set \ = on_success[1:] return self._zpoppush_exists_ignore_update_sets( keys=[source, destination, remove_from_set, add_to_set], args=[score, count, new_score, set_value], ) if on_success: if on_success[0] != 'update_sets': raise NotImplementedError() else: set_value, remove_from_set, add_to_set = on_success[1:] return self._zpoppush_update_sets( keys=[source, destination, remove_from_set, add_to_set], args=[score, count, new_score, set_value], client=client) else: return self._zpoppush( keys=[source, destination], args=[score, count, new_score], client=client)
[ "def", "zpoppush", "(", "self", ",", "source", ",", "destination", ",", "count", ",", "score", ",", "new_score", ",", "client", "=", "None", ",", "withscores", "=", "False", ",", "on_success", "=", "None", ",", "if_exists", "=", "None", ")", ":", "if", "score", "is", "None", ":", "score", "=", "'+inf'", "# Include all elements.", "if", "withscores", ":", "if", "on_success", ":", "raise", "NotImplementedError", "(", ")", "return", "self", ".", "_zpoppush_withscores", "(", "keys", "=", "[", "source", ",", "destination", "]", ",", "args", "=", "[", "score", ",", "count", ",", "new_score", "]", ",", "client", "=", "client", ")", "else", ":", "if", "if_exists", "and", "if_exists", "[", "0", "]", "==", "'add'", ":", "_", ",", "if_exists_key", ",", "if_exists_score", ",", "if_exists_mode", "=", "if_exists", "if", "if_exists_mode", "!=", "'min'", ":", "raise", "NotImplementedError", "(", ")", "if", "not", "on_success", "or", "on_success", "[", "0", "]", "!=", "'update_sets'", ":", "raise", "NotImplementedError", "(", ")", "set_value", ",", "remove_from_set", ",", "add_to_set", ",", "add_to_set_if_exists", "=", "on_success", "[", "1", ":", "]", "return", "self", ".", "_zpoppush_exists_min_update_sets", "(", "keys", "=", "[", "source", ",", "destination", ",", "remove_from_set", ",", "add_to_set", ",", "add_to_set_if_exists", ",", "if_exists_key", "]", ",", "args", "=", "[", "score", ",", "count", ",", "new_score", ",", "set_value", ",", "if_exists_score", "]", ",", ")", "elif", "if_exists", "and", "if_exists", "[", "0", "]", "==", "'noupdate'", ":", "if", "not", "on_success", "or", "on_success", "[", "0", "]", "!=", "'update_sets'", ":", "raise", "NotImplementedError", "(", ")", "set_value", ",", "remove_from_set", ",", "add_to_set", "=", "on_success", "[", "1", ":", "]", "return", "self", ".", "_zpoppush_exists_ignore_update_sets", "(", "keys", "=", "[", "source", ",", "destination", ",", "remove_from_set", ",", "add_to_set", "]", ",", "args", "=", "[", "score", ",", "count", ",", "new_score", ",", "set_value", "]", ",", ")", "if", "on_success", ":", "if", "on_success", "[", "0", "]", "!=", "'update_sets'", ":", "raise", "NotImplementedError", "(", ")", "else", ":", "set_value", ",", "remove_from_set", ",", "add_to_set", "=", "on_success", "[", "1", ":", "]", "return", "self", ".", "_zpoppush_update_sets", "(", "keys", "=", "[", "source", ",", "destination", ",", "remove_from_set", ",", "add_to_set", "]", ",", "args", "=", "[", "score", ",", "count", ",", "new_score", ",", "set_value", "]", ",", "client", "=", "client", ")", "else", ":", "return", "self", ".", "_zpoppush", "(", "keys", "=", "[", "source", ",", "destination", "]", ",", "args", "=", "[", "score", ",", "count", ",", "new_score", "]", ",", "client", "=", "client", ")" ]
Pops the first ``count`` members from the ZSET ``source`` and adds them
        to the ZSET ``destination`` with a score of ``new_score``. If
        ``score`` is not None, only members up to a score of ``score`` are
        used. Returns the members that were moved and, if ``withscores`` is
        True, their original scores.

        If items were moved, the action defined in ``on_success`` is executed.
        The only implemented option is a tuple in the form ('update_sets',
        ``set_value``, ``remove_from_set``, ``add_to_set``
        [, ``add_to_set_if_exists``]).
        If no items are left in the ``source`` ZSET, the ``set_value`` is
        removed from ``remove_from_set``. If any items were moved to the
        ``destination`` ZSET, the ``set_value`` is added to ``add_to_set``.
        If any items were moved to the ``if_exists_key`` ZSET (see below),
        the ``set_value`` is added to the ``add_to_set_if_exists`` set.

        If ``if_exists`` is specified as a tuple ('add', if_exists_key,
        if_exists_score, if_exists_mode), then members that are already in
        the ``destination`` set will not be returned or updated, but they
        will be added to a ZSET ``if_exists_key`` with a score of
        ``if_exists_score`` and the given behavior specified in
        ``if_exists_mode`` for members that already exist in the
        ``if_exists_key`` ZSET. ``if_exists_mode`` can be one of the
        following:

        - "nx": Don't update the score
        - "min": Use the smaller of the given and existing score
        - "max": Use the larger of the given and existing score

        If ``if_exists`` is specified as a tuple ('noupdate',), then no
        action will be taken for members that are already in the
        ``destination`` ZSET (their score will not be updated).
[ "Pops", "the", "first", "count", "members", "from", "the", "ZSET", "source", "and", "adds", "them", "to", "the", "ZSET", "destination", "with", "a", "score", "of", "new_score", ".", "If", "score", "is", "not", "None", "only", "members", "up", "to", "a", "score", "of", "score", "are", "used", ".", "Returns", "the", "members", "that", "were", "moved", "and", "if", "withscores", "is", "True", "their", "original", "scores", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/redis_scripts.py#L339-L423
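A rough, non-atomic redis-py illustration of zpoppush's simplest path (no withscores, on_success or if_exists); the real implementation runs as a single Lua script so the pop and push cannot interleave with other clients.

import redis

conn = redis.Redis()  # assumed local Redis, illustration only

def zpoppush_sketch(source, destination, count, max_score, new_score):
    # Take up to `count` members whose score is at most max_score
    # ('+inf' means all members), lowest scores first.
    members = conn.zrangebyscore(source, '-inf', max_score,
                                 start=0, num=count)
    for member in members:
        conn.zrem(source, member)
        conn.zadd(destination, {member: new_score})
    return members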
250,721
closeio/tasktiger
tasktiger/redis_scripts.py
RedisScripts.execute_pipeline
def execute_pipeline(self, pipeline, client=None): """ Executes the given Redis pipeline as a Lua script. When an error occurs, the transaction stops executing, and an exception is raised. This differs from Redis transactions, where execution continues after an error. On success, a list of results is returned. The pipeline is cleared after execution and can no longer be reused. Example: p = conn.pipeline() p.lrange('x', 0, -1) p.set('success', 1) # If "x" is empty or a list, an array [[...], True] is returned. # Otherwise, ResponseError is raised and "success" is not set. results = redis_scripts.execute_pipeline(p) """ client = client or self.redis executing_pipeline = None try: # Prepare args stack = pipeline.command_stack script_args = [int(self.can_replicate_commands), len(stack)] for args, options in stack: script_args += [len(args)-1] + list(args) # Run the pipeline if self.can_replicate_commands: # Redis 3.2 or higher # Make sure scripts exist if pipeline.scripts: pipeline.load_scripts() raw_results = self._execute_pipeline(args=script_args, client=client) else: executing_pipeline = client.pipeline() # Always load scripts to avoid issues when Redis loads data # from AOF file / when replicating. for s in pipeline.scripts: executing_pipeline.script_load(s.script) # Run actual pipeline lua script self._execute_pipeline(args=script_args, client=executing_pipeline) # Always load all scripts and run actual pipeline lua script raw_results = executing_pipeline.execute()[-1] # Run response callbacks on results. results = [] response_callbacks = pipeline.response_callbacks for ((args, options), result) in zip(stack, raw_results): command_name = args[0] if command_name in response_callbacks: result = response_callbacks[command_name](result, **options) results.append(result) return results finally: if executing_pipeline: executing_pipeline.reset() pipeline.reset()
python
def execute_pipeline(self, pipeline, client=None): client = client or self.redis executing_pipeline = None try: # Prepare args stack = pipeline.command_stack script_args = [int(self.can_replicate_commands), len(stack)] for args, options in stack: script_args += [len(args)-1] + list(args) # Run the pipeline if self.can_replicate_commands: # Redis 3.2 or higher # Make sure scripts exist if pipeline.scripts: pipeline.load_scripts() raw_results = self._execute_pipeline(args=script_args, client=client) else: executing_pipeline = client.pipeline() # Always load scripts to avoid issues when Redis loads data # from AOF file / when replicating. for s in pipeline.scripts: executing_pipeline.script_load(s.script) # Run actual pipeline lua script self._execute_pipeline(args=script_args, client=executing_pipeline) # Always load all scripts and run actual pipeline lua script raw_results = executing_pipeline.execute()[-1] # Run response callbacks on results. results = [] response_callbacks = pipeline.response_callbacks for ((args, options), result) in zip(stack, raw_results): command_name = args[0] if command_name in response_callbacks: result = response_callbacks[command_name](result, **options) results.append(result) return results finally: if executing_pipeline: executing_pipeline.reset() pipeline.reset()
[ "def", "execute_pipeline", "(", "self", ",", "pipeline", ",", "client", "=", "None", ")", ":", "client", "=", "client", "or", "self", ".", "redis", "executing_pipeline", "=", "None", "try", ":", "# Prepare args", "stack", "=", "pipeline", ".", "command_stack", "script_args", "=", "[", "int", "(", "self", ".", "can_replicate_commands", ")", ",", "len", "(", "stack", ")", "]", "for", "args", ",", "options", "in", "stack", ":", "script_args", "+=", "[", "len", "(", "args", ")", "-", "1", "]", "+", "list", "(", "args", ")", "# Run the pipeline", "if", "self", ".", "can_replicate_commands", ":", "# Redis 3.2 or higher", "# Make sure scripts exist", "if", "pipeline", ".", "scripts", ":", "pipeline", ".", "load_scripts", "(", ")", "raw_results", "=", "self", ".", "_execute_pipeline", "(", "args", "=", "script_args", ",", "client", "=", "client", ")", "else", ":", "executing_pipeline", "=", "client", ".", "pipeline", "(", ")", "# Always load scripts to avoid issues when Redis loads data", "# from AOF file / when replicating.", "for", "s", "in", "pipeline", ".", "scripts", ":", "executing_pipeline", ".", "script_load", "(", "s", ".", "script", ")", "# Run actual pipeline lua script", "self", ".", "_execute_pipeline", "(", "args", "=", "script_args", ",", "client", "=", "executing_pipeline", ")", "# Always load all scripts and run actual pipeline lua script", "raw_results", "=", "executing_pipeline", ".", "execute", "(", ")", "[", "-", "1", "]", "# Run response callbacks on results.", "results", "=", "[", "]", "response_callbacks", "=", "pipeline", ".", "response_callbacks", "for", "(", "(", "args", ",", "options", ")", ",", "result", ")", "in", "zip", "(", "stack", ",", "raw_results", ")", ":", "command_name", "=", "args", "[", "0", "]", "if", "command_name", "in", "response_callbacks", ":", "result", "=", "response_callbacks", "[", "command_name", "]", "(", "result", ",", "*", "*", "options", ")", "results", ".", "append", "(", "result", ")", "return", "results", "finally", ":", "if", "executing_pipeline", ":", "executing_pipeline", ".", "reset", "(", ")", "pipeline", ".", "reset", "(", ")" ]
Executes the given Redis pipeline as a Lua script. When an error occurs,
        the transaction stops executing, and an exception is raised. This
        differs from Redis transactions, where execution continues after an
        error. On success, a list of results is returned. The pipeline is
        cleared after execution and can no longer be reused.

        Example:

        p = conn.pipeline()
        p.lrange('x', 0, -1)
        p.set('success', 1)

        # If "x" is empty or a list, an array [[...], True] is returned.
        # Otherwise, ResponseError is raised and "success" is not set.
        results = redis_scripts.execute_pipeline(p)
[ "Executes", "the", "given", "Redis", "pipeline", "as", "a", "Lua", "script", ".", "When", "an", "error", "occurs", "the", "transaction", "stops", "executing", "and", "an", "exception", "is", "raised", ".", "This", "differs", "from", "Redis", "transactions", "where", "execution", "continues", "after", "an", "error", ".", "On", "success", "a", "list", "of", "results", "is", "returned", ".", "The", "pipeline", "is", "cleared", "after", "execution", "and", "can", "no", "longer", "be", "reused", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/redis_scripts.py#L466-L534
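The docstring's Example, written out as a usage sketch; the RedisScripts construction below is an assumption and may not match the real constructor signature.

import redis
from tasktiger.redis_scripts import RedisScripts

conn = redis.Redis()
redis_scripts = RedisScripts(conn)  # constructor signature assumed

p = conn.pipeline()
p.lrange('x', 0, -1)
p.set('success', 1)

# If "x" is empty or a list, results == [[...], True]; otherwise a
# ResponseError propagates and "success" is never set.
results = redis_scripts.execute_pipeline(p)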
250,722
closeio/tasktiger
tasktiger/_internal.py
gen_unique_id
def gen_unique_id(serialized_name, args, kwargs):
    """
    Generates and returns a hex-encoded 256-bit ID for the given task name
    and args. Used to generate IDs for unique tasks or for task locks.
    """
    return hashlib.sha256(json.dumps({
        'func': serialized_name,
        'args': args,
        'kwargs': kwargs,
    }, sort_keys=True).encode('utf8')).hexdigest()
python
def gen_unique_id(serialized_name, args, kwargs):
    return hashlib.sha256(json.dumps({
        'func': serialized_name,
        'args': args,
        'kwargs': kwargs,
    }, sort_keys=True).encode('utf8')).hexdigest()
[ "def", "gen_unique_id", "(", "serialized_name", ",", "args", ",", "kwargs", ")", ":", "return", "hashlib", ".", "sha256", "(", "json", ".", "dumps", "(", "{", "'func'", ":", "serialized_name", ",", "'args'", ":", "args", ",", "'kwargs'", ":", "kwargs", ",", "}", ",", "sort_keys", "=", "True", ")", ".", "encode", "(", "'utf8'", ")", ")", ".", "hexdigest", "(", ")" ]
Generates and returns a hex-encoded 256-bit ID for the given task name and args. Used to generate IDs for unique tasks or for task locks.
[ "Generates", "and", "returns", "a", "hex", "-", "encoded", "256", "-", "bit", "ID", "for", "the", "given", "task", "name", "and", "args", ".", "Used", "to", "generate", "IDs", "for", "unique", "tasks", "or", "for", "task", "locks", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/_internal.py#L56-L65
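Because the hash is built from JSON with sort_keys=True, keyword order does not affect the ID; a short added demonstration (the task name is made up).

from tasktiger._internal import gen_unique_id

id_a = gen_unique_id('myapp.tasks:process', None, {'org_id': 1, 'kind': 'x'})
id_b = gen_unique_id('myapp.tasks:process', None, {'kind': 'x', 'org_id': 1})
assert id_a == id_b      # kwarg order does not change the lock/unique ID
assert len(id_a) == 64   # sha256 hex digest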
250,723
closeio/tasktiger
tasktiger/_internal.py
serialize_func_name
def serialize_func_name(func):
    """
    Returns the dotted serialized path to the passed function.
    """
    if func.__module__ == '__main__':
        raise ValueError('Functions from the __main__ module cannot be '
                         'processed by workers.')
    try:
        # This will only work on Python 3.3 or above, but it will allow us to use static/classmethods
        func_name = func.__qualname__
    except AttributeError:
        func_name = func.__name__
    return ':'.join([func.__module__, func_name])
python
def serialize_func_name(func):
    if func.__module__ == '__main__':
        raise ValueError('Functions from the __main__ module cannot be '
                         'processed by workers.')
    try:
        # This will only work on Python 3.3 or above, but it will allow us to use static/classmethods
        func_name = func.__qualname__
    except AttributeError:
        func_name = func.__name__
    return ':'.join([func.__module__, func_name])
[ "def", "serialize_func_name", "(", "func", ")", ":", "if", "func", ".", "__module__", "==", "'__main__'", ":", "raise", "ValueError", "(", "'Functions from the __main__ module cannot be '", "'processed by workers.'", ")", "try", ":", "# This will only work on Python 3.3 or above, but it will allow us to use static/classmethods", "func_name", "=", "func", ".", "__qualname__", "except", "AttributeError", ":", "func_name", "=", "func", ".", "__name__", "return", "':'", ".", "join", "(", "[", "func", ".", "__module__", ",", "func_name", "]", ")" ]
Returns the dotted serialized path to the passed function.
[ "Returns", "the", "dotted", "serialized", "path", "to", "the", "passed", "function", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/_internal.py#L67-L79
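A short added illustration of the "module:qualified_name" format using a standard-library function.

import json
from tasktiger._internal import serialize_func_name

# json.dumps lives in module "json" with qualname "dumps":
assert serialize_func_name(json.dumps) == 'json:dumps'
# Methods keep their qualified name on Python 3.3+, e.g. "pkg.mod:Cls.method".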
250,724
closeio/tasktiger
tasktiger/_internal.py
dotted_parts
def dotted_parts(s):
    """
    For a string "a.b.c", yields "a", "a.b", "a.b.c".
    """
    idx = -1
    while s:
        idx = s.find('.', idx+1)
        if idx == -1:
            yield s
            break
        yield s[:idx]
python
def dotted_parts(s):
    idx = -1
    while s:
        idx = s.find('.', idx+1)
        if idx == -1:
            yield s
            break
        yield s[:idx]
[ "def", "dotted_parts", "(", "s", ")", ":", "idx", "=", "-", "1", "while", "s", ":", "idx", "=", "s", ".", "find", "(", "'.'", ",", "idx", "+", "1", ")", "if", "idx", "==", "-", "1", ":", "yield", "s", "break", "yield", "s", "[", ":", "idx", "]" ]
For a string "a.b.c", yields "a", "a.b", "a.b.c".
[ "For", "a", "string", "a", ".", "b", ".", "c", "yields", "a", "a", ".", "b", "a", ".", "b", ".", "c", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/_internal.py#L81-L91
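An added illustration; the expected values follow directly from the docstring above (prefixes are yielded shortest-first).

from tasktiger._internal import dotted_parts

assert list(dotted_parts('a.b.c')) == ['a', 'a.b', 'a.b.c']
assert list(dotted_parts('a')) == ['a']
assert list(dotted_parts('')) == []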
250,725
closeio/tasktiger
tasktiger/_internal.py
reversed_dotted_parts
def reversed_dotted_parts(s):
    """
    For a string "a.b.c", yields "a.b.c", "a.b", "a".
    """
    idx = -1
    if s:
        yield s
    while s:
        idx = s.rfind('.', 0, idx)
        if idx == -1:
            break
        yield s[:idx]
python
def reversed_dotted_parts(s):
    idx = -1
    if s:
        yield s
    while s:
        idx = s.rfind('.', 0, idx)
        if idx == -1:
            break
        yield s[:idx]
[ "def", "reversed_dotted_parts", "(", "s", ")", ":", "idx", "=", "-", "1", "if", "s", ":", "yield", "s", "while", "s", ":", "idx", "=", "s", ".", "rfind", "(", "'.'", ",", "0", ",", "idx", ")", "if", "idx", "==", "-", "1", ":", "break", "yield", "s", "[", ":", "idx", "]" ]
For a string "a.b.c", yields "a.b.c", "a.b", "a".
[ "For", "a", "string", "a", ".", "b", ".", "c", "yields", "a", ".", "b", ".", "c", "a", ".", "b", "a", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/_internal.py#L93-L104
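An added illustration of the reverse ordering (longest prefix first); that this ordering supports matching a queue such as "emails.outbound" against a filter on "emails" is an inference, not stated in the record.

from tasktiger._internal import reversed_dotted_parts

assert list(reversed_dotted_parts('a.b.c')) == ['a.b.c', 'a.b', 'a']
assert list(reversed_dotted_parts('emails.outbound')) == ['emails.outbound', 'emails']
assert list(reversed_dotted_parts('')) == []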
250,726
closeio/tasktiger
tasktiger/logging.py
tasktiger_processor
def tasktiger_processor(logger, method_name, event_dict):
    """
    TaskTiger structlog processor.

    Inject the current task id for non-batch tasks.
    """
    if g['current_tasks'] is not None and not g['current_task_is_batch']:
        event_dict['task_id'] = g['current_tasks'][0].id
    return event_dict
python
def tasktiger_processor(logger, method_name, event_dict):
    if g['current_tasks'] is not None and not g['current_task_is_batch']:
        event_dict['task_id'] = g['current_tasks'][0].id
    return event_dict
[ "def", "tasktiger_processor", "(", "logger", ",", "method_name", ",", "event_dict", ")", ":", "if", "g", "[", "'current_tasks'", "]", "is", "not", "None", "and", "not", "g", "[", "'current_task_is_batch'", "]", ":", "event_dict", "[", "'task_id'", "]", "=", "g", "[", "'current_tasks'", "]", "[", "0", "]", ".", "id", "return", "event_dict" ]
TaskTiger structlog processor. Inject the current task id for non-batch tasks.
[ "TaskTiger", "structlog", "processor", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/logging.py#L4-L14
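A hedged sketch of wiring the processor into a structlog configuration; only tasktiger_processor comes from the record, the rest of the processor chain is an assumption.

import structlog
from tasktiger.logging import tasktiger_processor

structlog.configure(
    processors=[
        structlog.stdlib.add_log_level,       # assumed companion processors
        tasktiger_processor,                  # injects task_id for non-batch tasks
        structlog.processors.JSONRenderer(),
    ]
)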
250,727
closeio/tasktiger
tasktiger/task.py
Task.should_retry_on
def should_retry_on(self, exception_class, logger=None):
        """
        Whether this task should be retried when the given exception occurs.
        """
        for n in (self.retry_on or []):
            try:
                if issubclass(exception_class, import_attribute(n)):
                    return True
            except TaskImportError:
                if logger:
                    logger.error('should_retry_on could not import class',
                                 exception_name=n)
        return False
python
def should_retry_on(self, exception_class, logger=None):
        for n in (self.retry_on or []):
            try:
                if issubclass(exception_class, import_attribute(n)):
                    return True
            except TaskImportError:
                if logger:
                    logger.error('should_retry_on could not import class',
                                 exception_name=n)
        return False
[ "def", "should_retry_on", "(", "self", ",", "exception_class", ",", "logger", "=", "None", ")", ":", "for", "n", "in", "(", "self", ".", "retry_on", "or", "[", "]", ")", ":", "try", ":", "if", "issubclass", "(", "exception_class", ",", "import_attribute", "(", "n", ")", ")", ":", "return", "True", "except", "TaskImportError", ":", "if", "logger", ":", "logger", ".", "error", "(", "'should_retry_on could not import class'", ",", "exception_name", "=", "n", ")", "return", "False" ]
Whether this task should be retried when the given exception occurs.
[ "Whether", "this", "task", "should", "be", "retried", "when", "the", "given", "exception", "occurs", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/task.py#L155-L167
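A standalone sketch of the subclass check performed above; the real method works on serialized exception paths resolved via import_attribute, while the helper below takes classes directly (the helper name and classes are made up).

def matches_retry_on(exception_class, retry_on_classes):
    # True if the raised exception is (a subclass of) any configured class.
    return any(issubclass(exception_class, cls) for cls in retry_on_classes)

class FlakyUpstreamError(IOError):
    pass

assert matches_retry_on(FlakyUpstreamError, [IOError]) is True
assert matches_retry_on(KeyError, [IOError]) is False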
250,728
closeio/tasktiger
tasktiger/task.py
Task.update_scheduled_time
def update_scheduled_time(self, when):
        """
        Updates a scheduled task's date to the given date. If the task is not
        scheduled, a TaskNotFound exception is raised.
        """
        tiger = self.tiger
        ts = get_timestamp(when)
        assert ts
        pipeline = tiger.connection.pipeline()
        key = tiger._key(SCHEDULED, self.queue)
        tiger.scripts.zadd(key, ts, self.id, mode='xx', client=pipeline)
        pipeline.zscore(key, self.id)
        _, score = pipeline.execute()
        if not score:
            raise TaskNotFound('Task {} not found in queue "{}" in state "{}".'.format(
                self.id, self.queue, SCHEDULED
            ))
        self._ts = ts
python
def update_scheduled_time(self, when):
        tiger = self.tiger
        ts = get_timestamp(when)
        assert ts
        pipeline = tiger.connection.pipeline()
        key = tiger._key(SCHEDULED, self.queue)
        tiger.scripts.zadd(key, ts, self.id, mode='xx', client=pipeline)
        pipeline.zscore(key, self.id)
        _, score = pipeline.execute()
        if not score:
            raise TaskNotFound('Task {} not found in queue "{}" in state "{}".'.format(
                self.id, self.queue, SCHEDULED
            ))
        self._ts = ts
[ "def", "update_scheduled_time", "(", "self", ",", "when", ")", ":", "tiger", "=", "self", ".", "tiger", "ts", "=", "get_timestamp", "(", "when", ")", "assert", "ts", "pipeline", "=", "tiger", ".", "connection", ".", "pipeline", "(", ")", "key", "=", "tiger", ".", "_key", "(", "SCHEDULED", ",", "self", ".", "queue", ")", "tiger", ".", "scripts", ".", "zadd", "(", "key", ",", "ts", ",", "self", ".", "id", ",", "mode", "=", "'xx'", ",", "client", "=", "pipeline", ")", "pipeline", ".", "zscore", "(", "key", ",", "self", ".", "id", ")", "_", ",", "score", "=", "pipeline", ".", "execute", "(", ")", "if", "not", "score", ":", "raise", "TaskNotFound", "(", "'Task {} not found in queue \"{}\" in state \"{}\".'", ".", "format", "(", "self", ".", "id", ",", "self", ".", "queue", ",", "SCHEDULED", ")", ")", "self", ".", "_ts", "=", "ts" ]
Updates a scheduled task's date to the given date. If the task is not scheduled, a TaskNotFound exception is raised.
[ "Updates", "a", "scheduled", "task", "s", "date", "to", "the", "given", "date", ".", "If", "the", "task", "is", "not", "scheduled", "a", "TaskNotFound", "exception", "is", "raised", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/task.py#L322-L342
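A hedged usage sketch: push an already scheduled task's run time five minutes out. Obtaining `task` is assumed and not shown, and the exception import path is an assumption.

import datetime
from tasktiger.exceptions import TaskNotFound  # import path assumed

new_when = datetime.datetime.utcnow() + datetime.timedelta(minutes=5)
try:
    task.update_scheduled_time(new_when)  # `task` obtained elsewhere
except TaskNotFound:
    # The task is no longer in the SCHEDULED queue for this queue name.
    pass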
250,729
closeio/tasktiger
tasktiger/task.py
Task.n_executions
def n_executions(self):
        """
        Queries and returns the number of past task executions.
        """
        pipeline = self.tiger.connection.pipeline()
        pipeline.exists(self.tiger._key('task', self.id))
        pipeline.llen(self.tiger._key('task', self.id, 'executions'))
        exists, n_executions = pipeline.execute()
        if not exists:
            raise TaskNotFound('Task {} not found.'.format(
                self.id
            ))
        return n_executions
python
def n_executions(self):
        pipeline = self.tiger.connection.pipeline()
        pipeline.exists(self.tiger._key('task', self.id))
        pipeline.llen(self.tiger._key('task', self.id, 'executions'))
        exists, n_executions = pipeline.execute()
        if not exists:
            raise TaskNotFound('Task {} not found.'.format(
                self.id
            ))
        return n_executions
[ "def", "n_executions", "(", "self", ")", ":", "pipeline", "=", "self", ".", "tiger", ".", "connection", ".", "pipeline", "(", ")", "pipeline", ".", "exists", "(", "self", ".", "tiger", ".", "_key", "(", "'task'", ",", "self", ".", "id", ")", ")", "pipeline", ".", "llen", "(", "self", ".", "tiger", ".", "_key", "(", "'task'", ",", "self", ".", "id", ",", "'executions'", ")", ")", "exists", ",", "n_executions", "=", "pipeline", ".", "execute", "(", ")", "if", "not", "exists", ":", "raise", "TaskNotFound", "(", "'Task {} not found.'", ".", "format", "(", "self", ".", "id", ")", ")", "return", "n_executions" ]
Queries and returns the number of past task executions.
[ "Queries", "and", "returns", "the", "number", "of", "past", "task", "executions", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/task.py#L421-L433
250,730
aewallin/allantools
allantools/noise_kasdin.py
Noise.set_input
def set_input(self, nr=2, qd=1, b=0):
        """ Set inputs after initialization

        Parameters
        -------
        nr: integer
            length of generated time-series
            number must be power of two
        qd: float
            discrete variance
        b: float
            noise type:
            0 : White Phase Modulation (WPM)
            -1 : Flicker Phase Modulation (FPM)
            -2 : White Frequency Modulation (WFM)
            -3 : Flicker Frequency Modulation (FFM)
            -4 : Random Walk Frequency Modulation (RWFM)
        """
        self.nr = nr
        self.qd = qd
        self.b = b
python
def set_input(self, nr=2, qd=1, b=0):
        self.nr = nr
        self.qd = qd
        self.b = b
[ "def", "set_input", "(", "self", ",", "nr", "=", "2", ",", "qd", "=", "1", ",", "b", "=", "0", ")", ":", "self", ".", "nr", "=", "nr", "self", ".", "qd", "=", "qd", "self", ".", "b", "=", "b" ]
Set inputs after initialization

        Parameters
        -------
        nr: integer
            length of generated time-series
            number must be power of two
        qd: float
            discrete variance
        b: float
            noise type:
            0 : White Phase Modulation (WPM)
            -1 : Flicker Phase Modulation (FPM)
            -2 : White Frequency Modulation (WFM)
            -3 : Flicker Frequency Modulation (FFM)
            -4 : Random Walk Frequency Modulation (RWFM)
[ "Set", "inputs", "after", "initialization" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/noise_kasdin.py#L81-L102
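A hedged usage sketch for the Kasdin-Walter generator: configure 2**14 points of white frequency noise (b=-2) and generate the series; that Noise() can be constructed without arguments is an assumption.

from allantools.noise_kasdin import Noise

n = Noise()                               # default construction assumed
n.set_input(nr=2**14, qd=2.5e-23, b=-2)   # nr must be a power of two
n.generateNoise()
y = n.time_series                         # colored-noise series, len == nr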
250,731
aewallin/allantools
allantools/noise_kasdin.py
Noise.generateNoise
def generateNoise(self):
        """ Generate noise time series based on input parameters

        Returns
        -------
        time_series: np.array
            Time series with colored noise.
            len(time_series) == nr
        """
        # Fill wfb array with white noise based on given discrete variance
        wfb = np.zeros(self.nr*2)
        wfb[:self.nr] = np.random.normal(0, np.sqrt(self.qd), self.nr)
        # Generate the hfb coefficients based on the noise type
        mhb = -self.b/2.0
        hfb = np.zeros(self.nr*2)
        hfb = np.zeros(self.nr*2)
        hfb[0] = 1.0
        indices = np.arange(self.nr-1)
        hfb[1:self.nr] = (mhb+indices)/(indices+1.0)
        hfb[:self.nr] = np.multiply.accumulate(hfb[:self.nr])
        # Perform discrete Fourier transform of wfb and hfb time series
        wfb_fft = np.fft.rfft(wfb)
        hfb_fft = np.fft.rfft(hfb)
        # Perform inverse Fourier transform of the product of wfb and hfb FFTs
        time_series = np.fft.irfft(wfb_fft*hfb_fft)[:self.nr]
        self.time_series = time_series
python
def generateNoise(self):
        # Fill wfb array with white noise based on given discrete variance
        wfb = np.zeros(self.nr*2)
        wfb[:self.nr] = np.random.normal(0, np.sqrt(self.qd), self.nr)
        # Generate the hfb coefficients based on the noise type
        mhb = -self.b/2.0
        hfb = np.zeros(self.nr*2)
        hfb = np.zeros(self.nr*2)
        hfb[0] = 1.0
        indices = np.arange(self.nr-1)
        hfb[1:self.nr] = (mhb+indices)/(indices+1.0)
        hfb[:self.nr] = np.multiply.accumulate(hfb[:self.nr])
        # Perform discrete Fourier transform of wfb and hfb time series
        wfb_fft = np.fft.rfft(wfb)
        hfb_fft = np.fft.rfft(hfb)
        # Perform inverse Fourier transform of the product of wfb and hfb FFTs
        time_series = np.fft.irfft(wfb_fft*hfb_fft)[:self.nr]
        self.time_series = time_series
[ "def", "generateNoise", "(", "self", ")", ":", "# Fill wfb array with white noise based on given discrete variance", "wfb", "=", "np", ".", "zeros", "(", "self", ".", "nr", "*", "2", ")", "wfb", "[", ":", "self", ".", "nr", "]", "=", "np", ".", "random", ".", "normal", "(", "0", ",", "np", ".", "sqrt", "(", "self", ".", "qd", ")", ",", "self", ".", "nr", ")", "# Generate the hfb coefficients based on the noise type", "mhb", "=", "-", "self", ".", "b", "/", "2.0", "hfb", "=", "np", ".", "zeros", "(", "self", ".", "nr", "*", "2", ")", "hfb", "=", "np", ".", "zeros", "(", "self", ".", "nr", "*", "2", ")", "hfb", "[", "0", "]", "=", "1.0", "indices", "=", "np", ".", "arange", "(", "self", ".", "nr", "-", "1", ")", "hfb", "[", "1", ":", "self", ".", "nr", "]", "=", "(", "mhb", "+", "indices", ")", "/", "(", "indices", "+", "1.0", ")", "hfb", "[", ":", "self", ".", "nr", "]", "=", "np", ".", "multiply", ".", "accumulate", "(", "hfb", "[", ":", "self", ".", "nr", "]", ")", "# Perform discrete Fourier transform of wfb and hfb time series", "wfb_fft", "=", "np", ".", "fft", ".", "rfft", "(", "wfb", ")", "hfb_fft", "=", "np", ".", "fft", ".", "rfft", "(", "hfb", ")", "# Perform inverse Fourier transform of the product of wfb and hfb FFTs", "time_series", "=", "np", ".", "fft", ".", "irfft", "(", "wfb_fft", "*", "hfb_fft", ")", "[", ":", "self", ".", "nr", "]", "self", ".", "time_series", "=", "time_series" ]
Generate noise time series based on input parameters

        Returns
        -------
        time_series: np.array
            Time series with colored noise.
            len(time_series) == nr
[ "Generate", "noise", "time", "series", "based", "on", "input", "parameters" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/noise_kasdin.py#L104-L130
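A short usage sketch for the Kasdin & Walter generator above, assuming the class is exposed as allantools.Noise (as in recent allantools releases); b=0 requests white phase modulation and nr must be a power of two:

import allantools
import numpy as np

noise = allantools.Noise()
noise.set_input(nr=2**14, qd=1e-20, b=0)   # white PM with discrete variance qd
noise.generateNoise()
x = noise.time_series                      # phase samples, len(x) == nr
print(len(x), np.std(x))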
250,732
aewallin/allantools
allantools/noise_kasdin.py
Noise.adev
def adev(self, tau0, tau): """ return predicted ADEV of noise-type at given tau """ prefactor = self.adev_from_qd(tau0=tau0, tau=tau) c = self.c_avar() avar = pow(prefactor, 2)*pow(tau, c) return np.sqrt(avar)
python
def adev(self, tau0, tau):
        prefactor = self.adev_from_qd(tau0=tau0, tau=tau)
        c = self.c_avar()
        avar = pow(prefactor, 2)*pow(tau, c)
        return np.sqrt(avar)
[ "def", "adev", "(", "self", ",", "tau0", ",", "tau", ")", ":", "prefactor", "=", "self", ".", "adev_from_qd", "(", "tau0", "=", "tau0", ",", "tau", "=", "tau", ")", "c", "=", "self", ".", "c_avar", "(", ")", "avar", "=", "pow", "(", "prefactor", ",", "2", ")", "*", "pow", "(", "tau", ",", "c", ")", "return", "np", ".", "sqrt", "(", "avar", ")" ]
return predicted ADEV of noise-type at given tau
[ "return", "predicted", "ADEV", "of", "noise", "-", "type", "at", "given", "tau" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/noise_kasdin.py#L161-L168
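The predicted deviation can be checked against an estimate computed from the generated samples; a sketch under the same assumptions as above (tau0 is the sample interval, and adev_from_qd()/c_avar() are the helpers referenced by the method in this file):

import allantools

noise = allantools.Noise()
noise.set_input(nr=2**16, qd=1e-20, b=-2)   # white FM
noise.generateNoise()

tau0 = 1.0
taus, devs, errs, ns = allantools.adev(noise.time_series, rate=1.0/tau0,
                                       data_type="phase", taus=[10.0])
predicted = noise.adev(tau0=tau0, tau=10.0)
print(devs[0], predicted)   # should agree to within the estimator's scatter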
250,733
aewallin/allantools
allantools/noise_kasdin.py
Noise.mdev
def mdev(self, tau0, tau): """ return predicted MDEV of noise-type at given tau """ prefactor = self.mdev_from_qd(tau0=tau0, tau=tau) c = self.c_mvar() mvar = pow(prefactor, 2)*pow(tau, c) return np.sqrt(mvar)
python
def mdev(self, tau0, tau):
        prefactor = self.mdev_from_qd(tau0=tau0, tau=tau)
        c = self.c_mvar()
        mvar = pow(prefactor, 2)*pow(tau, c)
        return np.sqrt(mvar)
[ "def", "mdev", "(", "self", ",", "tau0", ",", "tau", ")", ":", "prefactor", "=", "self", ".", "mdev_from_qd", "(", "tau0", "=", "tau0", ",", "tau", "=", "tau", ")", "c", "=", "self", ".", "c_mvar", "(", ")", "mvar", "=", "pow", "(", "prefactor", ",", "2", ")", "*", "pow", "(", "tau", ",", "c", ")", "return", "np", ".", "sqrt", "(", "mvar", ")" ]
return predicted MDEV of noise-type at given tau
[ "return", "predicted", "MDEV", "of", "noise", "-", "type", "at", "given", "tau" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/noise_kasdin.py#L170-L177
250,734
aewallin/allantools
allantools/noise.py
scipy_psd
def scipy_psd(x, f_sample=1.0, nr_segments=4): """ PSD routine from scipy we can compare our own numpy result against this one """ f_axis, psd_of_x = scipy.signal.welch(x, f_sample, nperseg=len(x)/nr_segments) return f_axis, psd_of_x
python
def scipy_psd(x, f_sample=1.0, nr_segments=4):
    f_axis, psd_of_x = scipy.signal.welch(x, f_sample, nperseg=len(x)/nr_segments)
    return f_axis, psd_of_x
[ "def", "scipy_psd", "(", "x", ",", "f_sample", "=", "1.0", ",", "nr_segments", "=", "4", ")", ":", "f_axis", ",", "psd_of_x", "=", "scipy", ".", "signal", ".", "welch", "(", "x", ",", "f_sample", ",", "nperseg", "=", "len", "(", "x", ")", "/", "nr_segments", ")", "return", "f_axis", ",", "psd_of_x" ]
PSD routine from scipy; we can compare our own numpy result against this one
[ "PSD", "routine", "from", "scipy", "we", "can", "compare", "our", "own", "numpy", "result", "against", "this", "one" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/noise.py#L37-L42
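scipy_psd above is a thin wrapper around scipy.signal.welch; note that nperseg should be an integer, so an integer division is advisable when len(x) is not divisible by nr_segments. A standalone sketch:

import numpy as np
import scipy.signal

x = np.random.randn(4096)                          # white noise
f_axis, psd = scipy.signal.welch(x, fs=1.0, nperseg=len(x)//4)
print(f_axis[:3], psd[:3])                         # flat PSD expected for white noise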
250,735
aewallin/allantools
allantools/noise.py
iterpink
def iterpink(depth=20): """Generate a sequence of samples of pink noise. pink noise generator from http://pydoc.net/Python/lmj.sound/0.1.1/lmj.sound.noise/ Based on the Voss-McCartney algorithm, discussion and code examples at http://www.firstpr.com.au/dsp/pink-noise/ depth: Use this many samples of white noise to calculate the output. A higher number is slower to run, but renders low frequencies with more correct power spectra. Generates a never-ending sequence of floating-point values. Any continuous set of these samples will tend to have a 1/f power spectrum. """ values = numpy.random.randn(depth) smooth = numpy.random.randn(depth) source = numpy.random.randn(depth) sumvals = values.sum() i = 0 while True: yield sumvals + smooth[i] # advance the index by 1. if the index wraps, generate noise to use in # the calculations, but do not update any of the pink noise values. i += 1 if i == depth: i = 0 smooth = numpy.random.randn(depth) source = numpy.random.randn(depth) continue # count trailing zeros in i c = 0 while not (i >> c) & 1: c += 1 # replace value c with a new source element sumvals += source[i] - values[c] values[c] = source[i]
python
def iterpink(depth=20):
    values = numpy.random.randn(depth)
    smooth = numpy.random.randn(depth)
    source = numpy.random.randn(depth)
    sumvals = values.sum()
    i = 0
    while True:
        yield sumvals + smooth[i]

        # advance the index by 1. if the index wraps, generate noise to use in
        # the calculations, but do not update any of the pink noise values.
        i += 1
        if i == depth:
            i = 0
            smooth = numpy.random.randn(depth)
            source = numpy.random.randn(depth)
            continue

        # count trailing zeros in i
        c = 0
        while not (i >> c) & 1:
            c += 1

        # replace value c with a new source element
        sumvals += source[i] - values[c]
        values[c] = source[i]
[ "def", "iterpink", "(", "depth", "=", "20", ")", ":", "values", "=", "numpy", ".", "random", ".", "randn", "(", "depth", ")", "smooth", "=", "numpy", ".", "random", ".", "randn", "(", "depth", ")", "source", "=", "numpy", ".", "random", ".", "randn", "(", "depth", ")", "sumvals", "=", "values", ".", "sum", "(", ")", "i", "=", "0", "while", "True", ":", "yield", "sumvals", "+", "smooth", "[", "i", "]", "# advance the index by 1. if the index wraps, generate noise to use in", "# the calculations, but do not update any of the pink noise values.", "i", "+=", "1", "if", "i", "==", "depth", ":", "i", "=", "0", "smooth", "=", "numpy", ".", "random", ".", "randn", "(", "depth", ")", "source", "=", "numpy", ".", "random", ".", "randn", "(", "depth", ")", "continue", "# count trailing zeros in i", "c", "=", "0", "while", "not", "(", "i", ">>", "c", ")", "&", "1", ":", "c", "+=", "1", "# replace value c with a new source element", "sumvals", "+=", "source", "[", "i", "]", "-", "values", "[", "c", "]", "values", "[", "c", "]", "=", "source", "[", "i", "]" ]
Generate a sequence of samples of pink noise. pink noise generator from http://pydoc.net/Python/lmj.sound/0.1.1/lmj.sound.noise/ Based on the Voss-McCartney algorithm, discussion and code examples at http://www.firstpr.com.au/dsp/pink-noise/ depth: Use this many samples of white noise to calculate the output. A higher number is slower to run, but renders low frequencies with more correct power spectra. Generates a never-ending sequence of floating-point values. Any continuous set of these samples will tend to have a 1/f power spectrum.
[ "Generate", "a", "sequence", "of", "samples", "of", "pink", "noise", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/noise.py#L85-L125
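Because iterpink is a generator that never terminates, a fixed number of samples can be drawn with itertools.islice; a small usage sketch (iterpink as defined above):

import itertools
import numpy

pink = numpy.array(list(itertools.islice(iterpink(depth=20), 4096)))
print(pink.mean(), pink.std())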
250,736
aewallin/allantools
examples/noise-color-demo.py
plotline
def plotline(plt, alpha, taus, style,label=""): """ plot a line with the slope alpha """ y = [pow(tt, alpha) for tt in taus] plt.loglog(taus, y, style,label=label)
python
def plotline(plt, alpha, taus, style, label=""):
    y = [pow(tt, alpha) for tt in taus]
    plt.loglog(taus, y, style, label=label)
[ "def", "plotline", "(", "plt", ",", "alpha", ",", "taus", ",", "style", ",", "label", "=", "\"\"", ")", ":", "y", "=", "[", "pow", "(", "tt", ",", "alpha", ")", "for", "tt", "in", "taus", "]", "plt", ".", "loglog", "(", "taus", ",", "y", ",", "style", ",", "label", "=", "label", ")" ]
plot a line with the slope alpha
[ "plot", "a", "line", "with", "the", "slope", "alpha" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/examples/noise-color-demo.py#L38-L41
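plotline only draws a tau^alpha reference slope on log-log axes; a usage sketch with matplotlib (the helper itself is defined above, the slope value is just an example):

import numpy as np
import matplotlib.pyplot as plt

taus = np.logspace(0, 4, 50)
plotline(plt, -0.5, taus, 'r--', label='tau^-1/2 (white FM)')
plt.legend()
plt.show()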
250,737
aewallin/allantools
examples/b1_noise_id_figure.py
b1_noise_id
def b1_noise_id(x, af, rate): """ B1 ratio for noise identification ratio of Standard Variace to AVAR """ (taus,devs,errs,ns) = at.adev(x,taus=[af*rate],data_type="phase", rate=rate) oadev_x = devs[0] y = np.diff(x) y_cut = np.array( y[:len(y)-(len(y)%af)] ) # cut to length assert len(y_cut)%af == 0 y_shaped = y_cut.reshape( ( int(len(y_cut)/af), af) ) y_averaged = np.average(y_shaped,axis=1) # average var = np.var(y_averaged, ddof=1) return var/pow(oadev_x,2.0)
python
def b1_noise_id(x, af, rate):
    (taus, devs, errs, ns) = at.adev(x, taus=[af*rate], data_type="phase", rate=rate)
    oadev_x = devs[0]
    y = np.diff(x)
    y_cut = np.array(y[:len(y)-(len(y)%af)])  # cut to length
    assert len(y_cut) % af == 0
    y_shaped = y_cut.reshape((int(len(y_cut)/af), af))
    y_averaged = np.average(y_shaped, axis=1)  # average
    var = np.var(y_averaged, ddof=1)
    return var/pow(oadev_x, 2.0)
[ "def", "b1_noise_id", "(", "x", ",", "af", ",", "rate", ")", ":", "(", "taus", ",", "devs", ",", "errs", ",", "ns", ")", "=", "at", ".", "adev", "(", "x", ",", "taus", "=", "[", "af", "*", "rate", "]", ",", "data_type", "=", "\"phase\"", ",", "rate", "=", "rate", ")", "oadev_x", "=", "devs", "[", "0", "]", "y", "=", "np", ".", "diff", "(", "x", ")", "y_cut", "=", "np", ".", "array", "(", "y", "[", ":", "len", "(", "y", ")", "-", "(", "len", "(", "y", ")", "%", "af", ")", "]", ")", "# cut to length", "assert", "len", "(", "y_cut", ")", "%", "af", "==", "0", "y_shaped", "=", "y_cut", ".", "reshape", "(", "(", "int", "(", "len", "(", "y_cut", ")", "/", "af", ")", ",", "af", ")", ")", "y_averaged", "=", "np", ".", "average", "(", "y_shaped", ",", "axis", "=", "1", ")", "# average", "var", "=", "np", ".", "var", "(", "y_averaged", ",", "ddof", "=", "1", ")", "return", "var", "/", "pow", "(", "oadev_x", ",", "2.0", ")" ]
B1 ratio for noise identification:
    ratio of the standard variance to AVAR
[ "B1", "ratio", "for", "noise", "identification", "ratio", "of", "Standard", "Variace", "to", "AVAR" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/examples/b1_noise_id_figure.py#L5-L19
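The B1 ratio compares the standard variance of block-averaged frequency to the Allan variance; for white FM noise it is expected to come out close to 1, which allows a quick self-check. A sketch using the function defined above, with numpy as np and allantools as at, as in the example file:

import numpy as np
import allantools as at

y = np.random.randn(100000)              # white FM: white fractional frequency
x = np.cumsum(y)                         # integrate to phase, tau0 = 1 s
print(b1_noise_id(x, af=10, rate=1.0))   # expected to be near 1 for white FM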
250,738
aewallin/allantools
allantools/plot.py
Plot.plot
def plot(self, atDataset, errorbars=False, grid=False): """ use matplotlib methods for plotting Parameters ---------- atDataset : allantools.Dataset() a dataset with computed data errorbars : boolean Plot errorbars. Defaults to False grid : boolean Plot grid. Defaults to False """ if errorbars: self.ax.errorbar(atDataset.out["taus"], atDataset.out["stat"], yerr=atDataset.out["stat_err"], ) else: self.ax.plot(atDataset.out["taus"], atDataset.out["stat"], ) self.ax.set_xlabel("Tau") self.ax.set_ylabel(atDataset.out["stat_id"]) self.ax.grid(grid, which="minor", ls="-", color='0.65') self.ax.grid(grid, which="major", ls="-", color='0.25')
python
def plot(self, atDataset, errorbars=False, grid=False):
        if errorbars:
            self.ax.errorbar(atDataset.out["taus"],
                             atDataset.out["stat"],
                             yerr=atDataset.out["stat_err"],
                             )
        else:
            self.ax.plot(atDataset.out["taus"],
                         atDataset.out["stat"],
                         )
        self.ax.set_xlabel("Tau")
        self.ax.set_ylabel(atDataset.out["stat_id"])
        self.ax.grid(grid, which="minor", ls="-", color='0.65')
        self.ax.grid(grid, which="major", ls="-", color='0.25')
[ "def", "plot", "(", "self", ",", "atDataset", ",", "errorbars", "=", "False", ",", "grid", "=", "False", ")", ":", "if", "errorbars", ":", "self", ".", "ax", ".", "errorbar", "(", "atDataset", ".", "out", "[", "\"taus\"", "]", ",", "atDataset", ".", "out", "[", "\"stat\"", "]", ",", "yerr", "=", "atDataset", ".", "out", "[", "\"stat_err\"", "]", ",", ")", "else", ":", "self", ".", "ax", ".", "plot", "(", "atDataset", ".", "out", "[", "\"taus\"", "]", ",", "atDataset", ".", "out", "[", "\"stat\"", "]", ",", ")", "self", ".", "ax", ".", "set_xlabel", "(", "\"Tau\"", ")", "self", ".", "ax", ".", "set_ylabel", "(", "atDataset", ".", "out", "[", "\"stat_id\"", "]", ")", "self", ".", "ax", ".", "grid", "(", "grid", ",", "which", "=", "\"minor\"", ",", "ls", "=", "\"-\"", ",", "color", "=", "'0.65'", ")", "self", ".", "ax", ".", "grid", "(", "grid", ",", "which", "=", "\"major\"", ",", "ls", "=", "\"-\"", ",", "color", "=", "'0.25'", ")" ]
use matplotlib methods for plotting Parameters ---------- atDataset : allantools.Dataset() a dataset with computed data errorbars : boolean Plot errorbars. Defaults to False grid : boolean Plot grid. Defaults to False
[ "use", "matplotlib", "methods", "for", "plotting" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/plot.py#L66-L92
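A usage sketch for the plotting helper above. The Dataset constructor, compute("adev") and Plot.show() calls are assumptions based on the docstring ("a dataset with computed data") and allantools' documented Dataset/Plot API; they may need adjusting to the installed version:

import numpy as np
import allantools

y = np.random.randn(10000)                           # white FM test data
ds = allantools.Dataset(data=y, rate=1.0, data_type="freq")
ds.compute("adev")                                    # fills ds.out["taus"], ["stat"], ...

p = allantools.Plot()
p.plot(ds, errorbars=True, grid=True)
p.show()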
250,739
aewallin/allantools
allantools/ci.py
greenhall_table2
def greenhall_table2(alpha, d): """ Table 2 from Greenhall 2004 """ row_idx = int(-alpha+2) # map 2-> row0 and -4-> row6 assert(row_idx in [0, 1, 2, 3, 4, 5]) col_idx = int(d-1) table2 = [[(3.0/2.0, 1.0/2.0), (35.0/18.0, 1.0), (231.0/100.0, 3.0/2.0)], # alpha=+2 [(78.6, 25.2), (790.0, 410.0), (9950.0, 6520.0)], [(2.0/3.0, 1.0/6.0), (2.0/3.0, 1.0/3.0), (7.0/9.0, 1.0/2.0)], # alpha=0 [(-1, -1), (0.852, 0.375), (0.997, 0.617)], # -1 [(-1, -1), (1.079, 0.368), (1.033, 0.607)], #-2 [(-1, -1), (-1, -1), (1.053, 0.553)], #-3 [(-1, -1), (-1, -1), (1.302, 0.535)], # alpha=-4 ] #print("table2 = ", table2[row_idx][col_idx]) return table2[row_idx][col_idx]
python
def greenhall_table2(alpha, d):
    row_idx = int(-alpha+2)  # map 2-> row0 and -4-> row6
    assert(row_idx in [0, 1, 2, 3, 4, 5])
    col_idx = int(d-1)
    table2 = [[(3.0/2.0, 1.0/2.0), (35.0/18.0, 1.0), (231.0/100.0, 3.0/2.0)],  # alpha=+2
              [(78.6, 25.2), (790.0, 410.0), (9950.0, 6520.0)],
              [(2.0/3.0, 1.0/6.0), (2.0/3.0, 1.0/3.0), (7.0/9.0, 1.0/2.0)],  # alpha=0
              [(-1, -1), (0.852, 0.375), (0.997, 0.617)],  # -1
              [(-1, -1), (1.079, 0.368), (1.033, 0.607)],  # -2
              [(-1, -1), (-1, -1), (1.053, 0.553)],  # -3
              [(-1, -1), (-1, -1), (1.302, 0.535)],  # alpha=-4
              ]
    # print("table2 = ", table2[row_idx][col_idx])
    return table2[row_idx][col_idx]
[ "def", "greenhall_table2", "(", "alpha", ",", "d", ")", ":", "row_idx", "=", "int", "(", "-", "alpha", "+", "2", ")", "# map 2-> row0 and -4-> row6", "assert", "(", "row_idx", "in", "[", "0", ",", "1", ",", "2", ",", "3", ",", "4", ",", "5", "]", ")", "col_idx", "=", "int", "(", "d", "-", "1", ")", "table2", "=", "[", "[", "(", "3.0", "/", "2.0", ",", "1.0", "/", "2.0", ")", ",", "(", "35.0", "/", "18.0", ",", "1.0", ")", ",", "(", "231.0", "/", "100.0", ",", "3.0", "/", "2.0", ")", "]", ",", "# alpha=+2", "[", "(", "78.6", ",", "25.2", ")", ",", "(", "790.0", ",", "410.0", ")", ",", "(", "9950.0", ",", "6520.0", ")", "]", ",", "[", "(", "2.0", "/", "3.0", ",", "1.0", "/", "6.0", ")", ",", "(", "2.0", "/", "3.0", ",", "1.0", "/", "3.0", ")", ",", "(", "7.0", "/", "9.0", ",", "1.0", "/", "2.0", ")", "]", ",", "# alpha=0", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "0.852", ",", "0.375", ")", ",", "(", "0.997", ",", "0.617", ")", "]", ",", "# -1", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "1.079", ",", "0.368", ")", ",", "(", "1.033", ",", "0.607", ")", "]", ",", "#-2", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "-", "1", ",", "-", "1", ")", ",", "(", "1.053", ",", "0.553", ")", "]", ",", "#-3", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "-", "1", ",", "-", "1", ")", ",", "(", "1.302", ",", "0.535", ")", "]", ",", "# alpha=-4", "]", "#print(\"table2 = \", table2[row_idx][col_idx])", "return", "table2", "[", "row_idx", "]", "[", "col_idx", "]" ]
Table 2 from Greenhall 2004
[ "Table", "2", "from", "Greenhall", "2004" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/ci.py#L662-L676
250,740
aewallin/allantools
allantools/ci.py
greenhall_table1
def greenhall_table1(alpha, d): """ Table 1 from Greenhall 2004 """ row_idx = int(-alpha+2) # map 2-> row0 and -4-> row6 col_idx = int(d-1) table1 = [[(2.0/3.0, 1.0/3.0), (7.0/9.0, 1.0/2.0), (22.0/25.0, 2.0/3.0)], # alpha=+2 [(0.840, 0.345), (0.997, 0.616), (1.141, 0.843)], [(1.079, 0.368), (1.033, 0.607), (1.184, 0.848)], [(-1, -1), (1.048, 0.534), (1.180, 0.816)], # -1 [(-1, -1), (1.302, 0.535), (1.175, 0.777)], #-2 [(-1, -1), (-1, -1), (1.194, 0.703)], #-3 [(-1, -1), (-1, -1), (1.489, 0.702)], # alpha=-4 ] #print("table1 = ", table1[row_idx][col_idx]) return table1[row_idx][col_idx]
python
def greenhall_table1(alpha, d):
    row_idx = int(-alpha+2)  # map 2-> row0 and -4-> row6
    col_idx = int(d-1)
    table1 = [[(2.0/3.0, 1.0/3.0), (7.0/9.0, 1.0/2.0), (22.0/25.0, 2.0/3.0)],  # alpha=+2
              [(0.840, 0.345), (0.997, 0.616), (1.141, 0.843)],
              [(1.079, 0.368), (1.033, 0.607), (1.184, 0.848)],
              [(-1, -1), (1.048, 0.534), (1.180, 0.816)],  # -1
              [(-1, -1), (1.302, 0.535), (1.175, 0.777)],  # -2
              [(-1, -1), (-1, -1), (1.194, 0.703)],  # -3
              [(-1, -1), (-1, -1), (1.489, 0.702)],  # alpha=-4
              ]
    # print("table1 = ", table1[row_idx][col_idx])
    return table1[row_idx][col_idx]
[ "def", "greenhall_table1", "(", "alpha", ",", "d", ")", ":", "row_idx", "=", "int", "(", "-", "alpha", "+", "2", ")", "# map 2-> row0 and -4-> row6", "col_idx", "=", "int", "(", "d", "-", "1", ")", "table1", "=", "[", "[", "(", "2.0", "/", "3.0", ",", "1.0", "/", "3.0", ")", ",", "(", "7.0", "/", "9.0", ",", "1.0", "/", "2.0", ")", ",", "(", "22.0", "/", "25.0", ",", "2.0", "/", "3.0", ")", "]", ",", "# alpha=+2", "[", "(", "0.840", ",", "0.345", ")", ",", "(", "0.997", ",", "0.616", ")", ",", "(", "1.141", ",", "0.843", ")", "]", ",", "[", "(", "1.079", ",", "0.368", ")", ",", "(", "1.033", ",", "0.607", ")", ",", "(", "1.184", ",", "0.848", ")", "]", ",", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "1.048", ",", "0.534", ")", ",", "(", "1.180", ",", "0.816", ")", "]", ",", "# -1", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "1.302", ",", "0.535", ")", ",", "(", "1.175", ",", "0.777", ")", "]", ",", "#-2", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "-", "1", ",", "-", "1", ")", ",", "(", "1.194", ",", "0.703", ")", "]", ",", "#-3", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "-", "1", ",", "-", "1", ")", ",", "(", "1.489", ",", "0.702", ")", "]", ",", "# alpha=-4", "]", "#print(\"table1 = \", table1[row_idx][col_idx])", "return", "table1", "[", "row_idx", "]", "[", "col_idx", "]" ]
Table 1 from Greenhall 2004
[ "Table", "1", "from", "Greenhall", "2004" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/ci.py#L678-L691
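Both lookup tables map the noise exponent alpha to a row (row = -alpha + 2) and the difference order d to a column (col = d - 1); a tiny sketch of the lookup, using the two functions defined in the records above:

# alpha = 0 (white FM), d = 2 (second difference, Allan-type)
a1, b1 = greenhall_table1(alpha=0, d=2)
a2, b2 = greenhall_table2(alpha=0, d=2)
print(a1, b1)   # (1.033, 0.607) from Table 1
print(a2, b2)   # (2/3, 1/3) from Table 2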
250,741
aewallin/allantools
allantools/ci.py
edf_mtotdev
def edf_mtotdev(N, m, alpha): """ Equivalent degrees of freedom for Modified Total Deviation NIST SP1065 page 41, Table 8 """ assert(alpha in [2, 1, 0, -1, -2]) NIST_SP1065_table8 = [(1.90, 2.1), (1.20, 1.40), (1.10, 1.2), (0.85, 0.50), (0.75, 0.31)] #(b, c) = NIST_SP1065_table8[ abs(alpha-2) ] (b, c) = NIST_SP1065_table8[abs(alpha-2)] edf = b*(float(N)/float(m))-c print("mtotdev b,c= ", (b, c), " edf=", edf) return edf
python
def edf_mtotdev(N, m, alpha):
    assert(alpha in [2, 1, 0, -1, -2])
    NIST_SP1065_table8 = [(1.90, 2.1), (1.20, 1.40), (1.10, 1.2), (0.85, 0.50), (0.75, 0.31)]
    # (b, c) = NIST_SP1065_table8[ abs(alpha-2) ]
    (b, c) = NIST_SP1065_table8[abs(alpha-2)]
    edf = b*(float(N)/float(m))-c
    print("mtotdev b,c= ", (b, c), " edf=", edf)
    return edf
[ "def", "edf_mtotdev", "(", "N", ",", "m", ",", "alpha", ")", ":", "assert", "(", "alpha", "in", "[", "2", ",", "1", ",", "0", ",", "-", "1", ",", "-", "2", "]", ")", "NIST_SP1065_table8", "=", "[", "(", "1.90", ",", "2.1", ")", ",", "(", "1.20", ",", "1.40", ")", ",", "(", "1.10", ",", "1.2", ")", ",", "(", "0.85", ",", "0.50", ")", ",", "(", "0.75", ",", "0.31", ")", "]", "#(b, c) = NIST_SP1065_table8[ abs(alpha-2) ]", "(", "b", ",", "c", ")", "=", "NIST_SP1065_table8", "[", "abs", "(", "alpha", "-", "2", ")", "]", "edf", "=", "b", "*", "(", "float", "(", "N", ")", "/", "float", "(", "m", ")", ")", "-", "c", "print", "(", "\"mtotdev b,c= \"", ",", "(", "b", ",", "c", ")", ",", "\" edf=\"", ",", "edf", ")", "return", "edf" ]
Equivalent degrees of freedom for Modified Total Deviation

    NIST SP1065 page 41, Table 8
[ "Equivalent", "degrees", "of", "freedom", "for", "Modified", "Total", "Deviation", "NIST", "SP1065", "page", "41", "Table", "8" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/ci.py#L710-L721
250,742
aewallin/allantools
allantools/ci.py
edf_simple
def edf_simple(N, m, alpha): """Equivalent degrees of freedom. Simple approximate formulae. Parameters ---------- N : int the number of phase samples m : int averaging factor, tau = m * tau0 alpha: int exponent of f for the frequency PSD: 'wp' returns white phase noise. alpha=+2 'wf' returns white frequency noise. alpha= 0 'fp' returns flicker phase noise. alpha=+1 'ff' returns flicker frequency noise. alpha=-1 'rf' returns random walk frequency noise. alpha=-2 If the input is not recognized, it defaults to idealized, uncorrelated noise with (N-1) degrees of freedom. Notes ----- S. Stein, Frequency and Time - Their Measurement and Characterization. Precision Frequency Control Vol 2, 1985, pp 191-416. http://tf.boulder.nist.gov/general/pdf/666.pdf Returns ------- edf : float Equivalent degrees of freedom """ N = float(N) m = float(m) if alpha in [2, 1, 0, -1, -2]: # NIST SP 1065, Table 5 if alpha == +2: edf = (N + 1) * (N - 2*m) / (2 * (N - m)) if alpha == 0: edf = (((3 * (N - 1) / (2 * m)) - (2 * (N - 2) / N)) * ((4*pow(m, 2)) / ((4*pow(m, 2)) + 5))) if alpha == 1: a = (N - 1)/(2 * m) b = (2 * m + 1) * (N - 1) / 4 edf = np.exp(np.sqrt(np.log(a) * np.log(b))) if alpha == -1: if m == 1: edf = 2 * (N - 2) /(2.3 * N - 4.9) if m >= 2: edf = 5 * N**2 / (4 * m * (N + (3 * m))) if alpha == -2: a = (N - 2) / (m * (N - 3)**2) b = (N - 1)**2 c = 3 * m * (N - 1) d = 4 * m **2 edf = a * (b - c + d) else: edf = (N - 1) print("Noise type not recognized. Defaulting to N - 1 degrees of freedom.") return edf
python
def edf_simple(N, m, alpha):
    N = float(N)
    m = float(m)
    if alpha in [2, 1, 0, -1, -2]:
        # NIST SP 1065, Table 5
        if alpha == +2:
            edf = (N + 1) * (N - 2*m) / (2 * (N - m))

        if alpha == 0:
            edf = (((3 * (N - 1) / (2 * m)) - (2 * (N - 2) / N)) *
                   ((4*pow(m, 2)) / ((4*pow(m, 2)) + 5)))

        if alpha == 1:
            a = (N - 1)/(2 * m)
            b = (2 * m + 1) * (N - 1) / 4
            edf = np.exp(np.sqrt(np.log(a) * np.log(b)))

        if alpha == -1:
            if m == 1:
                edf = 2 * (N - 2) / (2.3 * N - 4.9)
            if m >= 2:
                edf = 5 * N**2 / (4 * m * (N + (3 * m)))

        if alpha == -2:
            a = (N - 2) / (m * (N - 3)**2)
            b = (N - 1)**2
            c = 3 * m * (N - 1)
            d = 4 * m**2
            edf = a * (b - c + d)

    else:
        edf = (N - 1)
        print("Noise type not recognized. Defaulting to N - 1 degrees of freedom.")

    return edf
[ "def", "edf_simple", "(", "N", ",", "m", ",", "alpha", ")", ":", "N", "=", "float", "(", "N", ")", "m", "=", "float", "(", "m", ")", "if", "alpha", "in", "[", "2", ",", "1", ",", "0", ",", "-", "1", ",", "-", "2", "]", ":", "# NIST SP 1065, Table 5", "if", "alpha", "==", "+", "2", ":", "edf", "=", "(", "N", "+", "1", ")", "*", "(", "N", "-", "2", "*", "m", ")", "/", "(", "2", "*", "(", "N", "-", "m", ")", ")", "if", "alpha", "==", "0", ":", "edf", "=", "(", "(", "(", "3", "*", "(", "N", "-", "1", ")", "/", "(", "2", "*", "m", ")", ")", "-", "(", "2", "*", "(", "N", "-", "2", ")", "/", "N", ")", ")", "*", "(", "(", "4", "*", "pow", "(", "m", ",", "2", ")", ")", "/", "(", "(", "4", "*", "pow", "(", "m", ",", "2", ")", ")", "+", "5", ")", ")", ")", "if", "alpha", "==", "1", ":", "a", "=", "(", "N", "-", "1", ")", "/", "(", "2", "*", "m", ")", "b", "=", "(", "2", "*", "m", "+", "1", ")", "*", "(", "N", "-", "1", ")", "/", "4", "edf", "=", "np", ".", "exp", "(", "np", ".", "sqrt", "(", "np", ".", "log", "(", "a", ")", "*", "np", ".", "log", "(", "b", ")", ")", ")", "if", "alpha", "==", "-", "1", ":", "if", "m", "==", "1", ":", "edf", "=", "2", "*", "(", "N", "-", "2", ")", "/", "(", "2.3", "*", "N", "-", "4.9", ")", "if", "m", ">=", "2", ":", "edf", "=", "5", "*", "N", "**", "2", "/", "(", "4", "*", "m", "*", "(", "N", "+", "(", "3", "*", "m", ")", ")", ")", "if", "alpha", "==", "-", "2", ":", "a", "=", "(", "N", "-", "2", ")", "/", "(", "m", "*", "(", "N", "-", "3", ")", "**", "2", ")", "b", "=", "(", "N", "-", "1", ")", "**", "2", "c", "=", "3", "*", "m", "*", "(", "N", "-", "1", ")", "d", "=", "4", "*", "m", "**", "2", "edf", "=", "a", "*", "(", "b", "-", "c", "+", "d", ")", "else", ":", "edf", "=", "(", "N", "-", "1", ")", "print", "(", "\"Noise type not recognized. Defaulting to N - 1 degrees of freedom.\"", ")", "return", "edf" ]
Equivalent degrees of freedom. Simple approximate formulae. Parameters ---------- N : int the number of phase samples m : int averaging factor, tau = m * tau0 alpha: int exponent of f for the frequency PSD: 'wp' returns white phase noise. alpha=+2 'wf' returns white frequency noise. alpha= 0 'fp' returns flicker phase noise. alpha=+1 'ff' returns flicker frequency noise. alpha=-1 'rf' returns random walk frequency noise. alpha=-2 If the input is not recognized, it defaults to idealized, uncorrelated noise with (N-1) degrees of freedom. Notes ----- S. Stein, Frequency and Time - Their Measurement and Characterization. Precision Frequency Control Vol 2, 1985, pp 191-416. http://tf.boulder.nist.gov/general/pdf/666.pdf Returns ------- edf : float Equivalent degrees of freedom
[ "Equivalent", "degrees", "of", "freedom", ".", "Simple", "approximate", "formulae", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/ci.py#L723-L789
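The equivalent degrees of freedom feed a chi-squared confidence interval for the estimated (Allan) variance. A worked sketch for white FM (alpha = 0), using edf_simple as defined above; the interval construction below follows the usual chi-squared recipe rather than any specific allantools helper, and the measured deviation is a hypothetical value:

import numpy as np
import scipy.stats

N, m, alpha = 1024, 8, 0
edf = edf_simple(N, m, alpha)            # roughly 3*(N-1)/(2*m) for white FM

adev_estimate = 1.2e-11                  # hypothetical measured ADEV
avar = adev_estimate**2
ci = 0.683                               # one-sigma interval
var_lo = edf * avar / scipy.stats.chi2.ppf(1 - (1 - ci)/2, edf)
var_hi = edf * avar / scipy.stats.chi2.ppf((1 - ci)/2, edf)
print(np.sqrt(var_lo), adev_estimate, np.sqrt(var_hi))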
250,743
aewallin/allantools
examples/gradev-demo.py
example1
def example1(): """ Compute the GRADEV of a white phase noise. Compares two different scenarios. 1) The original data and 2) ADEV estimate with gap robust ADEV. """ N = 1000 f = 1 y = np.random.randn(1,N)[0,:] x = [xx for xx in np.linspace(1,len(y),len(y))] x_ax, y_ax, (err_l, err_h), ns = allan.gradev(y,data_type='phase',rate=f,taus=x) plt.errorbar(x_ax, y_ax,yerr=[err_l,err_h],label='GRADEV, no gaps') y[int(np.floor(0.4*N)):int(np.floor(0.6*N))] = np.NaN # Simulate missing data x_ax, y_ax, (err_l, err_h) , ns = allan.gradev(y,data_type='phase',rate=f,taus=x) plt.errorbar(x_ax, y_ax,yerr=[err_l,err_h], label='GRADEV, with gaps') plt.xscale('log') plt.yscale('log') plt.grid() plt.legend() plt.xlabel('Tau / s') plt.ylabel('Overlapping Allan deviation') plt.show()
python
def example1():
    N = 1000
    f = 1
    y = np.random.randn(1, N)[0, :]
    x = [xx for xx in np.linspace(1, len(y), len(y))]
    x_ax, y_ax, (err_l, err_h), ns = allan.gradev(y, data_type='phase', rate=f, taus=x)
    plt.errorbar(x_ax, y_ax, yerr=[err_l, err_h], label='GRADEV, no gaps')

    y[int(np.floor(0.4*N)):int(np.floor(0.6*N))] = np.NaN  # Simulate missing data
    x_ax, y_ax, (err_l, err_h), ns = allan.gradev(y, data_type='phase', rate=f, taus=x)
    plt.errorbar(x_ax, y_ax, yerr=[err_l, err_h], label='GRADEV, with gaps')
    plt.xscale('log')
    plt.yscale('log')
    plt.grid()
    plt.legend()
    plt.xlabel('Tau / s')
    plt.ylabel('Overlapping Allan deviation')
    plt.show()
[ "def", "example1", "(", ")", ":", "N", "=", "1000", "f", "=", "1", "y", "=", "np", ".", "random", ".", "randn", "(", "1", ",", "N", ")", "[", "0", ",", ":", "]", "x", "=", "[", "xx", "for", "xx", "in", "np", ".", "linspace", "(", "1", ",", "len", "(", "y", ")", ",", "len", "(", "y", ")", ")", "]", "x_ax", ",", "y_ax", ",", "(", "err_l", ",", "err_h", ")", ",", "ns", "=", "allan", ".", "gradev", "(", "y", ",", "data_type", "=", "'phase'", ",", "rate", "=", "f", ",", "taus", "=", "x", ")", "plt", ".", "errorbar", "(", "x_ax", ",", "y_ax", ",", "yerr", "=", "[", "err_l", ",", "err_h", "]", ",", "label", "=", "'GRADEV, no gaps'", ")", "y", "[", "int", "(", "np", ".", "floor", "(", "0.4", "*", "N", ")", ")", ":", "int", "(", "np", ".", "floor", "(", "0.6", "*", "N", ")", ")", "]", "=", "np", ".", "NaN", "# Simulate missing data", "x_ax", ",", "y_ax", ",", "(", "err_l", ",", "err_h", ")", ",", "ns", "=", "allan", ".", "gradev", "(", "y", ",", "data_type", "=", "'phase'", ",", "rate", "=", "f", ",", "taus", "=", "x", ")", "plt", ".", "errorbar", "(", "x_ax", ",", "y_ax", ",", "yerr", "=", "[", "err_l", ",", "err_h", "]", ",", "label", "=", "'GRADEV, with gaps'", ")", "plt", ".", "xscale", "(", "'log'", ")", "plt", ".", "yscale", "(", "'log'", ")", "plt", ".", "grid", "(", ")", "plt", ".", "legend", "(", ")", "plt", ".", "xlabel", "(", "'Tau / s'", ")", "plt", ".", "ylabel", "(", "'Overlapping Allan deviation'", ")", "plt", ".", "show", "(", ")" ]
Compute the GRADEV of white phase noise.
    Compares two scenarios:
    1) the original data and
    2) the same data with a simulated gap,
    both estimated with the gap-robust ADEV.
[ "Compute", "the", "GRADEV", "of", "a", "white", "phase", "noise", ".", "Compares", "two", "different", "scenarios", ".", "1", ")", "The", "original", "data", "and", "2", ")", "ADEV", "estimate", "with", "gap", "robust", "ADEV", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/examples/gradev-demo.py#L10-L32
250,744
aewallin/allantools
examples/gradev-demo.py
example2
def example2(): """ Compute the GRADEV of a nonstationary white phase noise. """ N=1000 # number of samples f = 1 # data samples per second s=1+5/N*np.arange(0,N) y=s*np.random.randn(1,N)[0,:] x = [xx for xx in np.linspace(1,len(y),len(y))] x_ax, y_ax, (err_l, err_h) , ns = allan.gradev(y,data_type='phase',rate=f,taus=x) plt.loglog(x_ax, y_ax,'b.',label="No gaps") y[int(0.4*N):int(0.6*N,)] = np.NaN # Simulate missing data x_ax, y_ax, (err_l, err_h), ns = allan.gradev(y,data_type='phase',rate=f,taus=x) plt.loglog(x_ax, y_ax,'g.',label="With gaps") plt.grid() plt.legend() plt.xlabel('Tau / s') plt.ylabel('Overlapping Allan deviation') plt.show()
python
def example2():
    N = 1000   # number of samples
    f = 1      # data samples per second
    s = 1 + 5/N*np.arange(0, N)
    y = s*np.random.randn(1, N)[0, :]
    x = [xx for xx in np.linspace(1, len(y), len(y))]
    x_ax, y_ax, (err_l, err_h), ns = allan.gradev(y, data_type='phase', rate=f, taus=x)
    plt.loglog(x_ax, y_ax, 'b.', label="No gaps")
    y[int(0.4*N):int(0.6*N)] = np.NaN  # Simulate missing data
    x_ax, y_ax, (err_l, err_h), ns = allan.gradev(y, data_type='phase', rate=f, taus=x)
    plt.loglog(x_ax, y_ax, 'g.', label="With gaps")
    plt.grid()
    plt.legend()
    plt.xlabel('Tau / s')
    plt.ylabel('Overlapping Allan deviation')
    plt.show()
[ "def", "example2", "(", ")", ":", "N", "=", "1000", "# number of samples", "f", "=", "1", "# data samples per second", "s", "=", "1", "+", "5", "/", "N", "*", "np", ".", "arange", "(", "0", ",", "N", ")", "y", "=", "s", "*", "np", ".", "random", ".", "randn", "(", "1", ",", "N", ")", "[", "0", ",", ":", "]", "x", "=", "[", "xx", "for", "xx", "in", "np", ".", "linspace", "(", "1", ",", "len", "(", "y", ")", ",", "len", "(", "y", ")", ")", "]", "x_ax", ",", "y_ax", ",", "(", "err_l", ",", "err_h", ")", ",", "ns", "=", "allan", ".", "gradev", "(", "y", ",", "data_type", "=", "'phase'", ",", "rate", "=", "f", ",", "taus", "=", "x", ")", "plt", ".", "loglog", "(", "x_ax", ",", "y_ax", ",", "'b.'", ",", "label", "=", "\"No gaps\"", ")", "y", "[", "int", "(", "0.4", "*", "N", ")", ":", "int", "(", "0.6", "*", "N", ",", ")", "]", "=", "np", ".", "NaN", "# Simulate missing data", "x_ax", ",", "y_ax", ",", "(", "err_l", ",", "err_h", ")", ",", "ns", "=", "allan", ".", "gradev", "(", "y", ",", "data_type", "=", "'phase'", ",", "rate", "=", "f", ",", "taus", "=", "x", ")", "plt", ".", "loglog", "(", "x_ax", ",", "y_ax", ",", "'g.'", ",", "label", "=", "\"With gaps\"", ")", "plt", ".", "grid", "(", ")", "plt", ".", "legend", "(", ")", "plt", ".", "xlabel", "(", "'Tau / s'", ")", "plt", ".", "ylabel", "(", "'Overlapping Allan deviation'", ")", "plt", ".", "show", "(", ")" ]
Compute the GRADEV of a nonstationary white phase noise.
[ "Compute", "the", "GRADEV", "of", "a", "nonstationary", "white", "phase", "noise", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/examples/gradev-demo.py#L34-L52
250,745
aewallin/allantools
allantools/allantools.py
tdev
def tdev(data, rate=1.0, data_type="phase", taus=None): """ Time deviation. Based on modified Allan variance. .. math:: \\sigma^2_{TDEV}( \\tau ) = { \\tau^2 \\over 3 } \\sigma^2_{MDEV}( \\tau ) Note that TDEV has a unit of seconds. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus, tdev, tdev_error, ns): tuple Tuple of values taus: np.array Tau values for which td computed tdev: np.array Computed time deviations (in seconds) for each tau value tdev_errors: np.array Time deviation errors ns: np.array Values of N used in mdev_phase() Notes ----- http://en.wikipedia.org/wiki/Time_deviation """ phase = input_to_phase(data, rate, data_type) (taus, md, mde, ns) = mdev(phase, rate=rate, taus=taus) td = taus * md / np.sqrt(3.0) tde = td / np.sqrt(ns) return taus, td, tde, ns
python
def tdev(data, rate=1.0, data_type="phase", taus=None):
    phase = input_to_phase(data, rate, data_type)
    (taus, md, mde, ns) = mdev(phase, rate=rate, taus=taus)
    td = taus * md / np.sqrt(3.0)
    tde = td / np.sqrt(ns)
    return taus, td, tde, ns
[ "def", "tdev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "taus", ",", "md", ",", "mde", ",", "ns", ")", "=", "mdev", "(", "phase", ",", "rate", "=", "rate", ",", "taus", "=", "taus", ")", "td", "=", "taus", "*", "md", "/", "np", ".", "sqrt", "(", "3.0", ")", "tde", "=", "td", "/", "np", ".", "sqrt", "(", "ns", ")", "return", "taus", ",", "td", ",", "tde", ",", "ns" ]
Time deviation. Based on modified Allan variance. .. math:: \\sigma^2_{TDEV}( \\tau ) = { \\tau^2 \\over 3 } \\sigma^2_{MDEV}( \\tau ) Note that TDEV has a unit of seconds. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus, tdev, tdev_error, ns): tuple Tuple of values taus: np.array Tau values for which td computed tdev: np.array Computed time deviations (in seconds) for each tau value tdev_errors: np.array Time deviation errors ns: np.array Values of N used in mdev_phase() Notes ----- http://en.wikipedia.org/wiki/Time_deviation
[ "Time", "deviation", ".", "Based", "on", "modified", "Allan", "variance", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L109-L155
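TDEV is simply tau * MDEV / sqrt(3), so its output can be checked directly against mdev(); a sketch using the two public functions documented in this file:

import numpy as np
import allantools

x = np.random.randn(10000)                        # white phase noise, seconds
taus_in = [1, 2, 4, 8, 16]
t1, td, tde, ns1 = allantools.tdev(x, rate=1.0, data_type="phase", taus=taus_in)
t2, md, mde, ns2 = allantools.mdev(x, rate=1.0, data_type="phase", taus=taus_in)
print(np.allclose(td, t1 * md / np.sqrt(3.0)))    # True by construction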
250,746
aewallin/allantools
allantools/allantools.py
mdev
def mdev(data, rate=1.0, data_type="phase", taus=None): """ Modified Allan deviation. Used to distinguish between White and Flicker Phase Modulation. .. math:: \\sigma^2_{MDEV}(m\\tau_0) = { 1 \\over 2 (m \\tau_0 )^2 (N-3m+1) } \\sum_{j=1}^{N-3m+1} \\lbrace \\sum_{i=j}^{j+m-1} {x}_{i+2m} - 2x_{i+m} + x_{i} \\rbrace^2 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus2, md, mde, ns): tuple Tuple of values taus2: np.array Tau values for which td computed md: np.array Computed mdev for each tau value mde: np.array mdev errors ns: np.array Values of N used in each mdev calculation Notes ----- see http://www.leapsecond.com/tools/adev_lib.c NIST SP 1065 eqn (14) and (15), page 17 """ phase = input_to_phase(data, rate, data_type) (phase, ms, taus_used) = tau_generator(phase, rate, taus=taus) data, taus = np.array(phase), np.array(taus) md = np.zeros_like(ms) mderr = np.zeros_like(ms) ns = np.zeros_like(ms) # this is a 'loop-unrolled' algorithm following # http://www.leapsecond.com/tools/adev_lib.c for idx, m in enumerate(ms): m = int(m) # without this we get: VisibleDeprecationWarning: # using a non-integer number instead of an integer # will result in an error in the future tau = taus_used[idx] # First loop sum d0 = phase[0:m] d1 = phase[m:2*m] d2 = phase[2*m:3*m] e = min(len(d0), len(d1), len(d2)) v = np.sum(d2[:e] - 2* d1[:e] + d0[:e]) s = v * v # Second part of sum d3 = phase[3*m:] d2 = phase[2*m:] d1 = phase[1*m:] d0 = phase[0:] e = min(len(d0), len(d1), len(d2), len(d3)) n = e + 1 v_arr = v + np.cumsum(d3[:e] - 3 * d2[:e] + 3 * d1[:e] - d0[:e]) s = s + np.sum(v_arr * v_arr) s /= 2.0 * m * m * tau * tau * n s = np.sqrt(s) md[idx] = s mderr[idx] = (s / np.sqrt(n)) ns[idx] = n return remove_small_ns(taus_used, md, mderr, ns)
python
def mdev(data, rate=1.0, data_type="phase", taus=None):
    phase = input_to_phase(data, rate, data_type)
    (phase, ms, taus_used) = tau_generator(phase, rate, taus=taus)
    data, taus = np.array(phase), np.array(taus)

    md = np.zeros_like(ms)
    mderr = np.zeros_like(ms)
    ns = np.zeros_like(ms)

    # this is a 'loop-unrolled' algorithm following
    # http://www.leapsecond.com/tools/adev_lib.c
    for idx, m in enumerate(ms):
        m = int(m)  # without this we get: VisibleDeprecationWarning:
                    # using a non-integer number instead of an integer
                    # will result in an error in the future
        tau = taus_used[idx]

        # First loop sum
        d0 = phase[0:m]
        d1 = phase[m:2*m]
        d2 = phase[2*m:3*m]
        e = min(len(d0), len(d1), len(d2))

        v = np.sum(d2[:e] - 2*d1[:e] + d0[:e])
        s = v * v

        # Second part of sum
        d3 = phase[3*m:]
        d2 = phase[2*m:]
        d1 = phase[1*m:]
        d0 = phase[0:]

        e = min(len(d0), len(d1), len(d2), len(d3))
        n = e + 1

        v_arr = v + np.cumsum(d3[:e] - 3*d2[:e] + 3*d1[:e] - d0[:e])

        s = s + np.sum(v_arr * v_arr)
        s /= 2.0 * m * m * tau * tau * n
        s = np.sqrt(s)

        md[idx] = s
        mderr[idx] = (s / np.sqrt(n))
        ns[idx] = n

    return remove_small_ns(taus_used, md, mderr, ns)
[ "def", "mdev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "phase", ",", "ms", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", "=", "taus", ")", "data", ",", "taus", "=", "np", ".", "array", "(", "phase", ")", ",", "np", ".", "array", "(", "taus", ")", "md", "=", "np", ".", "zeros_like", "(", "ms", ")", "mderr", "=", "np", ".", "zeros_like", "(", "ms", ")", "ns", "=", "np", ".", "zeros_like", "(", "ms", ")", "# this is a 'loop-unrolled' algorithm following", "# http://www.leapsecond.com/tools/adev_lib.c", "for", "idx", ",", "m", "in", "enumerate", "(", "ms", ")", ":", "m", "=", "int", "(", "m", ")", "# without this we get: VisibleDeprecationWarning:", "# using a non-integer number instead of an integer", "# will result in an error in the future", "tau", "=", "taus_used", "[", "idx", "]", "# First loop sum", "d0", "=", "phase", "[", "0", ":", "m", "]", "d1", "=", "phase", "[", "m", ":", "2", "*", "m", "]", "d2", "=", "phase", "[", "2", "*", "m", ":", "3", "*", "m", "]", "e", "=", "min", "(", "len", "(", "d0", ")", ",", "len", "(", "d1", ")", ",", "len", "(", "d2", ")", ")", "v", "=", "np", ".", "sum", "(", "d2", "[", ":", "e", "]", "-", "2", "*", "d1", "[", ":", "e", "]", "+", "d0", "[", ":", "e", "]", ")", "s", "=", "v", "*", "v", "# Second part of sum", "d3", "=", "phase", "[", "3", "*", "m", ":", "]", "d2", "=", "phase", "[", "2", "*", "m", ":", "]", "d1", "=", "phase", "[", "1", "*", "m", ":", "]", "d0", "=", "phase", "[", "0", ":", "]", "e", "=", "min", "(", "len", "(", "d0", ")", ",", "len", "(", "d1", ")", ",", "len", "(", "d2", ")", ",", "len", "(", "d3", ")", ")", "n", "=", "e", "+", "1", "v_arr", "=", "v", "+", "np", ".", "cumsum", "(", "d3", "[", ":", "e", "]", "-", "3", "*", "d2", "[", ":", "e", "]", "+", "3", "*", "d1", "[", ":", "e", "]", "-", "d0", "[", ":", "e", "]", ")", "s", "=", "s", "+", "np", ".", "sum", "(", "v_arr", "*", "v_arr", ")", "s", "/=", "2.0", "*", "m", "*", "m", "*", "tau", "*", "tau", "*", "n", "s", "=", "np", ".", "sqrt", "(", "s", ")", "md", "[", "idx", "]", "=", "s", "mderr", "[", "idx", "]", "=", "(", "s", "/", "np", ".", "sqrt", "(", "n", ")", ")", "ns", "[", "idx", "]", "=", "n", "return", "remove_small_ns", "(", "taus_used", ",", "md", ",", "mderr", ",", "ns", ")" ]
Modified Allan deviation. Used to distinguish between White and Flicker Phase Modulation. .. math:: \\sigma^2_{MDEV}(m\\tau_0) = { 1 \\over 2 (m \\tau_0 )^2 (N-3m+1) } \\sum_{j=1}^{N-3m+1} \\lbrace \\sum_{i=j}^{j+m-1} {x}_{i+2m} - 2x_{i+m} + x_{i} \\rbrace^2 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus2, md, mde, ns): tuple Tuple of values taus2: np.array Tau values for which td computed md: np.array Computed mdev for each tau value mde: np.array mdev errors ns: np.array Values of N used in each mdev calculation Notes ----- see http://www.leapsecond.com/tools/adev_lib.c NIST SP 1065 eqn (14) and (15), page 17
[ "Modified", "Allan", "deviation", ".", "Used", "to", "distinguish", "between", "White", "and", "Flicker", "Phase", "Modulation", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L157-L245
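A usage sketch: for white phase noise MDEV is expected to fall roughly as tau^-3/2, which is exactly the property used to separate white PM from flicker PM; the slope check below is approximate and will scatter for short datasets:

import numpy as np
import allantools

x = 1e-9 * np.random.randn(100000)                # white PM, phase in seconds
taus, md, mde, ns = allantools.mdev(x, rate=1.0, data_type="phase",
                                    taus="octave")
slope = np.polyfit(np.log10(taus), np.log10(md), 1)[0]
print(slope)                                      # expected near -1.5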
250,747
aewallin/allantools
allantools/allantools.py
adev
def adev(data, rate=1.0, data_type="phase", taus=None): """ Allan deviation. Classic - use only if required - relatively poor confidence. .. math:: \\sigma^2_{ADEV}(\\tau) = { 1 \\over 2 \\tau^2 } \\langle ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2 \\rangle = { 1 \\over 2 (N-2) \\tau^2 } \\sum_{n=1}^{N-2} ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2 where :math:`x_n` is the time-series of phase observations, spaced by the measurement interval :math:`\\tau`, and with length :math:`N`. Or alternatively calculated from a time-series of fractional frequency: .. math:: \\sigma^{2}_{ADEV}(\\tau) = { 1 \\over 2 } \\langle ( \\bar{y}_{n+1} - \\bar{y}_n )^2 \\rangle where :math:`\\bar{y}_n` is the time-series of fractional frequency at averaging time :math:`\\tau` NIST SP 1065 eqn (6) and (7), pages 14 and 15 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus2, ad, ade, ns): tuple Tuple of values taus2: np.array Tau values for which td computed ad: np.array Computed adev for each tau value ade: np.array adev errors ns: np.array Values of N used in each adev calculation """ phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) ad = np.zeros_like(taus_used) ade = np.zeros_like(taus_used) adn = np.zeros_like(taus_used) for idx, mj in enumerate(m): # loop through each tau value m(j) (ad[idx], ade[idx], adn[idx]) = calc_adev_phase(phase, rate, mj, mj) return remove_small_ns(taus_used, ad, ade, adn)
python
def adev(data, rate=1.0, data_type="phase", taus=None):
    phase = input_to_phase(data, rate, data_type)
    (phase, m, taus_used) = tau_generator(phase, rate, taus)

    ad = np.zeros_like(taus_used)
    ade = np.zeros_like(taus_used)
    adn = np.zeros_like(taus_used)

    for idx, mj in enumerate(m):  # loop through each tau value m(j)
        (ad[idx], ade[idx], adn[idx]) = calc_adev_phase(phase, rate, mj, mj)

    return remove_small_ns(taus_used, ad, ade, adn)
[ "def", "adev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "phase", ",", "m", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ")", "ad", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ade", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "adn", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "for", "idx", ",", "mj", "in", "enumerate", "(", "m", ")", ":", "# loop through each tau value m(j)", "(", "ad", "[", "idx", "]", ",", "ade", "[", "idx", "]", ",", "adn", "[", "idx", "]", ")", "=", "calc_adev_phase", "(", "phase", ",", "rate", ",", "mj", ",", "mj", ")", "return", "remove_small_ns", "(", "taus_used", ",", "ad", ",", "ade", ",", "adn", ")" ]
Allan deviation. Classic - use only if required - relatively poor confidence. .. math:: \\sigma^2_{ADEV}(\\tau) = { 1 \\over 2 \\tau^2 } \\langle ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2 \\rangle = { 1 \\over 2 (N-2) \\tau^2 } \\sum_{n=1}^{N-2} ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2 where :math:`x_n` is the time-series of phase observations, spaced by the measurement interval :math:`\\tau`, and with length :math:`N`. Or alternatively calculated from a time-series of fractional frequency: .. math:: \\sigma^{2}_{ADEV}(\\tau) = { 1 \\over 2 } \\langle ( \\bar{y}_{n+1} - \\bar{y}_n )^2 \\rangle where :math:`\\bar{y}_n` is the time-series of fractional frequency at averaging time :math:`\\tau` NIST SP 1065 eqn (6) and (7), pages 14 and 15 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus2, ad, ade, ns): tuple Tuple of values taus2: np.array Tau values for which td computed ad: np.array Computed adev for each tau value ade: np.array adev errors ns: np.array Values of N used in each adev calculation
[ "Allan", "deviation", ".", "Classic", "-", "use", "only", "if", "required", "-", "relatively", "poor", "confidence", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L247-L311
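A corresponding sketch for adev() with fractional-frequency input; for white FM the deviation is expected to fall roughly as tau^-1/2:

import numpy as np
import allantools

y = 1e-12 * np.random.randn(100000)               # white FM, fractional frequency
taus, ad, ade, ns = allantools.adev(y, rate=1.0, data_type="freq",
                                    taus="decade")
for t, d in zip(taus, ad):
    print(t, d)                                   # d should scale roughly as 1/sqrt(t)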
250,748
aewallin/allantools
allantools/allantools.py
ohdev
def ohdev(data, rate=1.0, data_type="phase", taus=None): """ Overlapping Hadamard deviation. Better confidence than normal Hadamard. .. math:: \\sigma^2_{OHDEV}(m\\tau_0) = { 1 \\over 6 (m \\tau_0 )^2 (N-3m) } \\sum_{i=1}^{N-3m} ( {x}_{i+3m} - 3x_{i+2m} + 3x_{i+m} - x_{i} )^2 where :math:`x_i` is the time-series of phase observations, spaced by the measurement interval :math:`\\tau_0`, and with length :math:`N`. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus2, hd, hde, ns): tuple Tuple of values taus2: np.array Tau values for which td computed hd: np.array Computed hdev for each tau value hde: np.array hdev errors ns: np.array Values of N used in each hdev calculation """ phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) hdevs = np.zeros_like(taus_used) hdeverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(m): (hdevs[idx], hdeverrs[idx], ns[idx]) = calc_hdev_phase(phase, rate, mj, 1) return remove_small_ns(taus_used, hdevs, hdeverrs, ns)
python
def ohdev(data, rate=1.0, data_type="phase", taus=None):
    phase = input_to_phase(data, rate, data_type)
    (phase, m, taus_used) = tau_generator(phase, rate, taus)
    hdevs = np.zeros_like(taus_used)
    hdeverrs = np.zeros_like(taus_used)
    ns = np.zeros_like(taus_used)

    for idx, mj in enumerate(m):
        (hdevs[idx],
         hdeverrs[idx],
         ns[idx]) = calc_hdev_phase(phase, rate, mj, 1)

    return remove_small_ns(taus_used, hdevs, hdeverrs, ns)
[ "def", "ohdev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "phase", ",", "m", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ")", "hdevs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "hdeverrs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ns", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "for", "idx", ",", "mj", "in", "enumerate", "(", "m", ")", ":", "(", "hdevs", "[", "idx", "]", ",", "hdeverrs", "[", "idx", "]", ",", "ns", "[", "idx", "]", ")", "=", "calc_hdev_phase", "(", "phase", ",", "rate", ",", "mj", ",", "1", ")", "return", "remove_small_ns", "(", "taus_used", ",", "hdevs", ",", "hdeverrs", ",", "ns", ")" ]
Overlapping Hadamard deviation. Better confidence than normal Hadamard. .. math:: \\sigma^2_{OHDEV}(m\\tau_0) = { 1 \\over 6 (m \\tau_0 )^2 (N-3m) } \\sum_{i=1}^{N-3m} ( {x}_{i+3m} - 3x_{i+2m} + 3x_{i+m} - x_{i} )^2 where :math:`x_i` is the time-series of phase observations, spaced by the measurement interval :math:`\\tau_0`, and with length :math:`N`. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus2, hd, hde, ns): tuple Tuple of values taus2: np.array Tau values for which td computed hd: np.array Computed hdev for each tau value hde: np.array hdev errors ns: np.array Values of N used in each hdev calculation
[ "Overlapping", "Hadamard", "deviation", ".", "Better", "confidence", "than", "normal", "Hadamard", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L420-L471
250,749
aewallin/allantools
allantools/allantools.py
calc_hdev_phase
def calc_hdev_phase(phase, rate, mj, stride): """ main calculation fungtion for HDEV and OHDEV Parameters ---------- phase: np.array Phase data in seconds. rate: float The sampling rate for phase or frequency, in Hz mj: int M index value for stride stride: int Size of stride Returns ------- (dev, deverr, n): tuple Array of computed values. Notes ----- http://www.leapsecond.com/tools/adev_lib.c 1 N-3 s2y(t) = --------------- sum [x(i+3) - 3x(i+2) + 3x(i+1) - x(i) ]^2 6*tau^2 (N-3m) i=1 N=M+1 phase measurements m is averaging factor NIST SP 1065 eqn (18) and (20) pages 20 and 21 """ tau0 = 1.0 / float(rate) mj = int(mj) stride = int(stride) d3 = phase[3 * mj::stride] d2 = phase[2 * mj::stride] d1 = phase[1 * mj::stride] d0 = phase[::stride] n = min(len(d0), len(d1), len(d2), len(d3)) v_arr = d3[:n] - 3 * d2[:n] + 3 * d1[:n] - d0[:n] s = np.sum(v_arr * v_arr) if n == 0: n = 1 h = np.sqrt(s / 6.0 / float(n)) / float(tau0 * mj) e = h / np.sqrt(n) return h, e, n
python
def calc_hdev_phase(phase, rate, mj, stride):
    tau0 = 1.0 / float(rate)
    mj = int(mj)
    stride = int(stride)
    d3 = phase[3 * mj::stride]
    d2 = phase[2 * mj::stride]
    d1 = phase[1 * mj::stride]
    d0 = phase[::stride]

    n = min(len(d0), len(d1), len(d2), len(d3))

    v_arr = d3[:n] - 3 * d2[:n] + 3 * d1[:n] - d0[:n]

    s = np.sum(v_arr * v_arr)

    if n == 0:
        n = 1

    h = np.sqrt(s / 6.0 / float(n)) / float(tau0 * mj)
    e = h / np.sqrt(n)
    return h, e, n
[ "def", "calc_hdev_phase", "(", "phase", ",", "rate", ",", "mj", ",", "stride", ")", ":", "tau0", "=", "1.0", "/", "float", "(", "rate", ")", "mj", "=", "int", "(", "mj", ")", "stride", "=", "int", "(", "stride", ")", "d3", "=", "phase", "[", "3", "*", "mj", ":", ":", "stride", "]", "d2", "=", "phase", "[", "2", "*", "mj", ":", ":", "stride", "]", "d1", "=", "phase", "[", "1", "*", "mj", ":", ":", "stride", "]", "d0", "=", "phase", "[", ":", ":", "stride", "]", "n", "=", "min", "(", "len", "(", "d0", ")", ",", "len", "(", "d1", ")", ",", "len", "(", "d2", ")", ",", "len", "(", "d3", ")", ")", "v_arr", "=", "d3", "[", ":", "n", "]", "-", "3", "*", "d2", "[", ":", "n", "]", "+", "3", "*", "d1", "[", ":", "n", "]", "-", "d0", "[", ":", "n", "]", "s", "=", "np", ".", "sum", "(", "v_arr", "*", "v_arr", ")", "if", "n", "==", "0", ":", "n", "=", "1", "h", "=", "np", ".", "sqrt", "(", "s", "/", "6.0", "/", "float", "(", "n", ")", ")", "/", "float", "(", "tau0", "*", "mj", ")", "e", "=", "h", "/", "np", ".", "sqrt", "(", "n", ")", "return", "h", ",", "e", ",", "n" ]
main calculation function for HDEV and OHDEV

    Parameters
    ----------
    phase: np.array
        Phase data in seconds.
    rate: float
        The sampling rate for phase or frequency, in Hz
    mj: int
        M index value for stride
    stride: int
        Size of stride

    Returns
    -------
    (dev, deverr, n): tuple
        Array of computed values.

    Notes
    -----
    http://www.leapsecond.com/tools/adev_lib.c

                     1        N-3
    s2y(t) = --------------- sum [x(i+3) - 3x(i+2) + 3x(i+1) - x(i) ]^2
             6*tau^2 (N-3m)  i=1

    N=M+1 phase measurements
    m is averaging factor

    NIST SP 1065 eqn (18) and (20) pages 20 and 21
[ "main", "calculation", "fungtion", "for", "HDEV", "and", "OHDEV" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L515-L566
250,750
aewallin/allantools
allantools/allantools.py
totdev
def totdev(data, rate=1.0, data_type="phase", taus=None): """ Total deviation. Better confidence at long averages for Allan. .. math:: \\sigma^2_{TOTDEV}( m\\tau_0 ) = { 1 \\over 2 (m\\tau_0)^2 (N-2) } \\sum_{i=2}^{N-1} ( {x}^*_{i-m} - 2x^*_{i} + x^*_{i+m} )^2 Where :math:`x^*_i` is a new time-series of length :math:`3N-4` derived from the original phase time-series :math:`x_n` of length :math:`N` by reflection at both ends. FIXME: better description of reflection operation. the original data x is in the center of x*: x*(1-j) = 2x(1) - x(1+j) for j=1..N-2 x*(i) = x(i) for i=1..N x*(N+j) = 2x(N) - x(N-j) for j=1..N-2 x* has length 3N-4 tau = m*tau0 FIXME: bias correction http://www.wriley.com/CI2.pdf page 5 Parameters ---------- phase: np.array Phase data in seconds. Provide either phase or frequency. frequency: np.array Fractional frequency data (nondimensional). Provide either frequency or phase. rate: float The sampling rate for phase or frequency, in Hz taus: np.array Array of tau values for which to compute measurement References ---------- David A. Howe, *The total deviation approach to long-term characterization of frequency stability*, IEEE tr. UFFC vol 47 no 5 (2000) NIST SP 1065 eqn (25) page 23 """ phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) N = len(phase) # totdev requires a new dataset # Begin by adding reflected data before dataset x1 = 2.0 * phase[0] * np.ones((N - 2,)) x1 = x1 - phase[1:-1] x1 = x1[::-1] # Reflected data at end of dataset x2 = 2.0 * phase[-1] * np.ones((N - 2,)) x2 = x2 - phase[1:-1][::-1] # check length of new dataset assert len(x1)+len(phase)+len(x2) == 3*N - 4 # Combine into a single array x = np.zeros((3*N - 4)) x[0:N-2] = x1 x[N-2:2*(N-2)+2] = phase # original data in the middle x[2*(N-2)+2:] = x2 devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) mid = len(x1) for idx, mj in enumerate(m): mj = int(mj) d0 = x[mid + 1:] d1 = x[mid + mj + 1:] d1n = x[mid - mj + 1:] e = min(len(d0), len(d1), len(d1n)) v_arr = d1n[:e] - 2.0 * d0[:e] + d1[:e] dev = np.sum(v_arr[:mid] * v_arr[:mid]) dev /= float(2 * pow(mj / rate, 2) * (N - 2)) dev = np.sqrt(dev) devs[idx] = dev deverrs[idx] = dev / np.sqrt(mid) ns[idx] = mid return remove_small_ns(taus_used, devs, deverrs, ns)
python
def totdev(data, rate=1.0, data_type="phase", taus=None): phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) N = len(phase) # totdev requires a new dataset # Begin by adding reflected data before dataset x1 = 2.0 * phase[0] * np.ones((N - 2,)) x1 = x1 - phase[1:-1] x1 = x1[::-1] # Reflected data at end of dataset x2 = 2.0 * phase[-1] * np.ones((N - 2,)) x2 = x2 - phase[1:-1][::-1] # check length of new dataset assert len(x1)+len(phase)+len(x2) == 3*N - 4 # Combine into a single array x = np.zeros((3*N - 4)) x[0:N-2] = x1 x[N-2:2*(N-2)+2] = phase # original data in the middle x[2*(N-2)+2:] = x2 devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) mid = len(x1) for idx, mj in enumerate(m): mj = int(mj) d0 = x[mid + 1:] d1 = x[mid + mj + 1:] d1n = x[mid - mj + 1:] e = min(len(d0), len(d1), len(d1n)) v_arr = d1n[:e] - 2.0 * d0[:e] + d1[:e] dev = np.sum(v_arr[:mid] * v_arr[:mid]) dev /= float(2 * pow(mj / rate, 2) * (N - 2)) dev = np.sqrt(dev) devs[idx] = dev deverrs[idx] = dev / np.sqrt(mid) ns[idx] = mid return remove_small_ns(taus_used, devs, deverrs, ns)
[ "def", "totdev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "phase", ",", "m", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ")", "N", "=", "len", "(", "phase", ")", "# totdev requires a new dataset", "# Begin by adding reflected data before dataset", "x1", "=", "2.0", "*", "phase", "[", "0", "]", "*", "np", ".", "ones", "(", "(", "N", "-", "2", ",", ")", ")", "x1", "=", "x1", "-", "phase", "[", "1", ":", "-", "1", "]", "x1", "=", "x1", "[", ":", ":", "-", "1", "]", "# Reflected data at end of dataset", "x2", "=", "2.0", "*", "phase", "[", "-", "1", "]", "*", "np", ".", "ones", "(", "(", "N", "-", "2", ",", ")", ")", "x2", "=", "x2", "-", "phase", "[", "1", ":", "-", "1", "]", "[", ":", ":", "-", "1", "]", "# check length of new dataset", "assert", "len", "(", "x1", ")", "+", "len", "(", "phase", ")", "+", "len", "(", "x2", ")", "==", "3", "*", "N", "-", "4", "# Combine into a single array", "x", "=", "np", ".", "zeros", "(", "(", "3", "*", "N", "-", "4", ")", ")", "x", "[", "0", ":", "N", "-", "2", "]", "=", "x1", "x", "[", "N", "-", "2", ":", "2", "*", "(", "N", "-", "2", ")", "+", "2", "]", "=", "phase", "# original data in the middle", "x", "[", "2", "*", "(", "N", "-", "2", ")", "+", "2", ":", "]", "=", "x2", "devs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "deverrs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ns", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "mid", "=", "len", "(", "x1", ")", "for", "idx", ",", "mj", "in", "enumerate", "(", "m", ")", ":", "mj", "=", "int", "(", "mj", ")", "d0", "=", "x", "[", "mid", "+", "1", ":", "]", "d1", "=", "x", "[", "mid", "+", "mj", "+", "1", ":", "]", "d1n", "=", "x", "[", "mid", "-", "mj", "+", "1", ":", "]", "e", "=", "min", "(", "len", "(", "d0", ")", ",", "len", "(", "d1", ")", ",", "len", "(", "d1n", ")", ")", "v_arr", "=", "d1n", "[", ":", "e", "]", "-", "2.0", "*", "d0", "[", ":", "e", "]", "+", "d1", "[", ":", "e", "]", "dev", "=", "np", ".", "sum", "(", "v_arr", "[", ":", "mid", "]", "*", "v_arr", "[", ":", "mid", "]", ")", "dev", "/=", "float", "(", "2", "*", "pow", "(", "mj", "/", "rate", ",", "2", ")", "*", "(", "N", "-", "2", ")", ")", "dev", "=", "np", ".", "sqrt", "(", "dev", ")", "devs", "[", "idx", "]", "=", "dev", "deverrs", "[", "idx", "]", "=", "dev", "/", "np", ".", "sqrt", "(", "mid", ")", "ns", "[", "idx", "]", "=", "mid", "return", "remove_small_ns", "(", "taus_used", ",", "devs", ",", "deverrs", ",", "ns", ")" ]
Total deviation. Better confidence at long averages for Allan. .. math:: \\sigma^2_{TOTDEV}( m\\tau_0 ) = { 1 \\over 2 (m\\tau_0)^2 (N-2) } \\sum_{i=2}^{N-1} ( {x}^*_{i-m} - 2x^*_{i} + x^*_{i+m} )^2 Where :math:`x^*_i` is a new time-series of length :math:`3N-4` derived from the original phase time-series :math:`x_n` of length :math:`N` by reflection at both ends. FIXME: better description of reflection operation. the original data x is in the center of x*: x*(1-j) = 2x(1) - x(1+j) for j=1..N-2 x*(i) = x(i) for i=1..N x*(N+j) = 2x(N) - x(N-j) for j=1..N-2 x* has length 3N-4 tau = m*tau0 FIXME: bias correction http://www.wriley.com/CI2.pdf page 5 Parameters ---------- phase: np.array Phase data in seconds. Provide either phase or frequency. frequency: np.array Fractional frequency data (nondimensional). Provide either frequency or phase. rate: float The sampling rate for phase or frequency, in Hz taus: np.array Array of tau values for which to compute measurement References ---------- David A. Howe, *The total deviation approach to long-term characterization of frequency stability*, IEEE tr. UFFC vol 47 no 5 (2000) NIST SP 1065 eqn (25) page 23
[ "Total", "deviation", ".", "Better", "confidence", "at", "long", "averages", "for", "Allan", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L568-L660
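A minimal usage sketch for totdev (not from the repository); the data is synthetic white phase noise, and the string form taus="octave" is assumed to be accepted, as the ["all"|"octave"|"decade"] note in the docstring suggests.
import numpy as np
import allantools

phase = 1e-9 * np.random.randn(2000)                # synthetic white phase noise, seconds
taus_out, devs, errs, ns = allantools.totdev(phase, rate=1.0, data_type="phase", taus="octave")
for t, d in zip(taus_out, devs):
    print("tau = %g s  totdev = %.3g" % (t, d))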
250,751
aewallin/allantools
allantools/allantools.py
mtotdev
def mtotdev(data, rate=1.0, data_type="phase", taus=None): """ PRELIMINARY - REQUIRES FURTHER TESTING. Modified Total deviation. Better confidence at long averages for modified Allan FIXME: bias-correction http://www.wriley.com/CI2.pdf page 6 The variance is scaled up (divided by this number) based on the noise-type identified. WPM 0.94 FPM 0.83 WFM 0.73 FFM 0.70 RWFM 0.69 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. NIST SP 1065 eqn (27) page 25 """ phase = input_to_phase(data, rate, data_type) (phase, ms, taus_used) = tau_generator(phase, rate, taus, maximum_m=float(len(phase))/3.0) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(ms): devs[idx], deverrs[idx], ns[idx] = calc_mtotdev_phase(phase, rate, mj) return remove_small_ns(taus_used, devs, deverrs, ns)
python
def mtotdev(data, rate=1.0, data_type="phase", taus=None): phase = input_to_phase(data, rate, data_type) (phase, ms, taus_used) = tau_generator(phase, rate, taus, maximum_m=float(len(phase))/3.0) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(ms): devs[idx], deverrs[idx], ns[idx] = calc_mtotdev_phase(phase, rate, mj) return remove_small_ns(taus_used, devs, deverrs, ns)
[ "def", "mtotdev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "phase", ",", "ms", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ",", "maximum_m", "=", "float", "(", "len", "(", "phase", ")", ")", "/", "3.0", ")", "devs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "deverrs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ns", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "for", "idx", ",", "mj", "in", "enumerate", "(", "ms", ")", ":", "devs", "[", "idx", "]", ",", "deverrs", "[", "idx", "]", ",", "ns", "[", "idx", "]", "=", "calc_mtotdev_phase", "(", "phase", ",", "rate", ",", "mj", ")", "return", "remove_small_ns", "(", "taus_used", ",", "devs", ",", "deverrs", ",", "ns", ")" ]
PRELIMINARY - REQUIRES FURTHER TESTING. Modified Total deviation. Better confidence at long averages for modified Allan FIXME: bias-correction http://www.wriley.com/CI2.pdf page 6 The variance is scaled up (divided by this number) based on the noise-type identified. WPM 0.94 FPM 0.83 WFM 0.73 FFM 0.70 RWFM 0.69 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. NIST SP 1065 eqn (27) page 25
[ "PRELIMINARY", "-", "REQUIRES", "FURTHER", "TESTING", ".", "Modified", "Total", "deviation", ".", "Better", "confidence", "at", "long", "averages", "for", "modified", "Allan" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L674-L716
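A usage sketch with fractional frequency input rather than phase (synthetic data, illustrative only); the dataset is kept short because the total estimators are comparatively slow.
import numpy as np
import allantools

freq = 1e-11 * np.random.randn(500)                 # synthetic fractional frequency data
taus_out, devs, errs, ns = allantools.mtotdev(freq, rate=1.0, data_type="freq", taus="octave")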
250,752
aewallin/allantools
allantools/allantools.py
htotdev
def htotdev(data, rate=1.0, data_type="phase", taus=None): """ PRELIMINARY - REQUIRES FURTHER TESTING. Hadamard Total deviation. Better confidence at long averages for Hadamard deviation FIXME: bias corrections from http://www.wriley.com/CI2.pdf W FM 0.995 alpha= 0 F FM 0.851 alpha=-1 RW FM 0.771 alpha=-2 FW FM 0.717 alpha=-3 RR FM 0.679 alpha=-4 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. """ if data_type == "phase": phase = data freq = phase2frequency(phase, rate) elif data_type == "freq": phase = frequency2phase(data, rate) freq = data else: raise Exception("unknown data_type: " + data_type) rate = float(rate) (freq, ms, taus_used) = tau_generator(freq, rate, taus, maximum_m=float(len(freq))/3.0) phase = np.array(phase) freq = np.array(freq) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) # NOTE at mj==1 we use ohdev(), based on comment from here: # http://www.wriley.com/paper4ht.htm # "For best consistency, the overlapping Hadamard variance is used # instead of the Hadamard total variance at m=1" # FIXME: this uses both freq and phase datasets, which uses double the memory really needed... for idx, mj in enumerate(ms): if int(mj) == 1: (devs[idx], deverrs[idx], ns[idx]) = calc_hdev_phase(phase, rate, mj, 1) else: (devs[idx], deverrs[idx], ns[idx]) = calc_htotdev_freq(freq, mj) return remove_small_ns(taus_used, devs, deverrs, ns)
python
def htotdev(data, rate=1.0, data_type="phase", taus=None): if data_type == "phase": phase = data freq = phase2frequency(phase, rate) elif data_type == "freq": phase = frequency2phase(data, rate) freq = data else: raise Exception("unknown data_type: " + data_type) rate = float(rate) (freq, ms, taus_used) = tau_generator(freq, rate, taus, maximum_m=float(len(freq))/3.0) phase = np.array(phase) freq = np.array(freq) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) # NOTE at mj==1 we use ohdev(), based on comment from here: # http://www.wriley.com/paper4ht.htm # "For best consistency, the overlapping Hadamard variance is used # instead of the Hadamard total variance at m=1" # FIXME: this uses both freq and phase datasets, which uses double the memory really needed... for idx, mj in enumerate(ms): if int(mj) == 1: (devs[idx], deverrs[idx], ns[idx]) = calc_hdev_phase(phase, rate, mj, 1) else: (devs[idx], deverrs[idx], ns[idx]) = calc_htotdev_freq(freq, mj) return remove_small_ns(taus_used, devs, deverrs, ns)
[ "def", "htotdev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "if", "data_type", "==", "\"phase\"", ":", "phase", "=", "data", "freq", "=", "phase2frequency", "(", "phase", ",", "rate", ")", "elif", "data_type", "==", "\"freq\"", ":", "phase", "=", "frequency2phase", "(", "data", ",", "rate", ")", "freq", "=", "data", "else", ":", "raise", "Exception", "(", "\"unknown data_type: \"", "+", "data_type", ")", "rate", "=", "float", "(", "rate", ")", "(", "freq", ",", "ms", ",", "taus_used", ")", "=", "tau_generator", "(", "freq", ",", "rate", ",", "taus", ",", "maximum_m", "=", "float", "(", "len", "(", "freq", ")", ")", "/", "3.0", ")", "phase", "=", "np", ".", "array", "(", "phase", ")", "freq", "=", "np", ".", "array", "(", "freq", ")", "devs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "deverrs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ns", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "# NOTE at mj==1 we use ohdev(), based on comment from here:", "# http://www.wriley.com/paper4ht.htm", "# \"For best consistency, the overlapping Hadamard variance is used", "# instead of the Hadamard total variance at m=1\"", "# FIXME: this uses both freq and phase datasets, which uses double the memory really needed...", "for", "idx", ",", "mj", "in", "enumerate", "(", "ms", ")", ":", "if", "int", "(", "mj", ")", "==", "1", ":", "(", "devs", "[", "idx", "]", ",", "deverrs", "[", "idx", "]", ",", "ns", "[", "idx", "]", ")", "=", "calc_hdev_phase", "(", "phase", ",", "rate", ",", "mj", ",", "1", ")", "else", ":", "(", "devs", "[", "idx", "]", ",", "deverrs", "[", "idx", "]", ",", "ns", "[", "idx", "]", ")", "=", "calc_htotdev_freq", "(", "freq", ",", "mj", ")", "return", "remove_small_ns", "(", "taus_used", ",", "devs", ",", "deverrs", ",", "ns", ")" ]
PRELIMINARY - REQUIRES FURTHER TESTING. Hadamard Total deviation. Better confidence at long averages for Hadamard deviation FIXME: bias corrections from http://www.wriley.com/CI2.pdf W FM 0.995 alpha= 0 F FM 0.851 alpha=-1 RW FM 0.771 alpha=-2 FW FM 0.717 alpha=-3 RR FM 0.679 alpha=-4 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation.
[ "PRELIMINARY", "-", "REQUIRES", "FURTHER", "TESTING", ".", "Hadamard", "Total", "deviation", ".", "Better", "confidence", "at", "long", "averages", "for", "Hadamard", "deviation" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L787-L847
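A usage sketch under the same assumptions (synthetic data, taus="octave"); note that the m == 1 point is computed with the overlapping Hadamard estimator, as the code comment above explains.
import numpy as np
import allantools

phase = np.cumsum(1e-10 * np.random.randn(500))     # synthetic phase data, seconds
taus_out, devs, errs, ns = allantools.htotdev(phase, rate=1.0, data_type="phase", taus="octave")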
250,753
aewallin/allantools
allantools/allantools.py
theo1
def theo1(data, rate=1.0, data_type="phase", taus=None): """ PRELIMINARY - REQUIRES FURTHER TESTING. Theo1 is a two-sample variance with improved confidence and extended averaging factor range. .. math:: \\sigma^2_{THEO1}(m\\tau_0) = { 1 \\over (m \\tau_0 )^2 (N-m) } \\sum_{i=1}^{N-m} \\sum_{\\delta=0}^{m/2-1} {1\\over m/2-\\delta}\\lbrace ({x}_{i} - x_{i-\\delta +m/2}) + (x_{i+m}- x_{i+\\delta +m/2}) \\rbrace^2 Where :math:`10<=m<=N-1` is even. FIXME: bias correction NIST SP 1065 eq (30) page 29 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. """ phase = input_to_phase(data, rate, data_type) tau0 = 1.0/rate (phase, ms, taus_used) = tau_generator(phase, rate, taus, even=True) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) N = len(phase) for idx, m in enumerate(ms): m = int(m) # to avoid: VisibleDeprecationWarning: using a # non-integer number instead of an integer will # result in an error in the future assert m % 2 == 0 # m must be even dev = 0 n = 0 for i in range(int(N-m)): s = 0 for d in range(int(m/2)): # inner sum pre = 1.0 / (float(m)/2 - float(d)) s += pre*pow(phase[i]-phase[i-d+int(m/2)] + phase[i+m]-phase[i+d+int(m/2)], 2) n = n+1 dev += s assert n == (N-m)*m/2 # N-m outer sums, m/2 inner sums dev = dev/(0.75*(N-m)*pow(m*tau0, 2)) # factor 0.75 used here? http://tf.nist.gov/general/pdf/1990.pdf # but not here? http://tf.nist.gov/timefreq/general/pdf/2220.pdf page 29 devs[idx] = np.sqrt(dev) deverrs[idx] = devs[idx] / np.sqrt(N-m) ns[idx] = n return remove_small_ns(taus_used, devs, deverrs, ns)
python
def theo1(data, rate=1.0, data_type="phase", taus=None): phase = input_to_phase(data, rate, data_type) tau0 = 1.0/rate (phase, ms, taus_used) = tau_generator(phase, rate, taus, even=True) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) N = len(phase) for idx, m in enumerate(ms): m = int(m) # to avoid: VisibleDeprecationWarning: using a # non-integer number instead of an integer will # result in an error in the future assert m % 2 == 0 # m must be even dev = 0 n = 0 for i in range(int(N-m)): s = 0 for d in range(int(m/2)): # inner sum pre = 1.0 / (float(m)/2 - float(d)) s += pre*pow(phase[i]-phase[i-d+int(m/2)] + phase[i+m]-phase[i+d+int(m/2)], 2) n = n+1 dev += s assert n == (N-m)*m/2 # N-m outer sums, m/2 inner sums dev = dev/(0.75*(N-m)*pow(m*tau0, 2)) # factor 0.75 used here? http://tf.nist.gov/general/pdf/1990.pdf # but not here? http://tf.nist.gov/timefreq/general/pdf/2220.pdf page 29 devs[idx] = np.sqrt(dev) deverrs[idx] = devs[idx] / np.sqrt(N-m) ns[idx] = n return remove_small_ns(taus_used, devs, deverrs, ns)
[ "def", "theo1", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "tau0", "=", "1.0", "/", "rate", "(", "phase", ",", "ms", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ",", "even", "=", "True", ")", "devs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "deverrs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ns", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "N", "=", "len", "(", "phase", ")", "for", "idx", ",", "m", "in", "enumerate", "(", "ms", ")", ":", "m", "=", "int", "(", "m", ")", "# to avoid: VisibleDeprecationWarning: using a", "# non-integer number instead of an integer will", "# result in an error in the future", "assert", "m", "%", "2", "==", "0", "# m must be even", "dev", "=", "0", "n", "=", "0", "for", "i", "in", "range", "(", "int", "(", "N", "-", "m", ")", ")", ":", "s", "=", "0", "for", "d", "in", "range", "(", "int", "(", "m", "/", "2", ")", ")", ":", "# inner sum", "pre", "=", "1.0", "/", "(", "float", "(", "m", ")", "/", "2", "-", "float", "(", "d", ")", ")", "s", "+=", "pre", "*", "pow", "(", "phase", "[", "i", "]", "-", "phase", "[", "i", "-", "d", "+", "int", "(", "m", "/", "2", ")", "]", "+", "phase", "[", "i", "+", "m", "]", "-", "phase", "[", "i", "+", "d", "+", "int", "(", "m", "/", "2", ")", "]", ",", "2", ")", "n", "=", "n", "+", "1", "dev", "+=", "s", "assert", "n", "==", "(", "N", "-", "m", ")", "*", "m", "/", "2", "# N-m outer sums, m/2 inner sums", "dev", "=", "dev", "/", "(", "0.75", "*", "(", "N", "-", "m", ")", "*", "pow", "(", "m", "*", "tau0", ",", "2", ")", ")", "# factor 0.75 used here? http://tf.nist.gov/general/pdf/1990.pdf", "# but not here? http://tf.nist.gov/timefreq/general/pdf/2220.pdf page 29", "devs", "[", "idx", "]", "=", "np", ".", "sqrt", "(", "dev", ")", "deverrs", "[", "idx", "]", "=", "devs", "[", "idx", "]", "/", "np", ".", "sqrt", "(", "N", "-", "m", ")", "ns", "[", "idx", "]", "=", "n", "return", "remove_small_ns", "(", "taus_used", ",", "devs", ",", "deverrs", ",", "ns", ")" ]
PRELIMINARY - REQUIRES FURTHER TESTING. Theo1 is a two-sample variance with improved confidence and extended averaging factor range. .. math:: \\sigma^2_{THEO1}(m\\tau_0) = { 1 \\over (m \\tau_0 )^2 (N-m) } \\sum_{i=1}^{N-m} \\sum_{\\delta=0}^{m/2-1} {1\\over m/2-\\delta}\\lbrace ({x}_{i} - x_{i-\\delta +m/2}) + (x_{i+m}- x_{i+\\delta +m/2}) \\rbrace^2 Where :math:`10<=m<=N-1` is even. FIXME: bias correction NIST SP 1065 eq (30) page 29 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation.
[ "PRELIMINARY", "-", "REQUIRES", "FURTHER", "TESTING", ".", "Theo1", "is", "a", "two", "-", "sample", "variance", "with", "improved", "confidence", "and", "extended", "averaging", "factor", "range", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L919-L987
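A usage sketch (synthetic data, illustrative only); theo1 only evaluates even averaging factors, so tau_generator(even=True) drops odd ones from the requested list.
import numpy as np
import allantools

phase = 1e-9 * np.random.randn(512)                 # synthetic white phase noise, seconds
taus_out, devs, errs, ns = allantools.theo1(phase, rate=1.0, data_type="phase", taus="octave")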
250,754
aewallin/allantools
allantools/allantools.py
tierms
def tierms(data, rate=1.0, data_type="phase", taus=None): """ Time Interval Error RMS. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. """ phase = input_to_phase(data, rate, data_type) (data, m, taus_used) = tau_generator(phase, rate, taus) count = len(phase) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(m): mj = int(mj) # This seems like an unusual way to phases = np.column_stack((phase[:-mj], phase[mj:])) p_max = np.max(phases, axis=1) p_min = np.min(phases, axis=1) phases = p_max - p_min tie = np.sqrt(np.mean(phases * phases)) ncount = count - mj devs[idx] = tie deverrs[idx] = 0 / np.sqrt(ncount) # TODO! I THINK THIS IS WRONG! ns[idx] = ncount return remove_small_ns(taus_used, devs, deverrs, ns)
python
def tierms(data, rate=1.0, data_type="phase", taus=None): phase = input_to_phase(data, rate, data_type) (data, m, taus_used) = tau_generator(phase, rate, taus) count = len(phase) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(m): mj = int(mj) # This seems like an unusual way to phases = np.column_stack((phase[:-mj], phase[mj:])) p_max = np.max(phases, axis=1) p_min = np.min(phases, axis=1) phases = p_max - p_min tie = np.sqrt(np.mean(phases * phases)) ncount = count - mj devs[idx] = tie deverrs[idx] = 0 / np.sqrt(ncount) # TODO! I THINK THIS IS WRONG! ns[idx] = ncount return remove_small_ns(taus_used, devs, deverrs, ns)
[ "def", "tierms", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "data", ",", "m", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ")", "count", "=", "len", "(", "phase", ")", "devs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "deverrs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ns", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "for", "idx", ",", "mj", "in", "enumerate", "(", "m", ")", ":", "mj", "=", "int", "(", "mj", ")", "# This seems like an unusual way to", "phases", "=", "np", ".", "column_stack", "(", "(", "phase", "[", ":", "-", "mj", "]", ",", "phase", "[", "mj", ":", "]", ")", ")", "p_max", "=", "np", ".", "max", "(", "phases", ",", "axis", "=", "1", ")", "p_min", "=", "np", ".", "min", "(", "phases", ",", "axis", "=", "1", ")", "phases", "=", "p_max", "-", "p_min", "tie", "=", "np", ".", "sqrt", "(", "np", ".", "mean", "(", "phases", "*", "phases", ")", ")", "ncount", "=", "count", "-", "mj", "devs", "[", "idx", "]", "=", "tie", "deverrs", "[", "idx", "]", "=", "0", "/", "np", ".", "sqrt", "(", "ncount", ")", "# TODO! I THINK THIS IS WRONG!", "ns", "[", "idx", "]", "=", "ncount", "return", "remove_small_ns", "(", "taus_used", ",", "devs", ",", "deverrs", ",", "ns", ")" ]
Time Interval Error RMS. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation.
[ "Time", "Interval", "Error", "RMS", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L990-L1033
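A usage sketch for the TIE RMS statistic (synthetic data, illustrative only).
import numpy as np
import allantools

phase = 1e-9 * np.random.randn(1000)                # synthetic phase data, seconds
taus_out, tie_rms, errs, ns = allantools.tierms(phase, rate=1.0, data_type="phase", taus="octave")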
250,755
aewallin/allantools
allantools/allantools.py
mtie
def mtie(data, rate=1.0, data_type="phase", taus=None): """ Maximum Time Interval Error. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Notes ----- this seems to correspond to Stable32 setting "Fast(u)" Stable32 also has "Decade" and "Octave" modes where the dataset is extended somehow? """ phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(m): rw = mtie_rolling_window(phase, int(mj + 1)) win_max = np.max(rw, axis=1) win_min = np.min(rw, axis=1) tie = win_max - win_min dev = np.max(tie) ncount = phase.shape[0] - mj devs[idx] = dev deverrs[idx] = dev / np.sqrt(ncount) ns[idx] = ncount return remove_small_ns(taus_used, devs, deverrs, ns)
python
def mtie(data, rate=1.0, data_type="phase", taus=None): phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(m): rw = mtie_rolling_window(phase, int(mj + 1)) win_max = np.max(rw, axis=1) win_min = np.min(rw, axis=1) tie = win_max - win_min dev = np.max(tie) ncount = phase.shape[0] - mj devs[idx] = dev deverrs[idx] = dev / np.sqrt(ncount) ns[idx] = ncount return remove_small_ns(taus_used, devs, deverrs, ns)
[ "def", "mtie", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "phase", ",", "m", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ")", "devs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "deverrs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ns", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "for", "idx", ",", "mj", "in", "enumerate", "(", "m", ")", ":", "rw", "=", "mtie_rolling_window", "(", "phase", ",", "int", "(", "mj", "+", "1", ")", ")", "win_max", "=", "np", ".", "max", "(", "rw", ",", "axis", "=", "1", ")", "win_min", "=", "np", ".", "min", "(", "rw", ",", "axis", "=", "1", ")", "tie", "=", "win_max", "-", "win_min", "dev", "=", "np", ".", "max", "(", "tie", ")", "ncount", "=", "phase", ".", "shape", "[", "0", "]", "-", "mj", "devs", "[", "idx", "]", "=", "dev", "deverrs", "[", "idx", "]", "=", "dev", "/", "np", ".", "sqrt", "(", "ncount", ")", "ns", "[", "idx", "]", "=", "ncount", "return", "remove_small_ns", "(", "taus_used", ",", "devs", ",", "deverrs", ",", "ns", ")" ]
Maximum Time Interval Error. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Notes ----- this seems to correspond to Stable32 setting "Fast(u)" Stable32 also has "Decade" and "Octave" modes where the dataset is extended somehow?
[ "Maximum", "Time", "Interval", "Error", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1061-L1101
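A usage sketch for MTIE (synthetic data, illustrative only); the per-tau rolling-window maximum grows quickly in cost with dataset length, so the example stays small.
import numpy as np
import allantools

phase = np.cumsum(1e-10 * np.random.randn(5000))    # synthetic random-walk phase, seconds
taus_out, mtie_vals, errs, ns = allantools.mtie(phase, rate=1.0, data_type="phase", taus="octave")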
250,756
aewallin/allantools
allantools/allantools.py
mtie_phase_fast
def mtie_phase_fast(phase, rate=1.0, data_type="phase", taus=None): """ fast binary decomposition algorithm for MTIE See: STEFANO BREGNI "Fast Algorithms for TVAR and MTIE Computation in Characterization of Network Synchronization Performance" """ rate = float(rate) phase = np.asarray(phase) k_max = int(np.floor(np.log2(len(phase)))) phase = phase[0:pow(2, k_max)] # truncate data to 2**k_max datapoints assert len(phase) == pow(2, k_max) #k = 1 taus = [ pow(2,k) for k in range(k_max)] #while k <= k_max: # tau = pow(2, k) # taus.append(tau) #print tau # k += 1 print("taus N=", len(taus), " ",taus) devs = np.zeros(len(taus)) deverrs = np.zeros(len(taus)) ns = np.zeros(len(taus)) taus_used = np.array(taus) # [(1.0/rate)*t for t in taus] # matrices to store results mtie_max = np.zeros((len(phase)-1, k_max)) mtie_min = np.zeros((len(phase)-1, k_max)) for kidx in range(k_max): k = kidx+1 imax = len(phase)-pow(2, k)+1 #print k, imax tie = np.zeros(imax) ns[kidx]=imax #print np.max( tie ) for i in range(imax): if k == 1: mtie_max[i, kidx] = max(phase[i], phase[i+1]) mtie_min[i, kidx] = min(phase[i], phase[i+1]) else: p = int(pow(2, k-1)) mtie_max[i, kidx] = max(mtie_max[i, kidx-1], mtie_max[i+p, kidx-1]) mtie_min[i, kidx] = min(mtie_min[i, kidx-1], mtie_min[i+p, kidx-1]) #for i in range(imax): tie[i] = mtie_max[i, kidx] - mtie_min[i, kidx] #print tie[i] devs[kidx] = np.amax(tie) # maximum along axis #print "maximum %2.4f" % devs[kidx] #print np.amax( tie ) #for tau in taus: #for devs = np.array(devs) print("devs N=",len(devs)," ",devs) print("taus N=", len(taus_used), " ",taus_used) return remove_small_ns(taus_used, devs, deverrs, ns)
python
def mtie_phase_fast(phase, rate=1.0, data_type="phase", taus=None): rate = float(rate) phase = np.asarray(phase) k_max = int(np.floor(np.log2(len(phase)))) phase = phase[0:pow(2, k_max)] # truncate data to 2**k_max datapoints assert len(phase) == pow(2, k_max) #k = 1 taus = [ pow(2,k) for k in range(k_max)] #while k <= k_max: # tau = pow(2, k) # taus.append(tau) #print tau # k += 1 print("taus N=", len(taus), " ",taus) devs = np.zeros(len(taus)) deverrs = np.zeros(len(taus)) ns = np.zeros(len(taus)) taus_used = np.array(taus) # [(1.0/rate)*t for t in taus] # matrices to store results mtie_max = np.zeros((len(phase)-1, k_max)) mtie_min = np.zeros((len(phase)-1, k_max)) for kidx in range(k_max): k = kidx+1 imax = len(phase)-pow(2, k)+1 #print k, imax tie = np.zeros(imax) ns[kidx]=imax #print np.max( tie ) for i in range(imax): if k == 1: mtie_max[i, kidx] = max(phase[i], phase[i+1]) mtie_min[i, kidx] = min(phase[i], phase[i+1]) else: p = int(pow(2, k-1)) mtie_max[i, kidx] = max(mtie_max[i, kidx-1], mtie_max[i+p, kidx-1]) mtie_min[i, kidx] = min(mtie_min[i, kidx-1], mtie_min[i+p, kidx-1]) #for i in range(imax): tie[i] = mtie_max[i, kidx] - mtie_min[i, kidx] #print tie[i] devs[kidx] = np.amax(tie) # maximum along axis #print "maximum %2.4f" % devs[kidx] #print np.amax( tie ) #for tau in taus: #for devs = np.array(devs) print("devs N=",len(devs)," ",devs) print("taus N=", len(taus_used), " ",taus_used) return remove_small_ns(taus_used, devs, deverrs, ns)
[ "def", "mtie_phase_fast", "(", "phase", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "rate", "=", "float", "(", "rate", ")", "phase", "=", "np", ".", "asarray", "(", "phase", ")", "k_max", "=", "int", "(", "np", ".", "floor", "(", "np", ".", "log2", "(", "len", "(", "phase", ")", ")", ")", ")", "phase", "=", "phase", "[", "0", ":", "pow", "(", "2", ",", "k_max", ")", "]", "# truncate data to 2**k_max datapoints", "assert", "len", "(", "phase", ")", "==", "pow", "(", "2", ",", "k_max", ")", "#k = 1", "taus", "=", "[", "pow", "(", "2", ",", "k", ")", "for", "k", "in", "range", "(", "k_max", ")", "]", "#while k <= k_max:", "# tau = pow(2, k)", "# taus.append(tau)", "#print tau", "# k += 1", "print", "(", "\"taus N=\"", ",", "len", "(", "taus", ")", ",", "\" \"", ",", "taus", ")", "devs", "=", "np", ".", "zeros", "(", "len", "(", "taus", ")", ")", "deverrs", "=", "np", ".", "zeros", "(", "len", "(", "taus", ")", ")", "ns", "=", "np", ".", "zeros", "(", "len", "(", "taus", ")", ")", "taus_used", "=", "np", ".", "array", "(", "taus", ")", "# [(1.0/rate)*t for t in taus]", "# matrices to store results", "mtie_max", "=", "np", ".", "zeros", "(", "(", "len", "(", "phase", ")", "-", "1", ",", "k_max", ")", ")", "mtie_min", "=", "np", ".", "zeros", "(", "(", "len", "(", "phase", ")", "-", "1", ",", "k_max", ")", ")", "for", "kidx", "in", "range", "(", "k_max", ")", ":", "k", "=", "kidx", "+", "1", "imax", "=", "len", "(", "phase", ")", "-", "pow", "(", "2", ",", "k", ")", "+", "1", "#print k, imax", "tie", "=", "np", ".", "zeros", "(", "imax", ")", "ns", "[", "kidx", "]", "=", "imax", "#print np.max( tie )", "for", "i", "in", "range", "(", "imax", ")", ":", "if", "k", "==", "1", ":", "mtie_max", "[", "i", ",", "kidx", "]", "=", "max", "(", "phase", "[", "i", "]", ",", "phase", "[", "i", "+", "1", "]", ")", "mtie_min", "[", "i", ",", "kidx", "]", "=", "min", "(", "phase", "[", "i", "]", ",", "phase", "[", "i", "+", "1", "]", ")", "else", ":", "p", "=", "int", "(", "pow", "(", "2", ",", "k", "-", "1", ")", ")", "mtie_max", "[", "i", ",", "kidx", "]", "=", "max", "(", "mtie_max", "[", "i", ",", "kidx", "-", "1", "]", ",", "mtie_max", "[", "i", "+", "p", ",", "kidx", "-", "1", "]", ")", "mtie_min", "[", "i", ",", "kidx", "]", "=", "min", "(", "mtie_min", "[", "i", ",", "kidx", "-", "1", "]", ",", "mtie_min", "[", "i", "+", "p", ",", "kidx", "-", "1", "]", ")", "#for i in range(imax):", "tie", "[", "i", "]", "=", "mtie_max", "[", "i", ",", "kidx", "]", "-", "mtie_min", "[", "i", ",", "kidx", "]", "#print tie[i]", "devs", "[", "kidx", "]", "=", "np", ".", "amax", "(", "tie", ")", "# maximum along axis", "#print \"maximum %2.4f\" % devs[kidx]", "#print np.amax( tie )", "#for tau in taus:", "#for", "devs", "=", "np", ".", "array", "(", "devs", ")", "print", "(", "\"devs N=\"", ",", "len", "(", "devs", ")", ",", "\" \"", ",", "devs", ")", "print", "(", "\"taus N=\"", ",", "len", "(", "taus_used", ")", ",", "\" \"", ",", "taus_used", ")", "return", "remove_small_ns", "(", "taus_used", ",", "devs", ",", "deverrs", ",", "ns", ")" ]
fast binary decomposition algorithm for MTIE See: STEFANO BREGNI "Fast Algorithms for TVAR and MTIE Computation in Characterization of Network Synchronization Performance"
[ "fast", "binary", "decomposition", "algorithm", "for", "MTIE" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1108-L1163
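The binary decomposition used above can be illustrated standalone; this sketch (not taken from the repository) builds window maxima and minima over spans of 2**k samples from two overlapping 2**(k-1) spans, which is the core of the Bregni algorithm.
import numpy as np

phase = np.random.randn(16)                         # synthetic data, length a power of two
k_max = int(np.floor(np.log2(len(phase))))
mx = phase.copy()                                   # running window maxima, window length 2**k
mn = phase.copy()                                   # running window minima
for k in range(1, k_max + 1):
    p = 2 ** (k - 1)
    mx = np.maximum(mx[:-p], mx[p:])                # a 2**k window is the union of two 2**(k-1) windows
    mn = np.minimum(mn[:-p], mn[p:])
    print("window 2**%d samples: MTIE = %.3f" % (k, np.max(mx - mn)))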
250,757
aewallin/allantools
allantools/allantools.py
gradev
def gradev(data, rate=1.0, data_type="phase", taus=None, ci=0.9, noisetype='wp'): """ gap resistant overlapping Allan deviation Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). Warning : phase data works better (frequency data is first trantformed into phase using numpy.cumsum() function, which can lead to poor results). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. ci: float the total confidence interval desired, i.e. if ci = 0.9, the bounds will be at 0.05 and 0.95. noisetype: string the type of noise desired: 'wp' returns white phase noise. 'wf' returns white frequency noise. 'fp' returns flicker phase noise. 'ff' returns flicker frequency noise. 'rf' returns random walk frequency noise. If the input is not recognized, it defaults to idealized, uncorrelated noise with (N-1) degrees of freedom. Returns ------- taus: np.array list of tau vales in seconds adev: np.array deviations [err_l, err_h] : list of len()==2, np.array the upper and lower bounds of the confidence interval taken as distances from the the estimated two sample variance. ns: np.array numper of terms n in the adev estimate. """ if (data_type == "freq"): print("Warning : phase data is preferred as input to gradev()") phase = input_to_phase(data, rate, data_type) (data, m, taus_used) = tau_generator(phase, rate, taus) ad = np.zeros_like(taus_used) ade_l = np.zeros_like(taus_used) ade_h = np.zeros_like(taus_used) adn = np.zeros_like(taus_used) for idx, mj in enumerate(m): (dev, deverr, n) = calc_gradev_phase(data, rate, mj, 1, ci, noisetype) # stride=1 for overlapping ADEV ad[idx] = dev ade_l[idx] = deverr[0] ade_h[idx] = deverr[1] adn[idx] = n # Note that errors are split in 2 arrays return remove_small_ns(taus_used, ad, [ade_l, ade_h], adn)
python
def gradev(data, rate=1.0, data_type="phase", taus=None, ci=0.9, noisetype='wp'): if (data_type == "freq"): print("Warning : phase data is preferred as input to gradev()") phase = input_to_phase(data, rate, data_type) (data, m, taus_used) = tau_generator(phase, rate, taus) ad = np.zeros_like(taus_used) ade_l = np.zeros_like(taus_used) ade_h = np.zeros_like(taus_used) adn = np.zeros_like(taus_used) for idx, mj in enumerate(m): (dev, deverr, n) = calc_gradev_phase(data, rate, mj, 1, ci, noisetype) # stride=1 for overlapping ADEV ad[idx] = dev ade_l[idx] = deverr[0] ade_h[idx] = deverr[1] adn[idx] = n # Note that errors are split in 2 arrays return remove_small_ns(taus_used, ad, [ade_l, ade_h], adn)
[ "def", "gradev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ",", "ci", "=", "0.9", ",", "noisetype", "=", "'wp'", ")", ":", "if", "(", "data_type", "==", "\"freq\"", ")", ":", "print", "(", "\"Warning : phase data is preferred as input to gradev()\"", ")", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "data", ",", "m", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ")", "ad", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ade_l", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ade_h", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "adn", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "for", "idx", ",", "mj", "in", "enumerate", "(", "m", ")", ":", "(", "dev", ",", "deverr", ",", "n", ")", "=", "calc_gradev_phase", "(", "data", ",", "rate", ",", "mj", ",", "1", ",", "ci", ",", "noisetype", ")", "# stride=1 for overlapping ADEV", "ad", "[", "idx", "]", "=", "dev", "ade_l", "[", "idx", "]", "=", "deverr", "[", "0", "]", "ade_h", "[", "idx", "]", "=", "deverr", "[", "1", "]", "adn", "[", "idx", "]", "=", "n", "# Note that errors are split in 2 arrays", "return", "remove_small_ns", "(", "taus_used", ",", "ad", ",", "[", "ade_l", ",", "ade_h", "]", ",", "adn", ")" ]
gap resistant overlapping Allan deviation Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). Warning: phase data works better (frequency data is first transformed into phase using numpy.cumsum() function, which can lead to poor results). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. ci: float the total confidence interval desired, i.e. if ci = 0.9, the bounds will be at 0.05 and 0.95. noisetype: string the type of noise desired: 'wp' returns white phase noise. 'wf' returns white frequency noise. 'fp' returns flicker phase noise. 'ff' returns flicker frequency noise. 'rf' returns random walk frequency noise. If the input is not recognized, it defaults to idealized, uncorrelated noise with (N-1) degrees of freedom. Returns ------- taus: np.array list of tau values in seconds adev: np.array deviations [err_l, err_h] : list of len()==2, np.array the upper and lower bounds of the confidence interval taken as distances from the estimated two sample variance. ns: np.array number of terms n in the adev estimate.
[ "gap", "resistant", "overlapping", "Allan", "deviation" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1173-L1242
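A gap-resistance sketch (synthetic data, illustrative only): phase data with a block of NaNs still yields a deviation estimate, and the error term comes back as a two-element [lower, upper] list as described in the Returns section.
import numpy as np
import allantools

phase = np.cumsum(1e-10 * np.random.randn(1000))    # synthetic phase data, seconds
phase[200:220] = np.nan                             # simulate a measurement gap
taus_out, devs, errs, ns = allantools.gradev(phase, rate=1.0, data_type="phase",
                                             taus="octave", ci=0.9, noisetype='wp')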
250,758
aewallin/allantools
allantools/allantools.py
input_to_phase
def input_to_phase(data, rate, data_type): """ Take either phase or frequency as input and return phase """ if data_type == "phase": return data elif data_type == "freq": return frequency2phase(data, rate) else: raise Exception("unknown data_type: " + data_type)
python
def input_to_phase(data, rate, data_type): if data_type == "phase": return data elif data_type == "freq": return frequency2phase(data, rate) else: raise Exception("unknown data_type: " + data_type)
[ "def", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", ":", "if", "data_type", "==", "\"phase\"", ":", "return", "data", "elif", "data_type", "==", "\"freq\"", ":", "return", "frequency2phase", "(", "data", ",", "rate", ")", "else", ":", "raise", "Exception", "(", "\"unknown data_type: \"", "+", "data_type", ")" ]
Take either phase or frequency as input and return phase
[ "Take", "either", "phase", "or", "frequency", "as", "input", "and", "return", "phase" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1297-L1305
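A trivial sketch of the dispatcher (assuming it is imported from the allantools.allantools module, since it is an internal helper): frequency input is integrated to phase, phase input is returned untouched.
import numpy as np
from allantools.allantools import input_to_phase    # assumed import path for this internal helper

freq = 1e-11 * np.random.randn(100)                 # synthetic fractional frequency data
x = input_to_phase(freq, 1.0, "freq")               # converted through frequency2phase()
same = input_to_phase(x, 1.0, "phase")              # passes straight through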
250,759
aewallin/allantools
allantools/allantools.py
trim_data
def trim_data(x): """ Trim leading and trailing NaNs from dataset This is done by browsing the array from each end and store the index of the first non-NaN in each case, the return the appropriate slice of the array """ # Find indices for first and last valid data first = 0 while np.isnan(x[first]): first += 1 last = len(x) while np.isnan(x[last - 1]): last -= 1 return x[first:last]
python
def trim_data(x): # Find indices for first and last valid data first = 0 while np.isnan(x[first]): first += 1 last = len(x) while np.isnan(x[last - 1]): last -= 1 return x[first:last]
[ "def", "trim_data", "(", "x", ")", ":", "# Find indices for first and last valid data", "first", "=", "0", "while", "np", ".", "isnan", "(", "x", "[", "first", "]", ")", ":", "first", "+=", "1", "last", "=", "len", "(", "x", ")", "while", "np", ".", "isnan", "(", "x", "[", "last", "-", "1", "]", ")", ":", "last", "-=", "1", "return", "x", "[", "first", ":", "last", "]" ]
Trim leading and trailing NaNs from dataset This is done by browsing the array from each end and storing the index of the first non-NaN in each case, then returning the appropriate slice of the array
[ "Trim", "leading", "and", "trailing", "NaNs", "from", "dataset", "This", "is", "done", "by", "browsing", "the", "array", "from", "each", "end", "and", "storing", "the", "index", "of", "the", "first", "non", "-", "NaN", "in", "each", "case", "then", "returning", "the", "appropriate", "slice", "of", "the", "array" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1498-L1511
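A small sketch (assuming the same internal import path): leading and trailing NaNs are dropped, interior NaNs are kept.
import numpy as np
from allantools.allantools import trim_data         # assumed import path for this internal helper

x = np.array([np.nan, np.nan, 1.0, 2.0, np.nan, 3.0, np.nan])
print(trim_data(x))                                 # -> [ 1.  2. nan  3.]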
250,760
aewallin/allantools
allantools/allantools.py
three_cornered_hat_phase
def three_cornered_hat_phase(phasedata_ab, phasedata_bc, phasedata_ca, rate, taus, function): """ Three Cornered Hat Method Given three clocks A, B, C, we seek to find their variances :math:`\\sigma^2_A`, :math:`\\sigma^2_B`, :math:`\\sigma^2_C`. We measure three phase differences, assuming no correlation between the clocks, the measurements have variances: .. math:: \\sigma^2_{AB} = \\sigma^2_{A} + \\sigma^2_{B} \\sigma^2_{BC} = \\sigma^2_{B} + \\sigma^2_{C} \\sigma^2_{CA} = \\sigma^2_{C} + \\sigma^2_{A} Which allows solving for the variance of one clock as: .. math:: \\sigma^2_{A} = {1 \\over 2} ( \\sigma^2_{AB} + \\sigma^2_{CA} - \\sigma^2_{BC} ) and similarly cyclic permutations for :math:`\\sigma^2_B` and :math:`\\sigma^2_C` Parameters ---------- phasedata_ab: np.array phase measurements between clock A and B, in seconds phasedata_bc: np.array phase measurements between clock B and C, in seconds phasedata_ca: np.array phase measurements between clock C and A, in seconds rate: float The sampling rate for phase, in Hz taus: np.array The tau values for deviations, in seconds function: allantools deviation function The type of statistic to compute, e.g. allantools.oadev Returns ------- tau_ab: np.array Tau values corresponding to output deviations dev_a: np.array List of computed values for clock A References ---------- http://www.wriley.com/3-CornHat.htm """ (tau_ab, dev_ab, err_ab, ns_ab) = function(phasedata_ab, data_type='phase', rate=rate, taus=taus) (tau_bc, dev_bc, err_bc, ns_bc) = function(phasedata_bc, data_type='phase', rate=rate, taus=taus) (tau_ca, dev_ca, err_ca, ns_ca) = function(phasedata_ca, data_type='phase', rate=rate, taus=taus) var_ab = dev_ab * dev_ab var_bc = dev_bc * dev_bc var_ca = dev_ca * dev_ca assert len(var_ab) == len(var_bc) == len(var_ca) var_a = 0.5 * (var_ab + var_ca - var_bc) var_a[var_a < 0] = 0 # don't return imaginary deviations (?) dev_a = np.sqrt(var_a) err_a = [d/np.sqrt(nn) for (d, nn) in zip(dev_a, ns_ab)] return tau_ab, dev_a, err_a, ns_ab
python
def three_cornered_hat_phase(phasedata_ab, phasedata_bc, phasedata_ca, rate, taus, function): (tau_ab, dev_ab, err_ab, ns_ab) = function(phasedata_ab, data_type='phase', rate=rate, taus=taus) (tau_bc, dev_bc, err_bc, ns_bc) = function(phasedata_bc, data_type='phase', rate=rate, taus=taus) (tau_ca, dev_ca, err_ca, ns_ca) = function(phasedata_ca, data_type='phase', rate=rate, taus=taus) var_ab = dev_ab * dev_ab var_bc = dev_bc * dev_bc var_ca = dev_ca * dev_ca assert len(var_ab) == len(var_bc) == len(var_ca) var_a = 0.5 * (var_ab + var_ca - var_bc) var_a[var_a < 0] = 0 # don't return imaginary deviations (?) dev_a = np.sqrt(var_a) err_a = [d/np.sqrt(nn) for (d, nn) in zip(dev_a, ns_ab)] return tau_ab, dev_a, err_a, ns_ab
[ "def", "three_cornered_hat_phase", "(", "phasedata_ab", ",", "phasedata_bc", ",", "phasedata_ca", ",", "rate", ",", "taus", ",", "function", ")", ":", "(", "tau_ab", ",", "dev_ab", ",", "err_ab", ",", "ns_ab", ")", "=", "function", "(", "phasedata_ab", ",", "data_type", "=", "'phase'", ",", "rate", "=", "rate", ",", "taus", "=", "taus", ")", "(", "tau_bc", ",", "dev_bc", ",", "err_bc", ",", "ns_bc", ")", "=", "function", "(", "phasedata_bc", ",", "data_type", "=", "'phase'", ",", "rate", "=", "rate", ",", "taus", "=", "taus", ")", "(", "tau_ca", ",", "dev_ca", ",", "err_ca", ",", "ns_ca", ")", "=", "function", "(", "phasedata_ca", ",", "data_type", "=", "'phase'", ",", "rate", "=", "rate", ",", "taus", "=", "taus", ")", "var_ab", "=", "dev_ab", "*", "dev_ab", "var_bc", "=", "dev_bc", "*", "dev_bc", "var_ca", "=", "dev_ca", "*", "dev_ca", "assert", "len", "(", "var_ab", ")", "==", "len", "(", "var_bc", ")", "==", "len", "(", "var_ca", ")", "var_a", "=", "0.5", "*", "(", "var_ab", "+", "var_ca", "-", "var_bc", ")", "var_a", "[", "var_a", "<", "0", "]", "=", "0", "# don't return imaginary deviations (?)", "dev_a", "=", "np", ".", "sqrt", "(", "var_a", ")", "err_a", "=", "[", "d", "/", "np", ".", "sqrt", "(", "nn", ")", "for", "(", "d", ",", "nn", ")", "in", "zip", "(", "dev_a", ",", "ns_ab", ")", "]", "return", "tau_ab", ",", "dev_a", ",", "err_a", ",", "ns_ab" ]
Three Cornered Hat Method Given three clocks A, B, C, we seek to find their variances :math:`\\sigma^2_A`, :math:`\\sigma^2_B`, :math:`\\sigma^2_C`. We measure three phase differences, assuming no correlation between the clocks, the measurements have variances: .. math:: \\sigma^2_{AB} = \\sigma^2_{A} + \\sigma^2_{B} \\sigma^2_{BC} = \\sigma^2_{B} + \\sigma^2_{C} \\sigma^2_{CA} = \\sigma^2_{C} + \\sigma^2_{A} Which allows solving for the variance of one clock as: .. math:: \\sigma^2_{A} = {1 \\over 2} ( \\sigma^2_{AB} + \\sigma^2_{CA} - \\sigma^2_{BC} ) and similarly cyclic permutations for :math:`\\sigma^2_B` and :math:`\\sigma^2_C` Parameters ---------- phasedata_ab: np.array phase measurements between clock A and B, in seconds phasedata_bc: np.array phase measurements between clock B and C, in seconds phasedata_ca: np.array phase measurements between clock C and A, in seconds rate: float The sampling rate for phase, in Hz taus: np.array The tau values for deviations, in seconds function: allantools deviation function The type of statistic to compute, e.g. allantools.oadev Returns ------- tau_ab: np.array Tau values corresponding to output deviations dev_a: np.array List of computed values for clock A References ---------- http://www.wriley.com/3-CornHat.htm
[ "Three", "Cornered", "Hat", "Method" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1513-L1588
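A usage sketch with three synthetic, independent clocks (illustrative only); any deviation function with the (data, data_type, rate, taus) signature can be passed as the function argument, allantools.oadev is used here.
import numpy as np
import allantools

n = 10000
xa = np.cumsum(1e-10 * np.random.randn(n))          # synthetic phase of clocks A, B, C, in seconds
xb = np.cumsum(2e-10 * np.random.randn(n))
xc = np.cumsum(3e-10 * np.random.randn(n))
taus_ab, dev_a, err_a, ns = allantools.three_cornered_hat_phase(
    xa - xb, xb - xc, xc - xa, rate=1.0, taus="octave", function=allantools.oadev)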
250,761
aewallin/allantools
allantools/allantools.py
frequency2phase
def frequency2phase(freqdata, rate): """ integrate fractional frequency data and output phase data Parameters ---------- freqdata: np.array Data array of fractional frequency measurements (nondimensional) rate: float The sampling rate for phase or frequency, in Hz Returns ------- phasedata: np.array Time integral of fractional frequency data, i.e. phase (time) data in units of seconds. For phase in units of radians, see phase2radians() """ dt = 1.0 / float(rate) # Protect against NaN values in input array (issue #60) # Reintroduces data trimming as in commit 503cb82 freqdata = trim_data(freqdata) phasedata = np.cumsum(freqdata) * dt phasedata = np.insert(phasedata, 0, 0) # FIXME: why do we do this? # so that phase starts at zero and len(phase)=len(freq)+1 ?? return phasedata
python
def frequency2phase(freqdata, rate): dt = 1.0 / float(rate) # Protect against NaN values in input array (issue #60) # Reintroduces data trimming as in commit 503cb82 freqdata = trim_data(freqdata) phasedata = np.cumsum(freqdata) * dt phasedata = np.insert(phasedata, 0, 0) # FIXME: why do we do this? # so that phase starts at zero and len(phase)=len(freq)+1 ?? return phasedata
[ "def", "frequency2phase", "(", "freqdata", ",", "rate", ")", ":", "dt", "=", "1.0", "/", "float", "(", "rate", ")", "# Protect against NaN values in input array (issue #60)", "# Reintroduces data trimming as in commit 503cb82", "freqdata", "=", "trim_data", "(", "freqdata", ")", "phasedata", "=", "np", ".", "cumsum", "(", "freqdata", ")", "*", "dt", "phasedata", "=", "np", ".", "insert", "(", "phasedata", ",", "0", ",", "0", ")", "# FIXME: why do we do this?", "# so that phase starts at zero and len(phase)=len(freq)+1 ??", "return", "phasedata" ]
integrate fractional frequency data and output phase data Parameters ---------- freqdata: np.array Data array of fractional frequency measurements (nondimensional) rate: float The sampling rate for phase or frequency, in Hz Returns ------- phasedata: np.array Time integral of fractional frequency data, i.e. phase (time) data in units of seconds. For phase in units of radians, see phase2radians()
[ "integrate", "fractional", "frequency", "data", "and", "output", "phase", "data" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1595-L1619
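A short sketch (synthetic data, illustrative only); the inserted leading zero means the returned phase array is one sample longer than the frequency input, as the code comment notes.
import numpy as np
import allantools

freq = 1e-11 * np.random.randn(100)                 # synthetic fractional frequency data
phase = allantools.frequency2phase(freq, 1.0)
assert len(phase) == len(freq) + 1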
250,762
aewallin/allantools
allantools/allantools.py
phase2radians
def phase2radians(phasedata, v0): """ Convert phase in seconds to phase in radians Parameters ---------- phasedata: np.array Data array of phase in seconds v0: float Nominal oscillator frequency in Hz Returns ------- fi: phase data in radians """ fi = [2*np.pi*v0*xx for xx in phasedata] return fi
python
def phase2radians(phasedata, v0): fi = [2*np.pi*v0*xx for xx in phasedata] return fi
[ "def", "phase2radians", "(", "phasedata", ",", "v0", ")", ":", "fi", "=", "[", "2", "*", "np", ".", "pi", "*", "v0", "*", "xx", "for", "xx", "in", "phasedata", "]", "return", "fi" ]
Convert phase in seconds to phase in radians Parameters ---------- phasedata: np.array Data array of phase in seconds v0: float Nominal oscillator frequency in Hz Returns ------- fi: phase data in radians
[ "Convert", "phase", "in", "seconds", "to", "phase", "in", "radians" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1621-L1637
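A short sketch (illustrative values only; the 10 MHz nominal frequency is an assumption for the example).
import allantools

x_seconds = [1.2e-9, 1.3e-9, 1.1e-9]                # phase data in seconds
v0 = 10e6                                           # assumed nominal oscillator frequency, Hz
x_radians = allantools.phase2radians(x_seconds, v0)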
250,763
aewallin/allantools
allantools/allantools.py
frequency2fractional
def frequency2fractional(frequency, mean_frequency=-1): """ Convert frequency in Hz to fractional frequency Parameters ---------- frequency: np.array Data array of frequency in Hz mean_frequency: float (optional) The nominal mean frequency, in Hz if omitted, defaults to mean frequency=np.mean(frequency) Returns ------- y: Data array of fractional frequency """ if mean_frequency == -1: mu = np.mean(frequency) else: mu = mean_frequency y = [(x-mu)/mu for x in frequency] return y
python
def frequency2fractional(frequency, mean_frequency=-1): if mean_frequency == -1: mu = np.mean(frequency) else: mu = mean_frequency y = [(x-mu)/mu for x in frequency] return y
[ "def", "frequency2fractional", "(", "frequency", ",", "mean_frequency", "=", "-", "1", ")", ":", "if", "mean_frequency", "==", "-", "1", ":", "mu", "=", "np", ".", "mean", "(", "frequency", ")", "else", ":", "mu", "=", "mean_frequency", "y", "=", "[", "(", "x", "-", "mu", ")", "/", "mu", "for", "x", "in", "frequency", "]", "return", "y" ]
Convert frequency in Hz to fractional frequency Parameters ---------- frequency: np.array Data array of frequency in Hz mean_frequency: float (optional) The nominal mean frequency, in Hz if omitted, defaults to mean frequency=np.mean(frequency) Returns ------- y: Data array of fractional frequency
[ "Convert", "frequency", "in", "Hz", "to", "fractional", "frequency" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1657-L1678
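A sketch chaining the three allantools conversion helpers shown in the records above (frequency2fractional, frequency2phase, phase2radians); the 10 MHz nominal frequency and the noise level are made-up example values:

import numpy as np
import allantools

f_hz = 10e6 + np.random.normal(0.0, 0.01, 500)          # absolute frequency readings in Hz
y = allantools.frequency2fractional(f_hz)                # dimensionless fractional frequency
x = allantools.frequency2phase(np.array(y), rate=1.0)    # phase (time) data in seconds
fi = allantools.phase2radians(x, v0=10e6)                # phase in radians for a 10 MHz oscillator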
250,764
aewallin/allantools
allantools/dataset.py
Dataset.set_input
def set_input(self, data, rate=1.0, data_type="phase", taus=None): """ Optionnal method if you chose not to set inputs on init Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional) rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic """ self.inp["data"] = data self.inp["rate"] = rate self.inp["data_type"] = data_type self.inp["taus"] = taus
python
def set_input(self, data, rate=1.0, data_type="phase", taus=None): self.inp["data"] = data self.inp["rate"] = rate self.inp["data_type"] = data_type self.inp["taus"] = taus
[ "def", "set_input", "(", "self", ",", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "self", ".", "inp", "[", "\"data\"", "]", "=", "data", "self", ".", "inp", "[", "\"rate\"", "]", "=", "rate", "self", ".", "inp", "[", "\"data_type\"", "]", "=", "data_type", "self", ".", "inp", "[", "\"taus\"", "]", "=", "taus" ]
Optional method if you choose not to set inputs on init Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional) rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic
[ "Optionnal", "method", "if", "you", "chose", "not", "to", "set", "inputs", "on", "init" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/dataset.py#L93-L113
250,765
aewallin/allantools
allantools/dataset.py
Dataset.compute
def compute(self, function): """Evaluate the passed function with the supplied data. Stores result in self.out. Parameters ---------- function: str Name of the :mod:`allantools` function to evaluate Returns ------- result: dict The results of the calculation. """ try: func = getattr(allantools, function) except AttributeError: raise AttributeError("function must be defined in allantools") whitelisted = ["theo1", "mtie", "tierms"] if function[-3:] != "dev" and function not in whitelisted: # this should probably raise a custom exception type so # it's easier to distinguish from other bad things raise RuntimeError("function must be one of the 'dev' functions") result = func(self.inp["data"], rate=self.inp["rate"], data_type=self.inp["data_type"], taus=self.inp["taus"]) keys = ["taus", "stat", "stat_err", "stat_n"] result = {key: result[i] for i, key in enumerate(keys)} self.out = result.copy() self.out["stat_id"] = function return result
python
def compute(self, function): try: func = getattr(allantools, function) except AttributeError: raise AttributeError("function must be defined in allantools") whitelisted = ["theo1", "mtie", "tierms"] if function[-3:] != "dev" and function not in whitelisted: # this should probably raise a custom exception type so # it's easier to distinguish from other bad things raise RuntimeError("function must be one of the 'dev' functions") result = func(self.inp["data"], rate=self.inp["rate"], data_type=self.inp["data_type"], taus=self.inp["taus"]) keys = ["taus", "stat", "stat_err", "stat_n"] result = {key: result[i] for i, key in enumerate(keys)} self.out = result.copy() self.out["stat_id"] = function return result
[ "def", "compute", "(", "self", ",", "function", ")", ":", "try", ":", "func", "=", "getattr", "(", "allantools", ",", "function", ")", "except", "AttributeError", ":", "raise", "AttributeError", "(", "\"function must be defined in allantools\"", ")", "whitelisted", "=", "[", "\"theo1\"", ",", "\"mtie\"", ",", "\"tierms\"", "]", "if", "function", "[", "-", "3", ":", "]", "!=", "\"dev\"", "and", "function", "not", "in", "whitelisted", ":", "# this should probably raise a custom exception type so", "# it's easier to distinguish from other bad things", "raise", "RuntimeError", "(", "\"function must be one of the 'dev' functions\"", ")", "result", "=", "func", "(", "self", ".", "inp", "[", "\"data\"", "]", ",", "rate", "=", "self", ".", "inp", "[", "\"rate\"", "]", ",", "data_type", "=", "self", ".", "inp", "[", "\"data_type\"", "]", ",", "taus", "=", "self", ".", "inp", "[", "\"taus\"", "]", ")", "keys", "=", "[", "\"taus\"", ",", "\"stat\"", ",", "\"stat_err\"", ",", "\"stat_n\"", "]", "result", "=", "{", "key", ":", "result", "[", "i", "]", "for", "i", ",", "key", "in", "enumerate", "(", "keys", ")", "}", "self", ".", "out", "=", "result", ".", "copy", "(", ")", "self", ".", "out", "[", "\"stat_id\"", "]", "=", "function", "return", "result" ]
Evaluate the passed function with the supplied data. Stores result in self.out. Parameters ---------- function: str Name of the :mod:`allantools` function to evaluate Returns ------- result: dict The results of the calculation.
[ "Evaluate", "the", "passed", "function", "with", "the", "supplied", "data", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/dataset.py#L115-L148
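A usage sketch for the Dataset records above, assuming the constructor accepts the same keyword arguments as set_input and that "octave" is accepted for automatic tau selection; the result keys follow the compute() code shown:

import numpy as np
import allantools

x = np.cumsum(np.random.normal(0.0, 1e-9, 4096))    # synthetic phase data at a 1 Hz rate
ds = allantools.Dataset(data=x, rate=1.0, data_type="phase", taus="octave")
result = ds.compute("oadev")                          # any allantools '*dev' name passes the check
print(result["taus"])
print(result["stat"])                                 # deviations; stat_err and stat_n are also returned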
250,766
aewallin/allantools
examples/noise-color_and_PSD.py
many_psds
def many_psds(k=2,fs=1.0, b0=1.0, N=1024): """ compute average of many PSDs """ psd=[] for j in range(k): print j x = noise.white(N=2*4096,b0=b0,fs=fs) f, tmp = noise.numpy_psd(x,fs) if j==0: psd = tmp else: psd = psd + tmp return f, psd/k
python
def many_psds(k=2,fs=1.0, b0=1.0, N=1024): psd=[] for j in range(k): print j x = noise.white(N=2*4096,b0=b0,fs=fs) f, tmp = noise.numpy_psd(x,fs) if j==0: psd = tmp else: psd = psd + tmp return f, psd/k
[ "def", "many_psds", "(", "k", "=", "2", ",", "fs", "=", "1.0", ",", "b0", "=", "1.0", ",", "N", "=", "1024", ")", ":", "psd", "=", "[", "]", "for", "j", "in", "range", "(", "k", ")", ":", "print", "j", "x", "=", "noise", ".", "white", "(", "N", "=", "2", "*", "4096", ",", "b0", "=", "b0", ",", "fs", "=", "fs", ")", "f", ",", "tmp", "=", "noise", ".", "numpy_psd", "(", "x", ",", "fs", ")", "if", "j", "==", "0", ":", "psd", "=", "tmp", "else", ":", "psd", "=", "psd", "+", "tmp", "return", "f", ",", "psd", "/", "k" ]
compute average of many PSDs
[ "compute", "average", "of", "many", "PSDs" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/examples/noise-color_and_PSD.py#L7-L18
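The many_psds snippet above is Python 2 (bare "print j"); a Python 3 sketch of the same averaging loop, assuming the noise helpers are importable as allantools.noise and keeping the record's fixed 2*4096 sample count:

from allantools import noise

def many_psds_py3(k=2, fs=1.0, b0=1.0, N=1024):
    """Average k periodograms of white noise."""
    psd = None
    for j in range(k):
        x = noise.white(N=2 * 4096, b0=b0, fs=fs)
        f, tmp = noise.numpy_psd(x, fs)
        psd = tmp if psd is None else psd + tmp
    return f, psd / k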
250,767
singnet/snet-cli
snet_cli/commands.py
OrganizationCommand.list_my
def list_my(self): """ Find organization that has the current identity as the owner or as the member """ org_list = self.call_contract_command("Registry", "listOrganizations", []) rez_owner = [] rez_member = [] for idx, org_id in enumerate(org_list): (found, org_id, org_name, owner, members, serviceNames, repositoryNames) = self.call_contract_command("Registry", "getOrganizationById", [org_id]) if (not found): raise Exception("Organization was removed during this call. Please retry."); if self.ident.address == owner: rez_owner.append((org_name, bytes32_to_str(org_id))) if self.ident.address in members: rez_member.append((org_name, bytes32_to_str(org_id))) if (rez_owner): self._printout("# Organizations you are the owner of") self._printout("# OrgName OrgId") for n,i in rez_owner: self._printout("%s %s"%(n,i)) if (rez_member): self._printout("# Organizations you are the member of") self._printout("# OrgName OrgId") for n,i in rez_member: self._printout("%s %s"%(n,i))
python
def list_my(self): org_list = self.call_contract_command("Registry", "listOrganizations", []) rez_owner = [] rez_member = [] for idx, org_id in enumerate(org_list): (found, org_id, org_name, owner, members, serviceNames, repositoryNames) = self.call_contract_command("Registry", "getOrganizationById", [org_id]) if (not found): raise Exception("Organization was removed during this call. Please retry."); if self.ident.address == owner: rez_owner.append((org_name, bytes32_to_str(org_id))) if self.ident.address in members: rez_member.append((org_name, bytes32_to_str(org_id))) if (rez_owner): self._printout("# Organizations you are the owner of") self._printout("# OrgName OrgId") for n,i in rez_owner: self._printout("%s %s"%(n,i)) if (rez_member): self._printout("# Organizations you are the member of") self._printout("# OrgName OrgId") for n,i in rez_member: self._printout("%s %s"%(n,i))
[ "def", "list_my", "(", "self", ")", ":", "org_list", "=", "self", ".", "call_contract_command", "(", "\"Registry\"", ",", "\"listOrganizations\"", ",", "[", "]", ")", "rez_owner", "=", "[", "]", "rez_member", "=", "[", "]", "for", "idx", ",", "org_id", "in", "enumerate", "(", "org_list", ")", ":", "(", "found", ",", "org_id", ",", "org_name", ",", "owner", ",", "members", ",", "serviceNames", ",", "repositoryNames", ")", "=", "self", ".", "call_contract_command", "(", "\"Registry\"", ",", "\"getOrganizationById\"", ",", "[", "org_id", "]", ")", "if", "(", "not", "found", ")", ":", "raise", "Exception", "(", "\"Organization was removed during this call. Please retry.\"", ")", "if", "self", ".", "ident", ".", "address", "==", "owner", ":", "rez_owner", ".", "append", "(", "(", "org_name", ",", "bytes32_to_str", "(", "org_id", ")", ")", ")", "if", "self", ".", "ident", ".", "address", "in", "members", ":", "rez_member", ".", "append", "(", "(", "org_name", ",", "bytes32_to_str", "(", "org_id", ")", ")", ")", "if", "(", "rez_owner", ")", ":", "self", ".", "_printout", "(", "\"# Organizations you are the owner of\"", ")", "self", ".", "_printout", "(", "\"# OrgName OrgId\"", ")", "for", "n", ",", "i", "in", "rez_owner", ":", "self", ".", "_printout", "(", "\"%s %s\"", "%", "(", "n", ",", "i", ")", ")", "if", "(", "rez_member", ")", ":", "self", ".", "_printout", "(", "\"# Organizations you are the member of\"", ")", "self", ".", "_printout", "(", "\"# OrgName OrgId\"", ")", "for", "n", ",", "i", "in", "rez_member", ":", "self", ".", "_printout", "(", "\"%s %s\"", "%", "(", "n", ",", "i", ")", ")" ]
Find organizations that have the current identity as the owner or as a member
[ "Find", "organization", "that", "has", "the", "current", "identity", "as", "the", "owner", "or", "as", "the", "member" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/commands.py#L541-L567
250,768
singnet/snet-cli
snet_cli/mpe_service_metadata.py
MPEServiceMetadata.add_group
def add_group(self, group_name, payment_address): """ Return new group_id in base64 """ if (self.is_group_name_exists(group_name)): raise Exception("the group \"%s\" is already present"%str(group_name)) group_id_base64 = base64.b64encode(secrets.token_bytes(32)) self.m["groups"] += [{"group_name" : group_name , "group_id" : group_id_base64.decode("ascii"), "payment_address" : payment_address}] return group_id_base64
python
def add_group(self, group_name, payment_address): if (self.is_group_name_exists(group_name)): raise Exception("the group \"%s\" is already present"%str(group_name)) group_id_base64 = base64.b64encode(secrets.token_bytes(32)) self.m["groups"] += [{"group_name" : group_name , "group_id" : group_id_base64.decode("ascii"), "payment_address" : payment_address}] return group_id_base64
[ "def", "add_group", "(", "self", ",", "group_name", ",", "payment_address", ")", ":", "if", "(", "self", ".", "is_group_name_exists", "(", "group_name", ")", ")", ":", "raise", "Exception", "(", "\"the group \\\"%s\\\" is already present\"", "%", "str", "(", "group_name", ")", ")", "group_id_base64", "=", "base64", ".", "b64encode", "(", "secrets", ".", "token_bytes", "(", "32", ")", ")", "self", ".", "m", "[", "\"groups\"", "]", "+=", "[", "{", "\"group_name\"", ":", "group_name", ",", "\"group_id\"", ":", "group_id_base64", ".", "decode", "(", "\"ascii\"", ")", ",", "\"payment_address\"", ":", "payment_address", "}", "]", "return", "group_id_base64" ]
Return new group_id in base64
[ "Return", "new", "group_id", "in", "base64" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_service_metadata.py#L75-L83
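A standalone illustration of the group-id generation used in add_group above (32 random bytes, base64-encoded), using only the standard library rather than snet_cli:

import base64
import secrets

group_id_base64 = base64.b64encode(secrets.token_bytes(32))
print(group_id_base64.decode("ascii"))   # a 44-character base64 string, as stored in the metadata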
250,769
singnet/snet-cli
snet_cli/mpe_service_metadata.py
MPEServiceMetadata.is_group_name_exists
def is_group_name_exists(self, group_name): """ check if group with given name is already exists """ groups = self.m["groups"] for g in groups: if (g["group_name"] == group_name): return True return False
python
def is_group_name_exists(self, group_name): groups = self.m["groups"] for g in groups: if (g["group_name"] == group_name): return True return False
[ "def", "is_group_name_exists", "(", "self", ",", "group_name", ")", ":", "groups", "=", "self", ".", "m", "[", "\"groups\"", "]", "for", "g", "in", "groups", ":", "if", "(", "g", "[", "\"group_name\"", "]", "==", "group_name", ")", ":", "return", "True", "return", "False" ]
check if a group with the given name already exists
[ "check", "if", "group", "with", "given", "name", "is", "already", "exists" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_service_metadata.py#L103-L109
250,770
singnet/snet-cli
snet_cli/mpe_service_metadata.py
MPEServiceMetadata.get_group_name_nonetrick
def get_group_name_nonetrick(self, group_name = None): """ In all getter function in case of single payment group, group_name can be None """ groups = self.m["groups"] if (len(groups) == 0): raise Exception("Cannot find any groups in metadata") if (not group_name): if (len(groups) > 1): raise Exception("We have more than one payment group in metadata, so group_name should be specified") return groups[0]["group_name"] return group_name
python
def get_group_name_nonetrick(self, group_name = None): groups = self.m["groups"] if (len(groups) == 0): raise Exception("Cannot find any groups in metadata") if (not group_name): if (len(groups) > 1): raise Exception("We have more than one payment group in metadata, so group_name should be specified") return groups[0]["group_name"] return group_name
[ "def", "get_group_name_nonetrick", "(", "self", ",", "group_name", "=", "None", ")", ":", "groups", "=", "self", ".", "m", "[", "\"groups\"", "]", "if", "(", "len", "(", "groups", ")", "==", "0", ")", ":", "raise", "Exception", "(", "\"Cannot find any groups in metadata\"", ")", "if", "(", "not", "group_name", ")", ":", "if", "(", "len", "(", "groups", ")", ">", "1", ")", ":", "raise", "Exception", "(", "\"We have more than one payment group in metadata, so group_name should be specified\"", ")", "return", "groups", "[", "0", "]", "[", "\"group_name\"", "]", "return", "group_name" ]
In all getter functions, in the case of a single payment group, group_name can be None
[ "In", "all", "getter", "function", "in", "case", "of", "single", "payment", "group", "group_name", "can", "be", "None" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_service_metadata.py#L145-L154
250,771
singnet/snet-cli
snet_cli/utils_ipfs.py
get_from_ipfs_and_checkhash
def get_from_ipfs_and_checkhash(ipfs_client, ipfs_hash_base58, validate=True): """ Get file from ipfs We must check the hash becasue we cannot believe that ipfs_client wasn't been compromise """ if validate: from snet_cli.resources.proto.unixfs_pb2 import Data from snet_cli.resources.proto.merckledag_pb2 import MerkleNode # No nice Python library to parse ipfs blocks, so do it ourselves. block_data = ipfs_client.block_get(ipfs_hash_base58) mn = MerkleNode() mn.ParseFromString(block_data) unixfs_data = Data() unixfs_data.ParseFromString(mn.Data) assert unixfs_data.Type == unixfs_data.DataType.Value('File'), "IPFS hash must be a file" data = unixfs_data.Data # multihash has a badly registered base58 codec, overwrite it... multihash.CodecReg.register('base58', base58.b58encode, base58.b58decode) # create a multihash object from our ipfs hash mh = multihash.decode(ipfs_hash_base58.encode('ascii'), 'base58') # Convenience method lets us directly use a multihash to verify data if not mh.verify(block_data): raise Exception("IPFS hash mismatch with data") else: data = ipfs_client.cat(ipfs_hash_base58) return data
python
def get_from_ipfs_and_checkhash(ipfs_client, ipfs_hash_base58, validate=True): if validate: from snet_cli.resources.proto.unixfs_pb2 import Data from snet_cli.resources.proto.merckledag_pb2 import MerkleNode # No nice Python library to parse ipfs blocks, so do it ourselves. block_data = ipfs_client.block_get(ipfs_hash_base58) mn = MerkleNode() mn.ParseFromString(block_data) unixfs_data = Data() unixfs_data.ParseFromString(mn.Data) assert unixfs_data.Type == unixfs_data.DataType.Value('File'), "IPFS hash must be a file" data = unixfs_data.Data # multihash has a badly registered base58 codec, overwrite it... multihash.CodecReg.register('base58', base58.b58encode, base58.b58decode) # create a multihash object from our ipfs hash mh = multihash.decode(ipfs_hash_base58.encode('ascii'), 'base58') # Convenience method lets us directly use a multihash to verify data if not mh.verify(block_data): raise Exception("IPFS hash mismatch with data") else: data = ipfs_client.cat(ipfs_hash_base58) return data
[ "def", "get_from_ipfs_and_checkhash", "(", "ipfs_client", ",", "ipfs_hash_base58", ",", "validate", "=", "True", ")", ":", "if", "validate", ":", "from", "snet_cli", ".", "resources", ".", "proto", ".", "unixfs_pb2", "import", "Data", "from", "snet_cli", ".", "resources", ".", "proto", ".", "merckledag_pb2", "import", "MerkleNode", "# No nice Python library to parse ipfs blocks, so do it ourselves.", "block_data", "=", "ipfs_client", ".", "block_get", "(", "ipfs_hash_base58", ")", "mn", "=", "MerkleNode", "(", ")", "mn", ".", "ParseFromString", "(", "block_data", ")", "unixfs_data", "=", "Data", "(", ")", "unixfs_data", ".", "ParseFromString", "(", "mn", ".", "Data", ")", "assert", "unixfs_data", ".", "Type", "==", "unixfs_data", ".", "DataType", ".", "Value", "(", "'File'", ")", ",", "\"IPFS hash must be a file\"", "data", "=", "unixfs_data", ".", "Data", "# multihash has a badly registered base58 codec, overwrite it...", "multihash", ".", "CodecReg", ".", "register", "(", "'base58'", ",", "base58", ".", "b58encode", ",", "base58", ".", "b58decode", ")", "# create a multihash object from our ipfs hash", "mh", "=", "multihash", ".", "decode", "(", "ipfs_hash_base58", ".", "encode", "(", "'ascii'", ")", ",", "'base58'", ")", "# Convenience method lets us directly use a multihash to verify data", "if", "not", "mh", ".", "verify", "(", "block_data", ")", ":", "raise", "Exception", "(", "\"IPFS hash mismatch with data\"", ")", "else", ":", "data", "=", "ipfs_client", ".", "cat", "(", "ipfs_hash_base58", ")", "return", "data" ]
Get a file from IPFS. We must check the hash because we cannot assume that ipfs_client has not been compromised
[ "Get", "file", "from", "ipfs", "We", "must", "check", "the", "hash", "becasue", "we", "cannot", "believe", "that", "ipfs_client", "wasn", "t", "been", "compromise" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/utils_ipfs.py#L35-L63
250,772
singnet/snet-cli
snet_cli/utils_ipfs.py
hash_to_bytesuri
def hash_to_bytesuri(s): """ Convert in and from bytes uri format used in Registry contract """ # TODO: we should pad string with zeros till closest 32 bytes word because of a bug in processReceipt (in snet_cli.contract.process_receipt) s = "ipfs://" + s return s.encode("ascii").ljust(32 * (len(s)//32 + 1), b"\0")
python
def hash_to_bytesuri(s): # TODO: we should pad string with zeros till closest 32 bytes word because of a bug in processReceipt (in snet_cli.contract.process_receipt) s = "ipfs://" + s return s.encode("ascii").ljust(32 * (len(s)//32 + 1), b"\0")
[ "def", "hash_to_bytesuri", "(", "s", ")", ":", "# TODO: we should pad string with zeros till closest 32 bytes word because of a bug in processReceipt (in snet_cli.contract.process_receipt)", "s", "=", "\"ipfs://\"", "+", "s", "return", "s", ".", "encode", "(", "\"ascii\"", ")", ".", "ljust", "(", "32", "*", "(", "len", "(", "s", ")", "//", "32", "+", "1", ")", ",", "b\"\\0\"", ")" ]
Convert to and from the bytes URI format used in the Registry contract
[ "Convert", "in", "and", "from", "bytes", "uri", "format", "used", "in", "Registry", "contract" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/utils_ipfs.py#L65-L71
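A quick standalone check of the padding behaviour of hash_to_bytesuri above, re-defined here so it runs without snet_cli; the hash string is a placeholder 46-character value, not a real IPFS hash:

def hash_to_bytesuri(s):
    s = "ipfs://" + s
    return s.encode("ascii").ljust(32 * (len(s) // 32 + 1), b"\0")

b = hash_to_bytesuri("Qm" + "a" * 44)   # placeholder base58-style hash
print(len(b), len(b) % 32 == 0)          # 64 True: zero-padded to the next 32-byte word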
250,773
singnet/snet-cli
snet_cli/mpe_treasurer_command.py
MPETreasurerCommand._get_stub_and_request_classes
def _get_stub_and_request_classes(self, service_name): """ import protobuf and return stub and request class """ # Compile protobuf if needed codegen_dir = Path.home().joinpath(".snet", "mpe_client", "control_service") proto_dir = Path(__file__).absolute().parent.joinpath("resources", "proto") if (not codegen_dir.joinpath("control_service_pb2.py").is_file()): compile_proto(proto_dir, codegen_dir, proto_file = "control_service.proto") stub_class, request_class, _ = import_protobuf_from_dir(codegen_dir, service_name) return stub_class, request_class
python
def _get_stub_and_request_classes(self, service_name): # Compile protobuf if needed codegen_dir = Path.home().joinpath(".snet", "mpe_client", "control_service") proto_dir = Path(__file__).absolute().parent.joinpath("resources", "proto") if (not codegen_dir.joinpath("control_service_pb2.py").is_file()): compile_proto(proto_dir, codegen_dir, proto_file = "control_service.proto") stub_class, request_class, _ = import_protobuf_from_dir(codegen_dir, service_name) return stub_class, request_class
[ "def", "_get_stub_and_request_classes", "(", "self", ",", "service_name", ")", ":", "# Compile protobuf if needed", "codegen_dir", "=", "Path", ".", "home", "(", ")", ".", "joinpath", "(", "\".snet\"", ",", "\"mpe_client\"", ",", "\"control_service\"", ")", "proto_dir", "=", "Path", "(", "__file__", ")", ".", "absolute", "(", ")", ".", "parent", ".", "joinpath", "(", "\"resources\"", ",", "\"proto\"", ")", "if", "(", "not", "codegen_dir", ".", "joinpath", "(", "\"control_service_pb2.py\"", ")", ".", "is_file", "(", ")", ")", ":", "compile_proto", "(", "proto_dir", ",", "codegen_dir", ",", "proto_file", "=", "\"control_service.proto\"", ")", "stub_class", ",", "request_class", ",", "_", "=", "import_protobuf_from_dir", "(", "codegen_dir", ",", "service_name", ")", "return", "stub_class", ",", "request_class" ]
import protobuf and return stub and request class
[ "import", "protobuf", "and", "return", "stub", "and", "request", "class" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_treasurer_command.py#L31-L40
250,774
singnet/snet-cli
snet_cli/mpe_treasurer_command.py
MPETreasurerCommand._start_claim_channels
def _start_claim_channels(self, grpc_channel, channels_ids): """ Safely run StartClaim for given channels """ unclaimed_payments = self._call_GetListUnclaimed(grpc_channel) unclaimed_payments_dict = {p["channel_id"] : p for p in unclaimed_payments} to_claim = [] for channel_id in channels_ids: if (channel_id not in unclaimed_payments_dict or unclaimed_payments_dict[channel_id]["amount"] == 0): self._printout("There is nothing to claim for channel %i, we skip it"%channel_id) continue blockchain = self._get_channel_state_from_blockchain(channel_id) if (unclaimed_payments_dict[channel_id]["nonce"] != blockchain["nonce"]): self._printout("Old payment for channel %i is still in progress. Please run claim for this channel later."%channel_id) continue to_claim.append((channel_id, blockchain["nonce"])) payments = [self._call_StartClaim(grpc_channel, channel_id, nonce) for channel_id, nonce in to_claim] return payments
python
def _start_claim_channels(self, grpc_channel, channels_ids): unclaimed_payments = self._call_GetListUnclaimed(grpc_channel) unclaimed_payments_dict = {p["channel_id"] : p for p in unclaimed_payments} to_claim = [] for channel_id in channels_ids: if (channel_id not in unclaimed_payments_dict or unclaimed_payments_dict[channel_id]["amount"] == 0): self._printout("There is nothing to claim for channel %i, we skip it"%channel_id) continue blockchain = self._get_channel_state_from_blockchain(channel_id) if (unclaimed_payments_dict[channel_id]["nonce"] != blockchain["nonce"]): self._printout("Old payment for channel %i is still in progress. Please run claim for this channel later."%channel_id) continue to_claim.append((channel_id, blockchain["nonce"])) payments = [self._call_StartClaim(grpc_channel, channel_id, nonce) for channel_id, nonce in to_claim] return payments
[ "def", "_start_claim_channels", "(", "self", ",", "grpc_channel", ",", "channels_ids", ")", ":", "unclaimed_payments", "=", "self", ".", "_call_GetListUnclaimed", "(", "grpc_channel", ")", "unclaimed_payments_dict", "=", "{", "p", "[", "\"channel_id\"", "]", ":", "p", "for", "p", "in", "unclaimed_payments", "}", "to_claim", "=", "[", "]", "for", "channel_id", "in", "channels_ids", ":", "if", "(", "channel_id", "not", "in", "unclaimed_payments_dict", "or", "unclaimed_payments_dict", "[", "channel_id", "]", "[", "\"amount\"", "]", "==", "0", ")", ":", "self", ".", "_printout", "(", "\"There is nothing to claim for channel %i, we skip it\"", "%", "channel_id", ")", "continue", "blockchain", "=", "self", ".", "_get_channel_state_from_blockchain", "(", "channel_id", ")", "if", "(", "unclaimed_payments_dict", "[", "channel_id", "]", "[", "\"nonce\"", "]", "!=", "blockchain", "[", "\"nonce\"", "]", ")", ":", "self", ".", "_printout", "(", "\"Old payment for channel %i is still in progress. Please run claim for this channel later.\"", "%", "channel_id", ")", "continue", "to_claim", ".", "append", "(", "(", "channel_id", ",", "blockchain", "[", "\"nonce\"", "]", ")", ")", "payments", "=", "[", "self", ".", "_call_StartClaim", "(", "grpc_channel", ",", "channel_id", ",", "nonce", ")", "for", "channel_id", ",", "nonce", "in", "to_claim", "]", "return", "payments" ]
Safely run StartClaim for given channels
[ "Safely", "run", "StartClaim", "for", "given", "channels" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_treasurer_command.py#L103-L120
250,775
singnet/snet-cli
snet_cli/mpe_treasurer_command.py
MPETreasurerCommand._claim_in_progress_and_claim_channels
def _claim_in_progress_and_claim_channels(self, grpc_channel, channels): """ Claim all 'pending' payments in progress and after we claim given channels """ # first we get the list of all 'payments in progress' in case we 'lost' some payments. payments = self._call_GetListInProgress(grpc_channel) if (len(payments) > 0): self._printout("There are %i payments in 'progress' (they haven't been claimed in blockchain). We will claim them."%len(payments)) self._blockchain_claim(payments) payments = self._start_claim_channels(grpc_channel, channels) self._blockchain_claim(payments)
python
def _claim_in_progress_and_claim_channels(self, grpc_channel, channels): # first we get the list of all 'payments in progress' in case we 'lost' some payments. payments = self._call_GetListInProgress(grpc_channel) if (len(payments) > 0): self._printout("There are %i payments in 'progress' (they haven't been claimed in blockchain). We will claim them."%len(payments)) self._blockchain_claim(payments) payments = self._start_claim_channels(grpc_channel, channels) self._blockchain_claim(payments)
[ "def", "_claim_in_progress_and_claim_channels", "(", "self", ",", "grpc_channel", ",", "channels", ")", ":", "# first we get the list of all 'payments in progress' in case we 'lost' some payments.", "payments", "=", "self", ".", "_call_GetListInProgress", "(", "grpc_channel", ")", "if", "(", "len", "(", "payments", ")", ">", "0", ")", ":", "self", ".", "_printout", "(", "\"There are %i payments in 'progress' (they haven't been claimed in blockchain). We will claim them.\"", "%", "len", "(", "payments", ")", ")", "self", ".", "_blockchain_claim", "(", "payments", ")", "payments", "=", "self", ".", "_start_claim_channels", "(", "grpc_channel", ",", "channels", ")", "self", ".", "_blockchain_claim", "(", "payments", ")" ]
Claim all 'pending' payments in progress, and afterwards claim the given channels
[ "Claim", "all", "pending", "payments", "in", "progress", "and", "after", "we", "claim", "given", "channels" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_treasurer_command.py#L122-L130
250,776
singnet/snet-cli
snet_cli/config.py
Config.create_default_config
def create_default_config(self): """ Create default configuration if config file does not exist """ # make config directory with the minimal possible permission self._config_file.parent.mkdir(mode=0o700, exist_ok=True) self["network.kovan"] = {"default_eth_rpc_endpoint": "https://kovan.infura.io", "default_gas_price" : "medium"} self["network.mainnet"] = {"default_eth_rpc_endpoint": "https://mainnet.infura.io", "default_gas_price" : "medium"} self["network.ropsten"] = {"default_eth_rpc_endpoint": "https://ropsten.infura.io", "default_gas_price" : "medium"} self["network.rinkeby"] = {"default_eth_rpc_endpoint": "https://rinkeby.infura.io", "default_gas_price" : "medium"} self["ipfs"] = {"default_ipfs_endpoint": "http://ipfs.singularitynet.io:80"} self["session"] = { "network": "kovan" } self._persist() print("We've created configuration file with default values in: %s\n"%str(self._config_file))
python
def create_default_config(self): # make config directory with the minimal possible permission self._config_file.parent.mkdir(mode=0o700, exist_ok=True) self["network.kovan"] = {"default_eth_rpc_endpoint": "https://kovan.infura.io", "default_gas_price" : "medium"} self["network.mainnet"] = {"default_eth_rpc_endpoint": "https://mainnet.infura.io", "default_gas_price" : "medium"} self["network.ropsten"] = {"default_eth_rpc_endpoint": "https://ropsten.infura.io", "default_gas_price" : "medium"} self["network.rinkeby"] = {"default_eth_rpc_endpoint": "https://rinkeby.infura.io", "default_gas_price" : "medium"} self["ipfs"] = {"default_ipfs_endpoint": "http://ipfs.singularitynet.io:80"} self["session"] = { "network": "kovan" } self._persist() print("We've created configuration file with default values in: %s\n"%str(self._config_file))
[ "def", "create_default_config", "(", "self", ")", ":", "# make config directory with the minimal possible permission", "self", ".", "_config_file", ".", "parent", ".", "mkdir", "(", "mode", "=", "0o700", ",", "exist_ok", "=", "True", ")", "self", "[", "\"network.kovan\"", "]", "=", "{", "\"default_eth_rpc_endpoint\"", ":", "\"https://kovan.infura.io\"", ",", "\"default_gas_price\"", ":", "\"medium\"", "}", "self", "[", "\"network.mainnet\"", "]", "=", "{", "\"default_eth_rpc_endpoint\"", ":", "\"https://mainnet.infura.io\"", ",", "\"default_gas_price\"", ":", "\"medium\"", "}", "self", "[", "\"network.ropsten\"", "]", "=", "{", "\"default_eth_rpc_endpoint\"", ":", "\"https://ropsten.infura.io\"", ",", "\"default_gas_price\"", ":", "\"medium\"", "}", "self", "[", "\"network.rinkeby\"", "]", "=", "{", "\"default_eth_rpc_endpoint\"", ":", "\"https://rinkeby.infura.io\"", ",", "\"default_gas_price\"", ":", "\"medium\"", "}", "self", "[", "\"ipfs\"", "]", "=", "{", "\"default_ipfs_endpoint\"", ":", "\"http://ipfs.singularitynet.io:80\"", "}", "self", "[", "\"session\"", "]", "=", "{", "\"network\"", ":", "\"kovan\"", "}", "self", ".", "_persist", "(", ")", "print", "(", "\"We've created configuration file with default values in: %s\\n\"", "%", "str", "(", "self", ".", "_config_file", ")", ")" ]
Create default configuration if config file does not exist
[ "Create", "default", "configuration", "if", "config", "file", "does", "not", "exist" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/config.py#L175-L187
250,777
singnet/snet-cli
snet_cli/utils_proto.py
switch_to_json_payload_encoding
def switch_to_json_payload_encoding(call_fn, response_class): """ Switch payload encoding to JSON for GRPC call """ def json_serializer(*args, **kwargs): return bytes(json_format.MessageToJson(args[0], True, preserving_proto_field_name=True), "utf-8") def json_deserializer(*args, **kwargs): resp = response_class() json_format.Parse(args[0], resp, True) return resp call_fn._request_serializer = json_serializer call_fn._response_deserializer = json_deserializer
python
def switch_to_json_payload_encoding(call_fn, response_class): def json_serializer(*args, **kwargs): return bytes(json_format.MessageToJson(args[0], True, preserving_proto_field_name=True), "utf-8") def json_deserializer(*args, **kwargs): resp = response_class() json_format.Parse(args[0], resp, True) return resp call_fn._request_serializer = json_serializer call_fn._response_deserializer = json_deserializer
[ "def", "switch_to_json_payload_encoding", "(", "call_fn", ",", "response_class", ")", ":", "def", "json_serializer", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "bytes", "(", "json_format", ".", "MessageToJson", "(", "args", "[", "0", "]", ",", "True", ",", "preserving_proto_field_name", "=", "True", ")", ",", "\"utf-8\"", ")", "def", "json_deserializer", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "resp", "=", "response_class", "(", ")", "json_format", ".", "Parse", "(", "args", "[", "0", "]", ",", "resp", ",", "True", ")", "return", "resp", "call_fn", ".", "_request_serializer", "=", "json_serializer", "call_fn", ".", "_response_deserializer", "=", "json_deserializer" ]
Switch payload encoding to JSON for GRPC call
[ "Switch", "payload", "encoding", "to", "JSON", "for", "GRPC", "call" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/utils_proto.py#L72-L81
250,778
singnet/snet-cli
snet_cli/mpe_account_command.py
MPEAccountCommand.print_agi_and_mpe_balances
def print_agi_and_mpe_balances(self): """ Print balance of ETH, AGI, and MPE wallet """ if (self.args.account): account = self.args.account else: account = self.ident.address eth_wei = self.w3.eth.getBalance(account) agi_cogs = self.call_contract_command("SingularityNetToken", "balanceOf", [account]) mpe_cogs = self.call_contract_command("MultiPartyEscrow", "balances", [account]) # we cannot use _pprint here because it doesn't conserve order yet self._printout(" account: %s"%account) self._printout(" ETH: %s"%self.w3.fromWei(eth_wei, 'ether')) self._printout(" AGI: %s"%cogs2stragi(agi_cogs)) self._printout(" MPE: %s"%cogs2stragi(mpe_cogs))
python
def print_agi_and_mpe_balances(self): if (self.args.account): account = self.args.account else: account = self.ident.address eth_wei = self.w3.eth.getBalance(account) agi_cogs = self.call_contract_command("SingularityNetToken", "balanceOf", [account]) mpe_cogs = self.call_contract_command("MultiPartyEscrow", "balances", [account]) # we cannot use _pprint here because it doesn't conserve order yet self._printout(" account: %s"%account) self._printout(" ETH: %s"%self.w3.fromWei(eth_wei, 'ether')) self._printout(" AGI: %s"%cogs2stragi(agi_cogs)) self._printout(" MPE: %s"%cogs2stragi(mpe_cogs))
[ "def", "print_agi_and_mpe_balances", "(", "self", ")", ":", "if", "(", "self", ".", "args", ".", "account", ")", ":", "account", "=", "self", ".", "args", ".", "account", "else", ":", "account", "=", "self", ".", "ident", ".", "address", "eth_wei", "=", "self", ".", "w3", ".", "eth", ".", "getBalance", "(", "account", ")", "agi_cogs", "=", "self", ".", "call_contract_command", "(", "\"SingularityNetToken\"", ",", "\"balanceOf\"", ",", "[", "account", "]", ")", "mpe_cogs", "=", "self", ".", "call_contract_command", "(", "\"MultiPartyEscrow\"", ",", "\"balances\"", ",", "[", "account", "]", ")", "# we cannot use _pprint here because it doesn't conserve order yet", "self", ".", "_printout", "(", "\" account: %s\"", "%", "account", ")", "self", ".", "_printout", "(", "\" ETH: %s\"", "%", "self", ".", "w3", ".", "fromWei", "(", "eth_wei", ",", "'ether'", ")", ")", "self", ".", "_printout", "(", "\" AGI: %s\"", "%", "cogs2stragi", "(", "agi_cogs", ")", ")", "self", ".", "_printout", "(", "\" MPE: %s\"", "%", "cogs2stragi", "(", "mpe_cogs", ")", ")" ]
Print balance of ETH, AGI, and MPE wallet
[ "Print", "balance", "of", "ETH", "AGI", "and", "MPE", "wallet" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_account_command.py#L10-L24
250,779
singnet/snet-cli
snet_cli/mpe_service_command.py
MPEServiceCommand.publish_proto_in_ipfs
def publish_proto_in_ipfs(self): """ Publish proto files in ipfs and print hash """ ipfs_hash_base58 = utils_ipfs.publish_proto_in_ipfs(self._get_ipfs_client(), self.args.protodir) self._printout(ipfs_hash_base58)
python
def publish_proto_in_ipfs(self): ipfs_hash_base58 = utils_ipfs.publish_proto_in_ipfs(self._get_ipfs_client(), self.args.protodir) self._printout(ipfs_hash_base58)
[ "def", "publish_proto_in_ipfs", "(", "self", ")", ":", "ipfs_hash_base58", "=", "utils_ipfs", ".", "publish_proto_in_ipfs", "(", "self", ".", "_get_ipfs_client", "(", ")", ",", "self", ".", "args", ".", "protodir", ")", "self", ".", "_printout", "(", "ipfs_hash_base58", ")" ]
Publish proto files in ipfs and print hash
[ "Publish", "proto", "files", "in", "ipfs", "and", "print", "hash" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_service_command.py#L15-L18
250,780
singnet/snet-cli
snet_cli/mpe_service_command.py
MPEServiceCommand.publish_proto_metadata_update
def publish_proto_metadata_update(self): """ Publish protobuf model in ipfs and update existing metadata file """ metadata = load_mpe_service_metadata(self.args.metadata_file) ipfs_hash_base58 = utils_ipfs.publish_proto_in_ipfs(self._get_ipfs_client(), self.args.protodir) metadata.set_simple_field("model_ipfs_hash", ipfs_hash_base58) metadata.save_pretty(self.args.metadata_file)
python
def publish_proto_metadata_update(self): metadata = load_mpe_service_metadata(self.args.metadata_file) ipfs_hash_base58 = utils_ipfs.publish_proto_in_ipfs(self._get_ipfs_client(), self.args.protodir) metadata.set_simple_field("model_ipfs_hash", ipfs_hash_base58) metadata.save_pretty(self.args.metadata_file)
[ "def", "publish_proto_metadata_update", "(", "self", ")", ":", "metadata", "=", "load_mpe_service_metadata", "(", "self", ".", "args", ".", "metadata_file", ")", "ipfs_hash_base58", "=", "utils_ipfs", ".", "publish_proto_in_ipfs", "(", "self", ".", "_get_ipfs_client", "(", ")", ",", "self", ".", "args", ".", "protodir", ")", "metadata", ".", "set_simple_field", "(", "\"model_ipfs_hash\"", ",", "ipfs_hash_base58", ")", "metadata", ".", "save_pretty", "(", "self", ".", "args", ".", "metadata_file", ")" ]
Publish protobuf model in ipfs and update existing metadata file
[ "Publish", "protobuf", "model", "in", "ipfs", "and", "update", "existing", "metadata", "file" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_service_command.py#L37-L42
250,781
singnet/snet-cli
snet_cli/mpe_channel_command.py
MPEChannelCommand._get_persistent_mpe_dir
def _get_persistent_mpe_dir(self): """ get persistent storage for mpe """ mpe_address = self.get_mpe_address().lower() registry_address = self.get_registry_address().lower() return Path.home().joinpath(".snet", "mpe_client", "%s_%s"%(mpe_address, registry_address))
python
def _get_persistent_mpe_dir(self): mpe_address = self.get_mpe_address().lower() registry_address = self.get_registry_address().lower() return Path.home().joinpath(".snet", "mpe_client", "%s_%s"%(mpe_address, registry_address))
[ "def", "_get_persistent_mpe_dir", "(", "self", ")", ":", "mpe_address", "=", "self", ".", "get_mpe_address", "(", ")", ".", "lower", "(", ")", "registry_address", "=", "self", ".", "get_registry_address", "(", ")", ".", "lower", "(", ")", "return", "Path", ".", "home", "(", ")", ".", "joinpath", "(", "\".snet\"", ",", "\"mpe_client\"", ",", "\"%s_%s\"", "%", "(", "mpe_address", ",", "registry_address", ")", ")" ]
get persistent storage for mpe
[ "get", "persistent", "storage", "for", "mpe" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_channel_command.py#L21-L25
250,782
singnet/snet-cli
snet_cli/mpe_channel_command.py
MPEChannelCommand._check_mpe_address_metadata
def _check_mpe_address_metadata(self, metadata): """ we make sure that MultiPartyEscrow address from metadata is correct """ mpe_address = self.get_mpe_address() if (str(mpe_address).lower() != str(metadata["mpe_address"]).lower()): raise Exception("MultiPartyEscrow contract address from metadata %s do not correspond to current MultiPartyEscrow address %s"%(metadata["mpe_address"], mpe_address))
python
def _check_mpe_address_metadata(self, metadata): mpe_address = self.get_mpe_address() if (str(mpe_address).lower() != str(metadata["mpe_address"]).lower()): raise Exception("MultiPartyEscrow contract address from metadata %s do not correspond to current MultiPartyEscrow address %s"%(metadata["mpe_address"], mpe_address))
[ "def", "_check_mpe_address_metadata", "(", "self", ",", "metadata", ")", ":", "mpe_address", "=", "self", ".", "get_mpe_address", "(", ")", "if", "(", "str", "(", "mpe_address", ")", ".", "lower", "(", ")", "!=", "str", "(", "metadata", "[", "\"mpe_address\"", "]", ")", ".", "lower", "(", ")", ")", ":", "raise", "Exception", "(", "\"MultiPartyEscrow contract address from metadata %s do not correspond to current MultiPartyEscrow address %s\"", "%", "(", "metadata", "[", "\"mpe_address\"", "]", ",", "mpe_address", ")", ")" ]
we make sure that MultiPartyEscrow address from metadata is correct
[ "we", "make", "sure", "that", "MultiPartyEscrow", "address", "from", "metadata", "is", "correct" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_channel_command.py#L74-L78
250,783
singnet/snet-cli
snet_cli/mpe_channel_command.py
MPEChannelCommand._init_or_update_registered_service_if_needed
def _init_or_update_registered_service_if_needed(self): ''' similar to _init_or_update_service_if_needed but we get service_registraion from registry, so we can update only registered services ''' if (self.is_service_initialized()): old_reg = self._read_service_info(self.args.org_id, self.args.service_id) # metadataURI will be in old_reg only for service which was initilized from registry (not from metadata) # we do nothing for services which were initilized from metadata if ("metadataURI" not in old_reg): return service_registration = self._get_service_registration() # if metadataURI hasn't been changed we do nothing if (not self.is_metadataURI_has_changed(service_registration)): return else: service_registration = self._get_service_registration() service_metadata = self._get_service_metadata_from_registry() self._init_or_update_service_if_needed(service_metadata, service_registration)
python
def _init_or_update_registered_service_if_needed(self): ''' similar to _init_or_update_service_if_needed but we get service_registraion from registry, so we can update only registered services ''' if (self.is_service_initialized()): old_reg = self._read_service_info(self.args.org_id, self.args.service_id) # metadataURI will be in old_reg only for service which was initilized from registry (not from metadata) # we do nothing for services which were initilized from metadata if ("metadataURI" not in old_reg): return service_registration = self._get_service_registration() # if metadataURI hasn't been changed we do nothing if (not self.is_metadataURI_has_changed(service_registration)): return else: service_registration = self._get_service_registration() service_metadata = self._get_service_metadata_from_registry() self._init_or_update_service_if_needed(service_metadata, service_registration)
[ "def", "_init_or_update_registered_service_if_needed", "(", "self", ")", ":", "if", "(", "self", ".", "is_service_initialized", "(", ")", ")", ":", "old_reg", "=", "self", ".", "_read_service_info", "(", "self", ".", "args", ".", "org_id", ",", "self", ".", "args", ".", "service_id", ")", "# metadataURI will be in old_reg only for service which was initilized from registry (not from metadata)", "# we do nothing for services which were initilized from metadata", "if", "(", "\"metadataURI\"", "not", "in", "old_reg", ")", ":", "return", "service_registration", "=", "self", ".", "_get_service_registration", "(", ")", "# if metadataURI hasn't been changed we do nothing", "if", "(", "not", "self", ".", "is_metadataURI_has_changed", "(", "service_registration", ")", ")", ":", "return", "else", ":", "service_registration", "=", "self", ".", "_get_service_registration", "(", ")", "service_metadata", "=", "self", ".", "_get_service_metadata_from_registry", "(", ")", "self", ".", "_init_or_update_service_if_needed", "(", "service_metadata", ",", "service_registration", ")" ]
similar to _init_or_update_service_if_needed, but we get service_registration from the registry, so we can update only registered services
[ "similar", "to", "_init_or_update_service_if_needed", "but", "we", "get", "service_registraion", "from", "registry", "so", "we", "can", "update", "only", "registered", "services" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_channel_command.py#L115-L136
250,784
singnet/snet-cli
snet_cli/mpe_channel_command.py
MPEChannelCommand._smart_get_initialized_channel_for_service
def _smart_get_initialized_channel_for_service(self, metadata, filter_by, is_try_initailize = True): ''' - filter_by can be sender or signer ''' channels = self._get_initialized_channels_for_service(self.args.org_id, self.args.service_id) group_id = metadata.get_group_id(self.args.group_name) channels = [c for c in channels if c[filter_by].lower() == self.ident.address.lower() and c["groupId"] == group_id] if (len(channels) == 0 and is_try_initailize): # this will work only in simple case where signer == sender self._initialize_already_opened_channel(metadata, self.ident.address, self.ident.address) return self._smart_get_initialized_channel_for_service(metadata, filter_by, is_try_initailize = False) if (len(channels) == 0): raise Exception("Cannot find initialized channel for service with org_id=%s service_id=%s and signer=%s"%(self.args.org_id, self.args.service_id, self.ident.address)) if (self.args.channel_id is None): if (len(channels) > 1): channel_ids = [channel["channelId"] for channel in channels] raise Exception("We have several initialized channel: %s. You should use --channel-id to select one"%str(channel_ids)) return channels[0] for channel in channels: if (channel["channelId"] == self.args.channel_id): return channel raise Exception("Channel %i has not been initialized or your are not the sender/signer of it"%self.args.channel_id)
python
def _smart_get_initialized_channel_for_service(self, metadata, filter_by, is_try_initailize = True): ''' - filter_by can be sender or signer ''' channels = self._get_initialized_channels_for_service(self.args.org_id, self.args.service_id) group_id = metadata.get_group_id(self.args.group_name) channels = [c for c in channels if c[filter_by].lower() == self.ident.address.lower() and c["groupId"] == group_id] if (len(channels) == 0 and is_try_initailize): # this will work only in simple case where signer == sender self._initialize_already_opened_channel(metadata, self.ident.address, self.ident.address) return self._smart_get_initialized_channel_for_service(metadata, filter_by, is_try_initailize = False) if (len(channels) == 0): raise Exception("Cannot find initialized channel for service with org_id=%s service_id=%s and signer=%s"%(self.args.org_id, self.args.service_id, self.ident.address)) if (self.args.channel_id is None): if (len(channels) > 1): channel_ids = [channel["channelId"] for channel in channels] raise Exception("We have several initialized channel: %s. You should use --channel-id to select one"%str(channel_ids)) return channels[0] for channel in channels: if (channel["channelId"] == self.args.channel_id): return channel raise Exception("Channel %i has not been initialized or your are not the sender/signer of it"%self.args.channel_id)
[ "def", "_smart_get_initialized_channel_for_service", "(", "self", ",", "metadata", ",", "filter_by", ",", "is_try_initailize", "=", "True", ")", ":", "channels", "=", "self", ".", "_get_initialized_channels_for_service", "(", "self", ".", "args", ".", "org_id", ",", "self", ".", "args", ".", "service_id", ")", "group_id", "=", "metadata", ".", "get_group_id", "(", "self", ".", "args", ".", "group_name", ")", "channels", "=", "[", "c", "for", "c", "in", "channels", "if", "c", "[", "filter_by", "]", ".", "lower", "(", ")", "==", "self", ".", "ident", ".", "address", ".", "lower", "(", ")", "and", "c", "[", "\"groupId\"", "]", "==", "group_id", "]", "if", "(", "len", "(", "channels", ")", "==", "0", "and", "is_try_initailize", ")", ":", "# this will work only in simple case where signer == sender", "self", ".", "_initialize_already_opened_channel", "(", "metadata", ",", "self", ".", "ident", ".", "address", ",", "self", ".", "ident", ".", "address", ")", "return", "self", ".", "_smart_get_initialized_channel_for_service", "(", "metadata", ",", "filter_by", ",", "is_try_initailize", "=", "False", ")", "if", "(", "len", "(", "channels", ")", "==", "0", ")", ":", "raise", "Exception", "(", "\"Cannot find initialized channel for service with org_id=%s service_id=%s and signer=%s\"", "%", "(", "self", ".", "args", ".", "org_id", ",", "self", ".", "args", ".", "service_id", ",", "self", ".", "ident", ".", "address", ")", ")", "if", "(", "self", ".", "args", ".", "channel_id", "is", "None", ")", ":", "if", "(", "len", "(", "channels", ")", ">", "1", ")", ":", "channel_ids", "=", "[", "channel", "[", "\"channelId\"", "]", "for", "channel", "in", "channels", "]", "raise", "Exception", "(", "\"We have several initialized channel: %s. You should use --channel-id to select one\"", "%", "str", "(", "channel_ids", ")", ")", "return", "channels", "[", "0", "]", "for", "channel", "in", "channels", ":", "if", "(", "channel", "[", "\"channelId\"", "]", "==", "self", ".", "args", ".", "channel_id", ")", ":", "return", "channel", "raise", "Exception", "(", "\"Channel %i has not been initialized or your are not the sender/signer of it\"", "%", "self", ".", "args", ".", "channel_id", ")" ]
- filter_by can be sender or signer
[ "-", "filter_by", "can", "be", "sender", "or", "signer" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_channel_command.py#L297-L320
250,785
singnet/snet-cli
snet_cli/mpe_channel_command.py
MPEChannelCommand._get_all_filtered_channels
def _get_all_filtered_channels(self, topics_without_signature): """ get all filtered chanels from blockchain logs """ mpe_address = self.get_mpe_address() event_signature = self.ident.w3.sha3(text="ChannelOpen(uint256,uint256,address,address,address,bytes32,uint256,uint256)").hex() topics = [event_signature] + topics_without_signature logs = self.ident.w3.eth.getLogs({"fromBlock" : self.args.from_block, "address" : mpe_address, "topics" : topics}) abi = get_contract_def("MultiPartyEscrow") event_abi = abi_get_element_by_name(abi, "ChannelOpen") channels_ids = [get_event_data(event_abi, l)["args"]["channelId"] for l in logs] return channels_ids
python
def _get_all_filtered_channels(self, topics_without_signature): mpe_address = self.get_mpe_address() event_signature = self.ident.w3.sha3(text="ChannelOpen(uint256,uint256,address,address,address,bytes32,uint256,uint256)").hex() topics = [event_signature] + topics_without_signature logs = self.ident.w3.eth.getLogs({"fromBlock" : self.args.from_block, "address" : mpe_address, "topics" : topics}) abi = get_contract_def("MultiPartyEscrow") event_abi = abi_get_element_by_name(abi, "ChannelOpen") channels_ids = [get_event_data(event_abi, l)["args"]["channelId"] for l in logs] return channels_ids
[ "def", "_get_all_filtered_channels", "(", "self", ",", "topics_without_signature", ")", ":", "mpe_address", "=", "self", ".", "get_mpe_address", "(", ")", "event_signature", "=", "self", ".", "ident", ".", "w3", ".", "sha3", "(", "text", "=", "\"ChannelOpen(uint256,uint256,address,address,address,bytes32,uint256,uint256)\"", ")", ".", "hex", "(", ")", "topics", "=", "[", "event_signature", "]", "+", "topics_without_signature", "logs", "=", "self", ".", "ident", ".", "w3", ".", "eth", ".", "getLogs", "(", "{", "\"fromBlock\"", ":", "self", ".", "args", ".", "from_block", ",", "\"address\"", ":", "mpe_address", ",", "\"topics\"", ":", "topics", "}", ")", "abi", "=", "get_contract_def", "(", "\"MultiPartyEscrow\"", ")", "event_abi", "=", "abi_get_element_by_name", "(", "abi", ",", "\"ChannelOpen\"", ")", "channels_ids", "=", "[", "get_event_data", "(", "event_abi", ",", "l", ")", "[", "\"args\"", "]", "[", "\"channelId\"", "]", "for", "l", "in", "logs", "]", "return", "channels_ids" ]
get all filtered channels from blockchain logs
[ "get", "all", "filtered", "chanels", "from", "blockchain", "logs" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_channel_command.py#L415-L424
250,786
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.list_repo
def list_repo(self): """ Returns info about all Repos. """ req = proto.ListRepoRequest() res = self.stub.ListRepo(req, metadata=self.metadata) if hasattr(res, 'repo_info'): return res.repo_info return []
python
def list_repo(self): req = proto.ListRepoRequest() res = self.stub.ListRepo(req, metadata=self.metadata) if hasattr(res, 'repo_info'): return res.repo_info return []
[ "def", "list_repo", "(", "self", ")", ":", "req", "=", "proto", ".", "ListRepoRequest", "(", ")", "res", "=", "self", ".", "stub", ".", "ListRepo", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "if", "hasattr", "(", "res", ",", "'repo_info'", ")", ":", "return", "res", ".", "repo_info", "return", "[", "]" ]
Returns info about all Repos.
[ "Returns", "info", "about", "all", "Repos", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L71-L79
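A usage sketch for the list_repo record above; the PfsClient constructor arguments are an assumption based on the pre-4.x python_pachyderm API, and localhost:30650 is just an example endpoint:

import python_pachyderm

client = python_pachyderm.PfsClient(host="localhost", port=30650)   # assumed constructor signature
for info in client.list_repo():
    print(info.repo.name)                                            # assumes RepoInfo exposes repo.name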
250,787
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.delete_repo
def delete_repo(self, repo_name=None, force=False, all=False): """ Deletes a repo and reclaims the storage space it was using. Params: * repo_name: The name of the repo. * force: If set to true, the repo will be removed regardless of errors. This argument should be used with care. * all: Delete all repos. """ if not all: if repo_name: req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force) self.stub.DeleteRepo(req, metadata=self.metadata) else: raise ValueError("Either a repo_name or all=True needs to be provided") else: if not repo_name: req = proto.DeleteRepoRequest(force=force, all=all) self.stub.DeleteRepo(req, metadata=self.metadata) else: raise ValueError("Cannot specify a repo_name if all=True")
python
def delete_repo(self, repo_name=None, force=False, all=False): if not all: if repo_name: req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force) self.stub.DeleteRepo(req, metadata=self.metadata) else: raise ValueError("Either a repo_name or all=True needs to be provided") else: if not repo_name: req = proto.DeleteRepoRequest(force=force, all=all) self.stub.DeleteRepo(req, metadata=self.metadata) else: raise ValueError("Cannot specify a repo_name if all=True")
[ "def", "delete_repo", "(", "self", ",", "repo_name", "=", "None", ",", "force", "=", "False", ",", "all", "=", "False", ")", ":", "if", "not", "all", ":", "if", "repo_name", ":", "req", "=", "proto", ".", "DeleteRepoRequest", "(", "repo", "=", "proto", ".", "Repo", "(", "name", "=", "repo_name", ")", ",", "force", "=", "force", ")", "self", ".", "stub", ".", "DeleteRepo", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "else", ":", "raise", "ValueError", "(", "\"Either a repo_name or all=True needs to be provided\"", ")", "else", ":", "if", "not", "repo_name", ":", "req", "=", "proto", ".", "DeleteRepoRequest", "(", "force", "=", "force", ",", "all", "=", "all", ")", "self", ".", "stub", ".", "DeleteRepo", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "else", ":", "raise", "ValueError", "(", "\"Cannot specify a repo_name if all=True\"", ")" ]
Deletes a repo and reclaims the storage space it was using. Params: * repo_name: The name of the repo. * force: If set to true, the repo will be removed regardless of errors. This argument should be used with care. * all: Delete all repos.
[ "Deletes", "a", "repo", "and", "reclaims", "the", "storage", "space", "it", "was", "using", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L81-L102
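A short sketch of the two mutually exclusive calling modes of delete_repo above, reusing the client from the previous sketch; 'test-repo' is a hypothetical repo name.

# Delete a single repo; force=True removes it even if errors occur.
client.delete_repo('test-repo', force=True)

# Delete every repo; repo_name must be omitted when all=True,
# otherwise the method raises ValueError.
client.delete_repo(all=True)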
250,788
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.start_commit
def start_commit(self, repo_name, branch=None, parent=None, description=None): """ Begins the process of committing data to a Repo. Once started you can write to the Commit with PutFile and when all the data has been written you must finish the Commit with FinishCommit. NOTE, data is not persisted until FinishCommit is called. A Commit object is returned. Params: * repo_name: The name of the repo. * branch: A more convenient way to build linear chains of commits. When a commit is started with a non-empty branch the value of branch becomes an alias for the created Commit. This enables a more intuitive access pattern. When the commit is started on a branch the previous head of the branch is used as the parent of the commit. * parent: Specifies the parent Commit, upon creation the new Commit will appear identical to the parent Commit, data can safely be added to the new commit without affecting the contents of the parent Commit. You may pass "" as parentCommit in which case the new Commit will have no parent and will initially appear empty. * description: (optional) explanation of the commit for clarity. """ req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch, description=description) res = self.stub.StartCommit(req, metadata=self.metadata) return res
python
def start_commit(self, repo_name, branch=None, parent=None, description=None): req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch, description=description) res = self.stub.StartCommit(req, metadata=self.metadata) return res
[ "def", "start_commit", "(", "self", ",", "repo_name", ",", "branch", "=", "None", ",", "parent", "=", "None", ",", "description", "=", "None", ")", ":", "req", "=", "proto", ".", "StartCommitRequest", "(", "parent", "=", "proto", ".", "Commit", "(", "repo", "=", "proto", ".", "Repo", "(", "name", "=", "repo_name", ")", ",", "id", "=", "parent", ")", ",", "branch", "=", "branch", ",", "description", "=", "description", ")", "res", "=", "self", ".", "stub", ".", "StartCommit", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "return", "res" ]
Begins the process of committing data to a Repo. Once started you can write to the Commit with PutFile and when all the data has been written you must finish the Commit with FinishCommit. NOTE, data is not persisted until FinishCommit is called. A Commit object is returned. Params: * repo_name: The name of the repo. * branch: A more convenient way to build linear chains of commits. When a commit is started with a non-empty branch the value of branch becomes an alias for the created Commit. This enables a more intuitive access pattern. When the commit is started on a branch the previous head of the branch is used as the parent of the commit. * parent: Specifies the parent Commit, upon creation the new Commit will appear identical to the parent Commit, data can safely be added to the new commit without affecting the contents of the parent Commit. You may pass "" as parentCommit in which case the new Commit will have no parent and will initially appear empty. * description: (optional) explanation of the commit for clarity.
[ "Begins", "the", "process", "of", "committing", "data", "to", "a", "Repo", ".", "Once", "started", "you", "can", "write", "to", "the", "Commit", "with", "PutFile", "and", "when", "all", "the", "data", "has", "been", "written", "you", "must", "finish", "the", "Commit", "with", "FinishCommit", ".", "NOTE", "data", "is", "not", "persisted", "until", "FinishCommit", "is", "called", ".", "A", "Commit", "object", "is", "returned", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L104-L129
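A hedged sketch of the start/finish cycle described above: data written into an open commit is only persisted once finish_commit is called. The repo, branch, and description are hypothetical.

# Open a commit on the master branch of 'photos'.
commit = client.start_commit('photos', branch='master', description='nightly import')

# ... write files into the open commit here, e.g. with put_file_url
# (covered by a later record) ...

# Persist the data; the commit becomes immutable afterwards.
client.finish_commit(commit)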
250,789
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.finish_commit
def finish_commit(self, commit): """ Ends the process of committing data to a Repo and persists the Commit. Once a Commit is finished the data becomes immutable and future attempts to write to it with PutFile will error. Params: * commit: A tuple, string, or Commit object representing the commit. """ req = proto.FinishCommitRequest(commit=commit_from(commit)) res = self.stub.FinishCommit(req, metadata=self.metadata) return res
python
def finish_commit(self, commit): req = proto.FinishCommitRequest(commit=commit_from(commit)) res = self.stub.FinishCommit(req, metadata=self.metadata) return res
[ "def", "finish_commit", "(", "self", ",", "commit", ")", ":", "req", "=", "proto", ".", "FinishCommitRequest", "(", "commit", "=", "commit_from", "(", "commit", ")", ")", "res", "=", "self", ".", "stub", ".", "FinishCommit", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "return", "res" ]
Ends the process of committing data to a Repo and persists the Commit. Once a Commit is finished the data becomes immutable and future attempts to write to it with PutFile will error. Params: * commit: A tuple, string, or Commit object representing the commit.
[ "Ends", "the", "process", "of", "committing", "data", "to", "a", "Repo", "and", "persists", "the", "Commit", ".", "Once", "a", "Commit", "is", "finished", "the", "data", "becomes", "immutable", "and", "future", "attempts", "to", "write", "to", "it", "with", "PutFile", "will", "error", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L131-L142
250,790
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.commit
def commit(self, repo_name, branch=None, parent=None, description=None): """A context manager for doing stuff inside a commit.""" commit = self.start_commit(repo_name, branch, parent, description) try: yield commit except Exception as e: print("An exception occurred during an open commit. " "Trying to finish it (Currently a commit can't be cancelled)") raise e finally: self.finish_commit(commit)
python
def commit(self, repo_name, branch=None, parent=None, description=None): commit = self.start_commit(repo_name, branch, parent, description) try: yield commit except Exception as e: print("An exception occurred during an open commit. " "Trying to finish it (Currently a commit can't be cancelled)") raise e finally: self.finish_commit(commit)
[ "def", "commit", "(", "self", ",", "repo_name", ",", "branch", "=", "None", ",", "parent", "=", "None", ",", "description", "=", "None", ")", ":", "commit", "=", "self", ".", "start_commit", "(", "repo_name", ",", "branch", ",", "parent", ",", "description", ")", "try", ":", "yield", "commit", "except", "Exception", "as", "e", ":", "print", "(", "\"An exception occurred during an open commit. \"", "\"Trying to finish it (Currently a commit can't be cancelled)\"", ")", "raise", "e", "finally", ":", "self", ".", "finish_commit", "(", "commit", ")" ]
A context manager for doing stuff inside a commit.
[ "A", "context", "manager", "for", "doing", "stuff", "inside", "a", "commit", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L145-L155
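The same workflow via the context manager above, which wraps start_commit/finish_commit so the commit is always finished even if the body raises. put_file_url is defined in a later record; the repo, path, and URL are hypothetical.

with client.commit('photos', branch='master') as c:
    # Write into the open commit; finish_commit runs in the finally block.
    client.put_file_url(c, '/kitten.png', 'https://example.com/kitten.png')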
250,791
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.inspect_commit
def inspect_commit(self, commit): """ Returns info about a specific Commit. Params: * commit: A tuple, string, or Commit object representing the commit. """ req = proto.InspectCommitRequest(commit=commit_from(commit)) return self.stub.InspectCommit(req, metadata=self.metadata)
python
def inspect_commit(self, commit): req = proto.InspectCommitRequest(commit=commit_from(commit)) return self.stub.InspectCommit(req, metadata=self.metadata)
[ "def", "inspect_commit", "(", "self", ",", "commit", ")", ":", "req", "=", "proto", ".", "InspectCommitRequest", "(", "commit", "=", "commit_from", "(", "commit", ")", ")", "return", "self", ".", "stub", ".", "InspectCommit", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")" ]
Returns info about a specific Commit. Params: * commit: A tuple, string, or Commit object representing the commit.
[ "Returns", "info", "about", "a", "specific", "Commit", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L157-L165
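A one-line sketch for inspect_commit; the tuple form (repo_name, commit_id) is assumed to be one of the shapes commit_from accepts, and the commit id is a placeholder.

info = client.inspect_commit(('photos', '<commit-id>'))
print(info)  # CommitInfo message for that commit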
250,792
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.list_commit
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0): """ Gets a list of CommitInfo objects. Params: * repo_name: If only `repo_name` is given, all commits in the repo are returned. * to_commit: Optional. Only the ancestors of `to`, including `to` itself, are considered. * from_commit: Optional. Only the descendants of `from`, including `from` itself, are considered. * number: Optional. Determines how many commits are returned. If `number` is 0, all commits that match the aforementioned criteria are returned. """ req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number) if to_commit is not None: req.to.CopyFrom(commit_from(to_commit)) if from_commit is not None: getattr(req, 'from').CopyFrom(commit_from(from_commit)) res = self.stub.ListCommit(req, metadata=self.metadata) if hasattr(res, 'commit_info'): return res.commit_info return []
python
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0): req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number) if to_commit is not None: req.to.CopyFrom(commit_from(to_commit)) if from_commit is not None: getattr(req, 'from').CopyFrom(commit_from(from_commit)) res = self.stub.ListCommit(req, metadata=self.metadata) if hasattr(res, 'commit_info'): return res.commit_info return []
[ "def", "list_commit", "(", "self", ",", "repo_name", ",", "to_commit", "=", "None", ",", "from_commit", "=", "None", ",", "number", "=", "0", ")", ":", "req", "=", "proto", ".", "ListCommitRequest", "(", "repo", "=", "proto", ".", "Repo", "(", "name", "=", "repo_name", ")", ",", "number", "=", "number", ")", "if", "to_commit", "is", "not", "None", ":", "req", ".", "to", ".", "CopyFrom", "(", "commit_from", "(", "to_commit", ")", ")", "if", "from_commit", "is", "not", "None", ":", "getattr", "(", "req", ",", "'from'", ")", ".", "CopyFrom", "(", "commit_from", "(", "from_commit", ")", ")", "res", "=", "self", ".", "stub", ".", "ListCommit", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "if", "hasattr", "(", "res", ",", "'commit_info'", ")", ":", "return", "res", ".", "commit_info", "return", "[", "]" ]
Gets a list of CommitInfo objects. Params: * repo_name: If only `repo_name` is given, all commits in the repo are returned. * to_commit: Optional. Only the ancestors of `to`, including `to` itself, are considered. * from_commit: Optional. Only the descendants of `from`, including `from` itself, are considered. * number: Optional. Determines how many commits are returned. If `number` is 0, all commits that match the aforementioned criteria are returned.
[ "Gets", "a", "list", "of", "CommitInfo", "objects", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L177-L200
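A sketch of the list_commit filters described above; the commit ids are placeholders, and the tuple form (repo_name, commit_id) is again assumed to be accepted by commit_from.

# All commits in 'photos'.
commits = client.list_commit('photos')

# Only descendants of one commit up to (and including) another, capped at 10 results.
window = client.list_commit(
    'photos',
    from_commit=('photos', '<older-commit-id>'),
    to_commit=('photos', '<newer-commit-id>'),
    number=10,
)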
250,793
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.delete_commit
def delete_commit(self, commit): """ Deletes a commit. Params: * commit: A tuple, string, or Commit object representing the commit. """ req = proto.DeleteCommitRequest(commit=commit_from(commit)) self.stub.DeleteCommit(req, metadata=self.metadata)
python
def delete_commit(self, commit): req = proto.DeleteCommitRequest(commit=commit_from(commit)) self.stub.DeleteCommit(req, metadata=self.metadata)
[ "def", "delete_commit", "(", "self", ",", "commit", ")", ":", "req", "=", "proto", ".", "DeleteCommitRequest", "(", "commit", "=", "commit_from", "(", "commit", ")", ")", "self", ".", "stub", ".", "DeleteCommit", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")" ]
Deletes a commit. Params: * commit: A tuple, string, or Commit object representing the commit.
[ "Deletes", "a", "commit", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L202-L210
250,794
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.flush_commit
def flush_commit(self, commits, repos=tuple()): """ Blocks until all of the commits which have a set of commits as provenance have finished. For commits to be considered they must have all of the specified commits as provenance. This in effect waits for all of the jobs that are triggered by a set of commits to complete. It returns an error if any of the commits it's waiting on are cancelled due to one of the jobs encountering an error during runtime. Note that it's never necessary to call FlushCommit to run jobs, they'll run no matter what, FlushCommit just allows you to wait for them to complete and see their output once they do. This returns an iterator of CommitInfo objects. Params: * commits: A commit or a list of commits to wait on. * repos: Optional. Only commits up to and including those repos will be considered; otherwise all repos are considered. """ req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits], to_repo=[proto.Repo(name=r) for r in repos]) res = self.stub.FlushCommit(req, metadata=self.metadata) return res
python
def flush_commit(self, commits, repos=tuple()): req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits], to_repo=[proto.Repo(name=r) for r in repos]) res = self.stub.FlushCommit(req, metadata=self.metadata) return res
[ "def", "flush_commit", "(", "self", ",", "commits", ",", "repos", "=", "tuple", "(", ")", ")", ":", "req", "=", "proto", ".", "FlushCommitRequest", "(", "commit", "=", "[", "commit_from", "(", "c", ")", "for", "c", "in", "commits", "]", ",", "to_repo", "=", "[", "proto", ".", "Repo", "(", "name", "=", "r", ")", "for", "r", "in", "repos", "]", ")", "res", "=", "self", ".", "stub", ".", "FlushCommit", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "return", "res" ]
Blocks until all of the commits which have a set of commits as provenance have finished. For commits to be considered they must have all of the specified commits as provenance. This in effect waits for all of the jobs that are triggered by a set of commits to complete. It returns an error if any of the commits it's waiting on are cancelled due to one of the jobs encountering an error during runtime. Note that it's never necessary to call FlushCommit to run jobs, they'll run no matter what, FlushCommit just allows you to wait for them to complete and see their output once they do. This returns an iterator of CommitInfo objects. Params: * commits: A commit or a list of commits to wait on. * repos: Optional. Only commits up to and including those repos will be considered; otherwise all repos are considered.
[ "Blocks", "until", "all", "of", "the", "commits", "which", "have", "a", "set", "of", "commits", "as", "provenance", "have", "finished", ".", "For", "commits", "to", "be", "considered", "they", "must", "have", "all", "of", "the", "specified", "commits", "as", "provenance", ".", "This", "in", "effect", "waits", "for", "all", "of", "the", "jobs", "that", "are", "triggered", "by", "a", "set", "of", "commits", "to", "complete", ".", "It", "returns", "an", "error", "if", "any", "of", "the", "commits", "it", "s", "waiting", "on", "are", "cancelled", "due", "to", "one", "of", "the", "jobs", "encountering", "an", "error", "during", "runtime", ".", "Note", "that", "it", "s", "never", "necessary", "to", "call", "FlushCommit", "to", "run", "jobs", "they", "ll", "run", "no", "matter", "what", "FlushCommit", "just", "allows", "you", "to", "wait", "for", "them", "to", "complete", "and", "see", "their", "output", "once", "they", "do", ".", "This", "returns", "an", "iterator", "of", "CommitInfo", "objects", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L212-L233
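A sketch of waiting on downstream jobs with flush_commit, per the description above; field access on the yielded CommitInfo messages is an assumption based on the PFS protos, and the commit id and repo names are placeholders.

# Block until every job triggered by this commit has finished,
# yielding the output CommitInfo messages as they complete.
for commit_info in client.flush_commit([('photos', '<commit-id>')]):
    print(commit_info.commit.repo.name, commit_info.commit.id)  # field names assumed

# Only consider output commits up to and including the 'thumbnails' repo.
client.flush_commit([('photos', '<commit-id>')], repos=['thumbnails'])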
250,795
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.subscribe_commit
def subscribe_commit(self, repo_name, branch, from_commit_id=None): """ SubscribeCommit is like ListCommit but it keeps listening for commits as they come in. This returns an iterator of Commit objects. Params: * repo_name: Name of the repo. * branch: Branch to subscribe to. * from_commit_id: Optional. Only commits created since this commit are returned. """ repo = proto.Repo(name=repo_name) req = proto.SubscribeCommitRequest(repo=repo, branch=branch) if from_commit_id is not None: getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id)) res = self.stub.SubscribeCommit(req, metadata=self.metadata) return res
python
def subscribe_commit(self, repo_name, branch, from_commit_id=None): repo = proto.Repo(name=repo_name) req = proto.SubscribeCommitRequest(repo=repo, branch=branch) if from_commit_id is not None: getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id)) res = self.stub.SubscribeCommit(req, metadata=self.metadata) return res
[ "def", "subscribe_commit", "(", "self", ",", "repo_name", ",", "branch", ",", "from_commit_id", "=", "None", ")", ":", "repo", "=", "proto", ".", "Repo", "(", "name", "=", "repo_name", ")", "req", "=", "proto", ".", "SubscribeCommitRequest", "(", "repo", "=", "repo", ",", "branch", "=", "branch", ")", "if", "from_commit_id", "is", "not", "None", ":", "getattr", "(", "req", ",", "'from'", ")", ".", "CopyFrom", "(", "proto", ".", "Commit", "(", "repo", "=", "repo", ",", "id", "=", "from_commit_id", ")", ")", "res", "=", "self", ".", "stub", ".", "SubscribeCommit", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "return", "res" ]
SubscribeCommit is like ListCommit but it keeps listening for commits as they come in. This returns an iterator of Commit objects. Params: * repo_name: Name of the repo. * branch: Branch to subscribe to. * from_commit_id: Optional. Only commits created since this commit are returned.
[ "SubscribeCommit", "is", "like", "ListCommit", "but", "it", "keeps", "listening", "for", "commits", "as", "they", "come", "in", ".", "This", "returns", "an", "iterator", "of", "Commit", "objects", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L235-L251
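A sketch of the streaming behaviour of subscribe_commit; the stream does not terminate on its own, so the loop breaks after the first item. The exact type of the yielded messages is taken from the docstring above and not verified here, so no field access is attempted.

# Blocks and yields commits on 'master' as they are finished.
for c in client.subscribe_commit('photos', 'master', from_commit_id='<commit-id>'):
    print(c)
    break  # the stream keeps listening; stop after the first commit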
250,796
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.list_branch
def list_branch(self, repo_name): """ Lists the active Branch objects on a Repo. Params: * repo_name: The name of the repo. """ req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name)) res = self.stub.ListBranch(req, metadata=self.metadata) if hasattr(res, 'branch_info'): return res.branch_info return []
python
def list_branch(self, repo_name): req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name)) res = self.stub.ListBranch(req, metadata=self.metadata) if hasattr(res, 'branch_info'): return res.branch_info return []
[ "def", "list_branch", "(", "self", ",", "repo_name", ")", ":", "req", "=", "proto", ".", "ListBranchRequest", "(", "repo", "=", "proto", ".", "Repo", "(", "name", "=", "repo_name", ")", ")", "res", "=", "self", ".", "stub", ".", "ListBranch", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "if", "hasattr", "(", "res", ",", "'branch_info'", ")", ":", "return", "res", ".", "branch_info", "return", "[", "]" ]
Lists the active Branch objects on a Repo. Params: * repo_name: The name of the repo.
[ "Lists", "the", "active", "Branch", "objects", "on", "a", "Repo", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L253-L264
250,797
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.set_branch
def set_branch(self, commit, branch_name): """ Sets a commit and its ancestors as a branch. Params: * commit: A tuple, string, or Commit object representing the commit. * branch_name: The name for the branch to set. """ res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name) self.stub.SetBranch(res, metadata=self.metadata)
python
def set_branch(self, commit, branch_name): res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name) self.stub.SetBranch(res, metadata=self.metadata)
[ "def", "set_branch", "(", "self", ",", "commit", ",", "branch_name", ")", ":", "res", "=", "proto", ".", "SetBranchRequest", "(", "commit", "=", "commit_from", "(", "commit", ")", ",", "branch", "=", "branch_name", ")", "self", ".", "stub", ".", "SetBranch", "(", "res", ",", "metadata", "=", "self", ".", "metadata", ")" ]
Sets a commit and its ancestors as a branch. Params: * commit: A tuple, string, or Commit object representing the commit. * branch_name: The name for the branch to set.
[ "Sets", "a", "commit", "and", "its", "ancestors", "as", "a", "branch", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L266-L275
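A sketch combining set_branch with list_branch from the preceding records; the commit id is a placeholder and the tuple form is again assumed to be accepted by commit_from.

# Alias a known-good commit (and its ancestors) as the 'stable' branch.
client.set_branch(('photos', '<commit-id>'), 'stable')

# The new branch shows up in the repo's branch listing.
for branch_info in client.list_branch('photos'):
    print(branch_info)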
250,798
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.delete_branch
def delete_branch(self, repo_name, branch_name): """ Deletes a branch, but leaves the commits themselves intact. In other words, those commits can still be accessed via commit IDs and other branches they happen to be on. Params: * repo_name: The name of the repo. * branch_name: The name of the branch to delete. """ res = proto.DeleteBranchRequest(repo=proto.Repo(name=repo_name), branch=branch_name) self.stub.DeleteBranch(res, metadata=self.metadata)
python
def delete_branch(self, repo_name, branch_name): res = proto.DeleteBranchRequest(repo=proto.Repo(name=repo_name), branch=branch_name) self.stub.DeleteBranch(res, metadata=self.metadata)
[ "def", "delete_branch", "(", "self", ",", "repo_name", ",", "branch_name", ")", ":", "res", "=", "proto", ".", "DeleteBranchRequest", "(", "repo", "=", "proto", ".", "Repo", "(", "name", "=", "repo_name", ")", ",", "branch", "=", "branch_name", ")", "self", ".", "stub", ".", "DeleteBranch", "(", "res", ",", "metadata", "=", "self", ".", "metadata", ")" ]
Deletes a branch, but leaves the commits themselves intact. In other words, those commits can still be accessed via commit IDs and other branches they happen to be on. Params: * repo_name: The name of the repo. * branch_name: The name of the branch to delete.
[ "Deletes", "a", "branch", "but", "leaves", "the", "commits", "themselves", "intact", ".", "In", "other", "words", "those", "commits", "can", "still", "be", "accessed", "via", "commit", "IDs", "and", "other", "branches", "they", "happen", "to", "be", "on", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L277-L288
250,799
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.put_file_url
def put_file_url(self, commit, path, url, recursive=False): """ Puts a file using the content found at a URL. The URL is sent to the server which performs the request. Params: * commit: A tuple, string, or Commit object representing the commit. * path: The path to the file. * url: The url of the file to put. * recursive: allow for recursive scraping of some types of URLs, for example s3:// urls. """ req = iter([ proto.PutFileRequest( file=proto.File(commit=commit_from(commit), path=path), url=url, recursive=recursive ) ]) self.stub.PutFile(req, metadata=self.metadata)
python
def put_file_url(self, commit, path, url, recursive=False): req = iter([ proto.PutFileRequest( file=proto.File(commit=commit_from(commit), path=path), url=url, recursive=recursive ) ]) self.stub.PutFile(req, metadata=self.metadata)
[ "def", "put_file_url", "(", "self", ",", "commit", ",", "path", ",", "url", ",", "recursive", "=", "False", ")", ":", "req", "=", "iter", "(", "[", "proto", ".", "PutFileRequest", "(", "file", "=", "proto", ".", "File", "(", "commit", "=", "commit_from", "(", "commit", ")", ",", "path", "=", "path", ")", ",", "url", "=", "url", ",", "recursive", "=", "recursive", ")", "]", ")", "self", ".", "stub", ".", "PutFile", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")" ]
Puts a file using the content found at a URL. The URL is sent to the server which performs the request. Params: * commit: A tuple, string, or Commit object representing the commit. * path: The path to the file. * url: The url of the file to put. * recursive: allow for recursive scraping of some types of URLs, for example s3:// urls.
[ "Puts", "a", "file", "using", "the", "content", "found", "at", "a", "URL", ".", "The", "URL", "is", "sent", "to", "the", "server", "which", "performs", "the", "request", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L363-L382
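A final sketch for put_file_url: the server, not the client, fetches the URL, so it must be reachable from the cluster. The repo, path, and URLs are hypothetical; recursive=True is only meaningful for schemes such as s3:// that can be scraped as a tree.

with client.commit('photos', branch='master') as c:
    # The server downloads the file and stores it at /logos/logo.svg in the open commit.
    client.put_file_url(c, '/logos/logo.svg', 'https://example.com/logo.svg')

    # Recursively import an entire s3 prefix (bucket and prefix are placeholders).
    client.put_file_url(c, '/imports/', 's3://example-bucket/prefix/', recursive=True)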