Dataset columns: docstring (string, lengths 52–499), function (string, lengths 67–35.2k), __index_level_0__ (int64, range 52.6k–1.16M).
Get PyPI package names from a list of imports. Args: pkgs (List[str]): List of import names. Returns: List[str]: The corresponding PyPI package names.
def get_pkg_names(pkgs): result = set() with open(join("mapping"), "r") as f: data = dict(x.strip().split(":") for x in f) for pkg in pkgs: # Look up the mapped requirement. If a mapping isn't found, # simply use the package name. result.add(data.get(pkg, pkg)) # Return a sorted list for backward compatibility. return sorted(result, key=lambda s: s.lower())
250,162
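A minimal usage sketch for get_pkg_names above, assuming the "mapping" file contains colon-separated lines such as "bs4:beautifulsoup4" (the example imports and mapped names are illustrative, not taken from the source):
imports = ["bs4", "numpy", "yaml"]
print(get_pkg_names(imports))
# With the assumed mapping entries this would print, e.g.:
# ['beautifulsoup4', 'numpy', 'PyYAML']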
Converts data string to iterable. Parameters: ----------- datastring: string, defaults to None The data string to be converted; self.get_clipboard() is called if set to None sep: string Separator for columns in datastring
def _convert_clipboard(self, datastring=None, sep='\t'): if datastring is None: datastring = self.get_clipboard() data_it = ((ele for ele in line.split(sep)) for line in datastring.splitlines()) return data_it
250,966
Return list of all positions of find_string in MainGrid. Only the code is searched. The result is not searched here. Parameters: ----------- find_string: String String to find in grid flags: List of strings Search flags out of ["UP" xor "DOWN", "WHOLE_WORD", "MATCH_CASE", "REG_EXP"]
def find_all(self, find_string, flags): code_array = self.grid.code_array string_match = code_array.string_match find_keys = [] for key in code_array: if string_match(code_array(key), find_string, flags) is not None: find_keys.append(key) return find_keys
251,035
Return next position of find_string in MainGrid Parameters: ----------- gridpos: 3-tuple of Integer Position at which the search starts find_string: String String to find in grid flags: List of strings Search flags out of ["UP" xor "DOWN", "WHOLE_WORD", "MATCH_CASE", "REG_EXP"] search_result: Bool, defaults to True If True then the search includes the result string (slower)
def find(self, gridpos, find_string, flags, search_result=True): findfunc = self.grid.code_array.findnextmatch if "DOWN" in flags: if gridpos[0] < self.grid.code_array.shape[0]: gridpos[0] += 1 elif gridpos[1] < self.grid.code_array.shape[1]: gridpos[1] += 1 elif gridpos[2] < self.grid.code_array.shape[2]: gridpos[2] += 1 else: gridpos = (0, 0, 0) elif "UP" in flags: if gridpos[0] > 0: gridpos[0] -= 1 elif gridpos[1] > 0: gridpos[1] -= 1 elif gridpos[2] > 0: gridpos[2] -= 1 else: gridpos = [dim - 1 for dim in self.grid.code_array.shape] return findfunc(tuple(gridpos), find_string, flags, search_result)
251,036
Returns a tuple with the position of the next match of find_string. Returns None if the string is not found. Parameters: ----------- startkey: Start position of search find_string: String to be searched for flags: List of strings, out of ["UP" xor "DOWN", "WHOLE_WORD", "MATCH_CASE", "REG_EXP"] search_result: Bool, defaults to True If True then the search includes the result string (slower)
def findnextmatch(self, startkey, find_string, flags, search_result=True): assert "UP" in flags or "DOWN" in flags assert not ("UP" in flags and "DOWN" in flags) if search_result: def is_matching(key, find_string, flags): code = self(key) if self.string_match(code, find_string, flags) is not None: return True else: res_str = unicode(self[key]) return self.string_match(res_str, find_string, flags) \ is not None else: def is_matching(key, find_string, flags): code = self(key) return self.string_match(code, find_string, flags) is not None # List of keys in sgrid in search order reverse = "UP" in flags for key in self._sorted_keys(self.keys(), startkey, reverse=reverse): try: if is_matching(key, find_string, flags): return key except Exception: # re errors are cryptic: sre_constants,... pass
251,264
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel): self.Login = channel.unary_unary( '/api.Dgraph/Login', request_serializer=api__pb2.LoginRequest.SerializeToString, response_deserializer=api__pb2.Response.FromString, ) self.Query = channel.unary_unary( '/api.Dgraph/Query', request_serializer=api__pb2.Request.SerializeToString, response_deserializer=api__pb2.Response.FromString, ) self.Mutate = channel.unary_unary( '/api.Dgraph/Mutate', request_serializer=api__pb2.Mutation.SerializeToString, response_deserializer=api__pb2.Assigned.FromString, ) self.Alter = channel.unary_unary( '/api.Dgraph/Alter', request_serializer=api__pb2.Operation.SerializeToString, response_deserializer=api__pb2.Payload.FromString, ) self.CommitOrAbort = channel.unary_unary( '/api.Dgraph/CommitOrAbort', request_serializer=api__pb2.TxnContext.SerializeToString, response_deserializer=api__pb2.TxnContext.FromString, ) self.CheckVersion = channel.unary_unary( '/api.Dgraph/CheckVersion', request_serializer=api__pb2.Check.SerializeToString, response_deserializer=api__pb2.Version.FromString, )
251,338
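A hedged usage sketch for the Dgraph stub constructor above; the stub class name DgraphStub, the generated module name api_pb2, and the Request/Response field names are assumptions based on generated-code conventions, not confirmed by the source:
import grpc
import api_pb2 as api__pb2  # generated protobuf module (name assumed)

# Connect to a local Dgraph alpha and issue a query through the generated stub.
channel = grpc.insecure_channel("localhost:9080")
stub = DgraphStub(channel)  # class name assumed
response = stub.Query(api__pb2.Request(query="{ q(func: has(name)) { name } }"))
print(response.json)  # field name assumed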
Semaphore lock. Semaphore logic is implemented in the lua/semaphore.lua script. Individual locks within the semaphore are managed inside a ZSET using scores to track when they expire. Arguments: redis: Redis client name: Name of lock. Used as ZSET key. lock_id: Lock ID timeout: Timeout in seconds max_locks: Maximum number of locks allowed for this semaphore
def __init__(self, redis, name, lock_id, timeout, max_locks=1): self.redis = redis self.name = name self.lock_id = lock_id self.max_locks = max_locks self.timeout = timeout with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'lua/semaphore.lua')) as f: self._semaphore = self.redis.register_script(f.read())
251,449
Set system lock for the semaphore. Sets a system lock that will expire in timeout seconds. This overrides all other locks. Existing locks cannot be renewed and no new locks will be permitted until the system lock expires. Arguments: redis: Redis client name: Name of lock. Used as ZSET key. timeout: Timeout in seconds for system lock
def set_system_lock(cls, redis, name, timeout): pipeline = redis.pipeline() pipeline.zadd(name, SYSTEM_LOCK_ID, time.time() + timeout) pipeline.expire(name, timeout + 10) # timeout plus buffer for troubleshooting pipeline.execute()
251,450
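The zadd call above uses the positional member/score form from redis-py 2.x; redis-py 3.0 changed zadd to take a {member: score} mapping. A sketch of the same logic against the newer signature (the system_lock_id default here is only a placeholder):
import time

def set_system_lock_v3(redis, name, timeout, system_lock_id="SYSTEM_LOCK"):
    # Same semantics as set_system_lock, written for redis-py >= 3.0.
    pipeline = redis.pipeline()
    pipeline.zadd(name, {system_lock_id: time.time() + timeout})
    pipeline.expire(name, timeout + 10)  # timeout plus buffer for troubleshooting
    pipeline.execute()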
Internal method to process a task batch from the given queue. Args: queue: Queue name to be processed Returns: Task IDs: List of tasks that were processed (even if there was an error so that client code can assume the queue is empty if nothing was returned) Count: The number of tasks that were attempted to be executed or -1 if the queue lock couldn't be acquired.
def _process_from_queue(self, queue): now = time.time() log = self.log.bind(queue=queue) batch_size = self._get_queue_batch_size(queue) queue_lock, failed_to_acquire = self._get_queue_lock(queue, log) if failed_to_acquire: return [], -1 # Move an item to the active queue, if available. # We need to be careful when moving unique tasks: We currently don't # support concurrent processing of multiple unique tasks. If the task # is already in the ACTIVE queue, we need to execute the queued task # later, i.e. move it to the SCHEDULED queue (prefer the earliest # time if it's already scheduled). We want to make sure that the last # queued instance of the task always gets executed no earlier than it # was queued. later = time.time() + self.config['LOCK_RETRY'] task_ids = self.scripts.zpoppush( self._key(QUEUED, queue), self._key(ACTIVE, queue), batch_size, None, now, if_exists=('add', self._key(SCHEDULED, queue), later, 'min'), on_success=('update_sets', queue, self._key(QUEUED), self._key(ACTIVE), self._key(SCHEDULED)) ) log.debug('moved tasks', src_queue=QUEUED, dest_queue=ACTIVE, qty=len(task_ids)) processed_count = 0 if task_ids: processed_count = self._process_queue_tasks(queue, queue_lock, task_ids, now, log) if queue_lock: queue_lock.release() log.debug('released swq lock') return task_ids, processed_count
251,483
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel): self.Dump = channel.unary_stream( '/debug.Debug/Dump', request_serializer=client_dot_debug_dot_debug__pb2.DumpRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_wrappers__pb2.BytesValue.FromString, ) self.Profile = channel.unary_stream( '/debug.Debug/Profile', request_serializer=client_dot_debug_dot_debug__pb2.ProfileRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_wrappers__pb2.BytesValue.FromString, ) self.Binary = channel.unary_stream( '/debug.Debug/Binary', request_serializer=client_dot_debug_dot_debug__pb2.BinaryRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_wrappers__pb2.BytesValue.FromString, )
252,002
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel): self.Health = channel.unary_unary( '/health.Health/Health', request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, )
252,004
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel): self.Activate = channel.unary_unary( '/auth.API/Activate', request_serializer=client_dot_auth_dot_auth__pb2.ActivateRequest.SerializeToString, response_deserializer=client_dot_auth_dot_auth__pb2.ActivateResponse.FromString, ) self.Deactivate = channel.unary_unary( '/auth.API/Deactivate', request_serializer=client_dot_auth_dot_auth__pb2.DeactivateRequest.SerializeToString, response_deserializer=client_dot_auth_dot_auth__pb2.DeactivateResponse.FromString, ) self.GetConfiguration = channel.unary_unary( '/auth.API/GetConfiguration', request_serializer=client_dot_auth_dot_auth__pb2.GetConfigurationRequest.SerializeToString, response_deserializer=client_dot_auth_dot_auth__pb2.GetConfigurationResponse.FromString, ) self.SetConfiguration = channel.unary_unary( '/auth.API/SetConfiguration', request_serializer=client_dot_auth_dot_auth__pb2.SetConfigurationRequest.SerializeToString, response_deserializer=client_dot_auth_dot_auth__pb2.SetConfigurationResponse.FromString, ) self.GetAdmins = channel.unary_unary( '/auth.API/GetAdmins', request_serializer=client_dot_auth_dot_auth__pb2.GetAdminsRequest.SerializeToString, response_deserializer=client_dot_auth_dot_auth__pb2.GetAdminsResponse.FromString, ) self.ModifyAdmins = channel.unary_unary( '/auth.API/ModifyAdmins', request_serializer=client_dot_auth_dot_auth__pb2.ModifyAdminsRequest.SerializeToString, response_deserializer=client_dot_auth_dot_auth__pb2.ModifyAdminsResponse.FromString, ) self.Authenticate = channel.unary_unary( '/auth.API/Authenticate', request_serializer=client_dot_auth_dot_auth__pb2.AuthenticateRequest.SerializeToString, response_deserializer=client_dot_auth_dot_auth__pb2.AuthenticateResponse.FromString, ) self.Authorize = channel.unary_unary( '/auth.API/Authorize', request_serializer=client_dot_auth_dot_auth__pb2.AuthorizeRequest.SerializeToString, response_deserializer=client_dot_auth_dot_auth__pb2.AuthorizeResponse.FromString, ) self.WhoAmI = channel.unary_unary( '/auth.API/WhoAmI', request_serializer=client_dot_auth_dot_auth__pb2.WhoAmIRequest.SerializeToString, response_deserializer=client_dot_auth_dot_auth__pb2.WhoAmIResponse.FromString, ) self.GetScope = channel.unary_unary( '/auth.API/GetScope', request_serializer=client_dot_auth_dot_auth__pb2.GetScopeRequest.SerializeToString, response_deserializer=client_dot_auth_dot_auth__pb2.GetScopeResponse.FromString, ) self.SetScope = channel.unary_unary( '/auth.API/SetScope', request_serializer=client_dot_auth_dot_auth__pb2.SetScopeRequest.SerializeToString, response_deserializer=client_dot_auth_dot_auth__pb2.SetScopeResponse.FromString, ) self.GetACL = channel.unary_unary( '/auth.API/GetACL', request_serializer=client_dot_auth_dot_auth__pb2.GetACLRequest.SerializeToString, response_deserializer=client_dot_auth_dot_auth__pb2.GetACLResponse.FromString, ) self.SetACL = channel.unary_unary( '/auth.API/SetACL', request_serializer=client_dot_auth_dot_auth__pb2.SetACLRequest.SerializeToString, response_deserializer=client_dot_auth_dot_auth__pb2.SetACLResponse.FromString, ) self.GetAuthToken = channel.unary_unary( '/auth.API/GetAuthToken', request_serializer=client_dot_auth_dot_auth__pb2.GetAuthTokenRequest.SerializeToString, response_deserializer=client_dot_auth_dot_auth__pb2.GetAuthTokenResponse.FromString, ) self.ExtendAuthToken = channel.unary_unary( '/auth.API/ExtendAuthToken', request_serializer=client_dot_auth_dot_auth__pb2.ExtendAuthTokenRequest.SerializeToString, response_deserializer=client_dot_auth_dot_auth__pb2.ExtendAuthTokenResponse.FromString, ) 
self.RevokeAuthToken = channel.unary_unary( '/auth.API/RevokeAuthToken', request_serializer=client_dot_auth_dot_auth__pb2.RevokeAuthTokenRequest.SerializeToString, response_deserializer=client_dot_auth_dot_auth__pb2.RevokeAuthTokenResponse.FromString, ) self.SetGroupsForUser = channel.unary_unary( '/auth.API/SetGroupsForUser', request_serializer=client_dot_auth_dot_auth__pb2.SetGroupsForUserRequest.SerializeToString, response_deserializer=client_dot_auth_dot_auth__pb2.SetGroupsForUserResponse.FromString, ) self.ModifyMembers = channel.unary_unary( '/auth.API/ModifyMembers', request_serializer=client_dot_auth_dot_auth__pb2.ModifyMembersRequest.SerializeToString, response_deserializer=client_dot_auth_dot_auth__pb2.ModifyMembersResponse.FromString, ) self.GetGroups = channel.unary_unary( '/auth.API/GetGroups', request_serializer=client_dot_auth_dot_auth__pb2.GetGroupsRequest.SerializeToString, response_deserializer=client_dot_auth_dot_auth__pb2.GetGroupsResponse.FromString, ) self.GetUsers = channel.unary_unary( '/auth.API/GetUsers', request_serializer=client_dot_auth_dot_auth__pb2.GetUsersRequest.SerializeToString, response_deserializer=client_dot_auth_dot_auth__pb2.GetUsersResponse.FromString, ) self.GetOneTimePassword = channel.unary_unary( '/auth.API/GetOneTimePassword', request_serializer=client_dot_auth_dot_auth__pb2.GetOneTimePasswordRequest.SerializeToString, response_deserializer=client_dot_auth_dot_auth__pb2.GetOneTimePasswordResponse.FromString, )
252,012
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel): self.DeployStorageSecret = channel.unary_unary( '/deploy.API/DeployStorageSecret', request_serializer=client_dot_deploy_dot_deploy__pb2.DeployStorageSecretRequest.SerializeToString, response_deserializer=client_dot_deploy_dot_deploy__pb2.DeployStorageSecretResponse.FromString, )
252,014
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel): self.CreateJob = channel.unary_unary( '/pps.API/CreateJob', request_serializer=client_dot_pps_dot_pps__pb2.CreateJobRequest.SerializeToString, response_deserializer=client_dot_pps_dot_pps__pb2.Job.FromString, ) self.InspectJob = channel.unary_unary( '/pps.API/InspectJob', request_serializer=client_dot_pps_dot_pps__pb2.InspectJobRequest.SerializeToString, response_deserializer=client_dot_pps_dot_pps__pb2.JobInfo.FromString, ) self.ListJob = channel.unary_unary( '/pps.API/ListJob', request_serializer=client_dot_pps_dot_pps__pb2.ListJobRequest.SerializeToString, response_deserializer=client_dot_pps_dot_pps__pb2.JobInfos.FromString, ) self.ListJobStream = channel.unary_stream( '/pps.API/ListJobStream', request_serializer=client_dot_pps_dot_pps__pb2.ListJobRequest.SerializeToString, response_deserializer=client_dot_pps_dot_pps__pb2.JobInfo.FromString, ) self.FlushJob = channel.unary_stream( '/pps.API/FlushJob', request_serializer=client_dot_pps_dot_pps__pb2.FlushJobRequest.SerializeToString, response_deserializer=client_dot_pps_dot_pps__pb2.JobInfo.FromString, ) self.DeleteJob = channel.unary_unary( '/pps.API/DeleteJob', request_serializer=client_dot_pps_dot_pps__pb2.DeleteJobRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.StopJob = channel.unary_unary( '/pps.API/StopJob', request_serializer=client_dot_pps_dot_pps__pb2.StopJobRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.InspectDatum = channel.unary_unary( '/pps.API/InspectDatum', request_serializer=client_dot_pps_dot_pps__pb2.InspectDatumRequest.SerializeToString, response_deserializer=client_dot_pps_dot_pps__pb2.DatumInfo.FromString, ) self.ListDatum = channel.unary_unary( '/pps.API/ListDatum', request_serializer=client_dot_pps_dot_pps__pb2.ListDatumRequest.SerializeToString, response_deserializer=client_dot_pps_dot_pps__pb2.ListDatumResponse.FromString, ) self.ListDatumStream = channel.unary_stream( '/pps.API/ListDatumStream', request_serializer=client_dot_pps_dot_pps__pb2.ListDatumRequest.SerializeToString, response_deserializer=client_dot_pps_dot_pps__pb2.ListDatumStreamResponse.FromString, ) self.RestartDatum = channel.unary_unary( '/pps.API/RestartDatum', request_serializer=client_dot_pps_dot_pps__pb2.RestartDatumRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.CreatePipeline = channel.unary_unary( '/pps.API/CreatePipeline', request_serializer=client_dot_pps_dot_pps__pb2.CreatePipelineRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.InspectPipeline = channel.unary_unary( '/pps.API/InspectPipeline', request_serializer=client_dot_pps_dot_pps__pb2.InspectPipelineRequest.SerializeToString, response_deserializer=client_dot_pps_dot_pps__pb2.PipelineInfo.FromString, ) self.ListPipeline = channel.unary_unary( '/pps.API/ListPipeline', request_serializer=client_dot_pps_dot_pps__pb2.ListPipelineRequest.SerializeToString, response_deserializer=client_dot_pps_dot_pps__pb2.PipelineInfos.FromString, ) self.DeletePipeline = channel.unary_unary( '/pps.API/DeletePipeline', request_serializer=client_dot_pps_dot_pps__pb2.DeletePipelineRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.StartPipeline = channel.unary_unary( '/pps.API/StartPipeline', request_serializer=client_dot_pps_dot_pps__pb2.StartPipelineRequest.SerializeToString, 
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.StopPipeline = channel.unary_unary( '/pps.API/StopPipeline', request_serializer=client_dot_pps_dot_pps__pb2.StopPipelineRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.RerunPipeline = channel.unary_unary( '/pps.API/RerunPipeline', request_serializer=client_dot_pps_dot_pps__pb2.RerunPipelineRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.DeleteAll = channel.unary_unary( '/pps.API/DeleteAll', request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.GetLogs = channel.unary_stream( '/pps.API/GetLogs', request_serializer=client_dot_pps_dot_pps__pb2.GetLogsRequest.SerializeToString, response_deserializer=client_dot_pps_dot_pps__pb2.LogMessage.FromString, ) self.GarbageCollect = channel.unary_unary( '/pps.API/GarbageCollect', request_serializer=client_dot_pps_dot_pps__pb2.GarbageCollectRequest.SerializeToString, response_deserializer=client_dot_pps_dot_pps__pb2.GarbageCollectResponse.FromString, ) self.ActivateAuth = channel.unary_unary( '/pps.API/ActivateAuth', request_serializer=client_dot_pps_dot_pps__pb2.ActivateAuthRequest.SerializeToString, response_deserializer=client_dot_pps_dot_pps__pb2.ActivateAuthResponse.FromString, )
252,017
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel): self.CreateRepo = channel.unary_unary( '/pfs.API/CreateRepo', request_serializer=client_dot_pfs_dot_pfs__pb2.CreateRepoRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.InspectRepo = channel.unary_unary( '/pfs.API/InspectRepo', request_serializer=client_dot_pfs_dot_pfs__pb2.InspectRepoRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.RepoInfo.FromString, ) self.ListRepo = channel.unary_unary( '/pfs.API/ListRepo', request_serializer=client_dot_pfs_dot_pfs__pb2.ListRepoRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.ListRepoResponse.FromString, ) self.DeleteRepo = channel.unary_unary( '/pfs.API/DeleteRepo', request_serializer=client_dot_pfs_dot_pfs__pb2.DeleteRepoRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.StartCommit = channel.unary_unary( '/pfs.API/StartCommit', request_serializer=client_dot_pfs_dot_pfs__pb2.StartCommitRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.Commit.FromString, ) self.FinishCommit = channel.unary_unary( '/pfs.API/FinishCommit', request_serializer=client_dot_pfs_dot_pfs__pb2.FinishCommitRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.InspectCommit = channel.unary_unary( '/pfs.API/InspectCommit', request_serializer=client_dot_pfs_dot_pfs__pb2.InspectCommitRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.CommitInfo.FromString, ) self.ListCommit = channel.unary_unary( '/pfs.API/ListCommit', request_serializer=client_dot_pfs_dot_pfs__pb2.ListCommitRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.CommitInfos.FromString, ) self.ListCommitStream = channel.unary_stream( '/pfs.API/ListCommitStream', request_serializer=client_dot_pfs_dot_pfs__pb2.ListCommitRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.CommitInfo.FromString, ) self.DeleteCommit = channel.unary_unary( '/pfs.API/DeleteCommit', request_serializer=client_dot_pfs_dot_pfs__pb2.DeleteCommitRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.FlushCommit = channel.unary_stream( '/pfs.API/FlushCommit', request_serializer=client_dot_pfs_dot_pfs__pb2.FlushCommitRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.CommitInfo.FromString, ) self.SubscribeCommit = channel.unary_stream( '/pfs.API/SubscribeCommit', request_serializer=client_dot_pfs_dot_pfs__pb2.SubscribeCommitRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.CommitInfo.FromString, ) self.BuildCommit = channel.unary_unary( '/pfs.API/BuildCommit', request_serializer=client_dot_pfs_dot_pfs__pb2.BuildCommitRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.Commit.FromString, ) self.CreateBranch = channel.unary_unary( '/pfs.API/CreateBranch', request_serializer=client_dot_pfs_dot_pfs__pb2.CreateBranchRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.InspectBranch = channel.unary_unary( '/pfs.API/InspectBranch', request_serializer=client_dot_pfs_dot_pfs__pb2.InspectBranchRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.BranchInfo.FromString, ) self.ListBranch = channel.unary_unary( '/pfs.API/ListBranch', 
request_serializer=client_dot_pfs_dot_pfs__pb2.ListBranchRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.BranchInfos.FromString, ) self.DeleteBranch = channel.unary_unary( '/pfs.API/DeleteBranch', request_serializer=client_dot_pfs_dot_pfs__pb2.DeleteBranchRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.PutFile = channel.stream_unary( '/pfs.API/PutFile', request_serializer=client_dot_pfs_dot_pfs__pb2.PutFileRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.CopyFile = channel.unary_unary( '/pfs.API/CopyFile', request_serializer=client_dot_pfs_dot_pfs__pb2.CopyFileRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.GetFile = channel.unary_stream( '/pfs.API/GetFile', request_serializer=client_dot_pfs_dot_pfs__pb2.GetFileRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_wrappers__pb2.BytesValue.FromString, ) self.InspectFile = channel.unary_unary( '/pfs.API/InspectFile', request_serializer=client_dot_pfs_dot_pfs__pb2.InspectFileRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.FileInfo.FromString, ) self.ListFile = channel.unary_unary( '/pfs.API/ListFile', request_serializer=client_dot_pfs_dot_pfs__pb2.ListFileRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.FileInfos.FromString, ) self.ListFileStream = channel.unary_stream( '/pfs.API/ListFileStream', request_serializer=client_dot_pfs_dot_pfs__pb2.ListFileRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.FileInfo.FromString, ) self.WalkFile = channel.unary_stream( '/pfs.API/WalkFile', request_serializer=client_dot_pfs_dot_pfs__pb2.WalkFileRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.FileInfo.FromString, ) self.GlobFile = channel.unary_unary( '/pfs.API/GlobFile', request_serializer=client_dot_pfs_dot_pfs__pb2.GlobFileRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.FileInfos.FromString, ) self.GlobFileStream = channel.unary_stream( '/pfs.API/GlobFileStream', request_serializer=client_dot_pfs_dot_pfs__pb2.GlobFileRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.FileInfo.FromString, ) self.DiffFile = channel.unary_unary( '/pfs.API/DiffFile', request_serializer=client_dot_pfs_dot_pfs__pb2.DiffFileRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.DiffFileResponse.FromString, ) self.DeleteFile = channel.unary_unary( '/pfs.API/DeleteFile', request_serializer=client_dot_pfs_dot_pfs__pb2.DeleteFileRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.DeleteAll = channel.unary_unary( '/pfs.API/DeleteAll', request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, )
252,020
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel): self.PutObject = channel.stream_unary( '/pfs.ObjectAPI/PutObject', request_serializer=client_dot_pfs_dot_pfs__pb2.PutObjectRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.Object.FromString, ) self.PutObjectSplit = channel.stream_unary( '/pfs.ObjectAPI/PutObjectSplit', request_serializer=client_dot_pfs_dot_pfs__pb2.PutObjectRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.Objects.FromString, ) self.PutObjects = channel.stream_unary( '/pfs.ObjectAPI/PutObjects', request_serializer=client_dot_pfs_dot_pfs__pb2.PutObjectRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.GetObject = channel.unary_stream( '/pfs.ObjectAPI/GetObject', request_serializer=client_dot_pfs_dot_pfs__pb2.Object.SerializeToString, response_deserializer=google_dot_protobuf_dot_wrappers__pb2.BytesValue.FromString, ) self.GetObjects = channel.unary_stream( '/pfs.ObjectAPI/GetObjects', request_serializer=client_dot_pfs_dot_pfs__pb2.GetObjectsRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_wrappers__pb2.BytesValue.FromString, ) self.GetBlocks = channel.unary_stream( '/pfs.ObjectAPI/GetBlocks', request_serializer=client_dot_pfs_dot_pfs__pb2.GetBlocksRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_wrappers__pb2.BytesValue.FromString, ) self.TagObject = channel.unary_unary( '/pfs.ObjectAPI/TagObject', request_serializer=client_dot_pfs_dot_pfs__pb2.TagObjectRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.InspectObject = channel.unary_unary( '/pfs.ObjectAPI/InspectObject', request_serializer=client_dot_pfs_dot_pfs__pb2.Object.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.ObjectInfo.FromString, ) self.CheckObject = channel.unary_unary( '/pfs.ObjectAPI/CheckObject', request_serializer=client_dot_pfs_dot_pfs__pb2.CheckObjectRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.CheckObjectResponse.FromString, ) self.ListObjects = channel.unary_stream( '/pfs.ObjectAPI/ListObjects', request_serializer=client_dot_pfs_dot_pfs__pb2.ListObjectsRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.Object.FromString, ) self.DeleteObjects = channel.unary_unary( '/pfs.ObjectAPI/DeleteObjects', request_serializer=client_dot_pfs_dot_pfs__pb2.DeleteObjectsRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.DeleteObjectsResponse.FromString, ) self.GetTag = channel.unary_stream( '/pfs.ObjectAPI/GetTag', request_serializer=client_dot_pfs_dot_pfs__pb2.Tag.SerializeToString, response_deserializer=google_dot_protobuf_dot_wrappers__pb2.BytesValue.FromString, ) self.InspectTag = channel.unary_unary( '/pfs.ObjectAPI/InspectTag', request_serializer=client_dot_pfs_dot_pfs__pb2.Tag.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.ObjectInfo.FromString, ) self.ListTags = channel.unary_stream( '/pfs.ObjectAPI/ListTags', request_serializer=client_dot_pfs_dot_pfs__pb2.ListTagsRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.ListTagsResponse.FromString, ) self.DeleteTags = channel.unary_unary( '/pfs.ObjectAPI/DeleteTags', request_serializer=client_dot_pfs_dot_pfs__pb2.DeleteTagsRequest.SerializeToString, response_deserializer=client_dot_pfs_dot_pfs__pb2.DeleteTagsResponse.FromString, ) self.Compact = channel.unary_unary( '/pfs.ObjectAPI/Compact', 
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, )
252,021
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel): self.Activate = channel.unary_unary( '/enterprise.API/Activate', request_serializer=client_dot_enterprise_dot_enterprise__pb2.ActivateRequest.SerializeToString, response_deserializer=client_dot_enterprise_dot_enterprise__pb2.ActivateResponse.FromString, ) self.GetState = channel.unary_unary( '/enterprise.API/GetState', request_serializer=client_dot_enterprise_dot_enterprise__pb2.GetStateRequest.SerializeToString, response_deserializer=client_dot_enterprise_dot_enterprise__pb2.GetStateResponse.FromString, ) self.Deactivate = channel.unary_unary( '/enterprise.API/Deactivate', request_serializer=client_dot_enterprise_dot_enterprise__pb2.DeactivateRequest.SerializeToString, response_deserializer=client_dot_enterprise_dot_enterprise__pb2.DeactivateResponse.FromString, )
252,023
Creates a new Repo object in PFS with the given name. Repos are the top-level data objects in PFS and should be used to store data of a similar type. For example, rather than having a single Repo for an entire project, you might have separate Repos for logs, metrics, database dumps, etc. Params: * repo_name: Name of the repo. * description: Repo description.
def create_repo(self, repo_name, description=None): req = proto.CreateRepoRequest(repo=proto.Repo(name=repo_name), description=description) self.stub.CreateRepo(req, metadata=self.metadata)
252,024
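A hypothetical call to create_repo, assuming client is an instance of the PFS client class these methods belong to:
client.create_repo("images", description="Raw input images")
print(client.inspect_repo("images"))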
Returns info about a specific Repo. Params: * repo_name: Name of the repo.
def inspect_repo(self, repo_name): req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name)) res = self.stub.InspectRepo(req, metadata=self.metadata) return res
252,025
Deletes a repo and reclaims the storage space it was using. Params: * repo_name: The name of the repo. * force: If set to true, the repo will be removed regardless of errors. This argument should be used with care. * all: Delete all repos.
def delete_repo(self, repo_name=None, force=False, all=False): if not all: if repo_name: req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force) self.stub.DeleteRepo(req, metadata=self.metadata) else: raise ValueError("Either a repo_name or all=True needs to be provided") else: if not repo_name: req = proto.DeleteRepoRequest(force=force, all=all) self.stub.DeleteRepo(req, metadata=self.metadata) else: raise ValueError("Cannot specify a repo_name if all=True")
252,027
Ends the process of committing data to a Repo and persists the Commit. Once a Commit is finished the data becomes immutable and future attempts to write to it with PutFile will error. Params: * commit: A tuple, string, or Commit object representing the commit.
def finish_commit(self, commit): req = proto.FinishCommitRequest(commit=commit_from(commit)) res = self.stub.FinishCommit(req, metadata=self.metadata) return res
252,029
Returns info about a specific Commit. Params: * commit: A tuple, string, or Commit object representing the commit.
def inspect_commit(self, commit): req = proto.InspectCommitRequest(commit=commit_from(commit)) return self.stub.InspectCommit(req, metadata=self.metadata)
252,031
Deletes a commit. Params: * commit: A tuple, string, or Commit object representing the commit.
def delete_commit(self, commit): req = proto.DeleteCommitRequest(commit=commit_from(commit)) self.stub.DeleteCommit(req, metadata=self.metadata)
252,034
SubscribeCommit is like ListCommit but it keeps listening for commits as they come in. This returns an iterator of Commit objects. Params: * repo_name: Name of the repo. * branch: Branch to subscribe to. * from_commit_id: Optional. Only commits created since this commit are returned.
def subscribe_commit(self, repo_name, branch, from_commit_id=None): repo = proto.Repo(name=repo_name) req = proto.SubscribeCommitRequest(repo=repo, branch=branch) if from_commit_id is not None: getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id)) res = self.stub.SubscribeCommit(req, metadata=self.metadata) return res
252,036
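A hypothetical loop over subscribe_commit; the client variable and the CommitInfo field access are assumptions based on the pfs protobuf definitions referenced above:
# Blocks and yields a commit-info message each time a commit lands on the branch.
for commit_info in client.subscribe_commit("images", "master"):
    print(commit_info.commit.id)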
Lists the active Branch objects on a Repo. Params: * repo_name: The name of the repo.
def list_branch(self, repo_name): req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name)) res = self.stub.ListBranch(req, metadata=self.metadata) if hasattr(res, 'branch_info'): return res.branch_info return []
252,037
Sets a commit and its ancestors as a branch. Params: * commit: A tuple, string, or Commit object representing the commit. * branch_name: The name for the branch to set.
def set_branch(self, commit, branch_name): res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name) self.stub.SetBranch(res, metadata=self.metadata)
252,038
Deletes a branch, but leaves the commits themselves intact. In other words, those commits can still be accessed via commit IDs and other branches they happen to be on. Params: * repo_name: The name of the repo. * branch_name: The name of the branch to delete.
def delete_branch(self, repo_name, branch_name): req = proto.DeleteBranchRequest(repo=proto.Repo(name=repo_name), branch=branch_name) self.stub.DeleteBranch(req, metadata=self.metadata)
252,039
Puts a file using the content found at a URL. The URL is sent to the server, which performs the request. Params: * commit: A tuple, string, or Commit object representing the commit. * path: The path to the file. * url: The URL of the file to put. * recursive: Allow for recursive scraping of some types of URLs, for example s3:// URLs.
def put_file_url(self, commit, path, url, recursive=False): req = iter([ proto.PutFileRequest( file=proto.File(commit=commit_from(commit), path=path), url=url, recursive=recursive ) ]) self.stub.PutFile(req, metadata=self.metadata)
252,041
Returns the contents of a list of files at a specific Commit as a dictionary of file paths to data. Params: * commit: A tuple, string, or Commit object representing the commit. * paths: A list of paths to retrieve. * recursive: If True, will go into each directory in the list recursively.
def get_files(self, commit, paths, recursive=False): filtered_file_infos = [] for path in paths: fi = self.inspect_file(commit, path) if fi.file_type == proto.FILE: filtered_file_infos.append(fi) else: filtered_file_infos += self.list_file(commit, path, recursive=recursive) filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE] return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}
252,043
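A hypothetical use of get_files, with made-up repo, commit, and paths; a commit can be given as a (repo, commit_id_or_branch) tuple per the docstring:
contents = client.get_files(("images", "master"), ["/labels.csv", "/raw/"], recursive=True)
for path, data in contents.items():
    print(path, len(data), "bytes")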
Returns info about a specific file. Params: * commit: A tuple, string, or Commit object representing the commit. * path: Path to file.
def inspect_file(self, commit, path): req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path)) res = self.stub.InspectFile(req, metadata=self.metadata) return res
252,044
Lists the files in a directory. Params: * commit: A tuple, string, or Commit object representing the commit. * path: The path to the directory. * recursive: If True, continue listing the files for sub-directories.
def list_file(self, commit, path, recursive=False): req = proto.ListFileRequest( file=proto.File(commit=commit_from(commit), path=path) ) res = self.stub.ListFile(req, metadata=self.metadata) file_infos = res.file_info if recursive: dirs = [f for f in file_infos if f.file_type == proto.DIR] files = [f for f in file_infos if f.file_type == proto.FILE] return sum([self.list_file(commit, d.file.path, recursive) for d in dirs], files) return list(file_infos)
252,045
Deletes a file from a Commit. DeleteFile leaves a tombstone in the Commit; assuming the file isn't written to later, attempting to get the file from the finished commit will result in a not-found error. The file will of course remain intact in the Commit's parent. Params: * commit: A tuple, string, or Commit object representing the commit. * path: The path to the file.
def delete_file(self, commit, path): req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path)) self.stub.DeleteFile(req, metadata=self.metadata)
252,047
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel): self.GetVersion = channel.unary_unary( '/versionpb.API/GetVersion', request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, response_deserializer=client_dot_version_dot_versionpb_dot_version__pb2.Version.FromString, )
252,049
r"""[DEPRECATED] Get descriptors from module. Parameters: mdl(module): module to search Returns: [Descriptor]
def get_descriptors_from_module(mdl, submodule=False): warnings.warn("use get_descriptors_in_module", DeprecationWarning) __all__ = getattr(mdl, "__all__", None) if __all__ is None: __all__ = dir(mdl) all_functions = (getattr(mdl, name) for name in __all__ if name[:1] != "_") if submodule: descs = [ d for fn in all_functions if is_descriptor_class(fn) or isinstance(fn, ModuleType) for d in ( [fn] if is_descriptor_class(fn) else get_descriptors_from_module(fn, submodule=True) ) ] else: descs = [ fn for fn in all_functions if is_descriptor_class(fn) ] return descs
252,787
r"""Get descriptors in module. Parameters: mdl(module): module to search submodule(bool): search recursively Returns: Iterator[Descriptor]
def get_descriptors_in_module(mdl, submodule=True): __all__ = getattr(mdl, "__all__", None) if __all__ is None: __all__ = dir(mdl) all_values = (getattr(mdl, name) for name in __all__ if name[:1] != "_") if submodule: for v in all_values: if is_descriptor_class(v): yield v if isinstance(v, ModuleType): for v in get_descriptors_in_module(v, submodule=True): yield v else: for v in all_values: if is_descriptor_class(v): yield v
252,788
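Usage sketch for get_descriptors_in_module, mirroring how _Descriptor_from_json below consumes it; assumes the mordred descriptors package is importable:
from mordred import descriptors

names = sorted(cls.__name__ for cls in get_descriptors_in_module(descriptors))
print(len(names), names[:5])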
Register Descriptors from json descriptor objects. Parameters: obj(list or dict): descriptors to register
def register_json(self, obj): if not isinstance(obj, list): obj = [obj] self.register(Descriptor.from_json(j) for j in obj)
252,790
r"""Register descriptors. Descriptor-like: * Descriptor instance: self * Descriptor class: use Descriptor.preset() method * module: use Descriptor-likes in module * Iterable: use Descriptor-likes in Iterable Parameters: desc(Descriptor-like): descriptors to register version(str): version ignore_3D(bool): ignore 3D descriptors
def register(self, desc, version=None, ignore_3D=False): if version is None: version = __version__ version = StrictVersion(version) return self._register(desc, version, ignore_3D)
252,795
Output message. Parameters: s(str): message to output file(file-like): output to end(str): end mark of message Return: None
def echo(self, s, file=sys.stdout, end="\n"): p = getattr(self, "_progress_bar", None) if p is not None: p.write(s, file=file, end=end) return print(s, file=file, end=end)
252,803
Create Descriptor instance from json dict. Parameters: obj(dict): descriptor dict Returns: Descriptor: descriptor
def _Descriptor_from_json(self, obj): descs = getattr(self, "_all_descriptors", None) if descs is None: from mordred import descriptors descs = { cls.__name__: cls for cls in get_descriptors_in_module(descriptors) } descs[ConstDescriptor.__name__] = ConstDescriptor self._all_descriptors = descs return _from_json(obj, descs)
252,935
r"""Replace missing value to "value". Parameters: value: value that missing value is replaced Returns: Result
def fill_missing(self, value=np.nan): return self.__class__( self.mol, [(value if is_missing(v) else v) for v in self.values()], self.keys(), )
253,058
r"""Convert Result to dict. Parameters: rawkey(bool): * True: dict key is Descriptor instance * False: dict key is str Returns: dict
def asdict(self, rawkey=False): if rawkey: return dict(self.items()) else: return { str(k): v for k, v in self.items() }
253,061
Get parameters for ``crop`` for a random crop. Args: img (array): Image to be cropped; its size is read from ``img.shape``. output_size (tuple): Expected output size of the crop. Returns: tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
def get_params(img, output_size): w, h, *_ = img.shape th, tw = output_size if w == tw and h == th: return 0, 0, h, w i = random.randint(0, h - th) j = random.randint(0, w - tw) return i, j, th, tw
256,771
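A small usage sketch for get_params with a square NumPy image (sizes made up); note that the function unpacks img.shape as (w, h, ...), which only matters for non-square inputs:
import numpy as np

img = np.zeros((500, 500, 3), dtype=np.uint8)
i, j, th, tw = get_params(img, (224, 224))
crop = img[i:i + th, j:j + tw]  # random 224x224 crop
print(crop.shape)               # (224, 224, 3)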
Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss.
def step(self, closure=None): loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data if grad.is_sparse: raise RuntimeError('RMSprop does not support sparse gradients') state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # ANOTHER LINE I'VE CHANGED state['square_avg'] = torch.ones_like(p.data) if group['momentum'] > 0: state['momentum_buffer'] = torch.zeros_like(p.data) if group['centered']: state['grad_avg'] = torch.zeros_like(p.data) square_avg = state['square_avg'] alpha = group['alpha'] state['step'] += 1 if group['weight_decay'] != 0: grad = grad.add(group['weight_decay'], p.data) square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad) if group['centered']: grad_avg = state['grad_avg'] grad_avg.mul_(alpha).add_(1 - alpha, grad) # THIS LINE IS EVERYTHING THAT I CHANGED IN THIS OPTIMIZER # avg = square_avg.addcmul(-1, grad_avg, grad_avg).sqrt().add_(group['eps']) avg = square_avg.addcmul(-1, grad_avg, grad_avg).add(group['eps']).sqrt() else: # THIS LINE IS EVERYTHING THAT I CHANGED IN THIS OPTIMIZER # avg = square_avg.sqrt().add_(group['eps']) avg = square_avg.add(group['eps']).sqrt() if group['momentum'] > 0: buf = state['momentum_buffer'] buf.mul_(group['momentum']).addcdiv_(grad, avg) p.data.add_(-group['lr'], buf) else: p.data.addcdiv_(-group['lr'], grad, avg) return loss
257,046
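A standard torch.optim-style training-step sketch showing where step() above is called; the optimizer class name ModifiedRMSprop is a placeholder for whatever class this method lives on, and it is assumed to keep RMSprop's constructor defaults (alpha, eps, momentum, centered, weight_decay):
import torch

model = torch.nn.Linear(10, 1)
optimizer = ModifiedRMSprop(model.parameters(), lr=1e-3)  # placeholder class name

x, y = torch.randn(4, 10), torch.randn(4, 1)
optimizer.zero_grad()
loss = torch.nn.functional.mse_loss(model(x), y)
loss.backward()
optimizer.step()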
Rotates an image by deg degrees Arguments: deg (float): degree to rotate.
def rotate_img(im, deg, mode=cv2.BORDER_CONSTANT, interpolation=cv2.INTER_AREA): r,c,*_ = im.shape M = cv2.getRotationMatrix2D((c//2,r//2),deg,1) return cv2.warpAffine(im,M,(c,r), borderMode=mode, flags=cv2.WARP_FILL_OUTLIERS+interpolation)
257,117
Create an IMDB dataset instance given a path and fields. Arguments: path: Path to the dataset's highest level directory text_field: The field that will be used for text data. label_field: The field that will be used for label data. Remaining keyword arguments: Passed to the constructor of data.Dataset.
def __init__(self, path, text_field, label_field, **kwargs): cache_file = os.path.join(path, 'examples_cache.pk') fields = [('text', text_field), ('label', label_field)] if os.path.exists(cache_file): with open(cache_file, 'rb') as fp: examples = pickle.load(fp) else: examples = [] for label in ['pos', 'neg']: for fname in glob.iglob(os.path.join(path, label, '*.txt')): with io.open(fname, 'r', encoding="utf-8") as f: text = f.readline() examples.append(data.Example.fromlist([text, label], fields)) with open(cache_file, 'wb') as fp: pickle.dump(examples, file=fp) data.Dataset.__init__(self, examples, fields, **kwargs)
257,212
Get the type of identifier name from the type environment env. Args: name: The identifier name env: The type environment mapping from identifier names to types non_generic: A set of non-generic TypeVariables Raises: ParseError: Raised if name is an undefined symbol in the type environment.
def get_type(name, env, non_generic): if name in env: if isinstance(env[name], MultiType): return clone(env[name]) return fresh(env[name], non_generic) else: print("W: Undefined symbol {0}".format(name)) return TypeVariable()
257,902
Makes a copy of a type expression. The type t is copied. The generic variables are duplicated and the non_generic variables are shared. Args: t: A type to be copied. non_generic: A set of non-generic TypeVariables
def fresh(t, non_generic): mappings = {} # A mapping of TypeVariables to TypeVariables def freshrec(tp): p = prune(tp) if isinstance(p, TypeVariable): if is_generic(p, non_generic): if p not in mappings: mappings[p] = TypeVariable() return mappings[p] else: return p elif isinstance(p, dict): return p # module elif isinstance(p, Collection): return Collection(*[freshrec(x) for x in p.types]) elif isinstance(p, Scalar): return Scalar([freshrec(x) for x in p.types]) elif isinstance(p, TypeOperator): return TypeOperator(p.name, [freshrec(x) for x in p.types]) elif isinstance(p, MultiType): return MultiType([freshrec(x) for x in p.types]) else: assert False, "missing freshrec case {}".format(type(p)) return freshrec(t)
257,903
Unify the two types t1 and t2. Makes the types t1 and t2 the same. Args: t1: The first type to be made equivalent t2: The second type to be equivalent Returns: None Raises: InferenceError: Raised if the types cannot be unified.
def unify(t1, t2): a = prune(t1) b = prune(t2) if isinstance(a, TypeVariable): if a != b: if occurs_in_type(a, b): raise InferenceError("recursive unification") a.instance = b elif isinstance(b, TypeVariable): unify(b, a) elif isinstance(a, TypeOperator) and a.name == 'any': return elif isinstance(b, TypeOperator) and b.name == 'any': return elif isinstance(a, TypeOperator) and isinstance(b, TypeOperator): if len(a.types) != len(b.types): raise InferenceError("Type length differ") else: if a.name != b.name: raise InferenceError("Type name differ") try: for p, q in zip(a.types, b.types): unify(p, q) except InferenceError: raise elif isinstance(a, MultiType) and isinstance(b, MultiType): if len(a.types) != len(b.types): raise InferenceError("Type lenght differ") for p, q in zip(a.types, b.types): unify(p, q) elif isinstance(b, MultiType): return unify(b, a) elif isinstance(a, MultiType): types = [] for t in a.types: try: t_clone = fresh(t, {}) b_clone = fresh(b, {}) unify(t_clone, b_clone) types.append(t) except InferenceError: pass if types: if len(types) == 1: unify(clone(types[0]), b) else: # too many overloads are found, # so extract as many information as we can, # and leave the remaining over-approximated def try_unify(t, ts): if isinstance(t, TypeVariable): return if any(isinstance(tp, TypeVariable) for tp in ts): return if any(len(tp.types) != len(t.types) for tp in ts): return for i, tt in enumerate(t.types): its = [prune(tp.types[i]) for tp in ts] if any(isinstance(it, TypeVariable) for it in its): continue it0 = its[0] it0ntypes = len(it0.types) if all(((it.name == it0.name) and (len(it.types) == it0ntypes)) for it in its): ntypes = [TypeVariable() for _ in range(it0ntypes)] new_tt = TypeOperator(it0.name, ntypes) new_tt.__class__ = it0.__class__ unify(tt, new_tt) try_unify(prune(tt), [prune(it) for it in its]) try_unify(b, types) else: raise InferenceError("No overload") else: raise RuntimeError("Not unified {} and {}".format(type(a), type(b)))
257,905
Checks whether a type variable occurs in a type expression. Note: Must be called with v pre-pruned Args: v: The TypeVariable to be tested for type2: The type in which to search Returns: True if v occurs in type2, otherwise False
def occurs_in_type(v, type2): pruned_type2 = prune(type2) if pruned_type2 == v: return True elif isinstance(pruned_type2, TypeOperator): return occurs_in(v, pruned_type2.types) return False
257,908
N-Queens solver. Args: queen_count: the number of queens to solve for. This is also the board size. Returns: A list of solutions to the problem. Each solution looks like (3, 8, 2, 1, 4, ..., 6) where each number is the column position for the queen, and the index into the tuple indicates the row.
def n_queens(queen_count): cols = range(queen_count) out = [] for vec in permutations(cols): if queen_count == len(set(vec[i] + i for i in cols)) == len(set(vec[i] - i for i in cols)): out.append(vec) return out
258,476
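Quick check of n_queens: the classic 8x8 board has 92 solutions, and each solution is a tuple of column positions indexed by row, as the docstring describes.
solutions = n_queens(8)
print(len(solutions))   # 92
print(solutions[0])     # e.g. a tuple like (0, 4, 7, ...) depending on permutation order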
Matplotlib patch object for this region (`matplotlib.patches.Rectangle`). Parameters: ----------- origin : array_like, optional The ``(x, y)`` pixel position of the origin of the displayed image. Default is (0, 0). kwargs : `dict` All keywords that a `~matplotlib.patches.Rectangle` object accepts Returns ------- patch : `~matplotlib.patches.Rectangle` Matplotlib rectangle patch
def as_artist(self, origin=(0, 0), **kwargs): from matplotlib.patches import Rectangle xy = self._lower_left_xy() xy = xy[0] - origin[0], xy[1] - origin[1] width = self.width height = self.height # From the docstring: MPL expects "rotation in degrees (anti-clockwise)" angle = self.angle.to('deg').value mpl_params = self.mpl_properties_default('patch') mpl_params.update(kwargs) return Rectangle(xy=xy, width=width, height=height, angle=angle, **mpl_params)
258,584
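A hypothetical way to draw the patch returned by as_artist, assuming region is a rectangle-region instance exposing this method:
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.add_patch(region.as_artist(origin=(0, 0), edgecolor="red", facecolor="none"))
ax.set_xlim(0, 100)
ax.set_ylim(0, 100)
plt.show()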
Matplotlib patch object for this region (`matplotlib.patches.Ellipse`). Parameters: ----------- origin : array_like, optional The ``(x, y)`` pixel position of the origin of the displayed image. Default is (0, 0). kwargs : `dict` All keywords that a `~matplotlib.patches.Ellipse` object accepts Returns ------- patch : `~matplotlib.patches.Ellipse` Matplotlib ellipse patch
def as_artist(self, origin=(0, 0), **kwargs): from matplotlib.patches import Ellipse xy = self.center.x - origin[0], self.center.y - origin[1] width = self.width height = self.height # From the docstring: MPL expects "rotation in degrees (anti-clockwise)" angle = self.angle.to('deg').value mpl_params = self.mpl_properties_default('patch') mpl_params.update(kwargs) return Ellipse(xy=xy, width=width, height=height, angle=angle, **mpl_params)
258,592
Matplotlib patch object for this region (`matplotlib.patches.Polygon`). Parameters: ----------- origin : array_like, optional The ``(x, y)`` pixel position of the origin of the displayed image. Default is (0, 0). kwargs : `dict` All keywords that a `~matplotlib.patches.Polygon` object accepts Returns ------- patch : `~matplotlib.patches.Polygon` Matplotlib polygon patch
def as_artist(self, origin=(0, 0), **kwargs): from matplotlib.patches import Polygon xy = np.vstack([self.vertices.x - origin[0], self.vertices.y - origin[1]]).transpose() mpl_params = self.mpl_properties_default('patch') mpl_params.update(kwargs) return Polygon(xy=xy, **mpl_params)
258,702
Solve for the value function and associated Markov decision rule by iterating over the value function. Parameters: ----------- model : "dtmscc" model. Must contain a 'felicity' function. grid : grid options Returns: -------- mdr : Markov decision rule The solved decision rule/policy function mdrv: decision rule The solved value function
def value_iteration(model, grid={}, tol=1e-6, maxit=500, maxit_howard=20, verbose=False, details=True): transition = model.functions['transition'] felicity = model.functions['felicity'] controls_lb = model.functions['controls_lb'] controls_ub = model.functions['controls_ub'] parms = model.calibration['parameters'] discount = model.calibration['beta'] x0 = model.calibration['controls'] m0 = model.calibration['exogenous'] s0 = model.calibration['states'] r0 = felicity(m0, s0, x0, parms) process = model.exogenous dprocess = process.discretize() n_ms = dprocess.n_nodes() # number of exogenous states n_mv = dprocess.n_inodes( 0) # this assume number of integration nodes is constant endo_grid = model.get_grid(**grid) exo_grid = dprocess.grid mdrv = DecisionRule(exo_grid, endo_grid) grid = mdrv.endo_grid.nodes() N = grid.shape[0] n_x = len(x0) mdr = constant_policy(model) controls_0 = np.zeros((n_ms, N, n_x)) for i_ms in range(n_ms): controls_0[i_ms, :, :] = mdr.eval_is(i_ms, grid) values_0 = np.zeros((n_ms, N, 1)) # for i_ms in range(n_ms): # values_0[i_ms, :, :] = mdrv(i_ms, grid) mdr = DecisionRule(exo_grid, endo_grid) # mdr.set_values(controls_0) # THIRD: value function iterations until convergence it = 0 err_v = 100 err_v_0 = 0 gain_v = 0.0 err_x = 100 err_x_0 = 0 tol_x = 1e-5 tol_v = 1e-7 itprint = IterationsPrinter( ('N', int), ('Error_V', float), ('Gain_V', float), ('Error_x', float), ('Gain_x', float), ('Eval_n', int), ('Time', float), verbose=verbose) itprint.print_header('Start value function iterations.') while (it < maxit) and (err_v > tol or err_x > tol_x): t_start = time.time() it += 1 mdr.set_values(controls_0) if it > 2: ev = evaluate_policy( model, mdr, initial_guess=mdrv, verbose=False, details=True) else: ev = evaluate_policy(model, mdr, verbose=False, details=True) mdrv = ev.solution for i_ms in range(n_ms): values_0[i_ms, :, :] = mdrv.eval_is(i_ms, grid) values = values_0.copy() controls = controls_0.copy() for i_m in range(n_ms): m = dprocess.node(i_m) for n in range(N): s = grid[n, :] x = controls[i_m, n, :] lb = controls_lb(m, s, parms) ub = controls_ub(m, s, parms) bnds = [e for e in zip(lb, ub)] def valfun(xx): return -choice_value(transition, felicity, i_m, s, xx, mdrv, dprocess, parms, discount)[0] res = scipy.optimize.minimize(valfun, x, bounds=bnds) controls[i_m, n, :] = res.x values[i_m, n, 0] = -valfun(x) # compute error, update value and dr err_x = abs(controls - controls_0).max() err_v = abs(values - values_0).max() t_end = time.time() elapsed = t_end - t_start values_0 = values controls_0 = controls gain_x = err_x / err_x_0 gain_v = err_v / err_v_0 err_x_0 = err_x err_v_0 = err_v itprint.print_iteration( N=it, Error_V=err_v, Gain_V=gain_v, Error_x=err_x, Gain_x=gain_x, Eval_n=ev.iterations, Time=elapsed) itprint.print_finished() mdr = DecisionRule(exo_grid, endo_grid) mdr.set_values(controls) mdrv.set_values(values_0) if not details: return mdr, mdrv else: return ValueIterationResult( mdr, #:AbstractDecisionRule mdrv, #:AbstractDecisionRule it, #:Int dprocess, #:AbstractDiscretizedProcess err_x<tol_x, #:Bool tol_x, #:Float64 err_x, #:Float64 err_v<tol_v, #:Bool tol_v, #:Float64 err_v, #:Float64 None, #log: #:ValueIterationLog None #trace: #:Union{Nothing,IterationTrace )
259,367
Compute value function corresponding to policy ``dr`` Parameters: ----------- model: "dtcscc" model. Must contain a 'value' function. mdr: decision rule to evaluate Returns: -------- decision rule: value function (a function of the space similar to a decision rule object)
def evaluate_policy(model, mdr, tol=1e-8, maxit=2000, grid={}, verbose=True, initial_guess=None, hook=None, integration_orders=None, details=False, interp_type='cubic'): process = model.exogenous dprocess = process.discretize() n_ms = dprocess.n_nodes() # number of exogenous states n_mv = dprocess.n_inodes( 0) # this assume number of integration nodes is constant x0 = model.calibration['controls'] v0 = model.calibration['values'] parms = model.calibration['parameters'] n_x = len(x0) n_v = len(v0) n_s = len(model.symbols['states']) endo_grid = model.get_grid(**grid) exo_grid = dprocess.grid if initial_guess is not None: mdrv = initial_guess else: mdrv = DecisionRule(exo_grid, endo_grid, interp_type=interp_type) grid = mdrv.endo_grid.nodes() N = grid.shape[0] if isinstance(mdr, np.ndarray): controls = mdr else: controls = np.zeros((n_ms, N, n_x)) for i_m in range(n_ms): controls[i_m, :, :] = mdr.eval_is(i_m, grid) values_0 = np.zeros((n_ms, N, n_v)) if initial_guess is None: for i_m in range(n_ms): values_0[i_m, :, :] = v0[None, :] else: for i_m in range(n_ms): values_0[i_m, :, :] = initial_guess.eval_is(i_m, grid) val = model.functions['value'] g = model.functions['transition'] sh_v = values_0.shape err = 10 inner_maxit = 50 it = 0 if verbose: headline = '|{0:^4} | {1:10} | {2:8} | {3:8} |'.format( 'N', ' Error', 'Gain', 'Time') stars = '-' * len(headline) print(stars) print(headline) print(stars) t1 = time.time() err_0 = np.nan verbit = (verbose == 'full') while err > tol and it < maxit: it += 1 t_start = time.time() mdrv.set_values(values_0.reshape(sh_v)) values = update_value(val, g, grid, controls, values_0, mdr, mdrv, dprocess, parms).reshape((-1, n_v)) err = abs(values.reshape(sh_v) - values_0).max() err_SA = err / err_0 err_0 = err values_0 = values.reshape(sh_v) t_finish = time.time() elapsed = t_finish - t_start if verbose: print('|{0:4} | {1:10.3e} | {2:8.3f} | {3:8.3f} |'.format( it, err, err_SA, elapsed)) # values_0 = values.reshape(sh_v) t2 = time.time() if verbose: print(stars) print("Elapsed: {} seconds.".format(t2 - t1)) print(stars) if not details: return mdrv else: return EvaluationResult(mdrv, it, tol, err)
259,369
Get a stream of Transactions for an Account starting from when the request is made. Args: accountID: Account Identifier Returns: v20.response.Response containing the results from submitting the request
def stream( self, accountID, **kwargs ): request = Request( 'GET', '/v3/accounts/{accountID}/transactions/stream' ) request.set_path_param( 'accountID', accountID ) request.set_stream(True) class Parser(): def __init__(self, ctx): self.ctx = ctx def __call__(self, line): j = json.loads(line.decode('utf-8')) type = j.get("type") if type is None: return ("unknown", j) elif type == "HEARTBEAT": return ( "transaction.TransactionHeartbeat", self.ctx.transaction.TransactionHeartbeat.from_dict( j, self.ctx ) ) transaction = self.ctx.transaction.Transaction.from_dict( j, self.ctx ) return ( "transaction.Transaction", transaction ) request.set_line_parser( Parser(self.ctx) ) response = self.ctx.request(request) return response
260,658
Fetch a price for an instrument. Accounts are not associated in any way with this endpoint. Args: instrument: Name of the Instrument time: The time at which the desired price is in effect. The current price is returned if no time is provided. Returns: v20.response.Response containing the results from submitting the request
def price( self, instrument, **kwargs ): request = Request( 'GET', '/v3/instruments/{instrument}/price' ) request.set_path_param( 'instrument', instrument ) request.set_param( 'time', kwargs.get('time') ) response = self.ctx.request(request) if response.content_type is None: return response if not response.content_type.startswith("application/json"): return response jbody = json.loads(response.raw_body) parsed_body = {} # # Parse responses as defined by the API specification # if str(response.status) == "200": if jbody.get('price') is not None: parsed_body['price'] = \ self.ctx.pricing_common.Price.from_dict( jbody['price'], self.ctx ) elif str(response.status) == "400": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "401": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "404": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "405": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') # # Unexpected response status # else: parsed_body = jbody response.body = parsed_body return response
260,689
Replace an Order in an Account by simultaneously cancelling it and creating a replacement Order Args: accountID: Account Identifier orderSpecifier: The Order Specifier order: Specification of the replacing Order Returns: v20.response.Response containing the results from submitting the request
def replace(self, accountID, orderSpecifier, **kwargs):
    request = Request(
        'PUT',
        '/v3/accounts/{accountID}/orders/{orderSpecifier}'
    )

    request.set_path_param('accountID', accountID)
    request.set_path_param('orderSpecifier', orderSpecifier)

    body = EntityDict()
    if 'order' in kwargs:
        body.set('order', kwargs['order'])

    request.set_body_dict(body.dict)

    response = self.ctx.request(request)

    if response.content_type is None:
        return response

    if not response.content_type.startswith("application/json"):
        return response

    jbody = json.loads(response.raw_body)
    parsed_body = {}

    #
    # Parse responses as defined by the API specification
    #
    if str(response.status) == "201":
        if jbody.get('orderCancelTransaction') is not None:
            parsed_body['orderCancelTransaction'] = \
                self.ctx.transaction.OrderCancelTransaction.from_dict(
                    jbody['orderCancelTransaction'], self.ctx)
        if jbody.get('orderCreateTransaction') is not None:
            parsed_body['orderCreateTransaction'] = \
                self.ctx.transaction.Transaction.from_dict(
                    jbody['orderCreateTransaction'], self.ctx)
        if jbody.get('orderFillTransaction') is not None:
            parsed_body['orderFillTransaction'] = \
                self.ctx.transaction.OrderFillTransaction.from_dict(
                    jbody['orderFillTransaction'], self.ctx)
        if jbody.get('orderReissueTransaction') is not None:
            parsed_body['orderReissueTransaction'] = \
                self.ctx.transaction.Transaction.from_dict(
                    jbody['orderReissueTransaction'], self.ctx)
        if jbody.get('orderReissueRejectTransaction') is not None:
            parsed_body['orderReissueRejectTransaction'] = \
                self.ctx.transaction.Transaction.from_dict(
                    jbody['orderReissueRejectTransaction'], self.ctx)
        if jbody.get('replacingOrderCancelTransaction') is not None:
            parsed_body['replacingOrderCancelTransaction'] = \
                self.ctx.transaction.OrderCancelTransaction.from_dict(
                    jbody['replacingOrderCancelTransaction'], self.ctx)
        if jbody.get('relatedTransactionIDs') is not None:
            parsed_body['relatedTransactionIDs'] = jbody.get('relatedTransactionIDs')
        if jbody.get('lastTransactionID') is not None:
            parsed_body['lastTransactionID'] = jbody.get('lastTransactionID')
    elif str(response.status) == "400":
        if jbody.get('orderRejectTransaction') is not None:
            parsed_body['orderRejectTransaction'] = \
                self.ctx.transaction.Transaction.from_dict(
                    jbody['orderRejectTransaction'], self.ctx)
        if jbody.get('relatedTransactionIDs') is not None:
            parsed_body['relatedTransactionIDs'] = jbody.get('relatedTransactionIDs')
        if jbody.get('lastTransactionID') is not None:
            parsed_body['lastTransactionID'] = jbody.get('lastTransactionID')
        if jbody.get('errorCode') is not None:
            parsed_body['errorCode'] = jbody.get('errorCode')
        if jbody.get('errorMessage') is not None:
            parsed_body['errorMessage'] = jbody.get('errorMessage')
    elif str(response.status) == "401":
        if jbody.get('errorCode') is not None:
            parsed_body['errorCode'] = jbody.get('errorCode')
        if jbody.get('errorMessage') is not None:
            parsed_body['errorMessage'] = jbody.get('errorMessage')
    elif str(response.status) == "404":
        if jbody.get('orderCancelRejectTransaction') is not None:
            parsed_body['orderCancelRejectTransaction'] = \
                self.ctx.transaction.Transaction.from_dict(
                    jbody['orderCancelRejectTransaction'], self.ctx)
        if jbody.get('relatedTransactionIDs') is not None:
            parsed_body['relatedTransactionIDs'] = jbody.get('relatedTransactionIDs')
        if jbody.get('lastTransactionID') is not None:
            parsed_body['lastTransactionID'] = jbody.get('lastTransactionID')
        if jbody.get('errorCode') is not None:
            parsed_body['errorCode'] = jbody.get('errorCode')
        if jbody.get('errorMessage') is not None:
            parsed_body['errorMessage'] = jbody.get('errorMessage')
    elif str(response.status) == "405":
        if jbody.get('errorCode') is not None:
            parsed_body['errorCode'] = jbody.get('errorCode')
        if jbody.get('errorMessage') is not None:
            parsed_body['errorMessage'] = jbody.get('errorMessage')
    #
    # Unexpected response status
    #
    else:
        parsed_body = jbody

    response.body = parsed_body

    return response
260,724
Cancel a pending Order in an Account Args: accountID: Account Identifier orderSpecifier: The Order Specifier Returns: v20.response.Response containing the results from submitting the request
def cancel( self, accountID, orderSpecifier, **kwargs ): request = Request( 'PUT', '/v3/accounts/{accountID}/orders/{orderSpecifier}/cancel' ) request.set_path_param( 'accountID', accountID ) request.set_path_param( 'orderSpecifier', orderSpecifier ) response = self.ctx.request(request) if response.content_type is None: return response if not response.content_type.startswith("application/json"): return response jbody = json.loads(response.raw_body) parsed_body = {} # # Parse responses as defined by the API specification # if str(response.status) == "200": if jbody.get('orderCancelTransaction') is not None: parsed_body['orderCancelTransaction'] = \ self.ctx.transaction.OrderCancelTransaction.from_dict( jbody['orderCancelTransaction'], self.ctx ) if jbody.get('relatedTransactionIDs') is not None: parsed_body['relatedTransactionIDs'] = \ jbody.get('relatedTransactionIDs') if jbody.get('lastTransactionID') is not None: parsed_body['lastTransactionID'] = \ jbody.get('lastTransactionID') elif str(response.status) == "401": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "404": if jbody.get('orderCancelRejectTransaction') is not None: parsed_body['orderCancelRejectTransaction'] = \ self.ctx.transaction.OrderCancelRejectTransaction.from_dict( jbody['orderCancelRejectTransaction'], self.ctx ) if jbody.get('relatedTransactionIDs') is not None: parsed_body['relatedTransactionIDs'] = \ jbody.get('relatedTransactionIDs') if jbody.get('lastTransactionID') is not None: parsed_body['lastTransactionID'] = \ jbody.get('lastTransactionID') if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "405": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') # # Unexpected response status # else: parsed_body = jbody response.body = parsed_body return response
260,725
Shortcut to create a Market Order in an Account Args: accountID : The ID of the Account kwargs : The arguments to create a MarketOrderRequest Returns: v20.response.Response containing the results from submitting the request
def market(self, accountID, **kwargs):
    return self.create(
        accountID,
        order=MarketOrderRequest(**kwargs)
    )
260,727
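Illustrative only: a rough sketch of how these order shortcuts might be called through a v20 context. The hostname, token, account id and request fields are placeholders, and the exact MarketOrderRequest fields should be checked against the OANDA v20 documentation:

import v20

api = v20.Context("api-fxpractice.oanda.com", 443, token="<API_TOKEN>")
response = api.order.market(
    "<ACCOUNT_ID>",
    instrument="EUR_USD",  # assumed MarketOrderRequest field
    units=100
)
print(response.status, response.reason)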
Shortcut to create a Limit Order in an Account Args: accountID : The ID of the Account kwargs : The arguments to create a LimitOrderRequest Returns: v20.response.Response containing the results from submitting the request
def limit(self, accountID, **kwargs):
    return self.create(
        accountID,
        order=LimitOrderRequest(**kwargs)
    )
260,728
Shortcut to replace a pending Limit Order in an Account Args: accountID : The ID of the Account orderID : The ID of the Limit Order to replace kwargs : The arguments to create a LimitOrderRequest Returns: v20.response.Response containing the results from submitting the request
def limit_replace(self, accountID, orderID, **kwargs):
    return self.replace(
        accountID,
        orderID,
        order=LimitOrderRequest(**kwargs)
    )
260,729
Shortcut to create a Stop Order in an Account Args: accountID : The ID of the Account kwargs : The arguments to create a StopOrderRequest Returns: v20.response.Response containing the results from submitting the request
def stop(self, accountID, **kwargs):
    return self.create(
        accountID,
        order=StopOrderRequest(**kwargs)
    )
260,730
Shortcut to replace a pending Stop Order in an Account Args: accountID : The ID of the Account orderID : The ID of the Stop Order to replace kwargs : The arguments to create a StopOrderRequest Returns: v20.response.Response containing the results from submitting the request
def stop_replace(self, accountID, orderID, **kwargs):
    return self.replace(
        accountID,
        orderID,
        order=StopOrderRequest(**kwargs)
    )
260,731
Shortcut to create a MarketIfTouched Order in an Account Args: accountID : The ID of the Account kwargs : The arguments to create a MarketIfTouchedOrderRequest Returns: v20.response.Response containing the results from submitting the request
def market_if_touched(self, accountID, **kwargs):
    return self.create(
        accountID,
        order=MarketIfTouchedOrderRequest(**kwargs)
    )
260,732
Shortcut to replace a pending MarketIfTouched Order in an Account Args: accountID : The ID of the Account orderID : The ID of the MarketIfTouched Order to replace kwargs : The arguments to create a MarketIfTouchedOrderRequest Returns: v20.response.Response containing the results from submitting the request
def market_if_touched_replace(self, accountID, orderID, **kwargs):
    return self.replace(
        accountID,
        orderID,
        order=MarketIfTouchedOrderRequest(**kwargs)
    )
260,733
Shortcut to create a Take Profit Order in an Account Args: accountID : The ID of the Account kwargs : The arguments to create a TakeProfitOrderRequest Returns: v20.response.Response containing the results from submitting the request
def take_profit(self, accountID, **kwargs):
    return self.create(
        accountID,
        order=TakeProfitOrderRequest(**kwargs)
    )
260,734
Shortcut to replace a pending Take Profit Order in an Account Args: accountID : The ID of the Account orderID : The ID of the Take Profit Order to replace kwargs : The arguments to create a TakeProfitOrderRequest Returns: v20.response.Response containing the results from submitting the request
def take_profit_replace(self, accountID, orderID, **kwargs):
    return self.replace(
        accountID,
        orderID,
        order=TakeProfitOrderRequest(**kwargs)
    )
260,735
Shortcut to create a Stop Loss Order in an Account Args: accountID : The ID of the Account kwargs : The arguments to create a StopLossOrderRequest Returns: v20.response.Response containing the results from submitting the request
def stop_loss(self, accountID, **kwargs):
    return self.create(
        accountID,
        order=StopLossOrderRequest(**kwargs)
    )
260,736
Shortcut to replace a pending Stop Loss Order in an Account Args: accountID : The ID of the Account orderID : The ID of the Stop Loss Order to replace kwargs : The arguments to create a StopLossOrderRequest Returns: v20.response.Response containing the results from submitting the request
def stop_loss_replace(self, accountID, orderID, **kwargs):
    return self.replace(
        accountID,
        orderID,
        order=StopLossOrderRequest(**kwargs)
    )
260,737
Shortcut to create a Trailing Stop Loss Order in an Account Args: accountID : The ID of the Account kwargs : The arguments to create a TrailingStopLossOrderRequest Returns: v20.response.Response containing the results from submitting the request
def trailing_stop_loss(self, accountID, **kwargs):
    return self.create(
        accountID,
        order=TrailingStopLossOrderRequest(**kwargs)
    )
260,738
Shortcut to replace a pending Trailing Stop Loss Order in an Account Args: accountID : The ID of the Account orderID : The ID of the Take Profit Order to replace kwargs : The arguments to create a TrailingStopLossOrderRequest Returns: v20.response.Response containing the results from submitting the request
def trailing_stop_loss_replace(self, accountID, orderID, **kwargs):
    return self.replace(
        accountID,
        orderID,
        order=TrailingStopLossOrderRequest(**kwargs)
    )
260,739
Set the token for the v20 context Args: token: The token used to access the v20 REST api
def set_token(self, token):
    self.token = token

    self.set_header(
        'Authorization',
        "Bearer {}".format(token)
    )
260,747
Set the Accept-Datetime-Format header to an acceptable value Args: format: UNIX or RFC3339
def set_datetime_format(self, format):
    if format not in ["UNIX", "RFC3339"]:
        return

    self.datetime_format = format

    self.set_header("Accept-Datetime-Format", self.datetime_format)
260,748
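A small usage sketch for the two context setters above; the hostname and token are placeholders:

import v20

ctx = v20.Context("api-fxpractice.oanda.com", 443)
ctx.set_token("<API_TOKEN>")        # adds the Authorization: Bearer header
ctx.set_datetime_format("RFC3339")  # or "UNIX"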
Perform an HTTP request through the context Args: request: A v20.request.Request object Returns: A v20.response.Response object
def request(self, request):
    url = "{}{}".format(self._base_url, request.path)

    timeout = self.poll_timeout
    if request.stream is True:
        timeout = self.stream_timeout

    try:
        http_response = self._session.request(
            request.method,
            url,
            headers=self._headers,
            params=request.params,
            data=request.body,
            stream=request.stream,
            timeout=timeout
        )
    except requests.exceptions.ConnectionError:
        raise V20ConnectionError(url)
    except requests.exceptions.ConnectTimeout:
        raise V20Timeout(url, "connect")
    except requests.exceptions.ReadTimeout:
        raise V20Timeout(url, "read")

    request.headers = http_response.request.headers

    response = Response(
        request,
        request.method,
        http_response.url,
        http_response.status_code,
        http_response.reason,
        http_response.headers
    )

    if request.stream:
        response.set_line_parser(request.line_parser)
        response.set_lines(
            http_response.iter_lines(self.stream_chunk_size)
        )
    else:
        response.set_raw_body(http_response.text)

    return response
260,749
Get a list of all Accounts authorized for the provided token. Args: Returns: v20.response.Response containing the results from submitting the request
def list( self, **kwargs ): request = Request( 'GET', '/v3/accounts' ) response = self.ctx.request(request) if response.content_type is None: return response if not response.content_type.startswith("application/json"): return response jbody = json.loads(response.raw_body) parsed_body = {} # # Parse responses as defined by the API specification # if str(response.status) == "200": if jbody.get('accounts') is not None: parsed_body['accounts'] = [ self.ctx.account.AccountProperties.from_dict(d, self.ctx) for d in jbody.get('accounts') ] elif str(response.status) == "401": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "405": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') # # Unexpected response status # else: parsed_body = jbody response.body = parsed_body return response
260,760
Set the client-configurable portions of an Account. Args: accountID: Account Identifier alias: Client-defined alias (name) for the Account marginRate: The string representation of a decimal number. Returns: v20.response.Response containing the results from submitting the request
def configure( self, accountID, **kwargs ): request = Request( 'PATCH', '/v3/accounts/{accountID}/configuration' ) request.set_path_param( 'accountID', accountID ) body = EntityDict() if 'alias' in kwargs: body.set('alias', kwargs['alias']) if 'marginRate' in kwargs: body.set('marginRate', kwargs['marginRate']) request.set_body_dict(body.dict) response = self.ctx.request(request) if response.content_type is None: return response if not response.content_type.startswith("application/json"): return response jbody = json.loads(response.raw_body) parsed_body = {} # # Parse responses as defined by the API specification # if str(response.status) == "200": if jbody.get('clientConfigureTransaction') is not None: parsed_body['clientConfigureTransaction'] = \ self.ctx.transaction.ClientConfigureTransaction.from_dict( jbody['clientConfigureTransaction'], self.ctx ) if jbody.get('lastTransactionID') is not None: parsed_body['lastTransactionID'] = \ jbody.get('lastTransactionID') elif str(response.status) == "400": if jbody.get('clientConfigureRejectTransaction') is not None: parsed_body['clientConfigureRejectTransaction'] = \ self.ctx.transaction.ClientConfigureRejectTransaction.from_dict( jbody['clientConfigureRejectTransaction'], self.ctx ) if jbody.get('lastTransactionID') is not None: parsed_body['lastTransactionID'] = \ jbody.get('lastTransactionID') if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "403": if jbody.get('clientConfigureRejectTransaction') is not None: parsed_body['clientConfigureRejectTransaction'] = \ self.ctx.transaction.ClientConfigureRejectTransaction.from_dict( jbody['clientConfigureRejectTransaction'], self.ctx ) if jbody.get('lastTransactionID') is not None: parsed_body['lastTransactionID'] = \ jbody.get('lastTransactionID') if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "401": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "404": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "405": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') # # Unexpected response status # else: parsed_body = jbody response.body = parsed_body return response
260,762
Fetch the user information for the specified user. This endpoint is intended to be used by the user themself to obtain their own information. Args: userSpecifier: The User Specifier Returns: v20.response.Response containing the results from submitting the request
def get_info( self, userSpecifier, **kwargs ): request = Request( 'GET', '/v3/users/{userSpecifier}' ) request.set_path_param( 'userSpecifier', userSpecifier ) response = self.ctx.request(request) if response.content_type is None: return response if not response.content_type.startswith("application/json"): return response jbody = json.loads(response.raw_body) parsed_body = {} # # Parse responses as defined by the API specification # if str(response.status) == "200": if jbody.get('userInfo') is not None: parsed_body['userInfo'] = \ self.ctx.user.UserInfo.from_dict( jbody['userInfo'], self.ctx ) elif str(response.status) == "401": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "403": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "405": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') # # Unexpected response status # else: parsed_body = jbody response.body = parsed_body return response
260,765
Pulls tasks from the incoming tasks 0mq pipe onto the internal pending task queue Parameters: ----------- kill_event : threading.Event Event to let the thread know when it is time to die.
def pull_tasks(self, kill_event): logger.info("[TASK PULL THREAD] starting") poller = zmq.Poller() poller.register(self.task_incoming, zmq.POLLIN) # Send a registration message msg = self.create_reg_message() logger.debug("Sending registration message: {}".format(msg)) self.task_incoming.send(msg) last_beat = time.time() last_interchange_contact = time.time() task_recv_counter = 0 poll_timer = 1 while not kill_event.is_set(): time.sleep(LOOP_SLOWDOWN) ready_worker_count = self.ready_worker_queue.qsize() pending_task_count = self.pending_task_queue.qsize() logger.debug("[TASK_PULL_THREAD] ready workers:{}, pending tasks:{}".format(ready_worker_count, pending_task_count)) if time.time() > last_beat + self.heartbeat_period: self.heartbeat() last_beat = time.time() if pending_task_count < self.max_queue_size and ready_worker_count > 0: logger.debug("[TASK_PULL_THREAD] Requesting tasks: {}".format(ready_worker_count)) msg = ((ready_worker_count).to_bytes(4, "little")) self.task_incoming.send(msg) socks = dict(poller.poll(timeout=poll_timer)) if self.task_incoming in socks and socks[self.task_incoming] == zmq.POLLIN: _, pkl_msg = self.task_incoming.recv_multipart() tasks = pickle.loads(pkl_msg) last_interchange_contact = time.time() if tasks == 'STOP': logger.critical("[TASK_PULL_THREAD] Received stop request") kill_event.set() break elif tasks == HEARTBEAT_CODE: logger.debug("Got heartbeat from interchange") else: # Reset timer on receiving message poll_timer = 1 task_recv_counter += len(tasks) logger.debug("[TASK_PULL_THREAD] Got tasks: {} of {}".format([t['task_id'] for t in tasks], task_recv_counter)) for task in tasks: self.pending_task_queue.put(task) else: logger.debug("[TASK_PULL_THREAD] No incoming tasks") # Limit poll duration to heartbeat_period # heartbeat_period is in s vs poll_timer in ms poll_timer = min(self.heartbeat_period * 1000, poll_timer * 2) # Only check if no messages were received. if time.time() > last_interchange_contact + self.heartbeat_threshold: logger.critical("[TASK_PULL_THREAD] Missing contact with interchange beyond heartbeat_threshold") kill_event.set() logger.critical("[TASK_PULL_THREAD] Exiting") break
260,934
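The polling pattern above (register a socket, poll with a timeout that backs off while idle) can be exercised in isolation. A minimal sketch with a hypothetical PULL socket and placeholder endpoint:

import zmq

context = zmq.Context()
sock = context.socket(zmq.PULL)
sock.connect("tcp://127.0.0.1:5555")  # placeholder endpoint

poller = zmq.Poller()
poller.register(sock, zmq.POLLIN)

poll_timer = 1  # ms; doubled while idle, reset when a message arrives
socks = dict(poller.poll(timeout=poll_timer))
if sock in socks and socks[sock] == zmq.POLLIN:
    msg = sock.recv()
    poll_timer = 1
else:
    poll_timer = min(10000, poll_timer * 2)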
Listens on the pending_result_queue and sends out results via 0mq Parameters: ----------- kill_event : threading.Event Event to let the thread know when it is time to die.
def push_results(self, kill_event):
    # We set this timeout so that the thread checks the kill_event and does not
    # block forever on the internal result queue
    timeout = 0.1
    # timer = time.time()
    logger.debug("[RESULT_PUSH_THREAD] Starting thread")

    while not kill_event.is_set():
        time.sleep(LOOP_SLOWDOWN)
        try:
            items = []
            while not self.pending_result_queue.empty():
                r = self.pending_result_queue.get(block=True)
                items.append(r)
            if items:
                self.result_outgoing.send_multipart(items)
        except queue.Empty:
            logger.debug("[RESULT_PUSH_THREAD] No results to send in past {}seconds".format(timeout))
        except Exception as e:
            logger.exception("[RESULT_PUSH_THREAD] Got an exception : {}".format(e))

    logger.critical("[RESULT_PUSH_THREAD] Exiting")
260,935
Add a stream log handler. Args: - filename (string): Name of the file to write logs to - name (string): Logger name - level (logging.LEVEL): Set the logging level. - format_string (string): Set the format string Returns: - None
def set_file_logger(filename: str, name: str = 'parsl', level: int = logging.DEBUG, format_string: Optional[str] = None):
    if format_string is None:
        format_string = "%(asctime)s.%(msecs)03d %(name)s:%(lineno)d [%(levelname)s] %(message)s"

    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler(filename)
    handler.setLevel(level)
    formatter = logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S')
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    # see note in set_stream_logger for notes about logging
    # concurrent.futures
    futures_logger = logging.getLogger("concurrent.futures")
    futures_logger.addHandler(handler)
260,945
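For example, attaching a file handler to the 'parsl' logger with this helper (the log path is a placeholder, and the import assumes the helper above is exported as parsl.set_file_logger):

import logging
import parsl

parsl.set_file_logger("/tmp/parsl.log", level=logging.INFO)
logging.getLogger("parsl").info("runtime events now go to /tmp/parsl.log")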
Look for inputs of the app that are remote files. Submit stage_in apps for such files and replace the file objects in the inputs list with corresponding DataFuture objects. Args: - executor (str) : executor where the app is going to be launched - args (List) : Positional args to app function - kwargs (Dict) : Kwargs to app function
def _add_input_deps(self, executor, args, kwargs):
    # Return if the task is _*_stage_in
    if executor == 'data_manager':
        return args, kwargs

    inputs = kwargs.get('inputs', [])
    for idx, f in enumerate(inputs):
        if isinstance(f, File) and f.is_remote():
            inputs[idx] = self.data_manager.stage_in(f, executor)

    for kwarg, f in kwargs.items():
        if isinstance(f, File) and f.is_remote():
            kwargs[kwarg] = self.data_manager.stage_in(f, executor)

    newargs = list(args)
    for idx, f in enumerate(newargs):
        if isinstance(f, File) and f.is_remote():
            newargs[idx] = self.data_manager.stage_in(f, executor)

    return tuple(newargs), kwargs
260,974
Count the number of unresolved futures on which a task depends. Args: - args (List[args]) : The list of args list to the fn - kwargs (Dict{kwargs}) : The dict of all kwargs passed to the fn Returns: - count, [list of dependencies]
def _gather_all_deps(self, args, kwargs):
    # Check the positional args
    depends = []
    count = 0
    for dep in args:
        if isinstance(dep, Future):
            if self.tasks[dep.tid]['status'] not in FINAL_STATES:
                count += 1
            depends.extend([dep])

    # Check for explicit kwargs ex, fu_1=<fut>
    for key in kwargs:
        dep = kwargs[key]
        if isinstance(dep, Future):
            if self.tasks[dep.tid]['status'] not in FINAL_STATES:
                count += 1
            depends.extend([dep])

    # Check for futures in inputs=[<fut>...]
    for dep in kwargs.get('inputs', []):
        if isinstance(dep, Future):
            if self.tasks[dep.tid]['status'] not in FINAL_STATES:
                count += 1
            depends.extend([dep])

    return count, depends
260,975
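The dependency count boils down to "how many of my arguments are Futures that have not finished yet". A stripped-down sketch of the same idea with plain concurrent.futures objects, independent of the task table above:

from concurrent.futures import Future

def count_unresolved(args, kwargs):
    deps = [a for a in list(args) + list(kwargs.values()) if isinstance(a, Future)]
    return sum(1 for d in deps if not d.done()), deps

f = Future()
print(count_unresolved((f, 42), {'x': 1}))  # (1, [f]) while f is unresolved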
Load a DataFlowKernel. Args: - config (Config) : Configuration to load. This config will be passed to a new DataFlowKernel instantiation which will be set as the active DataFlowKernel. Returns: - DataFlowKernel : The loaded DataFlowKernel object.
def load(cls, config: Optional[Config] = None):
    if cls._dfk is not None:
        raise RuntimeError('Config has already been loaded')
    if config is None:
        cls._dfk = DataFlowKernel(Config())
    else:
        cls._dfk = DataFlowKernel(config)
    return cls._dfk
260,985
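A typical loading sequence under these semantics (a second load without clearing raises RuntimeError). The executor configuration is a placeholder, and parsl.clear() is assumed to be the counterpart that resets the active DataFlowKernel:

import parsl
from parsl.config import Config
from parsl.executors.threads import ThreadPoolExecutor

dfk = parsl.load(Config(executors=[ThreadPoolExecutor(label="local_threads")]))
# ... define and run apps ...
parsl.clear()  # assumed reset so that a new Config can be loaded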
Pull tasks from the incoming tasks 0mq pipe onto the internal pending task queue Parameters: ----------- kill_event : threading.Event Event to let the thread know when it is time to die.
def migrate_tasks_to_internal(self, kill_event):
    logger.info("[TASK_PULL_THREAD] Starting")
    task_counter = 0
    poller = zmq.Poller()
    poller.register(self.task_incoming, zmq.POLLIN)

    while not kill_event.is_set():
        try:
            msg = self.task_incoming.recv_pyobj()
        except zmq.Again:
            # We just timed out while attempting to receive
            logger.debug("[TASK_PULL_THREAD] {} tasks in internal queue".format(self.pending_task_queue.qsize()))
            continue

        if msg == 'STOP':
            kill_event.set()
            break
        else:
            self.pending_task_queue.put(msg)
            task_counter += 1
            logger.debug("[TASK_PULL_THREAD] Fetched task:{}".format(task_counter))
260,991
Start the NeedNameQeueu Parameters: ---------- TODO: Move task receiving to a thread
def start(self, poll_period=None):
    logger.info("Incoming ports bound")

    if poll_period is None:
        poll_period = self.poll_period

    start = time.time()
    count = 0

    self._kill_event = threading.Event()
    self._task_puller_thread = threading.Thread(target=self.migrate_tasks_to_internal,
                                                args=(self._kill_event,))
    self._task_puller_thread.start()

    self._command_thread = threading.Thread(target=self._command_server,
                                            args=(self._kill_event,))
    self._command_thread.start()

    poller = zmq.Poller()
    # poller.register(self.task_incoming, zmq.POLLIN)
    poller.register(self.task_outgoing, zmq.POLLIN)
    poller.register(self.results_incoming, zmq.POLLIN)

    # These are managers which we should examine in an iteration
    # for scheduling a job (or maybe any other attention?).
    # Anything altering the state of the manager should add it
    # onto this list.
    interesting_managers = set()

    while not self._kill_event.is_set():
        self.socks = dict(poller.poll(timeout=poll_period))

        # Listen for requests for work
        if self.task_outgoing in self.socks and self.socks[self.task_outgoing] == zmq.POLLIN:
            logger.debug("[MAIN] starting task_outgoing section")
            message = self.task_outgoing.recv_multipart()
            manager = message[0]

            if manager not in self._ready_manager_queue:
                reg_flag = False

                try:
                    msg = json.loads(message[1].decode('utf-8'))
                    reg_flag = True
                except Exception:
                    logger.warning("[MAIN] Got a non-json registration message from manager:{}".format(
                        manager))
                    logger.debug("[MAIN] Message :\n{}\n".format(message[0]))

                # By default we set up to ignore bad nodes/registration messages.
                self._ready_manager_queue[manager] = {'last': time.time(),
                                                      'free_capacity': 0,
                                                      'block_id': None,
                                                      'max_capacity': 0,
                                                      'active': True,
                                                      'tasks': []}
                if reg_flag is True:
                    interesting_managers.add(manager)
                    logger.info("[MAIN] Adding manager: {} to ready queue".format(manager))
                    self._ready_manager_queue[manager].update(msg)
                    logger.info("[MAIN] Registration info for manager {}: {}".format(manager, msg))

                    if (msg['python_v'].rsplit(".", 1)[0] != self.current_platform['python_v'].rsplit(".", 1)[0] or
                        msg['parsl_v'] != self.current_platform['parsl_v']):
                        logger.warn("[MAIN] Manager {} has incompatible version info with the interchange".format(manager))

                        if self.suppress_failure is False:
                            logger.debug("Setting kill event")
                            self._kill_event.set()
                            e = ManagerLost(manager)
                            result_package = {'task_id': -1, 'exception': serialize_object(e)}
                            pkl_package = pickle.dumps(result_package)
                            self.results_outgoing.send(pkl_package)
                            logger.warning("[MAIN] Sent failure reports, unregistering manager")
                        else:
                            logger.debug("[MAIN] Suppressing shutdown due to version incompatibility")
                    else:
                        logger.info("[MAIN] Manager {} has compatible Parsl version {}".format(manager, msg['parsl_v']))
                        logger.info("[MAIN] Manager {} has compatible Python version {}".format(
                            manager, msg['python_v'].rsplit(".", 1)[0]))
                else:
                    # Registration has failed.
                    if self.suppress_failure is False:
                        self._kill_event.set()
                        e = BadRegistration(manager, critical=True)
                        result_package = {'task_id': -1, 'exception': serialize_object(e)}
                        pkl_package = pickle.dumps(result_package)
                        self.results_outgoing.send(pkl_package)
                    else:
                        logger.debug("[MAIN] Suppressing bad registration from manager:{}".format(
                            manager))

            else:
                tasks_requested = int.from_bytes(message[1], "little")
                self._ready_manager_queue[manager]['last'] = time.time()
                if tasks_requested == HEARTBEAT_CODE:
                    logger.debug("[MAIN] Manager {} sent heartbeat".format(manager))
                    self.task_outgoing.send_multipart([manager, b'', PKL_HEARTBEAT_CODE])
                else:
                    logger.debug("[MAIN] Manager {} requested {} tasks".format(manager, tasks_requested))
                    self._ready_manager_queue[manager]['free_capacity'] = tasks_requested
                    interesting_managers.add(manager)
            logger.debug("[MAIN] leaving task_outgoing section")

        # If we had received any requests, check if there are tasks that could be passed
        logger.debug("Managers count (total/interesting): {}/{}".format(len(self._ready_manager_queue),
                                                                        len(interesting_managers)))

        if interesting_managers and not self.pending_task_queue.empty():
            shuffled_managers = list(interesting_managers)
            random.shuffle(shuffled_managers)

            while shuffled_managers and not self.pending_task_queue.empty():  # cf. the if statement above...
                manager = shuffled_managers.pop()
                tasks_inflight = len(self._ready_manager_queue[manager]['tasks'])
                real_capacity = min(self._ready_manager_queue[manager]['free_capacity'],
                                    self._ready_manager_queue[manager]['max_capacity'] - tasks_inflight)

                if (real_capacity and self._ready_manager_queue[manager]['active']):
                    tasks = self.get_tasks(real_capacity)
                    if tasks:
                        self.task_outgoing.send_multipart([manager, b'', pickle.dumps(tasks)])
                        task_count = len(tasks)
                        count += task_count
                        tids = [t['task_id'] for t in tasks]
                        self._ready_manager_queue[manager]['free_capacity'] -= task_count
                        self._ready_manager_queue[manager]['tasks'].extend(tids)
                        logger.debug("[MAIN] Sent tasks: {} to manager {}".format(tids, manager))
                        if self._ready_manager_queue[manager]['free_capacity'] > 0:
                            logger.debug("[MAIN] Manager {} has free_capacity {}".format(
                                manager, self._ready_manager_queue[manager]['free_capacity']))
                            # ... so keep it in the interesting_managers list
                        else:
                            logger.debug("[MAIN] Manager {} is now saturated".format(manager))
                            interesting_managers.remove(manager)
                else:
                    interesting_managers.remove(manager)
                    # logger.debug("Nothing to send to manager {}".format(manager))
            logger.debug("[MAIN] leaving _ready_manager_queue section, with {} managers still interesting".format(
                len(interesting_managers)))
        else:
            logger.debug("[MAIN] either no interesting managers or no tasks, so skipping manager pass")

        # Receive any results and forward to client
        if self.results_incoming in self.socks and self.socks[self.results_incoming] == zmq.POLLIN:
            logger.debug("[MAIN] entering results_incoming section")
            manager, *b_messages = self.results_incoming.recv_multipart()
            if manager not in self._ready_manager_queue:
                logger.warning("[MAIN] Received a result from a un-registered manager: {}".format(manager))
            else:
                logger.debug("[MAIN] Got {} result items in batch".format(len(b_messages)))
                for b_message in b_messages:
                    r = pickle.loads(b_message)
                    # logger.debug("[MAIN] Received result for task {} from {}".format(r['task_id'], manager))
                    self._ready_manager_queue[manager]['tasks'].remove(r['task_id'])
                self.results_outgoing.send_multipart(b_messages)
                logger.debug("[MAIN] Current tasks: {}".format(self._ready_manager_queue[manager]['tasks']))
            logger.debug("[MAIN] leaving results_incoming section")

        logger.debug("[MAIN] entering bad_managers section")
        bad_managers = [manager for manager in self._ready_manager_queue if
                        time.time() - self._ready_manager_queue[manager]['last'] > self.heartbeat_threshold]
        for manager in bad_managers:
            logger.debug("[MAIN] Last: {} Current: {}".format(self._ready_manager_queue[manager]['last'], time.time()))
            logger.warning("[MAIN] Too many heartbeats missed for manager {}".format(manager))
            for tid in self._ready_manager_queue[manager]['tasks']:
                try:
                    raise ManagerLost(manager)
                except Exception:
                    result_package = {'task_id': tid,
                                      'exception': serialize_object(RemoteExceptionWrapper(*sys.exc_info()))}
                    pkl_package = pickle.dumps(result_package)
                    self.results_outgoing.send(pkl_package)
            logger.warning("[MAIN] Sent failure reports, unregistering manager")
            self._ready_manager_queue.pop(manager, 'None')
        logger.debug("[MAIN] leaving bad_managers section")
        logger.debug("[MAIN] ending one main loop iteration")

    delta = time.time() - start
    logger.info("Processed {} tasks in {} seconds".format(count, delta))
    logger.warning("Exiting")
260,993
Listens on the pending_result_queue and sends out results via 0mq Parameters: ----------- kill_event : threading.Event Event to let the thread know when it is time to die.
def push_results(self, kill_event):
    logger.debug("[RESULT_PUSH_THREAD] Starting thread")

    push_poll_period = max(10, self.poll_period) / 1000  # push_poll_period must be atleast 10 ms
    logger.debug("[RESULT_PUSH_THREAD] push poll period: {}".format(push_poll_period))

    last_beat = time.time()
    items = []

    while not kill_event.is_set():
        try:
            r = self.pending_result_queue.get(block=True, timeout=push_poll_period)
            items.append(r)
        except queue.Empty:
            pass
        except Exception as e:
            logger.exception("[RESULT_PUSH_THREAD] Got an exception: {}".format(e))

        # If we have reached poll_period duration or timer has expired, we send results
        if len(items) >= self.max_queue_size or time.time() > last_beat + push_poll_period:
            last_beat = time.time()
            if items:
                self.result_outgoing.send_multipart(items)
                items = []

    logger.critical("[RESULT_PUSH_THREAD] Exiting")
261,000
Initialize the DataManager. Args: - dfk (DataFlowKernel): The DataFlowKernel that this DataManager is managing data for. Kwargs: - max_threads (int): Number of threads. Default is 10. - executors (list of Executors): Executors for which data transfer will be managed.
def __init__(self, dfk, max_threads=10):
    self._scaling_enabled = False

    self.label = 'data_manager'
    self.dfk = dfk
    self.max_threads = max_threads
    self.globus = None
    self.managed = True
261,005
wtime_to_minutes Convert standard wallclock time string to minutes. Args: - Time_string in HH:MM:SS format Returns: (int) minutes
def wtime_to_minutes(time_string):
    hours, mins, seconds = time_string.split(':')
    total_mins = int(hours) * 60 + int(mins)
    if total_mins < 1:
        logger.warning("Time string '{}' parsed to {} minutes, less than 1".format(time_string, total_mins))
    return total_mins
261,019
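Worked examples of the conversion above (seconds are parsed but intentionally ignored):

wtime_to_minutes("01:30:00")  # -> 90
wtime_to_minutes("00:45:30")  # -> 45 (the 30 seconds are dropped)
wtime_to_minutes("00:00:20")  # -> 0, and a warning is logged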
Peek at the DFK and the executors specified. We assume here that tasks are not held in a runnable state, and that all tasks from an app would be sent to a single specific executor, i.e tasks cannot be specified to go to one of more executors. Args: - tasks (task_ids): Not used here. KWargs: - kind (Not used)
def _strategy_simple(self, tasks, *args, kind=None, **kwargs):
    for label, executor in self.dfk.executors.items():
        if not executor.scaling_enabled:
            continue

        # Tasks that are either pending completion
        active_tasks = executor.outstanding

        status = executor.status()
        self.unset_logging()

        # FIXME we need to handle case where provider does not define these
        # FIXME probably more of this logic should be moved to the provider
        min_blocks = executor.provider.min_blocks
        max_blocks = executor.provider.max_blocks
        if isinstance(executor, IPyParallelExecutor):
            tasks_per_node = executor.workers_per_node
        elif isinstance(executor, HighThroughputExecutor):
            # This is probably wrong calculation, we need this to come from the executor
            # since we can't know slots ahead of time.
            tasks_per_node = 1
        elif isinstance(executor, ExtremeScaleExecutor):
            tasks_per_node = executor.ranks_per_node

        nodes_per_block = executor.provider.nodes_per_block
        parallelism = executor.provider.parallelism

        running = sum([1 for x in status if x == 'RUNNING'])
        submitting = sum([1 for x in status if x == 'SUBMITTING'])
        pending = sum([1 for x in status if x == 'PENDING'])
        active_blocks = running + submitting + pending
        active_slots = active_blocks * tasks_per_node * nodes_per_block

        if hasattr(executor, 'connected_workers'):
            logger.debug('Executor {} has {} active tasks, {}/{}/{} running/submitted/pending blocks, and {} connected workers'.format(
                label, active_tasks, running, submitting, pending, executor.connected_workers))
        else:
            logger.debug('Executor {} has {} active tasks and {}/{}/{} running/submitted/pending blocks'.format(
                label, active_tasks, running, submitting, pending))

        # reset kill timer if executor has active tasks
        if active_tasks > 0 and self.executors[executor.label]['idle_since']:
            self.executors[executor.label]['idle_since'] = None

        # Case 1
        # No tasks.
        if active_tasks == 0:
            # Case 1a
            # Fewer blocks that min_blocks
            if active_blocks <= min_blocks:
                # Ignore
                # logger.debug("Strategy: Case.1a")
                pass

            # Case 1b
            # More blocks than min_blocks. Scale down
            else:
                # We want to make sure that max_idletime is reached
                # before killing off resources
                if not self.executors[executor.label]['idle_since']:
                    logger.debug("Executor {} has 0 active tasks; starting kill timer (if idle time exceeds {}s, resources will be removed)".format(
                        label, self.max_idletime)
                    )
                    self.executors[executor.label]['idle_since'] = time.time()

                idle_since = self.executors[executor.label]['idle_since']
                if (time.time() - idle_since) > self.max_idletime:
                    # We have resources idle for the max duration,
                    # we have to scale_in now.
                    logger.debug("Idle time has reached {}s for executor {}; removing resources".format(
                        self.max_idletime, label)
                    )
                    executor.scale_in(active_blocks - min_blocks)

                else:
                    pass
                    # logger.debug("Strategy: Case.1b. Waiting for timer : {0}".format(idle_since))

        # Case 2
        # More tasks than the available slots.
        elif (float(active_slots) / active_tasks) < parallelism:
            # Case 2a
            # We have the max blocks possible
            if active_blocks >= max_blocks:
                # Ignore since we already have the max nodes
                # logger.debug("Strategy: Case.2a")
                pass

            # Case 2b
            else:
                # logger.debug("Strategy: Case.2b")
                excess = math.ceil((active_tasks * parallelism) - active_slots)
                excess_blocks = math.ceil(float(excess) / (tasks_per_node * nodes_per_block))
                logger.debug("Requesting {} more blocks".format(excess_blocks))
                executor.scale_out(excess_blocks)

        elif active_slots == 0 and active_tasks > 0:
            # Case 4
            # Check if slots are being lost quickly ?
            logger.debug("Requesting single slot")
            executor.scale_out(1)

        # Case 3
        # tasks ~ slots
        else:
            # logger.debug("Strategy: Case 3")
            pass
261,046
Terminate the controller process and its child processes. Args: - None
def close(self):
    if self.reuse:
        logger.debug("Ipcontroller not shutting down: reuse enabled")
        return

    if self.mode == "manual":
        logger.debug("Ipcontroller not shutting down: Manual mode")
        return

    try:
        pgid = os.getpgid(self.proc.pid)
        os.killpg(pgid, signal.SIGTERM)
        time.sleep(0.2)
        os.killpg(pgid, signal.SIGKILL)
        try:
            self.proc.wait(timeout=1)
            x = self.proc.returncode
            if x == 0:
                logger.debug("Controller exited with {0}".format(x))
            else:
                logger.error("Controller exited with {0}. May require manual cleanup".format(x))
        except subprocess.TimeoutExpired:
            logger.warn("Ipcontroller process:{0} cleanup failed. May require manual cleanup".format(self.proc.pid))

    except Exception as e:
        logger.warn("Failed to kill the ipcontroller process[{0}]: {1}".format(self.proc.pid, e))
261,061
Initialize the memoizer. Args: - dfk (DFK obj): The DFK object KWargs: - memoize (Bool): enable memoization or not. - checkpoint (Dict): A checkpoint loaded as a dict.
def __init__(self, dfk, memoize=True, checkpoint={}):
    self.dfk = dfk
    self.memoize = memoize

    if self.memoize:
        logger.info("App caching initialized")
        self.memo_lookup_table = checkpoint
    else:
        logger.info("App caching disabled for all apps")
        self.memo_lookup_table = {}
261,062
Create a hash of the task inputs. This uses a serialization library borrowed from ipyparallel. If this fails here, then all ipp calls are also likely to fail due to failure at serialization. Args: - task (dict) : Task dictionary from dfk.tasks Returns: - hash (str) : A unique hash string
def make_hash(self, task):
    # Function name TODO: Add fn body later
    t = [serialize_object(task['func_name'])[0],
         serialize_object(task['fn_hash'])[0],
         serialize_object(task['args'])[0],
         serialize_object(task['kwargs'])[0],
         serialize_object(task['env'])[0]]
    x = b''.join(t)
    hashedsum = hashlib.md5(x).hexdigest()
    return hashedsum
261,063
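The same idea in miniature: concatenate the serialized task components and take an MD5 digest. A standalone sketch using pickle in place of parsl's serialize_object helper:

import hashlib
import pickle

task = {'func_name': 'my_app', 'args': (1, 2), 'kwargs': {'x': 3}}
parts = [pickle.dumps(task[k]) for k in ('func_name', 'args', 'kwargs')]
digest = hashlib.md5(b''.join(parts)).hexdigest()
print(digest)  # stable key for memoization lookups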
Updates the memoization lookup table with the result from a task. Args: - task_id (int): Integer task id - task (dict) : A task dict from dfk.tasks - r (Result future): Result future A warning is issued when a hash collision occurs during the update. This is not likely.
def update_memo(self, task_id, task, r):
    if not self.memoize or not task['memoize']:
        return

    if task['hashsum'] in self.memo_lookup_table:
        logger.info('Updating appCache entry with latest %s:%s call' %
                    (task['func_name'], task_id))
        self.memo_lookup_table[task['hashsum']] = r
    else:
        self.memo_lookup_table[task['hashsum']] = r
261,065
Get the status of a list of jobs identified by the job identifiers returned from the submit request. Args: - job_ids (list) : A list of job identifiers Returns: - A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED', 'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list. Raises: - ExecutionProviderException or its subclasses
def status(self, job_ids):
    if job_ids:
        self._status()
    return [self.resources[jid]['status'] for jid in job_ids]
261,088
Get the status of a list of jobs identified by their ids. Args: - job_ids (List of ids) : List of identifiers for the jobs Returns: - List of status codes.
def status(self, job_ids):
    logger.debug("Checking status of: {0}".format(job_ids))
    for job_id in self.resources:

        if self.resources[job_id]['proc']:

            poll_code = self.resources[job_id]['proc'].poll()
            if self.resources[job_id]['status'] in ['COMPLETED', 'FAILED']:
                continue

            if poll_code is None:
                self.resources[job_id]['status'] = 'RUNNING'
            elif poll_code == 0:
                self.resources[job_id]['status'] = 'COMPLETED'
            elif poll_code != 0:
                self.resources[job_id]['status'] = 'FAILED'
            else:
                logger.error("Internal consistency error: unexpected case in local provider state machine")

        elif self.resources[job_id]['remote_pid']:

            retcode, stdout, stderr = self.channel.execute_wait('ps -p {} &> /dev/null; echo "STATUS:$?" ',
                                                                self.cmd_timeout)
            for line in stdout.split('\n'):
                if line.startswith("STATUS:"):
                    status = line.split("STATUS:")[1].strip()
                    if status == "0":
                        self.resources[job_id]['status'] = 'RUNNING'
                    else:
                        self.resources[job_id]['status'] = 'FAILED'

    return [self.resources[jid]['status'] for jid in job_ids]
261,090
Cancels the jobs specified by a list of job ids Args: job_ids : [<job_id> ...] Returns : [True/False...] : If the cancel operation fails the entire list will be False.
def cancel(self, job_ids):
    for job in job_ids:
        logger.debug("Terminating job/proc_id: {0}".format(job))
        # Here we are assuming that for local, the job_ids are the process id's
        if self.resources[job]['proc']:
            proc = self.resources[job]['proc']
            os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
            self.resources[job]['status'] = 'CANCELLED'

        elif self.resources[job]['remote_pid']:
            cmd = "kill -- -$(ps -o pgid={} | grep -o '[0-9]*')".format(self.resources[job]['remote_pid'])
            retcode, stdout, stderr = self.channel.execute_wait(cmd, self.cmd_timeout)
            if retcode != 0:
                logger.warning("Failed to kill PID: {} and child processes on {}".format(
                    self.resources[job]['remote_pid'], self.label))

    rets = [True for i in job_ids]
    return rets
261,092
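The local branch above terminates a whole process group rather than a single PID. A minimal sketch of that pattern with only the standard library (the sleep command is just a stand-in for a worker process):

import os
import signal
import subprocess

proc = subprocess.Popen(["sleep", "600"], start_new_session=True)  # own process group
os.killpg(os.getpgid(proc.pid), signal.SIGTERM)                    # signal the whole group
proc.wait()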
Add a stream log handler. Args: - filename (string): Name of the file to write logs to - name (string): Logger name - level (logging.LEVEL): Set the logging level. - format_string (string): Set the format string Returns: - None
def start_file_logger(filename, rank, name='parsl', level=logging.DEBUG, format_string=None):
    try:
        os.makedirs(os.path.dirname(filename), 511, True)
    except Exception as e:
        print("Caught exception with trying to make log dirs: {}".format(e))

    if format_string is None:
        format_string = "%(asctime)s %(name)s:%(lineno)d Rank:{0} [%(levelname)s] %(message)s".format(rank)

    global logger
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler(filename)
    handler.setLevel(level)
    formatter = logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
261,118
Reads the json contents from filepath and uses that to compose the engine launch command. Notes: Add this to the ipengine launch for debug logs : --log-to-file --debug Args: filepath (str): Path to the engine file engine_dir (str): CWD for the engines . container_image (str): The container to be used to launch workers
def compose_containerized_launch_cmd(self, filepath, engine_dir, container_image):
    self.engine_file = os.path.expanduser(filepath)
    uid = str(uuid.uuid4())
    engine_json = None
    try:
        with open(self.engine_file, 'r') as f:
            engine_json = f.read()
    except OSError as e:
        logger.error("Could not open engine_json : ", self.engine_file)
        raise e

    return .format(engine_dir, engine_json, container_image, debug_option=self.debug_option, uid=uid)
261,126
Scales out the number of active workers by 1. This method is notImplemented for threads and will raise the error if called. Parameters: blocks : int Number of blocks to be provisioned.
def scale_out(self, blocks=1):
    r = []
    for i in range(blocks):
        if self.provider:
            block = self.provider.submit(self.launch_cmd, 1, self.workers_per_node)
            logger.debug("Launched block {}:{}".format(i, block))
            if not block:
                raise(ScalingFailed(self.provider.label,
                                    "Attempts to provision nodes via provider has failed"))
            self.engines.extend([block])
            r.extend([block])
        else:
            logger.error("No execution provider available")
            r = None
    return r
261,127