text_prompt: string (length 100 to 17.7k)
code_prompt: string (length 7 to 9.86k)
<SYSTEM_TASK:> Create a Redis client. <END_TASK> <USER_TASK:> Description: def create_redis_client(redis_address, password=None): """Create a Redis client. Args: redis_address (str): The IP address and port of the Redis server, in the form "ip_address:port". password (str): The password of the Redis server, if one is required. Returns: A Redis client. """
redis_ip_address, redis_port = redis_address.split(":")
# For this command to work, some other client (on the same machine
# as Redis) must have run "CONFIG SET protected-mode no".
return redis.StrictRedis(
    host=redis_ip_address, port=int(redis_port), password=password)
<SYSTEM_TASK:> Wait for a Redis server to be available. <END_TASK> <USER_TASK:> Description: def wait_for_redis_to_start(redis_ip_address, redis_port, password=None, num_retries=5): """Wait for a Redis server to be available. This is accomplished by creating a Redis client and sending a random command to the server until the command gets through. Args: redis_ip_address (str): The IP address of the redis server. redis_port (int): The port of the redis server. password (str): The password of the redis server. num_retries (int): The number of times to try connecting with redis. The client will sleep for one second between attempts. Raises: Exception: An exception is raised if we could not connect with Redis. """
redis_client = redis.StrictRedis(
    host=redis_ip_address, port=redis_port, password=password)
# Wait for the Redis server to start.
counter = 0
while counter < num_retries:
    try:
        # Run some random command and see if it worked.
        logger.info(
            "Waiting for redis server at {}:{} to respond...".format(
                redis_ip_address, redis_port))
        redis_client.client_list()
    except redis.ConnectionError:
        # Wait a little bit.
        time.sleep(1)
        logger.info("Failed to connect to the redis server, retrying.")
        counter += 1
    else:
        break
if counter == num_retries:
    raise Exception("Unable to connect to Redis. If the Redis instance is "
                    "on a different machine, check that your firewall is "
                    "configured properly.")
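A minimal usage sketch of the two helpers above, assuming a Redis server is already listening on 127.0.0.1:6379 (the address is only an example); ping() is a standard redis-py command used here to confirm the connection works:

# Hypothetical usage, assuming a local Redis server on port 6379.
wait_for_redis_to_start("127.0.0.1", 6379, password=None)
client = create_redis_client("127.0.0.1:6379", password=None)
assert client.ping()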
<SYSTEM_TASK:> Attempt to detect the number of GPUs on this machine. <END_TASK> <USER_TASK:> Description: def _autodetect_num_gpus(): """Attempt to detect the number of GPUs on this machine. TODO(rkn): This currently assumes Nvidia GPUs and Linux. Returns: The number of GPUs if any were detected, otherwise 0. """
proc_gpus_path = "/proc/driver/nvidia/gpus"
if os.path.isdir(proc_gpus_path):
    return len(os.listdir(proc_gpus_path))
return 0
<SYSTEM_TASK:> Compute the versions of Python, pyarrow, and Ray. <END_TASK> <USER_TASK:> Description: def _compute_version_info(): """Compute the versions of Python, pyarrow, and Ray. Returns: A tuple containing the version information. """
ray_version = ray.__version__
python_version = ".".join(map(str, sys.version_info[:3]))
pyarrow_version = pyarrow.__version__
return ray_version, python_version, pyarrow_version
<SYSTEM_TASK:> Check if various version info of this process is correct. <END_TASK> <USER_TASK:> Description: def check_version_info(redis_client): """Check if various version info of this process is correct. This will be used to detect if workers or drivers are started using different versions of Python, pyarrow, or Ray. If the version information is not present in Redis, then no check is done. Args: redis_client: A client for the primary Redis shard. Raises: Exception: An exception is raised if there is a version mismatch. """
redis_reply = redis_client.get("VERSION_INFO")

# Don't do the check if there is no version information in Redis. This
# is to make it easier to do things like start the processes by hand.
if redis_reply is None:
    return

true_version_info = tuple(json.loads(ray.utils.decode(redis_reply)))
version_info = _compute_version_info()
if version_info != true_version_info:
    node_ip_address = ray.services.get_node_ip_address()
    error_message = ("Version mismatch: The cluster was started with:\n"
                     "    Ray: " + true_version_info[0] + "\n"
                     "    Python: " + true_version_info[1] + "\n"
                     "    Pyarrow: " + str(true_version_info[2]) + "\n"
                     "This process on node " + node_ip_address +
                     " was started with:" + "\n"
                     "    Ray: " + version_info[0] + "\n"
                     "    Python: " + version_info[1] + "\n"
                     "    Pyarrow: " + str(version_info[2]))
    if version_info[:2] != true_version_info[:2]:
        raise Exception(error_message)
    else:
        logger.warning(error_message)
<SYSTEM_TASK:> Start a single Redis server. <END_TASK> <USER_TASK:> Description: def _start_redis_instance(executable, modules, port=None, redis_max_clients=None, num_retries=20, stdout_file=None, stderr_file=None, password=None, redis_max_memory=None): """Start a single Redis server. Notes: If "port" is not None, then we will only use this port and try only once. Otherwise, random ports will be used and the maximum retries count is "num_retries". Args: executable (str): Full path of the redis-server executable. modules (list of str): A list of pathnames, pointing to the redis module(s) that will be loaded in this redis server. port (int): If provided, start a Redis server with this port. redis_max_clients: If this is provided, Ray will attempt to configure Redis with this maxclients number. num_retries (int): The number of times to attempt to start Redis. If a port is provided, this defaults to 1. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. password (str): Prevents external clients without the password from connecting to Redis if provided. redis_max_memory: The max amount of memory (in bytes) to allow redis to use, or None for no limit. Once the limit is exceeded, redis will start LRU eviction of entries. Returns: A tuple of the port used by Redis and ProcessInfo for the process that was started. If a port is passed in, then the returned port value is the same. Raises: Exception: An exception is raised if Redis could not be started. """
assert os.path.isfile(executable)
for module in modules:
    assert os.path.isfile(module)
counter = 0
if port is not None:
    # If a port is specified, then try only once to connect.
    # This ensures that we will use the given port.
    num_retries = 1
else:
    port = new_port()

load_module_args = []
for module in modules:
    load_module_args += ["--loadmodule", module]

while counter < num_retries:
    if counter > 0:
        logger.warning("Redis failed to start, retrying now.")

    # Construct the command to start the Redis server.
    command = [executable]
    if password:
        command += ["--requirepass", password]
    command += (
        ["--port", str(port), "--loglevel", "warning"] + load_module_args)

    process_info = start_ray_process(
        command,
        ray_constants.PROCESS_TYPE_REDIS_SERVER,
        stdout_file=stdout_file,
        stderr_file=stderr_file)
    time.sleep(0.1)
    # Check if Redis successfully started (or at least if the executable
    # did not exit within 0.1 seconds).
    if process_info.process.poll() is None:
        break
    port = new_port()
    counter += 1
if counter == num_retries:
    raise Exception("Couldn't start Redis. Check log files: {} {}".format(
        stdout_file.name, stderr_file.name))

# Create a Redis client just for configuring Redis.
redis_client = redis.StrictRedis(
    host="127.0.0.1", port=port, password=password)
# Wait for the Redis server to start.
wait_for_redis_to_start("127.0.0.1", port, password=password)
# Configure Redis to generate keyspace notifications. TODO(rkn): Change
# this to only generate notifications for the export keys.
redis_client.config_set("notify-keyspace-events", "Kl")
# Configure Redis to not run in protected mode so that processes on other
# hosts can connect to it. TODO(rkn): Do this in a more secure way.
redis_client.config_set("protected-mode", "no")

# Discard old task and object metadata.
if redis_max_memory is not None:
    redis_client.config_set("maxmemory", str(redis_max_memory))
    redis_client.config_set("maxmemory-policy", "allkeys-lru")
    redis_client.config_set("maxmemory-samples", "10")
    logger.info("Starting Redis shard with {} GB max memory.".format(
        round(redis_max_memory / 1e9, 2)))

# If redis_max_clients is provided, attempt to raise the maximum number
# of Redis clients.
if redis_max_clients is not None:
    redis_client.config_set("maxclients", str(redis_max_clients))
else:
    # If redis_max_clients is not provided, determine the current ulimit.
    # We will use this to attempt to raise the maximum number of Redis
    # clients.
    current_max_clients = int(
        redis_client.config_get("maxclients")["maxclients"])
    # The below command should be the same as doing ulimit -n.
    ulimit_n = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
    # The quantity redis_client_buffer appears to be the required buffer
    # between the maximum number of redis clients and ulimit -n. That is,
    # if ulimit -n returns 10000, then we can set maxclients to
    # 10000 - redis_client_buffer.
    redis_client_buffer = 32
    if current_max_clients < ulimit_n - redis_client_buffer:
        redis_client.config_set("maxclients",
                                ulimit_n - redis_client_buffer)

# Increase the hard and soft limits for the redis client pubsub buffer to
# 128MB. This is a hack to make it less likely for pubsub messages to be
# dropped and for pubsub connections to therefore be killed.
cur_config = (redis_client.config_get("client-output-buffer-limit")[
    "client-output-buffer-limit"])
cur_config_list = cur_config.split()
assert len(cur_config_list) == 12
cur_config_list[8:] = ["pubsub", "134217728", "134217728", "60"]
redis_client.config_set("client-output-buffer-limit",
                        " ".join(cur_config_list))
# Put a time stamp in Redis to indicate when it was started.
redis_client.set("redis_start_time", time.time())

return port, process_info
<SYSTEM_TASK:> Start a log monitor process. <END_TASK> <USER_TASK:> Description: def start_log_monitor(redis_address, logs_dir, stdout_file=None, stderr_file=None, redis_password=None): """Start a log monitor process. Args: redis_address (str): The address of the Redis instance. logs_dir (str): The directory of logging files. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. redis_password (str): The password of the redis server. Returns: ProcessInfo for the process that was started. """
log_monitor_filepath = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "log_monitor.py")
command = [
    sys.executable, "-u", log_monitor_filepath,
    "--redis-address={}".format(redis_address),
    "--logs-dir={}".format(logs_dir)
]
if redis_password:
    command += ["--redis-password", redis_password]
process_info = start_ray_process(
    command,
    ray_constants.PROCESS_TYPE_LOG_MONITOR,
    stdout_file=stdout_file,
    stderr_file=stderr_file)
return process_info
<SYSTEM_TASK:> Start a reporter process. <END_TASK> <USER_TASK:> Description: def start_reporter(redis_address, stdout_file=None, stderr_file=None, redis_password=None): """Start a reporter process. Args: redis_address (str): The address of the Redis instance. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. redis_password (str): The password of the redis server. Returns: ProcessInfo for the process that was started. """
reporter_filepath = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "reporter.py")
command = [
    sys.executable, "-u", reporter_filepath,
    "--redis-address={}".format(redis_address)
]
if redis_password:
    command += ["--redis-password", redis_password]

try:
    import psutil  # noqa: F401
except ImportError:
    logger.warning("Failed to start the reporter. The reporter requires "
                   "'pip install psutil'.")
    return None

process_info = start_ray_process(
    command,
    ray_constants.PROCESS_TYPE_REPORTER,
    stdout_file=stdout_file,
    stderr_file=stderr_file)
return process_info
<SYSTEM_TASK:> Start a dashboard process. <END_TASK> <USER_TASK:> Description: def start_dashboard(redis_address, temp_dir, stdout_file=None, stderr_file=None, redis_password=None): """Start a dashboard process. Args: redis_address (str): The address of the Redis instance. temp_dir (str): The temporary directory used for log files and information for this Ray session. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. redis_password (str): The password of the redis server. Returns: ProcessInfo for the process that was started. """
port = 8080
while True:
    try:
        port_test_socket = socket.socket()
        port_test_socket.bind(("127.0.0.1", port))
        port_test_socket.close()
        break
    except socket.error:
        port += 1

token = ray.utils.decode(binascii.hexlify(os.urandom(24)))

dashboard_filepath = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "dashboard/dashboard.py")
command = [
    sys.executable,
    "-u",
    dashboard_filepath,
    "--redis-address={}".format(redis_address),
    "--http-port={}".format(port),
    "--token={}".format(token),
    "--temp-dir={}".format(temp_dir),
]
if redis_password:
    command += ["--redis-password", redis_password]

if sys.version_info <= (3, 0):
    return None, None
try:
    import aiohttp  # noqa: F401
    import psutil  # noqa: F401
except ImportError:
    raise ImportError(
        "Failed to start the dashboard. The dashboard requires Python 3 "
        "as well as 'pip install aiohttp psutil'.")

process_info = start_ray_process(
    command,
    ray_constants.PROCESS_TYPE_DASHBOARD,
    stdout_file=stdout_file,
    stderr_file=stderr_file)
dashboard_url = "http://{}:{}/?token={}".format(
    ray.services.get_node_ip_address(), port, token)
print("\n" + "=" * 70)
print("View the dashboard at {}".format(dashboard_url))
print("=" * 70 + "\n")
return dashboard_url, process_info
<SYSTEM_TASK:> Start a raylet, which is a combined local scheduler and object manager. <END_TASK> <USER_TASK:> Description: def start_raylet(redis_address, node_ip_address, raylet_name, plasma_store_name, worker_path, temp_dir, num_cpus=None, num_gpus=None, resources=None, object_manager_port=None, node_manager_port=None, redis_password=None, use_valgrind=False, use_profiler=False, stdout_file=None, stderr_file=None, config=None, include_java=False, java_worker_options=None, load_code_from_local=False): """Start a raylet, which is a combined local scheduler and object manager. Args: redis_address (str): The address of the primary Redis server. node_ip_address (str): The IP address of this node. raylet_name (str): The name of the raylet socket to create. plasma_store_name (str): The name of the plasma store socket to connect to. worker_path (str): The path of the Python file that new worker processes will execute. temp_dir (str): The path of the temporary directory Ray will use. num_cpus: The CPUs allocated for this raylet. num_gpus: The GPUs allocated for this raylet. resources: The custom resources allocated for this raylet. object_manager_port: The port to use for the object manager. If this is None, then the object manager will choose its own port. node_manager_port: The port to use for the node manager. If this is None, then the node manager will choose its own port. redis_password: The password to use when connecting to Redis. use_valgrind (bool): True if the raylet should be started inside of valgrind. If this is True, use_profiler must be False. use_profiler (bool): True if the raylet should be started inside a profiler. If this is True, use_valgrind must be False. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. config (dict|None): Optional Raylet configuration that will override defaults in RayConfig. include_java (bool): If True, the raylet backend can also support Java worker. java_worker_options (str): The command options for Java worker. Returns: ProcessInfo for the process that was started. """
config = config or {}
config_str = ",".join(["{},{}".format(*kv) for kv in config.items()])

if use_valgrind and use_profiler:
    raise Exception("Cannot use valgrind and profiler at the same time.")

num_initial_workers = (num_cpus if num_cpus is not None else
                       multiprocessing.cpu_count())

static_resources = check_and_update_resources(num_cpus, num_gpus,
                                              resources)

# Limit the number of workers that can be started in parallel by the
# raylet. However, make sure it is at least 1.
num_cpus_static = static_resources.get("CPU", 0)
maximum_startup_concurrency = max(
    1, min(multiprocessing.cpu_count(), num_cpus_static))

# Format the resource argument in a form like 'CPU,1.0,GPU,0,Custom,3'.
resource_argument = ",".join(
    ["{},{}".format(*kv) for kv in static_resources.items()])

gcs_ip_address, gcs_port = redis_address.split(":")

if include_java is True:
    java_worker_options = (java_worker_options
                           or DEFAULT_JAVA_WORKER_OPTIONS)
    java_worker_command = build_java_worker_command(
        java_worker_options,
        redis_address,
        plasma_store_name,
        raylet_name,
        redis_password,
        os.path.join(temp_dir, "sockets"),
    )
else:
    java_worker_command = ""

# Create the command that the Raylet will use to start workers.
start_worker_command = ("{} {} "
                        "--node-ip-address={} "
                        "--object-store-name={} "
                        "--raylet-name={} "
                        "--redis-address={} "
                        "--temp-dir={}".format(
                            sys.executable, worker_path, node_ip_address,
                            plasma_store_name, raylet_name, redis_address,
                            temp_dir))
if redis_password:
    start_worker_command += " --redis-password {}".format(redis_password)

# If the object manager port is None, then use 0 to cause the object
# manager to choose its own port.
if object_manager_port is None:
    object_manager_port = 0
# If the node manager port is None, then use 0 to cause the node manager
# to choose its own port.
if node_manager_port is None:
    node_manager_port = 0

if load_code_from_local:
    start_worker_command += " --load-code-from-local "

command = [
    RAYLET_EXECUTABLE,
    "--raylet_socket_name={}".format(raylet_name),
    "--store_socket_name={}".format(plasma_store_name),
    "--object_manager_port={}".format(object_manager_port),
    "--node_manager_port={}".format(node_manager_port),
    "--node_ip_address={}".format(node_ip_address),
    "--redis_address={}".format(gcs_ip_address),
    "--redis_port={}".format(gcs_port),
    "--num_initial_workers={}".format(num_initial_workers),
    "--maximum_startup_concurrency={}".format(maximum_startup_concurrency),
    "--static_resource_list={}".format(resource_argument),
    "--config_list={}".format(config_str),
    "--python_worker_command={}".format(start_worker_command),
    "--java_worker_command={}".format(java_worker_command),
    "--redis_password={}".format(redis_password or ""),
    "--temp_dir={}".format(temp_dir),
]
process_info = start_ray_process(
    command,
    ray_constants.PROCESS_TYPE_RAYLET,
    use_valgrind=use_valgrind,
    use_gdb=False,
    use_valgrind_profiler=use_profiler,
    use_perftools_profiler=("RAYLET_PERFTOOLS_PATH" in os.environ),
    stdout_file=stdout_file,
    stderr_file=stderr_file)

return process_info
<SYSTEM_TASK:> This method assembles the command used to start a Java worker. <END_TASK> <USER_TASK:> Description: def build_java_worker_command( java_worker_options, redis_address, plasma_store_name, raylet_name, redis_password, temp_dir, ): """This method assembles the command used to start a Java worker. Args: java_worker_options (str): The command options for the Java worker. redis_address (str): Redis address of GCS. plasma_store_name (str): The name of the plasma store socket to connect to. raylet_name (str): The name of the raylet socket to create. redis_password (str): The password used to connect to Redis. temp_dir (str): The path of the temporary directory Ray will use. Returns: The command string for starting the Java worker. """
assert java_worker_options is not None

command = "java "

if redis_address is not None:
    command += "-Dray.redis.address={} ".format(redis_address)

if plasma_store_name is not None:
    command += (
        "-Dray.object-store.socket-name={} ".format(plasma_store_name))

if raylet_name is not None:
    command += "-Dray.raylet.socket-name={} ".format(raylet_name)

if redis_password is not None:
    command += "-Dray.redis.password={} ".format(redis_password)

command += "-Dray.home={} ".format(RAY_HOME)
# TODO(suquark): We should use temp_dir as the input of a java worker.
command += "-Dray.log-dir={} ".format(os.path.join(temp_dir, "sockets"))

if java_worker_options:
    # Put `java_worker_options` last, so it can overwrite the above
    # options.
    command += java_worker_options + " "

command += "org.ray.runtime.runner.worker.DefaultWorker"

return command
<SYSTEM_TASK:> Figure out how to configure the plasma object store. <END_TASK> <USER_TASK:> Description: def determine_plasma_store_config(object_store_memory=None, plasma_directory=None, huge_pages=False): """Figure out how to configure the plasma object store. This will determine which directory to use for the plasma store (e.g., /tmp or /dev/shm) and how much memory to start the store with. On Linux, we will try to use /dev/shm unless the shared memory file system is too small, in which case we will fall back to /tmp. If any of the object store memory or plasma directory parameters are specified by the user, then those values will be preserved. Args: object_store_memory (int): The user-specified object store memory parameter. plasma_directory (str): The user-specified plasma directory parameter. huge_pages (bool): The user-specified huge pages parameter. Returns: A tuple of the object store memory to use and the plasma directory to use. If either of these values is specified by the user, then that value will be preserved. """
system_memory = ray.utils.get_system_memory()

# Choose a default object store size.
if object_store_memory is None:
    object_store_memory = int(system_memory * 0.3)
    # Cap memory to avoid memory waste and perf issues on large nodes.
    if (object_store_memory >
            ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES):
        logger.warning(
            "Warning: Capping object memory store to {}GB. ".format(
                ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES // 1e9)
            + "To increase this further, specify `object_store_memory` "
            "when calling ray.init() or ray start.")
        object_store_memory = (
            ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES)

# Determine which directory to use. By default, use /tmp on MacOS and
# /dev/shm on Linux, unless the shared-memory file system is too small,
# in which case we default to /tmp on Linux.
if plasma_directory is None:
    if sys.platform == "linux" or sys.platform == "linux2":
        shm_avail = ray.utils.get_shared_memory_bytes()
        # Compare the requested memory size to the memory available in
        # /dev/shm.
        if shm_avail > object_store_memory:
            plasma_directory = "/dev/shm"
        else:
            plasma_directory = "/tmp"
            logger.warning(
                "WARNING: The object store is using /tmp instead of "
                "/dev/shm because /dev/shm has only {} bytes available. "
                "This may slow down performance! You may be able to free "
                "up space by deleting files in /dev/shm or terminating "
                "any running plasma_store_server processes. If you are "
                "inside a Docker container, you may need to pass an "
                "argument with the flag '--shm-size' to 'docker run'.".
                format(shm_avail))
    else:
        plasma_directory = "/tmp"

    # Do some sanity checks.
    if object_store_memory > system_memory:
        raise Exception("The requested object store memory size is greater "
                        "than the total available memory.")
else:
    plasma_directory = os.path.abspath(plasma_directory)
    logger.warning("WARNING: object_store_memory is not verified when "
                   "plasma_directory is set.")

if not os.path.isdir(plasma_directory):
    raise Exception(
        "The file {} does not exist or is not a directory.".format(
            plasma_directory))

return object_store_memory, plasma_directory
<SYSTEM_TASK:> Start a plasma store process. <END_TASK> <USER_TASK:> Description: def _start_plasma_store(plasma_store_memory, use_valgrind=False, use_profiler=False, stdout_file=None, stderr_file=None, plasma_directory=None, huge_pages=False, socket_name=None): """Start a plasma store process. Args: plasma_store_memory (int): The amount of memory in bytes to start the plasma store with. use_valgrind (bool): True if the plasma store should be started inside of valgrind. If this is True, use_profiler must be False. use_profiler (bool): True if the plasma store should be started inside a profiler. If this is True, use_valgrind must be False. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. plasma_directory: A directory where the Plasma memory mapped files will be created. huge_pages: a boolean flag indicating whether to start the Object Store with hugetlbfs support. Requires plasma_directory. socket_name (str): If provided, it will specify the socket name used by the plasma store. Return: A tuple of the name of the plasma store socket and ProcessInfo for the plasma store process. """
if use_valgrind and use_profiler:
    raise Exception("Cannot use valgrind and profiler at the same time.")

if huge_pages and not (sys.platform == "linux"
                       or sys.platform == "linux2"):
    raise Exception("The huge_pages argument is only supported on "
                    "Linux.")

if huge_pages and plasma_directory is None:
    raise Exception("If huge_pages is True, then the "
                    "plasma_directory argument must be provided.")

if not isinstance(plasma_store_memory, int):
    raise Exception("plasma_store_memory should be an integer.")

command = [
    PLASMA_STORE_EXECUTABLE, "-s", socket_name, "-m",
    str(plasma_store_memory)
]
if plasma_directory is not None:
    command += ["-d", plasma_directory]
if huge_pages:
    command += ["-h"]
process_info = start_ray_process(
    command,
    ray_constants.PROCESS_TYPE_PLASMA_STORE,
    use_valgrind=use_valgrind,
    use_valgrind_profiler=use_profiler,
    stdout_file=stdout_file,
    stderr_file=stderr_file)
return process_info
<SYSTEM_TASK:> This method starts an object store process. <END_TASK> <USER_TASK:> Description: def start_plasma_store(stdout_file=None, stderr_file=None, object_store_memory=None, plasma_directory=None, huge_pages=False, plasma_store_socket_name=None): """This method starts an object store process. Args: stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. object_store_memory: The amount of memory (in bytes) to start the object store with. plasma_directory: A directory where the Plasma memory mapped files will be created. huge_pages: Boolean flag indicating whether to start the Object Store with hugetlbfs support. Requires plasma_directory. Returns: ProcessInfo for the process that was started. """
object_store_memory, plasma_directory = determine_plasma_store_config(
    object_store_memory, plasma_directory, huge_pages)

if object_store_memory < ray_constants.OBJECT_STORE_MINIMUM_MEMORY_BYTES:
    raise ValueError("Attempting to cap object store memory usage at {} "
                     "bytes, but the minimum allowed is {} bytes.".format(
                         object_store_memory,
                         ray_constants.OBJECT_STORE_MINIMUM_MEMORY_BYTES))

# Print the object store memory using two decimal places.
object_store_memory_str = (object_store_memory / 10**7) / 10**2
logger.info("Starting the Plasma object store with {} GB memory "
            "using {}.".format(
                round(object_store_memory_str, 2), plasma_directory))
# Start the Plasma store.
process_info = _start_plasma_store(
    object_store_memory,
    use_profiler=RUN_PLASMA_STORE_PROFILER,
    stdout_file=stdout_file,
    stderr_file=stderr_file,
    plasma_directory=plasma_directory,
    huge_pages=huge_pages,
    socket_name=plasma_store_socket_name)
return process_info
<SYSTEM_TASK:> This method starts a worker process. <END_TASK> <USER_TASK:> Description: def start_worker(node_ip_address, object_store_name, raylet_name, redis_address, worker_path, temp_dir, stdout_file=None, stderr_file=None): """This method starts a worker process. Args: node_ip_address (str): The IP address of the node that this worker is running on. object_store_name (str): The socket name of the object store. raylet_name (str): The socket name of the raylet server. redis_address (str): The address that the Redis server is listening on. worker_path (str): The path of the source code which the worker process will run. temp_dir (str): The path of the temp dir. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. Returns: ProcessInfo for the process that was started. """
command = [
    sys.executable, "-u", worker_path,
    "--node-ip-address=" + node_ip_address,
    "--object-store-name=" + object_store_name,
    "--raylet-name=" + raylet_name,
    "--redis-address=" + str(redis_address),
    "--temp-dir=" + temp_dir
]
process_info = start_ray_process(
    command,
    ray_constants.PROCESS_TYPE_WORKER,
    stdout_file=stdout_file,
    stderr_file=stderr_file)
return process_info
<SYSTEM_TASK:> Unpacks Dict and Tuple space observations into their original form. <END_TASK> <USER_TASK:> Description: def restore_original_dimensions(obs, obs_space, tensorlib=tf): """Unpacks Dict and Tuple space observations into their original form. This is needed since we flatten Dict and Tuple observations in transit. Before sending them to the model though, we should unflatten them into Dicts or Tuples of tensors. Arguments: obs: The flattened observation tensor. obs_space: The flattened obs space. If this has the `original_space` attribute, we will unflatten the tensor to that shape. tensorlib: The library used to unflatten (reshape) the array/tensor. Returns: single tensor or dict / tuple of tensors matching the original observation space. """
if hasattr(obs_space, "original_space"):
    return _unpack_obs(obs, obs_space.original_space, tensorlib=tensorlib)
else:
    return obs
<SYSTEM_TASK:> Update the AWS tags for a cluster periodically. <END_TASK> <USER_TASK:> Description: def _node_tag_update_loop(self): """ Update the AWS tags for a cluster periodically. The purpose of this loop is to avoid excessive EC2 calls when a large number of nodes are being launched simultaneously. """
while True:
    self.tag_cache_update_event.wait()
    self.tag_cache_update_event.clear()

    batch_updates = defaultdict(list)

    with self.tag_cache_lock:
        for node_id, tags in self.tag_cache_pending.items():
            for x in tags.items():
                batch_updates[x].append(node_id)
            self.tag_cache[node_id].update(tags)

        self.tag_cache_pending = {}

    for (k, v), node_ids in batch_updates.items():
        m = "Set tag {}={} on {}".format(k, v, node_ids)
        with LogTimer("AWSNodeProvider: {}".format(m)):
            if k == TAG_RAY_NODE_NAME:
                k = "Name"
            self.ec2.meta.client.create_tags(
                Resources=node_ids,
                Tags=[{
                    "Key": k,
                    "Value": v
                }],
            )

    self.tag_cache_kill_event.wait(timeout=5)
    if self.tag_cache_kill_event.is_set():
        return
<SYSTEM_TASK:> Refresh and get info for this node, updating the cache. <END_TASK> <USER_TASK:> Description: def _get_node(self, node_id): """Refresh and get info for this node, updating the cache."""
self.non_terminated_nodes({})  # Side effect: updates cache

if node_id in self.cached_nodes:
    return self.cached_nodes[node_id]

# Node not in {pending, running} -- retry with a point query. This
# usually means the node was recently preempted or terminated.
matches = list(self.ec2.instances.filter(InstanceIds=[node_id]))
assert len(matches) == 1, "Invalid instance id {}".format(node_id)
return matches[0]
<SYSTEM_TASK:> Whether the given result meets this trial's stopping criteria. <END_TASK> <USER_TASK:> Description: def should_stop(self, result): """Whether the given result meets this trial's stopping criteria."""
if result.get(DONE):
    return True

for criteria, stop_value in self.stopping_criterion.items():
    if criteria not in result:
        raise TuneError(
            "Stopping criteria {} not provided in result {}.".format(
                criteria, result))
    if result[criteria] >= stop_value:
        return True

return False
<SYSTEM_TASK:> Whether this trial is due for checkpointing. <END_TASK> <USER_TASK:> Description: def should_checkpoint(self): """Whether this trial is due for checkpointing."""
result = self.last_result or {}

if result.get(DONE) and self.checkpoint_at_end:
    return True

if self.checkpoint_freq:
    return result.get(TRAINING_ITERATION, 0) % self.checkpoint_freq == 0
else:
    return False
<SYSTEM_TASK:> Returns a progress message for printing out to the console. <END_TASK> <USER_TASK:> Description: def progress_string(self): """Returns a progress message for printing out to the console."""
if not self.last_result:
    return self._status_string()

def location_string(hostname, pid):
    if hostname == os.uname()[1]:
        return "pid={}".format(pid)
    else:
        return "{} pid={}".format(hostname, pid)

pieces = [
    "{}".format(self._status_string()),
    "[{}]".format(self.resources.summary_string()),
    "[{}]".format(
        location_string(
            self.last_result.get(HOSTNAME),
            self.last_result.get(PID))),
    "{} s".format(int(self.last_result.get(TIME_TOTAL_S)))
]

if self.last_result.get(TRAINING_ITERATION) is not None:
    pieces.append("{} iter".format(
        self.last_result[TRAINING_ITERATION]))

if self.last_result.get(TIMESTEPS_TOTAL) is not None:
    pieces.append("{} ts".format(self.last_result[TIMESTEPS_TOTAL]))

if self.last_result.get(EPISODE_REWARD_MEAN) is not None:
    pieces.append("{} rew".format(
        format(self.last_result[EPISODE_REWARD_MEAN], ".3g")))

if self.last_result.get(MEAN_LOSS) is not None:
    pieces.append("{} loss".format(
        format(self.last_result[MEAN_LOSS], ".3g")))

if self.last_result.get(MEAN_ACCURACY) is not None:
    pieces.append("{} acc".format(
        format(self.last_result[MEAN_ACCURACY], ".3g")))

return ", ".join(pieces)
<SYSTEM_TASK:> Returns whether the trial qualifies for restoring. <END_TASK> <USER_TASK:> Description: def should_recover(self): """Returns whether the trial qualifies for restoring. This is if a checkpoint frequency is set and has not failed more than max_failures. This may return true even when there may not yet be a checkpoint. """
return (self.checkpoint_freq > 0 and (self.num_failures < self.max_failures or self.max_failures < 0))
<SYSTEM_TASK:> Compares two checkpoints based on the attribute attr_mean param. <END_TASK> <USER_TASK:> Description: def compare_checkpoints(self, attr_mean): """Compares two checkpoints based on the attribute attr_mean param. Greater than is used by default. If command-line parameter checkpoint_score_attr starts with "min-" less than is used. Arguments: attr_mean: mean of attribute value for the current checkpoint Returns: True: when attr_mean is greater than previous checkpoint attr_mean and greater than function is selected when attr_mean is less than previous checkpoint attr_mean and less than function is selected False: when attr_mean is not in alignment with selected cmp fn """
if self._cmp_greater and attr_mean > self.best_checkpoint_attr_value:
    return True
elif (not self._cmp_greater
      and attr_mean < self.best_checkpoint_attr_value):
    return True
return False
<SYSTEM_TASK:> take 1D float array of rewards and compute discounted reward <END_TASK> <USER_TASK:> Description: def discount_rewards(r): """take 1D float array of rewards and compute discounted reward"""
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(range(0, r.size)):
    # Reset the sum, since this was a game boundary (pong specific!).
    if r[t] != 0:
        running_add = 0
    running_add = running_add * gamma + r[t]
    discounted_r[t] = running_add
return discounted_r
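A small worked example of the discounting above; gamma is a module-level constant in the original script, assumed here to be 0.99:

import numpy as np

gamma = 0.99  # assumed value; the original script defines this globally

# Two "games": the first ends at index 2 with reward +1, the second at
# index 4 with reward -1. Zeros in between are non-scoring frames.
r = np.array([0.0, 0.0, 1.0, 0.0, -1.0])
out = discount_rewards(r)
# Rewards only propagate backwards within each game, because the running
# sum is reset at every nonzero (game-boundary) reward:
# out == [0.9801, 0.99, 1.0, -0.99, -1.0]
print(out)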
<SYSTEM_TASK:> Load a class at runtime given a full path. <END_TASK> <USER_TASK:> Description: def load_class(path): """ Load a class at runtime given a full path. Example of the path: mypkg.mysubpkg.myclass """
class_data = path.split(".")
if len(class_data) < 2:
    raise ValueError(
        "You need to pass a valid path like mymodule.provider_class")
module_path = ".".join(class_data[:-1])
class_str = class_data[-1]
module = importlib.import_module(module_path)
return getattr(module, class_str)
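For example, the helper can load a standard-library class by its dotted path (a usage sketch, nothing Ray-specific):

# Load a class from the standard library by its dotted path.
od_cls = load_class("collections.OrderedDict")
d = od_cls(a=1, b=2)
assert list(d.keys()) == ["a", "b"]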
<SYSTEM_TASK:> Terminates a set of nodes. May be overridden with a batch method. <END_TASK> <USER_TASK:> Description: def terminate_nodes(self, node_ids): """Terminates a set of nodes. May be overridden with a batch method."""
for node_id in node_ids:
    logger.info("NodeProvider: "
                "{}: Terminating node".format(node_id))
    self.terminate_node(node_id)
<SYSTEM_TASK:> Passes the result to BayesOpt unless early terminated or errored <END_TASK> <USER_TASK:> Description: def on_trial_complete(self, trial_id, result=None, error=False, early_terminated=False): """Passes the result to BayesOpt unless early terminated or errored"""
if result:
    self.optimizer.register(
        params=self._live_trial_mapping[trial_id],
        target=result[self._reward_attr])

del self._live_trial_mapping[trial_id]
<SYSTEM_TASK:> Execute method with arg and return the result. <END_TASK> <USER_TASK:> Description: def _execute_and_seal_error(method, arg, method_name): """Execute method with arg and return the result. If the method fails, return a RayTaskError so it can be sealed in the resultOID and retried by user. """
try:
    return method(arg)
except Exception:
    return ray.worker.RayTaskError(method_name, traceback.format_exc())
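A hedged sketch of how the sealing behaves: a failing method does not raise, it returns a RayTaskError that can later be stored in the result object ID (the flaky function below is made up for illustration):

def flaky(x):
    raise ValueError("boom")

res = _execute_and_seal_error(flaky, 42, "flaky")
# The exception is captured and returned rather than propagated.
assert isinstance(res, ray.worker.RayTaskError)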
<SYSTEM_TASK:> Helper method to dispatch a batch of input to self.serve_method. <END_TASK> <USER_TASK:> Description: def _dispatch(self, input_batch: List[SingleQuery]): """Helper method to dispatch a batch of input to self.serve_method."""
method = getattr(self, self.serve_method)
if hasattr(method, "ray_serve_batched_input"):
    batch = [inp.data for inp in input_batch]
    result = _execute_and_seal_error(method, batch, self.serve_method)
    for res, inp in zip(result, input_batch):
        ray.worker.global_worker.put_object(inp.result_object_id, res)
else:
    for inp in input_batch:
        result = _execute_and_seal_error(method, inp.data,
                                         self.serve_method)
        ray.worker.global_worker.put_object(inp.result_object_id, result)
<SYSTEM_TASK:> Returns the gym env wrapper of the given class, or None. <END_TASK> <USER_TASK:> Description: def get_wrapper_by_cls(env, cls): """Returns the gym env wrapper of the given class, or None."""
currentenv = env
while True:
    if isinstance(currentenv, cls):
        return currentenv
    elif isinstance(currentenv, gym.Wrapper):
        currentenv = currentenv.env
    else:
        return None
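A usage sketch, assuming gym is installed; gym.make wraps classic-control environments in TimeLimit, which is what the lookup finds here (the environment id is only an example):

import gym

env = gym.make("CartPole-v0")
# gym.make wraps the raw env in TimeLimit, so the search succeeds.
time_limit = get_wrapper_by_cls(env, gym.wrappers.TimeLimit)
assert time_limit is not None
# Searching for a wrapper class that is not in the chain returns None.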
<SYSTEM_TASK:> Configure environment for DeepMind-style Atari. <END_TASK> <USER_TASK:> Description: def wrap_deepmind(env, dim=84, framestack=True): """Configure environment for DeepMind-style Atari. Note that we assume reward clipping is done outside the wrapper. Args: dim (int): Dimension to resize observations to (dim x dim). framestack (bool): Whether to framestack observations. """
env = MonitorEnv(env)
env = NoopResetEnv(env, noop_max=30)
if "NoFrameskip" in env.spec.id:
    env = MaxAndSkipEnv(env, skip=4)
env = EpisodicLifeEnv(env)
if "FIRE" in env.unwrapped.get_action_meanings():
    env = FireResetEnv(env)
env = WarpFrame(env, dim)
# env = ScaledFloatFrame(env)  # TODO: use for dqn?
# env = ClipRewardEnv(env)  # reward clipping is handled by policy eval
if framestack:
    env = FrameStack(env, 4)
return env
<SYSTEM_TASK:> Call ray.get and then queue the object ids for deletion. <END_TASK> <USER_TASK:> Description: def ray_get_and_free(object_ids): """Call ray.get and then queue the object ids for deletion. This function should be used whenever possible in RLlib, to optimize memory usage. The only exception is when an object_id is shared among multiple readers. Args: object_ids (ObjectID|List[ObjectID]): Object ids to fetch and free. Returns: The result of ray.get(object_ids). """
global _last_free_time
global _to_free

result = ray.get(object_ids)
if type(object_ids) is not list:
    object_ids = [object_ids]
_to_free.extend(object_ids)

# batch calls to free to reduce overheads
now = time.time()
if (len(_to_free) > MAX_FREE_QUEUE_SIZE
        or now - _last_free_time > FREE_DELAY_S):
    ray.internal.free(_to_free)
    _to_free = []
    _last_free_time = now

return result
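A usage sketch, assuming ray.init() can be called locally and that the module-level globals this helper relies on (_to_free, _last_free_time, MAX_FREE_QUEUE_SIZE, FREE_DELAY_S) are defined as in RLlib:

import ray

ray.init()  # start a local Ray instance for illustration
oid = ray.put([1, 2, 3])
# Behaves like ray.get, but also queues the id for a later batched
# ray.internal.free call.
assert ray_get_and_free(oid) == [1, 2, 3]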
<SYSTEM_TASK:> Returns an array of a given size that is 64-byte aligned. <END_TASK> <USER_TASK:> Description: def aligned_array(size, dtype, align=64): """Returns an array of a given size that is 64-byte aligned. The returned array can be efficiently copied into GPU memory by TensorFlow. """
n = size * dtype.itemsize
empty = np.empty(n + (align - 1), dtype=np.uint8)
data_align = empty.ctypes.data % align
offset = 0 if data_align == 0 else (align - data_align)
output = empty[offset:offset + n].view(dtype)
assert len(output) == size, len(output)
assert output.ctypes.data % align == 0, output.ctypes.data
return output
<SYSTEM_TASK:> Concatenate arrays, ensuring the output is 64-byte aligned. <END_TASK> <USER_TASK:> Description: def concat_aligned(items): """Concatenate arrays, ensuring the output is 64-byte aligned. We only align float arrays; other arrays are concatenated as normal. This should be used instead of np.concatenate() to improve performance when the output array is likely to be fed into TensorFlow. """
if len(items) == 0:
    return []
elif len(items) == 1:
    # we assume the input is aligned. In any case, it doesn't help
    # performance to force align it since that incurs a needless copy.
    return items[0]
elif (isinstance(items[0], np.ndarray)
      and items[0].dtype in [np.float32, np.float64, np.uint8]):
    dtype = items[0].dtype
    flat = aligned_array(sum(s.size for s in items), dtype)
    batch_dim = sum(s.shape[0] for s in items)
    new_shape = (batch_dim, ) + items[0].shape[1:]
    output = flat.reshape(new_shape)
    assert output.ctypes.data % 64 == 0, output.ctypes.data
    np.concatenate(items, out=output)
    return output
else:
    return np.concatenate(items)
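A quick check of the alignment guarantee provided by the two helpers above, assuming numpy is available (sketch only):

import numpy as np

parts = [np.ones((2, 4), np.float32), np.zeros((3, 4), np.float32)]
out = concat_aligned(parts)
assert out.shape == (5, 4)
# The float path goes through aligned_array, so the result is 64-byte
# aligned and can be copied into GPU memory efficiently by TensorFlow.
assert out.ctypes.data % 64 == 0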
<SYSTEM_TASK:> Gets an item from the queue. <END_TASK> <USER_TASK:> Description: def get(self, block=True, timeout=None): """Gets an item from the queue. Uses polling if block=True, so there is no guarantee of order if multiple consumers get from the same empty queue. Returns: The next item in the queue. Raises: Empty if the queue is empty and blocking is False. """
if not block:
    success, item = ray.get(self.actor.get.remote())
    if not success:
        raise Empty
elif timeout is None:
    # Polling
    # Use a not_empty condition variable or return a promise?
    success, item = ray.get(self.actor.get.remote())
    while not success:
        # Consider adding time.sleep here
        success, item = ray.get(self.actor.get.remote())
elif timeout < 0:
    raise ValueError("'timeout' must be a non-negative number")
else:
    endtime = time.time() + timeout
    # Polling
    # Use a not_full condition variable or return a promise?
    success = False
    while not success and time.time() < endtime:
        success, item = ray.get(self.actor.get.remote())
    if not success:
        raise Empty
return item
<SYSTEM_TASK:> Annotation for documenting method overrides. <END_TASK> <USER_TASK:> Description: def override(cls): """Annotation for documenting method overrides. Arguments: cls (type): The superclass that provides the overridden method. If this cls does not actually have the method, an error is raised. """
def check_override(method):
    if method.__name__ not in dir(cls):
        raise NameError("{} does not override any method of {}".format(
            method, cls))
    return method

return check_override
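A short usage sketch of the decorator (self-contained, nothing assumed beyond the function above):

class Base:
    def step(self):
        return 0

class Child(Base):
    @override(Base)
    def step(self):  # OK: Base defines step, so the check passes.
        return 1

# Decorating a method that Base does not define would raise NameError:
#     @override(Base)
#     def run(self): ...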
<SYSTEM_TASK:> Adds new trial. <END_TASK> <USER_TASK:> Description: def on_trial_add(self, trial_runner, trial): """Adds new trial. On a new trial add, if current bracket is not filled, add to current bracket. Else, if current band is not filled, create new bracket, add to current bracket. Else, create new iteration, create new bracket, add to bracket."""
cur_bracket = self._state["bracket"]
cur_band = self._hyperbands[self._state["band_idx"]]
if cur_bracket is None or cur_bracket.filled():
    retry = True
    while retry:
        # if current iteration is filled, create new iteration
        if self._cur_band_filled():
            cur_band = []
            self._hyperbands.append(cur_band)
            self._state["band_idx"] += 1

        # cur_band will always be less than s_max_1 or else filled
        s = len(cur_band)
        assert s < self._s_max_1, "Current band is filled!"
        if self._get_r0(s) == 0:
            logger.info("Bracket too small - Retrying...")
            cur_bracket = None
        else:
            retry = False
            cur_bracket = Bracket(self._time_attr, self._get_n0(s),
                                  self._get_r0(s), self._max_t_attr,
                                  self._eta, s)
        cur_band.append(cur_bracket)
        self._state["bracket"] = cur_bracket

self._state["bracket"].add_trial(trial)
self._trial_info[trial] = cur_bracket, self._state["band_idx"]
<SYSTEM_TASK:> Checks if the current band is filled. <END_TASK> <USER_TASK:> Description: def _cur_band_filled(self): """Checks if the current band is filled. The size of the current band should be equal to s_max_1"""
cur_band = self._hyperbands[self._state["band_idx"]]
return len(cur_band) == self._s_max_1
<SYSTEM_TASK:> If bracket is finished, all trials will be stopped. <END_TASK> <USER_TASK:> Description: def on_trial_result(self, trial_runner, trial, result): """If bracket is finished, all trials will be stopped. If a given trial finishes and bracket iteration is not done, the trial will be paused and resources will be given up. This scheduler will not start trials but will stop trials. The current running trial will not be handled, as the trialrunner will be given control to handle it."""
bracket, _ = self._trial_info[trial]
bracket.update_trial_stats(trial, result)

if bracket.continue_trial(trial):
    return TrialScheduler.CONTINUE

action = self._process_bracket(trial_runner, bracket, trial)
return action
<SYSTEM_TASK:> This is called whenever a trial makes progress. <END_TASK> <USER_TASK:> Description: def _process_bracket(self, trial_runner, bracket, trial): """This is called whenever a trial makes progress. When all live trials in the bracket have no more iterations left, Trials will be successively halved. If bracket is done, all non-running trials will be stopped and cleaned up, and during each halving phase, bad trials will be stopped while good trials will return to "PENDING"."""
action = TrialScheduler.PAUSE
if bracket.cur_iter_done():
    if bracket.finished():
        bracket.cleanup_full(trial_runner)
        return TrialScheduler.STOP

    good, bad = bracket.successive_halving(self._reward_attr)
    # kill bad trials
    self._num_stopped += len(bad)
    for t in bad:
        if t.status == Trial.PAUSED:
            trial_runner.stop_trial(t)
        elif t.status == Trial.RUNNING:
            bracket.cleanup_trial(t)
            action = TrialScheduler.STOP
        else:
            raise Exception("Trial with unexpected status encountered")

    # ready the good trials - if trial is too far ahead, don't continue
    for t in good:
        if t.status not in [Trial.PAUSED, Trial.RUNNING]:
            raise Exception("Trial with unexpected status encountered")
        if bracket.continue_trial(t):
            if t.status == Trial.PAUSED:
                trial_runner.trial_executor.unpause_trial(t)
            elif t.status == Trial.RUNNING:
                action = TrialScheduler.CONTINUE
return action
<SYSTEM_TASK:> Notification when trial terminates. <END_TASK> <USER_TASK:> Description: def on_trial_remove(self, trial_runner, trial): """Notification when trial terminates. Trial info is removed from bracket. Triggers halving if bracket is not finished."""
bracket, _ = self._trial_info[trial]
bracket.cleanup_trial(trial)
if not bracket.finished():
    self._process_bracket(trial_runner, bracket, trial)
<SYSTEM_TASK:> Fair scheduling within iteration by completion percentage. <END_TASK> <USER_TASK:> Description: def choose_trial_to_run(self, trial_runner): """Fair scheduling within iteration by completion percentage. List of trials not used since all trials are tracked as state of scheduler. If iteration is occupied (ie, no trials to run), then look into next iteration. """
for hyperband in self._hyperbands:
    # band will have None entries if no resources
    # are to be allocated to that bracket.
    scrubbed = [b for b in hyperband if b is not None]
    for bracket in sorted(
            scrubbed, key=lambda b: b.completion_percentage()):
        for trial in bracket.current_trials():
            if (trial.status == Trial.PENDING
                    and trial_runner.has_resources(trial.resources)):
                return trial
return None
<SYSTEM_TASK:> This provides a progress notification for the algorithm. <END_TASK> <USER_TASK:> Description: def debug_string(self): """This provides a progress notification for the algorithm. For each bracket, the algorithm will output a string as follows: Bracket(Max Size (n)=5, Milestone (r)=33, completed=14.6%): {PENDING: 2, RUNNING: 3, TERMINATED: 2} "Max Size" indicates the max number of pending/running experiments set according to the Hyperband algorithm. "Milestone" indicates the iterations a trial will run for before the next halving will occur. "Completed" indicates an approximate progress metric. Some brackets, like ones that are unfilled, will not reach 100%. """
out = "Using HyperBand: " out += "num_stopped={} total_brackets={}".format( self._num_stopped, sum(len(band) for band in self._hyperbands)) for i, band in enumerate(self._hyperbands): out += "\nRound #{}:".format(i) for bracket in band: out += "\n {}".format(bracket) return out
<SYSTEM_TASK:> Add trial to bracket assuming bracket is not filled. <END_TASK> <USER_TASK:> Description: def add_trial(self, trial): """Add trial to bracket assuming bracket is not filled. At a later iteration, a newly added trial will be given equal opportunity to catch up."""
assert not self.filled(), "Cannot add trial to filled bracket!"
self._live_trials[trial] = None
self._all_trials.append(trial)
<SYSTEM_TASK:> Checks if all iterations have completed. <END_TASK> <USER_TASK:> Description: def cur_iter_done(self): """Checks if all iterations have completed. TODO(rliaw): also check that `t.iterations == self._r`"""
return all( self._get_result_time(result) >= self._cumul_r for result in self._live_trials.values())
<SYSTEM_TASK:> Update result for trial. Called after trial has finished <END_TASK> <USER_TASK:> Description: def update_trial_stats(self, trial, result): """Update result for trial. Called after trial has finished an iteration - will decrement iteration count. TODO(rliaw): The other alternative is to keep the trials in and make sure they're not set as pending later."""
assert trial in self._live_trials
assert self._get_result_time(result) >= 0

delta = self._get_result_time(result) - \
    self._get_result_time(self._live_trials[trial])
assert delta >= 0
self._completed_progress += delta
self._live_trials[trial] = result
<SYSTEM_TASK:> Cleans up bracket after bracket is completely finished. <END_TASK> <USER_TASK:> Description: def cleanup_full(self, trial_runner): """Cleans up bracket after bracket is completely finished. Lets the last trial continue to run until termination condition kicks in."""
for trial in self.current_trials():
    if (trial.status == Trial.PAUSED):
        trial_runner.stop_trial(trial)
<SYSTEM_TASK:> Read the client table. <END_TASK> <USER_TASK:> Description: def parse_client_table(redis_client): """Read the client table. Args: redis_client: A client to the primary Redis shard. Returns: A list of information about the nodes in the cluster. """
NIL_CLIENT_ID = ray.ObjectID.nil().binary()
message = redis_client.execute_command("RAY.TABLE_LOOKUP",
                                       ray.gcs_utils.TablePrefix.CLIENT,
                                       "", NIL_CLIENT_ID)

# Handle the case where no clients are returned. This should only
# occur potentially immediately after the cluster is started.
if message is None:
    return []

node_info = {}
gcs_entry = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(message, 0)

ordered_client_ids = []

# Since GCS entries are append-only, we override so that
# only the latest entries are kept.
for i in range(gcs_entry.EntriesLength()):
    client = (ray.gcs_utils.ClientTableData.GetRootAsClientTableData(
        gcs_entry.Entries(i), 0))

    resources = {
        decode(client.ResourcesTotalLabel(i)):
        client.ResourcesTotalCapacity(i)
        for i in range(client.ResourcesTotalLabelLength())
    }
    client_id = ray.utils.binary_to_hex(client.ClientId())

    # If this client is being removed, then it must
    # have previously been inserted, and
    # it cannot have previously been removed.
    if not client.IsInsertion():
        assert client_id in node_info, "Client removed not found!"
        assert node_info[client_id]["IsInsertion"], (
            "Unexpected duplicate removal of client.")
    else:
        ordered_client_ids.append(client_id)

    node_info[client_id] = {
        "ClientID": client_id,
        "IsInsertion": client.IsInsertion(),
        "NodeManagerAddress": decode(
            client.NodeManagerAddress(), allow_none=True),
        "NodeManagerPort": client.NodeManagerPort(),
        "ObjectManagerPort": client.ObjectManagerPort(),
        "ObjectStoreSocketName": decode(
            client.ObjectStoreSocketName(), allow_none=True),
        "RayletSocketName": decode(
            client.RayletSocketName(), allow_none=True),
        "Resources": resources
    }

# NOTE: We return the list comprehension below instead of simply doing
# 'list(node_info.values())' in order to have the nodes appear in the
# order that they joined the cluster. Python dictionaries do not preserve
# insertion order. We could use an OrderedDict, but then we'd have to be
# sure to only insert a given node a single time (clients that die appear
# twice in the GCS log).
return [node_info[client_id] for client_id in ordered_client_ids]
<SYSTEM_TASK:> Initialize the GlobalState object by connecting to Redis. <END_TASK> <USER_TASK:> Description: def _initialize_global_state(self, redis_address, redis_password=None, timeout=20): """Initialize the GlobalState object by connecting to Redis. It's possible that certain keys in Redis may not have been fully populated yet. In this case, we will retry this method until they have been populated or we exceed a timeout. Args: redis_address: The Redis address to connect. redis_password: The password of the redis server. """
self.redis_client = services.create_redis_client(redis_address,
                                                 redis_password)
start_time = time.time()

num_redis_shards = None
redis_shard_addresses = []

while time.time() - start_time < timeout:
    # Attempt to get the number of Redis shards.
    num_redis_shards = self.redis_client.get("NumRedisShards")
    if num_redis_shards is None:
        print("Waiting longer for NumRedisShards to be populated.")
        time.sleep(1)
        continue
    num_redis_shards = int(num_redis_shards)
    if num_redis_shards < 1:
        raise Exception("Expected at least one Redis shard, found "
                        "{}.".format(num_redis_shards))

    # Attempt to get all of the Redis shards.
    redis_shard_addresses = self.redis_client.lrange(
        "RedisShards", start=0, end=-1)
    if len(redis_shard_addresses) != num_redis_shards:
        print("Waiting longer for RedisShards to be populated.")
        time.sleep(1)
        continue

    # If we got here then we successfully got all of the information.
    break

# Check to see if we timed out.
if time.time() - start_time >= timeout:
    raise Exception("Timed out while attempting to initialize the "
                    "global state. num_redis_shards = {}, "
                    "redis_shard_addresses = {}".format(
                        num_redis_shards, redis_shard_addresses))

# Get the rest of the information.
self.redis_clients = []
for shard_address in redis_shard_addresses:
    self.redis_clients.append(
        services.create_redis_client(shard_address.decode(),
                                     redis_password))
<SYSTEM_TASK:> Execute a Redis command on the appropriate Redis shard based on key. <END_TASK> <USER_TASK:> Description: def _execute_command(self, key, *args): """Execute a Redis command on the appropriate Redis shard based on key. Args: key: The object ID or the task ID that the query is about. args: The command to run. Returns: The value returned by the Redis command. """
client = self.redis_clients[key.redis_shard_hash() %
                            len(self.redis_clients)]
return client.execute_command(*args)
<SYSTEM_TASK:> Execute the KEYS command on all Redis shards. <END_TASK> <USER_TASK:> Description: def _keys(self, pattern): """Execute the KEYS command on all Redis shards. Args: pattern: The KEYS pattern to query. Returns: The concatenated list of results from all shards. """
result = []
for client in self.redis_clients:
    result.extend(list(client.scan_iter(match=pattern)))
return result
<SYSTEM_TASK:> Fetch and parse the object table information for a single object ID. <END_TASK> <USER_TASK:> Description: def _object_table(self, object_id): """Fetch and parse the object table information for a single object ID. Args: object_id: An object ID to get information about. Returns: A dictionary with information about the object ID in question. """
# Allow the argument to be either an ObjectID or a hex string.
if not isinstance(object_id, ray.ObjectID):
    object_id = ray.ObjectID(hex_to_binary(object_id))

# Return information about a single object ID.
message = self._execute_command(object_id, "RAY.TABLE_LOOKUP",
                                ray.gcs_utils.TablePrefix.OBJECT, "",
                                object_id.binary())
if message is None:
    return {}
gcs_entry = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(message, 0)

assert gcs_entry.EntriesLength() > 0

entry = ray.gcs_utils.ObjectTableData.GetRootAsObjectTableData(
    gcs_entry.Entries(0), 0)

object_info = {
    "DataSize": entry.ObjectSize(),
    "Manager": entry.Manager(),
}

return object_info
<SYSTEM_TASK:> Fetch and parse the object table info for one or more object IDs. <END_TASK> <USER_TASK:> Description: def object_table(self, object_id=None): """Fetch and parse the object table info for one or more object IDs. Args: object_id: An object ID to fetch information about. If this is None, then the entire object table is fetched. Returns: Information from the object table. """
self._check_connected() if object_id is not None: # Return information about a single object ID. return self._object_table(object_id) else: # Return the entire object table. object_keys = self._keys(ray.gcs_utils.TablePrefix_OBJECT_string + "*") object_ids_binary = { key[len(ray.gcs_utils.TablePrefix_OBJECT_string):] for key in object_keys } results = {} for object_id_binary in object_ids_binary: results[binary_to_object_id(object_id_binary)] = ( self._object_table(binary_to_object_id(object_id_binary))) return results
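As an illustration, here is a minimal usage sketch of the object table accessors, assuming the cluster is started with ray.init() and that this GlobalState instance is exposed as ray.global_state (as it is referenced elsewhere in this codebase). The printed fields follow the dictionary built by _object_table above.

import ray

ray.init()

# Put an object so the table has at least one entry.
object_id = ray.put("hello")

# Fetch the whole table, or a single entry by ObjectID (or hex string).
all_objects = ray.global_state.object_table()
entry = ray.global_state.object_table(object_id)
print(entry.get("DataSize"), entry.get("Manager"))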
<SYSTEM_TASK:> Fetch and parse the task table information for a single task ID. <END_TASK> <USER_TASK:> Description: def _task_table(self, task_id): """Fetch and parse the task table information for a single task ID. Args: task_id: A task ID to get information about. Returns: A dictionary with information about the task ID in question. """
assert isinstance(task_id, ray.TaskID) message = self._execute_command(task_id, "RAY.TABLE_LOOKUP", ray.gcs_utils.TablePrefix.RAYLET_TASK, "", task_id.binary()) if message is None: return {} gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry( message, 0) assert gcs_entries.EntriesLength() == 1 task_table_message = ray.gcs_utils.Task.GetRootAsTask( gcs_entries.Entries(0), 0) execution_spec = task_table_message.TaskExecutionSpec() task_spec = task_table_message.TaskSpecification() task = ray._raylet.Task.from_string(task_spec) function_descriptor_list = task.function_descriptor_list() function_descriptor = FunctionDescriptor.from_bytes_list( function_descriptor_list) task_spec_info = { "DriverID": task.driver_id().hex(), "TaskID": task.task_id().hex(), "ParentTaskID": task.parent_task_id().hex(), "ParentCounter": task.parent_counter(), "ActorID": (task.actor_id().hex()), "ActorCreationID": task.actor_creation_id().hex(), "ActorCreationDummyObjectID": ( task.actor_creation_dummy_object_id().hex()), "ActorCounter": task.actor_counter(), "Args": task.arguments(), "ReturnObjectIDs": task.returns(), "RequiredResources": task.required_resources(), "FunctionID": function_descriptor.function_id.hex(), "FunctionHash": binary_to_hex(function_descriptor.function_hash), "ModuleName": function_descriptor.module_name, "ClassName": function_descriptor.class_name, "FunctionName": function_descriptor.function_name, } return { "ExecutionSpec": { "Dependencies": [ execution_spec.Dependencies(i) for i in range(execution_spec.DependenciesLength()) ], "LastTimestamp": execution_spec.LastTimestamp(), "NumForwards": execution_spec.NumForwards() }, "TaskSpec": task_spec_info }
<SYSTEM_TASK:> Fetch and parse the task table information for one or more task IDs. <END_TASK> <USER_TASK:> Description: def task_table(self, task_id=None): """Fetch and parse the task table information for one or more task IDs. Args: task_id: A hex string of the task ID to fetch information about. If this is None, then the entire task table is fetched. Returns: Information from the task table. """
self._check_connected() if task_id is not None: task_id = ray.TaskID(hex_to_binary(task_id)) return self._task_table(task_id) else: task_table_keys = self._keys( ray.gcs_utils.TablePrefix_RAYLET_TASK_string + "*") task_ids_binary = [ key[len(ray.gcs_utils.TablePrefix_RAYLET_TASK_string):] for key in task_table_keys ] results = {} for task_id_binary in task_ids_binary: results[binary_to_hex(task_id_binary)] = self._task_table( ray.TaskID(task_id_binary)) return results
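A hedged usage sketch of the task table accessor, under the same assumption that this GlobalState instance is reachable as ray.global_state. The fields read back below are the ones assembled by _task_table above.

import ray

ray.init()

@ray.remote
def f():
    return 1

ray.get(f.remote())

# The full table is keyed by hex task ID strings.
for task_id_hex, info in ray.global_state.task_table().items():
    print(task_id_hex, info["TaskSpec"]["FunctionName"],
          info["ExecutionSpec"]["NumForwards"])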
<SYSTEM_TASK:> Get the profile events for a given batch of profile events. <END_TASK> <USER_TASK:> Description: def _profile_table(self, batch_id): """Get the profile events for a given batch of profile events. Args: batch_id: An identifier for a batch of profile events. Returns: A list of the profile events for the specified batch. """
# TODO(rkn): This method should support limiting the number of log # events and should also support returning a window of events. message = self._execute_command(batch_id, "RAY.TABLE_LOOKUP", ray.gcs_utils.TablePrefix.PROFILE, "", batch_id.binary()) if message is None: return [] gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry( message, 0) profile_events = [] for i in range(gcs_entries.EntriesLength()): profile_table_message = ( ray.gcs_utils.ProfileTableData.GetRootAsProfileTableData( gcs_entries.Entries(i), 0)) component_type = decode(profile_table_message.ComponentType()) component_id = binary_to_hex(profile_table_message.ComponentId()) node_ip_address = decode( profile_table_message.NodeIpAddress(), allow_none=True) for j in range(profile_table_message.ProfileEventsLength()): profile_event_message = profile_table_message.ProfileEvents(j) profile_event = { "event_type": decode(profile_event_message.EventType()), "component_id": component_id, "node_ip_address": node_ip_address, "component_type": component_type, "start_time": profile_event_message.StartTime(), "end_time": profile_event_message.EndTime(), "extra_data": json.loads( decode(profile_event_message.ExtraData())), } profile_events.append(profile_event) return profile_events
<SYSTEM_TASK:> Return a list of profiling events that can be viewed as a timeline. <END_TASK> <USER_TASK:> Description: def chrome_tracing_dump(self, filename=None): """Return a list of profiling events that can be viewed as a timeline. To view this information as a timeline, dump it as a JSON file by passing in "filename" or using json.dump, then go to chrome://tracing in the Chrome web browser and load the dumped file. Make sure to enable "Flow events" in the "View Options" menu. Args: filename: If a filename is provided, the timeline is dumped to that file. Returns: If filename is not provided, this returns a list of profiling events. Each profile event is a dictionary. """
# TODO(rkn): Support including the task specification data in the # timeline. # TODO(rkn): This should support viewing just a window of time or a # limited number of events. profile_table = self.profile_table() all_events = [] for component_id_hex, component_events in profile_table.items(): # Only consider workers and drivers. component_type = component_events[0]["component_type"] if component_type not in ["worker", "driver"]: continue for event in component_events: new_event = { # The category of the event. "cat": event["event_type"], # The string displayed on the event. "name": event["event_type"], # The identifier for the group of rows that the event # appears in. "pid": event["node_ip_address"], # The identifier for the row that the event appears in. "tid": event["component_type"] + ":" + event["component_id"], # The start time in microseconds. "ts": self._seconds_to_microseconds(event["start_time"]), # The duration in microseconds. "dur": self._seconds_to_microseconds(event["end_time"] - event["start_time"]), # What is this? "ph": "X", # This is the name of the color to display the box in. "cname": self._default_color_mapping[event["event_type"]], # The extra user-defined data. "args": event["extra_data"], } # Modify the json with the additional user-defined extra data. # This can be used to add fields or override existing fields. if "cname" in event["extra_data"]: new_event["cname"] = event["extra_data"]["cname"] if "name" in event["extra_data"]: new_event["name"] = event["extra_data"]["name"] all_events.append(new_event) if filename is not None: with open(filename, "w") as outfile: json.dump(all_events, outfile) else: return all_events
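A minimal sketch of producing a timeline file, assuming ray.global_state exposes this method; the filename is arbitrary.

import ray

ray.init()

@ray.remote
def work(x):
    return x * x

ray.get([work.remote(i) for i in range(10)])

# Write the profile events to disk, then open chrome://tracing in the
# Chrome browser and load "timeline.json" to view them.
ray.global_state.chrome_tracing_dump(filename="timeline.json")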
<SYSTEM_TASK:> Return a list of transfer events that can be viewed as a timeline. <END_TASK> <USER_TASK:> Description: def chrome_tracing_object_transfer_dump(self, filename=None): """Return a list of transfer events that can be viewed as a timeline. To view this information as a timeline, dump it as a JSON file by passing in "filename" or using json.dump, then go to chrome://tracing in the Chrome web browser and load the dumped file. Make sure to enable "Flow events" in the "View Options" menu. Args: filename: If a filename is provided, the timeline is dumped to that file. Returns: If filename is not provided, this returns a list of profiling events. Each profile event is a dictionary. """
client_id_to_address = {} for client_info in ray.global_state.client_table(): client_id_to_address[client_info["ClientID"]] = "{}:{}".format( client_info["NodeManagerAddress"], client_info["ObjectManagerPort"]) all_events = [] for key, items in self.profile_table().items(): # Only consider object manager events. if items[0]["component_type"] != "object_manager": continue for event in items: if event["event_type"] == "transfer_send": object_id, remote_client_id, _, _ = event["extra_data"] elif event["event_type"] == "transfer_receive": object_id, remote_client_id, _, _ = event["extra_data"] elif event["event_type"] == "receive_pull_request": object_id, remote_client_id = event["extra_data"] else: assert False, "This should be unreachable." # Choose a color by reading the first couple of hex digits of # the object ID as an integer and turning that into a color. object_id_int = int(object_id[:2], 16) color = self._chrome_tracing_colors[object_id_int % len( self._chrome_tracing_colors)] new_event = { # The category of the event. "cat": event["event_type"], # The string displayed on the event. "name": event["event_type"], # The identifier for the group of rows that the event # appears in. "pid": client_id_to_address[key], # The identifier for the row that the event appears in. "tid": client_id_to_address[remote_client_id], # The start time in microseconds. "ts": self._seconds_to_microseconds(event["start_time"]), # The duration in microseconds. "dur": self._seconds_to_microseconds(event["end_time"] - event["start_time"]), # What is this? "ph": "X", # This is the name of the color to display the box in. "cname": color, # The extra user-defined data. "args": event["extra_data"], } all_events.append(new_event) # Add another box with a color indicating whether it was a send # or a receive event. if event["event_type"] == "transfer_send": additional_event = new_event.copy() additional_event["cname"] = "black" all_events.append(additional_event) elif event["event_type"] == "transfer_receive": additional_event = new_event.copy() additional_event["cname"] = "grey" all_events.append(additional_event) else: pass if filename is not None: with open(filename, "w") as outfile: json.dump(all_events, outfile) else: return all_events
<SYSTEM_TASK:> Get a dictionary mapping worker ID to worker information. <END_TASK> <USER_TASK:> Description: def workers(self): """Get a dictionary mapping worker ID to worker information."""
worker_keys = self.redis_client.keys("Worker*") workers_data = {} for worker_key in worker_keys: worker_info = self.redis_client.hgetall(worker_key) worker_id = binary_to_hex(worker_key[len("Workers:"):]) workers_data[worker_id] = { "node_ip_address": decode(worker_info[b"node_ip_address"]), "plasma_store_socket": decode( worker_info[b"plasma_store_socket"]) } if b"stderr_file" in worker_info: workers_data[worker_id]["stderr_file"] = decode( worker_info[b"stderr_file"]) if b"stdout_file" in worker_info: workers_data[worker_id]["stdout_file"] = decode( worker_info[b"stdout_file"]) return workers_data
<SYSTEM_TASK:> Get the current total cluster resources. <END_TASK> <USER_TASK:> Description: def cluster_resources(self): """Get the current total cluster resources. Note that this information can grow stale as nodes are added to or removed from the cluster. Returns: A dictionary mapping resource name to the total quantity of that resource in the cluster. """
resources = defaultdict(int) clients = self.client_table() for client in clients: # Only count resources from live clients. if client["IsInsertion"]: for key, value in client["Resources"].items(): resources[key] += value return dict(resources)
<SYSTEM_TASK:> Get the current available cluster resources. <END_TASK> <USER_TASK:> Description: def available_resources(self): """Get the current available cluster resources. This is different from `cluster_resources` in that this will return idle (available) resources rather than total resources. Note that this information can grow stale as tasks start and finish. Returns: A dictionary mapping resource name to the currently available quantity of that resource in the cluster. """
available_resources_by_id = {}

subscribe_clients = [
    redis_client.pubsub(ignore_subscribe_messages=True)
    for redis_client in self.redis_clients
]
for subscribe_client in subscribe_clients:
    subscribe_client.subscribe(ray.gcs_utils.XRAY_HEARTBEAT_CHANNEL)

client_ids = self._live_client_ids()

while set(available_resources_by_id.keys()) != client_ids:
    for subscribe_client in subscribe_clients:
        # Parse client message
        raw_message = subscribe_client.get_message()
        if (raw_message is None or raw_message["channel"] !=
                ray.gcs_utils.XRAY_HEARTBEAT_CHANNEL):
            continue
        data = raw_message["data"]
        gcs_entries = (
            ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(data, 0))
        heartbeat_data = gcs_entries.Entries(0)
        message = (ray.gcs_utils.HeartbeatTableData.
                   GetRootAsHeartbeatTableData(heartbeat_data, 0))
        # Calculate available resources for this client
        num_resources = message.ResourcesAvailableLabelLength()
        dynamic_resources = {}
        for i in range(num_resources):
            resource_id = decode(message.ResourcesAvailableLabel(i))
            dynamic_resources[resource_id] = (
                message.ResourcesAvailableCapacity(i))

        # Update available resources for this client
        client_id = ray.utils.binary_to_hex(message.ClientId())
        available_resources_by_id[client_id] = dynamic_resources

    # Update clients in cluster
    client_ids = self._live_client_ids()

    # Remove disconnected clients. Iterate over a copy of the keys so
    # that the dictionary can be mutated inside the loop.
    for client_id in list(available_resources_by_id.keys()):
        if client_id not in client_ids:
            del available_resources_by_id[client_id]

# Calculate total available resources
total_available_resources = defaultdict(int)
for available_resources in available_resources_by_id.values():
    for resource_id, num_available in available_resources.items():
        total_available_resources[resource_id] += num_available

# Close the pubsub clients to avoid leaking file descriptors.
for subscribe_client in subscribe_clients:
    subscribe_client.close()

return dict(total_available_resources)
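A short sketch contrasting the two resource accessors, again assuming the ray.global_state handle; the num_cpus value is illustrative.

import ray

ray.init(num_cpus=4)

total = ray.global_state.cluster_resources()
free = ray.global_state.available_resources()

for name, quantity in total.items():
    print("{}: {} of {} available".format(name, free.get(name, 0), quantity))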
<SYSTEM_TASK:> Get the error messages for a specific driver. <END_TASK> <USER_TASK:> Description: def _error_messages(self, driver_id): """Get the error messages for a specific driver. Args: driver_id: The ID of the driver to get the errors for. Returns: A list of the error messages for this driver. """
assert isinstance(driver_id, ray.DriverID) message = self.redis_client.execute_command( "RAY.TABLE_LOOKUP", ray.gcs_utils.TablePrefix.ERROR_INFO, "", driver_id.binary()) # If there are no errors, return early. if message is None: return [] gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry( message, 0) error_messages = [] for i in range(gcs_entries.EntriesLength()): error_data = ray.gcs_utils.ErrorTableData.GetRootAsErrorTableData( gcs_entries.Entries(i), 0) assert driver_id.binary() == error_data.DriverId() error_message = { "type": decode(error_data.Type()), "message": decode(error_data.ErrorMessage()), "timestamp": error_data.Timestamp(), } error_messages.append(error_message) return error_messages
<SYSTEM_TASK:> Get the error messages for all drivers or a specific driver. <END_TASK> <USER_TASK:> Description: def error_messages(self, driver_id=None): """Get the error messages for all drivers or a specific driver. Args: driver_id: The specific driver to get the errors for. If this is None, then this method retrieves the errors for all drivers. Returns: A dictionary mapping driver ID to a list of the error messages for that driver. """
if driver_id is not None: assert isinstance(driver_id, ray.DriverID) return self._error_messages(driver_id) error_table_keys = self.redis_client.keys( ray.gcs_utils.TablePrefix_ERROR_INFO_string + "*") driver_ids = [ key[len(ray.gcs_utils.TablePrefix_ERROR_INFO_string):] for key in error_table_keys ] return { binary_to_hex(driver_id): self._error_messages( ray.DriverID(driver_id)) for driver_id in driver_ids }
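A hedged sketch of reading back pushed errors, assuming ray.global_state and that errors from failed tasks are reported to the GCS; the one-second sleep only gives the asynchronous error push time to land.

import time

import ray

ray.init()

@ray.remote
def broken():
    raise ValueError("boom")

broken.remote()
time.sleep(1)  # Give the error report time to reach the GCS.

for driver_id_hex, errors in ray.global_state.error_messages().items():
    for error in errors:
        print(driver_id_hex, error["type"], error["message"])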
<SYSTEM_TASK:> Returns the total length of all of the flattened variables. <END_TASK> <USER_TASK:> Description: def get_flat_size(self): """Returns the total length of all of the flattened variables. Returns: The length of all flattened variables concatenated. """
return sum( np.prod(v.get_shape().as_list()) for v in self.variables.values())
<SYSTEM_TASK:> Gets the weights and returns them as a flat array. <END_TASK> <USER_TASK:> Description: def get_flat(self): """Gets the weights and returns them as a flat array. Returns: 1D Array containing the flattened weights. """
self._check_sess() return np.concatenate([ v.eval(session=self.sess).flatten() for v in self.variables.values() ])
<SYSTEM_TASK:> Sets the weights to new_weights, converting from a flat array. <END_TASK> <USER_TASK:> Description: def set_flat(self, new_weights): """Sets the weights to new_weights, converting from a flat array. Note: You can only set all weights in the network using this function, i.e., the length of the array must match get_flat_size. Args: new_weights (np.ndarray): Flat array containing weights. """
self._check_sess() shapes = [v.get_shape().as_list() for v in self.variables.values()] arrays = unflatten(new_weights, shapes) placeholders = [ self.placeholders[k] for k, v in self.variables.items() ] self.sess.run( list(self.assignment_nodes.values()), feed_dict=dict(zip(placeholders, arrays)))
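A minimal round-trip sketch for the flat-weight helpers. It assumes these methods belong to the TensorFlowVariables helper and that it is importable from ray.experimental.tf_utils (the import path has moved between Ray versions), and it uses the TF1-style graph/session API consistent with the rest of this code.

import numpy as np
import tensorflow as tf
from ray.experimental.tf_utils import TensorFlowVariables  # path may differ by version

x = tf.placeholder(tf.float32, [None, 4])
w = tf.Variable(tf.zeros([4, 2]))
b = tf.Variable(tf.zeros([2]))
loss = tf.reduce_mean(tf.matmul(x, w) + b)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
variables = TensorFlowVariables(loss, sess)

# Round-trip the weights through a single flat vector.
flat = variables.get_flat()
assert flat.shape == (variables.get_flat_size(),)
variables.set_flat(np.ones_like(flat))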
<SYSTEM_TASK:> Returns a dictionary containing the weights of the network. <END_TASK> <USER_TASK:> Description: def get_weights(self): """Returns a dictionary containing the weights of the network. Returns: Dictionary mapping variable names to their weights. """
self._check_sess() return { k: v.eval(session=self.sess) for k, v in self.variables.items() }
<SYSTEM_TASK:> Sets the weights to new_weights. <END_TASK> <USER_TASK:> Description: def set_weights(self, new_weights): """Sets the weights to new_weights. Note: Can set subsets of variables as well, by only passing in the variables you want to be set. Args: new_weights (Dict): Dictionary mapping variable names to their weights. """
self._check_sess() assign_list = [ self.assignment_nodes[name] for name in new_weights.keys() if name in self.assignment_nodes ] assert assign_list, ("No variables in the input matched those in the " "network. Possible cause: Two networks were " "defined in the same TensorFlow graph. To fix " "this, place each network definition in its own " "tf.Graph.") self.sess.run( assign_list, feed_dict={ self.placeholders[name]: value for (name, value) in new_weights.items() if name in self.placeholders })
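A hedged sketch of copying weights between two replicas of the same network by variable name, with each replica built in its own tf.Graph as the assertion message above recommends. The TensorFlowVariables import path is assumed, as in the previous sketch.

import tensorflow as tf
from ray.experimental.tf_utils import TensorFlowVariables  # path may differ by version

def build_replica():
    # Each replica lives in its own graph and session.
    graph = tf.Graph()
    with graph.as_default():
        w = tf.get_variable("w", [2, 2], initializer=tf.zeros_initializer())
        loss = tf.reduce_sum(w)
        sess = tf.Session(graph=graph)
        sess.run(tf.global_variables_initializer())
        return TensorFlowVariables(loss, sess)

source = build_replica()
target = build_replica()

# Weight dictionaries are keyed by variable name, so matching names copy over.
target.set_weights(source.get_weights())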
<SYSTEM_TASK:> Construct a serialized ErrorTableData object. <END_TASK> <USER_TASK:> Description: def construct_error_message(driver_id, error_type, message, timestamp): """Construct a serialized ErrorTableData object. Args: driver_id: The ID of the driver that the error should go to. If this is nil, then the error will go to all drivers. error_type: The type of the error. message: The error message. timestamp: The time of the error. Returns: The serialized object. """
builder = flatbuffers.Builder(0) driver_offset = builder.CreateString(driver_id.binary()) error_type_offset = builder.CreateString(error_type) message_offset = builder.CreateString(message) ray.core.generated.ErrorTableData.ErrorTableDataStart(builder) ray.core.generated.ErrorTableData.ErrorTableDataAddDriverId( builder, driver_offset) ray.core.generated.ErrorTableData.ErrorTableDataAddType( builder, error_type_offset) ray.core.generated.ErrorTableData.ErrorTableDataAddErrorMessage( builder, message_offset) ray.core.generated.ErrorTableData.ErrorTableDataAddTimestamp( builder, timestamp) error_data_offset = ray.core.generated.ErrorTableData.ErrorTableDataEnd( builder) builder.Finish(error_data_offset) return bytes(builder.Output())
<SYSTEM_TASK:> Manually shut down the async API. <END_TASK> <USER_TASK:> Description: def shutdown(): """Manually shut down the async API. Cancels all related tasks and closes the socket transport. """
global handler, transport, protocol if handler is not None: handler.close() transport.close() handler = None transport = None protocol = None
<SYSTEM_TASK:> This removes some non-critical state from the primary Redis shard. <END_TASK> <USER_TASK:> Description: def flush_redis_unsafe(redis_client=None): """This removes some non-critical state from the primary Redis shard. This removes the log files as well as the event log from Redis. This can be used to try to address out-of-memory errors caused by the accumulation of metadata in Redis. However, it will only partially address the issue as much of the data is in the task table (and object table), which are not flushed. Args: redis_client: optional, if not provided then ray.init() must have been called. """
if redis_client is None: ray.worker.global_worker.check_connected() redis_client = ray.worker.global_worker.redis_client # Delete the log files from the primary Redis shard. keys = redis_client.keys("LOGFILE:*") if len(keys) > 0: num_deleted = redis_client.delete(*keys) else: num_deleted = 0 print("Deleted {} log files from Redis.".format(num_deleted)) # Delete the event log from the primary Redis shard. keys = redis_client.keys("event_log:*") if len(keys) > 0: num_deleted = redis_client.delete(*keys) else: num_deleted = 0 print("Deleted {} event logs from Redis.".format(num_deleted))
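A short sketch of calling this helper against a running cluster. The ray.experimental import path is an assumption; the function can also be handed an explicit redis.StrictRedis client instead.

import ray
from ray.experimental import flush_redis_unsafe  # import path is assumed

ray.init()

# With no argument, the primary Redis shard of the connected driver is used.
flush_redis_unsafe()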
<SYSTEM_TASK:> Creates a copy of self using existing input placeholders. <END_TASK> <USER_TASK:> Description: def copy(self, existing_inputs): """Creates a copy of self using existing input placeholders."""
return PPOPolicyGraph( self.observation_space, self.action_space, self.config, existing_inputs=existing_inputs)
<SYSTEM_TASK:> deepnn builds the graph for a deep net for classifying digits. <END_TASK> <USER_TASK:> Description: def deepnn(x): """deepnn builds the graph for a deep net for classifying digits. Args: x: an input tensor with the dimensions (N_examples, 784), where 784 is the number of pixels in a standard MNIST image. Returns: A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values equal to the logits of classifying the digit into one of 10 classes (the digits 0-9). keep_prob is a scalar placeholder for the probability of dropout. """
# Reshape to use within a convolutional neural net. # Last dimension is for "features" - there is only one here, since images # are grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc. with tf.name_scope("reshape"): x_image = tf.reshape(x, [-1, 28, 28, 1]) # First convolutional layer - maps one grayscale image to 32 feature maps. with tf.name_scope("conv1"): W_conv1 = weight_variable([5, 5, 1, 32]) b_conv1 = bias_variable([32]) h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) # Pooling layer - downsamples by 2X. with tf.name_scope("pool1"): h_pool1 = max_pool_2x2(h_conv1) # Second convolutional layer -- maps 32 feature maps to 64. with tf.name_scope("conv2"): W_conv2 = weight_variable([5, 5, 32, 64]) b_conv2 = bias_variable([64]) h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) # Second pooling layer. with tf.name_scope("pool2"): h_pool2 = max_pool_2x2(h_conv2) # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image # is down to 7x7x64 feature maps -- maps this to 1024 features. with tf.name_scope("fc1"): W_fc1 = weight_variable([7 * 7 * 64, 1024]) b_fc1 = bias_variable([1024]) h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64]) h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) # Dropout - controls the complexity of the model, prevents co-adaptation of # features. with tf.name_scope("dropout"): keep_prob = tf.placeholder(tf.float32) h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) # Map the 1024 features to 10 classes, one for each digit with tf.name_scope("fc2"): W_fc2 = weight_variable([1024, 10]) b_fc2 = bias_variable([10]) y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2 return y_conv, keep_prob
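A brief sketch of wiring deepnn into a training objective. It assumes deepnn and its helper functions (weight_variable, bias_variable, conv2d, max_pool_2x2) from this example are in scope, and uses the same TF1 API that the function itself uses.

import tensorflow as tf

# Placeholders for flattened 28x28 images and one-hot labels.
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])

y_conv, keep_prob = deepnn(x)

cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

# Feed keep_prob < 1.0 during training to enable dropout, and 1.0 at eval time.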
<SYSTEM_TASK:> Get signature parameters. <END_TASK> <USER_TASK:> Description: def get_signature_params(func): """Get signature parameters. Support Cython functions by grabbing relevant attributes from the Cython function and attaching them to a no-op function. This is somewhat brittle, since funcsigs may change, but given that funcsigs is written to a PEP, we hope it is relatively stable. Future versions of Python may allow overloading the inspect 'isfunction' and 'ismethod' functions / create an ABC for Python functions. Until then, it appears that Cython won't do anything about compatibility with the inspect module. Args: func: The function whose signature should be checked. Raises: TypeError: A type error if the signature is not supported. """
# The first condition for Cython functions, the latter for Cython instance # methods if is_cython(func): attrs = [ "__code__", "__annotations__", "__defaults__", "__kwdefaults__" ] if all(hasattr(func, attr) for attr in attrs): original_func = func def func(): return for attr in attrs: setattr(func, attr, getattr(original_func, attr)) else: raise TypeError("{!r} is not a Python function we can process" .format(func)) return list(funcsigs.signature(func).parameters.items())
<SYSTEM_TASK:> Check if we support the signature of this function. <END_TASK> <USER_TASK:> Description: def check_signature_supported(func, warn=False): """Check if we support the signature of this function. We currently do not allow remote functions to have **kwargs. We also do not support keyword arguments in conjunction with a *args argument. Args: func: The function whose signature should be checked. warn: If this is true, a warning will be printed if the signature is not supported. If it is false, an exception will be raised if the signature is not supported. Raises: Exception: An exception is raised if the signature is not supported. """
function_name = func.__name__ sig_params = get_signature_params(func) has_kwargs_param = False has_kwonly_param = False for keyword_name, parameter in sig_params: if parameter.kind == Parameter.VAR_KEYWORD: has_kwargs_param = True if parameter.kind == Parameter.KEYWORD_ONLY: has_kwonly_param = True if has_kwargs_param: message = ("The function {} has a **kwargs argument, which is " "currently not supported.".format(function_name)) if warn: logger.warning(message) else: raise Exception(message) if has_kwonly_param: message = ("The function {} has a keyword only argument " "(defined after * or *args), which is currently " "not supported.".format(function_name)) if warn: logger.warning(message) else: raise Exception(message)
<SYSTEM_TASK:> Extract the function signature from the function. <END_TASK> <USER_TASK:> Description: def extract_signature(func, ignore_first=False): """Extract the function signature from the function. Args: func: The function whose signature should be extracted. ignore_first: True if the first argument should be ignored. This should be used when func is a method of a class. Returns: A function signature object, which includes the names of the keyword arguments as well as their default values. """
sig_params = get_signature_params(func) if ignore_first: if len(sig_params) == 0: raise Exception("Methods must take a 'self' argument, but the " "method '{}' does not have one.".format( func.__name__)) sig_params = sig_params[1:] # Construct the argument default values and other argument information. arg_names = [] arg_defaults = [] arg_is_positionals = [] keyword_names = set() for arg_name, parameter in sig_params: arg_names.append(arg_name) arg_defaults.append(parameter.default) arg_is_positionals.append(parameter.kind == parameter.VAR_POSITIONAL) if parameter.kind == Parameter.POSITIONAL_OR_KEYWORD: # Note KEYWORD_ONLY arguments currently unsupported. keyword_names.add(arg_name) return FunctionSignature(arg_names, arg_defaults, arg_is_positionals, keyword_names, func.__name__)
<SYSTEM_TASK:> Extend the arguments that were passed into a function. <END_TASK> <USER_TASK:> Description: def extend_args(function_signature, args, kwargs): """Extend the arguments that were passed into a function. This extends the arguments that were passed into a function with the default arguments provided in the function definition. Args: function_signature: The function signature of the function being called. args: The non-keyword arguments passed into the function. kwargs: The keyword arguments passed into the function. Returns: An extended list of arguments to pass into the function. Raises: Exception: An exception may be raised if the function cannot be called with these arguments. """
arg_names = function_signature.arg_names
arg_defaults = function_signature.arg_defaults
arg_is_positionals = function_signature.arg_is_positionals
keyword_names = function_signature.keyword_names
function_name = function_signature.function_name

args = list(args)

for keyword_name in kwargs:
    if keyword_name not in keyword_names:
        raise Exception("The name '{}' is not a valid keyword argument "
                        "for the function '{}'.".format(
                            keyword_name, function_name))

# Fill in the remaining arguments.
for skipped_name in arg_names[0:len(args)]:
    if skipped_name in kwargs:
        raise Exception("Positional and keyword value provided for the "
                        "argument '{}' for the function '{}'".format(
                            skipped_name, function_name))

zipped_info = zip(arg_names, arg_defaults, arg_is_positionals)
zipped_info = list(zipped_info)[len(args):]
for keyword_name, default_value, is_positional in zipped_info:
    if keyword_name in kwargs:
        args.append(kwargs[keyword_name])
    else:
        if default_value != funcsigs._empty:
            args.append(default_value)
        else:
            # This means that there is a missing argument. Unless this is
            # the last argument and it is a *args argument in which case it
            # can be omitted.
            if not is_positional:
                raise Exception("No value was provided for the argument "
                                "'{}' for the function '{}'.".format(
                                    keyword_name, function_name))

no_positionals = len(arg_is_positionals) == 0 or not arg_is_positionals[-1]
too_many_arguments = len(args) > len(arg_names) and no_positionals
if too_many_arguments:
    raise Exception("Too many arguments were passed to the function '{}'"
                    .format(function_name))
return args
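To make the default-filling behavior concrete, here is a small sketch using extract_signature together with extend_args from ray.signature (the module referenced elsewhere in this codebase); the greet function is hypothetical.

from ray import signature

def greet(name, punctuation="!"):
    return "Hello, " + name + punctuation

sig = signature.extract_signature(greet)

# Defaults are filled in for arguments the caller omitted.
assert signature.extend_args(sig, ["Ray"], {}) == ["Ray", "!"]

# Keyword arguments override the defaults.
assert signature.extend_args(sig, ["Ray"], {"punctuation": "?"}) == ["Ray", "?"]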
<SYSTEM_TASK:> Poll for cloud resource manager operation until finished. <END_TASK> <USER_TASK:> Description: def wait_for_crm_operation(operation): """Poll for cloud resource manager operation until finished."""
logger.info("wait_for_crm_operation: " "Waiting for operation {} to finish...".format(operation)) for _ in range(MAX_POLLS): result = crm.operations().get(name=operation["name"]).execute() if "error" in result: raise Exception(result["error"]) if "done" in result and result["done"]: logger.info("wait_for_crm_operation: Operation done.") break time.sleep(POLL_INTERVAL) return result
<SYSTEM_TASK:> Poll for global compute operation until finished. <END_TASK> <USER_TASK:> Description: def wait_for_compute_global_operation(project_name, operation): """Poll for global compute operation until finished."""
logger.info("wait_for_compute_global_operation: " "Waiting for operation {} to finish...".format( operation["name"])) for _ in range(MAX_POLLS): result = compute.globalOperations().get( project=project_name, operation=operation["name"], ).execute() if "error" in result: raise Exception(result["error"]) if result["status"] == "DONE": logger.info("wait_for_compute_global_operation: " "Operation done.") break time.sleep(POLL_INTERVAL) return result
<SYSTEM_TASK:> Returns public and private key paths for a given key_name. <END_TASK> <USER_TASK:> Description: def key_pair_paths(key_name): """Returns public and private key paths for a given key_name."""
public_key_path = os.path.expanduser("~/.ssh/{}.pub".format(key_name)) private_key_path = os.path.expanduser("~/.ssh/{}.pem".format(key_name)) return public_key_path, private_key_path
<SYSTEM_TASK:> Create public and private ssh-keys. <END_TASK> <USER_TASK:> Description: def generate_rsa_key_pair(): """Create public and private ssh-keys."""
key = rsa.generate_private_key( backend=default_backend(), public_exponent=65537, key_size=2048) public_key = key.public_key().public_bytes( serialization.Encoding.OpenSSH, serialization.PublicFormat.OpenSSH).decode("utf-8") pem = key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption()).decode("utf-8") return public_key, pem
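A minimal sketch pairing this helper with key_pair_paths above to persist a key pair locally; the key name is arbitrary and ~/.ssh is assumed to exist.

import os

public_key, private_key = generate_rsa_key_pair()
public_key_path, private_key_path = key_pair_paths("ray-example-key")

with open(private_key_path, "w") as f:
    f.write(private_key)
os.chmod(private_key_path, 0o600)  # ssh clients reject group/world-readable keys

with open(public_key_path, "w") as f:
    f.write(public_key)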
<SYSTEM_TASK:> Setup a Google Cloud Platform Project. <END_TASK> <USER_TASK:> Description: def _configure_project(config): """Setup a Google Cloud Platform Project. Google Compute Platform organizes all the resources, such as storage buckets, users, and instances under projects. This is different from aws ec2 where everything is global. """
project_id = config["provider"].get("project_id") assert config["provider"]["project_id"] is not None, ( "'project_id' must be set in the 'provider' section of the autoscaler" " config. Notice that the project id must be globally unique.") project = _get_project(project_id) if project is None: # Project not found, try creating it _create_project(project_id) project = _get_project(project_id) assert project is not None, "Failed to create project" assert project["lifecycleState"] == "ACTIVE", ( "Project status needs to be ACTIVE, got {}".format( project["lifecycleState"])) config["provider"]["project_id"] = project["projectId"] return config
<SYSTEM_TASK:> Set up a GCP service account with IAM roles. <END_TASK> <USER_TASK:> Description: def _configure_iam_role(config): """Set up a GCP service account with IAM roles. Creates a GCP service account and binds IAM roles which allow it to control storage/compute services. Specifically, the head node needs to have an IAM role that allows it to create further GCE instances and store items in Google Cloud Storage. TODO: Allow the name/id of the service account to be configured """
email = SERVICE_ACCOUNT_EMAIL_TEMPLATE.format( account_id=DEFAULT_SERVICE_ACCOUNT_ID, project_id=config["provider"]["project_id"]) service_account = _get_service_account(email, config) if service_account is None: logger.info("_configure_iam_role: " "Creating new service account {}".format( DEFAULT_SERVICE_ACCOUNT_ID)) service_account = _create_service_account( DEFAULT_SERVICE_ACCOUNT_ID, DEFAULT_SERVICE_ACCOUNT_CONFIG, config) assert service_account is not None, "Failed to create service account" _add_iam_policy_binding(service_account, DEFAULT_SERVICE_ACCOUNT_ROLES) config["head_node"]["serviceAccounts"] = [{ "email": service_account["email"], # NOTE: The amount of access is determined by the scope + IAM # role of the service account. Even if the cloud-platform scope # gives (scope) access to the whole cloud-platform, the service # account is limited by the IAM rights specified below. "scopes": ["https://www.googleapis.com/auth/cloud-platform"] }] return config
<SYSTEM_TASK:> Configure SSH access, using an existing key pair if possible. <END_TASK> <USER_TASK:> Description: def _configure_key_pair(config): """Configure SSH access, using an existing key pair if possible. Creates a project-wide ssh key that can be used to access all the instances unless explicitly prohibited by instance config. The ssh-keys created by ray are of format: [USERNAME]:ssh-rsa [KEY_VALUE] [USERNAME] where: [USERNAME] is the user for the SSH key, specified in the config. [KEY_VALUE] is the public SSH key value. """
if "ssh_private_key" in config["auth"]: return config ssh_user = config["auth"]["ssh_user"] project = compute.projects().get( project=config["provider"]["project_id"]).execute() # Key pairs associated with project meta data. The key pairs are general, # and not just ssh keys. ssh_keys_str = next( (item for item in project["commonInstanceMetadata"].get("items", []) if item["key"] == "ssh-keys"), {}).get("value", "") ssh_keys = ssh_keys_str.split("\n") if ssh_keys_str else [] # Try a few times to get or create a good key pair. key_found = False for i in range(10): key_name = key_pair_name(i, config["provider"]["region"], config["provider"]["project_id"], ssh_user) public_key_path, private_key_path = key_pair_paths(key_name) for ssh_key in ssh_keys: key_parts = ssh_key.split(" ") if len(key_parts) != 3: continue if key_parts[2] == ssh_user and os.path.exists(private_key_path): # Found a key key_found = True break # Create a key since it doesn't exist locally or in GCP if not key_found and not os.path.exists(private_key_path): logger.info("_configure_key_pair: " "Creating new key pair {}".format(key_name)) public_key, private_key = generate_rsa_key_pair() _create_project_ssh_key_pair(project, public_key, ssh_user) with open(private_key_path, "w") as f: f.write(private_key) os.chmod(private_key_path, 0o600) with open(public_key_path, "w") as f: f.write(public_key) key_found = True break if key_found: break assert key_found, "SSH keypair for user {} not found for {}".format( ssh_user, private_key_path) assert os.path.exists(private_key_path), ( "Private key file {} not found for user {}" "".format(private_key_path, ssh_user)) logger.info("_configure_key_pair: " "Private key not specified in config, using" "{}".format(private_key_path)) config["auth"]["ssh_private_key"] = private_key_path return config
<SYSTEM_TASK:> Pick a reasonable subnet if not specified by the config. <END_TASK> <USER_TASK:> Description: def _configure_subnet(config): """Pick a reasonable subnet if not specified by the config."""
# Rationale: avoid subnet lookup if the network is already # completely manually configured if ("networkInterfaces" in config["head_node"] and "networkInterfaces" in config["worker_nodes"]): return config subnets = _list_subnets(config) if not subnets: raise NotImplementedError("Should be able to create subnet.") # TODO: make sure that we have usable subnet. Maybe call # compute.subnetworks().listUsable? For some reason it didn't # work out-of-the-box default_subnet = subnets[0] if "networkInterfaces" not in config["head_node"]: config["head_node"]["networkInterfaces"] = [{ "subnetwork": default_subnet["selfLink"], "accessConfigs": [{ "name": "External NAT", "type": "ONE_TO_ONE_NAT", }], }] if "networkInterfaces" not in config["worker_nodes"]: config["worker_nodes"]["networkInterfaces"] = [{ "subnetwork": default_subnet["selfLink"], "accessConfigs": [{ "name": "External NAT", "type": "ONE_TO_ONE_NAT", }], }] return config
<SYSTEM_TASK:> Add new IAM roles for the service account. <END_TASK> <USER_TASK:> Description: def _add_iam_policy_binding(service_account, roles): """Add new IAM roles for the service account."""
project_id = service_account["projectId"] email = service_account["email"] member_id = "serviceAccount:" + email policy = crm.projects().getIamPolicy(resource=project_id).execute() already_configured = True for role in roles: role_exists = False for binding in policy["bindings"]: if binding["role"] == role: if member_id not in binding["members"]: binding["members"].append(member_id) already_configured = False role_exists = True if not role_exists: already_configured = False policy["bindings"].append({ "members": [member_id], "role": role, }) if already_configured: # In some managed environments, an admin needs to grant the # roles, so only call setIamPolicy if needed. return result = crm.projects().setIamPolicy( resource=project_id, body={ "policy": policy, }).execute() return result
<SYSTEM_TASK:> Inserts an ssh-key into project commonInstanceMetadata <END_TASK> <USER_TASK:> Description: def _create_project_ssh_key_pair(project, public_key, ssh_user): """Inserts an ssh-key into project commonInstanceMetadata"""
key_parts = public_key.split(" ") # Sanity checks to make sure that the generated key matches expectation assert len(key_parts) == 2, key_parts assert key_parts[0] == "ssh-rsa", key_parts new_ssh_meta = "{ssh_user}:ssh-rsa {key_value} {ssh_user}".format( ssh_user=ssh_user, key_value=key_parts[1]) common_instance_metadata = project["commonInstanceMetadata"] items = common_instance_metadata.get("items", []) ssh_keys_i = next( (i for i, item in enumerate(items) if item["key"] == "ssh-keys"), None) if ssh_keys_i is None: items.append({"key": "ssh-keys", "value": new_ssh_meta}) else: ssh_keys = items[ssh_keys_i] ssh_keys["value"] += "\n" + new_ssh_meta items[ssh_keys_i] = ssh_keys common_instance_metadata["items"] = items operation = compute.projects().setCommonInstanceMetadata( project=project["name"], body=common_instance_metadata).execute() response = wait_for_compute_global_operation(project["name"], operation) return response
<SYSTEM_TASK:> An experimental alternate way to submit remote functions. <END_TASK> <USER_TASK:> Description: def _remote(self, args=None, kwargs=None, num_return_vals=None, num_cpus=None, num_gpus=None, resources=None): """An experimental alternate way to submit remote functions."""
worker = ray.worker.get_global_worker() worker.check_connected() if self._last_export_session < worker._session_index: # If this function was exported in a previous session, we need to # export this function again, because current GCS doesn't have it. self._last_export_session = worker._session_index worker.function_actor_manager.export(self) kwargs = {} if kwargs is None else kwargs args = [] if args is None else args args = ray.signature.extend_args(self._function_signature, args, kwargs) if num_return_vals is None: num_return_vals = self._num_return_vals resources = ray.utils.resources_from_resource_arguments( self._num_cpus, self._num_gpus, self._resources, num_cpus, num_gpus, resources) if worker.mode == ray.worker.LOCAL_MODE: # In LOCAL_MODE, remote calls simply execute the function. # We copy the arguments to prevent the function call from # mutating them and to match the usual behavior of # immutable remote objects. result = self._function(*copy.deepcopy(args)) return result object_ids = worker.submit_task( self._function_descriptor, args, num_return_vals=num_return_vals, resources=resources) if len(object_ids) == 1: return object_ids[0] elif len(object_ids) > 1: return object_ids
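Since _remote is described as experimental, the sketch below only illustrates the call shape shown in the signature above: per-call overrides of the decorator defaults.

import ray

ray.init()

@ray.remote
def add(a, b):
    return a + b

# Equivalent to add.remote(1, 2), but overriding the CPU requirement
# for this one invocation.
result_id = add._remote(args=[1, 2], kwargs={}, num_cpus=2)
print(ray.get(result_id))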
<SYSTEM_TASK:> Append an object to the linked list. <END_TASK> <USER_TASK:> Description: def append(self, future): """Append an object to the linked list. Args: future (PlasmaObjectFuture): A PlasmaObjectFuture instance. """
future.prev = self.tail if self.tail is None: assert self.head is None self.head = future else: self.tail.next = future self.tail = future # Once done, it will be removed from the list. future.add_done_callback(self.remove)
<SYSTEM_TASK:> Remove an object from the linked list. <END_TASK> <USER_TASK:> Description: def remove(self, future): """Remove an object from the linked list. Args: future (PlasmaObjectFuture): A PlasmaObjectFuture instance. """
if self._loop.get_debug():
    logger.debug("Removing %s from the linked list.", future)
if future.prev is None:
    assert future is self.head
    self.head = future.next
    if self.head is None:
        self.tail = None
        if not self.cancelled():
            self.set_result(None)
    else:
        self.head.prev = None
elif future.next is None:
    assert future is self.tail
    self.tail = future.prev
    if self.tail is None:
        self.head = None
        if not self.cancelled():
            self.set_result(None)
    else:
        # The new tail becomes the last node, so clear its next pointer.
        self.tail.next = None
<SYSTEM_TASK:> Manually cancel all tasks assigned to this event loop. <END_TASK> <USER_TASK:> Description: def cancel(self, *args, **kwargs): """Manually cancel all tasks assigned to this event loop."""
# Because removing all futures will trigger `set_result`,
# we cancel this future itself first.
super().cancel()
for future in self.traverse():
    # All cancelled futures should have callbacks that remove them
    # from this linked list. However, these callbacks are scheduled in
    # an event loop, so we could still find them in our list.
    if not future.cancelled():
        future.cancel()
<SYSTEM_TASK:> Turn an object_id into a Future object. <END_TASK> <USER_TASK:> Description: def as_future(self, object_id, check_ready=True): """Turn an object_id into a Future object. Args: object_id: A Ray's object_id. check_ready (bool): If true, check if the object_id is ready. Returns: PlasmaObjectFuture: A future object that waits the object_id. """
if not isinstance(object_id, ray.ObjectID): raise TypeError("Input should be an ObjectID.") plain_object_id = plasma.ObjectID(object_id.binary()) fut = PlasmaObjectFuture(loop=self._loop, object_id=plain_object_id) if check_ready: ready, _ = ray.wait([object_id], timeout=0) if ready: if self._loop.get_debug(): logger.debug("%s has been ready.", plain_object_id) self._complete_future(fut) return fut if plain_object_id not in self._waiting_dict: linked_list = PlasmaObjectLinkedList(self._loop, plain_object_id) linked_list.add_done_callback(self._unregister_callback) self._waiting_dict[plain_object_id] = linked_list self._waiting_dict[plain_object_id].append(fut) if self._loop.get_debug(): logger.debug("%s added to the waiting list.", fut) return fut
<SYSTEM_TASK:> Returns a list of all trials' information. <END_TASK> <USER_TASK:> Description: def get_all_trials(self): """Returns a list of all trials' information."""
response = requests.get(urljoin(self._path, "trials")) return self._deserialize(response)
<SYSTEM_TASK:> Returns trial information by trial_id. <END_TASK> <USER_TASK:> Description: def get_trial(self, trial_id): """Returns trial information by trial_id."""
response = requests.get( urljoin(self._path, "trials/{}".format(trial_id))) return self._deserialize(response)
<SYSTEM_TASK:> Requests to stop trial by trial_id. <END_TASK> <USER_TASK:> Description: def stop_trial(self, trial_id): """Requests to stop trial by trial_id."""
response = requests.put( urljoin(self._path, "trials/{}".format(trial_id))) return self._deserialize(response)
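A hedged sketch of the client these methods belong to, assuming it is the TuneClient from ray.tune.web_server and that the Tune run was started with its web server enabled; the address, port, and trial ID below are illustrative.

from ray.tune.web_server import TuneClient  # import path is assumed

client = TuneClient("localhost:4321")  # must match the Tune server's port

for trial in client.get_all_trials():
    print(trial)

# Inspect or stop a specific trial by its ID (hypothetical ID shown).
info = client.get_trial("trial_12345")
client.stop_trial("trial_12345")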
<SYSTEM_TASK:> Apply the given function to each remote worker. <END_TASK> <USER_TASK:> Description: def foreach_worker(self, fn): """Apply the given function to each remote worker. Returns: List of results from applying the function. """
results = ray.get([w.foreach_worker.remote(fn) for w in self.workers]) return results
<SYSTEM_TASK:> Apply the given function to each model replica in each worker. <END_TASK> <USER_TASK:> Description: def foreach_model(self, fn): """Apply the given function to each model replica in each worker. Returns: List of results from applying the function. """
results = ray.get([w.foreach_model.remote(fn) for w in self.workers]) out = [] for r in results: out.extend(r) return out
<SYSTEM_TASK:> Apply the given function to a single model replica. <END_TASK> <USER_TASK:> Description: def for_model(self, fn): """Apply the given function to a single model replica. Returns: Result from applying the function. """
return ray.get(self.workers[0].for_model.remote(fn))
<SYSTEM_TASK:> Run a single SGD step. <END_TASK> <USER_TASK:> Description: def step(self, fetch_stats=False): """Run a single SGD step. Arguments: fetch_stats (bool): Whether to return stats from the step. This can slow down the computation by acting as a global barrier. """
if self.strategy == "ps": return _distributed_sgd_step( self.workers, self.ps_list, write_timeline=False, fetch_stats=fetch_stats) else: return _simple_sgd_step(self.workers)
<SYSTEM_TASK:> Wrapper for starting a router and register it. <END_TASK> <USER_TASK:> Description: def start_router(router_class, router_name): """Wrapper for starting a router and register it. Args: router_class: The router class to instantiate. router_name: The name to give to the router. Returns: A handle to newly started router actor. """
handle = router_class.remote(router_name) ray.experimental.register_actor(router_name, handle) handle.start.remote() return handle
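A small sketch of calling start_router with a stand-in router actor. EchoRouter is hypothetical; a real router in this codebase would implement queueing and dispatch, but any actor class that accepts a name and exposes a start method satisfies the call pattern above.

import ray

ray.init()

@ray.remote
class EchoRouter(object):
    def __init__(self, name):
        # The router name is passed through by start_router.
        self.name = name

    def start(self):
        pass

router_handle = start_router(EchoRouter, "echo-router")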