text_prompt | code_prompt
---|---|
<SYSTEM_TASK:>
Exports model of this trial based on trial.export_formats.
<END_TASK>
<USER_TASK:>
Description:
def export_trial_if_needed(self, trial):
"""Exports model of this trial based on trial.export_formats.
Returns:
A dict that maps ExportFormats to successfully exported models.
""" |
if trial.export_formats and len(trial.export_formats) > 0:
return ray.get(
trial.runner.export_model.remote(trial.export_formats))
return {} |
<SYSTEM_TASK:>
Generates an actor that will execute a particular instance of
<END_TASK>
<USER_TASK:>
Description:
def __generate_actor(self, instance_id, operator, input, output):
"""Generates an actor that will execute a particular instance of
the logical operator
Attributes:
instance_id (UUID): The id of the instance the actor will execute.
operator (Operator): The metadata of the logical operator.
input (DataInput): The input gate that manages input channels of
the instance (see: DataInput in communication.py).
output (DataOutput): The output gate that manages output channels
of the instance (see: DataOutput in communication.py).
""" |
actor_id = (operator.id, instance_id)
# Record the physical dataflow graph (for debugging purposes)
self.__add_channel(actor_id, input, output)
# Select actor to construct
if operator.type == OpType.Source:
source = operator_instance.Source.remote(actor_id, operator, input,
output)
source.register_handle.remote(source)
return source.start.remote()
elif operator.type == OpType.Map:
map = operator_instance.Map.remote(actor_id, operator, input,
output)
map.register_handle.remote(map)
return map.start.remote()
elif operator.type == OpType.FlatMap:
flatmap = operator_instance.FlatMap.remote(actor_id, operator,
input, output)
flatmap.register_handle.remote(flatmap)
return flatmap.start.remote()
elif operator.type == OpType.Filter:
filter = operator_instance.Filter.remote(actor_id, operator, input,
output)
filter.register_handle.remote(filter)
return filter.start.remote()
elif operator.type == OpType.Reduce:
reduce = operator_instance.Reduce.remote(actor_id, operator, input,
output)
reduce.register_handle.remote(reduce)
return reduce.start.remote()
elif operator.type == OpType.TimeWindow:
pass
elif operator.type == OpType.KeyBy:
keyby = operator_instance.KeyBy.remote(actor_id, operator, input,
output)
keyby.register_handle.remote(keyby)
return keyby.start.remote()
elif operator.type == OpType.Sum:
sum = operator_instance.Reduce.remote(actor_id, operator, input,
output)
# Register target handle at state actor
state_actor = operator.state_actor
if state_actor is not None:
state_actor.register_target.remote(sum)
# Register own handle
sum.register_handle.remote(sum)
return sum.start.remote()
elif operator.type == OpType.Sink:
pass
elif operator.type == OpType.Inspect:
inspect = operator_instance.Inspect.remote(actor_id, operator,
input, output)
inspect.register_handle.remote(inspect)
return inspect.start.remote()
elif operator.type == OpType.ReadTextFile:
# TODO (john): Colocate the source with the input file
read = operator_instance.ReadTextFile.remote(
actor_id, operator, input, output)
read.register_handle.remote(read)
return read.start.remote()
else: # TODO (john): Add support for other types of operators
sys.exit("Unrecognized or unsupported {} operator type.".format(
operator.type)) |
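The if/elif chain above can also be read as a dispatch table. A minimal, non-authoritative sketch of that alternative follows; it reuses the OpType, operator_instance, and sys names already available in this module, the helper name construct_actor is hypothetical, and the Sum state-actor registration plus the unimplemented TimeWindow/Sink branches are deliberately omitted.

# Hedged sketch only: the same dispatch expressed as a table.
ACTOR_CLASSES = {
    OpType.Source: operator_instance.Source,
    OpType.Map: operator_instance.Map,
    OpType.FlatMap: operator_instance.FlatMap,
    OpType.Filter: operator_instance.Filter,
    OpType.Reduce: operator_instance.Reduce,
    OpType.KeyBy: operator_instance.KeyBy,
    OpType.Inspect: operator_instance.Inspect,
    OpType.ReadTextFile: operator_instance.ReadTextFile,
}

def construct_actor(actor_id, operator, input_gate, output_gate):
    actor_class = ACTOR_CLASSES.get(operator.type)
    if actor_class is None:
        sys.exit("Unrecognized or unsupported {} operator type.".format(
            operator.type))
    actor = actor_class.remote(actor_id, operator, input_gate, output_gate)
    actor.register_handle.remote(actor)
    return actor.start.remote()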
<SYSTEM_TASK:>
Generates one actor for each instance of the given logical
<END_TASK>
<USER_TASK:>
Description:
def __generate_actors(self, operator, upstream_channels,
downstream_channels):
"""Generates one actor for each instance of the given logical
operator.
Attributes:
operator (Operator): The logical operator metadata.
upstream_channels (list): A list of all upstream channels for
all instances of the operator.
downstream_channels (list): A list of all downstream channels
for all instances of the operator.
""" |
num_instances = operator.num_instances
logger.info("Generating {} actors of type {}...".format(
num_instances, operator.type))
in_channels = upstream_channels.pop(
operator.id) if upstream_channels else []
handles = []
for i in range(num_instances):
# Collect input and output channels for the particular instance
ip = [
channel for channel in in_channels
if channel.dst_instance_id == i
] if in_channels else []
op = [
channel for channels_list in downstream_channels.values()
for channel in channels_list if channel.src_instance_id == i
]
log = "Constructed {} input and {} output channels "
log += "for the {}-th instance of the {} operator."
logger.debug(log.format(len(ip), len(op), i, operator.type))
input_gate = DataInput(ip)
output_gate = DataOutput(op, operator.partitioning_strategies)
handle = self.__generate_actor(i, operator, input_gate,
output_gate)
if handle:
handles.append(handle)
return handles |
<SYSTEM_TASK:>
Deploys and executes the physical dataflow.
<END_TASK>
<USER_TASK:>
Description:
def execute(self):
"""Deploys and executes the physical dataflow.""" |
self._collect_garbage() # Make sure everything is clean
# TODO (john): Check if dataflow has any 'logical inconsistencies'
# For example, if there is a forward partitioning strategy but
# the number of downstream instances is larger than the number of
# upstream instances, some of the downstream instances will not be
# used at all
# Each operator instance is implemented as a Ray actor
# Actors are deployed in topological order, as we traverse the
# logical dataflow from sources to sinks. At each step, data
# producers wait for acknowledge from consumers before starting
# generating data.
upstream_channels = {}
for node in nx.topological_sort(self.logical_topo):
operator = self.operators[node]
# Generate downstream data channels
downstream_channels = self._generate_channels(operator)
# Instantiate Ray actors
handles = self.__generate_actors(operator, upstream_channels,
downstream_channels)
if handles:
self.actor_handles.extend(handles)
upstream_channels.update(downstream_channels)
logger.debug("Running...")
return self.actor_handles |
<SYSTEM_TASK:>
Sets the number of instances for the source operator of the stream.
<END_TASK>
<USER_TASK:>
Description:
def set_parallelism(self, num_instances):
"""Sets the number of instances for the source operator of the stream.
Attributes:
num_instances (int): The level of parallelism for the source
operator of the stream.
""" |
assert (num_instances > 0)
self.env._set_parallelism(self.src_operator_id, num_instances)
return self |
<SYSTEM_TASK:>
Applies a map operator to the stream.
<END_TASK>
<USER_TASK:>
Description:
def map(self, map_fn, name="Map"):
"""Applies a map operator to the stream.
Attributes:
map_fn (function): The user-defined logic of the map.
""" |
op = Operator(
_generate_uuid(),
OpType.Map,
name,
map_fn,
num_instances=self.env.config.parallelism)
return self.__register(op) |
<SYSTEM_TASK:>
Applies a flatmap operator to the stream.
<END_TASK>
<USER_TASK:>
Description:
def flat_map(self, flatmap_fn):
"""Applies a flatmap operator to the stream.
Attributes:
flatmap_fn (function): The user-defined logic of the flatmap
(e.g. split()).
""" |
op = Operator(
_generate_uuid(),
OpType.FlatMap,
"FlatMap",
flatmap_fn,
num_instances=self.env.config.parallelism)
return self.__register(op) |
<SYSTEM_TASK:>
Applies a key_by operator to the stream.
<END_TASK>
<USER_TASK:>
Description:
def key_by(self, key_selector):
"""Applies a key_by operator to the stream.
Attributes:
key_selector: The user-defined selector that extracts the key from
each record (e.g. an attribute index, assuming tuple records).
""" |
op = Operator(
_generate_uuid(),
OpType.KeyBy,
"KeyBy",
other=key_selector,
num_instances=self.env.config.parallelism)
return self.__register(op) |
<SYSTEM_TASK:>
Applies a system time window to the stream.
<END_TASK>
<USER_TASK:>
Description:
def time_window(self, window_width_ms):
"""Applies a system time window to the stream.
Attributes:
window_width_ms (int): The length of the window in ms.
""" |
op = Operator(
_generate_uuid(),
OpType.TimeWindow,
"TimeWindow",
num_instances=self.env.config.parallelism,
other=window_width_ms)
return self.__register(op) |
<SYSTEM_TASK:>
Applies a filter to the stream.
<END_TASK>
<USER_TASK:>
Description:
def filter(self, filter_fn):
"""Applies a filter to the stream.
Attributes:
filter_fn (function): The user-defined filter function.
""" |
op = Operator(
_generate_uuid(),
OpType.Filter,
"Filter",
filter_fn,
num_instances=self.env.config.parallelism)
return self.__register(op) |
<SYSTEM_TASK:>
Inspects the content of the stream.
<END_TASK>
<USER_TASK:>
Description:
def inspect(self, inspect_logic):
"""Inspects the content of the stream.
Attributes:
inspect_logic (function): The user-defined inspect function.
""" |
op = Operator(
_generate_uuid(),
OpType.Inspect,
"Inspect",
inspect_logic,
num_instances=self.env.config.parallelism)
return self.__register(op) |
<SYSTEM_TASK:>
Closes the stream with a sink operator.
<END_TASK>
<USER_TASK:>
Description:
def sink(self):
"""Closes the stream with a sink operator.""" |
op = Operator(
_generate_uuid(),
OpType.Sink,
"Sink",
num_instances=self.env.config.parallelism)
return self.__register(op) |
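Taken together, the methods above form a fluent stream-building API. A hedged end-to-end sketch follows; the Environment construction, the read_text_file source, the file path, and the lambdas are illustrative assumptions, not taken from this section.

# Hedged usage sketch of the fluent API defined above.
stream = (env.read_text_file("input.txt")
             .set_parallelism(2)
             .flat_map(lambda line: line.split())
             .filter(lambda word: word != "")
             .key_by(lambda word: word)
             .inspect(print)
             .sink())
env.execute()  # Deploys the actors and runs the dataflow (see execute() above).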
<SYSTEM_TASK:>
Open some closed files if they may have new lines.
<END_TASK>
<USER_TASK:>
Description:
def open_closed_files(self):
"""Open some closed files if they may have new lines.
Opening more files may require us to close some of the already open
files.
""" |
if not self.can_open_more_files:
# If we can't open any more files, close all of the files first.
self.close_all_files()
files_with_no_updates = []
while len(self.closed_file_infos) > 0:
if (len(self.open_file_infos) >=
ray_constants.LOG_MONITOR_MAX_OPEN_FILES):
self.can_open_more_files = False
break
file_info = self.closed_file_infos.pop(0)
assert file_info.file_handle is None
# Get the file size to see if it has gotten bigger since we last
# opened it.
try:
file_size = os.path.getsize(file_info.filename)
except (IOError, OSError) as e:
# Catch "file not found" errors.
if e.errno == errno.ENOENT:
logger.warning("Warning: The file {} was not "
"found.".format(file_info.filename))
self.log_filenames.remove(file_info.filename)
continue
raise e
# If some new lines have been added to this file, try to reopen the
# file.
if file_size > file_info.size_when_last_opened:
try:
f = open(file_info.filename, "r")
except (IOError, OSError) as e:
if e.errno == errno.ENOENT:
logger.warning("Warning: The file {} was not "
"found.".format(file_info.filename))
self.log_filenames.remove(file_info.filename)
continue
else:
raise e
f.seek(file_info.file_position)
file_info.size_when_last_opened = file_size
file_info.file_handle = f
self.open_file_infos.append(file_info)
else:
files_with_no_updates.append(file_info)
# Add the files with no changes back to the list of closed files.
self.closed_file_infos += files_with_no_updates |
<SYSTEM_TASK:>
Run the log monitor.
<END_TASK>
<USER_TASK:>
Description:
def run(self):
"""Run the log monitor.
This repeatedly queries Redis to check if there are new log files to
monitor. It will also store those log files in Redis.
""" |
while True:
self.update_log_filenames()
self.open_closed_files()
anything_published = self.check_log_files_and_publish_updates()
# If nothing was published, then wait a little bit before checking
# for logs to avoid using too much CPU.
if not anything_published:
time.sleep(0.05) |
<SYSTEM_TASK:>
Chains generator given experiment specifications.
<END_TASK>
<USER_TASK:>
Description:
def add_configurations(self, experiments):
"""Chains generator given experiment specifications.
Arguments:
experiments (Experiment | list | dict): Experiments to run.
""" |
experiment_list = convert_to_experiment_list(experiments)
for experiment in experiment_list:
self._trial_generator = itertools.chain(
self._trial_generator,
self._generate_trials(experiment.spec, experiment.name)) |
<SYSTEM_TASK:>
Provides a batch of Trial objects to be queued into the TrialRunner.
<END_TASK>
<USER_TASK:>
Description:
def next_trials(self):
"""Provides a batch of Trial objects to be queued into the TrialRunner.
A batch ends when self._trial_generator returns None.
Returns:
trials (list): Returns a list of trials.
""" |
trials = []
for trial in self._trial_generator:
if trial is None:
return trials
trials += [trial]
self._finished = True
return trials |
<SYSTEM_TASK:>
Generates trials with configurations from `_suggest`.
<END_TASK>
<USER_TASK:>
Description:
def _generate_trials(self, experiment_spec, output_path=""):
"""Generates trials with configurations from `_suggest`.
Creates a trial_id that is passed into `_suggest`.
Yields:
Trial objects constructed according to `spec`
""" |
if "run" not in experiment_spec:
raise TuneError("Must specify `run` in {}".format(experiment_spec))
for _ in range(experiment_spec.get("num_samples", 1)):
trial_id = Trial.generate_id()
while True:
suggested_config = self._suggest(trial_id)
if suggested_config is None:
yield None
else:
break
spec = copy.deepcopy(experiment_spec)
spec["config"] = merge_dicts(spec["config"], suggested_config)
flattened_config = resolve_nested_dict(spec["config"])
self._counter += 1
tag = "{0}_{1}".format(
str(self._counter), format_vars(flattened_config))
yield create_trial_from_spec(
spec,
output_path,
self._parser,
experiment_tag=tag,
trial_id=trial_id) |
<SYSTEM_TASK:>
Flattens a nested dict by joining keys into tuple of paths.
<END_TASK>
<USER_TASK:>
Description:
def resolve_nested_dict(nested_dict):
"""Flattens a nested dict by joining keys into tuple of paths.
Can then be passed into `format_vars`.
""" |
res = {}
for k, v in nested_dict.items():
if isinstance(v, dict):
for k_, v_ in resolve_nested_dict(v).items():
res[(k, ) + k_] = v_
else:
res[(k, )] = v
return res |
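A small self-contained illustration of the flattening behavior; the nested config dict below is made up.

# Standalone copy of the flattening logic above, with a worked example.
def resolve_nested_dict(nested_dict):
    res = {}
    for k, v in nested_dict.items():
        if isinstance(v, dict):
            for k_, v_ in resolve_nested_dict(v).items():
                res[(k,) + k_] = v_
        else:
            res[(k,)] = v
    return res

assert resolve_nested_dict({"lr": 0.01, "model": {"layers": 2, "units": 64}}) == {
    ("lr",): 0.01,
    ("model", "layers"): 2,
    ("model", "units"): 64,
}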
<SYSTEM_TASK:>
Run main entry for AutoMLBoard.
<END_TASK>
<USER_TASK:>
Description:
def run_board(args):
"""
Run main entry for AutoMLBoard.
Args:
args: args parsed from command line
""" |
init_config(args)
# backend service, should import after django settings initialized
from backend.collector import CollectorService
service = CollectorService(
args.logdir,
args.reload_interval,
standalone=False,
log_level=args.log_level)
service.run()
# frontend service
logger.info("Try to start automlboard on port %s\n" % args.port)
command = [
os.path.join(root_path, "manage.py"), "runserver",
"0.0.0.0:%s" % args.port, "--noreload"
]
execute_from_command_line(command) |
<SYSTEM_TASK:>
Initialize configs of the service.
<END_TASK>
<USER_TASK:>
Description:
def init_config(args):
"""
Initialize configs of the service.
Do the following things:
1. automl board settings
2. database settings
3. django settings
""" |
os.environ["AUTOMLBOARD_LOGDIR"] = args.logdir
os.environ["AUTOMLBOARD_LOGLEVEL"] = args.log_level
os.environ["AUTOMLBOARD_RELOAD_INTERVAL"] = str(args.reload_interval)
if args.db:
try:
db_address_reg = re.compile(r"(.*)://(.*):(.*)@(.*):(.*)/(.*)")
match = re.match(db_address_reg, args.db_address)
os.environ["AUTOMLBOARD_DB_ENGINE"] = match.group(1)
os.environ["AUTOMLBOARD_DB_USER"] = match.group(2)
os.environ["AUTOMLBOARD_DB_PASSWORD"] = match.group(3)
os.environ["AUTOMLBOARD_DB_HOST"] = match.group(4)
os.environ["AUTOMLBOARD_DB_PORT"] = match.group(5)
os.environ["AUTOMLBOARD_DB_NAME"] = match.group(6)
logger.info("Using %s as the database backend." % match.group(1))
except BaseException as e:
raise DatabaseError(e)
else:
logger.info("Using sqlite3 as the database backend, "
"information will be stored in automlboard.db")
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"ray.tune.automlboard.settings")
django.setup()
command = [os.path.join(root_path, "manage.py"), "migrate", "--run-syncdb"]
execute_from_command_line(command) |
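For reference, a self-contained check of the address format the database regex above expects; the engine, credentials, and host below are made up.

# Illustration of the db address format parsed by init_config above.
import re

db_address_reg = re.compile(r"(.*)://(.*):(.*)@(.*):(.*)/(.*)")
match = db_address_reg.match("mysql://user:secret@127.0.0.1:3306/automlboard")
assert match.groups() == (
    "mysql", "user", "secret", "127.0.0.1", "3306", "automlboard")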
<SYSTEM_TASK:>
Get the IDs of the GPUs that are available to the worker.
<END_TASK>
<USER_TASK:>
Description:
def get_gpu_ids():
"""Get the IDs of the GPUs that are available to the worker.
If the CUDA_VISIBLE_DEVICES environment variable was set when the worker
started up, then the IDs returned by this method will be a subset of the
IDs in CUDA_VISIBLE_DEVICES. If not, the IDs will fall in the range
[0, NUM_GPUS - 1], where NUM_GPUS is the number of GPUs that the node has.
Returns:
A list of GPU IDs.
""" |
if _mode() == LOCAL_MODE:
raise Exception("ray.get_gpu_ids() currently does not work in PYTHON "
"MODE.")
all_resource_ids = global_worker.raylet_client.resource_ids()
assigned_ids = [
resource_id for resource_id, _ in all_resource_ids.get("GPU", [])
]
# If the user had already set CUDA_VISIBLE_DEVICES, then respect that (in
# the sense that only GPU IDs that appear in CUDA_VISIBLE_DEVICES should be
# returned).
if global_worker.original_gpu_ids is not None:
assigned_ids = [
global_worker.original_gpu_ids[gpu_id] for gpu_id in assigned_ids
]
return assigned_ids |
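A hedged usage sketch for this function; it assumes Ray is installed, and the GPU counts are illustrative.

# Hedged usage sketch for ray.get_gpu_ids().
import ray

@ray.remote(num_gpus=1)
def which_gpus():
    # Only the GPU(s) reserved for this task are reported here.
    return ray.get_gpu_ids()

ray.init(num_gpus=2)
print(ray.get(which_gpus.remote()))  # e.g. [0] or [1]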
<SYSTEM_TASK:>
Initialize the serialization library.
<END_TASK>
<USER_TASK:>
Description:
def _initialize_serialization(driver_id, worker=global_worker):
"""Initialize the serialization library.
This defines a custom serializer for object IDs and also tells ray to
serialize several exception classes that we define for error handling.
""" |
serialization_context = pyarrow.default_serialization_context()
# Tell the serialization context to use the cloudpickle version that we
# ship with Ray.
serialization_context.set_pickle(pickle.dumps, pickle.loads)
pyarrow.register_torch_serialization_handlers(serialization_context)
for id_type in ray._raylet._ID_TYPES:
serialization_context.register_type(
id_type,
"{}.{}".format(id_type.__module__, id_type.__name__),
pickle=True)
def actor_handle_serializer(obj):
return obj._serialization_helper(True)
def actor_handle_deserializer(serialized_obj):
new_handle = ray.actor.ActorHandle.__new__(ray.actor.ActorHandle)
new_handle._deserialization_helper(serialized_obj, True)
return new_handle
# We register this serializer on each worker instead of calling
# register_custom_serializer from the driver so that isinstance still
# works.
serialization_context.register_type(
ray.actor.ActorHandle,
"ray.ActorHandle",
pickle=False,
custom_serializer=actor_handle_serializer,
custom_deserializer=actor_handle_deserializer)
worker.serialization_context_map[driver_id] = serialization_context
# Register exception types.
for error_cls in RAY_EXCEPTION_TYPES:
register_custom_serializer(
error_cls,
use_dict=True,
local=True,
driver_id=driver_id,
class_id=error_cls.__module__ + ". " + error_cls.__name__,
)
# Tell Ray to serialize lambdas with pickle.
register_custom_serializer(
type(lambda: 0),
use_pickle=True,
local=True,
driver_id=driver_id,
class_id="lambda")
# Tell Ray to serialize types with pickle.
register_custom_serializer(
type(int),
use_pickle=True,
local=True,
driver_id=driver_id,
class_id="type")
# Tell Ray to serialize FunctionSignatures as dictionaries. This is
# used when passing around actor handles.
register_custom_serializer(
ray.signature.FunctionSignature,
use_dict=True,
local=True,
driver_id=driver_id,
class_id="ray.signature.FunctionSignature") |
<SYSTEM_TASK:>
Prints log messages from workers on all of the nodes.
<END_TASK>
<USER_TASK:>
Description:
def print_logs(redis_client, threads_stopped):
"""Prints log messages from workers on all of the nodes.
Args:
redis_client: A client to the primary Redis shard.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
""" |
pubsub_client = redis_client.pubsub(ignore_subscribe_messages=True)
pubsub_client.subscribe(ray.gcs_utils.LOG_FILE_CHANNEL)
localhost = services.get_node_ip_address()
try:
# Keep track of the number of consecutive log messages that have been
# received with no break in between. If this number grows continually,
# then the worker is probably not able to process the log messages as
# rapidly as they are coming in.
num_consecutive_messages_received = 0
while True:
# Exit if we received a signal that we should stop.
if threads_stopped.is_set():
return
msg = pubsub_client.get_message()
if msg is None:
num_consecutive_messages_received = 0
threads_stopped.wait(timeout=0.01)
continue
num_consecutive_messages_received += 1
data = json.loads(ray.utils.decode(msg["data"]))
if data["ip"] == localhost:
for line in data["lines"]:
print("{}{}(pid={}){} {}".format(
colorama.Style.DIM, colorama.Fore.CYAN, data["pid"],
colorama.Style.RESET_ALL, line))
else:
for line in data["lines"]:
print("{}{}(pid={}, ip={}){} {}".format(
colorama.Style.DIM, colorama.Fore.CYAN, data["pid"],
data["ip"], colorama.Style.RESET_ALL, line))
if (num_consecutive_messages_received % 100 == 0
and num_consecutive_messages_received > 0):
logger.warning(
"The driver may not be able to keep up with the "
"stdout/stderr of the workers. To avoid forwarding logs "
"to the driver, use 'ray.init(log_to_driver=False)'.")
finally:
# Close the pubsub client to avoid leaking file descriptors.
pubsub_client.close() |
<SYSTEM_TASK:>
Prints messages received in the given output queue.
<END_TASK>
<USER_TASK:>
Description:
def print_error_messages_raylet(task_error_queue, threads_stopped):
"""Prints message received in the given output queue.
This checks periodically if any un-raised errors occured in the background.
Args:
task_error_queue (queue.Queue): A queue used to receive errors from the
thread that listens to Redis.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
""" |
while True:
# Exit if we received a signal that we should stop.
if threads_stopped.is_set():
return
try:
error, t = task_error_queue.get(block=False)
except queue.Empty:
threads_stopped.wait(timeout=0.01)
continue
# Delay errors a little bit of time to attempt to suppress redundant
# messages originating from the worker.
while t + UNCAUGHT_ERROR_GRACE_PERIOD > time.time():
threads_stopped.wait(timeout=1)
if threads_stopped.is_set():
break
if t < last_task_error_raise_time + UNCAUGHT_ERROR_GRACE_PERIOD:
logger.debug("Suppressing error from worker: {}".format(error))
else:
logger.error(
"Possible unhandled error from worker: {}".format(error)) |
<SYSTEM_TASK:>
Listen to error messages in the background on the driver.
<END_TASK>
<USER_TASK:>
Description:
def listen_error_messages_raylet(worker, task_error_queue, threads_stopped):
"""Listen to error messages in the background on the driver.
This runs in a separate thread on the driver and pushes (error, time)
tuples to the output queue.
Args:
worker: The worker class that this thread belongs to.
task_error_queue (queue.Queue): A queue used to communicate with the
thread that prints the errors found by this thread.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
""" |
worker.error_message_pubsub_client = worker.redis_client.pubsub(
ignore_subscribe_messages=True)
# Exports that are published after the call to
# error_message_pubsub_client.subscribe and before the call to
# error_message_pubsub_client.listen will still be processed in the loop.
# Really we should just subscribe to the errors for this specific job.
# However, currently all errors seem to be published on the same channel.
error_pubsub_channel = str(
ray.gcs_utils.TablePubsub.ERROR_INFO).encode("ascii")
worker.error_message_pubsub_client.subscribe(error_pubsub_channel)
# worker.error_message_pubsub_client.psubscribe("*")
try:
# Get the exports that occurred before the call to subscribe.
error_messages = global_state.error_messages(worker.task_driver_id)
for error_message in error_messages:
logger.error(error_message)
while True:
# Exit if we received a signal that we should stop.
if threads_stopped.is_set():
return
msg = worker.error_message_pubsub_client.get_message()
if msg is None:
threads_stopped.wait(timeout=0.01)
continue
gcs_entry = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(
msg["data"], 0)
assert gcs_entry.EntriesLength() == 1
error_data = ray.gcs_utils.ErrorTableData.GetRootAsErrorTableData(
gcs_entry.Entries(0), 0)
driver_id = error_data.DriverId()
if driver_id not in [
worker.task_driver_id.binary(),
DriverID.nil().binary()
]:
continue
error_message = ray.utils.decode(error_data.ErrorMessage())
if (ray.utils.decode(
error_data.Type()) == ray_constants.TASK_PUSH_ERROR):
# Delay it a bit to see if we can suppress it
task_error_queue.put((error_message, time.time()))
else:
logger.error(error_message)
finally:
# Close the pubsub client to avoid leaking file descriptors.
worker.error_message_pubsub_client.close() |
<SYSTEM_TASK:>
Disconnect this worker from the raylet and object store.
<END_TASK>
<USER_TASK:>
Description:
def disconnect():
"""Disconnect this worker from the raylet and object store.""" |
# Reset the list of cached remote functions and actors so that if more
# remote functions or actors are defined and then connect is called again,
# the remote functions will be exported. This is mostly relevant for the
# tests.
worker = global_worker
if worker.connected:
# Shutdown all of the threads that we've started. TODO(rkn): This
# should be handled cleanly in the worker object's destructor and not
# in this disconnect method.
worker.threads_stopped.set()
if hasattr(worker, "import_thread"):
worker.import_thread.join_import_thread()
if hasattr(worker, "profiler") and hasattr(worker.profiler, "t"):
worker.profiler.join_flush_thread()
if hasattr(worker, "listener_thread"):
worker.listener_thread.join()
if hasattr(worker, "printer_thread"):
worker.printer_thread.join()
if hasattr(worker, "logger_thread"):
worker.logger_thread.join()
worker.threads_stopped.clear()
worker._session_index += 1
worker.node = None # Disconnect the worker from the node.
worker.cached_functions_to_run = []
worker.function_actor_manager.reset_cache()
worker.serialization_context_map.clear()
if hasattr(worker, "raylet_client"):
del worker.raylet_client
if hasattr(worker, "plasma_client"):
worker.plasma_client.disconnect() |
<SYSTEM_TASK:>
Attempt to produce a deterministic class ID for a given class.
<END_TASK>
<USER_TASK:>
Description:
def _try_to_compute_deterministic_class_id(cls, depth=5):
"""Attempt to produce a deterministic class ID for a given class.
The goal here is for the class ID to be the same when this is run on
different worker processes. Pickling, loading, and pickling again seems to
produce more consistent results than simply pickling. This is a bit crazy
and could cause problems, in which case we should revert it and figure out
something better.
Args:
cls: The class to produce an ID for.
depth: The number of times to repeatedly try to load and dump the
string while trying to reach a fixed point.
Returns:
A class ID for this class. We attempt to make the class ID the same
when this function is run on different workers, but that is not
guaranteed.
Raises:
Exception: This could raise an exception if cloudpickle raises an
exception.
""" |
# Pickling, loading, and pickling again seems to produce more consistent
# results than simply pickling. This is a bit crazy and could cause
# problems, in which case we should revert it and figure out something
# better.
class_id = pickle.dumps(cls)
for _ in range(depth):
new_class_id = pickle.dumps(pickle.loads(class_id))
if new_class_id == class_id:
# We appear to have reached a fix point, so use this as the ID.
return hashlib.sha1(new_class_id).digest()
class_id = new_class_id
# We have not reached a fixed point, so we may end up with a different
# class ID for this custom class on each worker, which could lead to the
# same class definition being exported many many times.
logger.warning(
"WARNING: Could not produce a deterministic class ID for class "
"{}".format(cls))
return hashlib.sha1(new_class_id).digest() |
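The fixed-point idea can be illustrated with the standard pickle module; the real code uses Ray's bundled cloudpickle, so the standalone version below is only an illustration and the Example class is made up.

# Standalone illustration of the pickle/load/pickle fixed-point loop.
import hashlib
import pickle

class Example(object):
    pass

class_id = pickle.dumps(Example)
for _ in range(5):
    new_class_id = pickle.dumps(pickle.loads(class_id))
    if new_class_id == class_id:
        break
    class_id = new_class_id
print(hashlib.sha1(new_class_id).hexdigest())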
<SYSTEM_TASK:>
Enable serialization and deserialization for a particular class.
<END_TASK>
<USER_TASK:>
Description:
def register_custom_serializer(cls,
use_pickle=False,
use_dict=False,
serializer=None,
deserializer=None,
local=False,
driver_id=None,
class_id=None):
"""Enable serialization and deserialization for a particular class.
This method runs the register_class function defined below on every worker,
which will enable ray to properly serialize and deserialize objects of
this class.
Args:
cls (type): The class that ray should use this custom serializer for.
use_pickle (bool): If true, then objects of this class will be
serialized using pickle.
use_dict: If true, then objects of this class will be serialized by turning
their __dict__ fields into a dictionary. Must be False if
use_pickle is true.
serializer: The custom serializer to use. This should be provided if
and only if use_pickle and use_dict are False.
deserializer: The custom deserializer to use. This should be provided
if and only if use_pickle and use_dict are False.
local: True if the serializers should only be registered on the current
worker. This should usually be False.
driver_id: ID of the driver that we want to register the class for.
class_id: ID of the class that we are registering. If this is not
specified, we will calculate a new one inside the function.
Raises:
Exception: An exception is raised if use_pickle is False and the class cannot
be efficiently serialized by Ray. This can also raise an exception
if use_dict is true and cls is not pickleable.
""" |
worker = global_worker
assert (serializer is None) == (deserializer is None), (
"The serializer/deserializer arguments must both be provided or "
"both not be provided.")
use_custom_serializer = (serializer is not None)
assert use_custom_serializer + use_pickle + use_dict == 1, (
"Exactly one of use_pickle, use_dict, or serializer/deserializer must "
"be specified.")
if use_dict:
# Raise an exception if cls cannot be serialized efficiently by Ray.
serialization.check_serializable(cls)
if class_id is None:
if not local:
# In this case, the class ID will be used to deduplicate the class
# across workers. Note that cloudpickle unfortunately does not
# produce deterministic strings, so these IDs could be different
# on different workers. We could use something weaker like
# cls.__name__, however that would run the risk of having
# collisions.
# TODO(rkn): We should improve this.
try:
# Attempt to produce a class ID that will be the same on each
# worker. However, determinism is not guaranteed, and the
# result may be different on different workers.
class_id = _try_to_compute_deterministic_class_id(cls)
except Exception:
raise serialization.CloudPickleError("Failed to pickle class "
"'{}'".format(cls))
else:
# In this case, the class ID only needs to be meaningful on this
# worker and not across workers.
class_id = _random_string()
# Make sure class_id is a string.
class_id = ray.utils.binary_to_hex(class_id)
if driver_id is None:
driver_id = worker.task_driver_id
assert isinstance(driver_id, DriverID)
def register_class_for_serialization(worker_info):
# TODO(rkn): We need to be more thoughtful about what to do if custom
# serializers have already been registered for class_id. In some cases,
# we may want to use the last user-defined serializers and ignore
# subsequent calls to register_custom_serializer that were made by the
# system.
serialization_context = worker_info[
"worker"].get_serialization_context(driver_id)
serialization_context.register_type(
cls,
class_id,
pickle=use_pickle,
custom_serializer=serializer,
custom_deserializer=deserializer)
if not local:
worker.run_function_on_all_workers(register_class_for_serialization)
else:
# Since we are pickling objects of this class, we don't actually need
# to ship the class definition.
register_class_for_serialization({"worker": worker}) |
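A hedged usage sketch for this entry point, assuming it is exposed as ray.register_custom_serializer in this version; the Point class is made up and a connected worker is required.

# Hedged usage sketch: serialize a simple class by its __dict__ fields.
import ray

class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

ray.init()
ray.register_custom_serializer(Point, use_dict=True)
point_id = ray.put(Point(1, 2))
assert ray.get(point_id).x == 1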
<SYSTEM_TASK:>
Get a remote object or a list of remote objects from the object store.
<END_TASK>
<USER_TASK:>
Description:
def get(object_ids):
"""Get a remote object or a list of remote objects from the object store.
This method blocks until the object corresponding to the object ID is
available in the local object store. If this object is not in the local
object store, it will be shipped from an object store that has it (once the
object has been created). If object_ids is a list, then the objects
corresponding to each object in the list will be returned.
Args:
object_ids: Object ID of the object to get or a list of object IDs to
get.
Returns:
A Python object or a list of Python objects.
Raises:
Exception: An exception is raised if the task that created the object
or that created one of the objects raised an exception.
""" |
worker = global_worker
worker.check_connected()
with profiling.profile("ray.get"):
if worker.mode == LOCAL_MODE:
# In LOCAL_MODE, ray.get is the identity operation (the input will
# actually be a value not an objectid).
return object_ids
global last_task_error_raise_time
if isinstance(object_ids, list):
values = worker.get_object(object_ids)
for i, value in enumerate(values):
if isinstance(value, RayError):
last_task_error_raise_time = time.time()
raise value
return values
else:
value = worker.get_object([object_ids])[0]
if isinstance(value, RayError):
# If the result is a RayError, then the task that created
# this object failed, and we should propagate the error message
# here.
last_task_error_raise_time = time.time()
raise value
return value |
<SYSTEM_TASK:>
Store an object in the object store.
<END_TASK>
<USER_TASK:>
Description:
def put(value):
"""Store an object in the object store.
Args:
value: The Python object to be stored.
Returns:
The object ID assigned to this value.
""" |
worker = global_worker
worker.check_connected()
with profiling.profile("ray.put"):
if worker.mode == LOCAL_MODE:
# In LOCAL_MODE, ray.put is the identity operation.
return value
object_id = ray._raylet.compute_put_id(
worker.current_task_id,
worker.task_context.put_index,
)
worker.put_object(object_id, value)
worker.task_context.put_index += 1
return object_id |
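A minimal round trip through put and get; it assumes Ray is installed and that ray.init() starts a local instance.

# Minimal ray.put / ray.get round trip.
import ray

ray.init()
object_id = ray.put({"x": 1})
assert ray.get(object_id) == {"x": 1}

# ray.get also accepts a list of object IDs and returns a list of values.
values = ray.get([ray.put(i) for i in range(3)])
assert values == [0, 1, 2]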
<SYSTEM_TASK:>
Define a remote function or an actor class.
<END_TASK>
<USER_TASK:>
Description:
def remote(*args, **kwargs):
"""Define a remote function or an actor class.
This can be used with no arguments to define a remote function or actor as
follows:
.. code-block:: python
@ray.remote
def f():
return 1
@ray.remote
class Foo(object):
def method(self):
return 1
It can also be used with specific keyword arguments:
* **num_return_vals:** This is only for *remote functions*. It specifies
the number of object IDs returned by the remote function invocation.
* **num_cpus:** The quantity of CPU cores to reserve for this task or for
the lifetime of the actor.
* **num_gpus:** The quantity of GPUs to reserve for this task or for the
lifetime of the actor.
* **resources:** The quantity of various custom resources to reserve for
this task or for the lifetime of the actor. This is a dictionary mapping
strings (resource names) to numbers.
* **max_calls:** Only for *remote functions*. This specifies the maximum
number of times that a given worker can execute the given remote function
before it must exit (this can be used to address memory leaks in
third-party libraries or to reclaim resources that cannot easily be
released, e.g., GPU memory that was acquired by TensorFlow). By
default this is infinite.
* **max_reconstructions**: Only for *actors*. This specifies the maximum
number of times that the actor should be reconstructed when it dies
unexpectedly. The minimum valid value is 0 (default), which indicates
that the actor doesn't need to be reconstructed. And the maximum valid
value is ray.ray_constants.INFINITE_RECONSTRUCTIONS.
This can be done as follows:
.. code-block:: python
@ray.remote(num_gpus=1, max_calls=1, num_return_vals=2)
def f():
return 1, 2
@ray.remote(num_cpus=2, resources={"CustomResource": 1})
class Foo(object):
def method(self):
return 1
""" |
worker = get_global_worker()
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
# This is the case where the decorator is just @ray.remote.
return make_decorator(worker=worker)(args[0])
# Parse the keyword arguments from the decorator.
error_string = ("The @ray.remote decorator must be applied either "
"with no arguments and no parentheses, for example "
"'@ray.remote', or it must be applied using some of "
"the arguments 'num_return_vals', 'num_cpus', 'num_gpus', "
"'resources', 'max_calls', "
"or 'max_reconstructions', like "
"'@ray.remote(num_return_vals=2, "
"resources={\"CustomResource\": 1})'.")
assert len(args) == 0 and len(kwargs) > 0, error_string
for key in kwargs:
assert key in [
"num_return_vals", "num_cpus", "num_gpus", "resources",
"max_calls", "max_reconstructions"
], error_string
num_cpus = kwargs["num_cpus"] if "num_cpus" in kwargs else None
num_gpus = kwargs["num_gpus"] if "num_gpus" in kwargs else None
resources = kwargs.get("resources")
if not isinstance(resources, dict) and resources is not None:
raise Exception("The 'resources' keyword argument must be a "
"dictionary, but received type {}.".format(
type(resources)))
if resources is not None:
assert "CPU" not in resources, "Use the 'num_cpus' argument."
assert "GPU" not in resources, "Use the 'num_gpus' argument."
# Handle other arguments.
num_return_vals = kwargs.get("num_return_vals")
max_calls = kwargs.get("max_calls")
max_reconstructions = kwargs.get("max_reconstructions")
return make_decorator(
num_return_vals=num_return_vals,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
max_calls=max_calls,
max_reconstructions=max_reconstructions,
worker=worker) |
<SYSTEM_TASK:>
A thread-local that contains the following attributes.
<END_TASK>
<USER_TASK:>
Description:
def task_context(self):
"""A thread-local that contains the following attributes.
current_task_id: For the main thread, this field is the ID of this
worker's current running task; for other threads, this field is a
fake random ID.
task_index: The number of tasks that have been submitted from the
current task.
put_index: The number of objects that have been put from the current
task.
""" |
if not hasattr(self._task_context, "initialized"):
# Initialize task_context for the current thread.
if ray.utils.is_main_thread():
# If this is running on the main thread, initialize it to
# NIL. The actual value will set when the worker receives
# a task from raylet backend.
self._task_context.current_task_id = TaskID.nil()
else:
# If this is running on a separate thread, then the mapping
# to the current task ID may not be correct. Generate a
# random task ID so that the backend can differentiate
# between different threads.
self._task_context.current_task_id = TaskID(_random_string())
if getattr(self, "_multithreading_warned", False) is not True:
logger.warning(
"Calling ray.get or ray.wait in a separate thread "
"may lead to deadlock if the main thread blocks on "
"this thread and there are not enough resources to "
"execute more tasks")
self._multithreading_warned = True
self._task_context.task_index = 0
self._task_context.put_index = 1
self._task_context.initialized = True
return self._task_context |
<SYSTEM_TASK:>
Get the SerializationContext of the driver that this worker is processing.
<END_TASK>
<USER_TASK:>
Description:
def get_serialization_context(self, driver_id):
"""Get the SerializationContext of the driver that this worker is processing.
Args:
driver_id: The ID of the driver that indicates which driver to get
the serialization context for.
Returns:
The serialization context of the given driver.
""" |
# This function needs to be protected by a lock, because it will be
# called by `register_class_for_serialization`, as well as the import
# thread, from different threads. Also, this function will recursively
# call itself, so we use RLock here.
with self.lock:
if driver_id not in self.serialization_context_map:
_initialize_serialization(driver_id)
return self.serialization_context_map[driver_id] |
<SYSTEM_TASK:>
Store an object and attempt to register its class if needed.
<END_TASK>
<USER_TASK:>
Description:
def store_and_register(self, object_id, value, depth=100):
"""Store an object and attempt to register its class if needed.
Args:
object_id: The ID of the object to store.
value: The value to put in the object store.
depth: The maximum number of classes to recursively register.
Raises:
Exception: An exception is raised if the attempt to store the
object fails. This can happen if there is already an object
with the same ID in the object store or if the object store is
full.
""" |
counter = 0
while True:
if counter == depth:
raise Exception("Ray exceeded the maximum number of classes "
"that it will recursively serialize when "
"attempting to serialize an object of "
"type {}.".format(type(value)))
counter += 1
try:
if isinstance(value, bytes):
# If the object is a byte array, skip serializing it and
# use a special metadata to indicate it's raw binary. So
# that this object can also be read by Java.
self.plasma_client.put_raw_buffer(
value,
object_id=pyarrow.plasma.ObjectID(object_id.binary()),
metadata=ray_constants.RAW_BUFFER_METADATA,
memcopy_threads=self.memcopy_threads)
else:
self.plasma_client.put(
value,
object_id=pyarrow.plasma.ObjectID(object_id.binary()),
memcopy_threads=self.memcopy_threads,
serialization_context=self.get_serialization_context(
self.task_driver_id))
break
except pyarrow.SerializationCallbackError as e:
try:
register_custom_serializer(
type(e.example_object), use_dict=True)
warning_message = ("WARNING: Serializing objects of type "
"{} by expanding them as dictionaries "
"of their fields. This behavior may "
"be incorrect in some cases.".format(
type(e.example_object)))
logger.debug(warning_message)
except (serialization.RayNotDictionarySerializable,
serialization.CloudPickleError,
pickle.pickle.PicklingError, Exception):
# We also handle generic exceptions here because
# cloudpickle can fail with many different types of errors.
try:
register_custom_serializer(
type(e.example_object), use_pickle=True)
warning_message = ("WARNING: Falling back to "
"serializing objects of type {} by "
"using pickle. This may be "
"inefficient.".format(
type(e.example_object)))
logger.warning(warning_message)
except serialization.CloudPickleError:
register_custom_serializer(
type(e.example_object),
use_pickle=True,
local=True)
warning_message = ("WARNING: Pickling the class {} "
"failed, so we are using pickle "
"and only registering the class "
"locally.".format(
type(e.example_object)))
logger.warning(warning_message) |
<SYSTEM_TASK:>
Put value in the local object store with object id objectid.
<END_TASK>
<USER_TASK:>
Description:
def put_object(self, object_id, value):
"""Put value in the local object store with object id objectid.
This assumes that the value for objectid has not yet been placed in the
local object store.
Args:
object_id (object_id.ObjectID): The object ID of the value to be
put.
value: The value to put in the object store.
Raises:
Exception: An exception is raised if the attempt to store the
object fails. This can happen if there is already an object
with the same ID in the object store or if the object store is
full.
""" |
# Make sure that the value is not an object ID.
if isinstance(value, ObjectID):
raise TypeError(
"Calling 'put' on an ray.ObjectID is not allowed "
"(similarly, returning an ray.ObjectID from a remote "
"function is not allowed). If you really want to "
"do this, you can wrap the ray.ObjectID in a list and "
"call 'put' on it (or return it).")
# Serialize and put the object in the object store.
try:
self.store_and_register(object_id, value)
except pyarrow.PlasmaObjectExists:
# The object already exists in the object store, so there is no
# need to add it again. TODO(rkn): We need to compare the hashes
# and make sure that the objects are in fact the same. We also
# should return an error code to the caller instead of printing a
# message.
logger.info(
"The object with ID {} already exists in the object store."
.format(object_id))
except TypeError:
# This error can happen because one of the members of the object
# may not be serializable for cloudpickle. So we need these extra
# fallbacks here to start from the beginning. Hopefully the object
# could have a `__reduce__` method.
register_custom_serializer(type(value), use_pickle=True)
warning_message = ("WARNING: Serializing the class {} failed, "
"so are are falling back to cloudpickle."
.format(type(value)))
logger.warning(warning_message)
self.store_and_register(object_id, value) |
<SYSTEM_TASK:>
Get the value or values in the object store associated with the IDs.
<END_TASK>
<USER_TASK:>
Description:
def get_object(self, object_ids):
"""Get the value or values in the object store associated with the IDs.
Return the values from the local object store for object_ids. This will
block until all the values for object_ids have been written to the
local object store.
Args:
object_ids (List[object_id.ObjectID]): A list of the object IDs
whose values should be retrieved.
""" |
# Make sure that the values are object IDs.
for object_id in object_ids:
if not isinstance(object_id, ObjectID):
raise TypeError(
"Attempting to call `get` on the value {}, "
"which is not an ray.ObjectID.".format(object_id))
# Do an initial fetch for remote objects. We divide the fetch into
# smaller fetches so as to not block the manager for a prolonged period
# of time in a single call.
plain_object_ids = [
plasma.ObjectID(object_id.binary()) for object_id in object_ids
]
for i in range(0, len(object_ids),
ray._config.worker_fetch_request_size()):
self.raylet_client.fetch_or_reconstruct(
object_ids[i:(i + ray._config.worker_fetch_request_size())],
True)
# Get the objects. We initially try to get the objects immediately.
final_results = self.retrieve_and_deserialize(plain_object_ids, 0)
# Construct a dictionary mapping object IDs that we haven't gotten yet
# to their original index in the object_ids argument.
unready_ids = {
plain_object_ids[i].binary(): i
for (i, val) in enumerate(final_results)
if val is plasma.ObjectNotAvailable
}
if len(unready_ids) > 0:
# Try reconstructing any objects we haven't gotten yet. Try to
# get them until at least get_timeout_milliseconds
# milliseconds passes, then repeat.
while len(unready_ids) > 0:
object_ids_to_fetch = [
plasma.ObjectID(unready_id)
for unready_id in unready_ids.keys()
]
ray_object_ids_to_fetch = [
ObjectID(unready_id) for unready_id in unready_ids.keys()
]
fetch_request_size = ray._config.worker_fetch_request_size()
for i in range(0, len(object_ids_to_fetch),
fetch_request_size):
self.raylet_client.fetch_or_reconstruct(
ray_object_ids_to_fetch[i:(i + fetch_request_size)],
False,
self.current_task_id,
)
results = self.retrieve_and_deserialize(
object_ids_to_fetch,
max([
ray._config.get_timeout_milliseconds(),
int(0.01 * len(unready_ids)),
]),
)
# Remove any entries for objects we received during this
# iteration so we don't retrieve the same object twice.
for i, val in enumerate(results):
if val is not plasma.ObjectNotAvailable:
object_id = object_ids_to_fetch[i].binary()
index = unready_ids[object_id]
final_results[index] = val
unready_ids.pop(object_id)
# If there were objects that we weren't able to get locally,
# let the raylet know that we're now unblocked.
self.raylet_client.notify_unblocked(self.current_task_id)
assert len(final_results) == len(object_ids)
return final_results |
<SYSTEM_TASK:>
Submit a remote task to the scheduler.
<END_TASK>
<USER_TASK:>
Description:
def submit_task(self,
function_descriptor,
args,
actor_id=None,
actor_handle_id=None,
actor_counter=0,
actor_creation_id=None,
actor_creation_dummy_object_id=None,
max_actor_reconstructions=0,
execution_dependencies=None,
new_actor_handles=None,
num_return_vals=None,
resources=None,
placement_resources=None,
driver_id=None):
"""Submit a remote task to the scheduler.
Tell the scheduler to schedule the execution of the function with
function_descriptor with arguments args. Retrieve object IDs for the
outputs of the function from the scheduler and immediately return them.
Args:
function_descriptor: The function descriptor to execute.
args: The arguments to pass into the function. Arguments can be
object IDs or they can be values. If they are values, they must
be serializable objects.
actor_id: The ID of the actor that this task is for.
actor_counter: The counter of the actor task.
actor_creation_id: The ID of the actor to create, if this is an
actor creation task.
actor_creation_dummy_object_id: If this task is an actor method,
then this argument is the dummy object ID associated with the
actor creation task for the corresponding actor.
execution_dependencies: The execution dependencies for this task.
num_return_vals: The number of return values this function should
have.
resources: The resource requirements for this task.
placement_resources: The resources required for placing the task.
If this is not provided or if it is an empty dictionary, then
the placement resources will be equal to resources.
driver_id: The ID of the relevant driver. This is almost always the
driver ID of the driver that is currently running. However, in
the exceptional case that an actor task is being dispatched to
an actor created by a different driver, this should be the
driver ID of the driver that created the actor.
Returns:
The return object IDs for this task.
""" |
with profiling.profile("submit_task"):
if actor_id is None:
assert actor_handle_id is None
actor_id = ActorID.nil()
actor_handle_id = ActorHandleID.nil()
else:
assert actor_handle_id is not None
if actor_creation_id is None:
actor_creation_id = ActorID.nil()
if actor_creation_dummy_object_id is None:
actor_creation_dummy_object_id = ObjectID.nil()
# Put large or complex arguments that are passed by value in the
# object store first.
args_for_raylet = []
for arg in args:
if isinstance(arg, ObjectID):
args_for_raylet.append(arg)
elif ray._raylet.check_simple_value(arg):
args_for_raylet.append(arg)
else:
args_for_raylet.append(put(arg))
# By default, there are no execution dependencies.
if execution_dependencies is None:
execution_dependencies = []
if new_actor_handles is None:
new_actor_handles = []
if driver_id is None:
driver_id = self.task_driver_id
if resources is None:
raise ValueError("The resources dictionary is required.")
for value in resources.values():
assert (isinstance(value, int) or isinstance(value, float))
if value < 0:
raise ValueError(
"Resource quantities must be nonnegative.")
if (value >= 1 and isinstance(value, float)
and not value.is_integer()):
raise ValueError(
"Resource quantities must all be whole numbers.")
# Remove any resources with zero quantity requirements
resources = {
resource_label: resource_quantity
for resource_label, resource_quantity in resources.items()
if resource_quantity > 0
}
if placement_resources is None:
placement_resources = {}
# Increment the worker's task index to track how many tasks
# have been submitted by the current task so far.
self.task_context.task_index += 1
# The parent task must be set for the submitted task.
assert not self.current_task_id.is_nil()
# Current driver id must not be nil when submitting a task.
# Because every task must belong to a driver.
assert not self.task_driver_id.is_nil()
# Submit the task to raylet.
function_descriptor_list = (
function_descriptor.get_function_descriptor_list())
assert isinstance(driver_id, DriverID)
task = ray._raylet.Task(
driver_id,
function_descriptor_list,
args_for_raylet,
num_return_vals,
self.current_task_id,
self.task_context.task_index,
actor_creation_id,
actor_creation_dummy_object_id,
max_actor_reconstructions,
actor_id,
actor_handle_id,
actor_counter,
new_actor_handles,
execution_dependencies,
resources,
placement_resources,
)
self.raylet_client.submit_task(task)
return task.returns() |
<SYSTEM_TASK:>
Run arbitrary code on all of the workers.
<END_TASK>
<USER_TASK:>
Description:
def run_function_on_all_workers(self, function,
run_on_other_drivers=False):
"""Run arbitrary code on all of the workers.
This function will first be run on the driver, and then it will be
exported to all of the workers to be run. It will also be run on any
new workers that register later. If ray.init has not been called yet,
then cache the function and export it later.
Args:
function (Callable): The function to run on all of the workers. It
takes only one argument, a worker info dict. If it returns
anything, its return values will not be used.
run_on_other_drivers: The boolean that indicates whether we want to
run this function on other drivers. One case is when we need to
share objects across drivers.
""" |
# If ray.init has not been called yet, then cache the function and
# export it when connect is called. Otherwise, run the function on all
# workers.
if self.mode is None:
self.cached_functions_to_run.append(function)
else:
# Attempt to pickle the function before we need it. This could
# fail, and it is more convenient if the failure happens before we
# actually run the function locally.
pickled_function = pickle.dumps(function)
function_to_run_id = hashlib.sha1(pickled_function).digest()
key = b"FunctionsToRun:" + function_to_run_id
# First run the function on the driver.
# We always run the task locally.
function({"worker": self})
# Check if the function has already been put into redis.
function_exported = self.redis_client.setnx(b"Lock:" + key, 1)
if not function_exported:
# In this case, the function has already been exported, so
# we don't need to export it again.
return
check_oversized_pickle(pickled_function, function.__name__,
"function", self)
# Run the function on all workers.
self.redis_client.hmset(
key, {
"driver_id": self.task_driver_id.binary(),
"function_id": function_to_run_id,
"function": pickled_function,
"run_on_other_drivers": str(run_on_other_drivers)
})
self.redis_client.rpush("Exports", key) |
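A hedged usage sketch of this internal hook, reached through the global worker object; the environment variable and helper name below are made up.

# Hedged sketch: run a setup function on every current and future worker.
import os
import ray

def set_flag(worker_info):
    os.environ["MY_FLAG"] = "1"  # illustrative side effect on each worker

ray.init()
ray.worker.global_worker.run_function_on_all_workers(set_flag)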
<SYSTEM_TASK:>
Retrieve the arguments for the remote function.
<END_TASK>
<USER_TASK:>
Description:
def _get_arguments_for_execution(self, function_name, serialized_args):
"""Retrieve the arguments for the remote function.
This retrieves the values for the arguments to the remote function that
were passed in as object IDs. Arguments that were passed by value are
not changed. This is called by the worker that is executing the remote
function.
Args:
function_name (str): The name of the remote function whose
arguments are being retrieved.
serialized_args (List): The arguments to the function. These are
either strings representing serialized objects passed by value
or they are ray.ObjectIDs.
Returns:
The retrieved arguments in addition to the arguments that were
passed by value.
Raises:
RayError: This exception is raised if a task that
created one of the arguments failed.
""" |
arguments = []
for (i, arg) in enumerate(serialized_args):
if isinstance(arg, ObjectID):
# get the object from the local object store
argument = self.get_object([arg])[0]
if isinstance(argument, RayError):
raise argument
else:
# pass the argument by value
argument = arg
arguments.append(argument)
return arguments |
<SYSTEM_TASK:>
Store the outputs of a remote function in the local object store.
<END_TASK>
<USER_TASK:>
Description:
def _store_outputs_in_object_store(self, object_ids, outputs):
"""Store the outputs of a remote function in the local object store.
This stores the values that were returned by a remote function in the
local object store. If any of the return values are object IDs, then
these object IDs are aliased with the object IDs that the scheduler
assigned for the return values. This is called by the worker that
executes the remote function.
Note:
The arguments object_ids and outputs should have the same length.
Args:
object_ids (List[ObjectID]): The object IDs that were assigned to
the outputs of the remote function call.
outputs (Tuple): The value returned by the remote function. If the
remote function was supposed to only return one value, then its
output was wrapped in a tuple with one element prior to being
passed into this function.
""" |
for i in range(len(object_ids)):
if isinstance(outputs[i], ray.actor.ActorHandle):
raise Exception("Returning an actor handle from a remote "
"function is not allowed).")
if outputs[i] is ray.experimental.no_return.NoReturn:
if not self.plasma_client.contains(
pyarrow.plasma.ObjectID(object_ids[i].binary())):
raise RuntimeError(
"Attempting to return 'ray.experimental.NoReturn' "
"from a remote function, but the corresponding "
"ObjectID does not exist in the local object store.")
else:
self.put_object(object_ids[i], outputs[i]) |
<SYSTEM_TASK:>
Execute a task assigned to this worker.
<END_TASK>
<USER_TASK:>
Description:
def _process_task(self, task, function_execution_info):
"""Execute a task assigned to this worker.
This method deserializes a task from the scheduler, and attempts to
execute the task. If the task succeeds, the outputs are stored in the
local object store. If the task throws an exception, RayTaskError
objects are stored in the object store to represent the failed task
(these will be retrieved by calls to get or by subsequent tasks that
use the outputs of this task).
""" |
assert self.current_task_id.is_nil()
assert self.task_context.task_index == 0
assert self.task_context.put_index == 1
if task.actor_id().is_nil():
# If this worker is not an actor, check that `task_driver_id`
# was reset when the worker finished the previous task.
assert self.task_driver_id.is_nil()
# Set the driver ID of the current running task. This is
# needed so that if the task throws an exception, we propagate
# the error message to the correct driver.
self.task_driver_id = task.driver_id()
else:
# If this worker is an actor, task_driver_id wasn't reset.
# Check that current task's driver ID equals the previous one.
assert self.task_driver_id == task.driver_id()
self.task_context.current_task_id = task.task_id()
function_descriptor = FunctionDescriptor.from_bytes_list(
task.function_descriptor_list())
args = task.arguments()
return_object_ids = task.returns()
if (not task.actor_id().is_nil()
or not task.actor_creation_id().is_nil()):
dummy_return_id = return_object_ids.pop()
function_executor = function_execution_info.function
function_name = function_execution_info.function_name
# Get task arguments from the object store.
try:
if function_name != "__ray_terminate__":
self.reraise_actor_init_error()
self.memory_monitor.raise_if_low_memory()
with profiling.profile("task:deserialize_arguments"):
arguments = self._get_arguments_for_execution(
function_name, args)
except Exception as e:
self._handle_process_task_failure(
function_descriptor, return_object_ids, e,
ray.utils.format_error_message(traceback.format_exc()))
return
# Execute the task.
try:
self._current_task = task
with profiling.profile("task:execute"):
if (task.actor_id().is_nil()
and task.actor_creation_id().is_nil()):
outputs = function_executor(*arguments)
else:
if not task.actor_id().is_nil():
key = task.actor_id()
else:
key = task.actor_creation_id()
outputs = function_executor(dummy_return_id,
self.actors[key], *arguments)
except Exception as e:
# Determine whether the exception occurred during a task, not an
# actor method.
task_exception = task.actor_id().is_nil()
traceback_str = ray.utils.format_error_message(
traceback.format_exc(), task_exception=task_exception)
self._handle_process_task_failure(
function_descriptor, return_object_ids, e, traceback_str)
return
finally:
self._current_task = None
# Store the outputs in the local object store.
try:
with profiling.profile("task:store_outputs"):
# If this is an actor task, then the last object ID returned by
# the task is a dummy output, not returned by the function
# itself. Decrement to get the correct number of return values.
num_returns = len(return_object_ids)
if num_returns == 1:
outputs = (outputs, )
self._store_outputs_in_object_store(return_object_ids, outputs)
except Exception as e:
self._handle_process_task_failure(
function_descriptor, return_object_ids, e,
ray.utils.format_error_message(traceback.format_exc())) |
<SYSTEM_TASK:>
Wait for a task to be ready and process the task.
<END_TASK>
<USER_TASK:>
Description:
def _wait_for_and_process_task(self, task):
"""Wait for a task to be ready and process the task.
Args:
task: The task to execute.
""" |
function_descriptor = FunctionDescriptor.from_bytes_list(
task.function_descriptor_list())
driver_id = task.driver_id()
# TODO(rkn): It would be preferable for actor creation tasks to share
# more of the code path with regular task execution.
if not task.actor_creation_id().is_nil():
assert self.actor_id.is_nil()
self.actor_id = task.actor_creation_id()
self.actor_creation_task_id = task.task_id()
actor_class = self.function_actor_manager.load_actor_class(
driver_id, function_descriptor)
self.actors[self.actor_id] = actor_class.__new__(actor_class)
self.actor_checkpoint_info[self.actor_id] = ActorCheckpointInfo(
num_tasks_since_last_checkpoint=0,
last_checkpoint_timestamp=int(1000 * time.time()),
checkpoint_ids=[],
)
execution_info = self.function_actor_manager.get_execution_info(
driver_id, function_descriptor)
# Execute the task.
function_name = execution_info.function_name
extra_data = {"name": function_name, "task_id": task.task_id().hex()}
if task.actor_id().is_nil():
if task.actor_creation_id().is_nil():
title = "ray_worker:{}()".format(function_name)
next_title = "ray_worker"
else:
actor = self.actors[task.actor_creation_id()]
title = "ray_{}:{}()".format(actor.__class__.__name__,
function_name)
next_title = "ray_{}".format(actor.__class__.__name__)
else:
actor = self.actors[task.actor_id()]
title = "ray_{}:{}()".format(actor.__class__.__name__,
function_name)
next_title = "ray_{}".format(actor.__class__.__name__)
with profiling.profile("task", extra_data=extra_data):
with _changeproctitle(title, next_title):
self._process_task(task, execution_info)
# Reset the state fields so the next task can run.
self.task_context.current_task_id = TaskID.nil()
self.task_context.task_index = 0
self.task_context.put_index = 1
if self.actor_id.is_nil():
# Don't need to reset task_driver_id if the worker is an
# actor. Because the following tasks should all have the
# same driver id.
self.task_driver_id = DriverID.nil()
# Reset signal counters so that the next task can get
# all past signals.
ray_signal.reset()
# Increase the task execution counter.
self.function_actor_manager.increase_task_counter(
driver_id, function_descriptor)
reached_max_executions = (self.function_actor_manager.get_task_counter(
driver_id, function_descriptor) == execution_info.max_calls)
if reached_max_executions:
self.raylet_client.disconnect()
sys.exit(0) |
<SYSTEM_TASK:>
Get the next task from the raylet.
<END_TASK>
<USER_TASK:>
Description:
def _get_next_task_from_raylet(self):
"""Get the next task from the raylet.
Returns:
A task from the raylet.
""" |
with profiling.profile("worker_idle"):
task = self.raylet_client.get_task()
# Automatically restrict the GPUs available to this task.
ray.utils.set_cuda_visible_devices(ray.get_gpu_ids())
return task |
<SYSTEM_TASK:>
The main loop a worker runs to receive and execute tasks.
<END_TASK>
<USER_TASK:>
Description:
def main_loop(self):
"""The main loop a worker runs to receive and execute tasks.""" |
def exit(signum, frame):
shutdown()
sys.exit(0)
signal.signal(signal.SIGTERM, exit)
while True:
task = self._get_next_task_from_raylet()
self._wait_for_and_process_task(task) |
<SYSTEM_TASK:>
This methods reshapes all values in a dictionary.
<END_TASK>
<USER_TASK:>
Description:
def flatten(weights, start=0, stop=2):
"""This methods reshapes all values in a dictionary.
The indices from start to stop will be flattened into a single index.
Args:
weights: A dictionary mapping keys to numpy arrays.
start: The starting index.
stop: The ending index.
""" |
for key, val in weights.items():
new_shape = val.shape[0:start] + (-1, ) + val.shape[stop:]
weights[key] = val.reshape(new_shape)
return weights |
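A quick usage sketch of the reshaping above (the key name and shapes are made up for illustration; it assumes the `flatten` helper defined here and numpy):

import numpy as np

# A 4-D weight tensor of shape (2, 3, 4, 5).
weights = {"conv1": np.zeros((2, 3, 4, 5))}

# Flatten axes 1 and 2 (start=1, stop=3) into a single axis.
flattened = flatten(weights, start=1, stop=3)
assert flattened["conv1"].shape == (2, 12, 5)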
<SYSTEM_TASK:>
Get a dictionary of addresses.
<END_TASK>
<USER_TASK:>
Description:
def address_info(self):
"""Get a dictionary of addresses.""" |
return {
"node_ip_address": self._node_ip_address,
"redis_address": self._redis_address,
"object_store_address": self._plasma_store_socket_name,
"raylet_socket_name": self._raylet_socket_name,
"webui_url": self._webui_url,
} |
<SYSTEM_TASK:>
Return an incremental temporary file name. The file is not created.
<END_TASK>
<USER_TASK:>
Description:
def _make_inc_temp(self, suffix="", prefix="", directory_name="/tmp/ray"):
"""Return a incremental temporary file name. The file is not created.
Args:
suffix (str): The suffix of the temp file.
prefix (str): The prefix of the temp file.
directory_name (str) : The base directory of the temp file.
Returns:
A file name string. If a file with the same name already
exists, the returned name will look like
"{directory_name}/{prefix}.{unique_index}{suffix}"
""" |
directory_name = os.path.expanduser(directory_name)
index = self._incremental_dict[suffix, prefix, directory_name]
# `tempfile.TMP_MAX` could be extremely large,
# so using `range` in Python2.x should be avoided.
while index < tempfile.TMP_MAX:
if index == 0:
filename = os.path.join(directory_name, prefix + suffix)
else:
filename = os.path.join(directory_name,
prefix + "." + str(index) + suffix)
index += 1
if not os.path.exists(filename):
# Save the index.
self._incremental_dict[suffix, prefix, directory_name] = index
return filename
raise FileExistsError(errno.EEXIST,
"No usable temporary filename found") |
<SYSTEM_TASK:>
Generate partially randomized filenames for log files.
<END_TASK>
<USER_TASK:>
Description:
def new_log_files(self, name, redirect_output=True):
"""Generate partially randomized filenames for log files.
Args:
name (str): descriptive string for this log file.
redirect_output (bool): True if files should be generated for
logging stdout and stderr and false if stdout and stderr
should not be redirected.
If it is None, it will use the "redirect_output" Ray parameter.
Returns:
If redirect_output is true, this will return a tuple of two
file handles. The first is for redirecting stdout and the
second is for redirecting stderr.
If redirect_output is false, this will return a tuple
of two None objects.
""" |
if redirect_output is None:
redirect_output = self._ray_params.redirect_output
if not redirect_output:
return None, None
log_stdout = self._make_inc_temp(
suffix=".out", prefix=name, directory_name=self._logs_dir)
log_stderr = self._make_inc_temp(
suffix=".err", prefix=name, directory_name=self._logs_dir)
# Line-buffer the output (mode 1).
log_stdout_file = open(log_stdout, "a", buffering=1)
log_stderr_file = open(log_stderr, "a", buffering=1)
return log_stdout_file, log_stderr_file |
<SYSTEM_TASK:>
Prepare the socket file for raylet and plasma.
<END_TASK>
<USER_TASK:>
Description:
def _prepare_socket_file(self, socket_path, default_prefix):
"""Prepare the socket file for raylet and plasma.
This method helps to prepare a socket file.
1. Make the directory if the directory does not exist.
2. If the socket file exists, raise exception.
Args:
socket_path (string): the socket file to prepare.
""" |
if socket_path is not None:
if os.path.exists(socket_path):
raise Exception("Socket file {} exists!".format(socket_path))
socket_dir = os.path.dirname(socket_path)
try_to_create_directory(socket_dir)
return socket_path
return self._make_inc_temp(
prefix=default_prefix, directory_name=self._sockets_dir) |
<SYSTEM_TASK:>
Start the raylet.
<END_TASK>
<USER_TASK:>
Description:
def start_raylet(self, use_valgrind=False, use_profiler=False):
"""Start the raylet.
Args:
use_valgrind (bool): True if we should start the process in
valgrind.
use_profiler (bool): True if we should start the process in the
valgrind profiler.
""" |
stdout_file, stderr_file = self.new_log_files("raylet")
process_info = ray.services.start_raylet(
self._redis_address,
self._node_ip_address,
self._raylet_socket_name,
self._plasma_store_socket_name,
self._ray_params.worker_path,
self._temp_dir,
self._ray_params.num_cpus,
self._ray_params.num_gpus,
self._ray_params.resources,
self._ray_params.object_manager_port,
self._ray_params.node_manager_port,
self._ray_params.redis_password,
use_valgrind=use_valgrind,
use_profiler=use_profiler,
stdout_file=stdout_file,
stderr_file=stderr_file,
config=self._config,
include_java=self._ray_params.include_java,
java_worker_options=self._ray_params.java_worker_options,
load_code_from_local=self._ray_params.load_code_from_local,
)
assert ray_constants.PROCESS_TYPE_RAYLET not in self.all_processes
self.all_processes[ray_constants.PROCESS_TYPE_RAYLET] = [process_info] |
<SYSTEM_TASK:>
Create new logging files for workers to redirect its output.
<END_TASK>
<USER_TASK:>
Description:
def new_worker_redirected_log_file(self, worker_id):
"""Create new logging files for workers to redirect its output.""" |
worker_stdout_file, worker_stderr_file = (self.new_log_files(
"worker-" + ray.utils.binary_to_hex(worker_id), True))
return worker_stdout_file, worker_stderr_file |
<SYSTEM_TASK:>
Start head processes on the node.
<END_TASK>
<USER_TASK:>
Description:
def start_head_processes(self):
"""Start head processes on the node.""" |
logger.info(
"Process STDOUT and STDERR is being redirected to {}.".format(
self._logs_dir))
assert self._redis_address is None
# If this is the head node, start the relevant head node processes.
self.start_redis()
self.start_monitor()
self.start_raylet_monitor()
# The dashboard is Python3.x only.
if PY3 and self._ray_params.include_webui:
self.start_dashboard() |
<SYSTEM_TASK:>
Start all of the processes on the node.
<END_TASK>
<USER_TASK:>
Description:
def start_ray_processes(self):
"""Start all of the processes on the node.""" |
logger.info(
"Process STDOUT and STDERR is being redirected to {}.".format(
self._logs_dir))
self.start_plasma_store()
self.start_raylet()
if PY3:
self.start_reporter()
if self._ray_params.include_log_monitor:
self.start_log_monitor() |
<SYSTEM_TASK:>
Kill a process of a given type.
<END_TASK>
<USER_TASK:>
Description:
def _kill_process_type(self,
process_type,
allow_graceful=False,
check_alive=True,
wait=False):
"""Kill a process of a given type.
If the process type is PROCESS_TYPE_REDIS_SERVER, then we will kill all
of the Redis servers.
If the process was started in valgrind, then we will raise an exception
if the process has a non-zero exit code.
Args:
process_type: The type of the process to kill.
allow_graceful (bool): Send a SIGTERM first and give the process
time to exit gracefully. If that doesn't work, then use
SIGKILL. We usually want to do this outside of tests.
check_alive (bool): If true, then we expect the process to be alive
and will raise an exception if the process is already dead.
wait (bool): If true, then this method will not return until the
process in question has exited.
Raises:
This method raises an exception in the following cases:
1. The process had already died and check_alive is true.
2. The process had been started in valgrind and had a non-zero
exit code.
""" |
process_infos = self.all_processes[process_type]
if process_type != ray_constants.PROCESS_TYPE_REDIS_SERVER:
assert len(process_infos) == 1
for process_info in process_infos:
process = process_info.process
# Handle the case where the process has already exited.
if process.poll() is not None:
if check_alive:
raise Exception("Attempting to kill a process of type "
"'{}', but this process is already dead."
.format(process_type))
else:
continue
if process_info.use_valgrind:
process.terminate()
process.wait()
if process.returncode != 0:
message = ("Valgrind detected some errors in process of "
"type {}. Error code {}.".format(
process_type, process.returncode))
if process_info.stdout_file is not None:
with open(process_info.stdout_file, "r") as f:
message += "\nPROCESS STDOUT:\n" + f.read()
if process_info.stderr_file is not None:
with open(process_info.stderr_file, "r") as f:
message += "\nPROCESS STDERR:\n" + f.read()
raise Exception(message)
continue
if process_info.use_valgrind_profiler:
# Give process signal to write profiler data.
os.kill(process.pid, signal.SIGINT)
# Wait for profiling data to be written.
time.sleep(0.1)
if allow_graceful:
# Allow the process one second to exit gracefully.
process.terminate()
timer = threading.Timer(1, lambda process: process.kill(),
[process])
try:
timer.start()
process.wait()
finally:
timer.cancel()
if process.poll() is not None:
continue
# If the process did not exit within one second, force kill it.
process.kill()
# The reason we usually don't call process.wait() here is that
# there's some chance we'd end up waiting a really long time.
if wait:
process.wait()
del self.all_processes[process_type] |
<SYSTEM_TASK:>
Kill the Redis servers.
<END_TASK>
<USER_TASK:>
Description:
def kill_redis(self, check_alive=True):
"""Kill the Redis servers.
Args:
check_alive (bool): Raise an exception if any of the processes
were already dead.
""" |
self._kill_process_type(
ray_constants.PROCESS_TYPE_REDIS_SERVER, check_alive=check_alive) |
<SYSTEM_TASK:>
Kill the plasma store.
<END_TASK>
<USER_TASK:>
Description:
def kill_plasma_store(self, check_alive=True):
"""Kill the plasma store.
Args:
check_alive (bool): Raise an exception if the process was already
dead.
""" |
self._kill_process_type(
ray_constants.PROCESS_TYPE_PLASMA_STORE, check_alive=check_alive) |
<SYSTEM_TASK:>
Kill the raylet.
<END_TASK>
<USER_TASK:>
Description:
def kill_raylet(self, check_alive=True):
"""Kill the raylet.
Args:
check_alive (bool): Raise an exception if the process was already
dead.
""" |
self._kill_process_type(
ray_constants.PROCESS_TYPE_RAYLET, check_alive=check_alive) |
<SYSTEM_TASK:>
Kill the log monitor.
<END_TASK>
<USER_TASK:>
Description:
def kill_log_monitor(self, check_alive=True):
"""Kill the log monitor.
Args:
check_alive (bool): Raise an exception if the process was already
dead.
""" |
self._kill_process_type(
ray_constants.PROCESS_TYPE_LOG_MONITOR, check_alive=check_alive) |
<SYSTEM_TASK:>
Kill the reporter.
<END_TASK>
<USER_TASK:>
Description:
def kill_reporter(self, check_alive=True):
"""Kill the reporter.
Args:
check_alive (bool): Raise an exception if the process was already
dead.
""" |
# reporter is started only in PY3.
if PY3:
self._kill_process_type(
ray_constants.PROCESS_TYPE_REPORTER, check_alive=check_alive) |
<SYSTEM_TASK:>
Kill the dashboard.
<END_TASK>
<USER_TASK:>
Description:
def kill_dashboard(self, check_alive=True):
"""Kill the dashboard.
Args:
check_alive (bool): Raise an exception if the process was already
dead.
""" |
self._kill_process_type(
ray_constants.PROCESS_TYPE_DASHBOARD, check_alive=check_alive) |
<SYSTEM_TASK:>
Kill the monitor.
<END_TASK>
<USER_TASK:>
Description:
def kill_monitor(self, check_alive=True):
"""Kill the monitor.
Args:
check_alive (bool): Raise an exception if the process was already
dead.
""" |
self._kill_process_type(
ray_constants.PROCESS_TYPE_MONITOR, check_alive=check_alive) |
<SYSTEM_TASK:>
Kill the raylet monitor.
<END_TASK>
<USER_TASK:>
Description:
def kill_raylet_monitor(self, check_alive=True):
"""Kill the raylet monitor.
Args:
check_alive (bool): Raise an exception if the process was already
dead.
""" |
self._kill_process_type(
ray_constants.PROCESS_TYPE_RAYLET_MONITOR, check_alive=check_alive) |
<SYSTEM_TASK:>
Kill all of the processes.
<END_TASK>
<USER_TASK:>
Description:
def kill_all_processes(self, check_alive=True, allow_graceful=False):
"""Kill all of the processes.
Note that this is slower than necessary because it calls kill, wait,
kill, wait, ... instead of kill, kill, ..., wait, wait, ...
Args:
check_alive (bool): Raise an exception if any of the processes were
already dead.
""" |
# Kill the raylet first. This is important for suppressing errors at
# shutdown because we give the raylet a chance to exit gracefully and
# clean up its child worker processes. If we were to kill the plasma
# store (or Redis) first, that could cause the raylet to exit
# ungracefully, leading to more verbose output from the workers.
if ray_constants.PROCESS_TYPE_RAYLET in self.all_processes:
self._kill_process_type(
ray_constants.PROCESS_TYPE_RAYLET,
check_alive=check_alive,
allow_graceful=allow_graceful)
# We call "list" to copy the keys because we are modifying the
# dictionary while iterating over it.
for process_type in list(self.all_processes.keys()):
self._kill_process_type(
process_type,
check_alive=check_alive,
allow_graceful=allow_graceful) |
<SYSTEM_TASK:>
Return a list of the live processes.
<END_TASK>
<USER_TASK:>
Description:
def live_processes(self):
"""Return a list of the live processes.
Returns:
A list of the live processes.
""" |
result = []
for process_type, process_infos in self.all_processes.items():
for process_info in process_infos:
if process_info.process.poll() is None:
result.append((process_type, process_info.process))
return result |
<SYSTEM_TASK:>
Create a large array of noise to be shared by all workers.
<END_TASK>
<USER_TASK:>
Description:
def create_shared_noise(count):
"""Create a large array of noise to be shared by all workers.""" |
seed = 123
noise = np.random.RandomState(seed).randn(count).astype(np.float32)
return noise |
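For intuition, a minimal sketch of how a worker might draw a perturbation from this shared table (the `sample_noise` helper is illustrative, not part of the codebase):

import numpy as np

noise = create_shared_noise(10 ** 7)

def sample_noise(noise, dim):
    # Pick a random offset and return a contiguous slice of length `dim`.
    index = np.random.randint(0, len(noise) - dim + 1)
    return index, noise[index:index + dim]

index, perturbation = sample_noise(noise, dim=100)
# Any worker holding a reference to `noise` can rebuild the same
# perturbation from the integer `index` alone.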
<SYSTEM_TASK:>
Register a new model that can be obtained with `get_model_config`.
<END_TASK>
<USER_TASK:>
Description:
def register_model(model_name, dataset_name, model_func):
"""Register a new model that can be obtained with `get_model_config`.""" |
model_map = _get_model_map(dataset_name)
if model_name in model_map:
raise ValueError("Model \"%s\" is already registered for dataset"
"\"%s\"" % (model_name, dataset_name))
model_map[model_name] = model_func |
<SYSTEM_TASK:>
Do a rollout.
<END_TASK>
<USER_TASK:>
Description:
def rollout(policy, env, timestep_limit=None, add_noise=False, offset=0):
"""Do a rollout.
If add_noise is True, the rollout will take noisy actions, with
exploration noise added by the policy. Otherwise, no action noise will be added.
Parameters
----------
policy: tf object
policy from which to draw actions
env: GymEnv
environment from which to draw rewards, done, and next state
timestep_limit: int, optional
steps after which to end the rollout
add_noise: bool, optional
indicates whether exploratory action noise should be added
offset: int, optional
value to subtract from the reward. For example, survival bonus
from humanoid
""" |
env_timestep_limit = env.spec.max_episode_steps
timestep_limit = (env_timestep_limit if timestep_limit is None else min(
timestep_limit, env_timestep_limit))
rews = []
t = 0
observation = env.reset()
for _ in range(timestep_limit or 999999):
ac = policy.compute(observation, add_noise=add_noise, update=True)[0]
observation, rew, done, _ = env.step(ac)
rew -= np.abs(offset)
rews.append(rew)
t += 1
if done:
break
rews = np.array(rews, dtype=np.float32)
return rews, t |
<SYSTEM_TASK:>
Provides Trial objects to be queued into the TrialRunner.
<END_TASK>
<USER_TASK:>
Description:
def next_trials(self):
"""Provides Trial objects to be queued into the TrialRunner.
Returns:
trials (list): Returns a list of trials.
""" |
trials = list(self._trial_generator)
if self._shuffle:
random.shuffle(trials)
self._finished = True
return trials |
<SYSTEM_TASK:>
Generates Trial objects with the variant generation process.
<END_TASK>
<USER_TASK:>
Description:
def _generate_trials(self, unresolved_spec, output_path=""):
"""Generates Trial objects with the variant generation process.
Uses a fixed point iteration to resolve variants. All trials
should be able to be generated at once.
See also: `ray.tune.suggest.variant_generator`.
Yields:
Trial object
""" |
if "run" not in unresolved_spec:
raise TuneError("Must specify `run` in {}".format(unresolved_spec))
for _ in range(unresolved_spec.get("num_samples", 1)):
for resolved_vars, spec in generate_variants(unresolved_spec):
experiment_tag = str(self._counter)
if resolved_vars:
experiment_tag += "_{}".format(resolved_vars)
self._counter += 1
yield create_trial_from_spec(
spec,
output_path,
self._parser,
experiment_tag=experiment_tag) |
<SYSTEM_TASK:>
Returns result of applying `self.operation`
<END_TASK>
<USER_TASK:>
Description:
def reduce(self, start=0, end=None):
"""Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(
arr[start], operation(arr[start+1], operation(... arr[end])))
Parameters
----------
start: int
beginning of the subsequence
end: int
end of the subsequences
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array
elements.
""" |
if end is None:
end = self._capacity - 1
if end < 0:
end += self._capacity
return self._reduce_helper(start, end, 1, 0, self._capacity - 1) |
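The recursive helper computes the same value as a plain fold over the selected slice; a hedged reference check, assuming a plain Python list `arr` and the same binary `operation`, could look like this (`naive_reduce` is illustrative only):

import functools

def naive_reduce(arr, operation, start=0, end=None):
    # Same semantics as the reduce above, but O(n) instead of O(log n).
    if end is None:
        end = len(arr) - 1
    if end < 0:
        end += len(arr)
    return functools.reduce(operation, arr[start:end + 1])

# Example: minimum over indices 2..5 of a small array.
assert naive_reduce([5, 3, 8, 1, 9, 2, 7], min, start=2, end=5) == 1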
<SYSTEM_TASK:>
Serialize this policy for Monitor to pick up.
<END_TASK>
<USER_TASK:>
Description:
def set_flushing_policy(flushing_policy):
"""Serialize this policy for Monitor to pick up.""" |
if "RAY_USE_NEW_GCS" not in os.environ:
raise Exception(
"set_flushing_policy() is only available when environment "
"variable RAY_USE_NEW_GCS is present at both compile and run time."
)
ray.worker.global_worker.check_connected()
redis_client = ray.worker.global_worker.redis_client
serialized = pickle.dumps(flushing_policy)
redis_client.set("gcs_flushing_policy", serialized) |
<SYSTEM_TASK:>
Returns ssh key to connecting to cluster workers.
<END_TASK>
<USER_TASK:>
Description:
def get_ssh_key():
"""Returns ssh key to connecting to cluster workers.
If the env var TUNE_CLUSTER_SSH_KEY is provided, then this key
will be used for syncing across different nodes.
""" |
path = os.environ.get("TUNE_CLUSTER_SSH_KEY",
os.path.expanduser("~/ray_bootstrap_key.pem"))
if os.path.exists(path):
return path
return None |
<SYSTEM_TASK:>
Passes the result to HyperOpt unless early terminated or errored.
<END_TASK>
<USER_TASK:>
Description:
def on_trial_complete(self,
trial_id,
result=None,
error=False,
early_terminated=False):
"""Passes the result to HyperOpt unless early terminated or errored.
The result is internally negated when interacting with HyperOpt
so that HyperOpt can "maximize" this value, as it minimizes on default.
""" |
ho_trial = self._get_hyperopt_trial(trial_id)
if ho_trial is None:
return
ho_trial["refresh_time"] = hpo.utils.coarse_utcnow()
if error:
ho_trial["state"] = hpo.base.JOB_STATE_ERROR
ho_trial["misc"]["error"] = (str(TuneError), "Tune Error")
elif early_terminated:
ho_trial["state"] = hpo.base.JOB_STATE_ERROR
ho_trial["misc"]["error"] = (str(TuneError), "Tune Removed")
else:
ho_trial["state"] = hpo.base.JOB_STATE_DONE
hp_result = self._to_hyperopt_result(result)
ho_trial["result"] = hp_result
self._hpopt_trials.refresh()
del self._live_trial_mapping[trial_id] |
<SYSTEM_TASK:>
Tells plasma to prefetch the given object_id.
<END_TASK>
<USER_TASK:>
Description:
def plasma_prefetch(object_id):
"""Tells plasma to prefetch the given object_id.""" |
local_sched_client = ray.worker.global_worker.raylet_client
ray_obj_id = ray.ObjectID(object_id)
local_sched_client.fetch_or_reconstruct([ray_obj_id], True) |
<SYSTEM_TASK:>
Get an object directly from plasma without going through object table.
<END_TASK>
<USER_TASK:>
Description:
def plasma_get(object_id):
"""Get an object directly from plasma without going through object table.
Precondition: plasma_prefetch(object_id) has been called before.
""" |
client = ray.worker.global_worker.plasma_client
plasma_id = ray.pyarrow.plasma.ObjectID(object_id)
while not client.contains(plasma_id):
pass
return client.get(plasma_id) |
<SYSTEM_TASK:>
Restores the state of the batched queue for writing.
<END_TASK>
<USER_TASK:>
Description:
def enable_writes(self):
"""Restores the state of the batched queue for writing.""" |
self.write_buffer = []
self.flush_lock = threading.RLock()
self.flush_thread = FlushThread(self.max_batch_time,
self._flush_writes) |
<SYSTEM_TASK:>
Checks for backpressure by the downstream reader.
<END_TASK>
<USER_TASK:>
Description:
def _wait_for_reader(self):
"""Checks for backpressure by the downstream reader.""" |
if self.max_size <= 0: # Unlimited queue
return
if self.write_item_offset - self.cached_remote_offset <= self.max_size:
return # Hasn't reached max size
remote_offset = internal_kv._internal_kv_get(self.read_ack_key)
if remote_offset is None:
# logger.debug("[writer] Waiting for reader to start...")
while remote_offset is None:
time.sleep(0.01)
remote_offset = internal_kv._internal_kv_get(self.read_ack_key)
remote_offset = int(remote_offset)
if self.write_item_offset - remote_offset > self.max_size:
logger.debug(
"[writer] Waiting for reader to catch up {} to {} - {}".format(
remote_offset, self.write_item_offset, self.max_size))
while self.write_item_offset - remote_offset > self.max_size:
time.sleep(0.01)
remote_offset = int(
internal_kv._internal_kv_get(self.read_ack_key))
self.cached_remote_offset = remote_offset |
<SYSTEM_TASK:>
Collects at least train_batch_size samples, never discarding any.
<END_TASK>
<USER_TASK:>
Description:
def collect_samples(agents, sample_batch_size, num_envs_per_worker,
train_batch_size):
"""Collects at least train_batch_size samples, never discarding any.""" |
num_timesteps_so_far = 0
trajectories = []
agent_dict = {}
for agent in agents:
fut_sample = agent.sample.remote()
agent_dict[fut_sample] = agent
while agent_dict:
[fut_sample], _ = ray.wait(list(agent_dict))
agent = agent_dict.pop(fut_sample)
next_sample = ray_get_and_free(fut_sample)
assert next_sample.count >= sample_batch_size * num_envs_per_worker
num_timesteps_so_far += next_sample.count
trajectories.append(next_sample)
# Only launch more tasks if we don't already have enough pending
pending = len(agent_dict) * sample_batch_size * num_envs_per_worker
if num_timesteps_so_far + pending < train_batch_size:
fut_sample2 = agent.sample.remote()
agent_dict[fut_sample2] = agent
return SampleBatch.concat_samples(trajectories) |
<SYSTEM_TASK:>
Collects at least train_batch_size samples.
<END_TASK>
<USER_TASK:>
Description:
def collect_samples_straggler_mitigation(agents, train_batch_size):
"""Collects at least train_batch_size samples.
This is the legacy behavior as of 0.6, and launches extra sample tasks to
potentially improve performance but can result in many wasted samples.
""" |
num_timesteps_so_far = 0
trajectories = []
agent_dict = {}
for agent in agents:
fut_sample = agent.sample.remote()
agent_dict[fut_sample] = agent
while num_timesteps_so_far < train_batch_size:
# TODO(pcm): Make wait support arbitrary iterators and remove the
# conversion to list here.
[fut_sample], _ = ray.wait(list(agent_dict))
agent = agent_dict.pop(fut_sample)
# Start task with next trajectory and record it in the dictionary.
fut_sample2 = agent.sample.remote()
agent_dict[fut_sample2] = agent
next_sample = ray_get_and_free(fut_sample)
num_timesteps_so_far += next_sample.count
trajectories.append(next_sample)
logger.info("Discarding {} sample tasks".format(len(agent_dict)))
return SampleBatch.concat_samples(trajectories) |
<SYSTEM_TASK:>
Improve the formatting of an exception thrown by a remote function.
<END_TASK>
<USER_TASK:>
Description:
def format_error_message(exception_message, task_exception=False):
"""Improve the formatting of an exception thrown by a remote function.
This method takes a traceback from an exception and makes it nicer by
removing a few uninformative lines and adding some space to indent the
remaining lines nicely.
Args:
exception_message (str): A message generated by traceback.format_exc().
Returns:
A string of the formatted exception message.
""" |
lines = exception_message.split("\n")
if task_exception:
# For errors that occur inside of tasks, remove lines 1 and 2 which are
# always the same, they just contain information about the worker code.
lines = lines[0:1] + lines[3:]
return "\n".join(lines) |
<SYSTEM_TASK:>
Check if an object is a Cython function or method
<END_TASK>
<USER_TASK:>
Description:
def is_cython(obj):
"""Check if an object is a Cython function or method""" |
# TODO(suo): We could split these into two functions, one for Cython
# functions and another for Cython methods.
# TODO(suo): There doesn't appear to be a Cython function 'type' we can
# check against via isinstance. Please correct me if I'm wrong.
def check_cython(x):
return type(x).__name__ == "cython_function_or_method"
# Check if function or method, respectively
return check_cython(obj) or \
(hasattr(obj, "__func__") and check_cython(obj.__func__)) |
<SYSTEM_TASK:>
Check if an object is a function or method.
<END_TASK>
<USER_TASK:>
Description:
def is_function_or_method(obj):
"""Check if an object is a function or method.
Args:
obj: The Python object in question.
Returns:
True if the object is an function or method.
""" |
return inspect.isfunction(obj) or inspect.ismethod(obj) or is_cython(obj) |
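For example (the names below are purely illustrative):

def free_function():
    pass

class Widget(object):
    def method(self):
        pass

assert is_function_or_method(free_function)
assert is_function_or_method(Widget().method)
assert not is_function_or_method(Widget())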
<SYSTEM_TASK:>
Generate a random string to use as an ID.
<END_TASK>
<USER_TASK:>
Description:
def random_string():
"""Generate a random string to use as an ID.
Note that users may seed numpy, which could cause this function to generate
duplicate IDs. Therefore, we need to seed numpy ourselves, but we can't
interfere with the state of the user's random number generator, so we
extract the state of the random number generator and reset it after we are
done.
TODO(rkn): If we want to later guarantee that these are generated in a
deterministic manner, then we will need to make some changes here.
Returns:
A random byte string of length ray_constants.ID_SIZE.
""" |
# Get the state of the numpy random number generator.
numpy_state = np.random.get_state()
# Try to use true randomness.
np.random.seed(None)
# Generate the random ID.
random_id = np.random.bytes(ray_constants.ID_SIZE)
# Reset the state of the numpy random number generator.
np.random.set_state(numpy_state)
return random_id |
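A quick check of the state-preservation property described above (illustrative only; it assumes numpy and the surrounding module's ray_constants import):

import numpy as np

np.random.seed(0)
before = np.random.get_state()[1][:5].copy()
_ = random_string()  # Draws ID_SIZE random bytes internally.
after = np.random.get_state()[1][:5]
# The user's generator state is untouched by the call above.
assert (before == after).all()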
<SYSTEM_TASK:>
Make this unicode in Python 3, otherwise leave it as bytes.
<END_TASK>
<USER_TASK:>
Description:
def decode(byte_str, allow_none=False):
"""Make this unicode in Python 3, otherwise leave it as bytes.
Args:
byte_str: The byte string to decode.
allow_none: If true, then we will allow byte_str to be None in which
case we will return an empty string. TODO(rkn): Remove this flag.
This is only here to simplify upgrading to flatbuffers 1.10.0.
Returns:
A byte string in Python 2 and a unicode string in Python 3.
""" |
if byte_str is None and allow_none:
return ""
if not isinstance(byte_str, bytes):
raise ValueError(
"The argument {} must be a bytes object.".format(byte_str))
if sys.version_info >= (3, 0):
return byte_str.decode("ascii")
else:
return byte_str |
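For example, on Python 3 (illustrative only):

assert decode(b"raylet") == "raylet"        # bytes -> unicode string
assert decode(None, allow_none=True) == ""  # None tolerated only with the flag
# decode(u"raylet") would raise ValueError, since the input must be bytes.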
<SYSTEM_TASK:>
Get the device IDs in the CUDA_VISIBLE_DEVICES environment variable.
<END_TASK>
<USER_TASK:>
Description:
def get_cuda_visible_devices():
"""Get the device IDs in the CUDA_VISIBLE_DEVICES environment variable.
Returns:
if CUDA_VISIBLE_DEVICES is set, this returns a list of integers with
the IDs of the GPUs. If it is not set, this returns None.
""" |
gpu_ids_str = os.environ.get("CUDA_VISIBLE_DEVICES", None)
if gpu_ids_str is None:
return None
if gpu_ids_str == "":
return []
return [int(i) for i in gpu_ids_str.split(",")] |
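A short illustration of the three possible return shapes (setting the variable in-process just for the example):

import os

os.environ["CUDA_VISIBLE_DEVICES"] = "0,2"
assert get_cuda_visible_devices() == [0, 2]

os.environ["CUDA_VISIBLE_DEVICES"] = ""
assert get_cuda_visible_devices() == []

del os.environ["CUDA_VISIBLE_DEVICES"]
assert get_cuda_visible_devices() is None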
<SYSTEM_TASK:>
Determine a task's resource requirements.
<END_TASK>
<USER_TASK:>
Description:
def resources_from_resource_arguments(default_num_cpus, default_num_gpus,
default_resources, runtime_num_cpus,
runtime_num_gpus, runtime_resources):
"""Determine a task's resource requirements.
Args:
default_num_cpus: The default number of CPUs required by this function
or actor method.
default_num_gpus: The default number of GPUs required by this function
or actor method.
default_resources: The default custom resources required by this
function or actor method.
runtime_num_cpus: The number of CPUs requested when the task was
invoked.
runtime_num_gpus: The number of GPUs requested when the task was
invoked.
runtime_resources: The custom resources requested when the task was
invoked.
Returns:
A dictionary of the resource requirements for the task.
""" |
if runtime_resources is not None:
resources = runtime_resources.copy()
elif default_resources is not None:
resources = default_resources.copy()
else:
resources = {}
if "CPU" in resources or "GPU" in resources:
raise ValueError("The resources dictionary must not "
"contain the key 'CPU' or 'GPU'")
assert default_num_cpus is not None
resources["CPU"] = (default_num_cpus
if runtime_num_cpus is None else runtime_num_cpus)
if runtime_num_gpus is not None:
resources["GPU"] = runtime_num_gpus
elif default_num_gpus is not None:
resources["GPU"] = default_num_gpus
return resources |
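A worked example of the precedence rules (runtime values override the defaults; the numbers and the "TPU" resource are arbitrary):

resources = resources_from_resource_arguments(
    default_num_cpus=1,
    default_num_gpus=0,
    default_resources={"TPU": 1},
    runtime_num_cpus=None,   # fall back to the default of 1 CPU
    runtime_num_gpus=2,      # override the default of 0 GPUs
    runtime_resources=None)  # fall back to the default custom resources
assert resources == {"TPU": 1, "CPU": 1, "GPU": 2}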
<SYSTEM_TASK:>
Run vmstat and get a particular statistic.
<END_TASK>
<USER_TASK:>
Description:
def vmstat(stat):
"""Run vmstat and get a particular statistic.
Args:
stat: The statistic that we are interested in retrieving.
Returns:
The parsed output.
""" |
out = subprocess.check_output(["vmstat", "-s"])
stat = stat.encode("ascii")
for line in out.split(b"\n"):
line = line.strip()
if stat in line:
return int(line.split(b" ")[0])
raise ValueError("Can't find {} in 'vmstat' output.".format(stat)) |
<SYSTEM_TASK:>
Run a sysctl command and parse the output.
<END_TASK>
<USER_TASK:>
Description:
def sysctl(command):
"""Run a sysctl command and parse the output.
Args:
command: A sysctl command with an argument, for example,
["sysctl", "hw.memsize"].
Returns:
The parsed output.
""" |
out = subprocess.check_output(command)
result = out.split(b" ")[1]
try:
return int(result)
except ValueError:
return result |
<SYSTEM_TASK:>
Return the total amount of system memory in bytes.
<END_TASK>
<USER_TASK:>
Description:
def get_system_memory():
"""Return the total amount of system memory in bytes.
Returns:
The total amount of system memory in bytes.
""" |
# Try to accurately figure out the memory limit if we are in a docker
# container. Note that this file is not specific to Docker and its value is
# often much larger than the actual amount of memory.
docker_limit = None
memory_limit_filename = "/sys/fs/cgroup/memory/memory.limit_in_bytes"
if os.path.exists(memory_limit_filename):
with open(memory_limit_filename, "r") as f:
docker_limit = int(f.read())
# Use psutil if it is available.
psutil_memory_in_bytes = None
try:
import psutil
psutil_memory_in_bytes = psutil.virtual_memory().total
except ImportError:
pass
if psutil_memory_in_bytes is not None:
memory_in_bytes = psutil_memory_in_bytes
elif sys.platform == "linux" or sys.platform == "linux2":
# Handle Linux.
bytes_in_kilobyte = 1024
memory_in_bytes = vmstat("total memory") * bytes_in_kilobyte
else:
# Handle MacOS.
memory_in_bytes = sysctl(["sysctl", "hw.memsize"])
if docker_limit is not None:
return min(docker_limit, memory_in_bytes)
else:
return memory_in_bytes |
<SYSTEM_TASK:>
Get the size of the shared memory file system.
<END_TASK>
<USER_TASK:>
Description:
def get_shared_memory_bytes():
"""Get the size of the shared memory file system.
Returns:
The size of the shared memory file system in bytes.
""" |
# Make sure this is only called on Linux.
assert sys.platform == "linux" or sys.platform == "linux2"
shm_fd = os.open("/dev/shm", os.O_RDONLY)
try:
shm_fs_stats = os.fstatvfs(shm_fd)
# The value shm_fs_stats.f_bsize is the block size and the
# value shm_fs_stats.f_bavail is the number of available
# blocks.
shm_avail = shm_fs_stats.f_bsize * shm_fs_stats.f_bavail
finally:
os.close(shm_fd)
return shm_avail |
<SYSTEM_TASK:>
Send a warning message if the pickled object is too large.
<END_TASK>
<USER_TASK:>
Description:
def check_oversized_pickle(pickled, name, obj_type, worker):
"""Send a warning message if the pickled object is too large.
Args:
pickled: the pickled object.
name: name of the pickled object.
obj_type: type of the pickled object, can be 'function',
'remote function', 'actor', or 'object'.
worker: the worker used to send warning message.
""" |
length = len(pickled)
if length <= ray_constants.PICKLE_OBJECT_WARNING_SIZE:
return
warning_message = (
"Warning: The {} {} has size {} when pickled. "
"It will be stored in Redis, which could cause memory issues. "
"This may mean that its definition uses a large array or other object."
).format(obj_type, name, length)
push_error_to_driver(
worker,
ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR,
warning_message,
driver_id=worker.task_driver_id) |
<SYSTEM_TASK:>
Create a thread-safe proxy which locks every method call
<END_TASK>
<USER_TASK:>
Description:
def thread_safe_client(client, lock=None):
"""Create a thread-safe proxy which locks every method call
for the given client.
Args:
client: the client object to be guarded.
lock: the lock object that will be used to lock client's methods.
If None, a new lock will be used.
Returns:
A thread-safe proxy for the given client.
""" |
if lock is None:
lock = threading.Lock()
return _ThreadSafeProxy(client, lock) |
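`_ThreadSafeProxy` itself is not shown here; a minimal sketch of what such a proxy could look like (an assumption for illustration, not the actual implementation) is:

import threading

class _ThreadSafeProxySketch(object):
    """Wrap an object so that every method call holds a shared lock."""

    def __init__(self, client, lock):
        self._client = client
        self._lock = lock

    def __getattr__(self, name):
        attr = getattr(self._client, name)
        if not callable(attr):
            return attr

        def locked_call(*args, **kwargs):
            with self._lock:
                return attr(*args, **kwargs)

        return locked_call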
<SYSTEM_TASK:>
Assemble an array from a distributed array of object IDs.
<END_TASK>
<USER_TASK:>
Description:
def assemble(self):
"""Assemble an array from a distributed array of object IDs.""" |
first_block = ray.get(self.objectids[(0, ) * self.ndim])
dtype = first_block.dtype
result = np.zeros(self.shape, dtype=dtype)
for index in np.ndindex(*self.num_blocks):
lower = DistArray.compute_block_lower(index, self.shape)
upper = DistArray.compute_block_upper(index, self.shape)
result[tuple(slice(l, u) for (l, u) in zip(lower, upper))] = ray.get(
self.objectids[index])
return result |
<SYSTEM_TASK:>
Computes action log-probs from policy logits and actions.
<END_TASK>
<USER_TASK:>
Description:
def multi_log_probs_from_logits_and_actions(policy_logits, actions):
"""Computes action log-probs from policy logits and actions.
In the notation used throughout documentation and comments, T refers to the
time dimension ranging from 0 to T-1. B refers to the batch size and
ACTION_SPACE refers to the list of numbers each representing a number of
actions.
Args:
policy_logits: A list with length of ACTION_SPACE of float32
tensors of shapes
[T, B, ACTION_SPACE[0]],
...,
[T, B, ACTION_SPACE[-1]]
with un-normalized log-probabilities parameterizing a softmax policy.
actions: A list with length of ACTION_SPACE of int32
tensors of shapes
[T, B],
...,
[T, B]
with actions.
Returns:
A list with length of ACTION_SPACE of float32
tensors of shapes
[T, B],
...,
[T, B]
corresponding to the sampling log probability
of the chosen action w.r.t. the policy.
""" |
log_probs = []
for i in range(len(policy_logits)):
log_probs.append(-tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=policy_logits[i], labels=actions[i]))
return log_probs |
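The per-action log-probability computed above is just log softmax(logits) gathered at the chosen action index; a plain numpy restatement of one list element (a sketch for intuition, not part of the library) is:

import numpy as np

def log_prob_of_actions(logits, actions):
    # logits: [T, B, A] un-normalized scores; actions: [T, B] integer indices.
    shifted = logits - logits.max(axis=-1, keepdims=True)
    log_softmax = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
    t_idx, b_idx = np.indices(actions.shape)
    return log_softmax[t_idx, b_idx, actions]  # shape [T, B]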
<SYSTEM_TASK:>
multi_from_logits wrapper used only for tests
<END_TASK>
<USER_TASK:>
Description:
def from_logits(behaviour_policy_logits,
target_policy_logits,
actions,
discounts,
rewards,
values,
bootstrap_value,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0,
name="vtrace_from_logits"):
"""multi_from_logits wrapper used only for tests""" |
res = multi_from_logits(
[behaviour_policy_logits], [target_policy_logits], [actions],
discounts,
rewards,
values,
bootstrap_value,
clip_rho_threshold=clip_rho_threshold,
clip_pg_rho_threshold=clip_pg_rho_threshold,
name=name)
return VTraceFromLogitsReturns(
vs=res.vs,
pg_advantages=res.pg_advantages,
log_rhos=res.log_rhos,
behaviour_action_log_probs=tf.squeeze(
res.behaviour_action_log_probs, axis=0),
target_action_log_probs=tf.squeeze(
res.target_action_log_probs, axis=0),
) |
<SYSTEM_TASK:>
r"""V-trace for softmax policies.
<END_TASK>
<USER_TASK:>
Description:
def multi_from_logits(behaviour_policy_logits,
target_policy_logits,
actions,
discounts,
rewards,
values,
bootstrap_value,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0,
name="vtrace_from_logits"):
r"""V-trace for softmax policies.
Calculates V-trace actor critic targets for softmax polices as described in
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
Target policy refers to the policy we are interested in improving and
behaviour policy refers to the policy that generated the given
rewards and actions.
In the notation used throughout documentation and comments, T refers to the
time dimension ranging from 0 to T-1. B refers to the batch size and
ACTION_SPACE refers to the list of numbers each representing a number of
actions.
Args:
behaviour_policy_logits: A list with length of ACTION_SPACE of float32
tensors of shapes
[T, B, ACTION_SPACE[0]],
...,
[T, B, ACTION_SPACE[-1]]
with un-normalized log-probabilities parameterizing the softmax behaviour
policy.
target_policy_logits: A list with length of ACTION_SPACE of float32
tensors of shapes
[T, B, ACTION_SPACE[0]],
...,
[T, B, ACTION_SPACE[-1]]
with un-normalized log-probabilities parameterizing the softmax target
policy.
actions: A list with length of ACTION_SPACE of int32
tensors of shapes
[T, B],
...,
[T, B]
with actions sampled from the behaviour policy.
discounts: A float32 tensor of shape [T, B] with the discount encountered
when following the behaviour policy.
rewards: A float32 tensor of shape [T, B] with the rewards generated by
following the behaviour policy.
values: A float32 tensor of shape [T, B] with the value function estimates
wrt. the target policy.
bootstrap_value: A float32 of shape [B] with the value function estimate at
time T.
clip_rho_threshold: A scalar float32 tensor with the clipping threshold for
importance weights (rho) when calculating the baseline targets (vs).
rho^bar in the paper.
clip_pg_rho_threshold: A scalar float32 tensor with the clipping threshold
on rho_s in \rho_s \delta log \pi(a|x) (r + \gamma v_{s+1} - V(x_s)).
name: The name scope that all V-trace operations will be created in.
Returns:
A `VTraceFromLogitsReturns` namedtuple with the following fields:
vs: A float32 tensor of shape [T, B]. Can be used as target to train a
baseline (V(x_t) - vs_t)^2.
pg_advantages: A float 32 tensor of shape [T, B]. Can be used as an
estimate of the advantage in the calculation of policy gradients.
log_rhos: A float32 tensor of shape [T, B] containing the log importance
sampling weights (log rhos).
behaviour_action_log_probs: A float32 tensor of shape [T, B] containing
behaviour policy action log probabilities (log \mu(a_t)).
target_action_log_probs: A float32 tensor of shape [T, B] containing
target policy action probabilities (log \pi(a_t)).
""" |
for i in range(len(behaviour_policy_logits)):
behaviour_policy_logits[i] = tf.convert_to_tensor(
behaviour_policy_logits[i], dtype=tf.float32)
target_policy_logits[i] = tf.convert_to_tensor(
target_policy_logits[i], dtype=tf.float32)
actions[i] = tf.convert_to_tensor(actions[i], dtype=tf.int32)
# Make sure tensor ranks are as expected.
# The rest will be checked by from_action_log_probs.
behaviour_policy_logits[i].shape.assert_has_rank(3)
target_policy_logits[i].shape.assert_has_rank(3)
actions[i].shape.assert_has_rank(2)
with tf.name_scope(
name,
values=[
behaviour_policy_logits, target_policy_logits, actions,
discounts, rewards, values, bootstrap_value
]):
target_action_log_probs = multi_log_probs_from_logits_and_actions(
target_policy_logits, actions)
behaviour_action_log_probs = multi_log_probs_from_logits_and_actions(
behaviour_policy_logits, actions)
log_rhos = get_log_rhos(target_action_log_probs,
behaviour_action_log_probs)
vtrace_returns = from_importance_weights(
log_rhos=log_rhos,
discounts=discounts,
rewards=rewards,
values=values,
bootstrap_value=bootstrap_value,
clip_rho_threshold=clip_rho_threshold,
clip_pg_rho_threshold=clip_pg_rho_threshold)
return VTraceFromLogitsReturns(
log_rhos=log_rhos,
behaviour_action_log_probs=behaviour_action_log_probs,
target_action_log_probs=target_action_log_probs,
**vtrace_returns._asdict()) |
<SYSTEM_TASK:>
With the selected log_probs for multi-discrete actions of behaviour
<END_TASK>
<USER_TASK:>
Description:
def get_log_rhos(target_action_log_probs, behaviour_action_log_probs):
"""With the selected log_probs for multi-discrete actions of behaviour
and target policies we compute the log_rhos for calculating the vtrace.""" |
t = tf.stack(target_action_log_probs)
b = tf.stack(behaviour_action_log_probs)
log_rhos = tf.reduce_sum(t - b, axis=0)
return log_rhos |
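A tiny numeric check of the same computation in numpy (values are made up; one time step, one batch element, two action components):

import numpy as np

target = [np.log(np.array([[0.5]])), np.log(np.array([[0.25]]))]
behaviour = [np.log(np.array([[0.5]])), np.log(np.array([[0.5]]))]
log_rhos = np.sum(np.stack(target) - np.stack(behaviour), axis=0)
# rho = pi/mu = (0.5 * 0.25) / (0.5 * 0.5) = 0.5, so log_rho = log(0.5).
assert np.allclose(log_rhos, np.log(0.5))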
<SYSTEM_TASK:>
weight_variable generates a weight variable of a given shape.
<END_TASK>
<USER_TASK:>
Description:
def weight_variable(shape):
"""weight_variable generates a weight variable of a given shape.""" |
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial) |
<SYSTEM_TASK:>
bias_variable generates a bias variable of a given shape.
<END_TASK>
<USER_TASK:>
Description:
def bias_variable(shape):
"""bias_variable generates a bias variable of a given shape.""" |
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial) |
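These two helpers are typically combined into a layer; a small TF1-style sketch (matching the API used above; the layer sizes are arbitrary):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 784])
W = weight_variable([784, 128])
b = bias_variable([128])
hidden = tf.nn.relu(tf.matmul(x, W) + b)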
<SYSTEM_TASK:>
Prints output of given dataframe to fit into terminal.
<END_TASK>
<USER_TASK:>
Description:
def print_format_output(dataframe):
"""Prints output of given dataframe to fit into terminal.
Returns:
table (pd.DataFrame): Final outputted dataframe.
dropped_cols (list): Columns dropped due to terminal size.
empty_cols (list): Empty columns (dropped on default).
""" |
print_df = pd.DataFrame()
dropped_cols = []
empty_cols = []
# column display priority is based on the info_keys passed in
for i, col in enumerate(dataframe):
if dataframe[col].isnull().all():
# Don't add col to print_df if is fully empty
empty_cols += [col]
continue
print_df[col] = dataframe[col]
test_table = tabulate(print_df, headers="keys", tablefmt="psql")
if str(test_table).index("\n") > TERM_WIDTH:
# Drop all columns beyond terminal width
print_df.drop(col, axis=1, inplace=True)
dropped_cols += list(dataframe.columns)[i:]
break
table = tabulate(
print_df, headers="keys", tablefmt="psql", showindex="never")
print(table)
if dropped_cols:
print("Dropped columns:", dropped_cols)
print("Please increase your terminal size to view remaining columns.")
if empty_cols:
print("Empty columns:", empty_cols)
return table, dropped_cols, empty_cols |
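A usage sketch, assuming the surrounding module's imports (pandas, tabulate) and TERM_WIDTH are available; the column names and values are invented:

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "trial": ["t1", "t2"],
    "reward": [1.5, 2.0],
    "notes": [np.nan, np.nan],  # fully empty, so it is reported separately
})
table, dropped, empty = print_format_output(df)
assert empty == ["notes"]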