Dataset columns: text_prompt (string, 100 to 17.7k characters) and code_prompt (string, 7 to 9.86k characters).
<SYSTEM_TASK:> Returns a list of one-hot encodings for all parameters. <END_TASK> <USER_TASK:> Description: def generate_random_one_hot_encoding(self): """Returns a list of one-hot encodings for all parameters. 1 one-hot np.array for 1 parameter, and the 1's place is randomly chosen. """
encoding = []
for ps in self.param_list:
    one_hot = np.zeros(ps.choices_count())
    choice = random.randrange(ps.choices_count())
    one_hot[choice] = 1
    encoding.append(one_hot)
return encoding
<SYSTEM_TASK:> Apply one hot encoding to generate a specific config. <END_TASK> <USER_TASK:> Description: def apply_one_hot_encoding(self, one_hot_encoding): """Apply one hot encoding to generate a specific config. Arguments: one_hot_encoding (list): A list of one hot encodings, 1 for each parameter. The shape of each encoding should match that ``ParameterSpace`` Returns: A dict config with specific <name, value> pair """
config = {}
for ps, one_hot in zip(self.param_list, one_hot_encoding):
    index = np.argmax(one_hot)
    config[ps.name] = ps.choices[index]
return config
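A minimal usage sketch of the two methods above. The _FakeParam class is a hypothetical stand-in for the parameter-space objects they expect (only name, choices, and choices_count() are assumed), not the real Tune class:

import random
import numpy as np

class _FakeParam:  # hypothetical stand-in for one ParameterSpace entry
    def __init__(self, name, choices):
        self.name = name
        self.choices = choices

    def choices_count(self):
        return len(self.choices)

params = [_FakeParam("lr", [0.1, 0.01]), _FakeParam("batch", [32, 64, 128])]
# Draw a random one-hot vector per parameter, then decode it back into a
# concrete config, mirroring generate_random_one_hot_encoding and
# apply_one_hot_encoding above.
encoding = []
for ps in params:
    one_hot = np.zeros(ps.choices_count())
    one_hot[random.randrange(ps.choices_count())] = 1
    encoding.append(one_hot)
config = {ps.name: ps.choices[int(np.argmax(oh))]
          for ps, oh in zip(params, encoding)}
# e.g. config == {"lr": 0.01, "batch": 64}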
<SYSTEM_TASK:> Pin an object in the object store. <END_TASK> <USER_TASK:> Description: def pin_in_object_store(obj): """Pin an object in the object store. It will be available as long as the pinning process is alive. The pinned object can be retrieved by calling get_pinned_object on the identifier returned by this call. """
obj_id = ray.put(_to_pinnable(obj))
_pinned_objects.append(ray.get(obj_id))
return "{}{}".format(PINNED_OBJECT_PREFIX,
                     base64.b64encode(obj_id.binary()).decode("utf-8"))
<SYSTEM_TASK:> Returns a new dict that is d1 and d2 deep merged. <END_TASK> <USER_TASK:> Description: def merge_dicts(d1, d2): """Returns a new dict that is d1 and d2 deep merged."""
merged = copy.deepcopy(d1)
deep_update(merged, d2, True, [])
return merged
<SYSTEM_TASK:> Updates original dict with values from new_dict recursively. <END_TASK> <USER_TASK:> Description: def deep_update(original, new_dict, new_keys_allowed, whitelist): """Updates original dict with values from new_dict recursively. If new key is introduced in new_dict, then if new_keys_allowed is not True, an error will be thrown. Further, for sub-dicts, if the key is in the whitelist, then new subkeys can be introduced. Args: original (dict): Dictionary with default values. new_dict (dict): Dictionary with values to be updated new_keys_allowed (bool): Whether new keys are allowed. whitelist (list): List of keys that correspond to dict values where new subkeys can be introduced. This is only at the top level. """
for k, value in new_dict.items():
    if k not in original:
        if not new_keys_allowed:
            raise Exception("Unknown config parameter `{}` ".format(k))
    if isinstance(original.get(k), dict):
        if k in whitelist:
            deep_update(original[k], value, True, [])
        else:
            deep_update(original[k], value, new_keys_allowed, [])
    else:
        original[k] = value
return original
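A small usage sketch covering merge_dicts and deep_update above, assuming both are in scope:

base = {"a": 1, "b": {"x": 1, "y": 2}}
override = {"b": {"y": 20}, "c": 3}
merged = merge_dicts(base, override)
# merged == {"a": 1, "b": {"x": 1, "y": 20}, "c": 3}
# `base` is unchanged because merge_dicts deep-copies before calling
# deep_update. With new_keys_allowed=False and an empty whitelist,
# deep_update(base, {"d": 4}, False, []) would raise instead, since "d" is
# not already a key in the original dict.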
<SYSTEM_TASK:> Similar to completed but only returns once the object is local. <END_TASK> <USER_TASK:> Description: def completed_prefetch(self, blocking_wait=False, max_yield=999): """Similar to completed but only returns once the object is local. Assumes obj_id only is one id."""
for worker, obj_id in self.completed(blocking_wait=blocking_wait):
    plasma_id = ray.pyarrow.plasma.ObjectID(obj_id.binary())
    (ray.worker.global_worker.raylet_client.fetch_or_reconstruct(
        [obj_id], True))
    self._fetching.append((worker, obj_id))

remaining = []
num_yielded = 0
for worker, obj_id in self._fetching:
    plasma_id = ray.pyarrow.plasma.ObjectID(obj_id.binary())
    if (num_yielded < max_yield
            and ray.worker.global_worker.plasma_client.contains(plasma_id)):
        yield (worker, obj_id)
        num_yielded += 1
    else:
        remaining.append((worker, obj_id))
self._fetching = remaining
<SYSTEM_TASK:> Notify that some evaluators may be removed. <END_TASK> <USER_TASK:> Description: def reset_evaluators(self, evaluators): """Notify that some evaluators may be removed."""
for obj_id, ev in self._tasks.copy().items():
    if ev not in evaluators:
        del self._tasks[obj_id]
        del self._objects[obj_id]
ok = []
for ev, obj_id in self._fetching:
    if ev in evaluators:
        ok.append((ev, obj_id))
self._fetching = ok
<SYSTEM_TASK:> Iterate over train batches. <END_TASK> <USER_TASK:> Description: def iter_train_batches(self, max_yield=999): """Iterate over train batches. Arguments: max_yield (int): Max number of batches to iterate over in this cycle. Setting this avoids iter_train_batches returning too much data at once. """
for ev, sample_batch in self._augment_with_replay(
        self.sample_tasks.completed_prefetch(
            blocking_wait=True, max_yield=max_yield)):
    sample_batch.decompress_if_needed()
    self.batch_buffer.append(sample_batch)
    if sum(b.count for b in self.batch_buffer) >= self.train_batch_size:
        train_batch = self.batch_buffer[0].concat_samples(self.batch_buffer)
        yield train_batch
        self.batch_buffer = []

    # If the batch was replayed, skip the update below.
    if ev is None:
        continue

    # Put in replay buffer if enabled
    if self.replay_buffer_num_slots > 0:
        if len(self.replay_batches) < self.replay_buffer_num_slots:
            self.replay_batches.append(sample_batch)
        else:
            self.replay_batches[self.replay_index] = sample_batch
            self.replay_index += 1
            self.replay_index %= self.replay_buffer_num_slots

    ev.set_weights.remote(self.broadcasted_weights)
    self.num_weight_syncs += 1
    self.num_sent_since_broadcast += 1

    # Kick off another sample request
    self.sample_tasks.add(ev, ev.sample.remote())
<SYSTEM_TASK:> Creates or updates an autoscaling Ray cluster from a config json. <END_TASK> <USER_TASK:> Description: def create_or_update_cluster(config_file, override_min_workers, override_max_workers, no_restart, restart_only, yes, override_cluster_name): """Creates or updates an autoscaling Ray cluster from a config json."""
config = yaml.load(open(config_file).read())
if override_min_workers is not None:
    config["min_workers"] = override_min_workers
if override_max_workers is not None:
    config["max_workers"] = override_max_workers
if override_cluster_name is not None:
    config["cluster_name"] = override_cluster_name
config = _bootstrap_config(config)
get_or_create_head_node(config, config_file, no_restart, restart_only, yes,
                        override_cluster_name)
<SYSTEM_TASK:> Destroys all nodes of a Ray cluster described by a config json. <END_TASK> <USER_TASK:> Description: def teardown_cluster(config_file, yes, workers_only, override_cluster_name): """Destroys all nodes of a Ray cluster described by a config json."""
config = yaml.load(open(config_file).read())
if override_cluster_name is not None:
    config["cluster_name"] = override_cluster_name
validate_config(config)
config = fillout_defaults(config)

confirm("This will destroy your cluster", yes)

provider = get_node_provider(config["provider"], config["cluster_name"])
try:
    def remaining_nodes():
        if workers_only:
            A = []
        else:
            A = [
                node_id for node_id in provider.non_terminated_nodes({
                    TAG_RAY_NODE_TYPE: "head"
                })
            ]
        A += [
            node_id for node_id in provider.non_terminated_nodes({
                TAG_RAY_NODE_TYPE: "worker"
            })
        ]
        return A

    # Loop here to check that both the head and worker nodes are actually
    # really gone
    A = remaining_nodes()
    with LogTimer("teardown_cluster: Termination done."):
        while A:
            logger.info("teardown_cluster: "
                        "Terminating {} nodes...".format(len(A)))
            provider.terminate_nodes(A)
            time.sleep(1)
            A = remaining_nodes()
finally:
    provider.cleanup()
<SYSTEM_TASK:> Attaches to a screen for the specified cluster. <END_TASK> <USER_TASK:> Description: def attach_cluster(config_file, start, use_tmux, override_cluster_name, new): """Attaches to a screen for the specified cluster. Arguments: config_file: path to the cluster yaml start: whether to start the cluster if it isn't up use_tmux: whether to use tmux as multiplexer override_cluster_name: set the name of the cluster new: whether to force a new screen """
if use_tmux:
    if new:
        cmd = "tmux new"
    else:
        cmd = "tmux attach || tmux new"
else:
    if new:
        cmd = "screen -L"
    else:
        cmd = "screen -L -xRR"
exec_cluster(config_file, cmd, False, False, False, False, start,
             override_cluster_name, None)
<SYSTEM_TASK:> Runs a command on the specified cluster. <END_TASK> <USER_TASK:> Description: def exec_cluster(config_file, cmd, docker, screen, tmux, stop, start, override_cluster_name, port_forward): """Runs a command on the specified cluster. Arguments: config_file: path to the cluster yaml cmd: command to run docker: whether to run command in docker container of config screen: whether to run in a screen tmux: whether to run in a tmux session stop: whether to stop the cluster after command run start: whether to start the cluster if it isn't up override_cluster_name: set the name of the cluster port_forward: port to forward """
assert not (screen and tmux), "Can specify only one of `screen` or `tmux`." config = yaml.load(open(config_file).read()) if override_cluster_name is not None: config["cluster_name"] = override_cluster_name config = _bootstrap_config(config) head_node = _get_head_node( config, config_file, override_cluster_name, create_if_needed=start) provider = get_node_provider(config["provider"], config["cluster_name"]) try: updater = NodeUpdaterThread( node_id=head_node, provider_config=config["provider"], provider=provider, auth_config=config["auth"], cluster_name=config["cluster_name"], file_mounts=config["file_mounts"], initialization_commands=[], setup_commands=[], runtime_hash="", ) def wrap_docker(command): container_name = config["docker"]["container_name"] if not container_name: raise ValueError("Docker container not specified in config.") return with_docker_exec( [command], container_name=container_name)[0] cmd = wrap_docker(cmd) if docker else cmd if stop: shutdown_cmd = ( "ray stop; ray teardown ~/ray_bootstrap_config.yaml " "--yes --workers-only") if docker: shutdown_cmd = wrap_docker(shutdown_cmd) cmd += ("; {}; sudo shutdown -h now".format(shutdown_cmd)) _exec( updater, cmd, screen, tmux, expect_error=stop, port_forward=port_forward) if tmux or screen: attach_command_parts = ["ray attach", config_file] if override_cluster_name is not None: attach_command_parts.append( "--cluster-name={}".format(override_cluster_name)) if tmux: attach_command_parts.append("--tmux") elif screen: attach_command_parts.append("--screen") attach_command = " ".join(attach_command_parts) attach_info = "Use `{}` to check on command status.".format( attach_command) logger.info(attach_info) finally: provider.cleanup()
<SYSTEM_TASK:> Rsyncs files. <END_TASK> <USER_TASK:> Description: def rsync(config_file, source, target, override_cluster_name, down): """Rsyncs files. Arguments: config_file: path to the cluster yaml source: source dir target: target dir override_cluster_name: set the name of the cluster down: whether we're syncing remote -> local """
config = yaml.load(open(config_file).read())
if override_cluster_name is not None:
    config["cluster_name"] = override_cluster_name
config = _bootstrap_config(config)
head_node = _get_head_node(
    config, config_file, override_cluster_name, create_if_needed=False)
provider = get_node_provider(config["provider"], config["cluster_name"])
try:
    updater = NodeUpdaterThread(
        node_id=head_node,
        provider_config=config["provider"],
        provider=provider,
        auth_config=config["auth"],
        cluster_name=config["cluster_name"],
        file_mounts=config["file_mounts"],
        initialization_commands=[],
        setup_commands=[],
        runtime_hash="",
    )
    if down:
        rsync = updater.rsync_down
    else:
        rsync = updater.rsync_up
    rsync(source, target, check_error=False)
finally:
    provider.cleanup()
<SYSTEM_TASK:> Returns head node IP for given configuration file if exists. <END_TASK> <USER_TASK:> Description: def get_head_node_ip(config_file, override_cluster_name): """Returns head node IP for given configuration file if exists."""
config = yaml.load(open(config_file).read())
if override_cluster_name is not None:
    config["cluster_name"] = override_cluster_name

provider = get_node_provider(config["provider"], config["cluster_name"])
try:
    head_node = _get_head_node(config, config_file, override_cluster_name)
    if config.get("provider", {}).get("use_internal_ips", False) is True:
        head_node_ip = provider.internal_ip(head_node)
    else:
        head_node_ip = provider.external_ip(head_node)
finally:
    provider.cleanup()

return head_node_ip
<SYSTEM_TASK:> Returns worker node IPs for given configuration file. <END_TASK> <USER_TASK:> Description: def get_worker_node_ips(config_file, override_cluster_name): """Returns worker node IPs for given configuration file."""
config = yaml.load(open(config_file).read())
if override_cluster_name is not None:
    config["cluster_name"] = override_cluster_name

provider = get_node_provider(config["provider"], config["cluster_name"])
try:
    nodes = provider.non_terminated_nodes({TAG_RAY_NODE_TYPE: "worker"})
    if config.get("provider", {}).get("use_internal_ips", False) is True:
        return [provider.internal_ip(node) for node in nodes]
    else:
        return [provider.external_ip(node) for node in nodes]
finally:
    provider.cleanup()
<SYSTEM_TASK:> Returns logits and aux_logits from images. <END_TASK> <USER_TASK:> Description: def build_network(self, images, phase_train=True, nclass=1001, image_depth=3, data_type=tf.float32, data_format="NCHW", use_tf_layers=True, fp16_vars=False): """Returns logits and aux_logits from images."""
if data_format == "NCHW": images = tf.transpose(images, [0, 3, 1, 2]) var_type = tf.float32 if data_type == tf.float16 and fp16_vars: var_type = tf.float16 network = convnet_builder.ConvNetBuilder( images, image_depth, phase_train, use_tf_layers, data_format, data_type, var_type) with tf.variable_scope( "cg", custom_getter=network.get_custom_getter()): self.add_inference(network) # Add the final fully-connected class layer logits = (network.affine(nclass, activation="linear") if not self.skip_final_affine_layer() else network.top_layer) aux_logits = None if network.aux_top_layer is not None: with network.switch_to_aux_top_layer(): aux_logits = network.affine( nclass, activation="linear", stddev=0.001) if data_type == tf.float16: # TODO(reedwm): Determine if we should do this cast here. logits = tf.cast(logits, tf.float32) if aux_logits is not None: aux_logits = tf.cast(aux_logits, tf.float32) return logits, aux_logits
<SYSTEM_TASK:> Helper class for renaming Agent => Trainer with a warning. <END_TASK> <USER_TASK:> Description: def renamed_class(cls): """Helper class for renaming Agent => Trainer with a warning."""
class DeprecationWrapper(cls):
    def __init__(self, config=None, env=None, logger_creator=None):
        old_name = cls.__name__.replace("Trainer", "Agent")
        new_name = cls.__name__
        logger.warn("DeprecationWarning: {} has been renamed to {}. ".format(
            old_name, new_name) + "This will raise an error in the future.")
        cls.__init__(self, config, env, logger_creator)

DeprecationWrapper.__name__ = cls.__name__

return DeprecationWrapper
<SYSTEM_TASK:> Profile a span of time so that it appears in the timeline visualization. <END_TASK> <USER_TASK:> Description: def profile(event_type, extra_data=None): """Profile a span of time so that it appears in the timeline visualization. Note that this only works in the raylet code path. This function can be used as follows (both on the driver or within a task). .. code-block:: python with ray.profile("custom event", extra_data={'key': 'value'}): # Do some computation here. Optionally, a dictionary can be passed as the "extra_data" argument, and it can have keys "name" and "cname" if you want to override the default timeline display text and box color. Other values will appear at the bottom of the chrome tracing GUI when you click on the box corresponding to this profile span. Args: event_type: A string describing the type of the event. extra_data: This must be a dictionary mapping strings to strings. This data will be added to the json objects that are used to populate the timeline, so if you want to set a particular color, you can simply set the "cname" attribute to an appropriate color. Similarly, if you set the "name" attribute, then that will set the text displayed on the box in the timeline. Returns: An object that can profile a span of time via a "with" statement. """
worker = ray.worker.global_worker
return RayLogSpanRaylet(worker.profiler, event_type, extra_data=extra_data)
<SYSTEM_TASK:> Drivers run this as a thread to flush profile data in the background. <END_TASK> <USER_TASK:> Description: def _periodically_flush_profile_events(self): """Drivers run this as a thread to flush profile data in the background."""
# Note(rkn): This is run on a background thread in the driver. It uses the
# raylet client. This should be ok because it doesn't read from the raylet
# client and we have the GIL here. However, if either of those things
# changes, then we could run into issues.
while True:
    # Sleep for 1 second. This will be interrupted if
    # self.threads_stopped is set.
    self.threads_stopped.wait(timeout=1)
    # Exit if we received a signal that we should stop.
    if self.threads_stopped.is_set():
        return
    self.flush_profile_data()
<SYSTEM_TASK:> Push the logged profiling data to the global control store. <END_TASK> <USER_TASK:> Description: def flush_profile_data(self): """Push the logged profiling data to the global control store."""
with self.lock:
    events = self.events
    self.events = []

if self.worker.mode == ray.WORKER_MODE:
    component_type = "worker"
else:
    component_type = "driver"

self.worker.raylet_client.push_profile_events(
    component_type, ray.UniqueID(self.worker.worker_id),
    self.worker.node_ip_address, events)
<SYSTEM_TASK:> Add a key-value pair to the extra_data dict. <END_TASK> <USER_TASK:> Description: def set_attribute(self, key, value): """Add a key-value pair to the extra_data dict. This can be used to add attributes that are not available when ray.profile was called. Args: key: The attribute name. value: The attribute value. """
if not isinstance(key, str) or not isinstance(value, str):
    raise ValueError("The arguments 'key' and 'value' must both be "
                     "strings. Instead they are {} and {}.".format(
                         key, value))
self.extra_data[key] = value
<SYSTEM_TASK:> Syncs the local logdir on driver to worker if possible. <END_TASK> <USER_TASK:> Description: def sync_to_worker_if_possible(self): """Syncs the local logdir on driver to worker if possible. Requires ray cluster to be started with the autoscaler. Also requires rsync to be installed. """
if self.worker_ip == self.local_ip:
    return
ssh_key = get_ssh_key()
ssh_user = get_ssh_user()
global _log_sync_warned
if ssh_key is None or ssh_user is None:
    if not _log_sync_warned:
        logger.error("Log sync requires cluster to be setup with "
                     "`ray up`.")
        _log_sync_warned = True
    return
if not distutils.spawn.find_executable("rsync"):
    logger.error("Log sync requires rsync to be installed.")
    return
source = "{}/".format(self.local_dir)
target = "{}@{}:{}/".format(ssh_user, self.worker_ip, self.local_dir)
final_cmd = (("""rsync -savz -e "ssh -i {} -o ConnectTimeout=120s """
              """-o StrictHostKeyChecking=no" {} {}""").format(
                  quote(ssh_key), quote(source), quote(target)))
logger.info("Syncing results to %s", str(self.worker_ip))
sync_process = subprocess.Popen(final_cmd, shell=True, stdout=self.logfile)
sync_process.wait()
<SYSTEM_TASK:> Forward pass for the mixer. <END_TASK> <USER_TASK:> Description: def forward(self, agent_qs, states): """Forward pass for the mixer. Arguments: agent_qs: Tensor of shape [B, T, n_agents, n_actions] states: Tensor of shape [B, T, state_dim] """
bs = agent_qs.size(0)
states = states.reshape(-1, self.state_dim)
agent_qs = agent_qs.view(-1, 1, self.n_agents)
# First layer
w1 = th.abs(self.hyper_w_1(states))
b1 = self.hyper_b_1(states)
w1 = w1.view(-1, self.n_agents, self.embed_dim)
b1 = b1.view(-1, 1, self.embed_dim)
hidden = F.elu(th.bmm(agent_qs, w1) + b1)
# Second layer
w_final = th.abs(self.hyper_w_final(states))
w_final = w_final.view(-1, self.embed_dim, 1)
# State-dependent bias
v = self.V(states).view(-1, 1, 1)
# Compute final output
y = th.bmm(hidden, w_final) + v
# Reshape and return
q_tot = y.view(bs, -1, 1)
return q_tot
<SYSTEM_TASK:> Passes the result to SigOpt unless early terminated or errored. <END_TASK> <USER_TASK:> Description: def on_trial_complete(self, trial_id, result=None, error=False, early_terminated=False): """Passes the result to SigOpt unless early terminated or errored. If a trial fails, it will be reported as a failed Observation, telling the optimizer that the Suggestion led to a metric failure, which updates the feasible region and improves parameter recommendation. Creates SigOpt Observation object for trial. """
if result:
    self.conn.experiments(self.experiment.id).observations().create(
        suggestion=self._live_trial_mapping[trial_id].id,
        value=result[self._reward_attr],
    )
    # Update the experiment object
    self.experiment = self.conn.experiments(self.experiment.id).fetch()
elif error or early_terminated:
    # Reports a failed Observation
    self.conn.experiments(self.experiment.id).observations().create(
        failed=True, suggestion=self._live_trial_mapping[trial_id].id)
del self._live_trial_mapping[trial_id]
<SYSTEM_TASK:> Bottleneck block with identity short-cut for ResNet v1. <END_TASK> <USER_TASK:> Description: def bottleneck_block_v1(cnn, depth, depth_bottleneck, stride): """Bottleneck block with identity short-cut for ResNet v1. Args: cnn: the network to append bottleneck blocks. depth: the number of output filters for this bottleneck block. depth_bottleneck: the number of bottleneck filters for this block. stride: Stride used in the first layer of the bottleneck block. """
input_layer = cnn.top_layer
in_size = cnn.top_size
name_key = "resnet_v1"
name = name_key + str(cnn.counts[name_key])
cnn.counts[name_key] += 1

with tf.variable_scope(name):
    if depth == in_size:
        if stride == 1:
            shortcut = input_layer
        else:
            shortcut = cnn.apool(
                1, 1, stride, stride,
                input_layer=input_layer, num_channels_in=in_size)
    else:
        shortcut = cnn.conv(
            depth, 1, 1, stride, stride, activation=None,
            use_batch_norm=True, input_layer=input_layer,
            num_channels_in=in_size, bias=None)
    cnn.conv(
        depth_bottleneck, 1, 1, stride, stride,
        input_layer=input_layer, num_channels_in=in_size,
        use_batch_norm=True, bias=None)
    cnn.conv(
        depth_bottleneck, 3, 3, 1, 1, mode="SAME_RESNET",
        use_batch_norm=True, bias=None)
    res = cnn.conv(
        depth, 1, 1, 1, 1, activation=None, use_batch_norm=True, bias=None)
    output = tf.nn.relu(shortcut + res)
    cnn.top_layer = output
    cnn.top_size = depth
<SYSTEM_TASK:> Bottleneck block with identity short-cut. <END_TASK> <USER_TASK:> Description: def bottleneck_block(cnn, depth, depth_bottleneck, stride, pre_activation): """Bottleneck block with identity short-cut. Args: cnn: the network to append bottleneck blocks. depth: the number of output filters for this bottleneck block. depth_bottleneck: the number of bottleneck filters for this block. stride: Stride used in the first layer of the bottleneck block. pre_activation: use pre_activation structure used in v2 or not. """
if pre_activation:
    bottleneck_block_v2(cnn, depth, depth_bottleneck, stride)
else:
    bottleneck_block_v1(cnn, depth, depth_bottleneck, stride)
<SYSTEM_TASK:> Residual block with identity short-cut. <END_TASK> <USER_TASK:> Description: def residual_block(cnn, depth, stride, pre_activation): """Residual block with identity short-cut. Args: cnn: the network to append residual blocks. depth: the number of output filters for this residual block. stride: Stride used in the first layer of the residual block. pre_activation: use pre_activation structure or not. """
input_layer = cnn.top_layer
in_size = cnn.top_size

if in_size != depth:
    # Plan A of shortcut.
    shortcut = cnn.apool(
        1, 1, stride, stride,
        input_layer=input_layer, num_channels_in=in_size)
    padding = (depth - in_size) // 2
    if cnn.channel_pos == "channels_last":
        shortcut = tf.pad(shortcut,
                          [[0, 0], [0, 0], [0, 0], [padding, padding]])
    else:
        shortcut = tf.pad(shortcut,
                          [[0, 0], [padding, padding], [0, 0], [0, 0]])
else:
    shortcut = input_layer

if pre_activation:
    res = cnn.batch_norm(input_layer)
    res = tf.nn.relu(res)
else:
    res = input_layer

cnn.conv(
    depth, 3, 3, stride, stride,
    input_layer=res, num_channels_in=in_size,
    use_batch_norm=True, bias=None)

if pre_activation:
    res = cnn.conv(
        depth, 3, 3, 1, 1, activation=None, use_batch_norm=False, bias=None)
    output = shortcut + res
else:
    res = cnn.conv(
        depth, 3, 3, 1, 1, activation=None, use_batch_norm=True, bias=None)
    output = tf.nn.relu(shortcut + res)

cnn.top_layer = output
cnn.top_size = depth
<SYSTEM_TASK:> Applies updates from the buffer of another filter. <END_TASK> <USER_TASK:> Description: def apply_changes(self, other, with_buffer=False): """Applies updates from the buffer of another filter. Params: other (MeanStdFilter): Other filter to apply info from with_buffer (bool): Flag for specifying if the buffer should be copied from other. Examples: >>> a = MeanStdFilter(()) >>> a(1) >>> a(2) >>> print([a.rs.n, a.rs.mean, a.buffer.n]) [2, 1.5, 2] >>> b = MeanStdFilter(()) >>> b(10) >>> a.apply_changes(b, with_buffer=False) >>> print([a.rs.n, a.rs.mean, a.buffer.n]) [3, 4.333333333333333, 2] >>> a.apply_changes(b, with_buffer=True) >>> print([a.rs.n, a.rs.mean, a.buffer.n]) [4, 5.75, 1] """
self.rs.update(other.buffer)
if with_buffer:
    self.buffer = other.buffer.copy()
<SYSTEM_TASK:> Syncs all fields together from other filter. <END_TASK> <USER_TASK:> Description: def sync(self, other): """Syncs all fields together from other filter. Examples: >>> a = MeanStdFilter(()) >>> a(1) >>> a(2) >>> print([a.rs.n, a.rs.mean, a.buffer.n]) [2, array(1.5), 2] >>> b = MeanStdFilter(()) >>> b(10) >>> print([b.rs.n, b.rs.mean, b.buffer.n]) [1, array(10.0), 1] >>> a.sync(b) >>> print([a.rs.n, a.rs.mean, a.buffer.n]) [1, array(10.0), 1] """
assert other.shape == self.shape, "Shapes don't match!"
self.demean = other.demean
self.destd = other.destd
self.clip = other.clip
self.rs = other.rs.copy()
self.buffer = other.buffer.copy()
<SYSTEM_TASK:> Returns non-concurrent version of current class <END_TASK> <USER_TASK:> Description: def as_serializable(self): """Returns non-concurrent version of current class"""
other = MeanStdFilter(self.shape)
other.sync(self)
return other
<SYSTEM_TASK:> Parse all_reduce_spec. <END_TASK> <USER_TASK:> Description: def parse_all_reduce_spec(all_reduce_spec): """Parse all_reduce_spec. Args: all_reduce_spec: a string specifying a combination of all-reduce algorithms to apply for gradient reduction. Returns: a list of AllReduceSpecTuple. Raises: ValueError: all_reduce_spec is not well-formed. An all_reduce_spec has BNF form: int ::= positive whole number g_int ::= int[KkMGT]? alg_spec ::= alg | alg#int range_spec ::= alg_spec | alg_spec/alg_spec spec ::= range_spec | range_spec:g_int:range_spec Not all syntactically correct specifications are supported. Examples of supported all_reduce_spec strings, with semantics explained: "xring" == apply ring all-reduce to all tensors "xring#2" == apply ring all-reduce to all tensors, using two simultaneous transfer rings, each operating on 1/2 of each tensor. "nccl" == apply NCCL all-reduce to all tensors (only works within a single worker process where all devices are GPUs) "nccl/xring" == apply NCCL all-reduce to all tensors within each worker to produce at least one full-reduced (locally) value, then apply ring all-reduce to one such value from each worker, then apply NCCL broadcast to propagate those globally reduced values back to every device within each worker. "pscpu" == Shuffle reduce using worker CPUs as the gather devices: each distributed tensor is reduced by copying all instances to one of the worker CPUs, computing the reduction there, then copying back to each participating device. Tensor reductions are assigned to specific CPUs round-robin. "psgpu#4" == Arrange all GPUs across all workers into groups of 4. Each distributed tensor is shuffle reduced against one such group of 4 GPUs, selected round-robin. That is, each tensor is split across 4 shards for the reduction. "pscpu:2k:pscpu#2:64k:xring" == Apply single-shard pscpu to tensors of size <= 2048 elements, apply 2-shard pscpu to tensors up to size 64k elements, apply xring to larger tensors. "pscpu/pscpu#2" == Use shuffle gather to locally reduce each tensor on the worker's CPU, then use 2-shard shuffle to reduce those locally reduced tensors across workers (on the worker CPUs), then scatter the globally reduced values locally from each worker CPU. """
range_parts = all_reduce_spec.split(":") + ["-1"]
if len(range_parts) % 2:
    raise ValueError(
        "all_reduce_spec not well formed: %s" % all_reduce_spec)
limit = 0
spec = []
alg = None
shards = 1
for i, range_part in enumerate(range_parts):
    if i % 2 == 1:
        try:
            limit = parse_general_int(range_part)
            spec.append(
                AllReduceSpecTuple(alg=alg, shards=shards, limit=limit))
        except ValueError:
            raise ValueError(
                "all_reduce_spec (%s) contains non-integer range %s" %
                (all_reduce_spec, range_part))
    else:
        alg = range_part
        alg_parts = range_part.split("#")
        alg = alg_parts[0]
        if len(alg_parts) > 1:
            try:
                shards = int(alg_parts[1])
            except ValueError:
                raise ValueError(
                    "all_reduce_spec (%s) contains non-integer "
                    "shards %s" % (all_reduce_spec, alg_parts[1]))
        else:
            shards = 1
        if alg not in [
                "nccl", "nccl/xring", "nccl/rechd", "nccl/pscpu", "xring",
                "pscpu", "psgpu", "pscpu/pscpu"
        ]:
            raise ValueError(
                "all_reduce_spec (%s) contains invalid alg %s" %
                (all_reduce_spec, alg))
return spec
<SYSTEM_TASK:> Build list of device prefix names for all_reduce. <END_TASK> <USER_TASK:> Description: def build_all_reduce_device_prefixes(job_name, num_tasks): """Build list of device prefix names for all_reduce. Args: job_name: "worker", "ps" or "localhost". num_tasks: number of jobs across which device names should be generated. Returns: A list of device name prefix strings. Each element spells out the full host name without adding the device. e.g. "/job:worker/task:0" """
if job_name != "localhost": return ["/job:%s/task:%d" % (job_name, d) for d in range(0, num_tasks)] else: assert num_tasks == 1 return ["/job:%s" % job_name]
<SYSTEM_TASK:> Group device names into groups of group_size. <END_TASK> <USER_TASK:> Description: def group_device_names(devices, group_size): """Group device names into groups of group_size. Args: devices: list of strings naming devices. group_size: int >= 1 Returns: list of lists of devices, where each inner list is group_size long, and each device appears at least once in an inner list. If len(devices) % group_size = 0 then each device will appear exactly once. Raises: ValueError: group_size > len(devices) """
num_devices = len(devices)
if group_size > num_devices:
    raise ValueError(
        "only %d devices, but group_size=%d" % (num_devices, group_size))
num_groups = (num_devices // group_size +
              (1 if (num_devices % group_size != 0) else 0))
groups = [[] for i in range(num_groups)]
for i in range(0, num_groups * group_size):
    groups[i % num_groups].append(devices[i % num_devices])
return groups
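An illustration of the round-robin grouping above, assuming group_device_names is in scope; with 3 devices and group_size=2 the last slot wraps around to the first device:

group_device_names(["/gpu:0", "/gpu:1", "/gpu:2"], 2)
# -> [["/gpu:0", "/gpu:2"], ["/gpu:1", "/gpu:0"]]
group_device_names(["/gpu:0", "/gpu:1"], 2)
# -> [["/gpu:0", "/gpu:1"]]  (exact fit: each device appears exactly once)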
<SYSTEM_TASK:> Break gradients into two sets according to tensor size. <END_TASK> <USER_TASK:> Description: def split_grads_by_size(threshold_size, device_grads): """Break gradients into two sets according to tensor size. Args: threshold_size: int size cutoff for small vs large tensor. device_grads: List of lists of (gradient, variable) tuples. The outer list is over devices. The inner list is over individual gradients. Returns: small_grads: Subset of device_grads where shape is <= theshold_size elements. large_grads: Subset of device_grads where shape is > threshold_size elements. """
small_grads = []
large_grads = []
for dl in device_grads:
    small_dl = []
    large_dl = []
    for (g, v) in dl:
        tensor_size = g.get_shape().num_elements()
        if tensor_size <= threshold_size:
            small_dl.append([g, v])
        else:
            large_dl.append([g, v])
    if small_dl:
        small_grads.append(small_dl)
    if large_dl:
        large_grads.append(large_dl)
return small_grads, large_grads
<SYSTEM_TASK:> Calculate the average gradient for a shared variable across all towers. <END_TASK> <USER_TASK:> Description: def aggregate_single_gradient(grad_and_vars, use_mean, check_inf_nan): """Calculate the average gradient for a shared variable across all towers. Note that this function provides a synchronization point across all towers. Args: grad_and_vars: A list or tuple of (gradient, variable) tuples. Each (gradient, variable) pair within the outer list represents the gradient of the variable calculated for a single tower, and the number of pairs equals the number of towers. use_mean: if True, mean is taken, else sum of gradients is taken. check_inf_nan: check grads for nans and infs. Returns: The tuple ([(average_gradient, variable),], has_nan_or_inf) where the gradient has been averaged across all towers. The variable is chosen from the first tower. The has_nan_or_inf indicates the grads has nan or inf. """
grads = [g for g, _ in grad_and_vars]
grad = tf.add_n(grads)

if use_mean and len(grads) > 1:
    grad = tf.multiply(grad, 1.0 / len(grads))

v = grad_and_vars[0][1]
if check_inf_nan:
    has_nan_or_inf = tf.logical_not(tf.reduce_all(tf.is_finite(grads)))
    return (grad, v), has_nan_or_inf
else:
    return (grad, v), None
<SYSTEM_TASK:> Aggregate gradients, controlling device for the aggregation. <END_TASK> <USER_TASK:> Description: def aggregate_gradients_using_copy_with_device_selection( tower_grads, avail_devices, use_mean=True, check_inf_nan=False): """Aggregate gradients, controlling device for the aggregation. Args: tower_grads: List of lists of (gradient, variable) tuples. The outer list is over towers. The inner list is over individual gradients. use_mean: if True, mean is taken, else sum of gradients is taken. check_inf_nan: If true, check grads for nans and infs. Returns: The tuple ([(average_gradient, variable),], has_nan_or_inf) where the gradient has been averaged across all towers. The variable is chosen from the first tower. The has_nan_or_inf indicates the grads has nan or inf. """
agg_grads = []
has_nan_or_inf_list = []

for i, single_grads in enumerate(zip(*tower_grads)):
    with tf.device(avail_devices[i % len(avail_devices)]):
        grad_and_var, has_nan_or_inf = aggregate_single_gradient(
            single_grads, use_mean, check_inf_nan)
        agg_grads.append(grad_and_var)
        has_nan_or_inf_list.append(has_nan_or_inf)

return agg_grads
<SYSTEM_TASK:> Extract consecutive ranges and singles from index_list. <END_TASK> <USER_TASK:> Description: def extract_ranges(index_list, range_size_limit=32): """Extract consecutive ranges and singles from index_list. Args: index_list: List of monotone increasing non-negative integers. range_size_limit: Largest size range to return. If a larger consecutive range exists it will be returned as multiple ranges. Returns: ranges, singles where ranges is a list of [first, last] pairs of consecutive elements in index_list, and singles is all of the other elements, in original order. """
if not index_list:
    return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
    if i == last + 1 and (last - first) <= range_size_limit:
        last = i
    else:
        if last > first:
            ranges.append([first, last])
        else:
            singles.append(first)
        first = i
        last = i
if last > first:
    ranges.append([first, last])
else:
    singles.append(first)
return ranges, singles
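A quick worked example for extract_ranges, assuming it is in scope:

ranges, singles = extract_ranges([0, 1, 2, 5, 8, 9])
# ranges == [[0, 2], [8, 9]]   (consecutive runs of length >= 2)
# singles == [5]               (everything else, in original order)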
<SYSTEM_TASK:> Form the concatenation of a specified range of gradient tensors. <END_TASK> <USER_TASK:> Description: def pack_range(key, packing, grad_vars, rng): """Form the concatenation of a specified range of gradient tensors. Args: key: Value under which to store meta-data in packing that will be used later to restore the grad_var list structure. packing: Dict holding data describing packed ranges of small tensors. grad_vars: List of (grad, var) pairs for one tower. rng: A pair of integers giving the first, last indices of a consecutive range of tensors to be packed. Returns: A tensor that is the concatenation of all the specified small tensors. """
to_pack = grad_vars[rng[0]:rng[1] + 1]
members = []
variables = []
restore_shapes = []
with tf.name_scope("pack"):
    for g, v in to_pack:
        variables.append(v)
        restore_shapes.append(g.shape)
        with tf.device(g.device):
            members.append(tf.reshape(g, [-1]))
    packing[key] = GradPackTuple(
        indices=range(rng[0], rng[1] + 1),
        vars=variables,
        shapes=restore_shapes)
    with tf.device(members[0].device):
        return tf.concat(members, 0)
<SYSTEM_TASK:> Unpack a previously packed collection of gradient tensors. <END_TASK> <USER_TASK:> Description: def unpack_grad_tuple(gv, gpt): """Unpack a previously packed collection of gradient tensors. Args: gv: A (grad, var) pair to be unpacked. gpt: A GradPackTuple describing the packing operation that produced gv. Returns: A list of (grad, var) pairs corresponding to the values that were originally packed into gv, maybe following subsequent operations like reduction. """
elt_widths = [x.num_elements() for x in gpt.shapes]
with tf.device(gv[0][0].device):
    with tf.name_scope("unpack"):
        splits = tf.split(gv[0], elt_widths)
        unpacked_gv = []
        for idx, s in enumerate(splits):
            unpacked_gv.append((tf.reshape(s, gpt.shapes[idx]),
                                gpt.vars[idx]))
return unpacked_gv
<SYSTEM_TASK:> Concatenate gradients together more intelligently. <END_TASK> <USER_TASK:> Description: def pack_small_tensors(tower_grads, max_bytes=0): """Concatenate gradients together more intelligently. Does binpacking Args: tower_grads: List of lists of (gradient, variable) tuples. max_bytes: Int giving max number of bytes in a tensor that may be considered small. """
assert max_bytes >= 0 orig_grads = [g for g, _ in tower_grads[0]] # Check to make sure sizes are accurate; not entirely important assert all(g.dtype == tf.float32 for g in orig_grads) sizes = [4 * g.shape.num_elements() for g in orig_grads] print_stats(sizes) small_ranges = [] large_indices = [] new_sizes = [] def end_interval(indices, small_ranges, large_indices): if len(indices) > 1: small_ranges.insert(0, [indices[0], indices[-1]]) else: large_indices.insert(0, indices[0]) cur_range = [] cur_size = 0 for i, s in reversed(list(enumerate(sizes))): if cur_size > max_bytes: end_interval(cur_range, small_ranges, large_indices) new_sizes.insert(0, cur_size) cur_range = [] cur_size = 0 cur_range.insert(0, i) cur_size += s end_interval(cur_range, small_ranges, large_indices) new_sizes.insert(0, cur_size) print_stats(new_sizes) num_gv = len(orig_grads) packing = {} if len(small_ranges): new_tower_grads = [] for dev_idx, gv_list in enumerate(tower_grads): assert len(gv_list) == num_gv, ( "Possible cause: " "Networks constructed on different workers " "don't have the same number of variables. " "If you use tf.GraphKeys or tf.global_variables() " "with multiple graphs per worker during network " "construction, you need to use " "appropriate scopes, see " "https://github.com/ray-project/ray/issues/3136") new_gv_list = [] for r in small_ranges: key = "%d:%d" % (dev_idx, len(new_gv_list)) new_gv_list.append((pack_range(key, packing, gv_list, r), "packing_var_placeholder")) for i in large_indices: new_gv_list.append(gv_list[i]) new_tower_grads.append(new_gv_list) return new_tower_grads, packing else: return tower_grads, None
<SYSTEM_TASK:> Undo the structure alterations to tower_grads done by pack_small_tensors. <END_TASK> <USER_TASK:> Description: def unpack_small_tensors(tower_grads, packing): """Undo the structure alterations to tower_grads done by pack_small_tensors. Args: tower_grads: List of List of (grad, var) tuples. packing: A dict generated by pack_small_tensors describing the changes it made to tower_grads. Returns: new_tower_grads: identical to tower_grads except that concatentations of small tensors have been split apart and returned to their original positions, paired with their original variables. """
if not packing:
    return tower_grads
new_tower_grads = []
num_devices = len(tower_grads)
num_packed = len(packing.keys()) // num_devices
for dev_idx, gv_list in enumerate(tower_grads):
    new_gv_list = gv_list[num_packed:]
    for i in xrange(0, num_packed):
        k = "%d:%d" % (dev_idx, i)
        gpt = packing[k]
        gv = unpack_grad_tuple(gv_list[i], gpt)
        for gi, idx in enumerate(gpt.indices):
            assert idx == gpt.indices[gi]
            new_gv_list.insert(idx, gv[gi])
    new_tower_grads.append(new_gv_list)
return new_tower_grads
<SYSTEM_TASK:> CSV is output with headers as the first set of results. <END_TASK> <USER_TASK:> Description: def _init(self): """CSV is output with headers as the first set of results."""
# Note that we assume params.json was already created by JsonLogger
progress_file = os.path.join(self.logdir, "progress.csv")
self._continuing = os.path.exists(progress_file)
self._file = open(progress_file, "a")
self._csv_out = None
<SYSTEM_TASK:> Sends the current log directory to the remote node. <END_TASK> <USER_TASK:> Description: def sync_results_to_new_location(self, worker_ip): """Sends the current log directory to the remote node. Syncing will not occur if the cluster is not started with the Ray autoscaler. """
if worker_ip != self._log_syncer.worker_ip:
    self._log_syncer.set_worker_ip(worker_ip)
self._log_syncer.sync_to_worker_if_possible()
<SYSTEM_TASK:> Inserts value into config by path, generating intermediate dictionaries. <END_TASK> <USER_TASK:> Description: def deep_insert(path_list, value, config): """Inserts value into config by path, generating intermediate dictionaries. Example: >>> deep_insert(path.split("."), value, {}) """
if len(path_list) > 1:
    inside_config = config.setdefault(path_list[0], {})
    deep_insert(path_list[1:], value, inside_config)
else:
    config[path_list[0]] = value
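A short usage sketch for deep_insert, assuming the function above is in scope; the dotted path is illustrative:

config = {}
deep_insert("training.optimizer.lr".split("."), 0.001, config)
# config == {"training": {"optimizer": {"lr": 0.001}}}
deep_insert("training.batch_size".split("."), 64, config)
# config == {"training": {"optimizer": {"lr": 0.001}, "batch_size": 64}}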
<SYSTEM_TASK:> Create a FunctionDescriptor instance from list of bytes. <END_TASK> <USER_TASK:> Description: def from_bytes_list(cls, function_descriptor_list): """Create a FunctionDescriptor instance from list of bytes. This function is used to create the function descriptor from backend data. Args: cls: Current class which is required argument for classmethod. function_descriptor_list: list of bytes to represent the function descriptor. Returns: The FunctionDescriptor instance created from the bytes list. """
assert isinstance(function_descriptor_list, list)
if len(function_descriptor_list) == 0:
    # This is a function descriptor of driver task.
    return FunctionDescriptor.for_driver_task()
elif (len(function_descriptor_list) == 3
      or len(function_descriptor_list) == 4):
    module_name = ensure_str(function_descriptor_list[0])
    class_name = ensure_str(function_descriptor_list[1])
    function_name = ensure_str(function_descriptor_list[2])
    if len(function_descriptor_list) == 4:
        return cls(module_name, function_name, class_name,
                   function_descriptor_list[3])
    else:
        return cls(module_name, function_name, class_name)
else:
    raise Exception(
        "Invalid input for FunctionDescriptor.from_bytes_list")
<SYSTEM_TASK:> Create a FunctionDescriptor from a function instance. <END_TASK> <USER_TASK:> Description: def from_function(cls, function): """Create a FunctionDescriptor from a function instance. This function is used to create the function descriptor from a python function. If a function is a class function, it should not be used by this function. Args: cls: Current class which is required argument for classmethod. function: the python function used to create the function descriptor. Returns: The FunctionDescriptor instance created according to the function. """
module_name = function.__module__
function_name = function.__name__
class_name = ""

function_source_hasher = hashlib.sha1()
try:
    # If we are running a script or are in IPython, include the source
    # code in the hash.
    source = inspect.getsource(function)
    if sys.version_info[0] >= 3:
        source = source.encode()
    function_source_hasher.update(source)
    function_source_hash = function_source_hasher.digest()
except (IOError, OSError, TypeError):
    # Source code may not be available:
    # e.g. Cython or Python interpreter.
    function_source_hash = b""

return cls(module_name, function_name, class_name, function_source_hash)
<SYSTEM_TASK:> Create a FunctionDescriptor from a class. <END_TASK> <USER_TASK:> Description: def from_class(cls, target_class): """Create a FunctionDescriptor from a class. Args: cls: Current class which is required argument for classmethod. target_class: the python class used to create the function descriptor. Returns: The FunctionDescriptor instance created according to the class. """
module_name = target_class.__module__
class_name = target_class.__name__
return cls(module_name, "__init__", class_name)
<SYSTEM_TASK:> See whether this function descriptor is for a driver or not. <END_TASK> <USER_TASK:> Description: def is_for_driver_task(self): """See whether this function descriptor is for a driver or not. Returns: True if this function descriptor is for driver tasks. """
return all(
    len(x) == 0
    for x in [self.module_name, self.class_name, self.function_name])
<SYSTEM_TASK:> Calculate the function id of current function descriptor. <END_TASK> <USER_TASK:> Description: def _get_function_id(self): """Calculate the function id of current function descriptor. This function id is calculated from all the fields of function descriptor. Returns: ray.ObjectID to represent the function descriptor. """
if self.is_for_driver_task:
    return ray.FunctionID.nil()
function_id_hash = hashlib.sha1()
# Include the function module and name in the hash.
function_id_hash.update(self.module_name.encode("ascii"))
function_id_hash.update(self.function_name.encode("ascii"))
function_id_hash.update(self.class_name.encode("ascii"))
function_id_hash.update(self._function_source_hash)
# Compute the function ID.
function_id = function_id_hash.digest()
return ray.FunctionID(function_id)
<SYSTEM_TASK:> Return a list of bytes representing the function descriptor. <END_TASK> <USER_TASK:> Description: def get_function_descriptor_list(self): """Return a list of bytes representing the function descriptor. This function is used to pass this function descriptor to backend. Returns: A list of bytes. """
descriptor_list = []
if self.is_for_driver_task:
    # Driver task returns an empty list.
    return descriptor_list
else:
    descriptor_list.append(self.module_name.encode("ascii"))
    descriptor_list.append(self.class_name.encode("ascii"))
    descriptor_list.append(self.function_name.encode("ascii"))
    if len(self._function_source_hash) != 0:
        descriptor_list.append(self._function_source_hash)
    return descriptor_list
<SYSTEM_TASK:> Export cached remote functions <END_TASK> <USER_TASK:> Description: def export_cached(self): """Export cached remote functions Note: this should be called only once when worker is connected. """
for remote_function in self._functions_to_export:
    self._do_export(remote_function)
self._functions_to_export = None
for info in self._actors_to_export:
    (key, actor_class_info) = info
    self._publish_actor_class_to_key(key, actor_class_info)
<SYSTEM_TASK:> Pickle a remote function and export it to redis. <END_TASK> <USER_TASK:> Description: def _do_export(self, remote_function): """Pickle a remote function and export it to redis. Args: remote_function: the RemoteFunction object. """
if self._worker.load_code_from_local: return # Work around limitations of Python pickling. function = remote_function._function function_name_global_valid = function.__name__ in function.__globals__ function_name_global_value = function.__globals__.get( function.__name__) # Allow the function to reference itself as a global variable if not is_cython(function): function.__globals__[function.__name__] = remote_function try: pickled_function = pickle.dumps(function) finally: # Undo our changes if function_name_global_valid: function.__globals__[function.__name__] = ( function_name_global_value) else: del function.__globals__[function.__name__] check_oversized_pickle(pickled_function, remote_function._function_name, "remote function", self._worker) key = (b"RemoteFunction:" + self._worker.task_driver_id.binary() + b":" + remote_function._function_descriptor.function_id.binary()) self._worker.redis_client.hmset( key, { "driver_id": self._worker.task_driver_id.binary(), "function_id": remote_function._function_descriptor. function_id.binary(), "name": remote_function._function_name, "module": function.__module__, "function": pickled_function, "max_calls": remote_function._max_calls }) self._worker.redis_client.rpush("Exports", key)
<SYSTEM_TASK:> Wait until the function to be executed is present on this worker. <END_TASK> <USER_TASK:> Description: def _wait_for_function(self, function_descriptor, driver_id, timeout=10): """Wait until the function to be executed is present on this worker. This method will simply loop until the import thread has imported the relevant function. If we spend too long in this loop, that may indicate a problem somewhere and we will push an error message to the user. If this worker is an actor, then this will wait until the actor has been defined. Args: function_descriptor : The FunctionDescriptor of the function that we want to execute. driver_id (str): The ID of the driver to push the error message to if this times out. """
start_time = time.time() # Only send the warning once. warning_sent = False while True: with self.lock: if (self._worker.actor_id.is_nil() and (function_descriptor.function_id in self._function_execution_info[driver_id])): break elif not self._worker.actor_id.is_nil() and ( self._worker.actor_id in self._worker.actors): break if time.time() - start_time > timeout: warning_message = ("This worker was asked to execute a " "function that it does not have " "registered. You may have to restart " "Ray.") if not warning_sent: ray.utils.push_error_to_driver( self._worker, ray_constants.WAIT_FOR_FUNCTION_PUSH_ERROR, warning_message, driver_id=driver_id) warning_sent = True time.sleep(0.001)
<SYSTEM_TASK:> Push an actor class definition to Redis. <END_TASK> <USER_TASK:> Description: def _publish_actor_class_to_key(self, key, actor_class_info): """Push an actor class definition to Redis. This is factored out as a separate function because it is also called on cached actor class definitions when a worker connects for the first time. Args: key: The key to store the actor class info at. actor_class_info: Information about the actor class. """
# We set the driver ID here because it may not have been available when
# the actor class was defined.
self._worker.redis_client.hmset(key, actor_class_info)
self._worker.redis_client.rpush("Exports", key)
<SYSTEM_TASK:> Load the actor class. <END_TASK> <USER_TASK:> Description: def load_actor_class(self, driver_id, function_descriptor): """Load the actor class. Args: driver_id: Driver ID of the actor. function_descriptor: Function descriptor of the actor constructor. Returns: The actor class. """
function_id = function_descriptor.function_id # Check if the actor class already exists in the cache. actor_class = self._loaded_actor_classes.get(function_id, None) if actor_class is None: # Load actor class. if self._worker.load_code_from_local: driver_id = ray.DriverID.nil() # Load actor class from local code. actor_class = self._load_actor_from_local( driver_id, function_descriptor) else: # Load actor class from GCS. actor_class = self._load_actor_class_from_gcs( driver_id, function_descriptor) # Save the loaded actor class in cache. self._loaded_actor_classes[function_id] = actor_class # Generate execution info for the methods of this actor class. module_name = function_descriptor.module_name actor_class_name = function_descriptor.class_name actor_methods = inspect.getmembers( actor_class, predicate=is_function_or_method) for actor_method_name, actor_method in actor_methods: method_descriptor = FunctionDescriptor( module_name, actor_method_name, actor_class_name) method_id = method_descriptor.function_id executor = self._make_actor_method_executor( actor_method_name, actor_method, actor_imported=True, ) self._function_execution_info[driver_id][method_id] = ( FunctionExecutionInfo( function=executor, function_name=actor_method_name, max_calls=0, )) self._num_task_executions[driver_id][method_id] = 0 self._num_task_executions[driver_id][function_id] = 0 return actor_class
<SYSTEM_TASK:> Load actor class from local code. <END_TASK> <USER_TASK:> Description: def _load_actor_from_local(self, driver_id, function_descriptor): """Load actor class from local code."""
module_name, class_name = (function_descriptor.module_name,
                           function_descriptor.class_name)
try:
    module = importlib.import_module(module_name)
    actor_class = getattr(module, class_name)
    if isinstance(actor_class, ray.actor.ActorClass):
        return actor_class._modified_class
    else:
        return actor_class
except Exception:
    logger.exception("Failed to load actor_class %s.", class_name)
    raise Exception(
        "Actor {} failed to be imported from local code.".format(class_name))
<SYSTEM_TASK:> Make an executor that wraps a user-defined actor method. <END_TASK> <USER_TASK:> Description: def _make_actor_method_executor(self, method_name, method, actor_imported): """Make an executor that wraps a user-defined actor method. The wrapped method updates the worker's internal state and performs any necessary checkpointing operations. Args: method_name (str): The name of the actor method. method (instancemethod): The actor method to wrap. This should be a method defined on the actor class and should therefore take an instance of the actor as the first argument. actor_imported (bool): Whether the actor has been imported. Checkpointing operations will not be run if this is set to False. Returns: A function that executes the given actor method on the worker's stored instance of the actor. The function also updates the worker's internal state to record the executed method. """
def actor_method_executor(dummy_return_id, actor, *args): # Update the actor's task counter to reflect the task we're about # to execute. self._worker.actor_task_counter += 1 # Execute the assigned method and save a checkpoint if necessary. try: if is_class_method(method): method_returns = method(*args) else: method_returns = method(actor, *args) except Exception as e: # Save the checkpoint before allowing the method exception # to be thrown, but don't save the checkpoint for actor # creation task. if (isinstance(actor, ray.actor.Checkpointable) and self._worker.actor_task_counter != 1): self._save_and_log_checkpoint(actor) raise e else: # Handle any checkpointing operations before storing the # method's return values. # NOTE(swang): If method_returns is a pointer to the actor's # state and the checkpointing operations can modify the return # values if they mutate the actor's state. Is this okay? if isinstance(actor, ray.actor.Checkpointable): # If this is the first task to execute on the actor, try to # resume from a checkpoint. if self._worker.actor_task_counter == 1: if actor_imported: self._restore_and_log_checkpoint(actor) else: # Save the checkpoint before returning the method's # return values. self._save_and_log_checkpoint(actor) return method_returns return actor_method_executor
<SYSTEM_TASK:> Save an actor checkpoint if necessary and log any errors. <END_TASK> <USER_TASK:> Description: def _save_and_log_checkpoint(self, actor): """Save an actor checkpoint if necessary and log any errors. Args: actor: The actor to checkpoint. Returns: The result of the actor's user-defined `save_checkpoint` method. """
actor_id = self._worker.actor_id checkpoint_info = self._worker.actor_checkpoint_info[actor_id] checkpoint_info.num_tasks_since_last_checkpoint += 1 now = int(1000 * time.time()) checkpoint_context = ray.actor.CheckpointContext( actor_id, checkpoint_info.num_tasks_since_last_checkpoint, now - checkpoint_info.last_checkpoint_timestamp) # If we should take a checkpoint, notify raylet to prepare a checkpoint # and then call `save_checkpoint`. if actor.should_checkpoint(checkpoint_context): try: now = int(1000 * time.time()) checkpoint_id = (self._worker.raylet_client. prepare_actor_checkpoint(actor_id)) checkpoint_info.checkpoint_ids.append(checkpoint_id) actor.save_checkpoint(actor_id, checkpoint_id) if (len(checkpoint_info.checkpoint_ids) > ray._config.num_actor_checkpoints_to_keep()): actor.checkpoint_expired( actor_id, checkpoint_info.checkpoint_ids.pop(0), ) checkpoint_info.num_tasks_since_last_checkpoint = 0 checkpoint_info.last_checkpoint_timestamp = now except Exception: # Checkpoint save or reload failed. Notify the driver. traceback_str = ray.utils.format_error_message( traceback.format_exc()) ray.utils.push_error_to_driver( self._worker, ray_constants.CHECKPOINT_PUSH_ERROR, traceback_str, driver_id=self._worker.task_driver_id)
<SYSTEM_TASK:> Restore an actor from a checkpoint if available and log any errors. <END_TASK> <USER_TASK:> Description: def _restore_and_log_checkpoint(self, actor): """Restore an actor from a checkpoint if available and log any errors. This should only be called on workers that have just executed an actor creation task. Args: actor: The actor to restore from a checkpoint. """
actor_id = self._worker.actor_id
try:
    checkpoints = ray.actor.get_checkpoints_for_actor(actor_id)
    if len(checkpoints) > 0:
        # If we found previously saved checkpoints for this actor,
        # call the `load_checkpoint` callback.
        checkpoint_id = actor.load_checkpoint(actor_id, checkpoints)
        if checkpoint_id is not None:
            # Check that the returned checkpoint id is in the
            # `available_checkpoints` list.
            msg = (
                "`load_checkpoint` must return a checkpoint id that " +
                "exists in the `available_checkpoints` list, or None.")
            assert any(checkpoint_id == checkpoint.checkpoint_id
                       for checkpoint in checkpoints), msg
            # Notify raylet that this actor has been resumed from
            # a checkpoint.
            (self._worker.raylet_client.
             notify_actor_resumed_from_checkpoint(actor_id, checkpoint_id))
except Exception:
    # Checkpoint save or reload failed. Notify the driver.
    traceback_str = ray.utils.format_error_message(traceback.format_exc())
    ray.utils.push_error_to_driver(
        self._worker,
        ray_constants.CHECKPOINT_PUSH_ERROR,
        traceback_str,
        driver_id=self._worker.task_driver_id)
<SYSTEM_TASK:> This implements the common experience collection logic. <END_TASK> <USER_TASK:> Description: def _env_runner(base_env, extra_batch_callback, policies, policy_mapping_fn, unroll_length, horizon, preprocessors, obs_filters, clip_rewards, clip_actions, pack, callbacks, tf_sess, perf_stats, soft_horizon): """This implements the common experience collection logic. Args: base_env (BaseEnv): env implementing BaseEnv. extra_batch_callback (fn): function to send extra batch data to. policies (dict): Map of policy ids to PolicyGraph instances. policy_mapping_fn (func): Function that maps agent ids to policy ids. This is called when an agent first enters the environment. The agent is then "bound" to the returned policy for the episode. unroll_length (int): Number of episode steps before `SampleBatch` is yielded. Set to infinity to yield complete episodes. horizon (int): Horizon of the episode. preprocessors (dict): Map of policy id to preprocessor for the observations prior to filtering. obs_filters (dict): Map of policy id to filter used to process observations for the policy. clip_rewards (bool): Whether to clip rewards before postprocessing. pack (bool): Whether to pack multiple episodes into each batch. This guarantees batches will be exactly `unroll_length` in size. clip_actions (bool): Whether to clip actions to the space range. callbacks (dict): User callbacks to run on episode events. tf_sess (Session|None): Optional tensorflow session to use for batching TF policy evaluations. perf_stats (PerfStats): Record perf stats into this object. soft_horizon (bool): Calculate rewards but don't reset the environment when the horizon is hit. Yields: rollout (SampleBatch): Object containing state, action, reward, terminal condition, and other fields as dictated by `policy`. """
try: if not horizon: horizon = (base_env.get_unwrapped()[0].spec.max_episode_steps) except Exception: logger.debug("no episode horizon specified, assuming inf") if not horizon: horizon = float("inf") # Pool of batch builders, which can be shared across episodes to pack # trajectory data. batch_builder_pool = [] def get_batch_builder(): if batch_builder_pool: return batch_builder_pool.pop() else: return MultiAgentSampleBatchBuilder( policies, clip_rewards, callbacks.get("on_postprocess_traj")) def new_episode(): episode = MultiAgentEpisode(policies, policy_mapping_fn, get_batch_builder, extra_batch_callback) if callbacks.get("on_episode_start"): callbacks["on_episode_start"]({ "env": base_env, "policy": policies, "episode": episode, }) return episode active_episodes = defaultdict(new_episode) while True: perf_stats.iters += 1 t0 = time.time() # Get observations from all ready agents unfiltered_obs, rewards, dones, infos, off_policy_actions = \ base_env.poll() perf_stats.env_wait_time += time.time() - t0 if log_once("env_returns"): logger.info("Raw obs from env: {}".format( summarize(unfiltered_obs))) logger.info("Info return from env: {}".format(summarize(infos))) # Process observations and prepare for policy evaluation t1 = time.time() active_envs, to_eval, outputs = _process_observations( base_env, policies, batch_builder_pool, active_episodes, unfiltered_obs, rewards, dones, infos, off_policy_actions, horizon, preprocessors, obs_filters, unroll_length, pack, callbacks, soft_horizon) perf_stats.processing_time += time.time() - t1 for o in outputs: yield o # Do batched policy eval t2 = time.time() eval_results = _do_policy_eval(tf_sess, to_eval, policies, active_episodes) perf_stats.inference_time += time.time() - t2 # Process results and update episode state t3 = time.time() actions_to_send = _process_policy_eval_results( to_eval, eval_results, active_episodes, active_envs, off_policy_actions, policies, clip_actions) perf_stats.processing_time += time.time() - t3 # Return computed actions to ready envs. We also send to envs that have # taken off-policy actions; those envs are free to ignore the action. t4 = time.time() base_env.send_actions(actions_to_send) perf_stats.env_wait_time += time.time() - t4
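For orientation, a minimal sketch of the two user-supplied hooks this runner consumes, shaped to match the calls above; the policy ids and the `user_data` attribute are illustrative assumptions.

# Maps an agent id to a policy id; called when the agent first enters the episode.
def policy_mapping_fn(agent_id):
    return "adversary" if str(agent_id).startswith("adv") else "main"

# Receives the same dict that new_episode() builds above.
def on_episode_start(info):
    episode = info["episode"]
    episode.user_data["frames_at_start"] = 0  # assumes the episode carries a user_data dict

callbacks = {"on_episode_start": on_episode_start}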
<SYSTEM_TASK:> Call compute actions on observation batches to get next actions. <END_TASK> <USER_TASK:> Description: def _do_policy_eval(tf_sess, to_eval, policies, active_episodes): """Call compute actions on observation batches to get next actions. Returns: eval_results: dict of policy to compute_action() outputs. """
eval_results = {} if tf_sess: builder = TFRunBuilder(tf_sess, "policy_eval") pending_fetches = {} else: builder = None if log_once("compute_actions_input"): logger.info("Inputs to compute_actions():\n\n{}\n".format( summarize(to_eval))) for policy_id, eval_data in to_eval.items(): rnn_in_cols = _to_column_format([t.rnn_state for t in eval_data]) policy = _get_or_raise(policies, policy_id) if builder and (policy.compute_actions.__code__ is TFPolicyGraph.compute_actions.__code__): # TODO(ekl): how can we make info batch available to TF code? pending_fetches[policy_id] = policy._build_compute_actions( builder, [t.obs for t in eval_data], rnn_in_cols, prev_action_batch=[t.prev_action for t in eval_data], prev_reward_batch=[t.prev_reward for t in eval_data]) else: eval_results[policy_id] = policy.compute_actions( [t.obs for t in eval_data], rnn_in_cols, prev_action_batch=[t.prev_action for t in eval_data], prev_reward_batch=[t.prev_reward for t in eval_data], info_batch=[t.info for t in eval_data], episodes=[active_episodes[t.env_id] for t in eval_data]) if builder: for k, v in pending_fetches.items(): eval_results[k] = builder.get(v) if log_once("compute_actions_result"): logger.info("Outputs of compute_actions():\n\n{}\n".format( summarize(eval_results))) return eval_results
<SYSTEM_TASK:> Process the output of policy neural network evaluation. <END_TASK> <USER_TASK:> Description: def _process_policy_eval_results(to_eval, eval_results, active_episodes, active_envs, off_policy_actions, policies, clip_actions): """Process the output of policy neural network evaluation. Records policy evaluation results into the given episode objects and returns replies to send back to agents in the env. Returns: actions_to_send: nested dict of env id -> agent id -> agent replies. """
actions_to_send = defaultdict(dict) for env_id in active_envs: actions_to_send[env_id] = {} # at minimum send empty dict for policy_id, eval_data in to_eval.items(): rnn_in_cols = _to_column_format([t.rnn_state for t in eval_data]) actions, rnn_out_cols, pi_info_cols = eval_results[policy_id] if len(rnn_in_cols) != len(rnn_out_cols): raise ValueError("Length of RNN in did not match RNN out, got: " "{} vs {}".format(rnn_in_cols, rnn_out_cols)) # Add RNN state info for f_i, column in enumerate(rnn_in_cols): pi_info_cols["state_in_{}".format(f_i)] = column for f_i, column in enumerate(rnn_out_cols): pi_info_cols["state_out_{}".format(f_i)] = column # Save output rows actions = _unbatch_tuple_actions(actions) policy = _get_or_raise(policies, policy_id) for i, action in enumerate(actions): env_id = eval_data[i].env_id agent_id = eval_data[i].agent_id if clip_actions: actions_to_send[env_id][agent_id] = clip_action( action, policy.action_space) else: actions_to_send[env_id][agent_id] = action episode = active_episodes[env_id] episode._set_rnn_state(agent_id, [c[i] for c in rnn_out_cols]) episode._set_last_pi_info( agent_id, {k: v[i] for k, v in pi_info_cols.items()}) if env_id in off_policy_actions and \ agent_id in off_policy_actions[env_id]: episode._set_last_action(agent_id, off_policy_actions[env_id][agent_id]) else: episode._set_last_action(agent_id, action) return actions_to_send
<SYSTEM_TASK:> Atari games have multiple logical episodes, one per life. <END_TASK> <USER_TASK:> Description: def _fetch_atari_metrics(base_env): """Atari games have multiple logical episodes, one per life. However for metrics reporting we count full episodes all lives included. """
unwrapped = base_env.get_unwrapped() if not unwrapped: return None atari_out = [] for u in unwrapped: monitor = get_wrapper_by_cls(u, MonitorEnv) if not monitor: return None for eps_rew, eps_len in monitor.next_episode_results(): atari_out.append(RolloutMetrics(eps_len, eps_rew, {}, {}, {})) return atari_out
<SYSTEM_TASK:> Compare two version number strings of the form W.X.Y.Z. <END_TASK> <USER_TASK:> Description: def compare_version(a, b): """Compare two version number strings of the form W.X.Y.Z. The numbers are compared most-significant to least-significant. For example, 12.345.67.89 > 2.987.88.99. Args: a: First version number string to compare b: Second version number string to compare Returns: 0 if the numbers are identical, a positive number if 'a' is larger, and a negative number if 'b' is larger. """
aa = string.split(a, ".") bb = string.split(b, ".") for i in range(0, 4): if aa[i] != bb[i]: return cmp(int(aa[i]), int(bb[i])) return 0
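The implementation above uses Python 2 idioms (`string.split`, `cmp`); a Python 3 rendering of the same comparison, kept here only as a sketch, could look like this:

def compare_version_py3(a, b):
    # Compare the four dot-separated numeric fields, most significant first.
    aa = [int(x) for x in a.split(".")]
    bb = [int(x) for x in b.split(".")]
    for x, y in zip(aa, bb):
        if x != y:
            return (x > y) - (x < y)  # stand-in for Python 2's cmp()
    return 0

assert compare_version_py3("12.345.67.89", "2.987.88.99") > 0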
<SYSTEM_TASK:> Create CMake instance and execute configure step <END_TASK> <USER_TASK:> Description: def configure_cmake(self): """Create CMake instance and execute configure step """
cmake = CMake(self) cmake.definitions["FLATBUFFERS_BUILD_TESTS"] = False cmake.definitions["FLATBUFFERS_BUILD_SHAREDLIB"] = self.options.shared cmake.definitions["FLATBUFFERS_BUILD_FLATLIB"] = not self.options.shared cmake.configure() return cmake
<SYSTEM_TASK:> Collect built libraries names and solve flatc path. <END_TASK> <USER_TASK:> Description: def package_info(self): """Collect built libraries names and solve flatc path. """
self.cpp_info.libs = tools.collect_libs(self) self.user_info.flatc = os.path.join(self.package_folder, "bin", "flatc")
<SYSTEM_TASK:> Offset provides access into the Table's vtable. <END_TASK> <USER_TASK:> Description: def Offset(self, vtableOffset): """Offset provides access into the Table's vtable. Deprecated fields are ignored by checking the vtable's length."""
vtable = self.Pos - self.Get(N.SOffsetTFlags, self.Pos) vtableEnd = self.Get(N.VOffsetTFlags, vtable) if vtableOffset < vtableEnd: return self.Get(N.VOffsetTFlags, vtable + vtableOffset) return 0
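To illustrate how this is consumed, flatbuffers-generated accessors typically pair `Offset` with `Get`; the field slot (4), the Int32 type, and the default of 100 below are arbitrary examples, not from the original code.

from flatbuffers import number_types as N

def monster_hp(table):
    # Field slots start at vtable offset 4; the first two VOffsetT entries
    # hold the vtable size and the object size.
    o = table.Offset(4)
    if o != 0:
        return table.Get(N.Int32Flags, o + table.Pos)
    return 100  # schema default when the field was never written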
<SYSTEM_TASK:> Indirect retrieves the relative offset stored at `offset`. <END_TASK> <USER_TASK:> Description: def Indirect(self, off): """Indirect retrieves the relative offset stored at `offset`."""
N.enforce_number(off, N.UOffsetTFlags) return off + encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
<SYSTEM_TASK:> String gets a string from data stored inside the flatbuffer. <END_TASK> <USER_TASK:> Description: def String(self, off): """String gets a string from data stored inside the flatbuffer."""
N.enforce_number(off, N.UOffsetTFlags) off += encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off) start = off + N.UOffsetTFlags.bytewidth length = encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off) return bytes(self.Bytes[start:start+length])
<SYSTEM_TASK:> VectorLen retrieves the length of the vector whose offset is stored <END_TASK> <USER_TASK:> Description: def VectorLen(self, off): """VectorLen retrieves the length of the vector whose offset is stored at "off" in this object."""
N.enforce_number(off, N.UOffsetTFlags) off += self.Pos off += encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off) ret = encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off) return ret
<SYSTEM_TASK:> Vector retrieves the start of data of the vector whose offset is <END_TASK> <USER_TASK:> Description: def Vector(self, off): """Vector retrieves the start of data of the vector whose offset is stored at "off" in this object."""
N.enforce_number(off, N.UOffsetTFlags) off += self.Pos x = off + self.Get(N.UOffsetTFlags, off) # data starts after metadata containing the vector length x += N.UOffsetTFlags.bytewidth return x
<SYSTEM_TASK:> Union initializes any Table-derived type to point to the union at <END_TASK> <USER_TASK:> Description: def Union(self, t2, off): """Union initializes any Table-derived type to point to the union at the given offset."""
assert type(t2) is Table N.enforce_number(off, N.UOffsetTFlags) off += self.Pos t2.Pos = off + self.Get(N.UOffsetTFlags, off) t2.Bytes = self.Bytes
<SYSTEM_TASK:> Get retrieves a value of the type specified by `flags` at the <END_TASK> <USER_TASK:> Description: def Get(self, flags, off): """ Get retrieves a value of the type specified by `flags` at the given offset. """
N.enforce_number(off, N.UOffsetTFlags) return flags.py_type(encode.Get(flags.packer_type, self.Bytes, off))
<SYSTEM_TASK:> GetVOffsetTSlot retrieves the VOffsetT that the given vtable location <END_TASK> <USER_TASK:> Description: def GetVOffsetTSlot(self, slot, d): """ GetVOffsetTSlot retrieves the VOffsetT that the given vtable location points to. If the vtable value is zero, the default value `d` will be returned. """
N.enforce_number(slot, N.VOffsetTFlags) N.enforce_number(d, N.VOffsetTFlags) off = self.Offset(slot) if off == 0: return d return off
<SYSTEM_TASK:> Script that finds and runs flatc built from source. <END_TASK> <USER_TASK:> Description: def main(): """Script that finds and runs flatc built from source."""
if len(sys.argv) < 2: sys.stderr.write('Usage: run_flatc.py flatbuffers_dir [flatc_args]\n') return 1 cwd = os.getcwd() flatc = '' flatbuffers_dir = sys.argv[1] for path in FLATC_SEARCH_PATHS: current = os.path.join(flatbuffers_dir, path, 'flatc' + EXECUTABLE_EXTENSION) if os.path.exists(current): flatc = current break if not flatc: sys.stderr.write('flatc not found\n') return 1 command = [flatc] + sys.argv[2:] return subprocess.call(command)
<SYSTEM_TASK:> Returns the numpy module if it exists on the system, <END_TASK> <USER_TASK:> Description: def import_numpy(): """ Returns the numpy module if it exists on the system, otherwise returns None. """
try: imp.find_module('numpy') numpy_exists = True except ImportError: numpy_exists = False if numpy_exists: # We do this outside of try/except block in case numpy exists # but is not installed correctly. We do not want to catch an # incorrect installation which would manifest as an # ImportError. import numpy as np else: np = None return np
<SYSTEM_TASK:> vtableEqual compares an unwritten vtable to a written vtable. <END_TASK> <USER_TASK:> Description: def vtableEqual(a, objectStart, b): """vtableEqual compares an unwritten vtable to a written vtable."""
N.enforce_number(objectStart, N.UOffsetTFlags) if len(a) * N.VOffsetTFlags.bytewidth != len(b): return False for i, elem in enumerate(a): x = encode.Get(packer.voffset, b, i * N.VOffsetTFlags.bytewidth) # Skip vtable entries that indicate a default value. if x == 0 and elem == 0: pass else: y = objectStart - elem if x != y: return False return True
<SYSTEM_TASK:> StartObject initializes bookkeeping for writing a new object. <END_TASK> <USER_TASK:> Description: def StartObject(self, numfields): """StartObject initializes bookkeeping for writing a new object."""
self.assertNotNested() # use 32-bit offsets so that arithmetic doesn't overflow. self.current_vtable = [0 for _ in range_func(numfields)] self.objectEnd = self.Offset() self.nested = True
<SYSTEM_TASK:> WriteVtable serializes the vtable for the current object, if needed. <END_TASK> <USER_TASK:> Description: def WriteVtable(self): """ WriteVtable serializes the vtable for the current object, if needed. Before writing out the vtable, this checks pre-existing vtables for equality to this one. If an equal vtable is found, point the object to the existing vtable and return. Because vtable values are sensitive to alignment of object data, not all logically-equal vtables will be deduplicated. A vtable has the following format: <VOffsetT: size of the vtable in bytes, including this value> <VOffsetT: size of the object in bytes, including the vtable offset> <VOffsetT: offset for a field> * N, where N is the number of fields in the schema for this type. Includes deprecated fields. Thus, a vtable is made of 2 + N elements, each VOffsetT bytes wide. An object has the following format: <SOffsetT: offset to this object's vtable (may be negative)> <byte: data>+ """
# Prepend a zero scalar to the object. Later in this function we'll # write an offset here that points to the object's vtable: self.PrependSOffsetTRelative(0) objectOffset = self.Offset() existingVtable = None # Trim trailing 0 offsets. while self.current_vtable and self.current_vtable[-1] == 0: self.current_vtable.pop() # Search backwards through existing vtables, because similar vtables # are likely to have been recently appended. See # BenchmarkVtableDeduplication for a case in which this heuristic # saves about 30% of the time used in writing objects with duplicate # tables. i = len(self.vtables) - 1 while i >= 0: # Find the other vtable, which is associated with `i`: vt2Offset = self.vtables[i] vt2Start = len(self.Bytes) - vt2Offset vt2Len = encode.Get(packer.voffset, self.Bytes, vt2Start) metadata = VtableMetadataFields * N.VOffsetTFlags.bytewidth vt2End = vt2Start + vt2Len vt2 = self.Bytes[vt2Start+metadata:vt2End] # Compare the other vtable to the one under consideration. # If they are equal, store the offset and break: if vtableEqual(self.current_vtable, objectOffset, vt2): existingVtable = vt2Offset break i -= 1 if existingVtable is None: # Did not find a vtable, so write this one to the buffer. # Write out the current vtable in reverse , because # serialization occurs in last-first order: i = len(self.current_vtable) - 1 while i >= 0: off = 0 if self.current_vtable[i] != 0: # Forward reference to field; # use 32bit number to ensure no overflow: off = objectOffset - self.current_vtable[i] self.PrependVOffsetT(off) i -= 1 # The two metadata fields are written last. # First, store the object bytesize: objectSize = UOffsetTFlags.py_type(objectOffset - self.objectEnd) self.PrependVOffsetT(VOffsetTFlags.py_type(objectSize)) # Second, store the vtable bytesize: vBytes = len(self.current_vtable) + VtableMetadataFields vBytes *= N.VOffsetTFlags.bytewidth self.PrependVOffsetT(VOffsetTFlags.py_type(vBytes)) # Next, write the offset to the new vtable in the # already-allocated SOffsetT at the beginning of this object: objectStart = SOffsetTFlags.py_type(len(self.Bytes) - objectOffset) encode.Write(packer.soffset, self.Bytes, objectStart, SOffsetTFlags.py_type(self.Offset() - objectOffset)) # Finally, store this vtable in memory for future # deduplication: self.vtables.append(self.Offset()) else: # Found a duplicate vtable. objectStart = SOffsetTFlags.py_type(len(self.Bytes) - objectOffset) self.head = UOffsetTFlags.py_type(objectStart) # Write the offset to the found vtable in the # already-allocated SOffsetT at the beginning of this object: encode.Write(packer.soffset, self.Bytes, self.Head(), SOffsetTFlags.py_type(existingVtable - objectOffset)) self.current_vtable = None return objectOffset
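In practice `WriteVtable` is reached through the public Builder API (`EndObject` calls it); a small sketch, assuming the standard flatbuffers Python Builder, that also exercises the deduplication path described above:

import flatbuffers

builder = flatbuffers.Builder(0)

def make_point(x, y):
    builder.StartObject(2)              # two field slots in current_vtable
    builder.PrependInt32Slot(0, x, 0)   # records the field offset via Slot(0)
    builder.PrependInt32Slot(1, y, 0)
    return builder.EndObject()          # EndObject invokes WriteVtable

a = make_point(1, 2)
b = make_point(3, 4)
# Both objects have the same layout, so the second WriteVtable call should
# find the first vtable in builder.vtables and point object b at it.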
<SYSTEM_TASK:> Pad places zeros at the current offset. <END_TASK> <USER_TASK:> Description: def Pad(self, n): """Pad places zeros at the current offset."""
for i in range_func(n): self.Place(0, N.Uint8Flags)
<SYSTEM_TASK:> PrependSOffsetTRelative prepends an SOffsetT, relative to where it <END_TASK> <USER_TASK:> Description: def PrependSOffsetTRelative(self, off): """ PrependSOffsetTRelative prepends an SOffsetT, relative to where it will be written. """
# Ensure alignment is already done: self.Prep(N.SOffsetTFlags.bytewidth, 0) if not (off <= self.Offset()): msg = "flatbuffers: Offset arithmetic error." raise OffsetArithmeticError(msg) off2 = self.Offset() - off + N.SOffsetTFlags.bytewidth self.PlaceSOffsetT(off2)
<SYSTEM_TASK:> Prepends an unsigned offset into vector data, relative to where it <END_TASK> <USER_TASK:> Description: def PrependUOffsetTRelative(self, off): """Prepends an unsigned offset into vector data, relative to where it will be written. """
# Ensure alignment is already done: self.Prep(N.UOffsetTFlags.bytewidth, 0) if not (off <= self.Offset()): msg = "flatbuffers: Offset arithmetic error." raise OffsetArithmeticError(msg) off2 = self.Offset() - off + N.UOffsetTFlags.bytewidth self.PlaceUOffsetT(off2)
<SYSTEM_TASK:> StartVector initializes bookkeeping for writing a new vector. <END_TASK> <USER_TASK:> Description: def StartVector(self, elemSize, numElems, alignment): """ StartVector initializes bookkeeping for writing a new vector. A vector has the following format: - <UOffsetT: number of elements in this vector> - <T: data>+, where T is the type of elements of this vector. """
self.assertNotNested() self.nested = True self.Prep(N.Uint32Flags.bytewidth, elemSize*numElems) self.Prep(alignment, elemSize*numElems) # In case alignment > int. return self.Offset()
<SYSTEM_TASK:> EndVector writes data necessary to finish vector construction. <END_TASK> <USER_TASK:> Description: def EndVector(self, vectorNumElems): """EndVector writes data necessary to finish vector construction."""
self.assertNested() ## @cond FLATBUFFERS_INTERNAL self.nested = False ## @endcond # we already made space for this, so write without PrependUint32 self.PlaceUOffsetT(vectorNumElems) return self.Offset()
<SYSTEM_TASK:> CreateString writes a null-terminated byte string as a vector. <END_TASK> <USER_TASK:> Description: def CreateString(self, s, encoding='utf-8', errors='strict'): """CreateString writes a null-terminated byte string as a vector."""
self.assertNotNested() ## @cond FLATBUFFERS_INTERNAL self.nested = True ## @endcond if isinstance(s, compat.string_types): x = s.encode(encoding, errors) elif isinstance(s, compat.binary_types): x = s else: raise TypeError("non-string passed to CreateString") self.Prep(N.UOffsetTFlags.bytewidth, (len(x)+1)*N.Uint8Flags.bytewidth) self.Place(0, N.Uint8Flags) l = UOffsetTFlags.py_type(len(x)) ## @cond FLATBUFFERS_INTERNAL self.head = UOffsetTFlags.py_type(self.Head() - l) ## @endcond self.Bytes[self.Head():self.Head()+l] = x return self.EndVector(len(x))
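A short usage sketch; because `CreateString` asserts the builder is not nested, string offsets are created before the enclosing object is started and then referenced from a slot:

import flatbuffers

builder = flatbuffers.Builder(0)
name = builder.CreateString("orc")            # returns a UOffsetT into the buffer
builder.StartObject(1)
builder.PrependUOffsetTRelativeSlot(0, name, 0)
monster = builder.EndObject()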
<SYSTEM_TASK:> CreateByteVector writes a byte vector. <END_TASK> <USER_TASK:> Description: def CreateByteVector(self, x): """CreateByteVector writes a byte vector."""
self.assertNotNested() ## @cond FLATBUFFERS_INTERNAL self.nested = True ## @endcond if not isinstance(x, compat.binary_types): raise TypeError("non-byte vector passed to CreateByteVector") self.Prep(N.UOffsetTFlags.bytewidth, len(x)*N.Uint8Flags.bytewidth) l = UOffsetTFlags.py_type(len(x)) ## @cond FLATBUFFERS_INTERNAL self.head = UOffsetTFlags.py_type(self.Head() - l) ## @endcond self.Bytes[self.Head():self.Head()+l] = x return self.EndVector(len(x))
<SYSTEM_TASK:> CreateNumpyVector writes a numpy array into the buffer. <END_TASK> <USER_TASK:> Description: def CreateNumpyVector(self, x): """CreateNumpyVector writes a numpy array into the buffer."""
if np is None: # Numpy is required for this feature raise NumpyRequiredForThisFeature("Numpy was not found.") if not isinstance(x, np.ndarray): raise TypeError("non-numpy-ndarray passed to CreateNumpyVector") if x.dtype.kind not in ['b', 'i', 'u', 'f']: raise TypeError("numpy-ndarray holds elements of unsupported datatype") if x.ndim > 1: raise TypeError("multidimensional-ndarray passed to CreateNumpyVector") self.StartVector(x.itemsize, x.size, x.dtype.alignment) # Ensure little endian byte ordering if x.dtype.str[0] == "<": x_lend = x else: x_lend = x.byteswap(inplace=False) # Calculate total length l = UOffsetTFlags.py_type(x_lend.itemsize * x_lend.size) ## @cond FLATBUFFERS_INTERNAL self.head = UOffsetTFlags.py_type(self.Head() - l) ## @endcond # tobytes ensures c_contiguous ordering self.Bytes[self.Head():self.Head()+l] = x_lend.tobytes(order='C') return self.EndVector(x.size)
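Usage is a single call on a one-dimensional array; dtype byte order is normalized to little endian as shown above:

import flatbuffers
import numpy as np

builder = flatbuffers.Builder(0)
# Only 1-D arrays of bool/int/uint/float dtypes are accepted.
inventory = builder.CreateNumpyVector(np.array([1, 2, 3], dtype=np.uint8))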
<SYSTEM_TASK:> Structs are always stored inline, so need to be created right <END_TASK> <USER_TASK:> Description: def assertStructIsInline(self, obj): """ Structs are always stored inline, so need to be created right where they are used. You'll get this error if you created it elsewhere. """
N.enforce_number(obj, N.UOffsetTFlags) if obj != self.Offset(): msg = ("flatbuffers: Tried to write a Struct at an Offset that " "is different from the current Offset of the Builder.") raise StructIsNotInlineError(msg)
<SYSTEM_TASK:> Slot sets the vtable key `voffset` to the current location in the <END_TASK> <USER_TASK:> Description: def Slot(self, slotnum): """ Slot sets the vtable key `voffset` to the current location in the buffer. """
self.assertNested() self.current_vtable[slotnum] = self.Offset()
<SYSTEM_TASK:> Finish finalizes a buffer, pointing to the given `rootTable`. <END_TASK> <USER_TASK:> Description: def __Finish(self, rootTable, sizePrefix): """Finish finalizes a buffer, pointing to the given `rootTable`."""
N.enforce_number(rootTable, N.UOffsetTFlags) prepSize = N.UOffsetTFlags.bytewidth if sizePrefix: prepSize += N.Int32Flags.bytewidth self.Prep(self.minalign, prepSize) self.PrependUOffsetTRelative(rootTable) if sizePrefix: size = len(self.Bytes) - self.Head() N.enforce_number(size, N.Int32Flags) self.PrependInt32(size) self.finished = True return self.Head()
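`__Finish` sits behind the public finishing call; a minimal end-to-end sketch with the standard Builder API (the size-prefixed variant simply sets `sizePrefix` to True):

import flatbuffers

builder = flatbuffers.Builder(0)
builder.StartObject(0)
root = builder.EndObject()
builder.Finish(root)       # delegates to __Finish(root, sizePrefix=False)
buf = builder.Output()     # finished buffer; the root offset is written first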
<SYSTEM_TASK:> PrependUOffsetTRelativeSlot prepends an UOffsetT onto the object at <END_TASK> <USER_TASK:> Description: def PrependUOffsetTRelativeSlot(self, o, x, d): """ PrependUOffsetTRelativeSlot prepends an UOffsetT onto the object at vtable slot `o`. If value `x` equals default `d`, then the slot will be set to zero and no other data will be written. """
if x != d: self.PrependUOffsetTRelative(x) self.Slot(o)
<SYSTEM_TASK:> PrependStructSlot prepends a struct onto the object at vtable slot `o`. <END_TASK> <USER_TASK:> Description: def PrependStructSlot(self, v, x, d): """ PrependStructSlot prepends a struct onto the object at vtable slot `o`. Structs are stored inline, so nothing additional is being added. In generated code, `d` is always 0. """
N.enforce_number(d, N.UOffsetTFlags) if x != d: self.assertStructIsInline(x) self.Slot(v)
<SYSTEM_TASK:> Place prepends a value specified by `flags` to the Builder, <END_TASK> <USER_TASK:> Description: def Place(self, x, flags): """ Place prepends a value specified by `flags` to the Builder, without checking for available space. """
N.enforce_number(x, flags) self.head = self.head - flags.bytewidth encode.Write(flags.packer_type, self.Bytes, self.Head(), x)
<SYSTEM_TASK:> PlaceVOffsetT prepends a VOffsetT to the Builder, without checking <END_TASK> <USER_TASK:> Description: def PlaceVOffsetT(self, x): """PlaceVOffsetT prepends a VOffsetT to the Builder, without checking for space. """
N.enforce_number(x, N.VOffsetTFlags) self.head = self.head - N.VOffsetTFlags.bytewidth encode.Write(packer.voffset, self.Bytes, self.Head(), x)
<SYSTEM_TASK:> PlaceSOffsetT prepends a SOffsetT to the Builder, without checking <END_TASK> <USER_TASK:> Description: def PlaceSOffsetT(self, x): """PlaceSOffsetT prepends a SOffsetT to the Builder, without checking for space. """
N.enforce_number(x, N.SOffsetTFlags) self.head = self.head - N.SOffsetTFlags.bytewidth encode.Write(packer.soffset, self.Bytes, self.Head(), x)
<SYSTEM_TASK:> PlaceUOffsetT prepends a UOffsetT to the Builder, without checking <END_TASK> <USER_TASK:> Description: def PlaceUOffsetT(self, x): """PlaceUOffsetT prepends a UOffsetT to the Builder, without checking for space. """
N.enforce_number(x, N.UOffsetTFlags) self.head = self.head - N.UOffsetTFlags.bytewidth encode.Write(packer.uoffset, self.Bytes, self.Head(), x)
<SYSTEM_TASK:> r"""Return full path to the user-shared data dir for this application. <END_TASK> <USER_TASK:> Description: def site_data_dir(appname=None, appauthor=None, version=None, multipath=False): r"""Return full path to the user-shared data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be "<major>.<minor>". Only applied when appname is present. "multipath" is an optional parameter only applicable to *nix which indicates that the entire list of data dirs should be returned. By default, the first item from XDG_DATA_DIRS is returned, or '/usr/local/share/<AppName>', if XDG_DATA_DIRS is not set Typical site data directories are: Mac OS X: /Library/Application Support/<AppName> Unix: /usr/local/share/<AppName> or /usr/share/<AppName> Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName> Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7. For Unix, this is using the $XDG_DATA_DIRS[0] default. WARNING: Do not use this on Windows. See the Vista-Fail note above for why. """
if system == "win32": if appauthor is None: appauthor = appname path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) if appname: if appauthor is not False: path = os.path.join(path, appauthor, appname) else: path = os.path.join(path, appname) elif system == 'darwin': path = os.path.expanduser('/Library/Application Support') if appname: path = os.path.join(path, appname) else: # XDG default for $XDG_DATA_DIRS # only first, if multipath is False path = os.getenv('XDG_DATA_DIRS', os.pathsep.join(['/usr/local/share', '/usr/share'])) pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] if appname: if version: appname = os.path.join(appname, version) pathlist = [os.sep.join([x, appname]) for x in pathlist] if multipath: path = os.pathsep.join(pathlist) else: path = pathlist[0] return path if appname and version: path = os.path.join(path, version) return path
<SYSTEM_TASK:> r"""Return full path to the user-specific config dir for this application. <END_TASK> <USER_TASK:> Description: def user_config_dir(appname=None, appauthor=None, version=None, roaming=False): r"""Return full path to the user-specific config dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be "<major>.<minor>". Only applied when appname is present. "roaming" (boolean, default False) can be set True to use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> for a discussion of issues. Typical user config directories are: Mac OS X: same as user_data_dir Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined Win *: same as user_data_dir For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. That means, by default "~/.config/<AppName>". """
if system in ["win32", "darwin"]: path = user_data_dir(appname, appauthor, None, roaming) else: path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")) if appname: path = os.path.join(path, appname) if appname and version: path = os.path.join(path, version) return path
<SYSTEM_TASK:> r"""Sends a GET request. <END_TASK> <USER_TASK:> Description: def get(url, params=None, **kwargs): r"""Sends a GET request. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary, list of tuples or bytes to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """
kwargs.setdefault('allow_redirects', True) return request('get', url, params=params, **kwargs)
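A typical call; `params` are encoded into the URL's query string, and keyword arguments such as `timeout` are forwarded to `request`:

import requests

resp = requests.get("https://httpbin.org/get", params={"q": "flatbuffers"}, timeout=5)
resp.raise_for_status()      # raise on 4xx/5xx responses
data = resp.json()           # decoded JSON body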
<SYSTEM_TASK:> Writes out dict as toml to a file <END_TASK> <USER_TASK:> Description: def dump(o, f): """Writes out dict as toml to a file Args: o: Object to dump into toml f: File descriptor where the toml should be stored Returns: String containing the toml corresponding to dictionary Raises: TypeError: When anything other than file descriptor is passed """
if not f.write: raise TypeError("You can only dump an object to a file descriptor") d = dumps(o) f.write(d) return d
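Typical usage, assuming the module is importable as `toml`; the returned string mirrors what was written to the file:

import toml

config = {"server": {"host": "127.0.0.1", "ports": [8001, 8002]}}
with open("config.toml", "w") as f:
    toml_text = toml.dump(config, f)   # writes the file and returns the TOML text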