text_prompt
stringlengths
100
17.7k
code_prompt
stringlengths
7
9.86k
def _handle_final_metric_data(self, data):
    """Forward a trial's final result to the tuner.

    Customized (user-submitted) parameter ids are routed to
    ``receive_customized_trial_result``; all others go to
    ``receive_trial_result``.
    """
    param_id = data['parameter_id']
    final_value = data['value']
    if param_id in _customized_parameter_ids:
        handler = self.tuner.receive_customized_trial_result
    else:
        handler = self.tuner.receive_trial_result
    handler(param_id, _trial_params[param_id], final_value)
def _handle_intermediate_metric_data(self, data):
    """Call assessor to process intermediate results.

    Parameters
    ----------
    data : dict
        Metric payload with 'type', 'trial_job_id', 'sequence' and 'value'.
    """
    if data['type'] != 'PERIODICAL':
        return
    if self.assessor is None:
        return

    trial_job_id = data['trial_job_id']
    if trial_job_id in _ended_trials:
        return

    history = _trial_history[trial_job_id]
    history[data['sequence']] = data['value']
    ordered_history = _sort_history(history)
    if len(ordered_history) < data['sequence']:  # no user-visible update since last time
        return

    try:
        result = self.assessor.assess_trial(trial_job_id, ordered_history)
    except Exception:
        # Bug fix: previously execution fell through with `result` unbound,
        # raising NameError below. A failing assessor now skips this report.
        _logger.exception('Assessor error')
        return

    if isinstance(result, bool):
        result = AssessResult.Good if result else AssessResult.Bad
    elif not isinstance(result, AssessResult):
        msg = 'Result of Assessor.assess_trial must be an object of AssessResult, not %s'
        raise RuntimeError(msg % type(result))

    if result is AssessResult.Bad:
        _logger.debug('BAD, kill %s', trial_job_id)
        send(CommandType.KillTrialJob, json_tricks.dumps(trial_job_id))
        # notify tuner
        _logger.debug('env var: NNI_INCLUDE_INTERMEDIATE_RESULTS: [%s]',
                      dispatcher_env_vars.NNI_INCLUDE_INTERMEDIATE_RESULTS)
        if dispatcher_env_vars.NNI_INCLUDE_INTERMEDIATE_RESULTS == 'true':
            self._earlystop_notify_tuner(data)
    else:
        _logger.debug('GOOD')
def _earlystop_notify_tuner(self, data):
    """Send last intermediate result as final result to tuner in case the
    trial is early stopped.
    """
    _logger.debug('Early stop notify tuner data: [%s]', data)
    data['type'] = 'FINAL'
    if not multi_thread_enabled():
        # Single-thread mode: hand the report to the worker queue.
        self.enqueue_command(CommandType.ReportMetricData, data)
    else:
        self._handle_final_metric_data(data)
def train_eval():
    """Train the model and report the final test accuracy to NNI.

    Reads the module-level ``trainloader``/``testloader`` tuples and the
    compiled ``net`` model; epoch count and batch size come from the
    global ``args`` namespace.
    """
    global trainloader
    global testloader
    global net

    (x_train, y_train) = trainloader
    (x_test, y_test) = testloader

    # train procedure
    net.fit(
        x=x_train,
        y=y_train,
        batch_size=args.batch_size,
        validation_data=(x_test, y_test),
        epochs=args.epochs,
        shuffle=True,
        callbacks=[
            SendMetrics(),  # presumably reports per-epoch metrics to NNI -- confirm against its definition
            EarlyStopping(min_delta=0.001, patience=10),
            TensorBoard(log_dir=TENSORBOARD_DIR),
        ],
    )

    # trial report final acc to tuner
    _, acc = net.evaluate(x_test, y_test)
    logger.debug("Final result is: %.3f", acc)
    nni.report_final_result(acc)
def get_n_r(self):
    """Return (n, r) for the next round: n shrinks and r grows by eta**i."""
    scale = self.eta ** self.i
    next_n = math.floor(self.n / scale + _epsilon)
    next_r = math.floor(self.r * scale + _epsilon)
    return next_n, next_r
def increase_i(self):
    """Advance to the next round; mark the bracket exhausted once past the last one."""
    self.i = self.i + 1
    self.no_more_trial = self.no_more_trial or self.i > self.bracket_id
def get_hyperparameter_configurations(self, num, r, searchspace_json, random_state): # pylint: disable=invalid-name
    """Randomly generate ``num`` hyperparameter configurations from the search space.

    Each configuration is tagged with the resource budget ``r`` under the
    reserved key ``_KEY``.

    Returns
    -------
    list
        ``[[params_id, params], ...]`` pairs for the generated configs.
    """
    global _KEY  # pylint: disable=global-statement
    assert self.i == 0
    configs = dict()
    for _ in range(num):
        config_id = create_bracket_parameter_id(self.bracket_id, self.i)
        config = json2paramater(searchspace_json, random_state)
        config[_KEY] = r
        configs[config_id] = config
    self._record_hyper_configs(configs)
    return [[cid, cfg] for cid, cfg in configs.items()]
<SYSTEM_TASK:> after generating one round of hyperconfigs, this function records the generated hyperconfigs, <END_TASK> <USER_TASK:> Description: def _record_hyper_configs(self, hyper_configs): """after generating one round of hyperconfigs, this function records the generated hyperconfigs, creates a dict to record the performance when those hyperconifgs are running, set the number of finished configs in this round to be 0, and increase the round number. Parameters ---------- hyper_configs: list the generated hyperconfigs """
self.hyper_configs.append(hyper_configs) self.configs_perf.append(dict()) self.num_finished_configs.append(0) self.num_configs_to_run.append(len(hyper_configs)) self.increase_i()
def conv2d(x_input, w_matrix):
    """conv2d returns a 2d convolution layer with full stride."""
    unit_strides = [1, 1, 1, 1]
    return tf.nn.conv2d(x_input, w_matrix, strides=unit_strides, padding='SAME')
def max_pool(x_input, pool_size):
    """max_pool downsamples a feature map by 2X."""
    # Window and stride are identical, so pooling regions do not overlap.
    window = [1, pool_size, pool_size, 1]
    return tf.nn.max_pool(x_input, ksize=window, strides=window, padding='SAME')
def json2space(x, oldy=None, name=NodeType.Root.value):
    """Change search space from json format to hyperopt format.

    Recursively walks the json search space and collects the generated
    hyperopt-style variable names.

    Parameters
    ----------
    x : dict | list | scalar
        Search space (sub)tree in json format.
    oldy : dict | list | None
        Previously chosen values aligned with ``x``; used to follow the
        selected branch of a 'choice' node.
    name : str
        Name prefix accumulated along the path.

    Returns
    -------
    list
        Variable names for every choice point reached.
    """
    y = list()
    if isinstance(x, dict):
        if NodeType.Type.value in x.keys():
            _type = x[NodeType.Type.value]
            name = name + '-' + _type
            if _type == 'choice':
                # Fix: compare against None with `is not` (PEP 8); `!= None`
                # can misbehave for objects overriding __eq__.
                if oldy is not None:
                    _index = oldy[NodeType.Index.value]
                    y += json2space(x[NodeType.Value.value][_index],
                                    oldy[NodeType.Value.value],
                                    name=name + '[%d]' % _index)
                else:
                    y += json2space(x[NodeType.Value.value], None, name=name)
            y.append(name)
        else:
            for key in x.keys():
                y += json2space(x[key],
                                (oldy[key] if oldy is not None else None),
                                name + "[%s]" % str(key))
    elif isinstance(x, list):
        for i, x_i in enumerate(x):
            y += json2space(x_i,
                            (oldy[i] if oldy is not None else None),
                            name + "[%d]" % i)
    else:
        pass
    return y
<SYSTEM_TASK:> Delete index information from params <END_TASK> <USER_TASK:> Description: def _split_index(params): """Delete index information from params Parameters ---------- params : dict Returns ------- result : dict """
result = {} for key in params: if isinstance(params[key], dict): value = params[key]['_value'] else: value = params[key] result[key] = value return result
def layer_distance(a, b):
    """The distance between two layers.

    Layers of different classes are maximally distant (1.0); Conv and
    Pooling layers are compared attribute-wise; any other matching class
    scores 0.0.
    """
    # pylint: disable=unidiomatic-typecheck
    if type(a) != type(b):
        return 1.0
    if is_layer(a, "Conv"):
        compared = ("filters", "kernel_size", "stride")
    elif is_layer(a, "Pooling"):
        compared = ("padding", "kernel_size", "stride")
    else:
        return 0.0
    pairs = [(getattr(a, attr), getattr(b, attr)) for attr in compared]
    return attribute_difference(pairs)
def layers_distance(list_a, list_b):
    """The distance between the layers of two neural networks.

    Computes an edit distance between the two layer sequences, where the
    substitution cost of a layer pair is ``layer_distance``.
    """
    len_a = len(list_a)
    len_b = len(list_b)
    # DP table of size (len_a+1, len_b+1). Index -1 (numpy wrap-around to
    # the LAST row/column) deliberately serves as the base row/column:
    # f[i][-1] == i + 1 is the cost of deleting i+1 layers, and likewise
    # for f[-1][j]. Do not "fix" the negative indices -- they are the
    # base case of the recurrence.
    f = np.zeros((len_a + 1, len_b + 1))
    f[-1][-1] = 0
    for i in range(-1, len_a):
        f[i][-1] = i + 1
    for j in range(-1, len_b):
        f[-1][j] = j + 1
    for i in range(len_a):
        for j in range(len_b):
            # delete, insert, or substitute (cost = layer_distance)
            f[i][j] = min(
                f[i][j - 1] + 1,
                f[i - 1][j] + 1,
                f[i - 1][j - 1] + layer_distance(list_a[i], list_b[j]),
            )
    # Answer lives at the last "real" cell, not the wrap-around base cell.
    return f[len_a - 1][len_b - 1]
def skip_connection_distance(a, b):
    """The distance between two skip-connections.

    A connection is a tuple ``(start, end, type)``; connections of
    different types are maximally distant (1.0).
    """
    start_a, end_a, type_a = a
    start_b, end_b, type_b = b
    if type_a != type_b:
        return 1.0
    span_a = abs(end_a - start_a)
    span_b = abs(end_b - start_b)
    numerator = abs(start_a - start_b) + abs(span_a - span_b)
    denominator = max(start_a, start_b) + max(span_a, span_b)
    return numerator / denominator
def skip_connections_distance(list_a, list_b):
    """The distance between the skip-connections of two neural networks.

    Solves a minimum-cost assignment between the two connection sets and
    adds 1 for every unmatched connection.
    """
    cost = np.zeros((len(list_a), len(list_b)))
    for row, conn_a in enumerate(list_a):
        for col, conn_b in enumerate(list_b):
            cost[row][col] = skip_connection_distance(conn_a, conn_b)
    matched_cost = cost[linear_sum_assignment(cost)].sum()
    unmatched = abs(len(list_a) - len(list_b))
    return matched_cost + unmatched
def vector_distance(a, b):
    """The Euclidean distance between two vectors."""
    difference = np.array(a) - np.array(b)
    return np.linalg.norm(difference)
def contain(descriptors, target_descriptor):
    """Check if the target descriptor is in the descriptors.

    Two descriptors are treated as equal when their edit distance is
    (numerically) zero.
    """
    return any(
        edit_distance(descriptor, target_descriptor) < 1e-5
        for descriptor in descriptors
    )
def incremental_fit(self, train_x, train_y):
    """Incrementally fit the regressor.

    Extends the cached pairwise distance matrix with the new samples,
    re-embeds it into a kernel matrix, and refreshes the Cholesky factor
    and alpha vector. If the new kernel is not positive-definite, the
    update is silently skipped and the previous fit is kept.
    """
    if not self._first_fitted:
        raise ValueError("The first_fit function needs to be called first.")
    train_x, train_y = np.array(train_x), np.array(train_y)
    # Incrementally compute K: build the new block matrix
    # [[old, up_right], [down_left, down_right]] from cached distances.
    up_right_k = edit_distance_matrix(self._x, train_x)
    down_left_k = np.transpose(up_right_k)
    down_right_k = edit_distance_matrix(train_x)
    up_k = np.concatenate((self._distance_matrix, up_right_k), axis=1)
    down_k = np.concatenate((down_left_k, down_right_k), axis=1)
    temp_distance_matrix = np.concatenate((up_k, down_k), axis=0)
    k_matrix = bourgain_embedding_matrix(temp_distance_matrix)
    # Jitter only the diagonal entries belonging to the NEW samples.
    diagonal = np.diag_indices_from(k_matrix)
    diagonal = (diagonal[0][-len(train_x):], diagonal[1][-len(train_x):])
    k_matrix[diagonal] += self.alpha
    try:
        self._l_matrix = cholesky(k_matrix, lower=True)  # Line 2
    except LinAlgError:
        # Not positive-definite: abandon the update, keep previous state.
        return self
    # Commit the new training data only after a successful factorization.
    self._x = np.concatenate((self._x, train_x), axis=0)
    self._y = np.concatenate((self._y, train_y), axis=0)
    self._distance_matrix = temp_distance_matrix
    self._alpha_vector = cho_solve((self._l_matrix, True), self._y)  # Line 3
    return self
def first_fit(self, train_x, train_y):
    """Fit the regressor for the first time.

    Caches the training data and distance matrix, then computes the
    Cholesky factor and alpha vector of the embedded kernel.
    """
    train_x = np.array(train_x)
    train_y = np.array(train_y)
    self._x = np.copy(train_x)
    self._y = np.copy(train_y)
    self._distance_matrix = edit_distance_matrix(self._x)
    kernel = bourgain_embedding_matrix(self._distance_matrix)
    kernel[np.diag_indices_from(kernel)] += self.alpha
    self._l_matrix = cholesky(kernel, lower=True)  # Line 2
    self._alpha_vector = cho_solve((self._l_matrix, True), self._y)  # Line 3
    self._first_fitted = True
    return self
def get_dict(self, u=None):
    """A recursive function to return the content of the tree in a dict.

    Called without ``u`` it starts at the root; each node becomes
    ``{"name": node, "children": [...]}``.
    """
    if u is None:
        return self.get_dict(self.root)
    children = [self.get_dict(child) for child in self.adj_list[u]]
    return {"name": u, "children": children}
def update_hash(self, layers: Iterable):
    """Calculation of `hash_id` of Layer. Which is determined by the
    properties of itself, and the `hash_id`s of input layers.

    Raises
    ------
    ValueError
        If any upstream layer has not had its hash generated yet.
    """
    if self.graph_type == LayerType.input.value:
        # Input layers have no upstream dependencies; their hash_id is not
        # computed here (presumably set elsewhere -- confirm).
        return
    hasher = hashlib.md5()
    # Hash depends on the layer type name, its size, and all input hashes,
    # in this exact order -- changing the order changes every hash_id.
    hasher.update(LayerType(self.graph_type).name.encode('ascii'))
    hasher.update(str(self.size).encode('ascii'))
    for i in self.input:
        if layers[i].hash_id is None:
            raise ValueError('Hash id of layer {}: {} not generated!'.format(i, layers[i]))
        hasher.update(layers[i].hash_id.encode('ascii'))
    self.hash_id = hasher.hexdigest()
def generate_new_id(self):
    """Generate a new individual id and register its event hook."""
    self.events.append(Event())
    new_id = self.indiv_counter
    self.indiv_counter = new_id + 1
    return new_id
def init_population(self, population_size, graph_max_layer, graph_min_layer):
    """Initialize populations for the evolution tuner.

    Builds one template graph (two inputs, two outputs, two attention
    layers wired crosswise) and fills the population with mutated deep
    copies of it.

    Parameters
    ----------
    population_size : int
        Number of individuals to create.
    graph_max_layer, graph_min_layer : int
        Layer-count bounds passed to the template Graph.
    """
    population = []
    # Template: node ids 0/1 are inputs, 2/3 the attention outputs feeding
    # output layers 4/5 -- the numeric ids below encode that wiring.
    graph = Graph(max_layer_num=graph_max_layer, min_layer_num=graph_min_layer,
                  inputs=[Layer(LayerType.input.value, output=[4, 5], size='x'),
                          Layer(LayerType.input.value, output=[4, 5], size='y')],
                  output=[Layer(LayerType.output.value, inputs=[4], size='x'),
                          Layer(LayerType.output.value, inputs=[5], size='y')],
                  hide=[Layer(LayerType.attention.value, inputs=[0, 1], output=[2]),
                        Layer(LayerType.attention.value, inputs=[1, 0], output=[3])])
    for _ in range(population_size):
        # Each individual starts from an independent mutated copy.
        graph_tmp = copy.deepcopy(graph)
        graph_tmp.mutation()
        population.append(Individual(indiv_id=self.generate_new_id(),
                                     graph_cfg=graph_tmp, result=None))
    return population
def _add_edge(self, layer, input_id, output_id):
    """Add a new layer to the graph. The nodes should be created in advance."""
    if layer in self.layer_to_id:
        # Layer already registered: just extend its input/output node lists
        # (a layer may connect several node pairs).
        layer_id = self.layer_to_id[layer]
        if input_id not in self.layer_id_to_input_node_ids[layer_id]:
            self.layer_id_to_input_node_ids[layer_id].append(input_id)
        if output_id not in self.layer_id_to_output_node_ids[layer_id]:
            self.layer_id_to_output_node_ids[layer_id].append(output_id)
    else:
        # New layer: assign the next id and create its bookkeeping entries.
        layer_id = len(self.layer_list)
        self.layer_list.append(layer)
        self.layer_to_id[layer] = layer_id
        self.layer_id_to_input_node_ids[layer_id] = [input_id]
        self.layer_id_to_output_node_ids[layer_id] = [output_id]
    # Record the edge in both the forward and reverse adjacency lists.
    self.adj_list[input_id].append((output_id, layer_id))
    self.reverse_adj_list[output_id].append((input_id, layer_id))
def _redirect_edge(self, u_id, v_id, new_v_id):
    """Redirect the layer to a new node.
    Change the edge originally from `u_id` to `v_id` into an edge from `u_id`
    to `new_v_id` while keeping all other property of the edge the same.
    """
    layer_id = None
    # Rewrite the forward edge in place and repoint the layer's output tensor.
    for index, edge_tuple in enumerate(self.adj_list[u_id]):
        if edge_tuple[0] == v_id:
            layer_id = edge_tuple[1]
            self.adj_list[u_id][index] = (new_v_id, layer_id)
            self.layer_list[layer_id].output = self.node_list[new_v_id]
            break

    # Drop the old reverse edge at v_id ...
    for index, edge_tuple in enumerate(self.reverse_adj_list[v_id]):
        if edge_tuple[0] == u_id:
            layer_id = edge_tuple[1]
            self.reverse_adj_list[v_id].remove(edge_tuple)
            break
    # ... and attach it to the new destination node.
    # NOTE(review): if no (u_id, v_id) edge exists, layer_id stays None and
    # the appended reverse edge is malformed -- callers must guarantee the
    # edge exists before redirecting.
    self.reverse_adj_list[new_v_id].append((u_id, layer_id))
    # Keep the layer's recorded output node ids in sync with the rewiring.
    for index, value in enumerate(self.layer_id_to_output_node_ids[layer_id]):
        if value == v_id:
            self.layer_id_to_output_node_ids[layer_id][index] = new_v_id
            break
<SYSTEM_TASK:> Replace the layer with a new layer. <END_TASK> <USER_TASK:> Description: def _replace_layer(self, layer_id, new_layer): """Replace the layer with a new layer."""
old_layer = self.layer_list[layer_id] new_layer.input = old_layer.input new_layer.output = old_layer.output new_layer.output.shape = new_layer.output_shape self.layer_list[layer_id] = new_layer self.layer_to_id[new_layer] = layer_id self.layer_to_id.pop(old_layer)
def topological_order(self):
    """Return the topological order of the node IDs from the input node to the output node.

    Kahn's algorithm: repeatedly emit nodes whose in-degree has dropped
    to zero.
    """
    in_degree = {node: 0 for node in range(self.n_nodes)}
    for u in range(self.n_nodes):
        for v, _ in self.adj_list[u]:
            in_degree[v] += 1

    ready = Queue()
    for node, degree in in_degree.items():
        if degree == 0:
            ready.put(node)

    order_list = []
    while not ready.empty():
        u = ready.get()
        order_list.append(u)
        for v, _ in self.adj_list[u]:
            in_degree[v] -= 1
            if in_degree[v] == 0:
                ready.put(v)
    return order_list
def _get_pooling_layers(self, start_node_id, end_node_id):
    """Given two node IDs, return all the pooling layers between them.

    Strided convolutions are included as well, since they also shrink the
    spatial size.
    """
    layer_list = []
    node_list = [start_node_id]
    assert self._depth_first_search(end_node_id, layer_list, node_list)
    result = []
    for layer_id in layer_list:
        candidate = self.layer_list[layer_id]
        if is_layer(candidate, "Pooling"):
            result.append(candidate)
        elif is_layer(candidate, "Conv") and candidate.stride != 1:
            result.append(candidate)
    return result
def _depth_first_search(self, target_id, layer_id_list, node_list):
    """Search for all the layers and nodes down the path.
    A recursive function to search all the layers and nodes between the node
    in the node_list and the node with target_id.

    Mutates ``layer_id_list`` and ``node_list`` in place: on success (return
    True) they contain the path from the starting node to ``target_id``; on
    failure they are restored to their state at call time.
    """
    assert len(node_list) <= self.n_nodes
    u = node_list[-1]  # current node = last one pushed onto the path
    if u == target_id:
        return True

    for v, layer_id in self.adj_list[u]:
        # Tentatively extend the path, backtrack if the branch fails.
        layer_id_list.append(layer_id)
        node_list.append(v)
        if self._depth_first_search(target_id, layer_id_list, node_list):
            return True
        layer_id_list.pop()
        node_list.pop()

    return False
def _insert_new_layers(self, new_layers, start_node_id, end_node_id):
    """Insert the new_layers after the node with start_node_id.

    The chain of new layers is built between a fresh node and
    ``end_node_id``; the existing ``start_node_id -> end_node_id`` edge is
    then redirected onto the fresh node so the chain is spliced in.
    """
    # Fresh node cloned from the end node; it becomes the chain's entry point.
    new_node_id = self._add_node(deepcopy(self.node_list[end_node_id]))
    temp_output_id = new_node_id
    # Chain all but the last new layer, each feeding the next.
    for layer in new_layers[:-1]:
        temp_output_id = self.add_layer(layer, temp_output_id)

    # The last layer connects the chain to the original end node.
    self._add_edge(new_layers[-1], temp_output_id, end_node_id)
    new_layers[-1].input = self.node_list[temp_output_id]
    new_layers[-1].output = self.node_list[end_node_id]
    # Finally reroute the old edge so traffic flows through the new chain.
    self._redirect_edge(start_node_id, end_node_id, new_node_id)
def extract_descriptor(self):
    """Extract the description of the Graph as an instance of NetworkDescriptor.

    Layers along the main chain are copied (weights stripped); edges that
    leave the main chain or jump over chain nodes are recorded as skip
    connections (Add or Concatenate).
    """
    main_chain = self.get_main_chain()
    index_in_main_chain = {}
    for index, u in enumerate(main_chain):
        index_in_main_chain[u] = index

    ret = NetworkDescriptor()
    # Copy every main-chain layer into the descriptor, without weights.
    for u in main_chain:
        for v, layer_id in self.adj_list[u]:
            if v not in index_in_main_chain:
                continue
            layer = self.layer_list[layer_id]
            copied_layer = copy(layer)
            copied_layer.weights = None
            ret.add_layer(deepcopy(copied_layer))

    for u in index_in_main_chain:
        for v, layer_id in self.adj_list[u]:
            if v not in index_in_main_chain:
                # Edge leaves the main chain: walk forward along the side
                # branch (always taking the first outgoing edge) until it
                # rejoins the chain, noting the merge type on the way.
                temp_u = u
                temp_v = v
                temp_layer_id = layer_id
                skip_type = None
                while not (temp_v in index_in_main_chain and temp_u in index_in_main_chain):
                    if is_layer(self.layer_list[temp_layer_id], "Concatenate"):
                        skip_type = NetworkDescriptor.CONCAT_CONNECT
                    if is_layer(self.layer_list[temp_layer_id], "Add"):
                        skip_type = NetworkDescriptor.ADD_CONNECT
                    temp_u = temp_v
                    temp_v, temp_layer_id = self.adj_list[temp_v][0]
                ret.add_skip_connection(
                    index_in_main_chain[u], index_in_main_chain[temp_u], skip_type
                )

            elif index_in_main_chain[v] - index_in_main_chain[u] != 1:
                # Direct edge that jumps over intermediate chain nodes.
                skip_type = None
                if is_layer(self.layer_list[layer_id], "Concatenate"):
                    skip_type = NetworkDescriptor.CONCAT_CONNECT
                if is_layer(self.layer_list[layer_id], "Add"):
                    skip_type = NetworkDescriptor.ADD_CONNECT
                ret.add_skip_connection(
                    index_in_main_chain[u], index_in_main_chain[v], skip_type
                )

    return ret
def get_main_chain_layers(self):
    """Return a list of layer IDs in the main chain.

    A layer qualifies when its edge connects two nodes that both lie on
    the main chain; ids are returned in chain order.
    """
    main_chain = self.get_main_chain()
    chain_nodes = set(main_chain)  # O(1) membership instead of O(n) list scans
    ret = []
    for u in main_chain:
        # `u` is drawn from the chain itself, so only `v` needs checking
        # (the original also re-tested `u in main_chain`, which was redundant).
        for v, layer_id in self.adj_list[u]:
            if v in chain_nodes:
                ret.append(layer_id)
    return ret
def run(self):
    """Run the tuner.
    This function will never return unless raise.
    """
    _logger.info('Start dispatcher')
    if dispatcher_env_vars.NNI_MODE == 'resume':
        self.load_checkpoint()

    while True:
        command, data = receive()
        if data:
            data = json_tricks.loads(data)

        if command is None or command is CommandType.Terminate:
            break
        if multi_thread_enabled():
            # Multi-thread mode: dispatch via the pool, and stop the loop as
            # soon as any previously submitted command has failed.
            result = self.pool.map_async(self.process_command_thread, [(command, data)])
            self.thread_results.append(result)
            if any([thread_result.ready() and not thread_result.successful() for thread_result in self.thread_results]):
                _logger.debug('Caught thread exception')
                break
        else:
            # Single-thread mode: queue the command for the worker threads;
            # stop if a worker has recorded an exception.
            self.enqueue_command(command, data)
            if self.worker_exceptions:
                break

    _logger.info('Dispatcher exiting...')
    # Signal workers to stop, then wait for them before returning.
    self.stopping = True
    if multi_thread_enabled():
        self.pool.close()
        self.pool.join()
    else:
        self.default_worker.join()
        self.assessor_worker.join()

    _logger.info('Terminated by NNI manager')
def command_queue_worker(self, command_queue):
    """Process commands in command queues.

    Loops until ``self.stopping`` is set (and, unless fast-exit is enabled,
    the queue has drained), or until processing a command raises.
    """
    while True:
        try:
            # set timeout to ensure self.stopping is checked periodically
            command, data = command_queue.get(timeout=3)
            try:
                self.process_command(command, data)
            except Exception as e:
                # Record the failure for the dispatcher loop and stop this worker.
                _logger.exception(e)
                self.worker_exceptions.append(e)
                break
        except Empty:
            # Timed out with no command -- fall through to the stop check.
            pass
        if self.stopping and (_worker_fast_exit_on_terminate or command_queue.empty()):
            break
def process_command_thread(self, request):
    """Worker thread to process a command.

    No-op unless multi-thread mode is enabled; exceptions are logged and
    re-raised so the pool records the failure.
    """
    command, data = request
    if not multi_thread_enabled():
        return
    try:
        self.process_command(command, data)
    except Exception as e:
        _logger.exception(str(e))
        raise
def assess_trial(self, trial_job_id, trial_history):
    """Assess whether a trial should be early stopped by the curve fitting algorithm.

    Parameters
    ----------
    trial_job_id : int
        trial job id
    trial_history : list
        The history performance matrix of each trial

    Returns
    -------
    AssessResult
        AssessResult.Good or AssessResult.Bad
    """
    self.trial_job_id = trial_job_id
    self.trial_history = trial_history
    if not self.set_best_performance:
        return AssessResult.Good
    curr_step = len(trial_history)
    if curr_step < self.start_step:
        return AssessResult.Good

    # Rate-limit assessments: only judge this trial every `gap` steps.
    if trial_job_id in self.last_judgment_num.keys() and curr_step - self.last_judgment_num[trial_job_id] < self.gap:
        return AssessResult.Good
    self.last_judgment_num[trial_job_id] = curr_step

    try:
        start_time = datetime.datetime.now()
        # Predict the final result
        curvemodel = CurveModel(self.target_pos)
        predict_y = curvemodel.predict(trial_history)
        # Fix: use lazy %s placeholders -- the previous comma-separated
        # arguments had no format specifiers, so logging raised internally
        # and the values never appeared in the message.
        logger.info('Prediction done. Trial job id = %s. Predict value = %s',
                    trial_job_id, predict_y)
        if predict_y is None:
            logger.info('wait for more information to predict precisely')
            return AssessResult.Good
        standard_performance = self.completed_best_performance * self.threshold

        end_time = datetime.datetime.now()
        if (end_time - start_time).seconds > 60:
            logger.warning('Curve Fitting Assessor Runtime Exceeds 60s, Trial Id = %s Trial History = %s',
                           self.trial_job_id, self.trial_history)

        if self.higher_better:
            if predict_y > standard_performance:
                return AssessResult.Good
            return AssessResult.Bad
        else:
            if predict_y < standard_performance:
                return AssessResult.Good
            return AssessResult.Bad

    except Exception:
        # Fix: previously this fell through and implicitly returned None,
        # which is not a valid AssessResult for callers; treat an internal
        # failure as "keep the trial running".
        logger.exception('unrecognize exception in curvefitting_assessor')
        return AssessResult.Good
def generate_parameters(self, parameter_id):
    """Returns a set of trial neural architecture, as a serializable object.

    Parameters
    ----------
    parameter_id : int

    Returns
    -------
    str
        JSON string describing the architecture to train.
    """
    if not self.history:
        self.init_search()

    new_father_id = None
    generated_graph = None
    # Refill the queue with a freshly generated architecture when empty.
    if not self.training_queue:
        new_father_id, generated_graph = self.generate()
        new_model_id = self.model_count
        self.model_count += 1
        self.training_queue.append((generated_graph, new_father_id, new_model_id))
        self.descriptors.append(generated_graph.extract_descriptor())

    graph, father_id, model_id = self.training_queue.pop(0)

    # from graph to json
    json_model_path = os.path.join(self.path, str(model_id) + ".json")
    json_out = graph_to_json(graph, json_model_path)
    # Remember what was handed out so the result can be matched back later.
    self.total_data[parameter_id] = (json_out, father_id, model_id)

    return json_out
def init_search(self):
    """Call the generators to generate the initial architectures for the search."""
    if self.verbose:
        logger.info("Initializing search.")
    for generator in self.generators:
        initial_graph = generator(self.n_classes, self.input_shape).generate(
            self.default_model_len, self.default_model_width
        )
        new_id = self.model_count
        self.model_count = new_id + 1
        # Father id -1 marks a root architecture with no parent.
        self.training_queue.append((initial_graph, -1, new_id))
        self.descriptors.append(initial_graph.extract_descriptor())
    if self.verbose:
        logger.info("Initialization finished.")
def generate(self):
    """Generate the next neural architecture.

    Returns
    -------
    other_info: any object
        Anything to be saved in the training queue together with the architecture.
    generated_graph: Graph
        An instance of Graph.
    """
    generated_graph, new_father_id = self.bo.generate(self.descriptors)
    if new_father_id is not None:
        return new_father_id, generated_graph
    # Bayesian optimizer produced nothing: fall back to a default
    # architecture rooted at model 0.
    fallback = self.generators[0](self.n_classes, self.input_shape)
    generated_graph = fallback.generate(self.default_model_len, self.default_model_width)
    return 0, generated_graph
def update(self, other_info, graph, metric_value, model_id):
    """Update the controller with evaluation result of a neural architecture.

    Parameters
    ----------
    other_info: any object
        In our case it is the father ID in the search tree.
    graph: Graph
        An instance of Graph. The trained neural architecture.
    metric_value: float
        The final evaluated metric value.
    model_id: int
    """
    self.bo.fit([graph.extract_descriptor()], [metric_value])
    self.bo.add_child(other_info, model_id)
def add_model(self, metric_value, model_id):
    """Add model to the history, x_queue and y_queue.

    Parameters
    ----------
    metric_value : float
    model_id : int

    Returns
    -------
    model : dict
        The appended history record ``{'model_id': ..., 'metric_value': ...}``.
    """
    if self.verbose:
        logger.info("Saving model.")

    ret = {"model_id": model_id, "metric_value": metric_value}
    self.history.append(ret)

    # Update best_model text file
    if model_id == self.get_best_model_id():
        # Fix: use a context manager so the file handle is closed even if
        # the write raises (original opened/closed manually).
        with open(os.path.join(self.path, "best_model.txt"), "w") as file:
            file.write("best model: " + str(model_id))
    return ret
def get_best_model_id(self):
    """Get the best model_id from history using the metric value."""
    maximize = self.optimize_mode is OptimizeMode.Maximize
    pick = max if maximize else min
    best = pick(self.history, key=lambda record: record["metric_value"])
    return best["model_id"]
def load_model_by_id(self, model_id):
    """Get the model by model_id.

    Parameters
    ----------
    model_id : int
        model index

    Returns
    -------
    load_model : Graph
        the model graph representation
    """
    model_path = os.path.join(self.path, str(model_id) + ".json")
    with open(model_path) as fin:
        json_str = fin.read().replace("\n", "")
    return json_to_graph(json_str)
def update_search_space(self, search_space):
    """Update the self.x_bounds and self.x_types by the search_space.json

    Parameters
    ----------
    search_space : dict

    Raises
    ------
    RuntimeError
        If the search space is not a dict, contains an unsupported
        variable type, or a 'choice' with non-numerical values.
    """
    self.x_bounds = [[] for i in range(len(search_space))]
    self.x_types = [NONE_TYPE for i in range(len(search_space))]

    for key in search_space:
        self.key_order.append(key)

    key_type = {}
    if isinstance(search_space, dict):
        for key in search_space:
            key_type = search_space[key]['_type']
            key_range = search_space[key]['_value']
            idx = self.key_order.index(key)
            if key_type == 'quniform':
                if key_range[2] == 1:
                    # Step of 1 behaves like a plain integer range.
                    self.x_bounds[idx] = [key_range[0], key_range[1]]
                    self.x_types[idx] = 'range_int'
                else:
                    # Enumerate the grid values.
                    # NOTE(review): np.arange excludes the upper bound, so
                    # key_range[1] itself is never a candidate -- confirm
                    # this is intended.
                    bounds = []
                    for value in np.arange(key_range[0], key_range[1], key_range[2]):
                        bounds.append(value)
                    self.x_bounds[idx] = bounds
                    self.x_types[idx] = 'discrete_int'
            elif key_type == 'randint':
                self.x_bounds[idx] = [0, key_range[0]]
                self.x_types[idx] = 'range_int'
            elif key_type == 'uniform':
                self.x_bounds[idx] = [key_range[0], key_range[1]]
                self.x_types[idx] = 'range_continuous'
            elif key_type == 'choice':
                self.x_bounds[idx] = key_range
                # Metis can only model numerical choices.
                for key_value in key_range:
                    if not isinstance(key_value, (int, float)):
                        raise RuntimeError("Metis Tuner only support numerical choice.")
                self.x_types[idx] = 'discrete_int'
            else:
                logger.info("Metis Tuner doesn't support this kind of variable: " + str(key_type))
                raise RuntimeError("Metis Tuner doesn't support this kind of variable: " + str(key_type))
    else:
        logger.info("The format of search space is not a dict.")
        raise RuntimeError("The format of search space is not a dict.")

    # Re-seed the starting points used by the optimizer after any change.
    self.minimize_starting_points = _rand_init(self.x_bounds, self.x_types, \
        self.selection_num_starting_points)
def generate_parameters(self, parameter_id):
    """Generate next parameter for trial.

    If the number of trial results is lower than the cold start number,
    Metis will first randomly generate some parameters. Otherwise, it
    chooses the parameters by the Gaussian Process Model and the
    Gaussian Mixture Model.

    Parameters
    ----------
    parameter_id : int

    Returns
    -------
    result : dict
    """
    if len(self.samples_x) < self.cold_start_num:
        init_parameter = _rand_init(self.x_bounds, self.x_types, 1)[0]
        results = self._pack_output(init_parameter)
    else:
        self.minimize_starting_points = _rand_init(self.x_bounds, self.x_types, \
            self.selection_num_starting_points)
        results = self._selection(self.samples_x, self.samples_y_aggregation, self.samples_y,
                                  self.x_bounds, self.x_types,
                                  threshold_samplessize_resampling=(None if self.no_resampling is True else 50),
                                  no_candidates=self.no_candidates,
                                  minimize_starting_points=self.minimize_starting_points,
                                  minimize_constraints_fun=self.minimize_constraints_fun)

    # Fix: "paramageters" typo in the log message, and lazy %s formatting
    # instead of eager string concatenation.
    logger.info("Generate parameters:\n%s", results)
    return results
def receive_trial_result(self, parameter_id, parameters, value):
    """Tuner receive result from trial.

    Parameters
    ----------
    parameter_id : int
    parameters : dict
    value : dict/float
        if value is dict, it should have "default" key.
    """
    value = extract_scalar_reward(value)
    # Metis minimizes internally; negate rewards when maximizing.
    if self.optimize_mode == OptimizeMode.Maximize:
        value = -value

    logger.info("Received trial result.")
    logger.info("value is :" + str(value))
    logger.info("parameter is : " + str(parameters))

    # parse parameter to sample_x, ordered by self.key_order
    sample_x = [0 for i in range(len(self.key_order))]
    for key in parameters:
        idx = self.key_order.index(key)
        sample_x[idx] = parameters[key]

    # parse value to sample_y
    temp_y = []
    if sample_x in self.samples_x:
        # Repeated configuration: append the new observation and refresh
        # the aggregated (median) value for this sample.
        idx = self.samples_x.index(sample_x)
        temp_y = self.samples_y[idx]
        temp_y.append(value)
        self.samples_y[idx] = temp_y

        # calculate y aggregation
        median = get_median(temp_y)
        self.samples_y_aggregation[idx] = [median]
    else:
        # First observation of this configuration.
        self.samples_x.append(sample_x)
        self.samples_y.append([value])

        # calculate y aggregation
        self.samples_y_aggregation.append([value])
def run(self):
    """Run the thread: echo every line from the pipe to the original stdout.

    If ``log_collection`` is 'none', only lines matching the metric pattern
    are enqueued for collection; everything else is dropped.
    """
    # readline returns '' at EOF, which terminates the iterator.
    for line in iter(self.pipeReader.readline, ''):
        self.orig_stdout.write(line.rstrip() + '\n')
        self.orig_stdout.flush()
        if self.log_collection == 'none':
            # If the line does not match the metric pattern, do not enqueue it.
            if not self.log_pattern.match(line):
                continue
        self.queue.put(line)
    self.pipeReader.close()
def extract_scalar_reward(value, scalar_key='default'):
    """Extract a scalar reward from a trial result.

    Parameters
    ----------
    value : int, float or dict
        The trial result; if a dict, ``value[scalar_key]`` must be numeric.
    scalar_key : str
        Key to read when ``value`` is a dict (default ``'default'``).

    Returns
    -------
    int or float
        The scalar reward.

    Raises
    ------
    RuntimeError
        If the result is neither a number nor a dict with a numeric
        ``scalar_key`` entry.
    """
    # Use a single isinstance() call with a type tuple instead of chained checks.
    if isinstance(value, (float, int)):
        reward = value
    elif isinstance(value, dict) and scalar_key in value and isinstance(value[scalar_key], (float, int)):
        reward = value[scalar_key]
    else:
        raise RuntimeError('Incorrect final result: the final result should be float/int, or a dict which has a key named "default" whose value is float/int.')
    return reward
def convert_dict2tuple(value):
    """Recursively convert a dict into a sorted tuple of (key, value) pairs.

    Tuples are hashable, so this makes nested-dict configurations usable as
    dictionary keys or set members. Non-dict values are returned unchanged.
    Note: nested dicts inside *value* are converted in place before the
    enclosing dict is turned into a tuple.
    """
    if not isinstance(value, dict):
        return value
    # Convert every nested value first, then freeze this level.
    for key in value:
        value[key] = convert_dict2tuple(value[key])
    return tuple(sorted(value.items()))
def get_config(self, budget):
    """Sample a new configuration for the given budget.

    Called inside BOHB to query a new configuration.

    Parameters
    ----------
    budget : float
        The budget for which this configuration is scheduled.

    Returns
    -------
    dict
        A valid configuration including the 'TRIAL_BUDGET' entry.
    """
    logger.debug('start sampling a new configuration.')
    sample = None
    info_dict = {}
    # If no model is available, sample from the prior;
    # also mix in a fraction of purely random configs for exploration.
    if len(self.kde_models.keys()) == 0 or np.random.rand() < self.random_fraction:
        sample = self.configspace.sample_configuration()
        info_dict['model_based_pick'] = False
    if sample is None:
        # Model-based pick using the KDE fitted on the largest budget.
        sample, info_dict = self.sample_from_largest_budget(info_dict)
    # Drop hyperparameters whose conditions are not active in this sample.
    sample = ConfigSpace.util.deactivate_inactive_hyperparameters(
        configuration_space=self.configspace,
        configuration=sample.get_dictionary()
    ).get_dictionary()
    logger.debug('done sampling a new configuration.')
    sample['TRIAL_BUDGET'] = budget
    return sample
def new_result(self, loss, budget, parameters, update_model=True):
    """Register a finished run with its loss.

    Every time a run finishes, this should be called to record the loss and,
    when enough data is available, refit the good/bad KDE models for the budget.

    Parameters
    ----------
    loss : float
        The loss of the parameters (None is treated as a crashed run).
    budget : float
        The budget of the parameters.
    parameters : dict
        The parameters of this trial.
    update_model : bool
        Whether to use this result to update the BO model.

    Returns
    -------
    None
    """
    if loss is None:
        # One could skip crashed results, but we decided to
        # assign a +inf loss and count them as bad configurations.
        loss = np.inf
    if budget not in self.configs.keys():
        self.configs[budget] = []
        self.losses[budget] = []
    # skip model building if we already have a model for a bigger budget
    if max(list(self.kde_models.keys()) + [-np.inf]) > budget:
        return
    # We want a numerical representation of the configuration in the original space.
    conf = ConfigSpace.Configuration(self.configspace, parameters)
    self.configs[budget].append(conf.get_array())
    self.losses[budget].append(loss)
    # skip model building:
    # a) if not enough points are available
    if len(self.configs[budget]) <= self.min_points_in_model - 1:
        logger.debug("Only %i run(s) for budget %f available, need more than %s \
-> can't build model!"%(len(self.configs[budget]), budget, self.min_points_in_model+1))
        return
    # b) during warm starting, when we feed previous results in and only update once
    if not update_model:
        return
    train_configs = np.array(self.configs[budget])
    train_losses = np.array(self.losses[budget])
    # Split observations into the top_n_percent "good" and the rest "bad".
    n_good = max(self.min_points_in_model, (self.top_n_percent * train_configs.shape[0])//100)
    n_bad = max(self.min_points_in_model, ((100-self.top_n_percent)*train_configs.shape[0])//100)
    # Refit KDE for the current budget
    idx = np.argsort(train_losses)
    train_data_good = self.impute_conditional_data(train_configs[idx[:n_good]])
    train_data_bad = self.impute_conditional_data(train_configs[idx[n_good:n_good+n_bad]])
    # KDE needs more samples than dimensions; otherwise skip fitting.
    if train_data_good.shape[0] <= train_data_good.shape[1]:
        return
    if train_data_bad.shape[0] <= train_data_bad.shape[1]:
        return
    # more expensive cross-validation method:
    # bw_estimation = 'cv_ls'
    # quick rule of thumb
    bw_estimation = 'normal_reference'
    bad_kde = sm.nonparametric.KDEMultivariate(data=train_data_bad, var_type=self.kde_vartypes, bw=bw_estimation)
    good_kde = sm.nonparametric.KDEMultivariate(data=train_data_good, var_type=self.kde_vartypes, bw=bw_estimation)
    # Lower-bound the bandwidths to avoid degenerate (spiky) KDEs.
    bad_kde.bw = np.clip(bad_kde.bw, self.min_bandwidth, None)
    good_kde.bw = np.clip(good_kde.bw, self.min_bandwidth, None)
    self.kde_models[budget] = {
        'good': good_kde,
        'bad' : bad_kde
    }
    # update probs for the categorical parameters for later sampling
    logger.debug('done building a new model for budget %f based on %i/%i split\nBest loss for this budget:%f\n' %(budget, n_good, n_bad, np.min(train_losses)))
def f_comb(self, pos, sample):
    """Return the value of the combined model f_comb at epoch = pos.

    Parameters
    ----------
    pos : int
        The epoch number of the position to predict.
    sample : list
        A (1 * NUM_OF_FUNCTIONS) weight vector {w1, w2, ... wk}.

    Returns
    -------
    float
        The weighted sum of every active model's prediction at pos.
    """
    # Weighted sum over the active models, pairing each weight with its model.
    active_models = self.effective_model[:self.effective_model_num]
    return sum(weight * self.predict_y(model, pos)
               for weight, model in zip(sample, active_models))
def get_hyperparameter_configurations(self, num, r, config_generator):
    """Generate `num` hyperparameter configurations via Bayesian optimization.

    Parameters
    ----------
    num : int
        Number of hyperparameter configurations to generate.
    r : budget assigned to each generated configuration
        (stored under the global _KEY in each config).
    config_generator : the BO config generator (provides get_config).

    Returns
    -------
    list
        Configurations in the format [[key1, value1], [key2, value2], ...].
    """
    global _KEY
    # Only valid at the first round of the bracket.
    assert self.i == 0
    hyperparameter_configs = dict()
    for _ in range(num):
        params_id = create_bracket_parameter_id(self.s, self.i)
        params = config_generator.get_config(r)
        params[_KEY] = r
        hyperparameter_configs[params_id] = params
    self._record_hyper_configs(hyperparameter_configs)
    return [[key, value] for key, value in hyperparameter_configs.items()]
def handle_initialize(self, data):
    """Initialize the tuner: build the BO parametric model and search space.

    Parameters
    ----------
    data : search space of this experiment.

    Raises
    ------
    ValueError
        If the search space is None.
    """
    logger.info('start to handle_initialize')
    # convert search space JSON to a ConfigSpace object
    self.handle_update_search_space(data)
    # generate BOHB config_generator using Bayesian optimization
    if self.search_space:
        self.cg = CG_BOHB(configspace=self.search_space,
                          min_points_in_model=self.min_points_in_model,
                          top_n_percent=self.top_n_percent,
                          num_samples=self.num_samples,
                          random_fraction=self.random_fraction,
                          bandwidth_factor=self.bandwidth_factor,
                          min_bandwidth=self.min_bandwidth)
    else:
        raise ValueError('Error: Search space is None')
    # generate the first bracket
    self.generate_new_bracket()
    # tell NNI manager that initialization is done
    send(CommandType.Initialized, '')
def handle_request_trial_jobs(self, data):
    """Receive the number of requested trials and generate them.

    Parameters
    ----------
    data : int
        Number of trial jobs that the NNI manager asks to generate.
    """
    # Accumulate the new request into the outstanding credit,
    # then try to dispatch one trial per unit of credit.
    self.credit += data
    pending = self.credit
    while pending > 0:
        self._request_one_trial_job()
        pending -= 1
def handle_trial_end(self, data):
    """Receive a trial-end notification and generate the next configuration.

    Parameters
    ----------
    data : dict
        Has three keys: trial_job_id (id generated by the training service),
        event (the job's state), and hyper_params (the hyperparameter string
        generated and returned by the tuner).
    """
    logger.debug('Tuner handle trial end, result is %s', data)
    hyper_params = json_tricks.loads(data['hyper_params'])
    # parameter_id encodes "<bracket>_<round>_<seq>"
    s, i, _ = hyper_params['parameter_id'].split('_')
    hyper_configs = self.brackets[int(s)].inform_trial_end(int(i))
    if hyper_configs is not None:
        # This round finished: queue the promoted configs and dispatch
        # as many trials as we have outstanding credit for.
        logger.debug(
            'bracket %s next round %s, hyper_configs: %s', s, i, hyper_configs)
        self.generated_hyper_configs = self.generated_hyper_configs + hyper_configs
        for _ in range(self.credit):
            self._request_one_trial_job()
    # Finish this bracket and generate a new bracket
    elif self.brackets[int(s)].no_more_trial:
        self.curr_s -= 1
        self.generate_new_bracket()
        for _ in range(self.credit):
            self._request_one_trial_job()
def handle_report_metric_data(self, data):
    """Receive metric data and update Bayesian optimization with the final result.

    Parameters
    ----------
    data : dict
        Object with keys 'parameter_id', 'value', 'trial_job_id', 'type',
        'sequence'.

    Raises
    ------
    ValueError
        If the metric type is not supported.
    """
    logger.debug('handle report metric data = %s', data)
    assert 'value' in data
    value = extract_scalar_reward(data['value'])
    # BO minimizes internally, so negate when maximizing.
    if self.optimize_mode is OptimizeMode.Maximize:
        reward = -value
    else:
        reward = value
    assert 'parameter_id' in data
    s, i, _ = data['parameter_id'].split('_')
    logger.debug('bracket id = %s, metrics value = %s, type = %s', s, value, data['type'])
    s = int(s)
    assert 'type' in data
    if data['type'] == 'FINAL':
        # FINAL and PERIODICAL metrics are independent, thus not comparable:
        # record the final value at the largest possible sequence number.
        assert 'sequence' in data
        self.brackets[s].set_config_perf(
            int(i), data['parameter_id'], sys.maxsize, value)
        self.completed_hyper_configs.append(data)
        _parameters = self.parameters[data['parameter_id']]
        # Strip the budget entry before feeding the parameters back to BO.
        _parameters.pop(_KEY)
        # update BO with loss, max_s budget, hyperparameters
        self.cg.new_result(loss=reward, budget=data['sequence'], parameters=_parameters, update_model=True)
    elif data['type'] == 'PERIODICAL':
        self.brackets[s].set_config_perf(
            int(i), data['parameter_id'], data['sequence'], value)
    else:
        raise ValueError(
            'Data type not supported: {}'.format(data['type']))
def data_transforms_mnist(args, mnist_mean=None, mnist_std=None):
    """Build (train, valid) torchvision transforms for the MNIST dataset.

    Parameters
    ----------
    args : parsed arguments; uses args.cutout and args.cutout_length.
    mnist_mean : list or None
        Per-channel mean for normalization; defaults to [0.5].
    mnist_std : list or None
        Per-channel std for normalization; defaults to [0.5].

    Returns
    -------
    tuple
        (train_transform, valid_transform).
    """
    # Avoid mutable default arguments by filling defaults here.
    if mnist_mean is None:
        mnist_mean = [0.5]
    if mnist_std is None:
        mnist_std = [0.5]
    train_transform = transforms.Compose(
        [
            transforms.RandomCrop(28, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mnist_mean, mnist_std),
        ]
    )
    # Optionally append Cutout augmentation on the training side only.
    if args.cutout:
        train_transform.transforms.append(Cutout(args.cutout_length))
    valid_transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize(mnist_mean, mnist_std)]
    )
    return train_transform, valid_transform
def get_mean_and_std(dataset):
    """Compute the per-channel mean and std of a dataset.

    NOTE(review): assumes 3-channel images (iterates channels 0..2) and
    averages the per-image statistics over the dataset — confirm this matches
    the intended definition of dataset std (it averages per-sample stds).
    """
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=1, shuffle=True, num_workers=2
    )
    mean = torch.zeros(3)
    std = torch.zeros(3)
    print("==> Computing mean and std..")
    for inputs, _ in dataloader:
        for i in range(3):
            mean[i] += inputs[:, i, :, :].mean()
            std[i] += inputs[:, i, :, :].std()
    # batch_size is 1, so dividing by len(dataset) averages over samples.
    mean.div_(len(dataset))
    std.div_(len(dataset))
    return mean, std
def convert_args_to_dict(call, with_lambda=False):
    """Convert all args of an AST Call into a single dict argument.

    Every key/value pair in the dict mirrors one original arg: the key is the
    arg's literal value (or its source text for non-literals) and the value is
    the arg itself (optionally wrapped in a lambda).

    Returns the AST Call node with only one arg, which is the dictionary.
    """
    keys, values = list(), list()
    for arg in call.args:
        if type(arg) in [ast.Str, ast.Num]:
            arg_value = arg
        else:
            # if arg is not a string or a number, we use its source code as the key
            arg_value = astor.to_source(arg).strip('\n"')
            arg_value = ast.Str(str(arg_value))
        # Optionally defer evaluation of the arg by wrapping it in a lambda.
        arg = make_lambda(arg) if with_lambda else arg
        keys.append(arg_value)
        values.append(arg)
    # Replace the original positional args with the single dict literal.
    del call.args[:]
    call.args.append(ast.Dict(keys=keys, values=values))
    return call
def get_args():
    """Parse command-line arguments for FashionMNIST training.

    Returns
    -------
    argparse.Namespace
        The parsed arguments.
    """
    parser = argparse.ArgumentParser("FashionMNIST")
    add = parser.add_argument  # local alias keeps the option table compact
    add("--batch_size", type=int, default=128, help="batch size")
    add("--optimizer", type=str, default="SGD", help="optimizer")
    add("--epochs", type=int, default=200, help="epoch limit")
    add("--learning_rate", type=float, default=0.001, help="learning rate")
    add("--cutout", action="store_true", default=False, help="use cutout")
    add("--cutout_length", type=int, default=8, help="cutout length")
    add("--model_path", type=str, default="./",
        help="Path to save the destination model")
    return parser.parse_args()
def train(epoch):
    """Train the global model for one epoch on the training set.

    Parameters
    ----------
    epoch : int
        Current epoch number (used only for logging).

    Returns
    -------
    float
        Training accuracy (percent) over this epoch.
    """
    # Uses module-level training state set up elsewhere in the script.
    global trainloader
    global testloader
    global net
    global criterion
    global optimizer
    logger.debug("Epoch: %d", epoch)
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        # Accumulate running loss/accuracy statistics.
        train_loss += loss.item()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()
        acc = 100.0 * correct / total
        logger.debug(
            "Loss: %.3f | Acc: %.3f%% (%d/%d)",
            train_loss / (batch_idx + 1),
            100.0 * correct / total,
            correct,
            total,
        )
    return acc
def utilization(self):
    """Percent of time over the past second the device was utilized.

    Details:
    Percent of time over the past second during which one or more kernels
    was executing on the GPU, and during which global (device) memory was
    being read or written.

    Example:
        >>> print(ctx.device(0).utilization())
        {'gpu': 4L, 'memory': 6L}
    """
    # Mirrors the C struct nvmlUtilization_t expected by NVML.
    class GpuUtilizationInfo(Structure):
        _fields_ = [
            ('gpu', c_uint),
            ('memory', c_uint),
        ]
    c_util = GpuUtilizationInfo()
    # NVML fills the struct in place; _check_return raises on NVML errors.
    _check_return(_NVML.get_function(
        "nvmlDeviceGetUtilizationRates")(self.hnd, byref(c_util)))
    return {'gpu': c_util.gpu, 'memory': c_util.memory}
def num_devices(self):
    """Return the number of GPU devices visible to NVML."""
    c_count = c_uint()
    # NVML writes the count into c_count; _check_return raises on failure.
    _check_return(_NVML.get_function(
        "nvmlDeviceGetCount_v2")(byref(c_count)))
    return c_count.value
def device(self, idx):
    """Get a specific GPU device.

    Args:
        idx: index of the device.

    Returns:
        NvidiaDevice: a single GPU device handle wrapper.
    """
    # Opaque NVML device handle; NVML only hands back pointers to it.
    class GpuDevice(Structure):
        pass
    c_nvmlDevice_t = POINTER(GpuDevice)
    c_index = c_uint(idx)
    device = c_nvmlDevice_t()
    _check_return(_NVML.get_function(
        "nvmlDeviceGetHandleByIndex_v2")(c_index, byref(device)))
    return NvidiaDevice(device)
def maybe_download_and_extract(dest_directory, cifar_classnum):
    """Download and extract the CIFAR tarball from Alex's website.

    Copied from the TensorFlow example. Skips the download if the extracted
    folder already exists in ``dest_directory``.

    Parameters
    ----------
    dest_directory : str
        Directory to download into and extract under.
    cifar_classnum : int
        Must be 10 or 100; selects CIFAR-10 vs CIFAR-100.
    """
    assert cifar_classnum == 10 or cifar_classnum == 100
    if cifar_classnum == 10:
        cifar_foldername = 'cifar-10-batches-py'
    else:
        cifar_foldername = 'cifar-100-python'
    if os.path.isdir(os.path.join(dest_directory, cifar_foldername)):
        # Already extracted; nothing to do.
        logger.info("Found cifar{} data in {}.".format(cifar_classnum, dest_directory))
        return
    else:
        DATA_URL = DATA_URL_CIFAR_10 if cifar_classnum == 10 else DATA_URL_CIFAR_100
        filename = DATA_URL[0].split('/')[-1]
        filepath = os.path.join(dest_directory, filename)
        download(DATA_URL[0], dest_directory, expect_size=DATA_URL[1])
        # Use a context manager so the tar file handle is always closed
        # (the original leaked the open TarFile object).
        with tarfile.open(filepath, 'r:gz') as tar:
            tar.extractall(dest_directory)
def build_or_reuse_placeholder(tensor_spec):
    """Build a tf.placeholder from the metadata in the given tensor spec,
    or return an existing one.

    Args:
        tensor_spec (tf.TensorSpec):

    Returns:
        tf.Tensor: a placeholder matching the spec.
    """
    g = tfv1.get_default_graph()
    name = tensor_spec.name
    try:
        # Reuse an existing placeholder with the same name if compatible.
        tensor = g.get_tensor_by_name(name + ':0')
        assert "Placeholder" in tensor.op.type, "Tensor {} exists but is not a placeholder!".format(name)
        assert tensor_spec.is_compatible_with(tensor), \
            "Tensor {} exists but is not compatible with the signature!".format(tensor)
        return tensor
    except KeyError:
        # No such tensor yet: create the placeholder at the graph root,
        # clearing any name scope it might get called in.
        with tfv1.name_scope(None):
            ret = tfv1.placeholder(
                tensor_spec.dtype, shape=tensor_spec.shape, name=tensor_spec.name)
        return ret
def dependency_of_targets(targets, op):
    """Check that op is in the subgraph induced by the dependencies of targets.

    The result is memoized. This is useful if some SessionRunHooks should be
    run only together with certain ops.

    Args:
        targets: a tuple of ops or tensors. The targets to find dependencies of.
        op (tf.Operation or tf.Tensor):

    Returns:
        bool: True if any one of `targets` depends on `op`.
    """
    # TODO tensorarray? sparsetensor?
    if isinstance(op, tf.Tensor):
        op = op.op
    assert isinstance(op, tf.Operation), op
    from tensorflow.contrib.graph_editor import get_backward_walk_ops
    # alternative implementation can use graph_util.extract_sub_graph
    dependent_ops = get_backward_walk_ops(targets, control_inputs=True)
    return op in dependent_ops
def dependency_of_fetches(fetches, op):
    """Check that op is in the subgraph induced by the dependencies of fetches.

    fetches may have a more general structure (anything sess.run accepts).

    Args:
        fetches: An argument to `sess.run`. Nested structure will affect performance.
        op (tf.Operation or tf.Tensor):

    Returns:
        bool: True if any of `fetches` depends on `op`.
    """
    try:
        from tensorflow.python.client.session import _FetchHandler as FetchHandler
        # use the graph of the op, so that this function can be called
        # without being under a default graph
        handler = FetchHandler(op.graph, fetches, {})
        targets = tuple(handler.fetches() + handler.targets())
    except ImportError:
        # Private TF API unavailable: fall back to handling simple structures.
        if isinstance(fetches, list):
            targets = tuple(fetches)
        elif isinstance(fetches, dict):
            raise ValueError("Don't know how to parse dictionary to fetch list! "
                             "This is a bug of tensorpack.")
        else:
            targets = (fetches, )
    return dependency_of_targets(targets, op)
def add_tensor_summary(x, types, name=None, collections=None, main_tower_only=True):
    """Summarize a tensor by different methods.

    Args:
        x (tf.Tensor): a tensor to summarize
        types (list[str]): summary types, can be scalar/histogram/sparsity/mean/rms
        name (str): summary name. Defaults to be the op name.
        collections (list[str]): collections of the summary ops.
        main_tower_only (bool): Only run under main training tower. If set to
            True, calling this function under other TowerContext has no effect.

    Example:

        .. code-block:: python

            with tf.name_scope('mysummaries'):  # to not mess up tensorboard
                add_tensor_summary(
                    tensor, ['histogram', 'rms', 'sparsity'], name='mytensor')
    """
    types = set(types)
    if name is None:
        name = x.op.name
    ctx = get_current_tower_context()
    # Skip summaries from replica towers to avoid duplicates.
    if main_tower_only and ctx is not None and not ctx.is_main_training_tower:
        return
    # Lambdas defer creation until the requested type is selected.
    SUMMARY_TYPES_DIC = {
        'scalar': lambda: tf.summary.scalar(name + '-summary', x, collections=collections),
        'histogram': lambda: tf.summary.histogram(name + '-histogram', x, collections=collections),
        'sparsity': lambda: tf.summary.scalar(
            name + '-sparsity', tf.nn.zero_fraction(x), collections=collections),
        'mean': lambda: tf.summary.scalar(
            name + '-mean', tf.reduce_mean(x), collections=collections),
        'rms': lambda: tf.summary.scalar(
            name + '-rms', rms(x), collections=collections)
    }
    for typ in types:
        SUMMARY_TYPES_DIC[typ]()
def add_param_summary(*summary_lists, **kwargs):
    """Add summary ops for all trainable variables matching the regex,
    under a reused 'param-summary' name scope.

    This function is a no-op if not calling from the main training tower.

    Args:
        summary_lists (list): each is (regex, [list of summary type]).
            Summary type is defined in :func:`add_tensor_summary`.
        collections (list[str]): collections of the summary ops.

    Example:

        .. code-block:: python

            add_param_summary(
                ('.*/W', ['histogram', 'rms']),
                ('.*/gamma', ['scalar']),
            )
    """
    collections = kwargs.pop('collections', None)
    assert len(kwargs) == 0, "Unknown kwargs: " + str(kwargs)
    ctx = get_current_tower_context()
    if ctx is not None and not ctx.is_main_training_tower:
        return
    params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    with cached_name_scope('param-summary'):
        for p in params:
            name = p.op.name
            for rgx, actions in summary_lists:
                # Anchor the pattern so the whole name must match.
                if not rgx.endswith('$'):
                    rgx = rgx + '$'
                if re.match(rgx, name):
                    add_tensor_summary(p, actions, name=name, collections=collections)
def add_moving_summary(*args, **kwargs):
    """Summarize the moving average for scalar tensors.

    This function is a no-op if not calling from the main training tower.

    Args:
        args: scalar tensors to summarize
        decay (float): the decay rate. Defaults to 0.95.
        collection (str or None): the name of the collection to add
            EMA-maintaining ops. The default will work together with the
            default :class:`MovingAverageSummary` callback.
        summary_collections ([str]): the names of collections to add the
            summary op. Default is TF's default (`tf.GraphKeys.SUMMARIES`).

    Returns:
        [tf.Tensor]: list of tensors returned by assign_moving_average,
            which can be used to maintain the EMA.
    """
    decay = kwargs.pop('decay', 0.95)
    coll = kwargs.pop('collection', MOVING_SUMMARY_OPS_KEY)
    summ_coll = kwargs.pop('summary_collections', None)
    assert len(kwargs) == 0, "Unknown arguments: " + str(kwargs)
    ctx = get_current_tower_context()
    # allow ctx to be none
    if ctx is not None and not ctx.is_main_training_tower:
        return []
    graph = tf.get_default_graph()
    try:
        control_flow_ctx = graph._get_control_flow_context()
        # XLA does not support summaries anyway.
        # However, this function will generate unnecessary dependency edges,
        # which makes the tower function harder to compile under XLA, so we skip it.
        if control_flow_ctx is not None and control_flow_ctx.IsXLAContext():
            return
    except Exception:
        pass
    if tf.get_variable_scope().reuse is True:
        logger.warn("add_moving_summary() called under reuse=True scope, ignored.")
        return []
    for x in args:
        assert isinstance(x, (tf.Tensor, tf.Variable)), x
        assert x.get_shape().ndims == 0, \
            "add_moving_summary() only accepts scalar tensor! Got one with {}".format(x.get_shape())
    ema_ops = []
    for c in args:
        # Strip the tower prefix so all towers share the same summary name.
        name = re.sub('tower[0-9]+/', '', c.op.name)
        with tf.name_scope(None):
            if not c.dtype.is_floating:
                c = tf.cast(c, tf.float32)
            # assign_moving_average creates variables with op names,
            # therefore clear the name scope first.
            with _enter_vs_reuse_ns('EMA') as vs:
                ema_var = tf.get_variable(name, shape=c.shape, dtype=c.dtype,
                                          initializer=tf.constant_initializer(),
                                          trainable=False)
                ns = vs.original_name_scope
            with tf.name_scope(ns):   # reuse VS&NS so that EMA_1 won't appear
                ema_op = moving_averages.assign_moving_average(
                    ema_var, c, decay,
                    zero_debias=True, name=name + '_EMA_apply')
            ema_ops.append(ema_op)
        with tf.name_scope(None):
            # write the EMA value as a summary
            tf.summary.scalar(
                name + '-summary', ema_op,
                collections=summ_coll)
    if coll is not None:
        for op in ema_ops:
            tf.add_to_collection(coll, op)
    return ema_ops
def export_serving(model_path):
    """Export a trained model for use in TensorFlow Serving or cloudML.

    Args:
        model_path: path to the trained model checkpoint.
    """
    # The serving graph consumes/produces encoded image bytes, not arrays.
    pred_config = PredictConfig(
        session_init=get_model_loader(model_path),
        model=InferenceOnlyModel(),
        input_names=['input_img_bytes'],
        output_names=['prediction_img_bytes'])
    ModelExporter(pred_config).export_serving('/tmp/exported')
def export_compact(model_path):
    """Export a trained model as a frozen and pruned inference graph
    for mobile applications.

    Args:
        model_path: path to the trained model checkpoint.
    """
    pred_config = PredictConfig(
        session_init=get_model_loader(model_path),
        model=Model(),
        input_names=['input_img'],
        output_names=['prediction_img'])
    # Writes a single frozen GraphDef protobuf.
    ModelExporter(pred_config).export_compact('/tmp/compact_graph.pb')
def apply(model_path):
    """Run inference from a training model checkpoint.

    Args:
        model_path: path to the trained model checkpoint.
    """
    pred_config = PredictConfig(
        session_init=get_model_loader(model_path),
        model=Model(),
        input_names=['input_img'],
        output_names=['prediction_img'])
    pred = OfflinePredictor(pred_config)
    img = cv2.imread('lena.png')
    # Predictor takes a batch; index [0] selects the first output,
    # prediction[0] the first (only) image in the batch.
    prediction = pred([img])[0]
    cv2.imwrite('applied_default.jpg', prediction[0])
def apply_inference_graph(model_path):
    """Run inference from a different graph, which receives encoded image buffers.

    Args:
        model_path: path to the trained model checkpoint.
    """
    pred_config = PredictConfig(
        session_init=get_model_loader(model_path),
        model=InferenceOnlyModel(),
        input_names=['input_img_bytes'],
        output_names=['prediction_img_bytes'])
    pred = OfflinePredictor(pred_config)
    # Use context managers so both file handles are closed deterministically
    # (the original leaked the read handle).
    with open('lena.png', 'rb') as f:
        buf = f.read()
    prediction = pred([buf])[0]
    with open('applied_inference_graph.png', 'wb') as f:
        f.write(prediction[0])
def apply_compact(graph_path):
    """Run the pruned and frozen inference graph.

    Args:
        graph_path: path to the frozen GraphDef protobuf file.
    """
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        # Note, we just load the graph and do *not* need to initialize anything.
        with tf.gfile.GFile(graph_path, "rb") as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def)
        # Imported node names get the default 'import/' prefix.
        input_img = sess.graph.get_tensor_by_name('import/input_img:0')
        prediction_img = sess.graph.get_tensor_by_name('import/prediction_img:0')
        prediction = sess.run(prediction_img, {input_img: cv2.imread('lena.png')[None, ...]})
        cv2.imwrite('applied_compact.png', prediction[0])
<SYSTEM_TASK:> Gather useful debug information from a datapoint. <END_TASK> <USER_TASK:> Description: def _analyze_input_data(self, entry, k, depth=1, max_depth=3, max_list=3): """ Gather useful debug information from a datapoint. Args: entry: the datapoint component k (int): index of this component in current datapoint depth (int, optional): recursion depth max_depth, max_list: same as in :meth:`__init__`. Returns: string: debug message """
class _elementInfo(object):
    # Summarizes one datapoint component: records its type, and where
    # determinable its shape, dtype and value range; recurses into lists
    # up to `max_depth` levels, keeping at most `max_list` items per level.
    def __init__(self, el, pos, depth=0, max_list=3):
        self.shape = ""
        self.type = type(el).__name__
        self.dtype = ""
        self.range = ""
        self.sub_elements = []
        # Two spaces of indent per nesting level for the printed tree.
        self.ident = " " * (depth * 2)
        self.pos = pos
        # All concrete numpy scalar types, flattened from np.sctypes' groups.
        numpy_scalar_types = list(itertools.chain(*np.sctypes.values()))
        if isinstance(el, (int, float, bool)):
            self.range = " with value {}".format(el)
        elif type(el) is np.ndarray:
            self.shape = " of shape {}".format(el.shape)
            self.dtype = ":{}".format(str(el.dtype))
            self.range = " in range [{}, {}]".format(el.min(), el.max())
        elif type(el) in numpy_scalar_types:
            self.range = " with value {}".format(el)
        elif isinstance(el, (list)):
            self.shape = " of len {}".format(len(el))
            # NOTE: `max_depth` is a closure variable from the enclosing
            # _analyze_input_data, not a parameter of this class.
            if depth < max_depth:
                for k, subel in enumerate(el):
                    if k < max_list:
                        self.sub_elements.append(_elementInfo(subel, k, depth + 1, max_list))
                    else:
                        # Too many items: elide the rest with '...' and stop.
                        self.sub_elements.append(" " * ((depth + 1) * 2) + '...')
                        break
            else:
                # Max depth reached: only hint that children exist.
                if len(el) > 0:
                    self.sub_elements.append(" " * ((depth + 1) * 2) + ' ...')

    def __str__(self):
        strings = []
        vals = (self.ident, self.pos, self.type, self.dtype, self.shape, self.range)
        strings.append("{}{}: {}{}{}{}".format(*vals))

        # Append each child's rendering on its own line(s).
        for k, el in enumerate(self.sub_elements):
            strings.append(str(el))
        return "\n".join(strings)
return str(_elementInfo(entry, k, depth, max_list))
<SYSTEM_TASK:> Wrapper around optimizers to apply gradient processors. <END_TASK> <USER_TASK:> Description: def apply_grad_processors(opt, gradprocs): """ Wrapper around optimizers to apply gradient processors. Args: opt (tf.train.Optimizer): gradprocs (list[GradientProcessor]): gradient processors to add to the optimizer. Returns: a :class:`tf.train.Optimizer` instance which runs the gradient processors before updating the variables. """
assert isinstance(gradprocs, (list, tuple)), gradprocs for gp in gradprocs: assert isinstance(gp, GradientProcessor), gp class _ApplyGradientProcessor(ProxyOptimizer): def __init__(self, opt, gradprocs): self._gradprocs = gradprocs[:] super(_ApplyGradientProcessor, self).__init__(opt) def apply_gradients(self, grads_and_vars, global_step=None, name=None): g = self._apply(grads_and_vars) return self._opt.apply_gradients(g, global_step, name) def _apply(self, g): for proc in self._gradprocs: g = proc.process(g) return g return _ApplyGradientProcessor(opt, gradprocs)
<SYSTEM_TASK:> Running multiple `predict_dataflow` in multiple threads, and aggregate the results. <END_TASK> <USER_TASK:> Description: def multithread_predict_dataflow(dataflows, model_funcs): """ Running multiple `predict_dataflow` in multiple threads, and aggregate the results. Args: dataflows: a list of DataFlow to be used in :func:`predict_dataflow` model_funcs: a list of callable to be used in :func:`predict_dataflow` Returns: list of dict, in the format used by `DetectionDataset.eval_or_save_inference_results` """
num_worker = len(model_funcs) assert len(dataflows) == num_worker if num_worker == 1: return predict_dataflow(dataflows[0], model_funcs[0]) kwargs = {'thread_name_prefix': 'EvalWorker'} if sys.version_info.minor >= 6 else {} with ThreadPoolExecutor(max_workers=num_worker, **kwargs) as executor, \ tqdm.tqdm(total=sum([df.size() for df in dataflows])) as pbar: futures = [] for dataflow, pred in zip(dataflows, model_funcs): futures.append(executor.submit(predict_dataflow, dataflow, pred, pbar)) all_results = list(itertools.chain(*[fut.result() for fut in futures])) return all_results
<SYSTEM_TASK:> Call _init_runtime under different CUDA_VISIBLE_DEVICES, you'll <END_TASK> <USER_TASK:> Description: def _init_runtime(self): """ Call _init_runtime under different CUDA_VISIBLE_DEVICES, you'll have workers that run on multiGPUs """
is_chief = (self.idx == 0)
if not is_chief:
    # Non-chief workers silence per-layer logging so the console
    # isn't flooded with duplicated messages.
    from tensorpack.models.registry import disable_layer_logging
    disable_layer_logging()
self.predictor = OfflinePredictor(self.config)
if is_chief:
    # Only the chief worker prints the trainable-variable summary.
    with self.predictor.graph.as_default():
        describe_trainable_vars()
<SYSTEM_TASK:> Fetch a batch of data without waiting <END_TASK> <USER_TASK:> Description: def fetch_batch(self): """ Fetch a batch of data without waiting"""
inp, f = self.queue.get() nr_input_var = len(inp) batched, futures = [[] for _ in range(nr_input_var)], [] for k in range(nr_input_var): batched[k].append(inp[k]) futures.append(f) while len(futures) < self.batch_size: try: inp, f = self.queue.get_nowait() for k in range(nr_input_var): batched[k].append(inp[k]) futures.append(f) except queue.Empty: break # do not wait for k in range(nr_input_var): batched[k] = np.asarray(batched[k]) return batched, futures
<SYSTEM_TASK:> return an image generated from z <END_TASK> <USER_TASK:> Description: def generator(self, z): """ return an image generated from z"""
nf = 64 l = FullyConnected('fc0', z, nf * 8 * 4 * 4, activation=tf.identity) l = tf.reshape(l, [-1, 4, 4, nf * 8]) l = BNReLU(l) with argscope(Conv2DTranspose, activation=BNReLU, kernel_size=4, strides=2): l = Conv2DTranspose('deconv1', l, nf * 4) l = Conv2DTranspose('deconv2', l, nf * 2) l = Conv2DTranspose('deconv3', l, nf) l = Conv2DTranspose('deconv4', l, 3, activation=tf.identity) l = tf.tanh(l, name='gen') return l
<SYSTEM_TASK:> A shorthand of BatchNormalization + ReLU. <END_TASK> <USER_TASK:> Description: def BNReLU(x, name=None): """ A shorthand of BatchNormalization + ReLU. """
x = BatchNorm('bn', x) x = tf.nn.relu(x, name=name) return x
<SYSTEM_TASK:> When a dependency of a class is not available, create a dummy class which throws ImportError when used. <END_TASK> <USER_TASK:> Description: def create_dummy_class(klass, dependency): """ When a dependency of a class is not available, create a dummy class which throws ImportError when used. Args: klass (str): name of the class. dependency (str): name of the dependency. Returns: class: a class object """
assert not building_rtfd() class _DummyMetaClass(type): # throw error on class attribute access def __getattr__(_, __): raise AttributeError("Cannot import '{}', therefore '{}' is not available".format(dependency, klass)) @six.add_metaclass(_DummyMetaClass) class _Dummy(object): # throw error on constructor def __init__(self, *args, **kwargs): raise ImportError("Cannot import '{}', therefore '{}' is not available".format(dependency, klass)) return _Dummy
<SYSTEM_TASK:> When a dependency of a function is not available, create a dummy function which throws ImportError when used. <END_TASK> <USER_TASK:> Description: def create_dummy_func(func, dependency): """ When a dependency of a function is not available, create a dummy function which throws ImportError when used. Args: func (str): name of the function. dependency (str or list[str]): name(s) of the dependency. Returns: function: a function object """
assert not building_rtfd() if isinstance(dependency, (list, tuple)): dependency = ','.join(dependency) def _dummy(*args, **kwargs): raise ImportError("Cannot import '{}', therefore '{}' is not available".format(dependency, func)) return _dummy
<SYSTEM_TASK:> Log deprecation warning. <END_TASK> <USER_TASK:> Description: def log_deprecated(name="", text="", eos=""): """ Log deprecation warning. Args: name (str): name of the deprecated item. text (str, optional): information about the deprecation. eos (str, optional): end of service date such as "YYYY-MM-DD". """
# At least one of name/text must be given, otherwise there is nothing to log.
assert name or text
if eos:
    # Turn "YYYY-MM-DD" into a human-readable "after DD Mon" phrase.
    eos = "after " + datetime(*map(int, eos.split("-"))).strftime("%d %b")
if name:
    if eos:
        warn_msg = "%s will be deprecated %s. %s" % (name, eos, text)
    else:
        warn_msg = "%s was deprecated. %s" % (name, text)
else:
    warn_msg = text
    # No item name: append the end-of-service date to the free-form text.
    if eos:
        warn_msg += " Legacy period ends %s" % eos
logger.warn("[Deprecated] " + warn_msg)
<SYSTEM_TASK:> Create a hook-only callback which maintain EMA of the queue size. <END_TASK> <USER_TASK:> Description: def _create_ema_callback(self): """ Create a hook-only callback which maintain EMA of the queue size. Also tf.summary.scalar the EMA. """
with self.cached_name_scope():
    # in TF there is no API to get queue capacity, so we can only summary the size
    size = tf.cast(self.queue.size(), tf.float32, name='queue_size')
    # Maintain an exponential moving average of the queue size; the [0].op
    # grabs the update op of the first (only) summarized tensor.
    size_ema_op = add_moving_summary(size, collection=None, decay=0.5)[0].op
    # Hook-only callback: runs the EMA update op on every step, but is not
    # triggered before training or as an epoch trigger.
    ret = RunOp(
        lambda: size_ema_op,
        run_before=False, run_as_trigger=False, run_step=True)
    ret.name_scope = "InputSource/EMA"
    return ret
<SYSTEM_TASK:> Wrap a dataflow to tf.data.Dataset. <END_TASK> <USER_TASK:> Description: def dataflow_to_dataset(df, types): """ Wrap a dataflow to tf.data.Dataset. This function will also reset the dataflow. If the dataflow itself is finite, the returned dataset is also finite. Therefore, if used for training, you'll need to add `.repeat()` on the returned dataset. Args: df (DataFlow): a dataflow which produces lists types([tf.DType]): list of types Returns: (tf.data.Dataset) """
# TODO theoretically it can support dict assert isinstance(df, DataFlow), df assert isinstance(types, (list, tuple)), types df = MapData(df, lambda dp: tuple(dp)) df.reset_state() ds = tf.data.Dataset.from_generator( df.get_data, tuple(types)) return ds
<SYSTEM_TASK:> Computes pairwise intersection-over-area between box collections. <END_TASK> <USER_TASK:> Description: def ioa(boxes1, boxes2): """Computes pairwise intersection-over-area between box collections. Intersection-over-area (ioa) between two boxes box1 and box2 is defined as their intersection area over box2's area. Note that ioa is not symmetric, that is, IOA(box1, box2) != IOA(box2, box1). Args: boxes1: a numpy array with shape [N, 4] holding N boxes. boxes2: a numpy array with shape [M, 4] holding M boxes. Returns: a numpy array with shape [N, M] representing pairwise ioa scores. """
intersect = intersection(boxes1, boxes2) inv_areas = np.expand_dims(1.0 / area(boxes2), axis=0) return intersect * inv_areas
<SYSTEM_TASK:> Download the data from Marlin's website, unless it's already here. <END_TASK> <USER_TASK:> Description: def maybe_download(url, work_directory): """Download the data from Marlin's website, unless it's already here."""
filename = url.split("/")[-1] filepath = os.path.join(work_directory, filename) if not os.path.exists(filepath): logger.info("Downloading to {}...".format(filepath)) download(url, work_directory) return filepath
<SYSTEM_TASK:> Return the directory structure of "dir". <END_TASK> <USER_TASK:> Description: def guess_dir_structure(dir): """ Return the directory structure of "dir". Args: dir(str): something like '/path/to/imagenet/val' Returns: either 'train' or 'original' """
subdir = os.listdir(dir)[0] # find a subdir starting with 'n' if subdir.startswith('n') and \ os.path.isdir(os.path.join(dir, subdir)): dir_structure = 'train' else: dir_structure = 'original' logger.info( "[ILSVRC12] Assuming directory {} has '{}' structure.".format( dir, dir_structure)) return dir_structure
<SYSTEM_TASK:> Change relative filename to absolute file name. <END_TASK> <USER_TASK:> Description: def _use_absolute_file_name(self, img): """ Change relative filename to absolute file name. """
# Rewrite the relative path in place, then sanity-check it exists.
abs_path = os.path.join(self._imgdir, img['file_name'])
img['file_name'] = abs_path
assert os.path.isfile(abs_path), abs_path
<SYSTEM_TASK:> Add 'boxes', 'class', 'is_crowd' of this image to the dict, used by detection. <END_TASK> <USER_TASK:> Description: def _add_detection_gt(self, img, add_mask): """ Add 'boxes', 'class', 'is_crowd' of this image to the dict, used by detection. If add_mask is True, also add 'segmentation' in coco poly format. """
# ann_ids = self.coco.getAnnIds(imgIds=img['image_id']) # objs = self.coco.loadAnns(ann_ids) objs = self.coco.imgToAnns[img['image_id']] # equivalent but faster than the above two lines # clean-up boxes valid_objs = [] width = img.pop('width') height = img.pop('height') for objid, obj in enumerate(objs): if obj.get('ignore', 0) == 1: continue x1, y1, w, h = obj['bbox'] # bbox is originally in float # x1/y1 means upper-left corner and w/h means true w/h. This can be verified by segmentation pixels. # But we do make an assumption here that (0.0, 0.0) is upper-left corner of the first pixel x1 = np.clip(float(x1), 0, width) y1 = np.clip(float(y1), 0, height) w = np.clip(float(x1 + w), 0, width) - x1 h = np.clip(float(y1 + h), 0, height) - y1 # Require non-zero seg area and more than 1x1 box size if obj['area'] > 1 and w > 0 and h > 0 and w * h >= 4: obj['bbox'] = [x1, y1, x1 + w, y1 + h] valid_objs.append(obj) if add_mask: segs = obj['segmentation'] if not isinstance(segs, list): assert obj['iscrowd'] == 1 obj['segmentation'] = None else: valid_segs = [np.asarray(p).reshape(-1, 2).astype('float32') for p in segs if len(p) >= 6] if len(valid_segs) == 0: logger.error("Object {} in image {} has no valid polygons!".format(objid, img['file_name'])) elif len(valid_segs) < len(segs): logger.warn("Object {} in image {} has invalid polygons!".format(objid, img['file_name'])) obj['segmentation'] = valid_segs # all geometrically-valid boxes are returned boxes = np.asarray([obj['bbox'] for obj in valid_objs], dtype='float32') # (n, 4) cls = np.asarray([ self.COCO_id_to_category_id[obj['category_id']] for obj in valid_objs], dtype='int32') # (n,) is_crowd = np.asarray([obj['iscrowd'] for obj in valid_objs], dtype='int8') # add the keys img['boxes'] = boxes # nx4 img['class'] = cls # n, always >0 img['is_crowd'] = is_crowd # n, if add_mask: # also required to be float32 img['segmentation'] = [ obj['segmentation'] for obj in valid_objs]
<SYSTEM_TASK:> Load and merges several instance files together. <END_TASK> <USER_TASK:> Description: def load_many(basedir, names, add_gt=True, add_mask=False): """ Load and merges several instance files together. Returns the same format as :meth:`COCODetection.load`. """
if not isinstance(names, (list, tuple)): names = [names] ret = [] for n in names: coco = COCODetection(basedir, n) ret.extend(coco.load(add_gt, add_mask=add_mask)) return ret
<SYSTEM_TASK:> Surround a context with a timer. <END_TASK> <USER_TASK:> Description: def timed_operation(msg, log_start=False): """ Surround a context with a timer. Args: msg(str): the log to print. log_start(bool): whether to print also at the beginning. Example: .. code-block:: python with timed_operation('Good Stuff'): time.sleep(1) Will print: .. code-block:: python Good stuff finished, time:1sec. """
assert len(msg) if log_start: logger.info('Start {} ...'.format(msg)) start = timer() yield msg = msg[0].upper() + msg[1:] logger.info('{} finished, time:{:.4f} sec.'.format( msg, timer() - start))
<SYSTEM_TASK:> A context which add the time spent inside to TotalTimer. <END_TASK> <USER_TASK:> Description: def total_timer(msg): """ A context which add the time spent inside to TotalTimer. """
start = timer() yield t = timer() - start _TOTAL_TIMER_DATA[msg].feed(t)
<SYSTEM_TASK:> Print the content of the TotalTimer, if it's not empty. This function will automatically get <END_TASK> <USER_TASK:> Description: def print_total_timer(): """ Print the content of the TotalTimer, if it's not empty. This function will automatically get called when program exits. """
if len(_TOTAL_TIMER_DATA) == 0: return for k, v in six.iteritems(_TOTAL_TIMER_DATA): logger.info("Total Time: {} -> {:.2f} sec, {} times, {:.3g} sec/time".format( k, v.sum, v.count, v.average))
<SYSTEM_TASK:> Make sure processes terminate when main process exit. <END_TASK> <USER_TASK:> Description: def ensure_proc_terminate(proc): """ Make sure processes terminate when main process exit. Args: proc (multiprocessing.Process or list) """
if isinstance(proc, list): for p in proc: ensure_proc_terminate(p) return def stop_proc_by_weak_ref(ref): proc = ref() if proc is None: return if not proc.is_alive(): return proc.terminate() proc.join() assert isinstance(proc, mp.Process) atexit.register(stop_proc_by_weak_ref, weakref.ref(proc))