| desc | decl | bodies |
|---|---|---|
| stringlengths 3-26.7k | stringlengths 11-7.89k | stringlengths 8-553k |
'Grab signature (if given) and summary'
| def _parse_summary(self):
| summary = self._doc.read_to_next_empty_line()
summary_str = '\n'.join([s.strip() for s in summary])
if re.compile('^([\\w. ]+=)?[\\w\\.]+\\(.*\\)$').match(summary_str):
self['Signature'] = summary_str
if (not self._is_at_section()):
self['Summary'] = self._doc.read_to_next_empty_line()
elif re.compile('^[\\w]+\n[-]+').match(summary_str):
self['Summary'] = ''
self._doc.reset()
else:
self['Summary'] = summary
if (not self._is_at_section()):
self['Extended Summary'] = self._read_to_next_section()
|
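A quick standalone check (using only `re`) of how the signature regex above decides between a call-like signature line and a plain summary sentence:

```python
import re

SIG_RE = re.compile(r'^([\w. ]+=)?[\w\.]+\(.*\)$')

# a call-like first line is taken as the signature...
assert SIG_RE.match('numpy.zeros(shape, dtype)') is not None
# ...while an ordinary sentence is not, and becomes the summary instead
assert SIG_RE.match('Return a new array of given shape.') is None
```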
'Create a wrapper for a Keras model
:param model: A Keras model'
| def __init__(self, model=None):
| super(KerasModelWrapper, self).__init__()
if (model is None):
raise ValueError('model argument must be supplied.')
self.model = model
self.keras_model = None
|
'Looks for the name of the softmax layer.
:return: Softmax layer name'
| def _get_softmax_name(self):
| for (i, layer) in enumerate(self.model.layers):
cfg = layer.get_config()
if (('activation' in cfg) and (cfg['activation'] == 'softmax')):
return layer.name
raise Exception('No softmax layers found')
|
'Looks for the name of the layer producing the logits.
:return: name of layer producing the logits'
| def _get_logits_name(self):
| softmax_name = self._get_softmax_name()
softmax_layer = self.model.get_layer(softmax_name)
node = softmax_layer.inbound_nodes[0]
logits_name = node.inbound_layers[0].name
return logits_name
|
':param x: A symbolic representation of the network input.
:return: A symbolic representation of the logits'
| def get_logits(self, x):
| logits_name = self._get_logits_name()
return self.get_layer(x, logits_name)
|
':param x: A symbolic representation of the network input.
:return: A symbolic representation of the probs'
| def get_probs(self, x):
| name = self._get_softmax_name()
return self.get_layer(x, name)
|
':return: Names of all the layers kept by Keras'
| def get_layer_names(self):
| layer_names = [x.name for x in self.model.layers]
return layer_names
|
'Exposes all the layers of the model returned by get_layer_names.
:param x: A symbolic representation of the network input
:return: A dictionary mapping layer names to the symbolic
representation of their output.'
| def fprop(self, x):
| from keras.models import Model as KerasModel
if (self.keras_model is None):
new_input = self.model.get_input_at(0)
out_layers = [x_layer.output for x_layer in self.model.layers]
self.keras_model = KerasModel(new_input, out_layers)
outputs = self.keras_model(x)
if (len(self.model.layers) == 1):
outputs = [outputs]
fprop_dict = dict(zip(self.get_layer_names(), outputs))
return fprop_dict
|
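A minimal usage sketch, assuming the Keras 1.x / TensorFlow 1.x APIs this wrapper was written against; the toy model and the layer names are illustrative:

```python
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Activation

model = Sequential()
model.add(Dense(10, input_shape=(4,), name='logits'))
model.add(Activation('softmax', name='probs'))

wrapper = KerasModelWrapper(model)
x = tf.placeholder(tf.float32, shape=(None, 4))
outputs = wrapper.fprop(x)   # {'logits': <tensor>, 'probs': <tensor>}
probs = outputs['probs']     # the same tensor get_probs(x) resolves to
```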
'For compatibility with functions used as model definitions (taking
an input tensor and returning the tensor giving the output
of the model on that input).'
| def __call__(self, *args, **kwargs):
| return self.get_probs(*args, **kwargs)
|
'Expose the hidden features of a model given a layer name.
:param x: A symbolic representation of the network input
:param layer: The name of the hidden layer to return features at.
:return: A symbolic representation of the hidden features'
| def get_layer(self, x, layer):
| return self.fprop(x)[layer]
|
':param x: A symbolic representation of the network input
:return: A symbolic representation of the output logits (i.e., the
values fed as inputs to the softmax layer).'
| def get_logits(self, x):
| return self.get_layer(x, 'logits')
|
':param x: A symbolic representation of the network input
:return: A symbolic representation of the output probabilities (i.e.,
the output values produced by the softmax layer).'
| def get_probs(self, x):
| if ('probs' in self.get_layer_names()):
return self.get_layer(x, 'probs')
else:
import tensorflow as tf
return tf.nn.softmax(self.get_logits(x))
|
':return: a list of names for the layers that can be exposed by this
model abstraction.'
| def get_layer_names(self):
| if hasattr(self, 'layer_names'):
return self.layer_names
raise NotImplementedError('`get_layer_names` not implemented.')
|
'Exposes all the layers of the model returned by get_layer_names.
:param x: A symbolic representation of the network input
:return: A dictionary mapping layer names to the symbolic
representation of their output.'
| def fprop(self, x):
| raise NotImplementedError('`_fprop` not implemented.')
|
'Wrap a callable function that takes a tensor as input and returns
a tensor as output with the given layer name.
:param callable_fn: The callable function taking a tensor and
returning a given layer as output.
:param output_layer: A string of the output layer returned by the
function. (Usually either "probs" or "logits".)'
| def __init__(self, callable_fn, output_layer):
| self.output_layer = output_layer
self.callable_fn = callable_fn
|
'Constructs model and returns probabilities for given input.'
| def __call__(self, x_input):
| reuse = (True if self.built else None)
with slim.arg_scope(inception.inception_v3_arg_scope()):
(_, end_points) = inception.inception_v3(x_input, num_classes=self.num_classes, is_training=False, reuse=reuse)
self.built = True
output = end_points['Predictions']
probs = output.op.inputs[0]
return probs
|
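The `output.op.inputs[0]` line steps one node back in the TensorFlow graph: `end_points['Predictions']` is the softmax output, so the softmax op's first input is the pre-softmax tensor. A minimal TF 1.x illustration of the same trick:

```python
import tensorflow as tf

logits = tf.constant([[1.0, 2.0]])
probs = tf.nn.softmax(logits)
pre_softmax = probs.op.inputs[0]   # the tensor feeding the Softmax op, i.e. logits
```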
'Initializes instance of Submission class.
Args:
directory: location of the submission.
container: URL of Docker container which should be used to run submission.
entry_point: entry point script, which invokes submission.
use_gpu: whether to use Docker with GPU or not.'
| def __init__(self, directory, container, entry_point, use_gpu):
| self.name = os.path.basename(directory)
self.directory = directory
self.container = container
self.entry_point = entry_point
self.use_gpu = use_gpu
|
'Returns appropriate Docker binary to use.'
| def docker_binary(self):
| return ('nvidia-docker' if self.use_gpu else 'docker')
|
'Initializes instance of Attack class.'
| def __init__(self, directory, container, entry_point, use_gpu):
| super(Attack, self).__init__(directory, container, entry_point, use_gpu)
|
'Runs attack inside Docker.
Args:
input_dir: directory with input (dataset).
output_dir: directory where output (adversarial images) should be written.
epsilon: maximum allowed size of adversarial perturbation,
should be in range [0, 255].'
| def run(self, input_dir, output_dir, epsilon):
| print('Running attack ', self.name)
cmd = [self.docker_binary(), 'run', '-v', '{0}:/input_images'.format(input_dir), '-v', '{0}:/output_images'.format(output_dir), '-v', '{0}:/code'.format(self.directory), '-w', '/code', self.container, ('./' + self.entry_point), '/input_images', '/output_images', str(epsilon)]
print(' '.join(cmd))
subprocess.call(cmd)
|
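For a hypothetical submission, the command assembled and printed by `run()` would look roughly like this (the paths and container tag are made up for illustration):

```python
attack = Attack(directory='/submissions/fgsm',
                container='gcr.io/example/attack:latest',
                entry_point='run_attack.sh',
                use_gpu=True)
attack.run('/tmp/dataset', '/tmp/adv_out', epsilon=16)
# prints roughly:
# nvidia-docker run -v /tmp/dataset:/input_images -v /tmp/adv_out:/output_images
#   -v /submissions/fgsm:/code -w /code gcr.io/example/attack:latest
#   ./run_attack.sh /input_images /output_images 16
```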
'Initializes instance of Defense class.'
| def __init__(self, directory, container, entry_point, use_gpu):
| super(Defense, self).__init__(directory, container, entry_point, use_gpu)
|
'Runs defense inside Docker.
Args:
input_dir: directory with input (adversarial images).
output_dir: directory to write output (classification result).'
| def run(self, input_dir, output_dir):
| print('Running defense ', self.name)
cmd = [self.docker_binary(), 'run', '-v', '{0}:/input_images'.format(input_dir), '-v', '{0}:/output_data'.format(output_dir), '-v', '{0}:/code'.format(self.directory), '-w', '/code', self.container, ('./' + self.entry_point), '/input_images', '/output_data/result.csv']
print(' '.join(cmd))
subprocess.call(cmd)
|
'Initializes instance of AttacksOutput class.
Args:
dataset_dir: location of the dataset.
attacks_output_dir: where to write results of attacks.
targeted_attacks_output_dir: where to write results of targeted attacks.
all_adv_examples_dir: directory to copy all adversarial examples from
all attacks.
epsilon: maximum allowed size of adversarial perturbation.'
| def __init__(self, dataset_dir, attacks_output_dir, targeted_attacks_output_dir, all_adv_examples_dir, epsilon):
| self.attacks_output_dir = attacks_output_dir
self.targeted_attacks_output_dir = targeted_attacks_output_dir
self.all_adv_examples_dir = all_adv_examples_dir
self._load_dataset_clipping(dataset_dir, epsilon)
self._output_image_idx = 0
self._output_to_attack_mapping = {}
self._attack_image_count = 0
self._targeted_attack_image_count = 0
self._attack_names = set()
self._targeted_attack_names = set()
|
'Helper method which loads dataset and determines clipping range.
Args:
dataset_dir: location of the dataset.
epsilon: maximum allowed size of adversarial perturbation.'
| def _load_dataset_clipping(self, dataset_dir, epsilon):
| self.dataset_max_clip = {}
self.dataset_min_clip = {}
self._dataset_image_count = 0
for fname in os.listdir(dataset_dir):
if (not fname.endswith('.png')):
continue
image_id = fname[:(-4)]
image = np.array(Image.open(os.path.join(dataset_dir, fname)).convert('RGB'))
image = image.astype('int32')
self._dataset_image_count += 1
self.dataset_max_clip[image_id] = np.clip((image + epsilon), 0, 255).astype('uint8')
self.dataset_min_clip[image_id] = np.clip((image - epsilon), 0, 255).astype('uint8')
|
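The cast to int32 before adding or subtracting epsilon avoids uint8 wraparound; a small worked example of the resulting per-pixel bounds with epsilon=16:

```python
import numpy as np

epsilon = 16
image = np.array([[0, 100, 250]], dtype='int32')
max_clip = np.clip(image + epsilon, 0, 255).astype('uint8')  # [[16, 116, 255]]
min_clip = np.clip(image - epsilon, 0, 255).astype('uint8')  # [[ 0,  84, 234]]
```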
'Clips results of attack and copies them to the directory with all images.
Args:
attack_name: name of the attack.
is_targeted: if True then attack is targeted, otherwise non-targeted.'
| def clip_and_copy_attack_outputs(self, attack_name, is_targeted):
| if is_targeted:
self._targeted_attack_names.add(attack_name)
else:
self._attack_names.add(attack_name)
attack_dir = os.path.join((self.targeted_attacks_output_dir if is_targeted else self.attacks_output_dir), attack_name)
for fname in os.listdir(attack_dir):
if (not (fname.endswith('.png') or fname.endswith('.jpg'))):
continue
image_id = fname[:(-4)]
if (image_id not in self.dataset_max_clip):
continue
image_max_clip = self.dataset_max_clip[image_id]
image_min_clip = self.dataset_min_clip[image_id]
adversarial_image = np.array(Image.open(os.path.join(attack_dir, fname)).convert('RGB'))
clipped_adv_image = np.clip(adversarial_image, image_min_clip, image_max_clip)
output_basename = '{0:08d}'.format(self._output_image_idx)
self._output_image_idx += 1
self._output_to_attack_mapping[output_basename] = (attack_name, is_targeted, image_id)
if is_targeted:
self._targeted_attack_image_count += 1
else:
self._attack_image_count += 1
Image.fromarray(clipped_adv_image).save(os.path.join(self.all_adv_examples_dir, (output_basename + '.png')))
|
'Returns the set of all non-targeted attack names.'
| @property
def attack_names(self):
| return self._attack_names
|
'Returns the set of all targeted attack names.'
| @property
def targeted_attack_names(self):
| return self._targeted_attack_names
|
'Returns number of all images generated by non-targeted attacks.'
| @property
def attack_image_count(self):
| return self._attack_image_count
|
'Returns number of all images in the dataset.'
| @property
def dataset_image_count(self):
| return self._dataset_image_count
|
'Returns number of all images generated by targeted attacks.'
| @property
def targeted_attack_image_count(self):
| return self._targeted_attack_image_count
|
'Returns information about image based on its filename.'
| def image_by_base_filename(self, filename):
| return self._output_to_attack_mapping[filename]
|
'Initializes instance of DatasetMetadata.'
| def __init__(self, filename):
| self._true_labels = {}
self._target_classes = {}
with open(filename) as f:
reader = csv.reader(f)
header_row = next(reader)
try:
row_idx_image_id = header_row.index('ImageId')
row_idx_true_label = header_row.index('TrueLabel')
row_idx_target_class = header_row.index('TargetClass')
except ValueError:
raise IOError('Invalid format of dataset metadata.')
for row in reader:
if (len(row) < len(header_row)):
continue
try:
image_id = row[row_idx_image_id]
self._true_labels[image_id] = int(row[row_idx_true_label])
self._target_classes[image_id] = int(row[row_idx_target_class])
except (IndexError, ValueError):
raise IOError('Invalid format of dataset metadata')
|
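Per the parsing above, the metadata file is a CSV whose header must contain ImageId, TrueLabel and TargetClass columns (in any order). A small runnable sketch with made-up values:

```python
with open('dataset_meta.csv', 'w') as f:   # hypothetical file name
    f.write('ImageId,TrueLabel,TargetClass\n')
    f.write('00000000,281,123\n')
    f.write('00000001,17,456\n')

meta = DatasetMetadata('dataset_meta.csv')
assert meta.get_true_label('00000000') == 281
assert meta.get_target_class('00000001') == 456
```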
'Returns true label for image with given ID.'
| def get_true_label(self, image_id):
| return self._true_labels[image_id]
|
'Returns target class for image with given ID.'
| def get_target_class(self, image_id):
| return self._target_classes[image_id]
|
'Saves target classes for all dataset images into the given file.'
| def save_target_classes(self, filename):
| with open(filename, 'w') as f:
for (k, v) in self._target_classes.items():
f.write('{0}.png,{1}\n'.format(k, v))
|
'Identical to above test_save_load'
| def test_save_load(self):
| policy = ResnetPolicy(['board', 'liberties', 'sensibleness', 'capture_size'])
model_file = 'TESTPOLICY.json'
weights_file = 'TESTWEIGHTS.h5'
model_file2 = 'TESTPOLICY2.json'
weights_file2 = 'TESTWEIGHTS2.h5'
policy.save_model(model_file)
policy.model.save_weights(weights_file, overwrite=True)
policy.save_model(model_file2, weights_file2)
copypolicy = ResnetPolicy.load_model(model_file)
copypolicy.model.load_weights(weights_file)
copypolicy2 = ResnetPolicy.load_model(model_file2)
for (w1, w2) in zip(copypolicy.model.get_weights(), copypolicy2.model.get_weights()):
self.assertTrue(np.all((w1 == w2)))
self.assertTrue((type(policy) == type(copypolicy)))
os.remove(model_file)
os.remove(weights_file)
os.remove(model_file2)
os.remove(weights_file2)
|
'Helper function to count the number of expansions past the root using the dummy policy'
| def _count_expansions(self):
| node = self.mcts._root
expansions = 0
for (action, _) in sorted(dummy_policy(self.gs), key=itemgetter(1), reverse=True):
if (action in node._children):
expansions += 1
node = node._children[action]
else:
break
return expansions
|
'A copy of test_get_liberties_after but where captures are imminent'
| def test_get_liberties_after_cap(self):
| gs = capture_board()
pp = Preprocess(['liberties_after'])
feature = pp.state_to_tensor(gs)[0].transpose((1, 2, 0))
one_hot_liberties = np.zeros((gs.size, gs.size, 8))
for (x, y) in gs.get_legal_moves():
copy = gs.copy()
copy.do_move((x, y))
libs = copy.liberty_counts[(x, y)]
one_hot_liberties[(x, y, min((libs - 1), 7))] = 1
for i in range(8):
self.assertTrue(np.all((feature[:, :, i] == one_hot_liberties[:, :, i])), ('bad expectation: stones with %d liberties after move' % (i + 1)))
|
'Play one turn, update game state, save to disk'
| def play(self):
| end_of_game = self._play(self.player1)
return end_of_game
|
'create a neural net object that preprocesses according to feature_list and uses
a neural network specified by keyword arguments (using subclass\' create_network())
optional argument: init_network (boolean). If set to False, skips initializing
self.model and self.forward and the calling function should set them.'
| def __init__(self, feature_list, **kwargs):
| self.preprocessor = Preprocess(feature_list)
kwargs['input_dim'] = self.preprocessor.output_dim
if kwargs.get('init_network', True):
self.model = self.__class__.create_network(**kwargs)
self.forward = self._model_forward()
|
'Construct a function using the current keras backend that, when given a batch
of inputs, simply processes them forward and returns the output
This is as opposed to model.compile(), which takes a loss function
and training method.
c.f. https://github.com/fchollet/keras/issues/1426'
| def _model_forward(self):
| if self.model.uses_learning_phase:
forward_function = K.function([self.model.input, K.learning_phase()], [self.model.output])
return (lambda inpt: forward_function([inpt, 0])[0])
else:
forward_function = K.function([self.model.input], [self.model.output])
return (lambda inpt: forward_function([inpt])[0])
|
'create a new neural net object from the architecture specified in json_file'
| @staticmethod
def load_model(json_file):
| with open(json_file, 'r') as f:
object_specs = json.load(f)
class_name = object_specs.get('class', 'CNNPolicy')
try:
network_class = NeuralNetBase.subclasses[class_name]
except KeyError:
raise ValueError('Unknown neural network type in json file: {}\n(was it registered with the @neuralnet decorator?)'.format(class_name))
new_net = network_class(object_specs['feature_list'], init_network=False)
new_net.model = model_from_json(object_specs['keras_model'], custom_objects={'Bias': Bias})
if ('weights_file' in object_specs):
new_net.model.load_weights(object_specs['weights_file'])
new_net.forward = new_net._model_forward()
return new_net
|
'write the network model and preprocessing features to the specified file
If a weights_file (.hdf5 extension) is also specified, model weights are also
saved to that file and will be reloaded automatically in a call to load_model'
| def save_model(self, json_file, weights_file=None):
| object_specs = {'class': self.__class__.__name__, 'keras_model': self.model.to_json(), 'feature_list': self.preprocessor.feature_list}
if (weights_file is not None):
self.model.save_weights(weights_file)
object_specs['weights_file'] = weights_file
with open(json_file, 'w') as f:
json.dump(object_specs, f)
|
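For reference, the JSON written by `save_model` has roughly this shape (a sketch with illustrative values, not verbatim output):

```python
object_specs_example = {
    'class': 'CNNPolicy',
    'keras_model': '{"class_name": "Sequential", ...}',  # model.to_json() string
    'feature_list': ['board', 'ones', 'turns_since'],
    'weights_file': 'weights.hdf5',  # present only when weights_file was given
}
```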
'helper function to normalize a distribution over the given list of moves
and return a list of (move, prob) tuples'
| def _select_moves_and_normalize(self, nn_output, moves, size):
| if (len(moves) == 0):
return []
move_indices = [flatten_idx(m, size) for m in moves]
distribution = nn_output[move_indices]
distribution = (distribution / distribution.sum())
return zip(moves, distribution)
|
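A worked example, under the assumption that `flatten_idx((x, y), size)` maps a move to `x * size + y`:

```python
import numpy as np

nn_output = np.array([0.1, 0.3, 0.0, 0.6])   # toy 2x2 board, flattened
moves = [(0, 1), (1, 1)]                     # flattened indices 1 and 3
distribution = nn_output[[1, 3]]             # [0.3, 0.6]
distribution = distribution / distribution.sum()
# -> [0.333..., 0.666...]; zip(moves, distribution) pairs each legal move
#    with its renormalized probability
```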
'Given a list of states, evaluates them all at once to make best use of GPU
batching capabilities.
Analogous to [eval_state(s) for s in states]
Returns: a parallel list of move distributions as in eval_state'
| def batch_eval_state(self, states, moves_lists=None):
| n_states = len(states)
if (n_states == 0):
return []
state_size = states[0].size
if (not all([(st.size == state_size) for st in states])):
raise ValueError('all states must have the same size')
nn_input = np.concatenate([self.preprocessor.state_to_tensor(s) for s in states], axis=0)
network_output = self.forward(nn_input)
moves_lists = (moves_lists or [st.get_legal_moves() for st in states])
results = ([None] * n_states)
for i in range(n_states):
results[i] = self._select_moves_and_normalize(network_output[i], moves_lists[i], state_size)
return results
|
'Given a GameState object, returns a list of (action, probability) pairs
according to the network outputs
If a list of moves is specified, only those moves are kept in the distribution'
| def eval_state(self, state, moves=None):
| tensor = self.preprocessor.state_to_tensor(state)
network_output = self.forward(tensor)
moves = (moves or state.get_legal_moves())
return self._select_moves_and_normalize(network_output[0], moves, state.size)
|
'construct a convolutional neural network.
Keyword Arguments:
- input_dim: depth of features to be processed by first layer (no default)
- board: width of the go board to be processed (default 19)
- filters_per_layer: number of filters used on every layer (default 128)
- filters_per_layer_K: (where K is between 1 and <layers>) number of filters
used on layer K (default #filters_per_layer)
- layers: number of convolutional steps (default 12)
- filter_width_K: (where K is between 1 and <layers>) width of filter on
layer K (default 3 except 1st layer which defaults to 5).
Must be odd.'
| @staticmethod
def create_network(**kwargs):
| defaults = {'board': 19, 'filters_per_layer': 128, 'layers': 12, 'filter_width_1': 5}
params = defaults
params.update(kwargs)
network = Sequential()
network.add(convolutional.Convolution2D(input_shape=(params['input_dim'], params['board'], params['board']), nb_filter=params.get('filters_per_layer_1', params['filters_per_layer']), nb_row=params['filter_width_1'], nb_col=params['filter_width_1'], init='uniform', activation='relu', border_mode='same'))
for i in range(2, (params['layers'] + 1)):
filter_key = ('filter_width_%d' % i)
filter_width = params.get(filter_key, 3)
filter_count_key = ('filters_per_layer_%d' % i)
filter_nb = params.get(filter_count_key, params['filters_per_layer'])
network.add(convolutional.Convolution2D(nb_filter=filter_nb, nb_row=filter_width, nb_col=filter_width, init='uniform', activation='relu', border_mode='same'))
network.add(convolutional.Convolution2D(nb_filter=1, nb_row=1, nb_col=1, init='uniform', border_mode='same'))
network.add(Flatten())
network.add(Bias())
network.add(Activation('softmax'))
return network
|
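A usage sketch (the `Convolution2D` keywords above are Keras 1.x-era; the 48-plane `input_dim` is an illustrative value that must match the preprocessor's output depth):

```python
network = CNNPolicy.create_network(input_dim=48, board=19, layers=12)
# the softmax output is a distribution over all 19 * 19 = 361 positions,
# so network.predict(batch) has shape (n, 361)
```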
'construct a convolutional neural network with Resnet-style skip connections.
Arguments are the same as with the default CNNPolicy network, except the default
number of layers is 20 plus a new n_skip parameter
Keyword Arguments:
- input_dim: depth of features to be processed by first layer (no default)
- board: width of the go board to be processed (default 19)
- filters_per_layer: number of filters used on every layer (default 128)
- layers: number of convolutional steps (default 20)
- filter_width_K: (where K is between 1 and <layers>) width of filter on
layer K (default 3 except 1st layer which defaults to 5).
Must be odd.
- n_skip_K: (where K is as in filter_width_K) number of convolutional
layers to skip with the linear path starting at K. Only valid
at K >= 1. (Each layer defaults to 1)
Note that n_skip_1=2 means that the next valid value of n_skip_* is n_skip_3
A diagram may help explain (numbers indicate layer):
1 2 3 4 5 6
I--C--B--R--C--B--R--C--M--B--R--C--B--R--C--B--R--C--M ... M --R--F--O
[n_skip_1 = 2] [n_skip_3 = 3]
I - input
B - BatchNormalization
R - ReLU
C - Conv2D
F - Flatten
O - output
M - merge
The input is always passed through a Conv2D layer, the output of which
layer is counted as \'1\'. Each subsequent [R -- C] block is counted as
one \'layer\'. The \'merge\' layer isn\'t counted; hence if n_skip_1 is 2,
the next valid skip parameter is n_skip_3, which will start at the
output of the merge'
| @staticmethod
def create_network(**kwargs):
| defaults = {'board': 19, 'filters_per_layer': 128, 'layers': 20, 'filter_width_1': 5}
params = defaults
params.update(kwargs)
model_input = Input(shape=(params['input_dim'], params['board'], params['board']))
convolution_path = convolutional.Convolution2D(input_shape=(), nb_filter=params['filters_per_layer'], nb_row=params['filter_width_1'], nb_col=params['filter_width_1'], init='uniform', activation='linear', border_mode='same')(model_input)
def add_resnet_unit(path, K, **params):
"Add a resnet unit to path starting at layer 'K',\n adding as many (ReLU + Conv2D) modules as specified by n_skip_K\n\n Returns new path and next layer index, i.e. K + n_skip_K, in a tuple\n "
block_input = path
skip_key = ('n_skip_%d' % K)
n_skip = params.get(skip_key, 1)
for i in range(n_skip):
layer = (K + i)
path = BatchNormalization()(path)
path = Activation('relu')(path)
filter_key = ('filter_width_%d' % layer)
filter_width = params.get(filter_key, 3)
path = convolutional.Convolution2D(nb_filter=params['filters_per_layer'], nb_row=filter_width, nb_col=filter_width, init='uniform', activation='linear', border_mode='same')(path)
path = merge([block_input, path], mode='sum')
return (path, (K + n_skip))
layer = 1
while (layer < params['layers']):
(convolution_path, layer) = add_resnet_unit(convolution_path, layer, **params)
if (layer > params['layers']):
print('Due to skipping, ended with {} layers instead of {}'.format(layer, params['layers']))
convolution_path = Activation('relu')(convolution_path)
convolution_path = convolutional.Convolution2D(nb_filter=1, nb_row=1, nb_col=1, init='uniform', border_mode='same')(convolution_path)
network_output = Flatten()(convolution_path)
network_output = Bias()(network_output)
network_output = Activation('softmax')(network_output)
return Model(input=[model_input], output=[network_output])
|
'Get the group of connected same-color stones to the given position
Keyword arguments:
position -- a tuple of (x, y)
x being the column index of the starting position of the search
y being the row index of the starting position of the search
Return:
a set of tuples consist of (x, y)s which are the same-color cluster
which contains the input single position. len(group) is size of the cluster, can be large.'
| def get_group(self, position):
| (x, y) = position
return self.group_sets[x][y]
|
'returns a list of the unique groups adjacent to position
\'unique\' means that, for example in this position:
. B W . .
. W W . .
only the one white group would be returned on get_groups_around((1,1))'
| def get_groups_around(self, position):
| groups = []
for (nx, ny) in self._neighbors(position):
group = self.group_sets[nx][ny]
if ((len(group) > 0) and (group not in groups)):
groups.append(self.group_sets[nx][ny])
return groups
|
'simply return True iff position is within the bounds of [0, self.size)'
| def _on_board(self, position):
| (x, y) = position
return ((x >= 0) and (y >= 0) and (x < self.size) and (y < self.size))
|
'A private helper function that simply returns a list of positions neighboring
the given (x,y) position. Basically it handles edges and corners.'
| def _neighbors(self, position):
| return GameState.__NEIGHBORS_CACHE[self.size][position]
|
'Like _neighbors but for diagonal positions'
| def _diagonals(self, position):
| (x, y) = position
return filter(self._on_board, [((x - 1), (y - 1)), ((x + 1), (y + 1)), ((x + 1), (y - 1)), ((x - 1), (y + 1))])
|
'A private helper function to update self.group_sets and self.liberty_sets
given that a stone was just played at `position`'
| def _update_neighbors(self, position):
| (x, y) = position
merged_group = set()
merged_group.add(position)
merged_libs = self.liberty_sets[x][y]
for (nx, ny) in self._neighbors(position):
self.liberty_sets[nx][ny] -= set([position])
if (self.board[nx][ny] == (- self.current_player)):
new_liberty_count = len(self.liberty_sets[nx][ny])
for (gx, gy) in self.group_sets[nx][ny]:
self.liberty_counts[gx][gy] = new_liberty_count
elif (self.board[x][y] == self.board[nx][ny]):
merged_group |= self.group_sets[nx][ny]
merged_libs |= self.liberty_sets[nx][ny]
count_merged_libs = len(merged_libs)
for (gx, gy) in merged_group:
self.group_sets[gx][gy] = merged_group
self.liberty_sets[gx][gy] = merged_libs
self.liberty_counts[gx][gy] = count_merged_libs
|
'A private helper function to take a group off the board (due to capture),
updating group sets and liberties along the way'
| def _remove_group(self, group):
| for (x, y) in group:
self._update_hash((x, y), self.board[(x, y)])
self.board[(x, y)] = EMPTY
for (x, y) in group:
self.group_sets[x][y] = set()
self.liberty_sets[x][y] = set()
self.liberty_counts[x][y] = (-1)
self.stone_ages[x][y] = (-1)
for (nx, ny) in self._neighbors((x, y)):
if (self.board[(nx, ny)] == EMPTY):
self.liberty_sets[x][y].add((nx, ny))
else:
self.liberty_sets[nx][ny].add((x, y))
for (gx, gy) in self.group_sets[nx][ny]:
self.liberty_counts[gx][gy] = len(self.liberty_sets[nx][ny])
|
'get a copy of this Game state'
| def copy(self):
| other = GameState(self.size, self.komi)
other.board = self.board.copy()
other.current_player = self.current_player
other.ko = self.ko
other.handicaps = list(self.handicaps)
other.history = list(self.history)
other.num_black_prisoners = self.num_black_prisoners
other.num_white_prisoners = self.num_white_prisoners
other.enforce_superko = self.enforce_superko
other.current_hash = self.current_hash.copy()
other.previous_hashes = self.previous_hashes.copy()
def get_copy(s, set_copies={}):
if (id(s) not in set_copies):
set_copies[id(s)] = set(s)
return set_copies[id(s)]
for x in range(self.size):
for y in range(self.size):
other.group_sets[x][y] = get_copy(self.group_sets[x][y])
other.liberty_sets[x][y] = get_copy(self.liberty_sets[x][y])
other.liberty_counts = self.liberty_counts.copy()
return other
|
'return true if having current_player play at <action> would be suicide'
| def is_suicide(self, action):
| (x, y) = action
num_liberties_here = len(self.liberty_sets[x][y])
if (num_liberties_here == 0):
for (nx, ny) in self._neighbors(action):
is_friendly_group = (self.board[(nx, ny)] == self.current_player)
group_has_other_liberties = (len((self.liberty_sets[nx][ny] - set([action]))) > 0)
if (is_friendly_group and group_has_other_liberties):
return False
is_enemy_group = (self.board[(nx, ny)] == (- self.current_player))
if (is_enemy_group and (not group_has_other_liberties)):
return False
return True
return False
|
'Find all actions that the current_player has done in the past, taking into
account the fact that history starts with BLACK when there are no
handicaps or with WHITE when there are.'
| def is_positional_superko(self, action):
| if ((len(self.handicaps) == 0) and (self.current_player == BLACK)):
player_history = self.history[0::2]
elif ((len(self.handicaps) > 0) and (self.current_player == WHITE)):
player_history = self.history[0::2]
else:
player_history = self.history[1::2]
if ((action not in self.handicaps) and (action not in player_history)):
return False
state_copy = self.copy()
state_copy.enforce_superko = False
state_copy.do_move(action)
if (state_copy.current_hash in self.previous_hashes):
return True
else:
return False
|
'determine if the given action (x,y tuple) is a legal move
note: we only check ko, not superko at this point (TODO?)'
| def is_legal(self, action):
| if (action is PASS_MOVE):
return True
(x, y) = action
if (not self._on_board(action)):
return False
if (self.board[x][y] != EMPTY):
return False
if self.is_suicide(action):
return False
if (action == self.ko):
return False
if (self.enforce_superko and self.is_positional_superko(action)):
return False
return True
|
'returns whether the position is empty and is surrounded by all stones of \'owner\''
| def is_eyeish(self, position, owner):
| (x, y) = position
if (self.board[(x, y)] != EMPTY):
return False
for (nx, ny) in self._neighbors(position):
if (self.board[(nx, ny)] != owner):
return False
return True
|
'returns whether the position is a true eye of \'owner\'
Requires a recursive call; empty spaces diagonal to \'position\' are fine
as long as they themselves are eyes'
| def is_eye(self, position, owner, stack=[]):
| if (not self.is_eyeish(position, owner)):
return False
num_bad_diagonal = 0
allowable_bad_diagonal = (1 if (len(self._neighbors(position)) == 4) else 0)
for d in self._diagonals(position):
if (self.board[d] == (- owner)):
num_bad_diagonal += 1
elif ((self.board[d] == EMPTY) and (d not in stack)):
stack.append(position)
if (not self.is_eye(d, owner, stack)):
num_bad_diagonal += 1
stack.pop()
if (num_bad_diagonal > allowable_bad_diagonal):
return False
return True
|
'Check if moving at action results in a ladder capture, defined as being next
to an enemy group with two liberties, and with no ladder_escape move afterward
for the other player.
If prey is None, check all adjacent groups, otherwise only the prey
group is checked. In the (prey is None) case, if this move is a ladder
capture for any adjacent group, it\'s considered a ladder capture.
Recursion depth between is_ladder_capture() and is_ladder_escape() is
controlled by the remaining_attempts argument. If it reaches 0, the
move is assumed not to be a ladder capture.'
| def is_ladder_capture(self, action, prey=None, remaining_attempts=80):
| if (not self.is_legal(action)):
return False
if (remaining_attempts <= 0):
return True
hunter_player = self.current_player
prey_player = (- self.current_player)
if (prey is None):
neighbor_groups_stones = [next(iter(group)) for group in self.get_groups_around(action)]
potential_prey = [(nx, ny) for (nx, ny) in neighbor_groups_stones if ((self.board[nx][ny] == prey_player) and (self.liberty_counts[nx][ny] == 2))]
else:
potential_prey = [prey]
for (prey_x, prey_y) in potential_prey:
tmp = self.copy()
tmp.do_move(action)
possible_escapes = tmp.liberty_sets[prey_x][prey_y].copy()
for prey_stone in tmp.group_sets[prey_x][prey_y]:
for (nx, ny) in tmp._neighbors(prey_stone):
if ((tmp.board[nx][ny] == hunter_player) and (tmp.liberty_counts[nx][ny] == 1)):
possible_escapes |= tmp.liberty_sets[nx][ny]
if (not any((tmp.is_ladder_escape((escape_x, escape_y), prey=(prey_x, prey_y), remaining_attempts=(remaining_attempts - 1)) for (escape_x, escape_y) in possible_escapes))):
return True
return False
|
'Check if moving at action results in a ladder escape, defined as being next
to a current player\'s group with one liberty, with no ladder captures
afterward. Going from 1 to >= 3 liberties is counted as escape, or a
move giving two liberties without a subsequent ladder capture.
If prey is None, check all adjacent groups, otherwise only the prey
group is checked. In the (prey is None) case, if this move is a ladder
escape for any adjacent group, this move is a ladder escape.
Recursion depth between is_ladder_capture() and is_ladder_escape() is
controlled by the remaining_attempts argument. If it reaches 0, the
move is assumed not to be a ladder capture.'
| def is_ladder_escape(self, action, prey=None, remaining_attempts=80):
| if (not self.is_legal(action)):
return False
if (remaining_attempts <= 0):
return False
prey_player = self.current_player
if (prey is None):
neighbor_groups_stones = [next(iter(group)) for group in self.get_groups_around(action)]
potential_prey = [(nx, ny) for (nx, ny) in neighbor_groups_stones if ((self.board[nx][ny] == prey_player) and (self.liberty_counts[nx][ny] == 1))]
else:
potential_prey = [prey]
for (prey_x, prey_y) in potential_prey:
tmp = self.copy()
tmp.do_move(action)
if (tmp.liberty_counts[prey_x][prey_y] >= 3):
return True
if (tmp.liberty_counts[prey_x][prey_y] == 1):
continue
if any((tmp.is_ladder_capture(possible_capture, prey=(prey_x, prey_y), remaining_attempts=(remaining_attempts - 1)) for possible_capture in tmp.liberty_sets[prey_x][prey_y])):
continue
return True
return False
|
'Calculate score of board state and return player ID (1, -1, or 0 for tie)
corresponding to winner. Uses \'Area scoring\'.'
| def get_winner(self):
| score_white = np.sum((self.board == WHITE))
score_black = np.sum((self.board == BLACK))
empties = zip(*np.where((self.board == EMPTY)))
for empty in empties:
if self.is_eyeish(empty, BLACK):
score_black += 1
elif self.is_eyeish(empty, WHITE):
score_white += 1
score_white += self.komi
score_white -= self.passes_white
score_black -= self.passes_black
if (score_black > score_white):
winner = BLACK
elif (score_white > score_black):
winner = WHITE
else:
winner = 0
return winner
|
'Returns the color of the player who will make the next move.'
| def get_current_player(self):
| return self.current_player
|
'Play stone at action=(x,y). If color is not specified, current_player is used
If it is a legal move, current_player switches to the opposite color
If not, an IllegalMove exception is raised'
| def do_move(self, action, color=None):
| color = (color or self.current_player)
reset_player = self.current_player
self.current_player = color
if self.is_legal(action):
self.ko = None
self.stone_ages[(self.stone_ages >= 0)] += 1
if (action is not PASS_MOVE):
(x, y) = action
self.board[x][y] = color
self._update_hash(action, color)
self._update_neighbors(action)
self.stone_ages[x][y] = 0
for (nx, ny) in self._neighbors(action):
if ((self.board[(nx, ny)] == (- color)) and (len(self.liberty_sets[nx][ny]) == 0)):
captured_group = self.group_sets[nx][ny]
num_captured = len(captured_group)
self._remove_group(captured_group)
if (color == BLACK):
self.num_white_prisoners += num_captured
else:
self.num_black_prisoners += num_captured
if (num_captured == 1):
would_recapture = (len(self.liberty_sets[x][y]) == 1)
recapture_size_is_1 = (len(self.group_sets[x][y]) == 1)
if (would_recapture and recapture_size_is_1):
self.ko = (nx, ny)
self.previous_hashes.add(self.current_hash)
else:
if (color == BLACK):
self.passes_black += 1
if (color == WHITE):
self.passes_white += 1
self.current_player = (- color)
self.history.append(action)
self.__legal_move_cache = None
else:
self.current_player = reset_player
raise IllegalMove(str(action))
if (len(self.history) > 1):
if ((self.history[(-1)] is PASS_MOVE) and (self.history[(-2)] is PASS_MOVE) and (self.current_player == WHITE)):
self.is_end_of_game = True
return self.is_end_of_game
|
'Batch version of get_move. A list of moves is returned (one per state)'
| def get_moves(self, states):
| sensible_move_lists = [[move for move in st.get_legal_moves(include_eyes=False)] for st in states]
all_moves_distributions = self.policy.batch_eval_state(states, sensible_move_lists)
move_list = ([None] * len(states))
for (i, move_probs) in enumerate(all_moves_distributions):
if ((len(move_probs) == 0) or (len(states[i].history) > self.move_limit)):
move_list[i] = go.PASS_MOVE
elif ((self.greedy_start is not None) and (len(states[i].history) >= self.greedy_start)):
max_prob = max(move_probs, key=itemgetter(1))
move_list[i] = max_prob[0]
else:
(moves, probabilities) = zip(*move_probs)
probabilities = self.apply_temperature(probabilities)
choice_idx = np.random.choice(len(moves), p=probabilities)
move_list[i] = moves[choice_idx]
return move_list
|
'create a preprocessor object that will concatenate together the
given list of features'
| def __init__(self, feature_list=DEFAULT_FEATURES):
| self.output_dim = 0
self.feature_list = feature_list
self.processors = ([None] * len(feature_list))
for i in range(len(feature_list)):
feat = feature_list[i].lower()
if (feat in FEATURES):
self.processors[i] = FEATURES[feat]['function']
self.output_dim += FEATURES[feat]['size']
else:
raise ValueError(('unknown feature: %s' % feat))
|
'Convert a GameState to a Theano-compatible tensor'
| def state_to_tensor(self, state):
| feat_tensors = [proc(state) for proc in self.processors]
(f, s) = (self.output_dim, state.size)
return np.concatenate(feat_tensors).reshape((1, f, s, s))
|
'Read the given SGF file into an iterable of (input,output) pairs
for neural network training
Each input is a GameState converted into one-hot neural net features
Each output is an action as an (x,y) pair (passes are skipped)
If this game\'s size does not match bd_size, a SizeMismatchError is raised'
| def convert_game(self, file_name, bd_size):
| with open(file_name, 'r') as file_object:
state_action_iterator = sgf_iter_states(file_object.read(), include_end=False)
for (state, move, player) in state_action_iterator:
if (state.size != bd_size):
raise SizeMismatchError()
if (move != go.PASS_MOVE):
nn_input = self.feature_processor.state_to_tensor(state)
(yield (nn_input, move))
|
'Convert all files in the iterable sgf_files into an hdf5 group to be stored in hdf5_file
Arguments:
- sgf_files : an iterable of relative or absolute paths to SGF files
- hdf5_file : the name of the HDF5 where features will be saved
- bd_size : side length of board of games that are loaded
- ignore_errors : if True, issues a Warning when there is an unknown
exception rather than halting. Note that sgf.ParseException and
go.IllegalMove exceptions are always skipped
The resulting file has the following properties:
states : dataset with shape (n_data, n_features, board width, board height)
actions : dataset with shape (n_data, 2) (actions are stored as x,y tuples of
where the move was played)
file_offsets : group mapping from filenames to tuples of (index, length)
For example, to find what positions in the dataset come from \'test.sgf\':
index, length = file_offsets[\'test.sgf\']
test_states = states[index:index+length]
test_actions = actions[index:index+length]'
| def sgfs_to_hdf5(self, sgf_files, hdf5_file, bd_size=19, ignore_errors=True, verbose=False):
| tmp_file = os.path.join(os.path.dirname(hdf5_file), ('.tmp.' + os.path.basename(hdf5_file)))
h5f = h5.File(tmp_file, 'w')
try:
states = h5f.require_dataset('states', dtype=np.uint8, shape=(1, self.n_features, bd_size, bd_size), maxshape=(None, self.n_features, bd_size, bd_size), exact=False, chunks=(64, self.n_features, bd_size, bd_size), compression='lzf')
actions = h5f.require_dataset('actions', dtype=np.uint8, shape=(1, 2), maxshape=(None, 2), exact=False, chunks=(1024, 2), compression='lzf')
file_offsets = h5f.require_group('file_offsets')
h5f['features'] = np.string_(','.join(self.feature_processor.feature_list))
if verbose:
print('created HDF5 dataset in {}'.format(tmp_file))
next_idx = 0
for file_name in sgf_files:
if verbose:
print(file_name)
n_pairs = 0
file_start_idx = next_idx
try:
for (state, move) in self.convert_game(file_name, bd_size):
if (next_idx >= len(states)):
states.resize(((next_idx + 1), self.n_features, bd_size, bd_size))
actions.resize(((next_idx + 1), 2))
states[next_idx] = state
actions[next_idx] = move
n_pairs += 1
next_idx += 1
except go.IllegalMove:
warnings.warn(('Illegal Move encountered in %s\n\tdropping the remainder of the game' % file_name))
except sgf.ParseException:
warnings.warn(('Could not parse %s\n\tdropping game' % file_name))
except SizeMismatchError:
warnings.warn(('Skipping %s; wrong board size' % file_name))
except Exception as e:
if ignore_errors:
warnings.warn(('Unknown exception with file %s\n\t%s' % (file_name, e)), stacklevel=2)
else:
raise e
finally:
if (n_pairs > 0):
file_name_key = file_name.replace('/', ':')
file_offsets[file_name_key] = [file_start_idx, n_pairs]
if verbose:
print('\t%d state/action pairs extracted' % n_pairs)
elif verbose:
print('\t-no usable data-')
except Exception as e:
print('sgfs_to_hdf5 failed')
os.remove(tmp_file)
raise e
if verbose:
print ('finished. renaming %s to %s' % (tmp_file, hdf5_file))
h5f.close()
os.rename(tmp_file, hdf5_file)
|
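Reading the result back follows the `file_offsets` scheme described in the docstring; a minimal sketch with h5py (the file name is hypothetical; note that slashes in SGF paths are stored as colons in the offset keys):

```python
import h5py as h5

with h5.File('features.h5', 'r') as h5f:
    index, length = h5f['file_offsets']['test.sgf'][...]
    test_states = h5f['states'][index:index + length]
    test_actions = h5f['actions'][index:index + length]
```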
'Expand tree by creating new children.
Arguments:
action_priors -- output from policy function - a list of tuples of actions and their prior
probability according to the policy function.
Returns:
None'
| def expand(self, action_priors):
| for (action, prob) in action_priors:
if (action not in self._children):
self._children[action] = TreeNode(self, prob)
|
'Select action among children that gives maximum action value, Q plus bonus u(P).
Returns:
A tuple of (action, next_node)'
| def select(self):
| return max(self._children.items(), key=(lambda act_node: act_node[1].get_value()))
|
'Update node values from leaf evaluation.
Arguments:
leaf_value -- the value of subtree evaluation from the current player\'s perspective.
c_puct -- a number in (0, inf) controlling the relative impact of values, Q, and
prior probability, P, on this node\'s score.
Returns:
None'
| def update(self, leaf_value, c_puct):
| self._n_visits += 1
self._Q += ((leaf_value - self._Q) / self._n_visits)
if (not self.is_root()):
self._u = (((c_puct * self._P) * np.sqrt(self._parent._n_visits)) / (1 + self._n_visits))
|
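Numerically, the exploration bonus computed in `update()` behaves like a PUCT term: for example, with c_puct=5, a prior P=0.2, a parent visited 100 times, and this node visited 9 times:

```python
import numpy as np

c_puct, P = 5.0, 0.2
parent_visits, n_visits = 100, 9
u = (c_puct * P * np.sqrt(parent_visits)) / (1 + n_visits)
# u = (5 * 0.2 * 10) / 10 = 1.0; the bonus shrinks as n_visits grows,
# shifting selection from prior-driven exploration toward the learned Q
```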
'Like a call to update(), but applied recursively for all ancestors.
Note: it is important that this happens from the root downward so that \'parent\' visit
counts are correct.'
| def update_recursive(self, leaf_value, c_puct):
| if self._parent:
self._parent.update_recursive(leaf_value, c_puct)
self.update(leaf_value, c_puct)
|
'Calculate and return the value for this node: a combination of leaf evaluations, Q, and
this node\'s prior adjusted for its visit count, u'
| def get_value(self):
| return (self._Q + self._u)
|
'Check if leaf node (i.e. no nodes below this have been expanded).'
| def is_leaf(self):
| return (self._children == {})
|
'Arguments:
value_fn -- a function that takes in a state and outputs a score in [-1, 1], i.e. the
expected value of the end game score from the current player\'s perspective.
policy_fn -- a function that takes in a state and outputs a list of (action, probability)
tuples for the current player.
rollout_policy_fn -- a coarse, fast version of policy_fn used in the rollout phase.
lmbda -- controls the relative weight of the value network and fast rollout policy result
in determining the value of a leaf node. lmbda must be in [0, 1], where 0 means use only
the value network and 1 means use only the result from the rollout.
c_puct -- a number in (0, inf) that controls how quickly exploration converges to the
maximum-value policy, where a higher value means relying on the prior more, and
should be used only in conjunction with a large value for n_playout.'
| def __init__(self, value_fn, policy_fn, rollout_policy_fn, lmbda=0.5, c_puct=5, rollout_limit=500, playout_depth=20, n_playout=10000):
| self._root = TreeNode(None, 1.0)
self._value = value_fn
self._policy = policy_fn
self._rollout = rollout_policy_fn
self._lmbda = lmbda
self._c_puct = c_puct
self._rollout_limit = rollout_limit
self._L = playout_depth
self._n_playout = n_playout
|
'Run a single playout from the root to the given depth, getting a value at the leaf and
propagating it back through its parents. State is modified in-place, so a copy must be
provided.
Arguments:
state -- a copy of the state.
leaf_depth -- after this many moves, leaves are evaluated.
Returns:
None'
| def _playout(self, state, leaf_depth):
| node = self._root
for i in range(leaf_depth):
if node.is_leaf():
action_probs = self._policy(state)
if (len(action_probs) == 0):
break
node.expand(action_probs)
(action, node) = node.select()
state.do_move(action)
v = (self._value(state) if (self._lmbda < 1) else 0)
z = (self._evaluate_rollout(state, self._rollout_limit) if (self._lmbda > 0) else 0)
leaf_value = (((1 - self._lmbda) * v) + (self._lmbda * z))
node.update_recursive(leaf_value, self._c_puct)
|
'Use the rollout policy to play until the end of the game, returning +1 if the current
player wins, -1 if the opponent wins, and 0 if it is a tie.'
| def _evaluate_rollout(self, state, limit):
| player = state.get_current_player()
for i in range(limit):
action_probs = self._rollout(state)
if (len(action_probs) == 0):
break
max_action = max(action_probs, key=itemgetter(1))[0]
state.do_move(max_action)
else:
print('WARNING: rollout reached move limit')
winner = state.get_winner()
if (winner == 0):
return 0
else:
return (1 if (winner == player) else (-1))
|
'Runs all playouts sequentially and returns the most visited action.
Arguments:
state -- the current state, including both game state and the current player.
Returns:
the selected action'
| def get_move(self, state):
| for n in range(self._n_playout):
state_copy = state.copy()
self._playout(state_copy, self._L)
| return max(self._root._children.items(), key=(lambda act_node: act_node[1]._n_visits))[0]
|
'Step forward in the tree, keeping everything we already know about the subtree, assuming
that get_move() has been called already. Siblings of the new root will be garbage-collected.'
| def update_with_move(self, last_move):
| if (last_move in self._root._children):
self._root = self._root._children[last_move]
self._root._parent = None
else:
self._root = TreeNode(None, 1.0)
|
'Tests that 1 + 1 always equals 2.'
| def test_basic_addition(self):
| self.assertEqual((1 + 1), 2)
|
'Tests that 1 + 1 always equals 2.'
| def test_basic_addition(self):
| self.assertEqual((1 + 1), 2)
|
'Tests that 1 + 1 always equals 2.'
| def test_basic_addition(self):
| self.assertEqual((1 + 1), 2)
|
'Tests that 1 + 1 always equals 2.'
| def test_basic_addition(self):
| self.assertEqual((1 + 1), 2)
|
''
| def __init__(self, IP, scan_type, file):
| Thread.__init__(self)
self.IP = IP
self.scan_type = scan_type
self.file = file
self.connstr = ''
self.scanresult = ''
|
'Overridden run method'
| def run(self):
| try:
cd = pyclamd.ClamdNetworkSocket(self.IP, 3310)
if cd.ping():
self.connstr = (self.IP + ' connection [OK]')
cd.reload()
if (self.scan_type == 'contscan_file'):
self.scanresult = '{0}\n'.format(cd.contscan_file(self.file))
elif (self.scan_type == 'multiscan_file'):
self.scanresult = '{0}\n'.format(cd.multiscan_file(self.file))
elif (self.scan_type == 'scan_file'):
self.scanresult = '{0}\n'.format(cd.scan_file(self.file))
time.sleep(1)
else:
self.connstr = (self.IP + ' ping error, exit')
return
except Exception as e:
self.connstr = ((self.IP + ' ') + str(e))
|
'RC4 algorithm'
| def crypt(self, data, key):
| x = 0
box = range(256)
for i in range(256):
x = (((x + box[i]) + ord(key[(i % len(key))])) % 256)
(box[i], box[x]) = (box[x], box[i])
x = y = 0
out = []
for char in data:
x = ((x + 1) % 256)
y = ((y + box[x]) % 256)
(box[x], box[y]) = (box[y], box[x])
out.append(chr((ord(char) ^ box[((box[x] + box[y]) % 256)])))
return ''.join(out)
|
'RC4 encryption with random salt and final encoding'
| def tencode(self, data, key, encode=base64.b64encode, salt_length=16):
| salt = ''
for n in range(salt_length):
salt += chr(random.randrange(256))
data = (salt + self.crypt(data, sha1((key + salt)).digest()))
if encode:
data = encode(data)
return data
|
'RC4 decryption of encoded data'
| def tdecode(self, data, key, decode=base64.b64decode, salt_length=16):
| if decode:
data = decode(data)
salt = data[:salt_length]
return self.crypt(data[salt_length:], sha1((key + salt)).digest())
|
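Because RC4 is its own inverse, `tdecode` undoes `tencode`; a roundtrip sketch (Python 2 semantics, since `crypt` works on byte strings via `ord`/`chr`; the holder class name is hypothetical):

```python
cipher = RC4()   # hypothetical class holding crypt/tencode/tdecode
token = cipher.tencode('secret message', 'my key')
assert cipher.tdecode(token, 'my key') == 'secret message'
```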
''
| def OnOpenbutton(self, event):
| dlg = wx.FileDialog(self, message=u'\u9009\u62e9\u79c1\u94a5', defaultDir=os.getcwd(), defaultFile='', style=((wx.OPEN | wx.MULTIPLE) | wx.CHANGE_DIR))
if (dlg.ShowModal() == wx.ID_OK):
self.Importfilename = dlg.GetPaths()
self.Privatekey.SetValue(('%s' % self.Importfilename[0]))
dlg.Destroy()
|
'Recursively traverses the data structure, adding tree nodes to
match it.'
| def AddTreeNodes(self, parentItem, items):
| for item in items:
if (type(item) == str):
child = self.tree.AppendItem(parentItem, item)
self.tree.SetItemImage(child, self.fileidx, wx.TreeItemIcon_Normal)
self.tree.SetItemImage(child, self.fldropenidx, wx.TreeItemIcon_Expanded)
else:
newItem = self.tree.AppendItem(parentItem, item[0])
self.tree.SetItemImage(newItem, self.fldridx, wx.TreeItemIcon_Normal)
self.tree.SetItemImage(newItem, self.fldropenidx, wx.TreeItemIcon_Expanded)
self.AddTreeNodes(newItem, item[1])
|
'OnInit'
| def OnInit(self):
| frame = ServManageFrame(None, u'OManager\u670d\u52a1\u5668\u7ba1\u7406')
frame.Show()
self.SetTopWindow(frame)
return True
|
'OnExit'
| def OnExit(self):
| pass
|
'Tests that 1 + 1 always equals 2.'
| def test_basic_addition(self):
| self.failUnlessEqual((1 + 1), 2)
|
'Init the EM object.
:param self: An ExtensionManager object
:type self: ExtensionManager
:return: None
:rtype: None'
| def __init__(self, network_manager):
| self._nm = network_manager
self._extensions_str = []
self._extensions = []
self._interface = None
self._socket = None
self._should_continue = True
self._packets_to_send = {str(k): [] for k in range(1, 14)}
self._packets_to_send['*'] = []
self._channels_to_hop = []
self._current_channel = '1'
self._listen_thread = threading.Thread(target=self._listen)
self._send_thread = threading.Thread(target=self._send)
self._channelhop_thread = threading.Thread(target=self._channel_hop)
|
'Returns a list of all the uimethods.
:param self: An ExtensionManager object
:type self: ExtensionManager
:return: List Object
:rtype: List'
| def get_ui_funcs(self):
| ui_funcs = []
for extension in self._extensions:
for attr in dir(extension):
if callable(getattr(extension, attr)):
method = getattr(extension, attr)
if hasattr(method, 'is_uimethod'):
ui_funcs.append(method)
return ui_funcs
|
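The `is_uimethod` attribute checked above is presumably set by a decorator on extension methods; a minimal sketch of that pattern (the decorator name is an assumption, not necessarily the project's real API):

```python
def uimethod(func):
    # mark the function so get_ui_funcs() collects it
    func.is_uimethod = True
    return func

class SomeExtension(object):
    @uimethod
    def send_output(self):
        return ['status line rendered by the UI']
```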
'Returns a list of all the backend methods
:param self: An ExtensionManager object
:type self: ExtensionManager
:return: dict object
:rtype: dict'
| def get_backend_funcs(self):
| backend_funcs = {}
for extension in self._extensions:
for attrname in dir(extension):
method = getattr(extension, attrname)
if hasattr(method, 'is_backendmethod'):
backend_funcs[method.__name__] = extension
return backend_funcs
|
'Change the interface\'s channel every three seconds
:param self: An ExtensionManager object
:type self: ExtensionManager
:return: None
:rtype: None
.. note: The channel range is between 1 and 13
| def _channel_hop(self):
| while self._should_continue:
for channel in self._channels_to_hop:
if (self._current_channel != channel):
self._current_channel = channel
if self._should_continue:
try:
self._socket.close()
self._nm.set_interface_channel(self._interface, int(self._current_channel))
self._socket = linux.L2Socket(iface=self._interface)
time.sleep(3)
except BaseException:
continue
else:
break
|