code | docs
---|---
def _server_loop(self, client, client_addr):
while not self._stopped and not _shutting_down:
try:
with self._unlock():
request = mock_server_receive_request(client, self)
self._requests_count += 1
self._log('%d\t%r' % (request.client_port, request))
# Give most recently added responders precedence.
for responder in reversed(self._autoresponders):
if responder.handle(request):
self._log('\t(autoresponse)')
break
else:
self._request_q.put(request)
except socket.error as error:
if error.errno in (errno.ECONNRESET, errno.EBADF):
# We hung up, or the client did.
break
raise
except select.error as error:
if error.args[0] == errno.EBADF:
# Closed.
break
else:
raise
except AssertionError:
traceback.print_exc()
break
self._log('disconnected: %s' % format_addr(client_addr))
client.close() | Read requests from one client socket, 'client'. |
def check_password(self, username, password):
try:
if SUPPORTS_VERIFY:
kerberos.checkPassword(username.lower(), password, getattr(settings, "KRB5_SERVICE", ""), getattr(settings, "KRB5_REALM", ""), getattr(settings, "KRB5_VERIFY_KDC", True))
else:
kerberos.checkPassword(username.lower(), password, getattr(settings, "KRB5_SERVICE", ""), getattr(settings, "KRB5_REALM", ""))
return True
except kerberos.BasicAuthError:
if getattr(settings, "KRB5_DEBUG", False):
logger.exception("Failure during authentication")
return False
except:
if getattr(settings, "KRB5_DEBUG", False):
logger.exception("Failure during authentication")
# for all other exceptions, also deny access
return False | The actual password-checking logic, separated from Django's authenticate code for easier updating. |
def main():
from optparse import OptionParser
parser = OptionParser('Start mock MongoDB server')
parser.add_option('-p', '--port', dest='port', default=27017,
help='port on which mock mongod listens')
parser.add_option('-q', '--quiet',
action='store_false', dest='verbose', default=True,
help="don't print messages to stdout")
options, cmdline_args = parser.parse_args()
if cmdline_args:
parser.error('Unrecognized argument(s): %s' % ' '.join(cmdline_args))
server = interactive_server(port=options.port, verbose=options.verbose)
try:
server.run()
print('Listening on port %d' % server.port)
time.sleep(1e6)
except KeyboardInterrupt:
server.stop() | Start an interactive `MockupDB`.
Use like ``python -m mockupdb``. |
def _calculate_influence(self, neighborhood):
grid = np.exp(-self.distance_grid / (neighborhood ** 2))
return grid.reshape(self.num_neurons, self.num_neurons)[:, :, None] | Pre-calculate the influence for a given value of sigma.
The neighborhood has size num_neurons * num_neurons, so for a
30 * 30 map, the neighborhood will be size (900, 900).
Parameters
----------
neighborhood : float
The neighborhood value.
Returns
-------
neighborhood : numpy array
The influence from each neuron to each other neuron. |
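A minimal sketch of the influence computation above, using an assumed 2x2 toy map (so 4 neurons) and a hand-written squared-distance grid; the names here are illustrative, not part of the package API.
import numpy as np

# Assumed toy values: a 2x2 map has 4 neurons, so the squared grid-distance
# matrix is 4x4 (flattened here; _calculate_influence reshapes it anyway).
num_neurons = 4
distance_grid = np.array([[0, 1, 1, 2],
                          [1, 0, 2, 1],
                          [1, 2, 0, 1],
                          [2, 1, 1, 0]], dtype=np.float64).ravel()

def calculate_influence(neighborhood):
    # exp(-d / sigma^2), reshaped to (num_neurons, num_neurons, 1) so it can
    # broadcast against per-feature weight updates.
    grid = np.exp(-distance_grid / (neighborhood ** 2))
    return grid.reshape(num_neurons, num_neurons)[:, :, None]

print(calculate_influence(2.0).shape)              # (4, 4, 1)
print(calculate_influence(0.5).round(3)[:, :, 0])  # sharply peaked on the diagonal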
def _initialize_distance_grid(self):
p = [self._grid_distance(i) for i in range(self.num_neurons)]
return np.array(p) | Initialize the distance grid by calls to _grid_dist. |
def _grid_distance(self, index):
# Take every dimension but the first in reverse
# then reverse that list again.
dimensions = np.cumprod(self.map_dimensions[1::][::-1])[::-1]
coord = []
for idx, dim in enumerate(dimensions):
if idx != 0:
value = (index % dimensions[idx-1]) // dim
else:
value = index // dim
coord.append(value)
coord.append(index % self.map_dimensions[-1])
for idx, (width, row) in enumerate(zip(self.map_dimensions, coord)):
x = np.abs(np.arange(width) - row) ** 2
dims = self.map_dimensions[::-1]
if idx:
dims = dims[:-idx]
x = np.broadcast_to(x, dims).T
if idx == 0:
distance = np.copy(x)
else:
distance += x.T
return distance | Calculate the distance grid for a single index position.
This is pre-calculated for fast neighborhood calculations
later on (see _calculate_influence). |
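A simpler, independent construction (illustrative only) of the same squared grid distances for a small 2D map, computed directly from (row, column) coordinates:
import numpy as np

map_dimensions = (3, 3)
rows, cols = np.indices(map_dimensions)

def grid_distance(index):
    # Squared euclidean distance on the map grid from one neuron to all others.
    r, c = np.unravel_index(index, map_dimensions)
    return (rows - r) ** 2 + (cols - c) ** 2

# Distance of every neuron to the center neuron (index 4) of a 3x3 map.
print(grid_distance(4))
# [[2 1 2]
#  [1 0 1]
#  [2 1 2]]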
def topographic_error(self, X, batch_size=1):
dist = self.transform(X, batch_size)
# Sort the distances and get the indices of the two smallest distances
# for each datapoint.
res = dist.argsort(1)[:, :2]
# Lookup the euclidean distance between these points in the distance
# grid
dgrid = self.distance_grid.reshape(self.num_neurons, self.num_neurons)
res = np.asarray([dgrid[x, y] for x, y in res])
# A grid distance of 1.0 is the smallest nonzero distance (direct neighbors).
return np.sum(res > 1.0) / len(res) | Calculate the topographic error.
The topographic error is a measure of the spatial organization of the
map. Maps in which the most similar neurons are also close on the
grid have low topographic error and indicate that a problem has been
learned correctly.
Formally, the topographic error is the proportion of units for which
the two most similar neurons are not direct neighbors on the map.
Parameters
----------
X : numpy array.
The input data.
batch_size : int
The batch size to use when calculating the topographic error.
Returns
-------
error : float
The proportion of data points for which the two most similar
neurons are not direct neighbors on the map. |
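A hand-rolled sketch of the same measure with assumed toy values: three data points and four neurons laid out on a line, so units i and i+1 are direct grid neighbors.
import numpy as np

dist_to_neurons = np.array([[0.1, 0.2, 0.9, 0.8],   # best two units: 0 and 1 (neighbors)
                            [0.7, 0.1, 0.8, 0.2],   # best two units: 1 and 3 (not neighbors)
                            [0.9, 0.8, 0.1, 0.2]])  # best two units: 2 and 3 (neighbors)
grid = np.abs(np.arange(4)[:, None] - np.arange(4)[None, :])  # grid distances on a line

best_two = dist_to_neurons.argsort(1)[:, :2]
grid_dist_between_best = np.array([grid[x, y] for x, y in best_two])
print(np.sum(grid_dist_between_best > 1.0) / len(best_two))  # 0.333...: one point in three fails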
def neighbors(self, distance=2.0):
dgrid = self.distance_grid.reshape(self.num_neurons, self.num_neurons)
for x, y in zip(*np.nonzero(dgrid <= distance)):
if x != y:
yield x, y | Get all neighbors for all neurons. |
def neighbor_difference(self):
differences = np.zeros(self.num_neurons)
num_neighbors = np.zeros(self.num_neurons)
distance, _ = self.distance_function(self.weights, self.weights)
for x, y in self.neighbors():
differences[x] += distance[x, y]
num_neighbors[x] += 1
return differences / num_neighbors | Get the average euclidean distance between each node and its neighbors. |
def spread(self, X):
distance, _ = self.distance_function(X, self.weights)
dists_per_neuron = defaultdict(list)
for x, y in zip(np.argmin(distance, 1), distance):
dists_per_neuron[x].append(y[x])
out = np.zeros(self.num_neurons)
average_spread = {k: np.mean(v)
for k, v in dists_per_neuron.items()}
for x, y in average_spread.items():
out[x] = y
return out | Calculate the average spread for each node.
The average spread is a measure of how far each neuron is from the
data points which cluster to it.
Parameters
----------
X : numpy array
The input data.
Returns
-------
spread : numpy array
The average distance from each neuron to each data point. |
def invert_projection(self, X, identities):
distances = self.transform(X)
if len(distances) != len(identities):
raise ValueError("X and identities are not the same length: "
"{0} and {1}".format(len(X), len(identities)))
node_match = []
for d in distances.__getattribute__(self.argfunc)(0):
node_match.append(identities[d])
return np.array(node_match) | Calculate the inverted projection.
The inverted projection of a SOM is created by associating each weight
with the input which matches it most closely, thus giving a good
approximation of the "influence" of each input item.
Works best for symbolic (instead of continuous) input data.
Parameters
----------
X : numpy array
Input data
identities : list
A list of names for each of the input data. Must be the same
length as X.
Returns
-------
m : numpy array
An array with the same shape as the map |
def map_weights(self):
first_dim = self.map_dimensions[0]
if len(self.map_dimensions) != 1:
second_dim = np.prod(self.map_dimensions[1:])
else:
second_dim = 1
# Reshape to appropriate dimensions
return self.weights.reshape((first_dim,
second_dim,
self.data_dimensionality)) | Reshaped weights for visualization.
The weights are reshaped as
(W.shape[0], prod(W.shape[1:-1]), W.shape[2]).
This allows one to easily see patterns, even for hyper-dimensional
soms.
For one-dimensional SOMs, the returned array is of shape
(W.shape[0], 1, W.shape[2])
Returns
-------
w : numpy array
A three-dimensional array containing the weights in a
2D array for easy visualization. |
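A small sketch with assumed shapes, showing why the reshape makes a hyper-dimensional map plottable as a 2D grid of weight vectors:
import numpy as np

map_dimensions = (4, 3, 2)          # 4 * 3 * 2 = 24 neurons
data_dimensionality = 5
weights = np.random.rand(np.prod(map_dimensions), data_dimensionality)

first_dim = map_dimensions[0]
second_dim = np.prod(map_dimensions[1:]) if len(map_dimensions) > 1 else 1
print(weights.reshape(first_dim, second_dim, data_dimensionality).shape)  # (4, 6, 5)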
def load(cls, path):
data = json.load(open(path))
weights = data['weights']
weights = np.asarray(weights, dtype=np.float64)
s = cls(data['map_dimensions'],
data['params']['lr']['orig'],
data['data_dimensionality'],
influence=data['params']['infl']['orig'],
lr_lambda=data['params']['lr']['factor'],
infl_lambda=data['params']['infl']['factor'])
s.weights = weights
s.trained = True
return s | Load a SOM from a JSON file saved with this package.
Parameters
----------
path : str
The path to the JSON file.
Returns
-------
s : cls
A som of the specified class. |
def start(self):
LOG.info('Interacting with the CDN...')
with indicator.Spinner(run=self.run_indicator):
cdn_item = self._cdn()
self.print_virt_table(cdn_item.headers) | Interact with the CDN for a container and print the returned headers. |
def remove_dirs(self, directory):
LOG.info('Removing directory [ %s ]', directory)
local_files = self._drectory_local_files(directory=directory)
for file_name in local_files:
try:
os.remove(file_name['local_object'])
except OSError as exp:
LOG.error(str(exp))
# Build a list of all local directories
directories = sorted(
[i for i, _, _ in os.walk(directory)],
reverse=True
)
# Remove directories
for directory_path in directories:
try:
os.removedirs(directory_path)
except OSError as exp:
if exp.errno != 2:
LOG.error(str(exp))
pass | Delete a directory recursively.
:param directory: $PATH to directory.
:type directory: ``str`` |
def _list_contents(self, last_obj=None, single_page_return=False):
if self.job_args.get('cdn_containers'):
if not self.job_args.get('fields'):
self.job_args['fields'] = [
'name',
'cdn_enabled',
'log_retention',
'ttl'
]
url = self.job_args['cdn_storage_url']
else:
url = self.job_args['storage_url']
objects_list = self.job.list_items(
url=url,
container=self.job_args['container'],
last_obj=last_obj,
spr=single_page_return
)
pattern_match = self.job_args.get('pattern_match')
if pattern_match:
objects_list = self.match_filter(
idx_list=objects_list,
pattern=pattern_match,
dict_type=True
)
LOG.debug('List of objects: "%s"', objects_list)
return objects_list | Retrieve a long list of all files in a container.
:returns: ``list`` of objects found in the container. |
def _return_container_objects(self):
container_objects = self.job_args.get('object')
if container_objects:
return True, [{'container_object': i} for i in container_objects]
container_objects = self.job_args.get('objects_file')
if container_objects:
container_objects = os.path.expanduser(container_objects)
if os.path.isfile(container_objects):
with open(container_objects) as f:
return True, [
{'container_object': i.rstrip('\n')}
for i in f.readlines()
]
container_objects = self._list_contents()
pattern_match = self.job_args.get('pattern_match')
if pattern_match:
container_objects = self.match_filter(
idx_list=container_objects,
pattern=pattern_match,
dict_type=True,
dict_key='name'
)
# Reformat list for processing
if container_objects and isinstance(container_objects[0], dict):
return False, self._return_deque([
{'container_object': i['name']} for i in container_objects
])
else:
return False, self._return_deque() | Return a list of objects to delete.
The first element of the returned tuple indicates whether the list of
objects was user defined (True or False).
The list of objects is a list of dictionaries with the key being
"container_object".
:returns: tuple (``bool``, ``list``) |
def _index_fs(self):
indexed_objects = self._return_deque()
directory = self.job_args.get('directory')
if directory:
indexed_objects = self._return_deque(
deque=indexed_objects,
item=self._drectory_local_files(
directory=directory
)
)
object_names = self.job_args.get('object')
if object_names:
indexed_objects = self._return_deque(
deque=indexed_objects,
item=self._named_local_files(
object_names=object_names
)
)
return indexed_objects | Returns a deque object full of local file system items.
:returns: ``deque`` |
def match_filter(self, idx_list, pattern, dict_type=False,
dict_key='name'):
if dict_type is False:
return self._return_deque([
obj for obj in idx_list
if re.search(pattern, obj)
])
elif dict_type is True:
return self._return_deque([
obj for obj in idx_list
if re.search(pattern, obj.get(dict_key))
])
else:
return self._return_deque() | Return matched items from an indexed list.
:param idx_list: ``list``
:param pattern: ``str``
:returns: ``deque`` |
def print_horiz_table(self, data):
# Build list of returned objects
return_objects = list()
fields = self.job_args.get('fields')
if not fields:
fields = set()
for item_dict in data:
for field_item in item_dict.keys():
fields.add(field_item)
fields = sorted(fields)
for obj in data:
item_struct = dict()
for item in fields:
item_struct[item] = obj.get(item)
else:
return_objects.append(item_struct)
table = prettytable.PrettyTable(fields)
for obj in return_objects:
table.add_row([obj.get(i) for i in fields])
for tbl in table.align.keys():
table.align[tbl] = 'l'
sort_key = self.job_args.get('sort_by')
if sort_key:
table.sortby = sort_key
self.printer(table) | Print a horizontal pretty table from data. |
def print_virt_table(self, data):
table = prettytable.PrettyTable()
keys = sorted(data.keys())
table.add_column('Keys', keys)
table.add_column('Values', [data.get(i) for i in keys])
for tbl in table.align.keys():
table.align[tbl] = 'l'
self.printer(table) | Print a vertical pretty table from data. |
def printer(self, message, color_level='info'):
if self.job_args.get('colorized'):
print(cloud_utils.return_colorized(msg=message, color=color_level))
else:
print(message) | Print a message to the screen, colorized when enabled.
:param message: item to print to screen |
def _get_method(method):
# Split the class out from the job
module = method.split(':')
# Set the import module
_module_import = module[0]
# Set the class name to use
class_name = module[-1]
# import the module
module_import = __import__(_module_import, fromlist=[class_name])
# Return the attributes for the imported module and class
return getattr(module_import, class_name) | Return an imported object.
:param method: ``str`` Dot notation for the import, with a colon used to
separate the module from the class used for the job.
:returns: ``object`` Loaded class object from imported method. |
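A standalone sketch of the colon notation, using a standard-library class so it runs anywhere; the helper name is illustrative, not the package's API.
def get_method(method):
    # "package.module:ClassName" -> the class object.
    module_path, class_name = method.split(':')
    module = __import__(module_path, fromlist=[class_name])
    return getattr(module, class_name)

OrderedDict = get_method('collections:OrderedDict')
print(OrderedDict([('a', 1), ('b', 2)]))  # OrderedDict([('a', 1), ('b', 2)])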
def run_manager(self, job_override=None):
for arg_name, arg_value in self.job_args.items():
if arg_name.endswith('_headers'):
if isinstance(arg_value, list):
self.job_args[arg_name] = self._list_headers(
headers=arg_value
)
elif not arg_name:
self.job_args[arg_name] = self._str_headers(
header=arg_value
)
else:
self.job_args[arg_name] = dict()
# Set base header for the user-agent
self.job_args['base_headers']['User-Agent'] = 'turbolift'
LOG.info('Authenticating')
indicator_options = {'run': self.job_args.get('run_indicator', True)}
with indicator.Spinner(**indicator_options):
LOG.debug('Authenticate against the Service API')
self.job_args.update(auth.authenticate(job_args=self.job_args))
if job_override:
action = self._get_method(method=job_override)
else:
parsed_command = self.job_args.get('parsed_command')
if not parsed_command:
raise exceptions.NoCommandProvided(
'Please provide a command. Basic commands are: %s',
list(self.job_map.keys())
)
else:
action = self._get_method(method=self.job_map[parsed_command])
run = action(job_args=self.job_args)
run.start() | The run manager.
The run manager is responsible for loading the required plugin based on
what the user has input, using the parsed_command value found in
the job_args dict. If the user provides a *job_override* the method
will attempt to import the module and class as provided by the user.
Before the method attempts to run any job, the run manager will first
authenticate to the cloud provider.
:param job_override: ``str`` Dot notation for the import, with a colon
used to separate the module from the class used for the job. |
def range_initialization(X, num_weights):
# Randomly initialize weights to cover the range of each feature.
X_ = X.reshape(-1, X.shape[-1])
min_val, max_val = X_.min(0), X_.max(0)
data_range = max_val - min_val
return data_range * np.random.rand(num_weights,
X.shape[-1]) + min_val | Initialize the weights by calculating the range of the data.
The data range is calculated by reshaping the input matrix to a
2D matrix, and then taking the min and max values over the columns.
Parameters
----------
X : numpy array
The input data. The data range is calculated over the last axis.
num_weights : int
The number of weights to initialize.
Returns
-------
new_weights : numpy array
A new version of the weights, initialized to the data range specified
by X. |
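A toy sketch (assumed data): two features on very different scales, so each initialized weight vector lands inside the per-feature [min, max] range rather than in a unit cube.
import numpy as np

X = np.column_stack([np.random.uniform(-1, 1, 100),
                     np.random.uniform(0, 1000, 100)])

X_ = X.reshape(-1, X.shape[-1])
min_val, max_val = X_.min(0), X_.max(0)
weights = (max_val - min_val) * np.random.rand(6, X.shape[-1]) + min_val
print(weights.round(1))  # column 0 stays near [-1, 1], column 1 near [0, 1000]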
def login(self, usr, pwd):
self._usr = usr
self._pwd = pwd | Store the username and password used to log in to the SMTP server. |
def send(self, me, to, subject, msg):
msg = MIMEText(msg)
msg['Subject'] = subject
msg['From'] = me
msg['To'] = to
server = smtplib.SMTP(self.host, self.port)
server.starttls()
# Check if user and password defined
if self._usr and self._pwd:
server.login(self._usr, self._pwd)
try:
# Send email
server.sendmail(me, [x.strip() for x in to.split(",")], msg.as_string())
except:
# Error sending email
raise Exception("Error Sending Message.")
# Quit!
server.quit() | Send Message |
def _init_weights(self,
X):
X = np.asarray(X, dtype=np.float64)
if self.scaler is not None:
X = self.scaler.fit_transform(X)
if self.initializer is not None:
self.weights = self.initializer(X, self.num_neurons)
for v in self.params.values():
v['value'] = v['orig']
return X | Set the weights and normalize data before starting training. |
def _pre_train(self,
stop_param_updates,
num_epochs,
updates_epoch):
# Calculate the total number of updates given early stopping.
updates = {k: stop_param_updates.get(k, num_epochs) * updates_epoch
for k, v in self.params.items()}
# Calculate the value of a single step given the number of allowed
# updates.
single_steps = {k: np.exp(-((1.0 - (1.0 / v)))
* self.params[k]['factor'])
for k, v in updates.items()}
# Calculate the factor given the true factor and the value of a
# single step.
constants = {k: np.exp(-self.params[k]['factor']) / v
for k, v in single_steps.items()}
return constants | Set parameters and constants before training. |
def fit_predict(self,
X,
num_epochs=10,
updates_epoch=10,
stop_param_updates=dict(),
batch_size=1,
show_progressbar=False):
self.fit(X,
num_epochs,
updates_epoch,
stop_param_updates,
batch_size,
show_progressbar)
return self.predict(X, batch_size=batch_size) | First fit, then predict. |
def fit_transform(self,
X,
num_epochs=10,
updates_epoch=10,
stop_param_updates=dict(),
batch_size=1,
show_progressbar=False,
show_epoch=False):
self.fit(X,
num_epochs,
updates_epoch,
stop_param_updates,
batch_size,
show_progressbar,
show_epoch)
return self.transform(X, batch_size=batch_size) | First fit, then transform. |
def _epoch(self,
X,
epoch_idx,
batch_size,
updates_epoch,
constants,
show_progressbar):
# Create batches
X_ = self._create_batches(X, batch_size)
X_len = np.prod(X.shape[:-1])
update_step = np.ceil(X_.shape[0] / updates_epoch)
# Initialize the previous activation
prev = self._init_prev(X_)
influences = self._update_params(constants)
# Iterate over the training data
for idx, x in enumerate(tqdm(X_, disable=not show_progressbar)):
# Our batches are padded, so we need to
# make sure we know when we hit the padding
# so we don't inadvertently learn zeroes.
diff = X_len - (idx * batch_size)
if diff and diff < batch_size:
x = x[:diff]
# Prev_activation may be None
if prev is not None:
prev = prev[:diff]
# If we hit an update step, perform an update.
if idx % update_step == 0:
influences = self._update_params(constants)
logger.info(self.params)
prev = self._propagate(x,
influences,
prev_activation=prev) | Run a single epoch.
This function shuffles the data internally,
as this improves performance.
Parameters
----------
X : numpy array
The training data.
epoch_idx : int
The current epoch
batch_size : int
The batch size
updates_epoch : int
The number of updates to perform per epoch
constants : dict
A dictionary containing the constants with which to update the
parameters in self.parameters.
show_progressbar : bool
Whether to show a progressbar during training. |
def _update_params(self, constants):
for k, v in constants.items():
self.params[k]['value'] *= v
influence = self._calculate_influence(self.params['infl']['value'])
return influence * self.params['lr']['value'] | Update params and return new influence. |
def _create_batches(self, X, batch_size, shuffle_data=True):
if shuffle_data:
X = shuffle(X)
if batch_size > X.shape[0]:
batch_size = X.shape[0]
max_x = int(np.ceil(X.shape[0] / batch_size))
X = np.resize(X, (max_x, batch_size, X.shape[-1]))
return X | Create batches out of a sequence of data.
This function pads the final batch so that all batches are even-sized;
``np.resize`` fills the padding by recycling data from the start of the
sequence, and the padded tail is masked out during training. |
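A quick sketch of the padding behaviour with assumed toy data: five samples and a batch size of two give ceil(5 / 2) = 3 batches, with the final slot filled by ``np.resize``.
import numpy as np

X = np.arange(10).reshape(5, 2).astype(float)
batch_size = 2
max_x = int(np.ceil(X.shape[0] / batch_size))
batched = np.resize(X, (max_x, batch_size, X.shape[-1]))
print(batched.shape)  # (3, 2, 2)
print(batched[-1])    # last batch: sample 4 followed by recycled sample 0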
def _propagate(self, x, influences, **kwargs):
activation, difference_x = self.forward(x)
update = self.backward(difference_x, influences, activation)
# If batch size is 1 we can leave out the call to mean.
if update.shape[0] == 1:
self.weights += update[0]
else:
self.weights += update.mean(0)
return activation | Propagate a single batch of examples through the network. |
def backward(self, diff_x, influences, activations, **kwargs):
bmu = self._get_bmu(activations)
influence = influences[bmu]
update = np.multiply(diff_x, influence)
return update | Backward pass through the network, including update.
Parameters
----------
diff_x : numpy array
A matrix containing the differences between the input and neurons.
influences : numpy array
A matrix containing the influence each neuron has on each
other neuron. This is used to calculate the updates.
activations : numpy array
The activations each neuron has to each data point. This is used
to calculate the BMU.
Returns
-------
update : numpy array
A numpy array containing the updates to the neurons. |
def _check_input(self, X):
if np.ndim(X) == 1:
X = np.reshape(X, (1, -1))
if X.ndim != 2:
raise ValueError("Your data is not a 2D matrix. "
"Actual size: {0}".format(X.shape))
if X.shape[1] != self.data_dimensionality:
raise ValueError("Your data size != weight dim: {0}, "
"expected {1}".format(X.shape[1],
self.data_dimensionality))
return X | Check the input for validity.
Ensures that the input data, X, is a 2-dimensional matrix, and that
the second dimension of this matrix has the same dimensionality as
the weight matrix. |
def transform(self, X, batch_size=100, show_progressbar=False):
X = self._check_input(X)
batched = self._create_batches(X, batch_size, shuffle_data=False)
activations = []
prev = self._init_prev(batched)
for x in tqdm(batched, disable=not show_progressbar):
prev = self.forward(x, prev_activation=prev)[0]
activations.extend(prev)
activations = np.asarray(activations, dtype=np.float64)
activations = activations[:X.shape[0]]
return activations.reshape(X.shape[0], self.num_neurons) | Transform input to a distance matrix by measuring the L2 distance.
Parameters
----------
X : numpy array.
The input data.
batch_size : int, optional, default 100
The batch size to use in transformation. This may affect the
transformation in stateful, i.e. sequential SOMs.
show_progressbar : bool
Whether to show a progressbar during transformation.
Returns
-------
transformed : numpy array
A matrix containing the distance from each datapoint to all
neurons. The distance is normally expressed as euclidean distance,
but can be any arbitrary metric. |
def predict(self, X, batch_size=1, show_progressbar=False):
dist = self.transform(X, batch_size, show_progressbar)
res = dist.__getattribute__(self.argfunc)(1)
return res | Predict the BMU for each input data.
Parameters
----------
X : numpy array.
The input data.
batch_size : int, optional, default 1
The batch size to use in prediction. This may affect prediction
in stateful, i.e. sequential SOMs.
show_progressbar : bool
Whether to show a progressbar during prediction.
Returns
-------
predictions : numpy array
An array containing the BMU for each input data point. |
def quantization_error(self, X, batch_size=1):
dist = self.transform(X, batch_size)
res = dist.__getattribute__(self.valfunc)(1)
return res | Calculate the quantization error.
Find the minimum euclidean distance between the units and
some input.
Parameters
----------
X : numpy array.
The input data.
batch_size : int
The batch size to use for processing.
Returns
-------
error : numpy array
The error for each data point. |
def load(cls, path):
data = json.load(open(path))
weights = data['weights']
weights = np.asarray(weights, dtype=np.float64)
s = cls(data['num_neurons'],
data['data_dimensionality'],
data['params']['lr']['orig'],
neighborhood=data['params']['infl']['orig'],
valfunc=data['valfunc'],
argfunc=data['argfunc'],
lr_lambda=data['params']['lr']['factor'],
nb_lambda=data['params']['nb']['factor'])
s.weights = weights
s.trained = True
return s | Load a SOM from a JSON file saved with this package.
Parameters
----------
path : str
The path to the JSON file.
Returns
-------
s : cls
A som of the specified class. |
def save(self, path):
to_save = {}
for x in self.param_names:
attr = self.__getattribute__(x)
if type(attr) == np.ndarray:
attr = [[float(x) for x in row] for row in attr]
elif isinstance(attr, types.FunctionType):
attr = attr.__name__
to_save[x] = attr
json.dump(to_save, open(path, 'w')) | Save a SOM to a JSON file. |
def get_authversion(job_args):
_version = job_args.get('os_auth_version')
for version, variants in AUTH_VERSION_MAP.items():
if _version in variants:
authversion = job_args['os_auth_version'] = version
return authversion
else:
raise exceptions.AuthenticationProblem(
"Auth Version must be one of %s.",
list(AUTH_VERSION_MAP.keys())
) | Get or infer the auth version.
Based on the information found in the *AUTH_VERSION_MAP* the authentication
version will be set to a correct value as determined by the
**os_auth_version** parameter as found in the `job_args`.
:param job_args: ``dict``
:returns: ``str`` |
def get_service_url(region, endpoint_list, lookup):
for endpoint in endpoint_list:
region_get = endpoint.get('region', '')
if region.lower() == region_get.lower():
return http.parse_url(url=endpoint.get(lookup))
else:
raise exceptions.AuthenticationProblem(
'Region "%s" was not found in your Service Catalog.',
region
) | Lookup a service URL from the *endpoint_list*.
:param region: ``str``
:param endpoint_list: ``list``
:param lookup: ``str``
:return: ``object`` |
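A toy sketch of the catalog lookup with assumed data; a plain string is returned in place of ``http.parse_url`` and a generic exception stands in for the package's ``AuthenticationProblem``.
def find_endpoint(region, endpoint_list, lookup):
    for endpoint in endpoint_list:
        if region.lower() == endpoint.get('region', '').lower():
            return endpoint.get(lookup)
    raise LookupError('Region "%s" was not found in the catalog.' % region)

catalog = [{'region': 'IAD', 'publicURL': 'https://storage.iad.example.com/v1'},
           {'region': 'DFW', 'publicURL': 'https://storage.dfw.example.com/v1'}]
print(find_endpoint('iad', catalog, 'publicURL'))  # the IAD storage endpoint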
def get_headers(self):
try:
return {
'X-Auth-User': self.job_args['os_user'],
'X-Auth-Key': self.job_args['os_apikey']
}
except KeyError as exp:
raise exceptions.AuthenticationProblem(
'Missing Credentials. Error: %s',
exp
) | Setup headers for authentication request. |
def parse_auth_response(auth_response):
auth_dict = dict()
LOG.debug('Authentication Headers %s', auth_response.headers)
try:
auth_dict['os_token'] = auth_response.headers['x-auth-token']
auth_dict['storage_url'] = urlparse.urlparse(
auth_response.headers['x-storage-url']
)
except KeyError as exp:
raise exceptions.AuthenticationProblem(
'No token was found in the authentication response. Please'
' check your auth URL, your credentials, and your set auth'
' version. Auth Headers: [ %s ] Error: [ %s ]',
auth_response.headers,
exp
)
else:
return auth_dict | Parse the auth response and return the token and storage URL.
:param auth_response: the full object returned from an auth call
:returns: ``dict`` |
def auth_request(self, url, headers, body):
return self.req.post(url, headers, body=body) | Perform auth request for token. |
def parse_region(self):
try:
auth_url = self.job_args['os_auth_url']
if 'tokens' not in auth_url:
if not auth_url.endswith('/'):
auth_url = '%s/' % auth_url
auth_url = urlparse.urljoin(auth_url, 'tokens')
return auth_url
except KeyError:
raise exceptions.AuthenticationProblem(
'You Are required to specify an Auth URL, Region or Plugin'
) | Pull region/auth url information from context. |
def execute():
if len(sys.argv) <= 1:
raise SystemExit(
'No Arguments provided. use [--help] for more information.'
)
# Capture user arguments
_args = arguments.ArgumentParserator(
arguments_dict=turbolift.ARGUMENTS,
env_name='TURBO',
epilog=turbolift.VINFO,
title='Turbolift',
detail='Multiprocessing Swift CLI tool.',
description='Manage Swift easily and fast.'
)
user_args = _args.arg_parser()
user_args['run_indicator'] = True
debug_log = False
stream_logs = True
# Load system logging
if user_args.get('debug'):
debug_log = True
user_args['run_indicator'] = False
# Load system logging
if user_args.get('quiet'):
stream_logs = False
user_args['run_indicator'] = False
_logging = logger.LogSetup(
debug_logging=debug_log,
colorized_messages=user_args.get('colorized', False)
)
_logging.default_logger(name='turbolift', enable_stream=stream_logs)
job = worker.Worker(job_args=user_args)
job.run_manager() | This is the run section of the application Turbolift. |
def write(self, log_file, msg):
try:
with open(log_file, 'a') as LogFile:
LogFile.write(msg + os.linesep)
except:
raise Exception('Error Configuring PyLogger.TextStorage Class.')
return os.path.isfile(log_file) | Append message to .log file |
def read(self, log_file):
if os.path.isdir(os.path.dirname(log_file)) and os.path.isfile(log_file):
with open(log_file, 'r') as LogFile:
data = LogFile.readlines()
data = "".join(line for line in data)
else:
data = ''
return data | Read messages from .log file |
def fit(self, X):
if X.ndim > 2:
X = X.reshape((np.prod(X.shape[:-1]), X.shape[-1]))
self.mean = X.mean(0)
self.std = X.std(0)
self.is_fit = True
return self | Fit the scaler based on some data.
Takes the columnwise mean and standard deviation of the entire input
array.
If the array has more than 2 dimensions, it is flattened.
Parameters
----------
X : numpy array
Returns
-------
self : Scaler
The fitted scaler, with ``mean`` and ``std`` set columnwise. |
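A minimal numeric sketch of what fit stores and how transform then uses it (assumed toy data; the same arithmetic as the methods above):
import numpy as np

X = np.array([[1.0, 100.0],
              [2.0, 200.0],
              [3.0, 300.0]])
mean, std = X.mean(0), X.std(0)          # what fit() stores
scaled = (X - mean) / (std + 10e-7)      # what transform() computes
print(scaled.mean(0).round(6))           # ~[0, 0]
print(scaled.std(0).round(6))            # ~[1, 1]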
def transform(self, X):
if not self.is_fit:
raise ValueError("The scaler has not been fit yet.")
return (X-self.mean) / (self.std + 10e-7) | Transform your data to zero mean unit variance. |
def retry(ExceptionToCheck, tries=3, delay=1, backoff=1):
def deco_retry(f):
@functools.wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck:
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return f(*args, **kwargs)
return f_retry # true decorator
return deco_retry | Retry calling the decorated function using an exponential backoff.
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
:param ExceptionToCheck: the exception to check. may be a tuple of
exceptions to check
:type ExceptionToCheck: Exception or tuple
:param tries: number of times to try (not retry) before giving up
:type tries: int
:param delay: initial delay between retries in seconds
:type delay: int
:param backoff: backoff multiplier e.g. value of 2 will double the delay
each retry
:type backoff: int |
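A usage sketch, assuming the ``retry`` decorator above is in scope; the flaky function and its counter are illustrative only. With tries=3, two guarded attempts sleep and back off before the final, unguarded call.
calls = {'n': 0}

@retry(ValueError, tries=3, delay=0.01, backoff=2)
def flaky():
    # Fails twice, then succeeds on the third call.
    calls['n'] += 1
    if calls['n'] < 3:
        raise ValueError('transient failure')
    return 'ok after %d calls' % calls['n']

print(flaky())  # 'ok after 3 calls'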
def stupid_hack(most=10, wait=None):
# Stupid Hack For Public Cloud so it is not overwhelmed with API requests.
if wait is not None:
time.sleep(wait)
else:
time.sleep(random.randrange(1, most)) | Sleep for a random period between 1 and ``most`` seconds, or for ``wait`` seconds when given. |
def time_stamp():
# Time constants
fmt = '%Y-%m-%dT%H:%M:%S.%f'
date = datetime.datetime
date_delta = datetime.timedelta
now = datetime.datetime.utcnow()
return fmt, date, date_delta, now | Set up time helpers.
:returns: ``tuple`` |
def unique_list_dicts(dlist, key):
return list(dict((val[key], val) for val in dlist).values()) | Return a list of dictionaries filtered to only unique entries on the given key.
:param dlist:
:param key:
:return list: |
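A quick illustration with assumed data: later duplicates win, because dict construction keeps the last value seen for each key.
records = [{'name': 'a.txt', 'bytes': 10},
           {'name': 'b.txt', 'bytes': 20},
           {'name': 'a.txt', 'bytes': 30}]
unique = list(dict((val['name'], val) for val in records).values())
print(unique)  # 'a.txt' appears once, with the later value (bytes=30)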
def quoter(obj):
try:
try:
return urllib.quote(obj)
except AttributeError:
return urllib.parse.quote(obj)
except KeyError:
return obj | Return a Quoted URL.
The quote function will return a URL encoded string. If there is an
exception in the job which results in a "KeyError" the original
string will be returned as it will be assumed to already be URL
encoded.
:param obj: ``basestring``
:return: ``str`` |
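An illustration of the fallback (standalone, with an assumed sample path): on Python 3, ``urllib`` has no top-level ``quote``, so the ``AttributeError`` branch falls through to ``urllib.parse.quote``.
import urllib
try:
    import urllib.parse      # Python 3
except ImportError:          # Python 2 keeps quote() on urllib itself
    pass

def quote_path(obj):
    try:
        return urllib.quote(obj)
    except AttributeError:
        return urllib.parse.quote(obj)

print(quote_path('my container/my object name'))
# my%20container/my%20object%20name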
def start(self):
LOG.info('Clone warm up...')
# Create the target args
self._target_auth()
last_list_obj = None
while True:
self.indicator_options['msg'] = 'Gathering object list'
with indicator.Spinner(**self.indicator_options):
objects_list = self._list_contents(
single_page_return=True,
last_obj=last_list_obj
)
if not objects_list:
return
last_obj = utils.byte_encode(objects_list[-1].get('name'))
LOG.info(
'Last object [ %s ] Last object in the list [ %s ]',
last_obj,
last_list_obj
)
if last_list_obj == last_obj:
return
else:
last_list_obj = last_obj
self._clone_worker(objects_list=objects_list) | Clone objects from one container to another.
This method was built to clone a container between data-centers while
using the same credentials. The method assumes that an authentication
token will be valid within the two data centers. |
def authenticate(job_args):
# Load any authentication plugins as needed
job_args = utils.check_auth_plugin(job_args)
# Set the auth version
auth_version = utils.get_authversion(job_args=job_args)
# Define the base headers that are used in all authentications
auth_headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
auth_headers.update(job_args['base_headers'])
if auth_version == 'v1.0':
auth = utils.V1Authentication(job_args=job_args)
auth_headers.update(auth.get_headers())
LOG.debug('Request Headers: [ %s ]', auth_headers)
auth_url = job_args['os_auth_url']
LOG.debug('Parsed Auth URL: [ %s ]', auth_url)
auth_kwargs = {
'url': auth_url,
'headers': auth_headers
}
else:
auth = utils.OSAuthentication(job_args=job_args)
auth_url = auth.parse_region()
LOG.debug('Parsed Auth URL: [ %s ]', auth_url)
auth_json = auth.parse_reqtype()
LOG.debug('Request Headers: [ %s ]', auth_headers)
auth_body = json.dumps(auth_json)
LOG.debug('Request JSON: [ %s ]', auth_body)
auth_kwargs = {
'url': auth_url,
'headers': auth_headers,
'body': auth_body
}
auth_resp = auth.auth_request(**auth_kwargs)
if auth_resp.status_code >= 300:
raise exceptions.AuthenticationProblem(
'Authentication Failure, Status: [ %s ] Reason: [ %s ]',
auth_resp.status_code,
auth_resp.reason
)
else:
return auth.parse_auth_response(auth_resp) | Authentication for the OpenStack API.
Pulls the full OpenStack service catalog. Credentials are the user's API
username and key/password.
Set a DC endpoint and authentication URL for the OpenStack environment. |
def _config(self, **kargs):
for key, value in kargs.items():
setattr(self, key, value) | ReConfigure Package |
def getConfig(self, key):
if hasattr(self, key):
return getattr(self, key)
else:
return False | Get a Config Value |
def addFilter(self, filter):
self.FILTERS.append(filter)
return "FILTER#{}".format(len(self.FILTERS) - 1) | Register Custom Filter |
def addAction(self, action):
self.ACTIONS.append(action)
return "ACTION#{}".format(len(self.ACTIONS) - 1) | Register Custom Action |
def removeFilter(self, filter):
filter = filter.split('#')
del self.FILTERS[int(filter[1])]
return True | Remove Registered Filter |
def removeAction(self, action):
action = action.split('#')
del self.ACTIONS[int(action[1])]
return True | Remove Registered Action |
def info(self, msg):
self._execActions('info', msg)
msg = self._execFilters('info', msg)
self._processMsg('info', msg)
self._sendMsg('info', msg) | Log Info Messages |
def warning(self, msg):
self._execActions('warning', msg)
msg = self._execFilters('warning', msg)
self._processMsg('warning', msg)
self._sendMsg('warning', msg) | Log Warning Messages |
def error(self, msg):
self._execActions('error', msg)
msg = self._execFilters('error', msg)
self._processMsg('error', msg)
self._sendMsg('error', msg) | Log Error Messages |
def critical(self, msg):
self._execActions('critical', msg)
msg = self._execFilters('critical', msg)
self._processMsg('critical', msg)
self._sendMsg('critical', msg) | Log Critical Messages |
def log(self, msg):
self._execActions('log', msg)
msg = self._execFilters('log', msg)
self._processMsg('log', msg)
self._sendMsg('log', msg) | Log Normal Messages |
def _processMsg(self, type, msg):
now = datetime.datetime.now()
# Check If Path not provided
if self.LOG_FILE_PATH == '':
self.LOG_FILE_PATH = os.path.dirname(os.path.abspath(__file__)) + '/'
# Build absolute Path
log_file = self.LOG_FILE_PATH + now.strftime(self.LOG_FILE_FORMAT) + '.log'
# Add General Vars
msg = self.LOG_MESSAGE_FORMAT.format(
TYPE=type.upper(),
DATE=now.strftime(self.DATES_FORMAT),
DATETIME=now.strftime(self.DATETIME_FORMAT),
MESSAGE=msg,
)
# Check if to add platform data
if self.PLATFORM_DATA:
# Add Platform Specific Vars
msg = msg.format(
PL_TYPE=platform.machine(),
PL_NAME=platform.node(),
PL_PROCESSOR=platform.processor(),
PL_PY_BUILD_DATE=platform.python_build()[1],
PL_PY_COMPILER=platform.python_compiler(),
PL_PY_RELEASE=platform.release(),
PL_OS=platform.system(),
PL_TIMEZONE=strftime("%z", gmtime())
)
# Create Storage Instance
self._STORAGE = Storage(log_file)
# Write Storage
return self._STORAGE.write(msg) | Process a log message and write it to storage. |
def _configMailer(self):
self._MAILER = Mailer(self.MAILER_HOST, self.MAILER_PORT)
self._MAILER.login(self.MAILER_USER, self.MAILER_PWD) | Config Mailer Class |
def _sendMsg(self, type, msg):
if self.ALERT_STATUS and type in self.ALERT_TYPES:
self._configMailer()
self._MAILER.send(self.MAILER_FROM, self.ALERT_EMAIL, self.ALERT_SUBJECT, msg) | Send Alert Message To Emails |
def _execFilters(self, type, msg):
for filter in self.FILTERS:
msg = filter(type, msg)
return msg | Execute Registered Filters |
def _execActions(self, type, msg):
for action in self.ACTIONS:
action(type, msg) | Execute Registered Actions |
def check_basestring(item):
try:
return isinstance(item, (basestring, unicode))
except NameError:
return isinstance(item, str) | Return a ``bool`` from a string check on the item.
:param item: Item to check if it is a string
:type item: ``str``
:returns: ``bool`` |
def predict_distance(self, X, batch_size=1, show_progressbar=False):
X = self._check_input(X)
X_shape = reduce(np.multiply, X.shape[:-1], 1)
batched = self._create_batches(X, batch_size, shuffle_data=False)
activations = []
activation = self._init_prev(batched)
for x in tqdm(batched, disable=not show_progressbar):
activation = self.forward(x, prev_activation=activation)[0]
activations.append(activation)
act = np.asarray(activations, dtype=np.float64).transpose((1, 0, 2))
act = act[:X_shape]
return act.reshape(X_shape, self.num_neurons) | Predict distances to some input data. |
def generate(self, num_to_generate, starting_place):
res = []
activ = starting_place[None, :]
index = activ.__getattribute__(self.argfunc)(1)
item = self.weights[index]
for x in range(num_to_generate):
activ = self.forward(item, prev_activation=activ)[0]
index = activ.__getattribute__(self.argfunc)(1)
res.append(index)
item = self.weights[index]
return res | Generate data based on some initial position. |
def forward(self, x, **kwargs):
prev = kwargs['prev_activation']
# Differences is the components of the weights subtracted from
# the weight vector.
distance_x, diff_x = self.distance_function(x, self.weights)
distance_y, diff_y = self.distance_function(prev, self.context_weights)
x_ = distance_x * self.alpha
y_ = distance_y * self.beta
activation = np.exp(-(x_ + y_))
return activation, diff_x, diff_y | Perform a forward pass through the network.
The forward pass in recursive som is based on a combination between
the activation in the last time-step and the current time-step.
Parameters
----------
x : numpy array
The input data.
prev_activation : numpy array.
The activation of the network in the previous time-step.
Returns
-------
activations : tuple of activations and differences
A tuple containing the activation of each unit, the differences
between the weights and input and the differences between the
context input and context weights. |
def load(cls, path):
data = json.load(open(path))
weights = data['weights']
weights = np.asarray(weights, dtype=np.float64)
try:
context_weights = data['context_weights']
context_weights = np.asarray(context_weights,
dtype=np.float64)
except KeyError:
context_weights = np.zeros((len(weights), len(weights)))
try:
alpha = data['alpha']
beta = data['beta']
except KeyError:
alpha = 1.0
beta = 1.0
s = cls(data['map_dimensions'],
data['data_dimensionality'],
data['params']['lr']['orig'],
influence=data['params']['infl']['orig'],
alpha=alpha,
beta=beta,
lr_lambda=data['params']['lr']['factor'],
infl_lambda=data['params']['infl']['factor'])
s.weights = weights
s.context_weights = context_weights
s.trained = True
return s | Load a recursive SOM from a JSON file.
You can use this function to load weights of other SOMs.
If there are no context weights, they will be set to 0.
Parameters
----------
path : str
The path to the JSON file.
Returns
-------
s : cls
A som of the specified class. |
def backward(self, diff_x, influences, activations, **kwargs):
diff_y = kwargs['diff_y']
bmu = self._get_bmu(activations)
influence = influences[bmu]
# Update
x_update = np.multiply(diff_x, influence)
y_update = np.multiply(diff_y, influence)
return x_update, y_update | Backward pass through the network, including update.
Parameters
----------
diff_x : numpy array
A matrix containing the differences between the input and neurons.
influences : numpy array
A matrix containing the influence each neuron has on each
other neuron. This is used to calculate the updates.
activations : numpy array
The activations each neuron has to each data point. This is used
to calculate the BMU.
diff_y : numpy array
The differences between the input and context neurons.
Returns
-------
updates : tuple of arrays
The updates to the weights and context weights, respectively. |
def start(self):
LOG.info('Listing options...')
with indicator.Spinner(**self.indicator_options):
objects_list = self._list_contents()
if not objects_list:
return
if isinstance(objects_list[0], dict):
filter_dlo = self.job_args.get('filter_dlo')
if filter_dlo:
dynamic_hash = hashlib.sha256(
self.job_args.get('container')
)
dynamic_hash = dynamic_hash.hexdigest()
objects_list = [
i for i in objects_list
if dynamic_hash not in i.get('name')
]
string_filter = self.job_args.get('filter')
if string_filter:
objects_list = [
i for i in objects_list
if string_filter in i.get('name')
]
self.print_horiz_table(objects_list)
else:
self.print_virt_table(objects_list[0].headers) | Return a list of objects from the API for a container. |
def _return_base_data(self, url, container, container_object=None,
container_headers=None, object_headers=None):
headers = self.job_args['base_headers']
headers.update({'X-Auth-Token': self.job_args['os_token']})
_container_uri = url.geturl().rstrip('/')
if container:
_container_uri = '%s/%s' % (
_container_uri, cloud_utils.quoter(container)
)
if container_object:
_container_uri = '%s/%s' % (
_container_uri, cloud_utils.quoter(container_object)
)
if object_headers:
headers.update(object_headers)
if container_headers:
headers.update(container_headers)
return headers, urlparse.urlparse(_container_uri) | Return headers and a parsed url.
:param url:
:param container:
:param container_object:
:param container_headers:
:return: ``tuple`` |
def _chunk_putter(self, uri, open_file, headers=None):
count = 0
dynamic_hash = hashlib.sha256(self.job_args.get('container'))
dynamic_hash = dynamic_hash.hexdigest()
while True:
# Read in a chunk of an open file
file_object = open_file.read(self.job_args.get('chunk_size'))
if not file_object:
break
# When a chunk is present, store it as BytesIO
with io.BytesIO(file_object) as file_object:
# store the parsed URI for the chunk
chunk_uri = urlparse.urlparse(
'%s.%s.%s' % (
uri.geturl(),
dynamic_hash,
count
)
)
# Increment the count as soon as it is used
count += 1
# Check if the read chunk already exists
sync = self._sync_check(
uri=chunk_uri,
headers=headers,
file_object=file_object
)
if not sync:
continue
# PUT the chunk
_resp = self.http.put(
url=chunk_uri,
body=file_object,
headers=headers
)
self._resp_exception(resp=_resp)
LOG.debug(_resp.__dict__) | Make many PUT request for a single chunked object.
Objects that are processed by this method have a SHA256 hash appended
to the name as well as a count for object indexing which starts at 0.
To make the PUT requests, pass the target ``uri`` and an ``open_file`` object.
:param uri: ``str``
:param open_file: ``object``
:param headers: ``dict`` |
def _putter(self, uri, headers, local_object=None):
if not local_object:
return self.http.put(url=uri, headers=headers)
with open(local_object, 'rb') as f_open:
large_object_size = self.job_args.get('large_object_size')
if not large_object_size:
large_object_size = 5153960756
if os.path.getsize(local_object) > large_object_size:
# Remove the manifest entry while working with chunks
manifest = headers.pop('X-Object-Manifest')
# Feed the open file through the chunk process
self._chunk_putter(
uri=uri,
open_file=f_open,
headers=headers
)
# Upload the 0 byte object with the manifest path
headers.update({'X-Object-Manifest': manifest})
return self.http.put(url=uri, headers=headers)
else:
if self.job_args.get('sync'):
sync = self._sync_check(
uri=uri,
headers=headers,
local_object=local_object
)
if not sync:
return None
return self.http.put(
url=uri, body=f_open, headers=headers
) | Place object into the container.
:param uri:
:param headers:
:param local_object: |
def _getter(self, uri, headers, local_object):
if self.job_args.get('sync'):
sync = self._sync_check(
uri=uri,
headers=headers,
local_object=local_object
)
if not sync:
return None
# perform Object GET request
resp = self.http.get(url=uri, headers=headers)
self._resp_exception(resp=resp)
# Open our source file and write it
chunk_size = self.job_args['download_chunk_size']
with open(local_object, 'wb') as f_name:
for chunk in resp.iter_content(chunk_size=chunk_size):
if chunk:
f_name.write(chunk)
f_name.flush()
if self.job_args.get('restore_perms'):
if 'X-Object-Meta-perms' in resp.headers:
os.chmod(
local_object,
int(resp.headers['x-object-meta-perms'], 8)
)
chown_file = {'uid': -1, 'gid': -1}
if 'X-Object-Meta-owner' in resp.headers:
chown_file['uid'] = pwd.getpwnam(
resp.headers['X-Object-Meta-owner']
).pw_uid
if 'X-Object-Meta-group' in resp.headers:
chown_file['gid'] = grp.getgrnam(
resp.headers['X-Object-Meta-group']
).gr_gid
os.chown(local_object, *chown_file.values())
return resp | Perform a GET request and write a specified object from the container to a local file.
:param uri: ``str``
:param headers: ``dict`` |
def _deleter(self, uri, headers):
# perform Object DELETE request
resp = self.http.delete(url=uri, headers=headers)
self._resp_exception(resp=resp)
return resp | Perform a DELETE request on a specified object in the container.
:param uri: ``str``
:param headers: ``dict`` |
def _header_getter(self, uri, headers):
# perform Object HEAD request
resp = self.http.head(url=uri, headers=headers)
self._resp_exception(resp=resp)
return resp | Perform HEAD request on a specified object in the container.
:param uri: ``str``
:param headers: ``dict`` |
def _header_poster(self, uri, headers):
resp = self.http.post(url=uri, body=None, headers=headers)
self._resp_exception(resp=resp)
return resp | POST Headers on a specified object in the container.
:param uri: ``str``
:param headers: ``dict`` |
def _obj_index(self, uri, base_path, marked_path, headers, spr=False):
object_list = list()
l_obj = None
container_uri = uri.geturl()
while True:
marked_uri = urlparse.urljoin(container_uri, marked_path)
resp = self.http.get(url=marked_uri, headers=headers)
self._resp_exception(resp=resp)
return_list = resp.json()
if spr:
return return_list
time_offset = self.job_args.get('time_offset')
for obj in return_list:
if time_offset:
# Get the last_modified data from the Object.
time_delta = cloud_utils.TimeDelta(
job_args=self.job_args,
last_modified=time_offset
)
if time_delta:
object_list.append(obj)
else:
object_list.append(obj)
if object_list:
last_obj_in_list = object_list[-1].get('name')
else:
last_obj_in_list = None
if l_obj == last_obj_in_list:
return object_list
else:
l_obj = last_obj_in_list
marked_path = self._last_marker(
base_path=base_path,
last_object=l_obj
) | Return an index of objects from within the container.
:param uri:
:param base_path:
:param marked_path:
:param headers:
:param spr: "single page return" Limit the returned data to one page
:type spr: ``bool``
:return: |
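A self-contained sketch of the marker-pagination loop above; ``fetch_page`` is a hypothetical stand-in for the authenticated GET request. The listing stops when a page makes no progress, i.e. the last object name repeats.
def fetch_page(marker=None, page_size=3):
    # Stand-in for the API call: return up to page_size names after the marker.
    names = ['a', 'b', 'c', 'd', 'e']
    start = names.index(marker) + 1 if marker else 0
    return [{'name': n} for n in names[start:start + page_size]]

objects, last_obj = [], None
while True:
    objects.extend(fetch_page(marker=last_obj))
    newest = objects[-1]['name'] if objects else None
    if newest == last_obj:   # no progress: the listing is exhausted
        break
    last_obj = newest

print([o['name'] for o in objects])  # ['a', 'b', 'c', 'd', 'e']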
def _list_getter(self, uri, headers, last_obj=None, spr=False):
# Quote the file path.
base_path = marked_path = ('%s?limit=10000&format=json' % uri.path)
if last_obj:
marked_path = self._last_marker(
base_path=base_path,
last_object=cloud_utils.quoter(last_obj)
)
file_list = self._obj_index(
uri=uri,
base_path=base_path,
marked_path=marked_path,
headers=headers,
spr=spr
)
LOG.debug(
'Found [ %d ] entries(s) at [ %s ]',
len(file_list),
uri.geturl()
)
if spr:
return file_list
else:
return cloud_utils.unique_list_dicts(
dlist=file_list, key='name'
) | Get a list of all objects in a container.
:param uri:
:param headers:
:return list:
:param spr: "single page return" Limit the returned data to one page
:type spr: ``bool`` |
def list_items(self, url, container=None, last_obj=None, spr=False):
headers, container_uri = self._return_base_data(
url=url,
container=container
)
if container:
resp = self._header_getter(uri=container_uri, headers=headers)
if resp.status_code == 404:
LOG.info('Container [ %s ] not found.', container)
return [resp]
return self._list_getter(
uri=container_uri,
headers=headers,
last_obj=last_obj,
spr=spr
) | Builds a long list of objects found in a container.
NOTE: This could be millions of Objects.
:param url:
:param container:
:param last_obj:
:param spr: "single page return" Limit the returned data to one page
:type spr: ``bool``
:return None | list: |
def update_object(self, url, container, container_object, object_headers,
container_headers):
headers, container_uri = self._return_base_data(
url=url,
container=container,
container_object=container_object,
container_headers=container_headers,
object_headers=object_headers,
)
return self._header_poster(
uri=container_uri,
headers=headers
) | Update an existing object in a swift container.
This method will place new headers on an existing object or container.
:param url:
:param container:
:param container_object: |
def container_cdn_command(self, url, container, container_object,
cdn_headers):
headers, container_uri = self._return_base_data(
url=url,
container=container,
container_object=container_object,
object_headers=cdn_headers
)
if self.job_args.get('purge'):
return self._deleter(
uri=container_uri,
headers=headers
)
else:
return self._header_poster(
uri=container_uri,
headers=headers
) | Command your CDN enabled Container.
:param url:
:param container: |
def put_container(self, url, container, container_headers=None):
headers, container_uri = self._return_base_data(
url=url,
container=container,
container_headers=container_headers
)
resp = self._header_getter(
uri=container_uri,
headers=headers
)
if resp.status_code == 404:
return self._putter(uri=container_uri, headers=headers)
else:
return resp | Create a container if it is not found.
:param url:
:param container: |
def put_object(self, url, container, container_object, local_object,
object_headers, meta=None):
headers, container_uri = self._return_base_data(
url=url,
container=container,
container_object=container_object,
container_headers=object_headers,
object_headers=meta
)
return self._putter(
uri=container_uri,
headers=headers,
local_object=local_object
) | This is the sync method, which uploads files to the Swift repository
if they are not already found. If a file "name" is found locally and
in the Swift repository an MD5 comparison is done between the two
files. If the MD5 is mismatched the local file is uploaded to the
repository. If custom metadata is specified and the object exists, the
method will put the metadata onto the object.
:param url:
:param container:
:param container_object: |
def get_items(self, url, container, container_object, local_object):
headers, container_uri = self._return_base_data(
url=url,
container=container,
container_object=container_object
)
return self._getter(
uri=container_uri,
headers=headers,
local_object=local_object
) | Get an object from a container.
:param url:
:param container: |
def delete_items(self, url, container, container_object=None):
headers, container_uri = self._return_base_data(
url=url,
container=container,
container_object=container_object
)
return self._deleter(uri=container_uri, headers=headers) | Delete an object in a container.
:param url:
:param container: |
def _get_bmu(self, activations):
# If the neural gas is a recursive neural gas, we need reverse argsort.
if self.argfunc == 'argmax':
activations = -activations
sort = np.argsort(activations, 1)
return sort.argsort() | Get indices of bmus, sorted by their distance from input. |