code | docstring
---|---
def solar_position_numba(unixtime, lat, lon, elev, pressure, temp, delta_t,
atmos_refract, numthreads, sst=False, esd=False):
"""Calculate the solar position using the numba compiled functions
and multiple threads. Very slow if functions are not numba compiled.
"""
# these args are the same for each thread
loc_args = np.array([lat, lon, elev, pressure, temp, delta_t,
atmos_refract, sst, esd])
# construct dims x ulength array to put the results in
ulength = unixtime.shape[0]
if sst:
dims = 3
elif esd:
dims = 1
else:
dims = 6
result = np.empty((dims, ulength), dtype=np.float64)
if unixtime.dtype != np.float64:
unixtime = unixtime.astype(np.float64)
if ulength < numthreads:
warnings.warn('The number of threads is more than the length of '
'the time array. Only using {} threads.'.format(ulength))
numthreads = ulength
if numthreads <= 1:
solar_position_loop(unixtime, loc_args, result)
return result
# split the input and output arrays into numthreads chunks
split0 = np.array_split(unixtime, numthreads)
split2 = np.array_split(result, numthreads, axis=1)
chunks = [[a0, loc_args, split2[i]] for i, a0 in enumerate(split0)]
# Spawn one thread per chunk
threads = [threading.Thread(target=solar_position_loop, args=chunk)
for chunk in chunks]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return result | Calculate the solar position using the numba compiled functions
and multiple threads. Very slow if functions are not numba compiled. |
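The pattern above (preallocate a result array, split both input and output into per-thread views with np.array_split, and let each thread fill its own slice in place) generalizes beyond solar position. A minimal standalone sketch of that pattern; _square_loop and threaded_apply are hypothetical stand-ins for illustration, not pvlib code:

import threading
import numpy as np

def _square_loop(x, out):
    # stand-in for solar_position_loop: write results into the provided view
    out[:] = x ** 2

def threaded_apply(x, numthreads=4):
    result = np.empty_like(x, dtype=np.float64)
    numthreads = min(numthreads, len(x))
    chunks_in = np.array_split(x, numthreads)
    chunks_out = np.array_split(result, numthreads)  # views into result
    threads = [threading.Thread(target=_square_loop, args=(a, b))
               for a, b in zip(chunks_in, chunks_out)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return result

print(threaded_apply(np.arange(10.0)))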
def run(self):
"""Load table data to :class:`EuroStatsValue` objects
"""
# -- start documentation include: eurostats-run-1
# create a new indicator metadata object
indicator = models.EuroStatIndicator(
number=self.number,
description=self.description,
url="http://ec.europa.eu/eurostat/web/products-datasets/-/tgs" + self.number)
# add/commit to get the object ID filled
self.session.add(indicator)
self.session.commit()
# -- end documentation include: eurostats-run-1
# -- start documentation include: eurostats-run-2
# load data from input file task
df = next(self.requires()).load(key_filter=self.key_filter,
header_preproc=self.header_preproc)
# Transform data: DataFrame from loading has NUTS2 key and years as columns.
# Index by key, then stack years as second level of index. Reset the index
# to get year and key as regular columns, with one value column left.
values = df.set_index('key').stack()
values.index.levels[1].name = 'year'
values.name = 'value'
df = values.reset_index()
# -- end documentation include: eurostats-run-2
# -- start documentation include: eurostats-run-3
# get current max ID for EuroStatValue objects, for manual ID generation
max_id = models.EuroStatValue.get_max_id(self.session)
# append an ID column, starting with the current max ID of the object class plus one
df['id'] = list(range(max_id + 1, max_id + 1 + len(df)))
# -- end documentation include: eurostats-run-3
# -- start documentation include: eurostats-run-4
# append indicator ID (constant)
df['indicator_id'] = indicator.id
# append region ID column, by mapping NUTS2 region keys to DB object IDs
regions = self.client.df_query(self.session.query(models.NUTS2Region)) \
.set_index('key')['id']
df['region_id'] = df['key'].map(regions)
# drop columns that are not part of the data model
df = df.drop(['key'], axis=1) # type: pd.DataFrame
# -- end documentation include: eurostats-run-4
# -- start documentation include: eurostats-run-5
# store, done
df.to_sql(name=models.EuroStatValue.__tablename__,
con=client.get_client().engine,
if_exists='append',
index=False)
self.done() | Load table data to :class:`EuroStatValue` objects |
def data_to_dict(self, sysbase=False):
"""
Return the loaded model parameters as one dictionary.
Each key of the dictionary is a parameter name, and the value is a
list of all the parameter values.
:param sysbase: use system base quantities
:type sysbase: bool
"""
assert isinstance(sysbase, bool)
ret = {}
for key in self.data_keys:
if (not sysbase) and (key in self._store):
val = self._store[key]
else:
val = self.__dict__[key]
ret[key] = val
return ret | Return the loaded model parameters as one dictionary.
Each key of the dictionary is a parameter name, and the value is a
list of all the parameter values.
:param sysbase: use system base quantities
:type sysbase: bool |
def read_cdx(file, encoding='utf8'):
'''Iterate CDX file.
Args:
file: A file object.
encoding (str): The encoding of the file.
Returns:
iterator: Each item is a dict that maps from field key to value.
'''
with codecs.getreader(encoding)(file) as stream:
header_line = stream.readline()
separator = header_line[0]
field_keys = header_line.strip().split(separator)
if field_keys.pop(0) != 'CDX':
raise ValueError('CDX header not found.')
for line in stream:
yield dict(zip(field_keys, line.strip().split(separator))) | Iterate CDX file.
Args:
file: A file object.
encoding (str): The encoding of the file.
Returns:
iterator: Each item is a dict that maps from field key to value. |
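A hedged usage sketch: 'example.cdx' is a placeholder path, and the file is opened in binary mode because codecs.getreader wraps a byte stream (the codecs import is assumed, as the function above requires it):

with open('example.cdx', 'rb') as f:
    for record in read_cdx(f):
        print(record)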
def available(name):
'''
Check if a service is available on the system.
Args:
name (str): The name of the service to check
Returns:
bool: ``True`` if the service is available, ``False`` otherwise
CLI Example:
.. code-block:: bash
salt '*' service.available <service name>
'''
for service in get_all():
if name.lower() == service.lower():
return True
return False | Check if a service is available on the system.
Args:
name (str): The name of the service to check
Returns:
bool: ``True`` if the service is available, ``False`` otherwise
CLI Example:
.. code-block:: bash
salt '*' service.available <service name> |
def to_operator(self):
"""Try to convert channel to a unitary representation Operator."""
mat = _to_operator(self.rep, self._data, *self.dim)
return Operator(mat, self.input_dims(), self.output_dims()) | Try to convert channel to a unitary representation Operator. |
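For context, a usage sketch assuming Qiskit's quantum_info classes: a channel with a single unitary Kraus operator converts cleanly to an Operator, while a genuinely non-unitary channel would raise instead.

import numpy as np
from qiskit.quantum_info import Kraus

x_gate = np.array([[0, 1], [1, 0]], dtype=complex)
channel = Kraus([x_gate])      # single-Kraus (unitary) channel
op = channel.to_operator()     # succeeds because the channel is unitary
print(op.data)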
def load_schema(name):
"""
loads the schema by name
:param name: name of the model
"""
schema = import_schema_to_json(name)
#salesking specific swap
#//set link relation as key name to make it easier to call these
for item in schema['links']:
#//set link relation as key name to make it easier to call these
# foreach($schema->links as $key => $link)
# {
# $schema->links[$link->rel] = $link;
# unset($schema->links[$key]);
# }
# this here seems not to work as expected
# something is wrong
href_value = item['href']
rel_value = item['rel']
schema[rel_value] = href_value
del item
## sk use nesting of schema
## dynamically loading
for prop in schema['properties']:
value = schema['properties'][prop]
# arrays may contain the nesting
is_type_array = (value['type'] == 'array')
is_type_object = (value['type'] == 'object')
if ((is_type_array or is_type_object)
and (_value_properties_are_referenced(value))):
schema = _load_referenced_schema_from_properties(value, schema, prop)
if is_type_array and _value_is_default_any(value) and _value_has_items_key(value):
schema = _load_referenced_schemes_from_list(value['items'], value, schema, prop)
if _value_is_required(value):
# remove required
schema['properties'][prop]['required'] = False
# hack to bypass text format validation to string
if _value_is_type_text(value):
log.debug("patched text to string")
schema['properties'][prop]['type'] = u"string"
#ignore the readonly properties auto validation
#if 'readonly' in value.keys() and value['readonly'] == True:
# log.debug("patched required validation to none required")
# schema['properties'][property]['readonly'] = False
# sk works on title and not name
schema['name'] = schema['title']
## go one level deeper as we now have some replacements
# put it to storage when done
# if not JsonSchemaStore.is_stored(name) and (schema is not None):
# JsonSchemaStore.copy_to_store(name, schema)
return schema | loads the schema by name
:param name: name of the model |
def _repo_url_to_path(self, repo):
"""Convert a `repo` url to a file path for local storage."""
repo = repo.replace('http://', '')
repo = repo.replace('https://', '')
repo = repo.replace('/', '_')
return os.sep.join([self._data_directory, repo]) | Convert a `repo` url to a file path for local storage. |
def export_project(self):
""" Processes misc options specific for GCC ARM, and run generator """
generated_projects = deepcopy(self.generated_projects)
self.process_data_for_makefile(self.workspace)
generated_projects['path'], generated_projects['files']['makefile'] = self.gen_file_jinja('makefile_armcc.tmpl', self.workspace, 'Makefile', self.workspace['output_dir']['path'])
return generated_projects | Processes misc options specific for GCC ARM, and run generator |
def catalog(self, table='', column=''):
"""Lookup the values available for querying."""
lookup_table = self.lookup_table
if lookup_table is not None:
if table:
if column:
column = column.upper()
return lookup_table[table][column]
return lookup_table[table]
# Show what methods are available.
return self.lookup_methods
return None | Lookup the values available for querying. |
def get(self, index):
"""Get a chunk by index"""
assert index <= self.count
assert index < self.size
offset = index * self.chunk_size
return self.data[offset:offset + self.chunk_size] | Get a chunk by index |
def _active_mounts_aix(ret):
'''
List active mounts on AIX systems
'''
for line in __salt__['cmd.run_stdout']('mount -p').split('\n'):
comps = re.sub(r"\s+", " ", line).split()
if comps:
if comps[0] == 'node' or comps[0] == '--------':
continue
comps_len = len(comps)
if line.startswith((' ', '\t')):
curr_opts = _resolve_user_group_names(comps[6].split(',')) if 7 == comps_len else []
if curr_opts:
ret[comps[1]] = {'device': comps[0],
'fstype': comps[2],
'opts': curr_opts}
else:
ret[comps[1]] = {'device': comps[0],
'fstype': comps[2]}
else:
curr_opts = _resolve_user_group_names(comps[7].split(',')) if 8 == comps_len else []
if curr_opts:
ret[comps[2]] = {'node': comps[0],
'device': comps[1],
'fstype': comps[3],
'opts': curr_opts}
else:
ret[comps[2]] = {'node': comps[0],
'device': comps[1],
'fstype': comps[3]}
return ret | List active mounts on AIX systems |
def delete_project(self, tenant_name, part_name):
"""Delete project on the DCNM.
:param tenant_name: name of project.
:param part_name: name of partition.
"""
res = self._delete_partition(tenant_name, part_name)
if res and res.status_code in self._resp_ok:
LOG.debug("Deleted %s partition in DCNM.", part_name)
else:
LOG.error("Failed to delete %(part)s partition in DCNM."
"Response: %(res)s", {'part': part_name, 'res': res})
raise dexc.DfaClientRequestFailed(reason=res)
res = self._delete_org(tenant_name)
if res and res.status_code in self._resp_ok:
LOG.debug("Deleted %s organization in DCNM.", tenant_name)
else:
LOG.error("Failed to delete %(org)s organization in DCNM."
"Response: %(res)s", {'org': tenant_name, 'res': res})
raise dexc.DfaClientRequestFailed(reason=res) | Delete project on the DCNM.
:param tenant_name: name of project.
:param part_name: name of partition. |
def take_screenshot(self, screenshot_name=None, screenshot_path=None):
"""Take a screenshot
Use the screenshot_name argument when you want to take a
screenshot for reference.
If the `runner:cache_screenshot` config is set to True then
screenshots sharing the same name will be saved only once.
The screenshot_path argument is used exclusively by the
proxy_driver:create_test_result function.
Args:
screenshot_name (str) the name of the screenshot
screenshot_path (str) the path of the screenshot
"""
self.info_log("Taking a screenshot...")
save_to_db = False
if screenshot_path:
self._driver.save_screenshot(screenshot_path)
self.debug_log("Screenshot taken (%s)" % screenshot_path)
elif screenshot_name:
take_screenshot = True
if hasattr(self.runner, "screenshot_cache"):
if self.runner.screenshot_cache.get(screenshot_name):
self.debug_log(
"screenshot(%s) found in cache" % screenshot_name
)
take_screenshot = False
if take_screenshot:
if self.test_instance._runner_dir:
_screenshot_name = '%s.png' % \
string_to_filename(screenshot_name)
relative_path = os.path.join(
self.test_instance._screenshot_relative_dir,
_screenshot_name
)
full_path = os.path.join(
self.test_instance._screenshot_dir,
_screenshot_name
)
self._driver.save_screenshot(
full_path
)
self.debug_log("Screenshot taken (%s)" % full_path)
save_to_db = True
else:
if self.test_instance._runner_dir:
screenshot_name = get_timestamp()
_screenshot_name = '%s.png' % screenshot_name
relative_path = os.path.join(
self.test_instance._screenshot_relative_dir,
_screenshot_name
)
full_path = os.path.join(
self.test_instance._screenshot_dir,
_screenshot_name
)
self._driver.save_screenshot(
full_path
)
self.debug_log("Screenshot taken (%s)" % full_path)
save_to_db = True
if save_to_db:
with DbSessionContext(BROME_CONFIG['database']['mongo_database_name']) as session: # noqa
capabilities = {
'browserName': self.capabilities['browserName'],
'platform': self.capabilities['platform'],
'version': self.capabilities['version']
}
screenshot = Testscreenshot()
screenshot.browser_capabilities = capabilities
screenshot.browser_id = self.get_id()
# TODO support s3
screenshot.location = 'local_file_system'
screenshot.root_path = self.test_instance._runner.root_test_result_dir # noqa
screenshot.file_path = relative_path
screenshot.extra_data = {}
screenshot.title = screenshot_name
screenshot.test_instance_id = self.test_instance._test_instance_id # noqa
screenshot.test_batch_id = self.test_instance._test_batch_id # noqa
session.save(screenshot, safe=True) | Take a screenshot
Use the screenshot_name argument when you want to take a
screenshot for reference.
If the `runner:cache_screenshot` config is set to True then
screenshots sharing the same name will be saved only once.
The screenshot_path argument is used exclusively by the
proxy_driver:create_test_result function.
Args:
screenshot_name (str) the name of the screenshot
screenshot_path (str) the path of the screenshot |
def raise_for_status(self):
"""Raises stored :class:`HTTPError` or :class:`URLError`, if occurred.
"""
if not self.ok:
reason = self.reason or 'No response from %s' % self.url
if not self.status_code:
raise HttpConnectionError(reason, response=self)
if 400 <= self.status_code < 500:
http_error_msg = '%s Client Error - %s - %s %s' % (
self.status_code, reason, self.request.method, self.url)
else:
http_error_msg = '%s Server Error - %s - %s %s' % (
self.status_code, reason, self.request.method, self.url)
raise HttpRequestException(http_error_msg, response=self) | Raises stored :class:`HTTPError` or :class:`URLError`, if occurred. |
def run(self, args=None):
"""! @brief Main entry point for command line processing."""
try:
self._args = self.build_parser().parse_args(args)
# Running without a subcommand will print usage.
if self._args.cmd is None:
if self._args.help_options:
self.show_options_help()
else:
self._parser.print_help()
return 1
# The default log level differs for some subcommands.
self._default_log_level = DEFAULT_CMD_LOG_LEVEL[self._args.cmd]
self._setup_logging()
# Pass any options to DAPAccess.
if hasattr(self._args, 'daparg'):
DAPAccess.set_args(self._args.daparg)
# Invoke subcommand.
self._COMMANDS[self._args.cmd](self)
# Successful exit.
return 0
except KeyboardInterrupt:
return 0
except exceptions.Error as e:
LOG.error(e, exc_info=Session.get_current().log_tracebacks)
except Exception as e:
LOG.error("uncaught exception: %s", e, exc_info=Session.get_current().log_tracebacks)
return 1 | ! @brief Main entry point for command line processing. |
def get_advanced_foreign_key_options_sql(self, foreign_key):
"""
Returns the FOREIGN KEY query section dealing with non-standard options
as MATCH, INITIALLY DEFERRED, ON UPDATE, ...
:param foreign_key: The foreign key
:type foreign_key: ForeignKeyConstraint
:rtype: str
"""
query = ""
if self.supports_foreign_key_on_update() and foreign_key.has_option(
"on_update"
):
query += " ON UPDATE %s" % self.get_foreign_key_referential_action_sql(
foreign_key.get_option("on_update")
)
if foreign_key.has_option("on_delete"):
query += " ON DELETE %s" % self.get_foreign_key_referential_action_sql(
foreign_key.get_option("on_delete")
)
return query | Returns the FOREIGN KEY query section dealing with non-standard options
as MATCH, INITIALLY DEFERRED, ON UPDATE, ...
:param foreign_key: The foreign key
:type foreign_key: ForeignKeyConstraint
:rtype: str |
def compileActions(self):
"""Build the action table from the text above
"""
import re
self.actionList = actions = [None]*121
#Action 73, which is too long, looks like this when expanded:
actions[73] = "b' the '+w+b' of the '"
#find out what the columns are
actionLines = self.actionTable.splitlines()
colonPositions = [m.start()
for m in re.finditer(':',actionLines[1])
]+[100]
columns = [(colonPositions[i]-3,colonPositions[i+1]-3)
for i in range(len(colonPositions)-1)]
for line in self.actionTable.splitlines(keepends=False):
for start,end in columns:
action = line[start:end]
#skip empty actions
if not action or action.isspace(): continue
#chop it up, and check if the colon is properly placed
index, colon, action = action[:3], action[3], action[4:]
assert colon==':'
#remove filler spaces at right
action = action.rstrip()
#replace space symbols
action = action.replace('_', ' ')
wPos = action.index('w')
#add quotes around left string when present
#translation: any pattern from beginning, up to
#(but not including) a + following by a w later on
action = re.sub(r"^(.*)(?=\+[U(]*w)", r"b'\1'", action)
#add quotes around right string when present
#translation: anything with a w in it, followed by a +
#and a pattern up to the end
#(there is no variable lookbehind assertion,
#so we have to copy the pattern)
action = re.sub(r"(w[[:\-1\]).U]*)\+(.*)$", r"\1+b'\2'", action)
#expand shortcut for uppercaseAll
action = action.replace(".U", ".upper()")
#store action
actions[int(index)] = action | Build the action table from the text above |
def setFontWeight(self, weight):
"""
Sets the font weight for this editor to the given weight.
:param weight | <QFont.Weight>
"""
font = self.currentFont()
font.setWeight(weight)
self.setCurrentFont(font) | Sets the font weight for this editor to the given weight.
:param weight | <QFont.Weight> |
def set_config_file(self, path):
"""
Set the config file. The contents must be valid YAML and there
must be a top-level element 'tasks'. The listed tasks will be
started according to their configuration, and the file will
be watched for future changes. The changes will be activated
by appropriate changes to the running tasks.
"""
log = self._params.get('log', self._discard)
if path != self._config_file:
if self._config_file:
log.info("Config file changed from '%s' to '%s'", self._config_file, path)
self.file_del(self, paths=[self._config_file])
else:
log.info("Config file set to '%s'", path)
self._config_file = path
self.file_add(event_target(self, 'legion_config', log=log), path)
return self._load_config() | Set the config file. The contents must be valid YAML and there
must be a top-level element 'tasks'. The listed tasks will be
started according to their configuration, and the file will
be watched for future changes. The changes will be activated
by appropriate changes to the running tasks. |
def collect(self, device, ip, user, password):
""" Collects metrics for our netapp filer --START HERE--
"""
if netappsdk is None:
self.log.error(
'Failed to import netappsdk.NaServer or netappsdk.NaElement')
return
if device in self.running:
return
self.running.add(device)
prefix = self.config['path_prefix']
pm = self.publish_metric
netapp_inodeCol(device, ip, user, password, prefix, pm)
self.running.remove(device) | Collects metrics for our netapp filer --START HERE-- |
def nagiosCommandHelp(**kwargs):
"""
Returns command help document when no command is specified
"""
with open(os.path.join(DIRECTORY, 'document.html')) as document:
return document.read() | Returns command help document when no command is specified |
def api_client_two_way(connection, connection_responder, client_class=xbahn.api.Client):
"""
Establishes an API client for two-way communication
connection with an API Server
Arguments:
- connection (xbahn.connection.Connection)
- connection_responder (xbahn.connection.Connection): This connection will
be used by the server to send requests to the client
Keyword Arguments:
- client_class (xbahn.api.Client): if supplied use this class to instantiate
the client object. If omitted will use xbahn.api.Client.
Returns:
- client_class: client instance
"""
# create connection link instance
link = xbahn.connection.link.Link()
# main wire
link.wire(
"main",
receive=connection,
send=connection,
# in order to let the server know how to send messages to client
# we include the "remote" property in the message meta
meta={
"remote":connection_responder.remote
}
)
# response wire
link.wire(
"responder",
receive=connection_responder,
respond=connection_responder
)
# run api client on connection
return client_class(link=link) | Establishes an API client for two-way communication
connection with an API Server
Arguments:
- connection (xbahn.connection.Connection)
- connection_responder (xbahn.connection.Connection): This connection will
be used by the server to send requests to the client
Keyword Arguments:
- client_class (xbahn.api.Client): if supplied use this class to instantiate
the client object. If omitted will use xbahn.api.Client.
Returns:
- client_class: client instance |
def register_model(self, storagemodel:object):
""" set up an Tableservice for an StorageTableModel in your Azure Storage Account
Will create the Table if not exist!
required Parameter is:
- storagemodel: StorageTableModel(Object)
"""
modeldefinition = self.getmodeldefinition(storagemodel, False)
if modeldefinition is None:
""" test if queuename already exists """
if [model for model in self._modeldefinitions if model['tablename'] == storagemodel._tablename]:
raise NameConventionError(storagemodel._tablename)
""" test if queuename fits to azure naming rules """
if not test_azurestorage_nameconventions(storagemodel._tablename, 'StorageTableModel'):
raise NameConventionError(storagemodel._tablename)
""" now register model """
modeldefinition = {
'modelname': storagemodel.__class__.__name__,
'tablename': storagemodel._tablename,
'encrypt': storagemodel._encrypt,
'tableservice': self._account.create_table_service()
}
if modeldefinition['encrypt']:
""" encrypt init """
# Create the KEK used for encryption.
# KeyWrapper is the provided sample implementation, but the user may use their own object as long as it implements the interface above.
kek = KeyWrapper(self._key_identifier, self._secret_key) # Key identifier
# Create the key resolver used for decryption.
# KeyResolver is the provided sample implementation, but the user may use whatever implementation they choose so long as the function set on the service object behaves appropriately.
key_resolver = KeyResolver()
key_resolver.put_key(kek)
# Create the EncryptionResolver Function to determine Properties to en/decrypt
encryptionresolver = self.__encryptionresolver__(modeldefinition['encrypt'])
# Set the require Encryption, KEK and key resolver on the service object.
modeldefinition['tableservice'].key_encryption_key = kek
modeldefinition['tableservice'].key_resolver_function = key_resolver.resolve_key
modeldefinition['tableservice'].encryption_resolver_function = encryptionresolver
pass
self.__createtable__(modeldefinition)
self._modeldefinitions.append(modeldefinition)
log.info('model {} registered successfully. Models are {!s}.'.format(modeldefinition['modelname'], [model['modelname'] for model in self._modeldefinitions]))
else:
log.info('model {} already registered. Models are {!s}.'.format(modeldefinition['modelname'], [model['modelname'] for model in self._modeldefinitions])) | Set up a Table service for a StorageTableModel in your Azure Storage Account.
Will create the Table if it does not exist!
required Parameter is:
- storagemodel: StorageTableModel(Object) |
def _convert(x, factor1, factor2):
"""
Converts mixing ratio x in comp1 - comp2 tie line to that in
c1 - c2 tie line.
Args:
x (float): Mixing ratio x in comp1 - comp2 tie line, a float
between 0 and 1.
factor1 (float): Compositional ratio between composition c1 and
processed composition comp1. E.g., factor for
Composition('SiO2') and Composition('O') is 2.0.
factor2 (float): Compositional ratio between composition c2 and
processed composition comp2.
Returns:
Mixing ratio in c1 - c2 tie line, a float between 0 and 1.
"""
return x * factor2 / ((1-x) * factor1 + x * factor2) | Converts mixing ratio x in comp1 - comp2 tie line to that in
c1 - c2 tie line.
Args:
x (float): Mixing ratio x in comp1 - comp2 tie line, a float
between 0 and 1.
factor1 (float): Compositional ratio between composition c1 and
processed composition comp1. E.g., factor for
Composition('SiO2') and Composition('O') is 2.0.
factor2 (float): Compositional ratio between composition c2 and
processed composition comp2.
Returns:
Mixing ratio in c1 - c2 tie line, a float between 0 and 1. |
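A quick numeric check of the formula, reusing the factor example from the docstring (factor1 = 2.0 for Composition('SiO2') vs Composition('O')); factor2 = 1.0 is an arbitrary illustrative value:

# x*factor2 / ((1-x)*factor1 + x*factor2) = 0.5*1.0 / (0.5*2.0 + 0.5*1.0) = 1/3
print(_convert(0.5, 2.0, 1.0))  # 0.333...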
def close(self):
"""Shutdown and free all resources."""
if self._controller is not None:
self._controller.quit()
self._controller = None
if self._process is not None:
self._process.close()
self._process = None | Shutdown and free all resources. |
def scatterviz(X,
y=None,
ax=None,
features=None,
classes=None,
color=None,
colormap=None,
markers=None,
alpha=1.0,
**kwargs):
"""Displays a bivariate scatter plot.
This helper function is a quick wrapper to utilize the ScatterVisualizer
(Transformer) for one-off analysis.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n, default: None
An array or series of target or class values
ax : matplotlib axes, default: None
The axes to plot the figure on.
features : list of strings, default: None
The names of two features or columns.
More than that will raise an error.
classes : list of strings, default: None
The names of the classes in the target
color : list or tuple of colors, default: None
Specify the colors for each individual class
colormap : string or matplotlib cmap, default: None
Sequential colormap for continuous target
markers : iterable of strings, default: ,+o*vhd
Matplotlib style markers for points on the scatter plot points
alpha : float, default: 1.0
Specify a transparency where 1 is completely opaque and 0 is completely
transparent. This property makes densely clustered points more visible.
Returns
-------
ax : matplotlib axes
Returns the axes that the parallel coordinates were drawn on.
"""
# Instantiate the visualizer
visualizer = ScatterVisualizer(ax=ax, features=features, classes=classes,
color=color, colormap=colormap,
markers=markers, alpha=alpha, **kwargs)
# Fit and transform the visualizer (calls draw)
visualizer.fit(X, y, **kwargs)
visualizer.transform(X)
# Return the axes object on the visualizer
return visualizer.ax | Displays a bivariate scatter plot.
This helper function is a quick wrapper to utilize the ScatterVisualizer
(Transformer) for one-off analysis.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n, default: None
An array or series of target or class values
ax : matplotlib axes, default: None
The axes to plot the figure on.
features : list of strings, default: None
The names of two features or columns.
More than that will raise an error.
classes : list of strings, default: None
The names of the classes in the target
color : list or tuple of colors, default: None
Specify the colors for each individual class
colormap : string or matplotlib cmap, default: None
Sequential colormap for continuous target
markers : iterable of strings, default: ,+o*vhd
Matplotlib style markers for points on the scatter plot points
alpha : float, default: 1.0
Specify a transparency where 1 is completely opaque and 0 is completely
transparent. This property makes densely clustered points more visible.
Returns
-------
ax : matplotlib axes
Returns the axes that the parallel coordinates were drawn on. |
def get_expanded(self, *args, **kwargs):
"""
Same as :py:meth:`get_default`, but *expandvars* and *expanduser* arguments are set to
*True* by default.
"""
kwargs.setdefault("expandvars", True)
kwargs.setdefault("expanduser", True)
return self.get_default(*args, **kwargs) | Same as :py:meth:`get_default`, but *expandvars* and *expanduser* arguments are set to
*True* by default. |
def get_col(self, alias, output_field=None):
"""Get the decryption for col."""
if output_field is None:
output_field = self
if alias != self.model._meta.db_table or output_field != self:
return DecryptedCol(
alias,
self,
output_field
)
else:
return self.cached_col | Get the decryption for col. |
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
Compute slice locations for input labels.
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, defaults None
If None, defaults to 1
kind : {'ix', 'loc', 'getitem'} or None
Returns
-------
start, end : int
See Also
--------
Index.get_loc : Get location for a single label.
Notes
-----
This method only works if the index is monotonic or unique.
Examples
---------
>>> idx = pd.Index(list('abcd'))
>>> idx.slice_locs(start='b', end='c')
(1, 3)
"""
inc = (step is None or step >= 0)
if not inc:
# If it's a reverse slice, temporarily swap bounds.
start, end = end, start
# GH 16785: If start and end happen to be date strings with UTC offsets
# attempt to parse and check that the offsets are the same
if (isinstance(start, (str, datetime))
and isinstance(end, (str, datetime))):
try:
ts_start = Timestamp(start)
ts_end = Timestamp(end)
except (ValueError, TypeError):
pass
else:
if not tz_compare(ts_start.tzinfo, ts_end.tzinfo):
raise ValueError("Both dates must have the "
"same UTC offset")
start_slice = None
if start is not None:
start_slice = self.get_slice_bound(start, 'left', kind)
if start_slice is None:
start_slice = 0
end_slice = None
if end is not None:
end_slice = self.get_slice_bound(end, 'right', kind)
if end_slice is None:
end_slice = len(self)
if not inc:
# Bounds at this moment are swapped, swap them back and shift by 1.
#
# slice_locs('B', 'A', step=-1): s='B', e='A'
#
# s='A' e='B'
# AFTER SWAP: | |
# v ------------------> V
# -----------------------------------
# | | |A|A|A|A| | | | | |B|B| | | | |
# -----------------------------------
# ^ <------------------ ^
# SHOULD BE: | |
# end=s-1 start=e-1
#
end_slice, start_slice = start_slice - 1, end_slice - 1
# i == -1 triggers ``len(self) + i`` selection that points to the
# last element, not before-the-first one, subtracting len(self)
# compensates that.
if end_slice == -1:
end_slice -= len(self)
if start_slice == -1:
start_slice -= len(self)
return start_slice, end_slice | Compute slice locations for input labels.
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, defaults None
If None, defaults to 1
kind : {'ix', 'loc', 'getitem'} or None
Returns
-------
start, end : int
See Also
--------
Index.get_loc : Get location for a single label.
Notes
-----
This method only works if the index is monotonic or unique.
Examples
---------
>>> idx = pd.Index(list('abcd'))
>>> idx.slice_locs(start='b', end='c')
(1, 3) |
def run(self):
"""
run CMake
"""
command = [self.cmake]
if self.generator:
command.extend([
'-G', self.generator
])
if self.path:
command.append(self.path)
if self.definitions is not None:
for item in self.definitions.items():
command.append('-D%s=%s' % item)
if self.options is not None:
command.extend(self.options)
cmd = yield self.makeRemoteShellCommand(command=command)
yield self.runCommand(cmd)
return cmd.results() | run CMake |
def update(self, update_finished_cb):
"""Request an update of the memory content"""
if not self._update_finished_cb:
self._update_finished_cb = update_finished_cb
self.anchor_data = []
self.nr_of_anchors = 0
self.valid = False
logger.debug('Updating content of memory {}'.format(self.id))
# Start reading the header
self.mem_handler.read(self, LocoMemory.MEM_LOCO_INFO,
LocoMemory.MEM_LOCO_INFO_LEN) | Request an update of the memory content |
def _gather_local_posterior(self, use_gather,
gather_size, gather_offset):
"""Gather/Gatherv local posterior
Parameters
----------
comm : object
MPI communication group
use_gather : boolean
Whether to use Gather or Gatherv
gather_size : 1D array
The size of each local posterior
gather_offset : 1D array
The offset of each local posterior
Returns
-------
HTFA
Returns the instance itself.
Notes
-----
We use numpy array rather than generic Python objects for MPI
communication because Gatherv is only supported for the former.
https://pythonhosted.org/mpi4py/usrman/tutorial.html
"""
if use_gather:
self.comm.Gather(self.local_posterior_,
self.gather_posterior, root=0)
else:
target = [
self.gather_posterior,
gather_size,
gather_offset,
MPI.DOUBLE]
self.comm.Gatherv(self.local_posterior_, target)
return self | Gather/Gatherv local posterior
Parameters
----------
comm : object
MPI communication group
use_gather : boolean
Whether to use Gather or Gatherv
gather_size : 1D array
The size of each local posterior
gather_offset : 1D array
The offset of each local posterior
Returns
-------
HTFA
Returns the instance itself.
Notes
-----
We use numpy array rather than generic Python objects for MPI
communication because Gatherv is only supported for the former.
https://pythonhosted.org/mpi4py/usrman/tutorial.html |
def _validate_file_ownership(owner, group, file_name, optional=False):
"""
Validate that a specified file is owned by `owner:group`.
:param owner: Name of the owner
:type owner: str
:param group: Name of the group
:type group: str
:param file_name: Path to the file to verify
:type file_name: str
:param optional: Is this file optional,
ie: Should this test fail when it's missing
:type optional: bool
"""
try:
ownership = _stat(file_name)
except subprocess.CalledProcessError as e:
print("Error reading file: {}".format(e))
if not optional:
assert False, "Specified file does not exist: {}".format(file_name)
assert owner == ownership.owner, \
"{} has an incorrect owner: {} should be {}".format(
file_name, ownership.owner, owner)
assert group == ownership.group, \
"{} has an incorrect group: {} should be {}".format(
file_name, ownership.group, group)
print("Validate ownership of {}: PASS".format(file_name)) | Validate that a specified file is owned by `owner:group`.
:param owner: Name of the owner
:type owner: str
:param group: Name of the group
:type group: str
:param file_name: Path to the file to verify
:type file_name: str
:param optional: Is this file optional,
ie: Should this test fail when it's missing
:type optional: bool |
def get_unhidden_ungenerated_python_files(directory: str) -> Iterable[str]:
"""Iterates through relevant python files within the given directory.
Args:
directory: The top-level directory to explore.
Yields:
File paths.
"""
for dirpath, dirnames, filenames in os.walk(directory, topdown=True):
if os.path.split(dirpath)[-1].startswith('.'):
dirnames.clear()
continue
for filename in filenames:
if filename.endswith('.py') and not filename.endswith('_pb2.py'):
yield os.path.join(dirpath, filename) | Iterates through relevant python files within the given directory.
Args:
directory: The top-level directory to explore.
Yields:
File paths. |
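A usage sketch, assuming the current working directory as the search root:

paths = list(get_unhidden_ungenerated_python_files('.'))
print(len(paths), 'python files found')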
def truth(message, expected=None):
""" Convenience decorator that applies [`Check`](#check) to a callable.
```python
from good import truth
@truth(u'Must be an existing directory')
def isDir(v):
return os.path.isdir(v)
```
:param message: Validation error message
:type message: unicode
:param expected: Expected value string representation, or `None` to get it from the wrapped callable
:type expected: None|str|unicode
:return: decorator
:rtype: callable
"""
def decorator(func):
return update_wrapper(Check(func, message, expected), func)
return decorator | Convenience decorator that applies [`Check`](#check) to a callable.
```python
from good import truth
@truth(u'Must be an existing directory')
def isDir(v):
return os.path.isdir(v)
```
:param message: Validation error message
:type message: unicode
:param expected: Expected value string representation, or `None` to get it from the wrapped callable
:type expected: None|str|unicode
:return: decorator
:rtype: callable |
def load_input():
""" Open existing input file """
file = open(_input_file, 'r')
result = json.loads(file.read().strip('\0').strip())
file.close()
return result | Open existing input file |
def corner_shape_parameters(corners, frame_shape, cb_shape):
"""
Return a tuple of shape parameters for a given set of corners. This is
based on the parameters from ROS's perception library[1]. The parameters
returned are mean x- and y- co-ordinate normalised onto the interval
[0,1], the relative size of the set of corners within the frame on the
interval [0,1] and a 'skewness' metric on the interval [0,1].
*corners* is a Nx2 numpy array of detected corner locations.
*frame_shape* is a pair giving the width and height of the frame.
*cb_shape* is a pair giving the number of horizontal and vertical corners
[1] https://github.com/ros-perception/image_pipeline/
"""
corners = corners.reshape(-1, 2)
assert corners.shape[0] == cb_shape[0] * cb_shape[1]
h, w = frame_shape
# Extract the "outside" corners which define the shape of the board in
# a clockwise order. I.e. [A,B,C,D] where:
#
# A----B
# | |
# D----C
A, B, C, D = tuple(x.reshape(-1) for x in corners[[0, cb_shape[0]-1, -1, -cb_shape[0]], :])
ba, bc = A - B, C - B # Edges
bd, ac = D - B, C - A # Diagonals
# Compute the angle between AB and BC
angle = np.arccos(np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc)))
# Compute skew metric
skew = min(1, 2 * np.abs(0.5*np.pi - angle))
# Compute area (assuming quadrilateral) and hence size metric
area = 0.5 * np.abs(bd[0]*ac[1] - bd[1]*ac[0])
size = np.sqrt(area / (h*w))
# For X and Y, we "shrink" the image all around by approx. half the board
# size. Otherwise large boards are penalized because you can't get much
# X/Y variation.
border = np.sqrt(area)
X = np.clip((np.mean(corners[:,0]) - 0.5*border) / (w - border), 0, 1)
Y = np.clip((np.mean(corners[:,1]) - 0.5*border) / (h - border), 0, 1)
return (X, Y, size, skew) | Return a tuple of shape parameters for a given set of corners. This is
based on the parameters from ROS's perception library[1]. The parameters
returned are mean x- and y- co-ordinate normalised onto the interval
[0,1], the relative size of the set of corners within the frame on the
interval [0,1] and a 'skewness' metric on the interval [0,1].
*corners* is a Nx2 numpy array of detected corner locations.
*frame_shape* is a pair giving the width and height of the frame.
*cb_shape* is a pair giving the number of horizontal and vertical corners
[1] https://github.com/ros-perception/image_pipeline/ |
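A hedged worked example with a synthetic, axis-aligned rectangular grid of corners (all values invented for illustration); the skew metric should come out near 0 because the corner angle at B is a right angle:

import numpy as np

cb_shape = (9, 6)                                      # hypothetical 9x6 corner grid
xs, ys = np.meshgrid(np.linspace(100, 500, cb_shape[0]),
                     np.linspace(100, 380, cb_shape[1]))
corners = np.stack([xs.ravel(), ys.ravel()], axis=1)   # (54, 2), row-major order
X, Y, size, skew = corner_shape_parameters(corners, (480, 640), cb_shape)
print(X, Y, size, skew)                                # skew ~ 0 for this rectangle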
def add_answer_for_student(student_item, vote, rationale):
"""
Add an answer for a student to the backend
Args:
student_item (dict): The location of the problem this submission is
associated with, as defined by a course, student, and item.
vote (int): the option that student voted for
rationale (str): the reason why the student voted for the option
"""
answers = get_answers_for_student(student_item)
answers.add_answer(vote, rationale)
sub_api.create_submission(student_item, {
ANSWER_LIST_KEY: answers.get_answers_as_list()
}) | Add an answer for a student to the backend
Args:
student_item (dict): The location of the problem this submission is
associated with, as defined by a course, student, and item.
vote (int): the option that student voted for
rationale (str): the reason why the student voted for the option |
def hash160(data):
"""Return ripemd160(sha256(data))"""
rh = hashlib.new('ripemd160', sha256(data).digest())
return rh.digest() | Return ripemd160(sha256(data)) |
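A small check, assuming sha256 above refers to hashlib.sha256 (Bitcoin-style HASH160 of an arbitrary byte string); note that 'ripemd160' requires OpenSSL support in hashlib:

import hashlib

sha256 = hashlib.sha256          # assumption: the helper used by hash160 above
print(hash160(b'hello').hex())   # 20-byte RIPEMD-160(SHA-256(data)) digest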
def linear_warp(X, d, n, *args):
r"""Warp inputs with a linear transformation.
Applies the warping
.. math::
w(x) = \frac{x-a}{b-a}
to each dimension. If you set `a=min(X)` and `b=max(X)` then this is a
convenient way to map your inputs to the unit hypercube.
Parameters
----------
X : array, (`M`,)
`M` inputs from dimension `d`.
d : non-negative int
The index (starting from zero) of the dimension to apply the warping to.
n : non-negative int
The derivative order to compute.
*args : 2N scalars
The remaining parameters to describe the warping, given as scalars.
These are given as `a_i`, `b_i` for each of the `D` dimensions. Note
that these must ALL be provided for each call.
"""
X = scipy.asarray(X, dtype=float)
a = args[2 * d]
b = args[2 * d + 1]
if n == 0:
return (X - a) / (b - a)
elif n == 1:
return 1.0 / (b - a) * scipy.ones_like(X)
else:
return scipy.zeros_like(X) | r"""Warp inputs with a linear transformation.
Applies the warping
.. math::
w(x) = \frac{x-a}{b-a}
to each dimension. If you set `a=min(X)` and `b=max(X)` then this is a
convenient way to map your inputs to the unit hypercube.
Parameters
----------
X : array, (`M`,)
`M` inputs from dimension `d`.
d : non-negative int
The index (starting from zero) of the dimension to apply the warping to.
n : non-negative int
The derivative order to compute.
*args : 2N scalars
The remaining parameters to describe the warping, given as scalars.
These are given as `a_i`, `b_i` for each of the `D` dimensions. Note
that these must ALL be provided for each call. |
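A quick worked example mapping a 1-D input onto [0, 1] with a = min(X) and b = max(X), as the docstring suggests; this assumes an older SciPy where scipy.asarray/ones_like/zeros_like (used above) are still available:

X = [1.0, 2.0, 3.0]
print(linear_warp(X, 0, 0, 1.0, 3.0))   # value:      [0.  0.5 1. ]
print(linear_warp(X, 0, 1, 1.0, 3.0))   # derivative: [0.5 0.5 0.5]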
def setitem(self, key, value):
"""Maps dictionary keys to values for assignment. Called for
dictionary style access with assignment.
"""
with self.lock:
self.tbl[key] = value | Maps dictionary keys to values for assignment. Called for
dictionary style access with assignment. |
def op_list_venvs(self):
"""Prints out and returns a list of known virtual environments.
:rtype: list
:return: list of virtual environments
"""
self.logger.info('Listing known virtual environments ...')
venvs = self.get_venvs()
for venv in venvs:
self.logger.info('Found `%s`' % venv)
if not venvs:  # a bare for/else would always run this branch (the loop never breaks)
self.logger.info('No virtual environments found in `%s` directory.' % VENVS_DIRNAME)
return venvs | Prints out and returns a list of known virtual environments.
:rtype: list
:return: list of virtual environments |
def raises(self):
"""Return list of :raises meta."""
return [
DocstringRaises.from_meta(meta)
for meta in self.meta
if meta.args[0] in {'raises', 'raise', 'except', 'exception'}
] | Return list of :raises meta. |
def spectral_flux(d0, d1):
"""
Computes the spectral flux feature of the current frame
"""
# compute the spectral flux as the sum of square distances:
d0 = np.mean(d0, axis=1)
d1 = np.mean(d1, axis=1)
nFFT = min(len(d0) // 2, len(d1) // 2)
X = FFT(d0, nFFT)
Xprev = FFT(d1, nFFT)
# L = min(len(X), len(Xprev))
sumX = np.sum(X + EPSILON)
sumPrevX = np.sum(Xprev + EPSILON)
return np.sum((X / sumX - Xprev / sumPrevX) ** 2) | Computes the spectral flux feature of the current frame |
def _WaitForStartup(self, deadline):
"""Waits for the emulator to start.
Args:
deadline: deadline in seconds
Returns:
True if the emulator responds within the deadline, False otherwise.
"""
start = time.time()
sleep = 0.05
def Elapsed():
return time.time() - start
while True:
try:
response, _ = self._http.request(self._host)
if response.status == 200:
logging.info('emulator responded after %f seconds', Elapsed())
return True
except (socket.error, httplib.ResponseNotReady):
pass
if Elapsed() >= deadline:
# Out of time; give up.
return False
else:
time.sleep(sleep)
sleep *= 2 | Waits for the emulator to start.
Args:
deadline: deadline in seconds
Returns:
True if the emulator responds within the deadline, False otherwise. |
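The doubling sleep above is a generic poll-with-exponential-backoff pattern. A minimal standalone sketch under stated assumptions (wait_until and probe are hypothetical names, not part of the emulator code):

import time

def wait_until(probe, deadline_s):
    """Poll probe() with exponential backoff until it returns True or time runs out."""
    start = time.time()
    sleep = 0.05
    while True:
        if probe():
            return True
        if time.time() - start >= deadline_s:
            return False
        time.sleep(sleep)
        sleep *= 2

t0 = time.time()
print(wait_until(lambda: time.time() - t0 > 0.3, 2.0))  # True after ~0.3-0.4 s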
def step(self, observations):
""" Sample action from an action space for given state """
q_values = self(observations)
return {
'actions': self.q_head.sample(q_values),
'q': q_values
} | Sample action from an action space for given state |
def freeze_all(self):
'''
Stop all activity of the agents running.
'''
d = defer.succeed(None)
for x in self.iter_agents():
d.addCallback(defer.drop_param, x._cancel_long_running_protocols)
d.addCallback(defer.drop_param, x._cancel_all_delayed_calls)
d.addCallback(defer.drop_param, x._kill_all_protocols)
return d | Stop all activity of the agents running. |
def http_get_metadata(metadata_path, timeout=__HTTP_DEFAULT_TIMEOUT_SEC):
"""
Fetch AWS metadata from http://169.254.169.254/latest/meta-data/<metadata_path>
ARGS:
metadata_path - the optional path and required key to the EC2 metadata (e.g. "instance-id")
RETURN:
response content on success
RAISE:
IOError if there was a problem reading metadata
"""
metadata_path = __METADATA_PREFIX + metadata_path
try:
response = urllib2.urlopen(metadata_path, None, timeout)
if response.getcode() != 200:
raise IOError("Non-200 response " + str(response.getcode()) + " reading " + metadata_path)
return response.read()
except urllib2.URLError as error:
raise IOError("URLError in http_get_metadata: " + repr(error)) | Fetch AWS metadata from http://169.254.169.254/latest/meta-data/<metadata_path>
ARGS:
metadata_path - the optional path and required key to the EC2 metadata (e.g. "instance-id")
RETURN:
response content on success
RAISE:
IOError if there was a problem reading metadata |
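A usage sketch; this only works on an EC2 instance where the metadata service is reachable, and it relies on the urllib2 import assumed by the function above:

try:
    print(http_get_metadata('instance-id'))
except IOError as err:
    print('metadata lookup failed:', err)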
def repeat_last_axis(array, count):
"""
Restride `array` to repeat `count` times along the last axis.
Parameters
----------
array : np.array
The array to restride.
count : int
Number of times to repeat `array`.
Returns
-------
result : array
Array of shape array.shape + (count,) composed of `array` repeated
`count` times along the last axis.
Example
-------
>>> from numpy import arange
>>> a = arange(3); a
array([0, 1, 2])
>>> repeat_last_axis(a, 2)
array([[0, 0],
[1, 1],
[2, 2]])
>>> repeat_last_axis(a, 4)
array([[0, 0, 0, 0],
[1, 1, 1, 1],
[2, 2, 2, 2]])
Notes
----
The resulting array will share memory with `array`. If you need to assign
to the input or output, you should probably make a copy first.
See Also
--------
repeat_last_axis
"""
return as_strided(array, array.shape + (count,), array.strides + (0,)) | Restride `array` to repeat `count` times along the last axis.
Parameters
----------
array : np.array
The array to restride.
count : int
Number of times to repeat `array`.
Returns
-------
result : array
Array of shape array.shape + (count,) composed of `array` repeated
`count` times along the last axis.
Example
-------
>>> from numpy import arange
>>> a = arange(3); a
array([0, 1, 2])
>>> repeat_last_axis(a, 2)
array([[0, 0],
[1, 1],
[2, 2]])
>>> repeat_last_axis(a, 4)
array([[0, 0, 0, 0],
[1, 1, 1, 1],
[2, 2, 2, 2]])
Notes
----
The resulting array will share memory with `array`. If you need to assign
to the input or output, you should probably make a copy first.
See Also
--------
repeat_last_axis |
def _deleteFile(self,directory,fn,dentry,db,service):
"""Deletets file and changes status to '?' if no
more services manages the file
"""
# FIXME : can switch back to only managing once service
# at a time
logger.debug("%s - Deleting"%(fn))
if fn not in db:
print("%s - rm: Not in DB, can't remove !"%(fn))
return False
# Build up list of names
servicenames=db[fn]['services'].keys()
# If service is none, build list of all services
# to perform this action on
if service is None:
servicelist=servicenames
else:
servicelist=[service]
for service in servicelist:
if service not in db[fn]['services']:
print("%s - Can't delete, service [%s] unknown"%(fn,service))
continue
if db[fn]['services'][service]['status']!=self.ST_DELETED:
print("%s - rm: Can't remove file with non 'D' status (%s)!"\
%(fn,service))
continue
# Only change status if correctly deleted
if self.sman.GetServiceObj(service).Remove(directory,fn):
# Delete our service entry
del db[fn]['services'][service]
logger.debug('%s - deleted by service: %s'%(fn,service))
else:
logger.error('%s - Failed to delete by service: %s'%(fn,service))
continue
# Delete whole entry if no services manage it any more
if len(db[fn]['services'].keys())==0:
del db[fn]
return True | Deletes the file and changes its status to '?' if no
more services manage the file |
def plot_pseudosection(df, plot_key, spacing=1, ctypes=None, dd_merge=False,
cb=False, **kwargs):
"""Create a pseudosection plot for a given measurement
Parameters
----------
df: dataframe
measurement dataframe, one measurement frame (i.e., only one frequency
etc)
key:
which key to colorcode
spacing: float, optional
assumed electrode spacing
ctypes: list of strings
which configurations to plot, default: dd
dd_merge: bool, optional
?
cb: bool, optional
?
"""
grid = None
pseudo_d_functions = {
'dd': _pseudodepths_dd_simple,
'schlumberger': _pseudodepths_schlumberger,
'wenner': _pseudodepths_wenner,
}
titles = {
'dd': 'dipole-dipole configurations',
'schlumberger': 'Schlumberger configurations',
'wenner': 'Wenner configurations',
}
# for now sort data and only plot dipole-dipole
only_types = ctypes or ['dd', ]
if 'schlumberger' in only_types:
raise Exception('plotting of pseudosections not implemented for ' +
'Schlumberger configurations!')
configs = df[['a', 'b', 'm', 'n']].values
results = fT.filter(
configs,
settings={'only_types': only_types, }, )
values = df[plot_key].values
plot_objects = []
for key in sorted(results.keys()):
print('plotting: ', key)
if key == 'not_sorted':
continue
index_dict = results[key]
# it is possible that we want to generate multiple plots for one
# type of measurement, i.e., to separate skips of dipole-dipole
# measurements. Therefore we generate two lists:
# 1) list of list of indices to plot
# 2) corresponding labels
if key == 'dd' and not dd_merge:
plot_list = []
labels_add = []
for skip in sorted(index_dict.keys()):
plot_list.append(index_dict[skip])
labels_add.append(' - skip {0}'.format(skip))
else:
# merge all indices
plot_list = [np.hstack(index_dict.values()), ]
print('schlumberger', plot_list)
labels_add = ['', ]
# generate plots
for indices, label_add in zip(plot_list, labels_add):
if len(indices) == 0:
continue
ddc = configs[indices]
plot_data = values[indices]
px, pz = pseudo_d_functions[key](ddc, spacing, grid)
# we need at least four points for a spatial interpolation, I
# think...
if px.size <= 4:
continue
# take 200 points for the new grid in every direction. Could be
# adapted to the actual ratio
xg = np.linspace(px.min(), px.max(), 200)
zg = np.linspace(pz.min(), pz.max(), 200)
x, z = np.meshgrid(xg, zg)
cmap_name = kwargs.get('cmap_name', 'jet')
cmap = mpl.cm.get_cmap(cmap_name)
# normalize data
data_min = kwargs.get('cbmin', plot_data.min())
data_max = kwargs.get('cbmax', plot_data.max())
cnorm = mpl.colors.Normalize(vmin=data_min, vmax=data_max)
scalarMap = mpl.cm.ScalarMappable(norm=cnorm, cmap=cmap)
fcolors = scalarMap.to_rgba(plot_data)
try:
image = si.griddata(
(px, pz),
fcolors,
(x, z),
method='linear', )
except siq.QhullError as e:
print('Ex', e)
continue
cmap = mpl.cm.get_cmap('jet_r')
data_ratio = np.abs(px.max() - px.min()) / np.abs(pz.min())
fig_size_y = 15 / data_ratio + 6 / 2.54
fig = plt.figure(figsize=(15, fig_size_y))
fig_top = 1 / 2.54 / fig_size_y
fig_left = 2 / 2.54 / 15
fig_right = 1 / 2.54 / 15
if cb:
fig_bottom = 3 / 2.54 / fig_size_y
else:
fig_bottom = 0.05
ax = fig.add_axes([
fig_left, fig_bottom + fig_top * 2, 1 - fig_left - fig_right,
1 - fig_top - fig_bottom - fig_top * 2
])
im = ax.imshow(
image[::-1],
extent=(xg.min(), xg.max(), zg.min(), zg.max()),
interpolation='none',
aspect='auto',
# vmin=10,
# vmax=300,
cmap=cmap, )
ax.set_ylim(pz.min(), 0)
# colorbar
if cb:
print('plotting colorbar')
# the colorbar has 3 cm on the bottom
ax_cb = fig.add_axes([
fig_left * 4, fig_top * 2,
1 - fig_left * 4 - fig_right * 4, fig_bottom - fig_top * 2
])
# from mpl_toolkits.axes_grid1 import make_axes_locatable
# divider = make_axes_locatable(ax)
# ax_cb = divider.append_axes("bottom", "5%", pad="3%")
# (ax_cb, kw) = mpl.colorbar.make_axes_gridspec(
# ax,
# orientation='horizontal',
# fraction=fig_bottom,
# pad=0.3,
# shrink=0.9,
# # location='bottom',
# )
cb = mpl.colorbar.ColorbarBase(
ax=ax_cb,
cmap=cmap,
norm=cnorm,
orientation='horizontal',
# **kw
)
cb.set_label('cb label')
else:
fig_bottom = 0.05
# 1cm on top
# # 3 cm on bottom for colorbar
# fig.subplots_adjust(
# top=1 - fig_top,
# bottom=fig_bottom,
# )
ax.set_title(titles[key] + label_add)
ax.set_aspect('equal')
ax.set_xlabel('x [m]')
ax.set_ylabel('z [m]')
plot_objects.append((fig, ax, im))
return plot_objects | Create a pseudosection plot for a given measurement
Parameters
----------
df: dataframe
measurement dataframe, one measurement frame (i.e., only one frequency
etc)
key:
which key to colorcode
spacing: float, optional
assumed electrode spacing
ctypes: list of strings
which configurations to plot, default: dd
dd_merge: bool, optional
?
cb: bool, optional
? |
def tryDynMod(name):
'''
Dynamically import a python module, raising NoSuchDyn if it cannot be found.
'''
try:
return importlib.import_module(name)
except ModuleNotFoundError:
raise s_exc.NoSuchDyn(name=name) | Dynamically import a python module, raising NoSuchDyn if it cannot be found. |
def _RunMethod(dev, args, extra):
"""Runs a method registered via MakeSubparser."""
logging.info('%s(%s)', args.method.__name__, ', '.join(args.positional))
result = args.method(dev, *args.positional, **extra)
if result is not None:
if isinstance(result, io.StringIO):
sys.stdout.write(result.getvalue())
elif isinstance(result, (list, types.GeneratorType)):
r = ''
for r in result:
r = str(r)
sys.stdout.write(r)
if not r.endswith('\n'):
sys.stdout.write('\n')
else:
result = str(result)
sys.stdout.write(result)
if not result.endswith('\n'):
sys.stdout.write('\n')
return 0 | Runs a method registered via MakeSubparser. |
def convert_coord_object(coord):
"""Convert ModestMaps.Core.Coordinate -> raw_tiles.tile.Tile"""
assert isinstance(coord, Coordinate)
coord = coord.container()
return Tile(int(coord.zoom), int(coord.column), int(coord.row)) | Convert ModestMaps.Core.Coordinate -> raw_tiles.tile.Tile |
def _get_title(self):
"""According to http://support.microsoft.com/kb/124103 the buffer
size is 1024
Does not support unicode, only ANSI"""
#TODO: unicode support
strbuffer = self.ctypes.create_string_buffer(1024)
size = self.ctypes.c_short(1024)
#unicode versions are (Get|Set)ConsoleTitleW
self.ctypes.windll.kernel32.GetConsoleTitleA(strbuffer, size)
return strbuffer.value | According to http://support.microsoft.com/kb/124103 the buffer
size is 1024
Does not support unicode, only ANSI |
def postcmd(self, stop, line):
''' Exit cmd cleanly. '''
self.color_prompt()
return Cmd.postcmd(self, stop, line) | Exit cmd cleanly. |
def inputs(self):
'''A list of Theano variables for feedforward computations.'''
return [l.input for l in self.layers if isinstance(l, layers.Input)] | A list of Theano variables for feedforward computations. |
def stop(self):
"""Stop this DMBS daemon. If it's not currently running, do nothing.
Don't return until it's terminated.
"""
log.info('Stopping PostgreSQL at %s:%s', self.host, self.port)
if self._is_running():
cmd = [
PostgresFinder.find_root() / 'pg_ctl',
'stop',
'-D', self.base_pathname,
'-m', 'fast',
]
subprocess.check_call(cmd)
# pg_ctl isn't reliable if it's called at certain critical times
if self.pid:
os.kill(self.pid, signal.SIGTERM)
# Can't use wait() because the server might not be our child
while self._is_running():
time.sleep(0.1) | Stop this DBMS daemon. If it's not currently running, do nothing.
Don't return until it's terminated. |
def is_active(cache, token):
"""
Accepts the cache and ID token and checks to see if the profile is
currently logged in. If so, return the token, otherwise throw a
NotAuthenticatedException.
:param cache:
:param token:
:return:
"""
profile = cache.get(token)
if not profile:
raise exceptions.NotAuthenticatedException(
'The token is good, but you are not logged in. Please '
'try logging in again.')
return profile | Accepts the cache and ID token and checks to see if the profile is
currently logged in. If so, return the token, otherwise throw a
NotAuthenticatedException.
:param cache:
:param token:
:return: |
def assert_match(actual_char_or_str, expected_char_or_str):
"""If values don't match, print them and raise a ValueError, otherwise,
continue
Raises: ValueError if arguments do not match"""
if expected_char_or_str != actual_char_or_str:
print("Expected")
pprint(expected_char_or_str)
print("")
print("Got")
pprint(actual_char_or_str)
raise ValueError() | If values don't match, print them and raise a ValueError, otherwise,
continue
Raises: ValueError if arguments do not match |
def grouper(iterable: Iterable, size: int) -> Iterable:
"""
Collect data into fixed-length chunks or blocks without discarding underfilled chunks or padding them.
:param iterable: A sequence of inputs.
:param size: Chunk size.
:return: Sequence of chunks.
"""
it = iter(iterable)
while True:
chunk = list(itertools.islice(it, size))
if not chunk:
return
yield chunk | Collect data into fixed-length chunks or blocks without discarding underfilled chunks or padding them.
:param iterable: A sequence of inputs.
:param size: Chunk size.
:return: Sequence of chunks. |
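A minimal usage sketch (assuming the grouper defined above is in scope): the final under-filled chunk is kept rather than padded or dropped.
chunks = list(grouper(range(7), 3))
print(chunks)  # [[0, 1, 2], [3, 4, 5], [6]] -- the last chunk is shorter, not padded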
def to_html(self, write_to):
"""Method to convert the repository list to a search results page and
write it to a HTML file.
:param write_to: File/Path to write the html file to.
"""
page_html = self.get_html()
with open(write_to, "wb") as writefile:
writefile.write(page_html.encode("utf-8")) | Method to convert the repository list to a search results page and
write it to a HTML file.
:param write_to: File/Path to write the html file to. |
def available_configuration_files(self):
"""A list of strings with the absolute pathnames of the available configuration files."""
known_files = [GLOBAL_CONFIG, LOCAL_CONFIG, self.environment.get('PIP_ACCEL_CONFIG')]
absolute_paths = [parse_path(pathname) for pathname in known_files if pathname]
return [pathname for pathname in absolute_paths if os.path.isfile(pathname)] | A list of strings with the absolute pathnames of the available configuration files. |
def handle_not_found(exception, **extra):
"""Custom blueprint exception handler."""
assert isinstance(exception, NotFound)
page = Page.query.filter(db.or_(Page.url == request.path,
Page.url == request.path + "/")).first()
if page:
_add_url_rule(page.url)
return render_template(
[
page.template_name,
current_app.config['PAGES_DEFAULT_TEMPLATE']
],
page=page
)
elif 'wrapped' in extra:
return extra['wrapped'](exception)
else:
return exception | Custom blueprint exception handler. |
def _writeTracebackMessage(logger, typ, exception, traceback):
"""
Write a traceback to the log.
@param typ: The class of the exception.
@param exception: The L{Exception} instance.
@param traceback: The traceback, a C{str}.
"""
msg = TRACEBACK_MESSAGE(
reason=exception, traceback=traceback, exception=typ)
msg = msg.bind(
**_error_extraction.get_fields_for_exception(logger, exception))
msg.write(logger) | Write a traceback to the log.
@param typ: The class of the exception.
@param exception: The L{Exception} instance.
@param traceback: The traceback, a C{str}. |
def sens_power_encode(self, adc121_vspb_volt, adc121_cspb_amp, adc121_cs1_amp, adc121_cs2_amp):
'''
Voltage and current sensor data
adc121_vspb_volt : Power board voltage sensor reading in volts (float)
adc121_cspb_amp : Power board current sensor reading in amps (float)
adc121_cs1_amp : Board current sensor 1 reading in amps (float)
adc121_cs2_amp : Board current sensor 2 reading in amps (float)
'''
return MAVLink_sens_power_message(adc121_vspb_volt, adc121_cspb_amp, adc121_cs1_amp, adc121_cs2_amp) | Voltage and current sensor data
adc121_vspb_volt : Power board voltage sensor reading in volts (float)
adc121_cspb_amp : Power board current sensor reading in amps (float)
adc121_cs1_amp : Board current sensor 1 reading in amps (float)
adc121_cs2_amp : Board current sensor 2 reading in amps (float) |
def get_workflow_id_and_project(path):
'''
:param path: a path or ID to a workflow object
:type path: string
:returns: tuple of (workflow ID, project ID)
Returns the workflow and project IDs from the given path if
available; otherwise, exits with an appropriate error message.
'''
project, _folderpath, entity_result = try_call(resolve_existing_path, path, expected='entity')
try:
if entity_result is None or not entity_result['id'].startswith('workflow-'):
raise DXCLIError('Could not resolve "' + path + '" to a workflow object')
except:
err_exit()
return entity_result['id'], project | :param path: a path or ID to a workflow object
:type path: string
:returns: tuple of (workflow ID, project ID)
Returns the workflow and project IDs from the given path if
available; otherwise, exits with an appropriate error message. |
def _iter(self):
"""Generate (name, est, weight) tuples excluding None transformers
"""
get_weight = (self.transformer_weights or {}).get
return ((name, trans, get_weight(name))
for name, trans in self.transformer_list
if trans is not None) | Generate (name, est, weight) tuples excluding None transformers |
def CreateStorageReaderForFile(cls, path):
"""Creates a storage reader based on the file.
Args:
path (str): path to the storage file.
Returns:
StorageReader: a storage reader or None if the storage file cannot be
opened or the storage format is not supported.
"""
if sqlite_file.SQLiteStorageFile.CheckSupportedFormat(
path, check_readable_only=True):
return sqlite_reader.SQLiteStorageFileReader(path)
return None | Creates a storage reader based on the file.
Args:
path (str): path to the storage file.
Returns:
StorageReader: a storage reader or None if the storage file cannot be
opened or the storage format is not supported. |
def break_bond(self, ind1, ind2, tol=0.2):
"""
Returns two molecules based on breaking the bond between atoms at index
ind1 and ind2.
Args:
ind1 (int): Index of first site.
ind2 (int): Index of second site.
tol (float): Relative tolerance to test. Basically, the code
checks if the distance between the sites is less than (1 +
tol) * typical bond distances. Defaults to 0.2, i.e.,
20% longer.
Returns:
Two Molecule objects representing the two clusters formed from
breaking the bond.
"""
sites = self._sites
clusters = [[sites[ind1]], [sites[ind2]]]
sites = [site for i, site in enumerate(sites) if i not in (ind1, ind2)]
def belongs_to_cluster(site, cluster):
for test_site in cluster:
if CovalentBond.is_bonded(site, test_site, tol=tol):
return True
return False
while len(sites) > 0:
unmatched = []
for site in sites:
for cluster in clusters:
if belongs_to_cluster(site, cluster):
cluster.append(site)
break
else:
unmatched.append(site)
if len(unmatched) == len(sites):
raise ValueError("Not all sites are matched!")
sites = unmatched
return (self.__class__.from_sites(cluster)
for cluster in clusters) | Returns two molecules based on breaking the bond between atoms at index
ind1 and ind2.
Args:
ind1 (int): Index of first site.
ind2 (int): Index of second site.
tol (float): Relative tolerance to test. Basically, the code
checks if the distance between the sites is less than (1 +
tol) * typical bond distances. Defaults to 0.2, i.e.,
20% longer.
Returns:
Two Molecule objects representing the two clusters formed from
breaking the bond. |
def task_delete(self, **kw):
""" Marks a task as deleted. """
id, task = self.get_task(**kw)
if task['status'] == Status.DELETED:
raise ValueError("Task is already deleted.")
self._execute(id, 'delete')
return self.get_task(uuid=task['uuid'])[1] | Marks a task as deleted. |
def get_from_ipfs_and_checkhash(ipfs_client, ipfs_hash_base58, validate=True):
"""
Get file from ipfs
    We must check the hash because we cannot trust that ipfs_client has not been compromised
"""
if validate:
from snet_cli.resources.proto.unixfs_pb2 import Data
from snet_cli.resources.proto.merckledag_pb2 import MerkleNode
# No nice Python library to parse ipfs blocks, so do it ourselves.
block_data = ipfs_client.block_get(ipfs_hash_base58)
mn = MerkleNode()
mn.ParseFromString(block_data)
unixfs_data = Data()
unixfs_data.ParseFromString(mn.Data)
assert unixfs_data.Type == unixfs_data.DataType.Value('File'), "IPFS hash must be a file"
data = unixfs_data.Data
# multihash has a badly registered base58 codec, overwrite it...
multihash.CodecReg.register('base58', base58.b58encode, base58.b58decode)
# create a multihash object from our ipfs hash
mh = multihash.decode(ipfs_hash_base58.encode('ascii'), 'base58')
# Convenience method lets us directly use a multihash to verify data
if not mh.verify(block_data):
raise Exception("IPFS hash mismatch with data")
else:
data = ipfs_client.cat(ipfs_hash_base58)
return data | Get file from ipfs
We must check the hash because we cannot trust that ipfs_client has not been compromised
def change(self, event):
"""Change an existing object"""
try:
data, schema, user, client = self._get_args(event)
except AttributeError:
return
try:
uuid = data['uuid']
change = data['change']
field = change['field']
new_data = change['value']
except KeyError as e:
self.log("Update request with missing arguments!", data, e,
lvl=critical)
self._cancel_by_error(event, 'missing_args')
return
storage_object = None
try:
storage_object = objectmodels[schema].find_one({'uuid': uuid})
except Exception as e:
self.log('Change for unknown object requested:', schema, data, lvl=warn)
if storage_object is None:
self._cancel_by_error(event, 'not_found')
return
if not self._check_permissions(user, 'write', storage_object):
self._cancel_by_permission(schema, data, event)
return
self.log("Changing object:", storage_object._fields, lvl=debug)
storage_object._fields[field] = new_data
self.log("Storing object:", storage_object._fields, lvl=debug)
try:
storage_object.validate()
except ValidationError:
self.log("Validation of changed object failed!",
storage_object, lvl=warn)
self._cancel_by_error(event, 'invalid_object')
return
storage_object.save()
self.log("Object stored.")
result = {
'component': 'hfos.events.objectmanager',
'action': 'change',
'data': {
'schema': schema,
'uuid': uuid
}
}
self._respond(None, result, event) | Change an existing object |
def _get_pkgng_version(jail=None, chroot=None, root=None):
'''
return the version of 'pkg'
'''
cmd = _pkg(jail, chroot, root) + ['--version']
return __salt__['cmd.run'](cmd).strip() | return the version of 'pkg' |
def math_to_image(s, filename_or_obj, prop=None, dpi=None, format=None):
"""
Given a math expression, renders it in a closely-clipped bounding
box to an image file.
*s*
A math expression. The math portion should be enclosed in
dollar signs.
*filename_or_obj*
A filepath or writable file-like object to write the image data
to.
*prop*
If provided, a FontProperties() object describing the size and
style of the text.
*dpi*
Override the output dpi, otherwise use the default associated
with the output format.
*format*
The output format, eg. 'svg', 'pdf', 'ps' or 'png'. If not
provided, will be deduced from the filename.
"""
from matplotlib import figure
# backend_agg supports all of the core output formats
from matplotlib.backends import backend_agg
from matplotlib.font_manager import FontProperties
from matplotlib.mathtext import MathTextParser
if prop is None:
prop = FontProperties()
parser = MathTextParser('path')
width, height, depth, _, _ = parser.parse(s, dpi=72, prop=prop)
fig = figure.Figure(figsize=(width / 72.0, height / 72.0))
fig.text(0, depth/height, s, fontproperties=prop)
backend_agg.FigureCanvasAgg(fig)
fig.savefig(filename_or_obj, dpi=dpi, format=format)
return depth | Given a math expression, renders it in a closely-clipped bounding
box to an image file.
*s*
A math expression. The math portion should be enclosed in
dollar signs.
*filename_or_obj*
A filepath or writable file-like object to write the image data
to.
*prop*
If provided, a FontProperties() object describing the size and
style of the text.
*dpi*
Override the output dpi, otherwise use the default associated
with the output format.
*format*
The output format, eg. 'svg', 'pdf', 'ps' or 'png'. If not
provided, will be deduced from the filename. |
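A minimal usage sketch, assuming matplotlib is installed and the math_to_image helper above is in scope; the expression string and output buffer are illustrative.
import io

buf = io.BytesIO()
depth = math_to_image(r"$\frac{a}{b} + \sqrt{x}$", buf, dpi=120, format="png")
print(depth, len(buf.getvalue()))  # baseline depth of the expression and the PNG size in bytes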
def set_xticklabels_position(self, row, column, position):
"""Specify the position of the axis tick labels.
This is generally only useful for multiplots containing only one
row. This can be used to e.g. alternatively draw the tick labels
on the bottom or the top of the subplot.
:param row,column: specify the subplot.
:param position: 'top' or 'bottom' to specify the position of the
tick labels.
"""
subplot = self.get_subplot_at(row, column)
subplot.set_xticklabels_position(position) | Specify the position of the axis tick labels.
This is generally only useful for multiplots containing only one
row. This can be used to e.g. alternatively draw the tick labels
on the bottom or the top of the subplot.
:param row,column: specify the subplot.
:param position: 'top' or 'bottom' to specify the position of the
tick labels. |
def handle_combined_input(args):
"""Check for cases where we have a combined input nested list.
In these cases the CWL will be double nested:
[[[rec_a], [rec_b]]]
and we remove the outer nesting.
"""
cur_args = args[:]
while len(cur_args) == 1 and isinstance(cur_args[0], (list, tuple)):
cur_args = cur_args[0]
return cur_args | Check for cases where we have a combined input nested list.
In these cases the CWL will be double nested:
[[[rec_a], [rec_b]]]
and we remove the outer nesting. |
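A small sketch of the de-nesting behaviour, assuming the function above is in scope; the record dicts are hypothetical placeholders.
nested = [[[{"rec": "a"}], [{"rec": "b"}]]]
print(handle_combined_input(nested))  # [[{'rec': 'a'}], [{'rec': 'b'}]] -- one level of nesting removed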
def _load_url(url):
"""
Loads a URL resource from a remote server
"""
try:
response = requests.get(url)
return BytesIO(response.content)
except IOError as ex:
parser.error("{url} could not be loaded remotely! ({ex})".format(url=url, ex=ex)) | Loads a URL resource from a remote server |
def _get_YYTfactor(self, Y):
"""
find a matrix L which satisfies LLT = YYT.
Note that L may have fewer columns than Y.
"""
N, D = Y.shape
if (N>=D):
return Y.view(np.ndarray)
else:
return jitchol(tdot(Y)) | find a matrix L which satisfies LLT = YYT.
Note that L may have fewer columns than Y. |
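A plain-NumPy sketch of the identity this helper relies on; np.linalg.cholesky with a small jitter stands in for jitchol(tdot(Y)), which is an assumption about those GPy helpers rather than a drop-in replacement.
import numpy as np

rng = np.random.default_rng(0)
Y = rng.standard_normal((3, 10))                     # N < D, so the factor is smaller than Y
L = np.linalg.cholesky(Y @ Y.T + 1e-10 * np.eye(3))  # jittered Cholesky of Y Y^T
print(np.allclose(L @ L.T, Y @ Y.T))                 # True: L L^T reproduces Y Y^T with fewer columns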
def is_valid_coordinate(img, i, j, k):
"""Return True if the given (i, j, k) voxel grid coordinate values are within the img boundaries.
Parameters
----------
@param img:
@param i:
@param j:
@param k:
Returns
-------
bool
"""
imgx, imgy, imgz = get_shape(img)
return (i >= 0 and i < imgx) and \
(j >= 0 and j < imgy) and \
(k >= 0 and k < imgz) | Return True if the given (i, j, k) voxel grid coordinate values are within the img boundaries.
Parameters
----------
@param img:
@param i:
@param j:
@param k:
Returns
-------
bool |
def curse(rest):
"Curse the day!"
if rest:
cursee = rest
else:
cursee = 'the day'
karma.Karma.store.change(cursee, -1)
return "/me curses %s!" % cursee | Curse the day! |
def verify(self, msg, sig, key):
"""
Verify a message signature
:param msg: The message
:param sig: A signature
:param key: A ec.EllipticCurvePublicKey to use for the verification.
:raises: BadSignature if the signature can't be verified.
:return: True
"""
if not isinstance(key, ec.EllipticCurvePublicKey):
raise TypeError(
"The public key must be an instance of "
"ec.EllipticCurvePublicKey")
self._cross_check(key)
num_bits = key.curve.key_size
num_bytes = (num_bits + 7) // 8
if len(sig) != 2 * num_bytes:
raise ValueError('Invalid signature')
try:
# cryptography uses ASN.1-encoded signature data; split JWS
# signature (r||s) and encode before verification
(r, s) = self._split_raw_signature(sig)
asn1sig = encode_dss_signature(r, s)
key.verify(asn1sig, msg, ec.ECDSA(self.hash_algorithm()))
except InvalidSignature as err:
raise BadSignature(err)
else:
return True | Verify a message signature
:param msg: The message
:param sig: A signature
:param key: A ec.EllipticCurvePublicKey to use for the verification.
:raises: BadSignature if the signature can't be verified.
:return: True |
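A hedged sketch of the raw-versus-DER signature handling this method assumes, using only the cryptography package; the fixed-width r||s packing mirrors what a JWS implementation typically does and is not taken from this class's _split_raw_signature.
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric.utils import (
    decode_dss_signature, encode_dss_signature)

private_key = ec.generate_private_key(ec.SECP256R1())
msg = b"payload"

# cryptography produces an ASN.1/DER signature; JWS wants fixed-width r||s bytes
r, s = decode_dss_signature(private_key.sign(msg, ec.ECDSA(hashes.SHA256())))
num_bytes = (private_key.curve.key_size + 7) // 8
raw_sig = r.to_bytes(num_bytes, "big") + s.to_bytes(num_bytes, "big")

# verification goes the other way: split raw r||s, re-encode as DER, then verify
r2 = int.from_bytes(raw_sig[:num_bytes], "big")
s2 = int.from_bytes(raw_sig[num_bytes:], "big")
private_key.public_key().verify(
    encode_dss_signature(r2, s2), msg, ec.ECDSA(hashes.SHA256()))
print("signature verified")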
def _resource_prefix(self, resource=None):
"""Get elastic prefix for given resource.
Resource can specify ``elastic_prefix`` which behaves same like ``mongo_prefix``.
"""
px = 'ELASTICSEARCH'
if resource and config.DOMAIN[resource].get('elastic_prefix'):
px = config.DOMAIN[resource].get('elastic_prefix')
return px | Get elastic prefix for given resource.
Resource can specify ``elastic_prefix`` which behaves same like ``mongo_prefix``. |
def from_(self, table, alias=None):
"""
        Set the data source (and optionally an alias).
"""
if isinstance(table, str):
table = [[table, alias]]
self.raw_tables = table
        return self | Set the data source (and optionally an alias).
def copy_sig(sig, opts, isdiff):
"""Deploy a sig"""
info("[+] \033[92mDeploying signature:\033[0m %s" % sig)
if isdiff:
sourcefile = os.path.join(opts.workdir, '%s.cdiff' % sig)
destfile = os.path.join(opts.mirrordir, '%s.cdiff' % sig)
else:
sourcefile = os.path.join(opts.workdir, '%s.cvd' % sig)
destfile = os.path.join(opts.mirrordir, '%s.cvd' % sig)
deploy_signature(sourcefile, destfile, opts.user, opts.group)
info("=> Deployed signature: %s" % sig) | Deploy a sig |
def _parse_team_name(self, team):
"""
Parse the team name in the contract table.
The team names in the contract table contain special encoded characters
that are not supported by Python 2.7. These characters should be
filtered out to get the proper team name.
Parameters
----------
team : string
A string representing the team_name tag in a row in the player's
contract table.
Returns
-------
string
A string of the team's name, such as 'Houston Astros'.
"""
team = team.replace(' ', ' ')
team = team.replace('\xa0', ' ')
team_html = pq(team)
return team_html.text() | Parse the team name in the contract table.
The team names in the contract table contain special encoded characters
that are not supported by Python 2.7. These characters should be
filtered out to get the proper team name.
Parameters
----------
team : string
A string representing the team_name tag in a row in the player's
contract table.
Returns
-------
string
A string of the team's name, such as 'Houston Astros'. |
def linescore(self):
"""Returns the linescore for the game as a DataFrame."""
doc = self.get_main_doc()
table = doc('table#line_score')
columns = [th.text() for th in table('tr.thead').items('th')]
columns[0] = 'team_id'
data = [
[sportsref.utils.flatten_links(td) for td in tr('td').items()]
for tr in table('tr.thead').next_all('tr').items()
]
return pd.DataFrame(data, index=['away', 'home'],
columns=columns, dtype='float') | Returns the linescore for the game as a DataFrame. |
def not_storable(_type):
"""
Helper for tagging unserializable types.
Arguments:
_type (type): type to be ignored.
Returns:
Storable: storable instance that does not poke.
"""
return Storable(_type, handlers=StorableHandler(poke=fake_poke, peek=fail_peek(_type))) | Helper for tagging unserializable types.
Arguments:
_type (type): type to be ignored.
Returns:
Storable: storable instance that does not poke. |
def write_local_file(fp, name_bytes, writer, dt):
"""
Writes a zip file local file header structure at the current file position.
Returns data_len, crc32 for the data.
    :param fp: the file pointer to which to write the header
    :param name_bytes: the name of the file, as bytes
:param writer: a function taking an fp parameter to do the writing, returns crc32
:param dt: the datetime to write to the archive
"""
fp.write(struct.pack('I', 0x04034b50)) # local file header
fp.write(struct.pack('H', 10)) # extract version (default)
fp.write(struct.pack('H', 0)) # general purpose bits
fp.write(struct.pack('H', 0)) # compression method
msdos_date = int(dt.year - 1980) << 9 | int(dt.month) << 5 | int(dt.day)
    msdos_time = int(dt.hour) << 11 | int(dt.minute) << 5 | int(dt.second) // 2  # MS-DOS stores seconds / 2
    fp.write(struct.pack('H', msdos_time))  # last mod file time
    fp.write(struct.pack('H', msdos_date))  # last mod file date
crc32_pos = fp.tell()
fp.write(struct.pack('I', 0)) # crc32 placeholder
data_len_pos = fp.tell()
fp.write(struct.pack('I', 0)) # compressed length placeholder
fp.write(struct.pack('I', 0)) # uncompressed length placeholder
fp.write(struct.pack('H', len(name_bytes))) # name length
fp.write(struct.pack('H', 0)) # extra length
fp.write(name_bytes)
data_start_pos = fp.tell()
crc32 = writer(fp)
data_end_pos = fp.tell()
data_len = data_end_pos - data_start_pos
fp.seek(crc32_pos)
fp.write(struct.pack('I', crc32)) # crc32
fp.seek(data_len_pos)
    fp.write(struct.pack('I', data_len))  # compressed length
    fp.write(struct.pack('I', data_len))  # uncompressed length (data is stored uncompressed)
fp.seek(data_end_pos)
return data_len, crc32 | Writes a zip file local file header structure at the current file position.
Returns data_len, crc32 for the data.
:param fp: the file pointer to which to write the header
:param name_bytes: the name of the file, as bytes
:param writer: a function taking an fp parameter to do the writing, returns crc32
:param dt: the datetime to write to the archive |
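A minimal usage sketch, assuming the function above (and its struct import) is in scope; the writer callback streams the payload and returns its CRC-32 via zlib. Note this writes only a local file header plus data, not a complete zip archive (the central directory is still missing).
import io
import zlib
from datetime import datetime

payload = b"hello, archive"

def crc_writer(fp):
    fp.write(payload)
    return zlib.crc32(payload) & 0xFFFFFFFF

buf = io.BytesIO()
data_len, crc = write_local_file(buf, b"hello.txt", crc_writer, datetime(2020, 1, 2, 3, 4, 6))
print(data_len, hex(crc))  # 14 and the payload's CRC-32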
def zcr(data):
"""Computes zero crossing rate of segment"""
data = np.mean(data, axis=1)
count = len(data)
countZ = np.sum(np.abs(np.diff(np.sign(data)))) / 2
return (np.float64(countZ) / np.float64(count - 1.0)) | Computes zero crossing rate of segment |
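A quick sanity-check sketch, assuming zcr and numpy are in scope; the input is a (samples, channels) array because the function averages over axis=1.
import numpy as np

# two-channel square wave that flips sign every sample: a crossing between every pair
data = np.column_stack([np.tile([1.0, -1.0], 8), np.tile([1.0, -1.0], 8)])
print(zcr(data))  # 1.0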
def _plot_x(self, iabscissa=1, x_opt=None, remark=None,
annotations=None):
"""If ``x_opt is not None`` the difference to x_opt is plotted
in log scale
"""
if not hasattr(self, 'x'):
_print_warning('no x-attributed found, use methods ' +
'plot_xrecent or plot_mean', 'plot_x',
'CMADataLogger')
return
from matplotlib.pyplot import plot, semilogy, hold, text, grid, axis, title
dat = self # for convenience and historical reasons
# modify fake last entry in x for line extension-annotation
if dat.x.shape[1] < 100:
minxend = int(1.06 * dat.x[-2, iabscissa])
# write y-values for individual annotation into dat.x
dat.x[-1, iabscissa] = minxend # TODO: should be ax[1]
if x_opt is None:
idx = np.argsort(dat.x[-2, 5:])
idx2 = np.argsort(idx)
dat.x[-1, 5 + idx] = np.linspace(np.min(dat.x[:, 5:]),
np.max(dat.x[:, 5:]), dat.x.shape[1] - 5)
else: # y-axis is in log
xdat = np.abs(dat.x[:, 5:] - np.array(x_opt, copy=False))
idx = np.argsort(xdat[-2, :])
idx2 = np.argsort(idx)
xdat[-1, idx] = np.logspace(np.log10(np.min(abs(xdat[xdat!=0]))),
np.log10(np.max(np.abs(xdat))),
dat.x.shape[1] - 5)
else:
minxend = 0
self._enter_plotting()
        if x_opt is not None:  # TODO: differentiate neg and pos?
semilogy(dat.x[:, iabscissa], abs(xdat), '-')
else:
plot(dat.x[:, iabscissa], dat.x[:, 5:], '-')
hold(True)
grid(True)
ax = array(axis())
# ax[1] = max(minxend, ax[1])
axis(ax)
ax[1] -= 1e-6 # to prevent last x-tick annotation, probably superfluous
if dat.x.shape[1] < 100:
yy = np.linspace(ax[2] + 1e-6, ax[3] - 1e-6, dat.x.shape[1] - 5)
# yyl = np.sort(dat.x[-1,5:])
if x_opt is not None:
# semilogy([dat.x[-1, iabscissa], ax[1]], [abs(dat.x[-1, 5:]), yy[idx2]], 'k-') # line from last data point
semilogy(np.dot(dat.x[-2, iabscissa], [1, 1]),
array([ax[2] * (1+1e-6), ax[3] / (1+1e-6)]), 'k-')
else:
# plot([dat.x[-1, iabscissa], ax[1]], [dat.x[-1,5:], yy[idx2]], 'k-') # line from last data point
plot(np.dot(dat.x[-2, iabscissa], [1, 1]),
array([ax[2] + 1e-6, ax[3] - 1e-6]), 'k-')
# plot(array([dat.x[-1, iabscissa], ax[1]]),
# reshape(array([dat.x[-1,5:], yy[idx2]]).flatten(), (2,4)), '-k')
for i in rglen(idx):
# TODOqqq: annotate phenotypic value!?
# text(ax[1], yy[i], 'x(' + str(idx[i]) + ')=' + str(dat.x[-2,5+idx[i]]))
text(dat.x[-1, iabscissa], dat.x[-1, 5 + i]
if x_opt is None else np.abs(xdat[-1, i]),
('x(' + str(i) + ')=' if annotations is None
else str(i) + ':' + annotations[i] + "=")
+ str(dat.x[-2, 5 + i]))
i = 2 # find smallest i where iteration count differs (in case the same row appears twice)
while i < len(dat.f) and dat.f[-i][0] == dat.f[-1][0]:
i += 1
title('Object Variables (' +
(remark + ', ' if remark is not None else '') +
str(dat.x.shape[1] - 5) + '-D, popsize~' +
(str(int((dat.f[-1][1] - dat.f[-i][1]) / (dat.f[-1][0] - dat.f[-i][0])))
if len(dat.f.T[0]) > 1 and dat.f[-1][0] > dat.f[-i][0] else 'NA')
+ ')')
self._finalize_plotting() | If ``x_opt is not None`` the difference to x_opt is plotted
in log scale |
def _handler_swagger_ui(self, request, spec, version):
"""
---
parameters:
- name: spec
in: query
type: string
- name: version
in: query
type: integer
enum: [2,3]
"""
version = version or self._version_ui
if self._spec_url:
spec_url = self._spec_url
else:
spec_url = request.url.with_path(self['swagger:spec'].url())
proto = request.headers.get(hdrs.X_FORWARDED_PROTO)
if proto:
spec_url = spec_url.with_scheme(proto)
if isinstance(spec, str):
spec_url = spec_url.with_query(spec=spec)
elif len(self._swagger_data) == 1:
for basePath in self._swagger_data:
spec_url = spec_url.with_query(spec=basePath)
else:
spec_url = spec_url.with_query(spec='/')
spec_url = spec_url.human_repr()
return web.Response(
text=ui.rend_template(spec_url,
prefix=self._swagger_ui,
version=version),
content_type='text/html') | ---
parameters:
- name: spec
in: query
type: string
- name: version
in: query
type: integer
enum: [2,3] |
def _add_token(self, token, parent_node='root'):
"""add a token to this docgraph"""
if parent_node == 'root':
parent_node = self.root
token_node_id = 'token:{}'.format(self.token_count)
self.add_node(token_node_id, layers={self.ns, self.ns+':token'},
attr_dict={self.ns+':token': token})
self.add_edge(parent_node, token_node_id,
layers={self.ns},
edge_type=EdgeTypes.spanning_relation)
self.tokens.append(token_node_id)
self.token_count += 1 | add a token to this docgraph |
def run(config, tag, bucket, account, not_bucket, not_account, debug, region):
"""Run across a set of accounts and buckets."""
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s: %(name)s:%(levelname)s %(message)s")
logging.getLogger('botocore').setLevel(level=logging.WARNING)
if debug:
def invoke(f, *args, **kw):
# if f.func_name == 'process_keyset':
# key_count = len(args[-1])
# print("debug skip keyset %d" % key_count)
# return
return f(*args, **kw)
worker.invoke = invoke
with open(config) as fh:
data = utils.yaml_load(fh.read())
for account_info in data.get('accounts', ()):
if tag and tag not in account_info.get('tags', ()):
continue
if account and account_info['name'] not in account:
continue
if not_account and account_info['name'] in not_account:
continue
if 'inventory' in data and 'inventory' not in account_info:
account_info['inventory'] = data['inventory']
if 'visitors' in data and 'visitors' not in account_info:
account_info['visitors'] = data['visitors']
if 'object-reporting' in data and 'object-reporting' not in account_info:
account_info['object-reporting'] = data['object-reporting']
account_info['object-reporting'][
'record-prefix'] = datetime.utcnow().strftime('%Y/%m/%d')
if bucket:
account_info['buckets'] = bucket
if not_bucket:
account_info['not-buckets'] = not_bucket
if region:
account_info['regions'] = region
try:
worker.invoke(worker.process_account, account_info)
except Exception:
if not debug:
raise
import pdb, traceback, sys
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[-1])
raise | Run across a set of accounts and buckets. |
def guass(self, mu: float, sigma: float) -> float:
"""Return a random number using Gaussian distribution.
Args:
mu (float): The median returned value.
sigma (float): The standard deviation.
Returns:
float: A random float.
"""
return float(
lib.TCOD_random_get_gaussian_double(self.random_c, mu, sigma)
) | Return a random number using Gaussian distribution.
Args:
mu (float): The median returned value.
sigma (float): The standard deviation.
Returns:
float: A random float. |
def compute_mu(L_aug, Y, k, p):
"""Given label matrix L_aug and labels Y, compute the true mu params.
Args:
L: (np.array {0,1}) [n, d] The augmented (indicator) label matrix
Y: (np.array int) [n] The true labels in {1,...,k}
k: (int) Cardinality
p: (np.array float) [k] The class balance
"""
n, d = L_aug.shape
assert Y.shape[0] == n
# Compute mu
mu = np.zeros((d, k))
for y in range(1, k + 1):
L_y = L_aug[Y == y]
mu[:, y - 1] = L_y.sum(axis=0) / L_y.shape[0]
return mu | Given label matrix L_aug and labels Y, compute the true mu params.
Args:
L: (np.array {0,1}) [n, d] The augmented (indicator) label matrix
Y: (np.array int) [n] The true labels in {1,...,k}
k: (int) Cardinality
p: (np.array float) [k] The class balance |
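A toy sketch, assuming compute_mu above is in scope: one labeling function over k=2 classes gives d=2 indicator columns, and the recovered mu is the per-class firing rate.
import numpy as np

L_aug = np.array([[1, 0], [1, 0], [0, 1], [0, 1]])  # n=4 items, d=2 indicator columns
Y = np.array([1, 1, 2, 2])                          # true labels in {1, 2}
p = np.array([0.5, 0.5])                            # class balance
print(compute_mu(L_aug, Y, k=2, p=p))
# [[1. 0.]
#  [0. 1.]]  -- column y-1 holds the mean indicator among items with true label y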
def write(self, data):
"""
Write data to the file. If write buffering is on (``bufsize`` was
specified and non-zero), some or all of the data may not actually be
written yet. (Use `flush` or `close` to force buffered data to be
written out.)
:param data: ``str``/``bytes`` data to write
"""
if isinstance(data, text_type):
# Accept text and encode as utf-8 for compatibility only.
data = data.encode("utf-8")
if self._closed:
raise IOError("File is closed")
if not (self._flags & self.FLAG_WRITE):
raise IOError("File not open for writing")
if not (self._flags & self.FLAG_BUFFERED):
self._write_all(data)
return
self._wbuffer.write(data)
if self._flags & self.FLAG_LINE_BUFFERED:
# only scan the new data for linefeed, to avoid wasting time.
last_newline_pos = data.rfind(linefeed_byte)
if last_newline_pos >= 0:
wbuf = self._wbuffer.getvalue()
last_newline_pos += len(wbuf) - len(data)
self._write_all(wbuf[: last_newline_pos + 1])
self._wbuffer = BytesIO()
self._wbuffer.write(wbuf[last_newline_pos + 1 :])
return
# even if we're line buffering, if the buffer has grown past the
# buffer size, force a flush.
if self._wbuffer.tell() >= self._bufsize:
self.flush()
return | Write data to the file. If write buffering is on (``bufsize`` was
specified and non-zero), some or all of the data may not actually be
written yet. (Use `flush` or `close` to force buffered data to be
written out.)
:param data: ``str``/``bytes`` data to write |
def deltasigma_nfw(self):
"""Calculate NFW differential surface mass density profile.
Generate the differential surface mass density profiles of each cluster
halo, assuming a spherical NFW model. Optionally includes the effect of
cluster miscentering offsets, if the parent object was initialized
with offsets.
Returns
----------
Quantity
Differential surface mass density profiles (ndarray, in
astropy.units of Msun/pc/pc). Each row corresponds to a single
cluster halo.
"""
def _centered_dsigma(self):
# calculate g
firstpart = np.zeros_like(self._x)
secondpart = np.zeros_like(self._x)
g = np.zeros_like(self._x)
small_1a = 4. / self._x[self._x_small]**2
small_1b = 2. / (self._x[self._x_small]**2 - 1.)
small_1c = np.sqrt(1. - self._x[self._x_small]**2)
firstpart[self._x_small] = (small_1a + small_1b) / small_1c
big_1a = 8. / (self._x[self._x_big]**2 *
np.sqrt(self._x[self._x_big]**2 - 1.))
big_1b = 4. / ((self._x[self._x_big]**2 - 1.)**1.5)
firstpart[self._x_big] = big_1a + big_1b
small_2a = np.sqrt((1. - self._x[self._x_small]) /
(1. + self._x[self._x_small]))
secondpart[self._x_small] = np.log((1. + small_2a) /
(1. - small_2a))
big_2a = self._x[self._x_big] - 1.
big_2b = 1. + self._x[self._x_big]
secondpart[self._x_big] = np.arctan(np.sqrt(big_2a / big_2b))
both_3a = (4. / (self._x**2)) * np.log(self._x / 2.)
both_3b = 2. / (self._x**2 - 1.)
g = firstpart * secondpart + both_3a - both_3b
g[self._x_one] = (10. / 3.) + 4. * np.log(0.5)
if np.isnan(np.sum(g)) or np.isinf(np.sum(g)):
print('\nERROR: g is not all real\n', g)
# calculate & return centered profile
deltasigma = self._rs_dc_rcrit * g
return deltasigma
def _offset_dsigma(self):
original_rbins = self._rbins.value
# if offset sigma was already calculated, use it!
try:
sigma_sm_rbins = self._sigma_sm
except AttributeError:
sigma_sm_rbins = self.sigma_nfw()
innermost_sampling = 1.e-10 # stable for anything below 1e-5
inner_prec = self._numRinner
r_inner = np.linspace(innermost_sampling,
original_rbins.min(),
endpoint=False, num=inner_prec)
outer_prec = self._factorRouter * self._nbins
r_outer = np.linspace(original_rbins.min(),
original_rbins.max(),
endpoint=False, num=outer_prec + 1)[1:]
r_ext_unordered = np.hstack([r_inner, r_outer, original_rbins])
r_extended = np.sort(r_ext_unordered)
# set temporary extended rbins, nbins, x, rs_dc_rcrit array
self._rbins = r_extended * units.Mpc
self._nbins = self._rbins.shape[0]
_set_dimensionless_radius(self) # uses _rbins, _nlens
rs_dc_rcrit = self._rs * self._delta_c * self._rho_crit
self._rs_dc_rcrit = rs_dc_rcrit.reshape(self._nlens,
1).repeat(self._nbins, 1)
sigma_sm_extended = self.sigma_nfw()
mean_inside_sigma_sm = np.zeros([self._nlens,
original_rbins.shape[0]])
for i, r in enumerate(original_rbins):
index_of_rbin = np.where(r_extended == r)[0][0]
x = r_extended[0:index_of_rbin + 1]
y = sigma_sm_extended[:, 0:index_of_rbin + 1] * x
integral = simps(y, x=x, axis=-1, even='first')
# average of sigma_sm at r < rbin
mean_inside_sigma_sm[:, i] = (2. / r**2) * integral
mean_inside_sigma_sm = mean_inside_sigma_sm * (units.Msun /
units.pc**2)
# reset original rbins, nbins, x
self._rbins = original_rbins * units.Mpc
self._nbins = self._rbins.shape[0]
_set_dimensionless_radius(self)
rs_dc_rcrit = self._rs * self._delta_c * self._rho_crit
self._rs_dc_rcrit = rs_dc_rcrit.reshape(self._nlens,
1).repeat(self._nbins, 1)
self._sigma_sm = sigma_sm_rbins # reset to original sigma_sm
dsigma_sm = mean_inside_sigma_sm - sigma_sm_rbins
return dsigma_sm
if self._sigmaoffset is None:
finaldeltasigma = _centered_dsigma(self)
elif np.abs(self._sigmaoffset).sum() == 0:
finaldeltasigma = _centered_dsigma(self)
else:
finaldeltasigma = _offset_dsigma(self)
return finaldeltasigma | Calculate NFW differential surface mass density profile.
Generate the differential surface mass density profiles of each cluster
halo, assuming a spherical NFW model. Optionally includes the effect of
cluster miscentering offsets, if the parent object was initialized
with offsets.
Returns
----------
Quantity
Differential surface mass density profiles (ndarray, in
astropy.units of Msun/pc/pc). Each row corresponds to a single
cluster halo. |
def interpret_stats(results):
"""Generates the string to be shown as updates after the execution of a
Cypher query
:param results: ``ResultSet`` with the raw results of the execution of
the Cypher query
"""
stats = results.stats
contains_updates = stats.pop("contains_updates", False) if stats else False
if not contains_updates:
result = '{} rows affected.'.format(len(results))
else:
result = ''
for stat, value in stats.items():
if value:
result = "{}\n{} {}.".format(result, value,
stat.replace("_", " "))
return result.strip() | Generates the string to be shown as updates after the execution of a
Cypher query
:param results: ``ResultSet`` with the raw results of the execution of
the Cypher query |
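An illustrative sketch with a hypothetical stand-in for the driver's ResultSet (a list of rows carrying a stats dict); the real object's interface is assumed here, not taken from the source.
class FakeResultSet(list):
    """Hypothetical stand-in: a list of rows with a .stats dict attached."""
    def __init__(self, rows, stats):
        super().__init__(rows)
        self.stats = stats

print(interpret_stats(FakeResultSet([(1,), (2,)], {"contains_updates": False})))
# 2 rows affected.
print(interpret_stats(FakeResultSet([], {"contains_updates": True, "nodes_created": 3})))
# 3 nodes created.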