Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k)
---|---|---
382,000 | def solar_position_numba(unixtime, lat, lon, elev, pressure, temp, delta_t,
atmos_refract, numthreads, sst=False, esd=False):
loc_args = np.array([lat, lon, elev, pressure, temp, delta_t,
atmos_refract, sst, esd])
ulength = unixtime.shape[0]
if sst:
dims = 3
elif esd:
dims = 1
else:
dims = 6
result = np.empty((dims, ulength), dtype=np.float64)
if unixtime.dtype != np.float64:
unixtime = unixtime.astype(np.float64)
if ulength < numthreads:
        warnings.warn(
            'The number of threads is more than the length of the time '
            'array. Only using {0} threads.'.format(ulength))
numthreads = ulength
if numthreads <= 1:
solar_position_loop(unixtime, loc_args, result)
return result
split0 = np.array_split(unixtime, numthreads)
split2 = np.array_split(result, numthreads, axis=1)
chunks = [[a0, loc_args, split2[i]] for i, a0 in enumerate(split0)]
threads = [threading.Thread(target=solar_position_loop, args=chunk)
for chunk in chunks]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return result | Calculate the solar position using the numba compiled functions
and multiple threads. Very slow if functions are not numba compiled. |
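The splitting pattern above is worth noting: `np.array_split` on the result array returns views, so each thread fills its own column slice of the shared output without any locking. A minimal self-contained sketch of that pattern (the worker here is a stand-in, not pvlib's numba-compiled kernel):

```python
import threading
import numpy as np

def worker(times, out):
    # Stand-in for the real per-timestamp computation; writes into a
    # view of the shared result array.
    out[0, :] = times * 2.0

def run_threaded(unixtime, numthreads):
    result = np.empty((1, unixtime.shape[0]), dtype=np.float64)
    splits = np.array_split(unixtime, numthreads)
    outs = np.array_split(result, numthreads, axis=1)  # views into result
    threads = [threading.Thread(target=worker, args=(s, o))
               for s, o in zip(splits, outs)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return result

print(run_threaded(np.arange(6, dtype=np.float64), 3))  # [[ 0. 2. 4. 6. 8. 10.]]
```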
382,001 | def run(self):
indicator = models.EuroStatIndicator(
number=self.number,
description=self.description,
url="http://ec.europa.eu/eurostat/web/products-datasets/-/tgs" + self.number)
self.session.add(indicator)
self.session.commit()
df = next(self.requires()).load(key_filter=self.key_filter,
header_preproc=self.header_preproc)
    # NOTE: the column-name literals below were lost in extraction; the
    # names used here are plausible reconstructions, not the originals.
    values = df.set_index('region').stack()
    values.index.levels[1].name = 'year'
    values.name = 'value'
    df = values.reset_index()
    max_id = models.EuroStatValue.get_max_id(self.session)
    df['id'] = list(range(max_id + 1, max_id + 1 + len(df)))
    df['indicator_id'] = indicator.id
    regions = self.client.df_query(self.session.query(models.NUTS2Region)) \
        .set_index('name')['id']
    df['region_id'] = df['region'].map(regions)
    df = df.drop(['region'], axis=1)
    df.to_sql(name=models.EuroStatValue.__tablename__,
              con=client.get_client().engine,
              if_exists='append',
              index=False)
self.done() | Load table data to :class:`EuroStatsValue` objects |
382,002 | def data_to_dict(self, sysbase=False):
assert isinstance(sysbase, bool)
ret = {}
for key in self.data_keys:
if (not sysbase) and (key in self._store):
val = self._store[key]
else:
val = self.__dict__[key]
ret[key] = val
return ret | Return the loaded model parameters as one dictionary.
Each key of the dictionary is a parameter name, and the value is a
list of all the parameter values.
:param sysbase: use system base quantities
:type sysbase: bool |
382,003 | def read_cdx(file, encoding='utf8'):
    with codecs.getreader(encoding)(file) as stream:
        header_line = stream.readline()
        separator = header_line[0]
        field_keys = header_line.strip().split(separator)
        if field_keys.pop(0) != 'CDX':
            raise ValueError('CDX header not found.')
for line in stream:
yield dict(zip(field_keys, line.strip().split(separator))) | Iterate CDX file.
Args:
file: A file object.
encoding (str): The encoding of the file.
Returns:
iterator: Each item is a dict that maps from field key to value. |
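A quick usage sketch, assuming a minimal two-field CDX file where the separator is the first character of the header line:

```python
import io

# ' CDX a b' declares fields 'a' and 'b', separated by a space.
raw = io.BytesIO(b' CDX a b\nexample.com 20190101000000\n')
for record in read_cdx(raw, encoding='utf8'):
    print(record)  # {'a': 'example.com', 'b': '20190101000000'}
```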
382,004 | def available(name):
for service in get_all():
if name.lower() == service.lower():
return True
return False | Check if a service is available on the system.
Args:
name (str): The name of the service to check
Returns:
bool: ``True`` if the service is available, ``False`` otherwise
CLI Example:
.. code-block:: bash
salt '*' service.available <service name> |
382,005 | def to_operator(self):
mat = _to_operator(self.rep, self._data, *self.dim)
return Operator(mat, self.input_dims(), self.output_dims()) | Try to convert channel to a unitary representation Operator. |
382,006 | def load_schema(name):
    # NOTE: the dict-key literals below were lost in extraction and have
    # been reconstructed from JSON-schema conventions.
    schema = import_schema_to_json(name)
    for item in schema['links']:
        href_value = item['href']
        rel_value = item['rel']
        schema[rel_value] = href_value
        del item
    for prop in schema['properties']:
        value = schema['properties'][prop]
        is_type_array = (value['type'] == 'array')
        is_type_object = (value['type'] == 'object')
        if ((is_type_array or is_type_object)
                and (_value_properties_are_referenced(value))):
            schema = _load_referenced_schema_from_properties(value, schema, prop)
        if is_type_array and _value_is_default_any(value) and _value_has_items_key(value):
            schema = _load_referenced_schemes_from_list(value['items'], value, schema, prop)
        if _value_is_required(value):
            schema['properties'][prop]['required'] = False
        if _value_is_type_text(value):
            log.debug("patched text to string")
            schema['properties'][prop]['type'] = u"string"
    schema['id'] = schema['name']
    return schema | Loads the schema by name.
:param name: name of the model |
382,007 | def _repo_url_to_path(self, repo):
        # NOTE: the replacement literals were lost in extraction; stripping
        # the URL scheme and mapping separators is a plausible reconstruction.
        repo = repo.replace('https://', '')
        repo = repo.replace('http://', '')
        repo = repo.replace('/', os.sep)
return os.sep.join([self._data_directory, repo]) | Convert a `repo` url to a file path for local storage. |
382,008 | def export_project(self):
generated_projects = deepcopy(self.generated_projects)
self.process_data_for_makefile(self.workspace)
        # NOTE: dict keys and template names reconstructed; originals lost in extraction.
        generated_projects['path'], generated_projects['files']['makefile'] = self.gen_file_jinja(
            'makefile_gcc.tmpl', self.workspace, 'Makefile', self.workspace['output_dir']['path'])
return generated_projects | Processes misc options specific for GCC ARM, and run generator |
382,009 | def catalog(self, table='', column=''):
lookup_table = self.lookup_table
if lookup_table is not None:
if table:
if column:
column = column.upper()
return lookup_table[table][column]
return lookup_table[table]
return self.lookup_methods
return None | Lookup the values available for querying. |
382,010 | def get(self, index):
assert index <= self.count
assert index < self.size
offset = index * self.chunk_size
return self.data[offset:offset + self.chunk_size] | Get a chunk by index |
382,011 | def _active_mounts_aix(ret):
    # NOTE: string literals reconstructed from context (AIX `mount -p` output).
    for line in __salt__['cmd.run_stdout']('mount -p').split('\n'):
        comps = re.sub(r"\s+", " ", line).split()
        if comps:
            if comps[0] == 'node' or comps[0] == '--------':
                continue
            comps_len = len(comps)
            if line.startswith((' ', '\t')):
                curr_opts = _resolve_user_group_names(comps[6].split(',')) if 7 == comps_len else []
                if curr_opts:
                    ret[comps[1]] = {'device': comps[0],
                                     'fstype': comps[2],
                                     'opts': curr_opts}
                else:
                    ret[comps[1]] = {'device': comps[0],
                                     'fstype': comps[2]}
            else:
                curr_opts = _resolve_user_group_names(comps[7].split(',')) if 8 == comps_len else []
                if curr_opts:
                    ret[comps[2]] = {'node': comps[0],
                                     'device': comps[1],
                                     'fstype': comps[3],
                                     'opts': curr_opts}
                else:
                    ret[comps[2]] = {'node': comps[0],
                                     'device': comps[1],
                                     'fstype': comps[3]}
return ret | List active mounts on AIX systems |
382,012 | def delete_project(self, tenant_name, part_name):
res = self._delete_partition(tenant_name, part_name)
if res and res.status_code in self._resp_ok:
LOG.debug("Deleted %s partition in DCNM.", part_name)
else:
LOG.error("Failed to delete %(part)s partition in DCNM."
"Response: %(res)s", {: part_name, : res})
raise dexc.DfaClientRequestFailed(reason=res)
res = self._delete_org(tenant_name)
if res and res.status_code in self._resp_ok:
LOG.debug("Deleted %s organization in DCNM.", tenant_name)
else:
LOG.error("Failed to delete %(org)s organization in DCNM."
"Response: %(res)s", {: tenant_name, : res})
raise dexc.DfaClientRequestFailed(reason=res) | Delete project on the DCNM.
:param tenant_name: name of project.
:param part_name: name of partition. |
382,013 | def take_screenshot(self, screenshot_name=None, screenshot_path=None):
self.info_log("Taking a screenshot...")
save_to_db = False
if screenshot_path:
self._driver.save_screenshot(screenshot_path)
self.debug_log("Screenshot taken (%s)" % screenshot_path)
elif screenshot_name:
take_screenshot = True
if hasattr(self.runner, "screenshot_cache"):
if self.runner.screenshot_cache.get(screenshot_name):
self.debug_log(
"screenshot(%s) found in cache" % screenshot_name
)
take_screenshot = False
if take_screenshot:
if self.test_instance._runner_dir:
                    _screenshot_name = '%s.png' % \
string_to_filename(screenshot_name)
relative_path = os.path.join(
self.test_instance._screenshot_relative_dir,
_screenshot_name
)
full_path = os.path.join(
self.test_instance._screenshot_dir,
_screenshot_name
)
self._driver.save_screenshot(
full_path
)
self.debug_log("Screenshot taken (%s)" % full_path)
save_to_db = True
else:
if self.test_instance._runner_dir:
screenshot_name = get_timestamp()
                _screenshot_name = '%s.png' % screenshot_name
relative_path = os.path.join(
self.test_instance._screenshot_relative_dir,
_screenshot_name
)
full_path = os.path.join(
self.test_instance._screenshot_dir,
_screenshot_name
)
self._driver.save_screenshot(
full_path
)
self.debug_log("Screenshot taken (%s)" % full_path)
save_to_db = True
if save_to_db:
            # NOTE: config and capability key names reconstructed; originals lost.
            with DbSessionContext(BROME_CONFIG['database']['mongo_database_name']) as session:
                capabilities = {
                    'browserName': self.capabilities['browserName'],
                    'platform': self.capabilities['platform'],
                    'version': self.capabilities['version']
                }
screenshot = Testscreenshot()
screenshot.browser_capabilities = capabilities
screenshot.browser_id = self.get_id()
                screenshot.location = 'local_file_system'  # NOTE: value reconstructed
screenshot.root_path = self.test_instance._runner.root_test_result_dir
screenshot.file_path = relative_path
screenshot.extra_data = {}
screenshot.title = screenshot_name
screenshot.test_instance_id = self.test_instance._test_instance_id
screenshot.test_batch_id = self.test_instance._test_batch_id
session.save(screenshot, safe=True) | Take a screenshot
Use the screenshot_name arg when you want to take a
screenshot for reference
If the `runner:cache_screenshot` config is set to True then
screenshots sharing the same name will be saved only once
The screenshot_path arg is exclusively used by the
proxy_driver:create_test_result function
Args:
screenshot_name (str) the name of the screenshot
screenshot_path (str) the path of the screenshot |
382,014 | def raise_for_status(self):
if not self.ok:
        reason = self.reason or 'Unknown reason for %s' % self.url
        if not self.status_code:
            raise HttpConnectionError(reason, response=self)
        if 400 <= self.status_code < 500:
            http_error_msg = '%s Client Error: %s (%s %s)' % (
                self.status_code, reason, self.request.method, self.url)
        else:
            http_error_msg = '%s Server Error: %s (%s %s)' % (
                self.status_code, reason, self.request.method, self.url)
raise HttpRequestException(http_error_msg, response=self) | Raises stored :class:`HTTPError` or :class:`URLError`, if occurred. |
382,015 | def run(self, args=None):
try:
self._args = self.build_parser().parse_args(args)
if self._args.cmd is None:
if self._args.help_options:
self.show_options_help()
else:
self._parser.print_help()
return 1
self._default_log_level = DEFAULT_CMD_LOG_LEVEL[self._args.cmd]
self._setup_logging()
            if hasattr(self._args, 'daparg'):
DAPAccess.set_args(self._args.daparg)
self._COMMANDS[self._args.cmd](self)
return 0
except KeyboardInterrupt:
return 0
except exceptions.Error as e:
LOG.error(e, exc_info=Session.get_current().log_tracebacks)
except Exception as e:
LOG.error("uncaught exception: %s", e, exc_info=Session.get_current().log_tracebacks)
return 1 | ! @brief Main entry point for command line processing. |
382,016 | def get_advanced_foreign_key_options_sql(self, foreign_key):
query = ""
if self.supports_foreign_key_on_update() and foreign_key.has_option(
"on_update"
):
query += " ON UPDATE %s" % self.get_foreign_key_referential_action_sql(
foreign_key.get_option("on_update")
)
if foreign_key.has_option("on_delete"):
query += " ON DELETE %s" % self.get_foreign_key_referential_action_sql(
foreign_key.get_option("on_delete")
)
return query | Returns the FOREIGN KEY query section dealing with non-standard options
as MATCH, INITIALLY DEFERRED, ON UPDATE, ...
:param foreign_key: The foreign key
:type foreign_key: ForeignKeyConstraint
:rtype: str |
382,017 | def compileActions(self):
import re
self.actionList = actions = [None]*121
actions[73] = "b+w+b"
actionLines = self.actionTable.splitlines()
        colonPositions = [m.start()
                          for m in re.finditer(':', actionLines[1])
                          ] + [100]
columns = [(colonPositions[i]-3,colonPositions[i+1]-3)
for i in range(len(colonPositions)-1)]
for line in self.actionTable.splitlines(keepends=False):
for start,end in columns:
action = line[start:end]
if not action or action.isspace(): continue
                index, colon, action = action[:3], action[3], action[4:]
                assert colon == ':'
                action = action.rstrip()
                action = action.replace(' ', '')  # NOTE: replacement literals lost; plausible reconstruction
                wPos = action.index('w')
action = re.sub(r"^(.*)(?=\+[U(]*w)", r"b", action)
action = re.sub(r"(w[[:\-1\]).U]*)\+(.*)$", r"\1+b", action)
action = action.replace(".U", ".upper()")
actions[int(index)] = action | Build the action table from the text above |
382,018 | def setFontWeight(self, weight):
font = self.currentFont()
font.setWeight(weight)
        self.setCurrentFont(font) | Sets the font weight for this editor to the given weight.
:param weight | <QFont.Weight> |
382,019 | def set_config_file(self, path):
        # NOTE: key names and log messages reconstructed; originals lost.
        log = self._params.get('log', self._discard)
        if path != self._config_file:
            if self._config_file:
                log.info("Config file changed from %r to %r", self._config_file, path)
                self.file_del(self, paths=[self._config_file])
            else:
                log.info("Config file set to %r", path)
            self._config_file = path
            self.file_add(event_target(self, 'config_file', log=log), path)
return self._load_config() | Set the config file. The contents must be valid YAML and there
must be a top-level element 'tasks'. The listed tasks will be
started according to their configuration, and the file will
be watched for future changes. The changes will be activated
by appropriate changes to the running tasks. |
382,020 | def collect(self, device, ip, user, password):
if netappsdk is None:
self.log.error(
)
return
if device in self.running:
return
self.running.add(device)
prefix = self.config[]
pm = self.publish_metric
netapp_inodeCol(device, ip, user, password, prefix, pm)
self.running.remove(device) | Collects metrics for our netapp filer --START HERE-- |
382,021 | def nagiosCommandHelp(**kwargs):
    # NOTE: the help-file name was lost in extraction; a placeholder is used.
    with open(os.path.join(DIRECTORY, 'help.txt')) as document:
return document.read() | Returns command help document when no command is specified |
382,022 | def api_client_two_way(connection, connection_responder, client_class=xbahn.api.Client):
link = xbahn.connection.link.Link()
link.wire(
"main",
receive=connection,
send=connection,
meta={
"remote":connection_responder.remote
}
)
link.wire(
"responder",
receive=connection_responder,
respond=connection_responder
)
return client_class(link=link) | Establishes an API client for two-way communication
connection with an API Server
Arguments:
- connection (xbahn.connection.Connection)
- connection_responder (xbahn.connection.Connection): This connection will
be used by the server to send requests to the client
Keyword Arguments:
- client_class (xbahn.api.Client): if supplied, use this class to instantiate
the client object. If omitted, xbahn.api.Client is used.
Returns:
- client_class: client instance |
382,023 | def register_model(self, storagemodel:object):
modeldefinition = self.getmodeldefinition(storagemodel, False)
        if modeldefinition is None:
            # NOTE: dict-key and message literals reconstructed; the
            # originals were lost in extraction.
            if [model for model in self._modeldefinitions if model['tablename'] == storagemodel._tablename]:
                raise NameConventionError(storagemodel._tablename)
            if not test_azurestorage_nameconventions(storagemodel._tablename, 'StorageTableName'):
                raise NameConventionError(storagemodel._tablename)
            modeldefinition = {
                'modelname': storagemodel.__class__.__name__,
                'tablename': storagemodel._tablename,
                'encrypt': storagemodel._encrypt,
                'tableservice': self._account.create_table_service()
            }
            if modeldefinition['encrypt']:
                kek = KeyWrapper(self._key_identifier, self._secret_key)
                key_resolver = KeyResolver()
                key_resolver.put_key(kek)
                encryptionresolver = self.__encryptionresolver__(modeldefinition['encrypt'])
                modeldefinition['tableservice'].key_encryption_key = kek
                modeldefinition['tableservice'].key_resolver_function = key_resolver.resolve_key
                modeldefinition['tableservice'].encryption_resolver_function = encryptionresolver
            self.__createtable__(modeldefinition)
            self._modeldefinitions.append(modeldefinition)
            log.info('model {0} registered. tables: {1!s}'.format(
                modeldefinition['modelname'],
                [model['tablename'] for model in self._modeldefinitions]))
        else:
            log.info('model {0} already registered. tables: {1!s}'.format(
                modeldefinition['modelname'],
                [model['tablename'] for model in self._modeldefinitions])) | Set up a TableService for a StorageTableModel in your Azure Storage Account.
It will create the table if it does not exist!
Required parameter:
- storagemodel: StorageTableModel(Object) |
382,024 | def _convert(x, factor1, factor2):
return x * factor2 / ((1-x) * factor1 + x * factor2) | Converts mixing ratio x in comp1 - comp2 tie line to that in
c1 - c2 tie line.
Args:
x (float): Mixing ratio x in comp1 - comp2 tie line, a float
between 0 and 1.
factor1 (float): Compositional ratio between composition c1 and
processed composition comp1. E.g., factor for
Composition('SiO2') and Composition('O') is 2.0.
factor2 (float): Compositional ratio between composition c2 and
processed composition comp2.
Returns:
Mixing ratio in c1 - c2 tie line, a float between 0 and 1. |
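A worked instance of the formula using the docstring's SiO2/O factors (factor1=2.0 for comp1, factor2=1.0 for comp2):

```python
# x * factor2 / ((1 - x) * factor1 + x * factor2)
# = 0.5 * 1.0 / (0.5 * 2.0 + 0.5 * 1.0) = 0.5 / 1.5
print(_convert(0.5, 2.0, 1.0))  # 0.3333...
```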
382,025 | def close(self):
if self._controller is not None:
self._controller.quit()
self._controller = None
if self._process is not None:
self._process.close()
self._process = None | Shutdown and free all resources. |
382,026 | def scatterviz(X,
y=None,
ax=None,
features=None,
classes=None,
color=None,
colormap=None,
markers=None,
alpha=1.0,
**kwargs):
visualizer = ScatterVisualizer(ax=ax, features=features, classes=classes,
color=color, colormap=colormap,
markers=markers, alpha=alpha, **kwargs)
visualizer.fit(X, y, **kwargs)
visualizer.transform(X)
return visualizer.ax | Displays a bivariate scatter plot.
This helper function is a quick wrapper to utilize the ScatterVisualizer
(Transformer) for one-off analysis.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n, default: None
An array or series of target or class values
ax : matplotlib axes, default: None
The axes to plot the figure on.
features : list of strings, default: None
The names of two features or columns.
More than that will raise an error.
classes : list of strings, default: None
The names of the classes in the target
color : list or tuple of colors, default: None
Specify the colors for each individual class
colormap : string or matplotlib cmap, default: None
Sequential colormap for continuous target
markers : iterable of strings, default: ,+o*vhd
Matplotlib style markers for points on the scatter plot points
alpha : float, default: 1.0
Specify a transparency where 1 is completely opaque and 0 is completely
transparent. This property makes densely clustered points more visible.
Returns
-------
ax : matplotlib axes
Returns the axes that the parallel coordinates were drawn on. |
382,027 | def get_expanded(self, *args, **kwargs):
kwargs.setdefault("expandvars", True)
kwargs.setdefault("expanduser", True)
return self.get_default(*args, **kwargs) | Same as :py:meth:`get_default`, but *expandvars* and *expanduser* arguments are set to
*True* by default. |
382,028 | def get_col(self, alias, output_field=None):
if output_field is None:
output_field = self
if alias != self.model._meta.db_table or output_field != self:
return DecryptedCol(
alias,
self,
output_field
)
else:
return self.cached_col | Get the decryption for col. |
382,029 | def slice_locs(self, start=None, end=None, step=None, kind=None):
inc = (step is None or step >= 0)
        if not inc:
            start, end = end, start
        # NOTE: the bound computation was dropped in extraction and is
        # restored here: compute increasing-order bounds, then flip them
        # below for negative steps.
        start_slice = (self.get_slice_bound(start, 'left', kind)
                       if start is not None else 0)
        end_slice = (self.get_slice_bound(end, 'right', kind)
                     if end is not None else len(self))
        if not inc:
            end_slice, start_slice = start_slice - 1, end_slice - 1
if end_slice == -1:
end_slice -= len(self)
if start_slice == -1:
start_slice -= len(self)
return start_slice, end_slice | Compute slice locations for input labels.
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, defaults None
If None, defaults to 1
kind : {'ix', 'loc', 'getitem'} or None
Returns
-------
start, end : int
See Also
--------
Index.get_loc : Get location for a single label.
Notes
-----
This method only works if the index is monotonic or unique.
Examples
---------
>>> idx = pd.Index(list('abcd'))
>>> idx.slice_locs(start='b', end='c')
(1, 3) |
382,030 | def run(self):
command = [self.cmake]
if self.generator:
            command.extend([
                '-G', self.generator
            ])
if self.path:
command.append(self.path)
if self.definitions is not None:
for item in self.definitions.items():
                command.append('-D%s=%s' % item)
if self.options is not None:
command.extend(self.options)
cmd = yield self.makeRemoteShellCommand(command=command)
yield self.runCommand(cmd)
return cmd.results() | run CMake |
382,031 | def update(self, update_finished_cb):
if not self._update_finished_cb:
self._update_finished_cb = update_finished_cb
self.anchor_data = []
self.nr_of_anchors = 0
self.valid = False
            logger.debug('Updating memory {}'.format(self.id))
self.mem_handler.read(self, LocoMemory.MEM_LOCO_INFO,
LocoMemory.MEM_LOCO_INFO_LEN) | Request an update of the memory content |
382,032 | def _gather_local_posterior(self, use_gather,
gather_size, gather_offset):
if use_gather:
self.comm.Gather(self.local_posterior_,
self.gather_posterior, root=0)
else:
target = [
self.gather_posterior,
gather_size,
gather_offset,
MPI.DOUBLE]
self.comm.Gatherv(self.local_posterior_, target)
return self | Gather/Gatherv local posterior
Parameters
----------
comm : object
MPI communication group
use_gather : boolean
Whether to use Gather or Gatherv
gather_size : 1D array
The size of each local posterior
gather_offset : 1D array
The offset of each local posterior
Returns
-------
HTFA
Returns the instance itself.
Notes
-----
We use numpy array rather than generic Python objects for MPI
communication because Gatherv is only supported for the former.
https://pythonhosted.org/mpi4py/usrman/tutorial.html |
382,033 | def _validate_file_ownership(owner, group, file_name, optional=False):
try:
ownership = _stat(file_name)
except subprocess.CalledProcessError as e:
print("Error reading file: {}".format(e))
if not optional:
assert False, "Specified file does not exist: {}".format(file_name)
assert owner == ownership.owner, \
"{} has an incorrect owner: {} should be {}".format(
file_name, ownership.owner, owner)
assert group == ownership.group, \
"{} has an incorrect group: {} should be {}".format(
file_name, ownership.group, group)
print("Validate ownership of {}: PASS".format(file_name)) | Validate that a specified file is owned by `owner:group`.
:param owner: Name of the owner
:type owner: str
:param group: Name of the group
:type group: str
:param file_name: Path to the file to verify
:type file_name: str
:param optional: Is this file optional,
ie: Should this test fail when it's missing
:type optional: bool |
382,034 | def get_unhidden_ungenerated_python_files(directory: str) -> Iterable[str]:
for dirpath, dirnames, filenames in os.walk(directory, topdown=True):
        if os.path.split(dirpath)[-1].startswith('.'):
dirnames.clear()
continue
for filename in filenames:
            if filename.endswith('.py') and not filename.endswith('_pb2.py'):
yield os.path.join(dirpath, filename) | Iterates through relevant python files within the given directory.
Args:
directory: The top-level directory to explore.
Yields:
File paths. |
382,035 | def truth(message, expected=None):
def decorator(func):
return update_wrapper(Check(func, message, expected), func)
return decorator | Convenience decorator that applies [`Check`](#check) to a callable.
```python
from good import truth
@truth(u'Must be an existing directory')
def isDir(v):
return os.path.isdir(v)
```
:param message: Validation error message
:type message: unicode
:param expected: Expected value string representation, or `None` to get it from the wrapped callable
:type expected: None|str|unicode
:return: decorator
:rtype: callable |
382,036 | def load_input():
    file = open(_input_file, 'r')
    result = json.loads(file.read().strip())
file.close()
return result | Open existing input file |
382,037 | def corner_shape_parameters(corners, frame_shape, cb_shape):
corners = corners.reshape(-1, 2)
assert corners.shape[0] == cb_shape[0] * cb_shape[1]
h, w = frame_shape
A, B, C, D = tuple(x.reshape(-1) for x in corners[[0, cb_shape[0]-1, -1, -cb_shape[0]], :])
ba, bc = A - B, C - B
bd, ac = D - B, C - A
angle = np.arccos(np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc)))
skew = min(1, 2 * np.abs(0.5*np.pi - angle))
area = 0.5 * np.abs(bd[0]*ac[1] - bd[1]*ac[0])
size = np.sqrt(area / (h*w))
border = np.sqrt(area)
X = np.clip((np.mean(corners[:,0]) - 0.5*border) / (w - border), 0, 1)
Y = np.clip((np.mean(corners[:,1]) - 0.5*border) / (h - border), 0, 1)
return (X, Y, size, skew) | Return a tuple of shape parameters for a given set of corners. This is
based on the parameters from ROS's perception library[1]. The parameters
returned are mean x- and y- co-ordinate normalised onto the interval
[0,1], the relative size of the set of corners within the frame on the
interval [0,1] and a 'skewness' metric on the inteval [0,1].
*corners* is a Nx2 numpy array of detected corner locations.
*frame_shape* is a pair giving the width and height of the frame.
*cb_shape* is a pair giving the number of horizontal and vertical corners
[1] https://github.com/ros-perception/image_pipeline/ |
382,038 | def add_answer_for_student(student_item, vote, rationale):
answers = get_answers_for_student(student_item)
answers.add_answer(vote, rationale)
sub_api.create_submission(student_item, {
ANSWER_LIST_KEY: answers.get_answers_as_list()
}) | Add an answer for a student to the backend
Args:
student_item (dict): The location of the problem this submission is
associated with, as defined by a course, student, and item.
vote (int): the option that student voted for
rationale (str): the reason why the student vote for the option |
382,039 | def hash160(data):
    rh = hashlib.new('ripemd160', sha256(data).digest())
return rh.digest() | Return ripemd160(sha256(data)) |
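Usage sketch; this assumes a `sha256` helper like the one below and requires 'ripemd160' to be available in the local OpenSSL build:

```python
import hashlib

def sha256(data):
    return hashlib.sha256(data)

digest = hash160(b'hello')
print(digest.hex())  # 40 hex chars: RIPEMD160(SHA256(b'hello'))
```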
382,040 | def linear_warp(X, d, n, *args):
X = scipy.asarray(X, dtype=float)
a = args[2 * d]
b = args[2 * d + 1]
if n == 0:
return (X - a) / (b - a)
elif n == 1:
return 1.0 / (b - a) * scipy.ones_like(X)
else:
return scipy.zeros_like(X) | r"""Warp inputs with a linear transformation.
Applies the warping
.. math::
w(x) = \frac{x-a}{b-a}
to each dimension. If you set `a=min(X)` and `b=max(X)` then this is a
convenient way to map your inputs to the unit hypercube.
Parameters
----------
X : array, (`M`,)
`M` inputs from dimension `d`.
d : non-negative int
The index (starting from zero) of the dimension to apply the warping to.
n : non-negative int
The derivative order to compute.
*args : 2N scalars
The remaining parameters to describe the warping, given as scalars.
These are given as `a_i`, `b_i` for each of the `D` dimensions. Note
that these must ALL be provided for each call. |
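A small example assuming the function above: warping dimension 0 from the interval [1, 3] onto the unit interval, plus its first derivative:

```python
X = [1.0, 2.0, 3.0]
print(linear_warp(X, 0, 0, 1.0, 3.0))  # [0.  0.5 1. ]  -> (x - a) / (b - a)
print(linear_warp(X, 0, 1, 1.0, 3.0))  # [0.5 0.5 0.5]  -> constant 1 / (b - a)
```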
382,041 | def setitem(self, key, value):
with self.lock:
self.tbl[key] = value | Maps dictionary keys to values for assignment. Called for
dictionary style access with assignment. |
382,042 | def op_list_venvs(self):
        self.logger.info('Listing known virtual environments ...')
        venvs = self.get_venvs()
        for venv in venvs:
            self.logger.info('  %s' % venv)
        if not venvs:
            self.logger.info('No virtual environments found in %s' % VENVS_DIRNAME)
return venvs | Prints out and returns a list of known virtual environments.
:rtype: list
:return: list of virtual environments |
382,043 | def raises(self):
return [
DocstringRaises.from_meta(meta)
for meta in self.meta
            if meta.args[0] in {'raises', 'raise', 'except', 'exception'}
] | Return list of :raises meta. |
382,044 | def spectral_flux(d0, d1):
d0 = np.mean(d0, axis=1)
d1 = np.mean(d1, axis=1)
nFFT = min(len(d0) // 2, len(d1) // 2)
X = FFT(d0, nFFT)
Xprev = FFT(d1, nFFT)
sumX = np.sum(X + EPSILON)
sumPrevX = np.sum(Xprev + EPSILON)
return np.sum((X / sumX - Xprev / sumPrevX) ** 2) | Computes the spectral flux feature of the current frame |
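A runnable sketch, with stand-ins for the module's `FFT` and `EPSILON` helpers (these are assumptions, not the library's own definitions):

```python
import numpy as np

EPSILON = 1e-8  # assumed small constant to avoid division by zero

def FFT(signal, n):
    # Assumed helper: magnitude spectrum truncated to n bins.
    return np.abs(np.fft.fft(signal))[:n]

rng = np.random.RandomState(0)
frame_now, frame_prev = rng.randn(512, 1), rng.randn(512, 1)
print(spectral_flux(frame_now, frame_prev))  # non-negative flux value
```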
382,045 | def _WaitForStartup(self, deadline):
start = time.time()
sleep = 0.05
def Elapsed():
return time.time() - start
while True:
try:
response, _ = self._http.request(self._host)
if response.status == 200:
          logging.info('emulator responded after %s seconds', Elapsed())
return True
except (socket.error, httplib.ResponseNotReady):
pass
if Elapsed() >= deadline:
return False
else:
time.sleep(sleep)
sleep *= 2 | Waits for the emulator to start.
Args:
deadline: deadline in seconds
Returns:
True if the emulator responds within the deadline, False otherwise. |
382,046 | def step(self, observations):
q_values = self(observations)
        # NOTE: dict keys reconstructed; originals lost in extraction.
        return {
            'actions': self.q_head.sample(q_values),
            'q_values': q_values
        } | Sample an action from the action space for a given state |
382,047 | def freeze_all(self):
d = defer.succeed(None)
for x in self.iter_agents():
d.addCallback(defer.drop_param, x._cancel_long_running_protocols)
d.addCallback(defer.drop_param, x._cancel_all_delayed_calls)
d.addCallback(defer.drop_param, x._kill_all_protocols)
return d | Stop all activity of the agents running. |
382,048 | def http_get_metadata(metadata_path, timeout=__HTTP_DEFAULT_TIMEOUT_SEC):
metadata_path = __METADATA_PREFIX + metadata_path
try:
response = urllib2.urlopen(metadata_path, None, timeout)
if response.getcode() != 200:
raise IOError("Non-200 response " + str(response.getcode()) + " reading " + metadata_path)
return response.read()
except urllib2.URLError as error:
raise IOError("URLError in http_get_metadata: " + repr(error)) | Fetch AWS metadata from http://169.254.169.254/latest/meta-data/<metadata_path>
ARGS:
metadata_path - the optional path and required key to the EC2 metadata (e.g. "instance-id")
RETURN:
response content on success
RAISE:
URLError if there was a problem reading metadata |
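Typical call, which only works from inside an EC2 instance (elsewhere it raises IOError):

```python
# Hypothetical use on an EC2 host:
instance_id = http_get_metadata('instance-id')
print(instance_id)  # e.g. 'i-0abc123...'
```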
382,049 | def repeat_last_axis(array, count):
return as_strided(array, array.shape + (count,), array.strides + (0,)) | Restride `array` to repeat `count` times along the last axis.
Parameters
----------
array : np.array
The array to restride.
count : int
Number of times to repeat `array`.
Returns
-------
result : array
Array of shape array.shape + (count,) composed of `array` repeated
`count` times along the last axis.
Example
-------
>>> from numpy import arange
>>> a = arange(3); a
array([0, 1, 2])
>>> repeat_last_axis(a, 2)
array([[0, 0],
[1, 1],
[2, 2]])
>>> repeat_last_axis(a, 4)
array([[0, 0, 0, 0],
[1, 1, 1, 1],
[2, 2, 2, 2]])
Notes
----
The resulting array will share memory with `array`. If you need to assign
to the input or output, you should probably make a copy first.
See Also
--------
repeat_first_axis |
382,050 | def _deleteFile(self,directory,fn,dentry,db,service):
logger.debug("%s - Deleting"%(fn))
if fn not in db:
print("%s - rm: Not in DB, canservicesservicest delete, service [%s] unknown"%(service))
continue
if db[fn][][service][]!=self.ST_DELETED:
print("%s - rm: CanDservices%s - deleted by service: %s%s - Failed to delete by service: %sservices'].keys())==0:
del db[fn]
return True | Deletets file and changes status to '?' if no
more services manages the file |
382,051 | def plot_pseudosection(df, plot_key, spacing=1, ctypes=None, dd_merge=False,
cb=False, **kwargs):
grid = None
    # NOTE: configuration-key and message literals below were lost in
    # extraction and have been reconstructed from context.
    pseudo_d_functions = {
        'dd': _pseudodepths_dd_simple,
        'schlumberger': _pseudodepths_schlumberger,
        'wenner': _pseudodepths_wenner,
    }
    titles = {
        'dd': 'dipole-dipole configurations',
        'schlumberger': 'Schlumberger configurations',
        'wenner': 'Wenner configurations',
    }
    only_types = ctypes or ['dd', ]
    if 'schlumberger' in only_types:
        raise Exception('plotting of Schlumberger configurations ' +
                        'is not implemented yet')
    configs = df[['a', 'b', 'm', 'n']].values
    results = fT.filter(
        configs,
        settings={'only_types': only_types, }, )
    values = df[plot_key].values
    plot_objects = []
    for key in sorted(results.keys()):
        print('plotting:', key)
        if key == 'not_sorted':
continue
index_dict = results[key]
        if key == 'dd' and not dd_merge:
            plot_list = []
            labels_add = []
            for skip in sorted(index_dict.keys()):
                plot_list.append(index_dict[skip])
                labels_add.append(' - skip {0}'.format(skip))
        else:
            plot_list = [np.hstack(index_dict.values()), ]
            print('plot list:', plot_list)
            labels_add = ['', ]
for indices, label_add in zip(plot_list, labels_add):
if len(indices) == 0:
continue
ddc = configs[indices]
plot_data = values[indices]
px, pz = pseudo_d_functions[key](ddc, spacing, grid)
if px.size <= 4:
continue
xg = np.linspace(px.min(), px.max(), 200)
zg = np.linspace(pz.min(), pz.max(), 200)
x, z = np.meshgrid(xg, zg)
            cmap_name = kwargs.get('cmap_name', 'jet')
            cmap = mpl.cm.get_cmap(cmap_name)
            data_min = kwargs.get('cbmin', plot_data.min())
            data_max = kwargs.get('cbmax', plot_data.max())
            cnorm = mpl.colors.Normalize(vmin=data_min, vmax=data_max)
scalarMap = mpl.cm.ScalarMappable(norm=cnorm, cmap=cmap)
fcolors = scalarMap.to_rgba(plot_data)
            try:
                image = si.griddata(
                    (px, pz),
                    fcolors,
                    (x, z),
                    method='linear', )
            except siq.QhullError as e:
                print('griddata failed:', e)
                continue
            cmap = mpl.cm.get_cmap(cmap_name)
data_ratio = np.abs(px.max() - px.min()) / np.abs(pz.min())
fig_size_y = 15 / data_ratio + 6 / 2.54
fig = plt.figure(figsize=(15, fig_size_y))
fig_top = 1 / 2.54 / fig_size_y
fig_left = 2 / 2.54 / 15
fig_right = 1 / 2.54 / 15
if cb:
fig_bottom = 3 / 2.54 / fig_size_y
else:
fig_bottom = 0.05
ax = fig.add_axes([
fig_left, fig_bottom + fig_top * 2, 1 - fig_left - fig_right,
1 - fig_top - fig_bottom - fig_top * 2
])
            im = ax.imshow(
                image[::-1],
                extent=(xg.min(), xg.max(), zg.min(), zg.max()),
                interpolation='none',
                aspect='auto',
                cmap=cmap, )
            ax.set_ylim(pz.min(), 0)
            if cb:
                print('plotting colorbar')
ax_cb = fig.add_axes([
fig_left * 4, fig_top * 2,
1 - fig_left * 4 - fig_right * 4, fig_bottom - fig_top * 2
])
cb = mpl.colorbar.ColorbarBase(
ax=ax_cb,
cmap=cmap,
norm=cnorm,
                    orientation='horizontal',
                )
                cb.set_label('')  # NOTE: label text lost in extraction
            else:
                fig_bottom = 0.05
            ax.set_title(titles[key] + label_add)
            ax.set_aspect('equal')
            ax.set_xlabel('x [m]')
            ax.set_ylabel('pseudo depth [m]')
plot_objects.append((fig, ax, im))
return plot_objects | Create a pseudosection plot for a given measurement
Parameters
----------
df: dataframe
measurement dataframe, one measurement frame (i.e., only one frequency
etc)
plot_key:
which key (column) to color-code
spacing: float, optional
assumed electrode spacing
ctypes: list of strings
which configurations to plot, default: dd
dd_merge: bool, optional
?
cb: bool, optional
? |
382,052 | def tryDynMod(name):
try:
return importlib.import_module(name)
except ModuleNotFoundError:
raise s_exc.NoSuchDyn(name=name) | Dynamically import a python module or exception. |
382,053 | def _RunMethod(dev, args, extra):
    logging.info('%s(%s)', args.method.__name__, ', '.join(args.positional))
    result = args.method(dev, *args.positional, **extra)
    if result is not None:
        if isinstance(result, io.StringIO):
            sys.stdout.write(result.getvalue())
        elif isinstance(result, (list, types.GeneratorType)):
            r = ''
            for r in result:
                r = str(r)
                sys.stdout.write(r)
            if not r.endswith('\n'):
                sys.stdout.write('\n')
        else:
            result = str(result)
            sys.stdout.write(result)
            if not result.endswith('\n'):
                sys.stdout.write('\n')
return 0 | Runs a method registered via MakeSubparser. |
382,054 | def convert_coord_object(coord):
assert isinstance(coord, Coordinate)
coord = coord.container()
return Tile(int(coord.zoom), int(coord.column), int(coord.row)) | Convert ModestMaps.Core.Coordinate -> raw_tiles.tile.Tile |
382,055 | def _get_title(self):
strbuffer = self.ctypes.create_string_buffer(1024)
size = self.ctypes.c_short(1024)
self.ctypes.windll.kernel32.GetConsoleTitleA(strbuffer, size)
return strbuffer.value | According to http://support.microsoft.com/kb/124103 the buffer
size is 1024
Does not support unicode, only ANSI |
382,056 | def postcmd(self, stop, line):
self.color_prompt()
return Cmd.postcmd(self, stop, line) | Exit cmd cleanly. |
382,057 | def inputs(self):
return [l.input for l in self.layers if isinstance(l, layers.Input)] | A list of Theano variables for feedforward computations. |
382,058 | def stop(self):
        log.info('Stopping PostgreSQL server at %s:%s', self.host, self.port)
        if self._is_running():
            cmd = [
                PostgresFinder.find_root() / 'bin' / 'pg_ctl',  # NOTE: path components reconstructed
                'stop',
                '-D', self.base_pathname,
                '-m', 'fast',
            ]
subprocess.check_call(cmd)
if self.pid:
os.kill(self.pid, signal.SIGTERM)
while self._is_running():
time.sleep(0.1) | Stop this DMBS daemon. If it's not currently running, do nothing.
Don't return until it's terminated. |
382,059 | def is_active(cache, token):
profile = cache.get(token)
if not profile:
        raise exceptions.NotAuthenticatedException(
            'Session is not active or the token has expired.')  # NOTE: message reconstructed
    return profile | Accepts the cache and ID token and checks to see if the profile is
currently logged in. If so, return the profile; otherwise raise a
NotAuthenticatedException.
:param cache:
:param token:
:return: |
382,060 | def assert_match(actual_char_or_str, expected_char_or_str):
if expected_char_or_str != actual_char_or_str:
print("Expected")
pprint(expected_char_or_str)
print("")
print("Got")
pprint(actual_char_or_str)
raise ValueError() | If values don't match, print them and raise a ValueError, otherwise,
continue
Raises: ValueError if arguments do not match |
382,061 | def grouper(iterable: Iterable, size: int) -> Iterable:
it = iter(iterable)
while True:
chunk = list(itertools.islice(it, size))
if not chunk:
return
yield chunk | Collect data into fixed-length chunks or blocks without discarding underfilled chunks or padding them.
:param iterable: A sequence of inputs.
:param size: Chunk size.
:return: Sequence of chunks. |
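Example of the chunking behavior, showing that the last, underfilled chunk is kept rather than padded:

```python
print(list(grouper(range(7), 3)))
# [[0, 1, 2], [3, 4, 5], [6]]
```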
382,062 | def to_html(self, write_to):
page_html = self.get_html()
with open(write_to, "wb") as writefile:
writefile.write(page_html.encode("utf-8")) | Method to convert the repository list to a search results page and
write it to a HTML file.
:param write_to: File/Path to write the html file to. |
382,063 | def available_configuration_files(self):
        # NOTE: environment-variable name reconstructed; original lost.
        known_files = [GLOBAL_CONFIG, LOCAL_CONFIG, self.environment.get('config_file')]
absolute_paths = [parse_path(pathname) for pathname in known_files if pathname]
return [pathname for pathname in absolute_paths if os.path.isfile(pathname)] | A list of strings with the absolute pathnames of the available configuration files. |
382,064 | def handle_not_found(exception, **extra):
assert isinstance(exception, NotFound)
page = Page.query.filter(db.or_(Page.url == request.path,
Page.url == request.path + "/")).first()
if page:
_add_url_rule(page.url)
return render_template(
[
page.template_name,
current_app.config[]
],
page=page
)
elif in extra:
return extra[](exception)
else:
return exception | Custom blueprint exception handler. |
382,065 | def _writeTracebackMessage(logger, typ, exception, traceback):
msg = TRACEBACK_MESSAGE(
reason=exception, traceback=traceback, exception=typ)
msg = msg.bind(
**_error_extraction.get_fields_for_exception(logger, exception))
msg.write(logger) | Write a traceback to the log.
@param typ: The class of the exception.
@param exception: The L{Exception} instance.
@param traceback: The traceback, a C{str}. |
382,066 | def sens_power_encode(self, adc121_vspb_volt, adc121_cspb_amp, adc121_cs1_amp, adc121_cs2_amp):
return MAVLink_sens_power_message(adc121_vspb_volt, adc121_cspb_amp, adc121_cs1_amp, adc121_cs2_amp) | Voltage and current sensor data
adc121_vspb_volt : Power board voltage sensor reading in volts (float)
adc121_cspb_amp : Power board current sensor reading in amps (float)
adc121_cs1_amp : Board current sensor 1 reading in amps (float)
adc121_cs2_amp : Board current sensor 2 reading in amps (float) |
382,067 | def get_workflow_id_and_project(path):
    project, _folderpath, entity_result = try_call(resolve_existing_path,
                                                   path, expected='entity')
    try:
        if entity_result is None or not entity_result['id'].startswith('workflow-'):
            raise DXCLIError('Could not resolve "' + path + '" to a workflow object')
    except:
        err_exit()
    return entity_result['id'], project | :param path: a path or ID to a workflow object
:type path: string
:returns: tuple of (workflow ID, project ID)
Returns the workflow and project IDs from the given path if
available; otherwise, exits with an appropriate error message. |
382,068 | def _iter(self):
get_weight = (self.transformer_weights or {}).get
return ((name, trans, get_weight(name))
for name, trans in self.transformer_list
if trans is not None) | Generate (name, est, weight) tuples excluding None transformers |
382,069 | def CreateStorageReaderForFile(cls, path):
if sqlite_file.SQLiteStorageFile.CheckSupportedFormat(
path, check_readable_only=True):
return sqlite_reader.SQLiteStorageFileReader(path)
return None | Creates a storage reader based on the file.
Args:
path (str): path to the storage file.
Returns:
StorageReader: a storage reader or None if the storage file cannot be
opened or the storage format is not supported. |
382,070 | def break_bond(self, ind1, ind2, tol=0.2):
sites = self._sites
clusters = [[sites[ind1]], [sites[ind2]]]
sites = [site for i, site in enumerate(sites) if i not in (ind1, ind2)]
def belongs_to_cluster(site, cluster):
for test_site in cluster:
if CovalentBond.is_bonded(site, test_site, tol=tol):
return True
return False
while len(sites) > 0:
unmatched = []
for site in sites:
for cluster in clusters:
if belongs_to_cluster(site, cluster):
cluster.append(site)
break
else:
unmatched.append(site)
if len(unmatched) == len(sites):
raise ValueError("Not all sites are matched!")
sites = unmatched
return (self.__class__.from_sites(cluster)
for cluster in clusters) | Returns two molecules based on breaking the bond between atoms at index
ind1 and ind2.
Args:
ind1 (int): Index of first site.
ind2 (int): Index of second site.
tol (float): Relative tolerance to test. Basically, the code
checks if the distance between the sites is less than (1 +
tol) * typical bond distances. Defaults to 0.2, i.e.,
20% longer.
Returns:
Two Molecule objects representing the two clusters formed from
breaking the bond. |
382,071 | def task_delete(self, **kw):
id, task = self.get_task(**kw)
        if task['status'] == Status.DELETED:
            raise ValueError("Task is already deleted.")
        self._execute(id, 'delete')
        return self.get_task(uuid=task['uuid'])[1] | Marks a task as deleted. |
382,072 | def get_from_ipfs_and_checkhash(ipfs_client, ipfs_hash_base58, validate=True):
if validate:
from snet_cli.resources.proto.unixfs_pb2 import Data
from snet_cli.resources.proto.merckledag_pb2 import MerkleNode
block_data = ipfs_client.block_get(ipfs_hash_base58)
mn = MerkleNode()
mn.ParseFromString(block_data)
unixfs_data = Data()
unixfs_data.ParseFromString(mn.Data)
        assert unixfs_data.Type == unixfs_data.DataType.Value('File'), "IPFS hash must be a file"
data = unixfs_data.Data
        multihash.CodecReg.register('base58', base58.b58encode, base58.b58decode)
        mh = multihash.decode(ipfs_hash_base58.encode(), 'base58')
if not mh.verify(block_data):
raise Exception("IPFS hash mismatch with data")
else:
data = ipfs_client.cat(ipfs_hash_base58)
return data | Get file from ipfs
We must check the hash becasue we cannot believe that ipfs_client wasn't been compromise |
382,073 | def change(self, event):
try:
data, schema, user, client = self._get_args(event)
except AttributeError:
return
try:
            # NOTE: dict keys and event-result values below were lost in
            # extraction; plausible reconstructions are used.
            uuid = data['uuid']
            change = data['change']
            field = change['field']
            new_data = change['value']
        except KeyError as e:
            self.log("Update request with missing arguments!", data, e,
                     lvl=critical)
            self._cancel_by_error(event, 'missing_args')
            return
        storage_object = None
        try:
            storage_object = objectmodels[schema].find_one({'uuid': uuid})
        except Exception as e:
            self.log('Change for unknown object requested:', schema, data, lvl=warn)
        if storage_object is None:
            self._cancel_by_error(event, 'not_found')
            return
        if not self._check_permissions(user, 'write', storage_object):
            self._cancel_by_permission(schema, data, event)
            return
        self.log("Changing object:", storage_object._fields, lvl=debug)
        storage_object._fields[field] = new_data
        self.log("Storing object:", storage_object._fields, lvl=debug)
        try:
            storage_object.validate()
        except ValidationError:
            self.log("Validation of changed object failed!",
                     storage_object, lvl=warn)
            self._cancel_by_error(event, 'invalid_object')
            return
        storage_object.save()
        self.log("Object stored.")
        result = {
            'component': 'objectmanager',
            'action': 'change',
            'data': {
                'schema': schema,
                'uuid': uuid
            }
        }
self._respond(None, result, event) | Change an existing object |
382,074 | def _get_pkgng_version(jail=None, chroot=None, root=None):
    cmd = _pkg(jail, chroot, root) + ['--version']
    return __salt__['cmd.run'](cmd).strip() | Return the version of 'pkg'. |
382,075 | def math_to_image(s, filename_or_obj, prop=None, dpi=None, format=None):
from matplotlib import figure
from matplotlib.backends import backend_agg
from matplotlib.font_manager import FontProperties
from matplotlib.mathtext import MathTextParser
if prop is None:
prop = FontProperties()
    parser = MathTextParser('path')
width, height, depth, _, _ = parser.parse(s, dpi=72, prop=prop)
fig = figure.Figure(figsize=(width / 72.0, height / 72.0))
fig.text(0, depth/height, s, fontproperties=prop)
backend_agg.FigureCanvasAgg(fig)
fig.savefig(filename_or_obj, dpi=dpi, format=format)
return depth | Given a math expression, renders it in a closely-clipped bounding
box to an image file.
*s*
A math expression. The math portion should be enclosed in
dollar signs.
*filename_or_obj*
A filepath or writable file-like object to write the image data
to.
*prop*
If provided, a FontProperties() object describing the size and
style of the text.
*dpi*
Override the output dpi, otherwise use the default associated
with the output format.
*format*
The output format, eg. 'svg', 'pdf', 'ps' or 'png'. If not
provided, will be deduced from the filename. |
382,076 | def set_xticklabels_position(self, row, column, position):
subplot = self.get_subplot_at(row, column)
subplot.set_xticklabels_position(position) | Specify the position of the axis tick labels.
This is generally only useful for multiplots containing only one
row. This can be used to e.g. alternatively draw the tick labels
on the bottom or the top of the subplot.
:param row,column: specify the subplot.
:param position: 'top' or 'bottom' to specify the position of the
tick labels. |
382,077 | def handle_combined_input(args):
cur_args = args[:]
while len(cur_args) == 1 and isinstance(cur_args[0], (list, tuple)):
cur_args = cur_args[0]
return cur_args | Check for cases where we have a combined input nested list.
In these cases the CWL will be double nested:
[[[rec_a], [rec_b]]]
and we remove the outer nesting. |
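For example, a doubly nested combined input collapses one level while an already-flat list is left alone:

```python
print(handle_combined_input([[['rec_a'], ['rec_b']]]))  # [['rec_a'], ['rec_b']]
print(handle_combined_input([1, 2]))                    # [1, 2]
```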
382,078 | def _load_url(url):
try:
response = requests.get(url)
return BytesIO(response.content)
except IOError as ex:
parser.error("{url} could not be loaded remotely! ({ex})".format(url=url, ex=ex)) | Loads a URL resource from a remote server |
382,079 | def _get_YYTfactor(self, Y):
N, D = Y.shape
if (N>=D):
return Y.view(np.ndarray)
else:
return jitchol(tdot(Y)) | find a matrix L which satisfies LLT = YYT.
Note that L may have fewer columns than Y. |
382,080 | def is_valid_coordinate(img, i, j, k):
imgx, imgy, imgz = get_shape(img)
return (i >= 0 and i < imgx) and \
(j >= 0 and j < imgy) and \
(k >= 0 and k < imgz) | Return True if the given (i, j, k) voxel grid coordinate values are within the img boundaries.
Parameters
----------
@param img:
@param i:
@param j:
@param k:
Returns
-------
bool |
382,081 | def curse(rest):
"Curse the day!"
if rest:
cursee = rest
else:
        cursee = 'the day'
karma.Karma.store.change(cursee, -1)
return "/me curses %s!" % cursee | Curse the day! |
382,082 | def verify(self, msg, sig, key):
if not isinstance(key, ec.EllipticCurvePublicKey):
raise TypeError(
"The public key must be an instance of "
"ec.EllipticCurvePublicKey")
self._cross_check(key)
num_bits = key.curve.key_size
num_bytes = (num_bits + 7) // 8
if len(sig) != 2 * num_bytes:
            raise ValueError('Invalid signature length')
try:
(r, s) = self._split_raw_signature(sig)
asn1sig = encode_dss_signature(r, s)
key.verify(asn1sig, msg, ec.ECDSA(self.hash_algorithm()))
except InvalidSignature as err:
raise BadSignature(err)
else:
return True | Verify a message signature
:param msg: The message
:param sig: A signature
:param key: A ec.EllipticCurvePublicKey to use for the verification.
:raises: BadSignature if the signature can't be verified.
:return: True |
382,083 | def _resource_prefix(self, resource=None):
        px = 'elastic'
        if resource and config.DOMAIN[resource].get('elastic_prefix'):
            px = config.DOMAIN[resource].get('elastic_prefix')
return px | Get elastic prefix for given resource.
Resource can specify ``elastic_prefix`` which behaves same like ``mongo_prefix``. |
382,084 | def from_(self, table, alias=None):
if isinstance(table, str):
table = [[table, alias]]
self.raw_tables = table
        return self | Sets the data source (and optionally an alias). |
382,085 | def copy_sig(sig, opts, isdiff):
info("[+] \033[92mDeploying signature:\033[0m %s" % sig)
if isdiff:
        # NOTE: filename extensions reconstructed (ClamAV-style .cdiff/.cvd).
        sourcefile = os.path.join(opts.workdir, '%s.cdiff' % sig)
        destfile = os.path.join(opts.mirrordir, '%s.cdiff' % sig)
    else:
        sourcefile = os.path.join(opts.workdir, '%s.cvd' % sig)
        destfile = os.path.join(opts.mirrordir, '%s.cvd' % sig)
deploy_signature(sourcefile, destfile, opts.user, opts.group)
info("=> Deployed signature: %s" % sig) | Deploy a sig |
382,086 | def _parse_team_name(self, team):
    # NOTE: the replaced character literals were lost in extraction;
    # non-breaking spaces are a plausible culprit per the docstring.
    team = team.replace('\xa0', ' ')
    team = team.replace('&nbsp;', ' ')
    team_html = pq(team)
return team_html.text() | Parse the team name in the contract table.
The team names in the contract table contain special encoded characters
that are not supported by Python 2.7. These characters should be
filtered out to get the proper team name.
Parameters
----------
team : string
A string representing the team_name tag in a row in the player's
contract table.
Returns
-------
string
A string of the team's name, such as 'Houston Astros'. |
382,087 | def linescore(self):
doc = self.get_main_doc()
table = doc()
columns = [th.text() for th in table().items()]
columns[0] =
data = [
[sportsref.utils.flatten_links(td) for td in tr().items()]
for tr in table().next_all().items()
]
return pd.DataFrame(data, index=[, ],
columns=columns, dtype=) | Returns the linescore for the game as a DataFrame. |
382,088 | def not_storable(_type):
return Storable(_type, handlers=StorableHandler(poke=fake_poke, peek=fail_peek(_type))) | Helper for tagging unserializable types.
Arguments:
_type (type): type to be ignored.
Returns:
Storable: storable instance that does not poke. |
382,089 | def write_local_file(fp, name_bytes, writer, dt):
    fp.write(struct.pack('<L', 0x04034b50))   # local file header signature
    fp.write(struct.pack('<H', 10))           # version needed to extract
    fp.write(struct.pack('<H', 0))            # general purpose bit flag
    fp.write(struct.pack('<H', 0))            # compression method: stored
    msdos_date = int(dt.year - 1980) << 9 | int(dt.month) << 5 | int(dt.day)
    msdos_time = int(dt.hour) << 11 | int(dt.minute) << 5 | int(dt.second)
    fp.write(struct.pack('<H', msdos_time))
    fp.write(struct.pack('<H', msdos_date))
    crc32_pos = fp.tell()
    fp.write(struct.pack('<L', 0))            # CRC-32 placeholder, patched later
    data_len_pos = fp.tell()
    fp.write(struct.pack('<L', 0))            # compressed size placeholder
    fp.write(struct.pack('<L', 0))            # uncompressed size placeholder
    fp.write(struct.pack('<H', len(name_bytes)))
    fp.write(struct.pack('<H', 0))            # extra field length
fp.write(name_bytes)
data_start_pos = fp.tell()
crc32 = writer(fp)
data_end_pos = fp.tell()
data_len = data_end_pos - data_start_pos
fp.seek(crc32_pos)
    fp.write(struct.pack('<L', crc32))
    fp.seek(data_len_pos)
    fp.write(struct.pack('<L', data_len))
    fp.write(struct.pack('<L', data_len))
fp.seek(data_end_pos)
return data_len, crc32 | Writes a zip file local file header structure at the current file position.
Returns data_len, crc32 for the data.
:param fp: the file point to which to write the header
:param name_bytes: the name of the file, as bytes
:param writer: a function taking an fp parameter to do the writing, returns crc32
:param dt: the datetime to write to the archive |
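A usage sketch, assuming the function above: write one stored (uncompressed) entry into a buffer. Note that a local file header alone is not a complete zip; a real archive also needs a central directory.

```python
import io
import zlib
from datetime import datetime

def payload_writer(fp):
    data = b'hello zip'
    fp.write(data)
    return zlib.crc32(data) & 0xFFFFFFFF

buf = io.BytesIO()
data_len, crc = write_local_file(buf, b'hello.txt', payload_writer,
                                 datetime(2020, 1, 2, 3, 4, 5))
print(data_len, hex(crc))  # 9 and the payload's CRC-32
```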
382,090 | def zcr(data):
data = np.mean(data, axis=1)
count = len(data)
countZ = np.sum(np.abs(np.diff(np.sign(data)))) / 2
return (np.float64(countZ) / np.float64(count - 1.0)) | Computes zero crossing rate of segment |
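A quick check of the formula: a full-scale alternating signal (one channel per column) crosses zero between every pair of samples, so the rate is 1.0:

```python
import numpy as np

frame = np.array([1.0, -1.0, 1.0, -1.0]).reshape(-1, 1)
print(zcr(frame))  # 1.0
```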
382,091 | def _plot_x(self, iabscissa=1, x_opt=None, remark=None,
annotations=None):
        if not hasattr(self, 'x'):
            # NOTE: warning strings reconstructed; originals lost in extraction.
            _print_warning('no x data found', '_plot_x', 'CMADataLogger')
            return
from matplotlib.pyplot import plot, semilogy, hold, text, grid, axis, title
dat = self
if dat.x.shape[1] < 100:
minxend = int(1.06 * dat.x[-2, iabscissa])
dat.x[-1, iabscissa] = minxend
if x_opt is None:
idx = np.argsort(dat.x[-2, 5:])
idx2 = np.argsort(idx)
dat.x[-1, 5 + idx] = np.linspace(np.min(dat.x[:, 5:]),
np.max(dat.x[:, 5:]), dat.x.shape[1] - 5)
else:
xdat = np.abs(dat.x[:, 5:] - np.array(x_opt, copy=False))
idx = np.argsort(xdat[-2, :])
idx2 = np.argsort(idx)
xdat[-1, idx] = np.logspace(np.log10(np.min(abs(xdat[xdat!=0]))),
np.log10(np.max(np.abs(xdat))),
dat.x.shape[1] - 5)
else:
minxend = 0
self._enter_plotting()
        if x_opt is not None:
            semilogy(dat.x[:, iabscissa], abs(xdat), '-')
        else:
            plot(dat.x[:, iabscissa], dat.x[:, 5:], '-')
hold(True)
grid(True)
ax = array(axis())
axis(ax)
ax[1] -= 1e-6
if dat.x.shape[1] < 100:
yy = np.linspace(ax[2] + 1e-6, ax[3] - 1e-6, dat.x.shape[1] - 5)
            # NOTE: plot-style and label strings reconstructed; originals lost.
            if x_opt is not None:
                semilogy(np.dot(dat.x[-2, iabscissa], [1, 1]),
                         array([ax[2] * (1 + 1e-6), ax[3] / (1 + 1e-6)]), 'k-')
            else:
                plot(np.dot(dat.x[-2, iabscissa], [1, 1]),
                     array([ax[2] + 1e-6, ax[3] - 1e-6]), 'k-')
            for i in rglen(idx):
                text(dat.x[-1, iabscissa],
                     dat.x[-1, 5 + i] if x_opt is None else np.abs(xdat[-1, i]),
                     ('x(' + str(i) + ')=' if annotations is None
                      else str(i) + ':' + annotations[i] + "=")
                     + str(dat.x[-2, 5 + i]))
i = 2
while i < len(dat.f) and dat.f[-i][0] == dat.f[-1][0]:
i += 1
        title('Object Variables (' +
              (remark + ', ' if remark is not None else '') +
              str(dat.x.shape[1] - 5) + '-D, popsize~' +
              (str(int((dat.f[-1][1] - dat.f[-i][1]) / (dat.f[-1][0] - dat.f[-i][0])))
               if len(dat.f.T[0]) > 1 and dat.f[-1][0] > dat.f[-i][0] else 'NA')
              + ')')
self._finalize_plotting() | If ``x_opt is not None`` the difference to x_opt is plotted
in log scale |
382,092 | def _handler_swagger_ui(self, request, spec, version):
version = version or self._version_ui
if self._spec_url:
spec_url = self._spec_url
else:
            # NOTE: route-name key reconstructed; original lost in extraction.
            spec_url = request.url.with_path(self['spec'].url())
proto = request.headers.get(hdrs.X_FORWARDED_PROTO)
if proto:
spec_url = spec_url.with_scheme(proto)
if isinstance(spec, str):
spec_url = spec_url.with_query(spec=spec)
elif len(self._swagger_data) == 1:
for basePath in self._swagger_data:
spec_url = spec_url.with_query(spec=basePath)
else:
            spec_url = spec_url.with_query(spec='')
spec_url = spec_url.human_repr()
return web.Response(
text=ui.rend_template(spec_url,
prefix=self._swagger_ui,
version=version),
        content_type='text/html') | ---
parameters:
- name: spec
in: query
type: string
- name: version
in: query
type: integer
enum: [2,3] |
382,093 | def _add_token(self, token, parent_node=):
        if parent_node == '':
            parent_node = self.root
        token_node_id = 'token_{}'.format(self.token_count)  # NOTE: id format reconstructed
        self.add_node(token_node_id, layers={self.ns, self.ns + ':token'},
                      attr_dict={self.ns + ':token': token})
self.add_edge(parent_node, token_node_id,
layers={self.ns},
edge_type=EdgeTypes.spanning_relation)
self.tokens.append(token_node_id)
self.token_count += 1 | add a token to this docgraph |
382,094 | def run(config, tag, bucket, account, not_bucket, not_account, debug, region):
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s: %(name)s:%(levelname)s %(message)s")
logging.getLogger().setLevel(level=logging.WARNING)
if debug:
def invoke(f, *args, **kw):
return f(*args, **kw)
worker.invoke = invoke
with open(config) as fh:
data = utils.yaml_load(fh.read())
        # NOTE: configuration-key literals below were lost in extraction;
        # plausible names are used.
        for account_info in data.get('accounts', ()):
            if tag and tag not in account_info.get('tags', ()):
                continue
            if account and account_info['name'] not in account:
                continue
            if not_account and account_info['name'] in not_account:
                continue
            if 'inventory' in data and 'inventory' not in account_info:
                account_info['inventory'] = data['inventory']
            if 'visitors' in data and 'visitors' not in account_info:
                account_info['visitors'] = data['visitors']
            if 'object-reporting' in data and 'object-reporting' not in account_info:
                account_info['object-reporting'] = data['object-reporting']
            account_info['object-reporting'][
                'record-prefix'] = datetime.utcnow().strftime('%Y/%m/%d')
            if bucket:
                account_info['buckets'] = bucket
            if not_bucket:
                account_info['not-buckets'] = not_bucket
            if region:
                account_info['regions'] = region
try:
worker.invoke(worker.process_account, account_info)
except Exception:
if not debug:
raise
import pdb, traceback, sys
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[-1])
raise | Run across a set of accounts and buckets. |
382,095 | def guass(self, mu: float, sigma: float) -> float:
return float(
lib.TCOD_random_get_gaussian_double(self.random_c, mu, sigma)
) | Return a random number using Gaussian distribution.
Args:
mu (float): The median returned value.
sigma (float): The standard deviation.
Returns:
float: A random float. |
382,096 | def compute_mu(L_aug, Y, k, p):
n, d = L_aug.shape
assert Y.shape[0] == n
mu = np.zeros((d, k))
for y in range(1, k + 1):
L_y = L_aug[Y == y]
mu[:, y - 1] = L_y.sum(axis=0) / L_y.shape[0]
return mu | Given label matrix L_aug and labels Y, compute the true mu params.
Args:
L: (np.array {0,1}) [n, d] The augmented (indicator) label matrix
Y: (np.array int) [n] The true labels in {1,...,k}
k: (int) Cardinality
p: (np.array float) [k] The class balance |
382,097 | def write(self, data):
if isinstance(data, text_type):
data = data.encode("utf-8")
if self._closed:
raise IOError("File is closed")
if not (self._flags & self.FLAG_WRITE):
raise IOError("File not open for writing")
if not (self._flags & self.FLAG_BUFFERED):
self._write_all(data)
return
self._wbuffer.write(data)
if self._flags & self.FLAG_LINE_BUFFERED:
last_newline_pos = data.rfind(linefeed_byte)
if last_newline_pos >= 0:
wbuf = self._wbuffer.getvalue()
last_newline_pos += len(wbuf) - len(data)
self._write_all(wbuf[: last_newline_pos + 1])
self._wbuffer = BytesIO()
self._wbuffer.write(wbuf[last_newline_pos + 1 :])
return
if self._wbuffer.tell() >= self._bufsize:
self.flush()
return | Write data to the file. If write buffering is on (``bufsize`` was
specified and non-zero), some or all of the data may not actually be
written yet. (Use `flush` or `close` to force buffered data to be
written out.)
:param data: ``str``/``bytes`` data to write |
382,098 | def deltasigma_nfw(self):
def _centered_dsigma(self):
firstpart = np.zeros_like(self._x)
secondpart = np.zeros_like(self._x)
g = np.zeros_like(self._x)
small_1a = 4. / self._x[self._x_small]**2
small_1b = 2. / (self._x[self._x_small]**2 - 1.)
small_1c = np.sqrt(1. - self._x[self._x_small]**2)
firstpart[self._x_small] = (small_1a + small_1b) / small_1c
big_1a = 8. / (self._x[self._x_big]**2 *
np.sqrt(self._x[self._x_big]**2 - 1.))
big_1b = 4. / ((self._x[self._x_big]**2 - 1.)**1.5)
firstpart[self._x_big] = big_1a + big_1b
small_2a = np.sqrt((1. - self._x[self._x_small]) /
(1. + self._x[self._x_small]))
secondpart[self._x_small] = np.log((1. + small_2a) /
(1. - small_2a))
big_2a = self._x[self._x_big] - 1.
big_2b = 1. + self._x[self._x_big]
secondpart[self._x_big] = np.arctan(np.sqrt(big_2a / big_2b))
both_3a = (4. / (self._x**2)) * np.log(self._x / 2.)
both_3b = 2. / (self._x**2 - 1.)
g = firstpart * secondpart + both_3a - both_3b
g[self._x_one] = (10. / 3.) + 4. * np.log(0.5)
if np.isnan(np.sum(g)) or np.isinf(np.sum(g)):
                print('WARNING: g contains NaN or inf:', g)
deltasigma = self._rs_dc_rcrit * g
return deltasigma
def _offset_dsigma(self):
original_rbins = self._rbins.value
try:
sigma_sm_rbins = self._sigma_sm
except AttributeError:
sigma_sm_rbins = self.sigma_nfw()
innermost_sampling = 1.e-10
inner_prec = self._numRinner
r_inner = np.linspace(innermost_sampling,
original_rbins.min(),
endpoint=False, num=inner_prec)
outer_prec = self._factorRouter * self._nbins
r_outer = np.linspace(original_rbins.min(),
original_rbins.max(),
endpoint=False, num=outer_prec + 1)[1:]
r_ext_unordered = np.hstack([r_inner, r_outer, original_rbins])
r_extended = np.sort(r_ext_unordered)
self._rbins = r_extended * units.Mpc
self._nbins = self._rbins.shape[0]
_set_dimensionless_radius(self)
rs_dc_rcrit = self._rs * self._delta_c * self._rho_crit
self._rs_dc_rcrit = rs_dc_rcrit.reshape(self._nlens,
1).repeat(self._nbins, 1)
sigma_sm_extended = self.sigma_nfw()
mean_inside_sigma_sm = np.zeros([self._nlens,
original_rbins.shape[0]])
for i, r in enumerate(original_rbins):
index_of_rbin = np.where(r_extended == r)[0][0]
x = r_extended[0:index_of_rbin + 1]
y = sigma_sm_extended[:, 0:index_of_rbin + 1] * x
                integral = simps(y, x=x, axis=-1, even='avg')  # NOTE: 'even' mode reconstructed
mean_inside_sigma_sm[:, i] = (2. / r**2) * integral
mean_inside_sigma_sm = mean_inside_sigma_sm * (units.Msun /
units.pc**2)
self._rbins = original_rbins * units.Mpc
self._nbins = self._rbins.shape[0]
_set_dimensionless_radius(self)
rs_dc_rcrit = self._rs * self._delta_c * self._rho_crit
self._rs_dc_rcrit = rs_dc_rcrit.reshape(self._nlens,
1).repeat(self._nbins, 1)
self._sigma_sm = sigma_sm_rbins
dsigma_sm = mean_inside_sigma_sm - sigma_sm_rbins
return dsigma_sm
if self._sigmaoffset is None:
finaldeltasigma = _centered_dsigma(self)
elif np.abs(self._sigmaoffset).sum() == 0:
finaldeltasigma = _centered_dsigma(self)
else:
finaldeltasigma = _offset_dsigma(self)
return finaldeltasigma | Calculate NFW differential surface mass density profile.
Generate the differential surface mass density profiles of each cluster
halo, assuming a spherical NFW model. Optionally includes the effect of
cluster miscentering offsets, if the parent object was initialized
with offsets.
Returns
----------
Quantity
Differential surface mass density profiles (ndarray, in
astropy.units of Msun/pc/pc). Each row corresponds to a single
cluster halo. |
382,099 | def interpret_stats(results):
stats = results.stats
contains_updates = stats.pop("contains_updates", False) if stats else False
if not contains_updates:
        result = '{} rows affected.'.format(len(results))
    else:
        result = ''
for stat, value in stats.items():
if value:
result = "{}\n{} {}.".format(result, value,
stat.replace("_", " "))
return result.strip() | Generates the string to be shown as updates after the execution of a
Cypher query
:param results: ``ResultSet`` with the raw results of the execution of
the Cypher query |