Unnamed: 0 (int64, 0 to 389k) | code (string, length 26 to 79.6k) | docstring (string, length 1 to 46.9k) |
---|---|---|
25,900 |
def _split_stock_code(self, code):
stock_str = str(code)
split_loc = stock_str.find(".")
if 0 <= split_loc < len(
stock_str) - 1 and stock_str[0:split_loc] in MKT_MAP:
market_str = stock_str[0:split_loc]
partial_stock_str = stock_str[split_loc + 1:]
return RET_OK, (market_str, partial_stock_str)
else:
error_str = ERROR_STR_PREFIX + "format of %s is wrong. (US.AAPL, HK.00700, SZ.000001)" % stock_str
return RET_ERROR, error_str
|
Do not use Python's built-in split function here.
The built-in function cannot handle some stock strings correctly,
for instance US..DJI, where the dot "." is itself part of the original code.
|
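A quick illustration of the point above (values are just for demonstration): splitting on the first dot with find() keeps a dot that belongs to the ticker itself, while a naive str.split breaks it apart.

    code = "US..DJI"
    i = code.find(".")
    print(code[:i], code[i + 1:])   # -> US .DJI  (the leading dot of .DJI is preserved)
    print(code.split("."))          # -> ['US', '', 'DJI']  (the empty piece shows what a naive split loses)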
25,901 |
def get_variable_for_feature(self, feature_key, variable_key):
feature = self.feature_key_map.get(feature_key)
if not feature:
self.logger.error('Feature with key "%s" not found.' % feature_key)
return None
if variable_key not in feature.variables:
self.logger.error('Variable with key "%s" not found.' % variable_key)
return None
return feature.variables.get(variable_key)
|
Get the variable with the given variable key for the given feature.
Args:
feature_key: The key of the feature for which we are getting the variable.
variable_key: The key of the variable we are getting.
Returns:
Variable with the given key in the given feature.
|
25,902 |
def get_institute_usage(institute, start, end):
try:
cache = InstituteCache.objects.get(
institute=institute, date=datetime.date.today(),
start=start, end=end)
return cache.cpu_time, cache.no_jobs
except InstituteCache.DoesNotExist:
return 0, 0
|
Return a tuple of cpu hours and number of jobs for an institute
for a given period
Keyword arguments:
institute -- the institute to query
start -- start date
end -- end date
|
25,903 |
def itruediv(a, b):
"Same as a /= b."
if type(a) == int or type(a) == long:
a = float(a)
a /= b
return a
|
Same as a /= b.
|
25,904 |
def filename(self, value):
warnings.warn(
"The attribute will be removed in future versions. "
"Use instead.",
DeprecationWarning, stacklevel=2
)
self.source = value
|
Deprecated, use `source` instead.
|
25,905 |
def write_memory(addr, buf, progress=None, progress_addr=0, progress_size=0):
xfer_count = 0
xfer_bytes = 0
xfer_total = len(buf)
xfer_base = addr
while xfer_bytes < xfer_total:
if __verbose and xfer_count % 512 == 0:
print ("Addr 0x%x %dKBs/%dKBs..." % (xfer_base + xfer_bytes,
xfer_bytes // 1024,
xfer_total // 1024))
if progress and xfer_count % 2 == 0:
progress(progress_addr, xfer_base + xfer_bytes - progress_addr,
progress_size)
set_address(xfer_base+xfer_bytes)
chunk = min(__cfg_descr.wTransferSize, xfer_total-xfer_bytes)
__dev.ctrl_transfer(0x21, __DFU_DNLOAD, 2, __DFU_INTERFACE,
buf[xfer_bytes:xfer_bytes + chunk], __TIMEOUT)
if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY:
raise Exception("DFU: write memory failed")
if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE:
raise Exception("DFU: write memory failed")
xfer_count += 1
xfer_bytes += chunk
|
Writes a buffer into memory. This routine assumes that memory has
already been erased.
|
25,906 |
def from_yaml(self, node):
content = self.construct_scalar(node)
try:
parts = shlex.split(content)
except UnicodeEncodeError:
raise yaml.YAMLError()
if len(parts) != 2:
raise yaml.YAMLError()
filename, key = parts
path = os.path.join(self._root, filename)
with open(path, 'r') as f:
doc = yaml.load(f, self.__class__)
try:
cur = doc
for k in key.split('.'):
cur = cur[k]
except KeyError:
raise yaml.YAMLError("Key '{}' not found in file '{}'".format(key, filename))
return cur
|
Implements a !from_yaml constructor with the following syntax:
!from_yaml filename key
Arguments:
filename: Filename of external YAML document from which to load,
relative to the current YAML file.
key: Key from external YAML document to return,
using a dot-separated syntax for nested keys.
Examples:
!from_yaml external.yml pop
!from_yaml external.yml foo.bar.pop
!from_yaml "another file.yml" "foo bar.snap crackle.pop"
|
25,907 |
def state(self) -> SessionState:
if self.session_id is None:
raise ValueError("session not yet started")
session = self.client.get_session(self.session_id)
if session is None:
raise ValueError("session not found - it may have been shut down")
return session.state
|
The state of the managed Spark session.
|
25,908 |
def set_xticklabels_position(self, position):
pgfplots_translation = {'top': 'upper', 'bottom': 'lower'}
fixed_position = pgfplots_translation[position]
self.xticklabel_pos = fixed_position
|
Specify the position of the axis tick labels.
This is generally only useful for multiplots containing only one
row. This can be used to e.g. alternatively draw the tick labels
on the bottom or the top of the subplot.
:param position: 'top' or 'bottom' to specify the position of the
tick labels.
|
25,909 |
def sample_outcomes(probs, n):
dist = np.cumsum(probs)
rs = np.random.rand(n)
return np.array([(np.where(r < dist)[0][0]) for r in rs])
|
For a discrete probability distribution ``probs`` with outcomes 0, 1, ..., k-1 draw ``n``
random samples.
:param list probs: A list of probabilities.
:param Number n: The number of random samples to draw.
:return: An array of samples drawn from distribution probs over 0, ..., len(probs) - 1
:rtype: numpy.ndarray
|
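A quick usage sketch, assuming numpy is imported as np as the function above requires: drawing many samples from a three-outcome distribution should roughly reproduce the input probabilities.

    np.random.seed(0)                       # fixed seed for a reproducible illustration
    samples = sample_outcomes([0.1, 0.2, 0.7], 10000)
    print(np.bincount(samples) / 10000.0)   # roughly [0.1, 0.2, 0.7]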
25,910 |
def twoDimensionalHistogram(title, title_x, title_y,
z, bins_x, bins_y,
lim_x=None, lim_y=None,
vmin=None, vmax=None):
plt.figure()
mesh_x, mesh_y = np.meshgrid(bins_x, bins_y)
if vmin != None and vmin == vmax:
plt.pcolor(mesh_x, mesh_y, z)
else:
plt.pcolor(mesh_x, mesh_y, z, vmin=vmin, vmax=vmax)
plt.xlabel(title_x)
plt.ylabel(title_y)
plt.title(title)
plt.colorbar()
if lim_x:
plt.xlim(lim_x[0], lim_x[1])
if lim_y:
plt.ylim(lim_y[0], lim_y[1])
|
Create a two-dimension histogram plot or binned map.
If using the outputs of np.histogram2d, remember to transpose the histogram.
INPUTS
|
25,911 |
def _fw_rule_update(self, drvr_name, data):
LOG.debug("FW Update %s", data)
tenant_id = data.get('firewall_rule').get('tenant_id')
fw_rule = data.get('firewall_rule')
rule = self._fw_rule_decode_store(data)
rule_id = fw_rule.get('id')
if tenant_id not in self.fwid_attr or not (
self.fwid_attr[tenant_id].is_rule_present(rule_id)):
LOG.error("Incorrect update info for tenant %s", tenant_id)
return
self.fwid_attr[tenant_id].rule_update(rule_id, rule)
self._check_update_fw(tenant_id, drvr_name)
|
Firewall Rule update routine.
Function to decode the updated rules and call routines that
in turn calls the device routines to update rules.
|
25,912 |
def sync(self):
pack_name, refs = self._fetch_pack()
if pack_name:
commits = self._read_commits_from_pack(pack_name)
else:
commits = []
logger.debug("Git repository %s (%s) does not have any new object",
self.uri, self.dirpath)
self._update_references(refs)
logger.debug("Git repository %s (%s) is synced",
self.uri, self.dirpath)
return commits
|
Keep the repository in sync.
This method will synchronize the repository with its 'origin',
fetching newest objects and updating references. It uses low
level commands which allow to keep track of which things
have changed in the repository.
The method also returns a list of hashes related to the new
commits fetched during the process.
:returns: list of new commits
:raises RepositoryError: when an error occurs synchronizing
the repository
|
25,913 |
def predict(self, dataset, output_type='class', batch_size=64):
if not isinstance(dataset, (_tc.SFrame, _tc.SArray, _tc.Image)):
raise TypeError("dataset must be an SFrame, SArray or turicreate.Image")
if(batch_size < 1):
raise ValueError("'batch_size' must be greater than or equal to 1")
dataset, unpack = self._canonize_input(dataset)
extracted_features = self._extract_features(dataset, batch_size=batch_size)
return unpack(self.classifier.predict(extracted_features, output_type=output_type))
|
Return predictions for ``dataset``, using the trained logistic
regression model. Predictions can be generated as class labels,
probabilities that the target value is True, or margins (i.e. the
distance of the observations from the hyperplane separating the
classes). `probability_vector` returns a vector of probabilities by
each class.
For each new example in ``dataset``, the margin---also known as the
linear predictor---is the inner product of the example and the model
coefficients. The probability is obtained by passing the margin through
the logistic function. Predicted classes are obtained by thresholding
the predicted probabilities at 0.5. If you would like to threshold
predictions at a different probability level, you can use the
Turi Create evaluation toolkit.
Parameters
----------
dataset : SFrame | SArray | turicreate.Image
The images to be classified.
If dataset is an SFrame, it must have columns with the same names as
the features used for model training, but does not require a target
column. Additional columns are ignored.
output_type : {'probability', 'margin', 'class', 'probability_vector'}, optional
Form of the predictions which are one of:
- 'probability': Prediction probability associated with the True
class (not applicable for multi-class classification)
- 'probability_vector': Prediction probability associated with each
class as a vector. The probability of the first class (sorted
alphanumerically by name of the class in the training set) is in
position 0 of the vector, the second in position 1 and so on.
- 'class': Class prediction. For multi-class classification, this
returns the class with maximum probability.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve performance.
Returns
-------
out : SArray
An SArray with model predictions. If `dataset` is a single image, the
return value will be a single prediction.
See Also
----------
create, evaluate, classify
Examples
----------
>>> probability_predictions = model.predict(data, output_type='probability')
>>> margin_predictions = model.predict(data, output_type='margin')
>>> class_predictions = model.predict(data, output_type='class')
|
25,914 |
def insert(self, index, option):
self._options.insert(index, self._parse_option(option))
self._refresh_options()
self.resize(self._width, self._height)
|
Insert a new `option` in the ButtonGroup at `index`.
:param int index:
The index of where to insert the option.
:param string/List option:
The option to append to the ButtonGroup. If a 2D list is specified,
the first element is the text, the second is the value.
|
25,915 |
def Generate(self, items, token=None):
del token
client_ids = set()
for item_batch in collection.Batch(items, self.BATCH_SIZE):
client_paths = set()
for item in item_batch:
try:
client_path = flow_export.CollectionItemToClientPath(
item, self.client_id)
except flow_export.ItemNotExportableError:
continue
if not self.predicate(client_path):
self.ignored_files.add(client_path)
self.processed_files.add(client_path)
continue
client_ids.add(client_path.client_id)
client_paths.add(client_path)
for chunk in file_store.StreamFilesChunks(client_paths):
self.processed_files.add(chunk.client_path)
for output in self._WriteFileChunk(chunk=chunk):
yield output
self.processed_files |= client_paths - (
self.ignored_files | self.archived_files)
if client_ids:
for client_id, client_info in iteritems(
data_store.REL_DB.MultiReadClientFullInfo(client_ids)):
client = api_client.ApiClient().InitFromClientInfo(client_info)
for chunk in self._GenerateClientInfo(client_id, client):
yield chunk
for chunk in self._GenerateDescription():
yield chunk
yield self.archive_generator.Close()
|
Generates archive from a given collection.
Iterates the collection and generates an archive by yielding contents
of every referenced AFF4Stream.
Args:
items: Iterable of rdf_client_fs.StatEntry objects
token: User's ACLToken.
Yields:
Binary chunks comprising the generated archive.
|
25,916 |
def _handleEsc(self):
if self._typingSms:
self.serial.write(self.ESC_CHARACTER)
self._typingSms = False
self.inputBuffer = []
self.cursorPos = 0
|
Handler for CTRL+Z keypresses
|
25,917 |
def get_taskruns(project_id, limit=100, offset=0, last_id=None):
if last_id is not None:
params = dict(limit=limit, last_id=last_id)
else:
params = dict(limit=limit, offset=offset)
print(OFFSET_WARNING)
params['project_id'] = project_id
try:
res = _pybossa_req('get', 'taskrun',
params=params)
if type(res).__name__ == 'list':
return [TaskRun(taskrun) for taskrun in res]
else:
raise TypeError
except:
raise
|
Return a list of task runs for a given project ID.
:param project_id: PYBOSSA Project ID
:type project_id: integer
:param limit: Number of returned items, default 100
:type limit: integer
:param offset: Offset for the query, default 0
:type offset: integer
:param last_id: id of the last taskrun, used for pagination. If provided, offset is ignored
:type last_id: integer
:rtype: list
:returns: A list of task runs for the given project ID
|
25,918 |
def load_child_sections_for_section(context, section, count=None):
page = section.get_main_language_page()
locale = context.get('locale_code')
qs = SectionPage.objects.child_of(page).filter(
language__is_main_language=True)
if not locale:
return qs[:count]
return get_pages(context, qs, locale)
|
Returns all child sections
If the `locale_code` in the context is not the main language, it will
return the translations of the live articles.
|
25,919 |
def get_attributes(self):
items = {}
items['form_method'] = self.form_method.strip()
items['form_tag'] = self.form_tag
items['form_style'] = self.form_style.strip()
if self.form_action:
items['form_action'] = self.form_action.strip()
if self.form_id:
items['form_id'] = self.form_id.strip()
if self.form_class:
items['form_class'] = self.form_class.strip()
if self.inputs:
items['inputs'] = self.inputs
if self.form_error_title:
items['form_error_title'] = self.form_error_title.strip()
if self.formset_error_title:
items['formset_error_title'] = self.formset_error_title.strip()
return items
|
Used by the uni_form_tags to get helper attributes
|
25,920 |
def pages(self, limit=0):
if limit > 0:
self.iterator.limit = limit
return self.iterator
|
Return iterator for pages
|
25,921 |
def parse_metadata(metadata_obj: Metadata, metadata_dictionary: dict) -> None:
for key, value_set in metadata_dictionary.get("https://w3id.org/dts/api
term = URIRef(key)
for value_dict in value_set:
metadata_obj.add(term, *dict_to_literal(value_dict))
for key, value_set in metadata_dictionary.get("https://w3id.org/dts/api
term = URIRef(key)
for value_dict in value_set:
metadata_obj.add(term, *dict_to_literal(value_dict))
|
Adds to a Metadata object any DublinCore or dts:Extensions object
found in the given dictionary
:param metadata_obj:
:param metadata_dictionary:
|
25,922 |
def get_command_line(self):
return loads(self.command_line) if isinstance(self.command_line, bytes) else loads(self.command_line.encode())
|
Returns the command line for the job.
|
25,923 |
def fresh_jwt_required(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
verify_fresh_jwt_in_request()
return fn(*args, **kwargs)
return wrapper
|
A decorator to protect a Flask endpoint.
If you decorate an endpoint with this, it will ensure that the requester
has a valid and fresh access token before allowing the endpoint to be
called.
See also: :func:`~flask_jwt_extended.jwt_required`
|
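A minimal usage sketch for a decorator like this one, assuming an app wired up with flask_jwt_extended (the route and secret are placeholders; recent flask_jwt_extended versions express the same thing as jwt_required(fresh=True)):

    from flask import Flask, jsonify
    from flask_jwt_extended import JWTManager, fresh_jwt_required

    app = Flask(__name__)
    app.config["JWT_SECRET_KEY"] = "change-me"   # placeholder secret
    jwt = JWTManager(app)

    @app.route("/change-password", methods=["POST"])
    @fresh_jwt_required
    def change_password():
        # Reachable only with a valid *and fresh* access token.
        return jsonify(msg="password changed"), 200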
25,924 |
def draw_quadmesh(data, obj):
content = []
filename, rel_filepath = files.new_filename(data, "img", ".png")
dpi = data["dpi"]
fig_dpi = obj.figure.get_dpi()
obj.figure.set_dpi(dpi)
from matplotlib.backends.backend_agg import RendererAgg
cbox = obj.get_clip_box()
width = int(round(cbox.extents[2]))
height = int(round(cbox.extents[3]))
ren = RendererAgg(width, height, dpi)
obj.draw(ren)
image = Image.frombuffer(
"RGBA", ren.get_canvas_width_height(), ren.buffer_rgba(), "raw", "RGBA", 0, 1
)
box = (
int(round(cbox.extents[0])),
0,
int(round(cbox.extents[2])),
int(round(cbox.extents[3] - cbox.extents[1])),
)
cropped = image.crop(box)
cropped.save(filename)
obj.figure.set_dpi(fig_dpi)
extent = obj.axes.get_xlim() + obj.axes.get_ylim()
ff = data["float format"]
content.append(
(
"\\addplot graphics [includegraphics cmd=\\pgfimage,"
"xmin=" + ff + ", xmax=" + ff + ", "
"ymin=" + ff + ", ymax=" + ff + "] {{{}}};\n"
).format(*(extent + (rel_filepath,)))
)
return data, content
|
Returns the PGFPlots code for a graphics environment holding a
rendering of the object.
|
25,925 |
def on_btn_upload(self, event):
if not self.check_for_uncombined_files():
return
outstring="upload_magic.py"
print("-I- running python script:\n %s"%(outstring))
wait = wx.BusyInfo("Please wait, working...")
wx.SafeYield()
self.contribution.tables['measurements'].add_measurement_names()
if self.data_model_num == 3:
res, error_message, has_problems, all_failing_items = ipmag.upload_magic(concat=False, dir_path=self.WD,
vocab=self.contribution.vocab,
contribution=self.contribution)
if self.data_model_num == 2:
res, error_message, errors = ipmag.upload_magic2(dir_path=self.WD, data_model=self.er_magic.data_model)
del wait
if res:
text = "You are ready to upload!\n{} was generated in {}".format(os.path.split(res)[1], os.path.split(res)[0])
dlg = pw.ChooseOne(self, "Go to MagIC for uploading", "Not ready yet", text, "Saved")
del wait
else:
text = "There were some problems with the creation of your upload file.\nError message: {}\nSee Terminal/message window for details".format(error_message)
dlg = wx.MessageDialog(self, caption="Error", message=text, style=wx.OK)
dlg.Centre()
result = dlg.ShowModal()
if result == wx.ID_OK:
dlg.Destroy()
if result == wx.ID_YES:
pw.on_database_upload(None)
if self.data_model_num == 3:
if not res:
from programs import magic_gui
self.Disable()
self.Hide()
self.magic_gui_frame = magic_gui.MainFrame(self.WD,
dmodel=self.data_model,
title="Validations",
contribution=self.contribution)
self.magic_gui_frame.validation_mode = []
self.magic_gui_frame.failing_items = all_failing_items
self.magic_gui_frame.change_dir_button.Disable()
self.magic_gui_frame.Centre()
self.magic_gui_frame.Show()
self.magic_gui_frame.highlight_problems(has_problems)
self.magic_gui_frame.bSizer2.GetStaticBox().SetLabel()
self.magic_gui_frame.btn_upload.SetLabel("exit validation mode")
self.magic_gui_frame.Bind(wx.EVT_BUTTON, self.on_end_validation, self.magic_gui_frame.btn_upload)
self.magic_gui_frame.Bind(wx.EVT_CLOSE, self.on_end_validation)
self.magic_gui_frame.Bind(wx.EVT_MENU,
lambda event: self.menubar.on_quit(event, self.magic_gui_frame),
self.magic_gui_frame.menubar.file_quit)
self.Bind(wx.EVT_MENU,
lambda event: self.menubar.on_quit(event, self.magic_gui_frame),
self.magic_gui_frame.menubar.file_quit)
|
Try to run upload_magic.
Open validation mode if the upload file has problems.
|
25,926 |
def eqstr(a, b):
return bool(libspice.eqstr_c(stypes.stringToCharP(a), stypes.stringToCharP(b)))
|
Determine whether two strings are equivalent.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/eqstr_c.html
:param a: Arbitrary character string.
:type a: str
:param b: Arbitrary character string.
:type b: str
:return: True if A and B are equivalent.
:rtype: bool
|
25,927 |
def kwargs(self):
kwargs = dict(self.query_kwargs)
kwargs.update(self.body_kwargs)
return kwargs
|
combine GET and POST params to be passed to the controller
|
25,928 |
def run_mainloop_with(self, target):
self._user_thread = threading.Thread(target=self._user_thread_main, args=(target,))
self._user_thread.daemon = True
self._user_thread.start()
try:
self._gobject_mainloop.run()
except KeyboardInterrupt:
self._gobject_mainloop.quit()
sys.exit(0)
if self._exception is not None:
raise_(self._exception[1], None, self._exception[2])
else:
sys.exit(self._return_code)
|
Start the OS's main loop to process asyncronous BLE events and then
run the specified target function in a background thread. Target
function should be a function that takes no parameters and optionally
return an integer response code. When the target function stops
executing or returns with value then the main loop will be stopped and
the program will exit with the returned code.
Note that an OS main loop is required to process asyncronous BLE events
and this function is provided as a convenience for writing simple tools
and scripts that don't need to be full-blown GUI applications. If you
are writing a GUI application that has a main loop (a GTK glib main loop
on Linux, or a Cocoa main loop on OSX) then you don't need to call this
function.
|
25,929 |
def update_alias(self, addressid, data):
return self.api_call(
ENDPOINTS[][],
dict(addressid=addressid),
body=data)
|
Update alias address
|
25,930 |
def police_priority_map_conform_map_pri6_conform(self, **kwargs):
config = ET.Element("config")
police_priority_map = ET.SubElement(config, "police-priority-map", xmlns="urn:brocade.com:mgmt:brocade-policer")
name_key = ET.SubElement(police_priority_map, "name")
name_key.text = kwargs.pop('name')
conform = ET.SubElement(police_priority_map, "conform")
map_pri6_conform = ET.SubElement(conform, "map-pri6-conform")
map_pri6_conform.text = kwargs.pop('map_pri6_conform')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
25,931 |
def importable(obj):
try:
return look_up(object_name(obj)) is obj
except (AttributeError, TypeError, ImportError):
return False
|
Check if an object can be serialised as a qualified name. This is done
by checking that a ``look_up(object_name(obj))`` gives back the same
object.
.. |importable| replace:: :py:func:`importable`
|
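A hypothetical usage sketch, assuming look_up and object_name resolve dotted names as described: a module-level function round-trips to itself, while an anonymous function does not.

    import math

    print(importable(math.sqrt))      # True: 'math.sqrt' resolves back to the same object
    print(importable(lambda x: x))    # False: a lambda has no stable qualified name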
25,932 |
def cleanup(self):
self._processing_stop = True
self._wakeup_processing_thread()
self._processing_stopped_event.wait(3)
|
Stop background thread and clean up resources
|
25,933 |
def add_tree(self, tree, parent=None):
if tree.path in self.path_db:
self.remove_tree_by_path(tree.path)
for index in tree.indexes:
if not getattr(tree, index):
continue
self._add_to(
getattr(self, index + "_db"),
getattr(tree, index),
tree,
)
if parent:
self._add_to(self.parent_db, tree.path, parent)
for sub_tree in tree.sub_trees:
assert sub_tree.path.startswith(tree.path)
for sub_tree in tree.sub_trees:
self.add_tree(sub_tree, parent=tree)
|
Add `tree` into database.
Args:
tree (obj): :class:`.Tree` instance.
parent (ref, default None): Reference to parent tree. This is used
for all sub-trees in recursive call.
|
25,934 |
def file_finder(dirname="."):
import distutils.log
dirname = dirname or '.'
try:
valid_mgrs = managers.RepoManager.get_valid_managers(dirname)
valid_mgrs = managers.RepoManager.existing_only(valid_mgrs)
for mgr in valid_mgrs:
try:
return mgr.find_all_files()
except Exception:
e = sys.exc_info()[1]
distutils.log.warn(
"hgtools.%s could not find files: %s",
mgr, e)
except Exception:
e = sys.exc_info()[1]
distutils.log.warn(
"Unexpected error finding valid managers in "
"hgtools.file_finder_plugin: %s", e)
return []
|
Find the files in ``dirname`` under Mercurial version control
according to the setuptools spec (see
http://peak.telecommunity.com/DevCenter/setuptools#adding-support-for-other-revision-control-systems
).
|
25,935 |
def get_dimension_index(self, dimension):
if isinstance(dimension, int):
if (dimension < (self.ndims + len(self.vdims)) or
dimension < len(self.dimensions())):
return dimension
else:
return IndexError()
dim = dimension_name(dimension)
try:
dimensions = self.kdims+self.vdims
return [i for i, d in enumerate(dimensions) if d == dim][0]
except IndexError:
raise Exception("Dimension %s not found in %s." %
(dim, self.__class__.__name__))
|
Get the index of the requested dimension.
Args:
dimension: Dimension to look up by name or by index
Returns:
Integer index of the requested dimension
|
25,936 |
def query(self, u, v):
if self.level[u] > self.level[v]:
u, v = v, u
depth = len(self.anc)
for k in range(depth-1, -1, -1):
if self.level[u] <= self.level[v] - (1 << k):
v = self.anc[k][v]
assert self.level[u] == self.level[v]
if u == v:
return u
for k in range(depth-1, -1, -1):
if self.anc[k][u] != self.anc[k][v]:
u = self.anc[k][u]
v = self.anc[k][v]
assert self.anc[0][u] == self.anc[0][v]
return self.anc[0][u]
|
:returns: the lowest common ancestor of u and v
:complexity: O(log n)
|
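The query above relies on a binary-lifting table anc, where anc[k][v] is the 2**k-th ancestor of v; a minimal sketch of how such a table is typically precomputed from a parent array (assuming parent[root] == root, which matches the saturating jumps the query expects):

    def build_lifting_table(parent, level):
        # parent[v] is the direct parent of v; level[v] is the depth of v.
        n = len(parent)
        depth = max(1, max(level)).bit_length()
        anc = [list(parent)]                  # anc[0][v] = parent of v
        for k in range(1, depth):
            anc.append([anc[k - 1][anc[k - 1][v]] for v in range(n)])
        return anc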
25,937 |
def write(grp, out_path):
with open(out_path, "w") as f:
for x in grp:
f.write(str(x) + "\n")
|
Write a GRP to a text file.
Args:
grp (list): GRP object to write to new-line delimited text file
out_path (string): output path
Returns:
None
|
25,938 |
def description_from_content(self):
description = ""
for field_type in (RichTextField, models.TextField):
if not description:
for field in self._meta.fields:
if (isinstance(field, field_type) and
field.name != "description"):
description = getattr(self, field.name)
if description:
from yacms.core.templatetags.yacms_tags \
import richtext_filters
description = richtext_filters(description)
break
if not description:
description = str(self)
ends = ("</p>", "<br />", "<br/>", "<br>", "</ul>",
"\n", ". ", "! ", "? ")
for end in ends:
pos = description.lower().find(end)
if pos > -1:
description = TagCloser(description[:pos]).html
break
else:
description = truncatewords_html(description, 100)
try:
description = unicode(description)
except NameError:
pass
return description
|
Returns the first block or sentence of the first content-like
field.
|
25,939 |
def get_local_references(tb, max_string_length=1000):
if 'self' in tb.tb_frame.f_locals:
_locals = [('self', repr(tb.tb_frame.f_locals['self']))]
else:
_locals = []
for k, v in tb.tb_frame.f_locals.iteritems():
if k == 'self':
continue
try:
vstr = format_reference(v, max_string_length=max_string_length)
_locals.append((k, vstr))
except TypeError:
pass
return _locals
|
Find the values of the local variables within the traceback scope.
:param tb: traceback
:return: list of tuples containing (variable name, value)
|
25,940 |
def compress_json(data):
json_string = json.dumps(data).encode('utf-8', 'ignore').decode('utf-8')
x = lzstring.LZString()
return x.compressToBase64(json_string)
|
Take a Python data object. Convert to JSON and compress using lzstring
|
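A usage sketch for the helper above; the round trip assumes the lzstring package also exposes a matching decompressFromBase64, which is an assumption here rather than something stated in the snippet.

    import json
    import lzstring

    compressed = compress_json({"hello": "world", "n": 3})
    restored = json.loads(lzstring.LZString().decompressFromBase64(compressed))  # assumed decompressor
    print(restored)   # -> {'hello': 'world', 'n': 3}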
25,941 |
def elementInActiveFormattingElements(self, name):
for item in self.activeFormattingElements[::-1]:
if item == Marker:
break
elif item.name == name:
return item
return False
|
Check if an element exists between the end of the active
formatting elements and the last marker. If it does, return it, else
return false
|
25,942 |
def subpt(method, target, et, abcorr, obsrvr):
method = stypes.stringToCharP(method)
target = stypes.stringToCharP(target)
abcorr = stypes.stringToCharP(abcorr)
obsrvr = stypes.stringToCharP(obsrvr)
spoint = stypes.emptyDoubleVector(3)
alt = ctypes.c_double()
if hasattr(et, "__iter__"):
points = []
alts = []
for t in et:
libspice.subpt_c(method, target, ctypes.c_double(t), abcorr, obsrvr, spoint, ctypes.byref(alt))
checkForSpiceError(None)
points.append(stypes.cVectorToPython(spoint))
alts.append(alt.value)
return points, alts
else:
et = ctypes.c_double(et)
libspice.subpt_c(method, target, et, abcorr, obsrvr, spoint, ctypes.byref(alt))
return stypes.cVectorToPython(spoint), alt.value
|
Deprecated: This routine has been superseded by the CSPICE
routine :func:`subpnt`. This routine is supported for purposes of
backward compatibility only.
Compute the rectangular coordinates of the sub-observer point on
a target body at a particular epoch, optionally corrected for
planetary (light time) and stellar aberration. Return these
coordinates expressed in the body-fixed frame associated with the
target body. Also, return the observer's altitude above the
target body.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/subpt_c.html
:param method: Computation method.
:type method: str
:param target: Name of target body.
:type target: str
:param et: Epoch in ephemeris seconds past J2000 TDB.
:type et: Union[float,Iterable[float]]
:param abcorr: Aberration correction.
:type abcorr: str
:param obsrvr: Name of observing body.
:type obsrvr: str
:return:
Sub-observer point on the target body,
Altitude of the observer above the target body.
:rtype: tuple
|
25,943 |
def _GetMetadataRequest(self, metadata_url, params=None, timeout=None):
headers = {'Metadata-Flavor': 'Google'}
params = urlparse.urlencode(params or {})
url = '%s?%s' % (metadata_url, params)
request = urlrequest.Request(url, headers=headers)
request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({}))
timeout = timeout or self.timeout
return request_opener.open(request, timeout=timeout*1.1)
|
Performs a GET request with the metadata headers.
Args:
metadata_url: string, the URL to perform a GET request on.
params: dictionary, the query parameters in the GET request.
timeout: int, timeout in seconds for metadata requests.
Returns:
HTTP response from the GET request.
Raises:
urlerror.HTTPError: raises when the GET request fails.
|
25,944 |
def AgregarAjusteFisico(self, cantidad, cantidad_cabezas=None,
cantidad_kg_vivo=None, **kwargs):
"Agrega campos al detalle de item por un ajuste fisico"
d = {: cantidad,
: cantidad_cabezas,
: cantidad_kg_vivo,
}
item_liq = self.solicitud[][-1]
item_liq[] = d
return True
|
Agrega campos al detalle de item por un ajuste fisico
|
25,945 |
def add_comment(self, comment, metadata=""):
data = {
'comment': comment,
'metadata': metadata
}
return self.post(, data)
|
Add a canned comment
:type comment: str
:param comment: New canned comment
:type metadata: str
:param metadata: Optional metadata
:rtype: dict
:return: A dictionary containing the canned comment description
|
25,946 |
def get_jamo_class(jamo):
if jamo in JAMO_LEADS or jamo == chr(0x115F):
return "lead"
if jamo in JAMO_VOWELS or jamo == chr(0x1160) or\
0x314F <= ord(jamo) <= 0x3163:
return "vowel"
if jamo in JAMO_TAILS:
return "tail"
else:
raise InvalidJamoError("Invalid or classless jamo argument.", jamo)
|
Determine if a jamo character is a lead, vowel, or tail.
Integers and U+11xx characters are valid arguments. HCJ consonants are not
valid here.
get_jamo_class should return the class ["lead" | "vowel" | "tail"] of a
given character or integer.
Note: jamo class directly corresponds to the Unicode 7.0 specification,
thus includes filler characters as having a class.
|
25,947 |
def style(self, index, *args):
args = color_args(args, 0)
self.data[].append(
.join([str(index)]+list(map(str,args)))
)
return self.parent
|
Add style to your axis, one at a time
args are of the form::
<axis color>,
<font size>,
<alignment>,
<drawing control>,
<tick mark color>
APIPARAM: chxs
|
25,948 |
def push_update(self, params, values):
curr = self.get_values(params)
self.stack.append((params, curr))
self.update(params, values)
|
Perform a parameter update and keep track of the change on the state.
Same call structure as :func:`peri.states.States.update`
|
25,949 |
def attach_template(self, _template, _key, **unbound_var_values):
if _key in unbound_var_values:
raise ValueError('%s specified twice.' % _key)
unbound_var_values[_key] = self
return _template.as_layer().construct(**unbound_var_values)
|
Attaches the template to this such that _key=this layer.
Note: names were chosen to avoid conflicts with any likely unbound_var keys.
Args:
_template: The template to construct.
_key: The key that this layer should replace.
**unbound_var_values: The values for the unbound_vars.
Returns:
A new layer with operation applied.
Raises:
ValueError: If _key is specified twice or there is a problem computing the
template.
|
25,950 |
def download(self, field):
if not field.startswith('output'):
raise ValueError("Only processor results (output.* fields) can be downloaded")
if field not in self.annotation:
raise ValueError("Download field {} does not exist".format(field))
ann = self.annotation[field]
if ann['type'] != 'basic:file:':
raise ValueError("Only basic:file: field can be downloaded")
return next(self.gencloud.download([self.id], field))
|
Download a file.
:param field: file field to download
:type field: string
:rtype: a file handle
|
25,951 |
def dump_object(self, obj):
if isinstance(obj, uuid.UUID):
return str(obj)
if hasattr(obj, 'isoformat'):
return obj.isoformat()
if isinstance(obj, (bytes, bytearray, memoryview)):
return base64.b64encode(obj).decode()
raise TypeError('{!r} is not JSON serializable'.format(obj))
|
Called to encode unrecognized object.
:param object obj: the object to encode
:return: the encoded object
:raises TypeError: when `obj` cannot be encoded
This method is passed as the ``default`` keyword parameter
to :func:`json.dumps`. It provides default representations for
a number of Python language/standard library types.
+----------------------------+---------------------------------------+
| Python Type | String Format |
+----------------------------+---------------------------------------+
| :class:`bytes`, | Base64 encoded string. |
| :class:`bytearray`, | |
| :class:`memoryview` | |
+----------------------------+---------------------------------------+
| :class:`datetime.datetime` | ISO8601 formatted timestamp in the |
| | extended format including separators, |
| | milliseconds, and the timezone |
| | designator. |
+----------------------------+---------------------------------------+
| :class:`uuid.UUID` | Same as ``str(value)`` |
+----------------------------+---------------------------------------+
|
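A self-contained sketch of the same default-hook pattern, written as a free function so it can be passed straight to json.dumps (the names below are illustrative, not from the original module):

    import base64
    import datetime
    import json
    import uuid

    def dump_object(obj):
        # Mirrors the dispatch in the method above.
        if isinstance(obj, uuid.UUID):
            return str(obj)
        if hasattr(obj, 'isoformat'):
            return obj.isoformat()
        if isinstance(obj, (bytes, bytearray, memoryview)):
            return base64.b64encode(obj).decode()
        raise TypeError('{!r} is not JSON serializable'.format(obj))

    payload = {
        'id': uuid.uuid4(),
        'when': datetime.datetime(2020, 1, 1, 12, 0),
        'blob': b'\x00\x01',
    }
    print(json.dumps(payload, default=dump_object))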
25,952 |
def _cache_from_source(path: str) -> str:
cache_path, cache_file = os.path.split(importlib.util.cache_from_source(path))
filename, _ = os.path.splitext(cache_file)
return os.path.join(cache_path, filename + ".lpyc")
|
Return the path to the cached file for the given path. The original path
does not have to exist.
|
25,953 |
def regexp_filter(self_or_cls, pattern):
def inner_filter(name, p):
name_match = re.search(pattern,name)
if name_match is not None:
return True
doc_match = re.search(pattern,p.doc)
if doc_match is not None:
return True
return False
return inner_filter
|
Builds a parameter filter using the supplied pattern (may be a
general Python regular expression)
|
25,954 |
def _get_baremetal_switches(self, port):
all_switches = set()
active_switches = set()
all_link_info = port[bc.portbindings.PROFILE]['local_link_information']
for link_info in all_link_info:
switch_info = self._get_baremetal_switch_info(link_info)
if not switch_info:
continue
switch_ip = switch_info['switch_ip']
if not self._switch_defined(switch_ip):
continue
all_switches.add(switch_ip)
if self.is_switch_active(switch_ip):
active_switches.add(switch_ip)
return list(all_switches), list(active_switches)
|
Get switch ip addresses from baremetal transaction.
This method is used to extract switch information
from the transaction where VNIC_TYPE is baremetal.
:param port: Received port transaction
:returns: list of all switches
:returns: list of only switches which are active
|
25,955 |
def GTax(x, ax):
slc0 = (slice(None),) * ax
xg = np.roll(x, 1, axis=ax) - x
xg[slc0 + (slice(0, 1),)] = -x[slc0 + (slice(0, 1),)]
xg[slc0 + (slice(-1, None),)] = x[slc0 + (slice(-2, -1),)]
return xg
|
Compute transpose of gradient of `x` along axis `ax`.
Parameters
----------
x : array_like
Input array
ax : int
Axis on which gradient transpose is to be computed
Returns
-------
xg : ndarray
Output array
|
25,956 |
def slug(request, url):
page = None
if url:
for slug in url.split('/'):
if not slug:
continue
try:
page = Page.objects.get(slug=slug, parent=page)
except Page.DoesNotExist:
raise Http404
else:
try:
page = Page.objects.get(slug=, parent=None)
except Page.DoesNotExist:
return TemplateView.as_view(
template_name=)(request)
if in request.GET:
if not request.user.has_perm():
raise PermissionDenied
return EditPage.as_view()(request, pk=page.id)
if in request.GET:
if not request.user.has_perm():
raise PermissionDenied
return ComparePage.as_view()(request, pk=page.id)
return ShowPage.as_view()(request, pk=page.id)
|
Look up a page by url (which is a tree of slugs)
|
25,957 |
def complete_dict(
self,
values_dict):
if self.orientation != "rows":
values_dict = transpose_nested_dictionary(values_dict)
row_keys, column_keys = collect_nested_keys(values_dict)
if self.verbose:
print("[SimilarityWeightedAveraging]
print("[SimilarityWeightedAveraging]
similarities, overlaps, weights = \
self.jacard_similarity_from_nested_dicts(values_dict)
if self.verbose:
print(
"[SimilarityWeightedAveraging] Computed %d similarities between rows" % (
len(similarities),))
column_to_row_values = reverse_lookup_from_nested_dict(values_dict)
result = defaultdict(dict)
exponent = self.similarity_exponent
shrinkage_coef = self.shrinkage_coef
for i, row_key in enumerate(row_keys):
for column_key, value_triplets in column_to_row_values.items():
total = 0
denom = shrinkage_coef
for (other_row_key, y) in value_triplets:
sample_weight = 1.0
sim = similarities.get((row_key, other_row_key), 0)
combined_weight = sim ** exponent
combined_weight *= sample_weight
total += combined_weight * y
denom += combined_weight
if denom > shrinkage_coef:
result[row_key][column_key] = total / denom
if self.orientation != "rows":
result = transpose_nested_dictionary(result)
return result
|
Keys of nested dictionaries can be arbitrary objects.
|
25,958 |
def _indent(text, level=1):
prefix = ' ' * (4 * level)
def prefixed_lines():
for line in text.splitlines(True):
yield (prefix + line if line.strip() else line)
return ''.join(prefixed_lines())
|
Does a proper indenting for Sphinx rst
|
25,959 |
def callable_check(func, arg_count=1, arg_value=None, allow_none=False):
if func is None:
if not allow_none:
raise ValueError('func must not be None')
elif not arg_checker(func, *[arg_value for _ in range(arg_count)]):
raise ValueError('%r is not callable with %d positional argument(s)' % (func, arg_count))
|
Check whether func is callable, with the given number of positional arguments. Returns True if check
succeeded, False otherwise.
|
25,960 |
def read_padding(fp, size, divisor=2):
remainder = size % divisor
if remainder:
return fp.read(divisor - remainder)
return b''
|
Read padding bytes for the given byte size.
:param fp: file-like object
:param divisor: divisor of the byte alignment
:return: read byte size
|
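A small worked example with hypothetical values: after consuming 6 bytes under a 4-byte alignment, two padding bytes remain to be read.

    import io

    fp = io.BytesIO(b"ABCDEF\x00\x00")   # 6 data bytes followed by 2 padding bytes
    data = fp.read(6)
    padding = read_padding(fp, size=6, divisor=4)
    print(len(padding))   # -> 2, because 6 % 4 == 2 and 4 - 2 == 2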
25,961 |
def cost(self,
tileStorage=0,
fileStorage=0,
featureStorage=0,
generatedTileCount=0,
loadedTileCount=0,
enrichVariableCount=0,
enrichReportCount=0,
serviceAreaCount=0,
geocodeCount=0):
params = {
"f" : "json",
"tileStorage": tileStorage,
"fileStorage": fileStorage,
"featureStorage": featureStorage,
"generatedTileCount": generatedTileCount,
"loadedTileCount":loadedTileCount,
"enrichVariableCount": enrichVariableCount,
"enrichReportCount" : enrichReportCount,
"serviceAreaCount" : serviceAreaCount,
"geocodeCount" : geocodeCount
}
url = self._url + "/cost"
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
|
returns the cost values for a given portal
Inputs:
tileStorage - int - number of tiles to store in MBs
fileStorage - int - size of file to store in MBs
featureStorage - int - size in MBs
generateTileCount - int - number of tiles to generate on site
loadedTileCount - int - cost to host a certain number of tiles
enrichVariableCount - int - cost to enrich data
enrichReportCount - int - cost to generate an enrichment report
serviceAreaCount - int - cost to generate x number of service
areas
geocodeCount - int - cost to generate x number of addresses
|
25,962 |
def main():
config = {
"api": {
"services": [
{
"name": "my_api",
"testkey": "testval",
},
],
"calls": {
"hello_world": {
"delay": 5,
"priority": 1,
"arguments": None,
},
"marco": {
"delay": 1,
"priority": 1,
},
"pollo": {
"delay": 1,
"priority": 1,
},
}
}
}
app = AppBuilder([MyAPI], Strategy(Print()), AppConf(config))
app.run()
|
MAIN
|
25,963 |
def percent_point(self, y, V):
self.check_fit()
if self.theta < 0:
return V
else:
a = np.power(y, self.theta / (-1 - self.theta))
b = np.power(V, self.theta)
u = np.power((a + b - 1) / b, -1 / self.theta)
return u
|
Compute the inverse of conditional cumulative distribution :math:`C(u|v)^-1`
Args:
y: `np.ndarray` value of :math:`C(u|v)`.
v: `np.ndarray` given value of v.
|
25,964 |
def match_box_with_gt(self, boxes, iou_threshold):
if self.is_training:
with tf.name_scope('match_box_with_gt_{}'.format(iou_threshold)):
iou = pairwise_iou(boxes, self.gt_boxes)
max_iou_per_box = tf.reduce_max(iou, axis=1)
best_iou_ind = tf.argmax(iou, axis=1)
labels_per_box = tf.gather(self.gt_labels, best_iou_ind)
fg_mask = max_iou_per_box >= iou_threshold
fg_inds_wrt_gt = tf.boolean_mask(best_iou_ind, fg_mask)
labels_per_box = tf.stop_gradient(labels_per_box * tf.cast(fg_mask, tf.int64))
return BoxProposals(boxes, labels_per_box, fg_inds_wrt_gt)
else:
return BoxProposals(boxes)
|
Args:
boxes: Nx4
Returns:
BoxProposals
|
25,965 |
def custom_callback(self, view_func):
@wraps(view_func)
def decorated(*args, **kwargs):
plainreturn, data = self._process_callback()
if plainreturn:
return data
else:
return view_func(data, *args, **kwargs)
self._custom_callback = decorated
return decorated
|
Wrapper function to use a custom callback.
The custom OIDC callback will get the custom state field passed in with
redirect_to_auth_server.
|
25,966 |
def metainfo_to_protobuf(self) -> bytes:
card = cardtransferproto()
card.version = self.version
card.amount.extend(self.amount)
card.number_of_decimals = self.number_of_decimals
if self.asset_specific_data:
if not isinstance(self.asset_specific_data, bytes):
card.asset_specific_data = self.asset_specific_data.encode()
else:
card.asset_specific_data = self.asset_specific_data
if card.ByteSize() > net_query(self.network).op_return_max_bytes:
raise OverSizeOPReturn(
'Metainfo size exceeds maximum of {max} bytes.'.format(max=net_query(self.network)
.op_return_max_bytes))
return card.SerializeToString()
|
encode card_transfer info to protobuf
|
25,967 |
def _get_resource_params(self, resource, for_update=False):
if isinstance(resource, CollectionResource):
return self._get_collection_params(resource)
if isinstance(resource, ExperimentResource):
return self._get_experiment_params(resource, for_update)
if isinstance(resource, CoordinateFrameResource):
return self._get_coordinate_params(resource, for_update)
if isinstance(resource, ChannelResource):
return self._get_channel_params(resource, for_update)
raise TypeError('resource is not a supported class.')
|
Get dictionary containing all parameters for the given resource.
When getting params for a coordinate frame update, only name and
description are returned because they are the only fields that can
be updated.
Args:
resource (intern.resource.boss.resource.BossResource): A sub-class
whose parameters will be extracted into a dictionary.
for_update (bool): True if params will be used for an update.
Returns:
(dictionary): A dictionary containing the resource's parameters as
required by the Boss API.
Raises:
TypeError if resource is not a supported class.
|
25,968 |
def acquire(self,blocking=True,timeout=None):
if timeout is None:
return self.__lock.acquire(blocking)
else:
endtime = _time() + timeout
delay = 0.0005
while not self.__lock.acquire(False):
remaining = endtime - _time()
if remaining <= 0:
return False
delay = min(delay*2,remaining,0.05)
_sleep(delay)
return True
|
Attempt to acquire this lock.
If the optional argument "blocking" is True and "timeout" is None,
this method blocks until it successfully acquires the lock. If
"blocking" is False, it returns immediately if the lock could not
be acquired. Otherwise, it blocks for at most "timeout" seconds
trying to acquire the lock.
In all cases, this method returns True if the lock was successfully
acquired and False otherwise.
|
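The loop above polls a non-blocking acquire with an exponentially growing sleep capped at 50 ms; a standalone sketch of the same pattern against a plain threading.Lock (modern threading.Lock.acquire accepts a timeout directly, so this is only illustrative):

    import threading
    import time

    def acquire_with_timeout(lock, timeout):
        # Poll a non-blocking acquire, doubling the sleep up to 50 ms, as in the method above.
        endtime = time.monotonic() + timeout
        delay = 0.0005
        while not lock.acquire(False):
            remaining = endtime - time.monotonic()
            if remaining <= 0:
                return False
            delay = min(delay * 2, remaining, 0.05)
            time.sleep(delay)
        return True

    print(acquire_with_timeout(threading.Lock(), timeout=0.1))   # -> True on an uncontended lock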
25,969 |
def get_groupname(taskfileinfo):
element = taskfileinfo.task.element
name = element.name
return name + "_grp"
|
Return a suitable name for a groupname for the given taskfileinfo.
:param taskfileinfo: the taskfile info for the file that needs a group when importing/referencing
:type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo`
:returns: a suitable group name for the taskfile
:rtype: str
:raises: None
|
25,970 |
def __get_live_version(self):
try:
import versiontools
except ImportError:
return None
else:
return str(versiontools.Version.from_expression(self.name))
|
Get a live version string using versiontools
|
25,971 |
def add_nodes(network_id, nodes,**kwargs):
start_time = datetime.datetime.now()
names=[]
for n_i in nodes:
if n_i.name in names:
raise HydraError("Duplicate Node Name: %s"%(n_i.name))
names.append(n_i.name)
user_id = kwargs.get('user_id')
try:
net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
net_i.check_write_permission(user_id)
except NoResultFound:
raise ResourceNotFoundError("Network %s not found"%(network_id))
_add_nodes_to_database(net_i, nodes)
net_i.project_id=net_i.project_id
db.DBSession.flush()
node_s = db.DBSession.query(Node).filter(Node.network_id==network_id).all()
node_id_map = dict()
iface_nodes = dict()
for n_i in node_s:
iface_nodes[n_i.name] = n_i
for node in nodes:
node_id_map[node.id] = iface_nodes[node.name]
_bulk_add_resource_attrs(network_id, 'NODE', nodes, iface_nodes)
log.info("Nodes added in %s", get_timing(start_time))
return node_s
|
Add nodes to network
|
25,972 |
def get_voms_proxy_user():
out = _voms_proxy_info(["--identity"])[1].strip()
try:
return re.match(r".*\/CN\=([^\/]+).*", out.strip()).group(1)
except:
raise Exception("no valid identity found in voms proxy: {}".format(out))
|
Returns the owner of the voms proxy.
|
25,973 |
def encode(self, word, max_length=4, zero_pad=True):
name = unicode_normalize(, text_type(word.upper()))
name = name.replace(, )
if max_length != -1:
max_length = min(max(4, max_length), 64)
else:
max_length = 64
name_code = last =
while name[-1:] == :
name = name[:-1]
if name[:2] == :
name = + name[2:]
elif name[:2] == :
name = + name[2:]
elif name[:2] == :
name = + name[2:]
if name:
if name[0] == :
name = name[1:]
if name:
if name[0] in self._uc_vy_set:
name = + name[1:]
elif name[0] in {, }:
name = + name[1:]
elif name[0] in {, }:
name = + name[1:]
elif name[0] in {, , }:
name = + name[1:]
elif name[0] in {, }:
name = + name[1:]
elif name[0] in {, }:
name = + name[1:]
name_code = last = name[0]
for i in range(1, len(name)):
code =
if name[i] in {, , , }:
code =
elif name[i] in {, , , , , , , }:
code =
elif name[i] in {, }:
if name[i + 1 : i + 2] != :
code =
elif name[i] == :
if name[i + 1 : i + 2] in self._uc_vy_set or i + 1 == len(
name
):
code =
elif name[i] in {, }:
if name[i + 1 : i + 2] in {, }:
name = name[: i + 1] + name[i] + name[i + 2 :]
code =
elif name[i] == :
if name[i + 1 : i + 2] in self._uc_vy_set or i + 1 == len(
name
):
code =
if code != last and code != and i != 0:
name_code += code
last = name_code[-1]
if zero_pad:
name_code += * max_length
if not name_code:
name_code =
return name_code[:max_length]
|
Return the Phonex code for a word.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to 4)
zero_pad : bool
Pad the end of the return value with 0s to achieve a max_length
string
Returns
-------
str
The Phonex value
Examples
--------
>>> pe = Phonex()
>>> pe.encode('Christopher')
'C623'
>>> pe.encode('Niall')
'N400'
>>> pe.encode('Schmidt')
'S253'
>>> pe.encode('Smith')
'S530'
|
25,974 |
def _get_adjustment(mag, year, mmin, completeness_year, t_f, mag_inc=0.1):
if len(completeness_year) == 1:
if (mag >= mmin) and (year >= completeness_year[0]):
return 1.0
else:
return False
kval = int(((mag - mmin) / mag_inc)) + 1
if (kval >= 1) and (year >= completeness_year[kval - 1]):
return t_f
else:
return False
|
If the magnitude is greater than the minimum in the completeness table
and the year is greater than the corresponding completeness year then
return the Weichert factor
:param float mag:
Magnitude of an earthquake
:param float year:
Year of earthquake
:param np.ndarray completeness_table:
Completeness table
:param float mag_inc:
Magnitude increment
:param float t_f:
Weichert adjustment factor
:returns:
Weichert adjustment factor is event is in complete part of catalogue
(0.0 otherwise)
|
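A worked call with made-up numbers, assuming the function above is in scope: for mmin = 4.0 and mag_inc = 0.1, an M 4.25 event falls in bin kval = 3, whose completeness year (1950 here) it post-dates, so the Weichert factor t_f is returned.

    completeness_year = [1990.0, 1970.0, 1950.0]   # hypothetical completeness years per magnitude bin
    print(_get_adjustment(mag=4.25, year=1975, mmin=4.0,
                          completeness_year=completeness_year, t_f=0.5))   # -> 0.5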
25,975 |
def decouple(fn):
def fst(*args, **kwargs):
return fn(*args, **kwargs)[0]
def snd(*args, **kwargs):
return fn(*args, **kwargs)[1]
return fst, snd
|
Inverse operation of couple.
Create two functions of one argument and one return from a function that
takes two arguments and has two returns
Examples
--------
>>> h = lambda x: (2*x**3, 6*x**2)
>>> f, g = decouple(h)
>>> f(5)
250
>>> g(5)
150
|
25,976 |
def getCenter(self):
(left, top), (right, bottom) = self.getCoords()
x = left + (right - left) / 2
y = top + (bottom - top) / 2
return (x, y)
|
Gets the center coords of the View
@author: U{Dean Morin <https://github.com/deanmorin>}
|
25,977 |
def indices_within_times(times, start, end):
start, end = segments_to_start_end(start_end_to_segments(start, end).coalesce())
tsort = times.argsort()
times_sorted = times[tsort]
left = numpy.searchsorted(times_sorted, start)
right = numpy.searchsorted(times_sorted, end)
if len(left) == 0:
return numpy.array([], dtype=numpy.uint32)
return tsort[numpy.hstack(numpy.r_[s:e] for s, e in zip(left, right))]
|
Return an index array into times that lie within the durations defined by start end arrays
Parameters
----------
times: numpy.ndarray
Array of times
start: numpy.ndarray
Array of duration start times
end: numpy.ndarray
Array of duration end times
Returns
-------
indices: numpy.ndarray
Array of indices into times
|
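A small usage sketch (assuming numpy and the segment helpers used above, e.g. from pycbc, are importable): select the times falling inside two disjoint intervals.

    import numpy

    times = numpy.array([1.0, 3.5, 7.2, 9.9, 12.0])
    start = numpy.array([0.0, 9.0])
    end = numpy.array([4.0, 11.0])
    idx = indices_within_times(times, start, end)
    print(times[idx])   # -> [1.  3.5 9.9]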
25,978 |
def update_continuously(records, update_interval=600):
while True:
for record in records:
try:
record.update()
except (ApiError, RequestException):
pass
time.sleep(update_interval)
|
Update `records` every `update_interval` seconds
|
25,979 |
def simple_search(self, *keywords):
matches = []
keywords = [kw.lower() for kw in keywords]
logger.verbose(
"Performing simple search on %s (%s) ..",
pluralize(len(keywords), "keyword"),
concatenate(map(repr, keywords)),
)
for entry in self.filtered_entries:
normalized = entry.name.lower()
if all(kw in normalized for kw in keywords):
matches.append(entry)
logger.log(
logging.INFO if matches else logging.VERBOSE,
"Matched %s using simple search.",
pluralize(len(matches), "password"),
)
return matches
|
Perform a simple search for case insensitive substring matches.
:param keywords: The string(s) to search for.
:returns: The matched password names (a generator of strings).
Only passwords whose names matches *all* of the given keywords are
returned.
|
25,980 |
def get_name(tags_or_instance_or_id):
ec2 = get_ec2_resource()
if hasattr(tags_or_instance_or_id, 'tags'):
tags = tags_or_instance_or_id.tags
elif isinstance(tags_or_instance_or_id, str):
tags = ec2.Instance(tags_or_instance_or_id).tags
elif tags_or_instance_or_id is None:
return EMPTY_NAME
else:
assert isinstance(tags_or_instance_or_id,
Iterable), "expected iterable of tags"
tags = tags_or_instance_or_id
if not tags:
return EMPTY_NAME
names = [entry['Value'] for entry in tags if entry['Key'] == 'Name']
if not names:
return EMPTY_NAME
if len(names) > 1:
assert False, "have more than one name: " + str(names)
return names[0]
|
Helper utility to extract name out of tags dictionary or intancce.
[{'Key': 'Name', 'Value': 'nexus'}] -> 'nexus'
Assert fails if there's more than one name.
Returns '' if there's less than one name.
|
25,981 |
def zip_html(self):
zip_fname = os.path.join(BUILD_PATH, , )
if os.path.exists(zip_fname):
os.remove(zip_fname)
dirname = os.path.join(BUILD_PATH, 'html')
fnames = os.listdir(dirname)
os.chdir(dirname)
self._run_os('zip',
zip_fname,
'-r',
'-q',
*fnames)
|
Compress HTML documentation into a zip file.
|
25,982 |
def get_prep_value(self, value):
if isinstance(value, LocalizedValue):
prep_value = LocalizedValue()
for k, v in value.__dict__.items():
if v is None:
prep_value.set(k, '')
else:
prep_value.set(k, six.text_type(v))
return super().get_prep_value(prep_value)
return super().get_prep_value(value)
|
Returns field's value prepared for saving into a database.
|
25,983 |
def _import_next_layer(self, proto, length, error=False):
if proto == 1:
from pcapkit.protocols.link import Ethernet as Protocol
elif proto == 228:
from pcapkit.protocols.internet import IPv4 as Protocol
elif proto == 229:
from pcapkit.protocols.internet import IPv6 as Protocol
else:
from pcapkit.protocols.raw import Raw as Protocol
next_ = Protocol(self._file, length, error=error,
layer=self._exlayer, protocol=self._exproto)
return next_
|
Import next layer extractor.
Positional arguments:
* proto -- str, next layer protocol name
* length -- int, valid (not padding) length
Keyword arguments:
* error -- bool, if function call on error
Returns:
* bool -- flag if extraction of next layer succeeded
* Info -- info of next layer
* ProtoChain -- protocol chain of next layer
* str -- alias of next layer
Protocols:
* Ethernet (data link layer)
* IPv4 (internet layer)
* IPv6 (internet layer)
|
25,984 |
def get_jobs(self):
url_jenkins = urijoin(self.base_url, "api", "json")
response = self.fetch(url_jenkins)
return response.text
|
Retrieve all jobs
|
25,985 |
def _recv_flow(self, method_frame):
self.channel._active = method_frame.args.read_bit()
args = Writer()
args.write_bit(self.channel.active)
self.send_frame(MethodFrame(self.channel_id, 20, 21, args))
if self._flow_control_cb is not None:
self._flow_control_cb()
|
Receive a flow control command from the broker
|
25,986 |
def invalid_example_number(region_code):
if not _is_valid_region_code(region_code):
return None
metadata = PhoneMetadata.metadata_for_region(region_code.upper())
desc = _number_desc_by_type(metadata, PhoneNumberType.FIXED_LINE)
if desc is None or desc.example_number is None:
pass
phone_number_length -= 1
return None
|
Gets an invalid number for the specified region.
This is useful for unit-testing purposes, where you want to test what
will happen with an invalid number. Note that the number that is
returned will always be able to be parsed and will have the correct
country code. It may also be a valid *short* number/code for this
region. Validity checking such numbers is handled with shortnumberinfo.
Arguments:
region_code -- The region for which an example number is needed.
Returns an invalid number for the specified region. Returns None when an
unsupported region or the region 001 (Earth) is passed in.
|
25,987 |
def refresh(path=None):
global GIT_OK
GIT_OK = False
if not Git.refresh(path=path):
return
if not FetchInfo.refresh():
return
GIT_OK = True
|
Convenience method for setting the git executable path.
|
25,988 |
def from_value(self, value):
if value is None:
sql_type = NVarCharType(size=1)
else:
sql_type = self._from_class_value(value, type(value))
return sql_type
|
Function infers TDS type from Python value.
:param value: value from which to infer TDS type
:return: An instance of subclass of :class:`BaseType`
|
25,989 |
def create(cls, issue_id, *, properties=None, auto_commit=False):
if cls.get(issue_id):
raise IssueException(.format(issue_id))
res = Issue()
res.issue_id = issue_id
res.issue_type_id = IssueType.get(cls.issue_type).issue_type_id
if properties:
for name, value in properties.items():
prop = IssueProperty()
prop.issue_id = res.issue_id
prop.name = name
prop.value = value.isoformat() if type(value) == datetime else value
res.properties.append(prop)
db.session.add(prop)
db.session.add(res)
if auto_commit:
db.session.commit()
return cls.get(res.issue_id)
|
Creates a new Issue object with the properties and tags provided
Attributes:
issue_id (str): Unique identifier for the issue object
account (:obj:`Account`): Account which owns the issue
properties (dict): Dictionary of properties for the issue object.
|
25,990 |
def set_image(self):
if not self.image:
scrape_image(self)
if not self.image:
contributors = self.get_primary_contributors()
if contributors:
self.image = contributors[0].image
self.save(set_image=False)
if not self.image:
filename = settings.STATIC_ROOT +
if os.path.exists(filename):
image = File(
open(filename, 'rb')
)
image.name =
self.image = image
self.save(set_image=False)
|
This code must be in its own method since the fetch functions need
credits to be set. m2m fields are not yet set at the end of either the
save method or post_save signal.
|
25,991 |
def clear_output(self, stdout=True, stderr=True, other=True):
if stdout:
print('\033[2K\r', file=io.stdout, end='')
io.stdout.flush()
if stderr:
print('\033[2K\r', file=io.stderr, end='')
io.stderr.flush()
|
Clear the output of the cell receiving output.
|
25,992 |
def setencoding():
encoding = "ascii"
if 0:
import locale
loc = locale.getdefaultlocale()
if loc[1]:
encoding = loc[1]
if 0:
encoding = "undefined"
if encoding != "ascii":
sys.setdefaultencoding(encoding)
|
Set the string encoding used by the Unicode implementation. The
default is 'ascii', but if you're willing to experiment, you can
change this.
|
25,993 |
def __minimum_noiseless_description_length(self, clusters, centers):
scores = float('inf')
W = 0.0
K = len(clusters)
N = 0.0
sigma_sqrt = 0.0
alpha = 0.9
betta = 0.9
for index_cluster in range(0, len(clusters), 1):
Ni = len(clusters[index_cluster])
if Ni == 0:
return float('inf')
Wi = 0.0
for index_object in clusters[index_cluster]:
Wi += euclidean_distance(self.__pointer_data[index_object], centers[index_cluster])
sigma_sqrt += Wi
W += Wi / Ni
N += Ni
if N - K > 0:
sigma_sqrt /= (N - K)
sigma = sigma_sqrt ** 0.5
Kw = (1.0 - K / N) * sigma_sqrt
Ks = ( 2.0 * alpha * sigma / (N ** 0.5) ) * ( (alpha ** 2.0) * sigma_sqrt / N + W - Kw / 2.0 ) ** 0.5
scores = sigma_sqrt * (2 * K)**0.5 * ((2 * K)**0.5 + betta) / N + W - sigma_sqrt + Ks + 2 * alpha**0.5 * sigma_sqrt / N
return scores
|
!
@brief Calculates splitting criterion for input clusters using minimum noiseless description length criterion.
@param[in] clusters (list): Clusters for which splitting criterion should be calculated.
@param[in] centers (list): Centers of the clusters.
@return (double) Returns splitting criterion in line with bayesian information criterion.
Low value of splitting cretion means that current structure is much better.
@see __bayesian_information_criterion(clusters, centers)
|
25,994 |
def outline_segments(self, mask_background=False):
from scipy.ndimage import grey_erosion, grey_dilation
selem = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
eroded = grey_erosion(self.data, footprint=selem, mode='constant',
cval=0.)
dilated = grey_dilation(self.data, footprint=selem, mode='constant',
cval=0.)
outlines = ((dilated != eroded) & (self.data != 0)).astype(int)
outlines *= self.data
if mask_background:
outlines = np.ma.masked_where(outlines == 0, outlines)
return outlines
|
Outline the labeled segments.
The "outlines" represent the pixels *just inside* the segments,
leaving the background pixels unmodified.
Parameters
----------
mask_background : bool, optional
Set to `True` to mask the background pixels (labels = 0) in
the returned image. This is useful for overplotting the
segment outlines on an image. The default is `False`.
Returns
-------
boundaries : 2D `~numpy.ndarray` or `~numpy.ma.MaskedArray`
An image with the same shape of the segmentation image
containing only the outlines of the labeled segments. The
pixel values in the outlines correspond to the labels in the
segmentation image. If ``mask_background`` is `True`, then
a `~numpy.ma.MaskedArray` is returned.
Examples
--------
>>> from photutils import SegmentationImage
>>> segm = SegmentationImage([[0, 0, 0, 0, 0, 0],
... [0, 2, 2, 2, 2, 0],
... [0, 2, 2, 2, 2, 0],
... [0, 2, 2, 2, 2, 0],
... [0, 2, 2, 2, 2, 0],
... [0, 0, 0, 0, 0, 0]])
>>> segm.outline_segments()
array([[0, 0, 0, 0, 0, 0],
[0, 2, 2, 2, 2, 0],
[0, 2, 0, 0, 2, 0],
[0, 2, 0, 0, 2, 0],
[0, 2, 2, 2, 2, 0],
[0, 0, 0, 0, 0, 0]])
|
25,995 |
def _uri(self, url):
if url and not url.startswith('/'):
return url
uri = "{0}://{1}{2}{3}".format(
self._protocol,
self.real_connection.host,
self._port_postfix(),
url,
)
return uri
|
Returns request absolute URI
|
25,996 |
async def founder(self, root):
nation = root.find('FOUNDER').text
if nation == '0':
return None
return aionationstates.Nation(nation)
|
Regional Founder. Returned even if the nation has ceased to
exist.
Returns
-------
an :class:`ApiQuery` of :class:`Nation`
an :class:`ApiQuery` of None
If the region is Game-Created and doesn't have a founder.
|
25,997 |
def set_sqlite_pragmas(self):
def _pragmas_on_connect(dbapi_con, con_record):
dbapi_con.execute("PRAGMA journal_mode = WAL;")
event.listen(self.engine, "connect", _pragmas_on_connect)
|
Sets the connection PRAGMAs for the sqlalchemy engine stored in self.engine.
It currently sets:
- journal_mode to WAL
:return: None
|
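A self-contained sketch of the same listener pattern with SQLAlchemy (the engine URL is a placeholder): attach the PRAGMA on every new connection and verify the journal mode.

    from sqlalchemy import create_engine, event, text

    engine = create_engine("sqlite:///example.db")   # placeholder database path

    @event.listens_for(engine, "connect")
    def _pragmas_on_connect(dbapi_con, con_record):
        dbapi_con.execute("PRAGMA journal_mode = WAL;")

    with engine.connect() as conn:
        print(conn.execute(text("PRAGMA journal_mode;")).scalar())   # -> 'wal'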
25,998 |
def _determineVolumeSize(self, source_type, source_uuid):
nova = self.novaclient
if source_type == 'image':
image = nova.images.get(source_uuid)
if hasattr(image, 'OS-EXT-IMG-SIZE:size'):
size = getattr(image, 'OS-EXT-IMG-SIZE:size')
size_gb = int(math.ceil(size / 1024.0**3))
return size_gb
elif source_type == 'volume':
volume = nova.volumes.get(source_uuid)
return volume.size
elif source_type == 'snapshot':
snap = nova.volume_snapshots.get(source_uuid)
return snap.size
else:
unknown_source = ("The source type '%s' for UUID '%s' is"
" unknown" % (source_type, source_uuid))
raise ValueError(unknown_source)
|
Determine the minimum size the volume needs to be for the source.
Returns the size in GiB.
|
25,999 |
def experiment_data(self, commit=None, must_contain_results=False):
results = {}
for tag in self.__repository.tags:
if not tag.name.startswith(self.__tag_prefix):
continue
data = json.loads(tag.tag.message)
if "results" not in data and must_contain_results:
continue
if commit is not None and tag.tag.object.hexsha != name_to_object(self.__repository, commit).hexsha:
continue
results[tag.name] = data
return results
|
:param commit: the commit that all the experiments should have happened or None to include all
:type commit: str
:param must_contain_results: include only tags that contain results
:type must_contain_results: bool
:return: all the experiment data
:rtype: dict
|