Unnamed: 0
int64 0
389k
| code
stringlengths 26
79.6k
| docstring
stringlengths 1
46.9k
|
---|---|---|
15,000 | def chunks_str(str, n, separator="\n", fill_blanks_last=True):
return separator.join(chunks(str, n)) | returns lines with max n characters
:Example:
>>> print (chunks_str('123456X', 3))
123
456
X |
15,001 | def ifilter(self, recursive=True, matches=None, flags=FLAGS,
forcetype=None):
gen = self._indexed_ifilter(recursive, matches, flags, forcetype)
return (node for i, node in gen) | Iterate over nodes in our list matching certain conditions.
If *forcetype* is given, only nodes that are instances of this type (or
tuple of types) are yielded. Setting *recursive* to ``True`` will
iterate over all children and their descendants. ``RECURSE_OTHERS``
will only iterate over children that are not the instances of
*forcetype*. ``False`` will only iterate over immediate children.
``RECURSE_OTHERS`` can be used to iterate over all un-nested templates,
even if they are inside of HTML tags, like so:
>>> code = mwparserfromhell.parse("{{foo}}<b>{{foo|{{bar}}}}</b>")
>>> code.filter_templates(code.RECURSE_OTHERS)
["{{foo}}", "{{foo|{{bar}}}}"]
*matches* can be used to further restrict the nodes, either as a
function (taking a single :class:`.Node` and returning a boolean) or a
regular expression (matched against the node's string representation
with :func:`re.search`). If *matches* is a regex, the flags passed to
:func:`re.search` are :const:`re.IGNORECASE`, :const:`re.DOTALL`, and
:const:`re.UNICODE`, but custom flags can be specified by passing
*flags*. |
15,002 | def _sampleLocationOnDisc(self, top=None):
if top is None:
z = random.choice([-1, 1]) * self.height / 2.
else:
z = self.height / 2. if top else - self.height / 2.
sampledAngle = 2 * random.random() * pi
sampledRadius = self.radius * sqrt(random.random())
x, y = sampledRadius * cos(sampledAngle), sampledRadius * sin(sampledAngle)
return [x, y, z] | Helper method to sample from the top and bottom discs of a cylinder.
If top is set to True, samples only from top disc. If top is set to False,
samples only from bottom disc. If not set (defaults to None), samples from
both discs. |
15,003 | def save(self, overwrite=True):
self.save_popset(overwrite=overwrite)
self.save_signal() | Saves PopulationSet and TransitSignal.
Shouldn't need to use this if you're using
:func:`FPPCalculation.from_ini`.
Saves :class`PopulationSet` to ``[folder]/popset.h5]``
and :class:`TransitSignal` to ``[folder]/trsig.pkl``.
:param overwrite: (optional)
Whether to overwrite existing files. |
15,004 | def getWhatIf(number):
archive = getWhatIfArchive()
latest = getLatestWhatIfNum(archive)
if type(number) is str and number.isdigit():
number = int(number)
if number > latest or latest <= 0:
return None
return archive[number] | Returns a :class:`WhatIf` object corresponding to the What If article of
index passed to the function. If the index is less than zero or
greater than the maximum number of articles published thus far,
None is returned instead.
Like all the routines for handling What If articles, :func:`getWhatIfArchive`
is called first in order to establish a list of all previously published
What Ifs.
Arguments:
number: an integer or string that represents a number, this is the index of article to retrieve.
Returns the resulting :class:`WhatIf` object. |
15,005 | def input(self, *args, **kwargs):
errors = []
if args and self.arg_names:
args = list(args)
for i, (key, val) in enumerate(izip(self.arg_names, args)):
try:
args[i] = self._adapt_param(key, val)
except AnticipateParamError as e:
errors.append(e)
args = tuple(args)
if kwargs and self.params:
for key, val in kwargs.items():
try:
kwargs[key] = self._adapt_param(key, val)
except AnticipateParamError as e:
errors.append(e)
if errors:
raise AnticipateErrors(
message= % self.func,
errors=errors)
return args, kwargs | Adapt the input and check for errors.
Returns a tuple of adapted (args, kwargs) or raises
AnticipateErrors |
15,006 | def set_attribute(self, obj, attr, value):
if isinstance(obj, MutableMapping):
obj[attr] = value
else:
setattr(obj, attr, value) | Set value of attribute in given object instance.
Reason for existence of this method is the fact that 'attribute' can
be also a object's key if it is a dict or any other kind of mapping.
Args:
obj (object): object instance to modify
attr (str): attribute (or key) to change
value: value to set |
15,007 | def add(self, relation):
cached = _CachedRelation(relation)
logger.debug(.format(cached))
logger.debug(.format(
pprint.pformat(self.dump_graph()))
)
with self.lock:
self._setdefault(cached)
logger.debug(.format(
pprint.pformat(self.dump_graph()))
) | Add the relation inner to the cache, under the schema schema and
identifier identifier
:param BaseRelation relation: The underlying relation. |
15,008 | def get_parsed_data(fn, *args, **kwargs):
file_format = detect_format(fn, *args, **kwargs)
data = get_header(fn, file_format, *args, **kwargs)
parsed_data = parse_header(data, *args, **kwargs)
return parsed_data | All above functions as a single function
:param str fn: file name
:return list parsed_data: structured metadata |
15,009 | def listBlockChildren(self, block_name=""):
if (not block_name) or re.search("[,]", block_name):
dbsExceptionHandler("dbsException-invalid-input", "DBSBlock/listBlockChildren. Block_name must be provided." )
conn = self.dbi.connection()
try:
results = self.blockchildlist.execute(conn, block_name)
return results
finally:
if conn:
conn.close() | list parents of a block |
15,010 | def _get_job_status(line):
try:
args = line.strip().split()
job_name = args[0]
job = None
if job_name in _local_jobs:
job = _local_jobs[job_name]
else:
raise Exception( % job_name)
if job is not None:
error = if job.fatal_error is None else str(job.fatal_error)
data = {: True, : job.is_complete, : error}
else:
data = {: False}
except Exception as e:
google.datalab.utils.print_exception_with_last_stack(e)
data = {: True, : str(e)}
return IPython.core.display.JSON(data) | magic used as an endpoint for client to get job status.
%_get_job_status <name>
Returns:
A JSON object of the job status. |
15,011 | def _get_github(self):
from github import Github
vms("Querying github with user .".format(self.username))
g = Github(self.username, self.apikey)
self._user = g.get_user()
if self._user is None:
raise ValueError("Can{}{}{}{}'.".format(self._repo.full_name), 2)
break | Creates an instance of github.Github to interact with the repos via the
API interface in pygithub. |
15,012 | def update(self, _values=None, **values):
if _values is not None:
values.update(_values)
values = OrderedDict(sorted(values.items()))
bindings = list(values.values()) + self.get_bindings()
sql = self._grammar.compile_update(self, values)
return self._connection.update(sql, self._clean_bindings(bindings)) | Update a record in the database
:param values: The values of the update
:type values: dict
:return: The number of records affected
:rtype: int |
15,013 | def create(self, data, **kwargs):
self._check_missing_create_attrs(data)
server_data = self.gitlab.http_post(self.path, post_data=data,
**kwargs)
source_issue = ProjectIssue(self._parent.manager,
server_data[])
target_issue = ProjectIssue(self._parent.manager,
server_data[])
return source_issue, target_issue | Create a new object.
Args:
data (dict): parameters to send to the server to create the
resource
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
RESTObject, RESTObject: The source and target issues
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabCreateError: If the server cannot perform the request |
15,014 | def list_functions(region=None, key=None, keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
ret = []
for funcs in __utils__[](conn.list_functions):
ret += funcs[]
return ret | List all Lambda functions visible in the current scope.
CLI Example:
.. code-block:: bash
salt myminion boto_lambda.list_functions |
15,015 | def smooth_angle_channels(self, channels):
for vertex in self.vertices:
for col in vertex.meta[]:
if col:
for k in range(1, channels.shape[0]):
diff=channels[k, col]-channels[k-1, col]
if abs(diff+360.)<abs(diff):
channels[k:, col]=channels[k:, col]+360.
elif abs(diff-360.)<abs(diff):
channels[k:, col]=channels[k:, col]-360. | Remove discontinuities in angle channels so that they don't cause artifacts in algorithms that rely on the smoothness of the functions. |
15,016 | def rates(ctx, opts):
click.echo("Retrieving rate limits ... ", nl=False)
context_msg = "Failed to retrieve status!"
with handle_api_exceptions(ctx, opts=opts, context_msg=context_msg):
with maybe_spinner(opts):
resources_limits = get_rate_limits()
click.secho("OK", fg="green")
headers = ["Resource", "Throttled", "Remaining", "Interval (Seconds)", "Reset"]
rows = []
for resource, limits in six.iteritems(resources_limits):
rows.append(
[
click.style(resource, fg="cyan"),
click.style(
"Yes" if limits.throttled else "No",
fg="red" if limits.throttled else "green",
),
"%(remaining)s/%(limit)s"
% {
"remaining": click.style(
six.text_type(limits.remaining), fg="yellow"
),
"limit": click.style(six.text_type(limits.limit), fg="yellow"),
},
click.style(six.text_type(limits.interval), fg="blue"),
click.style(six.text_type(limits.reset), fg="magenta"),
]
)
if resources_limits:
click.echo()
utils.pretty_print_table(headers, rows)
click.echo()
num_results = len(resources_limits)
list_suffix = "resource%s" % ("s" if num_results != 1 else "")
utils.pretty_print_list_info(num_results=num_results, suffix=list_suffix) | Check current API rate limits. |
15,017 | def _prompt(pre_prompt, items, post_prompt, default, indexed, stream):
select one [{}]:
if default is not None:
if in pre_prompt:
pre_prompt = pre_prompt.format(default)
if in post_prompt:
post_prompt = post_prompt.format(default)
item_format = "{indent}{item}"
if indexed:
item_format = "{indent}[{index}] {item}"
item_text_list = []
indent =
for index, item in enumerate(items):
item_text =
components = {
: indent,
: item
}
if indexed:
components[] = index
item_text = item_format.format(**components)
item_text_list.append(item_text)
menu_parts = [pre_prompt] + item_text_list
full_menu = .join(menu_parts) +
stream.write(full_menu)
stream.flush()
get_input = input
try:
get_input = raw_input
except NameError:
pass
response = get_input(post_prompt)
return response | Prompt once.
If you want the default displayed, put a format {} into the
post_prompt string (like 'select one [{}]: ') |
15,018 | def snake_to_camel(value):
camel = "".join(word.title() for word in value.split("_"))
return value[:1].lower() + camel[1:] | Converts a snake_case_string to a camelCaseString.
>>> snake_to_camel("foo_bar_baz")
'fooBarBaz' |
15,019 | def appendComponent(self, baseGlyph=None, offset=None, scale=None, component=None):
identifier = None
sxy = 0
syx = 0
if component is not None:
component = normalizers.normalizeComponent(component)
if baseGlyph is None:
baseGlyph = component.baseGlyph
sx, sxy, syx, sy, ox, oy = component.transformation
if offset is None:
offset = (ox, oy)
if scale is None:
scale = (sx, sy)
if baseGlyph is None:
baseGlyph = component.baseGlyph
if component.identifier is not None:
existing = set([c.identifier for c in self.components if c.identifier is not None])
if component.identifier not in existing:
identifier = component.identifier
baseGlyph = normalizers.normalizeGlyphName(baseGlyph)
if self.name == baseGlyph:
raise FontPartsError(("A glyph cannot contain a component referencing itself."))
if offset is None:
offset = (0, 0)
if scale is None:
scale = (1, 1)
offset = normalizers.normalizeTransformationOffset(offset)
scale = normalizers.normalizeTransformationScale(scale)
ox, oy = offset
sx, sy = scale
transformation = (sx, sxy, syx, sy, ox, oy)
identifier = normalizers.normalizeIdentifier(identifier)
return self._appendComponent(baseGlyph, transformation=transformation, identifier=identifier) | Append a component to this glyph.
>>> component = glyph.appendComponent("A")
This will return a :class:`BaseComponent` object representing
the new component in the glyph. ``offset`` indicates the x and
y shift values that should be applied to the appended component.
It must be a :ref:`type-coordinate` value or ``None``. If
``None`` is given, the offset will be ``(0, 0)``.
>>> component = glyph.appendComponent("A", offset=(10, 20))
``scale`` indicates the x and y scale values that should be
applied to the appended component. It must be a
:ref:`type-scale` value or ``None``. If ``None`` is given,
the scale will be ``(1.0, 1.0)``.
>>> component = glyph.appendComponent("A", scale=(1.0, 2.0))
``component`` may be a :class:`BaseComponent` object from which
attribute values will be copied. If ``baseGlyph``, ``offset``
or ``scale`` are specified as arguments, those values will be used
instead of the values in the given component object. |
15,020 | def get_hosting_device_plugging_driver(self, context, id):
if id is None:
return
try:
return self._plugging_drivers[id]
except KeyError:
try:
template = self._get_hosting_device_template(context, id)
self._plugging_drivers[id] = importutils.import_object(
template[])
except (ImportError, TypeError, n_exc.NeutronException):
LOG.exception("Error loading plugging driver for hosting "
"device template %s", id)
return self._plugging_drivers.get(id) | Returns plugging driver for hosting device template with <id>. |
15,021 | def group(requestContext, *seriesLists):
seriesGroup = []
for s in seriesLists:
seriesGroup.extend(s)
return seriesGroup | Takes an arbitrary number of seriesLists and adds them to a single
seriesList. This is used to pass multiple seriesLists to a function which
only takes one. |
15,022 | def get_all_rules(cls):
"Load all available Adblock rules."
from adblockparser import AdblockRules
raw_rules = []
for url in [
config.ADBLOCK_EASYLIST_URL, config.ADBLOCK_EXTRALIST_URL]:
raw_rules.extend(cls.load_raw_rules(url))
rules = AdblockRules(raw_rules)
return rules | Load all available Adblock rules. |
15,023 | def _get_network(self, kind, router=True, vlans=True, vlan_ids=True):
network = {}
macs = self.get( % kind)
network[] = macs
if len(macs) == 0:
return network
if router:
network[] = self.get(, macs[0])
if vlans:
network[] = self.get(, macs[0])
if vlan_ids:
network[] = self.get(, macs[0])
return network | Wrapper for getting details about networks.
:param string kind: network kind. Typically 'public' or 'private'
:param boolean router: flag to include router information
:param boolean vlans: flag to include vlan information
:param boolean vlan_ids: flag to include vlan_ids |
15,024 | def process(self, request):
self.check(request)
if not self.visible:
return
if callable(self.title):
self.title = self.title(request)
if self.slug is None:
if hide_empty and len(self.children) == 0:
self.visible = False
return
curitem = None
for item in self.children:
item.selected = False
if item.match_url(request):
if curitem is None or len(curitem.url) < len(item.url):
curitem = item
if curitem is not None:
curitem.selected = True | process determines if this item should visible, if its selected, etc... |
15,025 | def check_connection (self):
nntpserver = self.host or self.aggregate.config["nntpserver"]
if not nntpserver:
self.add_warning(
_("No NNTP server was specified, skipping this URL."),
tag=WARN_NNTP_NO_SERVER)
return
nntp = self._connect_nntp(nntpserver)
group = self.urlparts[2]
while group[:1] == :
group = group[1:]
if in group:
number = nntp.stat("<"+group+">")[1]
self.add_info(_() % {"num": number})
else:
group = group.split(, 1)[0]
if group:
name = nntp.group(group)[4]
self.add_info(_("News group %(name)s found.") % {"name": name})
else:
self.add_warning(_("No newsgroup specified in NNTP URL."),
tag=WARN_NNTP_NO_NEWSGROUP) | Connect to NNTP server and try to request the URL article
resource (if specified). |
15,026 | def get_projections_on_elements_and_orbitals(self, el_orb_spec):
result = {}
structure = self.structure
el_orb_spec = {get_el_sp(el): orbs for el, orbs in el_orb_spec.items()}
for spin, v in self.projections.items():
result[spin] = [[{str(e): collections.defaultdict(float)
for e in el_orb_spec}
for i in range(len(self.kpoints))]
for j in range(self.nb_bands)]
for i, j in itertools.product(range(self.nb_bands),
range(len(self.kpoints))):
for key, item in v[i][j].items():
for key2, item2 in item.items():
specie = str(Specie(re.split(r"[0-9]+", key)[0]))
if get_el_sp(str(specie)) in el_orb_spec:
if key2 in el_orb_spec[get_el_sp(str(specie))]:
result[spin][i][j][specie][key2] += item2
return result | Method returning a dictionary of projections on elements and specific
orbitals
Args:
el_orb_spec: A dictionary of Elements and Orbitals for which we want
to have projections on. It is given as: {Element:[orbitals]},
e.g., {'Si':['3s','3p']} or {'Si':['3s','3p_x', '3p_y', '3p_z']} depending on input files
Returns:
A dictionary of projections on elements in the
{Spin.up:[][{Element:{orb:values}}],
Spin.down:[][{Element:{orb:values}}]} format
if there is no projections in the band structure returns an empty
dict. |
15,027 | def build_iiif_file_storage_path(url_path, ik_image, iiif_storage):
storage_path = url_path[1:]
if storage_path.startswith():
storage_path = storage_path[5:]
return storage_path | Return the file storage path for a given IIIF Image API URL path.
NOTE: The returned file storage path includes the given ``Image``
instance's ID to ensure the path is unique and identifiable, and its
modified timestamp to act as a primitive cache-busting mechanism for
when the image is changed but there are pre-existing image conversions.
TODO: Ideally we should use a hash or timestamp for Image's actual
image data changes, not the whole instance which could change but
have same image. |
15,028 | def _contains_yieldpoint(children):
if isinstance(children, dict):
return any(isinstance(i, YieldPoint) for i in children.values())
if isinstance(children, list):
return any(isinstance(i, YieldPoint) for i in children)
return False | Returns True if ``children`` contains any YieldPoints.
``children`` may be a dict or a list, as used by `MultiYieldPoint`
and `multi_future`. |
15,029 | def financials(self, security):
try:
url = % security
try:
page = self._request(url).read()
except UfException as ufExcep:
if Errors.NETWORK_400_ERROR == ufExcep.getCode:
raise UfException(Errors.STOCK_SYMBOL_ERROR, "Can find data for stock %s, security error?" % security)
raise ufExcep
bPage = BeautifulSoup(page)
target = bPage.find(id=)
keyTimeValue = {}
i = 0
while True:
self._parseTarget(target, keyTimeValue)
if i < 5:
i += 1
target = target.nextSibling
if == target:
target = target.nextSibling
else:
break
return keyTimeValue
except BaseException:
raise UfException(Errors.UNKNOWN_ERROR, "Unknown Error in GoogleFinance.getHistoricalPrices %s" % traceback.format_exc()) | get financials:
google finance provide annual and quanter financials, if annual is true, we will use annual data
Up to four lastest year/quanter data will be provided by google
Refer to page as an example: http://www.google.com/finance?q=TSE:CVG&fstype=ii |
15,030 | def get_default_download_dir(self, *subdirs):
path = self.get_config_value(self.CONFIG_NAME_PATH)
if path is None:
return os.getcwd()
return os.path.join(path, *subdirs) | Get the download path for a file. If not defined, return default
from config.
Parameters
==========
subdirs: a single (or list of) subfolders under the basepath |
15,031 | def on_enter(self, command):
if self.profile:
t0 = time()
for _ in range(10):
self.execute_command(command)
self.insert_text(u"\n<Δt>=%dms\n" % (1e2*(time()-t0)))
self.new_prompt(self.interpreter.p1)
else:
self.execute_command(command)
self.__flush_eventqueue() | on_enter |
15,032 | def ignore(self, *ignore_lst: str):
def stream():
for each in ignore_lst:
each = ConstStrPool.cast_to_const(each)
yield id(each), each
self.ignore_lst.update(stream()) | ignore a set of tokens with specific names |
15,033 | def insertPrimaryDataset(self):
try :
body = request.body.read()
indata = cjson.decode(body)
indata = validateJSONInputNoCopy("primds", indata)
indata.update({"creation_date": dbsUtils().getTime(), "create_by": dbsUtils().getCreateBy() })
self.dbsPrimaryDataset.insertPrimaryDataset(indata)
except cjson.DecodeError as dc:
dbsExceptionHandler("dbsException-invalid-input2", "Wrong format/data from insert PrimaryDataset input", self.logger.exception, str(dc))
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
except HTTPError as he:
raise he
except Exception as ex:
sError = "DBSWriterModel/insertPrimaryDataset. %s\n Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler(, dbsExceptionCode[], self.logger.exception, sError) | API to insert A primary dataset in DBS
:param primaryDSObj: primary dataset object
:type primaryDSObj: dict
:key primary_ds_type: TYPE (out of valid types in DBS, MC, DATA) (Required)
:key primary_ds_name: Name of the primary dataset (Required) |
15,034 | def multi_buffering(layer, radii, callback=None):
output_layer_name = buffer_steps[]
processing_step = buffer_steps[]
input_crs = layer.crs()
feature_count = layer.featureCount()
fields = layer.fields()
new_field = create_field_from_definition(hazard_class_field)
fields.append(new_field)
new_field = create_field_from_definition(buffer_distance_field)
fields.append(new_field)
buffered = create_memory_layer(
output_layer_name, QgsWkbTypes.PolygonGeometry, input_crs, fields)
buffered.startEditing()
if layer.crs().authid() == :
center = layer.extent().center()
utm = QgsCoordinateReferenceSystem(
get_utm_epsg(center.x(), center.y(), input_crs))
transform = QgsCoordinateTransform(
layer.crs(), utm, QgsProject.instance())
reverse_transform = QgsCoordinateTransform(
utm, layer.crs(), QgsProject.instance())
else:
transform = None
reverse_transform = None
for i, feature in enumerate(layer.getFeatures()):
geom = QgsGeometry(feature.geometry())
if transform:
geom.transform(transform)
inner_ring = None
for radius in radii:
attributes = feature.attributes()
attributes.append(radii[radius])
attributes.append(radius)
circle = geom.buffer(radius, 30)
if inner_ring:
circle.addRing(inner_ring)
inner_ring = circle.asPolygon()[0]
new_feature = QgsFeature()
if reverse_transform:
circle.transform(reverse_transform)
new_feature.setGeometry(circle)
new_feature.setAttributes(attributes)
buffered.addFeature(new_feature)
if callback:
callback(current=i, maximum=feature_count, step=processing_step)
buffered.commitChanges()
buffered.keywords = layer.keywords
buffered.keywords[] =
buffered.keywords[] = layer_purpose_hazard[]
buffered.keywords[][hazard_class_field[]] = (
hazard_class_field[])
check_layer(buffered)
return buffered | Buffer a vector layer using many buffers (for volcanoes or rivers).
This processing algorithm will keep the original attribute table and
will add a new one for the hazard class name according to
safe.definitions.fields.hazard_value_field.
radii = OrderedDict()
radii[500] = 'high'
radii[1000] = 'medium'
radii[2000] = 'low'
Issue https://github.com/inasafe/inasafe/issues/3185
:param layer: The layer to polygonize.
:type layer: QgsVectorLayer
:param radii: A dictionary of radius.
:type radii: OrderedDict
:param callback: A function to all to indicate progress. The function
should accept params 'current' (int), 'maximum' (int) and 'step' (str).
Defaults to None.
:type callback: function
:return: The buffered vector layer.
:rtype: QgsVectorLayer |
15,035 | def _style_to_xlwt(cls, item, firstlevel=True, field_sep=,
line_sep=):
if hasattr(item, ):
if firstlevel:
it = ["{key}: {val}"
.format(key=key, val=cls._style_to_xlwt(value, False))
for key, value in item.items()]
out = "{sep} ".format(sep=(line_sep).join(it))
return out
else:
it = ["{key} {val}"
.format(key=key, val=cls._style_to_xlwt(value, False))
for key, value in item.items()]
out = "{sep} ".format(sep=(field_sep).join(it))
return out
else:
item = "{item}".format(item=item)
item = item.replace("True", "on")
item = item.replace("False", "off")
return item | helper which recursively generate an xlwt easy style string
for example:
hstyle = {"font": {"bold": True},
"border": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"align": {"horiz": "center"}}
will be converted to
font: bold on; \
border: top thin, right thin, bottom thin, left thin; \
align: horiz center; |
15,036 | def can(self, *args, **kwargs):
if isinstance(self.require, auth.Permission):
return self.require.can()
elif callable(self.require):
return self.require()
elif isinstance(self.require, bool):
return self.require
else:
return True | Overwrite this method to implement custom contextual permissions |
15,037 | def integrity_negotiated(self):
return (
self.flags & C.GSS_C_INTEG_FLAG
) and (
self.established or (self.flags & C.GSS_C_PROT_READY_FLAG)
) | After :meth:`step` has been called, this property will be set to
True if integrity protection (signing) has been negotiated in this context, False
otherwise. If this property is True, you can use :meth:`get_mic` to sign messages with a
message integrity code (MIC), which the peer application can verify. |
15,038 | def iter_fit_shifts(xy,uv,nclip=3,sigma=3.0):
fit = fit_shifts(xy,uv)
if nclip is None: nclip = 0
for n in range(nclip):
resids = compute_resids(xy,uv,fit)
resids1d = np.sqrt(np.power(resids[:,0],2)+np.power(resids[:,1],2))
sig = resids1d.std()
goodpix = resids1d < sigma*sig
xy = xy[goodpix]
uv = uv[goodpix]
fit = fit_shifts(xy,uv)
fit[] = xy
fit[] = uv
return fit | Perform an iterative-fit with 'nclip' iterations |
15,039 | def _checkServer(self, address, port):
import socket
s = socket.socket()
try:
s.connect((address, port))
return True
except socket.error, e:
self.log.warning(
% locals())
return False
return None | *Check that the TCP Port we've decided to use for tunnelling is available* |
15,040 | def update_hit_tally(self):
if not self.quiet:
num_hits = self.amt_services_wrapper.tally_hits()
if self.sandbox:
self.sandbox_hits = num_hits
else:
self.live_hits = num_hits | Tally hits |
15,041 | def superclass(self, klass):
return bool(lib.EnvSuperclassP(self._env, self._cls, klass._cls)) | True if the Class is a superclass of the given one. |
15,042 | def scalar_projection(v1, v2):
return np.dot(v1, v2) / np.linalg.norm(v2) | compute the scalar projection of v1 upon v2
Args:
v1, v2: iterable
indices 0, 1, 2 corresponding to cartesian coordinates
Returns:
3-vector of the projection of point p onto the direction of v |
15,043 | def explain(self, sql=None, sql_args=None):
cursor = self.get_cursor()
if sql is None:
sql = self.get_sql()
sql_args = self.get_args()
elif sql_args is None:
sql_args = {}
cursor.execute(.format(sql), sql_args)
rows = self._fetch_all_as_dict(cursor)
return rows | Runs EXPLAIN on this query
:type sql: str or None
:param sql: The sql to run EXPLAIN on. If None is specified, the query will
use ``self.get_sql()``
:type sql_args: dict or None
:param sql_args: A dictionary of the arguments to be escaped in the query. If None and
sql is None, the query will use ``self.get_args()``
:rtype: list of str
:return: list of each line of output from the EXPLAIN statement |
15,044 | def get_function_args(func, no_self=False, no_varargs=False):
par_dict = signature(func).parameters
pos = lambda x: x.kind == Parameter.VAR_POSITIONAL
kw = lambda x: x.kind == Parameter.VAR_KEYWORD
opts = ["", "*", "**"]
args = [
"{prefix}{arg}".format(prefix=opts[pos(value) + 2 * kw(value)], arg=par)
for par, value in par_dict.items()
]
self_filtered_args = (
args if not args else (args[1 if (args[0] == "self") and no_self else 0 :])
)
pos = lambda x: (len(x) > 1) and (x[0] == "*") and (x[1] != "*")
kw = lambda x: (len(x) > 2) and (x[:2] == "**")
varargs_filtered_args = [
arg
for arg in self_filtered_args
if (not no_varargs) or all([no_varargs, not pos(arg), not kw(arg)])
]
return tuple(varargs_filtered_args) | Return tuple of the function argument names in the order of the function signature.
:param func: Function
:type func: function object
:param no_self: Flag that indicates whether the function argument *self*,
if present, is included in the output (False) or not (True)
:type no_self: boolean
:param no_varargs: Flag that indicates whether keyword arguments are
included in the output (True) or not (False)
:type no_varargs: boolean
:rtype: tuple
For example:
>>> import pexdoc.pinspect
>>> class MyClass(object):
... def __init__(self, value, **kwargs):
... pass
...
>>> pexdoc.pinspect.get_function_args(MyClass.__init__)
('self', 'value', '**kwargs')
>>> pexdoc.pinspect.get_function_args(
... MyClass.__init__, no_self=True
... )
('value', '**kwargs')
>>> pexdoc.pinspect.get_function_args(
... MyClass.__init__, no_self=True, no_varargs=True
... )
('value',)
>>> pexdoc.pinspect.get_function_args(
... MyClass.__init__, no_varargs=True
... )
('self', 'value') |
15,045 | def run_section(self, name, input_func=_stdin_):
print( % name)
section = self.survey[name]
for question in section:
self.run_question(question, input_func) | Run the given section. |
15,046 | def upload_data(job, master_ip, inputs, hdfs_name, upload_name, spark_on_toil):
if mock_mode():
truncate_file(master_ip, hdfs_name, spark_on_toil)
log.info("Uploading output BAM %s to %s.", hdfs_name, upload_name)
call_conductor(job, master_ip, hdfs_name, upload_name, memory=inputs.memory)
remove_file(master_ip, hdfs_name, spark_on_toil) | Upload file hdfsName from hdfs to s3 |
15,047 | def _check_tunnel(self, _srv):
if self.skip_tunnel_checkup:
self.tunnel_is_up[_srv.local_address] = True
return
self.logger.info(.format(_srv.remote_address))
if isinstance(_srv.local_address, string_types):
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
else:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(TUNNEL_TIMEOUT)
try:
connect_to = (, _srv.local_port) \
if _srv.local_host == else _srv.local_address
s.connect(connect_to)
self.tunnel_is_up[_srv.local_address] = _srv.tunnel_ok.get(
timeout=TUNNEL_TIMEOUT * 1.1
)
self.logger.debug(
.format(_srv.remote_address)
)
except socket.error:
self.logger.debug(
.format(_srv.remote_address)
)
self.tunnel_is_up[_srv.local_address] = False
except queue.Empty:
self.logger.debug(
.format(_srv.remote_address)
)
self.tunnel_is_up[_srv.local_address] = True
finally:
s.close() | Check if tunnel is already established |
15,048 | def olympic_sprints(data_set=):
X = np.zeros((0, 2))
Y = np.zeros((0, 1))
cats = {}
for i, dataset in enumerate([olympic_100m_men,
olympic_100m_women,
olympic_200m_men,
olympic_200m_women,
olympic_400m_men,
olympic_400m_women]):
data = dataset()
year = data[]
time = data[]
X = np.vstack((X, np.hstack((year, np.ones_like(year)*i))))
Y = np.vstack((Y, time))
cats[dataset.__name__] = i
data[] = X
data[] = Y
data[] = "Olympics sprint event winning for men and women to 2008. Data is from Rogers and GirolamiXYcovariatesyear%Yeventresponsetimeinfos First Course in Machine Learning.",
: {
0:,
1:,
2:,
3:,
4:,
5:}
}, data_set) | All olympics sprint winning times for multiple output prediction. |
15,049 | def visitAdditionOrSubtractionExpression(self, ctx):
is_add = ctx.PLUS() is not None
arg1 = self.visit(ctx.expression(0))
arg2 = self.visit(ctx.expression(1))
try:
_arg1 = conversions.to_decimal(arg1, self._eval_context)
_arg2 = conversions.to_decimal(arg2, self._eval_context)
return _arg1 + _arg2 if is_add else _arg1 - _arg2
except EvaluationError:
pass
try:
_arg1 = conversions.to_date_or_datetime(arg1, self._eval_context)
if isinstance(arg2, datetime.time):
_arg1 = conversions.to_datetime(_arg1, self._eval_context)
_arg2 = datetime.timedelta(hours=arg2.hour, minutes=arg2.minute, seconds=arg2.second, microseconds=arg2.microsecond)
else:
_arg2 = datetime.timedelta(days=conversions.to_integer(arg2, self._eval_context))
return _arg1 + _arg2 if is_add else _arg1 - _arg2
except EvaluationError as ex:
raise EvaluationError("Expression could not be evaluated as decimal or date arithmetic", ex) | expression: expression (PLUS | MINUS) expression |
15,050 | def callback(self, event):
if event.mask == 0x00000008:
if event.name.endswith():
print_success("Ldapdomaindump file found")
if event.name in [, ]:
if event.name == :
self.domain_groups_file = event.pathname
if event.name == :
self.domain_users_file = event.pathname
if self.domain_groups_file and self.domain_users_file:
print_success("Importing users")
subprocess.Popen([, self.domain_groups_file, self.domain_users_file])
elif event.name == :
print_success("Importing computers")
subprocess.Popen([, event.pathname])
self.ldap_strings = []
self.write_targets()
if event.name.endswith():
host = event.name.replace(, )
print_success("Secretsdump file, host ip: {}".format(host))
subprocess.Popen([, event.pathname])
self.ips.remove(host)
self.write_targets() | Function that gets called on each event from pyinotify. |
15,051 | def _get_lts_from_user(self, user):
req = meta.Session.query(LayerTemplate).select_from(join(LayerTemplate, User))
return req.filter(User.login==user).all() | Get layertemplates owned by a user from the database. |
15,052 | def libvlc_video_get_track_description(p_mi):
f = _Cfunctions.get(, None) or \
_Cfunction(, ((1,),), None,
ctypes.POINTER(TrackDescription), MediaPlayer)
return f(p_mi) | Get the description of available video tracks.
@param p_mi: media player.
@return: list with description of available video tracks, or NULL on error. |
15,053 | def _ReadDataTypeDefinitionWithMembers(
self, definitions_registry, definition_values,
data_type_definition_class, definition_name, supports_conditions=False):
members = definition_values.get(, None)
if not members:
error_message =
raise errors.DefinitionReaderError(definition_name, error_message)
supported_definition_values = (
self._SUPPORTED_DEFINITION_VALUES_STORAGE_DATA_TYPE_WITH_MEMBERS)
definition_object = self._ReadDataTypeDefinition(
definitions_registry, definition_values, data_type_definition_class,
definition_name, supported_definition_values)
attributes = definition_values.get(, None)
if attributes:
unsupported_attributes = set(attributes.keys()).difference(
self._SUPPORTED_ATTRIBUTES_STORAGE_DATA_TYPE)
if unsupported_attributes:
error_message = .format(
.join(unsupported_attributes))
raise errors.DefinitionReaderError(definition_name, error_message)
byte_order = attributes.get(, definitions.BYTE_ORDER_NATIVE)
if byte_order not in definitions.BYTE_ORDERS:
error_message = .format(
byte_order)
raise errors.DefinitionReaderError(definition_name, error_message)
definition_object.byte_order = byte_order
for member in members:
section = member.get(, None)
if section:
member_section_definition = data_types.MemberSectionDefinition(section)
definition_object.AddSectionDefinition(member_section_definition)
else:
member_data_type_definition = self._ReadMemberDataTypeDefinitionMember(
definitions_registry, member, definition_object.name,
supports_conditions=supports_conditions)
definition_object.AddMemberDefinition(member_data_type_definition)
return definition_object | Reads a data type definition with members.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
data_type_definition_class (str): data type definition class.
definition_name (str): name of the definition.
supports_conditions (Optional[bool]): True if conditions are supported
by the data type definition.
Returns:
StringDefinition: string data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect. |
15,054 | def get_source(fileobj):
if not isinstance(fileobj, dict):
return fileobj
else:
try:
with io.open(fileobj["filename"], encoding="utf-8",
errors="ignore") as f:
return f.read()
finally:
if fileobj.get():
try:
os.remove(fileobj["filename"])
except:
pass | Translate fileobj into file contents.
fileobj is either a string or a dict. If it's a string, that's the
file contents. If it's a string, then the filename key contains
the name of the file whose contents we are to use.
If the dict contains a true value for the key delete_after_use,
the file should be deleted once read. |
15,055 | def team_matches(self, team, event=None, year=None, simple=False, keys=False):
if event:
if keys:
return self._get( % (self.team_key(team), event))
else:
return [Match(raw) for raw in self._get( % (self.team_key(team), event, if simple else ))]
elif year:
if keys:
return self._get( % (self.team_key(team), year))
else:
return [Match(raw) for raw in self._get( % (self.team_key(team), year, if simple else ))] | Get list of matches team has participated in.
:param team: Team to get matches of.
:param year: Year to get matches from.
:param event: Event to get matches from.
:param simple: Get only vital data.
:param keys: Only get match keys rather than their full data.
:return: List of string keys or Match objects. |
15,056 | def add_console_message(self, message_type, message):
for m in message.split("\n"):
if m.strip():
self.console_messages.append((message_type, m)) | add messages in the console_messages list |
15,057 | def append(self, element):
assert element.locus == self.locus, (
"Element locus (%s) != Pileup locus (%s)"
% (element.locus, self.locus))
self.elements[element] = None | Append a PileupElement to this Pileup. If an identical PileupElement is
already part of this Pileup, do nothing. |
15,058 | def get_command_from_module(
command_module,
remote_connection: environ.RemoteConnection
):
use_remote = (
remote_connection.active and
hasattr(command_module, )
)
return (
command_module.execute_remote
if use_remote else
command_module.execute
) | Returns the execution command to use for the specified module, which may
be different depending upon remote connection
:param command_module:
:param remote_connection:
:return: |
15,059 | def connect(self, url=None):
if self._transport:
self.disconnect()
if url is None and self.url:
url = self.url
url = self.fix_url(url)
self._transport = _RequestsTransport(
url, self._cookiejar, sslverify=self._sslverify, cert=self.cert)
self._transport.user_agent = self.user_agent
self._proxy = _BugzillaServerProxy(url, self.tokenfile,
self._transport)
self.url = url
self.readconfig()
if (self.user and self.password):
log.info("user and password present - doing login()")
self.login()
if self.api_key:
log.debug("using API key")
self._proxy.use_api_key(self.api_key)
version = self._proxy.Bugzilla.version()["version"]
log.debug("Bugzilla version string: %s", version)
self._set_bz_version(version) | Connect to the bugzilla instance with the given url. This is
called by __init__ if a URL is passed. Or it can be called manually
at any time with a passed URL.
This will also read any available config files (see readconfig()),
which may set 'user' and 'password', and others.
If 'user' and 'password' are both set, we'll run login(). Otherwise
you'll have to login() yourself before some methods will work. |
15,060 | def _number_of_line(member_tuple):
member = member_tuple[1]
try:
return member.__code__.co_firstlineno
except AttributeError:
pass
try:
return inspect.findsource(member)[1]
except BaseException:
pass
for value in vars(member).values():
try:
return value.__code__.co_firstlineno
except AttributeError:
pass
return 0 | Try to return the number of the first line of the definition of a
member of a module. |
15,061 | def _GetSectionNames(self, pefile_object):
section_names = []
for section in pefile_object.sections:
section_name = getattr(section, , b)
try:
section_name = .format(section_name.decode())
except UnicodeDecodeError:
section_name = .format(repr(section_name))
section_names.append(section_name)
return section_names | Retrieves all PE section names.
Args:
pefile_object (pefile.PE): pefile object.
Returns:
list[str]: names of the sections. |
15,062 | def service_messages(self, short_name):
if short_name not in self.services:
raise ArgumentError("Unknown service name", short_name=short_name)
return list(self.services[short_name][].messages) | Get the messages stored for a service.
Args:
short_name (string): The short name of the service to get messages for
Returns:
list(ServiceMessage): A list of the ServiceMessages stored for this service |
15,063 | def is_null(*symbols):
from symbols.symbol_ import Symbol
for sym in symbols:
if sym is None:
continue
if not isinstance(sym, Symbol):
return False
if sym.token == :
continue
if sym.token == :
if not is_null(*sym.children):
return False
continue
return False
return True | True if no nodes or all the given nodes are either
None, NOP or empty blocks. For blocks this applies recursively |
15,064 | def logical_name(self):
pchar = self._libinput.libinput_seat_get_logical_name(self._handle)
return string_at(pchar).decode() | The logical name of the seat.
This is an identifier to group sets of devices within the compositor.
Returns:
str: The logical name of this seat. |
15,065 | def stream_interactions(self):
timestamps = sorted(self.time_to_edge.keys())
for t in timestamps:
for e in self.time_to_edge[t]:
yield (e[0], e[1], e[2], t) | Generate a temporal ordered stream of interactions.
Returns
-------
nd_iter : an iterator
The iterator returns a 4-tuples of (node, node, op, timestamp).
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> G.add_path([3,4,5,6], t=1)
>>> list(G.stream_interactions())
[(0, 1, '+', 0), (1, 2, '+', 0), (2, 3, '+', 0), (3, 4, '+', 1), (4, 5, '+', 1), (5, 6, '+', 1)] |
15,066 | def add(self, *tasks):
nodes = [x.node for x in tasks]
self.node.add(*nodes)
return self | Interfaces the GraphNode `add` method |
15,067 | def _get_multiparts(response):
boundary = None
for part in response.headers.get(, ).split():
if in part:
boundary = .format(part.split(, 1)[1].strip())
break
if not boundary:
raise ParseError("Was not able to find the boundary between objects in a multipart response")
if response.content is None:
return []
response_string = response.content
if six.PY3:
response_string = response_string.decode()
whole_body = response_string.strip()
no_front_boundary = whole_body.strip(boundary)
multi_parts = []
for part in no_front_boundary.split(boundary):
multi_parts.append(part.strip())
return multi_parts | From this
'multipart/parallel; boundary="874e43d27ec6d83f30f37841bdaf90c7"; charset=utf-8'
get this
--874e43d27ec6d83f30f37841bdaf90c7 |
15,068 | def victim_pivot(self, victim_resource):
resource = self.copy()
resource._request_uri = .format(victim_resource.request_uri, resource._request_uri)
return resource | Pivot point on Victims for this resource.
This method will return all *resources* (group, indicators, task,
etc) for this resource that are associated with the provided victim id.
**Example Endpoints URI's**
+--------------+---------------------------------------------------------------+
| HTTP Method | API Endpoint URI's |
+==============+===============================================================+
| GET | /v2/victims/{resourceId}/groups/{resourceType} |
+--------------+---------------------------------------------------------------+
| GET | /v2/victims/{resourceId}/groups/{resourceType}/{uniqueId} |
+--------------+---------------------------------------------------------------+
| GET | /v2/victims/{resourceId}/indicators/{resourceType} |
+--------------+---------------------------------------------------------------+
| GET | /v2/victims/{resourceId}/indicators/{resourceType}/{uniqueId} |
+--------------+---------------------------------------------------------------+
Args:
resource_id (integer): The resource pivot id (victim id). |
15,069 | def getall(self, key, default=_marker):
identity = self._title(key)
res = [v for i, k, v in self._impl._items if i == identity]
if res:
return res
if not res and default is not _marker:
return default
raise KeyError( % key) | Return a list of all values matching the key. |
15,070 | def compile_pattern_list(self, patterns):
if patterns is None:
return []
if not isinstance(patterns, list):
patterns = [patterns]
compile_flags = re.DOTALL
if self.ignorecase:
compile_flags = compile_flags | re.IGNORECASE
compiled_pattern_list = []
for idx, p in enumerate(patterns):
if isinstance(p, self.allowed_string_types):
p = self._coerce_expect_string(p)
compiled_pattern_list.append(re.compile(p, compile_flags))
elif p is EOF:
compiled_pattern_list.append(EOF)
elif p is TIMEOUT:
compiled_pattern_list.append(TIMEOUT)
elif isinstance(p, type(re.compile())):
compiled_pattern_list.append(p)
else:
self._pattern_type_err(p)
return compiled_pattern_list | This compiles a pattern-string or a list of pattern-strings.
Patterns must be a StringType, EOF, TIMEOUT, SRE_Pattern, or a list of
those. Patterns may also be None which results in an empty list (you
might do this if waiting for an EOF or TIMEOUT condition without
expecting any pattern).
This is used by expect() when calling expect_list(). Thus expect() is
nothing more than::
cpl = self.compile_pattern_list(pl)
return self.expect_list(cpl, timeout)
If you are using expect() within a loop it may be more
efficient to compile the patterns first and then call expect_list().
This avoid calls in a loop to compile_pattern_list()::
cpl = self.compile_pattern_list(my_pattern)
while some_condition:
...
i = self.expect_list(cpl, timeout)
... |
15,071 | def _clearPrices(self):
for offbid in self.offers + self.bids:
if self.auctionType == DISCRIMINATIVE:
offbid.clearedPrice = offbid.price
elif self.auctionType == FIRST_PRICE:
offbid.clearedPrice = offbid.lmbda
else:
raise ValueError | Clears prices according to auction type. |
15,072 | def collect(self):
if psutil is None:
self.log.error()
return {}
for port_name, port_cfg in self.ports.iteritems():
port = int(port_cfg[])
stats = get_port_stats(port)
for stat_name, stat_value in stats.iteritems():
metric_name = % (port_name, stat_name)
self.publish(metric_name, stat_value) | Overrides the Collector.collect method |
15,073 | def enumerate_dynamic_imports(tokens):
imported_modules = []
import_line = False
for index, tok in enumerate(tokens):
token_type = tok[0]
token_string = tok[1]
if token_type == tokenize.NEWLINE:
import_line = False
elif token_string == "import":
try:
if tokens[index-1][0] == tokenize.NEWLINE:
import_line = True
except IndexError:
import_line = True
elif import_line:
if token_type == tokenize.NAME and tokens[index+1][1] != :
if token_string not in reserved_words:
if token_string not in imported_modules:
imported_modules.append(token_string)
return imported_modules | Returns a dictionary of all dynamically imported modules (those inside of
classes or functions) in the form of {<func or class name>: [<modules>]}
Example:
>>> enumerate_dynamic_modules(tokens)
{'myfunc': ['zlib', 'base64']} |
15,074 | def handle_moban_file(moban_file, options):
moban_file_configurations = load_data(None, moban_file)
if moban_file_configurations is None:
raise exceptions.MobanfileGrammarException(
constants.ERROR_INVALID_MOBAN_FILE % moban_file
)
if (
constants.LABEL_TARGETS not in moban_file_configurations
and constants.LABEL_COPY not in moban_file_configurations
):
raise exceptions.MobanfileGrammarException(
constants.ERROR_NO_TARGETS % moban_file
)
check_none(moban_file_configurations, moban_file)
version = moban_file_configurations.get(
constants.MOBAN_VERSION, constants.DEFAULT_MOBAN_VERSION
)
if version == constants.DEFAULT_MOBAN_VERSION:
mobanfile.handle_moban_file_v1(moban_file_configurations, options)
else:
raise exceptions.MobanfileGrammarException(
constants.MESSAGE_FILE_VERSION_NOT_SUPPORTED % version
)
HASH_STORE.save_hashes() | act upon default moban file |
15,075 | def execute_proc(procname, args=()):
ctx = Context.current()
with ctx.mdr:
cursor = ctx.execute_proc(procname, args)
row_count = cursor.rowcount
_safe_close(cursor)
return row_count | Execute a stored procedure. Returns the number of affected rows. |
15,076 | def remove_formatting_codes(line, irc=False):
if irc:
line = escape(line)
new_line =
while len(line) > 0:
try:
if line[0] == :
line = line[1:]
if line[0] == :
new_line +=
line = line[1:]
elif line[0] == :
line = line[1:]
if line[0].isdigit():
line = line[1:]
if line[0].isdigit():
line = line[1:]
if line[0] == :
line = line[1:]
if line[0].isdigit():
line = line[1:]
if line[0].isdigit():
line = line[1:]
elif line[0] == :
line = line[1:]
if line[0].isdigit():
line = line[1:]
if line[0].isdigit():
line = line[1:]
if line[0] == :
while line[0] != :
line = line[1:]
line = line[1:]
elif line[0] == :
if line[:3] == :
new_line +=
line = line[3:]
continue
while line[0] != :
line = line[1:]
line = line[1:]
else:
line = line[1:]
else:
new_line += line[0]
line = line[1:]
except IndexError:
continue
return new_line | Remove girc control codes from the given line. |
15,077 | def _init_metadata(self):
self._inline_regions_metadata = {
: Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
),
: ,
: ,
: True,
: False,
: False,
: False,
: [{}],
: ,
}
self._choice_ids_metadata = {
: Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
),
: ,
: ,
: False,
: False,
: False,
: False,
: [[]],
: ,
}
self._choice_id_metadata = {
: Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
),
: ,
: ,
: True,
: False,
: False,
: False,
: [],
: ,
: []
} | stub |
15,078 | def modify(connect_spec, dn, directives):
adddeletes values are deleted.
Otherwise, only the named values are deleted.
* ```` to replace all of the attribute*urlldaps://ldap.example.com/bindmethodsimplepasswordsecretcn=admin,dc=example,dc=comaddexampleexample_val
l = connect(connect_spec)
modlist = [(getattr(ldap, + op.upper()), attr, list(vals))
for op, attr, vals in directives]
for idx, mod in enumerate(modlist):
if mod[1] == :
modlist[idx] = (mod[0], mod[1],
[_format_unicode_password(x) for x in mod[2]])
modlist = salt.utils.data.decode(modlist, to_str=True, preserve_tuples=True)
try:
l.c.modify_s(dn, modlist)
except ldap.LDAPError as e:
_convert_exception(e)
return True | Modify an entry in an LDAP database.
:param connect_spec:
See the documentation for the ``connect_spec`` parameter for
:py:func:`connect`.
:param dn:
Distinguished name of the entry.
:param directives:
Iterable of directives that indicate how to modify the entry.
Each directive is a tuple of the form ``(op, attr, vals)``,
where:
* ``op`` identifies the modification operation to perform.
One of:
* ``'add'`` to add one or more values to the attribute
* ``'delete'`` to delete some or all of the values from the
attribute. If no values are specified with this
operation, all of the attribute's values are deleted.
Otherwise, only the named values are deleted.
* ``'replace'`` to replace all of the attribute's values
with zero or more new values
* ``attr`` names the attribute to modify
* ``vals`` is an iterable of values to add or delete
:returns:
``True`` if successful, raises an exception otherwise.
CLI example:
.. code-block:: bash
salt '*' ldap3.modify "{
'url': 'ldaps://ldap.example.com/',
'bind': {
'method': 'simple',
'password': 'secret'}
}" dn='cn=admin,dc=example,dc=com'
directives="('add', 'example', ['example_val'])" |
15,079 | def var_expand(self, cmd, depth=0, formatter=DollarFormatter()):
ns = self.user_ns.copy()
ns.update(sys._getframe(depth+1).f_locals)
ns.pop(, None)
try:
cmd = formatter.format(cmd, **ns)
except Exception:
pass
return cmd | Expand python variables in a string.
The depth argument indicates how many frames above the caller should
be walked to look for the local namespace where to expand variables.
The global namespace for expansion is always the user's interactive
namespace. |
15,080 | def trash(self, request, **kwargs):
content = self.get_object()
content.indexed = False
content.save()
LogEntry.objects.log(request.user, content, "Trashed")
return Response({"status": "Trashed"}) | Psuedo-deletes a `Content` instance and removes it from the ElasticSearch index
Content is not actually deleted, merely hidden by deleted from ES index.import
:param request: a WSGI request object
:param kwargs: keyword arguments (optional)
:return: `rest_framework.response.Response` |
15,081 | def genl_msg_parser(ops, who, nlh, pp):
if ops.co_genl is None:
raise BUG
return int(cmd_msg_parser(who, nlh, ops.co_genl, ops, pp)) | https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L85.
Positional arguments:
ops -- nl_cache_ops class instance.
who -- sockaddr_nl class instance.
nlh -- nlmsghdr class instance.
pp -- nl_parser_param class instance.
Returns:
Integer, cmd_msg_parser() output. |
15,082 | def parse_basic_type_str(old_normalizer):
@functools.wraps(old_normalizer)
def new_normalizer(type_str, data):
try:
abi_type = parse(type_str)
except ParseError:
return type_str, data
if not isinstance(abi_type, BasicType):
return type_str, data
return old_normalizer(abi_type, type_str, data)
return new_normalizer | Modifies a normalizer to automatically parse the incoming type string. If
that type string does not represent a basic type (i.e. non-tuple type) or is
not parsable, the normalizer does nothing. |
15,083 | def cat_trials(x3d):
x3d = atleast_3d(x3d)
t = x3d.shape[0]
return np.concatenate(np.split(x3d, t, 0), axis=2).squeeze(0) | Concatenate trials along time axis.
Parameters
----------
x3d : array, shape (t, m, n)
Segmented input data with t trials, m signals, and n samples.
Returns
-------
x2d : array, shape (m, t * n)
Trials are concatenated along the second axis.
See also
--------
cut_segments : Cut segments from continuous data.
Examples
--------
>>> x = np.random.randn(6, 4, 150)
>>> y = cat_trials(x)
>>> y.shape
(4, 900) |
15,084 | def from_tuples_array(tuples):
result = StringValueMap()
if tuples == None or len(tuples) == 0:
return result
index = 0
while index < len(tuples):
if index + 1 >= len(tuples):
break
key = StringConverter.to_string(tuples[index])
value = StringConverter.to_nullable_string(tuples[index + 1])
index += 2
result.put(key, value)
return result | Creates a new StringValueMap from a list of key-value pairs called tuples.
The method is similar to [[fromTuples]] but tuples are passed as array instead of parameters.
:param tuples: a list of values where odd elements are keys and the following even elements are values
:return: a newly created StringValueMap. |
15,085 | def stop(self):
log.debug("Stopping periodic task")
stopframe = build_bcm_tx_delete_header(self.can_id_with_flags, self.flags)
send_bcm(self.bcm_socket, stopframe) | Send a TX_DELETE message to cancel this task.
This will delete the entry for the transmission of the CAN-message
with the specified can_id CAN identifier. The message length for the command
TX_DELETE is {[bcm_msg_head]} (only the header). |
15,086 | def write(self, target, *args, **kwargs):
return io_registry.write(self, target, *args, **kwargs) | Write this `SegmentList` to a file
Arguments and keywords depend on the output format, see the
online documentation for full details for each format.
Parameters
----------
target : `str`
output filename
Notes
----- |
15,087 | def subdivide(network, pores, shape, labels=[]):
rnano
mro = network._mro()
if not in mro:
raise Exception()
from openpnm.network import Cubic
pores = network._parse_indices(pores)
if in network.labels():
if (sp.in1d(pores, network.pores())).any():
raise Exception()
if not hasattr(network, ):
network._subdivide_flag = True
else:
raise Exception()
if sp.size(shape) != 2 and sp.size(shape) != 3:
raise Exception()
elif sp.size(shape) == 3 and 1 not in shape:
div = sp.array(shape, ndmin=1)
single_dim = None
else:
single_dim = sp.where(sp.array(network.shape) == 1)[0]
if sp.size(single_dim) == 0:
single_dim = None
if sp.size(shape) == 3:
div = sp.array(shape, ndmin=1)
else:
div = sp.zeros(3, dtype=sp.int32)
if single_dim is None:
dim = 2
else:
dim = single_dim
div[dim] = 1
div[-sp.array(div, ndmin=1, dtype=bool)] = sp.array(shape, ndmin=1)
networkspacing = network.spacing
new_netspacing = networkspacing/div
new_net = Cubic(shape=div, spacing=new_netspacing)
main_labels = [, , , , , ]
if single_dim is not None:
label_groups = sp.array([[, ],
[, ],
[, ]])
non_single_labels = label_groups[sp.array([0, 1, 2]) != single_dim]
for l in main_labels:
new_net[ + l] = False
network[ + l] = False
if single_dim is None:
new_net[ + l][new_net.pores(labels=l)] = True
else:
for ind in [0, 1]:
loc = (non_single_labels[ind] == l)
temp_pores = new_net.pores(non_single_labels[ind][loc])
new_net[ + l][temp_pores] = True
old_coords = sp.copy(new_net[])
if labels == []:
labels = [ + new_net.name]
for P in pores:
shift = network[][P] - networkspacing/2
new_net[] += shift
Pn = network.find_neighbor_pores(pores=P)
try:
Pn_new_net = network.pores(labels)
except KeyError:
Pn_new_net = []
Pn_old_net = Pn[~sp.in1d(Pn, Pn_new_net)]
Np1 = network.Np
extend(pore_coords=new_net[],
throat_conns=new_net[] + Np1,
labels=labels, network=network)
for l in main_labels:
network[+l][Np1:] = new_net[+l]
surf_pores = network.pores()
surf_coord = network[][surf_pores]
for neighbor in Pn:
neighbor_coord = network[][neighbor]
dist = [round(sp.inner(neighbor_coord-x, neighbor_coord-x),
20) for x in surf_coord]
nearest_neighbor = surf_pores[dist == sp.amin(dist)]
if neighbor in Pn_old_net:
coplanar_labels = network.labels(pores=nearest_neighbor)
new_neighbors = network.pores(coplanar_labels,
mode=)
if sp.size(new_neighbors) == 0:
labels = network.labels(pores=nearest_neighbor,
mode=)
common_label = [l for l in labels if in l]
new_neighbors = network.pores(common_label)
elif neighbor in Pn_new_net:
new_neighbors = nearest_neighbor
connect_pores(network=network, pores1=neighbor,
pores2=new_neighbors, labels=labels)
for l in main_labels:
network[ + l] = False
new_net[] = sp.copy(old_coords)
label_faces(network=network)
for l in main_labels:
del network[+l]
trim(network=network, pores=pores)
ws = network.project.workspace
ws.close_project(new_net.project) | r'''
It trim the pores and replace them by cubic networks with the sent shape.
Parameters
----------
network : OpenPNM Network Object
pores : array_like
The first group of pores to be replaced
shape : array_like
The shape of cubic networks in the target locations
Notes
-----
- It works only for cubic networks.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[5, 6, 5], spacing=0.001)
>>> pn.Np
150
>>> nano_pores = [2, 13, 14, 15]
>>> op.topotools.subdivide(network=pn, pores=nano_pores, shape=[4, 7, 3],
... labels='nano')
>>> pn.Np
482 |
15,088 | def print_objective(x):
value = 0
for minp, maxp in rectangles:
x_proj = np.minimum(np.maximum(x, minp), maxp)
value += (x - x_proj).norm()
print(.format(x[0], x[1], value)) | Calculate the objective value and prints it. |
15,089 | def collapse_pane(self, side):
hsplit = self.w[]
sizes = hsplit.get_sizes()
lsize, msize, rsize = sizes
if self._lsize is None:
self._lsize, self._rsize = lsize, rsize
self.logger.debug("left=%d mid=%d right=%d" % (
lsize, msize, rsize))
if side == :
if rsize < 10:
rsize = self._rsize
msize -= rsize
else:
self._rsize = rsize
msize += rsize
rsize = 0
elif side == :
if lsize < 10:
lsize = self._lsize
msize -= lsize
else:
self._lsize = lsize
msize += lsize
lsize = 0
hsplit.set_sizes([lsize, msize, rsize]) | Toggle collapsing the left or right panes. |
15,090 | def icon(self):
path = self._icon
if not path:
return
path = os.path.expandvars(os.path.expanduser(path))
if path.startswith():
base_path = os.path.dirname(self.filepath())
path = os.path.abspath(os.path.join(base_path, path))
return path | Returns the icon filepath for this plugin.
:return <str> |
15,091 | def get_registry_records_by_keyword(keyword=None):
portal_reg = ploneapi.portal.get_tool(name="portal_registry")
found_registers = {}
for record in portal_reg.records:
if keyword is None:
found_registers[record] = api.get_registry_record(record)
elif keyword.lower() in record.lower():
found_registers[record] = api.get_registry_record(record)
return found_registers | Get all the registry records (names and values) whose name
contains the specified keyword or, if keyword is None, return
all registry items
:param keyword: The keyword that has to be contained in the record name
:type keyword: str or None
:returns: Dictionary mapping the names of the found records to its values |
15,092 | def _updateVariantAnnotationSets(self, variantFile, dataUrl):
if not self.isAnnotated():
annotationType = None
for record in variantFile.header.records:
if record.type == "GENERIC":
if record.key == "SnpEffVersion":
annotationType = ANNOTATIONS_SNPEFF
elif record.key == "VEP":
version = record.value.split()[0]
if version == "v82":
annotationType = ANNOTATIONS_VEP_V82
elif version == "v77":
annotationType = ANNOTATIONS_VEP_V77
else:
raise ValueError(
"Unsupported VEP version {} in ".format(
version, dataUrl))
if annotationType is None:
infoKeys = variantFile.header.info.keys()
if in infoKeys or in infoKeys:
raise ValueError(
"Unsupported annotations in ".format(dataUrl))
if annotationType is not None:
vas = HtslibVariantAnnotationSet(self, self.getLocalId())
vas.populateFromFile(variantFile, annotationType)
self.addVariantAnnotationSet(vas) | Updates the variant annotation set associated with this variant using
information in the specified pysam variantFile. |
15,093 | def list_keyvaults(access_token, subscription_id, rgname):
endpoint = .join([get_rm_endpoint(),
, subscription_id,
, rgname,
,
, KEYVAULT_API])
return do_get_next(endpoint, access_token) | Lists key vaults in the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response. 200 OK. |
15,094 | def do_help(self, line):
print ""
print "Perfdump CLI provides a handful of simple ways to query your"
print "performance data."
print ""
print "The simplest queries are of the form:"
print ""
print "\t[slowest|fastest] [tests|setups]"
print ""
print "For example:"
print ""
print "\tperfdump > slowest tests"
print ""
print "Prints the slowest 10 tests"
print ""
print "Additional grouping of results can be request."
print ""
print "\tperfdump > slowest tests groupby file"
print ""
print "Grouping options include:"
print ""
print "\tfile | module | class | function"
print "" | Displays help information. |
15,095 | def title(self) -> str:
title_element = _find_tag(self.head, )
if title_element:
return title_element.textContent
return | Get/Set title string of this document. |
15,096 | def get(self, mode, metric):
if mode not in self._values:
logging.info("Metric %s not found for mode %s", metric, mode)
return []
return list(self._values[mode][metric]) | Get the history for the given metric and mode. |
15,097 | def serial_udb_extra_f4_send(self, sue_ROLL_STABILIZATION_AILERONS, sue_ROLL_STABILIZATION_RUDDER, sue_PITCH_STABILIZATION, sue_YAW_STABILIZATION_RUDDER, sue_YAW_STABILIZATION_AILERON, sue_AILERON_NAVIGATION, sue_RUDDER_NAVIGATION, sue_ALTITUDEHOLD_STABILIZED, sue_ALTITUDEHOLD_WAYPOINT, sue_RACING_MODE, force_mavlink1=False):
return self.send(self.serial_udb_extra_f4_encode(sue_ROLL_STABILIZATION_AILERONS, sue_ROLL_STABILIZATION_RUDDER, sue_PITCH_STABILIZATION, sue_YAW_STABILIZATION_RUDDER, sue_YAW_STABILIZATION_AILERON, sue_AILERON_NAVIGATION, sue_RUDDER_NAVIGATION, sue_ALTITUDEHOLD_STABILIZED, sue_ALTITUDEHOLD_WAYPOINT, sue_RACING_MODE), force_mavlink1=force_mavlink1) | Backwards compatible version of SERIAL_UDB_EXTRA F4: format
sue_ROLL_STABILIZATION_AILERONS : Serial UDB Extra Roll Stabilization with Ailerons Enabled (uint8_t)
sue_ROLL_STABILIZATION_RUDDER : Serial UDB Extra Roll Stabilization with Rudder Enabled (uint8_t)
sue_PITCH_STABILIZATION : Serial UDB Extra Pitch Stabilization Enabled (uint8_t)
sue_YAW_STABILIZATION_RUDDER : Serial UDB Extra Yaw Stabilization using Rudder Enabled (uint8_t)
sue_YAW_STABILIZATION_AILERON : Serial UDB Extra Yaw Stabilization using Ailerons Enabled (uint8_t)
sue_AILERON_NAVIGATION : Serial UDB Extra Navigation with Ailerons Enabled (uint8_t)
sue_RUDDER_NAVIGATION : Serial UDB Extra Navigation with Rudder Enabled (uint8_t)
sue_ALTITUDEHOLD_STABILIZED : Serial UDB Extra Type of Alitude Hold when in Stabilized Mode (uint8_t)
sue_ALTITUDEHOLD_WAYPOINT : Serial UDB Extra Type of Alitude Hold when in Waypoint Mode (uint8_t)
sue_RACING_MODE : Serial UDB Extra Firmware racing mode enabled (uint8_t) |
15,098 | def filepaths(path, exclude=(), hidden=True, empty=True):
if not os.path.exists(path):
raise error.PathNotFoundError(path)
elif not os.access(path, os.R_OK,
effective_ids=os.access in os.supports_effective_ids):
raise error.ReadError(errno.EACCES, path)
if os.path.isfile(path):
return [path]
else:
filepaths = []
for dirpath, dirnames, filenames in os.walk(path):
if not hidden and is_hidden(dirpath):
continue
for filename in filenames:
if not hidden and is_hidden(filename):
continue
filepath = os.path.join(dirpath, filename)
if any(is_match(filepath, pattern) for pattern in exclude):
continue
else:
if empty or os.path.getsize(os.path.realpath(filepath)) > 0:
filepaths.append(filepath)
return sorted(filepaths, key=lambda fp: fp.casefold()) | Return list of absolute, sorted file paths
path: Path to file or directory
exclude: List of file name patterns to exclude
hidden: Whether to include hidden files
empty: Whether to include empty files
Raise PathNotFoundError if path doesn't exist. |
15,099 | def save_failed_dump(self):
try:
if (self.transport.__class__.__name__ ==
and not getattr(self.transport, , None)):
self.doc = None
else:
self.doc = self.transport.prepare_response(self)
self.copy_request_data()
self.save_dumps()
except Exception as ex:
logger.error(, exc_info=ex) | Save dump of failed request for debugging.
This method is called then fatal network exception is raised.
The saved dump could be used for debugging the reason of the failure. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.