Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k)
---|---|---|
17,100 | def group_by_day(self):
data_by_day = OrderedDict()
for d in xrange(1, 366):
data_by_day[d] = []
for v, dt in zip(self._values, self.datetimes):
data_by_day[dt.doy].append(v)
return data_by_day | Return a dictionary of this collection's values grouped by each day of year.
Key values are between 1-365. |
17,101 | def form_valid(self, form):
response = super(FormAjaxMixin, self).form_valid(form)
if self.request.is_ajax():
return self.json_to_response()
return response | If the form is valid, return the response with the action. |
17,102 | def proximal_translation(prox_factory, y):
r
def translation_prox_factory(sigma):
return (ConstantOperator(y) + prox_factory(sigma) *
(IdentityOperator(y.space) - ConstantOperator(y)))
return translation_prox_factory | r"""Calculate the proximal of the translated function F(x - y).
Parameters
----------
prox_factory : callable
A factory function that, when called with a step size, returns the
proximal operator of ``F``.
y : Element in domain of ``F``.
Returns
-------
prox_factory : function
Factory for the proximal operator to be initialized
Notes
-----
Given a functional :math:`F`, this is calculated according to the rule
.. math::
\mathrm{prox}_{\sigma F( \cdot - y)}(x) =
y + \mathrm{prox}_{\sigma F}(x - y)
where :math:`y` is the translation, and :math:`\sigma` is the step size.
For reference on the identity used, see [CP2011c].
References
----------
[CP2011c] Combettes, P L, and Pesquet, J-C. *Proximal splitting
methods in signal processing.* In: Bauschke, H H, Burachik, R S,
Combettes, P L, Elser, V, Luke, D R, and Wolkowicz, H. Fixed-point
algorithms for inverse problems in science and engineering, Springer,
2011. |
17,103 | def compile_less(input_file, output_file):
from .modules import less
if not isinstance(input_file, str):
raise RuntimeError()
return {
: less.less_dependencies,
: less.less_compile,
: input_file,
: output_file,
: {},
} | Compile a LESS source file. Minifies the output in release mode. |
17,104 | def drawNormal(N, mu=0.0, sigma=1.0, seed=0):
RNG = np.random.RandomState(seed)
if isinstance(sigma,float):
draws = sigma*RNG.randn(N) + mu
else:
draws=[]
for t in range(len(sigma)):
draws.append(sigma[t]*RNG.randn(N) + mu[t])
return draws | Generate arrays of normal draws. The mu and sigma inputs can be numbers or
list-likes. If a number, output is a length N array of draws from the normal
distribution with mean mu and standard deviation sigma. If a list, output is
a length T list whose t-th entry is a length N array with draws from the
normal distribution with mean mu[t] and standard deviation sigma[t].
Parameters
----------
N : int
Number of draws in each row.
mu : float or [float]
One or more means. Number of elements T in mu determines number of rows
of output.
sigma : float or [float]
One or more standard deviations. Number of elements T in sigma
determines number of rows of output.
seed : int
Seed for random number generator.
Returns
-------
draws : np.array or [np.array]
T-length list of arrays of normal draws each of size N, or a single array
of size N (if sigma is a scalar). |
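A minimal usage sketch for drawNormal above; it assumes the enclosing module imports numpy as np (the function body already relies on it):
single = drawNormal(5, mu=0.0, sigma=1.0, seed=42)        # np.array of 5 draws
multi = drawNormal(5, mu=[0.0, 10.0], sigma=[1.0, 2.0])   # list of 2 arrays, 5 draws each
print(single.shape, len(multi), multi[1].shape)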
17,105 | def _repr_tty_(self) -> str:
header_description = [, ]
header_samples = [
,
,
,
,
,
]
header = SingleTable([], )
setting = SingleTable([], )
sample_main = SingleTable([header_samples], )
sample_desc = SingleTable([header_description], )
max_header_width = max(MIN_WIDTH, sample_desc.column_max_width(-1))
for key in self.Header.keys():
if in key:
value = .join(
wrap(getattr(self.Header, key), max_header_width)
)
else:
value = getattr(self.Header, key)
header.table_data.append([key, value])
for key in self.Settings.keys():
setting.table_data.append((key, getattr(self.Settings, key) or ))
setting.table_data.append((, .join(map(str, self.Reads))))
description_width = max(MIN_WIDTH, sample_desc.column_max_width(-1))
for sample in self.samples:
sample_main.table_data.append(
[getattr(sample, title) or for title in header_samples]
)
sample_desc.table_data.append(
(
sample.Sample_ID,
.join(
wrap(sample.Description or , description_width)
),
)
)
header.inner_heading_row_border = False
setting.inner_heading_row_border = False
table = .join(
[header.table, setting.table, sample_main.table, sample_desc.table]
)
return table | Return a summary of this sample sheet in a TTY compatible codec. |
17,106 | def _safe_timezone(obj):
if isinstance(obj, _Timezone):
return obj
if obj is None or obj == "local":
return local_timezone()
if isinstance(obj, (int, float)):
obj = int(obj * 60 * 60)
elif isinstance(obj, _datetime.tzinfo):
if hasattr(obj, "localize"):
obj = obj.zone
else:
offset = obj.utcoffset(None)
if offset is None:
offset = _datetime.timedelta(0)
obj = int(offset.total_seconds())
return timezone(obj) | Creates a timezone instance
from a string, Timezone, TimezoneInfo or integer offset. |
17,107 | def output_latex(self, filename_or_file_handle):
if isinstance(filename_or_file_handle, basestring):
file_handle = file(filename_or_file_handle, 'w')
we_opened = True
else:
file_handle = filename_or_file_handle
we_opened = False
try:
file_handle.write(self.latex)
finally:
if we_opened:
file_handle.close() | Output the file to a latex document
:param filename_or_file_handle: filename or already opened file handle to output to
:return: |
17,108 | def disconnect():
worker = global_worker
if worker.connected:
worker.threads_stopped.set()
if hasattr(worker, "import_thread"):
worker.import_thread.join_import_thread()
if hasattr(worker, "profiler") and hasattr(worker.profiler, "t"):
worker.profiler.join_flush_thread()
if hasattr(worker, "listener_thread"):
worker.listener_thread.join()
if hasattr(worker, "printer_thread"):
worker.printer_thread.join()
if hasattr(worker, "logger_thread"):
worker.logger_thread.join()
worker.threads_stopped.clear()
worker._session_index += 1
worker.node = None
worker.cached_functions_to_run = []
worker.function_actor_manager.reset_cache()
worker.serialization_context_map.clear()
if hasattr(worker, "raylet_client"):
del worker.raylet_client
if hasattr(worker, "plasma_client"):
worker.plasma_client.disconnect() | Disconnect this worker from the raylet and object store. |
17,109 | def unregister(self, alias):
if alias not in self._service_objects:
raise Error(self._device,
% alias)
service_obj = self._service_objects.pop(alias)
if service_obj.is_alive:
with expects.expect_no_raises(
% alias):
service_obj.stop() | Unregisters a service instance.
Stops a service and removes it from the manager.
Args:
alias: string, the alias of the service instance to unregister. |
17,110 | def iter(self):
if self.patterns:
patterns = list(self.patterns)
for pattern in patterns:
yield pattern | An iteration generator that allows the caller to modify the
:class:`PatternSet` during iteration |
17,111 | def _path2uri(self, dirpath):
relpath = dirpath.replace(self.root_path, self.package_name)
if relpath.startswith(os.path.sep):
relpath = relpath[1:]
return relpath.replace(os.path.sep, '.') | Convert directory path to uri |
17,112 | def write_biom(biomT, output_fp, fmt="hdf5", gzip=False):
opener = open
mode = 'w'
if gzip and fmt != "hdf5":
if not output_fp.endswith(".gz"):
output_fp += ".gz"
opener = gzip_open
mode = 'wt'
if fmt == "hdf5":
opener = h5py.File
with opener(output_fp, mode) as biom_f:
if fmt == "json":
biomT.to_json(biomT.generated_by, direct_io=biom_f)
elif fmt == "tsv":
biom_f.write(biomT.to_tsv())
else:
biomT.to_hdf5(biom_f, biomT.generated_by)
return output_fp | Write the BIOM table to a file.
:type biomT: biom.table.Table
:param biomT: A BIOM table containing the per-sample OTU counts and metadata
to be written out to file.
:type output_fp: str
:param output_fp: Path to the BIOM-format file that will be written.
:type fmt: str
:param fmt: One of: hdf5, json, tsv. The BIOM version the table will be
output (2.x, 1.0, 'classic'). |
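A hedged usage sketch for write_biom, assuming the biom-format package is installed and that the module-level imports the function relies on (h5py, gzip_open) are present; the tiny table below is purely illustrative:
import numpy as np
from biom.table import Table

table = Table(np.array([[1, 2], [3, 4]]), ['OTU1', 'OTU2'], ['S1', 'S2'])
out_path = write_biom(table, "otu_table.txt", fmt="tsv")   # writes the 'classic' tab-separated form and returns the path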
17,113 | def PixelsHDU(model):
cards = model._mission.HDUCards(model.meta, hdu=2)
cards = []
cards.append((, ))
cards.append((, ))
cards.append((, ))
cards.append((, model.mission, ))
cards.append((, EVEREST_MAJOR_MINOR, ))
cards.append((, EVEREST_VERSION, ))
cards.append((, strftime(),
))
header = pyfits.Header(cards=cards)
arrays = [pyfits.Column(name=, format= %
model.fpix.shape[1], array=model.fpix)]
X1N = model.X1N
if X1N is not None:
arrays.append(pyfits.Column(name=, format= %
X1N.shape[1], array=X1N))
cols = pyfits.ColDefs(arrays)
hdu = pyfits.BinTableHDU.from_columns(cols, header=header, name=)
return hdu | Construct the HDU containing the pixel-level light curve. |
17,114 | def generate_hexagonal_lattice(maxv1, minv1, maxv2, minv2, mindist):
if minv1 > maxv1:
raise ValueError("Invalid input to function.")
if minv2 > maxv2:
raise ValueError("Invalid input to function.")
v1s = [minv1]
v2s = [minv2]
initPoint = [minv1,minv2]
initLine = [initPoint]
tmpv1 = minv1
while (tmpv1 < maxv1):
tmpv1 = tmpv1 + (3 * mindist)**(0.5)
initLine.append([tmpv1,minv2])
v1s.append(tmpv1)
v2s.append(minv2)
initLine = numpy.array(initLine)
initLine2 = copy.deepcopy(initLine)
initLine2[:,0] += 0.5 * (3*mindist)**0.5
initLine2[:,1] += 1.5 * (mindist)**0.5
for i in xrange(len(initLine2)):
v1s.append(initLine2[i,0])
v2s.append(initLine2[i,1])
tmpv2_1 = initLine[0,1]
tmpv2_2 = initLine2[0,1]
while tmpv2_1 < maxv2 and tmpv2_2 < maxv2:
tmpv2_1 = tmpv2_1 + 3.0 * (mindist)**0.5
tmpv2_2 = tmpv2_2 + 3.0 * (mindist)**0.5
initLine[:,1] = tmpv2_1
initLine2[:,1] = tmpv2_2
for i in xrange(len(initLine)):
v1s.append(initLine[i,0])
v2s.append(initLine[i,1])
for i in xrange(len(initLine2)):
v1s.append(initLine2[i,0])
v2s.append(initLine2[i,1])
v1s = numpy.array(v1s)
v2s = numpy.array(v2s)
return v1s, v2s | This function generates a 2-dimensional hexagonal lattice of points.
Parameters
-----------
maxv1 : float
Largest value in the 1st dimension to cover
minv1 : float
Smallest value in the 1st dimension to cover
maxv2 : float
Largest value in the 2nd dimension to cover
minv2 : float
Smallest value in the 2nd dimension to cover
mindist : float
Maximum allowed mismatch between a point in the parameter space and the
generated bank of points.
Returns
--------
v1s : numpy.array
Array of positions in the first dimension
v2s : numpy.array
Array of positions in the second dimension |
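A brief usage sketch; the function uses xrange, so as written it targets Python 2 (substitute range on Python 3) and assumes numpy and copy are imported in its module:
v1s, v2s = generate_hexagonal_lattice(10., 0., 10., 0., 0.5)
print(len(v1s), v1s.min(), v1s.max())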
17,115 | def change_password(self, previous_password, proposed_password):
self.check_token()
response = self.client.change_password(
PreviousPassword=previous_password,
ProposedPassword=proposed_password,
AccessToken=self.access_token
)
self._set_attributes(response, {: proposed_password}) | Change the User password |
17,116 | def split(self, fragment_height):
passes = int(math.ceil(self.height/fragment_height))
fragments = []
for n in range(0, passes):
left = 0
right = self.width
upper = n * fragment_height
lower = min((n + 1) * fragment_height, self.height)
box = (left, upper, right, lower)
fragments.append(self.img_original.crop(box))
return fragments | Split an image into multiple fragments after fragment_height pixels
:param fragment_height: height of fragment
:return: list of PIL objects |
17,117 | def create_html_link(urlbase, urlargd, link_label, linkattrd=None,
escape_urlargd=True, escape_linkattrd=True,
urlhash=None):
attributes_separator = ' '
output = '<a href="' + \
create_url(urlbase, urlargd, escape_urlargd, urlhash) + '"'
if linkattrd:
output += ' '
if escape_linkattrd:
attributes = [escape(str(key), quote=True) + '="' +
escape(str(linkattrd[key]), quote=True) + '"'
for key in linkattrd.keys()]
else:
attributes = [str(key) + '="' + str(linkattrd[key]) + '"'
for key in linkattrd.keys()]
output += attributes_separator.join(attributes)
output = wash_for_utf8(output)
output += '>' + wash_for_utf8(link_label) + '</a>'
return output | Creates a W3C compliant link.
@param urlbase: base url (e.g. config.CFG_SITE_URL/search)
@param urlargd: dictionary of parameters. (e.g. p={'recid':3, 'of'='hb'})
@param link_label: text displayed in a browser (has to be already escaped)
@param linkattrd: dictionary of attributes (e.g. a={'class': 'img'})
@param escape_urlargd: boolean indicating if the function should escape
arguments (e.g. < becomes &lt; or " becomes &quot;)
@param escape_linkattrd: boolean indicating if the function should escape
attributes (e.g. < becomes &lt; or " becomes &quot;)
@param urlhash: hash string to add at the end of the link |
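A hedged usage sketch; the exact markup depends on create_url and wash_for_utf8, but a call of this shape should produce a plain anchor tag roughly like the comment shows:
link = create_html_link('http://example.org/search', {'p': 'ellis'}, 'Search', linkattrd={'class': 'navlink'})
# e.g. <a href="http://example.org/search?p=ellis" class="navlink">Search</a>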
17,118 | def refresh(self, refresh_binary=True):
updated_self = self.repo.get_resource(self.uri)
if not isinstance(self, type(updated_self)):
raise Exception( % (type(updated_self), type(self)) )
if updated_self:
self.status_code = updated_self.status_code
self.rdf.data = updated_self.rdf.data
self.headers = updated_self.headers
self.exists = updated_self.exists
if type(self) != NonRDFSource:
self._parse_graph()
self.versions = SimpleNamespace()
if type(updated_self) == NonRDFSource and refresh_binary:
self.binary.refresh(updated_self)
if hasattr(self,):
self._post_refresh()
del(updated_self)
else:
logger.debug()
self._empty_resource_attributes() | Performs GET request and refreshes RDF information for resource.
Args:
None
Returns:
None |
17,119 | def command_runner(shell_command, force_rerun_flag, outfile_checker, cwd=None, silent=False):
program_and_args = shlex.split(shell_command)
if not program_exists(program_and_args[0]):
raise OSError(.format(program_and_args[0]))
if cwd:
outfile_checker = op.join(cwd, op.basename(outfile_checker))
if force_rerun(flag=force_rerun_flag, outfile=outfile_checker):
if silent:
command = subprocess.Popen(program_and_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
out, err = command.communicate()
ret = command.returncode
else:
for path in execute(cmd=program_and_args, cwd=cwd):
print(path, end="")
log.debug(.format(program_and_args[0], outfile_checker))
else:
log.debug(.format(outfile_checker)) | Run a shell command with subprocess, with additional options to check if output file exists and printing stdout.
Args:
shell_command (str): Command as it would be formatted in the command-line (ie. "program -i test.in -o test.out").
force_rerun_flag: If the program should be rerun even if the output file exists.
outfile_checker (str): Name of the output file which may have been generated. This does not specify what the outfile
will be, that should be done in the program's args or predetermined.
cwd (str): Path to working directory where command will be executed.
silent (bool): If program STDOUT should be printed to the current shell.
Returns:
bool: If the program ran successfully. |
17,120 | def _analyze(self, it):
self._harvest_data()
if not isinstance(it, CodeUnit):
it = code_unit_factory(it, self.file_locator)[0]
return Analysis(self, it) | Analyze a single morf or code unit.
Returns an `Analysis` object. |
17,121 | def create_hparams(hparams_set,
hparams_overrides_str="",
data_dir=None,
problem_name=None,
hparams_path=None):
hparams = registry.hparams(hparams_set)
if hparams_path and tf.gfile.Exists(hparams_path):
hparams = create_hparams_from_json(hparams_path, hparams)
if data_dir:
hparams.add_hparam("data_dir", data_dir)
if hparams_overrides_str:
tf.logging.info("Overriding hparams in %s with %s", hparams_set,
hparams_overrides_str)
hparams = hparams.parse(hparams_overrides_str)
if problem_name:
add_problem_hparams(hparams, problem_name)
return hparams | Create HParams with data_dir and problem hparams, if kwargs provided. |
17,122 | def print_success(msg, color=True):
if color and is_posix():
safe_print(u"%s[INFO] %s%s" % (ANSI_OK, msg, ANSI_END))
else:
safe_print(u"[INFO] %s" % (msg)) | Print a success message.
:param string msg: the message
:param bool color: if ``True``, print with POSIX color |
17,123 | def set_unit_spike_features(self, unit_id, feature_name, value):
if isinstance(unit_id, (int, np.integer)):
if unit_id in self.get_unit_ids():
if unit_id not in self._unit_features.keys():
self._unit_features[unit_id] = {}
if isinstance(feature_name, str) and len(value) == len(self.get_unit_spike_train(unit_id)):
self._unit_features[unit_id][feature_name] = np.asarray(value)
else:
if not isinstance(feature_name, str):
raise ValueError("feature_name must be a string")
else:
raise ValueError("feature values should have the same length as the spike train")
else:
raise ValueError(str(unit_id) + " is not a valid unit_id")
else:
raise ValueError(str(unit_id) + " must be an int") | This function adds a unit features data set under the given features
name to the given unit.
Parameters
----------
unit_id: int
The unit id for which the features will be set
feature_name: str
The name of the feature to be stored
value
The data associated with the given feature name. Could be many
formats as specified by the user. |
17,124 | def _resolve_folder(project, parent_folder, folder_name):
if in folder_name:
raise ResolutionError( + str(folder_name) + +
str(parent_folder) + + str(project))
possible_folder, _skip = clean_folder_path(parent_folder + + folder_name, )
if not check_folder_exists(project, parent_folder, folder_name):
raise ResolutionError( + folder_name +
")
return possible_folder | :param project: The project that the folder belongs to
:type project: string
:param parent_folder: Full path to the parent folder that contains
folder_name
:type parent_folder: string
:param folder_name: Name of the folder
:type folder_name: string
:returns: The path to folder_name, if it exists, in the form of
"<parent_folder>/<folder_name>"
:rtype: string
:raises: ResolutionError if folder_name is not a folder, or if
folder_name points to a folder that does not exist
Attempts to resolve folder_name at location parent_folder in project. |
17,125 | def get_qubit_los(self, user_lo_config):
try:
_q_los = self.default_qubit_los.copy()
except KeyError:
raise PulseError()
for channel, lo_freq in user_lo_config.qubit_lo_dict().items():
_q_los[channel.index] = lo_freq
if _q_los == self.default_qubit_los:
return None
return _q_los | Embed default qubit LO frequencies from backend and format them to list object.
If configured lo frequency is the same as default, this method returns `None`.
Args:
user_lo_config (LoConfig): A dictionary of LOs to format.
Returns:
list: A list of qubit LOs.
Raises:
PulseError: when LO frequencies are missing. |
17,126 | def get_field_errors(self, field):
identifier = format_html({1}\, self.form_name, field.name)
errors = self.errors.get(field.html_name, [])
return self.error_class([SafeTuple(
(identifier, self.field_error_css_classes, , , , e)) for e in errors]) | Return server side errors. Shall be overridden by derived forms to add their
extra errors for AngularJS. |
17,127 | def notify_observers(self, joinpoint, post=False):
_observers = tuple(self.observers)
for observer in _observers:
observer.notify(joinpoint=joinpoint, post=post) | Notify observers with parameter calls and information about
pre/post call. |
17,128 | def _renew_by(name, window=None):
expiry = _expires(name)
if window is not None:
expiry = expiry - datetime.timedelta(days=window)
return expiry | Date before a certificate should be renewed
:param name: Common Name of the certificate (DNS name of certificate)
:param window: days before expiry date to renew
:return datetime object of first renewal date |
17,129 | def extract_path_arguments(path):
path = re.sub(r'\(.*?\)', '', path)
args = re.findall(r'<(.*?)>', path)
def split_arg(arg):
spl = arg.split(':')
if len(spl) == 1:
return {'name': spl[0],
'dataType': 'string',
'paramType': 'path'}
else:
return {'name': spl[1],
'dataType': spl[0],
'paramType': 'path'}
return list(map(split_arg, args)) | Extracts swagger path arguments from the given flask path.
This /path/<parameter> extracts [{name: 'parameter'}]
And this /<string(length=2):lang_code>/<string:id>/<float:probability>
extracts: [
{name: 'lang_code', dataType: 'string'},
{name: 'id', dataType: 'string'}
{name: 'probability', dataType: 'float'}] |
17,130 | def static(cls):
r
for attr in dir(cls):
im_func = getattr(getattr(cls, attr), , None)
if im_func:
setattr(cls, attr, staticmethod(im_func))
return cls | r"""Converts the given class into a static one, by changing all the methods of it into static methods.
Args:
cls (class): The class to be converted. |
17,131 | def escape_newlines(s: str) -> str:
if not s:
return s
s = s.replace("\\", r"\\")
s = s.replace("\n", r"\n")
s = s.replace("\r", r"\r")
return s | Escapes CR, LF, and backslashes.
Its counterpart is :func:`unescape_newlines`.
``s.encode("string_escape")`` and ``s.encode("unicode_escape")`` are
alternatives, but they mess around with quotes, too (specifically,
backslash-escaping single quotes). |
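A quick example of the escaping behaviour:
escape_newlines("line1\nline2\\end")   # -> 'line1\\nline2\\\\end'
escape_newlines("")                    # falsy input is returned unchanged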
17,132 | def easter(year, method=EASTER_WESTERN):
if not (1 <= method <= 3):
raise ValueError("invalid method")
y = year
g = y % 19
e = 0
if method < 3:
i = (19*g+15)%30
j = (y+y//4+i)%7
if method == 2:
e = 10
if y > 1600:
e = e+y//100-16-(y//100-16)//4
else:
c = y//100
h = (c-c//4-(8*c+13)//25+19*g+15)%30
i = h-(h//28)*(1-(h//28)*(29//(h+1))*((21-g)//11))
j = (y+y//4+i+2-c+c//4)%7
p = i-j+e
d = 1+(p+27+(p+6)//40)%31
m = 3+(p+26)//30
return datetime.date(int(y), int(m), int(d)) | This method was ported from the work done by GM Arts,
on top of the algorithm by Claus Tondering, which was
based in part on the algorithm of Ouding (1940), as
quoted in "Explanatory Supplement to the Astronomical
Almanac", P. Kenneth Seidelmann, editor.
This algorithm implements three different easter
calculation methods:
1 - Original calculation in Julian calendar, valid in
dates after 326 AD
2 - Original method, with date converted to Gregorian
calendar, valid in years 1583 to 4099
3 - Revised method, in Gregorian calendar, valid in
years 1583 to 4099 as well
These methods are represented by the constants:
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
The default method is method 3.
More about the algorithm may be found at:
http://users.chariot.net.au/~gmarts/eastalg.htm
and
http://www.tondering.dk/claus/calendar.html |
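A short usage example; EASTER_WESTERN/EASTER_ORTHODOX are the module-level constants described above, and the 2024 dates shown (31 March Western, 5 May Orthodox) are well-known values:
easter(2024)                    # datetime.date(2024, 3, 31), Western method (default)
easter(2024, EASTER_ORTHODOX)   # datetime.date(2024, 5, 5)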
17,133 | def _validate(self, p):
if self._is_operator(p):
for operator_or_filter in (p[1] if p[0] != else [p[1]]):
if p[0] == :
self._validate_xor_args(p)
self._validate(operator_or_filter)
elif not self._is_value_filter(p) and not self._is_key_filter(p):
raise ValueError(.format(p)) | Recursively validates the pattern (p), ensuring it adheres to the proper key names and structure. |
17,134 | def trace(self, func):
def wrapper(*args, **kwargs):
s = self.Stanza(self.indent)
s.add([self.GREEN, func.__name__, self.NORMAL, ])
s.indent += 4
sep =
for arg in args:
s.add([self.CYAN, self.safe_repr(arg), self.NORMAL], sep)
sep =
for name, value in sorted(kwargs.items()):
s.add([name + , self.CYAN, self.safe_repr(value),
self.NORMAL], sep)
sep =
s.add(, wrap=False)
self.writer.write(s.chunks)
self.indent += 2
try:
result = func(*args, **kwargs)
except:
self.indent -= 2
etype, evalue, etb = self.sys.exc_info()
info = self.inspect.getframeinfo(etb.tb_next, context=3)
s = self.Stanza(self.indent)
s.add([self.RED, , self.safe_repr(evalue), self.NORMAL])
s.add([, info.filename, , info.lineno], )
lines = self.unindent(info.code_context)
firstlineno = info.lineno - info.index
fmt = + str(len(str(firstlineno + len(lines)))) +
for i, line in enumerate(lines):
s.newline()
s.add([
i == info.index and self.MAGENTA or ,
fmt % (i + firstlineno),
i == info.index and or , line, self.NORMAL])
self.writer.write(s.chunks)
raise
self.indent -= 2
s = self.Stanza(self.indent)
s.add([self.GREEN, , self.CYAN, self.safe_repr(result),
self.NORMAL])
self.writer.write(s.chunks)
return result
return self.functools.update_wrapper(wrapper, func) | Decorator to print out a function's arguments and return value. |
17,135 | def create_session_entity_type(
self,
parent,
session_entity_type,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
if not in self._inner_api_calls:
self._inner_api_calls[
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_session_entity_type,
default_retry=self._method_configs[
].retry,
default_timeout=self._method_configs[
].timeout,
client_info=self._client_info,
)
request = session_entity_type_pb2.CreateSessionEntityTypeRequest(
parent=parent,
session_entity_type=session_entity_type,
)
return self._inner_api_calls[](
request, retry=retry, timeout=timeout, metadata=metadata) | Creates a session entity type.
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.SessionEntityTypesClient()
>>>
>>> parent = client.session_path('[PROJECT]', '[SESSION]')
>>>
>>> # TODO: Initialize ``session_entity_type``:
>>> session_entity_type = {}
>>>
>>> response = client.create_session_entity_type(parent, session_entity_type)
Args:
parent (str): Required. The session to create a session entity type for.
Format: ``projects/<Project ID>/agent/sessions/<Session ID>``.
session_entity_type (Union[dict, ~google.cloud.dialogflow_v2.types.SessionEntityType]): Required. The session entity type to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.SessionEntityType`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types.SessionEntityType` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. |
17,136 | def search_related_tags(self,series_search_text=None,tag_names=None,response_type=None,params=None):
path =
params[], params[] = series_search_text, tag_names
response_type = response_type if response_type else self.response_type
if response_type != : params[] =
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response | Function to request the related FRED tags for one or more FRED tags matching a series search.
`<https://research.stlouisfed.org/docs/api/fred/series_search_related_tags.html>`_
:arg str series_search_text: The words to match against economic data series. Required.
:arg str tag_names: Tag names that series match. Separate with semicolon as in "income;bea". Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
'dict', 'df', 'numpy', 'csv', 'tab', 'pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg int limit: The maximum number of results to return. Options 1 to 1000
:arg int offset: Data offset. Options >=0
:arg str order_by: Order results by values of the specified attribute. Options are 'series_count',
'popularity', 'created', 'name', 'group_id'
:arg str sort_order: Sort results for attribute values specified by order_by. Options are 'asc','desc'
:arg str tag_group_id: Tag ID to filter tags by. Options are 'freq', 'gen', 'geo', 'geot', 'rls', 'seas', 'src'
:arg str tag_search_text: The words to find matching tags with.
:arg str exclude_tag_names: Tag names to exclude. Separate with semicolon as in "income;bea"
:arg bool ssl_verify: To verify HTTPs. |
17,137 | def getIcon(self, glyph, isOpen, color=None):
try:
fileName = self._registry[(glyph, isOpen)]
except KeyError:
logger.warn("Unregistered icon glyph: {} (open={})".format(glyph, isOpen))
from argos.utils.misc import log_dictionary
log_dictionary(self._registry, "registry", logger=logger)
raise
return self.loadIcon(fileName, color=color) | Returns a QIcon given a glyph name, open/closed state and color.
The resulting icon is cached so that it only needs to be rendered once.
:param glyph: name of a registered glyph (e.g. 'file', 'array')
:param isOpen: boolean that indicates if the RTI is open or closed.
:param color: '#RRGGBB' string (e.g. '#FF0000' for red)
:return: QtGui.QIcon |
17,138 | def database_list_projects(object_id, input_params={}, always_retry=True, **kwargs):
return DXHTTPRequest( % object_id, input_params, always_retry=always_retry, **kwargs) | Invokes the /database-xxxx/listProjects API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Cloning#API-method%3A-%2Fclass-xxxx%2FlistProjects |
17,139 | def ncpos(string, chars, start):
string = stypes.stringToCharP(string)
chars = stypes.stringToCharP(chars)
start = ctypes.c_int(start)
return libspice.ncpos_c(string, chars, start) | Find the first occurrence in a string of a character NOT belonging
to a collection of characters, starting at a specified
location searching forward.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ncpos_c.html
:param string: Any character string.
:type string: str
:param chars: A collection of characters.
:type chars: str
:param start: Position to begin looking for one not in chars.
:type start: int
:return: index
:rtype: int |
17,140 | def _update_fps(self, event):
self._frame_count += 1
diff = time() - self._basetime
if (diff > self._fps_window):
self._fps = self._frame_count / diff
self._basetime = time()
self._frame_count = 0
self._fps_callback(self.fps) | Update the fps after every window |
17,141 | def _create_attach_record(self, id, timed):
record = super(MorphToMany, self)._create_attach_record(id, timed)
record[self._morph_type] = self._morph_class
return record | Create a new pivot attachment record. |
17,142 | def tocimxml(self, ignore_path=False):
for key, value in self.properties.items():
try:
assert isinstance(value, CIMProperty)
except AssertionError:
raise TypeError(
_format("Property {0!A} has invalid type: {1} (must be "
"CIMProperty)", key, builtin_type(value)))
instance_xml = cim_xml.INSTANCE(
self.classname,
properties=[p.tocimxml() for p in self.properties.values()],
qualifiers=[q.tocimxml() for q in self.qualifiers.values()])
if self.path is None or ignore_path:
return instance_xml
if self.path.namespace is None:
return cim_xml.VALUE_NAMEDINSTANCE(
self.path.tocimxml(),
instance_xml)
if self.path.host is None:
return cim_xml.VALUE_OBJECTWITHLOCALPATH(
self.path.tocimxml(),
instance_xml)
return cim_xml.VALUE_INSTANCEWITHPATH(
self.path.tocimxml(),
instance_xml) | Return the CIM-XML representation of this CIM instance,
as an object of an appropriate subclass of :term:`Element`.
If the instance has no instance path specified or if `ignore_path` is
`True`, the returned CIM-XML representation is an `INSTANCE` element
consistent with :term:`DSP0201`. This is the required element for
representing embedded instances.
Otherwise, if the instance path of the instance has no namespace
specified, the returned CIM-XML representation is an
`VALUE.NAMEDINSTANCE` element consistent with :term:`DSP0201`.
Otherwise, if the instance path of the instance has no host specified,
the returned CIM-XML representation is a
`VALUE.OBJECTWITHLOCALPATH` element consistent with :term:`DSP0201`.
Otherwise, the returned CIM-XML representation is a
`VALUE.INSTANCEWITHPATH` element consistent with :term:`DSP0201`.
The order of properties and qualifiers in the returned CIM-XML
representation is preserved from the :class:`~pywbem.CIMInstance`
object.
Parameters:
ignore_path (:class:`py:bool`): Ignore the path of the instance, even
if a path is specified.
Returns:
The CIM-XML representation, as an object of an appropriate subclass
of :term:`Element`. |
17,143 | def add_user_grant(self, permission, user_id, recursive=False, headers=None):
if permission not in GSPermissions:
raise self.connection.provider.storage_permissions_error(
% permission)
acl = self.get_acl(headers=headers)
acl.add_user_grant(permission, user_id)
self.set_acl(acl, headers=headers)
if recursive:
for key in self:
key.add_user_grant(permission, user_id, headers=headers) | Convenience method that provides a quick way to add a canonical user grant to a bucket.
This method retrieves the current ACL, creates a new grant based on the parameters
passed in, adds that grant to the ACL and then PUTs the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ|WRITE|FULL_CONTROL)
:type user_id: string
:param user_id: The canonical user id associated with the GS account you are granting
the permission to.
:type recursive: bool
:param recursive: A boolean value to controls whether the call
will apply the grant to all keys within the bucket
or not. The default value is False. By passing a
True value, the call will iterate through all keys
in the bucket and apply the same grant to each key.
CAUTION: If you have a lot of keys, this could take
a long time! |
17,144 | def get_current_user_ids(self, db_read=None):
db_read = db_read or self.db_read
return self.user_ids.using(db_read) | Returns current user ids and the count. |
17,145 | def on_add_cols(self, event):
col_labels = self.grid.col_labels
er_items = [head for head in self.grid_headers[self.grid_type][][2] if head not in col_labels]
er_items = builder.remove_list_headers(er_items)
pmag_headers = sorted(list(set(self.grid_headers[self.grid_type][][2]).union(self.grid_headers[self.grid_type][][1])))
to_add = [i + for i in self.er_magic.double if i in pmag_headers and i + not in col_labels]
pmag_headers.extend(to_add)
pmag_items = [head for head in pmag_headers if head not in er_items and head not in col_labels]
pmag_items = sorted(builder.remove_list_headers(pmag_items))
dia = pw.HeaderDialog(self, , items1=er_items, items2=pmag_items)
dia.Centre()
result = dia.ShowModal()
new_headers = []
if result == 5100:
new_headers = dia.text_list
if not new_headers:
return
errors = self.add_new_grid_headers(new_headers, er_items, pmag_items)
if errors:
errors_str = .join(errors)
pw.simple_warning(.format(errors_str))
if self.grid.GetWindowStyle() != wx.DOUBLE_BORDER:
self.grid.SetWindowStyle(wx.DOUBLE_BORDER)
self.main_sizer.Fit(self)
self.grid.SetWindowStyle(wx.NO_BORDER)
self.Centre()
self.main_sizer.Fit(self)
self.grid.changes = set(range(self.grid.GetNumberRows()))
dia.Destroy() | Show simple dialog that allows user to add a new column name |
17,146 | def flatMapValues(self, f):
flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True) | Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')] |
17,147 | def export_element(bpmn_graph, export_elements, node, nodes_classification, order=0, prefix="", condition="",
who="", add_join=False):
node_type = node[1][consts.Consts.type]
node_classification = nodes_classification[node[0]]
outgoing_flows = node[1].get(consts.Consts.outgoing_flow)
if node_type != consts.Consts.parallel_gateway and consts.Consts.default in node[1] \
and node[1][consts.Consts.default] is not None:
default_flow_id = node[1][consts.Consts.default]
else:
default_flow_id = None
if BpmnDiagramGraphCsvExport.classification_join in node_classification and not add_join:
if node_type == consts.Consts.task or node_type == consts.Consts.subprocess:
return node
else:
outgoing_flow_id = outgoing_flows[0]
outgoing_flow = bpmn_graph.get_flow_by_id(outgoing_flow_id)
outgoing_node = bpmn_graph.get_node_by_id(outgoing_flow[2][consts.Consts.target_ref])
return outgoing_node
else:
if node_type == consts.Consts.task:
export_elements.append({"Order": prefix + str(order), "Activity": node[1][consts.Consts.node_name],
"Condition": condition, "Who": who, "Subprocess": "", "Terminated": ""})
elif node_type == consts.Consts.subprocess:
export_elements.append({"Order": prefix + str(order), "Activity": node[1][consts.Consts.node_name],
"Condition": condition, "Who": who, "Subprocess": "yes", "Terminated": ""})
if BpmnDiagramGraphCsvExport.classification_split in node_classification:
next_node = None
alphabet_suffix_index = 0
for outgoing_flow_id in outgoing_flows:
outgoing_flow = bpmn_graph.get_flow_by_id(outgoing_flow_id)
outgoing_node = bpmn_graph.get_node_by_id(outgoing_flow[2][consts.Consts.target_ref])
suffix = string.ascii_lowercase[alphabet_suffix_index]
next_prefix = prefix + str(order) + suffix
alphabet_suffix_index += 1
if node_type != consts.Consts.parallel_gateway and consts.Consts.name in outgoing_flow[2] \
and outgoing_flow[2][consts.Consts.name] is not None:
condition = outgoing_flow[2][consts.Consts.name]
else:
condition = ""
if BpmnDiagramGraphCsvExport.classification_join in nodes_classification[outgoing_node[0]]:
export_elements.append(
{"Order": next_prefix + str(1), "Activity": "goto " + prefix + str(order + 1),
"Condition": condition, "Who": who, "Subprocess": "", "Terminated": ""})
elif outgoing_flow_id == default_flow_id:
tmp_next_node = BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, outgoing_node,
nodes_classification, 1, next_prefix, "else",
who)
if tmp_next_node is not None:
next_node = tmp_next_node
else:
tmp_next_node = BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, outgoing_node,
nodes_classification, 1, next_prefix,
condition, who)
if tmp_next_node is not None:
next_node = tmp_next_node
if next_node is not None:
return BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, next_node,
nodes_classification, order=(order + 1), prefix=prefix,
who=who, add_join=True)
elif len(outgoing_flows) == 1:
outgoing_flow_id = outgoing_flows[0]
outgoing_flow = bpmn_graph.get_flow_by_id(outgoing_flow_id)
outgoing_node = bpmn_graph.get_node_by_id(outgoing_flow[2][consts.Consts.target_ref])
return BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, outgoing_node,
nodes_classification, order=(order + 1), prefix=prefix,
who=who)
else:
return None | Export a node with "Element" classification (task, subprocess or gateway)
:param bpmn_graph: an instance of BpmnDiagramGraph class,
:param export_elements: a dictionary object. The key is a node ID, value is a dictionary of parameters that
will be used in exported CSV document,
:param node: networkx.Node object,
:param nodes_classification: dictionary of classification labels. Key - node id. Value - a list of labels,
:param order: the order param of exported node,
:param prefix: the prefix of exported node - if the task appears after some gateway, the prefix will identify
the branch
:param condition: the condition param of exported node,
:param who: the who param of exported node,
:param add_join: boolean flag. Used to indicate if "Join" element should be added to CSV.
:return: None or the next node object if the exported node was a gateway join. |
17,148 | def getLocationRepresentation(self):
activeCells = np.array([], dtype="uint32")
totalPrevCells = 0
for module in self.L6aModules:
activeCells = np.append(activeCells,
module.getActiveCells() + totalPrevCells)
totalPrevCells += module.numberOfCells()
return activeCells | Get the full population representation of the location layer. |
17,149 | def get_tag_html(tag_id):
tag_data = get_lazy_tag_data(tag_id)
tag = tag_data[]
args = tag_data[]
kwargs = tag_data[]
lib, tag_name = get_lib_and_tag_name(tag)
args_str =
if args:
for arg in args:
if isinstance(arg, six.string_types):
args_str += " ".format(arg)
else:
args_str += "{0} ".format(arg)
kwargs_str =
if kwargs:
for name, value in kwargs.items():
if isinstance(value, six.string_types):
kwargs_str += "{0}= ".format(name, value)
else:
kwargs_str += "{0}={1} ".format(name, value)
html = .format(
lib=lib, tag_name=tag_name, args=args_str, kwargs=kwargs_str)
return html | Returns the Django HTML to load the tag library and render the tag.
Args:
tag_id (str): The tag id to return the HTML for. |
17,150 | def rekey(self,
uid=None,
offset=None,
**kwargs):
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("The unique identifier must be a string.")
if offset is not None:
if not isinstance(offset, six.integer_types):
raise TypeError("The offset must be an integer.")
attributes = []
if kwargs.get('activation_date'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.ACTIVATION_DATE,
kwargs.get('activation_date')
)
)
if kwargs.get('process_start_date'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.PROCESS_START_DATE,
kwargs.get('process_start_date')
)
)
if kwargs.get('protect_stop_date'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.PROTECT_STOP_DATE,
kwargs.get('protect_stop_date')
)
)
if kwargs.get('deactivation_date'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.DEACTIVATION_DATE,
kwargs.get('deactivation_date')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
result = self.proxy.rekey(
uuid=uid,
offset=offset,
template_attribute=template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
) | Rekey an existing key.
Args:
uid (string): The unique ID of the symmetric key to rekey.
Optional, defaults to None.
offset (int): The time delta, in seconds, between the new key's
initialization date and activation date. Optional, defaults
to None.
**kwargs (various): A placeholder for object attributes that
should be set on the newly rekeyed key. Currently
supported attributes include:
activation_date (int)
process_start_date (int)
protect_stop_date (int)
deactivation_date (int)
Returns:
string: The unique ID of the newly rekeyed key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid |
17,151 | def flatten_multi_dim(sequence):
for x in sequence:
if (isinstance(x, collections.Iterable)
and not isinstance(x, six.string_types)):
for y in flatten_multi_dim(x):
yield y
else:
yield x | Flatten a multi-dimensional array-like to a single dimensional sequence
(as a generator). |
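Usage example; the generator relies on collections.Iterable and six, so it works as written on older Pythons (on 3.10+ the isinstance check would need collections.abc.Iterable):
nested = [1, [2, (3, 4)], "ab", [[5]]]
list(flatten_multi_dim(nested))   # -> [1, 2, 3, 4, 'ab', 5]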
17,152 | def add_template_global(self, f, name=None):
self.jinja_env.globals[name or f.__name__] = f | Register a custom template global function. Works exactly like the
:meth:`template_global` decorator.
.. versionadded:: 0.10
:param name: the optional name of the global function, otherwise the
function name will be used. |
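A short usage sketch with a Flask application object:
from flask import Flask

app = Flask(__name__)
app.add_template_global(lambda: "1.4.2", name="app_version")
# templates can then render {{ app_version() }}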
17,153 | def best_match_from_list(item,options,fuzzy=90,fname_match=True,fuzzy_fragment=None,guess=False):
matches = matches_from_list(item,options,fuzzy,fname_match,fuzzy_fragment,guess)
if len(matches)>0:
return matches[0]
return None | Returns the best match from :meth:`matches_from_list` or ``None`` if no good matches |
17,154 | def driver_send(command,hostname=None,wait=0.2):
cmd = []
if hostname:
cmd += [,hostname]
if isinstance(command,basestring):
command = [command]
cmd += [[,x] for x in command] + []
o = nl.run(cmd,quiet=None,stderr=None)
if wait!=None:
time.sleep(wait) | Send a command (or ``list`` of commands) to AFNI at ``hostname`` (defaults to local host)
Requires plugouts enabled (open afni with ``-yesplugouts`` or set ``AFNI_YESPLUGOUTS = YES`` in ``.afnirc``)
If ``wait`` is not ``None``, will automatically sleep ``wait`` seconds after sending the command (to make sure it took effect) |
17,155 | def apparent_dip_correction(axes):
a1 = axes[0].copy()
a1[-1] = 0
cosa = angle(axes[0],a1,cos=True)
_ = 1-cosa**2
if _ > 1e-12:
sina = N.sqrt(_)
if cosa < 0:
sina *= -1
R= N.array([[cosa,sina],[-sina,cosa]])
else:
R = N.identity(2)
return R | Produces a two-dimensional rotation matrix that
rotates a projected dataset to correct for apparent dip |
17,156 | def select_distinct_field(col, field_or_fields, filters=None):
fields = _preprocess_field_or_fields(field_or_fields)
if filters is None:
filters = dict()
if len(fields) == 1:
key = fields[0]
data = list(col.find(filters).distinct(key))
return data
else:
pipeline = [
{
"$match": filters
},
{
"$group": {
"_id": {key: "$" + key for key in fields},
},
},
]
data = list()
for doc in col.aggregate(pipeline):
data.append([doc["_id"][key] for key in fields])
return data | Select distinct value or combination of values of
single or multiple fields.
:params fields: str or list of str.
:return data: list of list.
(Chinese docstring, translated) Select all distinct combinations of values that appear across the specified columns. |
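A hedged usage sketch, assuming a running MongoDB server and pymongo; the database/collection names are placeholders, and the ordering of the multi-field results is not guaranteed:
import pymongo

col = pymongo.MongoClient()["testdb"]["users"]
col.insert_many([{"city": "NY", "dept": "A"}, {"city": "NY", "dept": "B"}])
select_distinct_field(col, "city")            # -> ["NY"]
select_distinct_field(col, ["city", "dept"])  # -> [["NY", "A"], ["NY", "B"]]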
17,157 | def encode_grib2_data(self):
lscale = 1e6
grib_id_start = [7, 0, 14, 14, 2]
gdsinfo = np.array([0, np.product(self.data.shape[-2:]), 0, 0, 30], dtype=np.int32)
lon_0 = self.proj_dict["lon_0"]
sw_lon = self.grid_dict["sw_lon"]
if lon_0 < 0:
lon_0 += 360
if sw_lon < 0:
sw_lon += 360
gdtmp1 = [1, 0, self.proj_dict[], 0, float(self.proj_dict[]), 0, float(self.proj_dict[]),
self.data.shape[-1], self.data.shape[-2], self.grid_dict["sw_lat"] * lscale,
sw_lon * lscale, 0, self.proj_dict["lat_0"] * lscale,
lon_0 * lscale,
self.grid_dict["dx"] * 1e3, self.grid_dict["dy"] * 1e3, 0b00000000, 0b01000000,
self.proj_dict["lat_1"] * lscale,
self.proj_dict["lat_2"] * lscale, -90 * lscale, 0]
pdtmp1 = np.array([1,
31,
4,
0,
31,
0,
0,
1,
0,
1,
1,
0,
1,
1,
0,
0,
1
], dtype=np.int32)
grib_objects = pd.Series(index=self.times, data=[None] * self.times.size, dtype=object)
drtmp1 = np.array([0, 0, 4, 8, 0], dtype=np.int32)
for t, time in enumerate(self.times):
time_list = list(self.run_date.utctimetuple()[0:6])
if grib_objects[time] is None:
grib_objects[time] = Grib2Encode(0, np.array(grib_id_start + time_list + [2, 1], dtype=np.int32))
grib_objects[time].addgrid(gdsinfo, gdtmp1)
pdtmp1[8] = (time.to_pydatetime() - self.run_date).total_seconds() / 3600.0
data = self.data[t] / 1000.0
data[np.isnan(data)] = 0
masked_data = np.ma.array(data, mask=data<=0)
pdtmp1[-2] = 0
grib_objects[time].addfield(1, pdtmp1, 0, drtmp1, masked_data)
return grib_objects | Encodes member percentile data to GRIB2 format.
Returns:
Series of GRIB2 messages |
17,158 | def getWorkerInfo(dataTask):
workertime = []
workertasks = []
for fichier, vals in dataTask.items():
if hasattr(vals, ):
totaltime = sum([a[] for a in vals.values()])
totaltasks = sum([1 for a in vals.values()])
workertime.append(totaltime)
workertasks.append(totaltasks)
return workertime, workertasks | Returns the total execution time and task quantity by worker |
17,159 | def _create_page(cls, page, lang, auto_title, cms_app=None, parent=None, namespace=None,
site=None, set_home=False):
from cms.api import create_page, create_title
from cms.utils.conf import get_templates
default_template = get_templates()[0][0]
if page is None:
page = create_page(
auto_title, language=lang, parent=parent, site=site,
template=default_template, in_navigation=True, published=True
)
page.application_urls = cms_app
page.application_namespace = namespace
page.save()
page.publish(lang)
elif lang not in page.get_languages():
create_title(
language=lang, title=auto_title, page=page
)
page.publish(lang)
if set_home:
page.set_as_homepage()
return page.get_draft_object() | Create a single page or titles
:param page: Page instance
:param lang: language code
:param auto_title: title text for the newly created title
:param cms_app: Apphook Class to be attached to the page
:param parent: parent page (None when creating the home page)
:param namespace: application instance name (as provided to the ApphookConfig)
:param set_home: mark as home page (on django CMS 3.5 only)
:return: draft copy of the created page |
17,160 | def randtld(self):
self.tlds = tuple(tlds.tlds) if not self.tlds else self.tlds
return self.random.choice(self.tlds) | -> a random #str tld via :mod:tlds |
17,161 | def join_state(self, state, history_index, concurrency_history_item):
state.join()
if state.backward_execution:
self.backward_execution = True
state.state_execution_status = StateExecutionStatus.INACTIVE
if not self.backward_execution:
state.concurrency_queue = None
state.execution_history.push_return_history_item(state, CallType.EXECUTE, self, state.output_data)
else:
last_history_item = concurrency_history_item.execution_histories[history_index].pop_last_item()
assert isinstance(last_history_item, CallItem) | a utility function to join a state
:param state: the state to join
:param history_index: the index of the execution history stack in the concurrency history item
for the given state
:param concurrency_history_item: the concurrency history item that stores the execution history stacks of all
children
:return: |
17,162 | def s3_etag(url: str) -> Optional[str]:
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag | Check ETag on S3 object. |
17,163 | def _deregister(self, session):
if session in self:
self._sessions.pop(self._get_session_key(session), None) | Deregister a session. |
17,164 | def update_agent_state():
configure_service()
status =
if get_service_status(db.Service.SCHEDULE) == db.ServiceStatus.STOPPED:
status =
elif get_service_status(db.Service.CAPTURE) == db.ServiceStatus.BUSY:
status =
elif get_service_status(db.Service.INGEST) == db.ServiceStatus.BUSY:
status =
register_ca(status=status) | Update the current agent state in opencast. |
17,165 | def GetVersion():
with open(os.path.join('googleads', 'common.py')) as versions_file:
source = versions_file.read()
return re.search("VERSION = '(.*?)'", source).group(1) | Gets the version from googleads/common.py.
We can't import this directly because new users would get ImportErrors on our
third party dependencies.
Returns:
The version of the library. |
17,166 | def DVSFile(ID, season, cadence=):
if cadence == :
strcadence =
else:
strcadence =
return \
% (ID, season, EVEREST_MAJOR_MINOR, strcadence) | Returns the name of the DVS PDF for a given target.
:param ID: The target ID
:param int season: The target season number
:param str cadence: The cadence type. Default `lc` |
17,167 | def n_to_one(num_inputs, num_streams, bits_per_axis, weight_per_axis):
num_bits = num_streams*bits_per_axis
encoder_params = {
"dimensions" : num_streams,
"max_values" : [[0.,1.]]*num_streams,
"bits_per_axis" : [bits_per_axis]*num_streams,
"weight_per_axis" : [weight_per_axis]*num_streams,
"wrap_around" : False
}
scalar_encode = ScalarEncoder(**encoder_params)
xs = np.random.sample(num_inputs)
ys = np.random.sample(num_inputs)
input_vectors = np.zeros((num_inputs, num_bits))
for i in range(num_inputs):
input_vectors[i] = scalar_encode([xs[i]] + [ys[i]]*(num_streams - 1) )
return input_vectors | Creating inputs of the form:
``( x, y, ..., y )''
|---------|
n
Here n = num_streams -1.
To be more precise, for each component we allocate a fixed number
of units `bits_per_axis` and encode each component with a scalar encoder.
This is a toy example for the following scenario: We are given 4 input streams,
such that 3 mutually determine each other, and the remaining one is independent from the rest. |
17,168 | def compile_link_import_strings(codes, build_dir=None, **kwargs):
build_dir = build_dir or tempfile.mkdtemp()
if not os.path.isdir(build_dir):
raise OSError("Non-existent directory: ", build_dir)
source_files = []
if kwargs.get('logger', False) is True:
import logging
logging.basicConfig(level=logging.DEBUG)
kwargs['logger'] = logging.getLogger()
only_update = kwargs.get('only_update', True)
for name, code_ in codes:
dest = os.path.join(build_dir, name)
differs = True
md5_in_mem = md5_of_string(code_.encode()).hexdigest()
if only_update and os.path.exists(dest):
if os.path.exists(dest+'.md5'):
md5_on_disk = open(dest+'.md5', 'rt').read()
else:
md5_on_disk = md5_of_file(dest).hexdigest()
differs = md5_on_disk != md5_in_mem
if not only_update or differs:
with open(dest, 'wt') as fh:
fh.write(code_)
open(dest+'.md5', 'wt').write(md5_in_mem)
source_files.append(dest)
return compile_link_import_py_ext(
source_files, build_dir=build_dir, **kwargs) | Creates a temporary directory and dumps, compiles and links
provided source code.
Parameters
----------
codes: iterable of name/source pair tuples
build_dir: string (default: None)
path to cache_dir. None implies use a temporary directory.
**kwargs:
keyword arguments passed onto `compile_link_import_py_ext` |
17,169 | def find_end(self, text, start_token, end_token, ignore_end_token=None):
if not text.startswith(start_token):
raise MAVParseError("invalid token start")
offset = len(start_token)
nesting = 1
while nesting > 0:
idx1 = text[offset:].find(start_token)
idx2 = text[offset:].find(end_token)
if ignore_end_token:
combined_token = ignore_end_token + end_token
if text[offset+idx2:offset+idx2+len(combined_token)] == combined_token:
idx2 += len(ignore_end_token)
if idx1 == -1 and idx2 == -1:
raise MAVParseError("token nesting error")
if idx1 == -1 or idx1 > idx2:
offset += idx2 + len(end_token)
nesting -= 1
else:
offset += idx1 + len(start_token)
nesting += 1
return offset | Find the end of a token.
Returns the offset in the string immediately after the matching end_token |
17,170 | def convert(self, txn):
ofxid = self.mk_ofxid(txn.id)
metadata = {}
posting_metadata = {"ofxid": ofxid}
if isinstance(txn, OfxTransaction):
posting = Posting(self.name,
Amount(txn.amount, self.currency),
metadata=posting_metadata)
return Transaction(
date=txn.date,
payee=self.format_payee(txn),
postings=[
posting,
posting.clone_inverted(
self.mk_dynamic_account(self.format_payee(txn),
exclude=self.name))])
elif isinstance(txn, InvestmentTransaction):
acct1 = self.name
acct2 = self.name
posting1 = None
posting2 = None
security = self.maybe_get_ticker(txn.security)
if isinstance(txn.type, str):
if re.match(, txn.type):
acct2 = self.unknownaccount or
elif txn.type == :
acct2 =
elif txn.type == :
acct2 =
elif txn.type == and txn.income_type == :
metadata[] = security
acct2 =
posting1 = Posting(acct1,
Amount(txn.total, self.currency),
metadata=posting_metadata)
posting2 = posting1.clone_inverted(acct2)
else:
pass
else:
if (txn.type in [0, 1, 3, 4]):
acct2 = self.unknownaccount or
elif (txn.type == 2):
acct2 =
else:
pass
aux_date = None
if txn.settleDate is not None and \
txn.settleDate != txn.tradeDate:
aux_date = txn.settleDate
if posting1 is None and posting2 is None:
posting1 = Posting(
acct1,
Amount(
txn.units,
security,
unlimited=True),
unit_price=Amount(
txn.unit_price,
self.currency,
unlimited=True),
metadata=posting_metadata)
posting2 = Posting(
acct2,
Amount(
txn.units *
txn.unit_price,
self.currency,
reverse=True))
else:
pass
return Transaction(
date=txn.tradeDate,
aux_date=aux_date,
payee=self.format_payee(txn),
metadata=metadata,
postings=[posting1, posting2]
) | Convert an OFX Transaction to a posting |
17,171 | def constant(X, n, mu, hyper_deriv=None):
if (n == 0).all():
if hyper_deriv is not None:
return scipy.ones(X.shape[0])
else:
return mu * scipy.ones(X.shape[0])
else:
return scipy.zeros(X.shape[0]) | Function implementing a constant mean suitable for use with :py:class:`MeanFunction`. |
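A usage sketch; the function comes from an older codebase that uses the scipy.* array aliases, so it presumes a SciPy version where scipy.ones/scipy.zeros still exist:
import numpy as np

X = np.random.rand(4, 2)                   # 4 input points in 2 dimensions
constant(X, np.zeros(2, dtype=int), 3.5)   # zeroth derivative -> array of four 3.5 values
constant(X, np.array([1, 0]), 3.5)         # any nonzero derivative order -> zeros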
17,172 | def addpackage(sys_sitedir, pthfile, known_dirs):
with open(join(sys_sitedir, pthfile)) as f:
for n, line in enumerate(f):
if line.startswith("
continue
line = line.rstrip()
if line:
if line.startswith(("import ", "import\t")):
exec (line, globals(), locals())
continue
else:
p_rel = join(sys_sitedir, line)
p_abs = abspath(line)
if isdir(p_rel):
os.environ[] += env_t(os.pathsep + p_rel)
sys.path.append(p_rel)
added_dirs.add(p_rel)
elif isdir(p_abs):
os.environ[] += env_t(os.pathsep + p_abs)
sys.path.append(p_abs)
added_dirs.add(p_abs)
if isfile(pthfile):
site.addpackage(sys_sitedir, pthfile, known_dirs)
else:
logging.debug("pth file not found") | Wrapper for site.addpackage
Try and work out which directories are added by
the .pth and add them to the known_dirs set
:param sys_sitedir: system site-packages directory
:param pthfile: path file to add
:param known_dirs: set of known directories |
17,173 | def closed(self, reason):
self.logger.debug(.format(self.item_list))
output_strings = json.dumps(self.item_list, ensure_ascii=False)
with open(self.output_file, 'w') as fh:
fh.write(output_strings) | Called after all asynchronous crawling has finished; serializes the data in ``item_list`` to **JSON**
and writes it to the specified output file, passing it on to :meth:`.ZhihuDaily.crawl`
:param obj reason: the reason the spider was closed |
17,174 | def close(self, reply_code=0, reply_text=, method_sig=(0, 0)):
if not self.is_open:
return
args = AMQPWriter()
args.write_short(reply_code)
args.write_shortstr(reply_text)
args.write_short(method_sig[0])
args.write_short(method_sig[1])
self._send_method((20, 40), args)
return self.wait(allowed_methods=[
(20, 41),
]) | request a channel close
This method indicates that the sender wants to close the
channel. This may be due to internal conditions (e.g. a forced
shut-down) or due to an error handling a specific method, i.e.
an exception. When a close is due to an exception, the sender
provides the class and method id of the method which caused
the exception.
RULE:
After sending this method any received method except
Channel.Close-OK MUST be discarded.
RULE:
The peer sending this method MAY use a counter or timeout
to detect failure of the other peer to respond correctly
with Channel.Close-OK..
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
class_id: short
failing method class
When the close is provoked by a method exception, this
is the class of the method.
method_id: short
failing method ID
When the close is provoked by a method exception, this
is the ID of the method. |
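A short usage sketch against an amqplib-style channel object; the channel itself and the reply codes are illustrative:
channel.close(reply_code=200, reply_text='Normal shutdown')
# when closing because a specific method failed, pass its class/method ids:
channel.close(reply_code=406, reply_text='Precondition failed', method_sig=(60, 40))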
17,175 | def set(self, image_file, source=None):
image_file.set_size()
self._set(image_file.key, image_file)
if source is not None:
if not self.get(source):
raise ThumbnailError(
'Cannot add thumbnails for non-existing source: %s' % source.name)  # message text assumed; original literal lost
thumbnails = self._get(source.key, identity='thumbnails') or []  # 'thumbnails' identity assumed
thumbnails = set(thumbnails)
thumbnails.add(image_file.key)
self._set(source.key, list(thumbnails), identity='thumbnails') | Updates store for the `image_file`. Makes sure the `image_file` has a
size set. |
17,176 | def pem_as_string(cert):
if hasattr(cert, 'read'):  # attribute name assumed: file-like objects are passed through
return cert
cert = cert.encode() if isinstance(cert, unicode) else cert
if re.match(_PEM_RE, cert):
return True
return False | Only return False if the certificate is a file path. Otherwise it
is a file object or raw string and will need to be fed to the
file open context. |
17,177 | def commit_on_success(using=None):
def entering(using):
enter_transaction_management(using=using)
def exiting(exc_value, using):
try:
if exc_value is not None:
if is_dirty(using=using):
rollback(using=using)
else:
commit(using=using)
finally:
leave_transaction_management(using=using)
return _transaction_func(entering, exiting, using) | This decorator activates commit on response. This way, if the view function
runs successfully, a commit is made; if the viewfunc produces an exception,
a rollback is made. This is one of the most common ways to do transaction
control in Web apps. |
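A usage sketch in the familiar Django style this helper mirrors; the view and model are hypothetical, and whether the bare-decorator or the called form is accepted depends on `_transaction_func`:
@commit_on_success
def create_order(request):
    Order.objects.create(...)   # committed if the view returns normally
    # any exception raised here rolls the transaction back instead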
17,178 | def quick_add(self, api_token, text, **kwargs):
params = {  # key names and the 'quick/add' endpoint below are assumptions; original literals lost
'token': api_token,
'text': text
}
return self._post('quick/add', params, **kwargs) | Add a task using the Todoist 'Quick Add Task' syntax.
:param api_token: The user's login api_token.
:type api_token: str
:param text: The text of the task that is parsed. A project
name starts with the `#` character, a label starts with a `@`
and an assignee starts with a `+`.
:type text: str
:param note: The content of the note.
:type note: str
:param reminder: The date of the reminder, added in free form text.
:type reminder: str
:return: The HTTP response to the request.
:rtype: :class:`requests.Response` |
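A usage sketch; the client object and token are placeholders:
api = TodoistAPI()   # hypothetical client exposing quick_add
response = api.quick_add('YOUR_API_TOKEN', 'Submit expense report #Work @office +Alice')
response.raise_for_status()
print(response.json())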
17,179 | def target_to_ipv6_short(target):
splitted = target.split('-')  # '-' range separator assumed
if len(splitted) != 2:
return None
try:
start_packed = inet_pton(socket.AF_INET6, splitted[0])
end_value = int(splitted[1], 16)
except (socket.error, ValueError):
return None
start_value = int(binascii.hexlify(start_packed[14:]), 16)
if end_value < 0 or end_value > 0xffff or end_value < start_value:
return None
end_packed = start_packed[:14] + struct.pack('!H', end_value)  # pack the 16-bit suffix big-endian
return ipv6_range_to_list(start_packed, end_packed) | Attempt to return an IPv6 short-range list from a target string. |
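A usage sketch, assuming the '-' delimiter filled in above and that `ipv6_range_to_list` returns the expanded addresses:
target_to_ipv6_short('::1:0:0:0:1-00ff')   # addresses ::1:0:0:0:1 through ::1:0:0:0:ff
target_to_ipv6_short('bad-input-string')   # -> None (not a two-part range)
target_to_ipv6_short('::1-0000')           # -> None (end value below the start)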
17,180 | def add_synonym(self, syn):
n = self.node(syn.class_id)
if 'meta' not in n:
n['meta'] = {}
meta = n['meta']
if 'synonyms' not in meta:
meta['synonyms'] = []
meta['synonyms'].append(syn.as_dict()) | Adds a synonym for a node |
17,181 | def update_channel(self, channel):
data = {'channels': channel}  # payload key and the endpoint path below are assumptions; original literals lost
return super(ApiInterfaceRequest, self).put('api/v3/channel/', data) | Method to update a channel.
:param channel: List containing the channels to be updated in the database.
:return: Id. |
17,182 | def CreateWeightTableLDAS(in_ldas_nc,
in_nc_lon_var,
in_nc_lat_var,
in_catchment_shapefile,
river_id,
in_connectivity_file,
out_weight_table,
area_id=None,
file_geodatabase=None):
data_ldas_nc = Dataset(in_ldas_nc)
variables_list = data_ldas_nc.variables.keys()
if in_nc_lon_var not in variables_list:
raise Exception("Invalid longitude variable. Choose from: {0}"
.format(variables_list))
if in_nc_lat_var not in variables_list:
raise Exception("Invalid latitude variable. Choose from: {0}"
.format(variables_list))
ldas_lon = data_ldas_nc.variables[in_nc_lon_var][:]
ldas_lat = data_ldas_nc.variables[in_nc_lat_var][:]
data_ldas_nc.close()
rtree_create_weight_table(ldas_lat, ldas_lon,
in_catchment_shapefile, river_id,
in_connectivity_file, out_weight_table,
file_geodatabase, area_id) | Create Weight Table for NLDAS or GLDAS grids, as well as for 2D Joules
or LIS grids.
Parameters
----------
in_ldas_nc: str
Path to the land surface model NetCDF grid.
in_nc_lon_var: str
The variable name in the NetCDF file for the longitude.
in_nc_lat_var: str
The variable name in the NetCDF file for the latitude.
in_catchment_shapefile: str
Path to the Catchment shapefile.
river_id: str
The name of the field with the river ID (Ex. 'DrainLnID' or 'LINKNO').
in_connectivity_file: str
The path to the RAPID connectivity file.
out_weight_table: str
The path to the output weight table file.
area_id: str, optional
The name of the field with the area of each catchment stored in meters
squared. Default is to calculate the area.
file_geodatabase: str, optional
Path to the file geodatabase. If you use this option, in_drainage_line
is the name of the stream network feature class.
(WARNING: Not always stable with GDAL.)
Example:
.. code:: python
from RAPIDpy.gis.weight import CreateWeightTableLDAS
CreateWeightTableLDAS(
in_ldas_nc='/path/to/runoff_grid.nc',
in_nc_lon_var="lon_110",
in_nc_lat_var="lat_110",
in_catchment_shapefile='/path/to/catchment.shp',
river_id='LINKNO',
in_connectivity_file='/path/to/rapid_connect.csv',
out_weight_table='/path/to/ldas_weight.csv',
) |
17,183 | def from_miss(self, **kwargs):
raise type(self).Missing(type(self)(**kwargs).key()) | Called to initialize an instance when it is not found in the cache.
For example, if your CacheModel should pull data from the database to
populate the cache,
...
def from_miss(self, username):
user = User.objects.get(username=username)
self.email = user.email
self.full_name = user.get_full_name() |
17,184 | def get_target_list(self, scan_id):
target_list = []
for target, _, _ in self.scans_table[scan_id]['targets']:  # 'targets' key assumed
target_list.append(target)
return target_list | Get a scan's target list. |
17,185 | def items(self):
l = list()
for attr, value in get_all_attributes(self.__class__):
value = getattr(self, attr)
if not isinstance(value, Constant):
l.append((attr, value))
return list(sorted(l, key=lambda x: x[0])) | non-class attributes, ordered alphabetically.
::
>>> class MyClass(Constant):
... a = 1 # non-class attribute
... b = 2 # non-class attribute
...
... class C(Constant):
... pass
...
... class D(Constant):
... pass
>>> my_class = MyClass()
>>> my_class.items()
[("a", 1), ("b", 2)]
.. versionchanged:: 0.0.5 |
17,186 | async def ack(self):
state = self._state
if state.is_bot:
raise ClientException()
return await state.http.ack_message(self.channel.id, self.id) | |coro|
Marks this message as read.
The user must not be a bot user.
Raises
-------
HTTPException
Acking failed.
ClientException
You must not be a bot user. |
17,187 | def polytropic_exponent(k, n=None, eta_p=None):
if n is None and eta_p:
return k*eta_p/(1.0 - k*(1.0 - eta_p))
elif eta_p is None and n:
return n*(k - 1.0)/(k*(n - 1.0))
else:
raise Exception() | Calculates one of:
* Polytropic exponent from polytropic efficiency
* Polytropic efficiency from the polytropic exponent
.. math::
n = \frac{k\eta_p}{1 - k(1-\eta_p)}
.. math::
\eta_p = \frac{\left(\frac{n}{n-1}\right)}{\left(\frac{k}{k-1}
\right)} = \frac{n(k-1)}{k(n-1)}
Parameters
----------
k : float
Isentropic exponent of the gas (Cp/Cv) [-]
n : float, optional
Polytropic exponent of the process [-]
eta_p : float, optional
Polytropic efficiency of the process, [-]
Returns
-------
n or eta_p : float
Polytropic exponent or polytropic efficiency, depending on input, [-]
Notes
-----
Examples
--------
>>> polytropic_exponent(1.4, eta_p=0.78)
1.5780346820809246
References
----------
.. [1] Couper, James R., W. Roy Penney, and James R. Fair. Chemical Process
Equipment: Selection and Design. 2nd ed. Amsterdam ; Boston: Gulf
Professional Publishing, 2009. |
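As a quick consistency check of the two branches, the exponent from the docstring example can be fed back in to recover the efficiency:
polytropic_exponent(1.4, n=1.5780346820809246)   # ≈ 0.78, the eta_p used above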
17,188 | def _set_view(self):
if self.logarithmic:
view_class = HorizontalLogView
else:
view_class = HorizontalView
self.view = view_class(
self.width - self.margin_box.x, self.height - self.margin_box.y,
self._box
) | Assign a horizontal view to current graph |
17,189 | def get_feat_segments(F, bound_idxs):
assert len(bound_idxs) > 0, "Boundaries can't be empty"
# Make sure that boundaries are not out of bounds
assert bound_idxs[0] >= 0 and bound_idxs[-1] < F.shape[0], \
"Boundaries are not correct for the given feature dimensions."
feat_segments = []
for i in range(len(bound_idxs) - 1):
feat_segments.append(F[bound_idxs[i]:bound_idxs[i + 1], :])
return feat_segments | Returns a set of segments defined by the bound_idxs.
Parameters
----------
F: np.ndarray
Matrix containing the features, one feature vector per row.
bound_idxs: np.ndarray
Array with boundary indeces.
Returns
-------
feat_segments: list
List of segments, one for each boundary interval. |
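A short sketch with synthetic features:
import numpy as np
F = np.random.rand(100, 12)            # 100 frames of 12-d features
segs = get_feat_segments(F, np.array([0, 30, 60, 99]))
[s.shape[0] for s in segs]             # -> [30, 30, 39]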
17,190 | def close(self, error=None):
if self.state is ConnectionStates.DISCONNECTED:
return
with self._lock:
if self.state is ConnectionStates.DISCONNECTED:
return
log.info('%s: Closing connection. %s', self, error or '')  # message format assumed
self._update_reconnect_backoff()
self._sasl_auth_future = None
self._protocol = KafkaProtocol(
client_id=self.config['client_id'],
api_version=self.config['api_version'])
if error is None:
error = Errors.Cancelled(str(self))
ifrs = list(self.in_flight_requests.items())
self.in_flight_requests.clear()
self.state = ConnectionStates.DISCONNECTED
sock = self._sock
self._sock = None
self.config['state_change_callback'](self.node_id, sock, self)  # config key assumed
sock.close()
for (_correlation_id, (future, _timestamp)) in ifrs:
future.failure(error) | Close socket and fail all in-flight-requests.
Arguments:
error (Exception, optional): pending in-flight-requests
will be failed with this exception.
Default: kafka.errors.KafkaConnectionError. |
17,191 | def submit_export(cls, file, volume, location, properties=None,
overwrite=False, copy_only=False, api=None):
data = {}
params = {}
volume = Transform.to_volume(volume)
file = Transform.to_file(file)
destination = {
'volume': volume,
'location': location
}
source = {
'file': file
}
if properties:
data['properties'] = properties
data['source'] = source
data['destination'] = destination
data['overwrite'] = overwrite
extra = {
'resource': cls.__name__,  # logging keys assumed; original literals lost
'query': data
}
logger.info('Submitting export', extra=extra)  # log message assumed
api = api if api else cls._API
if copy_only:
params['copy_only'] = True
_export = api.post(
cls._URL['query'], data=data, params=params).json()  # 'query' URL key assumed
else:
_export = api.post(
cls._URL['query'], data=data).json()
return Export(api=api, **_export) | Submit new export job.
:param file: File to be exported.
:param volume: Volume identifier.
:param location: Volume location.
:param properties: Properties dictionary.
:param overwrite: If True, overwrite the file if it already exists.
:param copy_only: If true files are kept on SevenBridges bucket.
:param api: Api Instance.
:return: Export object. |
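A usage sketch with sevenbridges-style identifiers; the id, volume and location values are placeholders:
export = Export.submit_export(
    file='FILE_ID',                    # file id or File resource
    volume='myuser/my-s3-volume',
    location='exports/aligned.bam',
    overwrite=True)
print(export.id)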
17,192 | def lookup_rdap(self, inc_raw=False, retry_count=3, depth=0,
excluded_entities=None, bootstrap=False,
rate_limit_timeout=120, asn_alts=None, extra_org_map=None,
inc_nir=True, nir_field_list=None, asn_methods=None,
get_asn_description=True):
from .rdap import RDAP
results = {'nir': None}
asn_data = None
response = None
if not bootstrap:
log.debug('ASN lookup for {0}'.format(self.address_str))  # message text assumed
asn_data = self.ipasn.lookup(
inc_raw=inc_raw, retry_count=retry_count, asn_alts=asn_alts,
extra_org_map=extra_org_map, asn_methods=asn_methods,
get_asn_description=get_asn_description
)
results.update(asn_data)
rdap = RDAP(self.net)
log.debug('RDAP lookup for {0}'.format(self.address_str))  # message text assumed
rdap_data = rdap.lookup(
inc_raw=inc_raw, retry_count=retry_count, asn_data=asn_data,
depth=depth, excluded_entities=excluded_entities,
response=response, bootstrap=bootstrap,
rate_limit_timeout=rate_limit_timeout
)
results.update(rdap_data)
if inc_nir:
nir = None
if 'JP' == asn_data['asn_country_code']:  # country codes inferred from the JPNIC/KRNIC note in the docstring
nir = 'jpnic'
elif 'KR' == asn_data['asn_country_code']:
nir = 'krnic'
if nir:
nir_whois = NIRWhois(self.net)
nir_data = nir_whois.lookup(
nir=nir, inc_raw=inc_raw, retry_count=retry_count,
response=None,
field_list=nir_field_list, is_offline=False
)
results['nir'] = nir_data
return results | The function for retrieving and parsing whois information for an IP
address via HTTP (RDAP).
**This is now the recommended method, as RDAP contains much better
information to parse.**
Args:
inc_raw (:obj:`bool`): Whether to include the raw whois results in
the returned dictionary. Defaults to False.
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
depth (:obj:`int`): How many levels deep to run queries when
additional referenced objects are found. Defaults to 0.
excluded_entities (:obj:`list`): Entity handles to not perform
lookups. Defaults to None.
bootstrap (:obj:`bool`): If True, performs lookups via ARIN
bootstrap rather than lookups based on ASN data. ASN lookups
are not performed and no output for any of the asn* fields is
provided. Defaults to False.
rate_limit_timeout (:obj:`int`): The number of seconds to wait
before retrying when a rate limit notice is returned via
rdap+json. Defaults to 120.
asn_alts (:obj:`list`): Additional lookup types to attempt if the
ASN dns lookup fails. Allow permutations must be enabled.
If None, defaults to all ['whois', 'http']. *WARNING*
deprecated in favor of new argument asn_methods.
extra_org_map (:obj:`dict`): Dictionary mapping org handles to
RIRs. This is for limited cases where ARIN REST (ASN fallback
HTTP lookup) does not show an RIR as the org handle e.g., DNIC
(which is now the built in ORG_MAP) e.g., {'DNIC': 'arin'}.
Valid RIR values are (note the case-sensitive - this is meant
to match the REST result):
'ARIN', 'RIPE', 'apnic', 'lacnic', 'afrinic'
Defaults to None.
inc_nir (:obj:`bool`): Whether to retrieve NIR (National Internet
Registry) information, if registry is JPNIC (Japan) or KRNIC
(Korea). If True, extra network requests will be required.
If False, the information returned for JP or KR IPs is
severely restricted. Defaults to True.
nir_field_list (:obj:`list`): If provided and inc_nir, a list of
fields to parse:
['name', 'handle', 'country', 'address', 'postal_code',
'nameservers', 'created', 'updated', 'contacts']
If None, defaults to all.
asn_methods (:obj:`list`): ASN lookup types to attempt, in order.
If None, defaults to all ['dns', 'whois', 'http'].
get_asn_description (:obj:`bool`): Whether to run an additional
query when pulling ASN information via dns, in order to get
the ASN description. Defaults to True.
Returns:
dict: The IP RDAP lookup results
::
{
'query' (str) - The IP address
'asn' (str) - The Autonomous System Number
'asn_date' (str) - The ASN Allocation date
'asn_registry' (str) - The assigned ASN registry
'asn_cidr' (str) - The assigned ASN CIDR
'asn_country_code' (str) - The assigned ASN country code
'asn_description' (str) - The ASN description
'entities' (list) - Entity handles referred by the top
level query.
'network' (dict) - Network information which consists of
the fields listed in the ipwhois.rdap._RDAPNetwork
dict.
'objects' (dict) - Mapping of entity handle->entity dict
which consists of the fields listed in the
ipwhois.rdap._RDAPEntity dict. The raw result is
included for each object if the inc_raw parameter
is True.
'raw' (dict) - Whois results in json format if the inc_raw
parameter is True.
'nir' (dict) - ipwhois.nir.NIRWhois results if inc_nir is
True.
} |
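A usage sketch, assuming this method lives on `ipwhois.IPWhois` as in the upstream project (network access required):
from ipwhois import IPWhois
obj = IPWhois('74.125.225.229')
results = obj.lookup_rdap(depth=1, inc_nir=False)
print(results['asn'], results['asn_description'])
print(results['network']['name'])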
17,193 | def update_readme(self, template_readme: Template):
readme = os.path.join(self.cached_repo, "README.md")
if os.path.exists(readme):
os.remove(readme)
links = {model_type: {} for model_type in self.models.keys()}
for model_type, model_uuids in self.models.items():
for model_uuid in model_uuids:
links[model_type][model_uuid] = os.path.join("/", model_type, "%s.md" % model_uuid)
with open(readme, "w") as fout:
fout.write(template_readme.render(models=self.models, meta=self.meta, links=links))
git.add(self.cached_repo, [readme])
self._log.info("Updated %s", readme) | Generate the new README file locally. |
17,194 | def eval(self, expr):
self.expr = expr
return self._eval(ast.parse(expr.strip()).body[0].value) | Evaluates an expression
:param expr: Expression to evaluate
:return: Result of expression |
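A usage sketch, assuming `_eval` handles the usual arithmetic nodes; the evaluator class name is hypothetical:
evaluator = ExpressionEvaluator()
evaluator.eval('2 + 3 * 4')      # -> 14
evaluator.eval('(10 - 4) / 2')   # -> 3.0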
17,195 | def sort(self, **parameters):
self._sort_by_sortedset = None
is_sortedset = False
if parameters.get('by_score'):
if parameters.get('by'):
# the branch below is an approximate reconstruction; parts of the original line were lost
raise ValueError("You can't use `by` and `by_score` in the same call")
by = parameters.pop('by_score')
if not (isinstance(by, str) or (isinstance(by, SortedSetField) and by._instance)):
raise ValueError("To sort by sorted set, you must pass a SortedSetFied (attached to a model) or a string representing the key of a redis zset to the `by_score` named argument")
is_sortedset = True
parameters['by'] = by.name if isinstance(by, SortedSetField) else by
super(ExtendedCollectionManager, self).sort(**parameters)
if is_sortedset:
self._sort_by_sortedset = self._sort
self._sort = None
return self | Enhance the default sort method to accept a new parameter "by_score", to
use instead of "by" if you want to sort by the score of a sorted set.
You must pass to "by_score" the key of a redis sorted set (or a
sortedSetField attached to an instance) |
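A usage sketch; the model, field and key names are hypothetical:
# sort a collection by the score each member has in a redis sorted set
MyModel.collection().sort(by_score='leaderboard:points')
# or by a SortedSetField attached to a model instance
MyModel.collection().sort(by_score=some_instance.scores)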
17,196 | def replace(self, source, dest):
for i, broker in enumerate(self.replicas):
if broker == source:
self.replicas[i] = dest
return | Replace source broker with destination broker in replica set if found. |
17,197 | def _read_config(self):
if not self._file_path:
return None
elif self._file_path.startswith('s3://'):
return self._read_s3_config()
elif self._file_path.startswith('http://') or \
self._file_path.startswith('https://'):
return self._read_remote_config()
elif not path.exists(self._file_path):
raise ValueError(
'Configuration file not found: {}'.format(self._file_path))  # message text assumed; original literal lost
with open(self._file_path, 'r') as handle:
return handle.read() | Read the configuration from the various places it may be read from.
:rtype: str
:raises: ValueError |
17,198 | def _validate_ding0_grid_import(mv_grid, ding0_mv_grid, lv_grid_mapping):
_validate_ding0_mv_grid_import(mv_grid, ding0_mv_grid)
_validate_ding0_lv_grid_import(mv_grid.lv_grids, ding0_mv_grid,
lv_grid_mapping)
_validate_load_generation(mv_grid, ding0_mv_grid) | Cross-check imported data with original data source
Parameters
----------
mv_grid: MVGrid
eDisGo MV grid instance
ding0_mv_grid: MVGridDing0
Ding0 MV grid instance
lv_grid_mapping: dict
Translates Ding0 LV grids to associated, newly created eDisGo LV grids |
17,199 | def pull_logs(self, project_name, logstore_name, shard_id, cursor, count=None, end_cursor=None, compress=None):
headers = {}
if compress is None or compress:
if lz4:
headers['Accept-Encoding'] = 'lz4'  # header names and values assumed from the decompression logic below
else:
headers['Accept-Encoding'] = 'deflate'
else:
headers['Accept-Encoding'] = ''
headers['Accept'] = 'application/x-protobuf'
params = {}
resource = "/logstores/" + logstore_name + "/shards/" + str(shard_id)
params['type'] = 'log'  # 'type' parameter value assumed
params['cursor'] = cursor
count = count or 1000
params['count'] = str(count)
if end_cursor:
params['end_cursor'] = end_cursor
(resp, header) = self._send("GET", project_name, None, resource, params, headers, "binary")
compress_type = Util.h_v_td(header, 'x-log-compresstype', '').lower()  # header name assumed
if compress_type == :
raw_size = int(Util.h_v_t(header, 'x-log-bodyrawsize'))
if lz4:
raw_data = lz_decompress(raw_size, resp)
return PullLogResponse(raw_data, header)
else:
raise LogException("ClientHasNoLz4", "There's no lz4 library available to decompress the response")  # message tail assumed
elif compress_type in ('gzip', 'deflate'):
raw_size = int(Util.h_v_t(header, 'x-log-bodyrawsize'))
raw_data = zlib.decompress(resp)
return PullLogResponse(raw_data, header)
else:
return PullLogResponse(resp, header) | batch pull log data from log service
An unsuccessful operation will raise a LogException.
:type project_name: string
:param project_name: the Project name
:type logstore_name: string
:param logstore_name: the logstore name
:type shard_id: int
:param shard_id: the shard id
:type cursor: string
:param cursor: the start cursor to get data
:type count: int
:param count: the required pull log package count, default 1000 packages
:type end_cursor: string
:param end_cursor: the end cursor position to get data
:type compress: boolean
:param compress: whether to compress data for transfer; default is True
:return: PullLogResponse
:raise: LogException |
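A paging sketch against an aliyun-log-style client; the project/logstore names and the response accessors (get_next_cursor, get_loggroup_list) follow the upstream SDK and should be treated as assumptions:
client = LogClient(endpoint, access_key_id, access_key)            # hypothetical setup
cursor = client.get_cursor('my-project', 'my-logstore', 0, 'begin').get_cursor()
while True:
    res = client.pull_logs('my-project', 'my-logstore', 0, cursor, count=1000)
    handle_loggroups(res.get_loggroup_list())                      # user-supplied handler
    next_cursor = res.get_next_cursor()
    if next_cursor == cursor:
        break                                                      # caught up, nothing new
    cursor = next_cursor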