Unnamed: 0 (int64, 0–389k) | code (string, 26–79.6k chars) | docstring (string, 1–46.9k chars)
---|---|---
26,500 |
def toString(self, obj):
return Box(shareID=obj.shareID.encode(),
localpart=obj.localpart.encode(),
domain=obj.domain.encode()).serialize()
|
Convert the given L{Identifier} to a string.
|
26,501 |
def _gen_dimension_table(self):
headers = [, ,
, ]
table = []
for dimval in sorted(self.dimensions(copy=False).itervalues(),
key=lambda dval: dval.name.upper()):
table.append([dimval.name,
dimval.description,
dimval.global_size,
(dimval.lower_extent, dimval.upper_extent)])
return table, headers
|
2D array describing each registered dimension
together with headers - for use in __str__
|
26,502 |
def automodsumm_to_autosummary_lines(fn, app):
fullfn = os.path.join(app.builder.env.srcdir, fn)
with open(fullfn) as fr:
if in app._extensions:
from astropy_helpers.sphinx.ext.automodapi import automodapi_replace
docname = os.path.splitext(fn)[0]
filestr = automodapi_replace(fr.read(), app, True, docname, False)
else:
filestr = fr.read()
spl = _automodsummrex.split(filestr)
indent1s = spl[1::5]
mods = spl[2::5]
opssecs = spl[3::5]
indent2s = spl[4::5]
remainders = spl[5::5]
newlines = []
for i, (i1, i2, modnm, ops, rem) in enumerate(zip(indent1s, indent2s, mods,
opssecs, remainders)):
allindent = i1 + ( if i2 is None else i2)
oplines = ops.split()
toskip = []
allowedpkgnms = []
funcsonly = clssonly = False
for i, ln in reversed(list(enumerate(oplines))):
if in ln:
funcsonly = True
del oplines[i]
if in ln:
clssonly = True
del oplines[i]
if in ln:
toskip.extend(_str_list_converter(ln.replace(, )))
del oplines[i]
if in ln:
allowedpkgnms.extend(_str_list_converter(ln.replace(, )))
del oplines[i]
if funcsonly and clssonly:
msg = (
)
lnnum = sum([spl[j].count() for j in range(i * 5 + 1)])
app.warn( + msg, (fn, lnnum))
continue
newlines.extend([i1 + + modnm,
,
])
newlines.extend(oplines)
ols = True if len(allowedpkgnms) == 0 else allowedpkgnms
for nm, fqn, obj in zip(*find_mod_objs(modnm, onlylocals=ols)):
if nm in toskip:
continue
if funcsonly and not inspect.isroutine(obj):
continue
if clssonly and not inspect.isclass(obj):
continue
newlines.append(allindent + nm)
newlines.append()
return newlines
|
Generates lines from a file with an "automodsumm" entry suitable for
feeding into "autosummary".
Searches the provided file for `automodsumm` directives and returns
a list of lines specifying the `autosummary` commands for the modules
requested. This does *not* return the whole file contents - just an
autosummary section in place of any :automodsumm: entries. Note that
any options given for `automodsumm` are also included in the
generated `autosummary` section.
Parameters
----------
fn : str
The name of the file to search for `automodsumm` entries.
app : sphinx.application.Application
The sphinx Application object
Returns
-------
lines : list of str
Lines for all `automodsumm` entries with the entries replaced by
`autosummary` and the module's members added.
|
26,503 |
def remote_space_available(self, search_pattern=r"(\d+) \w+ free"):
remote_cmd = "dir {}".format(self.file_system)
remote_output = self.ssh_ctl_chan.send_command_expect(remote_cmd)
match = re.search(search_pattern, remote_output)
if "kbytes" in match.group(0) or "Kbytes" in match.group(0):
return int(match.group(1)) * 1000
return int(match.group(1))
|
Return space available on remote device.
|
26,504 |
def cv_squared(x):
epsilon = 1e-10
float_size = tf.to_float(tf.size(x)) + epsilon
mean = tf.reduce_sum(x) / float_size
variance = tf.reduce_sum(tf.squared_difference(x, mean)) / float_size
return variance / (tf.square(mean) + epsilon)
|
The squared coefficient of variation of a sample.
Useful as a loss to encourage a positive distribution to be more uniform.
Epsilons added for numerical stability.
Returns 0 for an empty Tensor.
Args:
x: a `Tensor`.
Returns:
a `Scalar`.
|
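For reference, the same statistic can be restated in plain NumPy; the helper name below is illustrative and not part of the dataset row.
import numpy as np

def cv_squared_np(x, epsilon=1e-10):
    # variance / mean^2 with small epsilons for numerical stability;
    # returns roughly 0 for an empty array, mirroring the TF version above.
    x = np.asarray(x, dtype=float)
    n = x.size + epsilon
    mean = x.sum() / n
    variance = np.sum((x - mean) ** 2) / n
    return variance / (mean ** 2 + epsilon)

print(cv_squared_np([1.0, 1.0, 1.0, 1.0]))  # ~0: perfectly uniform
print(cv_squared_np([4.0, 0.0, 0.0, 0.0]))  # ~3: highly skewed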
26,505 |
def auth_criteria(self):
auth = {}
for attr in dir(self):
if attr != :
attribute = getattr(self, attr)
if isinstance(attribute, Callable) and hasattr(attribute, ):
auth[getattr(self, attr)._service_auth] = attribute
return auth
|
This attribute provides the mapping of services to their auth requirements.
Returns:
(dict) : the mapping from services to their auth requirements.
|
26,506 |
def download_files_maybe_extract(urls, directory, check_files=[]):
check_files = [os.path.join(directory, f) for f in check_files]
if _check_download(*check_files):
return
for url in urls:
download_file_maybe_extract(url=url, directory=directory)
if not _check_download(*check_files):
raise ValueError()
|
Download the files at ``urls`` to ``directory``. Extract to ``directory`` if tar or zip.
Args:
urls (list of str): URLs of the files to download.
directory (str): Directory to download to.
check_files (list of str): Check if these files exist, ensuring the download succeeded.
If these files exist before the download, the download is skipped.
Raises:
ValueError: Error if one of the ``check_files`` is not found following the download.
|
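For reference, a minimal call sketch; the URLs and file names below are placeholders, not values from the dataset.
urls = [
    "https://example.com/data/train.tar.gz",
    "https://example.com/data/dev.tar.gz",
]
download_files_maybe_extract(
    urls=urls,
    directory="data/",
    check_files=["train.txt", "dev.txt"],  # download is skipped if these already exist
)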
26,507 |
def read_data(self, blocksize=4096):
frames = ctypes.c_uint(blocksize // self._client_fmt.mBytesPerFrame)
buf = ctypes.create_string_buffer(blocksize)
buflist = AudioBufferList()
buflist.mNumberBuffers = 1
buflist.mBuffers[0].mNumberChannels = \
self._client_fmt.mChannelsPerFrame
buflist.mBuffers[0].mDataByteSize = blocksize
buflist.mBuffers[0].mData = ctypes.cast(buf, ctypes.c_void_p)
while True:
check(_coreaudio.ExtAudioFileRead(
self._obj, ctypes.byref(frames), ctypes.byref(buflist)
))
assert buflist.mNumberBuffers == 1
size = buflist.mBuffers[0].mDataByteSize
if not size:
break
data = ctypes.cast(buflist.mBuffers[0].mData,
ctypes.POINTER(ctypes.c_char))
blob = data[:size]
yield blob
|
Generates byte strings reflecting the audio data in the file.
|
26,508 |
def log_indexing_error(cls, indexing_errors):
indexing_errors_log = []
for indexing_error in indexing_errors:
indexing_errors_log.append(str(indexing_error))
raise exceptions.ElasticsearchException(.join(indexing_errors_log))
|
Logs indexing errors and raises a general ElasticSearch Exception
|
26,509 |
def check_str(obj):
if isinstance(obj, str):
return obj
if isinstance(obj, float):
return str(int(obj))
else:
return str(obj)
|
Returns a string for various input types
|
26,510 |
def points(self):
vtk_data = self.GetPoints().GetData()
arr = vtk_to_numpy(vtk_data)
return vtki_ndarray(arr, vtk_data)
|
returns a pointer to the points as a numpy object
|
26,511 |
def unset_sentry_context(self, tag):
if self.sentry_client:
self.sentry_client.tags.pop(tag, None)
|
Remove a context tag from sentry
:param tag: The context tag to remove
:type tag: :class:`str`
|
26,512 |
def parse_sentry_configuration(filename):
filetype = os.path.splitext(filename)[-1][1:].lower()
if filetype == :
config = ConfigParser()
config.read(filename)
ini_key =
ini_sections = [, ]
for section in ini_sections:
if section in config:
print(
.format(section=section, key=ini_key))
try:
return config[section][ini_key]
except KeyError:
print(
.format(section=section, key=ini_key))
raise SystemExit(
.format(
file=filename,
sec_list=.join(ini_sections),
))
elif filetype == :
raise SystemExit(
)
else:
raise SystemExit(
% filetype)
|
Parse Sentry DSN out of an application or Sentry configuration file
|
26,513 |
def mktar_from_dockerfile(fileobject: BinaryIO) -> IO:
f = tempfile.NamedTemporaryFile()
t = tarfile.open(mode="w:gz", fileobj=f)
if isinstance(fileobject, BytesIO):
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(fileobject.getvalue())
fileobject.seek(0)
else:
dfinfo = t.gettarinfo(fileobj=fileobject, arcname="Dockerfile")
t.addfile(dfinfo, fileobject)
t.close()
f.seek(0)
return f
|
Create a gzip-compressed tar archive from a Dockerfile
**Remember to close the file object**
Args:
fileobject: a Dockerfile
Returns:
a NamedTemporaryFile() object
|
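For reference, a minimal sketch of using the returned NamedTemporaryFile; the Dockerfile content is made up.
import io
import tarfile

dockerfile = io.BytesIO(b"FROM python:3.11-slim\n")
archive = mktar_from_dockerfile(dockerfile)
with tarfile.open(fileobj=archive, mode="r:gz") as tar:
    print(tar.getnames())  # ['Dockerfile']
archive.close()  # remember to close the file object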
26,514 |
def frontendediting_request_processor(page, request):
if not in request.GET:
return
response = HttpResponseRedirect(request.path)
if request.user.has_module_perms():
if in request.GET:
try:
enable_fe = int(request.GET[]) > 0
except ValueError:
enable_fe = False
if enable_fe:
response.set_cookie(str(), enable_fe)
clear_cache()
else:
response.delete_cookie(str())
clear_cache()
else:
response.delete_cookie(str())
return response
|
Sets the frontend editing state in the cookie depending on the
``frontend_editing`` GET parameter and the user's permissions.
|
26,515 |
def __calculate_center(self, cluster):
dimension = len(self.__pointer_data[cluster[0]]);
center = [0] * dimension;
for index_point in cluster:
for index_dimension in range(0, dimension):
center[index_dimension] += self.__pointer_data[index_point][index_dimension];
for index_dimension in range(0, dimension):
center[index_dimension] /= len(cluster);
return center;
|
!
@brief Calculates new center.
@return (list) New value of the center of the specified cluster.
|
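For reference, the per-dimension averaging above is equivalent to a column-wise mean; the points and cluster indices below are made-up example data.
import numpy as np

points = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
cluster = [0, 2]                        # indices of the points assigned to this cluster
center = points[cluster].mean(axis=0)   # column-wise mean -> array([3., 4.])
print(center)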
26,516 |
def proc_monomer(self, monomer_info, parent, mon_cls=False):
monomer_labels, monomer_data = monomer_info
if len(monomer_labels) > 1:
raise ValueError(
.format(monomer_labels))
else:
monomer_label = list(monomer_labels)[0]
if mon_cls:
monomer_class = mon_cls
het = True
elif monomer_label[0] == :
if monomer_label[2] in standard_amino_acids.values():
monomer_class = Residue
else:
monomer_class = Nucleotide
het = False
else:
raise ValueError()
monomer = monomer_class(
atoms=None, mol_code=monomer_label[2], monomer_id=monomer_label[1],
insertion_code=monomer_label[3], is_hetero=het, ampal_parent=parent
)
monomer.states = self.gen_states(monomer_data.values(), monomer)
monomer._active_state = sorted(monomer.states.keys())[0]
return monomer
|
Processes a record into a `Monomer`.
Parameters
----------
monomer_info : (set, OrderedDict)
Labels and data for a monomer.
parent : ampal.Polymer
`Polymer` used to assign `ampal_parent` on created
`Monomer`.
mon_cls : `Monomer class or subclass`, optional
A `Monomer` class can be defined explicitly.
|
26,517 |
def stop(self, timeout=None):
stopped = True
self.__shutdown.set()
if self.__bgthread:
logger.debug()
self.__bgthread.join(timeout)
if self.__bgthread.is_alive():
logger.warning()
stopped = False
self.__bgthread = None
return stopped
|
Requests the device to stop running, waiting at most the given timeout in seconds (fractional). Has no effect if
`run()` was not called with background=True set. Returns True if successfully stopped (or already not running).
|
26,518 |
def export_network(nw, mode=):
nw.control_circuit_breakers(mode=)
srid = str(int(nw.config[][]))
lv_info = True
mv_info = True
if mode == :
mv_info = False
if mode == :
lv_info = False
run_id = nw.metadata[]
lvgrid_idx = 0
lv_grid_dict = {}
lvloads_idx = 0
lv_loads_dict = {}
mvgrid_idx = 0
mv_grid_dict = {}
mvloads_idx = 0
mv_loads_dict = {}
mvgen_idx = 0
mv_gen_dict = {}
mvcb_idx = 0
mvcb_dict = {}
mvcd_idx = 0
mv_cd_dict = {}
mvstations_idx = 0
hvmv_stations_dict = {}
mvtrafos_idx = 0
hvmv_trafos_dict = {}
lvgen_idx = 0
lv_gen_dict = {}
lvcd_idx = 0
lv_cd_dict = {}
lvstations_idx = 0
mvlv_stations_dict = {}
lvtrafos_idx = 0
mvlv_trafos_dict = {}
areacenter_idx = 0
areacenter_dict = {}
lines_idx = 0
lines_dict = {}
LVMVmapping_idx = 0
mvlv_mapping_dict = {}
def aggregate_generators(gen, aggr):
if gen.v_level not in aggr[]:
aggr[][gen.v_level] = {}
if gen.type not in aggr[][gen.v_level]:
aggr[][gen.v_level][gen.type] = {}
if gen.subtype not in aggr[][gen.v_level][gen.type]:
aggr[][gen.v_level][gen.type].update(
{gen.subtype: {: [gen.id_db],
: gen.capacity}})
else:
aggr[][gen.v_level][gen.type][gen.subtype][
].append(gen.id_db)
aggr[][gen.v_level][gen.type][gen.subtype][
] += gen.capacity
return aggr
def aggregate_loads(la_center, aggr):
for s in [, , , ]:
if s not in aggr[]:
aggr[][s] = {}
for t in [,]:
if t not in aggr[][s]:
aggr[][s][t] = 0
aggr[][][] += sum(
[_.sector_consumption_retail
for _ in la_center.lv_load_area._lv_grid_districts])
aggr[][][] += sum(
[_.sector_consumption_industrial
for _ in la_center.lv_load_area._lv_grid_districts])
aggr[][][] += sum(
[_.sector_consumption_agricultural
for _ in la_center.lv_load_area._lv_grid_districts])
aggr[][][] += sum(
[_.sector_consumption_residential
for _ in la_center.lv_load_area._lv_grid_districts])
aggr[][][] += sum(
[_.peak_load_retail
for _ in la_center.lv_load_area._lv_grid_districts])
aggr[][][] += sum(
[_.peak_load_industrial
for _ in la_center.lv_load_area._lv_grid_districts])
aggr[][][] += sum(
[_.peak_load_agricultural
for _ in la_center.lv_load_area._lv_grid_districts])
aggr[][][] += sum(
[_.peak_load_residential
for _ in la_center.lv_load_area._lv_grid_districts])
return aggr
for mv_district in nw.mv_grid_districts():
mv_grid_id = mv_district.mv_grid.id_db
mv_grid_id_db = .join(
[str(mv_district.mv_grid.__class__.__name__), , str(mv_grid_id), str(mv_district.mv_grid.id_db)])
if mv_info:
lv_grid_id = 0
mvgrid_idx += 1
mv_grid_dict[mvgrid_idx] = {
: mv_district.mv_grid.id_db,
: .join([str(mv_district.mv_grid.__class__.__name__), , str(mv_grid_id),
str(mv_district.mv_grid.id_db)]),
: wkt_dumps(mv_district.geo_data),
:
sum([_.zensus_sum
for _ in
mv_district._lv_load_areas
if not np.isnan(_.zensus_sum)]),
: mv_district.mv_grid.v_level,
: run_id
}
for node in mv_district.mv_grid.graph_nodes_sorted():
geom = wkt_dumps(node.geo_data)
db_id = node.id_db
if isinstance(node, LVStationDing0):
if not node.lv_load_area.is_aggregated:
lvstations_idx += 1
mvlv_stations_dict[lvstations_idx] = {
: .join([str(node.__class__.__name__), , str(mv_grid_id), str(node.id_db)]),
: .join([, , str(node.id_db), str(node.id_db)]),
: geom,
: run_id,
}
LVMVmapping_idx += 1
mvlv_mapping_dict[LVMVmapping_idx] = {
: mv_grid_id,
: mv_grid_id_db,
: node.id_db,
: .join([, , str(node.id_db), str(node.id_db)]),
: run_id,
}
for t in node.transformers():
lvtrafos_idx += 1
mvlv_trafos_dict[lvtrafos_idx] = {
: .join([str(t.__class__.__name__), , str(mv_grid_id), str(node.id_db)]),
: geom,
: .join([, , str(node.id_db), str(node.id_db)]),
: t.v_level,
: t.s_max_a,
: t.x,
: t.r,
: run_id,
}
elif isinstance(node, MVStationDing0):
mvstations_idx += 1
hvmv_stations_dict[mvstations_idx] = {
: .join([str(node.__class__.__name__), , str(mv_grid_id), str(node.id_db)]),
: mv_grid_id_db,
: geom,
: run_id,
}
for t in node.transformers():
mvtrafos_idx += 1
hvmv_trafos_dict[mvtrafos_idx] = {
: .join([str(t.__class__.__name__), , str(mv_grid_id), str(node.id_db)]),
: geom,
: mv_grid_id_db,
: t.v_level,
: t.s_max_a,
: t.x,
: t.r,
: run_id,
}
elif isinstance(node, GeneratorDing0):
if node.subtype == None:
subtype =
else:
subtype = node.subtype
type = node.type
mvgen_idx += 1
mv_gen_dict[mvgen_idx] = {
: .join([str(node.__class__.__name__), , str(mv_grid_id), str(node.id_db)]),
: mv_grid_id_db,
: geom,
: type,
: subtype,
: node.v_level,
: node.capacity,
: run_id,
: False,
}
elif isinstance(node, MVCableDistributorDing0):
mvcd_idx += 1
mv_cd_dict[mvcd_idx] = {
: .join([str(node.__class__.__name__), , str(mv_grid_id), str(node.id_db)]),
: mv_grid_id_db,
: geom,
: run_id,
}
elif isinstance(node, LVLoadAreaCentreDing0):
areacenter_idx += 1
aggr_lines = 0
aggr = {: {}, : {}, : []}
for lvgd in node.lv_load_area._lv_grid_districts:
for aggr_gen in lvgd.lv_grid.generators():
aggr = aggregate_generators(aggr_gen, aggr)
if aggr_gen.subtype == None:
subtype =
else:
subtype = aggr_gen.subtype
type = aggr_gen.type
aggr = aggregate_loads(node, aggr)
aggr[] = {
: node.lv_load_area.zensus_sum,
: node.lv_load_area.geo_area}
aggr_line_type = nw._static_data[].iloc[
nw._static_data[][].idxmax()]
geom = wkt_dumps(node.lv_load_area.geo_area)
for aggr_node in aggr:
if aggr_node == :
mvgenaggr_idx = 0
for v_level in aggr[]:
for type in aggr[][v_level]:
for subtype in aggr[][v_level][type]:
mvgen_idx += 1
mvgenaggr_idx += 1
mv_gen_dict[mvgen_idx] = {
: .join(
[str(aggr_gen.__class__.__name__), , str(mv_grid_id),
str(aggr_gen.id_db), str(mvgenaggr_idx)]),
: mv_grid_id_db,
: geom,
: type,
: subtype,
: v_level,
: aggr[][v_level][type][subtype][],
: True,
: run_id,
}
lines_idx += 1
aggr_lines += 1
lines_dict[lines_idx] = {
: .join(
[str(mv_grid_id), , str(node.lv_load_area.id_db),
str(aggr_lines)]),
: mv_grid_id_db,
: ,
: ,
: 1e-3,
: aggr_line_type.U_n,
: aggr_line_type.I_max_th,
: aggr_line_type.R,
: aggr_line_type.L,
: aggr_line_type.C,
: .join(
[, , str(mv_grid_id), str(mvloads_idx)]),
: .join([
, , str(mv_grid_id), str(mv_grid_id)]),
: run_id,
}
elif isinstance(node, CircuitBreakerDing0):
mvcb_idx += 1
mvcb_dict[mvcb_idx] = {
: .join([str(node.__class__.__name__), , str(mv_grid_id), str(node.id_db)]),
: mv_grid_id,
: mv_grid_id_db,
: geom,
: node.status,
: run_id,
}
else:
type =
for branch in mv_district.mv_grid.graph_edges():
geom = from_shape(LineString([branch[][0].geo_data, branch[][1].geo_data]),
srid=srid)
if not any([isinstance(branch[][0], LVLoadAreaCentreDing0),
isinstance(branch[][1], LVLoadAreaCentreDing0)]):
lines_idx += 1
lines_dict[lines_idx] = {
: branch[].id_db,
: mv_grid_id_db,
: branch[].type[],
: branch[].kind,
: branch[].length / 1e3,
: branch[].type[],
: branch[].type[],
: branch[].type[],
: branch[].type[],
: branch[].type[],
: .join([str(branch[][0].__class__.__name__), , str(mv_grid_id),
str(branch[][0].id_db)]),
: .join([str(branch[][1].__class__.__name__), , str(mv_grid_id),
str(branch[][1].id_db)]),
: run_id,
}
if lv_info:
for LA in mv_district.lv_load_areas():
for lv_district in LA.lv_grid_districts():
if not lv_district.lv_grid.grid_district.lv_load_area.is_aggregated:
lvgrid_idx += 1
lv_grid_dict[lvgrid_idx] = {
: lv_district.lv_grid.id_db,
: .join(
[str(lv_district.lv_grid.__class__.__name__), , str(lv_district.lv_grid.id_db),
str(lv_district.lv_grid.id_db)]),
: wkt_dumps(lv_district.geo_data),
: lv_district.population,
: lv_district.lv_grid.v_level / 1e3,
: run_id
}
lv_grid_id = lv_district.lv_grid.id_db
lv_grid_id_db = .join(
[str(lv_district.lv_grid.__class__.__name__), , str(lv_district.lv_grid.id_db),
str(lv_district.lv_grid.id_db)])
for node in lv_district.lv_grid.graph_nodes_sorted():
if isinstance(node, GeneratorDing0):
if node.subtype == None:
subtype =
else:
subtype = node.subtype
type = node.type
lvgen_idx += 1
lv_gen_dict[lvgen_idx] = {
: .join(
[str(node.__class__.__name__), , str(lv_grid_id), str(node.id_db)]),
: lv_grid_id_db,
: wkt_dumps(node.geo_data),
: type,
: subtype,
: node.v_level,
: node.capacity,
: run_id,
}
elif isinstance(node, LVCableDistributorDing0):
lvcd_idx += 1
lv_cd_dict[lvcd_idx] = {
: .join(
[str(node.__class__.__name__), , str(lv_grid_id), str(node.id_db)]),
: lv_grid_id_db,
: None,
: run_id,
}
elif isinstance(node, LVLoadDing0):
consumption_dict = {}
for k in [, , , ]:
if k in node.consumption.keys():
consumption_dict[k] = node.consumption[k]
else:
consumption_dict[k] = None
lvloads_idx += 1
lv_loads_dict[lvloads_idx] = {
: .join(
[str(node.__class__.__name__), , str(lv_grid_id), str(node.id_db)]),
: lv_grid_id_db,
: None,
: consumption_dict[],
: consumption_dict[],
: consumption_dict[],
: consumption_dict[],
: run_id,
}
del consumption_dict
else:
type =
for branch in lv_district.lv_grid.graph_edges():
if not any([isinstance(branch[][0], LVLoadAreaCentreDing0),
isinstance(branch[][1], LVLoadAreaCentreDing0)]):
lines_idx += 1
lines_dict[lines_idx] = {
: branch[].id_db,
: lv_grid_id_db,
: branch[].type.to_frame().columns[0],
: branch[].kind,
: branch[].length / 1e3,
: branch[].type[] / 1e3,
: branch[].type[],
: branch[].type[],
: branch[].type[],
: branch[].type[],
: .join(
[str(branch[][0].__class__.__name__), , str(lv_grid_id),
str(branch[][0].id_db)])
if not isinstance(branch[][0], LVStationDing0) else .join(
[str(branch[][0].__class__.__name__), , str(mv_grid_id),
str(branch[][0].id_db)]),
: .join(
[str(branch[][1].__class__.__name__), , str(lv_grid_id),
str(branch[][1].id_db)])
if not isinstance(branch[][1], LVStationDing0) else .join(
[str(branch[][1].__class__.__name__), , str(mv_grid_id),
str(branch[][1].id_db)]),
: run_id,
}
lv_grid = pd.DataFrame.from_dict(lv_grid_dict, orient=)
lv_gen = pd.DataFrame.from_dict(lv_gen_dict, orient=)
lv_cd = pd.DataFrame.from_dict(lv_cd_dict, orient=)
mvlv_stations = pd.DataFrame.from_dict(mvlv_stations_dict, orient=)
mvlv_trafos = pd.DataFrame.from_dict(mvlv_trafos_dict, orient=)
lv_loads = pd.DataFrame.from_dict(lv_loads_dict, orient=)
mv_grid = pd.DataFrame.from_dict(mv_grid_dict, orient=)
mv_gen = pd.DataFrame.from_dict(mv_gen_dict, orient=)
mv_cd = pd.DataFrame.from_dict(mv_cd_dict, orient=)
hvmv_stations = pd.DataFrame.from_dict(hvmv_stations_dict, orient=)
hvmv_trafos = pd.DataFrame.from_dict(hvmv_trafos_dict, orient=)
mv_loads = pd.DataFrame.from_dict(mv_loads_dict, orient=)
lines = pd.DataFrame.from_dict(lines_dict, orient=)
mvlv_mapping = pd.DataFrame.from_dict(mvlv_mapping_dict, orient=)
lines = lines[sorted(lines.columns.tolist())]
return run_id, lv_grid, lv_gen, lv_cd, mvlv_stations, mvlv_trafos, lv_loads, mv_grid, mv_gen, mv_cd, \
hvmv_stations, hvmv_trafos, mv_loads, lines, mvlv_mapping
|
Export all nodes and lines of the network nw as DataFrames
Parameters
----------
nw: :any:`list` of NetworkDing0
The MV grid(s) to be studied
mode: str
If 'MV' export only medium voltage nodes and lines
If 'LV' export only low voltage nodes and lines
else, exports MV and LV nodes and lines
Returns
-------
pandas.DataFrame
nodes_df : DataFrame containing nodes and their attributes
pandas.DataFrame
lines_df : DataFrame containing lines and their attributes
|
26,519 |
def print_rev_id(localRepoPath):
start_path = os.getcwd()
try:
log.info("Local repository path: {}".format(localRepoPath))
os.chdir(localRepoPath)
log.info("\n== Remote URL")
os.system()
log.info("\n== Local Branches")
os.system("git branch")
log.info("\n== Most Recent Commit")
os.system("git log |head -1")
rv = 0
except:
rv = 111
log.info("WARNING! get_git_rev_info.print_rev_id() encountered a problem and cannot continue.")
finally:
os.chdir(start_path)
if rv != 0:
sys.exit(rv)
|
Prints information about the specified local repository to STDOUT. Expected method of execution: command-line or
shell script call.
Parameters
----------
localRepoPath: string
Local repository path.
Returns
-------
Nothing as such. The subroutine will exit with a state of 0 if everything ran OK, and a value of '111' if
something went wrong.
|
26,520 |
def get_user(uid):
if db is not None:
try:
uid = uid.decode()
except AttributeError:
pass
d = db.hgetall(.format(uid))
if d:
nd = {}
for k in d:
try:
nd[k.decode()] = d[k].decode()
except AttributeError:
try:
nd[k.decode()] = d[k]
except AttributeError:
nd[k] = d[k]
for p in PERMISSIONS:
nd[p] = nd.get(p) ==
return User(uid=uid, **nd)
else:
return None
else:
d = app.config[].get(uid)
if d:
return User(uid=uid, **d)
else:
return None
|
Get a user by the UID.
:param str uid: UID to find
:return: the user
:rtype: User object
:raises ValueError: uid is not an integer
:raises KeyError: if user does not exist
|
26,521 |
def evaluate(self, password=):
dev_submission = self
if self[].get(, {}).get(, None):
dev_submission = copy.deepcopy(self)
dev_submission[] = {token_id: token for token_id, token in self[].items()
if token_id in self[][][]}
url = .format(BASE_URL)
try:
r = requests.post(url,
data=dev_submission.dumps(),
headers={: },
auth=(dev_submission[][], password))
response = r.json()
except requests.exceptions.HTTPError as e:
logging.error(.format(e))
return Job()
if in response:
logging.error(.format(response[]))
return Job()
return Job(response)
|
Evaluates the development set.
The password is sent as plain text.
:return: the evaluation results.
|
26,522 |
def _event_monitor_loop(region_name, vpc_id,
watcher_plugin, health_plugin,
iterations, sleep_time,
route_check_time_interval=30):
q_route_spec = watcher_plugin.get_route_spec_queue()
q_monitor_ips, q_failed_ips, q_questionable_ips = \
health_plugin.get_queues()
time.sleep(sleep_time)
current_route_spec = {}
all_ips = []
last_route_check_time = time.time()
while not CURRENT_STATE._stop_all:
try:
failed_ips = utils.read_last_msg_from_queue(q_failed_ips)
questnbl_ips = utils.read_last_msg_from_queue(q_questionable_ips)
new_route_spec = utils.read_last_msg_from_queue(q_route_spec)
if failed_ips:
CURRENT_STATE.failed_ips = failed_ips
if questnbl_ips:
CURRENT_STATE.questionble_ips = questnbl_ips
if new_route_spec:
CURRENT_STATE.route_spec = new_route_spec
current_route_spec = new_route_spec
all_ips = _update_health_monitor_with_new_ips(new_route_spec,
all_ips,
q_monitor_ips)
now = time.time()
time_for_regular_recheck = \
(now - last_route_check_time) > route_check_time_interval
if new_route_spec or failed_ips or questnbl_ips or \
time_for_regular_recheck:
if not new_route_spec and not (failed_ips or questnbl_ips):
logging.debug("Time for regular route check")
last_route_check_time = now
vpc.handle_spec(region_name, vpc_id, current_route_spec,
failed_ips if failed_ips else [],
questnbl_ips if questnbl_ips else [])
if iterations is not None:
iterations -= 1
if iterations == 0:
break
time.sleep(sleep_time)
except KeyboardInterrupt:
return
except Exception as e:
import traceback
traceback.print_exc()
logging.error("*** Uncaught exception 1: %s" % str(e))
return
logging.debug("event_monitor_loop ended: Global stop")
|
Monitor queues to receive updates about new route specs or any detected
failed IPs.
If any of those have updates, notify the health-monitor thread with a
message on a special queue and also re-process the entire routing table.
The 'iterations' argument allows us to limit the running time of the watch
loop for test purposes. Not used during normal operation. Also, for faster
tests, sleep_time can be set to values less than 1.
The 'route_check_time_interval' argument specifies the number of seconds
we allow to elapse before forcing a re-check of the VPC routes. This is so
that accidentally deleted routes or manually broken route tables can be
fixed back up again on their own.
|
26,523 |
def getLatency(self, instId: int) -> float:
if len(self.clientAvgReqLatencies) == 0:
return 0.0
return self.clientAvgReqLatencies[instId].get_avg_latency()
|
Return the average request latency calculated for the given protocol instance.
|
26,524 |
def _set_member_vlan(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=member_vlan.member_vlan, is_container=, presence=False, yang_name="member-vlan", rest_name="member-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__member_vlan = t
if hasattr(self, ):
self._set()
|
Setter method for member_vlan, mapped from YANG variable /topology_group/member_vlan (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_member_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_member_vlan() directly.
|
26,525 |
def _get_or_open_file(filename):
if isinstance(filename, (str, bytes)):
f = open(filename)
elif hasattr(filename, ) and hasattr(filename, ):
f = filename
else:
raise TypeError()
return f
|
If ``filename`` is a string or bytes object, open the
``filename`` and return the file object. If ``filename`` is
file-like (i.e., it has 'read' and 'write' attributes), return
``filename``.
Parameters
----------
filename : str, bytes, file
Raises
------
TypeError
If ``filename`` is not a string, bytes, or file-like
object.
File-likeness is determined by checking for 'read' and
'write' attributes.
|
26,526 |
def nl_send(sk, msg):
cb = sk.s_cb
if cb.cb_send_ow:
return cb.cb_send_ow(sk, msg)
hdr = nlmsg_hdr(msg)
iov = hdr.bytearray[:hdr.nlmsg_len]
return nl_send_iovec(sk, msg, iov, 1)
|
Transmit Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L416
Transmits the Netlink message `msg` over the Netlink socket using `socket.sendmsg()`. This function is based on
`nl_send_iovec()`.
The message is addressed to the peer as specified in the socket by either the nl_socket_set_peer_port() or
nl_socket_set_peer_groups() function. The peer address can be overwritten by specifying an address in the `msg`
object using nlmsg_set_dst().
If present in the `msg`, credentials set by the nlmsg_set_creds() function are added to the control buffer of the
message.
Calls to this function can be overwritten by providing an alternative using the nl_cb_overwrite_send() function.
This function triggers the `NL_CB_MSG_OUT` callback.
ATTENTION: Unlike `nl_send_auto()`, this function does *not* finalize the message in terms of automatically adding
needed flags or filling out port numbers.
Positional arguments:
sk -- Netlink socket (nl_sock class instance).
msg -- Netlink message (nl_msg class instance).
Returns:
Number of bytes sent on success or a negative error code.
|
26,527 |
def _load_fsstat_data(self, timeout=3):
def stats_thread():
try:
cmd = [, self.get_raw_path(), , str(self.offset // self.disk.block_size)]
fstype = {
"ntfs": "ntfs", "fat": "fat", "ext": "ext", "iso": "iso9660", "hfs+": "hfs",
"ufs": "ufs", "swap": "swap", "exfat": "exfat",
}.get(self.fstype, None)
if fstype:
cmd.extend(["-f", fstype])
logger.debug(.format(.join(cmd)))
stats_thread.process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in iter(stats_thread.process.stdout.readline, b):
line = line.decode()
logger.debug(.format(line))
if line.startswith("File System Type:"):
self.info[] = line[line.index() + 2:].strip()
elif line.startswith("Last Mount Point:") or line.startswith("Last mounted on:"):
self.info[] = line[line.index() + 2:].strip().replace("//", "/")
elif line.startswith("Volume Name:") and not self.info.get():
self.info[] = line[line.index() + 2:].strip()
elif line.startswith("Version:"):
self.info[] = line[line.index() + 2:].strip()
elif line.startswith("Source OS:"):
self.info[] = line[line.index() + 2:].strip()
elif in line or in line:
try:
stats_thread.process.terminate()
logger.debug("Terminated fsstat at cylinder/block group information.")
except Exception:
pass
break
if self.info.get() and self.info.get():
self.info[] = "{0} ({1})".format(self.info[], self.info[])
elif self.info.get() and not self.info.get():
self.info[] = self.info[]
elif not self.info.get() and self.info.get() and \
self.info[].startswith("/"):
if self.info[].endswith("1"):
self.info[] = self.info[][:-1]
else:
self.info[] = self.info[]
except Exception:
logger.exception("Error while obtaining stats.")
stats_thread.process = None
thread = threading.Thread(target=stats_thread)
thread.start()
thread.join(timeout)
if thread.is_alive():
try:
stats_thread.process.terminate()
except Exception:
pass
thread.join()
logger.debug("Killed fsstat after {0}s".format(timeout))
|
Using :command:`fsstat`, adds some additional information about the volume to the Volume.
|
26,528 |
def connect(self, hardware: hc.API):
self._hw_manager.set_hw(hardware)
self._hw_manager.hardware.cache_instruments()
|
Connect to a running hardware API.
This can be either a simulator or a full hardware controller.
Note that there is no true disconnected state for a
:py:class:`.ProtocolContext`; :py:meth:`disconnect` simply creates
a new simulator and replaces the current hardware with it.
|
26,529 |
def start(path=None, host=None, port=None, color=None, cors=None, detach=False, nolog=False):
if detach:
sys.argv.append()
idx = sys.argv.index()
del sys.argv[idx]
cmd = sys.executable + + .join([sys.argv[0], ] + sys.argv[1:])
if os.name == :
cmd = % cmd
else:
cmd = % cmd
os.system(cmd)
else:
if path:
path = os.path.abspath(path)
app.config[]= first_value(path, app.config.get(,None), os.getcwd())
app.config[] = first_value(host, app.config.get(,None), )
app.config[] = int(first_value(port, app.config.get(,None), 5001))
app.logger.setLevel(logging.DEBUG)
app.config[] = HistoryHandler()
app.logger.addHandler(app.config[])
if not nolog:
app.logger.addHandler(StreamHandler())
if cors: CORS(app)
app.run(host = app.config[],
port = app.config[],
threaded = True)
|
Start the web server.
|
26,530 |
def sqlite_to_csv(
input_filename,
table_name,
output_filename,
dialect=csv.excel,
batch_size=10000,
encoding="utf-8",
callback=None,
query=None,
):
if isinstance(dialect, six.text_type):
dialect = csv.get_dialect(dialect)
if query is None:
query = "SELECT * FROM {}".format(table_name)
connection = sqlite3.Connection(input_filename)
cursor = connection.cursor()
result = cursor.execute(query)
header = [item[0] for item in cursor.description]
fobj = open_compressed(output_filename, mode="w", encoding=encoding)
writer = csv.writer(fobj, dialect=dialect)
writer.writerow(header)
total_written = 0
for batch in rows.plugins.utils.ipartition(result, batch_size):
writer.writerows(batch)
written = len(batch)
total_written += written
if callback:
callback(written, total_written)
fobj.close()
|
Export a table inside a SQLite database to CSV
|
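For reference, a hypothetical call; the paths are placeholders, and the assumption is that open_compressed picks gzip from the .gz extension.
def report_progress(written, total_written):
    # receives the rows written in this batch and the running total, per the signature above
    print("wrote {} rows ({} total)".format(written, total_written))

sqlite_to_csv(
    input_filename="example.sqlite",
    table_name="cities",
    output_filename="cities.csv.gz",
    batch_size=5000,
    callback=report_progress,
)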
26,531 |
def save(self, **kwargs):
try:
manager = Manager()
for item in self.model_class.objects.language(manager.get_main_language()).filter(pk__in=self.ids).all():
create_translations_for_item_and_its_children.delay(self.model_class, item.pk, self.languages,
update_item_languages=True)
return {: }
except Exception as e:
raise serializers.ValidationError(detail=str(e))
|
Method that creates the translation tasks for every selected instance
:param kwargs:
:return:
|
26,532 |
def create_skeleton(shutit):
skel_path = shutit.cfg[][]
skel_module_name = shutit.cfg[][]
skel_domain = shutit.cfg[][]
skel_domain_hash = shutit.cfg[][]
skel_depends = shutit.cfg[][]
skel_shutitfiles = shutit.cfg[][]
skel_delivery = shutit.cfg[][]
skel_pattern = shutit.cfg[][]
skel_vagrant_num_machines = shutit.cfg[][]
skel_vagrant_machine_prefix = shutit.cfg[][]
skel_vagrant_ssh_access = shutit.cfg[][]
skel_vagrant_docker = shutit.cfg[][]
skel_vagrant_snapshot = shutit.cfg[][]
skel_vagrant_upload = shutit.cfg[][]
skel_vagrant_image_name = shutit.cfg[][]
if not skel_path or skel_path[0] != :
shutit.fail()
if os.path.exists(skel_path):
shutit.fail(skel_path + )
if not skel_module_name:
shutit.fail()
if not re.match(, skel_module_name):
shutit.fail( + skel_module_name)
if not skel_domain:
shutit.fail()
os.makedirs(skel_path)
os.chdir(skel_path)
|
Creates module based on a pattern supplied as a git repo.
|
26,533 |
def get_transaction_index(self, transaction_hash: Hash32) -> Tuple[BlockNumber, int]:
key = SchemaV1.make_transaction_hash_to_block_lookup_key(transaction_hash)
try:
encoded_key = self.db[key]
except KeyError:
raise TransactionNotFound(
"Transaction {} not found in canonical chain".format(encode_hex(transaction_hash)))
transaction_key = rlp.decode(encoded_key, sedes=TransactionKey)
return (transaction_key.block_number, transaction_key.index)
|
Returns a 2-tuple of (block_number, transaction_index) indicating which
block the given transaction can be found in and at what index in the
block transactions.
Raises TransactionNotFound if the transaction_hash is not found in the
canonical chain.
|
26,534 |
def pacl_term(DiamTube, ConcClay, ConcAl, ConcNatOrgMat, NatOrgMat,
coag, material, RatioHeightDiameter):
return (gamma_coag(ConcClay, ConcAl, coag, material, DiamTube,
RatioHeightDiameter)
* (1 - gamma_humic_acid_to_coag(ConcAl, ConcNatOrgMat,
NatOrgMat, coag))
)
|
Return the fraction of the surface area that is covered with coagulant
that is not covered with humic acid.
:param DiamTube: Diameter of the dosing tube
:type DiamTube: float
:param ConcClay: Concentration of clay in solution
:type ConcClay: float
:param ConcAl: Concentration of aluminum in solution
:type ConcAl: float
:param ConcNatOrgMat: Concentration of natural organic matter in solution
:type ConcNatOrgMat: float
:param NatOrgMat: type of natural organic matter, e.g. floc_model.HumicAcid
:type NatOrgMat: floc_model.Material
:param coag: Type of coagulant in solution, e.g. floc_model.PACl
:type coag: floc_model.Material
:param material: Type of clay in suspension, e.g. floc_model.Clay
:type material: floc_model.Material
:param RatioHeightDiameter: Dimensionless ratio of clay height to clay diameter
:type RatioHeightDiameter: float
:return: fraction of the surface area that is covered with coagulant that is not covered with humic acid
:rtype: float
|
26,535 |
def reformat(found_sequences):
for (pdb_id, chain, file_name), sequence in sorted(found_sequences.iteritems()):
header = sequence[0]
assert(header[0] == )
tokens = header.split()
tokens[0] = tokens[0][:5]
assert(len(tokens[0]) == 5)
sequence[0] = "|".join(tokens)
|
Truncate the FASTA headers so that the first field is a 4-character ID.
|
26,536 |
def plot(data: Dict[str, np.array], fields: List[str] = None, *args, **kwargs):
if plt is None:
return
if fields is None:
fields = [, , , ]
labels = []
lines = []
for field in fields:
if min(data[field].shape) > 0:
f_lines = plt.plot(data[], data[field], *args, **kwargs)
lines.extend(f_lines)
labels.extend(data[][field])
plt.legend(lines, labels, ncol=2, loc=)
plt.xlabel()
plt.grid()
|
Plot simulation data.
:data: A dictionary of arrays.
:fields: A list of variables you want to plot (e.g. ['x', 'y', 'c'])
|
26,537 |
def exclude_fields(self):
request = self.context.get()
if request:
exclude = request.query_params.get(, None)
if exclude is None: return
excluded_fields = exclude.split()
for field in excluded_fields:
self.fields.pop(field)
|
Excludes fields that are included in the query parameters
|
26,538 |
def download(self):
p = Pool()
p.map(self._download, self.days)
|
MLBAM dataset download
|
26,539 |
def setup(self, **kwargs):
clobber = self.clobber
self.clobber = False
if not self.load_model():
raise Exception("Canpiterpmaxfppert', 0.1)
|
This is called during production de-trending, prior to
calling the :py:obj:`Detrender.run()` method.
:param int piter: The number of iterations in the minimizer. \
Default 3
:param int pmaxf: The maximum number of function evaluations per \
iteration. Default 300
:param float ppert: The fractional amplitude of the perturbation on \
the initial guess. Default 0.1
|
26,540 |
def update(self):
logger.debug("")
rd = self.repo_dir
logger.debug("pkg path %s", rd)
if not rd:
print(
"unable to find pkg . %s" % (self.name, did_u_mean(self.name))
)
cwd = os.getcwd()
os.chdir(self.repo_dir)
logger.debug("cwd: %s, updating %s ", cwd, self.repo_dir)
try:
p = git.pull(, ,
_out=self._sh_stdout(),
_err=self._sh_stderr())
p.wait()
except Exception as e:
pass
os.chdir(cwd)
self.install_update_deps()
up = os.path.join(self.repo_dir, , )
if os.path.exists(up):
cwd = os.getcwd()
os.chdir(os.path.join(self.repo_dir, ))
self.pr_info("Running update script for {} @ {}", self.name, up)
subprocess.check_call(up, shell=True)
os.chdir(cwd)
|
todo: Docstring for update
:return:
:rtype:
|
26,541 |
def _ack(self, message_id, subscription_id, **kwargs):
self._conn.ack(message_id, subscription_id, **kwargs)
|
Acknowledge receipt of a message. This only makes sense when the
'acknowledgement' flag was set for the relevant subscription.
:param message_id: ID of the message to be acknowledged
:param subscription_id: ID of the relevant subscription
:param **kwargs: Further parameters for the transport layer. For example
transaction: Transaction ID if acknowledgement should be part of
a transaction
|
26,542 |
def _compute_magnitude(self, rup, C):
return C[] + C[] * (rup.mag - 6.0) +\
(C[] * np.log(rup.mag / 6.0))
|
Compute the first term of the equation described on p. 1144:
``c1 + c2 * (M - 6) + c3 * log(M / 6)``
|
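For illustration, a worked evaluation of that term with made-up coefficients (c1, c2, c3 are not taken from any GMPE coefficient table).
import numpy as np

c1, c2, c3 = 1.0, 0.5, 0.3
mag = 7.0
term = c1 + c2 * (mag - 6.0) + c3 * np.log(mag / 6.0)
print(term)  # ~1.546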
26,543 |
def write_to_fp(self, fp):
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
text_parts = self._tokenize(self.text)
log.debug("text_parts: %i", len(text_parts))
assert text_parts,
for idx, part in enumerate(text_parts):
try:
part_tk = self.token.calculate_token(part)
except requests.exceptions.RequestException as e:
log.debug(str(e), exc_info=True)
raise gTTSError(
"Connection error during token calculation: %s" %
str(e))
payload = {: ,
: part,
: self.lang,
: self.speed,
: len(text_parts),
: idx,
: ,
: _len(part),
: part_tk}
log.debug("payload-%i: %s", idx, payload)
try:
r = requests.get(self.GOOGLE_TTS_URL,
params=payload,
headers=self.GOOGLE_TTS_HEADERS,
proxies=urllib.request.getproxies(),
verify=False)
log.debug("headers-%i: %s", idx, r.request.headers)
log.debug("url-%i: %s", idx, r.request.url)
log.debug("status-%i: %s", idx, r.status_code)
r.raise_for_status()
except requests.exceptions.HTTPError:
raise gTTSError(tts=self, response=r)
except requests.exceptions.RequestException as e:
raise gTTSError(str(e))
try:
for chunk in r.iter_content(chunk_size=1024):
fp.write(chunk)
log.debug("part-%i written to %s", idx, fp)
except (AttributeError, TypeError) as e:
raise TypeError(
" is not a file-like object or it does not take bytes: %s" %
str(e))
|
Do the TTS API request and write bytes to a file-like object.
Args:
fp (file object): Any file-like object to write the ``mp3`` to.
Raises:
:class:`gTTSError`: When there's an error with the API request.
TypeError: When ``fp`` is not a file-like object that takes bytes.
|
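For reference, typical use of write_to_fp through the public gTTS API: any object with a write() method that accepts bytes works. The text and output name are arbitrary, and the call needs network access.
from io import BytesIO
from gtts import gTTS

tts = gTTS("hello world", lang="en")
buf = BytesIO()
tts.write_to_fp(buf)              # mp3 bytes kept in memory
with open("hello.mp3", "wb") as f:
    tts.write_to_fp(f)            # mp3 written to disk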
26,544 |
def check_input_and_output_numbers(operator, input_count_range=None, output_count_range=None):
if isinstance(input_count_range, list):
min_input_count = input_count_range[0]
max_input_count = input_count_range[1]
elif isinstance(input_count_range, int) or input_count_range is None:
min_input_count = input_count_range
max_input_count = input_count_range
else:
raise RuntimeError()
if isinstance(output_count_range, list):
min_output_count = output_count_range[0]
max_output_count = output_count_range[1]
elif isinstance(output_count_range, int) or output_count_range is None:
min_output_count = output_count_range
max_output_count = output_count_range
else:
raise RuntimeError()
if min_input_count is not None and len(operator.inputs) < min_input_count:
raise RuntimeError(
\
% (operator.full_name, operator.type, min_input_count, len(operator.inputs), operator.input_full_names))
if max_input_count is not None and len(operator.inputs) > max_input_count:
raise RuntimeError(
\
% (operator.full_name, operator.type, max_input_count, len(operator.inputs), operator.input_full_names))
if min_output_count is not None and len(operator.outputs) < min_output_count:
raise RuntimeError(
\
% (operator.full_name, operator.type, min_output_count, len(operator.outputs), operator.output_full_names))
if max_output_count is not None and len(operator.outputs) > max_output_count:
raise RuntimeError(
\
% (operator.full_name, operator.type, max_output_count, len(operator.outputs), operator.output_full_names))
|
Check if the number of input(s)/output(s) is correct
:param operator: An Operator object
:param input_count_range: A list of two integers or an integer. If it's a list the first/second element is the
minimal/maximal number of inputs. If it's an integer, it is equivalent to specify that number twice in a list. For
infinite ranges like 5 to infinity, you need to use [5, None].
:param output_count_range: A list of two integers or an integer. See input_count_range for its format.
|
26,545 |
def add_application(self, application_id, **kwargs):
path = % self.id
data = {: application_id}
if kwargs:
data["options"] = kwargs
self.api.request(path, data)
|
Add an application to a group.
`application_id` is the name of the application to add. Any
application options can be specified as kwargs.
|
26,546 |
def remove_accessibility_type(self, accessibility_type=None):
if accessibility_type is None:
raise NullArgument
metadata = Metadata(**settings.METADATA[])
if metadata.is_read_only() or metadata.is_required():
raise NoAccess()
if (accessibility_type._my_map[]) not in self._my_map[]:
raise NotFound()
self._my_map[].remove(accessibility_type._my_map[])
|
Removes an accessibility type.
:param accessibility_type: accessibility type to remove
:type accessibility_type: ``osid.type.Type``
:raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
:raise: ``NotFound`` -- accessibility type not found
:raise: ``NullArgument`` -- ``accessibility_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
|
26,547 |
def _analyze_ini_file(self, add_header=False):
def wrapped(file, filename):
potential_secrets = {}
with self.non_quoted_string_regex():
for value, lineno in IniFileParser(
file,
add_header,
exclude_lines_regex=self.exclude_lines_regex,
).iterator():
potential_secrets.update(self.analyze_string(
value,
lineno,
filename,
))
return potential_secrets
return wrapped
|
:returns: same format as super().analyze()
|
26,548 |
def export_request_rate_by_interval(
self, parameters, location, custom_headers=None, raw=False, polling=True, **operation_config):
raw_result = self._export_request_rate_by_interval_initial(
parameters=parameters,
location=location,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize(, response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
,
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={: }, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
|
Export logs that show Api requests made by this subscription in the
given time window to show throttling activities.
:param parameters: Parameters supplied to the LogAnalytics
getRequestRateByInterval Api.
:type parameters:
~azure.mgmt.compute.v2018_04_01.models.RequestRateByIntervalInput
:param location: The location upon which virtual-machine-sizes is
queried.
:type location: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
LogAnalyticsOperationResult or
ClientRawResponse<LogAnalyticsOperationResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2018_04_01.models.LogAnalyticsOperationResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2018_04_01.models.LogAnalyticsOperationResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
|
26,549 |
def encrypt_file(cls, key, in_filename, out_filename=None, chunksize=64 * 1024):
if not out_filename:
out_filename = in_filename +
iv = .join(chr(random.randint(0, 0xFF)) for _ in range(16))
encryptor = AES.new(key, AES.MODE_CBC, iv)
filesize = os.path.getsize(in_filename)
with open(in_filename, ) as infile:
with open(out_filename, ) as outfile:
outfile.write(struct.pack(, filesize))
outfile.write(iv)
while True:
chunk = infile.read(chunksize)
if len(chunk) == 0:
break
elif len(chunk) % 16 != 0:
chunk += * (16 - len(chunk) % 16)
outfile.write(encryptor.encrypt(chunk))
|
Encrypts a file using AES (CBC mode) with the
given key.
key:
The encryption key - a string that must be
either 16, 24 or 32 bytes long. Longer keys
are more secure.
in_filename:
Name of the input file
out_filename:
If None, '<in_filename>.enc' will be used.
chunksize:
Sets the size of the chunk which the function
uses to read and encrypt the file. Larger chunk
sizes can be faster for some files and machines.
chunksize must be divisible by 16.
|
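For reference, a mirror-image decryption sketch. The struct format string and the padding character were stripped from the entry above, so this assumes '<Q' (8-byte little-endian size header) and space padding; adjust if the original used something else.
import struct
from Crypto.Cipher import AES  # pycryptodome

def decrypt_file(key, in_filename, out_filename, chunksize=64 * 1024):
    with open(in_filename, 'rb') as infile:
        # read the original file size, then the 16-byte IV written by encrypt_file
        origsize = struct.unpack('<Q', infile.read(struct.calcsize('<Q')))[0]
        iv = infile.read(16)
        decryptor = AES.new(key, AES.MODE_CBC, iv)
        with open(out_filename, 'wb') as outfile:
            while True:
                chunk = infile.read(chunksize)
                if len(chunk) == 0:
                    break
                outfile.write(decryptor.decrypt(chunk))
            outfile.truncate(origsize)  # drop the trailing padding bytes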
26,550 |
def format_kinds(raw):
output = .join(.format(*kind) for kind in raw if kind)
return output
|
Format a string representing the kinds.
|
26,551 |
def load_contents(self):
with open(self.name + ".csv") as f:
list_of_rows = f.readlines()
list_of_rows = map(
lambda x: x.strip(),
map(
lambda x: x.replace("\"", ""),
list_of_rows
)
)
for row in list_of_rows:
self.put_row(make_row(self.columns, row.split()))
|
Loads contents of Database from a filename database.csv.
|
26,552 |
def get_last_depth(self, symbol, _type):
params = {: symbol, : _type}
url = u.MARKET_URL +
def _wrapper(_func):
@wraps(_func)
def handle():
_func(http_get_request(url, params))
return handle
return _wrapper
|
Get market depth.
:param symbol
:param _type: allowed values: { percent10, step0, step1, step2, step3, step4, step5 }
:return:
|
26,553 |
def generate(self, output_dir, work, ngrams, labels, minus_ngrams):
template = self._get_template()
colours = generate_colours(len(ngrams))
for siglum in self._corpus.get_sigla(work):
ngram_data = zip(labels, ngrams)
content = self._generate_base(work, siglum)
for ngrams_group in ngrams:
content = self._highlight(content, ngrams_group, True)
content = self._highlight(content, minus_ngrams, False)
self._ngrams_count = 1
content = self._format_content(content)
report_name = .format(work, siglum)
self._write(work, siglum, content, output_dir, report_name,
template, ngram_data=ngram_data,
minus_ngrams=minus_ngrams, colours=colours)
|
Generates HTML reports for each witness to `work`, showing its text
with the n-grams in `ngrams` highlighted.
Any n-grams in `minus_ngrams` have any highlighting of them
(or subsets of them) removed.
:param output_dir: directory to write report to
:type output_dir: `str`
:param work: name of work to highlight
:type work: `str`
:param ngrams: groups of n-grams to highlight
:type ngrams: `list` of `list` of `str`
:param labels: labels for the groups of n-grams
:type labels: `list` of `str`
:param minus_ngrams: n-grams to remove highlighting from
:type minus_ngrams: `list` of `str`
:rtype: `str`
|
26,554 |
def _calculate_refund_amount(self, amount=None):
eligible_to_refund = self.amount - (self.amount_refunded or 0)
if amount:
amount_to_refund = min(eligible_to_refund, amount)
else:
amount_to_refund = eligible_to_refund
return int(amount_to_refund * 100)
|
:rtype: int
:return: amount that can be refunded, in CENTS
|
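A worked example of the cents conversion with made-up figures:
amount = 10.00            # original charge
amount_refunded = 2.50    # already refunded
requested = 5.00          # refund being requested now
eligible = amount - amount_refunded      # 7.5 still refundable
to_refund = min(eligible, requested)     # 5.0
print(int(to_refund * 100))              # 500 cents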
26,555 |
def prox_l1(v, alpha):
r
if have_numexpr:
return ne.evaluate(
)
else:
return np.sign(v) * (np.clip(np.abs(v) - alpha, 0, float()))
|
r"""Compute the proximal operator of the :math:`\ell_1` norm (scalar
shrinkage/soft thresholding)
.. math::
\mathrm{prox}_{\alpha f}(\mathbf{v}) =
\mathcal{S}_{1,\alpha}(\mathbf{v}) = \mathrm{sign}(\mathbf{v})
\odot \max(0, |\mathbf{v}| - \alpha)
where :math:`f(\mathbf{x}) = \|\mathbf{x}\|_1`.
Unlike the corresponding :func:`norm_l1`, there is no need for an
`axis` parameter since the proximal operator of the :math:`\ell_1`
norm is the same when taken independently over each element, or
over their sum.
Parameters
----------
v : array_like
Input array :math:`\mathbf{v}`
alpha : float or array_like
Parameter :math:`\alpha`
Returns
-------
x : ndarray
Output array
|
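For reference, a plain-NumPy restatement of the soft-thresholding formula from the docstring, sign(v) * max(0, |v| - alpha), applied elementwise; the sample vector is made up.
import numpy as np

def soft_threshold(v, alpha):
    return np.sign(v) * np.maximum(np.abs(v) - alpha, 0.0)

v = np.array([-2.0, -0.3, 0.0, 0.5, 3.0])
print(soft_threshold(v, 1.0))  # [-1. -0.  0.  0.  2.]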
26,556 |
def parse_workflow_call_body(self, i):
io_map = OrderedDict()
if isinstance(i, wdl_parser.Terminal):
return i.source_string
elif isinstance(i, wdl_parser.Ast):
if i.name == :
declarations = self.parse_workflow_call_body_declarations(i.attr("declarations"))
io_map = self.parse_workflow_call_body_io(i.attr())
else:
raise NotImplementedError
elif isinstance(i, wdl_parser.AstList):
raise NotImplementedError
return io_map
|
Required.
:param i:
:return:
|
26,557 |
def close(self):
from matplotlib.pyplot import close
for ax in self.axes[::-1]:
ax.set_xscale()
ax.set_yscale()
ax.cla()
close(self)
|
Close the plot and release its memory.
|
26,558 |
def register_sizer(self, attr_name, sizedimage_cls):
if attr_name.startswith(
) or attr_name in self.unallowed_sizer_names:
raise UnallowedSizerName(
"`%s` is an unallowed Sizer name. Sizer names cannot begin "
"with an underscore or be named any of the "
"following: %s." % (
attr_name,
.join([
name
for name in self.unallowed_sizer_names
])
)
)
if not issubclass(sizedimage_cls, SizedImage):
raise InvalidSizedImageSubclass(
)
if attr_name in self._sizedimage_registry:
raise AlreadyRegistered(
% attr_name
)
else:
self._sizedimage_registry[attr_name] = sizedimage_cls
|
Register a new SizedImage subclass (`sizedimage_cls`).
To be used via the attribute (`attr_name`).
|
26,559 |
def distance_restraint_force(self, atoms, distances, strengths):
system = self.system
force = mm.HarmonicBondForce()
force.setUsesPeriodicBoundaryConditions(self.system.usesPeriodicBoundaryConditions())
for pair, distance, strength in zip(atoms, distances, strengths):
indices = []
for atom in pair:
if isinstance(atom, str):
index = self.subset(atom)
if len(index) != 1:
raise ValueError(
.format(atom, index))
indices.append(int(index[0]))
elif isinstance(atom, (int, float)):
indices.append(int(atom))
else:
raise ValueError()
if distance == :
pos = self.positions or system.positions
distance = np.linalg.norm(pos[indices[0]] - pos[indices[1]])
force.addBond(indices[0], indices[1], distance*u.nanometers,
strength*u.kilocalories_per_mole/u.angstroms**2)
return force
|
Parameters
----------
atoms : tuple of tuple of int or str
Pair of atom indices to be restrained, with shape (n, 2),
like ((a1, a2), (a3, a4)). Items can be str compatible with MDTraj DSL.
distances : tuple of float
Equilibrium distances for each pair
strengths : tuple of float
Force constant for each pair
|
26,560 |
def point3d(value, lon, lat, depth):
return longitude(lon), latitude(lat), positivefloat(depth)
|
This is used to convert nodes of the form
<hypocenter lon="LON" lat="LAT" depth="DEPTH"/>
:param value: None
:param lon: longitude string
:param lat: latitude string
:returns: a validated triple (lon, lat, depth)
|
26,561 |
def get_dummy_dynamic_run(nsamples, **kwargs):
seed = kwargs.pop(, False)
ndim = kwargs.pop(, 2)
nthread_init = kwargs.pop(, 2)
nthread_dyn = kwargs.pop(, 3)
logl_range = kwargs.pop(, 1)
if kwargs:
raise TypeError(.format(kwargs))
init = get_dummy_run(nthread_init, nsamples, ndim=ndim, seed=seed,
logl_start=-np.inf, logl_range=logl_range)
dyn_starts = list(np.random.choice(
init[], nthread_dyn, replace=True))
threads = nestcheck.ns_run_utils.get_run_threads(init)
threads += [get_dummy_thread(
nsamples, ndim=ndim, seed=False, logl_start=start,
logl_range=logl_range) for start in dyn_starts]
for i, _ in enumerate(threads):
threads[i][] = np.full(nsamples, i)
run = nestcheck.ns_run_utils.combine_threads(threads)
samples = nestcheck.write_polychord_output.run_dead_birth_array(run)
return nestcheck.data_processing.process_samples_array(samples)
|
Generate dummy data for a dynamic nested sampling run.
Loglikelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nsamples: int
Number of samples in thread.
nthread_init: int
Number of threads in the initial run (starting at logl=-np.inf).
nthread_dyn: int
Number of threads in the dynamic run (starting at randomly chosen points
in the initial run).
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values.
|
26,562 |
def desbloquear_sat(self):
retorno = super(ClienteSATLocal, self).desbloquear_sat()
return RespostaSAT.desbloquear_sat(retorno)
|
Overrides :meth:`~satcfe.base.FuncoesSAT.desbloquear_sat`.
:return: A standard SAT response.
:rtype: satcfe.resposta.padrao.RespostaSAT
|
26,563 |
def create(name, **params):
log.debug(, params)
params = _clean_salt_variables(params)
params[] = name
api_response = requests.post(
,
params={: get_sd_auth()},
data=params
)
log.debug(, api_response)
log.debug(, api_response.content)
if api_response.status_code == 200:
try:
return salt.utils.json.loads(api_response.content)
except ValueError:
log.error(, api_response.content)
raise CommandExecutionError(
.format(api_response)
)
else:
return None
|
Function to create device in Server Density. For more info, see the `API
docs`__.
.. __: https://apidocs.serverdensity.com/Inventory/Devices/Creating
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.create lama
salt '*' serverdensity_device.create rich_lama group=lama_band installedRAM=32768
|
26,564 |
def linear_extrapolation_plot(log_prob_adv_array, y, file_name,
min_epsilon=-10, max_epsilon=10,
num_points=21):
import matplotlib
matplotlib.use()
import matplotlib.pyplot as plt
figure = plt.figure()
figure.canvas.set_window_title()
correct_idx = np.argmax(y, axis=0)
fig = plt.figure()
plt.xlabel()
plt.ylabel()
x_axis = np.linspace(min_epsilon, max_epsilon, num_points)
plt.xlim(min_epsilon - 1, max_epsilon + 1)
for i in range(y.shape[0]):
if i == correct_idx:
ls =
linewidth = 5
else:
ls =
linewidth = 2
plt.plot(
x_axis,
log_prob_adv_array[:, i],
ls=ls,
linewidth=linewidth,
label='Class {}'.format(i))
plt.legend(loc=, fontsize=14)
plt.show()
fig.savefig(file_name)
plt.clf()
return figure
|
Generate linear extrapolation plot.
Args:
log_prob_adv_array: Numpy array containing log probabilities
y: Tf placeholder for the labels
file_name: Plot filename
min_epsilon: Minimum value of epsilon over the interval
max_epsilon: Maximum value of epsilon over the interval
num_points: Number of points used to interpolate
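Example:
A usage sketch with synthetic inputs (shapes follow the plotting loop above:
one row per epsilon value, one column per class; the filename is illustrative):
    import numpy as np
    probs = np.random.rand(21, 10)
    log_probs = np.log(probs / probs.sum(axis=1, keepdims=True))
    labels = np.zeros(10)
    labels[3] = 1.0
    fig = linear_extrapolation_plot(log_probs, labels, 'extrapolation.png')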
|
26,565 |
def gimbal_torque_cmd_report_send(self, target_system, target_component, rl_torque_cmd, el_torque_cmd, az_torque_cmd, force_mavlink1=False):
return self.send(self.gimbal_torque_cmd_report_encode(target_system, target_component, rl_torque_cmd, el_torque_cmd, az_torque_cmd), force_mavlink1=force_mavlink1)
|
100 Hz gimbal torque command telemetry
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
rl_torque_cmd : Roll Torque Command (int16_t)
el_torque_cmd : Elevation Torque Command (int16_t)
az_torque_cmd : Azimuth Torque Command (int16_t)
|
26,566 |
def post_handler_err(self, function_arn, invocation_id, handler_err):
url = self._get_work_url(function_arn)
runtime_logger.info(.format(invocation_id, url))
payload = json.dumps({
"errorMessage": handler_err,
}).encode()
request = Request(url, payload)
request.add_header(HEADER_INVOCATION_ID, invocation_id)
request.add_header(HEADER_FUNCTION_ERR_TYPE, "Handled")
request.add_header(HEADER_AUTH_TOKEN, self.auth_token)
urlopen(request)
runtime_logger.info(.format(invocation_id))
|
Post the error message from executing the function handler for :code:`function_arn`
with the specified :code:`invocation_id`
:param function_arn: Arn of the Lambda function which has the handler error message.
:type function_arn: string
:param invocation_id: Invocation ID of the work that is being requested
:type invocation_id: string
:param handler_err: the error message caught from handler
:type handler_err: string
|
26,567 |
def delete_activity(self, activity_id):
collection = JSONClientValidated('learning',
collection='Activity',
runtime=self._runtime)
if not isinstance(activity_id, ABCId):
raise errors.InvalidArgument()
activity_map = collection.find_one(
dict({'_id': ObjectId(activity_id.get_identifier())},
**self._view_filter()))
objects.Activity(osid_object_map=activity_map, runtime=self._runtime, proxy=self._proxy)._delete()
collection.delete_one({'_id': ObjectId(activity_id.get_identifier())})
|
Deletes the ``Activity`` identified by the given ``Id``.
arg: activity_id (osid.id.Id): the ``Id`` of the ``Activity``
to delete
raise: NotFound - an ``Activity`` was not found identified by
the given ``Id``
raise: NullArgument - ``activity_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
|
26,568 |
def getOutputElementCount(self, name):
if name in ["activeCells", "learnableCells", "sensoryAssociatedCells"]:
return self.cellCount * self.moduleCount
else:
raise Exception("Invalid output name specified: " + name)
|
Returns the size of the output array
|
26,569 |
def _open_interface(self, conn_id, iface, callback):
try:
context = self.conns.get_context(conn_id)
except ArgumentError:
callback(conn_id, self.id, False, "Could not find connection information")
return
self.conns.begin_operation(conn_id, 'open_interface', callback, self.get_config())
topics = context['topics']
open_iface_message = {: context[], : , : , : self.name, : iface}
self.client.publish(topics.action, open_iface_message)
|
Open an interface on this device
Args:
conn_id (int): the unique identifier for the connection
iface (string): the interface name to open
callback (callback): Callback to be called when this command finishes
callback(conn_id, adapter_id, success, failure_reason)
|
26,570 |
def movies_released_in(self, year):
return [movie for movie in self._movie_finder.find_all()
if movie.year == year]
|
Return list of movies that were released in certain year.
:param year: Release year
:type year: int
:rtype: list[movies.models.Movie]
:return: List of movie instances.
|
26,571 |
def set_prev_hard(self):
prev = self.get_prev_letter()
if not prev:
return
if not prev.is_consonant():
return
if self.is_softener(prev):
prev.set_hard(False)
elif self.letter in self.vovels_set_hard:
prev.set_hard(True)
|
Sets the hard/soft attributes for the preceding consonants.
|
26,572 |
def parse_connection_option(
header: str, pos: int, header_name: str
) -> Tuple[ConnectionOption, int]:
item, pos = parse_token(header, pos, header_name)
return cast(ConnectionOption, item), pos
|
Parse a Connection option from ``header`` at the given position.
Return the protocol value and the new position.
Raise :exc:`~websockets.exceptions.InvalidHeaderFormat` on invalid inputs.
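Usage sketch (hypothetical header value; the returned integer is the position
at which parsing stopped):
    >>> option, pos = parse_connection_option("keep-alive, Upgrade", 0, "Connection")
    >>> option
    'keep-alive'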
|
26,573 |
def _set_ext_src_vtep_ip_any(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="ext-src-vtep-ip-any", rest_name="src-vtep-ip-any", parent=self, choice=(u, u), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u, u: None, u: None}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "empty",
: ,
})
self.__ext_src_vtep_ip_any = t
if hasattr(self, '_set'):
self._set()
|
Setter method for ext_src_vtep_ip_any, mapped from YANG variable /overlay/access_list/type/vxlan/extended/ext_seq/ext_src_vtep_ip_any (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_ext_src_vtep_ip_any is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ext_src_vtep_ip_any() directly.
|
26,574 |
def _encrypt(data):
BS = AES.block_size
def pad(s):
n = BS - len(s) % BS
char = chr(n).encode()
return s + n * char
password = settings.GECKOBOARD_PASSWORD
salt = Random.new().read(BS - len('Salted__'))
key, iv = _derive_key_and_iv(password, salt, 32, BS)
cipher = AES.new(key, AES.MODE_CBC, iv)
encrypted = b'Salted__' + salt + cipher.encrypt(pad(data))
return base64.b64encode(encrypted)
|
Equivalent to OpenSSL using 256 bit AES in CBC mode
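A hypothetical inverse for reference (a sketch only, assuming the same
_derive_key_and_iv helper and settings.GECKOBOARD_PASSWORD; not part of the
original module):
    def _decrypt(data):
        BS = AES.block_size
        raw = base64.b64decode(data)
        salt = raw[len(b'Salted__'):BS]  # 8-byte salt after the magic prefix
        key, iv = _derive_key_and_iv(settings.GECKOBOARD_PASSWORD, salt, 32, BS)
        cipher = AES.new(key, AES.MODE_CBC, iv)
        padded = cipher.decrypt(raw[BS:])
        return padded[:-padded[-1]]  # strip the padding added by pad()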
|
26,575 |
def create(self, server):
if len(self.geometries) == 0:
raise Exception()
return server.post(
,
self.as_payload(),
replacements={
: self.__challenge__.slug,
: self.identifier})
|
Create the task on the server
|
26,576 |
def _validate_all_tags_are_used(metadata):
tag_names = set([tag_name for tag_name, _ in metadata.tags])
filter_arg_names = set()
for location, _ in metadata.registered_locations:
for filter_info in metadata.get_filter_infos(location):
for filter_arg in filter_info.args:
if is_tag_argument(filter_arg):
filter_arg_names.add(get_directive_argument_name(filter_arg))
unused_tags = tag_names - filter_arg_names
if unused_tags:
raise GraphQLCompilationError(u
u
u
.format(unused_tags))
|
Ensure all tags are used in some filter.
|
26,577 |
def from_iter(self, iterable):
try:
self._lens_from_iter
except AttributeError:
message = "Don't know how to create instance of {} from iterable"
raise NotImplementedError(message.format(type(self)))
else:
return self._lens_from_iter(iterable)
|
Takes an object and an iterable and produces a new object that is
a copy of the original with data from ``iterable`` reincorporated. It
is intended as the inverse of the ``to_iter`` function. Any state in
``self`` that is not modelled by the iterable should remain unchanged.
The following equality should hold for your definition:
.. code-block:: python
from_iter(self, to_iter(self)) == self
This function is used by EachLens to synthesise states from iterables,
allowing it to focus every element of an iterable state.
The corresponding method call for this hook is
``obj._lens_from_iter(iterable)``.
There is no default implementation.
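A minimal sketch of the hook for a hypothetical two-field wrapper (illustrative
only; a real class should keep any state not modelled by the iterable unchanged):
    class Pair:
        def __init__(self, left, right):
            self.left, self.right = left, right
        def _lens_from_iter(self, iterable):
            left, right = iterable
            return Pair(left, right)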
|
26,578 |
def on_while(self, node):
while self.run(node.test):
self._interrupt = None
for tnode in node.body:
self.run(tnode)
if self._interrupt is not None:
break
if isinstance(self._interrupt, ast.Break):
break
else:
for tnode in node.orelse:
self.run(tnode)
self._interrupt = None
|
While blocks.
|
26,579 |
def _set_packet_error_counters(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=packet_error_counters.packet_error_counters, is_container=, presence=False, yang_name="packet-error-counters", rest_name="packet-error-counters", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=False)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__packet_error_counters = t
if hasattr(self, '_set'):
self._set()
|
Setter method for packet_error_counters, mapped from YANG variable /mpls_state/rsvp/statistics/packet_error_counters (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_packet_error_counters is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_packet_error_counters() directly.
YANG Description: RSVP error packet counters
|
26,580 |
def repr_data_size(size_in_bytes, precision=2):
if size_in_bytes < 1024:
return "%s B" % size_in_bytes
magnitude_of_data = ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]
index = 0
while 1:
index += 1
size_in_bytes, mod = divmod(size_in_bytes, 1024)
if size_in_bytes < 1024:
break
template = "{0:.%sf} {1}" % precision
s = template.format(size_in_bytes + mod / 1024.0, magnitude_of_data[index])
return s
|
Return a human readable string representation of a file size. Doesn't
support sizes greater than 1 EB.
For example:
- 100 bytes => 100 B
- 100,000 bytes => 97.66 KB
- 100,000,000 bytes => 95.37 MB
- 100,000,000,000 bytes => 93.13 GB
- 100,000,000,000,000 bytes => 90.95 TB
- 100,000,000,000,000,000 bytes => 88.82 PB
...
Magnitude of data::
1000 kB kilobyte
1000 ** 2 MB megabyte
1000 ** 3 GB gigabyte
1000 ** 4 TB terabyte
1000 ** 5 PB petabyte
1000 ** 6 EB exabyte
1000 ** 7 ZB zettabyte
1000 ** 8 YB yottabyte
|
26,581 |
def setLinkQuality(self, EUIadr, LinkQuality):
print '%s call setLinkQuality' % self.port
print EUIadr
print LinkQuality
try:
euiHex = hex(EUIadr)
euiStr = str(euiHex)
euiStr = euiStr.rstrip('L')
address64 = ''
if '0x' in euiStr:
address64 = euiStr.lstrip('0x')
if len(address64) < 16:
address64 = address64.zfill(16)
print address64
cmd = 'macfilter rss add-lqi %s %s' % (address64, str(LinkQuality))
print cmd
return self.__sendCommand(cmd)[0] == 'Done'
except Exception, e:
ModuleHelper.WriteIntoDebugLogger("setLinkQuality() Error: " + str(e))
|
set custom LinkQualityIn for all receiving messages from the specified EUIadr
Args:
EUIadr: a given extended address
LinkQuality: a given custom link quality
link quality/link margin mapping table
3: 21 - 255 (dB)
2: 11 - 20 (dB)
1: 3 - 9 (dB)
0: 0 - 2 (dB)
Returns:
True: successful to set the link quality
False: fail to set the link quality
|
26,582 |
def submit(recaptcha_challenge_field,
recaptcha_response_field,
private_key,
remoteip,
use_ssl=False):
if not (recaptcha_response_field and recaptcha_challenge_field and
len(recaptcha_response_field) and len(recaptcha_challenge_field)):
return RecaptchaResponse(
is_valid=False,
error_code='incorrect-captcha-sol'
)
if getattr(settings, "NOCAPTCHA", False):
params = urlencode({
'secret': want_bytes(private_key),
'response': want_bytes(recaptcha_response_field),
'remoteip': want_bytes(remoteip),
})
else:
params = urlencode({
'privatekey': want_bytes(private_key),
'remoteip': want_bytes(remoteip),
'challenge': want_bytes(recaptcha_challenge_field),
'response': want_bytes(recaptcha_response_field),
})
if not PY2:
params = params.encode('utf-8')
if use_ssl:
verify_url = 'https://%s/recaptcha/api/verify' % VERIFY_SERVER
else:
verify_url = 'http://%s/recaptcha/api/verify' % VERIFY_SERVER
if getattr(settings, "NOCAPTCHA", False):
verify_url = 'https://%s/recaptcha/api/siteverify' % VERIFY_SERVER
req = Request(
url=verify_url,
data=params,
headers={
'Content-type': 'application/x-www-form-urlencoded',
'User-agent': 'reCAPTCHA Python'
}
)
httpresp = urlopen(req)
if getattr(settings, "NOCAPTCHA", False):
data = json.loads(httpresp.read().decode())
return_code = data['success']
return_values = [return_code, None]
if return_code:
return_code = 'true'
else:
return_code = 'false'
else:
return_values = httpresp.read().splitlines()
return_code = return_values[0]
httpresp.close()
if (return_code == "true"):
return RecaptchaResponse(is_valid=True)
else:
return RecaptchaResponse(is_valid=False, error_code=return_values[1])
|
Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
for the request
recaptcha_challenge_field -- The value of recaptcha_challenge_field
from the form
recaptcha_response_field -- The value of recaptcha_response_field
from the form
private_key -- your reCAPTCHA private key
remoteip -- the user's ip address
|
26,583 |
def copy(self):
self_copy = self.dup()
self_copy._scopes = copy.copy(self._scopes)
return self_copy
|
Return a copy of this object.
|
26,584 |
def get_var(self, name, recurse=True):
self._dlog("getting var ".format(name))
return self._search("vars", name, recurse)
|
Return the first var of name ``name`` in the current
scope stack (remember, vars are the ones that parse the
input stream)
:name: The name of the id
:recurse: Whether parent scopes should also be searched (defaults to True)
:returns: TODO
|
26,585 |
def draw(self, x, y):
try:
from time import time
import md5
from os import unlink
m = md5.new()
m.update(str(time()))
filename = "photobot" + str(m.hexdigest()) + ".png"
self.export(filename)
_ctx.image(filename, x, y)
unlink(filename)
except:
pass
|
Places the flattened canvas in NodeBox.
Exports to a temporary PNG file.
Draws the PNG in NodeBox using the image() command.
Removes the temporary file.
|
26,586 |
def normalize_response_value(rv):
status = headers = None
if isinstance(rv, tuple):
rv, status, headers = rv + (None,) * (3 - len(rv))
return rv, status, headers
|
Normalize the response value into a 3-tuple (rv, status, headers)
:type rv: tuple|*
:returns: tuple(rv, status, headers)
:rtype: tuple(Response|JsonResponse|*, int|None, dict|None)
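Usage sketch (illustrative view return values):
    >>> normalize_response_value(('hello', 200, {'X-Custom': '1'}))
    ('hello', 200, {'X-Custom': '1'})
    >>> normalize_response_value(('hello', 200))
    ('hello', 200, None)
    >>> normalize_response_value('hello')
    ('hello', None, None)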
|
26,587 |
def match_input_fmt(self, fmt_list):
rexp_list = []
for fmt in fmt_list:
rexp_list.extend(self.match_input_fmt_1(fmt))
return rexp_list
|
Given a list of Fortran format specifiers, e.g., ['I5', '2X', 'F4.1'],
this function constructs a list of tuples for matching an input
string against those format specifiers.
|
26,588 |
def delete(self, client=None, reload_data=False):
client = self._require_client(client)
client._connection.api_request(method="DELETE", path=self.path)
if reload_data:
self.reload()
|
API call: delete the project via a ``DELETE`` request.
See
https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/delete
This actually changes the status (``lifecycleState``) from ``ACTIVE``
to ``DELETE_REQUESTED``.
Later (it's not specified when), the project will move into the
``DELETE_IN_PROGRESS`` state, which means the deleting has actually
begun.
:type client: :class:`google.cloud.resource_manager.client.Client` or
:data:`NoneType <types.NoneType>`
:param client: the client to use. If not passed, falls back to
the client stored on the current project.
:type reload_data: bool
:param reload_data: Whether to reload the project with the latest
state. If you want to get the updated status,
you'll want this set to :data:`True` as the DELETE
method doesn't send back the updated project.
Default: :data:`False`.
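Usage sketch (hypothetical project ID; assumes ``client`` is a
:class:`google.cloud.resource_manager.client.Client` instance):
    >>> project = client.fetch_project('purple-spaceship-123')
    >>> project.delete(reload_data=True)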
|
26,589 |
def find_and_reserve_fcp(self, assigner_id):
fcp_list = self.db.get_from_assigner(assigner_id)
if not fcp_list:
new_fcp = self.db.find_and_reserve()
if new_fcp is None:
LOG.info("no more fcp to be allocated")
return None
LOG.debug("allocated %s fcp for %s assigner" %
(new_fcp, assigner_id))
return new_fcp
else:
old_fcp = fcp_list[0][0]
self.db.reserve(fcp_list[0][0])
return old_fcp
|
Reserve a FCP for the given assigner_id.
1. Check whether assigner_id already has a FCP;
if yes, set the reserve flag of that record to 1.
2. If not, find a free FCP and reserve it.
The FCP is returned, or None to indicate that no FCP is available.
|
26,590 |
def patch_ref(self, sha):
uri = "{api}/repos/{origin}/git/refs/heads/{branch}".format(
api=self.github_api_url,
origin=self.origin,
branch=self.master_fork
)
data = {
"sha": sha,
"force": True
}
reply = self.request(
"PATCH",
uri,
data=data
)
if reply.status_code == 200:
dic = json.loads(reply.content.decode("utf-8"))
return dic["object"]["sha"]
else:
dic = json.loads(reply.content.decode("utf-8"))
return self.ProxyError(
reply.status_code,
(dic, "message"),
step="patch",
context={
"uri": uri,
"data": data
}
)
|
Patch reference on the origin master branch
:param sha: Sha to use for the branch
:return: Status of success
:rtype: str or self.ProxyError
|
26,591 |
def main():
try:
args = sys.argv[1:]
try:
_, args = getopt.getopt(args, MAIN_OPTS, MAIN_LONG_OPTS)
except getopt.GetoptError as e:
error(str(e))
sys.exit(1)
if args[0] == 'prompt':
try:
from topydo.ui.prompt.Prompt import PromptApplication
PromptApplication().run()
except ImportError:
error("Some additional dependencies for prompt mode were not installed, please install with ")
elif args[0] == 'columns':
try:
from topydo.ui.columns.Main import UIApplication
UIApplication().run()
except ImportError:
error("Some additional dependencies for column mode were not installed, please install with ")
except NameError as err:
if _WINDOWS:
error("Column mode is not supported on Windows.")
else:
error("Could not load column mode: {}".format(err))
else:
CLIApplication().run()
except IndexError:
CLIApplication().run()
|
Main entry point of the CLI.
|
26,592 |
def _loadConfiguration(self):
configPath = os.path.join(self.path, "config")
if not os.path.isdir(configPath):
return
config = Config(configPath)
Config.mergeDictionaries(config.getData(), self.application.config)
|
Load module configuration files.
:return: <void>
|
26,593 |
def rm(self, path):
resp = self._sendRequest("DELETE", path)
if not (resp.status_code in (200, 204)):
raise YaDiskException(resp.status_code, resp.content)
|
Delete file or directory.
|
26,594 |
def _check_status(func, read_exception, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
_LOG.exception(e)
message = str(e) if read_exception else 'An unexpected error occurred'
return dict(message=message, available=False)
|
Checks the status of a single component by
calling the func with the args. The func is expected to
return a dict with at least an `available=<bool>` key
value pair
:param func func: The function to call
:param read_exception: If an exception is thrown
should the exception message be passed as the
message parameter. If not a generic
message parameter will be added to the dict
:param tuple args: A list of arguments to pass to
to function
:param dict kwargs: a dict of keyword arguments
to pass to the function
:return: a dictionary that includes the state
of the component. At least an 'available'
key is guaranteed
:rtype: dict
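Usage sketch (hypothetical component check function):
    >>> def ping_db():
    ...     return {'available': True, 'latency_ms': 3}
    >>> _check_status(ping_db, True)
    {'available': True, 'latency_ms': 3}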
|
26,595 |
def upper_diag_self_prodx(list_):
return [(item1, item2)
for n1, item1 in enumerate(list_)
for n2, item2 in enumerate(list_) if n1 < n2]
|
Upper diagonal of the Cartesian product of self and self.
Weird name. fixme
Args:
list_ (list):
Returns:
list:
CommandLine:
python -m utool.util_alg --exec-upper_diag_self_prodx
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> list_ = [1, 2, 3]
>>> result = upper_diag_self_prodx(list_)
>>> print(result)
[(1, 2), (1, 3), (2, 3)]
|
26,596 |
def get_depth_pmf(self, depth_bins, default_depth=5.0, bootstrap=None):
if len(self.data['depth']) == 0:
return PMF([(1.0, default_depth)])
depth_hist = self.get_depth_distribution(depth_bins,
normalisation=True,
bootstrap=bootstrap)
depth_hist = np.around(depth_hist, 3)
while depth_hist.sum() - 1.0:
depth_hist[-1] -= depth_hist.sum() - 1.0
depth_hist = np.around(depth_hist, 3)
pmf_list = []
for iloc, prob in enumerate(depth_hist):
pmf_list.append((prob,
(depth_bins[iloc] + depth_bins[iloc + 1]) / 2.0))
return PMF(pmf_list)
|
Returns the depth distribution of the catalogue as a probability mass
function
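Usage sketch (illustrative bin edges in km; ``catalogue`` is an instance of
the enclosing catalogue class):
    >>> depth_bins = np.array([0., 5., 10., 20., 30.])
    >>> pmf = catalogue.get_depth_pmf(depth_bins, default_depth=10.0)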
|
26,597 |
def authenticate(self):
logger.info("Authenticating as %s", self.user[])
data = dict(self.user)
data.update({: False})
try:
req = self.session.post(
self._base_login_url,
params=self.params,
data=json.dumps(data)
)
except PyiCloudAPIResponseError as error:
msg = 'Invalid email/password combination.'
raise PyiCloudFailedLoginException(msg, error)
resp = req.json()
self.params.update({'dsid': resp['dsInfo']['dsid']})
if not os.path.exists(self._cookie_directory):
os.mkdir(self._cookie_directory)
self.session.cookies.save()
logger.debug("Cookies saved to %s", self._get_cookiejar_path())
self.data = resp
self.webservices = self.data['webservices']
logger.info("Authentication completed successfully")
logger.debug(self.params)
|
Handles authentication, and persists the X-APPLE-WEB-KB cookie so that
subsequent logins will not cause additional e-mails from Apple.
|
26,598 |
def list_default_storage_policy_of_datastore(datastore, service_instance=None):
log.trace('Listing the default storage policy of datastore \'%s\'', datastore)
target_ref = _get_proxy_target(service_instance)
ds_refs = salt.utils.vmware.get_datastores(service_instance, target_ref,
datastore_names=[datastore])
if not ds_refs:
raise VMwareObjectRetrievalError('Datastore \'{0}\' was not found'
.format(datastore))
profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
policy = salt.utils.pbm.get_default_storage_policy_of_datastore(
profile_manager, ds_refs[0])
return _get_policy_dict(policy)
|
Returns the default storage policy assigned to the specified datastore.
datastore
Name of the datastore.
The datastore needs to be visible to the VMware entity the proxy
points to.
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.list_default_storage_policy_of_datastore datastore=ds1
|
26,599 |
def send_global_velocity(velocity_x, velocity_y, velocity_z, duration):
msg = vehicle.message_factory.set_position_target_global_int_encode(
0,
0, 0,
mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT,
0b0000111111000111,
0,
0,
0,
velocity_x,
velocity_y,
velocity_z,
0, 0, 0,
0, 0)
for x in range(0,duration):
vehicle.send_mavlink(msg)
time.sleep(1)
|
Move vehicle in direction based on specified velocity vectors.
This uses the SET_POSITION_TARGET_GLOBAL_INT command with type mask enabling only
velocity components
(http://dev.ardupilot.com/wiki/copter-commands-in-guided-mode/#set_position_target_global_int).
Note that from AC3.3 the message should be re-sent every second (after about 3 seconds
with no message the velocity will drop back to zero). In AC3.2.1 and earlier the specified
velocity persists until it is canceled. The code below should work on either version
(sending the message multiple times does not cause problems).
See the above link for information on the type_mask (0=enable, 1=ignore).
At time of writing, acceleration and yaw bits are ignored.
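Usage sketch (assumes the module-level ``vehicle`` used above is connected and
flying in GUIDED mode; velocities follow the NED convention, so velocity_x is
north, velocity_y is east and velocity_z is down):
    send_global_velocity(1, 0, 0, 5)   # fly north at 1 m/s for 5 seconds
    send_global_velocity(0, 0, 0, 1)   # then command zero velocity to stop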
|