Unnamed: 0 (int64, 0–389k) | code (stringlengths 26–79.6k) | docstring (stringlengths 1–46.9k) |
---|---|---|
6,500 | def is_valid_python(code, reraise=True, ipy_magic_workaround=False):
import ast
import re
try:
if ipy_magic_workaround:
# string literals below reconstructed (lost in extraction); regex assumed to match IPython magic lines
code = '\n'.join(['' if re.match(r'^\s*%', line) else line for line in code.split('\n')])
ast.parse(code)
except SyntaxError:
if reraise:
import utool as ut
print('Syntax error in the following code:')  # message reconstructed
ut.print_python_code(code)
raise
return False
return True | References:
http://stackoverflow.com/questions/23576681/python-check-syntax |
6,501 | def stream_replicate():
stream = primary.stream(SomeDataBlob, "trim_horizon")
next_heartbeat = pendulum.now()
while True:
now = pendulum.now()
if now >= next_heartbeat:
stream.heartbeat()
next_heartbeat = now.add(minutes=10)
record = next(stream)
if record is None:
continue
if record["new"] is not None:
replica.save(record["new"])
else:
replica.delete(record["old"]) | Monitor changes in approximately real-time and replicate them |
6,502 | def set_polling_override(self, override):
polling_override = self.get_characteristic_handle_from_uuid(UUID_POLLING_OVERRIDE)
if polling_override is None:
logger.warning('Failed to find handle for the polling override characteristic')  # message reconstructed
return False
if self.dongle._write_attribute(self.conn_handle, polling_override, struct.pack('<H', override)):  # struct format assumed; original literal lost
return True
return False | Set the sensor polling timer override value in milliseconds.
Due to the time it takes to poll all the sensors on up to 5 IMUs, it's not
possible for the SK8 firmware to define a single fixed rate for reading
new samples without it being artificially low for most configurations.
Instead the firmware tries to define a sensible default value for each
combination of IMUs and sensors that can be enabled (any combination of
1-5 IMUs and 1-3 sensors on each IMU). In most cases this should work well,
but for example if you have multiple SK8s connected through the same dongle
and have multiple IMUs enabled on each, you may find packets start to be
dropped quite frequently.
To mitigate this, you can adjust the period of the timer used by the firmware
to poll for new sensor data (and send data packets to the host device). The
value should be in integer milliseconds, and have a minimum value of 20. Values
below 20 will be treated as a request to disable the override and return to the
default polling period.
The method can be called before or after streaming is activated, and will take
effect immediately.
NOTE1: the value is stored in RAM and will not persist across reboots, although
it should persist for multiple connections.
NOTE2: once set, the override applies to ALL sensor configurations, so for
example if you set it while using 5 IMUs on 2 SK8s, then switch to using
1 IMU on each SK8, you will probably want to disable it again as the
latter configuration should work fine with the default period.
Args:
override (int): polling timer override period in milliseconds. Values
below 20 are treated as 0, and have the effect of disabling the
override in favour of the default periods.
Returns:
True on success, False on error. |
6,503 | def bls_stats_singleperiod(times, mags, errs, period,
magsarefluxes=False,
sigclip=10.0,
perioddeltapercent=10,
nphasebins=200,
mintransitduration=0.01,
maxtransitduration=0.4,
ingressdurationfraction=0.1,
verbose=True):
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
if len(stimes) > 9 and len(smags) > 9 and len(serrs) > 9:
startp = period - perioddeltapercent*period/100.0
if startp < 0:
startp = period
endp = period + perioddeltapercent*period/100.0
blsres = bls_serial_pfind(stimes, smags, serrs,
verbose=verbose,
startp=startp,
endp=endp,
nphasebins=nphasebins,
mintransitduration=mintransitduration,
maxtransitduration=maxtransitduration,
magsarefluxes=magsarefluxes,
get_stats=False,
sigclip=None)
if (not blsres or
'blsresult' not in blsres or
blsres['blsresult'] is None):
LOGERROR("BLS failed during a period-search "
"performed around the input best period: %.6f. "
"Can't continue." % period)
return None
# ... (stats computation using blsres['blsresult'] — 'transdepth',
# 'transduration', 'bestperiod', 'transingressbin', 'transegressbin' —
# and the returned dict were lost in extraction)
else:
# not enough points in the mag series, bail out
LOGERROR('Not enough points in the mag series, bailing out.')
return None | This calculates the SNR, depth, duration, a refit period, and time of
center-transit for a single period.
The equation used for SNR is::
SNR = (transit model depth / RMS of LC with transit model subtracted)
* sqrt(number of points in transit)
NOTE: you should set the kwargs `sigclip`, `nphasebins`,
`mintransitduration`, `maxtransitduration` to what you used for an initial
BLS run to detect transits in the input light curve to match those input
conditions.
Parameters
----------
times,mags,errs : np.array
These contain the magnitude/flux time-series and any associated errors.
period : float
The period to search around and refit the transits. This will be used to
calculate the start and end periods of a rerun of BLS to calculate the
stats.
magsarefluxes : bool
Set to True if the input measurements in `mags` are actually fluxes and
not magnitudes.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
perioddeltapercent : float
The fraction of the period provided to use to search around this
value. This is a percentage. The period range searched will then be::
[period - (perioddeltapercent/100.0)*period,
period + (perioddeltapercent/100.0)*period]
nphasebins : int
The number of phase bins to use in the BLS run.
mintransitduration : float
The minimum transit duration in phase to consider.
maxtransitduration : float
The maximum transit duration to consider.
ingressdurationfraction : float
The fraction of the transit duration to use to generate an initial value
of the transit ingress duration for the BLS model refit. This will be
fit by this function.
verbose : bool
If True, will indicate progress and any problems encountered.
Returns
-------
dict
A dict of the following form is returned::
{'period': the refit best period,
'epoch': the refit epoch (i.e. mid-transit time),
'snr':the SNR of the transit,
'transitdepth':the depth of the transit,
'transitduration':the duration of the transit,
'nphasebins':the input value of nphasebins,
'transingressbin':the phase bin containing transit ingress,
'transegressbin':the phase bin containing transit egress,
'blsmodel':the full BLS model used along with its parameters,
'subtractedmags':BLS model - phased light curve,
'phasedmags': the phased light curve,
'phases': the phase values} |
6,504 | def cluster_coincs_multiifo(stat, time_coincs, timeslide_id, slide, window, argmax=numpy.argmax):
time_coinc_zip = list(zip(*time_coincs))
if len(time_coinc_zip) == 0:
logging.info('No coincident triggers.')  # message reconstructed
return numpy.array([])
time_avg_num = []
for tc in time_coinc_zip:
time_avg_num.append(mean_if_greater_than_zero(tc))
time_avg, num_ifos = zip(*time_avg_num)
time_avg = numpy.array(time_avg)
num_ifos = numpy.array(num_ifos)
if numpy.isfinite(slide):
nifos_minusone = (num_ifos - numpy.ones_like(num_ifos))
time_avg = time_avg + (nifos_minusone * timeslide_id * slide)/num_ifos
tslide = timeslide_id.astype(numpy.float128)
time_avg = time_avg.astype(numpy.float128)
span = (time_avg.max() - time_avg.min()) + window * 10
time_avg = time_avg + span * tslide
cidx = cluster_over_time(stat, time_avg, window, argmax)
return cidx | Cluster coincident events for each timeslide separately, across
templates, based on the ranking statistic
Parameters
----------
stat: numpy.ndarray
vector of ranking values to maximize
time_coincs: tuple of numpy.ndarrays
trigger times for each ifo, or -1 if an ifo does not participate in a coinc
timeslide_id: numpy.ndarray
vector that determines the timeslide offset
slide: float
length of the timeslides offset interval
window: float
duration of clustering window in seconds
Returns
-------
cindex: numpy.ndarray
The set of indices corresponding to the surviving coincidences |
6,505 | def pemp(stat, stat0):
assert len(stat0) > 0
assert len(stat) > 0
stat = np.array(stat)
stat0 = np.array(stat0)
m = len(stat)
m0 = len(stat0)
statc = np.concatenate((stat, stat0))
v = np.array([True] * m + [False] * m0)
perm = np.argsort(-statc, kind="mergesort")
v = v[perm]
u = np.where(v)[0]
p = (u - np.arange(m)) / float(m0)
ranks = np.floor(scipy.stats.rankdata(-stat)).astype(int) - 1
p = p[ranks]
p[p <= 1.0 / m0] = 1.0 / m0
return p | Computes empirical p-values identically to bioconductor/qvalue empPvals
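Example (illustrative): an observed statistic larger than every null statistic
gets the minimum attainable p-value 1/len(stat0)::
>>> pemp(np.array([3.0]), np.array([1.0, 2.0]))
array([0.5]) |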
6,506 | def c2ln(c,l1,l2,n):
"char[n] to two unsigned long???"
c = c + n
l1, l2 = U32(0), U32(0)
f = 0
if n == 8:
l2 = l2 | (U32(c[7]) << 24)
f = 1
if f or (n == 7):
l2 = l2 | (U32(c[6]) << 16)
f = 1
if f or (n == 6):
l2 = l2 | (U32(c[5]) << 8)
f = 1
if f or (n == 5):
l2 = l2 | U32(c[4])
f = 1
if f or (n == 4):
l1 = l1 | (U32(c[3]) << 24)
f = 1
if f or (n == 3):
l1 = l1 | (U32(c[2]) << 16)
f = 1
if f or (n == 2):
l1 = l1 | (U32(c[1]) << 8)
f = 1
if f or (n == 1):
l1 = l1 | U32(c[0])
return (l1, l2) | Pack the first n bytes of char array c into two 32-bit words (little-endian)
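Example (illustrative; assumes U32 wraps values to 32 bits): with n=4 only l1
is filled, little-endian::
>>> c2ln([0x01, 0x02, 0x03, 0x04], 0, 0, 4)  # doctest: +SKIP
(U32(0x04030201), U32(0x0)) |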
6,507 | def get_datasets_list(self, project_id=None):
dataset_project_id = project_id if project_id else self.project_id
try:
datasets_list = self.service.datasets().list(
projectId=dataset_project_id).execute(num_retries=self.num_retries)['datasets']
self.log.info("Datasets List: %s", datasets_list)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content))
return datasets_list | Method returns full list of BigQuery datasets in the current project
.. seealso::
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list
:param project_id: Google Cloud Project for which you
try to get all datasets
:type project_id: str
:return: datasets_list
Example of returned datasets_list: ::
[
{
"kind":"bigquery#dataset",
"location":"US",
"id":"your-project:dataset_2_test",
"datasetReference":{
"projectId":"your-project",
"datasetId":"dataset_2_test"
}
},
{
"kind":"bigquery#dataset",
"location":"US",
"id":"your-project:dataset_1_test",
"datasetReference":{
"projectId":"your-project",
"datasetId":"dataset_1_test"
}
}
] |
6,508 | def files_to_pif(files, verbose=0, quality_report=True, inline=True):
found_parser = False
for possible_parser in [PwscfParser, VaspParser]:
try:
parser = possible_parser(files)
found_parser = True
break
except InvalidIngesterException:
pass
if not found_parser:
raise Exception('Directory is not in a known DFT output format')  # message reconstructed
if verbose > 0:
print("Found a {} directory".format(parser.get_name()))
chem = ChemicalSystem()
chem.chemical_formula = parser.get_composition()
software = Software(name=parser.get_name(),
version=parser.get_version_number())
method = Method(name='Density Functional Theory',  # name reconstructed
software=[software])
conditions = []
for name, func in parser.get_setting_functions().items():
cond = getattr(parser, func)()
if cond is None:
continue
if inline and cond.files is not None:
continue
cond.name = name
conditions.append(cond)
chem.properties = []
for name, func in parser.get_result_functions().items():
prop = getattr(parser, func)()
if prop is None:
continue
if inline and prop.files is not None:
continue
prop.name = name
prop.methods = [method,]
prop.data_type = 'COMPUTATIONAL'  # value reconstructed
if verbose > 0 and isinstance(prop, Value):
print(name)
if prop.conditions is None:
prop.conditions = conditions
else:
if not isinstance(prop.conditions, list):
prop.conditions = [prop.conditions]
prop.conditions.extend(conditions)
chem.properties.append(prop)
if quality_report and isinstance(parser, VaspParser):
_add_quality_report(parser, chem)
return chem | Given a directory that contains output from
a DFT calculation, parse the data and return
a pif object
Input:
files - [str] list of files from which the parser is allowed to read.
verbose - int, how many status messages to print
Output:
pif - ChemicalSystem, Results and settings of
the DFT calculation in pif format |
6,509 | def transform(function):
def transform_fn(_, result):
if isinstance(result, Nothing):
return result
lgr.debug("Transforming %r with %r", result, function)
try:
return function(result)
except:
exctype, value, tb = sys.exc_info()
try:
new_exc = StyleFunctionError(function, exctype, value)
new_exc.__cause__ = None
six.reraise(StyleFunctionError, new_exc, tb)
finally:
del tb
return transform_fn | Return a processor for a style's "transform" function. |
6,510 | def vb_get_network_addresses(machine_name=None, machine=None, wait_for_pattern=None):
if machine_name:
machine = vb_get_box().findMachine(machine_name)
ip_addresses = []
log.debug("checking for power on:")
if machine.state == _virtualboxManager.constants.MachineState_Running:
log.debug("got power on:")
if wait_for_pattern and not machine.getGuestPropertyValue(wait_for_pattern):
log.debug("waiting for pattern:%s:", wait_for_pattern)
return None
_total_slots = machine.getGuestPropertyValue('/VirtualBox/GuestInfo/Net/Count')
if not _total_slots:
log.debug("waiting for net count:%s:", wait_for_pattern)
return None
try:
total_slots = int(_total_slots)
for i in range(total_slots):
try:
address = machine.getGuestPropertyValue('/VirtualBox/GuestInfo/Net/{0}/V4/IP'.format(i))
if address:
ip_addresses.append(address)
except Exception as e:
log.debug(e)
except ValueError as e:
log.debug(e)
return None
log.debug("returning ip_addresses:%s:", ip_addresses)
return ip_addresses | TODO distinguish between private and public addresses
A valid machine_name or a machine is needed to make this work!
!!!
Guest prerequisite: GuestAddition
!!!
Thanks to Shrikant Havale for the StackOverflow answer http://stackoverflow.com/a/29335390
More information on guest properties: https://www.virtualbox.org/manual/ch04.html#guestadd-guestprops
@param machine_name:
@type machine_name: str
@param machine:
@type machine: IMachine
@return: All the IPv4 addresses we could get
@rtype: str[] |
6,511 | def seq_2_StdStringVector(seq, vec=None):
if vec is None:
if isinstance(seq, StdStringVector):
return seq
vec = StdStringVector()
if not isinstance(vec, StdStringVector):
raise TypeError('vec must be a tango.StdStringVector')  # message reconstructed
for e in seq:
vec.append(str(e))
return vec | Converts a python sequence<str> object to a :class:`tango.StdStringVector`
:param seq: the sequence of strings
:type seq: sequence<:py:obj:`str`>
:param vec: (optional, default is None) an :class:`tango.StdStringVector`
to be filled. If None is given, a new :class:`tango.StdStringVector`
is created
:return: a :class:`tango.StdStringVector` filled with the same contents as seq
:rtype: :class:`tango.StdStringVector`
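Example (illustrative; requires the tango extension)::
>>> vec = seq_2_StdStringVector(['on', 'off'])
>>> list(vec)
['on', 'off'] |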
6,512 | def get_molo_comments(parser, token):
keywords = token.contents.split()
if len(keywords) != 5 and len(keywords) != 7 and len(keywords) != 9:
raise template.TemplateSyntaxError(
"%r tag takes exactly 2, 4 or 6 arguments" % (keywords[0],))
if keywords[1] != 'for':
raise template.TemplateSyntaxError(
"first argument to %r tag must be 'for'" % (keywords[0],))
if keywords[3] != 'as':
raise template.TemplateSyntaxError(
"third argument to %r tag must be 'as'" % (keywords[0],))
if len(keywords) > 5 and keywords[5] != 'limit':
raise template.TemplateSyntaxError(
"fifth argument to %r tag must be 'limit'" % (keywords[0],))
if len(keywords) == 7:
return GetMoloCommentsNode(keywords[2], keywords[4], keywords[6])
if len(keywords) > 7 and keywords[7] != 'child_limit':
raise template.TemplateSyntaxError(
"seventh argument to %r tag must be 'child_limit'"
% (keywords[0],))
if len(keywords) > 7:
return GetMoloCommentsNode(keywords[2], keywords[4],
keywords[6], keywords[8])
return GetMoloCommentsNode(keywords[2], keywords[4]) | Get a limited set of comments for a given object.
Defaults to a limit of 5. Setting the limit to -1 disables limiting.
Set the amount of comments to
usage:
{% get_molo_comments for object as variable_name %}
{% get_molo_comments for object as variable_name limit amount %}
{% get_molo_comments for object as variable_name limit amount child_limit amount %} # noqa |
6,513 | def user(self, login=None):
if login:
url = self._build_url('users', login)
else:
url = self._build_url('user')
json = self._json(self._get(url), 200)
return User(json, self._session) if json else None | Returns a User object for the specified login name if
provided. If no login name is provided, this will return a User
object for the authenticated user.
:param str login: (optional)
:returns: :class:`User <github3.users.User>` |
6,514 | def dump(self):
return {
"key": self._key,
"status": self._status,
"ttl": self._ttl,
"answer": self._answer.word,
"mode": self._mode.dump(),
"guesses_made": self._guesses_made
} | Dump (return) a dict representation of the GameObject. This is a Python
dict and is NOT serialized. NB: the answer (a DigitWord object) and the
mode (a GameMode object) are converted to python objects of a list and
dict respectively.
:return: python <dict> of the GameObject as detailed above. |
6,515 | def get_surface_as_bytes(self, order=None):
arr8 = self.get_surface_as_array(order=order)
return arr8.tobytes(order='C')
Subclass should override if there is a more efficient conversion
than from generating a numpy array first. |
6,516 | def keyword(self) -> Tuple[Optional[str], str]:
i1 = self.yang_identifier()
if self.peek() == ":":
self.offset += 1
i2 = self.yang_identifier()
return (i1, i2)
return (None, i1) | Parse a YANG statement keyword.
Raises:
EndOfInput: If past the end of input.
UnexpectedInput: If no syntactically correct keyword is found. |
6,517 | def get_episodes(self, series_id, **kwargs):
params = {'page': 1}
for arg, val in six.iteritems(kwargs):
if arg in EPISODES_BY:
params[arg] = val
return self._exec_request(
'series',  # endpoint and path/key literals below reconstructed; originals lost
path_args=[series_id, 'episodes', 'query'], params=params)['data']
Paginated with 100 results per page.
.. warning::
authorization token required
The following search arguments currently supported:
* airedSeason
* airedEpisode
* imdbId
* dvdSeason
* dvdEpisode
* absoluteNumber
* page
:param str series_id: id of series as found on thetvdb
:parm kwargs: keyword args to search/filter episodes by (optional)
:returns: series episode records
:rtype: list
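Example usage (illustrative; assumes an authenticated client)::
>>> episodes = client.get_episodes('296762', airedSeason=1)  # doctest: +SKIP |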
6,518 | def _fourier(self):
freq_bin_upper = 2000
freq_bin_lower = 40
fs = self._metadata['fs']
Y_transformed = {}
for key in self.Y_dict.keys():
fs = self._metadata["fs"]
Y_transformed[key] = (1./fs)*np.fft.fft(self.Y_dict[key])[freq_bin_lower:freq_bin_upper]
self.Y_transformed = Y_transformed
self._metadata["dF"] = 1./self._metadata["T"]
self.psd = load_psd()[freq_bin_lower:freq_bin_upper]
dF = self._metadata['dF']
self.sigma = convert_psd_to_sigma(self.psd, dF) | 1 side Fourier transform and scale by dt all waveforms in catalog |
6,519 | def callback(self, request, **kwargs):
try:
client = self.get_evernote_client()
us = UserService.objects.get(user=request.user, name=ServicesActivated.objects.get(name='ServiceEvernote'))
us.token = client.get_access_token(request.session['oauth_token'], request.session['oauth_token_secret'],
request.GET.get('oauth_verifier', ''))
us.save()
except KeyError:
return '/'  # return values reconstructed (originals lost)
return 'evernote/callback.html' | Called from the Service when the user accepts activating it |
6,520 | def bresenham(x1, y1, x2, y2):
points = []
issteep = abs(y2-y1) > abs(x2-x1)
if issteep:
x1, y1 = y1, x1
x2, y2 = y2, x2
rev = False
if x1 > x2:
x1, x2 = x2, x1
y1, y2 = y2, y1
rev = True
deltax = x2 - x1
deltay = abs(y2-y1)
error = int(deltax / 2)
y = y1
ystep = None
if y1 < y2:
ystep = 1
else:
ystep = -1
for x in range(x1, x2 + 1):
if issteep:
points.append((y, x))
else:
points.append((x, y))
error -= deltay
if error < 0:
y += ystep
error += deltax
if rev:
points.reverse()
return points | Return a list of points in a bresenham line.
Implementation hastily copied from RogueBasin.
Returns:
List[Tuple[int, int]]: A list of (x, y) points,
including both the start and end-points.
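Example (illustrative)::
>>> bresenham(0, 0, 3, 2)
[(0, 0), (1, 1), (2, 1), (3, 2)] |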
6,521 | def upload_file(self, path=None, stream=None, name=None, **kwargs):
params = "&".join(["%s=%s" % (k, v) for k, v in kwargs.items()])
url = "http://{master_addr}:{master_port}/dir/assign{params}".format(
master_addr=self.master_addr,
master_port=self.master_port,
params="?" + params if params else
)
data = json.loads(self.conn.get_data(url))
if data.get("error") is not None:
return None
post_url = "http://{url}/{fid}".format(
url=data['publicUrl' if self.use_public_url else 'url'],
fid=data['fid']
)
if path is not None:
filename = os.path.basename(path)
with open(path, "rb") as file_stream:
res = self.conn.post_file(post_url, filename, file_stream)
elif stream is not None and name is not None:
res = self.conn.post_file(post_url, name, stream)
else:
raise ValueError(
"If `path` is None then *both* `stream` and `name` must not"
" be None ")
response_data = json.loads(res)
if "size" in response_data:
return data.get('fid')
return None | Uploads file to WeedFS
It takes either a path, or a stream and a name, and uploads it
to WeedFS server.
Returns fid of the uploaded file.
:param string path:
:param string stream:
:param string name:
:rtype: string or None
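Example usage (illustrative; assumes a running SeaweedFS master)::
>>> fid = weed.upload_file(path='/tmp/example.txt')  # doctest: +SKIP |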
6,522 | def _lemmatise_roman_numerals(self, form, pos=False, get_lemma_object=False):
if estRomain(form):
_lemma = Lemme(
cle=form, graphie_accentuee=form, graphie=form, parent=self, origin=0, pos="a",
modele=self.modele("inv")
)
yield Lemmatiseur.format_result(
form=form,
lemma=_lemma,
with_pos=pos,
raw_obj=get_lemma_object
)
if form.upper() != form:
yield from self._lemmatise_roman_numerals(form.upper(), pos=pos, get_lemma_object=get_lemma_object) | Lemmatise a word form if it is a Roman numeral
:param form: Word to lemmatise
:param pos: Also retrieve the POS
:param get_lemma_object: Retrieve Lemma object instead of string representation of lemma |
6,523 | def configs_in(src_dir):
for filename in files_in_dir(src_dir, '*.json'):  # pattern reconstructed; original literal lost
with open(os.path.join(src_dir, filename), 'r') as in_f:
yield json.load(in_f) | Enumerate all configs in src_dir |
6,524 | def compute_bbox_with_margins(margin, x, y):
pos = np.asarray((x, y))
minxy, maxxy = pos.min(axis=1), pos.max(axis=1)
xy1 = minxy - margin*(maxxy - minxy)
xy2 = maxxy + margin*(maxxy - minxy)
return tuple(xy1), tuple(xy2) | Helper function to compute bounding box for the plot
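Example (illustrative): a 10% margin widens the box by a tenth of each axis
range::
>>> compute_bbox_with_margins(0.1, [0, 10], [0, 20])
((-1.0, -2.0), (11.0, 22.0)) |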
6,525 | def solve(self, assumptions=[]):
if self.minicard:
if self.use_timer:
start_time = time.clock()
def_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_DFL)
self.status = pysolvers.minicard_solve(self.minicard, assumptions)
def_sigint_handler = signal.signal(signal.SIGINT, def_sigint_handler)
if self.use_timer:
self.call_time = time.clock() - start_time
self.accu_time += self.call_time
return self.status | Solve internal formula. |
6,526 | def supported_tags(self, interpreter=None, force_manylinux=True):
if interpreter and not self.is_extended:
return _get_supported_for_any_abi(
platform=self.platform,
impl=interpreter.identity.abbr_impl,
version=interpreter.identity.impl_ver,
force_manylinux=force_manylinux
)
else:
return _get_supported(
platform=self.platform,
impl=self.impl,
version=self.version,
abi=self.abi,
force_manylinux=force_manylinux
) | Returns a list of supported PEP425 tags for the current platform. |
6,527 | def unqueue(self, timeout=10, should_wait=False):
start, now = time.time(), time.time()
wait = self.queue.empty() and should_wait
while (not self.queue.empty() or wait) and (now - start) < timeout:
if wait and self.queue.empty():
time.sleep(0.25)
now = time.time()
else:
result = self.queue.get(False)
self.log.debug('unqueue: result received: %s', result)  # message reconstructed
if result and result != "nil":
wait = None
start, now = time.time(), time.time()
_json = json.loads(result)
call_id = _json.get("callId")
if _json["payload"]:
self.handle_incoming_response(call_id, _json["payload"])
else:
self.log.debug('unqueue: nil or None received from server')  # message reconstructed
if (now - start) >= timeout:
self.log.warning('unqueue: no response from server within %s seconds', timeout) | Unqueue all the received ensime responses for a given file. |
6,528 | def _check_all_devices_in_sync(self):
if len(self._get_devices_by_failover_status('In Sync')) != \
len(self.devices):
msg = "Expected all devices in group to have 'In Sync' status."
raise UnexpectedDeviceGroupState(msg) | Wait until all devices have failover status of 'In Sync'.
:raises: UnexpectedClusterState |
6,529 | def get_top_sentences(self):
if isinstance(self.__top_sentences, int) is False:
raise TypeError("The type of __top_sentences must be int.")
return self.__top_sentences | getter |
6,530 | def create_fork(self, repo):
assert isinstance(repo, github.Repository.Repository), repo
url_parameters = {
"org": self.login,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/repos/" + repo.owner.login + "/" + repo.name + "/forks",
parameters=url_parameters
)
return github.Repository.Repository(self._requester, headers, data, completed=True) | :calls: `POST /repos/:owner/:repo/forks <http://developer.github.com/v3/repos/forks>`_
:param repo: :class:`github.Repository.Repository`
:rtype: :class:`github.Repository.Repository` |
6,531 | def thresholdBlocks(self, blocks, recall_weight=1.5):
candidate_records = itertools.chain.from_iterable(self._blockedPairs(blocks))
probability = core.scoreDuplicates(candidate_records,
self.data_model,
self.classifier,
self.num_cores)['score']
probability = probability.copy()
probability.sort()
probability = probability[::-1]
expected_dupes = numpy.cumsum(probability)
recall = expected_dupes / expected_dupes[-1]
precision = expected_dupes / numpy.arange(1, len(expected_dupes) + 1)
score = recall * precision / (recall + recall_weight ** 2 * precision)
i = numpy.argmax(score)
logger.info('Maximum expected recall and precision')  # messages reconstructed
logger.info('recall: %2.3f', recall[i])
logger.info('precision: %2.3f', precision[i])
logger.info('With threshold: %2.3f', probability[i])
return probability[i] | Returns the threshold that maximizes the expected F score, a
weighted average of precision and recall for a sample of
blocked data.
Arguments:
blocks -- Sequence of tuples of records, where each tuple is a
set of records covered by a blocking predicate
recall_weight -- Sets the tradeoff between precision and
recall. I.e. if you care twice as much about
recall as you do precision, set recall_weight
to 2. |
6,532 | def aggregate(self, query: Optional[dict] = None,
group: Optional[dict] = None,
order_by: Optional[tuple] = None) -> List[IModel]:
raise NotImplementedError | Get aggregated results
:param query: Rulez based query
:param group: Grouping structure
:param order_by: Tuple of ``(field, order)`` where ``order`` is
``'asc'`` or ``'desc'``
:todo: Grouping structure needs to be documented |
6,533 | def get_season_stats(self, season_key):
season_stats_url = self.api_path + "season/" + season_key + "/stats/"
response = self.get_response(season_stats_url)
return response | Calling Season Stats API.
Arg:
season_key: key of the season
Return:
json data |
6,534 | def import_generators(self, session, debug=False):
def import_res_generators():
generators_sqla = session.query(
# NOTE: the orm table keys below are reconstructed; the original
# string literals were lost in extraction
self.orm['re_generators'].columns.id,
self.orm['re_generators'].columns.subst_id,
self.orm['re_generators'].columns.la_id,
self.orm['re_generators'].columns.mvlv_subst_id,
self.orm['re_generators'].columns.electrical_capacity,
self.orm['re_generators'].columns.generation_type,
self.orm['re_generators'].columns.generation_subtype,
self.orm['re_generators'].columns.voltage_level,
self.orm['re_generators'].columns.w_id,
func.ST_AsText(func.ST_Transform(
self.orm['re_generators'].columns.rea_geom_new, srid)).label('geom_new'),
func.ST_AsText(func.ST_Transform(
self.orm['re_generators'].columns.geom, srid)).label('geom')
). \
filter(
self.orm['re_generators'].columns.subst_id.in_(list(mv_grid_districts_dict))). \
filter(self.orm['re_generators'].columns.voltage_level.in_([4, 5, 6, 7])). \
filter(self.orm['version_condition_re'])
generators = pd.read_sql_query(generators_sqla.statement,
session.bind,
index_col='id')
generators.loc[generators[
'generation_subtype'].isnull(),
'generation_subtype'] = 'unknown'
for id_db, row in generators.iterrows():
# geometry branch reconstructed (lines lost in extraction): prefer the
# relocated geometry, fall back to the original, skip if neither exists
if row['geom_new']:
geo_data = wkt_loads(row['geom_new'])
elif row['geom']:
geo_data = wkt_loads(row['geom'])
logger.warning(
'Generator {} has no geom_new entry, using original geom.'.format(id_db))
else:
logger.error(
'Generator {} has no geom entry at all and will not be imported!'.format(id_db))
continue
mv_grid_district_id = row['subst_id']
mv_grid = mv_grid_districts_dict[mv_grid_district_id].mv_grid
if row['generation_type'] in ['solar', 'wind']:
generator = GeneratorFluctuatingDing0(
id_db=id_db,
mv_grid=mv_grid,
capacity=row['electrical_capacity'],
type=row['generation_type'],
subtype=row['generation_subtype'],
v_level=int(row['voltage_level']),
weather_cell_id=row['w_id'])
else:
generator = GeneratorDing0(
id_db=id_db,
mv_grid=mv_grid,
capacity=row['electrical_capacity'],
type=row['generation_type'],
subtype=row['generation_subtype'],
v_level=int(row['voltage_level']))
if generator.v_level in [4, 5]:
generator.geo_data = geo_data
mv_grid.add_generator(generator)
elif generator.v_level in [6, 7]:
mvlv_subst_id = row['mvlv_subst_id']
# NOTE: the original logic attaching LV-level generators via
# mvlv_subst_id was lost in extraction; as a fallback, generators
# without an LV substation are reassigned to the MV level
if not mvlv_subst_id and generator.v_level == 6:
generator.v_level = 5
mv_grid.add_generator(generator)
| Imports renewable (res) and conventional (conv) generators
Args:
session : sqlalchemy.orm.session.Session
Database session
debug: If True, information is printed during process
Notes:
Connection of generators is done later on in NetworkDing0's method connect_generators() |
6,535 | def plot_mixture(mixture, i=0, j=1, center_style=dict(s=0.15),
cmap='nipy_spectral', cutoff=0.0, ellipse_style=dict(alpha=0.3),  # default cmap reconstructed
solid_edge=True, visualize_weights=False):
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Ellipse
from matplotlib.cm import get_cmap
# assertion messages reconstructed (lost in extraction)
assert i >= 0 and j >= 0, 'Invalid submatrix specification (%i, %i)' % (i, j)
assert i != j, 'Identical dimensions given: i = j = %i' % i
assert mixture.dim >= 2, 'Can only plot mixtures with dim >= 2'
cmap = get_cmap(name=cmap)
if visualize_weights:
renormalized_component_weights = np.array(mixture.weights)
colors = [cmap(k) for k in renormalized_component_weights]
else:
colors = [cmap(k) for k in np.linspace(0, _max_color, len(mixture.components))]
mask = mixture.weights >= cutoff
means = np.array([c.mu for c in mixture.components])
x_values = means.T[i]
y_values = means.T[j]
for k, w in enumerate(mixture.weights):
if w < cutoff:
continue
cov = mixture.components[k].sigma
submatrix = np.array([[cov[i,i], cov[i,j]], \
[cov[j,i], cov[j,j]]])
correlation = np.array([[1.0, cov[i,j] / np.sqrt(cov[i,i] * cov[j,j])], [0.0, 1.0]])
correlation[1,0] = correlation[0,1]
assert abs(correlation[0,1]) <= 1, 'Invalid correlation in component %i: %s' % (k, correlation[0, 1])  # message reconstructed
ew, ev = np.linalg.eigh(submatrix)
assert ew.min() > 0, 'Nonpositive eigenvalue(s) in component %i: %s' % (k, ew)  # message reconstructed
if submatrix[0,0] == submatrix[1,1]:
theta = np.sign(submatrix[0,1]) * np.pi / 4.
else:
theta = 0.5 * np.arctan( 2 * submatrix[0,1] / (submatrix[1,1] - submatrix[0,0]))
# width/height/angle and the axes handle were lost in extraction and are
# reconstructed: semi-axes are the sqrt of the eigenvalues, theta in degrees
width, height = np.sqrt(ew)
angle = theta * 180. / np.pi
ellipse_style_clone = dict(ellipse_style)
ellipse_style_clone.setdefault('facecolor', colors[k])
ax = plt.gca()
ax.add_patch(Ellipse(xy=(x_values[k], y_values[k]),
width=2*width, height=2*height, angle=angle,
**ellipse_style_clone))
if center_style:
plt.scatter(x_values[mask], y_values[mask], **center_style)
if visualize_weights:
mappable = plt.gci()
mappable.set_array(mixture.weights)
mappable.set_cmap(cmap) | Plot the (Gaussian) components of the ``mixture`` density as
one-sigma ellipses in the ``(i,j)`` plane.
:param center_style:
If a non-empty ``dict``, plot mean value with the style passed to ``scatter``.
:param cmap:
The color map to which components are mapped in order to
choose their face color. It is unaffected by the
``cutoff``. The meaning depends on ``visualize_weights``.
:param cutoff:
Ignore components whose weight is below the ``cut off``.
:param ellipse_style:
Passed on to define the properties of the ``Ellipse``.
:param solid_edge:
Draw the edge of the ellipse as solid opaque line.
:param visualize_weights:
Colorize the components according to their weights if ``True``.
One can do `plt.colorbar()` after this function and the bar allows to read off the weights.
If ``False``, coloring is based on the component index and the total number of components.
This option makes it easier to track components by assigning them the same color in subsequent calls to this function. |
6,536 | def addToLayout(self, analysis, position=None):
layout = self.getLayout()
container_uid = self.get_container_for(analysis)
if IRequestAnalysis.providedBy(analysis) and \
not IDuplicateAnalysis.providedBy(analysis):
container_uids = map(lambda slot: slot['container_uid'], layout)
if container_uid in container_uids:
position = [int(slot['position']) for slot in layout if
slot['container_uid'] == container_uid][0]
elif not position:
used_positions = [0, ] + [int(slot['position']) for slot in
layout]
position = [pos for pos in range(1, max(used_positions) + 2)
if pos not in used_positions][0]
an_type = self.get_analysis_type(analysis)
self.setLayout(layout + [{'position': position,
'type': an_type,
'container_uid': container_uid,
'analysis_uid': api.get_uid(analysis)}, ]) | Adds the analysis passed in to the worksheet's layout |
6,537 | def reset(name, soft=False, call=None):
if call != 'action':
raise SaltCloudSystemExit(
'The reset action must be called with -a or --action.'
vm_properties = [
"name",
"summary.runtime.powerState"
]
vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties)
for vm in vm_list:
if vm["name"] == name:
if vm["summary.runtime.powerState"] == "suspended" or vm["summary.runtime.powerState"] == "poweredOff":
ret = 'cannot reset'  # return string reconstructed
log.info('VM %s %s', name, ret)
return ret
try:
log.info('Resetting VM %s', name)
if soft:
vm["object"].RebootGuest()
else:
task = vm["object"].ResetVM_Task()
salt.utils.vmware.wait_for_task(task, name, 'reset')
except Exception as exc:
log.error(
'Error while resetting VM %s: %s',
name, exc,
exc_info_on_loglevel=logging.DEBUG
)
return 'failed to reset'
return 'reset' | To reset a VM using its name
.. note::
If ``soft=True`` then issues a command to the guest operating system
asking it to perform a reboot. Otherwise hypervisor will terminate VM and start it again.
Default is soft=False
For ``soft=True`` vmtools should be installed on guest system.
CLI Example:
.. code-block:: bash
salt-cloud -a reset vmname
salt-cloud -a reset vmname soft=True |
6,538 | def stopped(name, connection=None, username=None, password=None):
return _virt_call(name, 'shutdown', 'stopped', "Machine has been shut down",
connection=connection, username=username, password=password) | Stops a VM by shutting it down nicely.
.. versionadded:: 2016.3.0
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. code-block:: yaml
domain_name:
virt.stopped |
6,539 | def new_figure_manager(num, *args, **kwargs):
DEBUG_MSG("new_figure_manager()", 3, None)
_create_wx_app()
FigureClass = kwargs.pop('FigureClass', Figure)
fig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, fig) | Create a new figure manager instance |
6,540 | def on_service_arrival(self, svc_ref):
with self._lock:
new_ranking = svc_ref.get_property(SERVICE_RANKING)
if self._current_ranking is not None:
if new_ranking > self._current_ranking:
self._pending_ref = svc_ref
old_ref = self.reference
old_value = self._value
self._current_ranking = None
self._value = None
self.reference = None
self._ipopo_instance.unbind(self, old_value, old_ref)
else:
self.reference = svc_ref
self._value = self._context.get_service(svc_ref)
self._current_ranking = new_ranking
self._pending_ref = None
self._ipopo_instance.bind(self, self._value, self.reference) | Called when a service has been registered in the framework
:param svc_ref: A service reference |
6,541 | def sg_expand_dims(tensor, opt):
opt += tf.sg_opt(axis=-1)
return tf.expand_dims(tensor, opt.axis, name=opt.name) | r"""Inserts a new axis.
See tf.expand_dims() in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : Dimension to expand. Default is -1.
name: If provided, it replaces current tensor's name.
Returns:
A `Tensor`. |
6,542 | async def purgeQueues(self, *args, **kwargs):
return await self._makeApiCall(self.funcinfo["purgeQueues"], *args, **kwargs) | Purge the SQS queues
This method is only for debugging the ec2-manager
This method is ``experimental`` |
6,543 | def updateVersions(region="us-east-1", table="credential-store"):
dynamodb = boto3.resource('dynamodb', region_name=region)
secrets = dynamodb.Table(table)
response = secrets.scan(ProjectionExpression="#N, version, #K, contents, hmac",
ExpressionAttributeNames={"#N": "name", "#K": "key"})
items = response["Items"]
for old_item in items:
if isInt(old_item['version']):
new_item = copy.copy(old_item)
new_item['version'] = credstash.paddedInt(new_item['version'])
if new_item['version'] != old_item['version']:
secrets.put_item(Item=new_item)
secrets.delete_item(Key={'name': old_item['name'], 'version': old_item['version']})
else:
print "Skipping item: %s, %s" % (old_item['name'], old_item['version']) | do a full-table scan of the credential-store,
and update the version format of every credential if it is an integer |
6,544 | def extract_params(raw):
if isinstance(raw, (bytes, unicode_type)):
try:
params = urldecode(raw)
except ValueError:
params = None
elif hasattr(raw, '__iter__'):
try:
dict(raw)
except ValueError:
params = None
except TypeError:
params = None
else:
params = list(raw.items() if isinstance(raw, dict) else raw)
params = decode_params_utf8(params)
else:
params = None
return params | Extract parameters and return them as a list of 2-tuples.
Will successfully extract parameters from urlencoded query strings,
dicts, or lists of 2-tuples. Empty strings/dicts/lists will return an
empty list of parameters. Any other input will result in a return
value of None.
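Example (illustrative)::
>>> extract_params('a=1&b=2')
[('a', '1'), ('b', '2')] |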
6,545 | def get_ticket_results(mgr, ticket_id, update_count=1):
ticket = mgr.get_ticket(ticket_id)
table = formatting.KeyValueTable(['name', 'value'])
table.align['name'] = 'r'
table.align['value'] = 'l'
table.add_row(['id', ticket['id']])
table.add_row(['title', ticket['title']])
table.add_row(['priority', PRIORITY_MAP[ticket.get('priority', 0)]])
if ticket.get('assignedUser'):
user = ticket['assignedUser']
table.add_row([
'user',
"%s %s" % (user.get('firstName'), user.get('lastName')),
])
table.add_row(['status', ticket['status']['name']])
table.add_row(['created', ticket.get('createDate')])
table.add_row(['edited', ticket.get('lastEditDate')])
updates = ticket.get('updates', [])
count = min(len(updates), update_count)
count_offset = len(updates) - count + 1
for i, update in enumerate(updates[-count:]):
wrapped_entry = ""
editor = update.get('editor')
if editor:
if editor.get('displayName'):
wrapped_entry += "By %s (Employee)\n" % (editor['displayName'])
if editor.get('firstName'):
wrapped_entry += "By %s %s\n" % (editor.get('firstName'),
editor.get('lastName'))
wrapped_entry += click.wrap_text(update['entry'].replace('\r', ''))
table.add_row(['update %s' % (count_offset + i,), wrapped_entry])
return table | Get output about a ticket.
:param integer id: the ticket ID
:param integer update_count: number of entries to retrieve from ticket
:returns: a KeyValue table containing the details of the ticket |
6,546 | def parse_methodcall(self, tup_tree):
self.check_node(tup_tree, 'METHODCALL', ('NAME',), (),
('LOCALCLASSPATH', 'LOCALINSTANCEPATH', 'PARAMVALUE'))
path = self.list_of_matching(tup_tree,
('LOCALCLASSPATH', 'LOCALINSTANCEPATH'))
if not path:
raise CIMXMLParseError(
_format("Element {0!A} missing a required child element "
"'LOCALCLASSPATH' or 'LOCALINSTANCEPATH'",
name(tup_tree)),
conn_id=self.conn_id)
if len(path) > 1:
raise CIMXMLParseError(
_format("Element {0!A} has too many child elements {1!A} "
"(allowed is one of 'LOCALCLASSPATH' or "
"'LOCALINSTANCEPATH')", name(tup_tree), path),
conn_id=self.conn_id)
path = path[0]
params = self.list_of_matching(tup_tree, ('PARAMVALUE',))
return (name(tup_tree), attrs(tup_tree), path, params) | ::
<!ELEMENT METHODCALL ((LOCALCLASSPATH | LOCALINSTANCEPATH),
PARAMVALUE*)>
<!ATTLIST METHODCALL
%CIMName;> |
6,547 | def change_password(self, id_user, user_current_password, password):
if not is_valid_int_param(id_user):
raise InvalidParameterError(
u'The identifier of User is invalid or was not informed.')  # message reconstructed and translated
if password is None or password == "":
raise InvalidParameterError(
u'The password was not informed.')  # message reconstructed and translated
user_map = dict()
user_map['user_id'] = id_user
user_map['password'] = password
code, xml = self.submit(
{'user': user_map}, 'POST', 'user-change-pass/')  # endpoint reconstructed
return self.response(code, xml) | Change password of User from by the identifier.
:param id_user: Identifier of the User. Integer value and greater than zero.
:param user_current_password: The user's current password.
:param password: The user's new password.
:return: None
:raise UsuarioNaoExisteError: User not registered.
:raise InvalidParameterError: The identifier of User is null and invalid.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response. |
6,548 | def text(self):
text = ''
for run in self.runs:
text += run.text
return text | String formed by concatenating the text of each run in the paragraph.
Tabs and line breaks in the XML are mapped to ``\\t`` and ``\\n``
characters respectively.
Assigning text to this property causes all existing paragraph content
to be replaced with a single run containing the assigned text.
A ``\\t`` character in the text is mapped to a ``<w:tab/>`` element
and each ``\\n`` or ``\\r`` character is mapped to a line break.
Paragraph-level formatting, such as style, is preserved. All
run-level formatting, such as bold or italic, is removed. |
6,549 | def draw_tiling(coord_generator, filename):
im = Image.new('RGB', size=(CANVAS_WIDTH, CANVAS_HEIGHT))  # mode assumed; original literal lost
for shape in coord_generator(CANVAS_WIDTH, CANVAS_HEIGHT):
ImageDraw.Draw(im).polygon(shape, outline='white')  # color assumed; original literal lost
im.save(filename) | Given a coordinate generator and a filename, render those coordinates in
a new image and save them to the file. |
6,550 | def pumper(html_generator):
source = html_generator()
parser = etree.HTMLPullParser(
events=('start', 'end'),
remove_comments=True
)
while True:
for element in parser.read_events():
yield element
try:
parser.feed(next(source))
except StopIteration:
parser.feed('')  # original end-of-input literal lost in extraction
for element in parser.read_events():
yield element
break | Pulls HTML from source generator,
feeds it to the parser and yields
DOM elements.
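Example (illustrative)::
>>> def source():
...     yield '<html><body><p>hello'
...     yield '</p></body></html>'
>>> [(event, el.tag) for event, el in pumper(source)]  # doctest: +SKIP
[('start', 'html'), ('start', 'body'), ('start', 'p'), ('end', 'p'), ('end', 'body'), ('end', 'html')] |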
6,551 | def get_aggregations(metrics_dict, saved_metrics, adhoc_metrics=[]):
aggregations = OrderedDict()
invalid_metric_names = []
for metric_name in saved_metrics:
if metric_name in metrics_dict:
metric = metrics_dict[metric_name]
if metric.metric_type == POST_AGG_TYPE:
invalid_metric_names.append(metric_name)
else:
aggregations[metric_name] = metric.json_obj
else:
invalid_metric_names.append(metric_name)
if len(invalid_metric_names) > 0:
raise SupersetException(
_('Metric(s) {} must be aggregations.').format(invalid_metric_names))
for adhoc_metric in adhoc_metrics:
aggregations[adhoc_metric['label']] = {
'fieldName': adhoc_metric['column']['column_name'],
'fieldNames': [adhoc_metric['column']['column_name']],
'type': DruidDatasource.druid_type_from_adhoc_metric(adhoc_metric),
'name': adhoc_metric['label'],
}
return aggregations | Returns a dictionary of aggregation metric names to aggregation json objects
:param metrics_dict: dictionary of all the metrics
:param saved_metrics: list of saved metric names
:param adhoc_metrics: list of adhoc metric names
:raise SupersetException: if one or more metric names are not aggregations |
6,552 | def replace(self, **kw):
if "tzinfo" in kw:
if kw["tzinfo"] is None:
raise TypeError("Can not remove the timezone use asdatetime()")
else:
tzinfo = kw["tzinfo"]
del kw["tzinfo"]
else:
tzinfo = None
is_dst = None
if "is_dst" in kw:
is_dst = kw["is_dst"]
del kw["is_dst"]
else:
is_dst = self.is_dst
replaced = self.asdatetime().replace(**kw)
return type(self)(
replaced, tzinfo=tzinfo or self.tzinfo.zone, is_dst=is_dst) | Return datetime with new specified fields given as arguments.
For example, dt.replace(days=4) would return a new datetime_tz object with
exactly the same as dt but with the days attribute equal to 4.
Any attribute can be replaced, but tzinfo can not be set to None.
Args:
Any datetime_tz attribute.
Returns:
A datetime_tz object with the attributes replaced.
Raises:
TypeError: If the given replacement is invalid. |
6,553 | def get_agent(self, agent_id):
url = 'agents/%s' % agent_id  # path reconstructed; original literal lost
return Agent(**self._api._get(url)) | Fetches the agent for the given agent ID |
6,554 | def handle(data_type, data, data_id=None, caller=None):
if not data_id:
data_id = data_type
if data_id not in _handlers:
_handlers[data_id] = dict(
[(h.handle, h) for h in handlers.instantiate_for_data_type(data_type, data_id=data_id)])
for handler in list(_handlers[data_id].values()):
try:
data = handler(data, caller=caller)
except Exception as inst:
vodka.log.error("Data handler %s failed with error" % handler)
vodka.log.error(traceback.format_exc())
return data | execute all data handlers on the specified data according to data type
Args:
data_type (str): data type handle
data (dict or list): data
Kwargs:
data_id (str): can be used to differentiate between different data
sets of the same data type. If not specified will default to
the data type
caller (object): if specified, holds the object or function that
is trying to handle data
Returns:
dict or list - data after handlers have been executed on it |
6,555 | def listen(self, timeout=10):
self._socket.settimeout(float(timeout))
while not self.stopped.isSet():
try:
data, client_address = self._socket.recvfrom(4096)
except socket.timeout:
continue
try:
args = ((data, client_address), )
t = threading.Thread(target=self.receive_datagram, args=args)
t.daemon = True
t.start()
except RuntimeError:
logging.exception("Exception with Executor")
logging.debug("closing socket")
self._socket.close() | Listen for incoming messages. Timeout is used to check if the server must be switched off.
:param timeout: Socket Timeout in seconds |
6,556 | def feedforward(self):
m = self._numInputs
n = self._numColumns
W = np.zeros((n, m))
for i in range(self._numColumns):
self.getPermanence(i, W[i, :])
return W | Soon to be deprecated.
Needed to make the SP implementation compatible
with some older code. |
6,557 | def _search_for_user_dn(self):
search = self.settings.USER_SEARCH
if search is None:
raise ImproperlyConfigured(
"AUTH_LDAP_USER_SEARCH must be an LDAPSearch instance."
)
results = search.execute(self.connection, {"user": self._username})
if results is not None and len(results) == 1:
(user_dn, self._user_attrs) = next(iter(results))
else:
user_dn = None
return user_dn | Searches the directory for a user matching AUTH_LDAP_USER_SEARCH.
Populates self._user_dn and self._user_attrs. |
6,558 | def allow(self, comment, content_object, request):
if self.enable_field:
if not getattr(content_object, self.enable_field):
return False
if self.auto_close_field and self.close_after is not None:
close_after_date = getattr(content_object, self.auto_close_field)
if close_after_date is not None and self._get_delta(timezone.now(), close_after_date).days >= self.close_after:
return False
return True | Determine whether a given comment is allowed to be posted on
a given object.
Return ``True`` if the comment should be allowed, ``False``
otherwise. |
6,559 | def _locate_index(self, index):
assert index >= 0 and index < self.num_images, "index out of range"
pos = self.image_set_index[index]
for k, v in enumerate(self.imdbs):
if pos >= v.num_images:
pos -= v.num_images
else:
return (k, pos) | given index, find out sub-db and sub-index
Parameters
----------
index : int
index of a specific image
Returns
----------
a tuple (sub-db, sub-index) |
6,560 | def nlerp_quat(from_quat, to_quat, percent):
result = lerp_quat(from_quat, to_quat, percent)
result.normalize()
return result | Return normalized linear interpolation of two quaternions.
Less computationally expensive than slerp (which is not implemented in this
lib yet), but does not maintain a constant velocity like slerp. |
6,561 | def factor_kkt(S_LU, R, d):
nBatch, nineq = d.size()
neq = S_LU[1].size(1) - nineq
# Reconstructed (lines lost in extraction): form T = R + diag(1/d) and
# LU-factorize it batched; T_LU is consumed below.
T = R.clone()
T[torch.eye(nineq).repeat(nBatch, 1, 1).type_as(R).byte()] += (1. / d).squeeze().view(-1)
T_LU = T.btrifact()
oldPivotsPacked = S_LU[1][:, -nineq:] - neq
oldPivots, _, _ = torch.btriunpack(
T_LU[0], oldPivotsPacked, unpack_data=False)
newPivotsPacked = T_LU[1]
newPivots, _, _ = torch.btriunpack(
T_LU[0], newPivotsPacked, unpack_data=False)
if neq > 0:
S_LU_21 = S_LU[0][:, -nineq:, :neq]
S_LU[0][:, -nineq:,
:neq] = newPivots.transpose(1, 2).bmm(oldPivots.bmm(S_LU_21))
S_LU[1][:, -nineq:] = newPivotsPacked + neq
S_LU[0][:, -nineq:, -nineq:] = T_LU[0] | Factor the U22 block that we can only do after we know D. |
6,562 | def send_request(self, request, callback=None, timeout=None, no_response=False):
if callback is not None:
thread = threading.Thread(target=self._thread_body, args=(request, callback))
thread.start()
else:
self.protocol.send_message(request)
if no_response:
return
try:
response = self.queue.get(block=True, timeout=timeout)
except Empty:
response = None
return response | Send a request to the remote server.
:param request: the request to send
:param callback: the callback function to invoke upon response
:param timeout: the timeout of the request
:return: the response |
6,563 | def to_serializable(self, use_bytes=False, bias_dtype=np.float32,
bytes_type=bytes):
from dimod.package_info import __version__
schema_version = "2.0.0"
try:
variables = sorted(self.variables)
except TypeError:
variables = list(self.variables)
num_variables = len(variables)
index_dtype = np.uint16 if num_variables <= 2**16 else np.uint32
ldata, (irow, icol, qdata), offset = self.to_numpy_vectors(
dtype=bias_dtype,
index_dtype=index_dtype,
sort_indices=True,
variable_order=variables)
doc = {"basetype": "BinaryQuadraticModel",
"type": type(self).__name__,
"version": {"dimod": __version__,
"bqm_schema": schema_version},
"variable_labels": variables,
"variable_type": self.vartype.name,
"info": self.info,
"offset": float(offset),
"use_bytes": bool(use_bytes)
}
if use_bytes:
doc.update({'linear_biases': array2bytes(ldata, bytes_type=bytes_type),
'quadratic_biases': array2bytes(qdata, bytes_type=bytes_type),
'quadratic_head': array2bytes(irow, bytes_type=bytes_type),
'quadratic_tail': array2bytes(icol, bytes_type=bytes_type)})
else:
doc.update({'linear_biases': ldata.tolist(),
'quadratic_biases': qdata.tolist(),
'quadratic_head': irow.tolist(),
'quadratic_tail': icol.tolist()})
return doc | Convert the binary quadratic model to a serializable object.
Args:
use_bytes (bool, optional, default=False):
If True, a compact representation representing the biases as bytes is used.
bias_dtype (numpy.dtype, optional, default=numpy.float32):
If `use_bytes` is True, this numpy dtype will be used to
represent the bias values in the serialized format.
bytes_type (class, optional, default=bytes):
This class will be used to wrap the bytes objects in the
serialization if `use_bytes` is true. Useful for when using
Python 2 and using BSON encoding, which will not accept the raw
`bytes` type, so `bson.Binary` can be used instead.
Returns:
dict: An object that can be serialized.
Examples:
Encode using JSON
>>> import dimod
>>> import json
...
>>> bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)
>>> s = json.dumps(bqm.to_serializable())
Encode using BSON_ in python 3.5+
>>> import dimod
>>> import bson
...
>>> bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)
>>> doc = bqm.to_serializable(use_bytes=True)
>>> b = bson.BSON.encode(doc) # doctest: +SKIP
Encode using BSON in python 2.7. Because :class:`bytes` is an alias for :class:`str`,
we need to signal to the encoder that it should encode the biases and labels as binary
data.
>>> import dimod
>>> import bson
...
>>> bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)
>>> doc = bqm.to_serializable(use_bytes=True, bytes_type=bson.Binary)
>>> b = bson.BSON.encode(doc) # doctest: +SKIP
See also:
:meth:`~.BinaryQuadraticModel.from_serializable`
:func:`json.dumps`, :func:`json.dump` JSON encoding functions
:meth:`bson.BSON.encode` BSON encoding method
.. _BSON: http://bsonspec.org/ |
6,564 | def _formatters_default(self):
formatter_classes = [
PlainTextFormatter,
HTMLFormatter,
SVGFormatter,
PNGFormatter,
JPEGFormatter,
LatexFormatter,
JSONFormatter,
JavascriptFormatter
]
d = {}
for cls in formatter_classes:
f = cls(config=self.config)
d[f.format_type] = f
return d | Activate the default formatters. |
6,565 | def localize_field(self, value):
if self.default is not None:
if value is None or value == '':
value = self.default
return value or '' | Method that must transform the value from object to a localized string |
6,566 | def list_events(self, source=None, severity=None, text_filter=None,
start=None, stop=None, page_size=500, descending=False):
params = {
'order': 'desc' if descending else 'asc',
}
if source is not None:
params['source'] = source
if page_size is not None:
params['limit'] = page_size
if severity is not None:
params['severity'] = severity
if start is not None:
params['start'] = to_isostring(start)
if stop is not None:
params['stop'] = to_isostring(stop)
if text_filter is not None:
params['q'] = text_filter
return pagination.Iterator(
client=self._client,
path='/archive/{}/events'.format(self._instance),
params=params,
response_class=rest_pb2.ListEventsResponse,
items_key='event',
item_mapper=Event,
) | Reads events between the specified start and stop time.
Events are sorted by generation time, source, then sequence number.
:param str source: The source of the returned events.
:param str severity: The minimum severity level of the returned events.
One of ``INFO``, ``WATCH``, ``WARNING``, ``DISTRESS``,
``CRITICAL`` or ``SEVERE``.
:param str text_filter: Filter the text message of the returned events
:param ~datetime.datetime start: Minimum start date of the returned events (inclusive)
:param ~datetime.datetime stop: Maximum start date of the returned events (exclusive)
:param int page_size: Page size of underlying requests. Higher values imply
less overhead, but risk hitting the maximum message size limit.
:param bool descending: If set to ``True`` events are fetched in reverse
order (most recent first).
:rtype: ~collections.Iterable[.Event] |
6,567 | def add_textop_iter(func):
op = type(func.__name__, (WrapOpIter,), {'fn': staticmethod(func)})
setattr(textops.ops,func.__name__,op)
return op | Decorator to declare custom *ITER* function as a new textops op
An *ITER* function is a function that will receive the input text as a *LIST* of lines.
One have to iterate over this list and generate a result (it can be a list, a generator,
a dict, a string, an int ...)
Examples:
>>> @add_textop_iter
... def odd(lines, *args,**kwargs):
... for i,line in enumerate(lines):
... if not i % 2:
... yield line
>>> s = '''line 1
... line 2
... line 3'''
>>> s >> odd()
['line 1', 'line 3']
>>> s | odd().tolist()
['line 1', 'line 3']
>>> @add_textop_iter
... def sumsize(lines, *args,**kwargs):
... sum = 0
... for line in lines:
... sum += int(re.search(r'\d+',line).group(0))
... return sum
>>> '''1492 file1
... 1789 file2
... 2015 file3''' | sumsize()
5296 |
6,568 | def get_member_brief(self, member_id=0):
title = '%s.get_member_brief' % self.__class__.__name__  # literal reconstructed
input_fields = {
'member_id': member_id
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))  # format reconstructed
self.fields.validate(value, '.%s' % key, object_title)
url = '%s/member/current' % self.endpoint  # path reconstructed; original literal lost
params = {
'fields': 'profile_brief'  # key/value reconstructed; original literals lost
}
if member_id:
params['member_id'] = member_id
response_details = self._get_request(url, params=params)
profile_details = {
'json': {}
}
for key, value in response_details.items():
if not key == 'json':
profile_details[key] = value
if response_details['json']:
if 'results' in response_details['json'].keys():
if response_details['json']['results']:
details = response_details['json']['results'][0]
for key, value in details.items():
if key != 'self':  # excluded key is an assumption; original literal lost
profile_details['json'][key] = value
profile_details['json'] = self.objects.profile_brief.ingest(**profile_details['json'])
return profile_details | a method to retrieve member profile info
:param member_id: [optional] integer with member id from member profile
:return: dictionary with member profile inside [json] key
member_profile = self.objects.profile_brief.schema |
6,569 | def write(self, message, delay_seconds=None):
new_msg = self.connection.send_message(self, message.get_body_encoded(), delay_seconds)
message.id = new_msg.id
message.md5 = new_msg.md5
return message | Add a single message to the queue.
:type message: Message
:param message: The message to be written to the queue
:rtype: :class:`boto.sqs.message.Message`
:return: The :class:`boto.sqs.message.Message` object that was written. |
6,570 | def confd_state_internal_cdb_client_lock(self, **kwargs):
config = ET.Element("config")
confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring")
internal = ET.SubElement(confd_state, "internal")
cdb = ET.SubElement(internal, "cdb")
client = ET.SubElement(cdb, "client")
lock = ET.SubElement(client, "lock")
lock.text = kwargs.pop('lock')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
6,571 | def _set_gaussian_expected_stats(self, smoothed_mus, smoothed_sigmas, E_xtp1_xtT):
assert not np.isnan(E_xtp1_xtT).any()
assert not np.isnan(smoothed_mus).any()
assert not np.isnan(smoothed_sigmas).any()
assert smoothed_mus.shape == (self.T, self.D_latent)
assert smoothed_sigmas.shape == (self.T, self.D_latent, self.D_latent)
assert E_xtp1_xtT.shape == (self.T-1, self.D_latent, self.D_latent)
T, D_obs = self.T, self.D_emission
E_x_xT = smoothed_sigmas + smoothed_mus[:, :, None] * smoothed_mus[:, None, :]
E_x_uT = smoothed_mus[:, :, None] * self.inputs[:, None, :]
E_u_uT = self.inputs[:, :, None] * self.inputs[:, None, :]
E_xu_xuT = np.concatenate((
np.concatenate((E_x_xT, E_x_uT), axis=2),
np.concatenate((np.transpose(E_x_uT, (0, 2, 1)), E_u_uT), axis=2)),
axis=1)
E_xut_xutT = E_xu_xuT[:-1]
E_xtp1_xtp1T = E_x_xT[1:]
E_xtp1_utT = (smoothed_mus[1:, :, None] * self.inputs[:-1, None, :])
E_xtp1_xutT = np.concatenate((E_xtp1_xtT, E_xtp1_utT), axis=-1)
self.E_init_stats = (self.smoothed_mus[0], E_x_xT[0], 1.)
self.E_dynamics_stats = (E_xtp1_xtp1T, E_xtp1_xutT, E_xut_xutT, np.ones(self.T-1))
E_yyT = self.data**2 if self.diagonal_noise else self.data[:, :, None] * self.data[:, None, :]
E_yxT = self.data[:, :, None] * self.smoothed_mus[:, None, :]
E_yuT = self.data[:, :, None] * self.inputs[:, None, :]
E_yxuT = np.concatenate((E_yxT, E_yuT), axis=-1)
E_n = np.ones((T, D_obs)) if self.diagonal_noise else np.ones(T)
self.E_emission_stats = (E_yyT, E_yxuT, E_xu_xuT, E_n) | Both meanfield and VBEM require expected statistics of the continuous latent
states, x. This is a helper function to take E[x_t], E[x_t x_t^T] and E[x_{t+1}, x_t^T]
and compute the expected sufficient statistics for the initial distribution,
dynamics distribution, and Gaussian observation distribution. |
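The key identity behind these statistics is E[x x^T] = Sigma + mu mu^T, vectorized over time; a minimal numpy check with made-up shapes (T=4, D=2 are assumptions):
import numpy as np

T, D = 4, 2
smoothed_mus = np.random.randn(T, D)
smoothed_sigmas = np.tile(np.eye(D), (T, 1, 1))
# E[x_t x_t^T] = Sigma_t + mu_t mu_t^T for every t at once
E_x_xT = smoothed_sigmas + smoothed_mus[:, :, None] * smoothed_mus[:, None, :]
assert E_x_xT.shape == (T, D, D)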
6,572 | def vclose(L, V):
lam, X = 0, []
for k in range(3):
lam = lam + V[k] * L[k]
beta = np.sqrt(1. - lam**2)
for k in range(3):
X.append((old_div((V[k] - lam * L[k]), beta)))
return X | gets the closest vector |
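vclose subtracts from V its component along L and renormalizes, giving the unit vector in the L-V plane orthogonal to L; a quick check with made-up unit vectors:
import numpy as np

L = [0.0, 0.0, 1.0]                               # reference direction
V = [0.6, 0.0, 0.8]                               # unit vector to decompose
lam = sum(v * l for v, l in zip(V, L))            # cos(angle) = 0.8
beta = np.sqrt(1.0 - lam ** 2)                    # sin(angle) = 0.6
X = [(v - lam * l) / beta for v, l in zip(V, L)]
print(X)  # [1.0, 0.0, 0.0]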
6,573 | def removeItem( self ):
item = self.uiMenuTREE.currentItem()
if ( not item ):
return
opts = QMessageBox.Yes | QMessageBox.No
answer = QMessageBox.question( self,
'Remove Item',
'Are you sure you want to remove this item?',
opts )
if ( answer == QMessageBox.Yes ):
parent = item.parent()
if ( parent ):
parent.takeChild(parent.indexOfChild(item))
else:
tree = self.uiMenuTREE
tree.takeTopLevelItem(tree.indexOfTopLevelItem(item)) | Removes the item from the menu. |
6,574 | def setup_logging():
logging.basicConfig(format=("[%(levelname)s\033[0m] "
"\033[1;31m%(module)s\033[0m: "
"%(message)s"),
level=logging.INFO,
stream=sys.stdout)
# level names are re-registered with ANSI colors (exact codes assumed; originals lost in extraction)
logging.addLevelName(logging.ERROR, '\033[1;31mERROR')
logging.addLevelName(logging.INFO, '\033[1;32mINFO')
logging.addLevelName(logging.WARNING, '\033[1;33mWARNING') | Logging config. |
6,575 | def _validate_format(req):
for key in SLOJSONRPC._min_keys:
if key not in req:
logging.debug('missing key: %s' % key)
raise SLOJSONRPCError(-32600)
for key in req.keys():
if key not in SLOJSONRPC._allowed_keys:
logging.debug('key not allowed: %s' % key)
raise SLOJSONRPCError(-32600)
if req['jsonrpc'] != '2.0':
logging.debug('invalid jsonrpc version (must be "2.0")')
raise SLOJSONRPCError(-32600) | Validate jsonrpc compliance of a jsonrpc-dict.
req - the request as a jsonrpc-dict
raises SLOJSONRPCError on validation error |
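For reference, a request dict that passes the validation above versus one that fails (method name and id are arbitrary examples):
good_req = {'jsonrpc': '2.0', 'method': 'ping', 'id': 1}   # accepted
bad_req = {'jsonrpc': '1.0', 'method': 'ping', 'id': 2}    # wrong version -> error -32600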
6,576 | def serial_assimilate(self, rootpath):
valid_paths = []
for (parent, subdirs, files) in os.walk(rootpath):
valid_paths.extend(self._drone.get_valid_paths((parent, subdirs,
files)))
data = []
count = 0
total = len(valid_paths)
for path in valid_paths:
newdata = self._drone.assimilate(path)
self._data.append(newdata)
count += 1
logger.info('{}/{} ({:.2f}%) done'.format(count, total,
count / total * 100))
for d in data:
self._data.append(json.loads(d, cls=MontyDecoder)) | Assimilate the entire subdirectory structure in rootpath serially. |
6,577 | def encipher_shift(plaintext, plain_vocab, shift):
ciphertext = []
cipher = ShiftEncryptionLayer(plain_vocab, shift)
for _, sentence in enumerate(plaintext):
cipher_sentence = []
for _, character in enumerate(sentence):
encrypted_char = cipher.encrypt_character(character)
cipher_sentence.append(encrypted_char)
ciphertext.append(cipher_sentence)
return ciphertext | Encrypt plain text with a single shift layer.
Args:
plaintext (list of list of Strings): a list of plain text to encrypt.
plain_vocab (list of Integer): unique vocabularies being used.
shift (Integer): number of shift, shift to the right if shift is positive.
Returns:
ciphertext (list of Strings): encrypted plain text. |
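The cipher is a Caesar shift over vocabulary indices; since ShiftEncryptionLayer's internals are not shown here, this standalone sketch models the per-character step (modular rotation is an assumption):
def shift_char(index, shift, vocab_size):
    # rotate a vocabulary index to the right by `shift` positions
    return (index + shift) % vocab_size

plaintext = [[0, 1, 2], [3, 4]]   # sentences as lists of vocab indices
ciphertext = [[shift_char(c, 2, 5) for c in sentence] for sentence in plaintext]
print(ciphertext)  # [[2, 3, 4], [0, 1]]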
6,578 | def set_shuffle(self, shuffle):
if not self.my_osid_object_form._is_valid_boolean(
shuffle):
raise InvalidArgument()
self.my_osid_object_form._my_map['shuffle'] = shuffle | stub |
6,579 | def _evaluate_model_single_file(target_folder, test_file):
logging.info("Create running model...")
model_src = get_latest_model(target_folder, "model")
model_file_pointer = tempfile.NamedTemporaryFile(delete=False)
model_use = model_file_pointer.name
model_file_pointer.close()
logging.info("Adjusted model is in %s.", model_use)
create_adjusted_model_for_percentages(model_src, model_use)
project_root = get_project_root()
time_prefix = time.strftime("%Y-%m-%d-%H-%M")
logging.info("Evaluate with ...", model_src, test_file)
logfilefolder = os.path.join(project_root, "logs/")
if not os.path.exists(logfilefolder):
os.makedirs(logfilefolder)
logfile = os.path.join(project_root,
"logs/%s-error-evaluation.log" %
time_prefix)
with open(logfile, "w") as log, open(model_use, "r") as modl_src_p:
# nntoolkit CLI arguments reconstructed from context; treat as assumptions
p = subprocess.Popen([get_nntoolkit(), 'run',
'--batch-size', '1',
'-f%0.4f', test_file],
stdin=modl_src_p,
stdout=log)
ret = p.wait()
if ret != 0:
logging.error("nntoolkit finished with ret code %s",
str(ret))
sys.exit()
return (logfile, model_use) | Evaluate a model for a single recording.
Parameters
----------
target_folder : string
Folder where the model is
test_file : string
The test file (.hdf5) |
6,580 | def _parse(self, msg_dict):
# profile dict keys below are reconstructed from the napalm-logs message schema; treat as assumptions
error_present = False
for message in self.compiled_messages:
match_on = message['match_on']
if match_on not in msg_dict:
continue
if message['tag'] != msg_dict[match_on]:
continue
if '__python_fun__' in message:
return {
'error': message['error'],
'model': message['model'],
'__python_fun__': message['__python_fun__']
}
error_present = True
match = message['line'].search(msg_dict['message'])
if not match:
continue
positions = message.get('positions', {})
values = message.get('values')
ret = {
'error': message['error'],
'model': message['model'],
'mapping': message['mapping'],
'replace': message['replace']
}
for key in values.keys():
if key in message['replace']:
result = napalm_logs.utils.cast(match.group(positions.get(key)), message['replace'][key])
else:
result = match.group(positions.get(key))
ret[key] = result
return ret
if error_present is True:
log.info('tag matched but no regex matched for os %s, message: %s', self._name, msg_dict.get('message', ''))
else:
log.info('no message profile configured for os %s, message: %s', self._name, msg_dict.get('message', '')) | Parse a syslog message and check what OpenConfig object should
be generated. |
6,581 | def parse_color(self, color):
if color == 'none':
return None
return (
int(color[1:3], 16),
int(color[3:5], 16),
int(color[5:7], 16)) | color : string, eg: '#rrggbb' or 'none'
(where rr, gg, bb are hex digits from 00 to ff)
returns a triple of unsigned bytes, eg: (0, 128, 255) |
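A standalone version of the parser for quick verification:
def parse_color(color):
    if color == 'none':
        return None
    return (int(color[1:3], 16), int(color[3:5], 16), int(color[5:7], 16))

assert parse_color('#0080ff') == (0, 128, 255)
assert parse_color('none') is None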
6,582 | def calculateLocalElasticity(self, bp, frames=None, helical=False, unit=):
acceptedUnit = ['kT', 'kJ/mol', 'kcal/mol']
if unit not in acceptedUnit:
raise ValueError(" {0} not accepted. Use any of the following: {1} ".format(unit, acceptedUnit))
frames = self._validateFrames(frames)
name = '{0}-{1}-{2}-{3}-local-{4}'.format(bp[0], bp[1], frames[0], frames[1], int(helical))  # cache key; exact original format lost
if bp[1]-bp[0]+1 > 4:
raise ValueError("Selected span {0} is larger than 4, and therefore, not recommended for local elasticity".format(bp[1]-bp[0]+1))
if name not in self.esMatrix:
time, array = self.extractLocalParameters(self.dna, bp, helical=helical, frames=frames)
mean = np.mean(array, axis=1)
esMatrix = self.getElasticMatrix(array)
self.esMatrix[name] = esMatrix
self.minimumPoint[name] = mean
else:
esMatrix = self.esMatrix[name]
mean = self.minimumPoint[name]
if unit == 'kJ/mol':
result = 2.4946938107879997 * esMatrix  # kT -> kJ/mol (~300 K)
elif unit == 'kcal/mol':
result = 0.5962461306854684 * esMatrix  # kT -> kcal/mol (~300 K)
else:
result = esMatrix  # already in kT
return mean, result | r"""Calculate local elastic matrix or stiffness matrix for local DNA segment
.. note:: Here local DNA segment referred to less than 5 base-pair long.
In case of :ref:`base-step-image`: Shift (:math:`Dx`), Slide (:math:`Dy`), Rise (:math:`Dz`),
Tilt (:math:`\tau`), Roll (:math:`\rho`) and Twist (:math:`\omega`), following elastic matrix is calculated.
.. math::
\mathbf{K}_{base-step} = \begin{bmatrix}
K_{Dx} & K_{Dx,Dy} & K_{Dx,Dz} & K_{Dx,\tau} & K_{Dx,\rho} & K_{Dx,\omega} \\
K_{Dx,Dy} & K_{Dy} & K_{Dy,Dz} & K_{Dy,\tau} & K_{Dy,\rho} & K_{Dy,\omega} \\
K_{Dx,Dz} & K_{Dy,Dz} & K_{Dz} & K_{Dz,\tau} & K_{Dz,\rho} & K_{Dz,\omega} \\
K_{Dx,\tau} & K_{Dy,\tau} & K_{Dz,\tau} & K_{\tau} & K_{\tau, \rho} & K_{\tau,\omega} \\
K_{Dx,\rho} & K_{Dy,\rho} & K_{Dz,\rho} & K_{\tau, \rho} & K_{\rho} & K_{\rho,\omega} \\
K_{Dx,\omega} & K_{Dy,\omega} & K_{Dz,\omega} & K_{\tau, \omega} & K_{\rho, \omega} & K_{\omega} \\
\end{bmatrix}
In case of :ref:`helical-base-step-image`: x-displacement (:math:`dx`), y-displacement (:math:`dy`), h-rise (:math:`h`),
inclination (:math:`\eta`), tip (:math:`\theta`) and twist (:math:`\Omega`), following elastic matrix is calculated.
.. math::
\mathbf{K}_{helical-base-step} = \begin{bmatrix}
K_{dx} & K_{dx,dy} & K_{dx,h} & K_{dx,\eta} & K_{dx,\theta} & K_{dx,\Omega} \\
K_{dx,dy} & K_{dy} & K_{dy,h} & K_{dy,\eta} & K_{dy,\theta} & K_{dy,\Omega} \\
K_{dx,h} & K_{dy,h} & K_{h} & K_{h,\eta} & K_{h,\theta} & K_{h,\Omega} \\
K_{dx,\eta} & K_{dy,\eta} & K_{h,\eta} & K_{\eta} & K_{\eta, \theta} & K_{\eta,\Omega} \\
K_{dx,\theta} & K_{dy,\theta} & K_{h,\theta} & K_{\eta, \theta} & K_{\theta} & K_{\theta,\Omega} \\
K_{dx,\Omega} & K_{dy,\Omega} & K_{h,\Omega} & K_{\eta, \Omega} & K_{\theta, \Omega} & K_{\Omega} \\
\end{bmatrix}
Parameters
----------
bp : list
List of two base-steps forming the DNA segment.
For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
frames : list
List of two trajectory frames between which parameters will be extracted. It can be used to select portions
of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
considered.
helical : bool
If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise,
by default, elastic matrix for **base-step** parameters are calculated.
unit : str
Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``.
Return
------
mean : numpy.ndarray
Value of parameters at which energy is zero. Minimum point on energy landscape.
if ``helical=False``
.. math::
\begin{bmatrix}
Dx_0 & Dy_0 & Dz_0 & \tau_0 & \rho_0 & \omega_0
\end{bmatrix}
if ``helical=True``
.. math::
\begin{bmatrix}
dx_0 & dy_0 & h_0 & \eta_0 & \theta_0 & \Omega_0
\end{bmatrix}
result : numpy.ndarray
Elastic matrix. |
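The hard-coded unit factors correspond to one kT near T = 300 K; a sketch deriving them from physical constants (the exact temperature assumed by the original authors is inferred, not stated):
from scipy import constants

T = 300.0  # kelvin (assumed)
kT_kJ_per_mol = constants.k * T * constants.N_A / 1000.0
print(round(kT_kJ_per_mol, 4))          # ~2.4943 kJ/mol, matching the 2.4947 factor
print(round(kT_kJ_per_mol / 4.184, 4))  # ~0.5962 kcal/mol, matching the 0.5962 factor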
6,583 | def _retrieveRemoteCertificate(self, From, port=port):
CS = self.service.certificateStorage
host = str(From.domainAddress())
p = AMP()
p.wrapper = self.wrapper
f = protocol.ClientCreator(reactor, lambda: p)
connD = f.connectTCP(host, port)
def connected(proto):
dhost = From.domainAddress()
iddom = proto.callRemote(Identify, subject=dhost)
def gotCert(identifyBox):
theirCert = identifyBox['certificate']
theirIssuer = theirCert.getIssuer().commonName
theirName = theirCert.getSubject().commonName
if (theirName != str(dhost)):
raise VerifyError(
"%r claimed it was %r in IDENTIFY response"
% (theirName, dhost))
if (theirIssuer != str(dhost)):
raise VerifyError(
"self-signed %r claimed it was issued by "
"%r in IDENTIFY response" % (dhost, theirIssuer))
def storedCert(ignored):
return theirCert
return CS.storeSelfSignedCertificate(
str(dhost), theirCert).addCallback(storedCert)
def nothingify(x):
proto.transport.loseConnection()
return x
return iddom.addCallback(gotCert).addBoth(nothingify)
connD.addCallback(connected)
return connD | The entire conversation, starting with TCP handshake and ending at
disconnect, to retrieve a foreign domain's certificate for the first
time. |
6,584 | def remove_labels(self, labels, relabel=False):
self.check_labels(labels)
self.reassign_label(labels, new_label=0)
if relabel:
self.relabel_consecutive() | Remove one or more labels.
Removed labels are assigned a value of zero (i.e., background).
Parameters
----------
labels : int, array-like (1D, int)
The label number(s) to remove.
relabel : bool, optional
If `True`, then the segmentation image will be relabeled
such that the labels are in consecutive order starting from
1.
Examples
--------
>>> from photutils import SegmentationImage
>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
... [0, 0, 0, 0, 0, 4],
... [0, 0, 3, 3, 0, 0],
... [7, 0, 0, 0, 0, 5],
... [7, 7, 0, 5, 5, 5],
... [7, 7, 0, 0, 5, 5]])
>>> segm.remove_labels(labels=[5, 3])
>>> segm.data
array([[1, 1, 0, 0, 4, 4],
[0, 0, 0, 0, 0, 4],
[0, 0, 0, 0, 0, 0],
[7, 0, 0, 0, 0, 0],
[7, 7, 0, 0, 0, 0],
[7, 7, 0, 0, 0, 0]])
>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
... [0, 0, 0, 0, 0, 4],
... [0, 0, 3, 3, 0, 0],
... [7, 0, 0, 0, 0, 5],
... [7, 7, 0, 5, 5, 5],
... [7, 7, 0, 0, 5, 5]])
>>> segm.remove_labels(labels=[5, 3], relabel=True)
>>> segm.data
array([[1, 1, 0, 0, 2, 2],
[0, 0, 0, 0, 0, 2],
[0, 0, 0, 0, 0, 0],
[3, 0, 0, 0, 0, 0],
[3, 3, 0, 0, 0, 0],
[3, 3, 0, 0, 0, 0]]) |
6,585 | def will_set(self, topic, payload=None, qos=0, retain=False):
if topic is None or len(topic) == 0:
raise ValueError('Invalid topic.')
if qos < 0 or qos > 2:
raise ValueError('Invalid QoS level.')
if isinstance(payload, str):
self._will_payload = payload.encode('utf-8')
elif isinstance(payload, bytearray):
self._will_payload = payload
elif isinstance(payload, int) or isinstance(payload, float):
self._will_payload = str(payload)
elif payload is None:
self._will_payload = None
else:
raise TypeError('payload must be a string, bytearray, int, float or None.')
self._will = True
self._will_topic = topic.encode('utf-8')
self._will_qos = qos
self._will_retain = retain | Set a Will to be sent by the broker in case the client disconnects unexpectedly.
This must be called before connect() to have any effect.
topic: The topic that the will message should be published on.
payload: The message to send as a will. If not given, or set to None a
zero length message will be used as the will. Passing an int or float
will result in the payload being converted to a string representing
that number. If you wish to send a true int/float, use struct.pack() to
create the payload you require.
qos: The quality of service level to use for the will.
retain: If set to true, the will message will be set as the "last known
good"/retained message for the topic.
Raises a ValueError if qos is not 0, 1 or 2, or if topic is None or has
zero string length. |
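Typical usage registers the will before connecting (paho-mqtt 1.x style; the broker host and topic below are placeholders, not values from the source):
import paho.mqtt.client as mqtt

client = mqtt.Client()
client.will_set('status/client1', payload='offline', qos=1, retain=True)
client.connect('broker.example.com')  # will_set() must be called before connect()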
6,586 | def netconf_state_statistics_in_bad_rpcs(self, **kwargs):
config = ET.Element("config")
netconf_state = ET.SubElement(config, "netconf-state", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring")
statistics = ET.SubElement(netconf_state, "statistics")
in_bad_rpcs = ET.SubElement(statistics, "in-bad-rpcs")
in_bad_rpcs.text = kwargs.pop('in_bad_rpcs')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
6,587 | def scale(self, x, y=None, z=None):
"Uniform scale, if only sx argument is specified"
if y is None:
y = x
if z is None:
z = x
m = self
for col in range(4):
m[0,col] *= x
m[1,col] *= y
m[2,col] *= z
return self | Uniform scale, if only sx argument is specified |
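The same row-wise scaling on a plain numpy 4x4 identity, to show the effect (values are made up):
import numpy as np

m = np.eye(4)
sx, sy, sz = 2.0, 3.0, 4.0
m[0, :] *= sx   # scale the x row across all four columns
m[1, :] *= sy
m[2, :] *= sz
print(np.diag(m))  # [2. 3. 4. 1.]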
6,588 | def venv_bin(name=None):
if not hasattr(sys, "real_prefix"):
easy.error("ERROR: is not a virtualenv" % (sys.executable,))
sys.exit(1)
for bindir in ("bin", "Scripts"):
bindir = os.path.join(sys.prefix, bindir)
if os.path.exists(bindir):
if name:
bin_ext = os.path.splitext(sys.executable)[1] if sys.platform == else
return os.path.join(bindir, name + bin_ext)
else:
return bindir
easy.error("ERROR: Scripts directory not found in " % (sys.prefix,))
sys.exit(1) | Get the directory for virtualenv stubs, or a full executable path
if C{name} is provided. |
6,589 | def currentPage(self):
self._updateResults(self._sortAttributeValue(0), equalToStart=True, refresh=True)
return self._currentResults | Return a sequence of mappings of attribute IDs to column values, to
display to the user.
nextPage/prevPage will strive never to skip items whose column values
have not been returned by this method.
This is best explained by a demonstration. Let's say you have a table
viewing an item with attributes 'a' and 'b', like this:
oid | a | b
----+---+--
0 | 1 | 2
1 | 3 | 4
2 | 5 | 6
3 | 7 | 8
4 | 9 | 0
The table has 2 items per page. You call currentPage and receive a
page which contains items oid 0 and oid 1. item oid 1 is deleted.
If the next thing you do is to call nextPage, the result of currentPage
following that will be items beginning with item oid 2. This is
because although there are no longer enough items to populate a full
page from 0-1, the user has never seen item #2 on a page, so the 'next'
page from the user's point of view contains #2.
If instead, at that same point, the next thing you did was to call
currentPage, *then* nextPage and currentPage again, the first
currentPage results would contain items #0 and #2; the following
currentPage results would contain items #3 and #4. In this case, the
user *has* seen #2 already, so the user expects to see the following
item, not the same item again. |
6,590 | def repartition(self, npartitions):
if self.mode == 'spark':
return self._constructor(self.values.repartition(npartitions)).__finalize__(self)
else:
notsupported(self.mode) | Repartition data (Spark only).
Parameters
----------
npartitions : int
Number of partitions after repartitions. |
6,591 | def minimize_metric(field, metric_func, nm, res, ival, roi=None,
coarse_acc=1, fine_acc=.005,
return_gradient=True, padding=True):
if roi is not None:
assert len(roi) == len(field.shape) * \
2, "ROI must match field dimension"
initshape = field.shape
Fshape = len(initshape)
propfunc = fft_propagate
if roi is None:
if Fshape == 2:
roi = (0, 0, field.shape[0], field.shape[1])
else:
roi = (0, field.shape[0])
roi = 1*np.array(roi)
if padding:
field = pad.pad_add(field)
if ival[0] > ival[1]:
ival = (ival[1], ival[0])
N = int(100 / coarse_acc)  # np.linspace requires an integer sample count
zc = np.linspace(ival[0], ival[1], N, endpoint=True)
fftfield = np.fft.fftn(field)
gradc = np.zeros(zc.shape)
for i in range(len(zc)):
d = zc[i]
fsp = propfunc(fftfield, d, nm, res)
if Fshape == 2:
gradc[i] = metric_func(fsp[roi[0]:roi[2], roi[1]:roi[3]])
else:
gradc[i] = metric_func(fsp[roi[0]:roi[1]])
minid = np.argmin(gradc)
if minid == 0:
zc -= zc[1] - zc[0]
minid += 1
if minid == len(zc) - 1:
zc += zc[1] - zc[0]
minid -= 1
zf = 1*zc
gradf = 1 * gradc
numfine = 10
mingrad = gradc[minid]
while True:
gradf = np.zeros(numfine)
ival = (zf[minid - 1], zf[minid + 1])
zf = np.linspace(ival[0], ival[1], numfine)
for i in range(len(zf)):
d = zf[i]
fsp = propfunc(fftfield, d, nm, res)
if Fshape == 2:
gradf[i] = metric_func(fsp[roi[0]:roi[2], roi[1]:roi[3]])
else:
gradf[i] = metric_func(fsp[roi[0]:roi[1]])
minid = np.argmin(gradf)
if minid == 0:
zf -= zf[1] - zf[0]
minid += 1
if minid == len(zf) - 1:
zf += zf[1] - zf[0]
minid -= 1
if abs(mingrad - gradf[minid]) / 100 < fine_acc:
break
minid = np.argmin(gradf)
fsp = propfunc(fftfield, zf[minid], nm, res)
if padding:
fsp = pad.pad_rem(fsp)
if return_gradient:
return fsp, zf[minid], [(zc, gradc), (zf, gradf)]
return fsp, zf[minid] | Find the focus by minimizing the `metric` of an image
Parameters
----------
field : 2d array
electric field
metric_func : callable
some metric to be minimized
ival : tuple of floats
(minimum, maximum) of interval to search in pixels
nm : float
RI of medium
res : float
wavelength in pixels
roi : rectangular region of interest (x1, y1, x2, y2)
Region of interest of `field` for which the metric will be
minimized. If not given, the entire `field` will be used.
coarse_acc : float
accuracy for determination of global minimum in pixels
fine_acc : float
accuracy for fine localization percentage of gradient change
return_gradient:
return x and y values of computed gradient
padding : bool
perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionchanged:: 0.1.4
improved padding value and padding location |
6,592 | def register(scheme):
scheme = nstr(scheme)
urlparse.uses_fragment.append(scheme)
urlparse.uses_netloc.append(scheme)
urlparse.uses_params.append(scheme)
urlparse.uses_query.append(scheme)
urlparse.uses_relative.append(scheme) | Registers a new scheme to the urlparser.
:param schema | <str> |
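A usage sketch with Python 3's urllib.parse registries (the scheme name 'myapp' is an example):
from urllib.parse import (urlparse, uses_fragment, uses_netloc,
                          uses_params, uses_query, uses_relative)

for registry in (uses_fragment, uses_netloc, uses_params, uses_query, uses_relative):
    registry.append('myapp')

print(urlparse('myapp://host:1234/path?x=1').netloc)  # 'host:1234'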
6,593 | def user_fields(self, user):
return self._query_zendesk(self.endpoint.user_fields, 'user_field', id=user) | Retrieve the user fields for this user.
:param user: User object or id |
6,594 | def nlargest(self, n=5, keep='first'):
return algorithms.SelectNSeries(self, n=n, keep=keep).nlargest() | Return the largest `n` elements.
Parameters
----------
n : int, default 5
Return this many descending sorted values.
keep : {'first', 'last', 'all'}, default 'first'
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- ``first`` : return the first `n` occurrences in order
of appearance.
- ``last`` : return the last `n` occurrences in reverse
order of appearance.
- ``all`` : keep all occurrences. This can result in a Series of
size larger than `n`.
Returns
-------
Series
The `n` largest values in the Series, sorted in decreasing order.
See Also
--------
Series.nsmallest: Get the `n` smallest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values(ascending=False).head(n)`` for small `n`
relative to the size of the ``Series`` object.
Examples
--------
>>> countries_population = {"Italy": 59000000, "France": 65000000,
... "Malta": 434000, "Maldives": 434000,
... "Brunei": 434000, "Iceland": 337000,
... "Nauru": 11300, "Tuvalu": 11300,
... "Anguilla": 11300, "Monserat": 5200}
>>> s = pd.Series(countries_population)
>>> s
Italy 59000000
France 65000000
Malta 434000
Maldives 434000
Brunei 434000
Iceland 337000
Nauru 11300
Tuvalu 11300
Anguilla 11300
Monserat 5200
dtype: int64
The `n` largest elements where ``n=5`` by default.
>>> s.nlargest()
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64
The `n` largest elements where ``n=3``. Default `keep` value is 'first'
so Malta will be kept.
>>> s.nlargest(3)
France 65000000
Italy 59000000
Malta 434000
dtype: int64
The `n` largest elements where ``n=3`` and keeping the last duplicates.
Brunei will be kept since it is the last with value 434000 based on
the index order.
>>> s.nlargest(3, keep='last')
France 65000000
Italy 59000000
Brunei 434000
dtype: int64
The `n` largest elements where ``n=3`` with all duplicates kept. Note
that the returned Series has five elements due to the three duplicates.
>>> s.nlargest(3, keep='all')
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64 |
6,595 | def down(self, path, link, repo):
filename = link.split("/")[-1]
if not os.path.isfile(path + filename):
Download(path, link.split(), repo).start() | Download files |
6,596 | def start_polling(self, interval):
interval = float(interval)
self.polling = True
self.term_checker.reset()
logger.info("Starting polling for changes to the track list")
while self.polling:
loop_start = time()
self.update_stream()
self.handle_exceptions()
elapsed = time() - loop_start
sleep(max(0.1, interval - elapsed))
logger.warning("Term poll ceased!") | Start polling for term updates and streaming. |
6,597 | def listen(self, timeout=10):
self._socket.settimeout(float(timeout))
while not self.stopped.isSet():
try:
data, client_address = self._socket.recvfrom(4096)
if len(client_address) > 2:
client_address = (client_address[0], client_address[1])
except socket.timeout:
continue
except Exception as e:
if self._cb_ignore_listen_exception is not None and isinstance(self._cb_ignore_listen_exception, collections.Callable):
if self._cb_ignore_listen_exception(e, self):
continue
raise
try:
serializer = Serializer()
message = serializer.deserialize(data, client_address)
if isinstance(message, int):
logger.error("receive_datagram - BAD REQUEST")
rst = Message()
rst.destination = client_address
rst.type = defines.Types["RST"]
rst.code = message
rst.mid = self._messageLayer.fetch_mid()
self.send_datagram(rst)
continue
logger.debug("receive_datagram - " + str(message))
if isinstance(message, Request):
transaction = self._messageLayer.receive_request(message)
if transaction.request.duplicated and transaction.completed:
logger.debug("message duplicated, transaction completed")
if transaction.response is not None:
self.send_datagram(transaction.response)
continue
elif transaction.request.duplicated and not transaction.completed:
logger.debug("message duplicated, transaction NOT completed")
self._send_ack(transaction)
continue
args = (transaction, )
t = threading.Thread(target=self.receive_request, args=args)
t.start()
elif isinstance(message, Response):
logger.error("Received response from %s", message.source)
else:
transaction = self._messageLayer.receive_empty(message)
if transaction is not None:
with transaction:
self._blockLayer.receive_empty(message, transaction)
self._observeLayer.receive_empty(message, transaction)
except RuntimeError:
logger.exception("Exception with Executor")
self._socket.close() | Listen for incoming messages. Timeout is used to check if the server must be switched off.
:param timeout: Socket Timeout in seconds |
6,598 | def isel_points(self, dim='points', **indexers):
warnings.warn('Dataset.isel_points is deprecated: use Dataset.isel() instead.', DeprecationWarning, stacklevel=2)
indexer_dims = set(indexers)
def take(variable, slices):
if hasattr(variable.data, 'vindex'):
sel = variable.data.vindex[slices]
else:
sel = variable.data[slices]
return sel
def relevant_keys(mapping):
return [k for k, v in mapping.items()
if any(d in indexer_dims for d in v.dims)]
coords = relevant_keys(self.coords)
indexers = [(k, np.asarray(v))
for k, v in indexers.items()]
indexers_dict = dict(indexers)
non_indexed_dims = set(self.dims) - indexer_dims
non_indexed_coords = set(self.coords) - set(coords)
for k, v in indexers:
if k not in self.dims:
raise ValueError("dimension %s does not exist" % k)
if v.dtype.kind != 'i':
raise TypeError('Indexers must be integers')
if v.ndim != 1:
raise ValueError('Indexers must be 1 dimensional')
lengths = set(len(v) for k, v in indexers)
if len(lengths) > 1:
raise ValueError('All indexers must be the same length')
if isinstance(dim, str):
if dim in self.dims:
raise ValueError('Existing dimension names are not valid choices for the dim argument in sel_points')
elif hasattr(dim, 'dims'):
if dim.name in self.dims:
raise ValueError('Existing dimensions are not valid choices for the dim argument in sel_points')
if not utils.is_scalar(dim):
dim_name = 'points' if not hasattr(dim, 'name') else dim.name
dim_coord = as_variable(dim, name=dim_name)
else:
dim_name = dim
dim_coord = None
reordered = self.transpose(
*(list(indexer_dims) + list(non_indexed_dims)))
variables = OrderedDict()
for name, var in reordered.variables.items():
if name in indexers_dict or any(
d in indexer_dims for d in var.dims):
slc = [indexers_dict[k]
if k in indexers_dict
else slice(None) for k in var.dims]
var_dims = [dim_name] + [d for d in var.dims
if d in non_indexed_dims]
selection = take(var, tuple(slc))
var_subset = type(var)(var_dims, selection, var.attrs)
variables[name] = var_subset
else:
variables[name] = var
coord_names = (set(coords) & set(variables)) | non_indexed_coords
dset = self._replace_vars_and_dims(variables, coord_names=coord_names)
if dim_coord is not None:
dset.coords[dim_name] = dim_coord
return dset | Returns a new dataset with each array indexed pointwise along the
specified dimension(s).
This method selects pointwise values from each array and is akin to
the NumPy indexing behavior of `arr[[0, 1], [0, 1]]`, except this
method does not require knowing the order of each array's dimensions.
Parameters
----------
dim : str or DataArray or pandas.Index or other list-like object, optional
Name of the dimension to concatenate along. If dim is provided as a
string, it must be a new dimension name, in which case it is added
along axis=0. If dim is provided as a DataArray or Index or
list-like object, its name, which must not be present in the
dataset, is used as the dimension to concatenate along and the
values are added as a coordinate.
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by array-like objects. All indexers must be the same length and
1 dimensional.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
array and dimension is indexed by the appropriate indexers. With
pointwise indexing, the new Dataset will always be a copy of the
original.
See Also
--------
Dataset.sel
Dataset.isel
Dataset.sel_points
DataArray.isel_points |
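A sketch of the modern xarray equivalent of the deprecated method, using DataArray indexers that share a 'points' dimension:
import numpy as np
import xarray as xr

ds = xr.Dataset({'t': (('x', 'y'), np.arange(12.).reshape(3, 4))})
# roughly equivalent to the old ds.isel_points(x=[0, 2], y=[1, 3])
pts = ds.isel(x=xr.DataArray([0, 2], dims='points'),
              y=xr.DataArray([1, 3], dims='points'))
print(pts['t'].values)  # [ 1. 11.]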
6,599 | def __coord_mel_hz(n, fmin=0, fmax=11025.0, **_kwargs):
if fmin is None:
fmin = 0
if fmax is None:
fmax = 11025.0
basis = core.mel_frequencies(n, fmin=fmin, fmax=fmax)
basis[1:] -= 0.5 * np.diff(basis)
basis = np.append(np.maximum(0, basis), [fmax])
return basis | Get the frequencies for Mel bins |
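A quick check of the edge construction, assuming librosa provides the mel_frequencies used above:
import numpy as np
import librosa

n, fmin, fmax = 5, 0.0, 11025.0
basis = librosa.mel_frequencies(n, fmin=fmin, fmax=fmax)
basis[1:] -= 0.5 * np.diff(basis)               # shift centers to lower bin edges
basis = np.append(np.maximum(0, basis), [fmax])
print(basis.shape)  # (6,): n lower edges plus the top edge at fmax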