Unnamed: 0 | code | docstring
---|---|---|
19,300 | def is30(msg):
if allzeros(msg):
return False
d = hex2bin(data(msg))
if d[0:8] != '00110000':  # first data byte must be 0x30; literal restored by inference
return False
if d[28:30] == '11':
return False
if bin2int(d[15:22]) >= 48:
return False
return True | Check if a message is likely to be BDS code 2,0
Args:
msg (String): 28 bytes hexadecimal message string
Returns:
bool: True or False |
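The check above leans on small bit-twiddling helpers (`hex2bin`, `bin2int`) from its library. A minimal self-contained sketch of what such helpers typically do (assumed behaviour, not the library's actual code):

```python
# Minimal sketches of the bit helpers used above (assumed behaviour).
def hex2bin(hexstr):
    """Convert a hexadecimal string to a zero-padded binary string."""
    num_bits = len(hexstr) * 4
    return bin(int(hexstr, 16))[2:].zfill(num_bits)

def bin2int(binstr):
    """Convert a binary string to an integer."""
    return int(binstr, 2)

print(hex2bin("A0"))       # '10100000'
print(bin2int("0011000"))  # 24
```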
19,301 | def atlas_get_peer( peer_hostport, peer_table=None ):
ret = None
with AtlasPeerTableLocked(peer_table) as ptbl:
ret = ptbl.get(peer_hostport, None)
return ret | Get the given peer's info |
19,302 | def set_cli_options(config, arguments=None):
arguments = arguments or sys.argv[1:]
parser = argparse.ArgumentParser()
for section_name, section in config:
for option_name, _ in section:
var_name = '{0}_{1}'.format(
section_name.lower(),
option_name.lower(),
)
parser.add_argument('--{0}'.format(var_name))
args, _ = parser.parse_known_args(arguments)
args = vars(args)
for section_name, section in config:
for option_name, _ in section:
var_name = '{0}_{1}'.format(
section_name.lower(),
option_name.lower(),
)
value = args.get(var_name)
if value:
setattr(section, option_name, value)
return config | Set any configuration options which have a CLI value set.
Args:
config (confpy.core.config.Configuration): A configuration object which
has been initialized with options.
arguments (iter of str): An iterable of strings which contains the CLI
arguments passed. If nothing is given then sys.argv is used.
Returns:
confpy.core.config.Configuration: A configuration object with CLI
values set.
The pattern to follow when setting CLI values is:
<section>_<option>
Each value should be lower case and separated by underscores. |
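The `<section>_<option>` naming rule described above maps one-to-one onto argparse option names. A small self-contained sketch (the section and option names are hypothetical):

```python
import argparse

# Hypothetical section/option pair following the <section>_<option> pattern.
section_name, option_name = "Database", "Host"
var_name = "{0}_{1}".format(section_name.lower(), option_name.lower())

parser = argparse.ArgumentParser()
parser.add_argument("--{0}".format(var_name))  # becomes --database_host

args, _ = parser.parse_known_args(["--database_host", "db.example.com"])
print(vars(args)["database_host"])  # db.example.com
```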
19,303 | def export_legacy_ldcoeffs(self, models, filename=None, photon_weighted=True):
if photon_weighted:
grid = self._ck2004_ld_photon_grid
else:
grid = self._ck2004_ld_energy_grid
if filename is not None:
import time
f = open(filename, )
f.write( % self.pbset)
f.write( % self.pbname)
f.write()
f.write( % (time.ctime()))
f.write( % ( if photon_weighted else ))
mods = np.loadtxt(models)
for mod in mods:
Tindex = np.argwhere(self._ck2004_intensity_axes[0] == mod[0])[0][0]
lindex = np.argwhere(self._ck2004_intensity_axes[1] == mod[1]/10)[0][0]
mindex = np.argwhere(self._ck2004_intensity_axes[2] == mod[2]/10)[0][0]
if filename is None:
print(*11 % tuple(grid[Tindex, lindex, mindex].tolist()))
else:
f.write((*11+) % tuple(self._ck2004_ld_photon_grid[Tindex, lindex, mindex].tolist()))
if filename is not None:
f.close() | @models: the path (including the filename) of legacy's models.list
@filename: output filename for storing the table
Exports CK2004 limb darkening coefficients to a PHOEBE legacy
compatible format. |
19,304 | def set_index_edited(self, index, edited):
self.__edited[index.row()] = edited
self.dataChanged.emit(index, index) | Set whether the conf was edited or not.
Edited files will be displayed with a '*'
:param index: the index that was edited
:type index: QModelIndex
:param edited: if the file was edited, set edited to True, else False
:type edited: bool
:returns: None
:rtype: None
:raises: None |
19,305 | def query(self,
startTime=None,
endTime=None,
sinceServerStart=False,
level="WARNING",
services="*",
machines="*",
server="*",
codes=[],
processIds=[],
export=False,
exportType="CSV",
out_path=None
):
allowed_levels = ("SEVERE", "WARNING", "INFO",
"FINE", "VERBOSE", "DEBUG")
qFilter = {
"services": "*",
"machines": "*",
"server" : "*"
}
if len(processIds) > 0:
qFilter['processIds'] = processIds  # filter key names inferred from the argument names
if len(codes) > 0:
qFilter['codes'] = codes
params = {
"f" : "json",
"sinceServerStart" : sinceServerStart,
"pageSize" : 10000
}
if startTime is not None and \
isinstance(startTime, datetime):
params['startTime'] = startTime.strftime("%Y-%m-%dT%H:%M:%S")  # parameter/filter key names inferred from the argument names
if endTime is not None and \
isinstance(endTime, datetime):
params['endTime'] = endTime.strftime("%Y-%m-%dT%H:%M:%S")
if level.upper() in allowed_levels:
params['level'] = level
if server != "*":
qFilter['server'] = server.split()
if services != "*":
qFilter['services'] = services.split()
if machines != "*":
qFilter['machines'] = machines.split(",")
params['filter'] = qFilter
if export == True and \
out_path is not None:
messages = self._post(self._url + "/query", params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
with open(name=out_path, mode='w') as f:  # write mode, tab delimiter and response key assumed
hasKeys = False
if exportType == "TAB":
csvwriter = csv.writer(f, delimiter='\t')
else:
csvwriter = csv.writer(f)
for message in messages['logMessages']:
if hasKeys == False:
csvwriter.writerow(message.keys())
hasKeys = True
csvwriter.writerow(message.values())
del message
del messages
return out_path
else:
return self._post(self._url + "/query", params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | The query operation on the logs resource provides a way to
aggregate, filter, and page through logs across the entire site.
Inputs: |
19,306 | def commit(self):
logger.debug(.format(self.name))
self.backend.commit_job(self._serialize())
self.parent.commit() | Store metadata on this Job to the backend. |
19,307 | def mdr_mutual_information(X, Y, labels, base=2):
return mutual_information(_mdr_predict(X, Y, labels), labels, base=base) | Calculates the MDR mutual information, I(XY;labels), in the given base
MDR mutual information is calculated by combining variables X and Y into a single MDR model then calculating
the mutual information between the resulting model's predictions and the labels.
Parameters
----------
X: array-like (# samples)
An array of values corresponding to one feature in the MDR model
Y: array-like (# samples)
An array of values corresponding to one feature in the MDR model
labels: array-like (# samples)
The class labels corresponding to features X and Y
base: integer (default: 2)
The base in which to calculate MDR mutual information
Returns
----------
mdr_mutual_information: float
The MDR mutual information calculated according to the equation I(XY;labels) = H(labels) - H(labels|XY) |
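The relation I(XY;labels) = H(labels) - H(labels|XY) can equivalently be computed as H(X) + H(Y) - H(X,Y). A short self-contained sketch that estimates mutual information from empirical counts (not the MDR implementation itself):

```python
import math
from collections import Counter

def entropy(values, base=2):
    """Empirical Shannon entropy of a sequence of labels."""
    counts = Counter(values)
    n = len(values)
    return -sum((c / n) * math.log(c / n, base) for c in counts.values())

def mutual_information(x, y, base=2):
    """I(X;Y) = H(X) + H(Y) - H(X,Y), estimated from samples."""
    joint = list(zip(x, y))
    return entropy(x, base) + entropy(y, base) - entropy(joint, base)

preds = [0, 0, 1, 1, 0, 1]   # e.g. MDR model predictions
labels = [0, 0, 1, 1, 0, 0]
print(mutual_information(preds, labels))  # ~0.46 bits
```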
19,308 | def _update_metadata(self, kwargs=None):
if kwargs == None and hasattr(self, ):
kwargs = self._list(self.name, quiet=True, return_json=True)
for arg in [, ]:
if arg in kwargs:
setattr(self, arg, kwargs[arg])
if "image" in kwargs:
self._image = kwargs[]
elif "container_image" in kwargs:
self._image = kwargs[] | Extract any additional attributes to hold with the instance
from kwargs |
19,309 | def sel_list_pres(ds_sfc_x):
p_min, p_max = ds_sfc_x.sp.min().values, ds_sfc_x.sp.max().values
list_pres_level = [
# 37 standard ERA5 pressure levels in hPa (values assumed from the ERA5 level set)
'1', '2', '3',
'5', '7', '10',
'20', '30', '50',
'70', '100', '125',
'150', '175', '200',
'225', '250', '300',
'350', '400', '450',
'500', '550', '600',
'650', '700', '750',
'775', '800', '825',
'850', '875', '900',
'925', '950', '975',
'1000',
]
ser_pres_level = pd.Series(list_pres_level).map(int)*100
pos_lev_max, pos_lev_min = (
ser_pres_level[ser_pres_level > p_max].idxmin(),
ser_pres_level[ser_pres_level < p_min].idxmax()
)
list_pres_sel = ser_pres_level.loc[pos_lev_min:pos_lev_max]/100
list_pres_sel = list_pres_sel.map(int).map(str).to_list()
return list_pres_sel | select proper levels for model level data download |
19,310 | def add_user(self, attrs):
ldap_client = self._bind()
attrs_srt = self.attrs_pretreatment(attrs)
attrs_srt[self._byte_p2('objectClass')] = self.objectclasses
# builds "<attr>=<value>,<user base dn>"; separator literals restored by inference
dn = \
self._byte_p2(self.dn_user_attr) + \
self._byte_p2('=') + \
self._byte_p2(ldap.dn.escape_dn_chars(
attrs[self.dn_user_attr]
)
) + \
self._byte_p2(',') + \
self._byte_p2(self.userdn)
ldif = modlist.addModlist(attrs_srt)
try:
ldap_client.add_s(dn, ldif)
except ldap.ALREADY_EXISTS as e:
raise UserAlreadyExists(attrs[self.key], self.backend_name)
except Exception as e:
ldap_client.unbind_s()
self._exception_handler(e)
ldap_client.unbind_s() | add a user |
19,311 | def approx_aic(self, ts):
return self._jmodel.approxAIC(_py2java(self._ctx, Vectors.dense(ts))) | Calculates an approximation to the Akaike Information Criterion (AIC). This is an approximation
as we use the conditional likelihood, rather than the exact likelihood. Please see
[[https://en.wikipedia.org/wiki/Akaike_information_criterion]] for more information on this
measure.
Parameters
----------
ts:
the timeseries to evaluate under current model
Returns an approximation to the AIC under the current model as a double |
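The AIC trades goodness of fit against model complexity as AIC = 2k - 2*ln(L), where k is the number of parameters and L the (here approximate, conditional) likelihood. A toy sketch with made-up log-likelihoods:

```python
import math

def aic(log_likelihood, num_params):
    """AIC = 2k - 2*ln(L); lower values indicate a better fit/complexity trade-off."""
    return 2 * num_params - 2 * log_likelihood

# Hypothetical log-likelihoods for two candidate models
print(aic(log_likelihood=-120.5, num_params=3))  # 247.0
print(aic(log_likelihood=-118.9, num_params=6))  # 249.8
```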
19,312 | def _interpret_regexp(self, string, flags):
self.index = 0
self.length = len(string)
self.source = string
self.lineNumber = 0
self.lineStart = 0
octal = False
st =
inside_square = 0
while (self.index < self.length):
template = if not inside_square else
ch = self.source[self.index]
self.index += 1
if ch == :
ch = self.source[self.index]
self.index += 1
if (not isLineTerminator(ch)):
if ch == :
digs = self.source[self.index:self.index + 4]
if len(digs) == 4 and all(isHexDigit(d) for d in digs):
st += template % unichr(int(digs, 16))
self.index += 4
else:
st +=
elif ch == :
digs = self.source[self.index:self.index + 2]
if len(digs) == 2 and all(isHexDigit(d) for d in digs):
st += template % unichr(int(digs, 16))
self.index += 2
else:
st +=
elif ch == :
st +=
elif ch == :
st +=
elif ch == :
st +=
elif ch == :
st +=
elif ch == :
st +=
elif ch == :
st +=
elif ch in REGEXP_SPECIAL_SINGLE:
st += + ch
elif ch == :
st +=
elif ch == :
st +=
elif ch == :
st +=
elif ch == :
st +=
elif ch == :
st +=
elif ch == :
st +=
elif ch == :
st += template % u
elif ch == :
st += template % u
else:
if isDecimalDigit(ch):
num = ch
while self.index < self.length and isDecimalDigit(
self.source[self.index]):
num += self.source[self.index]
self.index += 1
st += + num
else:
st += ch
else:
self.lineNumber += 1
if (ch == and self.source[self.index] == ):
self.index += 1
self.lineStart = self.index
else:
if ch == :
inside_square = True
elif ch == :
inside_square = False
st += ch
return st | Perform string escape - for regexp literals |
19,313 | def returnDepositsWithdrawals(self, start=0, end=2**32-1):
return self._private('returnDepositsWithdrawals', start=start, end=end) | Returns your deposit and withdrawal history within a range,
specified by the "start" and "end" POST parameters, both of which
should be given as UNIX timestamps. |
19,314 | def get_destinations(self, ascii_listing):
self.destinations = set()
def collect_destinations(matchobj):
numbers = matchobj.group("no")
if numbers:
self.destinations.update(set(
[n.strip() for n in numbers.split(",")]
))
for line in self._iter_lines(ascii_listing):
self.renum_regex.sub(collect_destinations, line)
return sorted([int(no) for no in self.destinations if no]) | returns all line numbers that are used in a jump. |
19,315 | def update_metadata_filters(metadata, jupyter_md, cell_metadata):
cell_metadata = [m for m in cell_metadata if m not in ['language', 'magic_args']]  # key names restored following jupytext's filter conventions
if 'cell_metadata_filter' in metadata.get('jupytext', {}):
metadata_filter = metadata_filter_as_dict(metadata.get('jupytext', {})['cell_metadata_filter'])
if isinstance(metadata_filter.get('excluded'), list):
metadata_filter['excluded'] = [key for key in metadata_filter['excluded'] if key not in cell_metadata]
metadata_filter.setdefault('additional', [])
if isinstance(metadata_filter.get('additional'), list):
for key in cell_metadata:
if key not in metadata_filter['additional']:
metadata_filter['additional'].append(key)
metadata.setdefault('jupytext', {})['cell_metadata_filter'] = metadata_filter_as_string(metadata_filter)
if not jupyter_md:
cell_metadata = {'additional': cell_metadata, 'excluded': 'all'}
metadata.setdefault('jupytext', {})['notebook_metadata_filter'] = '-all'
metadata.setdefault('jupytext', {})['cell_metadata_filter'] = metadata_filter_as_string(cell_metadata) | Update or set the notebook and cell metadata filters |
19,316 | def quick_summary(nml2_doc):
info = %nml2_doc.id
membs = inspect.getmembers(nml2_doc)
for memb in membs:
if isinstance(memb[1], list) and len(memb[1])>0 \
and not memb[0].endswith():
info+=%memb[0]
for entry in memb[1]:
extra =
extra = entry.name if hasattr(entry,) else extra
extra = entry.href if hasattr(entry,) else extra
extra = entry.id if hasattr(entry,) else extra
info+=" %s (%s),"%(entry, extra)
info+=
return info | Or better just use nml2_doc.summary(show_includes=False) |
19,317 | def set_extended_elements(self):
self.set_creative_commons()
self.set_owner()
self.set_subtitle()
self.set_summary() | Parses and sets non required elements |
19,318 | def protect_libraries_from_patching():
patched = [, , , , , , , ,
, , , ,
, , , ]
for name in patched:
try:
__import__(name)
except:
pass
patched_modules = dict([(k, v) for k, v in sys.modules.items()
if k in patched])
for name in patched_modules:
del sys.modules[name]
import _pydev_imps._pydev_saved_modules
for name in patched_modules:
sys.modules[name] = patched_modules[name] | In this function we delete some modules from `sys.modules` dictionary and import them again inside
`_pydev_saved_modules` in order to save their original copies there. After that we can use these
saved modules within the debugger to protect them from patching by external libraries (e.g. gevent). |
19,319 | def dispatch(self, req):
params = req.environ[][1]
controller = params.pop()
cont_class = controller.__class__
cont_name = "%s:%s" % (cont_class.__module__, cont_class.__name__)
origin = req.remote_addr if req.remote_addr else
if req.remote_user:
origin = % (origin, req.remote_user)
return webob.exc.HTTPInternalServerError() | Called by the Routes middleware to dispatch the request to the
appropriate controller. If a webob exception is raised, it is
returned; if some other exception is raised, the webob
`HTTPInternalServerError` exception is raised. Otherwise, the
return value of the controller is returned. |
19,320 | def netconf_session_start_session_id(self, **kwargs):
config = ET.Element("config")
netconf_session_start = ET.SubElement(config, "netconf-session-start", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-notifications")
session_id = ET.SubElement(netconf_session_start, "session-id")
session_id.text = kwargs.pop('session_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
19,321 | def _CreateAllTypes(self, enumTypes, dataTypes, managedTypes):
for typeInfo in managedTypes:
name = typeInfo[0]
version = typeInfo[3]
VmomiSupport.AddVersion(version, , , 0, name)
VmomiSupport.AddVersionParent(version, )
VmomiSupport.AddVersionParent(version, )
VmomiSupport.AddVersionParent(version, version)
for fn, infos in (VmomiSupport.CreateEnumType, enumTypes), \
(VmomiSupport.CreateDataType, dataTypes), \
(VmomiSupport.CreateManagedType, managedTypes):
for typeInfo in infos:
try:
fn(*typeInfo)
except Exception as err:
pass | Create pyVmomi types from pyVmomi type definitions |
19,322 | def search_globs(path, patterns):
for pattern in (p for p in patterns if p):
if pattern.startswith():
regex = fnmatch.translate(pattern[1:])
regex = regex.replace(, )
temp_path = path[1:] if path.startswith() else path
m = re.search(regex, temp_path)
if m and m.start() == 0:
return True
else:
regex = fnmatch.translate(pattern)
regex = regex.replace(, )
if re.search(regex, path):
return True
return False | Test whether the given *path* contains any patterns in *patterns*
Args:
path (str):
A file path to test for matches.
patterns (list[str]):
A list of glob string patterns to test against. If *path* matches
any of those patters, it will return True.
Returns:
bool: **True** if the ``path`` matches any pattern in *patterns*. |
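The core trick above is `fnmatch.translate`, which turns a glob pattern into a regular-expression string that can then be searched against a path. A quick standard-library illustration:

```python
import fnmatch
import re

# fnmatch.translate turns a glob into a regular-expression string; the exact
# anchoring suffix varies between Python versions.
regex = fnmatch.translate("*.py")
print(regex)                                  # e.g. '(?s:.*\\.py)\\Z'
print(bool(re.search(regex, "src/mod.py")))   # True
print(bool(re.search(regex, "src/mod.txt")))  # False
```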
19,323 | def remove_listener(registry, listener):
if listener is not None and listener in registry:
registry.remove(listener)
return True
return False | Removes a listener from the registry
:param registry: A registry (a list)
:param listener: The listener to remove
:return: True if the listener was in the list |
19,324 | def parse_query(query):
parts = query.split()
norm = []
for p in parts:
p = p.strip()
if p:
norm.append(p)
elif not in norm:
norm.append()
return norm | Given a simplified XPath query string, returns an array of normalized query parts. |
19,325 | def setHoverIcon( self, column, icon ):
self._hoverIcon[column] = QtGui.QIcon(icon) | Sets the icon to use when the user hovers over
the item for the given column.
:param column | <int>
icon | <QtGui.QIcon> |
19,326 | def integrateFullOrbit_c(pot,yo,t,int_method,rtol=None,atol=None,dt=None):
rtol, atol= _parse_tol(rtol,atol)
npot, pot_type, pot_args= _parse_pot(pot)
int_method_c= _parse_integrator(int_method)
if dt is None:
dt= -9999.99
result= nu.empty((len(t),6))
err= ctypes.c_int(0)
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')  # flag names assumed to match the requirements=['C','W'] calls below
integrationFunc= _lib.integrateFullOrbit
integrationFunc.argtypes= [ndpointer(dtype=nu.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=nu.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=nu.int32,flags=ndarrayFlags),
ndpointer(dtype=nu.float64,flags=ndarrayFlags),
ctypes.c_double,
ctypes.c_double,
ctypes.c_double,
ndpointer(dtype=nu.float64,flags=ndarrayFlags),
ctypes.POINTER(ctypes.c_int),
ctypes.c_int]
f_cont= [yo.flags['F_CONTIGUOUS'],
t.flags['F_CONTIGUOUS']]
yo= nu.require(yo,dtype=nu.float64,requirements=['C','W'])
t= nu.require(t,dtype=nu.float64,requirements=['C','W'])
result= nu.require(result,dtype=nu.float64,requirements=['C','W'])
integrationFunc(yo,
ctypes.c_int(len(t)),
t,
ctypes.c_int(npot),
pot_type,
pot_args,
ctypes.c_double(dt),
ctypes.c_double(rtol),ctypes.c_double(atol),
result,
ctypes.byref(err),
ctypes.c_int(int_method_c))
if int(err.value) == -10:
raise KeyboardInterrupt("Orbit integration interrupted by CTRL-C (SIGINT)")
if f_cont[0]: yo= nu.asfortranarray(yo)
if f_cont[1]: t= nu.asfortranarray(t)
return (result,err.value) | NAME:
integrateFullOrbit_c
PURPOSE:
C integrate an ode for a FullOrbit
INPUT:
pot - Potential or list of such instances
yo - initial condition [q,p]
t - set of times at which one wants the result
int_method= 'leapfrog_c', 'rk4_c', 'rk6_c', 'symplec4_c'
rtol, atol
dt= (None) force integrator to use this stepsize (default is to automatically determine one)
OUTPUT:
(y,err)
y : array, shape (len(y0), len(t))
Array containing the value of y for each desired time in t, \
with the initial value y0 in the first row.
err: error message, if not zero: 1 means maximum step reduction happened for adaptive integrators
HISTORY:
2011-11-13 - Written - Bovy (IAS) |
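The wrapper above forces `yo`, `t` and `result` into C-contiguous, writeable memory before handing them to the C integrator, then restores Fortran order afterwards if that is what the caller passed in. A small sketch of the `numpy.require` pattern:

```python
import numpy as np

# np.require returns the same array when it already satisfies the
# requirements, or a copy with the requested layout otherwise.
a = np.asfortranarray(np.arange(12.0).reshape(3, 4))
print(a.flags['C_CONTIGUOUS'], a.flags['F_CONTIGUOUS'])  # False True

b = np.require(a, dtype=np.float64, requirements=['C', 'W'])
print(b.flags['C_CONTIGUOUS'], b.flags['WRITEABLE'])      # True True
```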
19,327 | def get_meta(meta, name):
assert name in meta
data = meta[name]
if data['t'] in ['MetaString', 'MetaBool']:  # 't'/'c' keys follow the pandoc JSON AST
return data['c']
elif data['t'] == 'MetaInlines':
if len(data['c']) == 1 and data['c'][0]['t'] == 'Str':
if data['c'][0]['c'] in ['true', 'True', 'TRUE']:
return True
elif data['c'][0]['c'] in ['false', 'False', 'FALSE']:
return False
return stringify(data['c'])
elif data['t'] == 'MetaList':
return [stringify(v['c']) for v in data['c']]
else:
raise RuntimeError("Could not understand metadata variable '%s'." %
name) | Retrieves the metadata variable 'name' from the 'meta' dict. |
19,328 | def fileinfo(self, fid):
if not isinstance(fid, str):
raise TypeError("Your file ID must be a string")
try:
info = self.conn.make_call_with_cb("getFileinfo", fid).get(timeout=5)
if not info:
warnings.warn(
f"Your query for file with ID: failed.", RuntimeWarning
)
elif fid in self.__files and not self.__files[fid].updated:
self.__files[fid].fileupdate(info)
except queue.Empty as ex:
raise ValueError(
"lain didnt malformed?"
) from ex
return info | Ask lain about what he knows about given file. If the given file
exists in the file dict, it will get updated. |
19,329 | def make_url(self, container=None, resource=None, query_items=None):
pth = [self._base_url]
if container:
pth.append(container.strip())
if resource:
pth.append(resource)
else:
pth.append('')  # trailing slash when no resource is given (assumed)
url = '/'.join(pth)
if isinstance(query_items, (list, tuple, set)):
url += RestHttp._list_query_str(query_items)
query_items = None
p = requests.PreparedRequest()
p.prepare_url(url, query_items)
return p.url | Create a URL from the specified parts. |
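`requests.PreparedRequest.prepare_url` handles the query-string encoding, which is why the method above can hand it either a pre-built query string or a dict of items. A minimal illustration:

```python
import requests

# PreparedRequest.prepare_url encodes the query string for us.
p = requests.PreparedRequest()
p.prepare_url("https://example.com/api/items", {"limit": 10, "q": "a b"})
print(p.url)  # e.g. https://example.com/api/items?limit=10&q=a+b
```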
19,330 | def list_files(self, offset=None, limit=None, api=None):
api = api or self._API
if not self.is_folder():
raise SbgError(.format(name=self.name))
url = self._URL[].format(id=self.id)
return super(File, self.__class__)._query(
api=api, url=url, offset=offset, limit=limit, fields=
) | List files in a folder
:param api: Api instance
:param offset: Pagination offset
:param limit: Pagination limit
:return: List of files |
19,331 | def process_tick(self, tup):
curtime = int(time.time())
window_info = WindowContext(curtime - self.window_duration, curtime)
self.processWindow(window_info, list(self.current_tuples))
for tup in self.current_tuples:
self.ack(tup)
self.current_tuples.clear() | Called every window_duration |
19,332 | def _dict_to_pio(d, class_=None):
d = keys_to_snake_case(d)
if class_:
return class_(**d)
if not in d:
raise ValueError( + .join(d.keys()))
elif d[] == :
return System(**d)
elif d[] == :
return ChemicalSystem(**d)
elif d[] == :
return Alloy(**d)
elif d[] == :
return ChemicalSystem(**d)
raise ValueError( + str(d[])) | Convert a single dictionary object to a Physical Information Object.
:param d: Dictionary to convert.
:param class_: Subclass of :class:`.Pio` to produce, if not unambiguous
:return: Single object derived from :class:`.Pio`. |
19,333 | def authorize_url(self, state=):
url =
params = {
: self.client_id,
: ,
: state,
: self.redirect_uri
}
return url + urlencode(params) | return user authorize url |
19,334 | def jcrop_css(css_url=None):
if css_url is None:
if current_app.config[]:
css_url = url_for(, filename=)
else:
css_url =
return Markup( % css_url) | Load jcrop css file.
:param css_url: The custom CSS URL. |
19,335 | def last_job_statuses(self) -> List[str]:
statuses = []
for status in self.jobs.values_list('status', flat=True):
if status is not None:
statuses.append(status)
return statuses | The last statuses of the jobs in this experiment. |
19,336 | def set_logger(self, logger):
try:
self.library.set_logger.restype = None
except AttributeError:
logger.warn("Tried to set logger but method is not implemented in %s", self.engine)
return
self.library.set_logger.argtypes = [
(fortran_log_functype)]
self.library.set_logger(fortran_log_func) | subscribe to fortran log messages |
19,337 | def get_all_tasks(self, course):
tasks = self.get_readable_tasks(course)
output = {}
for task in tasks:
try:
output[task] = self.get_task(course, task)
except:
pass
return output | :return: a table containing taskid=>Task pairs |
19,338 | def set_ylim(self, xlims, dx, xscale, reverse=False):
self._set_axis_limits('y', xlims, dx, xscale, reverse)
return | Set y limits for plot.
This will set the limits for the y axis
for the specific plot.
Args:
ylims (len-2 list of floats): The limits for the axis.
dy (float): Amount to increment by between the limits.
yscale (str): Scale of the axis. Either `log` or `lin`.
reverse (bool, optional): If True, reverse the axis tick marks. Default is False. |
19,339 | def does_sqlatype_require_index_len(
coltype: Union[TypeEngine, VisitableType]) -> bool:
coltype = _coltype_to_typeengine(coltype)
if isinstance(coltype, sqltypes.Text):
return True
if isinstance(coltype, sqltypes.LargeBinary):
return True
return False | Is the SQLAlchemy column type one that requires its indexes to have a
length specified?
(MySQL, at least, requires index length to be specified for ``BLOB`` and
``TEXT`` columns:
http://dev.mysql.com/doc/refman/5.7/en/create-index.html.) |
19,340 | def is_iterable(obj):
return hasattr(obj, '__iter__') and not isinstance(obj, str) or isinstance(obj, GeneratorType) | Are we being asked to look up a list of things, instead of a single thing?
We check for the `__iter__` attribute so that this can cover types that
don't have to be known by this module, such as NumPy arrays.
Strings, however, should be considered as atomic values to look up, not
iterables.
We don't need to check for the Python 2 `unicode` type, because it doesn't
have an `__iter__` attribute anyway. |
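A quick self-contained check of the behaviour the docstring describes (the function body repeats the one above so the snippet runs on its own):

```python
from types import GeneratorType

def is_iterable(obj):
    return hasattr(obj, '__iter__') and not isinstance(obj, str) or isinstance(obj, GeneratorType)

print(is_iterable([1, 2, 3]))        # True  -- a list of things to look up
print(is_iterable(x for x in "ab"))  # True  -- generators count too
print(is_iterable("cat"))            # False -- strings are atomic values
print(is_iterable(42))               # False
```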
19,341 | def recipients(messenger, addresses):
if isinstance(messenger, six.string_types):
messenger = get_registered_messenger_object(messenger)
return messenger._structure_recipients_data(addresses) | Structures recipients data.
:param str|unicode, MessageBase messenger: MessengerBase heir
:param list[str|unicode]|str|unicode addresses: recipients addresses or Django User
model heir instances (NOTE: if supported by a messenger)
:return: list of Recipient
:rtype: list[Recipient] |
19,342 | def request_vpc_peering_connection(requester_vpc_id=None, requester_vpc_name=None,
peer_vpc_id=None, peer_vpc_name=None, name=None,
peer_owner_id=None, peer_region=None, region=None,
key=None, keyid=None, profile=None, dry_run=False):
conn = _get_conn3(region=region, key=key, keyid=keyid,
profile=profile)
if name and _vpc_peering_conn_id_for_name(name, conn):
raise SaltInvocationError(
)
if not _exactly_one((requester_vpc_id, requester_vpc_name)):
raise SaltInvocationError(
)
if not _exactly_one((peer_vpc_id, peer_vpc_name)):
raise SaltInvocationError(
)
if requester_vpc_name:
requester_vpc_id = _get_id(vpc_name=requester_vpc_name, region=region, key=key,
keyid=keyid, profile=profile)
if not requester_vpc_id:
return {: .format(requester_vpc_name)}
if peer_vpc_name:
peer_vpc_id = _get_id(vpc_name=peer_vpc_name, region=region, key=key,
keyid=keyid, profile=profile)
if not peer_vpc_id:
return {: .format(peer_vpc_name)}
peering_params = {"VpcId": requester_vpc_id, "PeerVpcId": peer_vpc_id, "DryRun": dry_run}
if peer_owner_id:
peering_params.update({"PeerOwnerId": peer_owner_id})
if peer_region:
peering_params.update({"PeerRegion": peer_region})
try:
log.debug()
vpc_peering = conn.create_vpc_peering_connection(**peering_params)
peering = vpc_peering.get('VpcPeeringConnection', {})  # response keys follow the boto3 EC2 API shape
peering_conn_id = peering.get('VpcPeeringConnectionId', '')
msg = .format(peering_conn_id)
log.debug(msg)
if name:
log.debug()
conn.create_tags(
Resources=[peering_conn_id],
Tags=[{'Key': 'Name', 'Value': name}]
)
log.debug()
msg += .format(name)
return {: msg}
except botocore.exceptions.ClientError as err:
log.error()
return {: __utils__[](err)} | Request a VPC peering connection between two VPCs.
.. versionadded:: 2016.11.0
requester_vpc_id
ID of the requesting VPC. Exclusive with requester_vpc_name.
requester_vpc_name
Name tag of the requesting VPC. Exclusive with requester_vpc_id.
peer_vpc_id
ID of the VPC to create VPC peering connection with. This can be a VPC in
another account. Exclusive with peer_vpc_name.
peer_vpc_name
Name tag of the VPC to create VPC peering connection with. This can only
be a VPC in the same account and same region, else resolving it into a
vpc ID will almost certainly fail. Exclusive with peer_vpc_id.
name
The name to use for this VPC peering connection.
peer_owner_id
ID of the owner of the peer VPC. Defaults to your account ID, so a value
is required if peering with a VPC in a different account.
peer_region
Region of peer VPC. For inter-region vpc peering connections. Not required
for intra-region peering connections.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
dry_run
If True, skip application and return status.
CLI Example:
.. code-block:: bash
# Create a named VPC peering connection
salt myminion boto_vpc.request_vpc_peering_connection vpc-4a3e622e vpc-be82e9da name=my_vpc_connection
# Without a name
salt myminion boto_vpc.request_vpc_peering_connection vpc-4a3e622e vpc-be82e9da
# Specify a region
salt myminion boto_vpc.request_vpc_peering_connection vpc-4a3e622e vpc-be82e9da region=us-west-2 |
19,343 | def friendfeed_request(self, path, callback, access_token=None,
post_args=None, **args):
url = "http://friendfeed-api.com/v2" + path
if access_token:
all_args = {}
all_args.update(args)
all_args.update(post_args or {})
consumer_token = self._oauth_consumer_token()
method = "POST" if post_args is not None else "GET"
oauth = self._oauth_request_parameters(
url, access_token, all_args, method=method)
args.update(oauth)
if args: url += "?" + urllib.urlencode(args)
callback = self.async_callback(self._on_friendfeed_request, callback)
http = httpclient.AsyncHTTPClient()
if post_args is not None:
http.fetch(url, method="POST", body=urllib.urlencode(post_args),
callback=callback)
else:
http.fetch(url, callback=callback) | Fetches the given relative API path, e.g., "/bret/friends"
If the request is a POST, post_args should be provided. Query
string arguments should be given as keyword arguments.
All the FriendFeed methods are documented at
http://friendfeed.com/api/documentation.
Many methods require an OAuth access token which you can obtain
through authorize_redirect() and get_authenticated_user(). The
user returned through that process includes an 'access_token'
attribute that can be used to make authenticated requests via
this method. Example usage::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FriendFeedMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
def get(self):
self.friendfeed_request(
"/entry",
post_args={"body": "Testing Tornado Web Server"},
access_token=self.current_user["access_token"],
callback=self.async_callback(self._on_post))
def _on_post(self, new_entry):
if not new_entry:
# Call failed; perhaps missing permission?
self.authorize_redirect()
return
self.finish("Posted a message!") |
19,344 | def generate_csr(self, basename=):
csr = BytesIO()
crypto.create_csr(
self.key.file,
self.name,
.format(basename, int(datetime.now().timestamp())),
.format(self.cuit),
csr,
)
csr.seek(0)
return csr | Creates a CSR for this TaxPayer's key
Creates a file-like object that contains the CSR which can be used to
request a new certificate from AFIP. |
19,345 | def solve_let(expr, vars):
lhs_value = solve(expr.lhs, vars).value
if not isinstance(lhs_value, structured.IStructured):
raise errors.EfilterTypeError(
root=expr.lhs, query=expr.original,
message="The LHS of must evaluate to an IStructured. Got %r."
% (lhs_value,))
return solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value)) | Solves a let-form by calling RHS with nested scope. |
19,346 | def solve_tsp(V,c):
def addcut(X):
for sink in V[1:]:
mflow = maxflow(V,X,V[0],sink)
mflow.optimize()
f,cons = mflow.data
if mflow.ObjVal < 2-EPS:
break
else:
return False
CutA = set([V[0]])
for i in cons:
if cons[i].Pi <= -1+EPS:
CutA.add(i)
CutB = set(V) - CutA
main.addCons(
quicksum(x[i,j] for i in CutA for j in CutB if j>i) + \
quicksum(x[j,i] for i in CutA for j in CutB if j<i) >= 2)
print("mflow:",mflow.getObjVal(),"cut:",CutA,"+",CutB,">= 2")
print("mflow:",mflow.getObjVal(),"cut:",[(i,j) for i in CutA for j in CutB if j>i],"+",[(j,i) for i in CutA for j in CutB if j<i],">= 2")
return True
def isMIP(x):
for var in x:
if var.vtype == "CONTINUOUS":
return False
return True
main = Model("tsp")
x = {}
for i in V:
for j in V:
if j > i:
x[i,j] = main.addVar(ub=1, vtype="C", name="x(%s,%s)"%(i,j))
for i in V:
main.addCons(quicksum(x[j,i] for j in V if j < i) + \
quicksum(x[i,j] for j in V if j > i) == 2, "Degree(%s)"%i)
main.setObjective(quicksum(c[i,j]*x[i,j] for i in V for j in V if j > i), "minimize")
while True:
main.optimize()
z = main.getObjVal()
X = {}
for (i,j) in x:
if main.getVal(x[i,j]) > EPS:
X[i,j] = main.getVal(x[i,j])
if addcut(X) == False:
if isMIP():
break
for (i,j) in x:
main.chgVarType(x[i,j], "BINARY")
edges = []
for (i,j) in x:
if main.getVal(x[i,j]) > EPS:
edges.append((i,j))
return main.getObjVal(),edges | solve_tsp -- solve the traveling salesman problem
- start with assignment model
- check flow from a source to every other node;
- if no flow, a sub-cycle has been found --> add cut
- otherwise, the solution is optimal
Parameters:
- V: set/list of nodes in the graph
- c[i,j]: cost for traversing edge (i,j)
Returns the optimum objective value and the list of edges used. |
19,347 | def get_email_link(email, value=None):
if not email:
return ""
mailto = 'mailto:{}'.format(email)
link_value = value and value or email
return get_link(mailto, link_value) | Returns a well-formed link to an email address. If email is None/empty,
returns an empty string
:param email: email address
:param value: text to be displayed. If None, the email itself is used
:return: a well-formatted html anchor |
19,348 | def _validate_entity_cls(self, entity_cls):
from protean.core.entity import Entity
if not issubclass(entity_cls, Entity):
raise AssertionError(
f)
if entity_cls.meta_.abstract is True:
raise NotSupportedError(
f
f) | Validate that Entity is a valid class |
19,349 | def to_array(self):
array = super(InlineQueryResultMpeg4Gif, self).to_array()
array['type'] = u(self.type)  # key names mirror the attribute names (Telegram Bot API fields)
array['id'] = u(self.id)
array['mpeg4_url'] = u(self.mpeg4_url)
array['thumb_url'] = u(self.thumb_url)
if self.mpeg4_width is not None:
array['mpeg4_width'] = int(self.mpeg4_width)
if self.mpeg4_height is not None:
array['mpeg4_height'] = int(self.mpeg4_height)
if self.mpeg4_duration is not None:
array['mpeg4_duration'] = int(self.mpeg4_duration)
if self.title is not None:
array['title'] = u(self.title)
if self.caption is not None:
array['caption'] = u(self.caption)
if self.parse_mode is not None:
array['parse_mode'] = u(self.parse_mode)
if self.reply_markup is not None:
array['reply_markup'] = self.reply_markup.to_array()
if self.input_message_content is not None:
array['input_message_content'] = self.input_message_content.to_array()
return array | Serializes this InlineQueryResultMpeg4Gif to a dictionary.
:return: dictionary representation of this object.
:rtype: dict |
19,350 | def is_bootstrapped(metadata):
fields = UNIHAN_FIELDS + DEFAULT_COLUMNS
if TABLE_NAME in metadata.tables.keys():
table = metadata.tables[TABLE_NAME]
if set(fields) == set(c.name for c in table.columns):
return True
else:
return False
else:
return False | Return True if cihai is correctly bootstrapped. |
19,351 | def removeBiosample(self, biosample):
q = models.Biosample.delete().where(
models.Biosample.id == biosample.getId())
q.execute() | Removes the specified biosample from this repository. |
19,352 | def main():
args = GetArgs()
if args.password:
password = args.password
else:
password = getpass.getpass(prompt='Enter password for host %s and user %s: ' % (args.host,args.user))
try:
vmnames = args.vmname
if not len(vmnames):
print("No virtual machine specified for poweron")
sys.exit()
context = None
if hasattr(ssl, '_create_unverified_context'):
context = ssl._create_unverified_context()
si = SmartConnect(host=args.host,
user=args.user,
pwd=password,
port=int(args.port),
sslContext=context)
if not si:
print("Cannot connect to specified host using specified username and password")
sys.exit()
atexit.register(Disconnect, si)
content = si.content
objView = content.viewManager.CreateContainerView(content.rootFolder,
[vim.VirtualMachine],
True)
vmList = objView.view
objView.Destroy()
tasks = [vm.PowerOn() for vm in vmList if vm.name in vmnames]
WaitForTasks(tasks, si)
print("Virtual Machine(s) have been powered on successfully")
except vmodl.MethodFault as e:
print("Caught vmodl fault : " + e.msg)
except Exception as e:
print("Caught Exception : " + str(e)) | Simple command-line program for powering on virtual machines on a system. |
19,353 | def t_op(self, s):
r
if s in (, , , , , , ):
self.add_token(, s)
elif s in (, , , , , , , , , , ,
):
self.add_token(, s)
elif s in self.UNOP2NAME.keys():
self.add_token(self.UNOP2NAME[s], s)
elif s in (, , , , , , , , ):
self.add_token(, s)
elif s == :
self.add_token(, s)
else:
print("Internal error: Unknown operator %s" % s)
raise SystemExit | r'\+=|-=|\*=|/=|%=|&=|\|=|^=|<<=|>>=|\*\*=|//=|//|==|<=|>=|<<|>>|[<>%^&+/=~-] |
19,354 | def whole_subnet_maker(ip_addr, cidr):
if ucast_ip(ip_addr, False) == False and mcast_ip(ip_addr, False) == False:
LOGGER.critical(.format(item=ip_addr))
raise ValueError("Not a good ipv4 address")
if not cidr_check(cidr, False):
LOGGER.critical(.format(item=cidr))
raise ValueError("Not a good CIDR value should be 0 to 32")
def subnet_corrector(octet, cidr):
cidr_int = int(cidr)
octet_int = int(octet)
if cidr_int >= 24:
cidr_int = __mask_conversion[cidr_int]["OCT4"]
elif cidr_int >= 16:
cidr_int = __mask_conversion[cidr_int]["OCT3"]
elif cidr_int >= 8:
cidr_int = __mask_conversion[cidr_int]["OCT2"]
elif cidr_int >= 1:
cidr_int = __mask_conversion[cidr_int]["OCT1"]
cidr_count = 0
cidr_v = 256 - cidr_int
cidr_2 = 256 - cidr_int
while cidr_count < 300:
if octet_int >= cidr_count and octet_int <= cidr_2:
cidr_int = cidr_count
cidr_count = cidr_2
cidr_2 = cidr_2 + cidr_v
return str(cidr_int)
ip_addr_split = ip_addr.split(".")
if int(cidr) >= 24:
octet = subnet_corrector(ip_addr_split[3], cidr)
completed = ip_addr_split[0] + "." + ip_addr_split[1] + "." + ip_addr_split[2] + "." + octet
return completed
elif int(cidr) >= 16:
octet = subnet_corrector(ip_addr_split[2], cidr)
completed = ip_addr_split[0] + "." + ip_addr_split[1] + "." + octet + ".0"
return completed
elif int(cidr) >= 8:
octet = subnet_corrector(ip_addr_split[1], cidr)
completed = ip_addr_split[0] + "." + octet + ".0.0"
return completed
elif int(cidr) >= 1:
octet = subnet_corrector(ip_addr_split[0], cidr)
completed = octet + ".0.0.0"
return completed
else:
return "0.0.0.0" | Function to return a whole subnet value from a IP address and CIDR pair
Args:
ip_addr: Unicast or Multicast IP address or subnet in the following format 192.168.1.1, 239.1.1.1
cidr: CIDR value of 0 to 32
Returns: returns the corrected whole subnet |
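For comparison, the standard library's `ipaddress` module computes the same "whole subnet" (network) address; a short sketch with arbitrary example addresses:

```python
import ipaddress

# strict=False allows host bits to be set in the input, as in the helper above.
net = ipaddress.ip_network("192.168.1.37/24", strict=False)
print(net.network_address)  # 192.168.1.0
print(ipaddress.ip_network("10.77.3.9/12", strict=False).network_address)  # 10.64.0.0
```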
19,355 | def spdhg_generic(x, f, g, A, tau, sigma, niter, **kwargs):
r
callback = kwargs.pop('callback', None)  # kwarg names restored to match the Other Parameters section below
if callback is not None and not callable(callback):
raise TypeError(
'`callback` {} is not callable'.format(callback))
y = kwargs.pop('y', None)
if y is None:
y = A.range.zero()
z = kwargs.pop('z', None)
if z is None:
if y.norm() == 0:
z = A.domain.zero()
else:
z = A.adjoint(y)
mu_g = kwargs.pop('mu_g', None)
if mu_g is None:
update_proximal_primal = False
else:
update_proximal_primal = True
theta = kwargs.pop('theta', 1)
extra = kwargs.pop('extra', None)
if extra is None:
extra = [1] * len(sigma)
fun_select = kwargs.pop('fun_select', None)
if fun_select is None:
def fun_select(x):
return [int(np.random.choice(len(A), 1, p=1 / len(A)))]
z_relax = z.copy()
dz = A.domain.element()
y_old = A.range.element()
proximal_dual_sigma = [fi.convex_conj.proximal(si)
for fi, si in zip(f, sigma)]
proximal_primal_tau = g.proximal(tau)
for k in range(niter):
selected = fun_select(k)
z_relax.lincomb(1, x, -tau, z_relax)
proximal_primal_tau(z_relax, out=x)
if update_proximal_primal:
theta = float(1 / np.sqrt(1 + 2 * mu_g * tau))
z_relax.assign(z)
for i in selected:
y_old[i].assign(y[i])
A[i](x, out=y[i])
y[i].lincomb(1, y_old[i], sigma[i], y[i])
proximal_dual_sigma[i](y[i], out=y[i])
y_old[i].lincomb(-1, y_old[i], 1, y[i])
A[i].adjoint(y_old[i], out=dz)
z += dz
z_relax.lincomb(1, z_relax, 1 + theta * extra[i], dz)
if update_proximal_primal:
for i in range(len(sigma)):
sigma[i] /= theta
tau *= theta
proximal_dual_sigma = [fi.convex_conj.proximal(si)
for fi, si in zip(f, sigma)]
proximal_primal_tau = g.proximal(tau)
if callback is not None:
callback([x, y]) | r"""Computes a saddle point with a stochastic PDHG.
This means, a solution (x*, y*), y* = (y*_1, ..., y*_n) such that
(x*, y*) in arg min_x max_y sum_i=1^n <y_i, A_i> - f*[i](y_i) + g(x)
where g : X -> IR_infty and f[i] : Y[i] -> IR_infty are convex, l.s.c. and
proper functionals. For this algorithm, they all may be non-smooth and no
strong convexity is assumed.
Parameters
----------
x : primal variable
This variable is both input and output of the method.
f : functions
Functionals Y[i] -> IR_infty that all have a convex conjugate with a
proximal operator, i.e.
f[i].convex_conj.proximal(sigma[i]) : Y[i] -> Y[i].
g : function
Functional X -> IR_infty that has a proximal operator, i.e.
g.proximal(tau) : X -> X.
A : functions
Operators A[i] : X -> Y[i] that possess adjoints: A[i].adjoint
tau : scalar / vector / matrix
Step size for primal variable. Note that the proximal operator of g
has to be well-defined for this input.
sigma : scalar
Scalar / vector / matrix used as step size for dual variable. Note that
the proximal operator related to f (see above) has to be well-defined
for this input.
niter : int
Number of iterations
Other Parameters
----------------
y : dual variable, optional
Dual variable is part of a product space. By default equals 0.
z : variable, optional
Adjoint of dual variable, z = A^* y. By default equals 0 if y = 0.
mu_g : scalar
Strong convexity constant of g.
theta : scalar
Global extrapolation factor.
extra: list
List of local extrapolation paramters for every index i. By default
extra_i = 1.
fun_select : function
Function that selects blocks at every iteration IN -> {1,...,n}. By
default this is serial uniform sampling, fun_select(k) selects an index
i \in {1,...,n} with probability 1/n.
callback : callable, optional
Function called with the current iterate after each iteration.
References
----------
[CERS2017] A. Chambolle, M. J. Ehrhardt, P. Richtarik and C.-B. Schoenlieb,
*Stochastic Primal-Dual Hybrid Gradient Algorithm with Arbitrary Sampling
and Imaging Applications*. ArXiv: http://arxiv.org/abs/1706.04957 (2017).
[E+2017] M. J. Ehrhardt, P. J. Markiewicz, P. Richtarik, J. Schott,
A. Chambolle and C.-B. Schoenlieb, *Faster PET reconstruction with a
stochastic primal-dual hybrid gradient method*. Wavelets and Sparsity XVII,
58 (2017) http://doi.org/10.1117/12.2272946. |
19,356 | def overlap_bbox_and_point(bbox, xp, yp):
cx, cy = get_midpoint(bbox)
dir_x = np.sign(cx-xp)
dir_y = np.sign(cy-yp)
if dir_x == -1:
dx = xp - bbox.xmax
elif dir_x == 1:
dx = xp - bbox.xmin
else:
dx = 0
if dir_y == -1:
dy = yp - bbox.ymax
elif dir_y == 1:
dy = yp - bbox.ymin
else:
dy = 0
return dx, dy | Given a bbox that contains a given point, return the (x, y) displacement
necessary to make the bbox not overlap the point. |
19,357 | def envs(backend=None, sources=False):
fileserver = salt.fileserver.Fileserver(__opts__)
return sorted(fileserver.envs(back=backend, sources=sources)) | Return the available fileserver environments. If no backend is provided,
then the environments for all configured backends will be returned.
backend
Narrow fileserver backends to a subset of the enabled ones.
.. versionchanged:: 2015.5.0
If all passed backends start with a minus sign (``-``), then these
backends will be excluded from the enabled backends. However, if
there is a mix of backends with and without a minus sign (ex:
``backend=-roots,git``) then the ones starting with a minus
sign will be disregarded.
Additionally, fileserver backends can now be passed as a
comma-separated list. In earlier versions, they needed to be passed
as a python list (ex: ``backend="['roots', 'git']"``)
CLI Example:
.. code-block:: bash
salt-run fileserver.envs
salt-run fileserver.envs backend=roots,git
salt-run fileserver.envs git |
19,358 | def count(self):
if self.status:
PyFunceble.INTERN["counter"]["number"]["tested"] += 1
if (
self.status.lower() in PyFunceble.STATUS["list"]["up"]
or self.status.lower() in PyFunceble.STATUS["list"]["valid"]
):
PyFunceble.INTERN["counter"]["number"]["up"] += 1
elif self.status.lower() in PyFunceble.STATUS["list"]["down"]:
PyFunceble.INTERN["counter"]["number"]["down"] += 1
else:
PyFunceble.INTERN["counter"]["number"]["invalid"] += 1 | Count the number of domain for each status. |
19,359 | def _ctorCmprRange(self, vals):
if not isinstance(vals, (list, tuple)):
raise s_exc.BadCmprValu(valu=vals, cmpr='range=')
if len(vals) != 2:
raise s_exc.BadCmprValu(valu=vals, cmpr='range=')
tick, tock = self.getTickTock(vals)
if tick > tock:
def cmpr(valu):
return False
return cmpr
def cmpr(valu):
return tick <= valu <= tock
return cmpr | Override default *range= handler to account for relative computation. |
19,360 | def create_superuser(self, email, password, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')  # messages follow the stock Django manager wording (assumed)
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(email, password, **extra_fields) | Save new User with is_staff and is_superuser set to True |
19,361 | def apply(self, spectrum, plot=False):
f_units = 1.
if hasattr(spectrum[0], 'unit'):
spectrum[0] = spectrum[0].to(self.wave_units)
if hasattr(spectrum[1], 'unit'):
spectrum[1] = spectrum[1].to(self.flux_units)
f_units = self.flux_units
if len(spectrum) >= 3 and hasattr(spectrum[2], 'unit'):
spectrum[2] = spectrum[2].to(self.flux_units)
wav, flx, *err = [np.asarray(i) for i in spectrum]
if len(err) == 0:
err = np.ones_like(flx)*np.nan
unc = False
else:
err = err[0]
unc = True
if len(flx.shape) == 1:
flx = np.expand_dims(flx, axis=0)
err = np.expand_dims(err, axis=0)
rsr = np.copy(self.rsr)
filtered_flx = np.zeros((rsr.shape[0], flx.shape[0], rsr.shape[2]))
filtered_err = np.zeros_like(filtered_flx)
for i, bn in enumerate(rsr):
for j, (f, e) in enumerate(zip(flx, err)):
filtered_flx[i][j] = np.interp(bn[0], wav, f, left=np.nan, right=np.nan)*bn[1]
filtered_err[i][j] = np.interp(bn[0], wav, e, left=np.nan, right=np.nan)*bn[1]
if unc:
filtered_err += filtered_flx*self.systematics
if plot:
COLORS = color_gen()
xlab = .format(self.wave_units)
ylab = .format(self.flux_units)
fig = figure(title=self.filterID, x_axis_label=xlab, y_axis_label=ylab)
fig.line(wav, flx[0], legend=, color=)
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(flx-err, (flx+err)[::-1])
fig.patch(band_x, band_y, color=, fill_alpha=0.1, line_alpha=0)
for wav, bn, bne in zip(self.wave, filtered_flx, filtered_err):
color = next(COLORS)
fig.line(wav, bn[0], color=color)
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(bn[0]-bne[0], (bn[0]+bne[0])[::-1])
fig.patch(band_x, band_y, color=color, fill_alpha=0.1, line_alpha=0)
show(fig)
return filtered_flx.squeeze()*f_units, filtered_err.squeeze()*f_units | Apply the filter to the given [W, F], or [W, F, E] spectrum
Parameters
----------
spectrum: array-like
The wavelength [um] and flux of the spectrum
to apply the filter to
plot: bool
Plot the original and filtered spectrum
Returns
-------
np.ndarray
The filtered spectrum and error |
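The per-band filtering above boils down to interpolating the spectrum onto each band's wavelength grid and multiplying by the throughput. A toy sketch with made-up data:

```python
import numpy as np

# Toy spectrum and a toy relative spectral response curve (both made up).
wav = np.linspace(0.5, 2.5, 200)         # wavelength, um
flx = np.exp(-(wav - 1.5) ** 2 / 0.1)    # arbitrary flux

band_wav = np.linspace(1.1, 1.4, 50)     # filter wavelength grid, um
band_thr = np.ones_like(band_wav) * 0.8  # flat 80% throughput

filtered = np.interp(band_wav, wav, flx, left=np.nan, right=np.nan) * band_thr
print(filtered.shape)       # (50,)
print(np.nanmax(filtered))  # peak filtered flux
```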
19,362 | async def movehere(self, channel):
self.logger.debug("movehere command")
await self.embed.delete()
self.embed.channel = channel
await self.embed.send()
await self.add_reactions()
self.statuslog.info("Moved to front") | Moves the embed message to a new channel; can also be used to move the musicplayer to the front
Args:
channel (discord.Channel): The channel to move to |
19,363 | def norm_slash(name):
if isinstance(name, str):
return name.replace('/', "\\") if not is_case_sensitive() else name
else:
return name.replace(b'/', b"\\") if not is_case_sensitive() else name
19,364 | def transliterate(table, text):
if table == :
return text.translate(SR_LATN_TABLE)
elif table == :
return text.translate(AZ_LATN_TABLE)
else:
raise ValueError("Unknown transliteration table: {!r}".format(table)) | Transliterate text according to one of the tables above.
`table` chooses the table. It looks like a language code but comes from a
very restricted set:
- 'sr-Latn' means to convert Serbian, which may be in Cyrillic, into the
Latin alphabet.
- 'az-Latn' means the same for Azerbaijani Cyrillic to Latn. |
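`str.translate` with a character-mapping table does the per-character substitution; the real `SR_LATN_TABLE`/`AZ_LATN_TABLE` are far larger than this toy table:

```python
# A tiny made-up Cyrillic-to-Latin table, just to show the mechanism.
TOY_TABLE = str.maketrans({'б': 'b', 'г': 'g', 'д': 'd'})
print("бгд".translate(TOY_TABLE))  # 'bgd'
```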
19,365 | def URIUnescapeString(str, len, target):
ret = libxml2mod.xmlURIUnescapeString(str, len, target)
return ret | Unescaping routine, but does not check that the string is
an URI. The output is a direct unsigned char translation of
%XX values (no encoding) Note that the length of the result
can only be smaller or same size as the input string. |
19,366 | def normalize_name(decl):
if decl.cache.normalized_name is None:
decl.cache.normalized_name = normalize(decl.name)
return decl.cache.normalized_name | Cached variant of normalize
Args:
decl (declaration.declaration_t): the declaration
Returns:
str: normalized name |
19,367 | def legal_graph(graph):
descriptor = graph.extract_descriptor()
skips = descriptor.skip_connections
if len(skips) != len(set(skips)):
return False
return True | judge if a graph is legal or not. |
19,368 | def GetNBits(value, nbits):
if isinstance(value, int):
return Operators.EXTRACT(value, 0, nbits)
elif isinstance(value, BitVec):
if value.size < nbits:
return Operators.ZEXTEND(value, nbits)
else:
return Operators.EXTRACT(value, 0, nbits) | Get the first `nbits` from `value`.
:param value: Source value from which to extract
:type value: int or long or BitVec
:param int nbits: How many bits to extract
:return: Low `nbits` bits of `value`.
:rtype int or long or BitVec |
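For plain integers the low-`n`-bits extraction is just a mask; the BitVec branch delegates to symbolic EXTRACT/ZEXTEND operators instead. A concrete-integer sketch:

```python
# Extract the low nbits of a plain integer with a mask.
def low_bits(value, nbits):
    return value & ((1 << nbits) - 1)

print(low_bits(0xDEADBEEF, 8))  # 0xEF -> 239
print(low_bits(0b101101, 4))    # 0b1101 -> 13
```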
19,369 | def state(self, context):
state = None
for line in self.read(context, [
"status",
context.resolve(self.__name)
]):
if line[2] == "state":
state = line[3].strip()
return state | Get instance state.
:param resort.engine.execution.Context context:
Current execution context.
:rtype:
str
:return:
Instance state name. |
19,370 | def _validate_compression_params(self, img_array, cparams, colorspace):
self._validate_j2k_colorspace(cparams, colorspace)
self._validate_codeblock_size(cparams)
self._validate_precinct_size(cparams)
self._validate_image_rank(img_array)
self._validate_image_datatype(img_array) | Check that the compression parameters are valid.
Parameters
----------
img_array : ndarray
Image data to be written to file.
cparams : CompressionParametersType(ctypes.Structure)
Corresponds to cparameters_t type in openjp2 headers. |
19,371 | def iterate(self, params, repetition, iteration):
try:
print("\nStarting iteration",iteration)
t1 = time.time()
ret = {}
if self.lr_scheduler is not None:
if params["lr_scheduler"] != "ReduceLROnPlateau":
self.lr_scheduler.step()
self.train(params, epoch=iteration)
if self.validation_loader is not None:
validation = self.test(params, self.validation_loader)
if params["lr_scheduler"] == "ReduceLROnPlateau":
self.lr_scheduler.step(validation["test_loss"])
ret["validation"] = validation
print("Validation: Test error=", validation["testerror"],
"entropy=", validation["entropy"])
if (params["test_noise_every_epoch"] or
iteration == params["iterations"] - 1):
ret.update(self.runNoiseTests(params))
print("Noise test results: totalCorrect=", ret["totalCorrect"],
"Test error=", ret["testerror"], ", entropy=", ret["entropy"])
if ret["totalCorrect"] > 100000 and ret["testerror"] > 98.3:
print("*******")
print(params)
ret.update({"elapsedTime": time.time() - self.startTime})
ret.update({"learningRate": self.learningRate if self.lr_scheduler is None
else self.lr_scheduler.get_lr()})
print("Iteration time= {0:.3f} secs, "
"total elapsed time= {1:.3f} mins".format(
time.time() - t1,ret["elapsedTime"]/60.0))
except Exception as e:
tb = sys.exc_info()[2]
traceback.print_tb(tb)
raise RuntimeError("Something went wrong in iterate", e)
return ret | Called once for each training iteration (== epoch here). |
19,372 | def crud_mutation_name(action, model):
model_string = get_model_string(model)
model_string = model_string[0].upper() + model_string[1:]
return "{}{}".format(action, model_string) | This function returns the name of a mutation that performs the specified
crud action on the given model service |
19,373 | def current(instance=True):
current = getattr(IOLoop._current, "instance", None)
if current is None and instance:
current = None
if current is None:
if sys.platform == 'darwin':  # assumed: the kqueue loop is the macOS/BSD branch
from .platforms import KQueueIOLoop
current = KQueueIOLoop()
else:
from .platforms import EPollIOLoop
current = EPollIOLoop()
current.initialize()
if IOLoop._current.instance is not current:
raise RuntimeError("new IOLoop did not become current")
return current | Returns the current thread's `IOLoop`.
If an `IOLoop` is currently running or has been marked as
current by `make_current`, returns that instance. If there is
no current `IOLoop` and ``instance`` is true, creates one.
.. versionchanged:: 4.1
Added ``instance`` argument to control the fallback to
`IOLoop.instance()`.
.. versionchanged:: 5.0
The ``instance`` argument now controls whether an `IOLoop`
is created automatically when there is none, instead of
whether we fall back to `IOLoop.instance()` (which is now
an alias for this method) |
19,374 | def time_zone_by_name(self, hostname):
addr = self._gethostbyname(hostname)
return self.time_zone_by_addr(addr) | Returns time zone in tzdata format (e.g. America/New_York or Europe/Paris)
:arg hostname: Hostname (e.g. example.com) |
19,375 | def get_classification_node(self, project, structure_group, path=None, depth=None):
route_values = {}
if project is not None:
route_values[] = self._serialize.url(, project, )
if structure_group is not None:
route_values[] = self._serialize.url(, structure_group, )
if path is not None:
route_values[] = self._serialize.url(, path, )
query_parameters = {}
if depth is not None:
query_parameters[] = self._serialize.query(, depth, )
response = self._send(http_method=,
location_id=,
version=,
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize(, response) | GetClassificationNode.
Gets the classification node for a given node path.
:param str project: Project ID or project name
:param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
:param str path: Path of the classification node.
:param int depth: Depth of children to fetch.
:rtype: :class:`<WorkItemClassificationNode> <azure.devops.v5_0.work_item_tracking.models.WorkItemClassificationNode>` |
19,376 | def calc_geo_dist_vincenty(node_source, node_target):
branch_detour_factor = cfg_ding0.get('assumptions', 'branch_detour_factor')  # config section/key assumed from the variable name and config_calc.cfg
branch_length = branch_detour_factor * vincenty((node_source.geo_data.y, node_source.geo_data.x),
(node_target.geo_data.y, node_target.geo_data.x)).m
return branch_length | Calculates the geodesic distance between `node_source` and `node_target`
incorporating the detour factor specified in :file:`ding0/ding0/config/config_calc.cfg`.
Parameters
----------
node_source: LVStationDing0, GeneratorDing0, or CableDistributorDing0
source node, member of GridDing0._graph
node_target: LVStationDing0, GeneratorDing0, or CableDistributorDing0
target node, member of GridDing0._graph
Returns
-------
:any:`float`
Distance in m |
19,377 | def extract_key_value(line, environ):
segments = line.split("=", 1)
if len(segments) < 2:
return None
key, value = segments
value = value[1:-1]
elif value[0] == and _DQUOTE_RE.match(value):
template = value[1:-1]
value = template.format(**environ)
key = key.strip()
value = value.strip()
return key, value | Return key, value from given line if present, else return None. |
19,378 | def _adjust_legend(self, overlay, axis):
legend_data = []
dimensions = overlay.kdims
title = .join([d.name for d in dimensions])
for key, subplot in self.subplots.items():
element = overlay.data.get(key, False)
if not subplot.show_legend or not element: continue
title = .join([d.name for d in dimensions])
handle = subplot.traverse(lambda p: p.handles[],
[lambda p: in p.handles])
if isinstance(overlay, NdOverlay):
key = (dim.pprint_value(k) for k, dim in zip(key, dimensions))
label = .join([str(k) + dim.unit if dim.unit else str(k) for dim, k in
zip(dimensions, key)])
if handle:
legend_data.append((handle, label))
else:
if isinstance(subplot, OverlayPlot):
legend_data += subplot.handles.get(, {}).items()
elif element.label and handle:
legend_data.append((handle, element.label))
all_handles, all_labels = list(zip(*legend_data)) if legend_data else ([], [])
data = OrderedDict()
used_labels = []
for handle, label in zip(all_handles, all_labels):
if isinstance(handle, list): handle = tuple(handle)
if handle and (handle not in data) and label and label not in used_labels:
data[handle] = label
used_labels.append(label)
if (not len(set(data.values())) > 0) or not self.show_legend:
legend = axis.get_legend()
if legend:
legend.set_visible(False)
else:
leg_spec = self.legend_specs[self.legend_position]
if self.legend_cols: leg_spec[] = self.legend_cols
leg = axis.legend(list(data.keys()), list(data.values()),
title=title, scatterpoints=1,
**dict(leg_spec, **self._fontsize()))
title_fontsize = self._fontsize()
if title_fontsize:
leg.get_title().set_fontsize(title_fontsize[])
frame = leg.get_frame()
frame.set_facecolor()
frame.set_edgecolor()
frame.set_linewidth()
leg.set_zorder(10e6)
self.handles[] = leg
self.handles[].append(leg)
self.handles[] = data | Accumulate the legend handles and labels for all subplots
and set up the legend |
19,379 | def intervaljoin(left, right, lstart='start', lstop='stop', rstart='start',
rstop='stop', lkey=None, rkey=None, include_stop=False,
lprefix=None, rprefix=None):
assert (lkey is None) == (rkey is None), \
'facet key field must be provided for both or neither table'
return IntervalJoinView(left, right, lstart=lstart, lstop=lstop,
rstart=rstart, rstop=rstop, lkey=lkey,
rkey=rkey, include_stop=include_stop,
lprefix=lprefix, rprefix=rprefix) | Join two tables by overlapping intervals. E.g.::
>>> import petl as etl
>>> left = [['begin', 'end', 'quux'],
... [1, 2, 'a'],
... [2, 4, 'b'],
... [2, 5, 'c'],
... [9, 14, 'd'],
... [1, 1, 'e'],
... [10, 10, 'f']]
>>> right = [['start', 'stop', 'value'],
... [1, 4, 'foo'],
... [3, 7, 'bar'],
... [4, 9, 'baz']]
>>> table1 = etl.intervaljoin(left, right,
... lstart='begin', lstop='end',
... rstart='start', rstop='stop')
>>> table1.lookall()
+-------+-----+------+-------+------+-------+
| begin | end | quux | start | stop | value |
+=======+=====+======+=======+======+=======+
| 1 | 2 | 'a' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 4 | 'b' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 4 | 'b' | 3 | 7 | 'bar' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 3 | 7 | 'bar' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 4 | 9 | 'baz' |
+-------+-----+------+-------+------+-------+
>>> # include stop coordinate in intervals
... table2 = etl.intervaljoin(left, right,
... lstart='begin', lstop='end',
... rstart='start', rstop='stop',
... include_stop=True)
>>> table2.lookall()
+-------+-----+------+-------+------+-------+
| begin | end | quux | start | stop | value |
+=======+=====+======+=======+======+=======+
| 1 | 2 | 'a' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 4 | 'b' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 4 | 'b' | 3 | 7 | 'bar' |
+-------+-----+------+-------+------+-------+
| 2 | 4 | 'b' | 4 | 9 | 'baz' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 3 | 7 | 'bar' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 4 | 9 | 'baz' |
+-------+-----+------+-------+------+-------+
| 9 | 14 | 'd' | 4 | 9 | 'baz' |
+-------+-----+------+-------+------+-------+
| 1 | 1 | 'e' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
Note start coordinates are included and stop coordinates are excluded
from the interval. Use the `include_stop` keyword argument to include the
upper bound of the interval when finding overlaps.
An additional key comparison can be made, e.g.::
>>> import petl as etl
>>> left = (('fruit', 'begin', 'end'),
... ('apple', 1, 2),
... ('apple', 2, 4),
... ('apple', 2, 5),
... ('orange', 2, 5),
... ('orange', 9, 14),
... ('orange', 19, 140),
... ('apple', 1, 1))
>>> right = (('type', 'start', 'stop', 'value'),
... ('apple', 1, 4, 'foo'),
... ('apple', 3, 7, 'bar'),
... ('orange', 4, 9, 'baz'))
>>> table3 = etl.intervaljoin(left, right,
... lstart='begin', lstop='end', lkey='fruit',
... rstart='start', rstop='stop', rkey='type')
>>> table3.lookall()
+----------+-------+-----+----------+-------+------+-------+
| fruit | begin | end | type | start | stop | value |
+==========+=======+=====+==========+=======+======+=======+
| 'apple' | 1 | 2 | 'apple' | 1 | 4 | 'foo' |
+----------+-------+-----+----------+-------+------+-------+
| 'apple' | 2 | 4 | 'apple' | 1 | 4 | 'foo' |
+----------+-------+-----+----------+-------+------+-------+
| 'apple' | 2 | 4 | 'apple' | 3 | 7 | 'bar' |
+----------+-------+-----+----------+-------+------+-------+
| 'apple' | 2 | 5 | 'apple' | 1 | 4 | 'foo' |
+----------+-------+-----+----------+-------+------+-------+
| 'apple' | 2 | 5 | 'apple' | 3 | 7 | 'bar' |
+----------+-------+-----+----------+-------+------+-------+
| 'orange' | 2 | 5 | 'orange' | 4 | 9 | 'baz' |
+----------+-------+-----+----------+-------+------+-------+ |
19,380 | def parse(self, rrstr):
if self._initialized:
raise pycdlibexception.PyCdlibInternalError()
self.parent_log_block_num = parent_log_block_num_le
self._initialized = True | Parse a Rock Ridge Parent Link record out of a string.
Parameters:
rrstr - The string to parse the record out of.
Returns:
Nothing. |
19,381 | def promote(self, cart_name):
cart = juicer.common.Cart.Cart(cart_name=cart_name, autoload=True, autosync=True)
old_env = cart.current_env
cart.current_env = juicer.utils.get_next_environment(cart.current_env)
filename = item.path.split()[-1]
item.update( % (juicer.utils.pulp_repo_path(con, % (repo, cart.current_env)), filename))
juicer.utils.Log.log_info("Package association calls were accepted. Trusting that your packages existed in %s" % old_env)
cart.save()
self.publish(cart)
else:
juicer.utils.Log.log_debug("Syncing down rpms...")
cart.sync_remotes()
self.sign_cart_for_env_maybe(cart, cart.current_env)
juicer.utils.Log.log_info("Promoting %s from %s to %s" %
(cart_name, old_env, cart.current_env))
for repo in cart.repos():
juicer.utils.Log.log_debug("Promoting %s to %s in %s" %
(cart[repo], repo, cart.current_env))
self.upload(cart.current_env, cart) | `name` - name of cart
Promote a cart from its current environment to the next in the chain. |
19,382 | def size_of_varint(value):
value = (value << 1) ^ (value >> 63)
if value <= 0x7f:
return 1
if value <= 0x3fff:
return 2
if value <= 0x1fffff:
return 3
if value <= 0xfffffff:
return 4
if value <= 0x7ffffffff:
return 5
if value <= 0x3ffffffffff:
return 6
if value <= 0x1ffffffffffff:
return 7
if value <= 0xffffffffffffff:
return 8
if value <= 0x7fffffffffffffff:
return 9
return 10 | Number of bytes needed to encode an integer in variable-length format. |
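A quick sanity check of the sizing logic above (plain Python, no dependencies); note that the zig-zag step maps small negative values to small unsigned ones, so they still encode compactly:

assert size_of_varint(0) == 1       # zig-zag(0) = 0, fits in one byte
assert size_of_varint(-1) == 1      # zig-zag(-1) = 1, still one byte
assert size_of_varint(300) == 2     # zig-zag(300) = 600 <= 0x3fff
assert size_of_varint(-2**62) == 9  # close to the 64-bit limit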
19,383 | def _get_linewise_report(self):
d = defaultdict(list)
for error, lines in self.errors.items():
for line_num in lines:
d[line_num].append(error)
return '\n'.join([
'{}: {}'.format(line, error.string)
for line in sorted(d.keys())
for error in d[line]]) | Returns a report each line of which comprises a pair of an input line
and an error. Unlike in the standard report, errors will appear as many
times as they occur.
Helper for the get_report method. |
19,384 | def frame_parser(version=None, kind=0, extensions=None, protocols=None):
version = get_version(version)
return FrameParser(version, kind, ProtocolError, close_codes=CLOSE_CODES) | Create a new :class:`FrameParser` instance.
:param version: protocol version, the default is 13
:param kind: the kind of parser, and integer between 0 and 3 (check the
:class:`FrameParser` documentation for details)
:param extensions: not used at the moment
:param protocols: not used at the moment
:param pyparser: if ``True`` (default ``False``) uses the python frame
parser implementation rather than the much faster cython
implementation. |
19,385 | def read_message_bytes_from_pipe(pipe_handle):
overlapped = OVERLAPPED()
overlapped.hEvent = create_event()
try:
buff = create_string_buffer(BUFSIZE + 1)
c_read = DWORD()
success = windll.kernel32.ReadFile(
pipe_handle,
buff,
DWORD(BUFSIZE),
byref(c_read),
byref(overlapped))
if success:
buff[c_read.value] = b'\0'
raise Return(buff.value)
error_code = windll.kernel32.GetLastError()
if error_code == ERROR_IO_PENDING:
yield From(wait_for_event(overlapped.hEvent))
success = windll.kernel32.GetOverlappedResult(
pipe_handle,
byref(overlapped),
byref(c_read),
BOOL(False))
if success:
buff[c_read.value] = b'\0'
raise Return(buff.value)
else:
error_code = windll.kernel32.GetLastError()
if error_code == ERROR_BROKEN_PIPE:
raise BrokenPipeError
elif error_code == ERROR_MORE_DATA:
more_data = yield From(read_message_bytes_from_pipe(pipe_handle))
raise Return(buff.value + more_data)
else:
raise Exception(
% error_code)
elif error_code == ERROR_BROKEN_PIPE:
raise BrokenPipeError
elif error_code == ERROR_MORE_DATA:
more_data = yield From(read_message_bytes_from_pipe(pipe_handle))
raise Return(buff.value + more_data)
else:
raise Exception( % error_code)
finally:
windll.kernel32.CloseHandle(overlapped.hEvent) | (coroutine)
Read message from this pipe. Return bytes. |
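A hedged consumer sketch in the same trollius-style coroutine idiom; handle_connection and process_message are assumed names, not part of the original module:

def handle_connection(pipe_handle):             # assumed caller, also a coroutine
    while True:
        try:
            message = yield From(read_message_bytes_from_pipe(pipe_handle))
        except BrokenPipeError:
            break                               # client disconnected; stop reading
        process_message(message)                # assumed message handler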
19,386 | def setDragData(self, data, x=None, y=None):
self._dragData[(x, y)] = data | Sets the drag data for this chart item to the inputed data.
:param data | <QMimeData> || None |
19,387 | def GetRandomDatetime():
seconds_offset = random.randint(0, 60 * 60 * 24 * 7)
dt = datetime.today() + timedelta(seconds=seconds_offset)
return dt.replace(second=0, microsecond=0) | Return a datetime in the next week. |
19,388 | def example(script, explain, contents, requirements, output, outputfmt, details):
blank()
cprint(script.upper(), "yellow")
cprint(.join(["=" for i in range(70)]) + , "yellow")
cprint("DETAILS", "blue")
std(explain + )
cprint(requirements, "red")
cprint(output, "green")
blank()
if details != "":
std(details)
blank()
cprint("OUTPUT FORMAT", "blue")
std(outputfmt)
blank()
cprint("EXAMPLES", "blue")
for i in range(len(contents)):
pre, code, post = contents[i]
std("{}) {}".format(i + 1, pre))
cprint(" " + code, "cyan")
if post != "":
std( + post)
blank() | Prints the example help for the script. |
19,389 | def connect_delete_namespaced_service_proxy(self, name, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.connect_delete_namespaced_service_proxy_with_http_info(name, namespace, **kwargs)
else:
(data) = self.connect_delete_namespaced_service_proxy_with_http_info(name, namespace, **kwargs)
return data | connect_delete_namespaced_service_proxy # noqa: E501
connect DELETE requests to proxy of Service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_delete_namespaced_service_proxy(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ServiceProxyOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
:return: str
If the method is called asynchronously,
returns the request thread. |
19,390 | def search_node_namespace_names(graph, query, namespace):
node_predicates = [
namespace_inclusion_builder(namespace),
build_node_name_search(query)
]
return filter_nodes(graph, node_predicates) | Search for nodes with the given namespace(s) and whose names contain a given string(s).
:param pybel.BELGraph graph: A BEL graph
:param query: The search query
:type query: str or iter[str]
:param namespace: The namespace(s) to filter
:type namespace: str or iter[str]
:return: An iterator over nodes whose names match the search query
:rtype: iter |
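A short usage sketch (the graph variable and the gene names are assumptions): filter to one namespace while searching several name substrings at once:

hits = list(search_node_namespace_names(graph, query=['TP53', 'MDM2'], namespace='HGNC'))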
19,391 | def simxLoadModel(clientID, modelPathAndName, options, operationMode):
baseHandle = ct.c_int()
if (sys.version_info[0] == 3) and (type(modelPathAndName) is str):
modelPathAndName=modelPathAndName.encode()
return c_LoadModel(clientID, modelPathAndName, options, ct.byref(baseHandle), operationMode), baseHandle.value | Please have a look at the function description/documentation in the V-REP user manual |
19,392 | def acl_show(self, msg, args):
name = args[0] if len(args) > 0 else None
if name is None:
return "%s: The following ACLs are defined: %s" % (msg.user, ', '.join(self._acl.keys()))
if name not in self._acl:
return "Sorry, couldn%s\n%s, allow, deny'])
]) | Show current allow and deny blocks for the given acl. |
19,393 | def set_scale(self, xscale=None, yscale=None, zscale=None, reset_camera=True):
self.renderer.set_scale(xscale, yscale, zscale, reset_camera) | Scale all the datasets in the scene of the active renderer.
Scaling is performed independently on the X, Y and Z axes.
A scale of zero is illegal and will be replaced with one.
Parameters
----------
xscale : float, optional
Scaling of the x axis. Must be greater than zero.
yscale : float, optional
Scaling of the y axis. Must be greater than zero.
zscale : float, optional
Scaling of the z axis. Must be greater than zero.
reset_camera : bool, optional
Resets camera so all actors can be seen. |
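A usage sketch, assuming a plotter object that exposes this method: exaggerate the z axis, then return to isotropic scaling:

plotter.set_scale(zscale=5.0)       # stretch z by 5x; x and y keep their current scale
plotter.set_scale(1.0, 1.0, 1.0)    # back to an unscaled scene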
19,394 | def generate_message_doc(message_descriptor, locations, path, name_prefix=''):
prefixed_name = name_prefix + message_descriptor.name
print(make_subsection(prefixed_name))
location = locations[path]
if location.HasField('leading_comments'):
print(textwrap.dedent(location.leading_comments))
row_tuples = []
for field_index, field in enumerate(message_descriptor.field):
field_location = locations[path + (2, field_index)]
if field.type not in [11, 14]:
type_str = TYPE_TO_STR[field.type]
else:
type_str = make_link(field.type_name.lstrip('.'))
row_tuples.append((
make_code(field.name),
field.number,
type_str,
LABEL_TO_STR[field.label],
textwrap.fill(get_comment_from_location(field_location), INFINITY),
))
print_table((, , , , ),
row_tuples)
nested_types = enumerate(message_descriptor.nested_type)
for index, nested_message_desc in nested_types:
generate_message_doc(nested_message_desc, locations,
path + (3, index),
name_prefix=prefixed_name + '.')
for index, nested_enum_desc in enumerate(message_descriptor.enum_type):
generate_enum_doc(nested_enum_desc, locations, path + (4, index),
name_prefix=prefixed_name + '.') | Generate docs for message and nested messages and enums.
Args:
message_descriptor: descriptor_pb2.DescriptorProto instance for message
to generate docs for.
locations: Dictionary of location paths tuples to
descriptor_pb2.SourceCodeInfo.Location instances.
path: Path tuple to the message definition.
name_prefix: Optional prefix for this message's name. |
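A hedged driver sketch showing how the function above might be invoked for each top-level message of a FileDescriptorProto; the (4, index) path follows descriptor.proto, where field 4 of FileDescriptorProto is message_type (file_proto is an assumed variable):

locations = {tuple(loc.path): loc
             for loc in file_proto.source_code_info.location}
for index, message_desc in enumerate(file_proto.message_type):
    generate_message_doc(message_desc, locations, path=(4, index))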
19,395 | def check_application_state(self, request, callback):
"Check optional state parameter."
stored = request.session.get(self.session_key, None)
returned = request.GET.get('state', None)
check = False
if stored is not None:
if returned is not None:
check = constant_time_compare(stored, returned)
else:
logger.error()
else:
logger.error()
return check | Check optional state parameter. |
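A hedged sketch of the counterpart that makes this check succeed: before redirecting to the provider, the view stores a random token under the same session key and sends it as the state parameter (get_random_string and authorize_url are assumptions):

state = get_random_string(32)                               # assumed helper
request.session[self.session_key] = state                   # stored copy checked above
redirect_url = '{}?state={}'.format(authorize_url, state)   # provider echoes it back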
19,396 | def roll_out_and_store(self, batch_info):
self.model.train()
if self.env_roller.is_ready_for_sampling():
rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps).to_device(self.device)
batch_info['frames'] = rollout.frames()
batch_info['episode_infos'] = rollout.episode_information()
else:
frames = 0
episode_infos = []
with tqdm.tqdm(desc="Populating memory", total=self.env_roller.initial_memory_size_hint()) as pbar:
while not self.env_roller.is_ready_for_sampling():
rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps).to_device(self.device)
new_frames = rollout.frames()
frames += new_frames
episode_infos.extend(rollout.episode_information())
pbar.update(new_frames)
batch_info['frames'] = frames
batch_info['episode_infos'] = episode_infos | Roll out environment and store result in the replay buffer
19,397 | def get_cookie_jar(self):
cookie_file = self._get_cookie_file()
cookie_jar = LWPCookieJar(cookie_file)
if os.path.exists(cookie_file):
cookie_jar.load()
else:
safe_mkdir_for(cookie_file)
with self._lock:
cookie_jar.save()
os.chmod(cookie_file, 0o600)
return cookie_jar | Returns our cookie jar. |
19,398 | def save_to_internal(self, data):
if self.filetype == "pickle":
pickle.dump(data, open(self.location_internal, "wb"))
elif self.filetype == "hickle":
import hickle
hickle.dump(data, open(self.location_internal, "wb"))
else:
raise ValueError(
"Invalid filetype {} (must be {} or {})".format(
self.filetype, "pickle", "hickle"
)
) | save |
19,399 | def make_int(value, missing=-1):
if isinstance(value, six.string_types):
if not value.strip():
return missing
elif value is None:
return missing
return int(value) | Convert string value to long, '' to missing |
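A few illustrative calls (values chosen arbitrarily):

make_int('42')             # -> 42
make_int('   ')            # -> -1, the default missing value
make_int(None, missing=0)  # -> 0
make_int(7.0)              # -> 7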