Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k)
---|---|---|
15,500 | def getitem(self, index, context=None):
return _container_getitem(self, self.elts, index, context=context) | Get an item from this node.
:param index: The node to use as a subscript index.
:type index: Const or Slice |
15,501 | def mlp(feature, hparams, name="mlp"):
with tf.variable_scope(name, "mlp", values=[feature]):
num_mlp_layers = hparams.num_mlp_layers
mlp_size = hparams.mlp_size
for _ in range(num_mlp_layers):
feature = common_layers.dense(feature, mlp_size, activation=None)
utils.collect_named_outputs("norms", "mlp_feature",
tf.norm(feature, axis=-1))
feature = common_layers.layer_norm(feature)
feature = tf.nn.relu(feature)
feature = tf.nn.dropout(feature, keep_prob=1.-hparams.dropout)
return feature | Multi layer perceptron with dropout and relu activation. |
15,502 | def colorize(occurence,maxoccurence,minoccurence):
if occurence == maxoccurence:
color = (255,0,0)
elif occurence == minoccurence:
color = (0,0,255)
else:
color = (int((float(occurence)/maxoccurence*255)),0,int(float(minoccurence)/occurence*255))
return color | A formula for determining colors. |
15,503 | def find_usage(self):
logger.debug("Checking usage for service %s", self.service_name)
self.connect()
for lim in self.limits.values():
lim._reset_usage()
self._find_usage_applications()
self._find_usage_application_versions()
self._find_usage_environments()
self._have_usage = True
logger.debug("Done checking usage.") | Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`. |
15,504 | def to_table(self, sort_key="wall_time", stop=None):
table = [list(AbinitTimerSection.FIELDS), ]
ord_sections = self.order_sections(sort_key)
if stop is not None:
ord_sections = ord_sections[:stop]
for osect in ord_sections:
row = [str(item) for item in osect.to_tuple()]
table.append(row)
return table | Return a table (list of lists) with timer data |
15,505 | def _on_set_auth(self, sock, token):
self.log.info(f"Token received: {token}")
sock.setAuthtoken(token) | Set Auth request received from websocket |
15,506 | def scale(self, new_max_value=1):
f = new_max_value / self.max
return ColorRGB(self.r * f,
self.g * f,
self.b * f,
max_value=new_max_value) | Scale R, G and B parameters
:param new_max_value: how much to scale
:return: a new ColorRGB instance |
15,507 | def _front_delta(self):
if self.flags & self.NO_MOVE:
return Separator(0, 0)
if self.clicked and self.hovered:
delta = 2
elif self.hovered and not self.flags & self.NO_HOVER:
delta = 0
else:
delta = 0
return Separator(delta, delta) | Return the offset of the colored part. |
15,508 | def bulkCmd(snmpDispatcher, authData, transportTarget,
nonRepeaters, maxRepetitions, *varBinds, **options):
def _cbFun(snmpDispatcher, stateHandle, errorIndication, rspPdu, _cbCtx):
if not cbFun:
return
if errorIndication:
cbFun(errorIndication, pMod.Integer(0), pMod.Integer(0), None,
cbCtx=cbCtx, snmpDispatcher=snmpDispatcher, stateHandle=stateHandle)
return
errorStatus = pMod.apiBulkPDU.getErrorStatus(rspPdu)
errorIndex = pMod.apiBulkPDU.getErrorIndex(rspPdu)
varBindTable = pMod.apiBulkPDU.getVarBindTable(reqPdu, rspPdu)
errorIndication, nextVarBinds = pMod.apiBulkPDU.getNextVarBinds(
varBindTable[-1], errorIndex=errorIndex
)
if options.get('lookupMib'):
varBindTable = [
VB_PROCESSOR.unmakeVarBinds(snmpDispatcher.cache, vbs) for vbs in varBindTable
]
nextStateHandle = pMod.getNextRequestID()
nextVarBinds = cbFun(errorIndication, errorStatus, errorIndex, varBindTable,
cbCtx=cbCtx,
snmpDispatcher=snmpDispatcher,
stateHandle=stateHandle,
nextStateHandle=nextStateHandle,
nextVarBinds=nextVarBinds)
if not nextVarBinds:
return
pMod.apiBulkPDU.setRequestID(reqPdu, nextStateHandle)
pMod.apiBulkPDU.setVarBinds(reqPdu, nextVarBinds)
return snmpDispatcher.sendPdu(authData, transportTarget, reqPdu, cbFun=_cbFun)
if authData.mpModel < 1:
raise error.PySnmpError()
lookupMib, cbFun, cbCtx = [options.get(x) for x in ('lookupMib', 'cbFun', 'cbCtx')]
if lookupMib:
varBinds = VB_PROCESSOR.makeVarBinds(snmpDispatcher.cache, varBinds)
pMod = api.PROTOCOL_MODULES[authData.mpModel]
reqPdu = pMod.GetBulkRequestPDU()
pMod.apiBulkPDU.setDefaults(reqPdu)
pMod.apiBulkPDU.setNonRepeaters(reqPdu, nonRepeaters)
pMod.apiBulkPDU.setMaxRepetitions(reqPdu, maxRepetitions)
pMod.apiBulkPDU.setVarBinds(reqPdu, varBinds)
return snmpDispatcher.sendPdu(authData, transportTarget, reqPdu, cbFun=_cbFun) | Initiate SNMP GETBULK query over SNMPv2c.
Based on passed parameters, prepares SNMP GETBULK packet
(:RFC:`1905#section-4.2.3`) and schedules its transmission by
I/O framework at a later point of time.
Parameters
----------
snmpDispatcher: :py:class:`~pysnmp.hlapi.v1arch.asyncore.SnmpDispatcher`
Class instance representing SNMP dispatcher.
authData: :py:class:`~pysnmp.hlapi.v1arch.CommunityData` or :py:class:`~pysnmp.hlapi.v1arch.UsmUserData`
Class instance representing SNMP credentials.
transportTarget: :py:class:`~pysnmp.hlapi.v1arch.asyncore.UdpTransportTarget` or :py:class:`~pysnmp.hlapi.v1arch.asyncore.Udp6TransportTarget`
Class instance representing transport type along with SNMP peer
address.
nonRepeaters: int
One MIB variable is requested in response for the first
`nonRepeaters` MIB variables in request.
maxRepetitions: int
`maxRepetitions` MIB variables are requested in response for each
of the remaining MIB variables in the request (e.g. excluding
`nonRepeaters`). The remote SNMP dispatcher may choose a lesser value than
requested.
\*varBinds: :py:class:`~pysnmp.smi.rfc1902.ObjectType`
One or more class instances representing MIB variables to place
into SNMP request.
Other Parameters
----------------
\*\*options :
Request options:
* `lookupMib` - load MIB and resolve response MIB variables at
the cost of slightly reduced performance. Default is `True`.
* `cbFun` (callable) - user-supplied callable that is invoked
to pass SNMP response data or error to user at a later point
of time. Default is `None`.
* `cbCtx` (object) - user-supplied object passing additional
parameters to/from `cbFun`. Default is `None`.
Notes
-----
User-supplied `cbFun` callable must have the following call
signature:
* snmpDispatcher (:py:class:`~pysnmp.hlapi.v1arch.snmpDispatcher`):
Class instance representing SNMP dispatcher.
* stateHandle (int): Unique request identifier. Can be used
for matching multiple ongoing requests with received responses.
* errorIndication (str): True value indicates SNMP dispatcher error.
* errorStatus (str): True value indicates SNMP PDU error.
* errorIndex (int): Non-zero value refers to `varBinds[errorIndex-1]`
* varBindTable (tuple): A sequence of sequences (e.g. 2-D array) of
variable-bindings represented as :class:`tuple` or
:py:class:`~pysnmp.smi.rfc1902.ObjectType` class instances
representing a table of MIB variables returned in SNMP response, with
up to ``maxRepetitions`` rows, i.e. ``len(varBindTable) <= maxRepetitions``.
For ``0 <= i < len(varBindTable)`` and ``0 <= j < len(varBinds)``,
``varBindTable[i][j]`` represents:
- For non-repeaters (``j < nonRepeaters``), the first lexicographic
successor of ``varBinds[j]``, regardless of the value of ``i``, or an
:py:class:`~pysnmp.smi.rfc1902.ObjectType` instance with the
:py:obj:`~pysnmp.proto.rfc1905.endOfMibView` value if no such successor
exists;
- For repeaters (``j >= nonRepeaters``), the ``i``-th lexicographic
successor of ``varBinds[j]``, or an
:py:class:`~pysnmp.smi.rfc1902.ObjectType` instance with the
:py:obj:`~pysnmp.proto.rfc1905.endOfMibView` value if no such successor
exists.
See :rfc:`3416#section-4.2.3` for details on the underlying
``GetBulkRequest-PDU`` and the associated ``GetResponse-PDU``, such as
specific conditions under which the server may truncate the response,
causing ``varBindTable`` to have less than ``maxRepetitions`` rows.
* `cbCtx` (object): Original user-supplied object.
Returns
-------
stateHandle : int
Unique request identifier. Can be used for matching received
responses with ongoing requests.
Raises
------
PySnmpError
Or its derivative indicating that an error occurred while
performing SNMP operation.
Examples
--------
>>> from pysnmp.hlapi.v1arch.asyncore import *
>>>
>>> def cbFun(snmpDispatcher, stateHandle, errorIndication,
>>> errorStatus, errorIndex, varBinds, cbCtx):
>>> print(errorIndication, errorStatus, errorIndex, varBinds)
>>>
>>> snmpDispatcher = snmpDispatcher()
>>>
>>> stateHandle = bulkCmd(
>>> snmpDispatcher,
>>> CommunityData('public'),
>>> UdpTransportTarget(('demo.snmplabs.com', 161)),
>>> 0, 2,
>>> ('1.3.6.1.2.1.1', None),
>>> cbFun=cbFun
>>> )
>>>
>>> snmpDispatcher.transportDispatcher.runDispatcher() |
15,509 | def append(self, symbol, metadata, start_time=None):
if start_time is None:
start_time = dt.utcnow()
old_metadata = self.find_one({: symbol}, sort=[(, pymongo.DESCENDING)])
if old_metadata is not None:
if old_metadata[] >= start_time:
raise ValueError(.format(start_time,
old_metadata[]))
if old_metadata[] == metadata:
return old_metadata
elif metadata is None:
return
self.find_one_and_update({: symbol}, {: {: start_time}},
sort=[(, pymongo.DESCENDING)])
document = {: bson.ObjectId(), : symbol, : metadata, : start_time}
mongo_retry(self.insert_one)(document)
logger.debug(, symbol)
return document | Update metadata entry for `symbol`
Parameters
----------
symbol : `str`
symbol name for the item
metadata : `dict`
to be persisted
start_time : `datetime.datetime`
when metadata becomes effective
Default: datetime.datetime.utcnow() |
15,510 | def loop_position(self):
for i, v in enumerate(self._sort_loop):
if v == glances_processes.sort_key:
return i
return 0 | Return the current sort in the loop |
15,511 | def adj_nodes_gcp(gcp_nodes):
for node in gcp_nodes:
node.cloud = "gcp"
node.cloud_disp = "GCP"
node.private_ips = ip_to_str(node.private_ips)
node.public_ips = ip_to_str(node.public_ips)
node.zone = node.extra[].name
return gcp_nodes | Adjust details specific to GCP. |
15,512 | def add_job(self, task, inputdata, debug=False):
if not self._user_manager.session_logged_in():
raise Exception("A user must be logged in to submit an object")
username = self._user_manager.session_username()
waiting_submission = self._database.submissions.find_one({
"courseid": task.get_course_id(),
"taskid": task.get_id(),
"username": username,
"status": "waiting"})
if waiting_submission is not None:
raise Exception("A submission is already pending for this task!")
obj = {
"courseid": task.get_course_id(),
"taskid": task.get_id(),
"status": "waiting",
"submitted_on": datetime.now(),
"username": [username],
"response_type": task.get_response_type()
}
inputdata["@username"] = username
inputdata["@lang"] = self._user_manager.session_language()
states = self._database.user_tasks.find_one({"courseid": task.get_course_id(), "taskid": task.get_id(), "username": username}, {"random": 1, "state": 1})
inputdata["@random"] = states["random"] if "random" in states else []
inputdata["@state"] = states["state"] if "state" in states else ""
self._hook_manager.call_hook("new_submission", submission=obj, inputdata=inputdata)
obj["input"] = self._gridfs.put(bson.BSON.encode(inputdata))
self._before_submission_insertion(task, inputdata, debug, obj)
submissionid = self._database.submissions.insert(obj)
to_remove = self._after_submission_insertion(task, inputdata, debug, obj, submissionid)
ssh_callback = lambda host, port, password: self._handle_ssh_callback(submissionid, host, port, password)
jobid = self._client.new_job(task, inputdata,
(lambda result, grade, problems, tests, custom, state, archive, stdout, stderr:
self._job_done_callback(submissionid, task, result, grade, problems, tests, custom, state, archive, stdout, stderr, True)),
"Frontend - {}".format(username), debug, ssh_callback)
self._database.submissions.update(
{"_id": submissionid, "status": "waiting"},
{"$set": {"jobid": jobid}}
)
self._logger.info("New submission from %s - %s - %s/%s - %s", self._user_manager.session_username(),
self._user_manager.session_email(), task.get_course_id(), task.get_id(),
web.ctx[])
return submissionid, to_remove | Add a job in the queue and returns a submission id.
:param task: Task instance
:type task: inginious.frontend.tasks.WebAppTask
:param inputdata: the input as a dictionary
:type inputdata: dict
:param debug: If debug is true, more debug data will be saved
:type debug: bool or string
:returns: the new submission id and the removed submission id |
15,513 | def switch_off(self):
success = self.set_status(CONST.STATUS_OFF_INT)
if success:
self._json_state[] = CONST.STATUS_OFF
return success | Turn the switch off. |
15,514 | def alter_old_distutils_request(request: WSGIRequest):
body = request.body
if request.POST or request.FILES:
return
new_body = BytesIO()
content_type, opts = parse_header(request.META[].encode())
parts = body.split(b + opts[] + b)
for part in parts:
if b not in part:
continue
headers, content = part.split(b, 1)
if not headers:
continue
new_body.write(b + opts[] + b)
new_body.write(headers.replace(b, b))
new_body.write(b)
new_body.write(content)
new_body.write(b)
new_body.write(b + opts[] + b)
request._body = new_body.getvalue()
request.META[] = len(request._body)
if hasattr(request, ):
delattr(request, )
if hasattr(request, ):
delattr(request, ) | Alter the request body for compatibility with older distutils clients
Due to a bug in the Python distutils library, the request post is sent
using \n as a separator instead of the \r\n that the HTTP spec demands.
This breaks the Django form parser and therefore we have to write a
custom parser.
This bug was fixed in the Python 2.7.4 and 3.4:
http://bugs.python.org/issue10510 |
15,515 | def get_collection(self, collection, database_name=None, username=None, password=None):
_db = self.get_database(database_name, username, password)
return _db[collection] | Get a pymongo collection handle.
:param collection: Name of collection
:param database_name: (optional) Name of database
:param username: (optional) Username to login with
:param password: (optional) Password to login with
:return: Pymongo collection object |
15,516 | def add(self, *args):
if len(args) > 2:
name, template = args[:2]
args = args[2:]
else:
name = None
template = args[0]
args = args[1:]
if isinstance(template, tuple):
template, type_converters = template
template = Template(template, **type_converters)
elif not isinstance(template, Template):
template = Template(template)
if name:
self._templates[name] = template
super(PathRouter, self).add(template, *args) | Add a path template and handler.
:param name: Optional. If specified, allows reverse path lookup with
:meth:`reverse`.
:param template: A string or :class:`~potpy.template.Template`
instance used to match paths against. Strings will be wrapped in a
Template instance.
:param handler: A callable or :class:`~potpy.router.Route` instance
which will handle calls for the given path. See
:meth:`potpy.router.Router.add` for details. |
15,517 | def rewrap_bytes(data):
return b.join(
data[index:index+70] for index in range(0, len(data), 70)
) | Rewrap characters to 70 character width.
Intended to rewrap base64 content. |
15,518 | def default_is_local(hadoop_conf=None, hadoop_home=None):
params = pydoop.hadoop_params(hadoop_conf, hadoop_home)
for k in , :
if not params.get(k, ).startswith():
return False
return True | \
Is Hadoop configured to use the local file system?
By default, it is. A DFS must be explicitly configured. |
15,519 | def get_type_data(name):
name = name.upper()
try:
return {
: ,
: ,
: name,
: ,
: JEFFS_COORDINATE_FORMAT_TYPES[name] + ,
: JEFFS_COORDINATE_FORMAT_TYPES[name],
: ( +
JEFFS_COORDINATE_FORMAT_TYPES[name] +
)
}
except KeyError:
raise NotFound( + name) | Return dictionary representation of type.
Can be used to initialize primordium.type.primitives.Type |
15,520 | def sync_scheduler(self):
url = "%s/%s/%s" % (self.config[][],
"experiments", "scheduler.info")
try:
req = requests.get(url, proxies=self.config[][],
auth=self.auth,
verify=self.verify)
req.raise_for_status()
except Exception as exp:
logging.exception("Error trying to download scheduler.info: %s" % exp)
raise exp
try:
server_sched = json.loads(req.content)
except Exception as exp:
logging.exception("Error parsing server scheduler: %s" % exp)
raise exp
sched_filename = os.path.join(self.config[][],
)
if not os.path.exists(sched_filename):
with open(sched_filename, ) as file_p:
json.dump(server_sched, file_p, indent=2,
separators=(, ))
return
client_sched = {}
try:
with open(sched_filename, ) as file_p:
client_sched = json.load(file_p)
except Exception as exp:
client_sched = {}
logging.exception("Error loading scheduler file: %s" % exp)
logging.info("Making an empty scheduler")
| Download the scheduler.info file and perform a smart comparison
with what we currently have so that we don't overwrite the
last_run timestamp
To do a smart comparison, we go over each entry in the
server's scheduler file. If a scheduler entry is not present
in the server copy, we delete it in the client copy and if the
scheduler entry is present in the server copy, then we
overwrite the frequency count in the client copy |
15,521 | def append(self, event):
self._events.append(event)
self._events_by_baseclass[event.baseclass].append(event) | Add an event to the list. |
15,522 | def _purge(self):
if len(self.cache) <= self.max_size:
return
cache = self.cache
refcount = self.refcount
queue = self.queue
max_size = self.max_size
while len(cache) > max_size:
refc = 1
while refc:
k = queue.popleft()
refc = refcount[k] = refcount[k] - 1
del cache[k]
del refcount[k] | Trim the cache down to max_size by evicting the
least-recently-used entries. |
15,523 | def eye_plot(x,L,S=0):
plt.figure(figsize=(6,4))
idx = np.arange(0,L+1)
plt.plot(idx,x[S:S+L+1],)
k_max = int((len(x) - S)/L)-1
for k in range(1,k_max):
plt.plot(idx,x[S+k*L:S+L+1+k*L],)
plt.grid()
plt.xlabel()
plt.ylabel()
plt.title()
return 0 | Eye pattern plot of a baseband digital communications waveform.
The signal must be real, but can be multivalued in terms of the underlying
modulation scheme. Used for BPSK eye plots in the Case Study article.
Parameters
----------
x : ndarray of the real input data vector/array
L : display length in samples (usually two symbols)
S : start index
Returns
-------
None : A plot window opens containing the eye plot
Notes
-----
Increase S to eliminate filter transients.
Examples
--------
1000 bits at 10 samples per bit with 'rc' shaping.
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import digitalcom as dc
>>> x,b, data = dc.NRZ_bits(1000,10,'rc')
>>> dc.eye_plot(x,20,60)
>>> plt.show() |
15,524 | def remove_edge_fun(graph):
rm_edge, rm_node = graph.remove_edge, graph.remove_node
from networkx import is_isolate
def remove_edge(u, v):
rm_edge(u, v)
if is_isolate(graph, v):
rm_node(v)
return remove_edge | Returns a function that removes an edge from the `graph`.
.. note:: The out node is removed if it becomes isolated.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:return:
A function that remove an edge from the `graph`.
:rtype: callable |
15,525 | def get_branches(aliases):
ignore = [, , , ]
branches = []
for k, v in aliases.items():
tokens = re.sub(, , v).split()
for t in tokens:
if bool(re.search(r, t)) or len(t) <= 3:
continue
if bool(re.search(r, t)) and t not in ignore:
branches += [t]
return list(set(branches)) | Get unique branch names from an alias dictionary. |
15,526 | def read_index(self, fh, indexed_fh, rec_iterator=None,
rec_hash_func=None, parse_hash=str, flush=True,
no_reindex=True, verbose=False):
if rec_iterator is not None:
self.record_iterator = rec_iterator
if rec_hash_func is not None:
self.record_hash_function = rec_hash_func
self._no_reindex = no_reindex
handle = fh
try:
handle = open(fh)
except TypeError:
key = parse_hash("\t".join(parts[:-1]))
value = parts[-1]
self._index[key] = int(value) | Populate this index from a file. Input format is just a tab-separated file,
one record per line. The last column is the file location for the record
and all columns before that are collectively considered to be the hash key
for that record (which is probably only 1 column, but this allows us to
permit tabs in hash keys). Lines consisting only of whitespace are skipped.
:param fh: filename or stream-like object to read from.
:param indexed_fh: either the filename of the indexed file or handle to
it.
:param rec_iterator: a function that will return an interator for the
indexed file type (not the iterator for the file
itself). This function must take a single argument
which is the name the file to iterate over, or a
stream like object similar to a filestream.
:param rec_hash_func: a function that accepts the record type produced by
the iterator and produces a unique hash for each
record.
:param parse_hash: a function to convert the string representation of
the hash into whatever type is needed. By default,
we just leave these as strings.
:param flush: remove everything currently in the index and discard
any details about a file that is already
fully/partially indexed by this object. This is the
default behavior. If False, then data from <fh> is
just added to the existing index data (potentially
overwriting some of it) and the existing index can
continue to be used as before.
:param no_reindex: if True, after loading the index, a missing key will
cause an exception, rather than trigger re-scanning
the indexed file for the associated record. The only
reason to set this to False would be if your index
was incomplete.
:param verbose: output status message to STDERR about progress
reading the index (if possible).
:raise IndexError: on malformed line in input file/stream |
15,527 | def filter_search(self, code=None, name=None, abilities=None,
attributes=None, info=None):
command = "SELECT code, name FROM CARDS "
command += Where_filter_gen(("code", code), ("name", name),
("abilities", abilities),
("attributes", attributes),
("info", info))
with sqlite3.connect(self.dbname) as carddb:
return carddb.execute(command).fetchall() | Return a list of codes and names pertaining to cards that have the
given information values stored.
Can take a code integer, name string, abilities dict {phase: ability
list/"*"}, attributes list, info dict {key, value list/"*"}.
In the above argument examples "*" is a string that may be passed
instead of a list as the dict value to match anything that stores that
key. |
15,528 | def perform_oauth(email, master_token, android_id, service, app, client_sig,
device_country=, operatorCountry=, lang=,
sdk_version=17):
data = {
: ,
: email,
: 1,
: master_token,
: service,
: ,
: android_id,
: app,
: client_sig,
: device_country,
: device_country,
: lang,
: sdk_version
}
return _perform_auth_request(data) | Use a master token from master_login to perform OAuth to a specific Google
service.
Return a dict, eg::
{
'Auth': '...',
'LSID': '...',
'SID': '..',
'issueAdvice': 'auto',
'services': 'hist,mail,googleme,...'
}
To authenticate requests to this service, include a header
``Authorization: GoogleLogin auth=res['Auth']``. |
15,529 | def MGMT_ACTIVE_SET(self, sAddr=, xCommissioningSessionId=None, listActiveTimestamp=None, listChannelMask=None, xExtendedPanId=None,
sNetworkName=None, sPSKc=None, listSecurityPolicy=None, xChannel=None, sMeshLocalPrefix=None, xMasterKey=None,
xPanId=None, xTmfPort=None, xSteeringData=None, xBorderRouterLocator=None, BogusTLV=None, xDelayTimer=None):
print % self.port
try:
cmd =
if listActiveTimestamp != None:
cmd +=
cmd += str(listActiveTimestamp[0])
if xExtendedPanId != None:
cmd +=
xpanid = self.__convertLongToString(xExtendedPanId)
if len(xpanid) < 16:
xpanid = xpanid.zfill(16)
cmd += xpanid
if sNetworkName != None:
cmd +=
cmd += str(sNetworkName)
if xChannel != None:
cmd +=
cmd += str(xChannel)
if sMeshLocalPrefix != None:
cmd +=
cmd += str(sMeshLocalPrefix)
if xMasterKey != None:
cmd +=
key = self.__convertLongToString(xMasterKey)
if len(key) < 32:
key = key.zfill(32)
cmd += key
if xPanId != None:
cmd +=
cmd += str(xPanId)
if listChannelMask != None:
cmd +=
cmd += + self.__convertLongToString(self.__convertChannelMask(listChannelMask))
if sPSKc != None or listSecurityPolicy != None or \
xCommissioningSessionId != None or xTmfPort != None or xSteeringData != None or xBorderRouterLocator != None or \
BogusTLV != None:
cmd +=
if sPSKc != None:
cmd +=
stretchedPskc = Thread_PBKDF2.get(sPSKc,ModuleHelper.Default_XpanId,ModuleHelper.Default_NwkName)
pskc = hex(stretchedPskc).rstrip().lstrip()
if len(pskc) < 32:
pskc = pskc.zfill(32)
cmd += pskc
if listSecurityPolicy != None:
cmd +=
rotationTime = 0
policyBits = 0
if (len(listSecurityPolicy) == 6):
rotationTime = listSecurityPolicy[2]
policyBits = 0b00000111
if listSecurityPolicy[0]:
policyBits = policyBits | 0b10000000
if listSecurityPolicy[1]:
policyBits = policyBits | 0b01000000
if listSecurityPolicy[3]:
policyBits = policyBits | 0b00100000
if listSecurityPolicy[4]:
policyBits = policyBits | 0b00010000
if listSecurityPolicy[5]:
policyBits = policyBits | 0b00001000
else:
rotationTime = listSecurityPolicy[0]
policyBits = listSecurityPolicy[1]
policy = str(hex(rotationTime))[2:]
if len(policy) < 4:
policy = policy.zfill(4)
cmd += policy
cmd += str(hex(policyBits))[2:]
if xCommissioningSessionId != None:
cmd +=
sessionid = str(hex(xCommissioningSessionId))[2:]
if len(sessionid) < 4:
sessionid = sessionid.zfill(4)
cmd += sessionid
if xBorderRouterLocator != None:
cmd +=
locator = str(hex(xBorderRouterLocator))[2:]
if len(locator) < 4:
locator = locator.zfill(4)
cmd += locator
if xSteeringData != None:
steeringData = self.__convertLongToString(xSteeringData)
cmd += + str(len(steeringData)/2).zfill(2)
cmd += steeringData
if BogusTLV != None:
cmd += "8202aa55"
print cmd
return self.__sendCommand(cmd)[0] ==
except Exception, e:
ModuleHelper.WriteIntoDebugLogger("MGMT_ACTIVE_SET() Error: " + str(e)) | send MGMT_ACTIVE_SET command
Returns:
True: successful to send MGMT_ACTIVE_SET
False: fail to send MGMT_ACTIVE_SET |
15,530 | def daemon_start(self):
if daemon_status() == "SUN not running":
subprocess.call("{0} &".format(self.cmd), shell=True) | Start daemon when gtk loaded |
15,531 | def geturl(urllib2_resp):
url = urllib2_resp.geturl()
scheme, rest = url.split(, 1)
if rest.startswith():
return url
else:
return % (scheme, rest) | Use instead of urllib.addinfourl.geturl(), which appears to have
some issues with dropping the double slash for certain schemes
(e.g. file://). This implementation is probably over-eager, as it
always restores '://' if it is missing, and it appears some url
schemata aren't always followed by '//' after the colon, but as
far as I know pip doesn't need any of those.
The URI RFC can be found at: http://tools.ietf.org/html/rfc1630
This function assumes that
scheme:/foo/bar
is the same as
scheme:///foo/bar |
15,532 | def _parse_attribute(
self,
element,
attribute,
state
):
parsed_value = self._default
attribute_value = element.get(attribute, None)
if attribute_value is not None:
parsed_value = self._parser_func(attribute_value, state)
elif self.required:
state.raise_error(
MissingValue, .format(
self._attribute, element.tag
)
)
return parsed_value | Parse the primitive value within the XML element's attribute. |
15,533 | def encode_sid(cls, secret, sid):
secret_bytes = secret.encode("utf-8")
sid_bytes = sid.encode("utf-8")
sig = hmac.new(secret_bytes, sid_bytes, hashlib.sha512).hexdigest()
return "%s%s" % (sig, sid) | Computes the HMAC for the given session id. |
15,534 | def unbind(self, callback):
handlers = self._handlers
if handlers:
filtered_callbacks = [f for f in handlers if f != callback]
removed_count = len(handlers) - len(filtered_callbacks)
if removed_count:
self._handlers = filtered_callbacks
return removed_count
return 0 | Remove a callback from the list |
15,535 | def keep(self, diff):
(toUUID, fromUUID) = self.toArg.diff(diff)
self._client.keep(toUUID, fromUUID)
logger.debug("Kept %s", diff) | Mark this diff (or volume) to be kept in path. |
15,536 | def _split_refextract_authors_str(authors_str):
author_seq = (x.strip() for x in RE_SPLIT_AUTH.split(authors_str) if x)
res = []
current =
for author in author_seq:
if not isinstance(author, six.text_type):
author = six.text_type(author.decode(, ))
author = re.sub(r, , author, re.U)
author = re.sub(r, , author, re.U)
author = re.sub(r, , author, re.U)
if RE_INITIALS_ONLY.match(author):
current += + author.strip().replace(, )
else:
if current:
res.append(current)
current = author
if current:
res.append(current)
filters = [
    lambda a: len(a) == 1,
]
res = [r for r in res if all(not f(r) for f in filters)]
return res | Extract author names out of refextract authors output. |
15,537 | def get_create_security_group_commands(self, sg_id, sg_rules):
cmds = []
in_rules, eg_rules = self._format_rules_for_eos(sg_rules)
cmds.append("ip access-list %s dynamic" %
self._acl_name(sg_id, n_const.INGRESS_DIRECTION))
for in_rule in in_rules:
cmds.append(in_rule)
cmds.append("exit")
cmds.append("ip access-list %s dynamic" %
self._acl_name(sg_id, n_const.EGRESS_DIRECTION))
for eg_rule in eg_rules:
cmds.append(eg_rule)
cmds.append("exit")
return cmds | Commands for creating ACL |
15,538 | async def update_server_data(server):
data = datatools.get_data()
"The prefix is currently `!`, and can be changed at any time using `!prefix`\n\n" + \
"You can use `!help` to get help commands for all modules, " + \
"or {} me to get the server prefix and help commands.".format(server.me.mention)
await client.send_message(default_channel, hello_message) | Updates the server info for the given server
Args:
server: The Discord server to update info for |
15,539 | def ParseCall(self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
guid = self._GetRowValue(query_hash, row, )
is_incoming = self._GetRowValue(query_hash, row, )
videostatus = self._GetRowValue(query_hash, row, )
try:
aux = guid
if aux:
aux_list = aux.split()
src_aux = aux_list[0]
dst_aux = aux_list[1]
else:
src_aux =
dst_aux =
except IndexError:
src_aux = .format(guid)
dst_aux = .format(guid)
if is_incoming == :
user_start_call = True
source = src_aux
ip_address = self._GetRowValue(query_hash, row, )
if ip_address:
destination = .format(dst_aux, ip_address)
else:
destination = dst_aux
else:
user_start_call = False
source = src_aux
destination = dst_aux
call_identifier = self._GetRowValue(query_hash, row, )
event_data = SkypeCallEventData()
event_data.dst_call = destination
event_data.offset = call_identifier
event_data.query = query
event_data.src_call = source
event_data.user_start_call = user_start_call
event_data.video_conference = videostatus ==
timestamp = self._GetRowValue(query_hash, row, )
event_data.call_type =
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, )
parser_mediator.ProduceEventWithEventData(event, event_data)
try:
timestamp = self._GetRowValue(query_hash, row, )
timestamp = int(timestamp)
except (ValueError, TypeError):
timestamp = None
if timestamp:
event_data.call_type =
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, )
parser_mediator.ProduceEventWithEventData(event, event_data)
try:
call_duration = self._GetRowValue(query_hash, row, )
call_duration = int(call_duration)
except (ValueError, TypeError):
parser_mediator.ProduceExtractionWarning(
.format(
call_identifier))
call_duration = None
if call_duration:
timestamp += call_duration
event_data.call_type =
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, )
parser_mediator.ProduceEventWithEventData(event, event_data) | Parses a call.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row resulting from query.
query (Optional[str]): query. |
15,540 | def multigrid(bounds, points_count):
if len(bounds)==1:
return np.linspace(bounds[0][0], bounds[0][1], points_count).reshape(points_count, 1)
x_grid_rows = np.meshgrid(*[np.linspace(b[0], b[1], points_count) for b in bounds])
x_grid_columns = np.vstack([x.flatten(order=) for x in x_grid_rows]).T
return x_grid_columns | Generates a multidimensional lattice
:param bounds: box constraints
:param points_count: number of points per dimension. |
15,541 | def _get_ukko_report():
with urllib.request.urlopen(URL_UKKO_REPORT) as response:
ret = str(response.read())
return ret | Get Ukko's report from the fixed URL. |
15,542 | def optimal_partitions(sizes, counts, num_part):
if num_part < 2:
return [(sizes[0], sizes[-1])]
if num_part >= len(sizes):
partitions = [(x, x) for x in sizes]
return partitions
nfps = _compute_nfps_real(counts, sizes)
partitions, _, _ = _compute_best_partitions(num_part, sizes, nfps)
return partitions | Compute the optimal partitions given a distribution of set sizes.
Args:
sizes (numpy.array): The complete domain of set sizes in ascending
order.
counts (numpy.array): The frequencies of all set sizes in the same
order as `sizes`.
num_part (int): The number of partitions to create.
Returns:
list: A list of partitions in the form of `(lower, upper)` tuples,
where `lower` and `upper` are lower and upper bound (inclusive)
set sizes of each partition. |
15,543 | def _concatenate_shape(input_shape, axis=-1):
ax = axis % len(input_shape[0])
concat_size = sum(shape[ax] for shape in input_shape)
out_shape = input_shape[0][:ax] + (concat_size,) + input_shape[0][ax+1:]
return out_shape | Helper to determine the shape of Concatenate output. |
15,544 | def check_num_slices(num_slices, img_shape=None, num_dims=3):
if not isinstance(num_slices, Iterable) or len(num_slices) == 1:
num_slices = np.repeat(num_slices, num_dims)
if img_shape is not None:
if len(num_slices) != len(img_shape):
raise ValueError(
.format(len(img_shape) + 1))
num_slices = np.minimum(img_shape, num_slices)
return np.maximum(1, num_slices) | Ensures requested number of slices is valid.
At least 1 and at most the image size, if available |
15,545 | def loads(cls, s):
with closing(StringIO(s)) as fileobj:
return cls.load(fileobj) | Load an instance of this class from YAML. |
15,546 | def show_exception_only(self, etype, evalue):
ostream = self.ostream
ostream.flush()
ostream.write(.join(self.get_exception_only(etype, evalue)))
ostream.flush() | Only print the exception type and message, without a traceback.
Parameters
----------
etype : exception type
value : exception value |
15,547 | def sparse_or_dense_matvecmul(sparse_or_dense_matrix,
dense_vector,
validate_args=False,
name=None,
**kwargs):
with tf.compat.v1.name_scope(name, ,
[sparse_or_dense_matrix, dense_vector]):
dense_vector = tf.convert_to_tensor(
value=dense_vector, dtype_hint=tf.float32, name=)
return tf.squeeze(
sparse_or_dense_matmul(
sparse_or_dense_matrix,
dense_vector[..., tf.newaxis],
validate_args=validate_args,
**kwargs),
axis=[-1]) | Returns (batched) matmul of a (sparse) matrix with a column vector.
Args:
sparse_or_dense_matrix: `SparseTensor` or `Tensor` representing a (batch of)
matrices.
dense_vector: `Tensor` representing a (batch of) vectors, with the same
batch shape as `sparse_or_dense_matrix`. The shape must be compatible with
the shape of `sparse_or_dense_matrix` and kwargs.
validate_args: When `True`, additional assertions might be embedded in the
graph.
Default value: `False` (i.e., no graph assertions are added).
name: Python `str` prefixed to ops created by this function.
Default value: "sparse_or_dense_matvecmul".
**kwargs: Keyword arguments to `tf.sparse_tensor_dense_matmul` or
`tf.matmul`.
Returns:
product: A dense (batch of) vector-shaped Tensor of the same batch shape and
dtype as `sparse_or_dense_matrix` and `dense_vector`. |
15,548 | def iteritems(self):
for key in self:
vals = self._container[key.lower()]
for val in vals[1:]:
yield vals[0], val | Iterate over all header lines, including duplicate ones. |
15,549 | def static_get_pdb_object(pdb_id, bio_cache = None, cache_dir = None):
pdb_id = pdb_id.upper()
if bio_cache:
return bio_cache.get_pdb_object(pdb_id)
if cache_dir:
filepath = os.path.join(cache_dir, .format(pdb_id))
if os.path.exists(filepath):
return PDB.from_filepath(filepath)
pdb_contents = retrieve_pdb(pdb_id)
if cache_dir:
write_file(os.path.join(cache_dir, "%s.pdb" % pdb_id), pdb_contents)
return PDB(pdb_contents) | This method does not necessarily use a BioCache but it seems to fit here. |
15,550 | def call_api(self,
action,
params=None,
method=(, , ),
**kwargs):
urltype, methodname, content_type = method
if urltype == :
url = self.sms_host
else:
url = self.api_host
if content_type == :
data = json.dumps(params)
else:
data = self._filter_params(params)
return self._http_call(
url=url + action,
method=methodname,
data=data,
headers=self._headers(content_type),
**kwargs) | :param method: methodName
:param action: MethodUrl,
:param params: Dictionary,form params for api.
:param timeout: (optional) Float describing the timeout of the request.
:return: |
15,551 | def scrape_metrics(self, scraper_config):
response = self.poll(scraper_config)
try:
if not scraper_config[]:
scraper_config[] = False
elif not scraper_config[]:
for val in itervalues(scraper_config[]):
scraper_config[].add(val[])
for metric in self.parse_metric_family(response, scraper_config):
yield metric
scraper_config[] = False
for metric, mapping in list(iteritems(scraper_config[])):
for key in list(mapping):
if key not in scraper_config[][metric]:
del scraper_config[][metric][key]
scraper_config[] = {}
finally:
response.close() | Poll the data from prometheus and return the metrics as a generator. |
15,552 | def extract_public_key(args):
sk = _load_ecdsa_signing_key(args)
vk = sk.get_verifying_key()
args.public_keyfile.write(vk.to_string())
print("%s public key extracted to %s" % (args.keyfile.name, args.public_keyfile.name)) | Load an ECDSA private key and extract the embedded public key as raw binary data. |
15,553 | def read(self, entity=None, attrs=None, ignore=None, params=None):
if entity is None:
entity = type(self)(
self._server_config,
content_view_filter=self.content_view_filter,
)
if attrs is None:
attrs = self.read_json()
if ignore is None:
ignore = set()
ignore.add()
ignore.update([
field_name
for field_name in entity.get_fields().keys()
if field_name not in attrs
])
return super(ContentViewFilterRule, self).read(
entity, attrs, ignore, params) | Do not read certain fields.
Do not expect the server to return the ``content_view_filter``
attribute. This has no practical impact, as the attribute must be
provided when a :class:`nailgun.entities.ContentViewFilterRule` is
instantiated.
Also, ignore any field that is not returned by the server. For more
information, see `Bugzilla #1238408
<https://bugzilla.redhat.com/show_bug.cgi?id=1238408>`_. |
15,554 | def run(self):
"Get jobs from the queue and perform them as they arrive."
while 1:
job = self.jobs.get()
try:
job.run()
self.jobs.task_done()
except TerminationNotice:
self.jobs.task_done()
break | Get jobs from the queue and perform them as they arrive. |
15,555 | def register_token(platform, user_id, token, on_error=None, on_success=None):
__device_token(platform, True, user_id, token=token, on_error=on_error, on_success=on_success) | Register a device token for a user.
:param str platform The platform which to register token on. One of either
Google Cloud Messaging (outbound.GCM) or Apple Push Notification Service
(outbound.APNS).
:param str | number user_id: the id you use to identify a user. this should
be static for the lifetime of a user.
:param str token: the token to register.
:param func on_error: An optional function to call in the event of an error.
on_error callback should take 2 parameters: `code` and `error`. `code` will be
one of outbound.ERROR_XXXXXX. `error` will be the corresponding message.
:param func on_success: An optional function to call if/when the API call succeeds.
on_success callback takes no parameters. |
15,556 | def _scrape_song_lyrics_from_url(self, url):
page = requests.get(url)
if page.status_code == 404:
return None
html = BeautifulSoup(page.text, "html.parser")
div = html.find("div", class_="lyrics")
if not div:
return None
lyrics = div.get_text()  # assumed: extract the raw text from the lyrics div
return lyrics.strip("\n") | Use BeautifulSoup to scrape song info off of a Genius song URL
:param url: URL for the web page to scrape lyrics from |
15,557 | def _send_unsigned_long(self,value):
raise OverflowError(err)
return struct.pack(self.board.unsigned_long_type,value) | Convert a numerical value into an integer, then to a bytes object.
Check bounds for unsigned long. |
15,558 | def create_bot(self, name, avatar_url=None, callback_url=None, dm_notification=None,
**kwargs):
return self._bots.create(name=name, group_id=self.group_id,
avatar_url=avatar_url, callback_url=callback_url,
dm_notification=dm_notification) | Create a new bot in a particular group.
:param str name: bot name
:param str avatar_url: the URL of an image to use as an avatar
:param str callback_url: a POST-back URL for each new message
:param bool dm_notification: whether to POST-back for direct messages?
:return: the new bot
:rtype: :class:`~groupy.api.bots.Bot` |
15,559 | def dumps_tabledata(value, format_name="rst_grid_table", **kwargs):
from ._factory import TableWriterFactory
if not value:
raise TypeError("value must be a tabledata.TableData instance")
writer = TableWriterFactory.create_from_format_name(format_name)
for attr_name, attr_value in kwargs.items():
setattr(writer, attr_name, attr_value)
writer.from_tabledata(value)
return writer.dumps() | :param tabledata.TableData value: Tabular data to dump.
:param str format_name:
Dumped format name of tabular data.
Available formats are described in
:py:meth:`~pytablewriter.TableWriterFactory.create_from_format_name`
:Example:
.. code:: python
>>> dumps_tabledata(value)
.. table:: sample_data
====== ====== ======
attr_a attr_b attr_c
====== ====== ======
1 4.0 a
2 2.1 bb
3 120.9 ccc
====== ====== ====== |
15,560 | def radius(self):
try:
return self._radius
except AttributeError:
pass
self._radius = Point(1, 1, 0)
return self._radius | Radius of the ellipse, Point class. |
15,561 | def location_based_search(self, lng, lat, distance, unit="miles", attribute_map=None, page=0, limit=50):
if unit == "miles":
distance = float(distance/69)
else:
distance = float(distance/111.045)
query = {
"loc" : {
"$within": {
"$center" : [[lng, lat], distance]}
}
}
if attribute_map:
query = dict(query.items() + attribute_map.items())
results = yield self.find(query, page=page, limit=limit)
raise Return(self._list_cursor_to_json(results)) | Search based on location and other attribute filters
:param long lng: Longitude parameter
:param long lat: Latitude parameter
:param int distance: The radius of the query
:param str unit: The unit of measure for the query, defaults to miles
:param dict attribute_map: Additional attributes to apply to the location bases query
:param int page: The page to return
:param int limit: Number of results per page
:returns: List of objects
:rtype: list |
15,562 | def _add_helpingmaterials(config, helping_file, helping_type):
try:
project = find_project_by_short_name(config.project[],
config.pbclient,
config.all)
data = _load_data(helping_file, helping_type)
if len(data) == 0:
return ("Unknown format for the tasks file. Use json, csv, po or "
"properties.")
with click.progressbar(data, label="Adding Helping Materials") as pgbar:
for d in pgbar:
helping_info, file_path = create_helping_material_info(d)
if file_path:
hm = config.pbclient.create_helpingmaterial(project_id=project.id,
info=helping_info,
file_path=file_path)
check_api_error(hm)
z = hm.info.copy()
z.update(helping_info)
hm.info = z
response = config.pbclient.update_helping_material(hm)
check_api_error(response)
else:
response = config.pbclient.create_helpingmaterial(project_id=project.id,
info=helping_info)
check_api_error(response)
sleep, msg = enable_auto_throttling(config, data,
endpoint=)
if sleep:
click.secho(msg, fg=)
if sleep:
time.sleep(sleep)
return ("%s helping materials added to project: %s" % (len(data),
config.project[]))
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise | Add helping materials to a project. |
15,563 | def ending_long_process(self, message=""):
QApplication.restoreOverrideCursor()
self.show_message(message, timeout=2000)
QApplication.processEvents() | Clear main window's status bar and restore mouse cursor. |
15,564 | def clone(name_a, name_b, **kwargs):
property1value1property2value2*
flags = []
target = []
filesystem_properties = kwargs.get(, {})
if kwargs.get(, False):
flags.append()
target.append(name_a)
target.append(name_b)
res = __salt__[](
__utils__[](
command=,
flags=flags,
filesystem_properties=filesystem_properties,
target=target,
),
python_shell=False,
)
return __utils__[](res, ) | Creates a clone of the given snapshot.
name_a : string
name of snapshot
name_b : string
name of filesystem or volume
create_parent : boolean
creates all the non-existing parent datasets. any property specified on the
command line using the -o option is ignored.
properties : dict
additional zfs properties (-o)
.. note::
ZFS properties can be specified at the time of creation of the filesystem by
passing an additional argument called "properties" and specifying the properties
with their respective values in the form of a python dictionary::
properties="{'property1': 'value1', 'property2': 'value2'}"
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' zfs.clone myzpool/mydataset@yesterday myzpool/mydataset_yesterday |
15,565 | async def keep_alive(self, period=1, margin=.3):
if period < 0:
period = 0
if period > 10:
period = 10
self.period = period
if margin < .1:
margin = .1
if margin > .9:
margin = .9
self.margin = margin
self.keep_alive_interval = [period & 0x7f, (period >> 7) & 0x7f]
await self._send_sysex(PrivateConstants.SAMPLING_INTERVAL,
self.keep_alive_interval)
while True:
if self.period:
await asyncio.sleep(period - (period - (period * margin)))
await self._send_sysex(PrivateConstants.KEEP_ALIVE,
self.keep_alive_interval)
else:
break | Periodically send a keep alive message to the Arduino.
Frequency of keep alive transmission is calculated as follows:
keep_alive_sent = period - (period * margin)
:param period: Time period between keepalives. Range is 0-10 seconds.
0 disables the keepalive mechanism.
:param margin: Safety margin to assure keepalives are sent before
period expires. Range is 0.1 to 0.9
:returns: No return value |
15,566 | def run(self, debug=False, reload=None):
loop = asyncio.get_event_loop()
logging.basicConfig(level=logging.DEBUG if debug else logging.INFO)
if reload is None:
reload = debug
bot_loop = asyncio.ensure_future(self.loop())
try:
if reload:
loop.run_until_complete(run_with_reloader(loop, bot_loop, self.stop))
else:
loop.run_until_complete(bot_loop)
except KeyboardInterrupt:
logger.debug("User cancelled")
bot_loop.cancel()
self.stop()
finally:
if AIOHTTP_23:
loop.run_until_complete(self.session.close())
logger.debug("Closing loop")
loop.stop()
loop.close() | Convenience method for running bots in getUpdates mode
:param bool debug: Enable debug logging and automatic reloading
:param bool reload: Automatically reload bot on code change
:Example:
>>> if __name__ == '__main__':
>>> bot.run() |
15,567 | def update(self, friendly_name=None, description=None, expiry=None, schema=None):
self._load_info()
if friendly_name is not None:
self._info[] = friendly_name
if description is not None:
self._info[] = description
if expiry is not None:
if isinstance(expiry, datetime.datetime):
expiry = calendar.timegm(expiry.utctimetuple()) * 1000
self._info[] = expiry
if schema is not None:
if isinstance(schema, _schema.Schema):
schema = schema._bq_schema
self._info[] = {: schema}
try:
self._api.table_update(self._name_parts, self._info)
except datalab.utils.RequestException:
self._info = None
except Exception as e:
raise e | Selectively updates Table information.
Any parameters that are omitted or None are not updated.
Args:
friendly_name: if not None, the new friendly name.
description: if not None, the new description.
expiry: if not None, the new expiry time, either as a DateTime or milliseconds since epoch.
schema: if not None, the new schema: either a list of dictionaries or a Schema. |
15,568 | def _do_functions(self, rule, p_selectors, p_parents, p_children, scope, media, c_lineno, c_property, c_codestr, code, name):
if name:
funct, params, _ = name.partition()
funct = funct.strip()
params = split_params(depar(params + _))
defaults = {}
new_params = []
for param in params:
param, _, default = param.partition()
param = param.strip()
default = default.strip()
if param:
new_params.append(param)
if default:
default = self.apply_vars(
default, rule[CONTEXT], None, rule)
defaults[param] = default
context = rule[CONTEXT].copy()
for p in new_params:
context.pop(p, None)
mixin = [list(new_params), defaults, self.
apply_vars(c_codestr, context, None, rule)]
if code == :
def _call(mixin):
def __call(R, *args, **kwargs):
m_params = mixin[0]
m_vars = rule[CONTEXT].copy()
m_vars.update(mixin[1])
m_codestr = mixin[2]
for i, a in enumerate(args):
m_vars[m_params[i]] = a
m_vars.update(kwargs)
_options = rule[OPTIONS].copy()
_rule = spawn_rule(R, codestr=m_codestr, context=m_vars, options=_options, deps=set(), properties=[], final=False, lineno=c_lineno)
self.manage_children(_rule, p_selectors, p_parents,
p_children, (scope or ) + , R[MEDIA])
ret = _rule[OPTIONS].pop(, )
return ret
return __call
_mixin = _call(mixin)
_mixin.mixin = mixin
mixin = _mixin
while len(new_params):
rule[OPTIONS][ % (code, funct,
len(new_params))] = mixin
param = new_params.pop()
if param not in defaults:
break
if not new_params:
rule[OPTIONS][code + + funct + ] = mixin | Implements @mixin and @function |
15,569 | def check_work_done(self, grp):
id_ = self.get_id(grp)
concat_file = os.path.join(self.cache_dir, .format(id_))
result_file = os.path.join(self.cache_dir, .format(id_, self.task_interface.name))
return os.path.exists(concat_file), os.path.exists(result_file) | Check for the existence of alignment and result files. |
15,570 | def detect_encoding(readline):
bom_found = False
encoding = None
default =
def read_or_stop():
try:
return readline()
except StopIteration:
return bytes()
def find_cookie(line):
try:
line_string = line.decode()
except UnicodeDecodeError:
return None
match = cookie_re.match(line_string)
if not match:
return None
encoding = _get_normal_name(match.group(1))
try:
codec = lookup(encoding)
except LookupError:
raise SyntaxError("unknown encoding: " + encoding)
if bom_found:
if codec.name != :
raise SyntaxError()
encoding +=
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default =
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second] | The detect_encoding() function is used to detect the encoding that should
be used to decode a Python source file. It requires one argment, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read
in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present, but
disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned. |
15,571 | def write_stilde(self, stilde_dict, group=None):
subgroup = self.data_group + "/{ifo}/stilde"
if group is None:
group = subgroup
else:
group = .join([group, subgroup])
for ifo, stilde in stilde_dict.items():
self[group.format(ifo=ifo)] = stilde
self[group.format(ifo=ifo)].attrs[] = stilde.delta_f
self[group.format(ifo=ifo)].attrs[] = float(stilde.epoch) | Writes stilde for each IFO to file.
Parameters
-----------
stilde : {dict, FrequencySeries}
A dict of FrequencySeries where the key is the IFO.
group : {None, str}
The group to write the strain to. If None, will write to the top
level. |
15,572 | def update(self, feedforwardInputI, feedforwardInputE, v, recurrent=True,
envelope=False, iSpeedTuning=False, enforceDale=True):
np.matmul(self.activationsP * self.placeGainI, self.weightsPI,
self.instantaneousI)
np.matmul(self.activationsP* self.placeGainE, self.weightsPEL,
self.instantaneousEL)
np.matmul(self.activationsP * self.placeGainE, self.weightsPER,
self.instantaneousER)
self.instantaneousI += self.boostEffectI*\
self.activationHistoryI +\
feedforwardInputI
self.instantaneousEL += self.boostEffectE*\
self.activationHistoryEL +\
feedforwardInputE
self.instantaneousER += self.boostEffectE*\
self.activationHistoryER +\
feedforwardInputE
if enforceDale:
weightsII = np.minimum(self.weightsII, 0)
weightsIER = np.minimum(self.weightsIER, 0)
weightsIEL = np.minimum(self.weightsIEL, 0)
weightsELI = np.maximum(self.weightsELI, 0)
weightsERI = np.maximum(self.weightsERI, 0)
else:
weightsII = self.weightsII
weightsIER = self.weightsIER
weightsIEL = self.weightsIEL
weightsELI = self.weightsELI
weightsERI = self.weightsERI
if recurrent:
self.instantaneousI += (np.matmul(self.activationsEL, weightsELI) +\
np.matmul(self.activationsER, weightsERI) +\
np.matmul(self.activationsI, weightsII))
self.instantaneousEL += np.matmul(self.activationsI, weightsIEL)
self.instantaneousER += np.matmul(self.activationsI, weightsIER)
self.instantaneousI += self.tonicMagnitude
self.instantaneousEL += self.tonicMagnitude
self.instantaneousER += self.tonicMagnitude
self.instantaneousEL *= max((1 - self.velocityGain*v), 0)
self.instantaneousER *= max((1 + self.velocityGain*v), 0)
if iSpeedTuning:
self.instantaneousI *= min(self.velocityGain*np.abs(v), 1)
if envelope:
self.instantaneousI *= self.envelopeI
self.instantaneousER *= self.envelopeE
self.instantaneousEL *= self.envelopeE
np.maximum(self.instantaneousI, 0., self.instantaneousI)
np.maximum(self.instantaneousEL, 0., self.instantaneousEL)
np.maximum(self.instantaneousER, 0., self.instantaneousER)
self.activationsI += (self.instantaneousI - self.activationsI/self.decayConstant)*self.dt
self.activationsEL += (self.instantaneousEL - self.activationsEL/self.decayConstant)*self.dt
self.activationsER += (self.instantaneousER - self.activationsER/self.decayConstant)*self.dt
np.minimum(self.activationsI, self.clip, self.activationsI)
np.minimum(self.activationsEL, self.clip, self.activationsEL)
np.minimum(self.activationsER, self.clip, self.activationsER)
self.activationHistoryI += (-self.activationsI + np.sum(self.activationsI)/np.sum(self.envelopeI))*self.dt
self.activationHistoryEL += (-self.activationsEL + np.sum(self.activationsEL)/np.sum(self.envelopeE))*self.dt
self.activationHistoryER += (-self.activationsER + np.sum(self.activationsER)/np.sum(self.envelopeE))*self.dt
self.activationHistoryI -= self.dt*self.activationHistoryI/self.alpha
self.activationHistoryEL -= self.dt*self.activationHistoryEL/self.alpha
self.activationHistoryER -= self.dt*self.activationHistoryER/self.alpha | Do one update of the CAN network, of length self.dt.
:param feedforwardInputI: The feedforward input to inhibitory cells.
:param feedforwardInputR: The feedforward input to excitatory cells.
:param placeActivity: Activity of the place code.
:param v: The current velocity.
:param recurrent: Whether or not recurrent connections should be used.
:param envelope: Whether or not an envelope should be applied.
:param iSpeedTuning: Whether or not inhibitory cells should also have their
activations partially depend on current movement speed. This is
necessary for periodic training, serving a role similar to that of
the envelope.
:param enforceDale: Whether or not Dale's law should be enforced locally. Helps
with training with recurrent weights active, but can slow down training. |
15,573 | def run_script(self,
script,
shutit_pexpect_child=None,
in_shell=True,
echo=None,
note=None,
loglevel=logging.DEBUG):
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.run_script(script,
in_shell=in_shell,
echo=echo,
note=note,
loglevel=loglevel) | Run the passed-in string as a script on the target's command line.
@param script: String representing the script. It will be de-indented
and stripped before being run.
@param shutit_pexpect_child: See send()
@param in_shell: Indicate whether we are in a shell or not. (Default: True)
@param note: See send()
@type script: string
@type in_shell: boolean |
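A hedged usage sketch (assumes an initialised ShutIt object named shutit; the script text is illustrative):
shutit.run_script('''
    #!/bin/bash
    set -e
    echo "hello from the target"
''', note='Sanity-check the target shell')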
15,574 | def parse(self, data, path=None):
assert not self.exhausted, 'Parser has already consumed its input; create a new parser to parse again.'
self.path = path
parsed_data = self.yacc.parse(data, lexer=self.lexer, debug=self.debug)
for err_msg, lineno in self.lexer.errors[::-1]:
self.errors.insert(0, (err_msg, lineno, self.path))
parsed_data.extend(self.anony_defs)
self.exhausted = True
return parsed_data | Args:
data (str): Raw specification text.
path (Optional[str]): Path to specification on filesystem. Only
used to tag tokens with the file they originated from. |
15,575 | def swap_dims(self, dims_dict, inplace=None):
inplace = _check_inplace(inplace)
for k, v in dims_dict.items():
if k not in self.dims:
raise ValueError('cannot swap from dimension %r because it is '
'not an existing dimension' % k)
if self.variables[v].dims != (k,):
raise ValueError('replacement dimension %r is not a 1D '
'variable along the old dimension %r' % (v, k))
result_dims = set(dims_dict.get(dim, dim) for dim in self.dims)
coord_names = self._coord_names.copy()
coord_names.update(dims_dict.values())
variables = OrderedDict()
indexes = OrderedDict()
for k, v in self.variables.items():
dims = tuple(dims_dict.get(dim, dim) for dim in v.dims)
if k in result_dims:
var = v.to_index_variable()
if k in self.indexes:
indexes[k] = self.indexes[k]
else:
indexes[k] = var.to_index()
else:
var = v.to_base_variable()
var.dims = dims
variables[k] = var
return self._replace_with_new_dims(variables, coord_names,
indexes=indexes, inplace=inplace) | Returns a new object with swapped dimensions.
Parameters
----------
dims_dict : dict-like
Dictionary whose keys are current dimension names and whose values
are new names. Each value must already be a variable in the
dataset.
inplace : bool, optional
If True, swap dimensions in-place. Otherwise, return a new dataset
object.
Returns
-------
renamed : Dataset
Dataset with swapped dimensions.
See Also
--------
Dataset.rename
DataArray.swap_dims |
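For example, swapping the dimension 'x' for the one-dimensional coordinate 'y' (the toy dataset is illustrative):
import xarray as xr

ds = xr.Dataset({'a': ('x', [5, 7])}, coords={'y': ('x', [0.1, 0.2])})
swapped = ds.swap_dims({'x': 'y'})
print(swapped.a.dims)   # ('y',) -- 'y' is now the dimension coordinate of 'a'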
15,576 | def get_record_params(args):
name, rtype, content, ttl, priority = (
args.name, args.rtype, args.content, args.ttl, args.priority)
return name, rtype, content, ttl, priority | Get record parameters from command options.
Argument:
args: arguments object |
15,577 | def render_json_response(self, context_dict, status=200):
json_context = json.dumps(
context_dict,
cls=DjangoJSONEncoder,
**self.get_json_dumps_kwargs()
).encode(u'utf-8')
return HttpResponse(
json_context,
content_type=self.get_content_type(),
status=status
) | Limited serialization for shipping plain data. Do not use for models
or other complex or custom objects. |
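A hedged sketch of how this method is typically used in a Django view; JSONResponseMixin stands for the (assumed) class that defines render_json_response, and the view and payload are illustrative:
from django.views.generic import View

class StatusView(JSONResponseMixin, View):
    def get(self, request, *args, **kwargs):
        # Plain data only: dicts, lists, numbers, strings, dates via DjangoJSONEncoder.
        return self.render_json_response({'status': 'ok', 'count': 3})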
15,578 | def do_gen(argdict):
site = make_site_obj(argdict)
try:
st = time.time()
site.generate()
et = time.time()
print "Generated Site in %f seconds." % (et-st)
except ValueError as e:
print "Cannot generate. You are not within a simplystatic \
tree and you didn't specify a directory." | Generate the whole site. |
15,579 | def template_substitute(text, **kwargs):
for name, value in kwargs.items():
placeholder_pattern = "{%s}" % name
if placeholder_pattern in text:
text = text.replace(placeholder_pattern, value)
return text | Replace placeholders in text by using the data mapping.
Other placeholders that are not represented in the data are left untouched.
:param text: Text to search and replace placeholders.
:param kwargs: Data mapping/dict of placeholder keys and values.
:return: Potentially modified text with replaced placeholders. |
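A small usage sketch, assuming the function above is importable:
text = "Hello {name}, see you at {place}."
print(template_substitute(text, name="Ada"))
# "Hello Ada, see you at {place}."  -- {place} has no mapping, so it stays untouched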
15,580 | def copy_from(self,
container: Container,
fn_container: str,
fn_host: str
) -> None:
logger.debug("Copying file from container, %s: %s -> %s",
container.uid, fn_container, fn_host)
cmd = "docker cp '{}:{}' '{}'".format(container.id, fn_container, fn_host)
try:
subprocess.check_output(cmd, shell=True)
logger.debug("Copied file from container, %s: %s -> %s",
container.uid, fn_container, fn_host)
except subprocess.CalledProcessError:
logger.exception("Failed to copy file from container, %s: %s -> %s",
container.uid, fn_container, fn_host)
raise | Copies a given file from the container to a specified location on the
host machine. |
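A hedged usage sketch; the manager object (mgr), the container handle, and both paths are hypothetical:
mgr.copy_from(container,
              fn_container='/experiment/results.json',
              fn_host='/tmp/results.json')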
15,581 | def todate(val):
if not val:
raise ValueError("Value not provided")
if isinstance(val, datetime):
return val.date()
elif isinstance(val, date):
return val
else:
try:
ival = int(val)
sval = str(ival)
if len(sval) == 8:
return yyyymmdd2date(val)
elif len(sval) == 5:
return juldate2date(val)
else:
raise ValueError
except Exception:
try:
return date_from_string(val).date()
except Exception:
raise ValueError("Could not convert %s to date" % val) | Convert val to a datetime.date instance by trying several
conversion algorithms.
If all of them fail, it raises a ValueError exception. |
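A usage sketch; the behaviour of the helper parsers (yyyymmdd2date, juldate2date, date_from_string) is assumed from the branches above:
from datetime import date, datetime

print(todate(date(2020, 1, 31)))              # datetime.date(2020, 1, 31)
print(todate(datetime(2020, 1, 31, 12, 0)))   # datetime.date(2020, 1, 31)
print(todate(20200131))                       # 8-digit int, handled by yyyymmdd2date
print(todate("31 Jan 2020"))                  # falls through to date_from_string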
15,582 | def fetchGroupInfo(self, *group_ids):
threads = self.fetchThreadInfo(*group_ids)
groups = {}
for id_, thread in threads.items():
if thread.type == ThreadType.GROUP:
groups[id_] = thread
else:
raise FBchatUserError("Thread {} was not a group".format(thread))
return groups | Get groups' info from IDs, unordered
:param group_ids: One or more group ID(s) to query
:return: :class:`models.Group` objects, labeled by their ID
:rtype: dict
:raises: FBchatException if request failed |
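A hedged sketch (assumes a logged-in fbchat Client instance named client; the group IDs are made up):
groups = client.fetchGroupInfo('1234567890', '9876543210')
for gid, group in groups.items():
    print(gid, group.name)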
15,583 | def request(self, path,
args=[], files=[], opts={}, stream=False,
decoder=None, headers={}, data=None):
url = self.base + path
params = []
params.append(('stream-channels', 'true'))
for opt in opts.items():
params.append(opt)
for arg in args:
params.append(('arg', arg))
method = 'post' if (files or data) else 'get'
parser = encoding.get_encoding(decoder if decoder else "none")
return self._request(method, url, params, parser, stream,
files, headers, data) | Makes an HTTP request to the IPFS daemon.
This function returns the contents of the HTTP response from the IPFS
daemon.
Raises
------
~ipfsapi.exceptions.ErrorResponse
~ipfsapi.exceptions.ConnectionError
~ipfsapi.exceptions.ProtocolError
~ipfsapi.exceptions.StatusError
~ipfsapi.exceptions.TimeoutError
Parameters
----------
path : str
The REST command path to send
args : list
Positional parameters to be sent along with the HTTP request
files : :class:`io.RawIOBase` | :obj:`str` | :obj:`list`
The file object(s) or path(s) to stream to the daemon
opts : dict
Query string parameters to be sent along with the HTTP request
decoder : str
The encoder to use to parse the HTTP response
kwargs : dict
Additional arguments to pass to :mod:`requests` |
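A hedged sketch of calling this low-level helper directly (client is an instance of the HTTP client class; the path and hash are illustrative, and the default decoder returns raw bytes):
raw = client.request('/block/get', args=['QmSomeHashValue'])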
15,584 | def randomSize(cls, widthLimits, heightLimits, origin=None):
r = cls(0, 0, origin)
r.w = random.randint(widthLimits[0], widthLimits[1])
r.h = random.randint(heightLimits[0], heightLimits[1])
return r | :param: widthLimits - iterable of integers with length >= 2
:param: heightLimits - iterable of integers with length >= 2
:param: origin - optional Point subclass
:return: Rectangle |
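A hedged usage sketch (Rectangle stands for the class providing this classmethod; an optional Point-like origin could also be passed):
r = Rectangle.randomSize((10, 50), (5, 20))
print(r.w, r.h)   # width in [10, 50], height in [5, 20]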
15,585 | def load(self, **kwargs):
kwargs[] = True
kwargs = self._mutate_name(kwargs)
return self._load(**kwargs) | Loads a given resource
Loads a given resource provided a 'name' and an optional 'slot'
parameter. The 'slot' parameter is not a required load parameter
because it is provided as an optional way of constructing the
correct 'name' of the vCMP resource.
:param kwargs:
:return: |
15,586 | def count_generator(generator, memory_efficient=True):
if memory_efficient:
counter = 0
for _ in generator:
counter += 1
return counter
else:
return len(list(generator)) | Count the number of items in a generator.
memory_efficient=True: roughly 3 times slower, but uses constant memory.
memory_efficient=False: faster, but costs more memory (materializes the whole generator as a list). |
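A usage sketch, assuming the function above is importable:
squares = (x * x for x in range(10000))
print(count_generator(squares))                                   # 10000, constant memory
print(count_generator(iter(range(10)), memory_efficient=False))   # 10, builds a full list first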
15,587 | def parse(self, argv, tokenizer=DefaultTokenizer):
args = tokenizer.tokenize(argv)
_lang = tokenizer.language_definition()
pass | Parse the command line into an output tree.
:type argv: object
:type tokenizer: AbstractTokenizer |
15,588 | def ready(self):
self.log('Found', len(schemastore), 'schemata and',
len(configschemastore), 'config schemata', lvl=debug) | Sets up the application after startup. |
15,589 | def listunion(ListOfLists):
u = []
for s in ListOfLists:
if s is not None:
u.extend(s)
return u | Take the union of a list of lists.
Take a Python list of Python lists::
[[l11,l12, ...], [l21,l22, ...], ... , [ln1, ln2, ...]]
and return the aggregated list::
[l11,l12, ..., l21, l22 , ...]
For a list of two lists, e.g. `[a, b]`, this is like::
a.extend(b)
**Parameters**
**ListOfLists** : Python list
Python list of Python lists.
**Returns**
**u** : Python list
Python list created by taking the union of the
lists in `ListOfLists`. |
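A usage sketch, assuming the function above is importable:
print(listunion([[1, 2], None, [3], [4, 5]]))   # [1, 2, 3, 4, 5]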
15,590 | def is_blocking_notifications(self, notification_period, hosts, services, n_type, t_wished):
logger.debug("Checking if a service %s (%s) notification is blocked...",
self.get_full_name(), self.state)
host = hosts[self.host]
if t_wished is None:
t_wished = time.time()
if not self.enable_notifications or \
not self.notifications_enabled or \
u'n' in self.notification_options:
logger.debug("Service: %s, notification %s sending is blocked by configuration",
self.get_name(), n_type)
return True
if notification_period is not None and not notification_period.is_time_valid(t_wished):
logger.debug("Service: %s, notification %s sending is blocked by globals",
self.get_name(), n_type)
return True
if n_type in (u'PROBLEM', u'RECOVERY') and (
self.state == u'UNKNOWN' and u'u' not in self.notification_options or
self.state == u'WARNING' and u'w' not in self.notification_options or
self.state == u'CRITICAL' and u'c' not in self.notification_options or
self.state == u'OK' and u'r' not in self.notification_options or
self.state == u'UNREACHABLE' and u'x' not in self.notification_options):
logger.debug("Service: %s, notification %s sending is blocked by options: %s",
self.get_name(), n_type, self.notification_options)
return True
if (n_type in [u'FLAPPINGSTART', u'FLAPPINGSTOP', u'FLAPPINGDISABLED'] and
u'f' not in self.notification_options):
logger.debug("Service: %s, notification %s sending is blocked by options: %s",
n_type, self.get_full_name(), self.notification_options)
return True
if (n_type in [u'DOWNTIMESTART', u'DOWNTIMEEND', u'DOWNTIMECANCELLED'] and
u's' not in self.notification_options):
logger.debug("Service: %s, notification %s sending is blocked by options: %s",
n_type, self.get_full_name(), self.notification_options)
return True
if n_type in [u'ACKNOWLEDGEMENT'] and self.state == self.ok_up:
logger.debug("Host: %s, notification %s sending is blocked by current state",
self.get_name(), n_type)
return True
if host.scheduled_downtime_depth > 0:
logger.debug("Service: %s, notification %s sending is blocked by downtime",
self.get_name(), n_type)
return True
if self.scheduled_downtime_depth > 1 and n_type not in (u'DOWNTIMEEND',
u'DOWNTIMECANCELLED'):
logger.debug("Service: %s, notification %s sending is blocked by deep downtime",
self.get_name(), n_type)
return True
if self.scheduled_downtime_depth > 0 and n_type in \
[u'PROBLEM', u'RECOVERY', u'ACKNOWLEDGEMENT',
u'FLAPPINGSTART', u'FLAPPINGSTOP', u'FLAPPINGDISABLED']:
logger.debug("Service: %s, notification %s sending is blocked by downtime",
self.get_name(), n_type)
return True
if self.state_type == u'SOFT' and n_type == u'PROBLEM' or \
self.problem_has_been_acknowledged and n_type != u'ACKNOWLEDGEMENT' or \
self.is_flapping and n_type not in [u'FLAPPINGSTART',
u'FLAPPINGSTOP',
u'FLAPPINGDISABLED'] or \
host.state != host.ok_up:
logger.debug("Service: %s, notification %s sending is blocked by soft state, "
"acknowledgement, flapping or host DOWN", self.get_name(), n_type)
return True
if self.got_business_rule is True \
and self.business_rule_smart_notifications is True \
and self.business_rule_notification_is_blocked(hosts, services) is True \
and n_type == u'PROBLEM':
logger.debug("Service: %s, notification %s sending is blocked by business rules",
self.get_name(), n_type)
return True
logger.debug("Service: %s, notification %s sending is not blocked", self.get_name(), n_type)
return False | Check if a notification is blocked by the service.
Conditions are ONE of the following::
* enable_notification is False (global)
* not in a notification_period
* notifications_enable is False (local)
* notification_options is 'n' or matches the state ('UNKNOWN' <=> 'u' ...)
(include flapping and downtimes)
* state goes ok and type is 'ACKNOWLEDGEMENT' (no sense)
* scheduled_downtime_depth > 0 and flapping (host is in downtime)
* scheduled_downtime_depth > 1 and not downtime end (deep downtime)
* scheduled_downtime_depth > 0 and problem or recovery (host is in downtime)
* SOFT state of a problem (we raise notification ony on HARD state)
* ACK notification when already ACK (don't raise again ACK)
* not flapping notification in a flapping state
* business rule smart notifications is enabled and all its children have been acknowledged
or are under downtime
* linked host is not up
* linked host is in downtime
:param n_type: notification type
:type n_type:
:param t_wished: the time we should like to notify the host (mostly now)
:type t_wished: float
:return: True if ONE of the above condition was met, otherwise False
:rtype: bool
TODO: Refactor this, a lot of code duplication with Host.is_blocking_notifications |
15,591 | def traverse_tree_recursive(odb, tree_sha, path_prefix):
entries = []
data = tree_entries_from_data(odb.stream(tree_sha).read())
for sha, mode, name in data:
if S_ISDIR(mode):
entries.extend(traverse_tree_recursive(odb, sha, path_prefix + name + '/'))
else:
entries.append((sha, mode, path_prefix + name))
return entries | :return: list of entries of the tree pointed to by the binary tree_sha. An entry
has the following format:
* [0] 20 byte sha
* [1] mode as int
* [2] path relative to the repository
:param path_prefix: prefix to prepend to the front of all returned paths |
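A hedged usage sketch (assumes GitPython, whose object database exposes the stream() call used above): list every blob reachable from HEAD's tree.
import git

repo = git.Repo('.')
for sha, mode, path in traverse_tree_recursive(repo.odb, repo.head.commit.tree.binsha, ''):
    print(oct(mode), path)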
15,592 | def collect(self):
if pymongo is None:
self.log.error('Unable to import pymongo')
return
if 'host' in self.config:
self.config['hosts'] = [self.config['host']]
if self.config['network_timeout']:
self.config['network_timeout'] = int(
self.config['network_timeout'])
if 'user' in self.config:
user = self.config['user']
else:
user = None
if 'passwd' in self.config:
passwd = self.config['passwd']
else:
passwd = None
for host in self.config['hosts']:
if len(self.config['hosts']) == 1:
base_prefix = []
else:
matches = re.search('((.+)\@)?(.+)?', host)
alias = matches.group(2)
host = matches.group(3)
if alias is None:
base_prefix = [re.sub('[:\.]', '_', host)]
else:
base_prefix = [alias]
try:
if ReadPreference is None:
conn = pymongo.Connection(
host,
network_timeout=self.config['network_timeout'],
slave_okay=True
)
else:
conn = pymongo.Connection(
host,
network_timeout=self.config['network_timeout'],
read_preference=ReadPreference.SECONDARY,
)
except Exception as e:
self.log.error('Could not connect to mongodb: %s', e)
continue
if user:
try:
conn.admin.authenticate(user, passwd)
except Exception as e:
self.log.error('User auth given, but could not authenticate'
+ ' with host: %s, err: %s'
% (host, e))
return {}
serverStatus = conn.db.command('serverStatus')
engineStatus = conn.db.command('engineStatus')
data = dict(serverStatus.items() + engineStatus.items())
self._publish_transformed(data, base_prefix)
if str_to_bool(self.config['simple']):
data = self._extract_simple_data(data)
self._publish_dict_with_prefix(data, base_prefix)
db_name_filter = re.compile(self.config['databases'])
ignored_collections = re.compile(self.config['ignore_collections'])
for db_name in conn.database_names():
if not db_name_filter.search(db_name):
continue
db_stats = conn[db_name].command('dbStats')
db_prefix = base_prefix + ['databases', db_name]
self._publish_dict_with_prefix(db_stats, db_prefix)
for collection_name in conn[db_name].collection_names():
if ignored_collections.search(collection_name):
continue
collection_stats = conn[db_name].command('collStats',
collection_name)
if str_to_bool(self.config['translate_collections']):
collection_name = collection_name.replace('.', '_')
collection_prefix = db_prefix + [collection_name]
self._publish_dict_with_prefix(collection_stats,
collection_prefix) | Collect number values from db.serverStatus() and db.engineStatus() |
15,593 | def _speak_as(
self,
element,
regular_expression,
data_property_value,
operation
):
children = []
pattern = re.compile(regular_expression)
content = element.get_text_content()
while content:
matches = pattern.search(content)
if matches is not None:
index = matches.start()
children = operation(content, index, children)
new_index = index + 1
content = content[new_index:]
else:
break
if children:
if content:
children.append(self._create_content_element(
content,
data_property_value
))
while element.has_children():
element.get_first_node_child().remove_node()
for child in children:
element.append_element(child) | Execute an operation by regular expression for the element only.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
:param regular_expression: The regular expression.
:type regular_expression: str
:param data_property_value: The value of custom attribute used to
identify the fix.
:type data_property_value: str
:param operation: The operation to be executed.
:type operation: function |
15,594 | def setDevice(self, device):
print(self.pre, "setDevice :", device)
if (not device and not self.device):
return
if (self.device):
if self.device == device:
print(self.pre, "setDevice : same device")
return
if self.filterchain:
self.clearDevice()
self.device = device
self.video.setDevice(self.device)
self.filterchain = self.filterchain_group.get(_id = self.device._id)
if self.filterchain:
self.viewport.setXScreenNum(self.n_xscreen)
self.viewport.setWindowId (int(self.video.winId()))
self.filterchain.addViewPort(self.viewport) | Sets the video stream
:param device: A rather generic device class. In this case DataModel.RTSPCameraDevice. |
15,595 | def uploadFile(self, filename, ispickle=False, athome=False):
print("Uploading file {} to Redunda.".format(filename))
_, tail = os.path.split(filename)
url = "https://redunda.sobotics.org/bots/data/{}?key={}".format(tail, self.key)
header = {"Content-type": "application/octet-stream"}
filedata = ""
if athome:
filename = str(os.path.expanduser("~")) + filename
if filename.endswith(".pickle") or ispickle:
try:
with open(filename, "rb") as fileToRead:
data = pickle.load(fileToRead)
except pickle.PickleError as perr:
print("Pickling error occurred: {}".format(perr))
return
filedata = json.dumps(data)
else:
try:
with open(filename, "r") as fileToRead:
filedata = fileToRead.read()
except IOError as ioerr:
print("IOError occurred: {}".format(ioerr))
return
requestToMake = request.Request(url, data=filedata.encode("utf-8"), headers=header)
response = request.urlopen(requestToMake)
if response.code >= 400:
print("Error occurred while uploading file {} with error code {}.".format(filename, response.code)) | Uploads a single file to Redunda.
:param str filename: The name of the file to upload
:param bool ispickle: Optional variable to be set to True if the file is a pickle; default is False.
:returns: returns nothing |
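A hedged usage sketch (bot is an instance of the class that defines uploadFile and holds the Redunda key):
bot.uploadFile('./state.pickle', ispickle=True)
bot.uploadFile('/notes.txt', athome=True)   # path is resolved against the home directory first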
15,596 | def __parse_domain_to_employer_stream(self, stream):
if not stream:
return
f = self.__parse_domain_to_employer_line
for o in self.__parse_stream(stream, f):
org = o[0]
dom = o[1]
if org not in self.__raw_orgs:
self.__raw_orgs[org] = []
self.__raw_orgs[org].append(dom) | Parse domain to employer stream.
Each line of the stream has to contain a domain and an organization,
or employer, separated by tabs. Comment lines start with the hash
character (#)
Example:
# Domains from domains.txt
example.org Example
example.com Example
bitergia.com Bitergia
libresoft.es LibreSoft
example.org LibreSoft |
15,597 | def getargs():
from argparse import ArgumentParser
parser = ArgumentParser(description='Ask a question.')
parser.add_argument("question", type=str, help="A question to ask.")
return parser.parse_args() | Return arguments |
15,598 | def get_commits_since(check_name, target_tag=None):
root = get_root()
target_path = os.path.join(root, check_name)
command = 'git log --pretty=%H {}{}'.format('' if target_tag is None else '{}... '.format(target_tag), target_path)
with chdir(root):
return run_command(command, capture=True).stdout.splitlines() | Get the list of commits from `target_tag` to `HEAD` for the given check |
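A hedged usage sketch (the check name and tag are illustrative, and the command must run inside the checks repository):
shas = get_commits_since('postgres', target_tag='postgres-2.0.0')
print(len(shas), 'commits since the last release tag')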
15,599 | def register(self,flag):
super(Flags,self).__setitem__(flag.name,flag) | Register a new :class:`Flag` instance with the Flags registry. |