Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k)
---|---|---
6,200 | def memory_usage(self):
data = super(Field, self).memory_usage()
values = 0
for value in self.field_values:
values += value.memory_usage()
data[] = values
return data | Get the combined memory usage of the field data and field values. |
6,201 | def _query_nsot(url, headers, device=None):
url = urlparse.urljoin(url, )
ret = {}
if not device:
query = salt.utils.http.query(url, header_dict=headers, decode=True)
else:
url = urlparse.urljoin(url, device)
query = salt.utils.http.query(url, header_dict=headers,
decode=True)
error = query.get()
if error:
log.error("Can't get device(s) from nsot! reason: %s", error)
else:
ret = query['dict']
return ret | If a device is given, query nsot for that specific device; otherwise return
all devices
:param url: str
:param headers: dict
:param device: None or str
:return: |
6,202 | def error_perturbation(C, S):
r
if issparse(C):
warnings.warn("Error-perturbation will be dense for sparse input")
C = C.toarray()
return dense.covariance.error_perturbation(C, S) | r"""Error perturbation for given sensitivity matrix.
Parameters
----------
C : (M, M) ndarray
Count matrix
S : (M, M) ndarray or (K, M, M) ndarray
Sensitivity matrix (for scalar observable) or sensitivity
tensor for vector observable
Returns
-------
X : float or (K, K) ndarray
error-perturbation (for scalar observables) or covariance matrix
(for vector-valued observable)
Notes
-----
**Scalar observable**
The sensitivity matrix :math:`S=(s_{ij})` of a scalar observable
:math:`f(T)` is defined as
.. math:: S= \left(\left. \frac{\partial f(T)}{\partial t_{ij}} \right \rvert_{T_0} \right)
evaluated at a suitable transition matrix :math:`T_0`.
The sensitivity is the variance of the observable
.. math:: \mathbb{V}(f)=\sum_{i,j,k,l} s_{ij} \text{cov}[t_{ij}, t_{kl}] s_{kl}
**Vector valued observable**
The sensitivity tensor :math:`S=(s_{ijk})` for a vector
valued observable :math:`(f_1(T),\dots,f_K(T))` is defined as
.. math:: S= \left( \left. \frac{\partial f_i(T)}{\partial t_{jk}} \right\rvert_{T_0} \right)
evaluated at a suitable transition matrix :math:`T_0`.
The sensitivity is the covariance matrix for the observable
.. math:: \text{cov}[f_{\alpha}(T),f_{\beta}(T)] = \sum_{i,j,k,l} s_{\alpha i j}
\text{cov}[t_{ij}, t_{kl}] s_{\beta kl} |
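The scalar-observable formula above is a full tensor contraction. A minimal NumPy sketch with made-up inputs illustrates it; the real function derives the covariances of the transition-matrix elements from the count matrix C, so `S` and `cov` here are purely illustrative assumptions:

```python
import numpy as np

# Hypothetical toy inputs: S is the sensitivity matrix df/dt_ij, and cov is the
# 4-index covariance tensor cov[t_ij, t_kl].
M = 3
rng = np.random.default_rng(0)
S = rng.random((M, M))
cov = rng.random((M, M, M, M))

# V(f) = sum_{i,j,k,l} s_ij cov[t_ij, t_kl] s_kl
variance = np.einsum("ij,ijkl,kl->", S, cov, S)
print(float(variance))
```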
6,203 | async def disable(self, reason=None):
params = {"enable": True, "reason": reason}
response = await self._api.put("/v1/agent/maintenance", params=params)
return response.status == 200 | Enters maintenance mode
Parameters:
reason (str): Reason for disabling
Returns:
bool: ``True`` on success |
6,204 | def get_cookies_for_class(session, class_name,
cookies_file=None,
username=None,
password=None):
if cookies_file:
cookies = find_cookies_for_class(cookies_file, class_name)
session.cookies.update(cookies)
logging.info(, cookies_file)
else:
cookies = get_cookies_from_cache(username)
session.cookies.update(cookies)
if validate_cookies(session, class_name):
logging.info()
else:
get_authentication_cookies(session, class_name, username, password)
write_cookies_to_cache(session.cookies, username) | Get the cookies for the given class.
We do not validate the cookies if they are loaded from a cookies file
because this is intended for debugging purposes or if the coursera
authentication process has changed. |
6,205 | def register_chooser(self, chooser, **kwargs):
if not issubclass(chooser, Chooser):
return self.register_simple_chooser(chooser, **kwargs)
self.choosers[chooser.model] = chooser(**kwargs)
return chooser | Adds a model chooser definition to the registry. |
6,206 | def tz_convert(dt, to_tz, from_tz=None) -> str:
logger = logs.get_logger(tz_convert, level=)
f_tz, t_tz = get_tz(from_tz), get_tz(to_tz)
from_dt = pd.Timestamp(str(dt), tz=f_tz)
logger.debug(f)
return str(pd.Timestamp(str(from_dt), tz=t_tz)) | Convert to tz
Args:
dt: date time
to_tz: to tz
from_tz: from tz - will be ignored if tz from dt is given
Returns:
str: date & time
Examples:
>>> dt_1 = pd.Timestamp('2018-09-10 16:00', tz='Asia/Hong_Kong')
>>> tz_convert(dt_1, to_tz='NY')
'2018-09-10 04:00:00-04:00'
>>> dt_2 = pd.Timestamp('2018-01-10 16:00')
>>> tz_convert(dt_2, to_tz='HK', from_tz='NY')
'2018-01-11 05:00:00+08:00'
>>> dt_3 = '2018-09-10 15:00'
>>> tz_convert(dt_3, to_tz='NY', from_tz='JP')
'2018-09-10 02:00:00-04:00' |
6,207 | def _set_size_code(self):
if not self._op.startswith(self.SIZE):
self._size_code = None
return
if len(self._op) == len(self.SIZE):
self._size_code = self.SZ_EQ
else:
suffix = self._op[len(self.SIZE):]
self._size_code = self.SZ_MAPPING.get(suffix, None)
if self._size_code is None:
raise ValueError(.format(self.SIZE, suffix)) | Set the code for a size operation. |
6,208 | def get(self, path):
key = tuple(map(id, path))
item = self._cache.get(key, None)
if item is None:
logger.debug("Transform cache miss: %s", key)
item = [0, self._create(path)]
self._cache[key] = item
item[0] = 0
return item[1] | Get a transform from the cache that maps along *path*, which must
be a list of Transforms to apply in reverse order (last transform is
applied first).
Accessed items have their age reset to 0. |
6,209 | def doMove(self, orgresource, dstresource, dummy = 56184, stresource = , bShareFireCopy = ):
url = nurls[]
data = {: self.user_id,
: self.useridx,
: dummy,
: orgresource,
: dstresource,
: overwrite,
: bShareFireCopy,
}
r = self.session.post(url = url, data = data)
try:
j = json.loads(r.text)
except:
print
return False
return self.resultManager(r.text) | DoMove
Args:
dummy: ???
orgresource: Path for a file which you want to move
dstresource: Destination path
bShareFireCopy: ???
Returns:
True: Move success
False: Move failed |
6,210 | def home_wins(self):
try:
wins, losses = re.findall(r, self._home_record)
return int(wins)
except ValueError:
return 0 | Returns an ``int`` of the number of games the home team won after the
conclusion of the game. |
6,211 | def from_string(cls, s, name=None, modules=None, active=None):
r = cls(name=name, modules=modules, active=active)
_parse_repp(s.splitlines(), r, None)
return r | Instantiate a REPP from a string.
Args:
name (str, optional): the name of the REPP module
modules (dict, optional): a mapping from identifiers to
REPP modules
active (iterable, optional): an iterable of default module
activations |
6,212 | def magic_write(ofile, Recs, file_type):
if len(Recs) < 1:
print(.format(ofile))
return False, ""
if os.path.split(ofile)[0] != "" and not os.path.isdir(os.path.split(ofile)[0]):
os.mkdir(os.path.split(ofile)[0])
pmag_out = open(ofile, , errors="backslashreplace")
outstring = "tab \t" + file_type
outstring = outstring.strip("\n").strip(
"\r") + "\n"
return True, ofile | Parameters
----------
ofile : path to output file
Recs : list of dictionaries in MagIC format
file_type : MagIC table type (e.g., specimens)
Returns
-------
[True, False] : True if successful
ofile : same as input
Effects
-------
writes a MagIC formatted file from Recs |
6,213 | def occupied_by_sort(self, address):
idx = self._search(address)
if len(self._list) <= idx:
return None
if self._list[idx].start <= address < self._list[idx].end:
return self._list[idx].sort
if idx > 0 and address < self._list[idx - 1].end:
return self._list[idx - 1].sort
return None | Check if an address belongs to any segment, and if yes, returns the sort of the segment
:param int address: The address to check
:return: Sort of the segment that occupies this address
:rtype: str |
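The lookup relies on the segment list being sorted by start address so a binary search can locate the candidate segment. A standalone sketch of the same idea, using a hypothetical `Segment` tuple rather than the library's own class:

```python
import bisect
from collections import namedtuple

Segment = namedtuple("Segment", ["start", "end", "sort"])

# Sorted, non-overlapping segments (hypothetical data).
segments = [Segment(0x1000, 0x2000, "code"), Segment(0x3000, 0x3800, "data")]

def occupied_by_sort(address):
    # Find the last segment whose start is <= address.
    idx = bisect.bisect_right([s.start for s in segments], address) - 1
    if idx >= 0 and segments[idx].start <= address < segments[idx].end:
        return segments[idx].sort
    return None

print(occupied_by_sort(0x1500))  # 'code'
print(occupied_by_sort(0x2800))  # None (falls in a gap between segments)
```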
6,214 | def simBirth(self,which_agents):
N = np.sum(which_agents)
aNrmNow_new = drawLognormal(N,mu=self.aNrmInitMean,sigma=self.aNrmInitStd,seed=self.RNG.randint(0,2**31-1))
self.pLvlNow[which_agents] = drawLognormal(N,mu=self.pLvlInitMean,sigma=self.pLvlInitStd,seed=self.RNG.randint(0,2**31-1))
self.aLvlNow[which_agents] = aNrmNow_new*self.pLvlNow[which_agents]
self.t_age[which_agents] = 0
self.t_cycle[which_agents] = 0 | Makes new consumers for the given indices. Initialized variables include aNrm and pLvl, as
well as time variables t_age and t_cycle. Normalized assets and persistent income levels
are drawn from lognormal distributions given by aNrmInitMean and aNrmInitStd (etc).
Parameters
----------
which_agents : np.array(Bool)
Boolean array of size self.AgentCount indicating which agents should be "born".
Returns
-------
None |
6,215 | def loads(cls, s: str) -> :
try:
currency, amount = s.strip().split()
return cls(amount, currency)
except ValueError as err:
raise ValueError("failed to parse string "
" : {}".format(s, err)) | Parse from a string representation (repr) |
6,216 | def main(arguments=None):
su = tools(
arguments=arguments,
docString=__doc__,
logLevel="ERROR",
options_first=True
)
arguments, settings, log, dbConn = su.setup()
readline.set_completer_delims()
readline.parse_and_bind("tab: complete")
readline.set_completer(tab_complete)
for arg, val in arguments.iteritems():
if arg[0] == "-":
varname = arg.replace("-", "") + "Flag"
else:
varname = arg.replace("<", "").replace(">", "")
if isinstance(val, str) or isinstance(val, unicode):
exec(varname + " = " % (val,))
else:
exec(varname + " = %s" % (val,))
if arg == "--dbConn":
dbConn = val
log.debug( % (varname, val,))
startTime = times.get_now_sql_datetime()
log.info(
%
(startTime,))
if "interactiveFlag" in locals() and interactiveFlag:
moduleDirectory = os.path.dirname(__file__) + "/resources"
pathToPickleFile = "%(moduleDirectory)s/previousSettings.p" % locals()
try:
with open(pathToPickleFile):
pass
previousSettingsExist = True
except:
previousSettingsExist = False
previousSettings = {}
if previousSettingsExist:
previousSettings = pickle.load(open(pathToPickleFile, "rb"))
pickleMeObjects = []
pickleMe = {}
theseLocals = locals()
for k in pickleMeObjects:
pickleMe[k] = theseLocals[k]
pickle.dump(pickleMe, open(pathToPickleFile, "wb"))
if cone and filelist:
import codecs
pathToReadFile = pathToCoordinateList
readFile = codecs.open(pathToReadFile, encoding=, mode=)
listOfCoordinates = []
for line in readFile.readlines():
line = line.strip()
[ra, dec] = line.split()
listOfCoordinates.append(str(ra) + " " + str(dec))
search = conesearch(
log=log,
radiusArcsec=radiusArcsec,
nearestOnly=nearestFlag,
unclassified=unclassifiedFlag,
listOfCoordinates=listOfCoordinates,
outputFilePath=outPutFile,
verbose=verboseFlag,
redshift=redshiftFlag)
elif cone:
search = conesearch(
log=log,
ra=ra,
dec=dec,
radiusArcsec=radiusArcsec,
nearestOnly=nearestFlag,
unclassified=unclassifiedFlag,
outputFilePath=outPutFile,
verbose=verboseFlag,
redshift=redshiftFlag
)
elif obj:
search = namesearch(
log=log,
names=objectName,
verbose=verboseFlag,
outputFilePath=outPutFile
)
search.get()
if "dbConn" in locals() and dbConn:
dbConn.commit()
dbConn.close()
endTime = times.get_now_sql_datetime()
runningTime = times.calculate_time_difference(startTime, endTime)
log.info( %
(endTime, runningTime, ))
return | *The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command* |
6,217 | def create_response_object(self, service_id, version_number, name, status="200", response="OK", content="", request_condition=None, cache_condition=None):
body = self._formdata({
"name": name,
"status": status,
"response": response,
"content": content,
"request_condition": request_condition,
"cache_condition": cache_condition,
}, FastlyResponseObject.FIELDS)
content = self._fetch("/service/%s/version/%d/response_object" % (service_id, version_number), method="POST", body=body)
return FastlyResponseObject(self, content) | Creates a new Response Object. |
6,218 | def guessFormat(self):
c = [ord(x) for x in self.quals]
mi, ma = min(c), max(c)
r = []
for entry_format, v in iteritems(RANGES):
m1, m2 = v
if mi >= m1 and ma < m2:
r.append(entry_format)
return r | return quality score format -
might return several if ambiguous. |
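The method narrows the possible FASTQ quality encodings by checking whether every quality character's ordinal falls inside an encoding's ASCII range. A standalone sketch with illustrative ranges (the library's actual `RANGES` dict may differ):

```python
# Hypothetical ASCII ranges (min inclusive, max exclusive) for common encodings.
RANGES = {
    "sanger": (33, 75),
    "solexa": (59, 105),
    "illumina-1.8": (33, 76),
}

def guess_format(quals):
    lo, hi = min(ord(c) for c in quals), max(ord(c) for c in quals)
    # Keep every encoding whose range contains all observed quality characters.
    return [name for name, (m1, m2) in RANGES.items() if lo >= m1 and hi < m2]

print(guess_format("IIIIHHG@"))  # ambiguous: matches more than one encoding
```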
6,219 | def export_sleep_stats(self, filename, lights_off, lights_on):
epochs = self.get_epochs()
ep_starts = [i[] for i in epochs]
hypno = [i[] for i in epochs]
n_ep_per_min = 60 / self.epoch_length
first = {}
latency = {}
for stage in [, , , ]:
first[stage] = next(((i, j) for i, j in enumerate(epochs) if \
j[] == stage), None)
if first[stage] is not None:
latency[stage] = (first[stage][1][] -
lights_off) / 60
else:
first[stage] = nan
latency[stage] = nan
idx_loff = asarray([abs(x - lights_off) for x in ep_starts]).argmin()
idx_lon = asarray([abs(x - lights_on) for x in ep_starts]).argmin()
duration = {}
for stage in [, , , , , ,
]:
duration[stage] = hypno[idx_loff:idx_lon].count(
stage) / n_ep_per_min
slp_onset = sorted(first.values(), key=lambda x: x[1][])[0]
wake_up = next((len(epochs) - i, j) for i, j in enumerate(
epochs[::-1]) if j[] in [, , ,
])
total_dark_time = (lights_on - lights_off) / 60
slp_onset_lat = (slp_onset[1][] - lights_off) / 60
waso = hypno[slp_onset[0]:wake_up[0]].count() / n_ep_per_min
wake = waso + slp_onset_lat
total_slp_period = sum((waso, duration[], duration[],
duration[], duration[]))
total_slp_time = total_slp_period - waso
slp_eff = total_slp_time / total_dark_time
switch = self.switch()
slp_frag = self.slp_frag()
dt_format =
loff_str = (self.start_time + timedelta(seconds=lights_off)).strftime(
dt_format)
lon_str = (self.start_time + timedelta(seconds=lights_on)).strftime(
dt_format)
slp_onset_str = (self.start_time + timedelta(
seconds=slp_onset[1][])).strftime(dt_format)
wake_up_str = (self.start_time + timedelta(
seconds=wake_up[1][])).strftime(dt_format)
slcnrem5 = self.latency_to_consolidated(lights_off, duration=5,
stage=[, ])
slcnrem10 = self.latency_to_consolidated(lights_off, duration=10,
stage=[, ])
slcn35 = self.latency_to_consolidated(lights_off, duration=5,
stage=[])
slcn310 = self.latency_to_consolidated(lights_off, duration=10,
stage=[])
cycles = self.get_cycles() if self.get_cycles() else []
cyc_stats = []
for i, cyc in enumerate(cycles):
one_cyc = {}
cyc_hypno = [x[] for x in self.get_epochs(time=cyc)]
one_cyc[] = {}
for stage in [, , , , , ,
]:
one_cyc[][stage] = cyc_hypno.count(stage)
one_cyc[] = sum([one_cyc[][stage] for stage in [
, , , ]])
one_cyc[] = one_cyc[] + one_cyc[][]
one_cyc[] = one_cyc[] / one_cyc[]
one_cyc[] = self.switch(time=cyc)
one_cyc[] = self.slp_frag(time=cyc)
cyc_stats.append(one_cyc)
with open(filename, , newline=) as f:
lg.info( + str(filename))
cf = writer(f)
cf.writerow([.format(__version__)])
cf.writerow([, ,
, ,
, ,
])
cf.writerow([, ,
, loff_str,
, lights_off,
])
cf.writerow([, ,
, lon_str,
, lights_on,
])
cf.writerow([, ,
, slp_onset_str,
, slp_onset[1][],
])
cf.writerow([, ,
, wake_up_str,
, wake_up[1][],
])
cf.writerow([, ,
, total_dark_time * n_ep_per_min,
, total_dark_time,
])
cf.writerow([, ,
, slp_onset_lat * n_ep_per_min,
, slp_onset_lat,
])
cf.writerow([, ,
, wake * n_ep_per_min,
, wake,
])
cf.writerow([, ,
, waso * n_ep_per_min,
, waso,
])
cf.writerow([, ,
, duration[] * n_ep_per_min,
, duration[],
])
cf.writerow([, ,
, duration[] * n_ep_per_min,
, duration[],
])
cf.writerow([, ,
, duration[] * n_ep_per_min,
, duration[],
])
cf.writerow([, ,
, duration[] * n_ep_per_min,
, duration[],
])
cf.writerow([, ,
,
duration[] * n_ep_per_min,
, duration[],
])
cf.writerow([, ,
,
duration[] * n_ep_per_min,
, duration[],
])
cf.writerow([, ,
, total_slp_period * n_ep_per_min,
, total_slp_period,
])
cf.writerow([, ,
, total_slp_time * n_ep_per_min,
, total_slp_time,
])
cf.writerow([, ,
, slp_eff * 100,
, ,
])
cf.writerow([, ,
, waso * 100 / total_slp_period,
, ,
])
cf.writerow([, ,
, duration[] * 100 / total_slp_period,
, ,
])
cf.writerow([, ,
, duration[] * 100 / total_slp_period,
, ,
])
cf.writerow([, ,
, duration[] * 100 / total_slp_period,
, ,
])
cf.writerow([, ,
, duration[] * 100 / total_slp_period,
, ,
])
cf.writerow([, ,
, duration[] * 100 / total_slp_time,
, ,
])
cf.writerow([, ,
, duration[] * 100 / total_slp_time,
, ,
])
cf.writerow([, ,
, duration[] * 100 / total_slp_time,
, ,
])
cf.writerow([, ,
, duration[] * 100 / total_slp_time,
, ,
])
cf.writerow([, ,
, switch,
, ,
])
cf.writerow([, ,
,
switch * 100 / total_slp_period / n_ep_per_min,
, switch * 100 / total_slp_period,
])
cf.writerow([, ,
, slp_frag,
, ,
(
)])
cf.writerow([, ,
,
slp_frag * 100 / total_slp_time / n_ep_per_min,
, slp_frag * 100 / total_slp_time,
])
cf.writerow([, ,
, latency[] * n_ep_per_min,
, latency[],
])
cf.writerow([, ,
, latency[] * n_ep_per_min,
, latency[],
])
cf.writerow([, ,
, latency[] * n_ep_per_min,
, latency[],
])
cf.writerow([, ,
, latency[] * n_ep_per_min,
, latency[],
])
cf.writerow([,
,
, slcnrem5 * n_ep_per_min,
, slcnrem5,
(
)])
cf.writerow([,
,
, slcnrem10 * n_ep_per_min,
, slcnrem10,
(
)])
cf.writerow([, ,
, slcn35 * n_ep_per_min,
, slcn35,
(
)])
cf.writerow([, ,
, slcn310 * n_ep_per_min,
, slcn310,
(
)])
for i in range(len(cycles)):
one_cyc = cyc_stats[i]
cf.writerow([])
cf.writerow([f])
cf.writerow([, ,
, (one_cyc[] * 100 /
total_slp_period / n_ep_per_min),
, ,
])
for stage in [, , , , ,
, ]:
cf.writerow([f, ,
, one_cyc[][stage],
,
one_cyc[][stage] / n_ep_per_min,
f])
cf.writerow([f,
f,
, one_cyc[],
, one_cyc[] / n_ep_per_min,
f])
cf.writerow([f, f,
, one_cyc[],
, one_cyc[] / n_ep_per_min,
f])
cf.writerow([f, f,
, one_cyc[] * 100,
, ,
f])
for denom in [, ]:
for stage in [, , , , ]:
cf.writerow([f, ,
, (one_cyc[][stage] /
one_cyc[denom.lower()]) * 100,
, ,
f])
cf.writerow([f, ,
, one_cyc[], , ,
f])
cf.writerow([f, ,
, (one_cyc[] * 100 /
one_cyc[]),
, (one_cyc[] * 100 *
n_ep_per_min / one_cyc[]),
f])
cf.writerow([f, ,
, one_cyc[], , ,
f])
cf.writerow([f,
f,
, (one_cyc[] * 100 /
one_cyc[]),
, (one_cyc[] * 100 *
n_ep_per_min / one_cyc[]),
f])
return slp_onset_lat, waso, total_slp_time | Create CSV with sleep statistics.
Parameters
----------
filename: str
Filename for csv export
lights_off: float
Initial time when sleeper turns off the light (or their phone) to
go to sleep, in seconds from recording start
lights_on: float
Final time when sleeper rises from bed after sleep, in seconds from
recording start
Returns
-------
float or None
If there are no epochs scored as sleep, returns None. Otherwise,
returns the sleep onset latency, for testing purposes.
Note
----
Total dark time and sleep efficiency does NOT subtract epochs marked as
Undefined or Unknown. |
6,220 | def __clear_break(self, pid, address):
if type(address) not in (int, long):
unknown = True
label = address
try:
deferred = self.__deferredBP[pid]
del deferred[label]
unknown = False
except KeyError:
pass
aProcess = self.system.get_process(pid)
try:
address = aProcess.resolve_label(label)
if not address:
raise Exception()
except Exception:
if unknown:
msg = ("Can't clear unknown code breakpoint"
" at %s in process ID %d")
msg = msg % (label, pid)
warnings.warn(msg, BreakpointWarning)
return
if self.has_code_breakpoint(pid, address):
self.erase_code_breakpoint(pid, address) | Used by L{dont_break_at} and L{dont_stalk_at}.
@type pid: int
@param pid: Process global ID.
@type address: int or str
@param address:
Memory address of code instruction to break at. It can be an
integer value for the actual address or a string with a label
to be resolved. |
6,221 | async def play(self, author, text_channel, query, index=None, stop_current=False, shuffle=False):
if self.state == :
self.state =
self.prev_queue = []
await self.set_topic("")
await self.msetup(text_channel)
await self.enqueue(query, index, stop_current, shuffle)
await self.vsetup(author)
self.state = if self.mready and self.vready else
else:
await self.enqueue(query, index, stop_current, shuffle)
if self.state == :
if self.streamer is None:
await self.vplay() | The play command
Args:
author (discord.Member): The member that called the command
text_channel (discord.Channel): The channel where the command was called
query (str): The argument that was passed with the command
index (str): Whether to play next or at the end of the queue
stop_current (bool): Whether to stop the currently playing song
shuffle (bool): Whether to shuffle the queue after starting |
6,222 | def vote_cast(vote: Vote, choice_index: int, inputs: dict,
change_address: str) -> bytes:
network_params = net_query(vote.deck.network)
vote_cast_addr = vote.vote_choice_address[choice_index]
tx_fee = network_params.min_tx_fee
for utxo in inputs[]:
utxo[] = unhexlify(utxo[])
utxo[] = unhexlify(utxo[])
outputs = [
{"redeem": 0.01, "outputScript": transactions.monosig_script(vote_cast_addr)},
{"redeem": float(inputs[]) - float(tx_fee) - float(0.01),
"outputScript": transactions.monosig_script(change_address)
}]
return transactions.make_raw_transaction(inputs[], outputs) | vote cast transaction |
6,223 | def _generate_style(self):
return generate_style(self.code_styles[self._current_code_style_name],
self.ui_styles[self._current_ui_style_name]) | Create new Style instance.
(We don't want to do this on every key press, because each time the
renderer receives a new style class, it will redraw everything.) |
6,224 | def StreamingCommand(cls, usb, service, command=, timeout_ms=None):
if not isinstance(command, bytes):
command = command.encode()
connection = cls.Open(
usb, destination=b % (service, command),
timeout_ms=timeout_ms)
for data in connection.ReadUntilClose():
yield data.decode() | One complete set of USB packets for a single command.
Sends service:command in a new connection, reading the data for the
response. All the data is held in memory, large responses will be slow and
can fill up memory.
Args:
usb: USB device handle with BulkRead and BulkWrite methods.
service: The service on the device to talk to.
command: The command to send to the service.
timeout_ms: Timeout for USB packets, in milliseconds.
Raises:
InterleavedDataError: Multiple streams running over usb.
InvalidCommandError: Got an unexpected response command.
Yields:
The responses from the service. |
6,225 | def get_published_events(self, process=True) -> List[Event]:
LOG.debug(, self._pub_key)
if process:
LOG.debug()
DB.watch(self._pub_key, pipeline=True)
event_ids = DB.get_list(self._pub_key, pipeline=True)
if event_ids:
DB.delete(self._pub_key, pipeline=True)
DB.append_to_list(self._processed_key, *event_ids,
pipeline=True)
DB.execute()
else:
event_ids = DB.get_list(self._pub_key)
events = []
for event_id in event_ids[::-1]:
event_str = DB.get_hash_value(self._data_key, event_id)
event_dict = ast.literal_eval(event_str)
event_dict[] = event_id
event = Event.from_config(event_dict)
LOG.debug(, event.id, event.type)
events.append(event)
return events | Get a list of published (pending) events.
Return a list of Event objects which have been published
and are therefore pending to be processed. If the process argument
is set to true, any events returned from this method will also be
marked as processed by moving them to the processed events queue.
This method is intended to be used either to print the list of
pending published events, or also to recover from events
missed by the get() method. The latter of these use cases may be needed
for recovering when a subscriber drops out.
Args:
process (bool): If true, also move the events to the Processed
event queue.
Return:
list[Events], list of Event objects |
6,226 | def move_dir(
src_fs,
src_path,
dst_fs,
dst_path,
workers=0,
):
def src():
return manage_fs(src_fs, writeable=False)
def dst():
return manage_fs(dst_fs, create=True)
with src() as _src_fs, dst() as _dst_fs:
with _src_fs.lock(), _dst_fs.lock():
_dst_fs.makedir(dst_path, recreate=True)
copy_dir(src_fs, src_path, dst_fs, dst_path, workers=workers)
_src_fs.removetree(src_path) | Move a directory from one filesystem to another.
Arguments:
src_fs (FS or str): Source filesystem (instance or URL).
src_path (str): Path to a directory on ``src_fs``
dst_fs (FS or str): Destination filesystem (instance or URL).
dst_path (str): Path to a directory on ``dst_fs``.
workers (int): Use `worker` threads to copy data, or ``0`` (default) for
a single-threaded copy. |
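A minimal usage sketch, assuming this is `fs.move.move_dir` from pyfilesystem2; in-memory filesystems are used so it runs without touching disk:

```python
from fs import open_fs
from fs.move import move_dir

src = open_fs("mem://")
src.makedirs("data/raw")
src.writetext("data/raw/a.txt", "hello")
dst = open_fs("mem://")

# Move the whole 'data' tree into 'archive' on the destination filesystem.
move_dir(src, "data", dst, "archive")

print(src.exists("data"))      # False - the source tree was removed
print(dst.listdir("archive"))  # ['raw']
```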
6,227 | def get(self,url,
headers=None,
token=None,
data=None,
return_json=True,
default_headers=True,
quiet=False):
bot.debug("GET %s" %url)
return self._call(url,
headers=headers,
func=requests.get,
data=data,
return_json=return_json,
default_headers=default_headers,
quiet=quiet) | get will use requests to get a particular url |
6,228 | def m_c(mcmc, scale, f, alphasMZ=0.1185, loop=3):
r
if scale == mcmc:
return mcmc
_sane(scale, f)
crd = rundec.CRunDec()
alphas_mc = alpha_s(mcmc, 4, alphasMZ=alphasMZ, loop=loop)
if f == 4:
alphas_scale = alpha_s(scale, f, alphasMZ=alphasMZ, loop=loop)
return crd.mMS2mMS(mcmc, alphas_mc, alphas_scale, f, loop)
elif f == 3:
crd.nfMmu.Mth = 1.3
crd.nfMmu.muth = 1.3
crd.nfMmu.nf = 4
return crd.mH2mL(mcmc, alphas_mc, mcmc, crd.nfMmu, scale, loop)
elif f == 5:
crd.nfMmu.Mth = 4.8
crd.nfMmu.muth = 4.8
crd.nfMmu.nf = 5
return crd.mL2mH(mcmc, alphas_mc, mcmc, crd.nfMmu, scale, loop)
else:
raise ValueError("Invalid input: f={}, scale={}".format(f, scale)) | r"""Get running c quark mass in the MSbar scheme at the scale `scale`
in the theory with `f` dynamical quark flavours starting from $m_c(m_c)$ |
6,229 | def Save(self, token=None):
graph_series_by_label = {}
for active_time in self.active_days:
for label in self.categories[active_time]:
graphs_for_label = graph_series_by_label.setdefault(
label, rdf_stats.ClientGraphSeries(report_type=self._report_type))
graph = rdf_stats.Graph(title="%s day actives for %s label" %
(active_time, label))
for k, v in sorted(iteritems(self.categories[active_time][label])):
graph.Append(label=k, y_value=v)
graphs_for_label.graphs.Append(graph)
for label, graph_series in iteritems(graph_series_by_label):
client_report_utils.WriteGraphSeries(graph_series, label, token=token) | Generate a histogram object and store in the specified attribute. |
6,230 | def add_cli_to_bel(main: click.Group) -> click.Group:
@main.command()
@click.option(, , type=click.File(), default=sys.stdout)
@click.option(, , default=, show_default=True, help=)
@click.pass_obj
def write(manager: BELManagerMixin, output: TextIO, fmt: str):
graph = manager.to_bel()
graph.serialize(file=output, fmt=fmt)
click.echo(graph.summary_str())
return main | Add several command to main :mod:`click` function related to export to BEL. |
6,231 | async def update(self):
keys = self.extras.keys()
self.extras = {}
for key in keys:
try:
func = getattr(self, key, None)
if callable(func):
func()
except:
pass | reload all cached information
|coro|
Notes
-----
This is a slow process, and will remove the cache before updating.
Thus it is recommended to use the `*_force` properties, which will
only update the cache after data is retrieved. |
6,232 | def find_central_module(self):
mf = ModuleFinder(self.file_opener)
candidates = mf.find_by_any_method()
sub_modules = []
root_modules = []
for candidate in candidates:
if "." in candidate:
sub_modules.append(candidate)
else:
root_modules.append(candidate)
candidates = root_modules
candidates = self.remove_likely_non_central(candidates)
if len(candidates) == 1:
return candidates[0]
if self.package_name:
if self.package_name in candidates:
return self.package_name
if self.package_name:
if self.package_name.replace("-", "_") in candidates:
return self.package_name.replace("-", "_")
if self.package_name:
if self.package_name.replace("-", "") in candidates:
return self.package_name.replace("-", "")
if self.package_name:
if self.package_name.replace("_", "") in candidates:
return self.package_name.replace("_", "")
return None | Get the module that is the sole module, or the module
that matches the package name/version
:return: |
6,233 | def _clones(self):
vbox = VirtualBox()
machines = []
for machine in vbox.machines:
if machine.name == self.machine_name:
continue
if machine.name.startswith(self.machine_name):
machines.append(machine)
return machines | Return all clone machines under this pool |
6,234 | def _get_prepare_env(self, script, job_descriptor, inputs, outputs, mounts):
docker_paths = sorted([
var.docker_path if var.recursive else os.path.dirname(var.docker_path)
for var in inputs | outputs | mounts
if var.value
])
env = {
_SCRIPT_VARNAME: repr(script.value),
_META_YAML_VARNAME: repr(job_descriptor.to_yaml()),
: str(len(docker_paths))
}
for idx, path in enumerate(docker_paths):
env[.format(idx)] = os.path.join(providers_util.DATA_MOUNT_POINT,
path)
return env | Return a dict with variables for the 'prepare' action. |
6,235 | def update(self, friendly_name=values.unset, target_workers=values.unset,
reservation_activity_sid=values.unset,
assignment_activity_sid=values.unset,
max_reserved_workers=values.unset, task_order=values.unset):
return self._proxy.update(
friendly_name=friendly_name,
target_workers=target_workers,
reservation_activity_sid=reservation_activity_sid,
assignment_activity_sid=assignment_activity_sid,
max_reserved_workers=max_reserved_workers,
task_order=task_order,
) | Update the TaskQueueInstance
:param unicode friendly_name: Human readable description of this TaskQueue
:param unicode target_workers: A string describing the Worker selection criteria for any Tasks that enter this TaskQueue.
:param unicode reservation_activity_sid: ActivitySID that will be assigned to Workers when they are reserved for a task from this TaskQueue.
:param unicode assignment_activity_sid: ActivitySID that will be assigned to Workers when they are assigned a task from this TaskQueue.
:param unicode max_reserved_workers: The maximum amount of workers to create reservations for the assignment of a task while in this queue.
:param TaskQueueInstance.TaskOrder task_order: TaskOrder will determine which order the Tasks will be assigned to Workers.
:returns: Updated TaskQueueInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueueInstance |
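A hedged usage sketch with the twilio-python client; the account credentials, workspace SID, and queue SID below are placeholders:

```python
from twilio.rest import Client

# Placeholder credentials and SIDs - substitute real values.
client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")

task_queue = client.taskrouter \
    .workspaces("WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
    .task_queues("WQXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
    .update(friendly_name="English Support",
            target_workers='languages HAS "en"')

print(task_queue.friendly_name)
```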
6,236 | def get_player(self, name=None, platform=None, uid=None):
results = yield from self.get_players(name=name, platform=platform, uid=uid)
return results[0] | |coro|
Calls get_players and returns the first element,
exactly one of uid and name must be given, platform must be given
Parameters
----------
name : str
the name of the player you're searching for
platform : str
the name of the platform you're searching on (See :class:`Platforms`)
uid : str
the uid of the player you're searching for
Returns
-------
:class:`Player`
player found |
6,237 | def get(self, **options):
sub_query = self.with_limit(1)
options = QueryOptions(sub_query).replace(batch_size=1)
for result in sub_query.run(**options):
return result
return None | Run this query and get the first result.
Parameters:
\**options(QueryOptions, optional)
Returns:
Model: An entity or None if there were no results. |
6,238 | def get_notifications(self, all=github.GithubObject.NotSet, participating=github.GithubObject.NotSet, since=github.GithubObject.NotSet, before=github.GithubObject.NotSet):
assert all is github.GithubObject.NotSet or isinstance(all, bool), all
assert participating is github.GithubObject.NotSet or isinstance(participating, bool), participating
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
assert before is github.GithubObject.NotSet or isinstance(before, datetime.datetime), before
params = dict()
if all is not github.GithubObject.NotSet:
params["all"] = all
if participating is not github.GithubObject.NotSet:
params["participating"] = participating
if since is not github.GithubObject.NotSet:
params["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
if before is not github.GithubObject.NotSet:
params["before"] = before.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
github.Notification.Notification,
self._requester,
"/notifications",
params
) | :calls: `GET /notifications <http://developer.github.com/v3/activity/notifications>`_
:param all: bool
:param participating: bool
:param since: datetime.datetime
:param before: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Notification.Notification` |
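Assuming this is PyGithub's `AuthenticatedUser.get_notifications`, a typical call looks like:

```python
from github import Github

# Placeholder token - substitute a real personal access token.
gh = Github("ghp_your_token_here")
user = gh.get_user()

# Only notifications where the user is directly participating.
for notification in user.get_notifications(participating=True):
    print(notification.repository.full_name, "-", notification.subject.title)
```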
6,239 | def graft(func=None, *, namespace=None):
if not func:
return functools.partial(graft, namespace=namespace)
if isinstance(func, Graft):
return func
return Graft(func, namespace=namespace) | Decorator for marking a function as a graft.
Parameters:
namespace (str): namespace of data, same format as targeting.
Returns:
Graft
For example, these grafts::
@graft
def foo_data():
    return {'foo': True}
@graft(namespace='bar')
def bar_data():
    return False
will be rendered as::
{
'foo': True,
'bar': False
} |
6,240 | def create_time_from_text(text):
text = text.replace(, )
if not re.match(, text):
raise ValueError("Time must be numeric")
minutes = int(text[-2:])
hours = int(text[0:2] if len(text) > 3 else text[0])
return datetime.time(hours, minutes) | Parse a time in the form ``hh:mm`` or ``hhmm`` (or even ``hmm``) and return a :class:`datetime.time` object. If no
valid time can be extracted from the given string, :exc:`ValueError` will be raised. |
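The string literals in the row above were lost in extraction; a self-contained sketch of the documented behaviour (the exact regex is an assumption) is:

```python
import datetime
import re

def create_time_from_text(text):
    # Drop an optional colon, then require 3 or 4 digits (formats hmm, hhmm, h:mm, hh:mm).
    text = text.replace(":", "")
    if not re.match(r"^\d{3,4}$", text):
        raise ValueError("Time must be numeric")
    minutes = int(text[-2:])
    hours = int(text[0:2] if len(text) > 3 else text[0])
    return datetime.time(hours, minutes)

print(create_time_from_text("8:05"))  # 08:05:00
print(create_time_from_text("1430"))  # 14:30:00
```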
6,241 | def process(self, candidates):
high_score_candidates = [c for c in candidates if c.score >= self.min_score]
if high_score_candidates != []:
return high_score_candidates
return candidates | :arg list candidates: list of Candidates
:returns: list of Candidates where score is at least min_score,
if and only if one or more Candidates have at least min_score.
Otherwise, returns original list of Candidates. |
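The behaviour is a filter with a fallback: keep only candidates at or above `min_score`, unless none qualify, in which case the original list is returned unchanged. A tiny standalone sketch (the `Candidate` tuple is hypothetical):

```python
from collections import namedtuple

Candidate = namedtuple("Candidate", ["name", "score"])

def filter_by_min_score(candidates, min_score):
    high = [c for c in candidates if c.score >= min_score]
    # Fall back to the unfiltered list when nothing clears the threshold.
    return high if high else candidates

cands = [Candidate("a", 0.2), Candidate("b", 0.9)]
print(filter_by_min_score(cands, 0.5))   # only 'b'
print(filter_by_min_score(cands, 0.95))  # nothing qualifies -> original list
```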
6,242 | def _line_iter(self, in_handle):
reader = csv.reader(in_handle, dialect="excel-tab")
for line in reader:
if len(line) > 0 and line[0]:
if line[0].upper() == line[0] and "".join(line[1:]) == "":
line = [line[0]]
yield line | Read tab delimited file, handling ISA-Tab special case headers. |
6,243 | def to_graphviz(booster, fmap=, num_trees=0, rankdir=,
yes_color=, no_color=,
condition_node_params=None, leaf_node_params=None, **kwargs):
if condition_node_params is None:
condition_node_params = {}
if leaf_node_params is None:
leaf_node_params = {}
try:
from graphviz import Digraph
except ImportError:
raise ImportError()
if not isinstance(booster, (Booster, XGBModel)):
raise ValueError()
if isinstance(booster, XGBModel):
booster = booster.get_booster()
tree = booster.get_dump(fmap=fmap)[num_trees]
tree = tree.split()
kwargs = kwargs.copy()
kwargs.update({: rankdir})
graph = Digraph(graph_attr=kwargs)
for i, text in enumerate(tree):
if text[0].isdigit():
node = _parse_node(
graph, text, condition_node_params=condition_node_params,
leaf_node_params=leaf_node_params)
else:
if i == 0:
raise ValueError()
_parse_edge(graph, node, text, yes_color=yes_color,
no_color=no_color)
return graph | Convert the specified tree to a graphviz instance. IPython can automatically plot the
returned graphviz instance. Otherwise, you should call the .render() method
of the returned graphviz instance.
Parameters
----------
booster : Booster, XGBModel
Booster or XGBModel instance
fmap: str (optional)
The name of feature map file
num_trees : int, default 0
Specify the ordinal number of target tree
rankdir : str, default "UT"
Passed to graphiz via graph_attr
yes_color : str, default '#0000FF'
Edge color when meets the node condition.
no_color : str, default '#FF0000'
Edge color when doesn't meet the node condition.
condition_node_params : dict (optional)
condition node configuration,
{'shape':'box',
'style':'filled,rounded',
'fillcolor':'#78bceb'
}
leaf_node_params : dict (optional)
leaf node configuration
{'shape':'box',
'style':'filled',
'fillcolor':'#e48038'
}
kwargs :
Other keywords passed to graphviz graph_attr
Returns
-------
ax : matplotlib Axes |
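A minimal end-to-end usage example for xgboost's `to_graphviz` (requires the graphviz Python package and binaries; the dataset is synthetic):

```python
import numpy as np
import xgboost as xgb

# Tiny synthetic binary-classification problem.
rng = np.random.default_rng(0)
X = rng.random((50, 3))
y = (X[:, 0] > 0.5).astype(int)

booster = xgb.train({"max_depth": 2, "objective": "binary:logistic"},
                    xgb.DMatrix(X, label=y), num_boost_round=3)

graph = xgb.to_graphviz(booster, num_trees=0, rankdir="LR")
graph.render("tree0")  # writes tree0.pdf (or display `graph` directly in a notebook)
```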
6,244 | def _list_records_in_zone(self, zone, rdtype=None, name=None, content=None):
records = []
rrsets = zone.iterate_rdatasets() if zone else []
for rname, rdataset in rrsets:
rtype = dns.rdatatype.to_text(rdataset.rdtype)
if ((not rdtype or rdtype == rtype)
and (not name or name == rname.to_text())):
for rdata in rdataset:
rdata = rdata.to_text()
if not content or self._convert_content(rtype, content) == rdata:
raw_rdata = self._clean_TXT_record({: rtype,
: rdata})[]
data = {
: rtype,
: rname.to_text(True),
: int(rdataset.ttl),
: raw_rdata,
: Provider._create_identifier(rtype, rname.to_text(), raw_rdata)
}
records.append(data)
return records | Iterates over all records of the zone and returns a list of records filtered
by record type, name and content. The list is empty if no records found. |
6,245 | def main(argv=None):
ap = argparse.ArgumentParser(
description=,
)
ap.add_argument(, action=, help=)
ap.add_argument(, metavar=, help=)
args = ap.parse_args(argv)
mgr = Layouts()
if args.list:
for name in mgr.list_layouts():
print(name)
if args.get is not None:
layout = mgr.get_layout(args.get)
print(json.dumps(layout.json())) | Main entry-point for calling layouts directly as a program. |
6,246 | def get(self):
if not PyFunceble.CONFIGURATION["local"]:
if self.domain_extension not in self.ignored_extension:
referer = None
if self.domain_extension in PyFunceble.INTERN["iana_db"]:
if not PyFunceble.CONFIGURATION["no_whois"]:
referer = PyFunceble.INTERN["iana_db"][self.domain_extension]
if not referer:
Logs().referer_not_found(self.domain_extension)
return None
return referer
return None
return False
return None
return None | Return the referer aka the WHOIS server of the current domain extension. |
6,247 | def warn( callingClass, astr_key, astr_extraMsg="" ):
b_exitToOS = False
report( callingClass, astr_key, b_exitToOS, astr_extraMsg ) | Convenience dispatcher to the error_exit() method.
Will raise "warning" error, i.e. script processing continues. |
6,248 | def _reproject(self, eopatch, src_raster):
height, width = src_raster.shape
dst_raster = np.ones((height, width), dtype=self.raster_dtype)
src_bbox = transform_bbox(eopatch.bbox, CRS.POP_WEB)
src_transform = rasterio.transform.from_bounds(*src_bbox, width=width, height=height)
dst_bbox = eopatch.bbox
dst_transform = rasterio.transform.from_bounds(*dst_bbox, width=width, height=height)
rasterio.warp.reproject(src_raster, dst_raster,
src_transform=src_transform, src_crs={: CRS.ogc_string(CRS.POP_WEB)},
src_nodata=0,
dst_transform=dst_transform, dst_crs={: CRS.ogc_string(eopatch.bbox.crs)},
dst_nodata=self.no_data_val)
return dst_raster | Reprojects the raster data from Geopedia's CRS (POP_WEB) to EOPatch's CRS. |
6,249 | def validate_permission(self, key, permission):
if permission.perm_name not in self.__possible_permissions__:
raise AssertionError(
"perm_name is not one of {}".format(self.__possible_permissions__)
)
return permission | Validate that the group can be assigned this permission |
6,250 | def list_templates(self, extensions=None, filter_func=None):
x = self.loader.list_templates()
if extensions is not None:
if filter_func is not None:
raise TypeError('either extensions or filter_func '
'can be passed, but not both')
filter_func = lambda x: '.' in x and \
x.rsplit('.', 1)[1] in extensions
if filter_func is not None:
x = ifilter(filter_func, x)
return x | Returns a list of templates for this environment. This requires
that the loader supports the loader's
:meth:`~BaseLoader.list_templates` method.
If there are other files in the template folder besides the
actual templates, the returned list can be filtered. There are two
ways: either `extensions` is set to a list of file extensions for
templates, or a `filter_func` can be provided which is a callable that
is passed a template name and should return `True` if it should end up
in the result list.
If the loader does not support that, a :exc:`TypeError` is raised.
.. versionadded:: 2.4 |
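Usage with a Jinja2 loader that supports listing, e.g. a `DictLoader`:

```python
from jinja2 import DictLoader, Environment

env = Environment(loader=DictLoader({
    "index.html": "<h1>{{ title }}</h1>",
    "notes.txt": "{{ body }}",
}))

print(env.list_templates())                                           # ['index.html', 'notes.txt']
print(env.list_templates(extensions=["html"]))                        # ['index.html']
print(env.list_templates(filter_func=lambda n: n.endswith(".txt")))   # ['notes.txt']
```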
6,251 | def search_customer(self, limit=100, offset=0, email_pattern=None, last_name_pattern=None,
company_name_pattern=None, with_additional_data=False):
response = self.request(E.searchCustomerRequest(
E.limit(limit),
E.offset(offset),
E.emailPattern(email_pattern or ),
E.lastNamePattern(last_name_pattern or ),
E.companyNamePattern(company_name_pattern or ),
E.withAdditionalData(int(with_additional_data)),
))
return response.as_models(Customer) | Search the list of customers. |
6,252 | def red_workshift(request, message=None):
if message:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse()) | Redirects to the base workshift page for users who are logged in |
6,253 | def provideObjectsToLearn(self, objectNames=None):
if objectNames is None:
objectNames = self.objects.keys()
objects = {}
for name in objectNames:
objects[name] = [self._getSDRPairs([pair] * self.numColumns) \
for pair in self.objects[name]]
self._checkObjectsToLearn(objects)
return objects | Returns the objects in a canonical format to be sent to an experiment.
The returned format is a dictionary where the keys are object names, and
values are lists of sensations, each sensation being a mapping from
cortical column index to a pair of SDR's (one location and one feature).
returnDict = {
"objectId1": [
{
0: (set([1, 5, 10]), set([6, 12, 52]), # location, feature for CC0
1: (set([6, 2, 15]), set([64, 1, 5]), # location, feature for CC1
},
{
0: (set([5, 46, 50]), set([8, 10, 11]), # location, feature for CC0
1: (set([1, 6, 45]), set([12, 17, 23]), # location, feature for CC1
},
],
"objectId2": [
:
]
:
}
Parameters:
----------------------------
@param objectNames (list)
List of object names to provide to the experiment |
6,254 | def is_prelinked_bytecode(bytecode: bytes, link_refs: List[Dict[str, Any]]) -> bool:
for link_ref in link_refs:
for offset in link_ref["offsets"]:
try:
validate_empty_bytes(offset, link_ref["length"], bytecode)
except ValidationError:
return True
return False | Returns False if all expected link_refs are unlinked, otherwise returns True.
todo support partially pre-linked bytecode (currently all or nothing) |
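The check walks every expected link-reference slot and reports the bytecode as pre-linked as soon as one slot is no longer zeroed out. A standalone sketch of that logic, with a hypothetical helper rather than the package's `validate_empty_bytes`:

```python
def _slot_is_zeroed(bytecode: bytes, offset: int, length: int) -> bool:
    return set(bytecode[offset:offset + length]) <= {0}

def is_prelinked(bytecode: bytes, link_refs) -> bool:
    # True if any expected placeholder slot already contains a linked address.
    return any(
        not _slot_is_zeroed(bytecode, offset, ref["length"])
        for ref in link_refs
        for offset in ref["offsets"]
    )

unlinked = b"\x60\x00" + b"\x00" * 20 + b"\x60\x40"
linked = b"\x60\x00" + b"\xde\xad" * 10 + b"\x60\x40"
refs = [{"offsets": [2], "length": 20}]
print(is_prelinked(unlinked, refs))  # False
print(is_prelinked(linked, refs))    # True
```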
6,255 | def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False,
fuzzy_with_tokens=False):
if fuzzy_with_tokens:
fuzzy = True
info = self.info
if dayfirst is None:
dayfirst = info.dayfirst
if yearfirst is None:
yearfirst = info.yearfirst
res = self._result()
l = _timelex.split(timestr)
last_skipped_token_i = -2
skipped_tokens = list()
try:
ymd = _ymd(timestr)
mstridx = -1
len_l = len(l)
i = 0
while i < len_l:
if fuzzy:
val_is_ampm = False
else:
raise ValueError( +
)
if val_is_ampm:
if value == 1 and res.hour < 12:
res.hour += 12
elif value == 0 and res.hour == 12:
res.hour = 0
res.ampm = value
elif fuzzy:
last_skipped_token_i = self._skip_token(skipped_tokens,
last_skipped_token_i, i, l)
i += 1
continue
if (res.hour is not None and len(l[i]) <= 5 and
res.tzname is None and res.tzoffset is None and
not [x for x in l[i] if x not in
string.ascii_uppercase]):
res.tzname = l[i]
res.tzoffset = info.tzoffset(res.tzname)
i += 1
3 <= len(l[i + 2]) <= 5 and
not [x for x in l[i + 2]
if x not in string.ascii_uppercase]):
res.tzname = l[i + 2]
i += 4
continue
if not (info.jump(l[i]) or fuzzy):
return None, None
last_skipped_token_i = self._skip_token(skipped_tokens,
last_skipped_token_i, i, l)
i += 1
year, month, day = ymd.resolve_ymd(mstridx, yearfirst, dayfirst)
if year is not None:
res.year = year
res.century_specified = ymd.century_specified
if month is not None:
res.month = month
if day is not None:
res.day = day
except (IndexError, ValueError, AssertionError):
return None, None
if not info.validate(res):
return None, None
if fuzzy_with_tokens:
return res, tuple(skipped_tokens)
else:
return res, None | Private method which performs the heavy lifting of parsing, called from
``parse()``, which passes on its ``kwargs`` to this function.
:param timestr:
The string to parse.
:param dayfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the day (``True``) or month (``False``). If
``yearfirst`` is set to ``True``, this distinguishes between YDM
and YMD. If set to ``None``, this value is retrieved from the
current :class:`parserinfo` object (which itself defaults to
``False``).
:param yearfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the year. If ``True``, the first number is taken
to be the year, otherwise the last number is taken to be the year.
If this is set to ``None``, the value is retrieved from the current
:class:`parserinfo` object (which itself defaults to ``False``).
:param fuzzy:
Whether to allow fuzzy parsing, allowing for string like "Today is
January 1, 2047 at 8:21:00AM".
:param fuzzy_with_tokens:
If ``True``, ``fuzzy`` is automatically set to True, and the parser
will return a tuple where the first element is the parsed
:class:`datetime.datetime` datetimestamp and the second element is
a tuple containing the portions of the string which were ignored:
.. doctest::
>>> from dateutil.parser import parse
>>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
(datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at ')) |
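The private `_parse` above backs the public `dateutil.parser.parse`; the fuzzy options are exercised like this:

```python
from dateutil import parser

# Fuzzy parsing ignores the non-date words...
dt = parser.parse("Today is January 1, 2047 at 8:21:00AM", fuzzy=True)
print(dt)  # 2047-01-01 08:21:00

# ...and fuzzy_with_tokens also returns the skipped fragments.
dt, skipped = parser.parse("Today is January 1, 2047 at 8:21:00AM",
                           fuzzy_with_tokens=True)
print(skipped)  # ('Today is ', ' ', 'at ')
```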
6,256 | def runner(parallel, config):
def run_parallel(fn_name, items):
items = [x for x in items if x is not None]
if len(items) == 0:
return []
items = diagnostics.track_parallel(items, fn_name)
fn, fn_name = (fn_name, fn_name.__name__) if callable(fn_name) else (get_fn(fn_name, parallel), fn_name)
logger.info("multiprocessing: %s" % fn_name)
if "wrapper" in parallel:
wrap_parallel = {k: v for k, v in parallel.items() if k in set(["fresources", "checkpointed"])}
items = [[fn_name] + parallel.get("wrapper_args", []) + [wrap_parallel] + list(x) for x in items]
return run_multicore(fn, items, config, parallel=parallel)
return run_parallel | Run functions, provided by string name, on multiple cores on the current machine. |
6,257 | def updated_topology_description(topology_description, server_description):
address = server_description.address
topology_type = topology_description.topology_type
set_name = topology_description.replica_set_name
max_set_version = topology_description.max_set_version
max_election_id = topology_description.max_election_id
server_type = server_description.server_type
sds[address] = server_description
if topology_type == TOPOLOGY_TYPE.Single:
return TopologyDescription(
TOPOLOGY_TYPE.Single,
sds,
set_name,
max_set_version,
max_election_id,
topology_description._topology_settings)
if topology_type == TOPOLOGY_TYPE.Unknown:
if server_type == SERVER_TYPE.Standalone:
sds.pop(address)
elif server_type not in (SERVER_TYPE.Unknown, SERVER_TYPE.RSGhost):
topology_type = _SERVER_TYPE_TO_TOPOLOGY_TYPE[server_type]
if topology_type == TOPOLOGY_TYPE.Sharded:
if server_type not in (SERVER_TYPE.Mongos, SERVER_TYPE.Unknown):
sds.pop(address)
elif topology_type == TOPOLOGY_TYPE.ReplicaSetNoPrimary:
if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos):
sds.pop(address)
elif server_type == SERVER_TYPE.RSPrimary:
(topology_type,
set_name,
max_set_version,
max_election_id) = _update_rs_from_primary(sds,
set_name,
server_description,
max_set_version,
max_election_id)
elif server_type in (
SERVER_TYPE.RSSecondary,
SERVER_TYPE.RSArbiter,
SERVER_TYPE.RSOther):
topology_type, set_name = _update_rs_no_primary_from_member(
sds, set_name, server_description)
elif topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary:
if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos):
sds.pop(address)
topology_type = _check_has_primary(sds)
elif server_type == SERVER_TYPE.RSPrimary:
(topology_type,
set_name,
max_set_version,
max_election_id) = _update_rs_from_primary(sds,
set_name,
server_description,
max_set_version,
max_election_id)
elif server_type in (
SERVER_TYPE.RSSecondary,
SERVER_TYPE.RSArbiter,
SERVER_TYPE.RSOther):
topology_type = _update_rs_with_primary_from_member(
sds, set_name, server_description)
else:
topology_type = _check_has_primary(sds)
return TopologyDescription(topology_type,
sds,
set_name,
max_set_version,
max_election_id,
topology_description._topology_settings) | Return an updated copy of a TopologyDescription.
:Parameters:
- `topology_description`: the current TopologyDescription
- `server_description`: a new ServerDescription that resulted from
an ismaster call
Called after attempting (successfully or not) to call ismaster on the
server at server_description.address. Does not modify topology_description. |
6,258 | def process(self, ast):
id_classifier = IdentifierClassifier()
attached_ast = id_classifier.attach_identifier_attributes(ast)
self._scope_tree_builder.enter_new_scope(ScopeVisibility.SCRIPT_LOCAL)
traverse(attached_ast,
on_enter=self._enter_handler,
on_leave=self._leave_handler)
self.scope_tree = self._scope_tree_builder.get_global_scope()
self.link_registry = self._scope_tree_builder.link_registry | Build a scope tree and links between scopes and identifiers by the
specified ast. You can access the built scope tree and the built links
by .scope_tree and .link_registry. |
6,259 | def dataset_create_new_cli(self,
folder=None,
public=False,
quiet=False,
convert_to_csv=True,
dir_mode=):
folder = folder or os.getcwd()
result = self.dataset_create_new(folder, public, quiet, convert_to_csv,
dir_mode)
if result.invalidTags:
print(
+ str(result.invalidTags))
if result.status.lower() == :
if public:
print(
+ result.url)
else:
print(
+ result.url)
else:
print( + result.error) | client wrapper for creating a new dataset
Parameters
==========
folder: the folder to initialize the metadata file in
public: should the dataset be public?
quiet: suppress verbose output (default is False)
convert_to_csv: if True, convert data to comma separated value
dir_mode: What to do with directories: "skip" - ignore; "zip" - compress and upload |
6,260 | def value(self):
originalPrice = self.lineItem.totalPrice
if self.flatRate == 0:
return originalPrice * self.percent
return self.flatRate | Returns the positive value to subtract from the total. |
6,261 | def _make_compile_argv(self, compile_request):
sources_minus_headers = list(self._iter_sources_minus_headers(compile_request))
if len(sources_minus_headers) == 0:
raise self._HeaderOnlyLibrary()
compiler = compile_request.compiler
compiler_options = compile_request.compiler_options
buildroot = get_buildroot()
argv = (
[compiler.exe_filename] +
compiler.extra_args +
self.context.log.debug("compile argv: {}".format(argv))
return argv | Return a list of arguments to use to compile sources. Subclasses can override and append. |
6,262 | def parse_input_samples(job, inputs):
job.fileStore.logToMaster()
samples = []
if inputs.config:
with open(inputs.config, ) as f:
for line in f.readlines():
if not line.isspace():
sample = line.strip().split()
assert len(sample) == 2,
samples.append(sample)
job.addChildJobFn(map_job, download_sample, samples, inputs) | Parses config file to pull sample information.
Stores samples as tuples of (uuid, URL)
:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main) |
6,263 | def overlay_gateway_access_lists_ipv6_in_cg_ipv6_acl_in_name(self, **kwargs):
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop()
access_lists = ET.SubElement(overlay_gateway, "access-lists")
ipv6 = ET.SubElement(access_lists, "ipv6")
in_cg = ET.SubElement(ipv6, "in")
ipv6_acl_in_name = ET.SubElement(in_cg, "ipv6-acl-in-name")
ipv6_acl_in_name.text = kwargs.pop()
callback = kwargs.pop(, self._callback)
return callback(config) | Auto Generated Code |
6,264 | def _is_text_data(self, data_type):
dt = DATA_TYPES[data_type]
if type(self.data) is dt[] and len(self.data) < dt[] and all(type(char) == str for char in self.data):
self.type = data_type.upper()
self.len = len(self.data)
return True | Private method for testing text data types. |
6,265 | def _parse_xml(self, xml):
from re import split
vms("Parsing <cron> XML child tag.", 2)
self.frequency = get_attrib(xml, "frequency", default=5, cast=int)
self.emails = split(",\s*", get_attrib(xml, "emails", default=""))
self.notify = split(",\s*", get_attrib(xml, "notify", default="")) | Extracts the attributes from the XMLElement instance. |
6,266 | def swipe(self):
@param_to_property(direction=["up", "down", "right", "left"])
def _swipe(direction="left", steps=10, percent=1):
if percent == 1:
return self.jsonrpc.swipe(self.selector, direction, steps)
else:
return self.jsonrpc.swipe(self.selector, direction, percent, steps)
return _swipe | Perform swipe action. If the device platform is greater than API 18, percent can be used with a value between 0 and 1
Usages:
d().swipe.right()
d().swipe.left(steps=10)
d().swipe.up(steps=10)
d().swipe.down()
d().swipe("right", steps=20)
d().swipe("right", steps=20, percent=0.5) |
6,267 | def tag_ner(lang, input_text, output_type=list):
_check_latest_data(lang)
assert lang in NER_DICT.keys(), \
.format(.join(NER_DICT.keys()))
types = [str, list]
assert type(input_text) in types, .format(.join(types))
assert output_type in types, .format(.join(types))
if type(input_text) == str:
punkt = PunktLanguageVars()
tokens = punkt.word_tokenize(input_text)
new_tokens = []
for word in tokens:
if word.endswith():
new_tokens.append(word[:-1])
new_tokens.append()
else:
new_tokens.append(word)
input_text = new_tokens
ner_file_path = os.path.expanduser(NER_DICT[lang])
with open(ner_file_path) as file_open:
ner_str = file_open.read()
ner_list = ner_str.split()
ner_tuple_list = []
for count, word_token in enumerate(input_text):
match = False
for ner_word in ner_list:
if word_token == ner_word:
ner_tuple = (word_token, )
ner_tuple_list.append(ner_tuple)
match = True
break
if not match:
ner_tuple_list.append((word_token,))
if output_type is str:
string =
for tup in ner_tuple_list:
start_space =
final_space =
if tup[0] in [, , , , , ]:
start_space =
if len(tup) == 2:
string += start_space + tup[0] + + tup[1] + final_space
else:
string += start_space + tup[0] + final_space
return string
return ner_tuple_list | Run NER for chosen language.
Choosing output_type=list, returns a list of tuples:
>>> tag_ner('latin', input_text='ut Venus, ut Sirius, ut Spica', output_type=list)
[('ut',), ('Venus',), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity')] |
6,268 | def _get_phi_al_regional(self, C, mag, vs30measured, rrup):
phi_al = np.ones((len(vs30measured)))
idx = rrup < 30
phi_al[idx] *= C[]
idx = ((rrup <= 80) & (rrup >= 30.))
phi_al[idx] *= C[] + (C[] - C[]) / 50. * (rrup[idx] - 30.)
idx = rrup > 80
phi_al[idx] *= C[]
return phi_al | Returns the intra-event (phi) standard deviation (equation 26, page 1046) |
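The intra-event term is piecewise in rupture distance: one value below 30 km, another above 80 km, and a linear ramp in between. A sketch with hypothetical coefficient names (the real coefficient-table keys were stripped in the row above):

```python
import numpy as np

# Hypothetical coefficients: near-field and far-field intra-event sigmas.
C = {"phi_near": 0.55, "phi_far": 0.65}

def phi_al(rrup):
    phi = np.full(rrup.shape, C["phi_far"])
    phi[rrup < 30.0] = C["phi_near"]
    ramp = (rrup >= 30.0) & (rrup <= 80.0)
    # Linear interpolation between the two values over 30-80 km.
    phi[ramp] = C["phi_near"] + (C["phi_far"] - C["phi_near"]) / 50.0 * (rrup[ramp] - 30.0)
    return phi

print(phi_al(np.array([10.0, 55.0, 120.0])))  # [0.55 0.6 0.65]
```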
6,269 | def generate_tokens(readline):
lnum = parenlev = continued = 0
namechars, numchars = string.ascii_letters + ,
contstr, needcont = , 0
contline = None
indents = [0]
while 1:
try:
line = readline()
except StopIteration:
line =
lnum = lnum + 1
pos, max = 0, len(line)
if contstr:
if not line:
raise TokenError, ("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = , 0
contline = None
elif needcont and line[-2:] != and line[-3:] != :
yield (ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr =
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued:
if not line: break
column = 0
while pos < max:
if line[pos] == : column = column + 1
elif line[pos] == : column = (column//tabsize + 1)*tabsize
elif line[pos] == : column = 0
else: break
pos = pos + 1
if pos == max: break
if line[pos] in :
if line[pos] == :
comment_token = line[pos:].rstrip()
nl_pos = pos + len(comment_token)
yield (COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
yield (NL, line[nl_pos:],
(lnum, nl_pos), (lnum, len(line)), line)
else:
yield ((NL, COMMENT)[line[pos] == ], line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]:
indents.append(column)
yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
yield (DEDENT, , (lnum, pos), (lnum, pos), line)
else:
if not line:
raise TokenError, ("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch:
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == and token != ):
yield (NUMBER, token, spos, epos, line)
elif initial in :
newline = NEWLINE
if parenlev > 0:
newline = NL
yield (newline, token, spos, epos, line)
elif initial == :
assert not token.endswith("\n")
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch:
pos = endmatch.end(0)
token = line[start:pos]
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start)
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == :
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else:
yield (STRING, token, spos, epos, line)
elif initial in namechars:
yield (NAME, token, spos, epos, line)
elif initial == '\\':
yield (NL, token, spos, (lnum, pos), line)
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': parenlev = parenlev - 1
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos = pos + 1
for indent in indents[1:]:
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '') | The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
can be a callable function terminating with StopIteration:
readline = open(myfile).next # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included. |
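A short usage sketch of the readline-driven interface; it uses the standard library tokenize.generate_tokens, which yields the same 5-tuples described above:

import io
import tokenize

source = "x = 1\nif x:\n    y = x + 2\n"
readline = io.StringIO(source).readline

# Each item unpacks to the 5-tuple described above.
for tok_type, tok_string, start, end, line in tokenize.generate_tokens(readline):
    print(tokenize.tok_name[tok_type], repr(tok_string), start, end)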
6,270 | def create(cls, path_name=None, name=None, project_id=None,
log_modified_at=None, crawlable=True):
result = cls(path_name, name, project_id, log_modified_at, crawlable)
db.session.add(result)
db.session.commit()
crawl_result(result, True)
return result | Initialize an instance and save it to db. |
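A generic sketch of the create-and-commit pattern with SQLAlchemy (1.4+ assumed); the model, columns, and in-memory engine here are illustrative stand-ins, not the project's actual schema:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Result(Base):
    __tablename__ = 'results'
    id = Column(Integer, primary_key=True)
    path_name = Column(String)
    name = Column(String)

    @classmethod
    def create(cls, session, path_name=None, name=None):
        # Initialize an instance and persist it in a single call.
        result = cls(path_name=path_name, name=name)
        session.add(result)
        session.commit()
        return result

engine = create_engine('sqlite://')          # in-memory database for the demo
Base.metadata.create_all(engine)
with Session(engine) as session:
    r = Result.create(session, path_name='/tmp/run1', name='run1')
    print(r.id, r.name)                      # 1 run1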
6,271 | def list_gemeenten_by_provincie(self, provincie):
try:
gewest = provincie.gewest
prov = provincie
except AttributeError:
prov = self.get_provincie_by_id(provincie)
gewest = prov.gewest
gewest.clear_gateway()
def creator():
gewest_gemeenten = self.list_gemeenten(gewest.id)
return [
Gemeente(r.id, r.naam, r.niscode, gewest)
for r in gewest_gemeenten
if str(r.niscode)[0] == str(prov.niscode)[0]
]
if self.caches[].is_configured:
key = % prov.id
gemeente = self.caches[].get_or_create(key, creator)
else:
gemeente = creator()
for g in gemeente:
g.set_gateway(self)
return gemeente | List all `gemeenten` in a `provincie`.
:param provincie: The :class:`Provincie` for which the \
`gemeenten` are wanted.
:rtype: A :class:`list` of :class:`Gemeente`. |
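The get_or_create caching step above can be reproduced with dogpile.cache; a minimal in-memory sketch with a made-up cache key (the real cache names and key format are project-specific):

from dogpile.cache import make_region

region = make_region().configure('dogpile.cache.memory')

def creator():
    print('computing...')
    return ['Antwerpen', 'Mechelen', 'Turnhout']

# The first call runs creator(); later calls with the same key are served from cache.
print(region.get_or_create('gemeenten_by_provincie#10000', creator))
print(region.get_or_create('gemeenten_by_provincie#10000', creator))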
6,272 | def create_paired_dir(output_dir, meta_id, static=False, needwebdir=True):
root_path = os.path.abspath(output_dir)
else:
output_path = root_path
else:
return meta_dir | Creates the meta or static dirs.
Adds an "even" or "odd" subdirectory to the static path
based on the meta-id. |
6,273 | def writeFITSTable(filename, table):
def FITSTableType(val):
if isinstance(val, bool):
types = "L"
elif isinstance(val, (int, np.int64, np.int32)):
types = "J"
elif isinstance(val, (float, np.float64, np.float32)):
types = "E"
elif isinstance(val, six.string_types):
types = "{0}A".format(len(val))
else:
log.warning("Column {0} is of unknown type {1}".format(val, type(val)))
log.warning("Using 5A")
types = "5A"
return types
cols = []
for name in table.colnames:
cols.append(fits.Column(name=name, format=FITSTableType(table[name][0]), array=table[name]))
cols = fits.ColDefs(cols)
tbhdu = fits.BinTableHDU.from_columns(cols)
for k in table.meta:
tbhdu.header['HISTORY'] = ':'.join((k, table.meta[k]))
tbhdu.writeto(filename, overwrite=True) | Convert a table into a FITSTable and then write to disk.
Parameters
----------
filename : str
Filename to write.
table : Table
Table to write.
Returns
-------
None
Notes
-----
Due to a bug in numpy, `int32` and `float32` are converted to `int64` and `float64` before writing. |
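A hedged end-to-end sketch of what the helper does, built with astropy directly; the column names and values are invented for illustration:

import numpy as np
from astropy.io import fits
from astropy.table import Table

tab = Table({'island': np.array([1, 2, 3], dtype=np.int64),
             'ra': np.array([10.5, 11.2, 12.9], dtype=np.float64),
             'name': ['a', 'b', 'c']})
tab.meta['PROGRAM'] = 'example'

# One FITS column per table column, format inferred from the first value.
cols = fits.ColDefs([
    fits.Column(name='island', format='J', array=tab['island']),
    fits.Column(name='ra', format='E', array=tab['ra']),
    fits.Column(name='name', format='5A', array=tab['name']),
])
hdu = fits.BinTableHDU.from_columns(cols)
hdu.header['HISTORY'] = ':'.join(('PROGRAM', tab.meta['PROGRAM']))
hdu.writeto('example.fits', overwrite=True)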
6,274 | def send_error(self, code, message=None):
message = message.strip() if message else ''
self.log_error("code %d, message %s", code, message)
self.send_response(code)
self.send_header("Content-Type", "text/plain")
self.send_header(, )
self.end_headers()
if message:
self.wfile.write(message) | Send and log plain text error reply.
:param code:
:param message: |
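For reference, a self-contained Python 3 sketch of the same plain-text error reply on top of http.server; the Connection header and the byte-encoding step are assumptions, not taken from the handler above:

from http.server import BaseHTTPRequestHandler, HTTPServer

class Handler(BaseHTTPRequestHandler):
    def do_GET(self):
        self.send_plain_error(404, 'resource not found\n')

    def send_plain_error(self, code, message=None):
        message = (message or '').strip()
        self.log_error('code %d, message %s', code, message)
        self.send_response(code)
        self.send_header('Content-Type', 'text/plain')
        self.send_header('Connection', 'close')     # assumption, not from the original
        self.end_headers()
        if message:
            self.wfile.write(message.encode('utf-8'))

if __name__ == '__main__':
    HTTPServer(('127.0.0.1', 8000), Handler).serve_forever()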
6,275 | def _create_fw_fab_dev(self, tenant_id, drvr_name, fw_dict):
if fw_dict.get('fw_type') == fw_constants.FW_TENANT_EDGE:
self._create_fw_fab_dev_te(tenant_id, drvr_name, fw_dict) | This routine calls the Tenant Edge routine if FW Type is TE. |
6,276 | def add_callbacks(self, future, callback, errback):
def done(f):
try:
res = f.result()
if callback:
callback(res)
except Exception:
if errback:
errback(create_failure())
return future.add_done_callback(done) | callback or errback may be None, but at least one must be
non-None. |
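A small sketch of the same callback/errback bridge over a standard concurrent.futures.Future; create_failure() from the original is not available here, so the exception object itself is passed to the errback:

from concurrent.futures import Future

def add_callbacks(future, callback, errback):
    # Either callback or errback may be None, but at least one should be set.
    def done(f):
        try:
            res = f.result()
        except Exception as exc:
            if errback:
                errback(exc)
        else:
            if callback:
                callback(res)
    future.add_done_callback(done)
    return future

ok = Future()
add_callbacks(ok, lambda value: print('ok:', value), lambda err: print('failed:', err))
ok.set_result(42)                                  # prints "ok: 42"

bad = Future()
add_callbacks(bad, None, lambda err: print('failed:', err))
bad.set_exception(ValueError('boom'))              # prints "failed: boom"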
6,277 | def _parse_feature(self, info):
parts = info.split(b'=', 1)
name = parts[0]
if len(parts) > 1:
value = self._path(parts[1])
else:
value = None
self.features[name] = value
return commands.FeatureCommand(name, value, lineno=self.lineno) | Parse a feature command. |
6,278 | def connect_functions(self):
self.cfg_load_pushbutton.clicked.connect(lambda: self.load_overall_config())
self.cfg_save_pushbutton.clicked.connect(lambda: self.save_overall_config())
self.blue_listwidget.itemSelectionChanged.connect(self.load_selected_bot)
self.orange_listwidget.itemSelectionChanged.connect(self.load_selected_bot)
self.blue_listwidget.dropEvent = lambda event: self.bot_item_drop_event(self.blue_listwidget, event)
self.orange_listwidget.dropEvent = lambda event: self.bot_item_drop_event(self.orange_listwidget, event)
self.blue_name_lineedit.editingFinished.connect(self.team_settings_edit_event)
self.orange_name_lineedit.editingFinished.connect(self.team_settings_edit_event)
self.blue_color_spinbox.valueChanged.connect(self.team_settings_edit_event)
self.orange_color_spinbox.valueChanged.connect(self.team_settings_edit_event)
self.blue_minus_toolbutton.clicked.connect(lambda e: self.remove_agent(self.current_bot))
self.orange_minus_toolbutton.clicked.connect(lambda e: self.remove_agent(self.current_bot))
self.blue_plus_toolbutton.clicked.connect(lambda e: self.add_agent_button(team_index=0))
self.orange_plus_toolbutton.clicked.connect(lambda e: self.add_agent_button(team_index=1))
for child in self.bot_config_groupbox.findChildren(QWidget):
if isinstance(child, QLineEdit):
child.editingFinished.connect(self.bot_config_edit_event)
elif isinstance(child, QSlider):
child.valueChanged.connect(self.bot_config_edit_event)
elif isinstance(child, QRadioButton):
child.toggled.connect(self.bot_config_edit_event)
elif isinstance(child, QComboBox):
child.currentTextChanged.connect(self.bot_config_edit_event)
self.loadout_preset_toolbutton.clicked.connect(self.car_customisation.popup)
self.agent_preset_toolbutton.clicked.connect(self.agent_customisation.popup)
self.preset_load_toplevel_pushbutton.clicked.connect(self.load_preset_toplevel)
for child in self.match_settings_groupbox.findChildren(QWidget):
if isinstance(child, QComboBox):
child.currentTextChanged.connect(self.match_settings_edit_event)
elif isinstance(child, QCheckBox):
child.toggled.connect(self.match_settings_edit_event)
self.edit_mutators_pushbutton.clicked.connect(self.mutator_customisation.popup)
self.kill_bots_pushbutton.clicked.connect(self.kill_bots)
self.run_button.clicked.connect(self.run_button_pressed) | Connects all events to the functions which should be called
:return: |
6,279 | def complete_previous(self, count=1, disable_wrap_around=False):
if self.complete_state:
if self.complete_state.complete_index == 0:
index = None
if disable_wrap_around:
return
elif self.complete_state.complete_index is None:
index = len(self.complete_state.current_completions) - 1
else:
index = max(0, self.complete_state.complete_index - count)
self.go_to_completion(index) | Browse to the previous completions.
(Does nothing if there are no completions.) |
6,280 | def ucast_ip_mask(ip_addr_and_mask, return_tuple=True):
regex_ucast_ip_and_mask = __re.compile("^((22[0-3])|(2[0-1][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))/((3[0-2])|([1-2]?[0-9]))$")
if return_tuple:
while not regex_ucast_ip_and_mask.match(ip_addr_and_mask):
print("Not a good unicast IP and CIDR mask combo.")
print("Please try again.")
ip_addr_and_mask = input("Please enter a unicast IP address and mask in the following format x.x.x.x/x: ")
ip_cidr_split = ip_addr_and_mask.split("/")
ip_addr = ip_cidr_split[0]
cidr = ip_cidr_split[1]
return ip_addr, cidr
elif not return_tuple:
if not regex_ucast_ip_and_mask.match(ip_addr_and_mask):
return False
else:
return True | Function to check if a address is unicast and that the CIDR mask is good
Args:
ip_addr_and_mask: Unicast IP address and mask in the following format 192.168.1.1/24
return_tuple: Set to True it returns a IP and mask in a tuple, set to False returns True or False
Returns: see return_tuple for return options |
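A rough equivalent of the return_tuple=False path using the standard ipaddress module instead of a hand-rolled regex (behaviour at the edges, e.g. reserved ranges, may differ slightly):

import ipaddress

def is_ucast_ip_mask(ip_addr_and_mask):
    # True for an IPv4 unicast address with a parseable /0-/32 mask.
    try:
        iface = ipaddress.ip_interface(ip_addr_and_mask)
    except ValueError:
        return False
    ip = iface.ip
    return ip.version == 4 and not (ip.is_multicast or ip.is_reserved)

print(is_ucast_ip_mask('192.168.1.1/24'))   # True
print(is_ucast_ip_mask('224.0.0.5/24'))     # False (multicast)
print(is_ucast_ip_mask('10.0.0.1/40'))      # False (bad mask)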
6,281 | def append_data(self, len_tag, val_tag, data, header=False):
self.append_pair(len_tag, len(data), header=header)
self.append_pair(val_tag, data, header=header)
return | Append raw data, possibly including an embedded SOH.
:param len_tag: Tag number for length field.
:param val_tag: Tag number for value field.
:param data: Raw data byte string.
:param header: Append to header if True; default to body.
Appends two pairs: a length pair, followed by a data pair,
containing the raw data supplied. Example fields that should
use this method include: 95/96, 212/213, 354/355, etc. |
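A hedged usage sketch with the simplefix library, assuming its FixMessage API; tags 95/96 are the RawDataLength/RawData pair mentioned above:

import simplefix

msg = simplefix.FixMessage()
msg.append_pair(8, 'FIX.4.2', header=True)
msg.append_pair(35, 'D', header=True)

raw = b'binary\x01payload'        # may legitimately contain the SOH byte (0x01)
msg.append_data(95, 96, raw)      # emits 95=<length> followed by 96=<raw bytes>

print(msg.encode())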
6,282 | def build_query_uri(self, uri=None, start=0, count=-1, filter='', query='', sort='', view='', fields='', scope_uris=''):
if filter:
filter = self.make_query_filter(filter)
if query:
query = "&query=" + quote(query)
if sort:
sort = "&sort=" + quote(sort)
if view:
view = "&view=" + quote(view)
if fields:
fields = "&fields=" + quote(fields)
if scope_uris:
scope_uris = "&scopeUris=" + quote(scope_uris)
path = uri if uri else self._base_uri
self.validate_resource_uri(path)
symbol = '?' if '?' not in path else '&'
uri = "{0}{1}start={2}&count={3}{4}{5}{6}{7}{8}{9}".format(path, symbol, start, count, filter, query, sort,
view, fields, scope_uris)
return uri | Builds the URI from given parameters.
More than one request can be send to get the items, regardless the query parameter 'count', because the actual
number of items in the response might differ from the requested count. Some types of resource have a limited
number of items returned on each call. For those resources, additional calls are made to the API to retrieve
any other items matching the given filter. The actual number of items can also differ from the requested call
if the requested number of items would take too long.
The use of optional parameters for OneView 2.0 is described at:
http://h17007.www1.hpe.com/docs/enterprise/servers/oneview2.0/cic-api/en/api-docs/current/index.html
Note:
Single quote - "'" - inside a query parameter is not supported by OneView API.
Args:
start: The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count: The number of resources to return. A count of -1 requests all items (default).
filter (list or str): A general filter/query string to narrow the list of items returned. The default is no
filter; all resources are returned.
query: A single query parameter can do what would take multiple parameters or multiple GET requests using
filter. Use query for more complex queries. NOTE: This parameter is experimental for OneView 2.0.
sort: The sort order of the returned data set. By default, the sort order is based on create time with the
oldest entry first.
view: Returns a specific subset of the attributes of the resource or collection by specifying the name of a
predefined view. The default view is expand (show all attributes of the resource and all elements of
the collections or resources).
fields: Name of the fields.
uri: A specific URI (optional)
scope_uris: An expression to restrict the resources returned according to the scopes to
which they are assigned.
Returns:
uri: The complete uri |
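A cut-down, standalone sketch of the same query-string assembly (hypothetical base path, only a few of the optional parameters), showing how the pieces end up in the final URI:

from urllib.parse import quote

def build_query_uri(path, start=0, count=-1, filter='', sort='', scope_uris=''):
    if filter:
        filter = '&filter=' + quote(filter)
    if sort:
        sort = '&sort=' + quote(sort)
    if scope_uris:
        scope_uris = '&scopeUris=' + quote(scope_uris)
    symbol = '?' if '?' not in path else '&'
    return '{0}{1}start={2}&count={3}{4}{5}{6}'.format(
        path, symbol, start, count, filter, sort, scope_uris)

print(build_query_uri('/rest/server-hardware', count=5, filter="name='srv-01'"))
# /rest/server-hardware?start=0&count=5&filter=name%3D%27srv-01%27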
6,283 | def option(name, help=""):
def decorator(func):
options = getattr(func, "options", [])
_option = Param(name, help)
options.insert(0, _option)
func.options = options
return func
return decorator | Decorator that adds an option to the wrapped command or function. |
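A self-contained usage sketch; Param is not defined in the snippet above, so a minimal namedtuple stand-in is used here:

from collections import namedtuple

Param = namedtuple('Param', ['name', 'help'])

def option(name, help=''):
    def decorator(func):
        options = getattr(func, 'options', [])
        options.insert(0, Param(name, help))
        func.options = options
        return func
    return decorator

@option('--verbose', help='Print extra output.')
@option('--output', help='Destination file.')
def build(args):
    pass

print([o.name for o in build.options])   # ['--verbose', '--output']

Because decorators apply bottom-up and each one inserts at position 0, the options end up listed in the order they are written.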
6,284 | def political_views(self) -> str:
views = self._data['political_views']
return self.random.choice(views) | Get a random political views.
:return: Political views.
:Example:
Liberal. |
6,285 | def do_proxy_failover(self, proxy_url, for_url):
self._proxy_resolver.ban_proxy(proxy_url)
return self._proxy_resolver.get_proxy_for_requests(for_url) | :param str proxy_url: Proxy to ban.
:param str for_url: The URL being requested.
:returns: The next proxy config to try, or 'DIRECT'.
:raises ProxyConfigExhaustedError: If the PAC file provided no usable proxy configuration. |
6,286 | def showOperandLines(rh):
if rh.function == 'HELP':
rh.printLn("N", " For the GetHost function:")
else:
rh.printLn("N", "Sub-Functions(s):")
rh.printLn("N", " diskpoolnames - " +
"Returns the names of the directory manager disk pools.")
rh.printLn("N", " diskpoolspace - " +
"Returns disk pool size information.")
rh.printLn("N", " fcpdevices - " +
"Lists the FCP device channels that are active, free, or")
rh.printLn("N", " offline.")
rh.printLn("N", " general - " +
"Returns the general information related to the z/VM")
rh.printLn("N", " hypervisor environment.")
rh.printLn("N", " help - Returns this help information.")
rh.printLn("N", " version - Show the version of this function")
if rh.subfunction != '':
rh.printLn("N", "Operand(s):")
rh.printLn("N", " <poolName> - Name of the disk pool.")
return | Produce help output related to operands.
Input:
Request Handle |
6,287 | def get_valid_examples(self):
path = os.path.join(self._get_schema_folder(), "examples", "valid")
return list(_get_json_content_from_folder(path)) | Return a list of valid examples for the given schema. |
6,288 | def tradepileDelete(self, trade_id):
method = 'DELETE'
url = 'trade/%s' % trade_id
self.__request__(method, url)
return True | Remove card from tradepile.
:params trade_id: Trade id. |
6,289 | def read(self, num_bytes=None):
res = self.get_next(num_bytes)
self.skip(len(res))
return res | Read and return the specified bytes from the buffer. |
6,290 | def set_file_path(self, filePath):
if filePath is not None:
assert isinstance(filePath, basestring), "filePath must be None or string"
filePath = str(filePath)
self.__filePath = filePath | Set the file path that needs to be locked.
:Parameters:
#. filePath (None, path): The file that needs to be locked. When given and a lock
is acquired, the file will be automatically opened for writing or reading
depending on the given mode. If None is given, the locker can always be used
for its general purpose as shown in the examples. |
6,291 | def merge_selected_cells(self, selection):
tab = self.grid.current_table
bbox = selection.get_bbox()
if bbox is None:
row, col, tab = self.grid.actions.cursor
(bb_top, bb_left), (bb_bottom, bb_right) = (row, col), (row, col)
else:
(bb_top, bb_left), (bb_bottom, bb_right) = bbox
merge_area = bb_top, bb_left, bb_bottom, bb_right
cell_attributes = self.grid.code_array.cell_attributes
tl_merge_area = cell_attributes[(bb_top, bb_left, tab)]["merge_area"]
if tl_merge_area is not None and tl_merge_area[:2] == merge_area[:2]:
self.unmerge(tl_merge_area, tab)
else:
self.merge(merge_area, tab) | Merges or unmerges cells that are in the selection bounding box
Parameters
----------
selection: Selection object
\tSelection for which attr toggle shall be returned |
6,292 | def load_steps_impl(self, registry, path, module_names=None):
if not module_names:
module_names = []
path = os.path.abspath(path)
for module_name in module_names:
mod = self.modules.get((path, module_name))
if mod is None:
cwd = os.getcwd()
if cwd not in sys.path:
sys.path.append(cwd)
try:
actual_module_name = os.path.basename(module_name)
complete_path = os.path.join(path, os.path.dirname(module_name))
info = imp.find_module(actual_module_name, [complete_path])
except ImportError:
return
try:
mod = imp.load_module("stepdefs_" + str(self.module_counter), *info)
except:
exc = sys.exc_info()
raise StepImplLoadException(exc)
self.module_counter += 1
self.modules[(path, module_name)] = mod
for item_name in dir(mod):
item = getattr(mod, item_name)
if isinstance(item, StepImpl):
registry.add_step(item.step_type, item)
elif isinstance(item, HookImpl):
registry.add_hook(item.cb_type, item)
elif isinstance(item, NamedTransformImpl):
registry.add_named_transform(item)
elif isinstance(item, TransformImpl):
registry.add_transform(item) | Load the step implementations at the given path, with the given module names. If
module_names is None then the module 'steps' is searched by default. |
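The imp module used above is deprecated in Python 3; a minimal importlib-based sketch of the same load-module-from-a-directory step (paths and names are hypothetical):

import importlib.util
import os

def load_module_from_dir(path, module_name, counter=0):
    # Load <path>/<module_name>.py under a unique name, or return None if missing.
    file_path = os.path.join(path, module_name + '.py')
    if not os.path.exists(file_path):
        return None
    spec = importlib.util.spec_from_file_location('stepdefs_%d' % counter, file_path)
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    return mod

steps = load_module_from_dir('features/steps', 'web_steps')   # hypothetical path
if steps is not None:
    print([name for name in dir(steps) if not name.startswith('_')])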
6,293 | def execute(func: types.FunctionType):
spec = getfullargspec(func)
default = spec.defaults
arg_cursor = 0
def get_item(name):
nonlocal arg_cursor
ctx = func.__globals__
value = ctx.get(name, _undef)
if value is _undef:
try:
value = default[arg_cursor]
arg_cursor += 1
except (TypeError, IndexError):
raise ValueError(f"Current context has no variable `{name}`")
return value
return func(*(get_item(arg_name) for arg_name in spec.args)) | >>> from Redy.Magic.Classic import execute
>>> x = 1
>>> @execute
>>> def f(x = x) -> int:
>>> return x + 1
>>> assert f is 2 |
6,294 | def aggregate_count_over_time(self, metric_store, groupby_name, aggregate_timestamp):
all_qps = metric_store['qps']
qps = all_qps[groupby_name]
if aggregate_timestamp in qps:
qps[aggregate_timestamp] += 1
else:
qps[aggregate_timestamp] = 1
return None | Organize and store the count of data from the log line into the metric store by column, group name, and timestamp
:param dict metric_store: The metric store used to store all the parsed the log data
:param string groupby_name: the group name that the log line belongs to
:param string aggregate_timestamp: timestamp used for storing the raw data. This accounts for aggregation time period
:return: None |
6,295 | def get_request_feature(self, name):
if '[]' in name:
return self.request.query_params.getlist(
name) if name in self.features else None
elif '{}' in name:
return self._extract_object_params(
name) if name in self.features else {}
else:
return self.request.query_params.get(
name) if name in self.features else None | Parses the request for a particular feature.
Arguments:
name: A feature name.
Returns:
A feature parsed from the URL if the feature is supported, or None. |
6,296 | def get_themes(templates_path):
themes = os.listdir(templates_path)
if in themes:
themes.remove()
return themes | Returns available themes list. |
6,297 | def log_request(handler):
block = + _format_headers_log(handler.request.headers)
if handler.request.arguments:
block +=
for k, v in handler.request.arguments.items():
block += .format(repr(k), repr(v))
app_log.info(block) | Logging the request is the opposite of logging the response; it is sometimes necessary,
so feel free to enable it. |
6,298 | def fault_sets(self):
self.connection._check_login()
response = self.connection._do_get("{}/{}".format(self.connection._api_url, "types/FaultSet/instances")).json()
all_faultsets = []
for fs in response:
all_faultsets.append(
SIO_Fault_Set.from_dict(fs)
)
return all_faultsets | You can only create and configure Fault Sets before adding SDSs to the system, and configuring them incorrectly
may prevent the creation of volumes. An SDS can only be added to a Fault Set during the creation of the SDS.
:rtype: list of Faultset objects |
6,299 | def _parse_depot_section(f):
depots = []
for line in f:
line = strip(line)
if line == '-1' or line == 'EOF':
break
else:
depots.append(line)
if len(depots) != 1:
raise ParseException()
return int(depots[0]) | Parse TSPLIB DEPOT_SECTION data part from file descriptor f
Args
----
f : str
File descriptor
Returns
-------
int
the single depot index (exactly one depot is expected) |
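A self-contained sketch parsing a small DEPOT_SECTION from an in-memory file; the terminator strings '-1' and 'EOF' follow TSPLIB conventions:

import io

def parse_depot_section(f):
    # Return the single depot id from a TSPLIB DEPOT_SECTION.
    depots = []
    for line in f:
        line = line.strip()
        if line == '-1' or line == 'EOF':
            break
        depots.append(line)
    if len(depots) != 1:
        raise ValueError('One and only one depot is supported')
    return int(depots[0])

print(parse_depot_section(io.StringIO('1\n-1\nEOF\n')))   # 1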