Unnamed: 0 (int64, 0 to 389k) | code (string, lengths 26 to 79.6k) | docstring (string, lengths 1 to 46.9k) |
---|---|---|
11,700 | def recursive_dictionary_get(keys, dictionary):
if "." in keys and len(keys) > 1:
key = keys.split(".", 1)
new_dict = dictionary.get(key[0])
if not new_dict or not hasattr(new_dict, "get"):
return None
return recursive_dictionary_get(key[1], new_dict)
else:
return dictionary.get(keys) if (dictionary and hasattr(dictionary, "get")) else None | Gets contents of requirement key recursively so users can search
for specific keys inside nested requirement dicts.
:param keys: key or dot separated string of keys to look for.
:param dictionary: Dictionary to search from
:return: results of search or None |
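A minimal usage sketch for the helper above, assuming recursive_dictionary_get is in scope as defined; the nested dict and keys are illustrative only.

config = {"database": {"credentials": {"user": "admin"}}}
# Dot-separated keys walk the nested dicts one level at a time.
print(recursive_dictionary_get("database.credentials.user", config))  # -> 'admin'
# Missing keys or non-dict intermediate values yield None instead of raising.
print(recursive_dictionary_get("database.port", config))              # -> None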
11,701 | def html_temp_launch(html):
fname = tempfile.gettempdir()+"/swhlab/temp.html"
with open(fname,'w') as f:
f.write(html)
webbrowser.open(fname) | given text, make it a temporary HTML file and launch it. |
11,702 | def init_app(self, app):
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions[] = self
self._set_default__configuration_options(app) | Register this extension with the flask app.
:param app: A flask application |
11,703 | def _simple_command(self, command, arg=None, **kwargs):
self._protocol.send_command(command, arg)
return self._protocol.handle_simple_responses(**kwargs) | Send a simple command. |
11,704 | def open(self, vendor_id: int = 0x16c0, product_id: int = 0x5dc, bus: int = None, address: int = None) -> bool:
kwargs = {}
if vendor_id:
kwargs["idVendor"] = vendor_id
if product_id:
kwargs["idProduct"] = product_id
if bus:
kwargs["bus"] = bus
if address:
kwargs["address"] = address
self._dev = usb.core.find(**kwargs)
return self._dev is not None | Open the first device that matches the search criteria. The default parameters
are set up for the likely most common case of a single uDMX interface.
However, for the case of multiple uDMX interfaces, you can use the
bus and address parameters to further specify the uDMX interface
to be opened.
:param vendor_id:
:param product_id:
:param bus: USB bus number 1-n
:param address: USB device address 1-n
:return: Returns true if a device was opened. Otherwise, returns false. |
11,705 | def remove(self, item):
check_not_none(item, "Value can't be None")
item_data = self._to_data(item)
return self._encode_invoke(list_remove_codec, value=item_data) | Removes the specified element's first occurrence from the list if it exists in this list.
:param item: (object), the specified element.
:return: (bool), ``true`` if the specified element is present in this list. |
11,706 | def close(self):
try:
if not self.session.closed:
if self.session._connector_owner:
self.session._connector.close()
self.session._connector = None
except Exception as e:
Config.dummy_logger.error("can not close session for: %s" % e) | Should be closed explicitly when using an external session or connector,
instead of relying on self.__del__. |
11,707 | def _xml_element_value(el: Element, int_tags: list):
if el.text is None:
return None
try:
if el.tag in int_tags:
return int(el.text)
except:
pass
s = str(el.text).strip()
return s if s else None | Gets XML Element value.
:param el: Element
:param int_tags: List of tags that should be treated as ints
:return: value of the element (int/str) |
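A small illustration of the XML helper above, assuming it is in scope as defined; the element and tag names are invented for the example.

import xml.etree.ElementTree as ET

root = ET.fromstring("<item><qty> 3 </qty><note>  </note><name>widget</name></item>")
# 'qty' is listed in int_tags, so its text is parsed to an int.
print(_xml_element_value(root.find("qty"), int_tags=["qty"]))   # -> 3
# Whitespace-only text collapses to None rather than an empty string.
print(_xml_element_value(root.find("note"), int_tags=["qty"]))  # -> None
# Any other non-empty text comes back as a stripped string.
print(_xml_element_value(root.find("name"), int_tags=["qty"]))  # -> 'widget'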
11,708 | def find_bindmounts(self):
for mountpoint, (orig, fs, opts) in self.mountpoints.items():
if 'bind' in opts and re.match(self.re_pattern, mountpoint):
yield mountpoint | Finds all bind mountpoints that are inside mounts that match the :attr:`re_pattern` |
11,709 | def is_data_dependent(fmto, data):
if callable(fmto.data_dependent):
return fmto.data_dependent(data)
return fmto.data_dependent | Check whether a formatoption is data dependent
Parameters
----------
fmto: Formatoption
The :class:`Formatoption` instance to check
data: xarray.DataArray
The data array to use if the :attr:`~Formatoption.data_dependent`
attribute is a callable
Returns
-------
bool
True, if the formatoption depends on the data |
11,710 | def add_tcp_flag(self, tcp_flag):
if tcp_flag not in [1, 2, 4, 8, 16, 32, 64, 128]:
raise ValueError("Invalid TCP flag. Valid: [1, 2, 4, 8, 16,32, 64, 128]")
prev_size = 0
if self._json_dict.get() is None:
self._json_dict[] = 0
else:
prev_size = len(str(self._json_dict[])) + len() + 3
self._json_dict[] |= tcp_flag
new_size = len(str(self._json_dict[])) + len() + 3
self._size += new_size - prev_size
if prev_size == 0 and self._has_field:
self._size += 2
self._has_field = True | Add a single TCP flag - will be OR'd into the existing bitmask |
11,711 | def load_command_table(self, args):
with CommandSuperGroup(__name__, self,
) as super_group:
with super_group.group() as group:
group.command(, )
with CommandSuperGroup(__name__, self, ,
client_factory=client_create) as super_group:
with super_group.group() as group:
group.command(, )
group.command(, )
group.command(, )
group.command(, )
group.command(, )
with ArgumentsContext(self, ) as ac:
ac.argument(, options_list=[, ])
ac.argument(, options_list=[, ])
ac.argument(, options_list=[, ])
ac.argument(, options_list=[, ])
ac.argument(, options_list=[, ])
ac.argument(, options_list=[, ])
ac.argument(, options_list=[, ])
return OrderedDict(self.command_table) | Load all Service Fabric commands |
11,712 | def handle_stranded_tasks(self, engine):
lost = self.pending[engine]
for msg_id in lost.keys():
if msg_id not in self.pending[engine]:
continue
raw_msg = lost[msg_id].raw_msg
idents,msg = self.session.feed_identities(raw_msg, copy=False)
parent = self.session.unpack(msg[1].bytes)
idents = [engine, idents[0]]
try:
raise error.EngineError("Engine %r died while running task %r"%(engine, msg_id))
except:
content = error.wrap_exception()
header = dict(
status=,
engine=engine,
date=datetime.now(),
)
msg = self.session.msg(, content, parent=parent, subheader=header)
raw_reply = map(zmq.Message, self.session.serialize(msg, ident=idents))
self.dispatch_result(raw_reply)
self.completed.pop(engine)
self.failed.pop(engine) | Deal with jobs resident in an engine that died. |
11,713 | def syncFlags(self):
self.flags = set(self.skype.conn("GET", SkypeConnection.API_FLAGS,
auth=SkypeConnection.Auth.SkypeToken).json()) | Update the cached list of all enabled flags, and store it in the :attr:`flags` attribute. |
11,714 | def live_profile(script, argv, profiler_factory, interval, spawn, signum,
pickle_protocol, mono):
filename, code, globals_ = script
sys.argv[:] = [filename] + list(argv)
parent_sock, child_sock = socket.socketpair()
stderr_r_fd, stderr_w_fd = os.pipe()
pid = os.fork()
if pid:
os.close(stderr_w_fd)
viewer, loop = make_viewer(mono)
title = get_title(filename)
client = ProfilingClient(viewer, loop.event_loop, parent_sock, title)
client.start()
try:
loop.run()
except KeyboardInterrupt:
os.kill(pid, signal.SIGINT)
except BaseException:
os.kill(pid, signal.SIGTERM)
raise
finally:
parent_sock.close()
w_pid, status = os.waitpid(pid, os.WNOHANG)
if w_pid == 0:
os.kill(pid, signal.SIGTERM)
exit_code = os.WEXITSTATUS(status)
with os.fdopen(stderr_r_fd, ) as f:
child_stderr = f.read()
if child_stderr:
sys.stdout.flush()
sys.stderr.write(child_stderr)
sys.exit(exit_code)
else:
os.close(stderr_r_fd)
devnull = os.open(os.devnull, os.O_RDWR)
for f in [sys.stdin, sys.stdout]:
os.dup2(devnull, f.fileno())
os.dup2(stderr_w_fd, sys.stderr.fileno())
frame = sys._getframe()
profiler = profiler_factory(base_frame=frame, base_code=code)
profiler_trigger = BackgroundProfiler(profiler, signum)
profiler_trigger.prepare()
server_args = (interval, noop, pickle_protocol)
server = SelectProfilingServer(None, profiler_trigger, *server_args)
server.clients.add(child_sock)
spawn(server.connected, child_sock)
try:
exec_(code, globals_)
finally:
os.close(stderr_w_fd)
child_sock.shutdown(socket.SHUT_WR) | Profile a Python script continuously. |
11,715 | def convert_sum(
params, w_name, scope_name, inputs, layers, weights, names
):
print()
def target_layer(x):
import keras.backend as K
return K.sum(x)
lambda_layer = keras.layers.Lambda(target_layer)
layers[scope_name] = lambda_layer(layers[inputs[0]]) | Convert sum.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers |
11,716 | def bounded_uniform(cls, lowest, highest, weight_interval=None):
if weight_interval is None:
weights = [(lowest, 1), (highest, 1)]
else:
i = lowest
weights = []
while i < highest:
weights.append((i, 1))
i += weight_interval
weights.append((highest, 1))
return cls(weights) | Initialize with a uniform distribution between two values.
If no ``weight_interval`` is passed, this weight distribution
will just consist of ``[(lowest, 1), (highest, 1)]``. If specified,
weights (still with uniform weight distribution) will be added every
``weight_interval``. Use this if you intend to modify the weights
in any complex way after initialization.
Args:
lowest (float or int):
highest (float or int):
weight_interval (int):
Returns:
SoftFloat: A newly constructed instance. |
11,717 | def add_auth_to_method(self, path, method_name, auth, api):
method_authorizer = auth and auth.get()
if method_authorizer:
api_auth = api.get()
api_authorizers = api_auth and api_auth.get()
default_authorizer = api_auth and api_auth.get()
self.set_method_authorizer(path, method_name, method_authorizer, api_authorizers, default_authorizer) | Adds auth settings for this path/method. Auth settings currently consist solely of Authorizers
but this method will eventually include setting other auth settings such as API Key,
Resource Policy, etc.
:param string path: Path name
:param string method_name: Method name
:param dict auth: Auth configuration such as Authorizers, ApiKey, ResourcePolicy (only Authorizers supported
currently)
:param dict api: Reference to the related Api's properties as defined in the template. |
11,718 | def get_providers(self, security_filter,
name_filter=,
only_providers_flag=,
internal_external=,
ordering_authority=,
real_provider=):
magic = self._magic_json(
action=TouchWorksMagicConstants.ACTION_GET_PROVIDERS,
parameter1=security_filter,
parameter2=name_filter,
parameter3=only_providers_flag,
parameter4=internal_external,
parameter5=ordering_authority,
parameter6=real_provider)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_GET_PROVIDERS)
return result | invokes TouchWorksMagicConstants.ACTION_GET_PROVIDERS action
:param security_filter - This is the EntryCode of the Security_Code_DE dictionary
for the providers being sought. A list of valid security codes can be obtained from
GetDictionary on the Security_Code_DE dictionary.
:param name_filter
:param only_providers_flag
:param internal_external
:param ordering_authority
:param real_provider
:return: JSON response |
11,719 | def _toplevel(cls):
superclasses = (
list(set(ClosureModel.__subclasses__()) &
set(cls._meta.get_parent_list()))
)
return next(iter(superclasses)) if superclasses else cls | Find the top level of the chain we're in.
For example, if we have:
C inheriting from B inheriting from A inheriting from ClosureModel
C._toplevel() will return A. |
11,720 | def register(self, renewable, timeout=300):
starttime = renewable_start_time(renewable)
renew_future = asyncio.ensure_future(self._auto_lock_renew(renewable, starttime, timeout), loop=self.loop)
self._futures.append(renew_future) | Register a renewable entity for automatic lock renewal.
:param renewable: A locked entity that needs to be renewed.
:type renewable: ~azure.servicebus.aio.async_message.Message or
~azure.servicebus.aio.async_receive_handler.SessionReceiver
:param timeout: A time in seconds that the lock should be maintained for.
Default value is 300 (5 minutes).
:type timeout: int |
11,721 | def _get_dependency_specification(dep_spec: typing.List[tuple]) -> str:
return ",".join(dep_range[0] + dep_range[1] for dep_range in dep_spec) | Get string representation of dependency specification as provided by PythonDependencyParser. |
11,722 | def Zuo_Stenby(T, Tc, Pc, omega):
Tc_1, Pc_1, omega_1 = 190.56, 4599000.0/1E5, 0.012
Tc_2, Pc_2, omega_2 = 568.7, 2490000.0/1E5, 0.4
Pc = Pc/1E5
def ST_r(ST, Tc, Pc):
return log(1 + ST/(Tc**(1/3.0)*Pc**(2/3.0)))
ST_1 = 40.520*(1 - T/Tc)**1.287
ST_2 = 52.095*(1 - T/Tc)**1.21548
ST_r_1, ST_r_2 = ST_r(ST_1, Tc_1, Pc_1), ST_r(ST_2, Tc_2, Pc_2)
sigma_r = ST_r_1 + (omega-omega_1)/(omega_2 - omega_1)*(ST_r_2-ST_r_1)
sigma = Tc**(1/3.0)*Pc**(2/3.0)*(exp(sigma_r)-1)
sigma = sigma/1000
return sigma | r'''Calculates air-water surface tension using the reference fluids
methods of [1]_.
.. math::
\sigma^{(1)} = 40.520(1-T_r)^{1.287}
\sigma^{(2)} = 52.095(1-T_r)^{1.21548}
\sigma_r = \sigma_r^{(1)}+ \frac{\omega - \omega^{(1)}}
{\omega^{(2)}-\omega^{(1)}} (\sigma_r^{(2)}-\sigma_r^{(1)})
\sigma = T_c^{1/3}P_c^{2/3}[\exp{(\sigma_r)} -1]
Parameters
----------
T : float
Temperature of fluid [K]
Tc : float
Critical temperature of fluid [K]
Pc : float
Critical pressure of fluid [Pa]
omega : float
Acentric factor for fluid, [-]
Returns
-------
sigma : float
Liquid surface tension, N/m
Notes
-----
Presently untested. Have not personally checked the sources.
I strongly believe it is broken.
The reference values for methane and n-octane are from the DIPPR database.
Examples
--------
Chlorobenzene
>>> Zuo_Stenby(293., 633.0, 4530000.0, 0.249)
0.03345569011871088
References
----------
.. [1] Zuo, You-Xiang, and Erling H. Stenby. "Corresponding-States and
Parachor Models for the Calculation of Interfacial Tensions." The
Canadian Journal of Chemical Engineering 75, no. 6 (December 1, 1997):
1130-37. doi:10.1002/cjce.5450750617 |
11,723 | def create_tag(self, name):
return self._tag(self.request(, method=, data={
"name": name,
})["tag"]) | .. versionadded:: 0.2.0
Add a new tag resource to the account
:param str name: the name of the new tag
:rtype: Tag
:raises DOAPIError: if the API endpoint replies with an error |
11,724 | def find_weights(self, scorer, test_size=0.2, method=):
p = Optimizer(self.models, test_size=test_size, scorer=scorer)
return p.minimize(method) | Finds optimal weights for weighted average of models.
Parameters
----------
scorer : function
Scikit-learn like metric.
test_size : float, default 0.2
method : str
Type of solver. Should be one of:
- 'Nelder-Mead'
- 'Powell'
- 'CG'
- 'BFGS'
- 'Newton-CG'
- 'L-BFGS-B'
- 'TNC'
- 'COBYLA'
- 'SLSQP'
- 'dogleg'
- 'trust-ncg'
Returns
-------
list |
11,725 | def _modelmat(self, X, term=-1):
X = check_X(X, n_feats=self.statistics_[],
edge_knots=self.edge_knots_, dtypes=self.dtype,
features=self.feature, verbose=self.verbose)
return self.terms.build_columns(X, term=term) | Builds a model matrix, B, out of the spline basis for each feature
B = [B_0, B_1, ..., B_p]
Parameters
---------
X : array-like of shape (n_samples, m_features)
containing the input dataset
term : int, optional
term index for which to compute the model matrix
if -1, will create the model matrix for all features
Returns
-------
modelmat : sparse matrix of len n_samples
containing model matrix of the spline basis for selected features |
11,726 | def upload_file_content(self, file_id, etag=None, source=None, content=None):
"71e1ed9ee52e565a56aec66bc648a32c""71e1ed9ee52e565a56aec66bc648a32c"
if not is_valid_uuid(file_id):
raise StorageArgumentException(
.format(file_id))
if not (source or content) or (source and content):
raise StorageArgumentException(
)
resp = self._authenticated_request \
.to_endpoint(.format(file_id)) \
.with_body(content or open(source, )) \
.with_headers({: etag} if etag else {}) \
.post()
if not in resp.headers:
raise StorageException()
return resp.headers[] | Upload a file content. The file entity must already exist.
If an ETag is provided the file stored on the server is verified
against it. If it does not match, StorageException is raised.
This means the client needs to update its knowledge of the resource
before attempting to update again. This can be used for optimistic
concurrency control.
Args:
file_id (str): The UUID of the file whose content is written.
etag (str): The etag to match the contents against.
source (str): The path of the local file whose content to be uploaded.
content (str): A string of the content to be uploaded.
Note:
ETags should be enclosed in double quotes::
my_etag = '"71e1ed9ee52e565a56aec66bc648a32c"'
Returns:
The ETag of the file upload::
'"71e1ed9ee52e565a56aec66bc648a32c"'
Raises:
IOError: The source cannot be opened.
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes |
11,727 | def into(self, val: str) -> Union[, ]:
if val in self.paths:
return self.paths[val]
if self.param:
return self.param
raise IndexError(_("Value {} is missing from api").format(val)) | Get another leaf node with name `val` if possible |
11,728 | def _checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3, df_tolerance=1e-12):
if not self._model_initialized_:
import warnings
warnings.warn("This model has not been initialized, try model.inititialize_model()", RuntimeWarning)
return False
x = self.optimizer_array.copy()
if not verbose:
if target_param is None:
transformed_index = np.arange(len(x))
else:
transformed_index = self._raveled_index_for_transformed(target_param)
if transformed_index.size == 0:
print("No free parameters to check")
return True
dx = np.zeros(x.shape)
dx[transformed_index] = step * (np.sign(np.random.uniform(-1, 1, transformed_index.size)) if transformed_index.size != 2 else 1.)
f1 = self._objective(x + dx)
f2 = self._objective(x - dx)
gradient = self._grads(x)
dx = dx[transformed_index]
gradient = gradient[transformed_index]
denominator = (2 * np.dot(dx, gradient))
global_ratio = (f1 - f2) / np.where(denominator == 0., 1e-32, denominator)
global_diff = np.abs(f1 - f2) < tolerance and np.allclose(gradient, 0, atol=tolerance)
if global_ratio is np.nan:
global_ratio = 0
return np.abs(1. - global_ratio) < tolerance or global_diff
else:
try:
names = self.parameter_names_flat()
except NotImplementedError:
names = [ % i for i in range(len(x))]
header = [, , , , , ]
max_names = max([len(names[i]) for i in range(len(names))] + [len(header[0])])
float_len = 10
cols = [max_names]
cols.extend([max(float_len, len(header[i])) for i in range(1, len(header))])
cols = np.array(cols) + 5
header_string = ["{h:^{col}}".format(h=header[i], col=cols[i]) for i in range(len(cols))]
header_string = list(map(lambda x: .join(x), [header_string]))
separator = * len(header_string[0])
print(.join([header_string[0], separator]))
if target_param is None:
target_param = self
transformed_index = self._raveled_index_for_transformed(target_param)
if transformed_index.size == 0:
print("No free parameters to check")
return True
gradient = self._grads(x).copy()
np.where(gradient == 0, 1e-312, gradient)
ret = True
for xind in zip(transformed_index):
xx = x.copy()
xx[xind] += step
f1 = float(self._objective(xx))
xx[xind] -= 2.*step
f2 = float(self._objective(xx))
if f1 > 1e-15 or f1 < -1e-15 or f2 > 1e-15 or f2 < -1e-15:
df_ratio = np.abs((f1 - f2) / min(f1, f2))
else:
df_ratio = 1.0
df_unstable = df_ratio < df_tolerance
numerical_gradient = (f1 - f2) / (2. * step)
if np.all(gradient[xind] == 0):
ratio = (f1 - f2) == gradient[xind]
else:
ratio = (f1 - f2) / (2. * step * gradient[xind])
difference = np.abs(numerical_gradient - gradient[xind])
if (np.abs(1. - ratio) < tolerance) or np.abs(difference) < tolerance:
formatted_name = "\033[92m {0} \033[0m".format(names[xind])
ret &= True
else:
formatted_name = "\033[91m {0} \033[0m".format(names[xind])
ret &= False
if df_unstable:
formatted_name = "\033[94m {0} \033[0m".format(names[xind])
r = % float(ratio)
d = % float(difference)
g = % gradient[xind]
ng = % float(numerical_gradient)
df = % float(df_ratio)
grad_string = "{0:<{c0}}|{1:^{c1}}|{2:^{c2}}|{3:^{c3}}|{4:^{c4}}|{5:^{c5}}".format(formatted_name, r, d, g, ng, df, c0=cols[0] + 9, c1=cols[1], c2=cols[2], c3=cols[3], c4=cols[4], c5=cols[5])
print(grad_string)
self.optimizer_array = x
return ret | Check the gradient of the model by comparing to a numerical
estimate. If the verbose flag is passed, individual
components are tested (and printed)
:param verbose: If True, print a "full" checking of each parameter
:type verbose: bool
:param step: The size of the step around which to linearise the objective
:type step: float (default 1e-6)
:param tolerance: the tolerance allowed (see note)
:type tolerance: float (default 1e-3)
Note:-
The gradient is considered correct if the ratio of the analytical
and numerical gradients is within <tolerance> of unity.
The *dF_ratio* indicates the limit of numerical accuracy of numerical gradients.
If it is too small, e.g., smaller than 1e-12, the numerical gradients are usually
not accurate enough for the tests (shown with blue). |
11,729 | def updateResults(self, newResults, **kwArgs):
if in kwArgs.keys():
reset = kwArgs[]
else:
reset = 0
if reset == 0:
for key in newResults.keys():
if key == or key == :
continue
self.results[key] = newResults[key]
elif reset == 1:
self.results[] = 0
self.results[] = 0
self.results[] = 0
self.results[] = 0
self.results[] =
elif reset == 2:
self.results[] = 0
self.results[] = 0
self.results[] = 0
self.results[] = 0
self.results[] =
self.results[] =
self.results[] =
return | Update the results related to this request excluding the 'response'
and 'logEntries' values.
We specifically update (if present):
overallRC, rc, rs, errno.
Input:
Dictionary containing the results to be updated or an empty
dictionary if the reset keyword was specified.
Reset keyword:
0 - Not a reset. This is the default if the reset keyword was not
specified.
1 - Reset failure related items in the result dictionary.
This excludes responses and log entries.
2 - Reset all result items in the result dictionary.
Output:
Request handle is updated with the results. |
11,730 | def metadata(self):
if self._metadata is None:
try:
with open(self.paths.metadata()) as metadata_fd:
self._metadata = json.load(metadata_fd)
except IOError:
self._metadata = {}
return self._metadata | Retrieve the metadata info for this prefix
Returns:
dict: metadata info |
11,731 | def _assemble_with_columns(self, sql_str, columns, *args, **kwargs):
qcols = []
for col in columns:
if in col:
wlist = col.split()
qcols.append(sql.SQL().join([sql.Identifier(x) for x in wlist]))
else:
qcols.append(sql.Identifier(col))
query_string = sql.SQL(sql_str).format(
sql.SQL().join(qcols),
*[sql.Literal(a) for a in args]
)
return query_string | Format a select statement with specific columns
:sql_str: An SQL string template
:columns: The columns to be selected and put into {0}
:*args: Arguments to use as query parameters.
:returns: Psycopg2 compiled query |
11,732 | def daemon_start(main, pidfile, daemon=True, workspace=None):
logger.debug("start daemon application pidfile={pidfile} daemon={daemon} workspace={workspace}.".format(pidfile=pidfile, daemon=daemon, workspace=workspace))
new_pid = os.getpid()
workspace = workspace or os.getcwd()
os.chdir(workspace)
daemon_flag = False
if pidfile and daemon:
old_pid = load_pid(pidfile)
if old_pid:
logger.debug("pidfile {pidfile} already exists, pid={pid}.".format(pidfile=pidfile, pid=old_pid))
if old_pid and is_running(old_pid):
error_message = "Service is running in process: {pid}.".format(pid=old_pid)
logger.error(error_message)
six.print_(error_message, file=os.sys.stderr)
os.sys.exit(95)
clean_pid_file(pidfile)
if daemon and os.name == "posix":
make_basic_daemon()
daemon_flag = True
if daemon_flag:
logger.info("Start application in DAEMON mode, pidfile={pidfile} pid={pid}".format(pidfile=pidfile, pid=new_pid))
else:
logger.info("Start application in FRONT mode, pid={pid}.".format(pid=new_pid))
write_pidfile(pidfile)
atexit.register(clean_pid_file, pidfile)
main()
return | Start application in background mode if required and available. If not then in front mode. |
11,733 | def create_entity2user(enti_uid, user_id):
record = TabEntity2User.select().where(
(TabEntity2User.entity_id == enti_uid) & (TabEntity2User.user_id == user_id)
)
if record.count() > 0:
record = record.get()
MEntity2User.count_increate(record.uid, record.count)
else:
TabEntity2User.create(
uid=tools.get_uuid(),
entity_id=enti_uid,
user_id=user_id,
count=1,
timestamp=time.time()
) | create entity2user record in the database. |
11,734 | def _yield_exercises(self):
for day in self.days:
for dynamic_ex in day.dynamic_exercises:
yield dynamic_ex
for static_ex in day.static_exercises:
yield static_ex | A helper function to reduce the number of nested loops.
Yields
-------
(dynamic_ex) or (static_ex)
Yields the exercises in the program. |
11,735 | def link_with_parents(self, parent, c_selectors, c_rules):
parent_found = None
for p_selectors, p_rules in self.parts.items():
_p_selectors, _, _ = p_selectors.partition()
_p_selectors = _p_selectors.split()
new_selectors = set()
found = False
new_parent = re.sub(prev_symbol + _parent +
post_symbol, _c_selector, p_selector)
if p_selector != new_parent:
new_selectors.add(new_parent)
found = True
if found:
parent_found = parent_found or []
parent_found.extend(p_rules)
if new_selectors:
new_selectors = self.normalize_selectors(
p_selectors, new_selectors)
if new_selectors != p_selectors:
del self.parts[p_selectors]
self.parts.setdefault(new_selectors, [])
self.parts[new_selectors].extend(p_rules)
deps = set()
for c_rule in c_rules or []:
c_rule[SELECTORS] = c_selectors
deps.add(c_rule[POSITION])
for p_rule in p_rules:
p_rule[SELECTORS] = new_selectors
p_rule[DEPS].update(
deps)
return parent_found | Link with a parent for the current child rule.
If parents found, returns a list of parent rules to the child |
11,736 | def match_member_id(self, member_conf, current_member_confs):
if current_member_confs is None:
return None
for curr_mem_conf in current_member_confs:
if is_same_address(member_conf[], curr_mem_conf[]):
return curr_mem_conf[]
return None | Attempts to find an id for member_conf where, from the current member confs,
there exists a matching element.
Returns the id of an element of the current confs
WHERE member_conf.host and element.host are EQUAL or map to the same host
11,737 | def _load_tsv_variables(layout, suffix, dataset=None, columns=None,
prepend_type=False, scope=, **selectors):
scanssessionsparticipantsageparticipants.ages get() method; can be used to constrain
which data are loaded.
Returns: A NodeIndex instance.
scansrunsessionssessionparticipantssubject.tsvfile\tll be extracted when the BIDSVariable is initialized anyway).
for ent_name, ent_val in f.entities.items():
if ent_name in ALL_ENTITIES:
_data[ent_name] = ent_val
if suffix == :
_data.drop(columns=, inplace=True)
image = _data[]
_data = _data.drop(, axis=1)
dn = f.dirname
paths = [join(dn, p) for p in image.values]
ent_recs = [layout.files[p].entities for p in paths
if p in layout.files]
ent_cols = pd.DataFrame.from_records(ent_recs)
_data = pd.concat([_data, ent_cols], axis=1, sort=True)
node.add_variable(SimpleVariable(name=col_name, data=df, source=suffix))
return dataset | Reads variables from scans.tsv, sessions.tsv, and participants.tsv.
Args:
layout (BIDSLayout): The BIDSLayout to use.
suffix (str): The suffix of file to read from. Must be one of 'scans',
'sessions', or 'participants'.
dataset (NodeIndex): A BIDS NodeIndex container. If None, a new one is
initialized.
columns (list): Optional list of names specifying which columns in the
files to return. If None, all columns are returned.
prepend_type (bool): If True, variable names are prepended with the
type name (e.g., 'age' becomes 'participants.age').
scope (str, list): The scope of the space to search for variables. See
docstring for BIDSLayout for details and valid predefined values.
selectors (dict): Optional keyword arguments passed onto the
BIDSLayout instance's get() method; can be used to constrain
which data are loaded.
Returns: A NodeIndex instance. |
11,738 | def get_field(name, data, default="object", document_object_field=None, is_document=False):
if isinstance(data, AbstractField):
return data
data = keys_to_string(data)
_type = data.get(, default)
if _type == "string":
return StringField(name=name, **data)
elif _type == "binary":
return BinaryField(name=name, **data)
elif _type == "boolean":
return BooleanField(name=name, **data)
elif _type == "byte":
return ByteField(name=name, **data)
elif _type == "short":
return ShortField(name=name, **data)
elif _type == "integer":
return IntegerField(name=name, **data)
elif _type == "long":
return LongField(name=name, **data)
elif _type == "float":
return FloatField(name=name, **data)
elif _type == "double":
return DoubleField(name=name, **data)
elif _type == "ip":
return IpField(name=name, **data)
elif _type == "date":
return DateField(name=name, **data)
elif _type == "multi_field":
return MultiField(name=name, **data)
elif _type == "geo_point":
return GeoPointField(name=name, **data)
elif _type == "attachment":
return AttachmentField(name=name, **data)
elif is_document or _type == "document":
if document_object_field:
return document_object_field(name=name, **data)
else:
data.pop("name",None)
return DocumentObjectField(name=name, **data)
elif _type == "object":
if in data or "_all" in data:
if document_object_field:
return document_object_field(name=name, **data)
else:
return DocumentObjectField(name=name, **data)
return ObjectField(name=name, **data)
elif _type == "nested":
return NestedObject(name=name, **data)
raise RuntimeError("Invalid type: %s" % _type) | Return a valid Field by given data |
11,739 | def execute(self, task):
try:
return task.run()
except Exception:
if task.retries > 0:
task.retries -= 1
task.to_retrying()
if task.async:
data = task.serialize()
task.task_id = self.backend.push(
self.queue_name,
task.task_id,
data
)
else:
return self.execute(task)
else:
raise | Given a task instance, this runs it.
This includes handling retries & re-raising exceptions.
Ex::
task = Task(async=False, retries=5)
task.to_call(add, 101, 35)
finished_task = gator.execute(task)
:param task_id: The identifier of the task to process
:type task_id: string
:returns: The completed ``Task`` instance |
11,740 | def auth_required(self):
if self._auth:
return self._auth, self
return self.__parent__.auth_required() | If any ancestor required an authentication, this node needs it too. |
11,741 | def DeleteInstance(r, instance, dry_run=False):
return r.request("delete", "/2/instances/%s" % instance,
query={"dry-run": dry_run}) | Deletes an instance.
@type instance: str
@param instance: the instance to delete
@rtype: int
@return: job id |
11,742 | def check_email_syntax (self, mail):
if len(mail) > 256:
self.set_result(_("Mail address `%(addr)s in mail address `%(addr)s.") % \
{"addr": mail}, valid=False, overwrite=False)
return
if not domain:
self.set_result(_("Missing domain part of mail address `%(addr)s too long. Allowed 64 chars, was %(length)d chars.") % \
{"addr": mail, "length": len(local)}, valid=False, overwrite=False)
return
if len(domain) > 255:
self.set_result(_("Domain part of mail address `%(addr)s.") % \
{"addr": mail}, valid=False, overwrite=False)
return
else:
if local.startswith(u"."):
self.set_result(_("Local part of mail address `%(addr)s may not end with a dot.") % \
{"addr": mail}, valid=False, overwrite=False)
return
if u".." in local:
self.set_result(_("Local part of mail address `%(addr)s@ \\",[] contains unquoted character `%(char)s.") % \
{"addr": mail, "char": char}, valid=False, overwrite=False)
return
if is_literal(domain):
{"addr": mail}, valid=False, overwrite=False)
return
else:
{"addr": mail}, valid=False, overwrite=False)
return
if domain.endswith(".") or domain.split(".")[-1].isdigit():
self.set_result(_("Invalid top level domain part of mail address `%(addr)s'.") % \
{"addr": mail}, valid=False, overwrite=False)
return | Check email syntax. The relevant RFCs:
- How to check names (memo):
http://tools.ietf.org/html/rfc3696
- Email address syntax
http://tools.ietf.org/html/rfc2822
- SMTP protocol
http://tools.ietf.org/html/rfc5321#section-4.1.3
- IPv6
http://tools.ietf.org/html/rfc4291#section-2.2
- Host syntax
http://tools.ietf.org/html/rfc1123#section-2 |
11,743 | def insort_right(a, x, lo=0, hi=None):
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if x < a[mid]: hi = mid
else: lo = mid+1
a.insert(lo, x) | Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the right of the rightmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched. |
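A quick usage sketch; the behaviour matches the standard library's bisect.insort_right.

a = [1, 2, 4, 4, 5]
insort_right(a, 4)        # inserted to the right of the existing 4s
print(a)                  # -> [1, 2, 4, 4, 4, 5]
insort_right(a, 3, lo=1)  # lo/hi restrict the searched slice
print(a)                  # -> [1, 2, 3, 4, 4, 4, 5]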
11,744 | def forms(self):
forms = FormsDict()
for name, item in self.POST.iterallitems():
if not hasattr(item, ):
forms[name] = item
return forms | Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. |
11,745 | def axis_bounds(self) -> Dict[str, Tuple[float, float]]:
return {ax: (0, pos+0.5) for ax, pos in _HOME_POSITION.items()
if ax not in } | The (minimum, maximum) bounds for each axis. |
11,746 | def fmt_duration(secs):
return .join(fmt.human_duration(secs, 0, precision=2, short=True).strip().split()) | Format a duration in seconds. |
11,747 | def login_required(request):
"Lookup decorator to require the user to be authenticated."
user = getattr(request, , None)
if user is None or not user.is_authenticated:
return HttpResponse(status=401) | Lookup decorator to require the user to be authenticated. |
11,748 | def loadResults(resultsFile):
with open(resultsFile) as f:
raw=f.read().split("\n")
foldersByDay={}
for line in raw:
folder=line.split()[1]+"\\"
line=[]+line.split()[2].split(", ")
for day in line[1:]:
if not day in foldersByDay:
foldersByDay[day]=[]
foldersByDay[day]=foldersByDay[day]+[folder]
nActiveDays=len(foldersByDay)
dayFirst=sorted(foldersByDay.keys())[0]
dayLast=sorted(foldersByDay.keys())[-1]
dayFirst=datetime.datetime.strptime(dayFirst, "%Y-%m-%d" )
dayLast=datetime.datetime.strptime(dayLast, "%Y-%m-%d" )
nDays = (dayLast - dayFirst).days + 1
emptyDays=0
for deltaDays in range(nDays):
day=dayFirst+datetime.timedelta(days=deltaDays)
stamp=datetime.datetime.strftime(day, "%Y-%m-%d" )
if not stamp in foldersByDay:
foldersByDay[stamp]=[]
emptyDays+=1
percActive=nActiveDays/nDays*100
print("%d of %d days were active (%.02f%%)"%(nActiveDays,nDays,percActive))
return foldersByDay | returns a dict of active folders with days as keys. |
11,749 | def _build(self,
input_batch,
is_training,
test_local_stats=False):
input_shape = input_batch.get_shape()
if not self._data_format:
if len(input_shape) == 2:
self._data_format = "NC"
elif len(input_shape) == 3:
self._data_format = "NWC"
elif len(input_shape) == 4:
self._data_format = "NHWC"
elif len(input_shape) == 5:
self._data_format = "NDHWC"
else:
raise base.IncompatibleShapeError(
"Input shape {} has too many or too few dimensions.".format(
input_shape))
self._channel_index = self._data_format.index("C")
self._axis = list(range(len(self._data_format)))
del self._axis[self._channel_index]
if len(self._data_format) != len(input_shape):
raise base.IncompatibleShapeError(
"Incorrect data format {} for input shape {}.".format(
self._data_format, input_shape))
dtype = input_batch.dtype.base_dtype
if self._fused and dtype == tf.bfloat16:
raise base.NotSupportedError(
"Fused batch norm does not support tf.bfloat16.")
stat_dtype = tf.float32 if dtype in [tf.float16, tf.bfloat16] else dtype
self._num_channels = int(input_shape[self._channel_index])
if self._channel_index == 1:
self._image_shape = [int(x) for x in input_shape[2:]]
else:
self._image_shape = [int(x) for x in input_shape[1:-1]]
self._expanded_mean_shape = [1] * len(input_shape)
self._expanded_mean_shape[self._channel_index] = self._num_channels
use_batch_stats = is_training | test_local_stats
mean, variance = self._build_statistics(input_batch, use_batch_stats,
stat_dtype)
self._build_scale_offset(dtype)
out, mean, variance = self._batch_norm_op(input_batch, mean, variance,
use_batch_stats, stat_dtype)
update_ops = self._build_update_ops(mean, variance, is_training)
if update_ops:
if self._update_ops_collection:
for update_op in update_ops:
tf.add_to_collection(self._update_ops_collection, update_op)
else:
with tf.control_dependencies(update_ops):
out = tf.identity(out)
return out | Connects the BatchNormV2 module into the graph.
Args:
input_batch: A Tensor of the same dimension as `len(data_format)`.
is_training: A boolean to indicate if the module should be connected in
training mode, meaning the moving averages are updated. Can be a Tensor.
test_local_stats: A boolean to indicate if local batch statistics should
be used when `is_training=False`. If not, moving averages are used.
By default `False`. Can be a Tensor.
Returns:
A tensor with the same shape as `input_batch`.
Raises:
base.IncompatibleShapeError: If `data_format` is not valid for the
input shape.
base.NotSupportedError: If `input_batch` has data type of `tf.bfloat16`. |
11,750 | def get_proxy_parts(proxy):
proxy_parts = {: None,
: None,
: None,
: None,
: None,
}
results = re.match(proxy_parts_pattern, proxy)
if results:
matched = results.groupdict()
for key in proxy_parts:
proxy_parts[key] = matched.get(key)
else:
logger.error("Invalid proxy format `{proxy}`".format(proxy=proxy))
if proxy_parts[] is None:
proxy_parts[] =
return proxy_parts | Take a proxy url and break it up to its parts |
11,751 | def find_usage(self):
logger.debug("Checking usage for service %s", self.service_name)
self.connect()
for lim in self.limits.values():
lim._reset_usage()
self._find_usage_vpcs()
subnet_to_az = self._find_usage_subnets()
self._find_usage_ACLs()
self._find_usage_route_tables()
self._find_usage_gateways()
self._find_usage_nat_gateways(subnet_to_az)
self._find_usages_vpn_gateways()
self._find_usage_network_interfaces()
self._have_usage = True
logger.debug("Done checking usage.") | Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`. |
11,752 | def peukerdouglas(np, fel, streamSkeleton, workingdir=None, mpiexedir=None, exedir=None,
log_file=None, runtime_file=None, hostfile=None):
fname = TauDEM.func_name()
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{: fel}, workingdir,
None,
{: streamSkeleton},
{: mpiexedir, : hostfile, : np},
{: log_file, : runtime_file}) | Run peuker-douglas function |
11,753 | def process_needlist(app, doctree, fromdocname):
env = app.builder.env
for node in doctree.traverse(Needlist):
if not app.config.needs_include_needs:
for att in (, , , ):
node[att] = []
node.replace_self([])
continue
id = node.attributes["ids"][0]
current_needfilter = env.need_all_needlists[id]
all_needs = env.needs_all_needs
content = []
all_needs = list(all_needs.values())
if current_needfilter["sort_by"] is not None:
if current_needfilter["sort_by"] == "id":
all_needs = sorted(all_needs, key=lambda node: node["id"])
elif current_needfilter["sort_by"] == "status":
all_needs = sorted(all_needs, key=status_sorter)
found_needs = procces_filters(all_needs, current_needfilter)
line_block = nodes.line_block()
for need_info in found_needs:
para = nodes.line()
description = "%s: %s" % (need_info["id"], need_info["title"])
if current_needfilter["show_status"] and need_info["status"] is not None:
description += " (%s)" % need_info["status"]
if current_needfilter["show_tags"] and need_info["tags"] is not None:
description += " [%s]" % "; ".join(need_info["tags"])
title = nodes.Text(description, description)
if not need_info["hide"]:
ref = nodes.reference(, )
ref[] = need_info[]
ref[] = app.builder.get_relative_uri(
fromdocname, need_info[])
ref[] += + need_info[][]
ref.append(title)
para += ref
else:
para += title
line_block.append(para)
content.append(line_block)
if len(content) == 0:
content.append(no_needs_found_paragraph())
if current_needfilter["show_filters"]:
content.append(used_filter_paragraph(current_needfilter))
node.replace_self(content) | Replace all needlist nodes with a list of the collected needs.
Augment each need with a backlink to the original location. |
11,754 | def generate_exercises_from_importstudioid(self, args, options):
print()
self.studioapi = StudioApi(token=args[])
channel_dict = self.studioapi.get_tree_for_studio_id(args[])
json.dump(channel_dict, open(, ), indent=4, ensure_ascii=False, sort_keys=True)
soure_ids_seen = []
def _generate_source_id(subtree):
candidate = subtree[].replace(, )
if candidate not in soure_ids_seen:
source_id = candidate
soure_ids_seen.append(source_id)
else:
source_id = candidate + subtree[][0:7]
soure_ids_seen.append(source_id)
return source_id
def _write_subtree(path_tuple, subtree, is_root=False):
print(*len(path_tuple) + , subtree[])
kind = subtree[]
if kind == :
if is_root:
self.write_topic_row_from_studio_dict(path_tuple, subtree, is_root=is_root)
for child in subtree[]:
_write_subtree(path_tuple, child)
else:
self.write_topic_row_from_studio_dict(path_tuple, subtree)
for child in subtree[]:
_write_subtree(path_tuple+[subtree[]], child)
elif kind == :
source_id = _generate_source_id(subtree)
self.write_exercice_row_from_studio_dict(path_tuple, subtree, source_id)
for question_dict in subtree[]:
self.write_question_row_from_question_dict(source_id, question_dict)
else:
print(, subtree[])
path_tuple = [ self.channeldir.split()[-1] ]
_write_subtree(path_tuple, channel_dict, is_root=True) | Create rows in Exercises.csv and ExerciseQuestions.csv from a Studio channel,
specified based on a studio_id (e.g. studio_id of main_tree for some channel). |
11,755 | def query_sequence_length(self):
if self.entries.seq: return len(self.entries.seq)
if not self.entries.cigar:
raise ValueError()
return sum([x[0] for x in self.cigar_array if re.match(,x[1])]) | does not include hard clipped |
11,756 | def _iparam_objectname(objectname, arg_name):
if isinstance(objectname, (CIMClassName, CIMInstanceName)):
objectname = objectname.copy()
objectname.host = None
objectname.namespace = None
elif isinstance(objectname, six.string_types):
objectname = CIMClassName(objectname)
elif objectname is None:
pass
else:
raise TypeError(
_format("The {0!A} argument of the WBEMConnection operation "
"has invalid type {1} (must be None, a string, a "
"CIMClassName, or a CIMInstanceName)",
arg_name, type(objectname)))
return objectname | Convert an object name (= class or instance name) specified in an
operation method into a CIM object that can be passed to
imethodcall(). |
11,757 | def loess_inline(h,x,cut,nan=True):
n=np.size(h)
if n == 1 : lf = h[0]
else : pass
l_c=cut/2.0
w=np.zeros(n)
lf = np.repeat(np.NaN,n)
flag = ~np.isnan(h)
fcnt = flag.sum()
fnt = np.arange(n).compress(flag)
for i in fnt :
icur=i
q=(np.abs((x-x[icur])/l_c))
s = (1.0 - q*q*q)
outOfFilter_flag = q > 1.0
outCnt = outOfFilter_flag.sum()
outOfFilter = np.arange(n).compress(outOfFilter_flag)
if (outCnt > 0) : s[outOfFilter]=0.0
w=s*s*s
sumvar=np.nansum(w)
lf[icur]=np.nansum(w*h)/sumvar
return lf | This is a raw, inline version of the loess filtering function. It runs much more slowly. |
11,758 | def example4():
transform_matrix = create_transformation_matrix()
result = tl.prepro.affine_transform_cv2(image, transform_matrix)
coords = [[(50, 100), (100, 100), (100, 50), (200, 200)], [(250, 50), (200, 50), (200, 100)]]
coords_result = tl.prepro.affine_transform_keypoints(coords, transform_matrix)
def imwrite(image, coords_list, name):
coords_list_ = []
for coords in coords_list:
coords = np.array(coords, np.int32)
coords = coords.reshape((-1, 1, 2))
coords_list_.append(coords)
image = cv2.polylines(image, coords_list_, True, (0, 255, 255), 3)
cv2.imwrite(name, image[..., ::-1])
imwrite(image, coords, )
imwrite(result, coords_result, ) | Example 4: Transforming coordinates using affine matrix. |
11,759 | def assertDateTimesLagEqual(self, sequence, lag, msg=None):
if not isinstance(sequence, collections.Iterable):
raise TypeError()
if not isinstance(lag, timedelta):
raise TypeError()
if isinstance(max(sequence), datetime):
target = datetime.today()
elif isinstance(max(sequence), date):
target = date.today()
else:
raise TypeError()
self.assertEqual(target - max(sequence), lag, msg=msg) | Fail unless max element in ``sequence`` is separated from
the present by ``lag`` as determined by the '==' operator.
If the max element is a datetime, "present" is defined as
``datetime.now()``; if the max element is a date, "present"
is defined as ``date.today()``.
This is equivalent to
``self.assertEqual(present - max(sequence), lag)``.
Parameters
----------
sequence : iterable
lag : timedelta
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``sequence`` is not iterable.
TypeError
If ``lag`` is not a timedelta object.
TypeError
If max element in ``sequence`` is not a datetime or date
object. |
11,760 | def main(argv=None):
args = parse_arguments(sys.argv if argv is None else argv)
temp_dir = os.path.join(args.output, )
if args.cloud:
pipeline_name =
else:
pipeline_name =
os.environ[]=
options = {
: args.job_name,
: temp_dir,
: args.project_id,
:
os.path.abspath(os.path.join(
os.path.dirname(__file__),
)),
}
if args.num_workers:
options[] = args.num_workers
if args.worker_machine_type:
options[] = args.worker_machine_type
pipeline_options = beam.pipeline.PipelineOptions(flags=[], **options)
p = beam.Pipeline(pipeline_name, options=pipeline_options)
preprocess(pipeline=p, args=args)
pipeline_result = p.run()
if not args.async:
pipeline_result.wait_until_finish()
if args.async and args.cloud:
print( %
(pipeline_result.job_id(), args.project_id)) | Run Preprocessing as a Dataflow. |
11,761 | def load(self, path):
fieldnames = [, ]
with open(path, , encoding=, newline=) as fh:
reader = csv.DictReader(fh, delimiter=, fieldnames=fieldnames,
skipinitialspace=True)
for row in reader:
work, label = row[], row[]
if label:
if label not in self._ordered_labels:
self._ordered_labels.append(label)
if work in self:
raise MalformedCatalogueError(
CATALOGUE_WORK_RELABELLED_ERROR.format(work))
self[work] = label | Loads the data from `path` into the catalogue.
:param path: path to catalogue file
:type path: `str` |
11,762 | def pluginSetting(name, namespace=None, typ=None):
def _find_in_cache(name, key):
for setting in _settings[namespace]:
if setting["name"] == name:
return setting[key]
return None
def _type_map(t):
if t == BOOL:
return bool
elif t == NUMBER:
return float
else:
return unicode
namespace = namespace or _callerName().split(".")[0]
full_name = namespace + "/" + name
if settings.contains(full_name):
if typ is None:
typ = _type_map(_find_in_cache(name, ))
v = settings.value(full_name, None, type=typ)
try:
if isinstance(v, QPyNullVariant):
v = None
except:
pass
return v
else:
return _find_in_cache(name, ) | Returns the value of a plugin setting.
:param name: the name of the setting. It is not the full path, but just the last name of it
:param namespace: The namespace. If not passed or None, the namespace will be inferred from
the caller method. Normally, this should not be passed, since it suffices to let this function
find out the plugin from where it is being called, and it will automatically use the
corresponding plugin namespace |
11,763 | def watch_statuses(self, observer, batch_ids):
with self._lock:
statuses = self.get_statuses(batch_ids)
if self._has_no_pendings(statuses):
observer.notify_batches_finished(statuses)
else:
self._observers[observer] = statuses | Allows a component to register to be notified when a set of
batches is no longer PENDING. Expects to be able to call the
"notify_batches_finished" method on the registered component, sending
the statuses of the batches.
Args:
observer (object): Must implement "notify_batches_finished" method
batch_ids (list of str): The ids of the batches to watch |
11,764 | def _band_calculations(self, rsr, flux, scale, **options):
from scipy.interpolate import InterpolatedUnivariateSpline
if in options:
detector = options[]
else:
detector = 1
if self.wavespace == :
if in rsr:
wvl = rsr[] * scale
resp = rsr[]
else:
wvl = rsr[.format(detector)][] * scale
resp = rsr[.format(detector)][]
else:
if in rsr:
wvl = rsr[] * scale
resp = rsr[]
else:
wvl = rsr[.format(detector)][] * scale
resp = rsr[.format(detector)][]
start = wvl[0]
end = wvl[-1]
LOG.debug("Begin and end wavelength/wavenumber: %f %f ", start, end)
dlambda = self._dlambda
xspl = np.linspace(start, end, round((end - start) / self._dlambda) + 1)
ius = InterpolatedUnivariateSpline(wvl, resp)
resp_ipol = ius(xspl)
self.interpolate(dlambda=dlambda, ival_wavelength=(start, end))
maskidx = np.logical_and(np.greater_equal(self.ipol_wavelength, start),
np.less_equal(self.ipol_wavelength, end))
wvl = np.repeat(self.ipol_wavelength, maskidx)
irr = np.repeat(self.ipol_irradiance, maskidx)
if flux:
return np.trapz(irr * resp_ipol, wvl)
else:
return np.trapz(irr * resp_ipol, wvl) / np.trapz(resp_ipol, wvl) | Derive the inband solar flux or inband solar irradiance for a given
instrument relative spectral response valid for an earth-sun distance
of one AU.
rsr: Relative Spectral Response (one detector only)
Dictionary with two members 'wavelength' and 'response'
options:
detector: Detector number (between 1 and N - N=number of detectors
for channel) |
11,765 | def render(request, template_name, context=None, content_type=None, status=None, using=None, logs=None):
if logs:
obj_logger = ObjectLogger()
if not isinstance(logs, list):
logs = [logs, ]
for log in logs:
log = obj_logger.log_response(
log,
context,
status=str(status),
headers=,
content_type=str(content_type))
log.save()
return django_render(
request,
template_name,
context=context,
content_type=content_type,
status=status,
using=using) | Wrapper around Django render method. Can take one or a list of logs and logs the response.
No overhead if no logs are passed. |
11,766 | def _parse_led(self, keypad, component_xml):
component_num = int(component_xml.get())
led_num = component_num - 80
led = Led(self._lutron, keypad,
name=( % led_num),
led_num=led_num,
component_num=component_num)
return led | Parses an LED device that part of a keypad. |
11,767 | def do_wait(coro: Callable) -> Any:
event_loop = None
try:
event_loop = asyncio.get_event_loop()
except RuntimeError:
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
return event_loop.run_until_complete(coro) | Perform an asynchronous operation; await it, then return the result.
:param coro: coroutine to await
:return: coroutine result |
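A minimal sketch of calling the helper above with an awaitable; the coroutine is invented for the example.

import asyncio

async def _add(a, b):
    await asyncio.sleep(0)  # stand-in for real async work
    return a + b

print(do_wait(_add(2, 3)))  # -> 5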
11,768 | def make_all_uppercase(
lst: Union[list, tuple, str, set]
) -> Union[list, tuple, str, set]:
if not isinstance(lst, (list, tuple, str, set)):
raise TypeError()
if isinstance(lst, str):
return lst.upper()
arr = list(lst)
arr[:] = [
Aux.make_all_uppercase(element) if (
isinstance(element, (list, tuple, str, set))
) else element for element in arr
]
if isinstance(lst, set):
return set(arr)
elif isinstance(lst, tuple):
return tuple(arr)
return arr | Make all characters uppercase.
It supports characters in a (mix of) list, tuple, set or string.
The return value is of the same type of the input value. |
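Assuming the method above is defined as a static helper on a class named Aux (as its recursive call suggests), a usage sketch:

mixed = ["abc", ("de", {"f", "g"}), 42]
# Nested containers keep their types; the non-string leaf 42 passes through unchanged.
print(Aux.make_all_uppercase(mixed))    # -> ['ABC', ('DE', {'F', 'G'}), 42] (set order may vary)
print(Aux.make_all_uppercase("hello"))  # -> 'HELLO'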
11,769 | def namedTempFileReader(self) -> NamedTempFileReader:
directory = self._directory()
assert isinstance(directory, Directory), (
"Expected Directory, receieved %s" % directory)
return NamedTempFileReader(directory, self) | Named Temporary File Reader
This provides an object compatible with NamedTemporaryFile, used for reading this
files contents. This will still delete after the object falls out of scope.
This solves the problem on windows where a NamedTemporaryFile can not be read
while it's being written to |
11,770 | def check_data_complete(data, parameter_columns):
param_edges = [p[1:] for p in parameter_columns if isinstance(p, (Tuple, List))]
for p in param_edges:
other_params = [p_ed[0] for p_ed in param_edges if p_ed != p]
if other_params:
sub_tables = data.groupby(list(other_params))
else:
sub_tables = {None: data}.items()
n_p_total = len(set(data[p[0]]))
for _, table in sub_tables:
param_data = table[[p[0], p[1]]].copy().sort_values(by=p[0])
start, end = param_data[p[0]].reset_index(drop=True), param_data[p[1]].reset_index(drop=True)
if len(set(start)) < n_p_total:
raise ValueError(f)
if len(start) <= 1:
continue
for i in range(1, len(start)):
e = end[i-1]
s = start[i]
if e > s or s == start[i-1]:
raise ValueError(f
f)
if e < s:
raise NotImplementedError(f
f
f) | For any parameters specified with edges, make sure edges
don't overlap and don't have any gaps. Assumes that edges are
specified with ends and starts overlapping (but one exclusive and
the other inclusive) so can check that end of previous == start
of current.
If multiple parameters, make sure all combinations of parameters
are present in data. |
11,771 | def merge(d1, d2):
d1, d2 = deepcopy(d1), deepcopy(d2)
if d1 == {} or type(d1) is not dict:
return _merge_fix(d2)
for key in d2.keys():
if key[0] == '&':
data = d2[key]
key = key[1:]
if key in d1:
if type(d1[key]) is dict and type(data) is dict:
d1[key] = merge(d1[key], data)
elif type(d1[key]) is list:
d1[key].append(data)
else:
d1[key] = [d1[key], data]
else:
d1[key] = data
elif key[0] == '-':
data = d2[key]
key = key[1:]
if key in d1:
if data is None:
d1.pop(key)
elif type(d1[key]) is list and data in d1[key]:
d1[key].remove(data)
else:
d1[key] = _merge_fix(d2[key])
return d1 | This method does cool stuff like append and replace for dicts
d1 = {
"steve": 10,
"gary": 4
}
d2 = {
"&steve": 11,
"-gary": null
}
result = {
"steve": [10, 11]
} |
11,772 | def dump_by_server(self, hosts):
dump_by_endpoint = {}
for endpoint in self._to_endpoints(hosts):
try:
out = self.cmd([endpoint], "dump")
except self.CmdFailed as ex:
out = ""
dump_by_endpoint[endpoint] = out
return dump_by_endpoint | Returns the output of dump for each server.
:param hosts: comma separated lists of members of the ZK ensemble.
:returns: A dictionary of ((server_ip, port), ClientInfo). |
11,773 | def translate_aliases(kwargs, aliases):
result = {}
for given_key, value in kwargs.items():
canonical_key = aliases.get(given_key, given_key)
if canonical_key in result:
key_names = .join("{}".format(k) for k in kwargs if
aliases.get(k) == canonical_key)
raise dbt.exceptions.AliasException(
.format(key_names, canonical_key)
)
result[canonical_key] = value
return result | Given a dict of keyword arguments and a dict mapping aliases to their
canonical values, canonicalize the keys in the kwargs dict.
:return: A dict containing all the values in kwargs referenced by their
canonical key.
:raises: `AliasException`, if a canonical key is defined more than once. |
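A hypothetical call illustrating the alias canonicalisation above; the alias and key names are invented. Passing both an alias and its canonical key would raise AliasException.

aliases = {"dbname": "database", "pass": "password"}
kwargs = {"dbname": "analytics", "user": "reader"}
print(translate_aliases(kwargs, aliases))
# -> {'database': 'analytics', 'user': 'reader'}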
11,774 | def get_clean_factor(factor,
forward_returns,
groupby=None,
binning_by_group=False,
quantiles=5,
bins=None,
groupby_labels=None,
max_loss=0.35,
zero_aware=False):
initial_amount = float(len(factor.index))
factor_copy = factor.copy()
factor_copy.index = factor_copy.index.rename([, ])
merged_data = forward_returns.copy()
merged_data[] = factor_copy
if groupby is not None:
if isinstance(groupby, dict):
diff = set(factor_copy.index.get_level_values(
)) - set(groupby.keys())
if len(diff) > 0:
raise KeyError(
"Assets {} not in group mapping".format(
list(diff)))
ss = pd.Series(groupby)
groupby = pd.Series(index=factor_copy.index,
data=ss[factor_copy.index.get_level_values(
)].values)
if groupby_labels is not None:
diff = set(groupby.values) - set(groupby_labels.keys())
if len(diff) > 0:
raise KeyError(
"groups {} not in passed group names".format(
list(diff)))
sn = pd.Series(groupby_labels)
groupby = pd.Series(index=groupby.index,
data=sn[groupby.values].values)
merged_data[] = groupby.astype()
merged_data = merged_data.dropna()
fwdret_amount = float(len(merged_data.index))
no_raise = False if max_loss == 0 else True
quantile_data = quantize_factor(
merged_data,
quantiles,
bins,
binning_by_group,
no_raise,
zero_aware
)
merged_data[] = quantile_data
merged_data = merged_data.dropna()
binning_amount = float(len(merged_data.index))
tot_loss = (initial_amount - binning_amount) / initial_amount
fwdret_loss = (initial_amount - fwdret_amount) / initial_amount
bin_loss = tot_loss - fwdret_loss
print("Dropped %.1f%% entries from factor data: %.1f%% in forward "
"returns computation and %.1f%% in binning phase "
"(set max_loss=0 to see potentially suppressed Exceptions)." %
(tot_loss * 100, fwdret_loss * 100, bin_loss * 100))
if tot_loss > max_loss:
message = ("max_loss (%.1f%%) exceeded %.1f%%, consider increasing it."
% (max_loss * 100, tot_loss * 100))
raise MaxLossExceededError(message)
else:
print("max_loss is %.1f%%, not exceeded: OK!" % (max_loss * 100))
return merged_data | Formats the factor data, forward return data, and group mappings into a
DataFrame that contains aligned MultiIndex indices of timestamp and asset.
The returned data will be formatted to be suitable for Alphalens functions.
It is safe to skip a call to this function and still make use of Alphalens
functionalities as long as the factor data conforms to the format returned
from get_clean_factor_and_forward_returns and documented here
Parameters
----------
factor : pd.Series - MultiIndex
A MultiIndex Series indexed by timestamp (level 0) and asset
(level 1), containing the values for a single alpha factor.
::
-----------------------------------
date | asset |
-----------------------------------
| AAPL | 0.5
-----------------------
| BA | -1.1
-----------------------
2014-01-01 | CMG | 1.7
-----------------------
| DAL | -0.1
-----------------------
| LULU | 2.7
-----------------------
forward_returns : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by timestamp (level 0) and asset
(level 1), containing the forward returns for assets.
Forward returns column names must follow the format accepted by
pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc).
'date' index freq property must be set to a trading calendar
(pandas DateOffset), see infer_trading_calendar for more details.
This information is currently used only in cumulative returns
computation
::
---------------------------------------
| | 1D | 5D | 10D
---------------------------------------
date | asset | | |
---------------------------------------
| AAPL | 0.09|-0.01|-0.079
----------------------------
| BA | 0.02| 0.06| 0.020
----------------------------
2014-01-01 | CMG | 0.03| 0.09| 0.036
----------------------------
| DAL |-0.02|-0.06|-0.029
----------------------------
| LULU |-0.03| 0.05|-0.009
----------------------------
groupby : pd.Series - MultiIndex or dict
Either A MultiIndex Series indexed by date and asset,
containing the period wise group codes for each asset, or
a dict of asset to group mappings. If a dict is passed,
it is assumed that group mappings are unchanged for the
entire time period of the passed factor data.
binning_by_group : bool
If True, compute quantile buckets separately for each group.
This is useful when the factor values range vary considerably
across groups so that it is wise to make the binning group relative.
You should probably enable this if the factor is intended
to be analyzed for a group neutral portfolio
quantiles : int or sequence[float]
Number of equal-sized quantile buckets to use in factor bucketing.
Alternately sequence of quantiles, allowing non-equal-sized buckets
e.g. [0, .10, .5, .90, 1.] or [.05, .5, .95]
Only one of 'quantiles' or 'bins' can be not-None
bins : int or sequence[float]
Number of equal-width (valuewise) bins to use in factor bucketing.
Alternately sequence of bin edges allowing for non-uniform bin width
e.g. [-4, -2, -0.5, 0, 10]
Chooses the buckets to be evenly spaced according to the values
themselves. Useful when the factor contains discrete values.
Only one of 'quantiles' or 'bins' can be not-None
groupby_labels : dict
A dictionary keyed by group code with values corresponding
to the display name for each group.
max_loss : float, optional
Maximum percentage (0.00 to 1.00) of factor data dropping allowed,
computed comparing the number of items in the input factor index and
the number of items in the output DataFrame index.
Factor data can be partially dropped due to being flawed itself
(e.g. NaNs), not having provided enough price data to compute
forward returns for all factor values, or because it is not possible
to perform binning.
Set max_loss=0 to avoid Exceptions suppression.
zero_aware : bool, optional
If True, compute quantile buckets separately for positive and negative
signal values. This is useful if your signal is centered and zero is
the separation between long and short signals, respectively.
'quantiles' is None.
Returns
-------
merged_data : pd.DataFrame - MultiIndex
A MultiIndex Series indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- forward returns column names follow the format accepted by
pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc)
- 'date' index freq property (merged_data.index.levels[0].freq) is the
same as that of the input forward returns data. This is currently
used only in cumulative returns computation
::
-------------------------------------------------------------------
| | 1D | 5D | 10D |factor|group|factor_quantile
-------------------------------------------------------------------
date | asset | | | | | |
-------------------------------------------------------------------
| AAPL | 0.09|-0.01|-0.079| 0.5 | G1 | 3
--------------------------------------------------------
| BA | 0.02| 0.06| 0.020| -1.1 | G2 | 5
--------------------------------------------------------
2014-01-01 | CMG | 0.03| 0.09| 0.036| 1.7 | G2 | 1
--------------------------------------------------------
| DAL |-0.02|-0.06|-0.029| -0.1 | G3 | 5
--------------------------------------------------------
| LULU |-0.03| 0.05|-0.009| 2.7 | G1 | 2
-------------------------------------------------------- |
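The expected input shapes can be assembled with plain pandas; a hedged sketch with made-up tickers and random values (a real run would also want the 'date' level's freq set to a trading calendar, as noted above):
import numpy as np
import pandas as pd

dates = pd.date_range('2014-01-01', periods=3, freq='D')
assets = ['AAPL', 'BA', 'CMG']
index = pd.MultiIndex.from_product([dates, assets], names=['date', 'asset'])

# factor: MultiIndex (date, asset) Series of alpha values
factor = pd.Series(np.random.randn(len(index)), index=index)

# forward returns: MultiIndex DataFrame, columns named as pd.Timedelta strings
forward_returns = pd.DataFrame(np.random.randn(len(index), 2) * 0.01,
                               index=index, columns=['1D', '5D'])

# group mapping: plain dict of asset -> group code
groupby = {'AAPL': 'tech', 'BA': 'industrial', 'CMG': 'consumer'}

# merged = get_clean_factor(factor, forward_returns, groupby=groupby, quantiles=3)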
11,775 | def cvtToBlocks(rh, diskSize):
rh.printSysLog("Enter generalUtils.cvtToBlocks")
blocks = 0
results = {'overallRC': 0, 'rc': 0, 'rs': 0, 'errno': 0}
blocks = diskSize.strip().upper()
lastChar = blocks[-1]
if lastChar == 'M' or lastChar == 'G':
byteSize = blocks[:-1]
if byteSize == '':
msg = msgs.msg[][1] % (modId, blocks)
rh.printLn("ES", msg)
results = msgs.msg[][0]
else:
try:
if lastChar == 'M':
blocks = (float(byteSize) * 1024 * 1024) / 512
elif lastChar == 'G':
blocks = (float(byteSize) * 1024 * 1024 * 1024) / 512
blocks = str(int(math.ceil(blocks)))
except Exception:
msg = msgs.msg[][1] % (modId, byteSize)
rh.printLn("ES", msg)
results = msgs.msg[][0]
elif blocks.strip('1234567890'):
msg = msgs.msg[][1] % (modId, blocks)
rh.printLn("ES", msg)
results = msgs.msg[][0]
rh.printSysLog("Exit generalUtils.cvtToBlocks, rc: " +
str(results['overallRC']))
return results, blocks | Convert a disk storage value to a number of blocks.
Input:
Request Handle
Size of disk in bytes
Output:
Results structure:
overallRC - Overall return code for the function:
0 - Everything went ok
4 - Input validation error
rc - Return code causing the return. Same as overallRC.
rs - Reason code causing the return.
errno - Errno value causing the return. Always zero.
Converted value in blocks |
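Stripped of the request-handle and error-table plumbing, the underlying arithmetic is a ceil-division into 512-byte blocks; a minimal stand-alone sketch:
import math

def size_to_blocks(size):
    # '10M' / '2G' -> number of 512-byte blocks, rounded up; plain digits pass through.
    size = size.strip().upper()
    if size.endswith('M'):
        return int(math.ceil(float(size[:-1]) * 1024 * 1024 / 512))
    if size.endswith('G'):
        return int(math.ceil(float(size[:-1]) * 1024 * 1024 * 1024 / 512))
    return int(size)  # already a block count

print(size_to_blocks('10M'))  # 20480
print(size_to_blocks('2G'))   # 4194304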
11,776 | def post(self, request, pzone_pk):
pzone = None
try:
pzone = PZone.objects.get(pk=pzone_pk)
except PZone.DoesNotExist:
raise Http404("Cannot find given pzone.")
json_obj = []
http_status = 500
json_op = json.loads(request.body.decode("utf8"))
if not isinstance(json_op, list):
json_op = [json_op]
for data in json_op:
try:
serializer = self.get_serializer_class_by_name(data["type_name"])
except ContentType.DoesNotExist as e:
json_obj = {"errors": [str(e)]}
http_status = 400
break
serialized = serializer(data=data)
if serialized.is_valid():
serialized.save()
json_obj.append(serialized.data)
http_status = 200
else:
json_obj = serialized.errors
http_status = 400
break
if http_status == 200 and len(json_obj) == 1:
json_obj = json_obj[0]
next_ops = PZoneOperation.objects.filter(when__lte=timezone.now())
if len(next_ops) > 0:
next_op = next_ops[0]
cache.set( + pzone.name, next_op.when, 60 * 60 * 5)
return Response(
json_obj,
status=http_status,
content_type="application/json"
) | Add a new operation to the given pzone, return json of the new operation. |
11,777 | def _create(self, format, args):
constructor = self._LEAF_CONSTRUCTORS.get(format[0])
if constructor:
if args is not None:
if not args:
raise TypeError()
v = constructor(args[0])
return (v, format[1:], args[1:])
else:
return (None, format[1:], None)
if format[0] == '(':
return self._create_tuple(format, args)
if format.startswith('a{'):
return self._create_dict(format, args)
if format[0] == 'a':
return self._create_array(format, args)
raise NotImplementedError('cannot handle GVariant type ' + format)
This method recursively calls itself for complex structures (arrays,
dictionaries, boxed).
Return a tuple (variant, rest_format, rest_args) with the generated
GVariant, the remainder of the format string, and the remainder of the
arguments.
If args is None, then this won't actually consume any arguments, and
just parse the format string and generate empty GVariant structures.
This is required for creating empty dictionaries or arrays. |
11,778 | def add_curie(self, name, href):
self.draft.set_curie(self, name, href)
return self | Adds a CURIE definition.
A CURIE link with the given ``name`` and ``href`` is added to the
document.
This method returns self, allowing it to be chained with additional
method calls. |
11,779 | def update_metric(self, eval_metric, labels, pre_sliced=False):
if self._label_shapes is None:
return
if pre_sliced:
raise RuntimeError("PythonModule does not support presliced labels")
eval_metric.update(labels, self.get_outputs()) | Evaluates and accumulates evaluation metric on outputs of the last forward computation.
Subclass should override this method if needed.
Parameters
----------
eval_metric : EvalMetric
labels : list of NDArray
Typically ``data_batch.label``. |
11,780 | def search_agents(self, start=0, limit=100, filter={}, **kwargs):
request_data = {'start': start, 'limit': limit, 'filter': filter}
request_data.update(kwargs)
return self._call_rest_api('post', '/search/agents', data=request_data, error='Failed to search agents')
Search agents
:Parameters:
* *start* (`int`) -- start index to retrieve from. Default is 0
* *limit* (`int`) -- maximum number of entities to retrieve. Default is 100
* *filter* (`object`) -- free text search pattern (checks in agent data and properties)
:return: List of search results or empty list
:Example:
.. code-block:: python
filter = {'generic': 'my Agent'}
search_result = opereto_client.search_agents(filter=filter) |
11,781 | def from_binary(self, d):
p = MsgEphemerisGPSDepF._parser.parse(d)
for n in self.__class__.__slots__:
setattr(self, n, getattr(p, n)) | Given a binary payload d, update the appropriate payload fields of
the message. |
11,782 | def build_fptree(self, transactions, root_value,
root_count, frequent, headers):
root = FPNode(root_value, root_count, None)
for transaction in transactions:
sorted_items = [x for x in transaction if x in frequent]
sorted_items.sort(key=lambda x: frequent[x], reverse=True)
if len(sorted_items) > 0:
self.insert_tree(sorted_items, root, headers)
return root | Build the FP tree and return the root node. |
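A compact, self-contained version of the same build step, with a simplified FPNode and header-table bookkeeping (names and structure here are illustrative, not the original module's):
class FPNode(object):
    def __init__(self, value, count, parent):
        self.value, self.count, self.parent = value, count, parent
        self.children, self.link = [], None

def build_fptree_sketch(transactions, frequent):
    # 'frequent' maps item -> support count; headers link nodes of the same item.
    headers = {item: None for item in frequent}
    root = FPNode(None, 1, None)
    for transaction in transactions:
        items = sorted((x for x in transaction if x in frequent),
                       key=lambda x: frequent[x], reverse=True)
        node = root
        for item in items:
            child = next((c for c in node.children if c.value == item), None)
            if child is None:
                child = FPNode(item, 0, node)
                node.children.append(child)
                child.link = headers[item]   # prepend to this item's header chain
                headers[item] = child
            child.count += 1
            node = child
    return root, headers

tree, headers = build_fptree_sketch([['a', 'b'], ['b', 'c'], ['a', 'b', 'c']],
                                    {'a': 2, 'b': 3, 'c': 2})
print(headers['b'].count)   # 3 -> all three transactions share the 'b' prefix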
11,783 | async def set_heater_temp(self, device_id, set_temp):
payload = {"homeType": 0,
"timeZoneNum": "+02:00",
"deviceId": device_id,
"value": int(set_temp),
"key": "holidayTemp"}
await self.request("changeDeviceInfo", payload) | Set heater temp. |
11,784 | def _fill_function(*args):
if len(args) == 2:
func = args[0]
state = args[1]
elif len(args) == 5:
func = args[0]
keys = ['globals', 'defaults', 'dict', 'closure_values']
state = dict(zip(keys, args[1:]))
elif len(args) == 6:
func = args[0]
keys = [, , , , ]
state = dict(zip(keys, args[1:]))
else:
raise ValueError('Unexpected _fill_value arguments: %r' % (args,))
cells = func.__closure__ or ()
for cell, value in zip(cells, state.get('closure_values', ())):
if value is not _empty_cell_value:
cell_set(cell, value)
return func | Fills in the rest of function data into the skeleton function object
The skeleton itself is created by _make_skel_func(). |
11,785 | def absent(name=None, start_addr=None, end_addr=None, data=None, **api_opts):
ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
if not data:
data = {}
if 'name' not in data:
data.update({'name': name})
if 'start_addr' not in data:
data.update({'start_addr': start_addr})
if 'end_addr' not in data:
data.update({'end_addr': end_addr})
obj = __salt__['infoblox.get_ipv4_range'](data['start_addr'], data['end_addr'], **api_opts)
if obj is None:
obj = __salt__['infoblox.get_ipv4_range'](start_addr=data['start_addr'], end_addr=None, **api_opts)
if obj is None:
obj = __salt__['infoblox.get_ipv4_range'](start_addr=None, end_addr=data['end_addr'], **api_opts)
if not obj:
ret['result'] = True
ret['comment'] = 'already deleted'
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'would attempt to delete range'
return ret
if __salt__['infoblox.delete_object'](objref=obj['_ref']):
ret['result'] = True
ret['changes'] = {'old': 'Found range {0} - {1}'.format(start_addr, end_addr),
'new': 'Removed'}
return ret | Ensure the range is removed
Supplying the end of the range is optional.
State example:
.. code-block:: yaml
infoblox_range.absent:
- name: 'vlan10'
infoblox_range.absent:
- name:
- start_addr: 127.0.1.20 |
11,786 | def advisory_lock(dax, key, lock_mode=LockMode.wait, xact=False):
if lock_mode == LockMode.wait:
obtain_lock(dax, key, lock_mode, xact)
else:
got_lock = obtain_lock(dax, key, lock_mode, xact)
if not got_lock:
if lock_mode == LockMode.error:
raise Exception("Unable to obtain advisory lock {}".format(key))
else:
yield False
return
try:
yield True
finally:
if not xact:
release_lock(dax, key, lock_mode) | A context manager for obtaining a lock, executing code, and then releasing
the lock.
A boolean value is passed to the block indicating whether or not the lock was
obtained.
:dax: a DataAccess instance
:key: either a big int or a 2-tuple of integers
:lock_mode: a member of the LockMode enum. Determines how this function
operates:
- wait: the wrapped code will not be executed until the lock
is obtained.
- skip: an attempt will be made to get the lock, and if
unsuccessful, False is passed to the code block
- error: an attempt will be made to get the lock, and if
unsuccessful, an exception will be raised.
:xact: a boolean, if True, the lock will be obtained according to lock_mode,
but will not be released after the code is executed, since it will be
automatically released at the end of the transaction. |
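The wait/skip/error behaviour can be illustrated with an in-process analogue that swaps the Postgres advisory-lock calls for a plain threading.Lock; this is a sketch of the pattern, not the original implementation:
import threading
from contextlib import contextmanager

_locks = {}

@contextmanager
def advisory_lock_sketch(key, lock_mode='wait'):
    lock = _locks.setdefault(key, threading.Lock())
    if lock_mode == 'wait':
        lock.acquire()
    else:
        if not lock.acquire(blocking=False):
            if lock_mode == 'error':
                raise RuntimeError('unable to obtain advisory lock {}'.format(key))
            yield False   # 'skip' mode: tell the block it did not get the lock
            return
    try:
        yield True
    finally:
        lock.release()

with advisory_lock_sketch((42, 7), lock_mode='skip') as got_it:
    if got_it:
        print('doing the protected work')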
11,787 | def _get_samples(n, sim, inc_warmup=True):
return pystan._misc.get_samples(n, sim, inc_warmup) | Get chains for `n`th parameter.
Parameters
----------
n : int
sim : dict
A dictionary tied to a StanFit4Model instance.
Returns
-------
chains : list of array
Each chain is an element in the list. |
11,788 | def presign_v4(method, url, access_key, secret_key, session_token=None,
region=None, headers=None, expires=None, response_headers=None,
request_date=None):
if not access_key or not secret_key:
raise InvalidArgumentError()
if region is None:
region = 'us-east-1'
if headers is None:
headers = {}
if expires is None:
expires = 604800  # default to the documented 7-day maximum
if request_date is None:
request_date = datetime.utcnow()
parsed_url = urlsplit(url)
content_hash_hex = _UNSIGNED_PAYLOAD
host = parsed_url.netloc
headers['Host'] = host
iso8601Date = request_date.strftime("%Y%m%dT%H%M%SZ")
headers_to_sign = headers
query = {}
query['X-Amz-Algorithm'] = _SIGN_V4_ALGORITHM
query['X-Amz-Credential'] = generate_credential_string(access_key,
request_date,
region)
query['X-Amz-Date'] = iso8601Date
query['X-Amz-Expires'] = str(expires)
if session_token:
query['X-Amz-Security-Token'] = session_token
signed_headers = get_signed_headers(headers_to_sign)
query['X-Amz-SignedHeaders'] = ';'.join(signed_headers)
if response_headers is not None:
query.update(response_headers)
url_components = [parsed_url.geturl()]
if query is not None:
ordered_query = collections.OrderedDict(sorted(query.items()))
query_components = []
for component_key in ordered_query:
single_component = [component_key]
if ordered_query[component_key] is not None:
single_component.append('=')
single_component.append(
queryencode(ordered_query[component_key])
)
else:
single_component.append('=')
query_components.append(''.join(single_component))
query_string = '&'.join(query_components)
if query_string:
url_components.append('?')
url_components.append(query_string)
new_url = ''.join(url_components)
new_parsed_url = urlsplit(new_url)
canonical_request = generate_canonical_request(method,
new_parsed_url,
headers_to_sign,
signed_headers,
content_hash_hex)
string_to_sign = generate_string_to_sign(request_date, region,
canonical_request)
signing_key = generate_signing_key(request_date, region, secret_key)
signature = hmac.new(signing_key, string_to_sign.encode(),
hashlib.sha256).hexdigest()
new_parsed_url = urlsplit(new_url + "&X-Amz-Signature="+signature)
return new_parsed_url.geturl() | Calculates signature version '4' for regular presigned URLs.
:param method: Method to be presigned examples 'PUT', 'GET'.
:param url: URL to be presigned.
:param access_key: Access key id for your AWS s3 account.
:param secret_key: Secret access key for your AWS s3 account.
:param session_token: Session token key set only for temporary
access credentials.
:param region: region of the bucket, it is optional.
:param headers: any additional HTTP request headers to
be presigned, it is optional.
:param expires: final expiration of the generated URL. Maximum is 7days.
:param response_headers: Specify additional query string parameters.
:param request_date: the date of the request. |
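The signing key used above follows the standard Signature Version 4 derivation chain, which can be reproduced with the standard library alone (toy credentials, 's3' service assumed):
import hashlib
import hmac
from datetime import datetime

def signing_key(secret_key, request_date, region, service='s3'):
    # SigV4 key derivation: secret -> date -> region -> service -> 'aws4_request'.
    def _hmac(key, msg):
        return hmac.new(key, msg.encode(), hashlib.sha256).digest()
    k_date = _hmac(('AWS4' + secret_key).encode(), request_date.strftime('%Y%m%d'))
    k_region = _hmac(k_date, region)
    k_service = _hmac(k_region, service)
    return _hmac(k_service, 'aws4_request')

key = signing_key('toy-secret', datetime(2014, 1, 1), 'us-east-1')
print(key.hex())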
11,789 | def is_cgi(self):
collapsed_path = _url_collapse_path(self.path)
dir_sep = collapsed_path.find(, 1)
head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:]
if head in self.cgi_directories:
self.cgi_info = head, tail
return True
return False | Test whether self.path corresponds to a CGI script.
Returns True and updates the cgi_info attribute to the tuple
(dir, rest) if self.path requires running a CGI script.
Returns False otherwise.
If any exception is raised, the caller should assume that
self.path was rejected as invalid and act accordingly.
The default implementation tests whether the normalized url
path begins with one of the strings in self.cgi_directories
(and the next character is a '/' or the end of the string). |
11,790 | def peek(self, eof_token=False):
if len(self.queue) == 0:
self._refill(eof_token)
return self.queue[-1] | Same as :meth:`next`, except the token is not dequeued. |
11,791 | def get_payments_of_credit_note_per_page(self, credit_note_id, per_page=1000, page=1):
return self._get_resource_per_page(
resource=CREDIT_NOTE_PAYMENTS,
per_page=per_page,
page=page,
params={'credit_note_id': credit_note_id},
) | Get payments of credit note per page
:param credit_note_id: the credit note id
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:return: list |
11,792 | def _von_mises_cdf_normal(x, concentration, dtype):
def cdf_func(concentration):
z = ((np.sqrt(2. / np.pi) / tf.math.bessel_i0e(concentration)) *
tf.sin(.5 * x))
z2 = z ** 2
z3 = z2 * z
z4 = z2 ** 2
c = 24. * concentration
c1 = 56.
xi = z - z3 / ((c - 2. * z2 - 16.) / 3. -
(z4 + (7. / 4.) * z2 + 167. / 2.) / (c - c1 - z2 + 3.)) ** 2
distrib = normal.Normal(tf.cast(0., dtype), tf.cast(1., dtype))
return distrib.cdf(xi)
return value_and_gradient(cdf_func, concentration) | Computes the von Mises CDF and its derivative via Normal approximation. |
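For a quick numerical check outside TensorFlow, the same normal approximation can be written with NumPy/SciPy (the concentration value below is arbitrary):
import numpy as np
from scipy.special import i0e
from scipy.stats import norm

def von_mises_cdf_normal(x, concentration):
    # Normal approximation to the von Mises CDF for large concentration.
    z = (np.sqrt(2. / np.pi) / i0e(concentration)) * np.sin(0.5 * x)
    z2, z3, z4 = z**2, z**3, z**4
    c = 24. * concentration
    xi = z - z3 / ((c - 2. * z2 - 16.) / 3.
                   - (z4 + 1.75 * z2 + 83.5) / (c - 56. - z2 + 3.)) ** 2
    return norm.cdf(xi)

print(von_mises_cdf_normal(0.3, concentration=50.0))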
11,793 | def open_document(self, path, encoding=None, replace_tabs_by_spaces=True,
clean_trailing_whitespaces=True, safe_save=True,
restore_cursor_position=True, preferred_eol=0,
autodetect_eol=True, show_whitespaces=False, **kwargs):
original_path = os.path.normpath(path)
path = os.path.normcase(original_path)
paths = []
widgets = []
for w in self.widgets(include_clones=False):
if os.path.exists(w.file.path):
widgets.append(w)
paths.append(os.path.normcase(w.file.path))
if path in paths:
i = paths.index(path)
w = widgets[i]
tw = w.parent_tab_widget
tw.setCurrentIndex(tw.indexOf(w))
return w
else:
assert os.path.exists(original_path)
name = os.path.split(original_path)[1]
use_parent_dir = False
for tab in self.widgets():
title = QtCore.QFileInfo(tab.file.path).fileName()
if title == name:
tw = tab.parent_tab_widget
new_name = os.path.join(os.path.split(os.path.dirname(
tab.file.path))[1], title)
tw.setTabText(tw.indexOf(tab), new_name)
use_parent_dir = True
if use_parent_dir:
name = os.path.join(
os.path.split(os.path.dirname(path))[1], name)
use_parent_dir = False
tab = self._create_code_edit(self.guess_mimetype(path), **kwargs)
self.editor_created.emit(tab)
tab.open_parameters = {
'encoding': encoding,
'replace_tabs_by_spaces': replace_tabs_by_spaces,
'clean_trailing_whitespaces': clean_trailing_whitespaces,
'safe_save': safe_save,
'restore_cursor_position': restore_cursor_position,
'preferred_eol': preferred_eol,
'autodetect_eol': autodetect_eol,
'show_whitespaces': show_whitespaces,
'kwargs': kwargs
}
tab.file.clean_trailing_whitespaces = clean_trailing_whitespaces
tab.file.safe_save = safe_save
tab.file.restore_cursor = restore_cursor_position
tab.file.replace_tabs_by_spaces = replace_tabs_by_spaces
tab.file.autodetect_eol = autodetect_eol
tab.file.preferred_eol = preferred_eol
tab.show_whitespaces = show_whitespaces
try:
tab.file.open(original_path, encoding=encoding)
except Exception as e:
_logger().exception()
tab.close()
tab.setParent(None)
tab.deleteLater()
raise e
else:
tab.setDocumentTitle(name)
tab.file._path = original_path
icon = self._icon(path)
self.add_tab(tab, title=name, icon=icon)
self.document_opened.emit(tab)
for action in self.closed_tabs_menu.actions():
if action.toolTip() == original_path:
self.closed_tabs_menu.removeAction(action)
break
self.closed_tabs_history_btn.setEnabled(
len(self.closed_tabs_menu.actions()) > 0)
return tab | Opens a document.
:param path: Path of the document to open
:param encoding: The encoding to use to open the file. Default is
locale.getpreferredencoding().
:param replace_tabs_by_spaces: Enable/Disable replace tabs by spaces.
Default is true.
:param clean_trailing_whitespaces: Enable/Disable clean trailing
whitespaces (on save). Default is True.
:param safe_save: If True, the file is saved to a temporary file first.
If the save went fine, the temporary file is renamed to the final
filename.
:param restore_cursor_position: If true, last cursor position will be
restored. Default is True.
:param preferred_eol: Preferred EOL convention. This setting will be
used for saving the document unless autodetect_eol is True.
:param autodetect_eol: If true, automatically detects file EOL and
use it instead of the preferred EOL when saving files.
:param show_whitespaces: True to show white spaces.
:param kwargs: addtional keyword args to pass to the widget
constructor.
:return: The created code editor |
11,794 | def ParseFileObject(self, parser_mediator, file_object):
regf_file = pyregf.file()
try:
regf_file.open_file_object(file_object)
except IOError:
return
root_key = regf_file.get_root_key()
if root_key is None:
regf_file.close()
return
root_file_key = root_key.get_sub_key_by_path(self._AMCACHE_ROOT_FILE_KEY)
if root_file_key is None:
regf_file.close()
return
for volume_key in root_file_key.sub_keys:
for am_entry in volume_key.sub_keys:
self._ProcessAMCacheFileKey(am_entry, parser_mediator)
root_program_key = root_key.get_sub_key_by_path(
self._AMCACHE_ROOT_PROGRAM_KEY)
if root_program_key is None:
regf_file.close()
return
for am_entry in root_program_key.sub_keys:
self._ProcessAMCacheProgramKey(am_entry, parser_mediator)
regf_file.close() | Parses an Amcache.hve file for events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object. |
11,795 | def glob (dirs, patterns):
result = []
dirs = to_seq (dirs)
patterns = to_seq (patterns)
splitdirs = []
for dir in dirs:
splitdirs += dir.split (os.pathsep)
for dir in splitdirs:
for pattern in patterns:
p = os.path.join (dir, pattern)
import glob
result.extend (glob.glob (p))
return result | Returns the list of files matching the given pattern in the
specified directory. Both directories and patterns are
supplied as portable paths. Each pattern should be non-absolute
path, and can't contain "." or ".." elements. Each slash separated
element of pattern can contain the following special characters:
- '?', which matches any character
- '*', which matches arbitrary number of characters.
A file $(d)/e1/e2/e3 (where 'd' is in $(dirs)) matches pattern p1/p2/p3
if and only if e1 matches p1, e2 matches p2 and so on.
For example:
[ glob . : *.cpp ]
[ glob . : */build/Jamfile ] |
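Called from Python rather than Jam, the helper expands each pattern under each directory; the paths below are purely illustrative:
# every .py file directly under ./src or ./tests, plus any build/Jamfile one level down
files = glob(['./src', './tests'], ['*.py', '*/Jamfile'])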
11,796 | def set_data(self, data):
self.shape = data.shape
if self._data is None:
assert self._deferred_init, \
"Parameter '%s' has not been initialized" % self.name
self._deferred_init = self._deferred_init[:3] + (data,)
return
if self._trainer and self._trainer._kv_initialized and self._trainer._update_on_kvstore:
if self not in self._trainer._params_to_init:
self._trainer._reset_kvstore()
for arr in self._check_and_get(self._data, list):
arr[:] = data | Sets this parameter's value on all contexts. |
11,797 | async def is_owner(self, user):
if self.owner_id is None:
app = await self.application_info()
self.owner_id = owner_id = app.owner.id
return user.id == owner_id
return user.id == self.owner_id | Checks if a :class:`~discord.User` or :class:`~discord.Member` is the owner of
this bot.
If an :attr:`owner_id` is not set, it is fetched automatically
through the use of :meth:`~.Bot.application_info`.
Parameters
-----------
user: :class:`.abc.User`
The user to check for. |
11,798 | def get_free_gpus(max_procs=0):
logger = logging.getLogger(__name__)
try:
py3nvml.nvmlInit()
except:
str_ = "Couldn't connect to nvml drivers. Check they are installed correctly."
warnings.warn(str_, RuntimeWarning)
logger.warn(str_)
return []
num_gpus = py3nvml.nvmlDeviceGetCount()
gpu_free = [False]*num_gpus
for i in range(num_gpus):
try:
h = py3nvml.nvmlDeviceGetHandleByIndex(i)
except:
continue
procs = try_get_info(py3nvml.nvmlDeviceGetComputeRunningProcesses, h,
[])
if len(procs) <= max_procs:
gpu_free[i] = True
py3nvml.nvmlShutdown()
return gpu_free | Checks the number of processes running on your GPUs.
Parameters
----------
max_procs : int
Maximum number of procs allowed to run on a gpu for it to be considered
'available'
Returns
-------
availabilities : list(bool)
List of length N for an N-gpu system. The nth value will be true, if the
nth gpu had at most max_procs processes running on it. Set to 0 to look
for gpus with no procs on it.
Note
----
If function can't query the driver will return an empty list rather than raise an
Exception. |
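Typical use is to pick an idle device before binding a framework to it; a short hedged sketch that assumes py3nvml is installed and get_free_gpus is importable:
import os

free = get_free_gpus(max_procs=0)            # e.g. [True, False, True, True]
free_ids = [i for i, is_free in enumerate(free) if is_free]
if free_ids:
    os.environ['CUDA_VISIBLE_DEVICES'] = str(free_ids[0])   # pin to the first idle GPU
else:
    print('no idle GPU found (or NVML could not be queried)')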
11,799 | def get_dataset_files(in_path):
audio_files = []
for ext in ds_config.audio_exts:
audio_files += glob.glob(
os.path.join(in_path, ds_config.audio_dir, "*" + ext))
utils.ensure_dir(os.path.join(in_path, ds_config.features_dir))
utils.ensure_dir(os.path.join(in_path, ds_config.estimations_dir))
utils.ensure_dir(os.path.join(in_path, ds_config.references_dir))
file_structs = []
for audio_file in audio_files:
file_structs.append(FileStruct(audio_file))
file_structs = sorted(file_structs,
key=lambda file_struct: file_struct.audio_file)
return file_structs | Gets the files of the given dataset. |