Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
---|---|---|
378,200 | def next(self):
loop = self.get_io_loop()
return self._framework.run_on_executor(loop, self._next) | Advance the cursor.
This method blocks until the next change document is returned or an
unrecoverable error is raised.
Raises :exc:`StopAsyncIteration` if this change stream is closed.
You can iterate the change stream by calling
``await change_stream.next()`` repeatedly, or with an "async for" loop:
.. code-block:: python3
async for change in db.collection.watch():
print(change) |
378,201 | def modelresource_factory(model, resource_class=ModelResource):
attrs = {'model': model}
Meta = type(str('Meta'), (object,), attrs)
class_name = model.__name__ + str('Resource')
class_attrs = {
'Meta': Meta,
}
metaclass = ModelDeclarativeMetaclass
return metaclass(class_name, (resource_class,), class_attrs) | Factory for creating ``ModelResource`` class for given Django model. |
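A minimal usage sketch of the factory above; `Book` is a hypothetical Django model used purely for illustration:

# Hypothetical model import, for illustration only.
from myapp.models import Book

BookResource = modelresource_factory(Book)
dataset = BookResource().export()  # export all Book rows through the generated resource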
378,202 | def validateBusName(n):
try:
if '.' not in n:
raise Exception('Must contain at least two elements separated by a period (".")')
if '..' in n:
raise Exception('Must not contain consecutive periods (empty elements)')
if len(n) > 255:
raise Exception('Must be no longer than 255 characters')
if n[0] == '.':
raise Exception('Must not begin with a period (".")')
if n[0].isdigit():
raise Exception('Must not begin with a digit')
if bus_re.search(n):
raise Exception(
'Only ASCII letters, digits, hyphens, underscores, and periods are allowed')
if not n[0] == ':' and dot_digit_re.search(n):
raise Exception(
'Elements may not begin with a digit (except in unique names beginning with ":")')
except Exception as e:
raise MarshallingError('Invalid bus name "%s": %s' % (n, str(e))) | Verifies that the supplied name is a valid DBus Bus name. Throws
an L{error.MarshallingError} if the format is invalid
@type n: C{string}
@param n: A DBus bus name |
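A brief usage sketch of the checks above (the names and error text are illustrative only):

validateBusName('org.freedesktop.DBus')   # passes: dotted name, non-numeric elements
validateBusName(':1.42')                  # passes: unique connection names may use digits
try:
    validateBusName('3com.example')       # fails: must not begin with a digit
except MarshallingError as e:
    print(e)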
378,203 | def prepare_and_execute(self, connection_id, statement_id, sql, max_rows_total=None, first_frame_max_size=None):
request = requests_pb2.PrepareAndExecuteRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
if first_frame_max_size is not None:
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request, 'ExecuteResponse')
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results | Prepares and immediately executes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to prepare.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Result set with the signature of the prepared statement and the first frame data. |
378,204 | def get_electrode_node(self, electrode):
elec_node_raw = int(self.electrodes[electrode - 1][0])
if(self.header['cutmck']):
elec_node = self.nodes['rev_cutmck_index'][elec_node_raw]
else:
elec_node = elec_node_raw - 1
return int(elec_node) | For a given electrode (e.g. from a config.dat file), return the true
node number as in self.nodes['sorted'] |
378,205 | def optimal_variational_posterior(
kernel,
inducing_index_points,
observation_index_points,
observations,
observation_noise_variance,
mean_fn=None,
jitter=1e-6,
name=None):
with tf.name_scope(name or 'optimal_variational_posterior'):
dtype = dtype_util.common_dtype(
[inducing_index_points,
observation_index_points,
observations,
observation_noise_variance,
jitter], tf.float32)
inducing_index_points = tf.convert_to_tensor(
value=inducing_index_points,
dtype=dtype, name='inducing_index_points')
observation_index_points = tf.convert_to_tensor(
value=observation_index_points, dtype=dtype,
name='observation_index_points')
observations = tf.convert_to_tensor(
value=observations, dtype=dtype, name='observations')
observation_noise_variance = tf.convert_to_tensor(
value=observation_noise_variance,
dtype=dtype,
name='observation_noise_variance')
jitter = tf.convert_to_tensor(
value=jitter, dtype=dtype, name='jitter')
if mean_fn is None:
mean_fn = lambda x: tf.zeros([1], dtype=dtype)
else:
if not callable(mean_fn):
raise ValueError('`mean_fn` must be a Python callable')
kzz = kernel.matrix(inducing_index_points, inducing_index_points)
kzx = kernel.matrix(inducing_index_points, observation_index_points)
noise_var_inv = tf.math.reciprocal(observation_noise_variance)
sigma_inv = _add_diagonal_shift(
kzz + noise_var_inv * tf.matmul(kzx, kzx, adjoint_b=True),
jitter)
chol_sigma_inv = tf.linalg.cholesky(sigma_inv)
kzx_lin_op = tf.linalg.LinearOperatorFullMatrix(kzx)
kzx_obs = kzx_lin_op.matvec(
observations - mean_fn(observation_index_points))
kzz_lin_op = tf.linalg.LinearOperatorFullMatrix(kzz)
loc = (mean_fn(inducing_index_points) +
noise_var_inv * kzz_lin_op.matvec(
_solve_cholesky_factored_system_vec(chol_sigma_inv, kzx_obs)))
chol_sigma_inv_lin_op = tf.linalg.LinearOperatorLowerTriangular(
chol_sigma_inv)
scale = chol_sigma_inv_lin_op.solve(kzz)
return loc, scale | Model selection for optimal variational hyperparameters.
Given the full training set (parameterized by `observations` and
`observation_index_points`), compute the optimal variational
location and scale for the VGP. This is based on the method suggested
in [Titsias, 2009][1].
Args:
kernel: `PositiveSemidefiniteKernel`-like instance representing the
GP's covariance function.
inducing_index_points: `float` `Tensor` of locations of inducing points in
the index set. Shape has the form `[b1, ..., bB, e2, f1, ..., fF]`, just
like `observation_index_points`. The batch shape components needn't be
identical to those of `observation_index_points`, but must be broadcast
compatible with them.
observation_index_points: `float` `Tensor` representing finite (batch of)
vector(s) of points where observations are defined. Shape has the
form `[b1, ..., bB, e1, f1, ..., fF]` where `F` is the number of feature
dimensions and must equal `kernel.feature_ndims` and `e1` is the number
(size) of index points in each batch (we denote it `e1` to distinguish
it from the number of inducing index points, denoted `e2` below).
observations: `float` `Tensor` representing collection, or batch of
collections, of observations corresponding to
`observation_index_points`. Shape has the form `[b1, ..., bB, e]`, which
must be broadcastable with the batch and example shapes of
`observation_index_points`. The batch shape `[b1, ..., bB]` must be
broadcastable with the shapes of all other batched parameters
(`kernel.batch_shape`, `observation_index_points`, etc.).
observation_noise_variance: `float` `Tensor` representing the variance
of the noise in the Normal likelihood distribution of the model. May be
batched, in which case the batch shape must be broadcastable with the
shapes of all other batched parameters (`kernel.batch_shape`,
`index_points`, etc.).
Default value: `0.`
mean_fn: Python `callable` that acts on index points to produce a (batch
of) vector(s) of mean values at those index points. Takes a `Tensor` of
shape `[b1, ..., bB, f1, ..., fF]` and returns a `Tensor` whose shape is
(broadcastable with) `[b1, ..., bB]`. Default value: `None` implies
constant zero function.
jitter: `float` scalar `Tensor` added to the diagonal of the covariance
matrix to ensure positive definiteness of the covariance matrix.
Default value: `1e-6`.
name: Python `str` name prefixed to Ops created by this class.
Default value: "optimal_variational_posterior".
Returns:
loc, scale: Tuple representing the variational location and scale.
Raises:
ValueError: if `mean_fn` is not `None` and is not callable.
#### References
[1]: Titsias, M. "Variational Model Selection for Sparse Gaussian Process
Regression", 2009.
http://proceedings.mlr.press/v5/titsias09a/titsias09a.pdf |
378,206 | def _get_handlers(self):
members = {}
for d in __conf__.ACTION_DIR_NAME:
members.update(get_members(d,
None,
lambda m: isclass(m) and issubclass(m, BaseHandler) and hasattr(m, "__urls__") and m.__urls__))
handlers = [(pattern, order, h) for h in members.values() for pattern, order in h.__urls__]
try:
api_version = __conf__.API_VERSION
except Exception as e:
api_version = ''
handlers = [(api_version + pattern, handler) for pattern, _, handler in handlers]
handlers.append((r"/(.*)", tornado.web.StaticFileHandler, {"path": "static", "default_filename": "index.html"}))
return handlers | Get the action handlers.
Adds the actions found under the paths listed in __conf__.ACTION_DIR_NAME. (by ABeen) |
378,207 | def join(self, t2, unique=False):
x_v1 = np.concatenate((self.x, t2.x), axis=0)
y_v1 = np.concatenate((self.y, t2.y), axis=0)
if not unique:
a = np.ascontiguousarray(np.vstack((x_v1, y_v1)).T)
unique_a = np.unique(a.view([('', a.dtype)]*a.shape[1]))
unique_coords = unique_a.view(a.dtype).reshape((unique_a.shape[0], a.shape[1]))
x_v1 = unique_coords[:,0]
y_v1 = unique_coords[:,1]
return x_v1, y_v1 | Join this triangulation with another. If the points are known to have no duplicates, then
set unique=True to skip the testing and duplicate removal |
378,208 | def sort_func(self, key):
if key == self._KEYS.VALUE:
return 'aaa'
if key == self._KEYS.SOURCE:
return 'zzz'
return key | Sorting logic for `Quantity` objects. |
378,209 | def send_button(recipient):
page.send(recipient, Template.Buttons("hello", [
Template.ButtonWeb("Open Web URL", "https://www.oculus.com/en-us/rift/"),
Template.ButtonPostBack("trigger Postback", "DEVELOPED_DEFINED_PAYLOAD"),
Template.ButtonPhoneNumber("Call Phone Number", "+16505551234")
])) | Shortcuts are supported
page.send(recipient, Template.Buttons("hello", [
{'type': 'web_url', 'title': 'Open Web URL', 'value': 'https://www.oculus.com/en-us/rift/'},
{'type': 'postback', 'title': 'trigger Postback', 'value': 'DEVELOPED_DEFINED_PAYLOAD'},
{'type': 'phone_number', 'title': 'Call Phone Number', 'value': '+16505551234'},
])) |
378,210 | def proof_req_briefs2req_creds(proof_req: dict, briefs: Union[dict, Sequence[dict]]) -> dict:
rv = {
'self_attested_attributes': {},
'requested_attributes': {},
'requested_predicates': {}
}
attr_refts = proof_req_attr_referents(proof_req)
pred_refts = proof_req_pred_referents(proof_req)
for brief in iter_briefs(briefs):
cred_info = brief['cred_info']
timestamp = (brief['interval'] or {}).get('to', None)
for attr in cred_info['attrs']:
if attr in attr_refts.get(cred_info['cred_def_id'], {}):
req_attr = {
'cred_id': cred_info['referent'],
'revealed': attr not in pred_refts.get(cred_info['cred_def_id'], {}),
'timestamp': timestamp
}
if not timestamp:
req_attr.pop('timestamp')
rv['requested_attributes'][attr_refts[cred_info['cred_def_id']][attr]] = req_attr
if attr in pred_refts.get(cred_info['cred_def_id'], {}):
for uuid in pred_refts[cred_info['cred_def_id']][attr]:
req_pred = {
'cred_id': cred_info['referent'],
'timestamp': timestamp
}
if not timestamp:
req_pred.pop('timestamp')
rv['requested_predicates'][uuid] = req_pred
return rv | Given a proof request and cred-brief(s), return a requested-creds structure.
The proof request must have cred def id restrictions on all requested attribute specifications.
:param proof_req: proof request
:param briefs: credential brief, sequence thereof (as indy-sdk wallet credential search returns),
or cred-brief-dict (as HolderProver.get_cred_briefs_for_proof_req_q() returns); e.g.,
::
[
{
"cred_info": {
"cred_rev_id": "149",
"cred_def_id": "LjgpST2rjsoxYegQDRm7EL:3:CL:15:tag",
"schema_id": "LjgpST2rjsoxYegQDRm7EL:2:bc-reg:1.0",
"rev_reg_id": "LjgpST2rjsoxYegQDRm7EL:4:LjgpST2rjsoxYegQDRm7EL:3:CL:15:tag:CL_ACCUM:1",
"referent": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"attrs": {
"busId": "11144444",
"endDate": "",
"id": "3",
"effectiveDate": "2012-12-01",
"jurisdictionId": "1",
"orgTypeId": "2",
"legalName": "Tart City"
}
},
"interval": {
"to": 1532448939,
"from": 1234567890
}
},
...
]
:return: indy-sdk requested creds json to pass to proof creation request; e.g.,
::
{
"requested_attributes": {
"15_endDate_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
},
"15_id_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
},
"15_effectiveDate_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
},
"15_busId_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
},
"15_orgTypeId_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": false
},
"15_jurisdictionId_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
},
"15_legalName_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
"revealed": true
}
},
"requested_predicates": {
"15_orgTypeId_GE_uuid": {
"timestamp": 1532448939,
"cred_id": "43f8dc18-ac00-4b72-8a96-56f47dba77ca",
}
},
"self_attested_attributes": {}
} |
378,211 | def _put_bucket_lifecycle(self):
status = 'deleted'
if self.s3props['lifecycle_config']['rules']:
lifecycle_config = {
'Rules': self.s3props['lifecycle_config']['rules']
}
LOG.debug('Bucket lifecycle configuration: %s', lifecycle_config)
_response = self.s3client.put_bucket_lifecycle_configuration(Bucket=self.bucket,
LifecycleConfiguration=lifecycle_config)
status = 'added'
else:
_response = self.s3client.delete_bucket_lifecycle(Bucket=self.bucket)
LOG.debug('Response setting bucket lifecycle: %s', _response)
LOG.info('Bucket lifecycle configuration %s', status) | Adds bucket lifecycle configuration. |
378,212 | def vel_grad_avg(self):
return ((u.standard_gravity * self.HL) /
(pc.viscosity_kinematic(self.temp) * self.Gt)).to(u.s ** -1) | Calculate the average velocity gradient (G-bar) of water flowing
through the flocculator.
:returns: Average velocity gradient (G-bar)
:rtype: float * 1 / second |
378,213 | def checkValue(self,value,strict=0):
v = self._coerceValue(value,strict)
return self.checkOneValue(v,strict) | Check and convert a parameter value.
Raises an exception if the value is not permitted for this
parameter. Otherwise returns the value (converted to the
right type.) |
378,214 | def get_string(self, betas: List[float], gammas: List[float], samples: int = 100):
if samples <= 0 or not isinstance(samples, int):
raise ValueError("samples variable must be positive integer")
param_prog = self.get_parameterized_program()
stacked_params = np.hstack((betas, gammas))
sampling_prog = Program()
ro = sampling_prog.declare('ro', 'BIT', len(self.qubits))
sampling_prog += param_prog(stacked_params)
sampling_prog += [MEASURE(qubit, r) for qubit, r in zip(self.qubits, ro)]
sampling_prog.wrap_in_numshots_loop(samples)
executable = self.qc.compile(sampling_prog)
bitstring_samples = self.qc.run(executable)
bitstring_tuples = list(map(tuple, bitstring_samples))
freq = Counter(bitstring_tuples)
most_frequent_bit_string = max(freq, key=lambda x: freq[x])
return most_frequent_bit_string, freq | Compute the most probable string.
The method assumes you have passed init_betas and init_gammas with your
pre-computed angles or you have run the VQE loop to determine the
angles. If you have not done this you will be returning the output for
a random set of angles.
:param betas: List of beta angles
:param gammas: List of gamma angles
:param samples: (Optional) number of samples to get back from the QuantumComputer.
:returns: tuple representing the bitstring, Counter object from
collections holding all output bitstrings and their frequency. |
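A hedged usage sketch; `qaoa` is assumed to be an already-constructed instance of this class, and `get_angles()` is an assumed companion method that runs the VQE loop:

# Illustrative only; names other than get_string() are assumptions.
betas, gammas = qaoa.get_angles()
bitstring, freq = qaoa.get_string(betas, gammas, samples=500)
print(bitstring)             # most probable measurement outcome
print(freq.most_common(3))   # top-3 bitstrings with their counts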
378,215 | def _script_to_har_entry(cls, script, url):
entry = {
'request': {'url': url},
'response': {'url': url, 'content': {'text': script}}
}
cls._set_entry_type(entry, INLINE_SCRIPT_ENTRY)
return entry | Return an entry for an embedded script |
378,216 | def resource_to_portal_type(resource):
if resource is None:
return None
resource_mapping = get_resource_mapping()
portal_type = resource_mapping.get(resource.lower())
if portal_type is None:
logger.warn("Could not map the resource "
"to any known portal type".format(resource))
return portal_type | Converts a resource to a portal type
:param resource: Resource name as it is used in the content route
:type resource: string
:returns: Portal type name
:rtype: string |
378,217 | def metric_delete(self, project, metric_name):
path = "projects/%s/metrics/%s" % (project, metric_name)
self._gapic_api.delete_log_metric(path) | API call: delete a metric resource.
:type project: str
:param project: ID of the project containing the metric.
:type metric_name: str
:param metric_name: the name of the metric |
378,218 | def drawItem(self, item, painter, option):
dataset = item.dataset()
painter.save()
painter.setRenderHint(painter.Antialiasing)
center = item.buildData('center')
radius = item.buildData('radius')
if int(option.state) & QStyle.State_MouseOver != 0:
alpha = 20
mouse_over = True
else:
alpha = 0
mouse_over = False
for value, subpath in item.buildData('subpaths', []):
clr = dataset.color(value)
bg = clr.lighter(110)
bg.setAlpha(alpha + 100)
painter.setBrush(bg)
if mouse_over:
scale = 1.08
dx = (center.x() / scale) - center.x()
dy = (center.y() / scale) - center.y()
painter.save()
painter.scale(scale, scale)
painter.translate(dx, dy)
painter.setPen(Qt.NoPen)
painter.drawPath(subpath)
painter.restore()
pen = QPen(clr)
pen.setWidth(0.5)
painter.setPen(pen)
painter.drawPath(subpath)
painter.restore() | Draws the input item as a bar graph.
:param item | <XChartDatasetItem>
painter | <QPainter>
option | <QStyleOptionGraphicsItem> |
378,219 | def write_jsonl_file(fname, data):
if not isinstance(data, list):
print('write_jsonl_file: expected a list of JSON strings, not writing', fname)
return
with open(fname, 'w') as of:
for row in data:
if row.strip():
of.write('%s\n' % row.strip()) | Writes a jsonl file.
Args:
fname: output file name
data: list of json encoded data |
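A short usage sketch (the output path is illustrative):

# Each element becomes one JSON document per line (JSON Lines format).
rows = ['{"id": 1, "text": "hello"}', '{"id": 2, "text": "world"}']
write_jsonl_file('/tmp/example.jsonl', rows)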
378,220 | def fetch(self, category=CATEGORY_COMMIT, from_date=DEFAULT_DATETIME, to_date=DEFAULT_LAST_DATETIME,
branches=None, latest_items=False, no_update=False):
if not from_date:
from_date = DEFAULT_DATETIME
if not to_date:
to_date = DEFAULT_LAST_DATETIME
kwargs = {
'from_date': from_date,
'to_date': to_date,
'branches': branches,
'latest_items': latest_items,
'no_update': no_update
}
items = super().fetch(category, **kwargs)
return items | Fetch commits.
The method retrieves from a Git repository or a log file
a list of commits. Commits are returned in the same order
they were obtained.
When `from_date` parameter is given it returns items committed
since the given date.
The list of `branches` is a list of strings, with the names of
the branches to fetch. If the list of branches is empty, no
commit is fetched. If the list of branches is None, all commits
for all branches will be fetched.
The parameter `latest_items` returns only those commits which
are new since the last time this method was called.
The parameter `no_update` returns all commits without performing
an update of the repository before.
Take into account that `from_date` and `branches` are ignored
when the commits are fetched from a Git log file or when
`latest_items` flag is set.
The class raises a `RepositoryError` exception when an error
occurs accessing the repository.
:param category: the category of items to fetch
:param from_date: obtain commits newer than a specific date
(inclusive)
:param to_date: obtain commits older than a specific date
:param branches: names of branches to fetch from (default: None)
:param latest_items: sync with the repository to fetch only the
newest commits
:param no_update: if enabled, don't update the repo with the latest changes
:returns: a generator of commits |
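A hedged usage sketch, assuming Perceval's Git backend takes a repository URI and a local clone path (repository URL and path are illustrative):

import datetime
from perceval.backends.core.git import Git  # assumed import path

repo = Git(uri='https://github.com/org/repo.git', gitpath='/tmp/repo-clone')
for commit in repo.fetch(from_date=datetime.datetime(2018, 1, 1)):
    print(commit['data']['commit'])  # hash of each fetched commit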
378,221 | def key_value(self, **kwargs):
field_name = self.name
new_df = copy_df(self)
new_df._perform_operation(op.FieldKVConfigOperation({field_name: KVConfig(**kwargs)}))
return new_df | Set fields to be key-value represented.
:rtype: Column
:Example:
>>> new_ds = df.key_value('f1 f2', kv=':', item=',') |
378,222 | def project_data_source_path(cls, project, data_source):
return google.api_core.path_template.expand(
"projects/{project}/dataSources/{data_source}",
project=project,
data_source=data_source,
) | Return a fully-qualified project_data_source string. |
378,223 | def get_time(self, idx):
qpi = self.get_qpimage_raw(idx)
if "time" in qpi.meta:
thetime = qpi.meta["time"]
else:
thetime = np.nan
return thetime | Return time of data at index `idx`
Returns nan if the time is not defined |
378,224 | def _generate_prime(bits, rng):
"primtive attempt at prime generation"
hbyte_mask = pow(2, bits % 8) - 1
while True:
x = rng.read((bits+7) // 8)
if hbyte_mask > 0:
x = chr(ord(x[0]) & hbyte_mask) + x[1:]
n = util.inflate_long(x, 1)
n |= 1
n |= (1 << (bits - 1))
while not number.isPrime(n):
n += 2
if util.bit_length(n) == bits:
break
return n | primitive attempt at prime generation |
378,225 | def point_lm(self, context):
lm = np.empty(context.shape, context.dtype)
montblanc.log.info(context.array_schema.shape)
montblanc.log.info(context.iter_args)
(ls, us) = context.dim_extents()
lm[:,0] = 0.0008
lm[:,1] = 0.0036
lm[:,:] = 0
return lm | Return a lm coordinate array to montblanc |
378,226 | def visit_tuple(self, node):
if len(node.elts) == 1:
return "(%s, )" % node.elts[0].accept(self)
return "(%s)" % ", ".join(child.accept(self) for child in node.elts) | return an astroid.Tuple node as string |
378,227 | def setup_logging(
default_level=logging.INFO,
default_path=None,
env_key=,
handler_name=,
handlers_dict=None,
log_dict=None,
config_name=None,
splunk_host=None,
splunk_port=None,
splunk_index=None,
splunk_token=None,
splunk_verify=False,
splunk_handler_name=,
splunk_sleep_interval=-1,
splunk_debug=False):
if SPLUNK_DEBUG:
splunk_debug = True
if not splunk_token:
if SPLUNK_TOKEN:
splunk_token = SPLUNK_TOKEN
config = None
if os.getenv(
,
False):
try:
config = json.loads(os.getenv(
,
None).strip())
except Exception as e:
print(
).format(
e)
elif log_dict:
config = log_dict
if not config and default_path:
path = default_path
file_name = default_path.split()[-1]
if config_name:
file_name = config_name
path = .format(
.join(default_path.split()[:-1]),
file_name)
value = os.getenv(env_key, None)
if value:
path = value
if os.path.exists(path):
with open(path, ) as f:
config = json.load(f)
else:
cwd_path = os.getcwd() + .format(
file_name)
if os.path.exists(cwd_path):
with open(cwd_path, ) as f:
config = json.load(f)
else:
rels_path = os.getcwd() + .format(
file_name)
if os.path.exists(rels_path):
with open(rels_path, ) as f:
config = json.load(f)
else:
repo_config = (
)
if os.path.exists(repo_config):
if splunk_debug:
print(
.format(
repo_config))
with open(repo_config, ) as f:
config = json.load(f)
if config:
if handlers_dict:
config[] = handlers_dict
found_splunk_handler = False
if handler_name:
for hidx, h in enumerate(config[]):
if splunk_debug:
print(.format(
hidx,
h))
if handler_name == h:
config[][].append(h)
if splunk_handler_name == h and splunk_token:
found_splunk_handler = True
if found_splunk_handler:
if splunk_token:
config[][splunk_handler_name][] = \
splunk_token
config[][splunk_handler_name][] = \
splunk_verify
if splunk_host:
config[][splunk_handler_name][] = \
splunk_host
if splunk_port:
config[][splunk_handler_name][] = \
splunk_port
if splunk_index:
config[][splunk_handler_name][] = \
splunk_index
config[][splunk_handler_name][] = \
splunk_debug
if config[][splunk_handler_name].get(
,
True):
key =
config[][splunk_handler_name][key] = \
SPLUNK_QUEUE_SIZE
if SPLUNK_RETRY_COUNT:
key =
config[][splunk_handler_name][key] = \
SPLUNK_RETRY_COUNT
if SPLUNK_TIMEOUT:
config[][splunk_handler_name][key] = \
SPLUNK_TIMEOUT
key =
if splunk_sleep_interval >= 0:
config[][splunk_handler_name][key] = \
splunk_sleep_interval
else:
if SPLUNK_SLEEP_INTERVAL:
key =
config[][splunk_handler_name][key] = \
SPLUNK_SLEEP_INTERVAL
if found_splunk_handler:
config[][].append(
splunk_handler_name)
else:
if splunk_debug:
print(
)
config[].pop(, None)
good_handlers = []
for k in config[][]:
if k != splunk_handler_name:
good_handlers.append(k)
config[][] = good_handlers
else:
if splunk_debug:
print(
)
config[].pop(splunk_handler_name, None)
good_handlers = []
for k in config[][]:
if k != splunk_handler_name:
good_handlers.append(k)
config[][] = good_handlers
if len(config[][]) == 0:
print((
).format(
config[][],
ppj(config)))
else:
if splunk_debug:
print((
).format(
ppj(config)))
logging.config.dictConfig(
config)
return
else:
if not splunk_host and not splunk_port:
if SPLUNK_ADDRESS:
try:
addr_split = SPLUNK_ADDRESS.split()
if len(addr_split) > 1:
splunk_host = addr_split[0]
splunk_port = int(addr_split[1])
except Exception as e:
print((
).format(
SPLUNK_ADDRESS,
e))
else:
if not splunk_host:
if SPLUNK_HOST:
splunk_host = SPLUNK_HOST
if not splunk_port:
if SPLUNK_PORT:
splunk_port = SPLUNK_PORT
config = {
: 1,
: False,
: {
: {
: ,
: (
)
},
: {
: ,
: (
)
},
: {
: ,
: (
)
},
splunk_handler_name: {
: ,
: (
)
}
},
: {
: {
: ,
: ,
: ,
:
},
: {
: ,
: ,
: ,
:
},
: {
: ,
: ,
: ,
:
}
},
: {
: {
: ,
: True
}
},
: {
: ,
: True,
: [
]
}
}
if splunk_token and splunk_host and splunk_port:
config[][splunk_handler_name] = {
: (
),
: splunk_host,
: splunk_port,
: SPLUNK_INDEX,
: splunk_token,
: splunk_handler_name,
: SPLUNK_SOURCETYPE,
: SPLUNK_VERIFY,
: SPLUNK_TIMEOUT,
: SPLUNK_RETRY_COUNT,
: SPLUNK_SLEEP_INTERVAL,
: SPLUNK_QUEUE_SIZE,
: SPLUNK_DEBUG
}
config[][].append(splunk_handler_name)
logging.config.dictConfig(config)
return | setup_logging
Setup logging configuration
:param default_level: level to log
:param default_path: path to config (optional)
:param env_key: path to config in this env var
:param handler_name: handler name in the config
:param handlers_dict: handlers dict
:param log_dict: full log dictionary config
:param config_name: filename for config
:param splunk_host: optional splunk host
:param splunk_port: optional splunk port
:param splunk_index: optional splunk index
:param splunk_token: optional splunk token
:param splunk_verify: optional splunk verify - default to False
:param splunk_handler_name: optional splunk handler name
:param splunk_sleep_interval: optional splunk sleep interval
:param splunk_debug: optional splunk debug - default to False |
378,228 | def create(self, name, incident_preference):
data = {
"policy": {
"name": name,
"incident_preference": incident_preference
}
}
return self._post(
url='{0}alerts_policies.json'.format(self.URL),
headers=self.headers,
data=data
) | This API endpoint allows you to create an alert policy
:type name: str
:param name: The name of the policy
:type incident_preference: str
:param incident_preference: Can be PER_POLICY, PER_CONDITION or
PER_CONDITION_AND_TARGET
:rtype: dict
:return: The JSON response of the API
::
{
"policy": {
"created_at": "time",
"id": "integer",
"incident_preference": "string",
"name": "string",
"updated_at": "time"
}
} |
378,229 | def update_option_value_by_id(cls, option_value_id, option_value, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_option_value_by_id_with_http_info(option_value_id, option_value, **kwargs)
else:
(data) = cls._update_option_value_by_id_with_http_info(option_value_id, option_value, **kwargs)
return data | Update OptionValue
Update attributes of OptionValue
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_option_value_by_id(option_value_id, option_value, async=True)
>>> result = thread.get()
:param async bool
:param str option_value_id: ID of optionValue to update. (required)
:param OptionValue option_value: Attributes of optionValue to update. (required)
:return: OptionValue
If the method is called asynchronously,
returns the request thread. |
378,230 | def put_connection_filename(filename, working_filename, verbose = False):
if working_filename != filename:
deferred_signals = []
def newsigterm(signum, frame):
deferred_signals.append(signum)
oldhandlers = {}
for sig in (signal.SIGTERM, signal.SIGTSTP):
oldhandlers[sig] = signal.getsignal(sig)
signal.signal(sig, newsigterm)
if verbose:
print >>sys.stderr, "moving to ..." % (working_filename, filename),
shutil.move(working_filename, filename)
if verbose:
print >>sys.stderr, "done."
try:
open(working_filename, "w").close()
except:
pass
with temporary_files_lock:
del temporary_files[working_filename]
for sig, oldhandler in oldhandlers.iteritems():
signal.signal(sig, oldhandler)
while deferred_signals:
os.kill(os.getpid(), deferred_signals.pop(0))
with temporary_files_lock:
if not temporary_files:
uninstall_signal_trap() | This function reverses the effect of a previous call to
get_connection_filename(), restoring the working copy to its
original location if the two are different. This function should
always be called after calling get_connection_filename() when the
file is no longer in use.
During the move operation, this function traps the signals used by
Condor to evict jobs. This reduces the risk of corrupting a
document by the job terminating part-way through the restoration of
the file to its original location. When the move operation is
concluded, the original signal handlers are restored and if any
signals were trapped they are resent to the current process in
order. Typically this will result in the signal handlers installed
by the install_signal_trap() function being invoked, meaning any
other scratch files that might be in use get deleted and the
current process is terminated. |
378,231 | def _cast(self, value, format=None, **opts):
if format is not None:
return datetime.strptime(value, format)
return dateutil.parser.parse(value) | Optionally apply a format string. |
378,232 | def get_init_container(self,
init_command,
init_args,
env_vars,
context_mounts,
persistence_outputs,
persistence_data):
env_vars = to_list(env_vars, check_none=True)
if self.original_name is not None and self.cloning_strategy == CloningStrategy.RESUME:
return []
if self.original_name is not None and self.cloning_strategy == CloningStrategy.COPY:
command = InitCommands.COPY
original_outputs_path = stores.get_experiment_outputs_path(
persistence=persistence_outputs,
experiment_name=self.original_name)
else:
command = InitCommands.CREATE
original_outputs_path = None
outputs_path = stores.get_experiment_outputs_path(
persistence=persistence_outputs,
experiment_name=self.experiment_name)
_, outputs_volume_mount = get_pod_outputs_volume(persistence_outputs=persistence_outputs)
volume_mounts = outputs_volume_mount + to_list(context_mounts, check_none=True)
init_command = init_command or ["/bin/sh", "-c"]
init_args = init_args or to_list(
get_output_args(command=command,
outputs_path=outputs_path,
original_outputs_path=original_outputs_path))
init_args += to_list(get_auth_context_args(entity='experiment',
entity_name=self.experiment_name))
return [
client.V1Container(
name=self.init_container_name,
image=self.init_docker_image,
image_pull_policy=self.init_docker_image_pull_policy,
command=init_command,
args=[' '.join(init_args)],
env=env_vars,
volume_mounts=volume_mounts)
] | Pod init container for setting outputs path. |
378,233 | async def wait_read(self, message=None, *, timeout=None):
start_time = time.time()
future = self._client.loop.create_future()
target_id = self._get_message_id(message)
if self._last_read is None:
self._last_read = target_id - 1
if self._last_read >= target_id:
return
self._pending_reads[target_id] = future
return await self._get_result(future, start_time, timeout) | Awaits for the sent message to be read. Note that receiving
a response doesn't imply the message was read, and this action
will also trigger even without a response. |
378,234 | def to_df(self) -> pd.DataFrame:
if issparse(self._X):
X = self._X.toarray()
else:
X = self._X
return pd.DataFrame(X, index=self.obs_names, columns=self.var_names) | Generate shallow :class:`~pandas.DataFrame`.
The data matrix :attr:`X` is returned as
:class:`~pandas.DataFrame`, where :attr:`obs_names` initializes the
index, and :attr:`var_names` the columns.
* No annotations are maintained in the returned object.
* The data matrix is densified in case it is sparse. |
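A minimal sketch, assuming the `anndata` package (data values are arbitrary):

import numpy as np
import anndata as ad

adata = ad.AnnData(np.arange(6, dtype=float).reshape(2, 3))
df = adata.to_df()   # 2x3 DataFrame; obs_names become the index, var_names the columns
print(df.shape, list(df.columns))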
378,235 | def get_context(self, value):
context = super(RenditionAwareStructBlock, self).get_context(value)
context['image_rendition'] = self.rendition.\
image_rendition or 'original'
return context | Ensure `image_rendition` is added to the global context. |
378,236 | def _get_relation(self, related_model: type, relations: List[str]) -> Tuple[Optional[List[type]], Optional[type]]:
relations_list, last_relation = [], related_model
for relation in relations:
relationship = getattr(last_relation, relation, None)
if relationship is None:
return (None, None)
last_relation = relationship.mapper.class_
relations_list.append(last_relation)
return (relations_list, last_relation) | Transform the list of relation to list of class.
:param related_model: The model of the query.
:type related_model: type
:param relations: The relation list get from the `_extract_relations`.
:type relations: List[str]
:return: Tuple with the list of relations (class) and the second
element is the last relation class.
:rtype: Tuple[Optional[List[type]], Optional[type]] |
378,237 | def nbopen(filename):
filename = osp.abspath(filename)
home_dir = get_home_dir()
server_info = find_best_server(filename)
if server_info is not None:
print("Using existing server at", server_info[])
return server_info
else:
if filename.startswith(home_dir):
nbdir = home_dir
else:
nbdir = osp.dirname(filename)
print("Starting new server")
command = [sys.executable, '-m', 'notebook', '--no-browser',
'--notebook-dir={}'.format(nbdir),
'--NotebookApp.password=',
"--KernelSpecManager.kernel_spec_class='{}'".format(
KERNELSPEC)]
if os.name == 'nt':
creation_flag = 0x08000000
else:
creation_flag = 0
if DEV:
env = os.environ.copy()
env["PYTHONPATH"] = osp.dirname(get_module_path())
proc = subprocess.Popen(command, creationflags=creation_flag,
env=env)
else:
proc = subprocess.Popen(command, creationflags=creation_flag)
def kill_server_and_childs(pid):
ps_proc = psutil.Process(pid)
for child in ps_proc.children(recursive=True):
child.kill()
ps_proc.kill()
atexit.register(kill_server_and_childs, proc.pid)
for _x in range(100):
server_info = find_best_server(filename)
if server_info is not None:
break
else:
time.sleep(0.25)
if server_info is None:
raise NBServerError()
return server_info | Open a notebook using the best available server.
Returns information about the selected server. |
378,238 | def process_raw_data(self, fname, max_size):
logging.info(f'Loading data from {fname}')
data = []
with open(fname) as dfile:
for idx, line in enumerate(dfile):
if max_size and idx == max_size:
break
data.append(line)
return data | Loads data from the input file.
:param fname: input file name
:param max_size: loads at most 'max_size' samples from the input file,
if None loads the entire dataset |
378,239 | def flush_content(self):
LOGGER.debug("> Flushing cache content.".format(self.__class__.__name__))
self.clear()
return True | Flushes the cache content.
Usage::
>>> cache = Cache()
>>> cache.add_content(John="Doe", Luke="Skywalker")
True
>>> cache.flush_content()
True
>>> cache
{}
:return: Method success.
:rtype: bool |
378,240 | def from_type_name(cls, typ, name):
for k, nt in cls.defined_aliases.items():
if typ is not None and typ != nt.type: continue
if name == nt.name:
if len(k) == 1: return cls(xc=k)
if len(k) == 2: return cls(x=k[0], c=k[1])
raise ValueError("Wrong key: %s" % k)
if "+" in name:
x, c = (s.strip() for s in name.split("+"))
x, c = LibxcFunc[x], LibxcFunc[c]
return cls(x=x, c=c)
else:
xc = LibxcFunc[name]
return cls(xc=xc)
if typ is None:
raise ValueError("Cannot find name=%s in defined_aliases" % name)
else:
raise ValueError("Cannot find type=%s, name=%s in defined_aliases" % (typ, name)) | Build the object from (type, name). |
378,241 | def episode_list(a):
html = get_html(ROOT + a.get('href'))
div = html.find('div', {'class': "list detail eplist"})
links = []
for tag in div.find_all('a', {'class': "name"}):
links.append(tag)
return links | List of all episodes of a season |
378,242 | def shake_shake_layer(x, output_filters, num_blocks, stride, hparams):
for block_num in range(num_blocks):
curr_stride = stride if (block_num == 0) else 1
with tf.variable_scope("layer_{}".format(block_num)):
x = shake_shake_block(x, output_filters, curr_stride, hparams)
return x | Builds many sub layers into one full layer. |
378,243 | def get_flow(self, name):
config = getattr(self, "flows__{}".format(name))
if not config:
raise FlowNotFoundError("Flow not found: {}".format(name))
return FlowConfig(config) | Returns a FlowConfig |
378,244 | def message_archive(self, project_id, category_id=None):
path = '/projects/%u/msg/archive.xml' % project_id
req = ET.Element('request')
ET.SubElement(req, 'project-id').text = str(int(project_id))
if category_id is not None:
ET.SubElement(req, 'category-id').text = str(int(category_id))
return self._request(path, req) | This will return a summary record for each message in a project. If
you specify a category_id, only messages in that category will be
returned. (Note that a summary record includes only a few bits of
information about a post, not the complete record.) |
378,245 | def make_report(self, sections_first=True, section_header_params=None):
full_story = list(self._preformat_text(self.title, style='Heading1',
fontsize=18, alignment='center'))
if section_header_params is None:
section_header_params = {'style': 'Heading2', 'fontsize': 14,
'alignment': 'left'}
if sections_first:
full_story += self._make_sections(**section_header_params)
full_story += self.story
else:
full_story += self.story
full_story += self._make_sections(**section_header_params)
fname = self.name + '.pdf'
doc = SimpleDocTemplate(fname, pagesize=letter,
rightMargin=72, leftMargin=72,
topMargin=72, bottomMargin=18)
doc.build(full_story)
return fname | Create the pdf document with name `self.name + '.pdf'`.
Parameters
----------
sections_first : bool
If True (default), text and images with sections are presented first
and un-sectioned content is appended afterword. If False, sectioned
text and images will be placed before the sections.
section_header_params : dict or None
Optionally overwrite/extend the default formatting for the section
headers. Default is None. |
378,246 | def __replace_all(repls: dict, input: str) -> str:
return re.sub('|'.join(re.escape(key) for key in repls.keys()),
lambda k: repls[k.group(0)], input) | Replaces from a string **input** all the occurrences of some
symbols according to mapping **repls**.
:param dict repls: where #key is the old character and
#value is the one to substitute with;
:param str input: original string where to apply the
replacements;
:return: *(str)* the string with the desired characters replaced |
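The same single-pass replacement idiom, inlined here for demonstration (sample strings are arbitrary):

import re

repls = {'&': 'and', '@': 'at'}
text = 'cats & dogs @ home'
result = re.sub('|'.join(re.escape(key) for key in repls.keys()),
                lambda k: repls[k.group(0)], text)
print(result)  # 'cats and dogs at home'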
378,247 | def date(fmt=None,timestamp=None):
"Manejo de fechas (simil PHP)"
if fmt==:
t = datetime.datetime.now()
return int(time.mktime(t.timetuple()))
if fmt==:
d = datetime.datetime.fromtimestamp(timestamp)
return d.isoformat()
if fmt==:
d = datetime.datetime.now()
return d.strftime("%Y%m%d") | Manejo de fechas (simil PHP) |
378,248 | def agg_shape(self, shp, aggregate_by):
return shp + tuple(
len(getattr(self, tagname)) - 1 for tagname in aggregate_by) | :returns: a shape shp + (T, ...) depending on the tagnames |
def set(self, align='left', font='a', type='normal', width=1, height=1):
if align.upper() == "CENTER":
self._raw(TXT_ALIGN_CT)
elif align.upper() == "RIGHT":
self._raw(TXT_ALIGN_RT)
elif align.upper() == "LEFT":
self._raw(TXT_ALIGN_LT)
if font.upper() == "B":
self._raw(TXT_FONT_B)
else:
self._raw(TXT_FONT_A)
if type.upper() == "B":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL_OFF)
elif type.upper() == "U":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL_ON)
elif type.upper() == "U2":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL2_ON)
elif type.upper() == "BU":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL_ON)
elif type.upper() == "BU2":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL2_ON)
elif type.upper == "NORMAL":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL_OFF)
if width == 2 and height != 2:
self._raw(TXT_NORMAL)
self._raw(TXT_2WIDTH)
elif height == 2 and width != 2:
self._raw(TXT_NORMAL)
self._raw(TXT_2HEIGHT)
elif height == 2 and width == 2:
self._raw(TXT_2WIDTH)
self._raw(TXT_2HEIGHT)
else:
self._raw(TXT_NORMAL) | Set text properties |
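A brief usage sketch; `printer` is assumed to be an instance of the ESC/POS printer class that defines set() and text():

printer.set(align='center', type='BU', width=2, height=2)  # centered, bold + underlined, double size
printer.text('RECEIPT\n')
printer.set()  # back to defaults: left-aligned, font A, normal weight and size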
378,250 | def create_queue(
self,
parent,
queue,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "create_queue" not in self._inner_api_calls:
self._inner_api_calls[
"create_queue"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_queue,
default_retry=self._method_configs["CreateQueue"].retry,
default_timeout=self._method_configs["CreateQueue"].timeout,
client_info=self._client_info,
)
request = cloudtasks_pb2.CreateQueueRequest(parent=parent, queue=queue)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["create_queue"](
request, retry=retry, timeout=timeout, metadata=metadata
) | Creates a queue.
Queues created with this method allow tasks to live for a maximum of 31
days. After a task is 31 days old, the task will be deleted regardless
of whether it was dispatched or not.
WARNING: Using this method may have unintended side effects if you are
using an App Engine ``queue.yaml`` or ``queue.xml`` file to manage your
queues. Read `Overview of Queue Management and
queue.yaml <https://cloud.google.com/tasks/docs/queue-yaml>`__ before
using this method.
Example:
>>> from google.cloud import tasks_v2
>>>
>>> client = tasks_v2.CloudTasksClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # TODO: Initialize `queue`:
>>> queue = {}
>>>
>>> response = client.create_queue(parent, queue)
Args:
parent (str): Required.
The location name in which the queue will be created. For example:
``projects/PROJECT_ID/locations/LOCATION_ID``
The list of allowed locations can be obtained by calling Cloud Tasks'
implementation of ``ListLocations``.
queue (Union[dict, ~google.cloud.tasks_v2.types.Queue]): Required.
The queue to create.
``Queue's name`` cannot be the same as an existing queue.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.tasks_v2.types.Queue`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.tasks_v2.types.Queue` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. |
378,251 | def allow_buttons(self, message="", link=True, back=True):
self.info_label.set_label(message)
self.allow_close_window()
if link and self.link is not None:
self.link.set_sensitive(True)
self.link.show_all()
if back:
self.back_btn.show()
self.main_btn.set_sensitive(True) | Function allows buttons |
378,252 | def retry(self, func, partition_id, retry_message, final_failure_message, max_retries, host_id):
loop = asyncio.new_event_loop()
loop.run_until_complete(self.retry_async(func, partition_id, retry_message,
final_failure_message, max_retries, host_id)) | Make attempt_renew_lease async call sync. |
378,253 | def print_packet_count():
for name in archive.list_packet_names():
packet_count = 0
for group in archive.list_packet_histogram(name):
for rec in group.records:
packet_count += rec.count
print('{}: {}'.format(name, packet_count)) | Print the number of packets grouped by packet name. |
378,254 | def gen_send_stdout_url(ip, port):
return '{0}:{1}{2}{3}/{4}/{5}'.format(BASE_URL.format(ip), port, API_ROOT_URL, STDOUT_API, NNI_EXP_ID, NNI_TRIAL_JOB_ID) | Generate send stdout url |
378,255 | def install(self):
keys = self.get_keys_from_ldap()
for user, ssh_keys in keys.items():
user_dir = API.__authorized_keys_path(user)
if not os.path.isdir(user_dir):
os.makedirs(user_dir)
authorized_keys_file = os.path.join(user_dir, 'authorized_keys')
with open(authorized_keys_file, 'w') as FILE:
print("\n".join([k.decode() for k in ssh_keys]), file=FILE) | Install/download ssh keys from LDAP for consumption by SSH. |
378,256 | def delete_async(blob_key, **options):
if not isinstance(blob_key, (basestring, BlobKey)):
raise TypeError('blob_key must be str or BlobKey, got %r' % (blob_key,))
rpc = blobstore.create_rpc(**options)
yield blobstore.delete_async(blob_key, rpc=rpc) | Async version of delete(). |
378,257 | def generic_visit(self, node):
super(RangeValues, self).generic_visit(node)
return self.add(node, UNKNOWN_RANGE) | Other nodes are not known and range value neither. |
378,258 | def runSearchCallSets(self, request):
return self.runSearchRequest(
request, protocol.SearchCallSetsRequest,
protocol.SearchCallSetsResponse,
self.callSetsGenerator) | Runs the specified SearchCallSetsRequest. |
378,259 | def wait_until_not_visible(self, timeout=None):
try:
self.utils.wait_until_element_not_visible(self, timeout)
except TimeoutException as exception:
parent_msg = " and parent locator ".format(self.parent) if self.parent else
msg = "Page element of type with locator %s%s is still visible after %s seconds"
timeout = timeout if timeout else self.utils.get_explicitly_wait()
self.logger.error(msg, type(self).__name__, self.locator, parent_msg, timeout)
exception.msg += "\n {}".format(msg % (type(self).__name__, self.locator, parent_msg, timeout))
raise exception
return self | Search element and wait until it is not visible
:param timeout: max time to wait
:returns: page element instance |
378,260 | def get_groups(self, env, token):
groups = None
memcache_client = cache_from_env(env)
if memcache_client:
memcache_key = % (self.reseller_prefix, token)
cached_auth_data = memcache_client.get(memcache_key)
if cached_auth_data:
expires, groups = cached_auth_data
if expires < time():
groups = None
s3_auth_details = env.get()
if s3_auth_details:
if not self.s3_support:
self.logger.warning()
return None
if self.swauth_remote:
self.logger.warning(
)
return None
try:
account, user = s3_auth_details[].split(, 1)
signature_from_user = s3_auth_details[]
msg = s3_auth_details[]
except Exception:
self.logger.debug(
%
(s3_auth_details, ))
return None
path = quote( % (self.auth_account, account, user))
resp = self.make_pre_authed_request(
env, , path).get_response(self.app)
if resp.status_int // 100 != 2:
return None
if in resp.headers:
account_id = resp.headers[]
else:
path = quote( % (self.auth_account, account))
resp2 = self.make_pre_authed_request(
env, , path).get_response(self.app)
if resp2.status_int // 100 != 2:
return None
account_id = resp2.headers[]
path = env[]
env[] = path.replace("%s:%s" % (account, user),
account_id, 1)
detail = json.loads(resp.body)
if detail:
creds = detail.get()
try:
auth_encoder, creds_dict = \
swauth.authtypes.validate_creds(creds)
except ValueError as e:
self.logger.error( % e.args[0])
return None
password = creds_dict[]
if isinstance(password, six.text_type):
password = password.encode()
if isinstance(msg, six.text_type):
msg = msg.encode()
valid_signature = base64.encodestring(hmac.new(
password, msg, sha1).digest()).strip()
if signature_from_user != valid_signature:
return None
groups = [g[] for g in detail[]]
if in groups:
groups.remove()
groups.append(account_id)
groups = .join(groups)
return groups
if not groups:
if self.swauth_remote:
with Timeout(self.swauth_remote_timeout):
conn = http_connect(self.swauth_remote_parsed.hostname,
self.swauth_remote_parsed.port, ,
% (self.swauth_remote_parsed.path,
quote(token)),
ssl=(self.swauth_remote_parsed.scheme == ))
resp = conn.getresponse()
resp.read()
conn.close()
if resp.status // 100 != 2:
return None
expires_from_now = float(resp.getheader())
groups = resp.getheader()
if memcache_client:
memcache_client.set(
memcache_key, (time() + expires_from_now, groups),
time=expires_from_now)
else:
object_name = self._get_concealed_token(token)
path = quote( %
(self.auth_account, object_name[-1], object_name))
resp = self.make_pre_authed_request(
env, , path).get_response(self.app)
if resp.status_int // 100 != 2:
return None
detail = json.loads(resp.body)
if detail[] < time():
self.make_pre_authed_request(
env, , path).get_response(self.app)
return None
groups = [g[] for g in detail[]]
if in groups:
groups.remove()
groups.append(detail[])
groups = .join(groups)
if memcache_client:
memcache_client.set(
memcache_key,
(detail[], groups),
time=float(detail[] - time()))
return groups | Get groups for the given token.
:param env: The current WSGI environment dictionary.
:param token: Token to validate and return a group string for.
:returns: None if the token is invalid or a string containing a comma
separated list of groups the authenticated user is a member
of. The first group in the list is also considered a unique
identifier for that user. |
378,261 | def gc2gdlat(gclat):
WGS84_e2 = 0.006694379990141317
return np.rad2deg(-np.arctan(np.tan(np.deg2rad(gclat))/(WGS84_e2 - 1))) | Converts geocentric latitude to geodetic latitude using WGS84.
Parameters
==========
gclat : array_like
Geocentric latitude
Returns
=======
gdlat : ndarray or float
Geodetic latitude |
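A quick numeric check of the conversion above:

import numpy as np

# Geocentric 45 deg corresponds to roughly 45.19 deg geodetic under WGS84.
print(gc2gdlat(45.0))                   # ~45.19
print(gc2gdlat(np.array([0.0, 90.0])))  # equator and pole are unchanged: [ 0. 90.]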
378,262 | def render(filename, obj):
template_path = abspath(filename)
env = jinja_env(template_path)
template_base = os.path.basename(template_path)
try:
parsed_content = env.parse(env
.loader
.get_source(env, template_base))
template_vars = meta.find_undeclared_variables(parsed_content)
if template_vars:
missing_vars(template_vars, parsed_content, obj)
LOG.debug("rendering %s with %s vars",
template_path, len(template_vars))
return env \
.get_template(template_base) \
.render(**obj)
except jinja2.exceptions.TemplateSyntaxError as exception:
template_trace = traceback.format_tb(sys.exc_info()[2])
if exception.filename:
template_line = template_trace[len(template_trace) - 1]
raise aomi_excep.Validation("Bad template %s %s" %
(template_line,
str(exception)))
template_str = ''
if isinstance(exception.source, tuple):
template_str = "Embedded Template\n%s" % exception.source[0]
raise aomi_excep.Validation("Bad template %s" % str(exception),
source=template_str)
except jinja2.exceptions.UndefinedError as exception:
template_traces = [x.strip()
for x in traceback.format_tb(sys.exc_info()[2])
if 'template' in x]
raise aomi_excep.Validation("Missing template variable %s" %
' '.join(template_traces)) | Render a template, maybe mixing in extra variables |
378,263 | def updateSolutionTerminal(self):
self.solution_terminal.vFunc = ValueFunc2D(self.cFunc_terminal_,self.CRRA)
self.solution_terminal.vPfunc = MargValueFunc2D(self.cFunc_terminal_,self.CRRA)
self.solution_terminal.vPPfunc = MargMargValueFunc2D(self.cFunc_terminal_,self.CRRA)
self.solution_terminal.hNrm = 0.0
self.solution_terminal.hLvl = lambda p : np.zeros_like(p)
self.solution_terminal.mLvlMin = lambda p : np.zeros_like(p) | Update the terminal period solution. This method should be run when a
new AgentType is created or when CRRA changes.
Parameters
----------
None
Returns
-------
None |
378,264 | def findSynonymsArray(self, word, num):
if not isinstance(word, basestring):
word = _convert_to_vector(word)
tuples = self._java_obj.findSynonymsArray(word, num)
return list(map(lambda st: (st._1(), st._2()), list(tuples))) | Find "num" number of words closest in similarity to "word".
word can be a string or vector representation.
Returns an array with two fields word and similarity (which
gives the cosine similarity). |
378,265 | def processDatasetBlocks(self, url, conn, inputdataset, order_counter):
ordered_dict = {}
srcblks = self.getSrcBlocks(url, dataset=inputdataset)
if len(srcblks) < 0:
e = "DBSMigration: No blocks in the required dataset %s found at source %s."%(inputdataset, url)
dbsExceptionHandler(, e, self.logger.exception, e)
dstblks = self.blocklist.execute(conn, dataset=inputdataset)
self.logger.debug("******* dstblks for dataset %s ***********" %inputdataset)
self.logger.debug(dstblks)
blocksInSrcNames = [y['block_name'] for y in srcblks]
blocksInDstNames = []
for item in dstblks:
blocksInDstNames.append(item['block_name'])
ordered_dict[order_counter] = []
for ablk in blocksInSrcNames:
if not ablk in blocksInDstNames:
ordered_dict[order_counter].append(ablk)
if ordered_dict[order_counter] != []:
self.logger.debug("**** ordered_dict dict length ****")
self.logger.debug(len(ordered_dict))
return ordered_dict
else:
return {} | Utility function that compares blocks of a dataset at source and dst
and returns an ordered list of blocks not already at dst for migration |
378,266 | def msg_debug(message):
if _log_lvl == logging.DEBUG:
to_stdout(" (*) {message}".format(message=message), colorf=cyan)
if _logger:
_logger.debug(message) | Log a debug message
:param message: the message to be logged |
378,267 | def list_rocs_files(url=ROCS_URL):
soup = BeautifulSoup(get(url))
if not url.endswith('/'):
url += '/'
files = []
for elem in soup.findAll('a'):
if elem['href'].startswith('?'):
continue
if elem.string.lower() == 'parent directory':
continue
files.append(url + elem['href'])
return files | Gets the contents of the given url. |
378,268 | def get_subpackages(app, module):
submodules = _get_submodules(app, module)
return [name for name, ispkg in submodules if ispkg] | Get all subpackages for the given module/package
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module to query or module path
:type module: module | str
:returns: list of packages names
:rtype: list
:raises: TypeError |
378,269 | def combine_or(matcher, *more_matchers):
def _match(cause):
# renamed inner function to avoid shadowing the `matcher` parameter
for sub_matcher in itertools.chain([matcher], more_matchers):
cause_cls = sub_matcher(cause)
if cause_cls is not None:
return cause_cls
return None
return _match | Combines more than one matcher together (first that matches wins). |
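A small usage sketch; the matchers here are illustrative callables mapping an exception cause to a class or None:

match_io = lambda cause: IOError if isinstance(cause, IOError) else None
match_value = lambda cause: ValueError if isinstance(cause, ValueError) else None

matcher = combine_or(match_io, match_value)
print(matcher(ValueError('boom')))   # <class 'ValueError'> (second matcher recognizes it)
print(matcher(KeyError('missing')))  # None (no matcher recognized the cause)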
378,270 | def archive_query_interval(self, _from, to):
with self.session as session:
table = self.tables.archive
try:
results = session.query(table)\
.filter(table.dateTime >= _from)\
.filter(table.dateTime < to)\
.all()
return [self.archive_schema.dump(entry).data for entry in results]
except SQLAlchemyError as exc:
session.rollback()
print_exc()
raise IOError(exc) | :param _from: Start of interval (int) (inclusive)
:param to: End of interval (int) (exclusive)
:raises: IOError |
378,271 | def _get_submodules(app, module):
if inspect.ismodule(module):
if hasattr(module, '__path__'):
p = module.__path__
else:
return []
elif isinstance(module, str):
p = module
else:
raise TypeError("Only Module or String accepted. %s given." % type(module))
logger.debug('Searching for submodules in %s', p)
submodules = [(name, ispkg) for loader, name, ispkg in pkgutil.iter_modules(p)]
logger.debug('Found submodules of %s: %s', module, submodules)
return submodules | Get all submodules for the given module/package
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param module: the module to query or module path
:type module: module | str
:returns: list of module names and boolean whether its a package
:rtype: list
:raises: TypeError |
378,272 | def raise_server_error(self):
if self.server and self.server.error:
try:
if capybara.raise_server_errors:
raise self.server.error
finally:
self.server.reset_error() | Raise errors encountered by the server. |
378,273 | def _parse_bro_header(self, logfile):
_line = next(logfile)
while (not _line.startswith('#fields')):
_line = next(logfile)
_field_names = _line.strip().split(self.delimiter)[1:]
_line = next(logfile)
_field_types = _line.strip().split(self.delimiter)[1:]
return _field_names, _field_types | This method tries to parse the Bro log header section.
Note: My googling is failing me on the documentation on the format,
so just making a lot of assumptions and skipping some shit.
Assumption 1: The delimiter is a tab.
Assumption 2: Types are either time, string, int or float
Assumption 3: The header always ends with #fields and #types as
the last two lines.
Format example:
#separator \x09
#set_separator ,
#empty_field (empty)
#unset_field -
#path httpheader_recon
#fields ts origin useragent header_events_json
#types time string string string
Args:
logfile: The Bro log file.
Returns:
A tuple of 2 lists. One for field names and other for field types. |
378,274 | def parse(self, message, schema):
func = {
'audit-log': self._parse_audit_log_msg,
'event': self._parse_event_msg,
}[schema]
return func(message) | Parse message according to schema.
`message` should already be validated against the given schema.
See :ref:`schemadef` for more information.
Args:
message (dict): message data to parse.
schema (str): valid message schema.
Returns:
(dict): parsed message |
378,275 | def replace_group(self, index, func_grp, strategy, bond_order=1,
graph_dict=None, strategy_params=None, reorder=True,
extend_structure=True):
self.set_node_attributes()
neighbors = self.get_connected_sites(index)
if len(neighbors) == 1:
self.substitute_group(index, func_grp, strategy,
bond_order=bond_order, graph_dict=graph_dict,
strategy_params=strategy_params,
reorder=reorder,
extend_structure=extend_structure)
else:
rings = self.find_rings(including=[index])
if len(rings) != 0:
raise RuntimeError("Currently functional group replacement"
"cannot occur at an atom within a ring"
"structure.")
to_remove = set()
sizes = dict()
disconnected = self.graph.to_undirected()
disconnected.remove_node(index)
for neighbor in neighbors:
sizes[neighbor[2]] = len(nx.descendants(disconnected, neighbor[2]))
keep = max(sizes, key=lambda x: sizes[x])
for i in sizes.keys():
if i != keep:
to_remove.add(i)
self.remove_nodes(list(to_remove))
self.substitute_group(index, func_grp, strategy,
bond_order=bond_order, graph_dict=graph_dict,
strategy_params=strategy_params,
reorder=reorder,
extend_structure=extend_structure) | Builds off of Molecule.substitute and MoleculeGraph.substitute_group
to replace a functional group in self.molecule with a functional group.
This method also amends self.graph to incorporate the new functional
group.
TODO: Figure out how to replace into a ring structure.
:param index: Index of atom to substitute.
:param func_grp: Substituent molecule. There are three options:
1. Providing an actual molecule as the input. The first atom
must be a DummySpecie X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
3. A MoleculeGraph object.
:param strategy: Class from pymatgen.analysis.local_env.
:param bond_order: A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
:param graph_dict: Dictionary representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. If None, then the algorithm
will attempt to automatically determine bonds using one of
a list of strategies defined in pymatgen.analysis.local_env.
:param strategy_params: dictionary of keyword arguments for strategy.
If None, default parameters will be used.
:param reorder: bool, representing if graph nodes need to be reordered
following the application of the local_env strategy
:param extend_structure: If True (default), then a large artificial box
will be placed around the Molecule, because some strategies assume
periodic boundary conditions.
:return: |
378,276 | def _AddOption(self, name):
if name in [option.name for option in self.options]:
raise TextFSMTemplateError('Duplicate option "%s"' % name)
try:
option = self._options_cls.GetOption(name)(self)
except AttributeError:
raise TextFSMTemplateError('Unknown option "%s"' % name)
self.options.append(option) | Add an option to this Value.
Args:
name: (str), the name of the Option to add.
Raises:
TextFSMTemplateError: If option is already present or
the option does not exist. |
378,277 | def authenticate(self, verify=True):
self.__oauth = OAuth1(self.__consumer_key,
client_secret=self.__consumer_secret,
resource_owner_key=self.__access_token,
resource_owner_secret=self.__access_token_secret)
if verify:
r = requests.get(self._base_url + self._verify_url,
auth=self.__oauth,
proxies={"https": self.__proxy})
self.check_http_status(r.status_code) | Creates an authenticated internal OAuth1 handler needed for \
queries to Twitter and verifies the credentials if requested. If ``verify`` \
is true, it also checks that the user credentials are valid. \
The **default** value is *True*.
:param verify: boolean flag controlling whether the credentials are \
verified against the API. Default value is ``True`` |
378,278 | def get(self):
pool = current_app.config['bigchain_pool']
with pool() as bigchain:
validators = bigchain.get_validators()
return validators | API endpoint to get validators set.
Return:
A JSON string containing the validator set of the current node. |
378,279 | def breakfast(self, message="Breakfast is ready", shout: bool = False):
return self.helper.output(message, shout) | Say something in the morning |
378,280 | def list_resources(self, session, query=):
resources = self.devices.list_resources()
resources = rname.filter(resources, query)
if resources:
return resources
raise errors.VisaIOError(errors.StatusCode.error_resource_not_found.value) | Returns a tuple of all connected devices matching query.
:param query: regular expression used to match devices. |
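As a rough illustration of the filtering step (the resource strings below are invented, and pyvisa's real rname.filter has its own matching rules, so this regex version is only an approximation):
import re

resources = ('ASRL1::INSTR', 'USB0::0x1111::0x2222::SN123::INSTR', 'TCPIP0::10.0.0.5::INSTR')
query = 'USB.*::INSTR'
matches = tuple(r for r in resources if re.match(query, r))
print(matches)  # ('USB0::0x1111::0x2222::SN123::INSTR',)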
378,281 | def set_maxsize(self, maxsize, **kwargs):
new_cache = self._get_cache_impl(self.impl_name, maxsize, **kwargs)
self._populate_new_cache(new_cache)
self.cache = new_cache | Set maxsize. This involves creating a new cache and transferring the items. |
378,282 | def _get_combined_keywords(_keywords, split_text):
result = []
_keywords = _keywords.copy()
len_text = len(split_text)
for i in range(len_text):
word = _strip_word(split_text[i])
if word in _keywords:
combined_word = [word]
if i + 1 == len_text:
result.append(word)
for j in range(i + 1, len_text):
other_word = _strip_word(split_text[j])
if other_word in _keywords and other_word == split_text[j] \
and other_word not in combined_word:
combined_word.append(other_word)
else:
for keyword in combined_word:
_keywords.pop(keyword)
result.append(" ".join(combined_word))
break
return result | :param _keywords: dict mapping keywords to scores
:param split_text: list of strings
:return: combined_keywords: list of combined keyword phrases |
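A small standalone illustration of the merging behaviour described above; it reimplements the idea with a simplified word-stripping rule, so it is a sketch of the algorithm rather than a drop-in for the module's _strip_word helper:
def combine_adjacent_keywords(keywords, split_text):
    # Greedily merge runs of adjacent words that are all scored keywords.
    remaining = dict(keywords)
    result = []
    i = 0
    while i < len(split_text):
        word = split_text[i].strip('.,').lower()
        if word in remaining:
            phrase = [word]
            j = i + 1
            while j < len(split_text):
                nxt = split_text[j].strip('.,').lower()
                if nxt in remaining and nxt not in phrase:
                    phrase.append(nxt)
                    j += 1
                else:
                    break
            for w in phrase:
                remaining.pop(w, None)
            result.append(' '.join(phrase))
            i = j
        else:
            i += 1
    return result

scores = {'machine': 0.9, 'learning': 0.8, 'models': 0.5}
text = 'Modern machine learning relies on large models.'.split()
print(combine_adjacent_keywords(scores, text))  # ['machine learning', 'models']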
378,283 | def edit_form(self, obj):
form = super(OAISetModelView, self).edit_form(obj)
del form.spec
return form | Customize edit form. |
378,284 | def _get_filepaths(self):
self._printer(str(self.__len__()) + " file paths have been parsed in " + str(self.timer.end))
if self._hash_files:
return pool_hash(self.filepaths)
else:
return self.filepaths | Filters list of file paths to remove non-included, remove excluded files and concatenate full paths. |
378,285 | def get_outline(ds, t_srs=None, scale=1.0, simplify=False, convex=False):
gt = np.array(ds.GetGeoTransform())
from pygeotools.lib import iolib
a = iolib.ds_getma_sub(ds, scale=scale)
geom = ogr.Geometry(ogr.wkbPolygon)
if a.count() != 0:
if (scale != 1.0):
gt[1] *= scale
gt[5] *= scale
ds_srs = get_ds_srs(ds)
if t_srs is None:
t_srs = ds_srs
px = np.ma.notmasked_edges(a, axis=0)
x = np.concatenate((px[0][1][::1], px[1][1][::-1], [px[0][1][0]]))
y = np.concatenate((px[0][0][::1], px[1][0][::-1], [px[0][0][0]]))
mx, my = pixelToMap(x, y, gt)
geom_wkt = 'POLYGON(({}))'.format(', '.join(['{} {}'.format(*a) for a in zip(mx, my)]))
geom = ogr.CreateGeometryFromWkt(geom_wkt)
if not ds_srs.IsSame(t_srs):
ct = osr.CoordinateTransformation(ds_srs, t_srs)
geom.Transform(ct)
geom.AssignSpatialReference(t_srs)
if not geom.IsValid():
tol = gt[1] * 0.1
geom = geom.Simplify(tol)
if simplify:
tol = gt[1] * 2
geom = geom.Simplify(tol)
if convex:
geom = geom.ConvexHull()
else:
print("No unmasked values found")
return geom | Generate outline of unmasked values in input raster
get_outline is an attempt to reproduce the PostGIS Raster ST_MinConvexHull function
Could potentially do the following: Extract random pts from unmasked elements, get indices, Run scipy convex hull, Convert hull indices to mapped coords
See this: http://stackoverflow.com/questions/3654289/scipy-create-2d-polygon-mask
This generates a wkt polygon outline of valid data for the input raster
Want to limit the dimensions of a, as notmasked_edges is slow: a = iolib.ds_getma_sub(ds, scale=scale) |
378,286 | def visit_Try(self, node: ast.Try) -> Optional[ast.AST]:
new_node = self.generic_visit(node)
assert isinstance(new_node, ast.Try)
return ast.copy_location(
ast.Try(
body=_filter_dead_code(new_node.body),
handlers=new_node.handlers,
orelse=_filter_dead_code(new_node.orelse),
finalbody=_filter_dead_code(new_node.finalbody),
),
new_node,
) | Eliminate dead code from the bodies of ``try`` statements. |
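_filter_dead_code is not shown in this row; one plausible minimal version, assumed here purely for illustration, truncates a statement list after the first unconditional exit:
import ast

def filter_dead_code(statements):
    # Keep statements up to and including the first one that always exits.
    alive = []
    for stmt in statements:
        alive.append(stmt)
        if isinstance(stmt, (ast.Return, ast.Raise, ast.Break, ast.Continue)):
            break
    return alive

src = (
    "def f():\n"
    "    try:\n"
    "        return 1\n"
    "        print('never runs')\n"
    "    finally:\n"
    "        pass\n"
)
try_node = ast.parse(src).body[0].body[0]
print(len(try_node.body), '->', len(filter_dead_code(try_node.body)))  # 2 -> 1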
378,287 | def _default_ns_prefix(nsmap):
if None in nsmap:
default_url = nsmap[None]
prefix = None
for key, val in nsmap.iteritems():
if val == default_url and key is not None:
prefix = key
break
else:
raise ValueError(
"Default namespace {url} not found as a prefix".format(
url=default_url
)
)
return prefix
raise ValueError("No default namespace found in map") | XML doc may have several prefix:namespace_url pairs, can also specify
a namespace_url as default, tags in that namespace don't need a prefix
NOTE:
we rely on default namespace also present in prefixed form, I'm not sure if
this is an XML certainty or a quirk of the eBay WSDLs
in our case the WSDL contains:
<wsdl:documentation>
<Version>1.0.0</Version>
</wsdl:documentation>
...but our query needs to give a prefix to the path of `Version` so we need
to determine the default namespace of the doc, find the matching prefix and
return it |
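The same lookup as a short standalone sketch with a made-up nsmap (the URLs are illustrative); it also uses items() instead of the Python 2-only iteritems() call above:
def default_ns_prefix(nsmap):
    # Find the prefix whose URL matches the default (None-keyed) namespace.
    if None not in nsmap:
        raise ValueError("No default namespace found in map")
    default_url = nsmap[None]
    for key, val in nsmap.items():
        if key is not None and val == default_url:
            return key
    raise ValueError("Default namespace %s not found as a prefix" % default_url)

sample_nsmap = {
    None: 'urn:ebay:apis:eBLBaseComponents',
    'ebl': 'urn:ebay:apis:eBLBaseComponents',
    'wsdl': 'http://schemas.xmlsoap.org/wsdl/',
}
print(default_ns_prefix(sample_nsmap))  # ebl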
378,288 | def pick_kmersize(fq):
if bam.is_bam(fq):
readlength = bam.estimate_read_length(fq)
else:
readlength = fastq.estimate_read_length(fq)
halfread = int(round(readlength / 2))
if halfread >= 31:
kmersize = 31
else:
kmersize = halfread
if kmersize % 2 == 0:
kmersize += 1
return kmersize | pick an appropriate kmer size based off of https://www.biostars.org/p/201474/
tl;dr version: pick 31 unless the reads are very small, if not then guess
that readlength / 2 is about right. |
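The selection rule on its own, stripped of the BAM/FASTQ read-length estimation so it runs standalone (the function name is ours, not the module's):
def pick_kmer_size(read_length):
    # Use 31 for typical reads; for very short reads fall back to
    # roughly half the read length, forced to be odd.
    half = int(round(read_length / 2))
    kmer = 31 if half >= 31 else half
    if kmer % 2 == 0:
        kmer += 1
    return kmer

for length in (36, 50, 75, 150):
    print(length, '->', pick_kmer_size(length))
# 36 -> 19, 50 -> 25, 75 -> 31, 150 -> 31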
378,289 | def __write(self, thePath, theData):
fd = open(thePath, "wb")
fd.write(theData)
fd.close() | Write data to a file.
@type thePath: str
@param thePath: The file path.
@type theData: str
@param theData: The data to write. |
378,290 | def fkapply(models,pool,fn,missing_fn,(nombre,pkey,field),*args):
"wrapper for do_* funcs to call process_* with missing handler. Unpacks the FieldKey."
if (nombre,pkey) in models: return fn(pool,models[nombre,pkey],field,*args)
else: return missing_fn(pool,field,*args) if missing_fn else [] | wrapper for do_* funcs to call process_* with missing handler. Unpacks the FieldKey. |
378,291 | def mask(self, mask):
check_class(mask, (np.ndarray, bool, np.bool_))
if isinstance(mask, (bool, np.bool_)):
self._mask = bool(mask)
else:
self._mask = mask | The mask values. Must be an array or a boolean scalar. |
378,292 | def dtype_repr(dtype):
dtype = np.dtype(dtype)
if dtype == np.dtype(int):
    return "'int'"
elif dtype == np.dtype(float):
    return "'float'"
elif dtype == np.dtype(complex):
    return "'complex'"
elif dtype.shape:
    return "('{}', {})".format(dtype.base, dtype.shape)
else:
    return "'{}'".format(dtype) | Stringify ``dtype`` for ``repr`` with default for int and float. |
378,293 | def inject_params(model_name: str) -> ListenerParams:
params_file = model_name + '.params'
try:
with open(params_file) as f:
pr.__dict__.update(compatibility_params, **json.load(f))
except (OSError, ValueError, TypeError):
if isfile(model_name):
print('Warning: Failed to load parameters from ' + params_file)
return pr | Set the global listener params to a saved model |
378,294 | def sync_folder_to_container(self, folder_path, container, delete=False,
include_hidden=False, ignore=None, ignore_timestamps=False,
object_prefix="", verbose=False):
cont = self.get_container(container)
self._local_files = []
if verbose:
log = logging.getLogger("pyrax")
log.info("Loading remote object list (prefix=%s)", object_prefix)
data = cont.get_objects(prefix=object_prefix, full_listing=True)
self._remote_files = dict((d.name, d) for d in data)
self._sync_summary = {"total": 0,
"uploaded": 0,
"ignored": 0,
"older": 0,
"duplicate": 0,
"failed": 0,
"failure_reasons": [],
"deleted": 0,
}
self._sync_folder_to_container(folder_path, cont, prefix="",
delete=delete, include_hidden=include_hidden, ignore=ignore,
ignore_timestamps=ignore_timestamps,
object_prefix=object_prefix, verbose=verbose)
self._remote_files = None
if verbose:
summary = self._sync_summary
log.info("Folder sync completed at %s" % time.ctime())
log.info(" Total files processed: %s" % summary["total"])
log.info(" Number Uploaded: %s" % summary["uploaded"])
log.info(" Number Ignored: %s" % summary["ignored"])
log.info(" Number Skipped (older): %s" % summary["older"])
log.info(" Number Skipped (dupe): %s" % summary["duplicate"])
log.info(" Number Deleted: %s" % summary["deleted"])
log.info(" Number Failed: %s" % summary["failed"])
if summary["failed"]:
for reason in summary["failure_reasons"]:
log.info(" Reason: %s" % reason) | Compares the contents of the specified folder, and checks to make sure
that the corresponding object is present in the specified container. If
there is no remote object matching the local file, it is created. If a
matching object exists, the etag is examined to determine if the object
in the container matches the local file; if they differ, the container
is updated with the local file if the local file is newer when
`ignore_timestamps` is False (default). If `ignore_timestamps` is True,
the object is overwritten with the local file contents whenever the
etags differ. NOTE: the timestamp of a remote object is the time it was
uploaded, not the original modification time of the file stored in that
object. Unless 'include_hidden' is True, files beginning with an
initial period are ignored.
If the 'delete' option is True, any objects in the container that do
not have corresponding files in the local folder are deleted.
You can selectively ignore files by passing either a single pattern or
a list of patterns; these will be applied to the individual folder and
file names, and any names that match any of the 'ignore' patterns will
not be uploaded. The patterns should be standard *nix-style shell
patterns; e.g., '*pyc' will ignore all files ending in 'pyc', such as
'program.pyc' and 'abcpyc'.
If `object_prefix` is set it will be appended to the object name when
it is checked and uploaded to the container. For example, if you use
sync_folder_to_container("folderToSync/", myContainer,
object_prefix="imgFolder") it will upload the files to the
container/imgFolder/... instead of just container/...
Set `verbose` to True to make it print what is going on. It will
show which files are being uploaded and which ones are not and why. |
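The 'has this file changed?' decision described above reduces to an etag comparison. A rough standalone sketch, assuming Swift-style etags (a plain MD5 of the content; large segmented objects use composite etags this does not handle), and not pyrax's actual helper:
import hashlib

def file_matches_etag(path, remote_etag, chunk_size=65536):
    # Hash the local file in chunks and compare against the stored etag.
    md5 = hashlib.md5()
    with open(path, 'rb') as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b''):
            md5.update(chunk)
    return md5.hexdigest() == remote_etag.strip('"').lower()

# Usage sketch: skip the upload when the digests already agree.
# if not file_matches_etag('photos/cat.jpg', remote_obj.etag): upload(...)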
378,295 | def check_client_key(self, client_key):
lower, upper = self.client_key_length
return (set(client_key) <= self.safe_characters and
lower <= len(client_key) <= upper) | Check that the client key only contains safe characters
and is no shorter than lower and no longer than upper. |
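For context, a standalone version of the same check; the character set and length bounds below are stand-ins rather than oauthlib's actual defaults:
import string

SAFE_CHARACTERS = set(string.ascii_letters + string.digits)
CLIENT_KEY_LENGTH = (20, 30)  # illustrative bounds

def check_client_key(client_key):
    lower, upper = CLIENT_KEY_LENGTH
    return (set(client_key) <= SAFE_CHARACTERS
            and lower <= len(client_key) <= upper)

print(check_client_key('abcDEF1234567890abcd'))   # True  (20 safe characters)
print(check_client_key('short'))                  # False (too short)
print(check_client_key('bad key with spaces!!'))  # False (unsafe characters)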
378,296 | def parse_args(args):
parser = argparse.ArgumentParser(
description="Just a Hello World demonstration")
parser.add_argument(
    '-v',
    '--version',
    action='version',
    version='{ver}'.format(ver=__version__))  # flag names and version text are assumptions; the original literals were lost
return parser.parse_args(args) | Parse command line parameters
:param args: command line parameters as list of strings
:return: command line parameters as :obj:`argparse.Namespace` |
378,297 | def query(self, query_str, *query_args, **query_options):
with self.connection(**query_options) as connection:
query_options['connection'] = connection
return self._query(query_str, query_args, **query_options) | run a raw query on the db
query_str -- string -- the query to run
*query_args -- if the query_str is a formatting string, pass the values in this
**query_options -- any query options can be passed in by using key=val syntax |
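The calling convention being wrapped here, a raw SQL string plus positional values, is the usual DB-API pattern; a standalone sqlite3 sketch of the same idea (sqlite3 is only a stand-in for whatever backend this interface targets):
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE users (id INTEGER, name TEXT)')
conn.execute('INSERT INTO users VALUES (?, ?)', (1, 'alice'))
rows = conn.execute('SELECT name FROM users WHERE id = ?', (1,)).fetchall()
print(rows)  # [('alice',)]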
378,298 | def _yield_week_day(self, enumeration=False):
if enumeration:
for week in range(1, self.duration + 1):
for day_index, day in enumerate(self.days):
yield (week, day_index, day)
else:
for week in range(1, self.duration + 1):
for day in self.days:
yield (week, day) | A helper function to reduce the number of nested loops.
Parameters
----------
enumeration
Whether or not to wrap the days in enumerate().
Yields
-------
tuple
A tuple with (week, day_index, day) or (week, day),
depending on 'enumeration' parameter. |
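A free-standing version of the same generator, with the branch moved inside the loop purely for brevity (the sample day names are invented):
def yield_week_day(duration, days, enumeration=False):
    # Flatten the (week, day) nesting into a single generator.
    for week in range(1, duration + 1):
        if enumeration:
            for day_index, day in enumerate(days):
                yield week, day_index, day
        else:
            for day in days:
                yield week, day

print(list(yield_week_day(2, ['push', 'pull'])))
# [(1, 'push'), (1, 'pull'), (2, 'push'), (2, 'pull')]
print(list(yield_week_day(1, ['push', 'pull'], enumeration=True)))
# [(1, 0, 'push'), (1, 1, 'pull')]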
378,299 | def Similarity(self, value=None):
if value is None:
value = 0.0
return Similarity(value, threshold=self.threshold) | Constructor for new default Similarities. |