Dataset columns: text (string, lengths 89 to 104k), code_tokens (list), avg_line_len (float64, 7.91 to 980), score (float64, 0 to 630).
def get_replication_group_imbalance_stats(rgs, partitions):
"""Calculate extra replica count replica count over each replication-group
and net extra-same-replica count.
"""
tot_rgs = len(rgs)
extra_replica_cnt_per_rg = defaultdict(int)
for partition in partitions:
# Get optimal replica-count for each partition
opt_replica_cnt, extra_replicas_allowed = \
compute_optimum(tot_rgs, partition.replication_factor)
# Extra replica count for each rg
for rg in rgs:
replica_cnt_rg = rg.count_replica(partition)
extra_replica_cnt, extra_replicas_allowed = \
get_extra_element_count(
replica_cnt_rg,
opt_replica_cnt,
extra_replicas_allowed,
)
extra_replica_cnt_per_rg[rg.id] += extra_replica_cnt
# Evaluate net imbalance across all replication-groups
net_imbalance = sum(extra_replica_cnt_per_rg.values())
    return net_imbalance, extra_replica_cnt_per_rg

def getLogger(name):
"""Return a logger from a given name.
    If the name does not have a log handler, this will create one for it
    based on the module name, which will log everything to a log file in a
    location the executing user has access to.
:param name: ``str``
:return: ``object``
"""
log = logging.getLogger(name=name)
for handler in log.handlers:
if name == handler.name:
return log
else:
        return LogSetup().default_logger(name=name.split('.')[0])

def get_array(array, **kw):
"""
Extract a subarray by filtering on the given keyword arguments
"""
for name, value in kw.items():
array = array[array[name] == value]
    return array

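For example, with a NumPy structured array (any array type supporting array[array[name] == value] masking would do):

import numpy as np

# Hypothetical structured array used only for illustration.
events = np.array(
    [(1, 'open'), (2, 'close'), (1, 'close')],
    dtype=[('id', 'i4'), ('action', 'U8')],
)
print(get_array(events, id=1, action='close'))  # [(1, 'close')]
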
def populate(self, metamodel):
'''
Populate a *metamodel* with entities previously encountered from input.
'''
self.populate_classes(metamodel)
self.populate_unique_identifiers(metamodel)
self.populate_associations(metamodel)
self.populate_instances(metamodel)
    self.populate_connections(metamodel)

def faces_sparse(self):
"""
A sparse matrix representation of the faces.
Returns
----------
sparse : scipy.sparse.coo_matrix
Has properties:
dtype : bool
shape : (len(self.vertices), len(self.faces))
"""
sparse = geometry.index_sparse(
column_count=len(self.vertices),
indices=self.faces)
    return sparse

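geometry.index_sparse is an external helper (trimesh); a minimal sketch of the same vertex-face incidence construction with scipy, assuming one True entry per (vertex, face) reference:

import numpy as np
from scipy import sparse

def index_sparse(column_count, indices):
    # Rows index vertices, columns index faces; entry (v, f) is True
    # when face f references vertex v.
    indices = np.asarray(indices)
    row = indices.reshape(-1)                                    # vertex of each corner
    col = np.repeat(np.arange(len(indices)), indices.shape[1])   # owning face
    data = np.ones(len(row), dtype=bool)
    return sparse.coo_matrix((data, (row, col)),
                             shape=(column_count, len(indices)))

faces = np.array([[0, 1, 2], [2, 1, 3]])
m = index_sparse(column_count=4, indices=faces)
print(m.shape, m.dtype)  # (4, 2) bool
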
def delete_rrset(self, zone_name, rtype, owner_name):
"""Deletes an RRSet.
Arguments:
zone_name -- The zone containing the RRSet to be deleted. The trailing dot is optional.
rtype -- The type of the RRSet. This can be numeric (1) or
if a well-known name is defined for the type (A), you can use it instead.
owner_name -- The owner name for the RRSet.
If no trailing dot is supplied, the owner_name is assumed to be relative (foo).
If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.)
"""
    return self.rest_api_connection.delete("/v1/zones/" + zone_name + "/rrsets/" + rtype + "/" + owner_name)

def add_route(self, template, controller, **kwargs):
""" Add a route definition
`controller` can be either a controller instance,
or the name of a callable that will be imported.
"""
if isinstance(controller, basestring):
controller = pymagic.import_name(controller)
self.routes.append((self.parse_route(template), controller, kwargs))
    return self

def batch_predict(
self,
name,
input_config,
output_config,
params=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
        Perform a batch prediction. Unlike the online ``Predict``, the batch
        prediction result won't be immediately available in the response.
        Instead, a long-running operation object is returned. Users can poll the
        operation result via the ``GetOperation`` method. Once the operation is
        done, ``BatchPredictResult`` is returned in the ``response`` field.
Available for following ML problems:
- Video Classification
- Text Extraction
- Tables
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.PredictionServiceClient()
>>>
>>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
>>>
>>> # TODO: Initialize `input_config`:
>>> input_config = {}
>>>
>>> # TODO: Initialize `output_config`:
>>> output_config = {}
>>>
>>> response = client.batch_predict(name, input_config, output_config)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
name (str): Name of the model requested to serve the batch prediction.
input_config (Union[dict, ~google.cloud.automl_v1beta1.types.BatchPredictInputConfig]): Required. The input configuration for batch prediction.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.automl_v1beta1.types.BatchPredictInputConfig`
output_config (Union[dict, ~google.cloud.automl_v1beta1.types.BatchPredictOutputConfig]): Required. The Configuration specifying where output predictions should
be written.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.automl_v1beta1.types.BatchPredictOutputConfig`
params (dict[str -> str]): Additional domain-specific parameters for the predictions, any string
must be up to 25000 characters long.
- For Video Classification : ``score_threshold`` - (float) A value from
0.0 to 1.0. When the model makes predictions for a video, it will
only produce results that have at least this confidence score. The
default is 0.5. ``segment_classification`` - (boolean) Set to true to
request segment-level classification. AutoML Video Intelligence
returns labels and their confidence scores for the entire segment of
the video that user specified in the request configuration. The
default is "true". ``shot_classification`` - (boolean) Set to true to
request shot-level classification. AutoML Video Intelligence
determines the boundaries for each camera shot in the entire segment
of the video that user specified in the request configuration. AutoML
Video Intelligence then returns labels and their confidence scores
for each detected shot, along with the start and end time of the
shot. WARNING: Model evaluation is not done for this classification
type, the quality of it depends on training data, but there are no
metrics provided to describe that quality. The default is "false".
``1s_interval_classification`` - (boolean) Set to true to request
classification for a video at one-second intervals. AutoML Video
Intelligence returns labels and their confidence scores for each
second of the entire segment of the video that user specified in the
request configuration. WARNING: Model evaluation is not done for this
classification type, the quality of it depends on training data, but
there are no metrics provided to describe that quality. The default
is "false".
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "batch_predict" not in self._inner_api_calls:
self._inner_api_calls[
"batch_predict"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.batch_predict,
default_retry=self._method_configs["BatchPredict"].retry,
default_timeout=self._method_configs["BatchPredict"].timeout,
client_info=self._client_info,
)
request = prediction_service_pb2.BatchPredictRequest(
name=name,
input_config=input_config,
output_config=output_config,
params=params,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["batch_predict"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
prediction_service_pb2.BatchPredictResult,
metadata_type=proto_operations_pb2.OperationMetadata,
        )

def do_b(self, line):
"""Send the Master a BinaryInput (group 2) value. Command syntax is: 'b index true' or 'b index false'"""
index, value_string = self.index_and_value_from_line(line)
if index and value_string:
if value_string.lower() == 'true' or value_string.lower() == 'false':
self.application.apply_update(opendnp3.Binary(value_string == 'true'), index)
else:
            print('Please enter true or false as the second argument.')

def native(self):
"""
The native Python datatype representation of this value
:return:
A datetime.datetime object in the UTC timezone or None
"""
if self.contents is None:
return None
if self._native is None:
string = str_cls(self)
has_timezone = re.search('[-\\+]', string)
# We don't know what timezone it is in, or it is UTC because of a Z
# suffix, so we just assume UTC
if not has_timezone:
string = string.rstrip('Z')
date = self._date_by_len(string)
self._native = date.replace(tzinfo=timezone.utc)
else:
# Python 2 doesn't support the %z format code, so we have to manually
# process the timezone offset.
date = self._date_by_len(string[0:-5])
            hours = int(string[-5:-2])  # include the sign character, so the branch below works
minutes = int(string[-2:])
delta = timedelta(hours=abs(hours), minutes=minutes)
if hours < 0:
date -= delta
else:
date += delta
self._native = date.replace(tzinfo=timezone.utc)
    return self._native

def _build_conflict_target(self):
"""Builds the `conflict_target` for the ON CONFLICT
clause."""
conflict_target = []
if not isinstance(self.query.conflict_target, list):
raise SuspiciousOperation((
'%s is not a valid conflict target, specify '
'a list of column names, or tuples with column '
'names and hstore key.'
) % str(self.query.conflict_target))
def _assert_valid_field(field_name):
field_name = self._normalize_field_name(field_name)
if self._get_model_field(field_name):
return
raise SuspiciousOperation((
'%s is not a valid conflict target, specify '
'a list of column names, or tuples with column '
'names and hstore key.'
) % str(field_name))
for field_name in self.query.conflict_target:
_assert_valid_field(field_name)
# special handling for hstore keys
if isinstance(field_name, tuple):
conflict_target.append(
'(%s->\'%s\')' % (
self._format_field_name(field_name),
field_name[1]
)
)
else:
conflict_target.append(
self._format_field_name(field_name))
    return '(%s)' % ','.join(conflict_target)

def seconds_until_renew(self):
"""
Returns the number of seconds between the current time
and the set renew time. It can be negative if the
leader election is running late.
"""
delta = self.renew_time - datetime.now(self.renew_time.tzinfo)
    return delta.total_seconds()

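Since the method only reads self.renew_time, a bare namespace object is enough to exercise it (illustrative values):

from datetime import datetime, timedelta, timezone
from types import SimpleNamespace

record = SimpleNamespace(renew_time=datetime.now(timezone.utc) + timedelta(seconds=30))
print(seconds_until_renew(record))  # ~30.0, and negative once the renew time has passed
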
def _database_create(self, engine, database):
"""Create a new database and return a new url representing
a connection to the new database
"""
logger.info('Creating database "%s" in "%s"', database, engine)
database_operation(engine, 'create', database)
url = copy(engine.url)
url.database = database
    return str(url)

def create_user(self, user, table_privileges=['ALL PRIVILEGES'],
schema_privileges=['ALL PRIVILEGES'],
row_limit=50000):
con = self.connection or self._connect()
cur = con.cursor()
cur.execute('CREATE SCHEMA {0};'.format(user))
# self._initialize(schema=schema_name)
password = pwgen(8)
cur.execute(
"CREATE USER {user} with PASSWORD '{password}';"
.format(user=user, password=password))
""" Grant SELECT on public schema """
cur.execute('GRANT USAGE ON SCHEMA public TO {user};'
.format(user=user))
cur.execute(
'GRANT SELECT ON ALL TABLES IN SCHEMA public TO {user};'
.format(user=user))
cur.execute(
'ALTER ROLE {user} SET search_path TO {user};'
.format(user=user))
self.stdout.write(
'CREATED USER {user} WITH PASSWORD {password}\n'
.format(user=user, password=password))
""" initialize user-schema """
old_schema = self.schema
self.initialized = False
self.schema = user
self._initialize(con)
""" Privileges on user-schema"""
cur.execute(
'GRANT {privileges} ON SCHEMA {user} TO {user};'
.format(privileges=', '.join(schema_privileges), user=user))
cur.execute(
'GRANT {privileges} ON ALL TABLES IN SCHEMA {user} TO {user};'
.format(privileges=', '.join(table_privileges), user=user))
cur.execute(
'GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA {user} TO {user};'
.format(user=user))
con.commit()
if row_limit:
""" Limit number of rows"""
for table in ['reaction', 'publication', 'systems',
'reaction_system', 'publication_system',
'information']:
table_factor = 1
if table in ['reaction_system', 'publication_system']:
table_factor = 15
elif table == 'publication':
table_factor = 1 / 100
elif table == 'information':
table_factor = 1 / 100
trigger_function = """
CREATE OR REPLACE FUNCTION
check_number_of_rows_{user}_{table}()
RETURNS TRIGGER AS
$BODY$
BEGIN
IF (SELECT count(*) FROM {user}.{table}) > {row_limit}
THEN
RAISE EXCEPTION
'INSERT statement exceeding maximum number of rows';
END IF;
RETURN NEW;
END;
$BODY$
LANGUAGE plpgsql""".format(user=user, table=table,
row_limit=row_limit * table_factor)
cur.execute(trigger_function)
trigger = """
DROP TRIGGER IF EXISTS tr_check_number_of_rows_{user}_{table}
on {user}.{table};
CREATE TRIGGER tr_check_number_of_rows_{user}_{table}
BEFORE INSERT ON {user}.systems
FOR EACH ROW EXECUTE PROCEDURE check_number_of_rows_{user}_{table}();
""".format(user=user, table=table)
cur.execute(trigger)
self.schema = old_schema
set_schema = 'ALTER ROLE {user} SET search_path TO {schema};'\
.format(user=self.user, schema=self.schema)
cur.execute(set_schema)
if self.connection is None:
con.commit()
con.close()
    return password

def is_fifo(self):
"""
Whether this path is a FIFO.
"""
try:
return S_ISFIFO(self.stat().st_mode)
except OSError as e:
if e.errno not in (ENOENT, ENOTDIR):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
        return False

def unload(self):
'''unload module'''
self.mpstate.console.close()
    self.mpstate.console = textconsole.SimpleConsole()

def hash_codes(text, hashes):
"""Hashes inline code tags.
    Code tags can begin with an arbitrary number of back-ticks, as long
    as the closing tag contains the same number of back-ticks. This allows
    back-ticks to be used within the code tag.
HTML entities (&, <, >, ", ') are automatically escaped inside the
code tag.
"""
def sub(match):
code = '<code>{}</code>'.format(escape(match.group(2)))
hashed = hash_text(code, 'code')
hashes[hashed] = code
return hashed
    return re_code.sub(sub, text)

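re_code and hash_text are defined elsewhere in the module; a plausible minimal version, assuming the regex pairs an arbitrary run of back-ticks with an equal closing run, would be:

import re
from hashlib import sha1
from html import escape

re_code = re.compile(r'(`+)(.*?)\1', re.S)  # assumed pattern: matching back-tick runs

def hash_text(text, prefix):
    # Stable placeholder token that later passes can substitute back.
    return '{}-{}'.format(prefix, sha1(text.encode('utf-8')).hexdigest())

hashes = {}
print(hash_codes('Use ``a & b`` here', hashes))  # 'Use code-<digest> here'; & is escaped in the stored tag
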
def einsum_vecmul_index(gate_indices, number_of_qubits):
"""Return the index string for Numpy.eignsum matrix-vector multiplication.
The returned indices are to perform a matrix multiplication A.v where
the matrix A is an M-qubit matrix, vector v is an N-qubit vector, and
M <= N, and identity matrices are implied on the subsystems where A has no
support on v.
Args:
gate_indices (list[int]): the indices of the right matrix subsystems
to contract with the left matrix.
number_of_qubits (int): the total number of qubits for the right matrix.
Returns:
        str: An index string for the numpy.einsum function.
"""
mat_l, mat_r, tens_lin, tens_lout = _einsum_matmul_index_helper(gate_indices,
number_of_qubits)
# Combine indices into matrix multiplication string format
# for numpy.einsum function
return "{mat_l}{mat_r}, ".format(mat_l=mat_l, mat_r=mat_r) + \
"{tens_lin}->{tens_lout}".format(tens_lin=tens_lin,
                                            tens_lout=tens_lout)

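_einsum_matmul_index_helper is internal; the underlying idea can be checked directly with numpy. A sketch applying a one-qubit gate to qubit 0 of a two-qubit state vector, with the identity implied on the other subsystem:

import numpy as np

x_gate = np.array([[0.0, 1.0], [1.0, 0.0]])   # Pauli-X
psi = np.zeros(4)
psi[0] = 1.0                                  # |00>
psi_t = psi.reshape(2, 2)                     # axes: (qubit 1, qubit 0)

# 'ab,cb->ca' contracts the gate's second index with the qubit-0 axis only;
# the qubit-1 axis passes through untouched (an implied identity).
out = np.einsum('ab,cb->ca', x_gate, psi_t).reshape(4)
print(out)                                    # [0. 1. 0. 0.] == |01>
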
def send_connection_request(self):
"""
Sends a ConnectionRequest to the iDigi server using the credentials
established with the id of the monitor as defined in the monitor
member.
"""
try:
self.log.info("Sending ConnectionRequest for Monitor %s."
% self.monitor_id)
# Send connection request and perform a receive to ensure
# request is authenticated.
# Protocol Version = 1.
payload = struct.pack('!H', 0x01)
# Username Length.
payload += struct.pack('!H', len(self.client.username))
# Username.
payload += six.b(self.client.username)
# Password Length.
payload += struct.pack('!H', len(self.client.password))
# Password.
payload += six.b(self.client.password)
# Monitor ID.
payload += struct.pack('!L', int(self.monitor_id))
# Header 6 Bytes : Type [2 bytes] & Length [4 Bytes]
# ConnectionRequest is Type 0x01.
data = struct.pack("!HL", CONNECTION_REQUEST, len(payload))
# The full payload.
data += payload
# Send Connection Request.
self.socket.send(data)
# Set a 60 second blocking on recv, if we don't get any data
# within 60 seconds, timeout which will throw an exception.
self.socket.settimeout(60)
# Should receive 10 bytes with ConnectionResponse.
response = self.socket.recv(10)
# Make socket blocking.
self.socket.settimeout(0)
if len(response) != 10:
raise PushException("Length of Connection Request Response "
"(%d) is not 10." % len(response))
# Type
response_type = int(struct.unpack("!H", response[0:2])[0])
if response_type != CONNECTION_RESPONSE:
raise PushException(
"Connection Response Type (%d) is not "
"ConnectionResponse Type (%d)." % (response_type, CONNECTION_RESPONSE))
status_code = struct.unpack("!H", response[6:8])[0]
self.log.info("Got ConnectionResponse for Monitor %s. Status %s."
% (self.monitor_id, status_code))
if status_code != STATUS_OK:
raise PushException("Connection Response Status Code (%d) is "
"not STATUS_OK (%d)." % (status_code, STATUS_OK))
except Exception as exception:
# TODO(posborne): This is bad! It isn't necessarily a socket exception!
# Likely a socket exception, close it and raise an exception.
self.socket.close()
self.socket = None
        raise exception

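The framing is plain big-endian struct packing, so the 6-byte header round-trips like this (values illustrative):

import struct

CONNECTION_REQUEST = 0x01
payload = struct.pack('!H', 0x01) + b'...'                  # stand-in body
header = struct.pack('!HL', CONNECTION_REQUEST, len(payload))

msg_type, length = struct.unpack('!HL', header)
print(msg_type, length)                                     # 1, len(payload)
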
def epanechnikovKernel(x,ref_x,h=1.0):
'''
The Epanechnikov kernel.
Parameters
----------
x : np.array
Values at which to evaluate the kernel
    ref_x : float
The reference point
h : float
Kernel bandwidth
Returns
-------
out : np.array
Kernel values at each value of x
'''
u = (x-ref_x)/h # Normalize distance by bandwidth
these = np.abs(u) <= 1.0 # Kernel = 0 outside [-1,1]
out = np.zeros_like(x) # Initialize kernel output
out[these] = 0.75*(1.0-u[these]**2.0) # Evaluate kernel
    return out

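Example evaluation on a grid; the kernel's support is |x - ref_x| <= h and it peaks at 0.75 at the reference point (this implementation does not divide by h):

import numpy as np

grid = np.linspace(-3.0, 3.0, 7)
print(epanechnikovKernel(grid, ref_x=0.0, h=2.0))
# [0. 0. 0.5625 0.75 0.5625 0. 0.]
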
def list_notebooks(self):
"""List all notebooks in the notebook dir.
This returns a list of dicts of the form::
dict(notebook_id=notebook,name=name)
"""
names = glob.glob(os.path.join(self.notebook_dir,
'*' + self.filename_ext))
names = [os.path.splitext(os.path.basename(name))[0]
for name in names]
data = []
for name in names:
if name not in self.rev_mapping:
notebook_id = self.new_notebook_id(name)
else:
notebook_id = self.rev_mapping[name]
data.append(dict(notebook_id=notebook_id,name=name))
data = sorted(data, key=lambda item: item['name'])
    return data

def wavelengthToRGB(wavelength):
    gamma = 0.80
    intensityMax = 255
    # Taken from Earl F. Glynn's web page:
    # <a href="http://www.efg2.com/Lab/ScienceAndEngineering/Spectra.htm">Spectra Lab Report</a>
factor = None
r = None
g = None
b = None
if((wavelength >= 380) and (wavelength<440)):
r = -(wavelength - 440) / (440.0 - 380.0)
g = 0.0
b = 1.0
elif((wavelength >= 440) and (wavelength<490)):
r = 0.0
g = (wavelength - 440) / (490.0 - 440.0)
b = 1.0
elif((wavelength >= 490) and (wavelength<510)):
r = 0.0
g = 1.0
b = -(wavelength - 510) / (510.0 - 490.0)
elif((wavelength >= 510) and (wavelength<580)):
r = (wavelength - 510) / (580.0 - 510.0)
g = 1.0
b = 0.0
elif((wavelength >= 580) and (wavelength<645)):
r = 1.0
g = -(wavelength - 645) / (645.0 - 580.0)
b = 0.0
elif((wavelength >= 645) and (wavelength<781)):
r = 1.0
g = 0.0
b = 0.0
else:
r = 0.0
g = 0.0
b = 0.0
# Let the intensity fall off near the vision limits
if((wavelength >= 380) and (wavelength<420)):
factor = 0.3 + 0.7*(wavelength - 380) / (420.0 - 380.0)
elif((wavelength >= 420) and (wavelength<701)):
factor = 1.0
elif((wavelength >= 701) and (wavelength<781)):
factor = 0.3 + 0.7*(780 - wavelength) / (780.0 - 700.0)
else:
factor = 0.0
# Don't want 0^x = 1 for x != 0
r = int(round(intensityMax * pow(r * factor, gamma)))
g = int(round(intensityMax * pow(g * factor, gamma)))
b = int(round(intensityMax * pow(b * factor, gamma)))
    return (r, g, b)

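Sampling a few wavelengths (in nm) shows the expected hues:

for nm in (450, 520, 580, 650):
    print(nm, wavelengthToRGB(nm))
# 450 -> blue, 520 -> green, 580 -> yellow, 650 -> red
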
def send(self, sender, **kwargs):
"""Internal. Call connected functions."""
if id(self) not in _alleged_receivers:
return
for func in _alleged_receivers[id(self)]:
        func(sender, **kwargs)

def bodvar(body, item, dim):
"""
Deprecated: This routine has been superseded by :func:`bodvcd` and
:func:`bodvrd`. This routine is supported for purposes of backward
compatibility only.
Return the values of some item for any body in the kernel pool.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bodvar_c.html
:param body: ID code of body.
:type body: int
:param item:
Item for which values are desired,
("RADII", "NUT_PREC_ANGLES", etc.)
:type item: str
:param dim: Number of values returned.
:type dim: int
:return: values
:rtype: Array of floats
"""
body = ctypes.c_int(body)
dim = ctypes.c_int(dim)
item = stypes.stringToCharP(item)
values = stypes.emptyDoubleVector(dim.value)
libspice.bodvar_c(body, item, ctypes.byref(dim), values)
    return stypes.cVectorToPython(values)

def _annotate(reads, mirbase_ref, precursors):
"""
Using SAM/BAM coordinates, mismatches and realign to annotate isomiRs
"""
for r in reads:
for p in reads[r].precursors:
start = reads[r].precursors[p].start + 1 # convert to 1base
end = start + len(reads[r].sequence)
for mature in mirbase_ref[p]:
mi = mirbase_ref[p][mature]
is_iso = _coord(reads[r].sequence, start, mi, precursors[p], reads[r].precursors[p])
logger.debug(("{r} {p} {start} {is_iso} {mature} {mi} {mature_s}").format(s=reads[r].sequence, mature_s=precursors[p][mi[0]-1:mi[1]], **locals()))
if is_iso:
reads[r].precursors[p].mirna = mature
break
    return reads

def countRandomBitFrequencies(numTerms = 100000, percentSparsity = 0.01):
"""Create a uniformly random counts matrix through sampling."""
# Accumulate counts by inplace-adding sparse matrices
counts = SparseMatrix()
size = 128*128
counts.resize(1, size)
# Pre-allocate buffer sparse matrix
sparseBitmap = SparseMatrix()
sparseBitmap.resize(1, size)
random.seed(42)
# Accumulate counts for each bit for each word
numWords=0
for term in xrange(numTerms):
bitmap = random.sample(xrange(size), int(size*percentSparsity))
bitmap.sort()
sparseBitmap.setRowFromSparse(0, bitmap, [1]*len(bitmap))
counts += sparseBitmap
numWords += 1
# Compute normalized version of counts as a separate matrix
frequencies = SparseMatrix()
frequencies.resize(1, size)
frequencies.copy(counts)
frequencies.divide(float(numWords))
# Wrap up by printing some statistics and then saving the normalized version
printFrequencyStatistics(counts, frequencies, numWords, size)
frequencyFilename = "bit_frequencies_random.pkl"
print "Saving frequency matrix in",frequencyFilename
with open(frequencyFilename, "wb") as frequencyPickleFile:
pickle.dump(frequencies, frequencyPickleFile)
  return counts

def get_hoisted(dct, child_name):
"""Pulls all of a child's keys up to the parent, with the names unchanged."""
child = dct[child_name]
del dct[child_name]
dct.update(child)
    return dct

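Note the parent dict is mutated in place, and a hoisted key silently overwrites an existing parent key of the same name:

cfg = {'name': 'svc', 'options': {'retries': 3, 'timeout': 10}}
print(get_hoisted(cfg, 'options'))
# {'name': 'svc', 'retries': 3, 'timeout': 10}
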
def precompute_edge_matrices(adjacency, hparams):
"""Precompute the a_in and a_out tensors.
  (we don't want to add to the graph every time _fprop is called)
Args:
adjacency: placeholder of real valued vectors of shape [B, L, L, E]
hparams: HParams object
Returns:
    edge_matrices: [batch, L * D, L * D] the dense matrix for message passing,
    viewed as a block matrix with (L, L) blocks of size (D, D). Each block is a
    function of the edge vector of the adjacency matrix at that spot.
"""
batch_size, num_nodes, _, edge_dim = common_layers.shape_list(adjacency)
# build the edge_network for incoming edges
with tf.variable_scope("edge_network"):
x = tf.reshape(
adjacency, [batch_size * num_nodes * num_nodes, edge_dim],
name="adj_reshape_in")
for ip_layer in range(hparams.edge_network_layers):
name = "edge_network_layer_%d"%ip_layer
x = tf.layers.dense(common_layers.layer_preprocess(x, hparams),
hparams.edge_network_hidden_size,
activation=tf.nn.relu,
name=name)
x = tf.layers.dense(common_layers.layer_preprocess(x, hparams),
hparams.hidden_size**2,
activation=None,
name="edge_network_output")
# x = [batch * l * l, d *d]
edge_matrices_flat = tf.reshape(x, [batch_size, num_nodes,
num_nodes, hparams.hidden_size,
hparams.hidden_size])
# reshape to [batch, l * d, l *d]
edge_matrices = tf.reshape(
tf.transpose(edge_matrices_flat, [0, 1, 3, 2, 4]), [
-1, num_nodes * hparams.hidden_size,
num_nodes * hparams.hidden_size
],
name="edge_matrices")
  return edge_matrices

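The final transpose/reshape is the standard trick for flattening per-edge (D, D) blocks into one (L*D, L*D) block matrix; a small numpy check of just that step (tiny stand-in shapes, not the TF graph):

import numpy as np

L, D = 3, 2
blocks = np.random.rand(1, L, L, D, D)         # one (D, D) block per node pair (i, j)
dense = blocks.transpose(0, 1, 3, 2, 4).reshape(1, L * D, L * D)

# Block (i, j) of the dense matrix sits at rows i*D:(i+1)*D, cols j*D:(j+1)*D.
i, j = 2, 1
assert np.allclose(dense[0, i*D:(i+1)*D, j*D:(j+1)*D], blocks[0, i, j])
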
def absolute_error(y, y_pred):
"""Calculates the sum of the differences
between target and prediction.
Parameters:
-----------
y : vector, shape (n_samples,)
The target values.
y_pred : vector, shape (n_samples,)
The predicted values.
Returns:
--------
error : float number, sum of the differences
between target and prediction
"""
y, y_pred = convert_assert(y, y_pred)
    return np.sum(y - y_pred)

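convert_assert is an external helper; the arithmetic itself, shown directly with numpy, makes the naming caveat visible: this is a signed sum of residuals, so errors of opposite sign cancel (unlike a true L1/absolute error):

import numpy as np

y, y_pred = np.array([1.0, 2.0, 3.0]), np.array([2.0, 2.0, 2.0])
print(np.sum(y - y_pred))           # 0.0 -- cancellation
print(np.sum(np.abs(y - y_pred)))   # 2.0 -- what an elementwise absolute error would give
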
def brozzler_worker(argv=None):
'''
    Main entry point for the brozzler worker: gets sites and pages to brozzle
    from rethinkdb and brozzles them.
'''
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
formatter_class=BetterArgumentDefaultsHelpFormatter)
add_rethinkdb_options(arg_parser)
arg_parser.add_argument(
'-e', '--chrome-exe', dest='chrome_exe',
default=suggest_default_chrome_exe(),
help='executable to use to invoke chrome')
arg_parser.add_argument(
'-n', '--max-browsers', dest='max_browsers', default='1',
help='max number of chrome instances simultaneously browsing pages')
arg_parser.add_argument(
'--proxy', dest='proxy', default=None, help='http proxy')
arg_parser.add_argument(
'--warcprox-auto', dest='warcprox_auto', action='store_true',
help=(
'when needed, choose an available instance of warcprox from '
'the rethinkdb service registry'))
arg_parser.add_argument(
'--skip-extract-outlinks', dest='skip_extract_outlinks',
action='store_true', help=argparse.SUPPRESS)
arg_parser.add_argument(
'--skip-visit-hashtags', dest='skip_visit_hashtags',
action='store_true', help=argparse.SUPPRESS)
arg_parser.add_argument(
'--skip-youtube-dl', dest='skip_youtube_dl',
action='store_true', help=argparse.SUPPRESS)
add_common_options(arg_parser, argv)
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
brozzler.chrome.check_version(args.chrome_exe)
def dump_state(signum, frame):
signal.signal(signal.SIGQUIT, signal.SIG_IGN)
try:
state_strs = []
frames = sys._current_frames()
threads = {th.ident: th for th in threading.enumerate()}
for ident in frames:
if threads[ident]:
state_strs.append(str(threads[ident]))
else:
state_strs.append('<???:thread:ident=%s>' % ident)
stack = traceback.format_stack(frames[ident])
state_strs.append(''.join(stack))
logging.info(
'dumping state (caught signal %s)\n%s' % (
signum, '\n'.join(state_strs)))
except BaseException as e:
logging.error('exception dumping state: %s' % e)
finally:
signal.signal(signal.SIGQUIT, dump_state)
rr = rethinker(args)
frontier = brozzler.RethinkDbFrontier(rr)
service_registry = doublethink.ServiceRegistry(rr)
worker = brozzler.worker.BrozzlerWorker(
frontier, service_registry, max_browsers=int(args.max_browsers),
chrome_exe=args.chrome_exe, proxy=args.proxy,
warcprox_auto=args.warcprox_auto,
skip_extract_outlinks=args.skip_extract_outlinks,
skip_visit_hashtags=args.skip_visit_hashtags,
skip_youtube_dl=args.skip_youtube_dl)
signal.signal(signal.SIGQUIT, dump_state)
signal.signal(signal.SIGTERM, lambda s,f: worker.stop())
signal.signal(signal.SIGINT, lambda s,f: worker.stop())
th = threading.Thread(target=worker.run, name='BrozzlerWorkerThread')
th.start()
th.join()
    logging.info('brozzler-worker is all done, exiting')
",",
"f",
":",
"worker",
".",
"stop",
"(",
")",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"lambda",
"s",
",",
"f",
":",
"worker",
".",
"stop",
"(",
")",
")",
"th",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"worker",
".",
"run",
",",
"name",
"=",
"'BrozzlerWorkerThread'",
")",
"th",
".",
"start",
"(",
")",
"th",
".",
"join",
"(",
")",
"logging",
".",
"info",
"(",
"'brozzler-worker is all done, exiting'",
")"
]
| 42.556962 | 18.126582 |
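A hedged invocation sketch for the entry point above, using only flags defined in this parser; the installed console-script name `brozzler-worker` is an assumption:

    # run up to 4 simultaneous chrome instances through an auto-discovered warcprox
    brozzler-worker -e /usr/bin/chromium-browser -n 4 --warcprox-auto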
def delimit(values, delimiter=', '):
"Returns a list of tokens interleaved with the delimiter."
toks = []
if not values:
return toks
if not isinstance(delimiter, (list, tuple)):
delimiter = [delimiter]
last = len(values) - 1
for i, value in enumerate(values):
toks.append(value)
if i < last:
toks.extend(delimiter)
return toks | [
"def",
"delimit",
"(",
"values",
",",
"delimiter",
"=",
"', '",
")",
":",
"toks",
"=",
"[",
"]",
"if",
"not",
"values",
":",
"return",
"toks",
"if",
"not",
"isinstance",
"(",
"delimiter",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"delimiter",
"=",
"[",
"delimiter",
"]",
"last",
"=",
"len",
"(",
"values",
")",
"-",
"1",
"for",
"i",
",",
"value",
"in",
"enumerate",
"(",
"values",
")",
":",
"toks",
".",
"append",
"(",
"value",
")",
"if",
"i",
"<",
"last",
":",
"toks",
".",
"extend",
"(",
"delimiter",
")",
"return",
"toks"
]
| 20.315789 | 22.842105 |
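A quick usage sketch for `delimit` above; the delimiter is interleaved between values but never appended after the last one, and a list delimiter is spliced in element by element:

    >>> delimit(['a', 'b', 'c'])
    ['a', ', ', 'b', ', ', 'c']
    >>> delimit(['a', 'b'], delimiter=['(', ')'])
    ['a', '(', ')', 'b']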
def get(self, digest, chunk_size=1024 * 128):
"""
Return the contents of a blob
:param digest: the hex digest of the blob to return
:param chunk_size: the size of the chunks returned on each iteration
:return: generator returning chunks of data
"""
return self.conn.client.blob_get(self.container_name, digest,
chunk_size) | [
"def",
"get",
"(",
"self",
",",
"digest",
",",
"chunk_size",
"=",
"1024",
"*",
"128",
")",
":",
"return",
"self",
".",
"conn",
".",
"client",
".",
"blob_get",
"(",
"self",
".",
"container_name",
",",
"digest",
",",
"chunk_size",
")"
]
| 41.1 | 15.5 |
def getRandomWithMods(inputSpace, maxChanges):
""" Returns a random selection from the inputSpace with randomly modified
up to maxChanges number of bits.
"""
size = len(inputSpace)
ind = np.random.random_integers(0, size-1, 1)[0]
value = copy.deepcopy(inputSpace[ind])
if maxChanges == 0:
return value
return modifyBits(value, maxChanges) | [
"def",
"getRandomWithMods",
"(",
"inputSpace",
",",
"maxChanges",
")",
":",
"size",
"=",
"len",
"(",
"inputSpace",
")",
"ind",
"=",
"np",
".",
"random",
".",
"random_integers",
"(",
"0",
",",
"size",
"-",
"1",
",",
"1",
")",
"[",
"0",
"]",
"value",
"=",
"copy",
".",
"deepcopy",
"(",
"inputSpace",
"[",
"ind",
"]",
")",
"if",
"maxChanges",
"==",
"0",
":",
"return",
"value",
"return",
"modifyBits",
"(",
"value",
",",
"maxChanges",
")"
]
| 26.846154 | 15.615385 |
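A hedged call sketch; `modifyBits` is a helper defined elsewhere in the same module, and the binary input space below is an illustrative assumption:

    import numpy as np
    space = [np.zeros(64), np.ones(64)]
    sample = getRandomWithMods(space, maxChanges=3)  # deep copy of a random row, up to 3 bits flipped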
def bibString(self, maxLength = 1000, WOSMode = False, restrictedOutput = False, niceID = True):
"""Makes a string giving the Record as a bibTex entry. If the Record is of a journal article (`PT J`) the bibtext type is set to `'article'`, otherwise it is set to `'misc'`. The ID of the entry is the WOS number and all the Record's fields are given as entries with their long names.
**Note** This is not meant to be used directly with LaTeX none of the special characters have been escaped and there are a large number of unnecessary fields provided. _niceID_ and _maxLength_ have been provided to make conversions easier.
    **Note** Record entries that are lists have their values separated with the string `' and '`
# Parameters
_maxLength_ : `optional [int]`
> default 1000, The max length for a continuous string. Most bibTex implementation only allow string to be up to 1000 characters ([source](https://www.cs.arizona.edu/~collberg/Teaching/07.231/BibTeX/bibtex.html)), this splits them up into substrings then uses the native string concatenation (the `'#'` character) to allow for longer strings
_WOSMode_ : `optional [bool]`
    > default `False`, if `True` the data produced will be unprocessed and use double curly braces. This is the style WOS produces bib files in and mostly matches that.
_restrictedOutput_ : `optional [bool]`
    > default `False`, if `True` the tags output will be limited to those found in `metaknowledge.commonRecordFields`
_niceID_ : `optional [bool]`
> default `True`, if `True` the ID used will be derived from the authors, publishing date and title, if `False` it will be the UT tag
# Returns
`str`
> The bibTex string of the Record
"""
keyEntries = []
if self.bad:
raise BadRecord("This record cannot be converted to a bibtex entry as the input was malformed.\nThe original line number (if any) is: {} and the original file is: '{}'".format(self._sourceLine, self._sourceFile))
if niceID:
if self.get('authorsFull'):
bibID = self['authorsFull'][0].title().replace(' ', '').replace(',', '').replace('.','')
else:
bibID = ''
if self.get('year', False):
bibID += '-' + str(self['year'])
if self.get('month', False):
bibID += '-' + str(self['month'])
if self.get('title', False):
tiSorted = sorted(self.get('title').split(' '), key = len)
bibID += '-' + tiSorted.pop().title()
while len(bibID) < 35 and len(tiSorted) > 0:
bibID += '-' + tiSorted.pop().title() #Title Case
if len(bibID) < 30:
bibID += str(self.id)
elif WOSMode:
bibID = 'ISI:{}'.format(self.id[4:])
else:
bibID = str(self.id)
keyEntries.append("author = {{{{{}}}}},".format(' and '.join(self.get('authorsFull', ['None']))))
if restrictedOutput:
tagsIter = ((k, self[k]) for k in commonRecordFields if k in self)
else:
tagsIter = self.items()
if WOSMode:
for tag, value in tagsIter:
if isinstance(value, list):
keyEntries.append("{} = {{{{{}}}}},".format(tag,'\n '.join((str(v) for v in value))))
else:
keyEntries.append("{} = {{{{{}}}}},".format(tag, value))
s = """@{0}{{ {1},\n{2}\n}}""".format('misc', bibID, '\n'.join(keyEntries))
else:
for tag, value in tagsIter:
keyEntries.append("{} = {},".format(tag, _bibFormatter(value, maxLength)))
s = """@{0}{{ {1},\n {2}\n}}""".format('misc', bibID, '\n '.join(keyEntries))
return s | [
"def",
"bibString",
"(",
"self",
",",
"maxLength",
"=",
"1000",
",",
"WOSMode",
"=",
"False",
",",
"restrictedOutput",
"=",
"False",
",",
"niceID",
"=",
"True",
")",
":",
"keyEntries",
"=",
"[",
"]",
"if",
"self",
".",
"bad",
":",
"raise",
"BadRecord",
"(",
"\"This record cannot be converted to a bibtex entry as the input was malformed.\\nThe original line number (if any) is: {} and the original file is: '{}'\"",
".",
"format",
"(",
"self",
".",
"_sourceLine",
",",
"self",
".",
"_sourceFile",
")",
")",
"if",
"niceID",
":",
"if",
"self",
".",
"get",
"(",
"'authorsFull'",
")",
":",
"bibID",
"=",
"self",
"[",
"'authorsFull'",
"]",
"[",
"0",
"]",
".",
"title",
"(",
")",
".",
"replace",
"(",
"' '",
",",
"''",
")",
".",
"replace",
"(",
"','",
",",
"''",
")",
".",
"replace",
"(",
"'.'",
",",
"''",
")",
"else",
":",
"bibID",
"=",
"''",
"if",
"self",
".",
"get",
"(",
"'year'",
",",
"False",
")",
":",
"bibID",
"+=",
"'-'",
"+",
"str",
"(",
"self",
"[",
"'year'",
"]",
")",
"if",
"self",
".",
"get",
"(",
"'month'",
",",
"False",
")",
":",
"bibID",
"+=",
"'-'",
"+",
"str",
"(",
"self",
"[",
"'month'",
"]",
")",
"if",
"self",
".",
"get",
"(",
"'title'",
",",
"False",
")",
":",
"tiSorted",
"=",
"sorted",
"(",
"self",
".",
"get",
"(",
"'title'",
")",
".",
"split",
"(",
"' '",
")",
",",
"key",
"=",
"len",
")",
"bibID",
"+=",
"'-'",
"+",
"tiSorted",
".",
"pop",
"(",
")",
".",
"title",
"(",
")",
"while",
"len",
"(",
"bibID",
")",
"<",
"35",
"and",
"len",
"(",
"tiSorted",
")",
">",
"0",
":",
"bibID",
"+=",
"'-'",
"+",
"tiSorted",
".",
"pop",
"(",
")",
".",
"title",
"(",
")",
"#Title Case",
"if",
"len",
"(",
"bibID",
")",
"<",
"30",
":",
"bibID",
"+=",
"str",
"(",
"self",
".",
"id",
")",
"elif",
"WOSMode",
":",
"bibID",
"=",
"'ISI:{}'",
".",
"format",
"(",
"self",
".",
"id",
"[",
"4",
":",
"]",
")",
"else",
":",
"bibID",
"=",
"str",
"(",
"self",
".",
"id",
")",
"keyEntries",
".",
"append",
"(",
"\"author = {{{{{}}}}},\"",
".",
"format",
"(",
"' and '",
".",
"join",
"(",
"self",
".",
"get",
"(",
"'authorsFull'",
",",
"[",
"'None'",
"]",
")",
")",
")",
")",
"if",
"restrictedOutput",
":",
"tagsIter",
"=",
"(",
"(",
"k",
",",
"self",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"commonRecordFields",
"if",
"k",
"in",
"self",
")",
"else",
":",
"tagsIter",
"=",
"self",
".",
"items",
"(",
")",
"if",
"WOSMode",
":",
"for",
"tag",
",",
"value",
"in",
"tagsIter",
":",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"keyEntries",
".",
"append",
"(",
"\"{} = {{{{{}}}}},\"",
".",
"format",
"(",
"tag",
",",
"'\\n '",
".",
"join",
"(",
"(",
"str",
"(",
"v",
")",
"for",
"v",
"in",
"value",
")",
")",
")",
")",
"else",
":",
"keyEntries",
".",
"append",
"(",
"\"{} = {{{{{}}}}},\"",
".",
"format",
"(",
"tag",
",",
"value",
")",
")",
"s",
"=",
"\"\"\"@{0}{{ {1},\\n{2}\\n}}\"\"\"",
".",
"format",
"(",
"'misc'",
",",
"bibID",
",",
"'\\n'",
".",
"join",
"(",
"keyEntries",
")",
")",
"else",
":",
"for",
"tag",
",",
"value",
"in",
"tagsIter",
":",
"keyEntries",
".",
"append",
"(",
"\"{} = {},\"",
".",
"format",
"(",
"tag",
",",
"_bibFormatter",
"(",
"value",
",",
"maxLength",
")",
")",
")",
"s",
"=",
"\"\"\"@{0}{{ {1},\\n {2}\\n}}\"\"\"",
".",
"format",
"(",
"'misc'",
",",
"bibID",
",",
"'\\n '",
".",
"join",
"(",
"keyEntries",
")",
")",
"return",
"s"
]
| 53.591549 | 35.492958 |
def predict(self, temp_type):
"""
Transpile the predict method.
Parameters
----------
:param temp_type : string
The kind of export type (embedded, separated, exported).
Returns
-------
:return : string
The transpiled predict method as string.
"""
# Exported:
if temp_type == 'exported':
temp = self.temp('exported.class')
return temp.format(class_name=self.class_name,
method_name=self.method_name)
# Separated:
temp_arr = self.temp('arr')
temp_arr_ = self.temp('arr[]')
temp_arr__ = self.temp('arr[][]')
temp_arr___ = self.temp('arr[][][]')
# Activations:
layers = list(self._get_activations())
layers = ', '.join(layers)
layers = temp_arr_.format(type='int', name='layers', values=layers)
# Coefficients (weights):
coefficients = []
for layer in self.coefficients:
layer_weights = []
for weights in layer:
weights = ', '.join([self.repr(w) for w in weights])
layer_weights.append(temp_arr.format(weights))
layer_weights = ', '.join(layer_weights)
coefficients.append(temp_arr.format(layer_weights))
coefficients = ', '.join(coefficients)
coefficients = temp_arr___.format(type='double',
name='weights',
values=coefficients)
# Intercepts (biases):
intercepts = list(self._get_intercepts())
intercepts = ', '.join(intercepts)
intercepts = temp_arr__.format(type='double',
name='bias',
values=intercepts)
temp_class = self.temp('separated.class')
file_name = '{}.js'.format(self.class_name.lower())
return temp_class.format(class_name=self.class_name,
method_name=self.method_name,
hidden_activation=self.hidden_activation,
output_activation=self.output_activation,
n_features=self.n_inputs,
weights=coefficients,
bias=intercepts,
layers=layers,
file_name=file_name) | [
"def",
"predict",
"(",
"self",
",",
"temp_type",
")",
":",
"# Exported:",
"if",
"temp_type",
"==",
"'exported'",
":",
"temp",
"=",
"self",
".",
"temp",
"(",
"'exported.class'",
")",
"return",
"temp",
".",
"format",
"(",
"class_name",
"=",
"self",
".",
"class_name",
",",
"method_name",
"=",
"self",
".",
"method_name",
")",
"# Separated:",
"temp_arr",
"=",
"self",
".",
"temp",
"(",
"'arr'",
")",
"temp_arr_",
"=",
"self",
".",
"temp",
"(",
"'arr[]'",
")",
"temp_arr__",
"=",
"self",
".",
"temp",
"(",
"'arr[][]'",
")",
"temp_arr___",
"=",
"self",
".",
"temp",
"(",
"'arr[][][]'",
")",
"# Activations:",
"layers",
"=",
"list",
"(",
"self",
".",
"_get_activations",
"(",
")",
")",
"layers",
"=",
"', '",
".",
"join",
"(",
"layers",
")",
"layers",
"=",
"temp_arr_",
".",
"format",
"(",
"type",
"=",
"'int'",
",",
"name",
"=",
"'layers'",
",",
"values",
"=",
"layers",
")",
"# Coefficients (weights):",
"coefficients",
"=",
"[",
"]",
"for",
"layer",
"in",
"self",
".",
"coefficients",
":",
"layer_weights",
"=",
"[",
"]",
"for",
"weights",
"in",
"layer",
":",
"weights",
"=",
"', '",
".",
"join",
"(",
"[",
"self",
".",
"repr",
"(",
"w",
")",
"for",
"w",
"in",
"weights",
"]",
")",
"layer_weights",
".",
"append",
"(",
"temp_arr",
".",
"format",
"(",
"weights",
")",
")",
"layer_weights",
"=",
"', '",
".",
"join",
"(",
"layer_weights",
")",
"coefficients",
".",
"append",
"(",
"temp_arr",
".",
"format",
"(",
"layer_weights",
")",
")",
"coefficients",
"=",
"', '",
".",
"join",
"(",
"coefficients",
")",
"coefficients",
"=",
"temp_arr___",
".",
"format",
"(",
"type",
"=",
"'double'",
",",
"name",
"=",
"'weights'",
",",
"values",
"=",
"coefficients",
")",
"# Intercepts (biases):",
"intercepts",
"=",
"list",
"(",
"self",
".",
"_get_intercepts",
"(",
")",
")",
"intercepts",
"=",
"', '",
".",
"join",
"(",
"intercepts",
")",
"intercepts",
"=",
"temp_arr__",
".",
"format",
"(",
"type",
"=",
"'double'",
",",
"name",
"=",
"'bias'",
",",
"values",
"=",
"intercepts",
")",
"temp_class",
"=",
"self",
".",
"temp",
"(",
"'separated.class'",
")",
"file_name",
"=",
"'{}.js'",
".",
"format",
"(",
"self",
".",
"class_name",
".",
"lower",
"(",
")",
")",
"return",
"temp_class",
".",
"format",
"(",
"class_name",
"=",
"self",
".",
"class_name",
",",
"method_name",
"=",
"self",
".",
"method_name",
",",
"hidden_activation",
"=",
"self",
".",
"hidden_activation",
",",
"output_activation",
"=",
"self",
".",
"output_activation",
",",
"n_features",
"=",
"self",
".",
"n_inputs",
",",
"weights",
"=",
"coefficients",
",",
"bias",
"=",
"intercepts",
",",
"layers",
"=",
"layers",
",",
"file_name",
"=",
"file_name",
")"
]
| 39.532258 | 16.532258 |
def stationary_distribution(self):
r""" Compute stationary distribution of hidden states if possible.
Raises
------
ValueError if the HMM is not stationary
"""
assert _tmatrix_disconnected.is_connected(self._Tij, strong=False), \
'No unique stationary distribution because transition matrix is not connected'
import msmtools.analysis as msmana
return msmana.stationary_distribution(self._Tij) | [
"def",
"stationary_distribution",
"(",
"self",
")",
":",
"assert",
"_tmatrix_disconnected",
".",
"is_connected",
"(",
"self",
".",
"_Tij",
",",
"strong",
"=",
"False",
")",
",",
"'No unique stationary distribution because transition matrix is not connected'",
"import",
"msmtools",
".",
"analysis",
"as",
"msmana",
"return",
"msmana",
".",
"stationary_distribution",
"(",
"self",
".",
"_Tij",
")"
]
| 38.25 | 20.833333 |
def fetch_captcha_store(self, name, value, attrs=None, generator=None):
"""
Fetches a new CaptchaStore
This has to be called inside render
"""
try:
reverse('captcha-image', args=('dummy',))
except NoReverseMatch:
raise ImproperlyConfigured('Make sure you\'ve included captcha.urls as explained in the INSTALLATION section on http://readthedocs.org/docs/django-simple-captcha/en/latest/usage.html#installation')
if settings.CAPTCHA_GET_FROM_POOL:
key = CaptchaStore.pick()
else:
key = CaptchaStore.generate_key(generator)
# these can be used by format_output and render
self._value = [key, u('')]
self._key = key
self.id_ = self.build_attrs(attrs).get('id', None) | [
"def",
"fetch_captcha_store",
"(",
"self",
",",
"name",
",",
"value",
",",
"attrs",
"=",
"None",
",",
"generator",
"=",
"None",
")",
":",
"try",
":",
"reverse",
"(",
"'captcha-image'",
",",
"args",
"=",
"(",
"'dummy'",
",",
")",
")",
"except",
"NoReverseMatch",
":",
"raise",
"ImproperlyConfigured",
"(",
"'Make sure you\\'ve included captcha.urls as explained in the INSTALLATION section on http://readthedocs.org/docs/django-simple-captcha/en/latest/usage.html#installation'",
")",
"if",
"settings",
".",
"CAPTCHA_GET_FROM_POOL",
":",
"key",
"=",
"CaptchaStore",
".",
"pick",
"(",
")",
"else",
":",
"key",
"=",
"CaptchaStore",
".",
"generate_key",
"(",
"generator",
")",
"# these can be used by format_output and render",
"self",
".",
"_value",
"=",
"[",
"key",
",",
"u",
"(",
"''",
")",
"]",
"self",
".",
"_key",
"=",
"key",
"self",
".",
"id_",
"=",
"self",
".",
"build_attrs",
"(",
"attrs",
")",
".",
"get",
"(",
"'id'",
",",
"None",
")"
]
| 41.578947 | 23.263158 |
def dump(self, fp=sys.stdout, **kwargs):
""" Serialize this keymap as a JSON formatted stream to the *fp*.
Arguments:
fp: A ``.write()``-supporting file-like object to write the
generated JSON to (default is ``sys.stdout``).
**kwargs: Options to be passed into :func:`json.dumps`.
"""
fp.write(FILE_HEADER)
fp.write(self.to_json(**kwargs))
fp.write('\n') | [
"def",
"dump",
"(",
"self",
",",
"fp",
"=",
"sys",
".",
"stdout",
",",
"*",
"*",
"kwargs",
")",
":",
"fp",
".",
"write",
"(",
"FILE_HEADER",
")",
"fp",
".",
"write",
"(",
"self",
".",
"to_json",
"(",
"*",
"*",
"kwargs",
")",
")",
"fp",
".",
"write",
"(",
"'\\n'",
")"
]
| 39.363636 | 15.545455 |
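A minimal usage sketch, assuming a keymap object exposing this `dump` method; the `keymap` name is hypothetical:

    import io
    buf = io.StringIO()
    keymap.dump(buf, indent=2)  # writes FILE_HEADER, then the JSON produced by to_json(indent=2)
    print(buf.getvalue())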
def _next_lowest_integer(group_keys):
"""
returns the lowest available integer in a set of dict keys
"""
try: #TODO Replace with max default value when dropping compatibility with Python < 3.4
        largest_int = max([int(val) for val in group_keys if _is_int(val)])
    except:
        largest_int = 0
return largest_int + 1 | [
"def",
"_next_lowest_integer",
"(",
"group_keys",
")",
":",
"try",
":",
"#TODO Replace with max default value when dropping compatibility with Python < 3.4",
"largest_int",
"=",
"max",
"(",
"[",
"int",
"(",
"val",
")",
"for",
"val",
"in",
"group_keys",
"if",
"_is_int",
"(",
"val",
")",
"]",
")",
"except",
":",
"largest_int",
"=",
"0",
"return",
"largest_int",
"+",
"1"
]
| 37.555556 | 19.111111 |
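Two illustrative calls, assuming `_is_int` (a helper not shown here) accepts integer-like keys:

    >>> _next_lowest_integer({'1', '3', 'name'})  # non-integer keys are filtered out
    4
    >>> _next_lowest_integer(set())               # max() raises, so the fallback returns 1
    1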
def create_source(srcdir, preload_image, datapusher=False):
"""
Copy ckan source, datapusher source (optional), who.ini and schema.xml
from preload image into srcdir
"""
try:
docker.web_command(
command='/bin/cp -a /project/ckan /project_target/ckan',
rw={srcdir: '/project_target'},
image=preload_image)
if datapusher:
docker.web_command(
command='/bin/cp -a /project/datapusher /project_target/datapusher',
rw={srcdir: '/project_target'},
image=preload_image)
shutil.copy(
srcdir + '/ckan/ckan/config/who.ini',
srcdir)
shutil.copy(
srcdir + '/ckan/ckan/config/solr/schema.xml',
srcdir)
finally:
# fix srcdir permissions
docker.web_command(
command='/bin/chown -R --reference=/project /project',
rw={srcdir: '/project'},
) | [
"def",
"create_source",
"(",
"srcdir",
",",
"preload_image",
",",
"datapusher",
"=",
"False",
")",
":",
"try",
":",
"docker",
".",
"web_command",
"(",
"command",
"=",
"'/bin/cp -a /project/ckan /project_target/ckan'",
",",
"rw",
"=",
"{",
"srcdir",
":",
"'/project_target'",
"}",
",",
"image",
"=",
"preload_image",
")",
"if",
"datapusher",
":",
"docker",
".",
"web_command",
"(",
"command",
"=",
"'/bin/cp -a /project/datapusher /project_target/datapusher'",
",",
"rw",
"=",
"{",
"srcdir",
":",
"'/project_target'",
"}",
",",
"image",
"=",
"preload_image",
")",
"shutil",
".",
"copy",
"(",
"srcdir",
"+",
"'/ckan/ckan/config/who.ini'",
",",
"srcdir",
")",
"shutil",
".",
"copy",
"(",
"srcdir",
"+",
"'/ckan/ckan/config/solr/schema.xml'",
",",
"srcdir",
")",
"finally",
":",
"# fix srcdir permissions",
"docker",
".",
"web_command",
"(",
"command",
"=",
"'/bin/chown -R --reference=/project /project'",
",",
"rw",
"=",
"{",
"srcdir",
":",
"'/project'",
"}",
",",
")"
]
| 35.148148 | 16.259259 |
def create_small_thumbnail(self, token, item_id):
"""
Create a 100x100 small thumbnail for the given item. It is used for
preview purpose and displayed in the 'preview' and 'thumbnails'
sidebar sections.
:param token: A valid token for the user in question.
:type token: string
:param item_id: The item on which to set the thumbnail.
:type item_id: int | long
:returns: The item object (with the new thumbnail id) and the path
where the newly created thumbnail is stored.
:rtype: dict
"""
parameters = dict()
parameters['token'] = token
parameters['itemId'] = item_id
response = self.request(
'midas.thumbnailcreator.create.small.thumbnail', parameters)
return response | [
"def",
"create_small_thumbnail",
"(",
"self",
",",
"token",
",",
"item_id",
")",
":",
"parameters",
"=",
"dict",
"(",
")",
"parameters",
"[",
"'token'",
"]",
"=",
"token",
"parameters",
"[",
"'itemId'",
"]",
"=",
"item_id",
"response",
"=",
"self",
".",
"request",
"(",
"'midas.thumbnailcreator.create.small.thumbnail'",
",",
"parameters",
")",
"return",
"response"
]
| 40.15 | 17.05 |
def create(cls, object_type=None, object_uuid=None, **kwargs):
"""Create a new deposit identifier.
:param object_type: The object type (Default: ``None``)
:param object_uuid: The object UUID (Default: ``None``)
:param kwargs: It contains the pid value.
"""
assert 'pid_value' in kwargs
kwargs.setdefault('status', cls.default_status)
return super(DepositProvider, cls).create(
object_type=object_type, object_uuid=object_uuid, **kwargs) | [
"def",
"create",
"(",
"cls",
",",
"object_type",
"=",
"None",
",",
"object_uuid",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"'pid_value'",
"in",
"kwargs",
"kwargs",
".",
"setdefault",
"(",
"'status'",
",",
"cls",
".",
"default_status",
")",
"return",
"super",
"(",
"DepositProvider",
",",
"cls",
")",
".",
"create",
"(",
"object_type",
"=",
"object_type",
",",
"object_uuid",
"=",
"object_uuid",
",",
"*",
"*",
"kwargs",
")"
]
| 45.727273 | 16.090909 |
def bearing(self, format='numeric'):
"""Calculate bearing between locations in segments.
Args:
format (str): Format of the bearing string to return
Returns:
list of list of float: Groups of bearings between points in
segments
"""
bearings = []
for segment in self:
if len(segment) < 2:
bearings.append([])
else:
bearings.append(segment.bearing(format))
return bearings | [
"def",
"bearing",
"(",
"self",
",",
"format",
"=",
"'numeric'",
")",
":",
"bearings",
"=",
"[",
"]",
"for",
"segment",
"in",
"self",
":",
"if",
"len",
"(",
"segment",
")",
"<",
"2",
":",
"bearings",
".",
"append",
"(",
"[",
"]",
")",
"else",
":",
"bearings",
".",
"append",
"(",
"segment",
".",
"bearing",
"(",
"format",
")",
")",
"return",
"bearings"
]
| 29.764706 | 18 |
def rc2poly(kr, r0=None):
"""convert reflection coefficients to prediction filter polynomial
:param k: reflection coefficients
"""
# Initialize the recursion
from .levinson import levup
p = len(kr) #% p is the order of the prediction polynomial.
a = numpy.array([1, kr[0]]) #% a is a true polynomial.
e = numpy.zeros(len(kr))
if r0 is None:
e0 = 0
else:
e0 = r0
e[0] = e0 * (1. - numpy.conj(numpy.conjugate(kr[0])*kr[0]))
# Continue the recursion for k=2,3,...,p, where p is the order of the
# prediction polynomial.
for k in range(1, p):
[a, e[k]] = levup(a, kr[k], e[k-1])
efinal = e[-1]
return a, efinal | [
"def",
"rc2poly",
"(",
"kr",
",",
"r0",
"=",
"None",
")",
":",
"# Initialize the recursion",
"from",
".",
"levinson",
"import",
"levup",
"p",
"=",
"len",
"(",
"kr",
")",
"#% p is the order of the prediction polynomial.",
"a",
"=",
"numpy",
".",
"array",
"(",
"[",
"1",
",",
"kr",
"[",
"0",
"]",
"]",
")",
"#% a is a true polynomial.",
"e",
"=",
"numpy",
".",
"zeros",
"(",
"len",
"(",
"kr",
")",
")",
"if",
"r0",
"is",
"None",
":",
"e0",
"=",
"0",
"else",
":",
"e0",
"=",
"r0",
"e",
"[",
"0",
"]",
"=",
"e0",
"*",
"(",
"1.",
"-",
"numpy",
".",
"conj",
"(",
"numpy",
".",
"conjugate",
"(",
"kr",
"[",
"0",
"]",
")",
"*",
"kr",
"[",
"0",
"]",
")",
")",
"# Continue the recursion for k=2,3,...,p, where p is the order of the",
"# prediction polynomial.",
"for",
"k",
"in",
"range",
"(",
"1",
",",
"p",
")",
":",
"[",
"a",
",",
"e",
"[",
"k",
"]",
"]",
"=",
"levup",
"(",
"a",
",",
"kr",
"[",
"k",
"]",
",",
"e",
"[",
"k",
"-",
"1",
"]",
")",
"efinal",
"=",
"e",
"[",
"-",
"1",
"]",
"return",
"a",
",",
"efinal"
]
| 23.266667 | 24.833333 |
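A usage sketch under the conventions of the surrounding module (the relative import suggests the `spectrum` package, whose `levup` performs one Levinson step up); the coefficient values below are arbitrary:

    import numpy
    kr = numpy.array([0.5, -0.3, 0.2])  # reflection coefficients
    a, efinal = rc2poly(kr, r0=1.0)
    # a[0] == 1; a is the prediction polynomial, efinal the final prediction error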
def _build_query(self, table, tree, visitor):
""" Build a scan/query from a statement """
kwargs = {}
index = None
if tree.using:
index_name = kwargs["index"] = tree.using[1]
index = table.get_index(index_name)
if tree.where:
constraints = ConstraintExpression.from_where(tree.where)
possible_hash = constraints.possible_hash_fields()
possible_range = constraints.possible_range_fields()
if index is None:
# See if we can find an index to query on
indexes = table.get_matching_indexes(possible_hash, possible_range)
if not indexes:
action = "scan"
kwargs["filter"] = constraints.build(visitor)
kwargs["expr_values"] = visitor.expression_values
kwargs["alias"] = visitor.attribute_names
elif len(indexes) == 1:
index = indexes[0]
action = "query"
add_query_kwargs(kwargs, visitor, constraints, index)
else:
names = ", ".join([index.name for index in indexes])
raise SyntaxError(
"No index specified with USING <index>, "
"but multiple possibilities for query: "
"%s" % names
)
else:
if index.hash_key in possible_hash:
action = "query"
add_query_kwargs(kwargs, visitor, constraints, index)
else:
action = "scan"
if not index.scannable:
raise SyntaxError("Cannot scan local index %r" % index_name)
kwargs["filter"] = constraints.build(visitor)
kwargs["expr_values"] = visitor.expression_values
kwargs["alias"] = visitor.attribute_names
else:
action = "scan"
return [action, kwargs, index] | [
"def",
"_build_query",
"(",
"self",
",",
"table",
",",
"tree",
",",
"visitor",
")",
":",
"kwargs",
"=",
"{",
"}",
"index",
"=",
"None",
"if",
"tree",
".",
"using",
":",
"index_name",
"=",
"kwargs",
"[",
"\"index\"",
"]",
"=",
"tree",
".",
"using",
"[",
"1",
"]",
"index",
"=",
"table",
".",
"get_index",
"(",
"index_name",
")",
"if",
"tree",
".",
"where",
":",
"constraints",
"=",
"ConstraintExpression",
".",
"from_where",
"(",
"tree",
".",
"where",
")",
"possible_hash",
"=",
"constraints",
".",
"possible_hash_fields",
"(",
")",
"possible_range",
"=",
"constraints",
".",
"possible_range_fields",
"(",
")",
"if",
"index",
"is",
"None",
":",
"# See if we can find an index to query on",
"indexes",
"=",
"table",
".",
"get_matching_indexes",
"(",
"possible_hash",
",",
"possible_range",
")",
"if",
"not",
"indexes",
":",
"action",
"=",
"\"scan\"",
"kwargs",
"[",
"\"filter\"",
"]",
"=",
"constraints",
".",
"build",
"(",
"visitor",
")",
"kwargs",
"[",
"\"expr_values\"",
"]",
"=",
"visitor",
".",
"expression_values",
"kwargs",
"[",
"\"alias\"",
"]",
"=",
"visitor",
".",
"attribute_names",
"elif",
"len",
"(",
"indexes",
")",
"==",
"1",
":",
"index",
"=",
"indexes",
"[",
"0",
"]",
"action",
"=",
"\"query\"",
"add_query_kwargs",
"(",
"kwargs",
",",
"visitor",
",",
"constraints",
",",
"index",
")",
"else",
":",
"names",
"=",
"\", \"",
".",
"join",
"(",
"[",
"index",
".",
"name",
"for",
"index",
"in",
"indexes",
"]",
")",
"raise",
"SyntaxError",
"(",
"\"No index specified with USING <index>, \"",
"\"but multiple possibilities for query: \"",
"\"%s\"",
"%",
"names",
")",
"else",
":",
"if",
"index",
".",
"hash_key",
"in",
"possible_hash",
":",
"action",
"=",
"\"query\"",
"add_query_kwargs",
"(",
"kwargs",
",",
"visitor",
",",
"constraints",
",",
"index",
")",
"else",
":",
"action",
"=",
"\"scan\"",
"if",
"not",
"index",
".",
"scannable",
":",
"raise",
"SyntaxError",
"(",
"\"Cannot scan local index %r\"",
"%",
"index_name",
")",
"kwargs",
"[",
"\"filter\"",
"]",
"=",
"constraints",
".",
"build",
"(",
"visitor",
")",
"kwargs",
"[",
"\"expr_values\"",
"]",
"=",
"visitor",
".",
"expression_values",
"kwargs",
"[",
"\"alias\"",
"]",
"=",
"visitor",
".",
"attribute_names",
"else",
":",
"action",
"=",
"\"scan\"",
"return",
"[",
"action",
",",
"kwargs",
",",
"index",
"]"
]
| 46.431818 | 17.363636 |
def dnsrepr2names(x):
"""
Take as input a DNS encoded string (possibly compressed)
and returns a list of DNS names contained in it.
If provided string is already in printable format
(does not end with a null character, a one element list
is returned). Result is a list.
"""
res = []
cur = b""
if type(x) is str:
x = x.encode('ascii')
while x:
#l = ord(x[0])
l = x[0]
x = x[1:]
if l == 0:
if cur and cur[-1] == ord('.'):
cur = cur[:-1]
res.append(cur)
cur = b""
#if x and ord(x[0]) == 0: # single component
if x and x[0] == 0: # single component
x = x[1:]
continue
if l & 0xc0: # XXX TODO : work on that -- arno
raise Exception("DNS message can't be compressed at this point!")
else:
cur += x[:l]+b"."
x = x[l:]
return res | [
"def",
"dnsrepr2names",
"(",
"x",
")",
":",
"res",
"=",
"[",
"]",
"cur",
"=",
"b\"\"",
"if",
"type",
"(",
"x",
")",
"is",
"str",
":",
"x",
"=",
"x",
".",
"encode",
"(",
"'ascii'",
")",
"while",
"x",
":",
"#l = ord(x[0])",
"l",
"=",
"x",
"[",
"0",
"]",
"x",
"=",
"x",
"[",
"1",
":",
"]",
"if",
"l",
"==",
"0",
":",
"if",
"cur",
"and",
"cur",
"[",
"-",
"1",
"]",
"==",
"ord",
"(",
"'.'",
")",
":",
"cur",
"=",
"cur",
"[",
":",
"-",
"1",
"]",
"res",
".",
"append",
"(",
"cur",
")",
"cur",
"=",
"b\"\"",
"#if x and ord(x[0]) == 0: # single component",
"if",
"x",
"and",
"x",
"[",
"0",
"]",
"==",
"0",
":",
"# single component",
"x",
"=",
"x",
"[",
"1",
":",
"]",
"continue",
"if",
"l",
"&",
"0xc0",
":",
"# XXX TODO : work on that -- arno",
"raise",
"Exception",
"(",
"\"DNS message can't be compressed at this point!\"",
")",
"else",
":",
"cur",
"+=",
"x",
"[",
":",
"l",
"]",
"+",
"b\".\"",
"x",
"=",
"x",
"[",
"l",
":",
"]",
"return",
"res"
]
| 30.193548 | 17.032258 |
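A worked example for `dnsrepr2names`: each label is preceded by a length byte and the name is terminated by a null byte; a compression pointer (length byte with the 0xc0 bits set) raises an exception instead:

    >>> dnsrepr2names(b'\x03www\x07example\x03com\x00')
    [b'www.example.com']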
def generate(data, dimOrder, maxWindowSize, overlapPercent, transforms = []):
"""
Generates a set of sliding windows for the specified dataset.
"""
# Determine the dimensions of the input data
width = data.shape[dimOrder.index('w')]
height = data.shape[dimOrder.index('h')]
# Generate the windows
return generateForSize(width, height, dimOrder, maxWindowSize, overlapPercent, transforms) | [
"def",
"generate",
"(",
"data",
",",
"dimOrder",
",",
"maxWindowSize",
",",
"overlapPercent",
",",
"transforms",
"=",
"[",
"]",
")",
":",
"# Determine the dimensions of the input data",
"width",
"=",
"data",
".",
"shape",
"[",
"dimOrder",
".",
"index",
"(",
"'w'",
")",
"]",
"height",
"=",
"data",
".",
"shape",
"[",
"dimOrder",
".",
"index",
"(",
"'h'",
")",
"]",
"# Generate the windows",
"return",
"generateForSize",
"(",
"width",
",",
"height",
",",
"dimOrder",
",",
"maxWindowSize",
",",
"overlapPercent",
",",
"transforms",
")"
]
| 35.363636 | 19.181818 |
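A hedged usage sketch; `generateForSize` is assumed to be the companion function in the same module, and the 'hwc' dimension-order string is an illustrative choice:

    import numpy as np
    data = np.zeros((480, 640, 3))  # height x width x channels
    windows = generate(data, 'hwc', maxWindowSize=256, overlapPercent=0.5)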
def write_bus_data(self, file):
""" Writes bus data to an Excel spreadsheet.
"""
bus_sheet = self.book.add_sheet("Buses")
for i, bus in enumerate(self.case.buses):
for j, attr in enumerate(BUS_ATTRS):
bus_sheet.write(i, j, getattr(bus, attr)) | [
"def",
"write_bus_data",
"(",
"self",
",",
"file",
")",
":",
"bus_sheet",
"=",
"self",
".",
"book",
".",
"add_sheet",
"(",
"\"Buses\"",
")",
"for",
"i",
",",
"bus",
"in",
"enumerate",
"(",
"self",
".",
"case",
".",
"buses",
")",
":",
"for",
"j",
",",
"attr",
"in",
"enumerate",
"(",
"BUS_ATTRS",
")",
":",
"bus_sheet",
".",
"write",
"(",
"i",
",",
"j",
",",
"getattr",
"(",
"bus",
",",
"attr",
")",
")"
]
| 37 | 11.375 |
def _get_view_result(view, raw_result, **kwargs):
""" Get view results helper. """
if raw_result:
return view(**kwargs)
if kwargs:
return Result(view, **kwargs)
return view.result | [
"def",
"_get_view_result",
"(",
"view",
",",
"raw_result",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"raw_result",
":",
"return",
"view",
"(",
"*",
"*",
"kwargs",
")",
"if",
"kwargs",
":",
"return",
"Result",
"(",
"view",
",",
"*",
"*",
"kwargs",
")",
"return",
"view",
".",
"result"
]
| 28.625 | 13.875 |
def sentences(self, nb=3, ext_word_list=None):
"""
Generate an array of sentences
:example ['Lorem ipsum dolor sit amet.', 'Consectetur adipisicing eli.']
Keyword arguments:
:param nb: how many sentences to return
:param ext_word_list: a list of words you would like to have instead of
'Lorem ipsum'.
:rtype: list
"""
return [self.sentence(ext_word_list=ext_word_list)
for _ in range(0, nb)] | [
"def",
"sentences",
"(",
"self",
",",
"nb",
"=",
"3",
",",
"ext_word_list",
"=",
"None",
")",
":",
"return",
"[",
"self",
".",
"sentence",
"(",
"ext_word_list",
"=",
"ext_word_list",
")",
"for",
"_",
"in",
"range",
"(",
"0",
",",
"nb",
")",
"]"
]
| 34.285714 | 17.285714 |
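This signature matches the public Faker API, so a direct usage example applies (output is random):

    from faker import Faker
    fake = Faker()
    fake.sentences(nb=2)
    # e.g. ['Lorem ipsum dolor sit amet.', 'Consectetur adipisicing elit.']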
def readonce(self, size = None):
"""
Read from current buffer. If current buffer is empty, returns an empty string. You can use `prepareRead`
to read the next chunk of data.
This is not a coroutine method.
"""
if self.eof:
raise EOFError
if self.errored:
raise IOError('Stream is broken before EOF')
if size is not None and size < len(self.data) - self.pos:
ret = self.data[self.pos: self.pos + size]
self.pos += size
return ret
else:
ret = self.data[self.pos:]
self.pos = len(self.data)
if self.dataeof:
self.eof = True
return ret | [
"def",
"readonce",
"(",
"self",
",",
"size",
"=",
"None",
")",
":",
"if",
"self",
".",
"eof",
":",
"raise",
"EOFError",
"if",
"self",
".",
"errored",
":",
"raise",
"IOError",
"(",
"'Stream is broken before EOF'",
")",
"if",
"size",
"is",
"not",
"None",
"and",
"size",
"<",
"len",
"(",
"self",
".",
"data",
")",
"-",
"self",
".",
"pos",
":",
"ret",
"=",
"self",
".",
"data",
"[",
"self",
".",
"pos",
":",
"self",
".",
"pos",
"+",
"size",
"]",
"self",
".",
"pos",
"+=",
"size",
"return",
"ret",
"else",
":",
"ret",
"=",
"self",
".",
"data",
"[",
"self",
".",
"pos",
":",
"]",
"self",
".",
"pos",
"=",
"len",
"(",
"self",
".",
"data",
")",
"if",
"self",
".",
"dataeof",
":",
"self",
".",
"eof",
"=",
"True",
"return",
"ret"
]
| 34.095238 | 15.238095 |
def advanced_search(self, terms, relation=None, index=0, limit=25, **kwargs):
"""
Advanced search of track, album or artist.
See `Search section of Deezer API
<https://developers.deezer.com/api/search>`_ for search terms.
:returns: a list of :class:`~deezer.resources.Resource` objects.
>>> client.advanced_search({"artist": "Daft Punk", "album": "Homework"})
>>> client.advanced_search({"artist": "Daft Punk", "album": "Homework"},
... relation="track")
"""
assert isinstance(terms, dict), "terms must be a dict"
# terms are sorted (for consistent tests between Python < 3.7 and >= 3.7)
query = " ".join(sorted(['{}:"{}"'.format(k, v) for (k, v) in terms.items()]))
return self.get_object(
"search", relation=relation, q=query, index=index, limit=limit, **kwargs
) | [
"def",
"advanced_search",
"(",
"self",
",",
"terms",
",",
"relation",
"=",
"None",
",",
"index",
"=",
"0",
",",
"limit",
"=",
"25",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"isinstance",
"(",
"terms",
",",
"dict",
")",
",",
"\"terms must be a dict\"",
"# terms are sorted (for consistent tests between Python < 3.7 and >= 3.7)",
"query",
"=",
"\" \"",
".",
"join",
"(",
"sorted",
"(",
"[",
"'{}:\"{}\"'",
".",
"format",
"(",
"k",
",",
"v",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"terms",
".",
"items",
"(",
")",
"]",
")",
")",
"return",
"self",
".",
"get_object",
"(",
"\"search\"",
",",
"relation",
"=",
"relation",
",",
"q",
"=",
"query",
",",
"index",
"=",
"index",
",",
"limit",
"=",
"limit",
",",
"*",
"*",
"kwargs",
")"
]
| 47.210526 | 27.105263 |
def get_lines(self):
"""因为历史列表动态更新,需要刷新"""
self.lines = []
width = self.screen_width - 24
if self.state == 0:
            # Playlist
for index, i in enumerate(self.win.playlist):
line = i['title'] if len(i['title']) < width else i['title'][:width]
line = color_func(self.c['PLAYINGSONG']['title'])(line)
line = str(index) + ' ' + line
if i['like'] == 1:
line += self.LOVE
if i == self.win.playingsong:
line += self.play_tag
self.lines.append(line)
elif self.state == 1:
            # History list
for index, i in enumerate(self.win.history):
line = i['title'] if len(i['title']) < width else i['title'][:width]
line = color_func(self.c['PLAYINGSONG']['title'])(line)
line = i['time'][5:] + ' ' + line
if i['like'] == 1:
line += self.LOVE
if i == self.win.playingsong:
line += self.play_tag
self.lines.append(line)
elif self.state == 2:
            # Hearted (favorite) list
self.rate = []
for i in reversed(self.win.history):
if i['like'] == 1:
if i in self.rate:
self.rate.remove(i)
self.rate.insert(0, i)
else:
self.rate.insert(0, i)
for index, i in enumerate(self.rate):
line = i['title'] if len(i['title']) < width else i['title'][:width]
line = color_func(self.c['PLAYINGSONG']['title'])(line)
line = str(index) + ' ' + line + self.LOVE
if i == self.win.playingsong:
line += self.play_tag
self.lines.append(line)
self.lines.insert(0, self.subtitle[self.state]) | [
"def",
"get_lines",
"(",
"self",
")",
":",
"self",
".",
"lines",
"=",
"[",
"]",
"width",
"=",
"self",
".",
"screen_width",
"-",
"24",
"if",
"self",
".",
"state",
"==",
"0",
":",
"# 播放列表",
"for",
"index",
",",
"i",
"in",
"enumerate",
"(",
"self",
".",
"win",
".",
"playlist",
")",
":",
"line",
"=",
"i",
"[",
"'title'",
"]",
"if",
"len",
"(",
"i",
"[",
"'title'",
"]",
")",
"<",
"width",
"else",
"i",
"[",
"'title'",
"]",
"[",
":",
"width",
"]",
"line",
"=",
"color_func",
"(",
"self",
".",
"c",
"[",
"'PLAYINGSONG'",
"]",
"[",
"'title'",
"]",
")",
"(",
"line",
")",
"line",
"=",
"str",
"(",
"index",
")",
"+",
"' '",
"+",
"line",
"if",
"i",
"[",
"'like'",
"]",
"==",
"1",
":",
"line",
"+=",
"self",
".",
"LOVE",
"if",
"i",
"==",
"self",
".",
"win",
".",
"playingsong",
":",
"line",
"+=",
"self",
".",
"play_tag",
"self",
".",
"lines",
".",
"append",
"(",
"line",
")",
"elif",
"self",
".",
"state",
"==",
"1",
":",
"# 历史列表",
"for",
"index",
",",
"i",
"in",
"enumerate",
"(",
"self",
".",
"win",
".",
"history",
")",
":",
"line",
"=",
"i",
"[",
"'title'",
"]",
"if",
"len",
"(",
"i",
"[",
"'title'",
"]",
")",
"<",
"width",
"else",
"i",
"[",
"'title'",
"]",
"[",
":",
"width",
"]",
"line",
"=",
"color_func",
"(",
"self",
".",
"c",
"[",
"'PLAYINGSONG'",
"]",
"[",
"'title'",
"]",
")",
"(",
"line",
")",
"line",
"=",
"i",
"[",
"'time'",
"]",
"[",
"5",
":",
"]",
"+",
"' '",
"+",
"line",
"if",
"i",
"[",
"'like'",
"]",
"==",
"1",
":",
"line",
"+=",
"self",
".",
"LOVE",
"if",
"i",
"==",
"self",
".",
"win",
".",
"playingsong",
":",
"line",
"+=",
"self",
".",
"play_tag",
"self",
".",
"lines",
".",
"append",
"(",
"line",
")",
"elif",
"self",
".",
"state",
"==",
"2",
":",
"# 红心列表",
"self",
".",
"rate",
"=",
"[",
"]",
"for",
"i",
"in",
"reversed",
"(",
"self",
".",
"win",
".",
"history",
")",
":",
"if",
"i",
"[",
"'like'",
"]",
"==",
"1",
":",
"if",
"i",
"in",
"self",
".",
"rate",
":",
"self",
".",
"rate",
".",
"remove",
"(",
"i",
")",
"self",
".",
"rate",
".",
"insert",
"(",
"0",
",",
"i",
")",
"else",
":",
"self",
".",
"rate",
".",
"insert",
"(",
"0",
",",
"i",
")",
"for",
"index",
",",
"i",
"in",
"enumerate",
"(",
"self",
".",
"rate",
")",
":",
"line",
"=",
"i",
"[",
"'title'",
"]",
"if",
"len",
"(",
"i",
"[",
"'title'",
"]",
")",
"<",
"width",
"else",
"i",
"[",
"'title'",
"]",
"[",
":",
"width",
"]",
"line",
"=",
"color_func",
"(",
"self",
".",
"c",
"[",
"'PLAYINGSONG'",
"]",
"[",
"'title'",
"]",
")",
"(",
"line",
")",
"line",
"=",
"str",
"(",
"index",
")",
"+",
"' '",
"+",
"line",
"+",
"self",
".",
"LOVE",
"if",
"i",
"==",
"self",
".",
"win",
".",
"playingsong",
":",
"line",
"+=",
"self",
".",
"play_tag",
"self",
".",
"lines",
".",
"append",
"(",
"line",
")",
"self",
".",
"lines",
".",
"insert",
"(",
"0",
",",
"self",
".",
"subtitle",
"[",
"self",
".",
"state",
"]",
")"
]
| 42.377778 | 13.2 |
def search_lxc_bridges():
'''
Search which bridges are potentially available as LXC bridges
CLI Example:
.. code-block:: bash
salt '*' lxc.search_lxc_bridges
'''
bridges = __context__.get('lxc.bridges', None)
# either match not yet called or no bridges were found
# to handle the case where lxc was not installed on the first
# call
if not bridges:
bridges = set()
running_bridges = set()
bridges.add(DEFAULT_BR)
try:
output = __salt__['cmd.run_all']('brctl show')
for line in output['stdout'].splitlines()[1:]:
if not line.startswith(' '):
running_bridges.add(line.split()[0].strip())
except (SaltInvocationError, CommandExecutionError):
pass
for ifc, ip in six.iteritems(
__grains__.get('ip_interfaces', {})
):
if ifc in running_bridges:
bridges.add(ifc)
elif os.path.exists(
'/sys/devices/virtual/net/{0}/bridge'.format(ifc)
):
bridges.add(ifc)
bridges = list(bridges)
# if we found interfaces that have lxc in their names
# we filter them as being the potential lxc bridges
# we also try to default on br0 on other cases
def sort_bridges(a):
pref = 'z'
if 'lxc' in a:
pref = 'a'
elif 'br0' == a:
pref = 'c'
return '{0}_{1}'.format(pref, a)
bridges.sort(key=sort_bridges)
__context__['lxc.bridges'] = bridges
return bridges | [
"def",
"search_lxc_bridges",
"(",
")",
":",
"bridges",
"=",
"__context__",
".",
"get",
"(",
"'lxc.bridges'",
",",
"None",
")",
"# either match not yet called or no bridges were found",
"# to handle the case where lxc was not installed on the first",
"# call",
"if",
"not",
"bridges",
":",
"bridges",
"=",
"set",
"(",
")",
"running_bridges",
"=",
"set",
"(",
")",
"bridges",
".",
"add",
"(",
"DEFAULT_BR",
")",
"try",
":",
"output",
"=",
"__salt__",
"[",
"'cmd.run_all'",
"]",
"(",
"'brctl show'",
")",
"for",
"line",
"in",
"output",
"[",
"'stdout'",
"]",
".",
"splitlines",
"(",
")",
"[",
"1",
":",
"]",
":",
"if",
"not",
"line",
".",
"startswith",
"(",
"' '",
")",
":",
"running_bridges",
".",
"add",
"(",
"line",
".",
"split",
"(",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
")",
"except",
"(",
"SaltInvocationError",
",",
"CommandExecutionError",
")",
":",
"pass",
"for",
"ifc",
",",
"ip",
"in",
"six",
".",
"iteritems",
"(",
"__grains__",
".",
"get",
"(",
"'ip_interfaces'",
",",
"{",
"}",
")",
")",
":",
"if",
"ifc",
"in",
"running_bridges",
":",
"bridges",
".",
"add",
"(",
"ifc",
")",
"elif",
"os",
".",
"path",
".",
"exists",
"(",
"'/sys/devices/virtual/net/{0}/bridge'",
".",
"format",
"(",
"ifc",
")",
")",
":",
"bridges",
".",
"add",
"(",
"ifc",
")",
"bridges",
"=",
"list",
"(",
"bridges",
")",
"# if we found interfaces that have lxc in their names",
"# we filter them as being the potential lxc bridges",
"# we also try to default on br0 on other cases",
"def",
"sort_bridges",
"(",
"a",
")",
":",
"pref",
"=",
"'z'",
"if",
"'lxc'",
"in",
"a",
":",
"pref",
"=",
"'a'",
"elif",
"'br0'",
"==",
"a",
":",
"pref",
"=",
"'c'",
"return",
"'{0}_{1}'",
".",
"format",
"(",
"pref",
",",
"a",
")",
"bridges",
".",
"sort",
"(",
"key",
"=",
"sort_bridges",
")",
"__context__",
"[",
"'lxc.bridges'",
"]",
"=",
"bridges",
"return",
"bridges"
]
| 31.88 | 18.36 |
def execute_and_reset(
expr, params=None, scope=None, aggcontext=None, **kwargs
):
"""Execute an expression against data that are bound to it. If no data
are bound, raise an Exception.
Notes
-----
The difference between this function and :func:`~ibis.pandas.core.execute`
is that this function resets the index of the result, if the result has
an index.
Parameters
----------
expr : ibis.expr.types.Expr
The expression to execute
params : Mapping[ibis.expr.types.Expr, object]
The data that an unbound parameter in `expr` maps to
scope : Mapping[ibis.expr.operations.Node, object]
Additional scope, mapping ibis operations to data
aggcontext : Optional[ibis.pandas.aggcontext.AggregationContext]
An object indicating how to compute aggregations. For example,
a rolling mean needs to be computed differently than the mean of a
column.
kwargs : Dict[str, object]
Additional arguments that can potentially be used by individual node
execution
Returns
-------
result : Union[
pandas.Series, pandas.DataFrame, ibis.pandas.core.simple_types
]
Raises
------
ValueError
* If no data are bound to the input expression
"""
result = execute(
expr, params=params, scope=scope, aggcontext=aggcontext, **kwargs
)
if isinstance(result, pd.DataFrame):
schema = expr.schema()
df = result.reset_index()
return df.loc[:, schema.names]
elif isinstance(result, pd.Series):
return result.reset_index(drop=True)
return result | [
"def",
"execute_and_reset",
"(",
"expr",
",",
"params",
"=",
"None",
",",
"scope",
"=",
"None",
",",
"aggcontext",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"execute",
"(",
"expr",
",",
"params",
"=",
"params",
",",
"scope",
"=",
"scope",
",",
"aggcontext",
"=",
"aggcontext",
",",
"*",
"*",
"kwargs",
")",
"if",
"isinstance",
"(",
"result",
",",
"pd",
".",
"DataFrame",
")",
":",
"schema",
"=",
"expr",
".",
"schema",
"(",
")",
"df",
"=",
"result",
".",
"reset_index",
"(",
")",
"return",
"df",
".",
"loc",
"[",
":",
",",
"schema",
".",
"names",
"]",
"elif",
"isinstance",
"(",
"result",
",",
"pd",
".",
"Series",
")",
":",
"return",
"result",
".",
"reset_index",
"(",
"drop",
"=",
"True",
")",
"return",
"result"
]
| 32.44898 | 22.387755 |
def create(cls, name, enabled=True, superuser=True):
"""
Create a new API Client. Once client is created,
you can create a new password by::
>>> client = ApiClient.create('myclient')
>>> print(client)
ApiClient(name=myclient)
>>> client.change_password('mynewpassword')
:param str name: name of client
:param bool enabled: enable client
:param bool superuser: is superuser account
:raises CreateElementFailed: failure creating element with reason
:return: instance with meta
:rtype: ApiClient
"""
json = {
'enabled': enabled,
'name': name,
'superuser': superuser}
return ElementCreator(cls, json) | [
"def",
"create",
"(",
"cls",
",",
"name",
",",
"enabled",
"=",
"True",
",",
"superuser",
"=",
"True",
")",
":",
"json",
"=",
"{",
"'enabled'",
":",
"enabled",
",",
"'name'",
":",
"name",
",",
"'superuser'",
":",
"superuser",
"}",
"return",
"ElementCreator",
"(",
"cls",
",",
"json",
")"
]
| 32.913043 | 13.608696 |
def int_to_date(date):
"""
Convert an int of form yyyymmdd to a python date object.
"""
year = date // 10**4
month = date % 10**4 // 10**2
day = date % 10**2
return datetime.date(year, month, day) | [
"def",
"int_to_date",
"(",
"date",
")",
":",
"year",
"=",
"date",
"//",
"10",
"**",
"4",
"month",
"=",
"date",
"%",
"10",
"**",
"4",
"//",
"10",
"**",
"2",
"day",
"=",
"date",
"%",
"10",
"**",
"2",
"return",
"datetime",
".",
"date",
"(",
"year",
",",
"month",
",",
"day",
")"
]
| 21.7 | 16.1 |
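A quick check of the yyyymmdd convention:

    >>> int_to_date(20240315)
    datetime.date(2024, 3, 15)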
def read(self):
"Connect to the feedback service and read all data."
log.msg('APNSService read (connecting)')
try:
server, port = ((FEEDBACK_SERVER_SANDBOX_HOSTNAME
if self.environment == 'sandbox'
else FEEDBACK_SERVER_HOSTNAME), FEEDBACK_SERVER_PORT)
factory = self.feedbackProtocolFactory()
context = self.getContextFactory()
reactor.connectSSL(server, port, factory, context)
factory.deferred.addErrback(log_errback('apns-feedback-read'))
timeout = reactor.callLater(self.timeout,
lambda: factory.deferred.called or factory.deferred.errback(
          Exception('Feedback fetch timed out after %i seconds' % self.timeout)))
def cancel_timeout(r):
try: timeout.cancel()
except: pass
return r
factory.deferred.addBoth(cancel_timeout)
except Exception, e:
      log.err('APNSService feedback error initializing: %s' % str(e))
raise
return factory.deferred | [
"def",
"read",
"(",
"self",
")",
":",
"log",
".",
"msg",
"(",
"'APNSService read (connecting)'",
")",
"try",
":",
"server",
",",
"port",
"=",
"(",
"(",
"FEEDBACK_SERVER_SANDBOX_HOSTNAME",
"if",
"self",
".",
"environment",
"==",
"'sandbox'",
"else",
"FEEDBACK_SERVER_HOSTNAME",
")",
",",
"FEEDBACK_SERVER_PORT",
")",
"factory",
"=",
"self",
".",
"feedbackProtocolFactory",
"(",
")",
"context",
"=",
"self",
".",
"getContextFactory",
"(",
")",
"reactor",
".",
"connectSSL",
"(",
"server",
",",
"port",
",",
"factory",
",",
"context",
")",
"factory",
".",
"deferred",
".",
"addErrback",
"(",
"log_errback",
"(",
"'apns-feedback-read'",
")",
")",
"timeout",
"=",
"reactor",
".",
"callLater",
"(",
"self",
".",
"timeout",
",",
"lambda",
":",
"factory",
".",
"deferred",
".",
"called",
"or",
"factory",
".",
"deferred",
".",
"errback",
"(",
"Exception",
"(",
"'Feedbcak fetch timed out after %i seconds'",
"%",
"self",
".",
"timeout",
")",
")",
")",
"def",
"cancel_timeout",
"(",
"r",
")",
":",
"try",
":",
"timeout",
".",
"cancel",
"(",
")",
"except",
":",
"pass",
"return",
"r",
"factory",
".",
"deferred",
".",
"addBoth",
"(",
"cancel_timeout",
")",
"except",
"Exception",
",",
"e",
":",
"log",
".",
"err",
"(",
"'APNService feedback error initializing: %s'",
"%",
"str",
"(",
"e",
")",
")",
"raise",
"return",
"factory",
".",
"deferred"
]
| 39.8 | 19.8 |
def register_user(self, data):
""" Parses input and register user """
error = False
msg = ""
email_re = re.compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*"' # quoted-string
r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?$', re.IGNORECASE) # domain
# Check input format
if re.match(r"^[-_|~0-9A-Z]{4,}$", data["username"], re.IGNORECASE) is None:
error = True
msg = _("Invalid username format.")
elif email_re.match(data["email"]) is None:
error = True
msg = _("Invalid email format.")
elif len(data["passwd"]) < 6:
error = True
msg = _("Password too short.")
elif data["passwd"] != data["passwd2"]:
error = True
msg = _("Passwords don't match !")
if not error:
existing_user = self.database.users.find_one({"$or": [{"username": data["username"]}, {"email": data["email"]}]})
if existing_user is not None:
error = True
if existing_user["username"] == data["username"]:
msg = _("This username is already taken !")
else:
msg = _("This email address is already in use !")
else:
passwd_hash = hashlib.sha512(data["passwd"].encode("utf-8")).hexdigest()
activate_hash = hashlib.sha512(str(random.getrandbits(256)).encode("utf-8")).hexdigest()
self.database.users.insert({"username": data["username"],
"realname": data["realname"],
"email": data["email"],
"password": passwd_hash,
"activate": activate_hash,
"bindings": {},
"language": self.user_manager._session.get("language", "en")})
try:
web.sendmail(web.config.smtp_sendername, data["email"], _("Welcome on INGInious"),
_("""Welcome on INGInious !
To activate your account, please click on the following link :
""")
+ web.ctx.home + "/register?activate=" + activate_hash)
msg = _("You are succesfully registered. An email has been sent to you for activation.")
except:
error = True
msg = _("Something went wrong while sending you activation email. Please contact the administrator.")
return msg, error | [
"def",
"register_user",
"(",
"self",
",",
"data",
")",
":",
"error",
"=",
"False",
"msg",
"=",
"\"\"",
"email_re",
"=",
"re",
".",
"compile",
"(",
"r\"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\"",
"# dot-atom",
"r'|^\"([\\001-\\010\\013\\014\\016-\\037!#-\\[\\]-\\177]|\\\\[\\001-011\\013\\014\\016-\\177])*\"'",
"# quoted-string",
"r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+[A-Z]{2,6}\\.?$'",
",",
"re",
".",
"IGNORECASE",
")",
"# domain",
"# Check input format",
"if",
"re",
".",
"match",
"(",
"r\"^[-_|~0-9A-Z]{4,}$\"",
",",
"data",
"[",
"\"username\"",
"]",
",",
"re",
".",
"IGNORECASE",
")",
"is",
"None",
":",
"error",
"=",
"True",
"msg",
"=",
"_",
"(",
"\"Invalid username format.\"",
")",
"elif",
"email_re",
".",
"match",
"(",
"data",
"[",
"\"email\"",
"]",
")",
"is",
"None",
":",
"error",
"=",
"True",
"msg",
"=",
"_",
"(",
"\"Invalid email format.\"",
")",
"elif",
"len",
"(",
"data",
"[",
"\"passwd\"",
"]",
")",
"<",
"6",
":",
"error",
"=",
"True",
"msg",
"=",
"_",
"(",
"\"Password too short.\"",
")",
"elif",
"data",
"[",
"\"passwd\"",
"]",
"!=",
"data",
"[",
"\"passwd2\"",
"]",
":",
"error",
"=",
"True",
"msg",
"=",
"_",
"(",
"\"Passwords don't match !\"",
")",
"if",
"not",
"error",
":",
"existing_user",
"=",
"self",
".",
"database",
".",
"users",
".",
"find_one",
"(",
"{",
"\"$or\"",
":",
"[",
"{",
"\"username\"",
":",
"data",
"[",
"\"username\"",
"]",
"}",
",",
"{",
"\"email\"",
":",
"data",
"[",
"\"email\"",
"]",
"}",
"]",
"}",
")",
"if",
"existing_user",
"is",
"not",
"None",
":",
"error",
"=",
"True",
"if",
"existing_user",
"[",
"\"username\"",
"]",
"==",
"data",
"[",
"\"username\"",
"]",
":",
"msg",
"=",
"_",
"(",
"\"This username is already taken !\"",
")",
"else",
":",
"msg",
"=",
"_",
"(",
"\"This email address is already in use !\"",
")",
"else",
":",
"passwd_hash",
"=",
"hashlib",
".",
"sha512",
"(",
"data",
"[",
"\"passwd\"",
"]",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
".",
"hexdigest",
"(",
")",
"activate_hash",
"=",
"hashlib",
".",
"sha512",
"(",
"str",
"(",
"random",
".",
"getrandbits",
"(",
"256",
")",
")",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
".",
"hexdigest",
"(",
")",
"self",
".",
"database",
".",
"users",
".",
"insert",
"(",
"{",
"\"username\"",
":",
"data",
"[",
"\"username\"",
"]",
",",
"\"realname\"",
":",
"data",
"[",
"\"realname\"",
"]",
",",
"\"email\"",
":",
"data",
"[",
"\"email\"",
"]",
",",
"\"password\"",
":",
"passwd_hash",
",",
"\"activate\"",
":",
"activate_hash",
",",
"\"bindings\"",
":",
"{",
"}",
",",
"\"language\"",
":",
"self",
".",
"user_manager",
".",
"_session",
".",
"get",
"(",
"\"language\"",
",",
"\"en\"",
")",
"}",
")",
"try",
":",
"web",
".",
"sendmail",
"(",
"web",
".",
"config",
".",
"smtp_sendername",
",",
"data",
"[",
"\"email\"",
"]",
",",
"_",
"(",
"\"Welcome on INGInious\"",
")",
",",
"_",
"(",
"\"\"\"Welcome on INGInious !\n\nTo activate your account, please click on the following link :\n\"\"\"",
")",
"+",
"web",
".",
"ctx",
".",
"home",
"+",
"\"/register?activate=\"",
"+",
"activate_hash",
")",
"msg",
"=",
"_",
"(",
"\"You are succesfully registered. An email has been sent to you for activation.\"",
")",
"except",
":",
"error",
"=",
"True",
"msg",
"=",
"_",
"(",
"\"Something went wrong while sending you activation email. Please contact the administrator.\"",
")",
"return",
"msg",
",",
"error"
]
| 50.309091 | 27.763636 |
def generate_stack_policy_args(stack_policy=None):
""" Converts a stack policy object into keyword args.
Args:
stack_policy (:class:`stacker.providers.base.Template`): A template
object representing a stack policy.
Returns:
dict: A dictionary of keyword arguments to be used elsewhere.
"""
args = {}
if stack_policy:
logger.debug("Stack has a stack policy")
if stack_policy.url:
# stacker currently does not support uploading stack policies to
# S3, so this will never get hit (unless your implementing S3
# uploads, and then you're probably reading this comment about why
# the exception below was raised :))
#
# args["StackPolicyURL"] = stack_policy.url
raise NotImplementedError
else:
args["StackPolicyBody"] = stack_policy.body
return args | [
"def",
"generate_stack_policy_args",
"(",
"stack_policy",
"=",
"None",
")",
":",
"args",
"=",
"{",
"}",
"if",
"stack_policy",
":",
"logger",
".",
"debug",
"(",
"\"Stack has a stack policy\"",
")",
"if",
"stack_policy",
".",
"url",
":",
"# stacker currently does not support uploading stack policies to",
"# S3, so this will never get hit (unless your implementing S3",
"# uploads, and then you're probably reading this comment about why",
"# the exception below was raised :))",
"#",
"# args[\"StackPolicyURL\"] = stack_policy.url",
"raise",
"NotImplementedError",
"else",
":",
"args",
"[",
"\"StackPolicyBody\"",
"]",
"=",
"stack_policy",
".",
"body",
"return",
"args"
]
| 35.92 | 22.16 |
def label_from_instance(self, obj):
"""
Create labels which represent the tree level of each node
when generating option labels.
"""
label = smart_text(obj)
prefix = self.level_indicator * getattr(obj, obj._mptt_meta.level_attr)
if prefix:
return '%s %s' % (prefix, label)
return label | [
"def",
"label_from_instance",
"(",
"self",
",",
"obj",
")",
":",
"label",
"=",
"smart_text",
"(",
"obj",
")",
"prefix",
"=",
"self",
".",
"level_indicator",
"*",
"getattr",
"(",
"obj",
",",
"obj",
".",
"_mptt_meta",
".",
"level_attr",
")",
"if",
"prefix",
":",
"return",
"'%s %s'",
"%",
"(",
"prefix",
",",
"label",
")",
"return",
"label"
]
| 35.2 | 12.6 |
def _get_closest_samp_num(self, ref_samp_num, start_test_samp_num):
"""
Return the closest testing sample number for the given reference
sample number. Limit the search from start_test_samp_num.
"""
if start_test_samp_num >= self.n_test:
raise ValueError('Invalid starting test sample number.')
ref_samp = self.ref_sample[ref_samp_num]
test_samp = self.test_sample[start_test_samp_num]
samp_diff = ref_samp - test_samp
# Initialize running parameters
closest_samp_num = start_test_samp_num
smallest_samp_diff = abs(samp_diff)
# Iterate through the testing samples
for test_samp_num in range(start_test_samp_num, self.n_test):
test_samp = self.test_sample[test_samp_num]
samp_diff = ref_samp - test_samp
abs_samp_diff = abs(samp_diff)
# Found a better match
if abs_samp_diff < smallest_samp_diff:
closest_samp_num = test_samp_num
smallest_samp_diff = abs_samp_diff
# Stop iterating when the ref sample is first passed or reached
if samp_diff <= 0:
break
return closest_samp_num, smallest_samp_diff | [
"def",
"_get_closest_samp_num",
"(",
"self",
",",
"ref_samp_num",
",",
"start_test_samp_num",
")",
":",
"if",
"start_test_samp_num",
">=",
"self",
".",
"n_test",
":",
"raise",
"ValueError",
"(",
"'Invalid starting test sample number.'",
")",
"ref_samp",
"=",
"self",
".",
"ref_sample",
"[",
"ref_samp_num",
"]",
"test_samp",
"=",
"self",
".",
"test_sample",
"[",
"start_test_samp_num",
"]",
"samp_diff",
"=",
"ref_samp",
"-",
"test_samp",
"# Initialize running parameters",
"closest_samp_num",
"=",
"start_test_samp_num",
"smallest_samp_diff",
"=",
"abs",
"(",
"samp_diff",
")",
"# Iterate through the testing samples",
"for",
"test_samp_num",
"in",
"range",
"(",
"start_test_samp_num",
",",
"self",
".",
"n_test",
")",
":",
"test_samp",
"=",
"self",
".",
"test_sample",
"[",
"test_samp_num",
"]",
"samp_diff",
"=",
"ref_samp",
"-",
"test_samp",
"abs_samp_diff",
"=",
"abs",
"(",
"samp_diff",
")",
"# Found a better match",
"if",
"abs_samp_diff",
"<",
"smallest_samp_diff",
":",
"closest_samp_num",
"=",
"test_samp_num",
"smallest_samp_diff",
"=",
"abs_samp_diff",
"# Stop iterating when the ref sample is first passed or reached",
"if",
"samp_diff",
"<=",
"0",
":",
"break",
"return",
"closest_samp_num",
",",
"smallest_samp_diff"
]
| 37.181818 | 18.090909 |
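A minimal standalone sketch of the same closest-sample search on plain Python lists; the sample values are made up for illustration (the original reads self.ref_sample and self.test_sample):

```python
def closest_samp(ref_samp, test_sample, start):
    # Track the best candidate seen so far, starting from `start`.
    best_i = start
    best_d = abs(ref_samp - test_sample[start])
    for i in range(start, len(test_sample)):
        d = ref_samp - test_sample[i]
        if abs(d) < best_d:
            best_i, best_d = i, abs(d)
        if d <= 0:  # the reference sample has been reached or passed
            break
    return best_i, best_d

print(closest_samp(30, [8, 29, 31, 60], 0))  # -> (1, 1)
```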
def _remove_pre_formatting(self):
"""
Removes formatting tags added to pre elements.
"""
preformatted_wrappers = [
'pre',
'code'
]
for wrapper in preformatted_wrappers:
for formatter in FORMATTERS:
tag = FORMATTERS[formatter]
character = formatter
regex = r'(<{w}>.*)<{t}>(.*)</{t}>(.*</{w}>)'.format(
t=tag,
w=wrapper
)
repl = r'\g<1>{c}\g<2>{c}\g<3>'.format(c=character)
self.cleaned_html = re.sub(regex, repl, self.cleaned_html) | [
"def",
"_remove_pre_formatting",
"(",
"self",
")",
":",
"preformatted_wrappers",
"=",
"[",
"'pre'",
",",
"'code'",
"]",
"for",
"wrapper",
"in",
"preformatted_wrappers",
":",
"for",
"formatter",
"in",
"FORMATTERS",
":",
"tag",
"=",
"FORMATTERS",
"[",
"formatter",
"]",
"character",
"=",
"formatter",
"regex",
"=",
"r'(<{w}>.*)<{t}>(.*)</{t}>(.*</{w}>)'",
".",
"format",
"(",
"t",
"=",
"tag",
",",
"w",
"=",
"wrapper",
")",
"repl",
"=",
"r'\\g<1>{c}\\g<2>{c}\\g<3>'",
".",
"format",
"(",
"c",
"=",
"character",
")",
"self",
".",
"cleaned_html",
"=",
"re",
".",
"sub",
"(",
"regex",
",",
"repl",
",",
"self",
".",
"cleaned_html",
")"
]
| 31.7 | 16.6 |
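A self-contained illustration of the regex used above; the FORMATTERS map here is hypothetical (the real mapping lives elsewhere in that project):

```python
import re

# Hypothetical formatter map: markdown character -> HTML tag name.
FORMATTERS = {'*': 'strong', '_': 'em'}

html = '<pre>a <strong>b</strong> c</pre>'
for wrapper in ('pre', 'code'):
    for character, tag in FORMATTERS.items():
        regex = r'(<{w}>.*)<{t}>(.*)</{t}>(.*</{w}>)'.format(t=tag, w=wrapper)
        repl = r'\g<1>{c}\g<2>{c}\g<3>'.format(c=character)
        html = re.sub(regex, repl, html)

print(html)  # -> <pre>a *b* c</pre>
```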
def ip_v6(self) -> str:
"""Generate a random IPv6 address.
:return: Random IPv6 address.
:Example:
2001:c244:cf9d:1fb1:c56d:f52c:8a04:94f3
"""
ipv6 = IPv6Address(
self.random.randint(
0, 2 ** 128 - 1,
),
)
return str(ipv6) | [
"def",
"ip_v6",
"(",
"self",
")",
"->",
"str",
":",
"ipv6",
"=",
"IPv6Address",
"(",
"self",
".",
"random",
".",
"randint",
"(",
"0",
",",
"2",
"**",
"128",
"-",
"1",
",",
")",
",",
")",
"return",
"str",
"(",
"ipv6",
")"
]
| 22.785714 | 16.857143 |
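The same idea with only the standard library; random.randint stands in for the class's seeded self.random:

```python
import random
from ipaddress import IPv6Address

# Any integer in [0, 2**128 - 1] maps onto a valid IPv6 address.
print(IPv6Address(random.randint(0, 2 ** 128 - 1)))
```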
def _processHandler(self, securityHandler, param_dict):
"""proceses the handler and returns the cookiejar"""
cj = None
handler = None
if securityHandler is None:
cj = cookiejar.CookieJar()
elif securityHandler.method.lower() == "token":
param_dict['token'] = securityHandler.token
if hasattr(securityHandler, 'cookiejar'):
cj = securityHandler.cookiejar
if hasattr(securityHandler, 'handler'):
handler = securityHandler.handler
elif securityHandler.method.lower() == "handler":
handler = securityHandler.handler
cj = securityHandler.cookiejar
return param_dict, handler, cj | [
"def",
"_processHandler",
"(",
"self",
",",
"securityHandler",
",",
"param_dict",
")",
":",
"cj",
"=",
"None",
"handler",
"=",
"None",
"if",
"securityHandler",
"is",
"None",
":",
"cj",
"=",
"cookiejar",
".",
"CookieJar",
"(",
")",
"elif",
"securityHandler",
".",
"method",
".",
"lower",
"(",
")",
"==",
"\"token\"",
":",
"param_dict",
"[",
"'token'",
"]",
"=",
"securityHandler",
".",
"token",
"if",
"hasattr",
"(",
"securityHandler",
",",
"'cookiejar'",
")",
":",
"cj",
"=",
"securityHandler",
".",
"cookiejar",
"if",
"hasattr",
"(",
"securityHandler",
",",
"'handler'",
")",
":",
"handler",
"=",
"securityHandler",
".",
"handler",
"elif",
"securityHandler",
".",
"method",
".",
"lower",
"(",
")",
"==",
"\"handler\"",
":",
"handler",
"=",
"securityHandler",
".",
"handler",
"cj",
"=",
"securityHandler",
".",
"cookiejar",
"return",
"param_dict",
",",
"handler",
",",
"cj"
]
| 44.875 | 9.875 |
def setOpts(self, **opts):
"""
Changes the behavior of the SpinBox. Accepts most of the arguments
allowed in :func:`__init__ <pyqtgraph.SpinBox.__init__>`.
"""
#print opts
for k in opts:
if k == 'bounds':
#print opts[k]
self.setMinimum(opts[k][0], update=False)
self.setMaximum(opts[k][1], update=False)
#for i in [0,1]:
#if opts[k][i] is None:
#self.opts[k][i] = None
#else:
#self.opts[k][i] = D(unicode(opts[k][i]))
elif k in ['step', 'minStep']:
self.opts[k] = D(asUnicode(opts[k]))
elif k == 'value':
pass ## don't set value until bounds have been set
else:
self.opts[k] = opts[k]
if 'value' in opts:
self.setValue(opts['value'])
## If bounds have changed, update value to match
if 'bounds' in opts and 'value' not in opts:
self.setValue()
## sanity checks:
if self.opts['int']:
if 'step' in opts:
step = opts['step']
## not necessary..
#if int(step) != step:
#raise Exception('Integer SpinBox must have integer step size.')
else:
self.opts['step'] = int(self.opts['step'])
if 'minStep' in opts:
step = opts['minStep']
if int(step) != step:
raise Exception('Integer SpinBox must have integer minStep size.')
else:
ms = int(self.opts.get('minStep', 1))
if ms < 1:
ms = 1
self.opts['minStep'] = ms
if 'delay' in opts:
self.proxy.setDelay(opts['delay'])
self.updateText() | [
"def",
"setOpts",
"(",
"self",
",",
"*",
"*",
"opts",
")",
":",
"#print opts",
"for",
"k",
"in",
"opts",
":",
"if",
"k",
"==",
"'bounds'",
":",
"#print opts[k]",
"self",
".",
"setMinimum",
"(",
"opts",
"[",
"k",
"]",
"[",
"0",
"]",
",",
"update",
"=",
"False",
")",
"self",
".",
"setMaximum",
"(",
"opts",
"[",
"k",
"]",
"[",
"1",
"]",
",",
"update",
"=",
"False",
")",
"#for i in [0,1]:",
"#if opts[k][i] is None:",
"#self.opts[k][i] = None",
"#else:",
"#self.opts[k][i] = D(unicode(opts[k][i]))",
"elif",
"k",
"in",
"[",
"'step'",
",",
"'minStep'",
"]",
":",
"self",
".",
"opts",
"[",
"k",
"]",
"=",
"D",
"(",
"asUnicode",
"(",
"opts",
"[",
"k",
"]",
")",
")",
"elif",
"k",
"==",
"'value'",
":",
"pass",
"## don't set value until bounds have been set",
"else",
":",
"self",
".",
"opts",
"[",
"k",
"]",
"=",
"opts",
"[",
"k",
"]",
"if",
"'value'",
"in",
"opts",
":",
"self",
".",
"setValue",
"(",
"opts",
"[",
"'value'",
"]",
")",
"## If bounds have changed, update value to match",
"if",
"'bounds'",
"in",
"opts",
"and",
"'value'",
"not",
"in",
"opts",
":",
"self",
".",
"setValue",
"(",
")",
"## sanity checks:",
"if",
"self",
".",
"opts",
"[",
"'int'",
"]",
":",
"if",
"'step'",
"in",
"opts",
":",
"step",
"=",
"opts",
"[",
"'step'",
"]",
"## not necessary..",
"#if int(step) != step:",
"#raise Exception('Integer SpinBox must have integer step size.')",
"else",
":",
"self",
".",
"opts",
"[",
"'step'",
"]",
"=",
"int",
"(",
"self",
".",
"opts",
"[",
"'step'",
"]",
")",
"if",
"'minStep'",
"in",
"opts",
":",
"step",
"=",
"opts",
"[",
"'minStep'",
"]",
"if",
"int",
"(",
"step",
")",
"!=",
"step",
":",
"raise",
"Exception",
"(",
"'Integer SpinBox must have integer minStep size.'",
")",
"else",
":",
"ms",
"=",
"int",
"(",
"self",
".",
"opts",
".",
"get",
"(",
"'minStep'",
",",
"1",
")",
")",
"if",
"ms",
"<",
"1",
":",
"ms",
"=",
"1",
"self",
".",
"opts",
"[",
"'minStep'",
"]",
"=",
"ms",
"if",
"'delay'",
"in",
"opts",
":",
"self",
".",
"proxy",
".",
"setDelay",
"(",
"opts",
"[",
"'delay'",
"]",
")",
"self",
".",
"updateText",
"(",
")"
]
| 35.759259 | 15.277778 |
def rpc_get_account_at(self, address, block_height, **con_info):
"""
Get the account's statuses at a particular block height.
Returns the sequence of history states on success
"""
if not check_account_address(address):
return {'error': 'Invalid address', 'http_status': 400}
if not check_block(block_height):
return {'error': 'Invalid start block', 'http_status': 400}
# must be b58
if is_c32_address(address):
address = c32ToB58(address)
db = get_db_state(self.working_dir)
account_states = db.get_account_at(address, block_height)
db.close()
# return credit_value and debit_value as strings, so the unwitting JS developer doesn't get confused
# as to why large balances get mysteriously converted to doubles.
ret = [self.export_account_state(hist) for hist in account_states]
return self.success_response({'history': ret}) | [
"def",
"rpc_get_account_at",
"(",
"self",
",",
"address",
",",
"block_height",
",",
"*",
"*",
"con_info",
")",
":",
"if",
"not",
"check_account_address",
"(",
"address",
")",
":",
"return",
"{",
"'error'",
":",
"'Invalid address'",
",",
"'http_status'",
":",
"400",
"}",
"if",
"not",
"check_block",
"(",
"block_height",
")",
":",
"return",
"{",
"'error'",
":",
"'Invalid start block'",
",",
"'http_status'",
":",
"400",
"}",
"# must be b58",
"if",
"is_c32_address",
"(",
"address",
")",
":",
"address",
"=",
"c32ToB58",
"(",
"address",
")",
"db",
"=",
"get_db_state",
"(",
"self",
".",
"working_dir",
")",
"account_states",
"=",
"db",
".",
"get_account_at",
"(",
"address",
",",
"block_height",
")",
"db",
".",
"close",
"(",
")",
"# return credit_value and debit_value as strings, so the unwitting JS developer doesn't get confused",
"# as to why large balances get mysteriously converted to doubles.",
"ret",
"=",
"[",
"self",
".",
"export_account_state",
"(",
"hist",
")",
"for",
"hist",
"in",
"account_states",
"]",
"return",
"self",
".",
"success_response",
"(",
"{",
"'history'",
":",
"ret",
"}",
")"
]
| 42 | 22.173913 |
def ContainsAddressStr(self, address):
"""
Determine if the wallet contains the address.
Args:
address (str): a string representing the public key.
Returns:
bool: True, if the address is present in the wallet. False otherwise.
"""
for key, contract in self._contracts.items():
if contract.Address == address:
return True
return False | [
"def",
"ContainsAddressStr",
"(",
"self",
",",
"address",
")",
":",
"for",
"key",
",",
"contract",
"in",
"self",
".",
"_contracts",
".",
"items",
"(",
")",
":",
"if",
"contract",
".",
"Address",
"==",
"address",
":",
"return",
"True",
"return",
"False"
]
| 30.714286 | 18.571429 |
def VectorLen(self, off):
"""VectorLen retrieves the length of the vector whose offset is stored
at "off" in this object."""
N.enforce_number(off, N.UOffsetTFlags)
off += self.Pos
off += encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
ret = encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
return ret | [
"def",
"VectorLen",
"(",
"self",
",",
"off",
")",
":",
"N",
".",
"enforce_number",
"(",
"off",
",",
"N",
".",
"UOffsetTFlags",
")",
"off",
"+=",
"self",
".",
"Pos",
"off",
"+=",
"encode",
".",
"Get",
"(",
"N",
".",
"UOffsetTFlags",
".",
"packer_type",
",",
"self",
".",
"Bytes",
",",
"off",
")",
"ret",
"=",
"encode",
".",
"Get",
"(",
"N",
".",
"UOffsetTFlags",
".",
"packer_type",
",",
"self",
".",
"Bytes",
",",
"off",
")",
"return",
"ret"
]
| 41 | 17.888889 |
def compare_networks(self, other):
"""Compare two IP objects.
This is only concerned about the comparison of the integer
representation of the network addresses. This means that the
host bits aren't considered at all in this method. If you want
to compare host bits, you can easily enough do a
'HostA._ip < HostB._ip'
Args:
other: An IP object.
Returns:
If the IP versions of self and other are the same, returns:
-1 if self < other:
eg: IPv4('1.1.1.0/24') < IPv4('1.1.2.0/24')
IPv6('1080::200C:417A') < IPv6('1080::200B:417B')
0 if self == other
eg: IPv4('1.1.1.1/24') == IPv4('1.1.1.2/24')
IPv6('1080::200C:417A/96') == IPv6('1080::200C:417B/96')
1 if self > other
eg: IPv4('1.1.1.0/24') > IPv4('1.1.0.0/24')
IPv6('1080::1:200C:417A/112') >
IPv6('1080::0:200C:417A/112')
If the IP versions of self and other are different, returns:
-1 if self._version < other._version
eg: IPv4('10.0.0.1/24') < IPv6('::1/128')
1 if self._version > other._version
eg: IPv6('::1/128') > IPv4('255.255.255.0/24')
"""
if self._version < other._version:
return -1
if self._version > other._version:
return 1
# self._version == other._version below here:
if self.network < other.network:
return -1
if self.network > other.network:
return 1
# self.network == other.network below here:
if self.netmask < other.netmask:
return -1
if self.netmask > other.netmask:
return 1
# self.network == other.network and self.netmask == other.netmask
return 0 | [
"def",
"compare_networks",
"(",
"self",
",",
"other",
")",
":",
"if",
"self",
".",
"_version",
"<",
"other",
".",
"_version",
":",
"return",
"-",
"1",
"if",
"self",
".",
"_version",
">",
"other",
".",
"_version",
":",
"return",
"1",
"# self._version == other._version below here:",
"if",
"self",
".",
"network",
"<",
"other",
".",
"network",
":",
"return",
"-",
"1",
"if",
"self",
".",
"network",
">",
"other",
".",
"network",
":",
"return",
"1",
"# self.network == other.network below here:",
"if",
"self",
".",
"netmask",
"<",
"other",
".",
"netmask",
":",
"return",
"-",
"1",
"if",
"self",
".",
"netmask",
">",
"other",
".",
"netmask",
":",
"return",
"1",
"# self.network == other.network and self.netmask == other.netmask",
"return",
"0"
]
| 36.58 | 18.28 |
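The standard-library ipaddress module exposes a method of the same name (deprecated in newer Pythons in favour of the comparison operators), which makes the docstring easy to sanity-check:

```python
from ipaddress import ip_network

a = ip_network('1.1.1.0/24')
b = ip_network('1.1.2.0/24')
print(a.compare_networks(b))  # -> -1 (1.1.1.0/24 sorts before 1.1.2.0/24)
print(a.compare_networks(a))  # -> 0
```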
def objective_bounds(self):
"""
Return objective bounds
Returns
-------
lower : list of floats
Lower boundaries for the objectives
Upper : list of floats
Upper boundaries for the objectives
"""
if self.ideal and self.nadir:
return self.ideal, self.nadir
raise NotImplementedError(
"Ideal and nadir value calculation is not yet implemented"
) | [
"def",
"objective_bounds",
"(",
"self",
")",
":",
"if",
"self",
".",
"ideal",
"and",
"self",
".",
"nadir",
":",
"return",
"self",
".",
"ideal",
",",
"self",
".",
"nadir",
"raise",
"NotImplementedError",
"(",
"\"Ideal and nadir value calculation is not yet implemented\"",
")"
]
| 23.947368 | 17.736842 |
def permissions(self, addr, permissions=None):
"""
Retrieve the permissions of the page at address `addr`.
:param addr: address to get the page permissions
:param permissions: Integer or BVV to optionally set page permissions to
:return: AST representing the permissions on the page
"""
out = self.mem.permissions(addr, permissions)
# if unicorn is in play and we've marked a page writable, it must be uncached
if permissions is not None and self.state.solver.is_true(permissions & 2 == 2):
if self.state.has_plugin('unicorn'):
self.state.unicorn.uncache_page(addr)
return out | [
"def",
"permissions",
"(",
"self",
",",
"addr",
",",
"permissions",
"=",
"None",
")",
":",
"out",
"=",
"self",
".",
"mem",
".",
"permissions",
"(",
"addr",
",",
"permissions",
")",
"# if unicorn is in play and we've marked a page writable, it must be uncached",
"if",
"permissions",
"is",
"not",
"None",
"and",
"self",
".",
"state",
".",
"solver",
".",
"is_true",
"(",
"permissions",
"&",
"2",
"==",
"2",
")",
":",
"if",
"self",
".",
"state",
".",
"has_plugin",
"(",
"'unicorn'",
")",
":",
"self",
".",
"state",
".",
"unicorn",
".",
"uncache_page",
"(",
"addr",
")",
"return",
"out"
]
| 49.285714 | 22.285714 |
def until_synced(self, timeout=None):
"""Return a tornado Future; resolves when all subordinate clients are synced"""
futures = [r.until_synced(timeout) for r in dict.values(self.children)]
yield tornado.gen.multi(futures, quiet_exceptions=tornado.gen.TimeoutError) | [
"def",
"until_synced",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"futures",
"=",
"[",
"r",
".",
"until_synced",
"(",
"timeout",
")",
"for",
"r",
"in",
"dict",
".",
"values",
"(",
"self",
".",
"children",
")",
"]",
"yield",
"tornado",
".",
"gen",
".",
"multi",
"(",
"futures",
",",
"quiet_exceptions",
"=",
"tornado",
".",
"gen",
".",
"TimeoutError",
")"
]
| 71.5 | 21.25 |
def groups_archive(self, room_id, **kwargs):
"""Archives a private group, only if you’re part of the group."""
return self.__call_api_post('groups.archive', roomId=room_id, kwargs=kwargs) | [
"def",
"groups_archive",
"(",
"self",
",",
"room_id",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"__call_api_post",
"(",
"'groups.archive'",
",",
"roomId",
"=",
"room_id",
",",
"kwargs",
"=",
"kwargs",
")"
]
| 67 | 16 |
def quote_attrib(self, inStr):
"""
Transforms characters between xml notation and python notation.
"""
s1 = (isinstance(inStr, str) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
if '"' in s1:
# if "'" in s1:
        s1 = '%s' % s1.replace('"', "&quot;")
# else:
# s1 = "'%s'" % s1
#else:
# s1 = '"%s"' % s1
return s1 | [
"def",
"quote_attrib",
"(",
"self",
",",
"inStr",
")",
":",
"s1",
"=",
"(",
"isinstance",
"(",
"inStr",
",",
"str",
")",
"and",
"inStr",
"or",
"'%s'",
"%",
"inStr",
")",
"s1",
"=",
"s1",
".",
"replace",
"(",
"'&'",
",",
"'&'",
")",
"s1",
"=",
"s1",
".",
"replace",
"(",
"'<'",
",",
"'<'",
")",
"s1",
"=",
"s1",
".",
"replace",
"(",
"'>'",
",",
"'>'",
")",
"if",
"'\"'",
"in",
"s1",
":",
"# if \"'\" in s1:",
"s1",
"=",
"'%s'",
"%",
"s1",
".",
"replace",
"(",
"'\"'",
",",
"\""\"",
")",
"# else:",
"# s1 = \"'%s'\" % s1",
"#else:",
"# s1 = '\"%s\"' % s1",
"return",
"s1"
]
| 30.294118 | 12.058824 |
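For comparison, the standard library provides the same XML escaping via xml.sax.saxutils:

```python
from xml.sax.saxutils import escape, quoteattr

print(escape('a < b & c'))    # -> a &lt; b &amp; c
print(quoteattr('say "hi"'))  # -> 'say "hi"' (single-quoted because the value contains ")
```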
def get_build_log_lines(self, project, build_id, log_id, start_line=None, end_line=None):
"""GetBuildLogLines.
Gets an individual log file for a build.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:param int log_id: The ID of the log file.
:param long start_line: The start line.
:param long end_line: The end line.
:rtype: [str]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
if log_id is not None:
route_values['logId'] = self._serialize.url('log_id', log_id, 'int')
query_parameters = {}
if start_line is not None:
query_parameters['startLine'] = self._serialize.query('start_line', start_line, 'long')
if end_line is not None:
query_parameters['endLine'] = self._serialize.query('end_line', end_line, 'long')
response = self._send(http_method='GET',
location_id='35a80daf-7f30-45fc-86e8-6b813d9c90df',
version='5.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[str]', self._unwrap_collection(response)) | [
"def",
"get_build_log_lines",
"(",
"self",
",",
"project",
",",
"build_id",
",",
"log_id",
",",
"start_line",
"=",
"None",
",",
"end_line",
"=",
"None",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"project",
"is",
"not",
"None",
":",
"route_values",
"[",
"'project'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'project'",
",",
"project",
",",
"'str'",
")",
"if",
"build_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'buildId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'build_id'",
",",
"build_id",
",",
"'int'",
")",
"if",
"log_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'logId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'log_id'",
",",
"log_id",
",",
"'int'",
")",
"query_parameters",
"=",
"{",
"}",
"if",
"start_line",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'startLine'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'start_line'",
",",
"start_line",
",",
"'long'",
")",
"if",
"end_line",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'endLine'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'end_line'",
",",
"end_line",
",",
"'long'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'35a80daf-7f30-45fc-86e8-6b813d9c90df'",
",",
"version",
"=",
"'5.0'",
",",
"route_values",
"=",
"route_values",
",",
"query_parameters",
"=",
"query_parameters",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'[str]'",
",",
"self",
".",
"_unwrap_collection",
"(",
"response",
")",
")"
]
| 52.285714 | 19.892857 |
def set_sum_w2(self, w, ix, iy=0, iz=0):
"""
Sets the true number of entries in the bin weighted by w^2
"""
if self.GetSumw2N() == 0:
raise RuntimeError(
"Attempting to access Sumw2 in histogram "
"where weights were not stored")
xl = self.nbins(axis=0, overflow=True)
yl = self.nbins(axis=1, overflow=True)
idx = xl * yl * iz + xl * iy + ix
if not 0 <= idx < self.GetSumw2N():
raise IndexError("bin index out of range")
self.GetSumw2().SetAt(w, idx) | [
"def",
"set_sum_w2",
"(",
"self",
",",
"w",
",",
"ix",
",",
"iy",
"=",
"0",
",",
"iz",
"=",
"0",
")",
":",
"if",
"self",
".",
"GetSumw2N",
"(",
")",
"==",
"0",
":",
"raise",
"RuntimeError",
"(",
"\"Attempting to access Sumw2 in histogram \"",
"\"where weights were not stored\"",
")",
"xl",
"=",
"self",
".",
"nbins",
"(",
"axis",
"=",
"0",
",",
"overflow",
"=",
"True",
")",
"yl",
"=",
"self",
".",
"nbins",
"(",
"axis",
"=",
"1",
",",
"overflow",
"=",
"True",
")",
"idx",
"=",
"xl",
"*",
"yl",
"*",
"iz",
"+",
"xl",
"*",
"iy",
"+",
"ix",
"if",
"not",
"0",
"<=",
"idx",
"<",
"self",
".",
"GetSumw2N",
"(",
")",
":",
"raise",
"IndexError",
"(",
"\"bin index out of range\"",
")",
"self",
".",
"GetSumw2",
"(",
")",
".",
"SetAt",
"(",
"w",
",",
"idx",
")"
]
| 40.357143 | 7.214286 |
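The index computed above is the usual 3-D to 1-D flattening with x varying fastest; a tiny self-contained check with illustrative names:

```python
def flat_index(ix, iy, iz, nx, ny):
    # x varies fastest, then y, then z: idx = nx*ny*iz + nx*iy + ix
    return nx * ny * iz + nx * iy + ix

# In a 4x3xN grid, (ix=2, iy=1, iz=0) lands at slot 6.
assert flat_index(2, 1, 0, nx=4, ny=3) == 6
```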
def create(self, username, password):
"""
Create Token by given username and password.
Authenticate user by given username and password.
        Returns a :class:`Token` object containing the username and token that you should pass to all requests
(in X-TM-Username and X-TM-Key, respectively).
:Example:
token = client.tokens.create(username="my_username", password="my_password")
:param username: Account username or email. Required.
:param password: Account password. Required.
"""
data = dict(username=username, password=password)
response, instance = self.request("POST", self.uri, data=data)
return self.load_instance(instance) | [
"def",
"create",
"(",
"self",
",",
"username",
",",
"password",
")",
":",
"data",
"=",
"dict",
"(",
"username",
"=",
"username",
",",
"password",
"=",
"password",
")",
"response",
",",
"instance",
"=",
"self",
".",
"request",
"(",
"\"POST\"",
",",
"self",
".",
"uri",
",",
"data",
"=",
"data",
")",
"return",
"self",
".",
"load_instance",
"(",
"instance",
")"
]
| 42.117647 | 22.705882 |
def ListDirectoryAbsolute(directory):
"""Yields all files in the given directory. The paths are absolute."""
return (os.path.join(directory, path)
for path in tf.io.gfile.listdir(directory)) | [
"def",
"ListDirectoryAbsolute",
"(",
"directory",
")",
":",
"return",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"path",
")",
"for",
"path",
"in",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"(",
"directory",
")",
")"
]
| 50.25 | 4.25 |
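A local-filesystem analogue using only the standard library (tf.io.gfile generalizes the same listing to remote filesystems):

```python
import os

def list_directory_absolute(directory):
    # Join the directory onto each entry so callers get full paths.
    return (os.path.join(directory, path) for path in os.listdir(directory))
```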
def gzip_compress(data):
"""
Compress a string. Same as gzip.compress in Python3.
"""
buf = BytesIO()
with gzip.GzipFile(fileobj=buf, mode='wb') as fd:
fd.write(data)
return buf.getvalue() | [
"def",
"gzip_compress",
"(",
"data",
")",
":",
"buf",
"=",
"BytesIO",
"(",
")",
"with",
"gzip",
".",
"GzipFile",
"(",
"fileobj",
"=",
"buf",
",",
"mode",
"=",
"'wb'",
")",
"as",
"fd",
":",
"fd",
".",
"write",
"(",
"data",
")",
"return",
"buf",
".",
"getvalue",
"(",
")"
]
| 26.625 | 12.375 |
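A self-contained round-trip check, with the imports the snippet assumes; on Python 3 the helper matches gzip.compress, as the docstring notes:

```python
import gzip
from io import BytesIO

def gzip_compress(data):
    buf = BytesIO()
    with gzip.GzipFile(fileobj=buf, mode='wb') as fd:
        fd.write(data)
    return buf.getvalue()

payload = b'hello world'
assert gzip.decompress(gzip_compress(payload)) == payload
```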
def clean(self):
'''
Check if such an assignment configuration makes sense, and reject it otherwise.
This mainly relates to interdependencies between the different fields, since
        single field constraints are already clarified by the Django model configuration.
'''
super(AssignmentAdminForm, self).clean()
d = defaultdict(lambda: False)
d.update(self.cleaned_data)
# Having validation or full test enabled demands file upload
if d['attachment_test_validity'] and not d['has_attachment']:
raise ValidationError('You cannot have a validation script without allowing file upload.')
if d['attachment_test_full'] and not d['has_attachment']:
raise ValidationError('You cannot have a full test script without allowing file upload.')
# Having validation or full test enabled demands a test machine
if d['attachment_test_validity'] and 'test_machines' in d and not len(d['test_machines'])>0:
raise ValidationError('You cannot have a validation script without specifying test machines.')
if d['attachment_test_full'] and 'test_machines' in d and not len(d['test_machines'])>0:
raise ValidationError('You cannot have a full test script without specifying test machines.')
if d['download'] and d['description']:
raise ValidationError('You can only have a description link OR a description file.')
if not d['download'] and not d['description']:
raise ValidationError('You need a description link OR a description file.')
# Having test machines demands compilation or validation scripts
if 'test_machines' in d and len(d['test_machines'])>0 \
and not 'attachment_test_validity' in d \
and not 'attachment_test_full' in d:
raise ValidationError('For using test machines, you need to enable validation or full test.') | [
"def",
"clean",
"(",
"self",
")",
":",
"super",
"(",
"AssignmentAdminForm",
",",
"self",
")",
".",
"clean",
"(",
")",
"d",
"=",
"defaultdict",
"(",
"lambda",
":",
"False",
")",
"d",
".",
"update",
"(",
"self",
".",
"cleaned_data",
")",
"# Having validation or full test enabled demands file upload",
"if",
"d",
"[",
"'attachment_test_validity'",
"]",
"and",
"not",
"d",
"[",
"'has_attachment'",
"]",
":",
"raise",
"ValidationError",
"(",
"'You cannot have a validation script without allowing file upload.'",
")",
"if",
"d",
"[",
"'attachment_test_full'",
"]",
"and",
"not",
"d",
"[",
"'has_attachment'",
"]",
":",
"raise",
"ValidationError",
"(",
"'You cannot have a full test script without allowing file upload.'",
")",
"# Having validation or full test enabled demands a test machine",
"if",
"d",
"[",
"'attachment_test_validity'",
"]",
"and",
"'test_machines'",
"in",
"d",
"and",
"not",
"len",
"(",
"d",
"[",
"'test_machines'",
"]",
")",
">",
"0",
":",
"raise",
"ValidationError",
"(",
"'You cannot have a validation script without specifying test machines.'",
")",
"if",
"d",
"[",
"'attachment_test_full'",
"]",
"and",
"'test_machines'",
"in",
"d",
"and",
"not",
"len",
"(",
"d",
"[",
"'test_machines'",
"]",
")",
">",
"0",
":",
"raise",
"ValidationError",
"(",
"'You cannot have a full test script without specifying test machines.'",
")",
"if",
"d",
"[",
"'download'",
"]",
"and",
"d",
"[",
"'description'",
"]",
":",
"raise",
"ValidationError",
"(",
"'You can only have a description link OR a description file.'",
")",
"if",
"not",
"d",
"[",
"'download'",
"]",
"and",
"not",
"d",
"[",
"'description'",
"]",
":",
"raise",
"ValidationError",
"(",
"'You need a description link OR a description file.'",
")",
"# Having test machines demands compilation or validation scripts",
"if",
"'test_machines'",
"in",
"d",
"and",
"len",
"(",
"d",
"[",
"'test_machines'",
"]",
")",
">",
"0",
"and",
"not",
"'attachment_test_validity'",
"in",
"d",
"and",
"not",
"'attachment_test_full'",
"in",
"d",
":",
"raise",
"ValidationError",
"(",
"'For using test machines, you need to enable validation or full test.'",
")"
]
| 69.75 | 36.107143 |
def cortex_plot_colors(the_map,
color=None, cmap=None, vmin=None, vmax=None, alpha=None,
underlay='curvature', mask=None):
'''
cortex_plot_colors(mesh, opts...) yields the cortex colors as a matrix of RGBA rows for the
given mesh and options.
The following options are accepted:
* color (default: None) specifies the color to plot for each vertex; this argument may take a
number of forms:
* None, do not plot a color over the underlay (the default)
* a matrix of RGB or RGBA values, one per vertex
* a property vector or a string naming a property, in which case the cmap, vmin, and vmax
arguments are used to generate colors
* a function that, when passed a single argument, a dict of the properties of a single
vertex, yields an RGB or RGBA list for that vertex.
* cmap (default: 'log_eccentricity') specifies the colormap to use in plotting if the color
argument provided is a property.
* vmin (default: None) specifies the minimum value for scaling the property when one is passed
as the color option. None means to use the min value of the property.
* vmax (default: None) specifies the maximum value for scaling the property when one is passed
as the color option. None means to use the max value of the property.
* underlay (default: 'curvature') specifies the default underlay color to plot for the
cortical surface; it may be None, 'curvature', or a color.
* alpha (default None) specifies the alpha values to use for the color plot. If None, then
leaves the alpha values from color unchanged. If a single number, then all alpha values in
color are multiplied by that value. If a list of values, one per vertex, then this vector
is multiplied by the alpha values. Finally, any negative value is set instead of multiplied.
So, for example, if there were 3 vertices with:
          * color = ((0,0,0,1), (0,0,1,0.5), (0,0,0.75,0.8))
* alpha = (-0.5, 1, 0.5)
      then the resulting colors plotted will be ((0,0,0,0.5), (0,0,1,0.5), (0,0,0.75,0.4)).
* mask (default: None) specifies a mask to use for the mesh; this is passed through to_mask()
to figure out the masking. Those vertices not in the mask are not plotted (but they will be
plotted in the underlay if it is not None).
'''
# okay, let's interpret the color
if color is None:
color = np.full((the_map.vertex_count, 4), 0.5)
color[:,3] = 0
try:
clr = matplotlib.colors.to_rgba(color)
# This is an rgb color to plot...
color = np.ones((the_map.vertex_count,4)) * matplotlib.colors.to_rgba(clr)
except Exception: pass
if pimms.is_vector(color) or pimms.is_str(color):
# it's a property that gets interpreted via the colormap
p = the_map.property(color)
# if the colormap is none, we can try to guess it
if cmap is None:
(cmap,(vmn,vmx)) = guess_cortex_cmap(color)
if vmin is None: vmin = vmn
if vmax is None: vmax = vmx
color = apply_cmap(p, cmap, vmin=vmin, vmax=vmax)
if not pimms.is_matrix(color):
# must be a function; let's try it...
color = to_rgba(the_map.map(color))
color = np.array(color)
if color.shape[1] != 4: color = np.hstack([color, np.ones([color.shape[0], 1])])
# okay, and the underlay...
if underlay is not None:
if pimms.is_str(underlay) and underlay.lower() in ['curvature', 'curv']:
underlay = apply_cmap(the_map.prop('curvature'), cmap_curvature, vmin=-1, vmax=1)
else:
try: underlay = np.ones((the_map.vertex_count, 4)) * to_rgba(underlay)
except Exception: raise ValueError('plot underlay failed: must be a color or curvature')
# okay, let's check on alpha...
if alpha is not None:
if pimms.is_number(alpha): alpha = np.full(color.shape[0], alpha)
else: alpha = the_map.property(alpha)
color[:,3] *= alpha
neg = (alpha < 0)
color[neg,3] = -alpha[neg]
alpha = color[:,3]
# and the mask...
if mask is not None:
ii = the_map.mask(mask, indices=True)
tmp = np.zeros(len(color))
tmp[ii] = color[ii,3]
color[:,3] = tmp
# then, blend with the underlay if need be
if underlay is not None:
color = color_overlap(underlay, color)
return color | [
"def",
"cortex_plot_colors",
"(",
"the_map",
",",
"color",
"=",
"None",
",",
"cmap",
"=",
"None",
",",
"vmin",
"=",
"None",
",",
"vmax",
"=",
"None",
",",
"alpha",
"=",
"None",
",",
"underlay",
"=",
"'curvature'",
",",
"mask",
"=",
"None",
")",
":",
"# okay, let's interpret the color",
"if",
"color",
"is",
"None",
":",
"color",
"=",
"np",
".",
"full",
"(",
"(",
"the_map",
".",
"vertex_count",
",",
"4",
")",
",",
"0.5",
")",
"color",
"[",
":",
",",
"3",
"]",
"=",
"0",
"try",
":",
"clr",
"=",
"matplotlib",
".",
"colors",
".",
"to_rgba",
"(",
"color",
")",
"# This is an rgb color to plot...",
"color",
"=",
"np",
".",
"ones",
"(",
"(",
"the_map",
".",
"vertex_count",
",",
"4",
")",
")",
"*",
"matplotlib",
".",
"colors",
".",
"to_rgba",
"(",
"clr",
")",
"except",
"Exception",
":",
"pass",
"if",
"pimms",
".",
"is_vector",
"(",
"color",
")",
"or",
"pimms",
".",
"is_str",
"(",
"color",
")",
":",
"# it's a property that gets interpreted via the colormap",
"p",
"=",
"the_map",
".",
"property",
"(",
"color",
")",
"# if the colormap is none, we can try to guess it",
"if",
"cmap",
"is",
"None",
":",
"(",
"cmap",
",",
"(",
"vmn",
",",
"vmx",
")",
")",
"=",
"guess_cortex_cmap",
"(",
"color",
")",
"if",
"vmin",
"is",
"None",
":",
"vmin",
"=",
"vmn",
"if",
"vmax",
"is",
"None",
":",
"vmax",
"=",
"vmx",
"color",
"=",
"apply_cmap",
"(",
"p",
",",
"cmap",
",",
"vmin",
"=",
"vmin",
",",
"vmax",
"=",
"vmax",
")",
"if",
"not",
"pimms",
".",
"is_matrix",
"(",
"color",
")",
":",
"# must be a function; let's try it...",
"color",
"=",
"to_rgba",
"(",
"the_map",
".",
"map",
"(",
"color",
")",
")",
"color",
"=",
"np",
".",
"array",
"(",
"color",
")",
"if",
"color",
".",
"shape",
"[",
"1",
"]",
"!=",
"4",
":",
"color",
"=",
"np",
".",
"hstack",
"(",
"[",
"color",
",",
"np",
".",
"ones",
"(",
"[",
"color",
".",
"shape",
"[",
"0",
"]",
",",
"1",
"]",
")",
"]",
")",
"# okay, and the underlay...",
"if",
"underlay",
"is",
"not",
"None",
":",
"if",
"pimms",
".",
"is_str",
"(",
"underlay",
")",
"and",
"underlay",
".",
"lower",
"(",
")",
"in",
"[",
"'curvature'",
",",
"'curv'",
"]",
":",
"underlay",
"=",
"apply_cmap",
"(",
"the_map",
".",
"prop",
"(",
"'curvature'",
")",
",",
"cmap_curvature",
",",
"vmin",
"=",
"-",
"1",
",",
"vmax",
"=",
"1",
")",
"else",
":",
"try",
":",
"underlay",
"=",
"np",
".",
"ones",
"(",
"(",
"the_map",
".",
"vertex_count",
",",
"4",
")",
")",
"*",
"to_rgba",
"(",
"underlay",
")",
"except",
"Exception",
":",
"raise",
"ValueError",
"(",
"'plot underlay failed: must be a color or curvature'",
")",
"# okay, let's check on alpha...",
"if",
"alpha",
"is",
"not",
"None",
":",
"if",
"pimms",
".",
"is_number",
"(",
"alpha",
")",
":",
"alpha",
"=",
"np",
".",
"full",
"(",
"color",
".",
"shape",
"[",
"0",
"]",
",",
"alpha",
")",
"else",
":",
"alpha",
"=",
"the_map",
".",
"property",
"(",
"alpha",
")",
"color",
"[",
":",
",",
"3",
"]",
"*=",
"alpha",
"neg",
"=",
"(",
"alpha",
"<",
"0",
")",
"color",
"[",
"neg",
",",
"3",
"]",
"=",
"-",
"alpha",
"[",
"neg",
"]",
"alpha",
"=",
"color",
"[",
":",
",",
"3",
"]",
"# and the mask...",
"if",
"mask",
"is",
"not",
"None",
":",
"ii",
"=",
"the_map",
".",
"mask",
"(",
"mask",
",",
"indices",
"=",
"True",
")",
"tmp",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"color",
")",
")",
"tmp",
"[",
"ii",
"]",
"=",
"color",
"[",
"ii",
",",
"3",
"]",
"color",
"[",
":",
",",
"3",
"]",
"=",
"tmp",
"# then, blend with the underlay if need be",
"if",
"underlay",
"is",
"not",
"None",
":",
"color",
"=",
"color_overlap",
"(",
"underlay",
",",
"color",
")",
"return",
"color"
]
| 53 | 24.904762 |
def assoc(m, *args, **kw):
'''
    assoc(m, k1, v1, k2, v2...) yields a map equivalent to m with the arguments k1, k2, etc.
associated with the values v1, v2, etc. If m is a mutable python dictionary, this is
equivalent to using m[k] = v for all the given key-value pairs then returning m itself. If m
is a persistent map, this is equivalent to calling m = m.set(k, v) for all the key-value pairs
then returning m.
Keys given in the arguments list are always associated in order, then keys in the keyword
arguments are associated, meaning that the keyword arguments have precedence.
Note that in addition to key-value pairs in the ordinary arguments list, assoc() also uses the
key-value pairs given as named/keyword arguments.
If m is not a map but is instead a list or persistent vector, this operates as if the
list/vector is a map whose keys are integers. If an item is added beyond the extent of the list
then it is extended by adding None to the list. If negative indices are given, they are always
interpreted relative to the end of the initial input list m and not in terms of the list as it
has grown at the point that the key-value pair is processed.
'''
if len(args) % 2 != 0: raise ValueError('assoc arguments must be given as key-value pairs')
args = (u for it in [zip(args[::2],args[1::2]), six.iteritems(kw)] for u in it)
if is_pmap(m):
for (k,v) in args: m = m.set(k, v)
return m
elif is_map(m):
for (k,v) in args: m[k] = v
return m
elif isinstance(m, tuple_type):
args = list(args)
return m if len(args) == 0 else tuple(assoc(list(m), *args, **kw))
elif isinstance(m, list_type):
n0 = len(m)
n = n0
for (k,v) in args:
            if not is_int(k): raise TypeError('Keys for list args must be integers')
            if k < -n: raise KeyError(k)
if k < 0: k = n0 + k
if k >= n: (n,m) = (k+1, m + [None]*(k - n + 1))
m[k] = v
return m
elif is_nparray(m):
args = list(args)
if len(args) == 0: return m
return np.asarray(assoc(m.tolist(), *[x for u in args for x in u]))
else: raise ValueError('Cannot assoc given type: %s' % type(m)) | [
"def",
"assoc",
"(",
"m",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"if",
"len",
"(",
"args",
")",
"%",
"2",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"'assoc arguments must be given as key-value pairs'",
")",
"args",
"=",
"(",
"u",
"for",
"it",
"in",
"[",
"zip",
"(",
"args",
"[",
":",
":",
"2",
"]",
",",
"args",
"[",
"1",
":",
":",
"2",
"]",
")",
",",
"six",
".",
"iteritems",
"(",
"kw",
")",
"]",
"for",
"u",
"in",
"it",
")",
"if",
"is_pmap",
"(",
"m",
")",
":",
"for",
"(",
"k",
",",
"v",
")",
"in",
"args",
":",
"m",
"=",
"m",
".",
"set",
"(",
"k",
",",
"v",
")",
"return",
"m",
"elif",
"is_map",
"(",
"m",
")",
":",
"for",
"(",
"k",
",",
"v",
")",
"in",
"args",
":",
"m",
"[",
"k",
"]",
"=",
"v",
"return",
"m",
"elif",
"isinstance",
"(",
"m",
",",
"tuple_type",
")",
":",
"args",
"=",
"list",
"(",
"args",
")",
"return",
"m",
"if",
"len",
"(",
"args",
")",
"==",
"0",
"else",
"tuple",
"(",
"assoc",
"(",
"list",
"(",
"m",
")",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
")",
"elif",
"isinstance",
"(",
"m",
",",
"list_type",
")",
":",
"n0",
"=",
"len",
"(",
"m",
")",
"n",
"=",
"n0",
"for",
"(",
"k",
",",
"v",
")",
"in",
"args",
":",
"if",
"not",
"is_int",
"(",
"k",
")",
":",
"TypeError",
"(",
"'Keys for list args must be integers'",
")",
"if",
"k",
"<",
"-",
"n",
":",
"KeyError",
"(",
"k",
")",
"if",
"k",
"<",
"0",
":",
"k",
"=",
"n0",
"+",
"k",
"if",
"k",
">=",
"n",
":",
"(",
"n",
",",
"m",
")",
"=",
"(",
"k",
"+",
"1",
",",
"m",
"+",
"[",
"None",
"]",
"*",
"(",
"k",
"-",
"n",
"+",
"1",
")",
")",
"m",
"[",
"k",
"]",
"=",
"v",
"return",
"m",
"elif",
"is_nparray",
"(",
"m",
")",
":",
"args",
"=",
"list",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"==",
"0",
":",
"return",
"m",
"return",
"np",
".",
"asarray",
"(",
"assoc",
"(",
"m",
".",
"tolist",
"(",
")",
",",
"*",
"[",
"x",
"for",
"u",
"in",
"args",
"for",
"x",
"in",
"u",
"]",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Cannot assoc given type: %s'",
"%",
"type",
"(",
"m",
")",
")"
]
| 48.673913 | 30.021739 |
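A minimal sketch of the plain-dict branch only; unlike the original, it copies the mapping rather than mutating it (the full function above also covers persistent maps, tuples, lists, and numpy arrays):

```python
def assoc_dict(m, *args, **kw):
    if len(args) % 2 != 0:
        raise ValueError('assoc arguments must be given as key-value pairs')
    m = dict(m)  # copy, so the caller's mapping is untouched
    m.update(zip(args[::2], args[1::2]))
    m.update(kw)  # keyword arguments take precedence, as documented
    return m

print(assoc_dict({'a': 1}, 'b', 2, c=3))  # -> {'a': 1, 'b': 2, 'c': 3}
```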
def _set_information(self, v, load=False):
"""
Setter method for information, mapped from YANG variable /routing_system/ip/dhcp/relay/information (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_information is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_information() directly.
YANG Description: DHCP Relay Information Option
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=information.information, is_container='container', presence=False, yang_name="information", rest_name="information", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure DHCP Relay Information Option', u'callpoint': u'DhcpRelayCallpoint', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-dhcp', defining_module='brocade-dhcp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """information must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=information.information, is_container='container', presence=False, yang_name="information", rest_name="information", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure DHCP Relay Information Option', u'callpoint': u'DhcpRelayCallpoint', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-dhcp', defining_module='brocade-dhcp', yang_type='container', is_config=True)""",
})
self.__information = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_information",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"information",
".",
"information",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"False",
",",
"yang_name",
"=",
"\"information\"",
",",
"rest_name",
"=",
"\"information\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Configure DHCP Relay Information Option'",
",",
"u'callpoint'",
":",
"u'DhcpRelayCallpoint'",
",",
"u'cli-incomplete-no'",
":",
"None",
",",
"u'cli-incomplete-command'",
":",
"None",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-dhcp'",
",",
"defining_module",
"=",
"'brocade-dhcp'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"information must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=information.information, is_container='container', presence=False, yang_name=\"information\", rest_name=\"information\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure DHCP Relay Information Option', u'callpoint': u'DhcpRelayCallpoint', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-dhcp', defining_module='brocade-dhcp', yang_type='container', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__information",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
]
| 78.958333 | 38.208333 |
def _rfc822(date):
"""Parse RFC 822 dates and times
http://tools.ietf.org/html/rfc822#section-5
There are some formatting differences that are accounted for:
1. Years may be two or four digits.
2. The month and day can be swapped.
3. Additional timezone names are supported.
4. A default time and timezone are assumed if only a date is present.
"""
daynames = set(['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'])
months = {
'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6,
'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12,
}
timezonenames = {
'ut': 0, 'gmt': 0, 'z': 0,
'adt': -3, 'ast': -4, 'at': -4,
'edt': -4, 'est': -5, 'et': -5,
'cdt': -5, 'cst': -6, 'ct': -6,
'mdt': -6, 'mst': -7, 'mt': -7,
'pdt': -7, 'pst': -8, 'pt': -8,
'a': -1, 'n': 1,
'm': -12, 'y': 12,
}
parts = date.lower().split()
if len(parts) < 5:
# Assume that the time and timezone are missing
parts.extend(('00:00:00', '0000'))
# Remove the day name
if parts[0][:3] in daynames:
parts = parts[1:]
if len(parts) < 5:
# If there are still fewer than five parts, there's not enough
# information to interpret this
return None
try:
day = int(parts[0])
except ValueError:
# Check if the day and month are swapped
if months.get(parts[0][:3]):
try:
day = int(parts[1])
except ValueError:
return None
else:
parts[1] = parts[0]
else:
return None
month = months.get(parts[1][:3])
if not month:
return None
try:
year = int(parts[2])
except ValueError:
return None
# Normalize two-digit years:
# Anything in the 90's is interpreted as 1990 and on
# Anything 89 or less is interpreted as 2089 or before
if len(parts[2]) <= 2:
year += (1900, 2000)[year < 90]
timeparts = parts[3].split(':')
timeparts = timeparts + ([0] * (3 - len(timeparts)))
try:
(hour, minute, second) = map(int, timeparts)
except ValueError:
return None
tzhour = 0
tzmin = 0
# Strip 'Etc/' from the timezone
if parts[4].startswith('etc/'):
parts[4] = parts[4][4:]
# Normalize timezones that start with 'gmt':
# GMT-05:00 => -0500
# GMT => GMT
if parts[4].startswith('gmt'):
parts[4] = ''.join(parts[4][3:].split(':')) or 'gmt'
# Handle timezones like '-0500', '+0500', and 'EST'
if parts[4] and parts[4][0] in ('-', '+'):
try:
tzhour = int(parts[4][1:3])
tzmin = int(parts[4][3:])
except ValueError:
return None
if parts[4].startswith('-'):
tzhour = tzhour * -1
tzmin = tzmin * -1
else:
tzhour = timezonenames.get(parts[4], 0)
# Create the datetime object and timezone delta objects
try:
stamp = datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None
delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
# Return the date and timestamp in a UTC 9-tuple
try:
return stamp - delta
except OverflowError:
return None | [
"def",
"_rfc822",
"(",
"date",
")",
":",
"daynames",
"=",
"set",
"(",
"[",
"'mon'",
",",
"'tue'",
",",
"'wed'",
",",
"'thu'",
",",
"'fri'",
",",
"'sat'",
",",
"'sun'",
"]",
")",
"months",
"=",
"{",
"'jan'",
":",
"1",
",",
"'feb'",
":",
"2",
",",
"'mar'",
":",
"3",
",",
"'apr'",
":",
"4",
",",
"'may'",
":",
"5",
",",
"'jun'",
":",
"6",
",",
"'jul'",
":",
"7",
",",
"'aug'",
":",
"8",
",",
"'sep'",
":",
"9",
",",
"'oct'",
":",
"10",
",",
"'nov'",
":",
"11",
",",
"'dec'",
":",
"12",
",",
"}",
"timezonenames",
"=",
"{",
"'ut'",
":",
"0",
",",
"'gmt'",
":",
"0",
",",
"'z'",
":",
"0",
",",
"'adt'",
":",
"-",
"3",
",",
"'ast'",
":",
"-",
"4",
",",
"'at'",
":",
"-",
"4",
",",
"'edt'",
":",
"-",
"4",
",",
"'est'",
":",
"-",
"5",
",",
"'et'",
":",
"-",
"5",
",",
"'cdt'",
":",
"-",
"5",
",",
"'cst'",
":",
"-",
"6",
",",
"'ct'",
":",
"-",
"6",
",",
"'mdt'",
":",
"-",
"6",
",",
"'mst'",
":",
"-",
"7",
",",
"'mt'",
":",
"-",
"7",
",",
"'pdt'",
":",
"-",
"7",
",",
"'pst'",
":",
"-",
"8",
",",
"'pt'",
":",
"-",
"8",
",",
"'a'",
":",
"-",
"1",
",",
"'n'",
":",
"1",
",",
"'m'",
":",
"-",
"12",
",",
"'y'",
":",
"12",
",",
"}",
"parts",
"=",
"date",
".",
"lower",
"(",
")",
".",
"split",
"(",
")",
"if",
"len",
"(",
"parts",
")",
"<",
"5",
":",
"# Assume that the time and timezone are missing",
"parts",
".",
"extend",
"(",
"(",
"'00:00:00'",
",",
"'0000'",
")",
")",
"# Remove the day name",
"if",
"parts",
"[",
"0",
"]",
"[",
":",
"3",
"]",
"in",
"daynames",
":",
"parts",
"=",
"parts",
"[",
"1",
":",
"]",
"if",
"len",
"(",
"parts",
")",
"<",
"5",
":",
"# If there are still fewer than five parts, there's not enough",
"# information to interpret this",
"return",
"None",
"try",
":",
"day",
"=",
"int",
"(",
"parts",
"[",
"0",
"]",
")",
"except",
"ValueError",
":",
"# Check if the day and month are swapped",
"if",
"months",
".",
"get",
"(",
"parts",
"[",
"0",
"]",
"[",
":",
"3",
"]",
")",
":",
"try",
":",
"day",
"=",
"int",
"(",
"parts",
"[",
"1",
"]",
")",
"except",
"ValueError",
":",
"return",
"None",
"else",
":",
"parts",
"[",
"1",
"]",
"=",
"parts",
"[",
"0",
"]",
"else",
":",
"return",
"None",
"month",
"=",
"months",
".",
"get",
"(",
"parts",
"[",
"1",
"]",
"[",
":",
"3",
"]",
")",
"if",
"not",
"month",
":",
"return",
"None",
"try",
":",
"year",
"=",
"int",
"(",
"parts",
"[",
"2",
"]",
")",
"except",
"ValueError",
":",
"return",
"None",
"# Normalize two-digit years:",
"# Anything in the 90's is interpreted as 1990 and on",
"# Anything 89 or less is interpreted as 2089 or before",
"if",
"len",
"(",
"parts",
"[",
"2",
"]",
")",
"<=",
"2",
":",
"year",
"+=",
"(",
"1900",
",",
"2000",
")",
"[",
"year",
"<",
"90",
"]",
"timeparts",
"=",
"parts",
"[",
"3",
"]",
".",
"split",
"(",
"':'",
")",
"timeparts",
"=",
"timeparts",
"+",
"(",
"[",
"0",
"]",
"*",
"(",
"3",
"-",
"len",
"(",
"timeparts",
")",
")",
")",
"try",
":",
"(",
"hour",
",",
"minute",
",",
"second",
")",
"=",
"map",
"(",
"int",
",",
"timeparts",
")",
"except",
"ValueError",
":",
"return",
"None",
"tzhour",
"=",
"0",
"tzmin",
"=",
"0",
"# Strip 'Etc/' from the timezone",
"if",
"parts",
"[",
"4",
"]",
".",
"startswith",
"(",
"'etc/'",
")",
":",
"parts",
"[",
"4",
"]",
"=",
"parts",
"[",
"4",
"]",
"[",
"4",
":",
"]",
"# Normalize timezones that start with 'gmt':",
"# GMT-05:00 => -0500",
"# GMT => GMT",
"if",
"parts",
"[",
"4",
"]",
".",
"startswith",
"(",
"'gmt'",
")",
":",
"parts",
"[",
"4",
"]",
"=",
"''",
".",
"join",
"(",
"parts",
"[",
"4",
"]",
"[",
"3",
":",
"]",
".",
"split",
"(",
"':'",
")",
")",
"or",
"'gmt'",
"# Handle timezones like '-0500', '+0500', and 'EST'",
"if",
"parts",
"[",
"4",
"]",
"and",
"parts",
"[",
"4",
"]",
"[",
"0",
"]",
"in",
"(",
"'-'",
",",
"'+'",
")",
":",
"try",
":",
"tzhour",
"=",
"int",
"(",
"parts",
"[",
"4",
"]",
"[",
"1",
":",
"3",
"]",
")",
"tzmin",
"=",
"int",
"(",
"parts",
"[",
"4",
"]",
"[",
"3",
":",
"]",
")",
"except",
"ValueError",
":",
"return",
"None",
"if",
"parts",
"[",
"4",
"]",
".",
"startswith",
"(",
"'-'",
")",
":",
"tzhour",
"=",
"tzhour",
"*",
"-",
"1",
"tzmin",
"=",
"tzmin",
"*",
"-",
"1",
"else",
":",
"tzhour",
"=",
"timezonenames",
".",
"get",
"(",
"parts",
"[",
"4",
"]",
",",
"0",
")",
"# Create the datetime object and timezone delta objects",
"try",
":",
"stamp",
"=",
"datetime",
".",
"datetime",
"(",
"year",
",",
"month",
",",
"day",
",",
"hour",
",",
"minute",
",",
"second",
")",
"except",
"ValueError",
":",
"return",
"None",
"delta",
"=",
"datetime",
".",
"timedelta",
"(",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"tzmin",
",",
"tzhour",
")",
"# Return the date and timestamp in a UTC 9-tuple",
"try",
":",
"return",
"stamp",
"-",
"delta",
"except",
"OverflowError",
":",
"return",
"None"
]
| 31.892157 | 15.823529 |
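For the common well-formed inputs this parser targets, the standard library's email.utils gives comparable results (note it returns an aware datetime, while the function above returns a naive UTC one):

```python
from email.utils import parsedate_to_datetime

# Day name, four-digit year, numeric offset -- the easy case.
print(parsedate_to_datetime('Sat, 07 Sep 2002 00:00:01 +0500'))
# -> 2002-09-07 00:00:01+05:00
```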
def __updateJobResultsPeriodic(self):
"""
Periodic check to see if this is the best model. This should only have an
effect if this is the *first* model to report its progress
"""
if self._isBestModelStored and not self._isBestModel:
return
while True:
jobResultsStr = self._jobsDAO.jobGetFields(self._jobID, ['results'])[0]
if jobResultsStr is None:
jobResults = {}
else:
self._isBestModelStored = True
if not self._isBestModel:
return
jobResults = json.loads(jobResultsStr)
bestModel = jobResults.get('bestModel', None)
bestMetric = jobResults.get('bestValue', None)
isSaved = jobResults.get('saved', False)
# If there is a best model, and it is not the same as the current model
# we should wait till we have processed all of our records to see if
      # we are the best
if (bestModel is not None) and (self._modelID != bestModel):
self._isBestModel = False
return
# Make sure prediction output stream is ready before we present our model
# as "bestModel"; sometimes this takes a long time, so update the model's
# timestamp to help avoid getting orphaned
self.__flushPredictionCache()
self._jobsDAO.modelUpdateTimestamp(self._modelID)
metrics = self._getMetrics()
jobResults['bestModel'] = self._modelID
jobResults['bestValue'] = metrics[self._optimizedMetricLabel]
jobResults['metrics'] = metrics
jobResults['saved'] = False
newResults = json.dumps(jobResults)
isUpdated = self._jobsDAO.jobSetFieldIfEqual(self._jobID,
fieldName='results',
curValue=jobResultsStr,
newValue=newResults)
if isUpdated or (not isUpdated and newResults==jobResultsStr):
self._isBestModel = True
break | [
"def",
"__updateJobResultsPeriodic",
"(",
"self",
")",
":",
"if",
"self",
".",
"_isBestModelStored",
"and",
"not",
"self",
".",
"_isBestModel",
":",
"return",
"while",
"True",
":",
"jobResultsStr",
"=",
"self",
".",
"_jobsDAO",
".",
"jobGetFields",
"(",
"self",
".",
"_jobID",
",",
"[",
"'results'",
"]",
")",
"[",
"0",
"]",
"if",
"jobResultsStr",
"is",
"None",
":",
"jobResults",
"=",
"{",
"}",
"else",
":",
"self",
".",
"_isBestModelStored",
"=",
"True",
"if",
"not",
"self",
".",
"_isBestModel",
":",
"return",
"jobResults",
"=",
"json",
".",
"loads",
"(",
"jobResultsStr",
")",
"bestModel",
"=",
"jobResults",
".",
"get",
"(",
"'bestModel'",
",",
"None",
")",
"bestMetric",
"=",
"jobResults",
".",
"get",
"(",
"'bestValue'",
",",
"None",
")",
"isSaved",
"=",
"jobResults",
".",
"get",
"(",
"'saved'",
",",
"False",
")",
"# If there is a best model, and it is not the same as the current model",
"# we should wait till we have processed all of our records to see if",
"# we are the the best",
"if",
"(",
"bestModel",
"is",
"not",
"None",
")",
"and",
"(",
"self",
".",
"_modelID",
"!=",
"bestModel",
")",
":",
"self",
".",
"_isBestModel",
"=",
"False",
"return",
"# Make sure prediction output stream is ready before we present our model",
"# as \"bestModel\"; sometimes this takes a long time, so update the model's",
"# timestamp to help avoid getting orphaned",
"self",
".",
"__flushPredictionCache",
"(",
")",
"self",
".",
"_jobsDAO",
".",
"modelUpdateTimestamp",
"(",
"self",
".",
"_modelID",
")",
"metrics",
"=",
"self",
".",
"_getMetrics",
"(",
")",
"jobResults",
"[",
"'bestModel'",
"]",
"=",
"self",
".",
"_modelID",
"jobResults",
"[",
"'bestValue'",
"]",
"=",
"metrics",
"[",
"self",
".",
"_optimizedMetricLabel",
"]",
"jobResults",
"[",
"'metrics'",
"]",
"=",
"metrics",
"jobResults",
"[",
"'saved'",
"]",
"=",
"False",
"newResults",
"=",
"json",
".",
"dumps",
"(",
"jobResults",
")",
"isUpdated",
"=",
"self",
".",
"_jobsDAO",
".",
"jobSetFieldIfEqual",
"(",
"self",
".",
"_jobID",
",",
"fieldName",
"=",
"'results'",
",",
"curValue",
"=",
"jobResultsStr",
",",
"newValue",
"=",
"newResults",
")",
"if",
"isUpdated",
"or",
"(",
"not",
"isUpdated",
"and",
"newResults",
"==",
"jobResultsStr",
")",
":",
"self",
".",
"_isBestModel",
"=",
"True",
"break"
]
| 37.288462 | 21.788462 |
def cartesian_product(parameter_dict, combined_parameters=()):
""" Generates a Cartesian product of the input parameter dictionary.
For example:
>>> print cartesian_product({'param1':[1,2,3], 'param2':[42.0, 52.5]})
{'param1':[1,1,2,2,3,3],'param2': [42.0,52.5,42.0,52.5,42.0,52.5]}
:param parameter_dict:
Dictionary containing parameter names as keys and iterables of data to explore.
:param combined_parameters:
Tuple of tuples. Defines the order of the parameters and parameters that are
linked together.
        If an inner tuple contains only a single item, you can omit the
inner tuple brackets.
For example:
>>> print cartesian_product( {'param1': [42.0, 52.5], 'param2':['a', 'b'], 'param3' : [1,2,3]}, ('param3',('param1', 'param2')))
    {'param3':[1,1,2,2,3,3], 'param1':[42.0,52.5,42.0,52.5,42.0,52.5], 'param2':['a','b','a','b','a','b']}
:returns: Dictionary with cartesian product lists.
"""
if not combined_parameters:
combined_parameters = list(parameter_dict)
else:
combined_parameters = list(combined_parameters)
for idx, item in enumerate(combined_parameters):
if isinstance(item, str):
combined_parameters[idx] = (item,)
iterator_list = []
for item_tuple in combined_parameters:
inner_iterator_list = [parameter_dict[key] for key in item_tuple]
zipped_iterator = zip(*inner_iterator_list)
iterator_list.append(zipped_iterator)
result_dict = {}
for key in parameter_dict:
result_dict[key] = []
cartesian_iterator = itools.product(*iterator_list)
for cartesian_tuple in cartesian_iterator:
for idx, item_tuple in enumerate(combined_parameters):
for inneridx, key in enumerate(item_tuple):
result_dict[key].append(cartesian_tuple[idx][inneridx])
return result_dict | [
"def",
"cartesian_product",
"(",
"parameter_dict",
",",
"combined_parameters",
"=",
"(",
")",
")",
":",
"if",
"not",
"combined_parameters",
":",
"combined_parameters",
"=",
"list",
"(",
"parameter_dict",
")",
"else",
":",
"combined_parameters",
"=",
"list",
"(",
"combined_parameters",
")",
"for",
"idx",
",",
"item",
"in",
"enumerate",
"(",
"combined_parameters",
")",
":",
"if",
"isinstance",
"(",
"item",
",",
"str",
")",
":",
"combined_parameters",
"[",
"idx",
"]",
"=",
"(",
"item",
",",
")",
"iterator_list",
"=",
"[",
"]",
"for",
"item_tuple",
"in",
"combined_parameters",
":",
"inner_iterator_list",
"=",
"[",
"parameter_dict",
"[",
"key",
"]",
"for",
"key",
"in",
"item_tuple",
"]",
"zipped_iterator",
"=",
"zip",
"(",
"*",
"inner_iterator_list",
")",
"iterator_list",
".",
"append",
"(",
"zipped_iterator",
")",
"result_dict",
"=",
"{",
"}",
"for",
"key",
"in",
"parameter_dict",
":",
"result_dict",
"[",
"key",
"]",
"=",
"[",
"]",
"cartesian_iterator",
"=",
"itools",
".",
"product",
"(",
"*",
"iterator_list",
")",
"for",
"cartesian_tuple",
"in",
"cartesian_iterator",
":",
"for",
"idx",
",",
"item_tuple",
"in",
"enumerate",
"(",
"combined_parameters",
")",
":",
"for",
"inneridx",
",",
"key",
"in",
"enumerate",
"(",
"item_tuple",
")",
":",
"result_dict",
"[",
"key",
"]",
".",
"append",
"(",
"cartesian_tuple",
"[",
"idx",
"]",
"[",
"inneridx",
"]",
")",
"return",
"result_dict"
]
| 34.054545 | 26.727273 |
def getMd5Checksum(self):
"""
Returns the MD5 checksum for this reference set. This checksum is
calculated by making a list of `Reference.md5checksum` for all
`Reference`s in this set. We then sort this list, and take the
MD5 hash of all the strings concatenated together.
"""
references = sorted(
self.getReferences(),
key=lambda ref: ref.getMd5Checksum())
checksums = ''.join([ref.getMd5Checksum() for ref in references])
md5checksum = hashlib.md5(checksums).hexdigest()
return md5checksum | [
"def",
"getMd5Checksum",
"(",
"self",
")",
":",
"references",
"=",
"sorted",
"(",
"self",
".",
"getReferences",
"(",
")",
",",
"key",
"=",
"lambda",
"ref",
":",
"ref",
".",
"getMd5Checksum",
"(",
")",
")",
"checksums",
"=",
"''",
".",
"join",
"(",
"[",
"ref",
".",
"getMd5Checksum",
"(",
")",
"for",
"ref",
"in",
"references",
"]",
")",
"md5checksum",
"=",
"hashlib",
".",
"md5",
"(",
"checksums",
")",
".",
"hexdigest",
"(",
")",
"return",
"md5checksum"
]
| 44.846154 | 16.692308 |
def save_model(self, path, output_format=None):
"""Save the :class:`pybel.BELGraph` using one of the outputs from
:py:mod:`pybel`
Parameters
----------
path : str
The path to output to
output_format : Optional[str]
Output format as ``cx``, ``pickle``, ``json`` or defaults to ``bel``
"""
if output_format == 'pickle':
pybel.to_pickle(self.model, path)
else:
with open(path, 'w') as fh:
if output_format == 'json':
pybel.to_json_file(self.model, fh)
elif output_format == 'cx':
pybel.to_cx_file(self.model, fh)
else: # output_format == 'bel':
pybel.to_bel(self.model, fh) | [
"def",
"save_model",
"(",
"self",
",",
"path",
",",
"output_format",
"=",
"None",
")",
":",
"if",
"output_format",
"==",
"'pickle'",
":",
"pybel",
".",
"to_pickle",
"(",
"self",
".",
"model",
",",
"path",
")",
"else",
":",
"with",
"open",
"(",
"path",
",",
"'w'",
")",
"as",
"fh",
":",
"if",
"output_format",
"==",
"'json'",
":",
"pybel",
".",
"to_json_file",
"(",
"self",
".",
"model",
",",
"fh",
")",
"elif",
"output_format",
"==",
"'cx'",
":",
"pybel",
".",
"to_cx_file",
"(",
"self",
".",
"model",
",",
"fh",
")",
"else",
":",
"# output_format == 'bel':",
"pybel",
".",
"to_bel",
"(",
"self",
".",
"model",
",",
"fh",
")"
]
| 37.095238 | 12.52381 |
def consistent_shuffle(*lists):
"""
Shuffle lists consistently.
Parameters
----------
*lists
Variable length number of lists
Returns
-------
shuffled_lists : tuple of lists
All of the lists are shuffled consistently
Examples
--------
>>> import mpu, random; random.seed(8)
>>> mpu.consistent_shuffle([1,2,3], ['a', 'b', 'c'], ['A', 'B', 'C'])
([3, 2, 1], ['c', 'b', 'a'], ['C', 'B', 'A'])
"""
perm = list(range(len(lists[0])))
random.shuffle(perm)
lists = tuple([sublist[index] for index in perm]
for sublist in lists)
return lists | [
"def",
"consistent_shuffle",
"(",
"*",
"lists",
")",
":",
"perm",
"=",
"list",
"(",
"range",
"(",
"len",
"(",
"lists",
"[",
"0",
"]",
")",
")",
")",
"random",
".",
"shuffle",
"(",
"perm",
")",
"lists",
"=",
"tuple",
"(",
"[",
"sublist",
"[",
"index",
"]",
"for",
"index",
"in",
"perm",
"]",
"for",
"sublist",
"in",
"lists",
")",
"return",
"lists"
]
| 24.64 | 18 |
def rot1(angle, form='c'):
"""Euler rotation about first axis
This computes the rotation matrix associated with a rotation about the first
axis. It will output matrices assuming column or row format vectors.
For example, to transform a vector from reference frame b to reference frame a:
Column Vectors : a = rot1(angle, 'c').dot(b)
Row Vectors : a = b.dot(rot1(angle, 'r'))
It should be clear that rot1(angle, 'c') = rot1(angle, 'r').T
Parameters
----------
angle : float
Angle of rotation about first axis. In radians
form : str
Flag to choose between row or column vector convention.
Returns
-------
mat : numpy.ndarray of shape (3,3)
Rotation matrix
"""
cos_a = np.cos(angle)
sin_a = np.sin(angle)
rot_mat = np.identity(3)
if form=='c':
rot_mat[1, 1] = cos_a
rot_mat[1, 2] = -sin_a
rot_mat[2, 1] = sin_a
rot_mat[2, 2] = cos_a
elif form=='r':
rot_mat[1, 1] = cos_a
rot_mat[1, 2] = sin_a
rot_mat[2, 1] = -sin_a
rot_mat[2, 2] = cos_a
else:
print("Unknown input. 'r' or 'c' for row/column notation.")
return 1
return rot_mat | [
"def",
"rot1",
"(",
"angle",
",",
"form",
"=",
"'c'",
")",
":",
"cos_a",
"=",
"np",
".",
"cos",
"(",
"angle",
")",
"sin_a",
"=",
"np",
".",
"sin",
"(",
"angle",
")",
"rot_mat",
"=",
"np",
".",
"identity",
"(",
"3",
")",
"if",
"form",
"==",
"'c'",
":",
"rot_mat",
"[",
"1",
",",
"1",
"]",
"=",
"cos_a",
"rot_mat",
"[",
"1",
",",
"2",
"]",
"=",
"-",
"sin_a",
"rot_mat",
"[",
"2",
",",
"1",
"]",
"=",
"sin_a",
"rot_mat",
"[",
"2",
",",
"2",
"]",
"=",
"cos_a",
"elif",
"form",
"==",
"'r'",
":",
"rot_mat",
"[",
"1",
",",
"1",
"]",
"=",
"cos_a",
"rot_mat",
"[",
"1",
",",
"2",
"]",
"=",
"sin_a",
"rot_mat",
"[",
"2",
",",
"1",
"]",
"=",
"-",
"sin_a",
"rot_mat",
"[",
"2",
",",
"2",
"]",
"=",
"cos_a",
"else",
":",
"print",
"(",
"\"Unknown input. 'r' or 'c' for row/column notation.\"",
")",
"return",
"1",
"return",
"rot_mat"
]
| 26.931818 | 22.318182 |
def to_text(self, digest, blob, mime_type):
"""Convert a file to plain text.
Useful for full-text indexing. Returns a Unicode string.
"""
# Special case, for now (XXX).
if mime_type.startswith("image/"):
return ""
cache_key = "txt:" + digest
text = self.cache.get(cache_key)
if text:
return text
# Direct conversion possible
for handler in self.handlers:
if handler.accept(mime_type, "text/plain"):
text = handler.convert(blob)
self.cache[cache_key] = text
return text
# Use PDF as a pivot format
pdf = self.to_pdf(digest, blob, mime_type)
for handler in self.handlers:
if handler.accept("application/pdf", "text/plain"):
text = handler.convert(pdf)
self.cache[cache_key] = text
return text
raise HandlerNotFound(f"No handler found to convert from {mime_type} to text") | [
"def",
"to_text",
"(",
"self",
",",
"digest",
",",
"blob",
",",
"mime_type",
")",
":",
"# Special case, for now (XXX).",
"if",
"mime_type",
".",
"startswith",
"(",
"\"image/\"",
")",
":",
"return",
"\"\"",
"cache_key",
"=",
"\"txt:\"",
"+",
"digest",
"text",
"=",
"self",
".",
"cache",
".",
"get",
"(",
"cache_key",
")",
"if",
"text",
":",
"return",
"text",
"# Direct conversion possible",
"for",
"handler",
"in",
"self",
".",
"handlers",
":",
"if",
"handler",
".",
"accept",
"(",
"mime_type",
",",
"\"text/plain\"",
")",
":",
"text",
"=",
"handler",
".",
"convert",
"(",
"blob",
")",
"self",
".",
"cache",
"[",
"cache_key",
"]",
"=",
"text",
"return",
"text",
"# Use PDF as a pivot format",
"pdf",
"=",
"self",
".",
"to_pdf",
"(",
"digest",
",",
"blob",
",",
"mime_type",
")",
"for",
"handler",
"in",
"self",
".",
"handlers",
":",
"if",
"handler",
".",
"accept",
"(",
"\"application/pdf\"",
",",
"\"text/plain\"",
")",
":",
"text",
"=",
"handler",
".",
"convert",
"(",
"pdf",
")",
"self",
".",
"cache",
"[",
"cache_key",
"]",
"=",
"text",
"return",
"text",
"raise",
"HandlerNotFound",
"(",
"f\"No handler found to convert from {mime_type} to text\"",
")"
]
| 32.290323 | 15.677419 |
def cmd_as_file(cmd, *args, **kwargs):
"""Launch `cmd` and treat its stdout as a file object"""
kwargs['stdout'] = subprocess.PIPE
stdin = kwargs.pop('stdin', None)
if isinstance(stdin, basestring):
with tempfile.TemporaryFile() as stdin_file:
stdin_file.write(stdin)
stdin_file.seek(0)
kwargs['stdin'] = stdin_file
p = subprocess.Popen(cmd, *args, **kwargs)
else:
p = subprocess.Popen(cmd, *args, **kwargs)
try:
yield p.stdout
finally:
p.stdout.close()
if p.wait():
raise subprocess.CalledProcessError(p.returncode, cmd) | [
"def",
"cmd_as_file",
"(",
"cmd",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'stdout'",
"]",
"=",
"subprocess",
".",
"PIPE",
"stdin",
"=",
"kwargs",
".",
"pop",
"(",
"'stdin'",
",",
"None",
")",
"if",
"isinstance",
"(",
"stdin",
",",
"basestring",
")",
":",
"with",
"tempfile",
".",
"TemporaryFile",
"(",
")",
"as",
"stdin_file",
":",
"stdin_file",
".",
"write",
"(",
"stdin",
")",
"stdin_file",
".",
"seek",
"(",
"0",
")",
"kwargs",
"[",
"'stdin'",
"]",
"=",
"stdin_file",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"yield",
"p",
".",
"stdout",
"finally",
":",
"p",
".",
"stdout",
".",
"close",
"(",
")",
"if",
"p",
".",
"wait",
"(",
")",
":",
"raise",
"subprocess",
".",
"CalledProcessError",
"(",
"p",
".",
"returncode",
",",
"cmd",
")"
]
| 35.111111 | 12.888889 |
def _advapi32_load_key(key_object, key_info, container):
"""
Loads a certificate, public key or private key into a Certificate,
PublicKey or PrivateKey object via CryptoAPI
:param key_object:
An asn1crypto.x509.Certificate, asn1crypto.keys.PublicKeyInfo or
asn1crypto.keys.PrivateKeyInfo object
:param key_info:
An asn1crypto.keys.PublicKeyInfo or asn1crypto.keys.PrivateKeyInfo
object
:param container:
The class of the object to hold the key_handle
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
oscrypto.errors.AsymmetricKeyError - when the key is incompatible with the OS crypto library
OSError - when an error is returned by the OS crypto library
:return:
A PrivateKey, PublicKey or Certificate object, based on container
"""
key_type = 'public' if isinstance(key_info, keys.PublicKeyInfo) else 'private'
algo = key_info.algorithm
if algo == 'rsa':
provider = Advapi32Const.MS_ENH_RSA_AES_PROV
else:
provider = Advapi32Const.MS_ENH_DSS_DH_PROV
context_handle = None
key_handle = None
try:
context_handle = open_context_handle(provider, verify_only=key_type == 'public')
blob = _advapi32_create_blob(key_info, key_type, algo)
buffer_ = buffer_from_bytes(blob)
key_handle_pointer = new(advapi32, 'HCRYPTKEY *')
res = advapi32.CryptImportKey(
context_handle,
buffer_,
len(blob),
null(),
0,
key_handle_pointer
)
handle_error(res)
key_handle = unwrap(key_handle_pointer)
output = container(key_handle, key_object)
output.context_handle = context_handle
if algo == 'rsa':
ex_blob = _advapi32_create_blob(key_info, key_type, algo, signing=False)
ex_buffer = buffer_from_bytes(ex_blob)
ex_key_handle_pointer = new(advapi32, 'HCRYPTKEY *')
res = advapi32.CryptImportKey(
context_handle,
ex_buffer,
len(ex_blob),
null(),
0,
ex_key_handle_pointer
)
handle_error(res)
output.ex_key_handle = unwrap(ex_key_handle_pointer)
return output
except (Exception):
if key_handle:
advapi32.CryptDestroyKey(key_handle)
if context_handle:
close_context_handle(context_handle)
raise | [
"def",
"_advapi32_load_key",
"(",
"key_object",
",",
"key_info",
",",
"container",
")",
":",
"key_type",
"=",
"'public'",
"if",
"isinstance",
"(",
"key_info",
",",
"keys",
".",
"PublicKeyInfo",
")",
"else",
"'private'",
"algo",
"=",
"key_info",
".",
"algorithm",
"if",
"algo",
"==",
"'rsa'",
":",
"provider",
"=",
"Advapi32Const",
".",
"MS_ENH_RSA_AES_PROV",
"else",
":",
"provider",
"=",
"Advapi32Const",
".",
"MS_ENH_DSS_DH_PROV",
"context_handle",
"=",
"None",
"key_handle",
"=",
"None",
"try",
":",
"context_handle",
"=",
"open_context_handle",
"(",
"provider",
",",
"verify_only",
"=",
"key_type",
"==",
"'public'",
")",
"blob",
"=",
"_advapi32_create_blob",
"(",
"key_info",
",",
"key_type",
",",
"algo",
")",
"buffer_",
"=",
"buffer_from_bytes",
"(",
"blob",
")",
"key_handle_pointer",
"=",
"new",
"(",
"advapi32",
",",
"'HCRYPTKEY *'",
")",
"res",
"=",
"advapi32",
".",
"CryptImportKey",
"(",
"context_handle",
",",
"buffer_",
",",
"len",
"(",
"blob",
")",
",",
"null",
"(",
")",
",",
"0",
",",
"key_handle_pointer",
")",
"handle_error",
"(",
"res",
")",
"key_handle",
"=",
"unwrap",
"(",
"key_handle_pointer",
")",
"output",
"=",
"container",
"(",
"key_handle",
",",
"key_object",
")",
"output",
".",
"context_handle",
"=",
"context_handle",
"if",
"algo",
"==",
"'rsa'",
":",
"ex_blob",
"=",
"_advapi32_create_blob",
"(",
"key_info",
",",
"key_type",
",",
"algo",
",",
"signing",
"=",
"False",
")",
"ex_buffer",
"=",
"buffer_from_bytes",
"(",
"ex_blob",
")",
"ex_key_handle_pointer",
"=",
"new",
"(",
"advapi32",
",",
"'HCRYPTKEY *'",
")",
"res",
"=",
"advapi32",
".",
"CryptImportKey",
"(",
"context_handle",
",",
"ex_buffer",
",",
"len",
"(",
"ex_blob",
")",
",",
"null",
"(",
")",
",",
"0",
",",
"ex_key_handle_pointer",
")",
"handle_error",
"(",
"res",
")",
"output",
".",
"ex_key_handle",
"=",
"unwrap",
"(",
"ex_key_handle_pointer",
")",
"return",
"output",
"except",
"(",
"Exception",
")",
":",
"if",
"key_handle",
":",
"advapi32",
".",
"CryptDestroyKey",
"(",
"key_handle",
")",
"if",
"context_handle",
":",
"close_context_handle",
"(",
"context_handle",
")",
"raise"
]
| 30.650602 | 23.39759 |
def geostatistical_prior_builder(pst, struct_dict,sigma_range=4,
par_knowledge_dict=None,verbose=False):
""" a helper function to construct a full prior covariance matrix using
a mixture of geostastical structures and parameter bounds information.
The covariance of parameters associated with geostatistical structures is defined
as a mixture of GeoStruct and bounds. That is, the GeoStruct is used to construct a
pyemu.Cov, then the entire pyemu.Cov is scaled by the uncertainty implied by the bounds and
sigma_range. Sounds complicated...
Parameters
----------
pst : pyemu.Pst
a control file (or the name of control file)
struct_dict : dict
a python dict of GeoStruct (or structure file), and list of pp tpl files pairs
If the values in the dict are pd.DataFrames, then they must have an
'x','y', and 'parnme' column. If the filename ends in '.csv',
then a pd.DataFrame is loaded, otherwise a pilot points file is loaded.
sigma_range : float
a float representing the number of standard deviations implied by parameter bounds.
Default is 4.0, which implies 95% confidence parameter bounds.
par_knowledge_dict : dict
used to condition on existing knowledge about parameters. This functionality is
currently in dev - don't use it.
verbose : bool
stdout flag
Returns
-------
Cov : pyemu.Cov
a covariance matrix that includes all adjustable parameters in the control
file.
Example
-------
``>>>import pyemu``
``>>>pst = pyemu.Pst("pest.pst")``
``>>>sd = {"struct.dat":["hkpp.dat.tpl","vka.dat.tpl"]}``
``>>>cov = pyemu.helpers.geostatistical_prior_builder(pst,struct_dict=sd)``
``>>>cov.to_ascii("prior.cov")``
"""
if isinstance(pst,str):
pst = pyemu.Pst(pst)
assert isinstance(pst,pyemu.Pst),"pst arg must be a Pst instance, not {0}".\
format(type(pst))
if verbose: print("building diagonal cov")
full_cov = pyemu.Cov.from_parameter_data(pst,sigma_range=sigma_range)
full_cov_dict = {n:float(v) for n,v in zip(full_cov.col_names,full_cov.x)}
#full_cov = None
par = pst.parameter_data
for gs,items in struct_dict.items():
if verbose: print("processing ",gs)
if isinstance(gs,str):
gss = pyemu.geostats.read_struct_file(gs)
if isinstance(gss,list):
warnings.warn("using first geostat structure in file {0}".\
format(gs),PyemuWarning)
gs = gss[0]
else:
gs = gss
if not isinstance(items,list):
items = [items]
for item in items:
if isinstance(item,str):
assert os.path.exists(item),"file {0} not found".\
format(item)
if item.lower().endswith(".tpl"):
df = pyemu.pp_utils.pp_tpl_to_dataframe(item)
elif item.lower.endswith(".csv"):
df = pd.read_csv(item)
else:
df = item
for req in ['x','y','parnme']:
if req not in df.columns:
raise Exception("{0} is not in the columns".format(req))
missing = df.loc[df.parnme.apply(
lambda x : x not in par.parnme),"parnme"]
if len(missing) > 0:
warnings.warn("the following parameters are not " + \
"in the control file: {0}".\
format(','.join(missing)),PyemuWarning)
df = df.loc[df.parnme.apply(lambda x: x not in missing)]
if "zone" not in df.columns:
df.loc[:,"zone"] = 1
zones = df.zone.unique()
aset = set(pst.adj_par_names)
for zone in zones:
df_zone = df.loc[df.zone==zone,:].copy()
df_zone = df_zone.loc[df_zone.parnme.apply(lambda x: x in aset), :]
if df_zone.shape[0] == 0:
warnings.warn("all parameters in zone {0} tied and/or fixed, skipping...".format(zone),
PyemuWarning)
continue
#df_zone.sort_values(by="parnme",inplace=True)
df_zone.sort_index(inplace=True)
if verbose: print("build cov matrix")
cov = gs.covariance_matrix(df_zone.x,df_zone.y,df_zone.parnme)
if verbose: print("done")
# find the variance in the diagonal cov
if verbose: print("getting diag var cov",df_zone.shape[0])
#tpl_var = np.diag(full_cov.get(list(df_zone.parnme)).x).max()
tpl_var = max([full_cov_dict[pn] for pn in df_zone.parnme])
#if np.std(tpl_var) > 1.0e-6:
# warnings.warn("pars have different ranges" +\
# " , using max range as variance for all pars")
#tpl_var = tpl_var.max()
if verbose: print("scaling full cov by diag var cov")
cov *= tpl_var
if verbose: print("test for inversion")
try:
ci = cov.inv
except:
df_zone.to_csv("prior_builder_crash.csv")
raise Exception("error inverting cov {0}".
format(cov.row_names[:3]))
if verbose: print('replace in full cov')
full_cov.replace(cov)
# d = np.diag(full_cov.x)
# idx = np.argwhere(d==0.0)
# for i in idx:
# print(full_cov.names[i])
if par_knowledge_dict is not None:
full_cov = condition_on_par_knowledge(full_cov,
par_knowledge_dict=par_knowledge_dict)
return full_cov | [
"def",
"geostatistical_prior_builder",
"(",
"pst",
",",
"struct_dict",
",",
"sigma_range",
"=",
"4",
",",
"par_knowledge_dict",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"pst",
",",
"str",
")",
":",
"pst",
"=",
"pyemu",
".",
"Pst",
"(",
"pst",
")",
"assert",
"isinstance",
"(",
"pst",
",",
"pyemu",
".",
"Pst",
")",
",",
"\"pst arg must be a Pst instance, not {0}\"",
".",
"format",
"(",
"type",
"(",
"pst",
")",
")",
"if",
"verbose",
":",
"print",
"(",
"\"building diagonal cov\"",
")",
"full_cov",
"=",
"pyemu",
".",
"Cov",
".",
"from_parameter_data",
"(",
"pst",
",",
"sigma_range",
"=",
"sigma_range",
")",
"full_cov_dict",
"=",
"{",
"n",
":",
"float",
"(",
"v",
")",
"for",
"n",
",",
"v",
"in",
"zip",
"(",
"full_cov",
".",
"col_names",
",",
"full_cov",
".",
"x",
")",
"}",
"#full_cov = None",
"par",
"=",
"pst",
".",
"parameter_data",
"for",
"gs",
",",
"items",
"in",
"struct_dict",
".",
"items",
"(",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"\"processing \"",
",",
"gs",
")",
"if",
"isinstance",
"(",
"gs",
",",
"str",
")",
":",
"gss",
"=",
"pyemu",
".",
"geostats",
".",
"read_struct_file",
"(",
"gs",
")",
"if",
"isinstance",
"(",
"gss",
",",
"list",
")",
":",
"warnings",
".",
"warn",
"(",
"\"using first geostat structure in file {0}\"",
".",
"format",
"(",
"gs",
")",
",",
"PyemuWarning",
")",
"gs",
"=",
"gss",
"[",
"0",
"]",
"else",
":",
"gs",
"=",
"gss",
"if",
"not",
"isinstance",
"(",
"items",
",",
"list",
")",
":",
"items",
"=",
"[",
"items",
"]",
"for",
"item",
"in",
"items",
":",
"if",
"isinstance",
"(",
"item",
",",
"str",
")",
":",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"item",
")",
",",
"\"file {0} not found\"",
".",
"format",
"(",
"item",
")",
"if",
"item",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"\".tpl\"",
")",
":",
"df",
"=",
"pyemu",
".",
"pp_utils",
".",
"pp_tpl_to_dataframe",
"(",
"item",
")",
"elif",
"item",
".",
"lower",
".",
"endswith",
"(",
"\".csv\"",
")",
":",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"item",
")",
"else",
":",
"df",
"=",
"item",
"for",
"req",
"in",
"[",
"'x'",
",",
"'y'",
",",
"'parnme'",
"]",
":",
"if",
"req",
"not",
"in",
"df",
".",
"columns",
":",
"raise",
"Exception",
"(",
"\"{0} is not in the columns\"",
".",
"format",
"(",
"req",
")",
")",
"missing",
"=",
"df",
".",
"loc",
"[",
"df",
".",
"parnme",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
"not",
"in",
"par",
".",
"parnme",
")",
",",
"\"parnme\"",
"]",
"if",
"len",
"(",
"missing",
")",
">",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"the following parameters are not \"",
"+",
"\"in the control file: {0}\"",
".",
"format",
"(",
"','",
".",
"join",
"(",
"missing",
")",
")",
",",
"PyemuWarning",
")",
"df",
"=",
"df",
".",
"loc",
"[",
"df",
".",
"parnme",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
"not",
"in",
"missing",
")",
"]",
"if",
"\"zone\"",
"not",
"in",
"df",
".",
"columns",
":",
"df",
".",
"loc",
"[",
":",
",",
"\"zone\"",
"]",
"=",
"1",
"zones",
"=",
"df",
".",
"zone",
".",
"unique",
"(",
")",
"aset",
"=",
"set",
"(",
"pst",
".",
"adj_par_names",
")",
"for",
"zone",
"in",
"zones",
":",
"df_zone",
"=",
"df",
".",
"loc",
"[",
"df",
".",
"zone",
"==",
"zone",
",",
":",
"]",
".",
"copy",
"(",
")",
"df_zone",
"=",
"df_zone",
".",
"loc",
"[",
"df_zone",
".",
"parnme",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
"in",
"aset",
")",
",",
":",
"]",
"if",
"df_zone",
".",
"shape",
"[",
"0",
"]",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"all parameters in zone {0} tied and/or fixed, skipping...\"",
".",
"format",
"(",
"zone",
")",
",",
"PyemuWarning",
")",
"continue",
"#df_zone.sort_values(by=\"parnme\",inplace=True)",
"df_zone",
".",
"sort_index",
"(",
"inplace",
"=",
"True",
")",
"if",
"verbose",
":",
"print",
"(",
"\"build cov matrix\"",
")",
"cov",
"=",
"gs",
".",
"covariance_matrix",
"(",
"df_zone",
".",
"x",
",",
"df_zone",
".",
"y",
",",
"df_zone",
".",
"parnme",
")",
"if",
"verbose",
":",
"print",
"(",
"\"done\"",
")",
"# find the variance in the diagonal cov",
"if",
"verbose",
":",
"print",
"(",
"\"getting diag var cov\"",
",",
"df_zone",
".",
"shape",
"[",
"0",
"]",
")",
"#tpl_var = np.diag(full_cov.get(list(df_zone.parnme)).x).max()",
"tpl_var",
"=",
"max",
"(",
"[",
"full_cov_dict",
"[",
"pn",
"]",
"for",
"pn",
"in",
"df_zone",
".",
"parnme",
"]",
")",
"#if np.std(tpl_var) > 1.0e-6:",
"# warnings.warn(\"pars have different ranges\" +\\",
"# \" , using max range as variance for all pars\")",
"#tpl_var = tpl_var.max()",
"if",
"verbose",
":",
"print",
"(",
"\"scaling full cov by diag var cov\"",
")",
"cov",
"*=",
"tpl_var",
"if",
"verbose",
":",
"print",
"(",
"\"test for inversion\"",
")",
"try",
":",
"ci",
"=",
"cov",
".",
"inv",
"except",
":",
"df_zone",
".",
"to_csv",
"(",
"\"prior_builder_crash.csv\"",
")",
"raise",
"Exception",
"(",
"\"error inverting cov {0}\"",
".",
"format",
"(",
"cov",
".",
"row_names",
"[",
":",
"3",
"]",
")",
")",
"if",
"verbose",
":",
"print",
"(",
"'replace in full cov'",
")",
"full_cov",
".",
"replace",
"(",
"cov",
")",
"# d = np.diag(full_cov.x)",
"# idx = np.argwhere(d==0.0)",
"# for i in idx:",
"# print(full_cov.names[i])",
"if",
"par_knowledge_dict",
"is",
"not",
"None",
":",
"full_cov",
"=",
"condition_on_par_knowledge",
"(",
"full_cov",
",",
"par_knowledge_dict",
"=",
"par_knowledge_dict",
")",
"return",
"full_cov"
]
| 43.804511 | 20.87218 |
def true_ces(subsystem, previous_state, next_state):
"""Set of all sets of elements that have true causes and true effects.
.. note::
Since the true |CauseEffectStructure| is always about the full system,
the background conditions don't matter and the subsystem should be
conditioned on the current state.
"""
network = subsystem.network
nodes = subsystem.node_indices
state = subsystem.state
_events = events(network, previous_state, state, next_state, nodes)
if not _events:
log.info("Finished calculating, no echo events.")
return None
result = tuple([event.actual_cause for event in _events] +
[event.actual_effect for event in _events])
log.info("Finished calculating true events.")
log.debug("RESULT: \n%s", result)
return result | [
"def",
"true_ces",
"(",
"subsystem",
",",
"previous_state",
",",
"next_state",
")",
":",
"network",
"=",
"subsystem",
".",
"network",
"nodes",
"=",
"subsystem",
".",
"node_indices",
"state",
"=",
"subsystem",
".",
"state",
"_events",
"=",
"events",
"(",
"network",
",",
"previous_state",
",",
"state",
",",
"next_state",
",",
"nodes",
")",
"if",
"not",
"_events",
":",
"log",
".",
"info",
"(",
"\"Finished calculating, no echo events.\"",
")",
"return",
"None",
"result",
"=",
"tuple",
"(",
"[",
"event",
".",
"actual_cause",
"for",
"event",
"in",
"_events",
"]",
"+",
"[",
"event",
".",
"actual_effect",
"for",
"event",
"in",
"_events",
"]",
")",
"log",
".",
"info",
"(",
"\"Finished calculating true events.\"",
")",
"log",
".",
"debug",
"(",
"\"RESULT: \\n%s\"",
",",
"result",
")",
"return",
"result"
]
| 34.333333 | 21.208333 |
def _initialize_background(self):
"""Set up background state (zonal flow and PV gradients)."""
self.H = self.Hi.sum()
if np.asarray(self.U).ndim == 2:
self.Ubg = self.U * np.ones((self.ny))
else:
self.Ubg = np.expand_dims(self.U,axis=1) * np.ones((self.ny))
if not (self.nz==2):
self.gpi = self.g*(self.rhoi[1:]-self.rhoi[:-1])/self.rhoi[:-1]
self.f2gpi = (self.f2/self.gpi)[:,np.newaxis,np.newaxis]
assert self.gpi.size == self.nz-1, "Invalid size of gpi"
assert np.all(self.gpi>0.), "Buoyancy jump has negative sign!"
assert self.Hi.size == self.nz, self.logger.error('size of Hi does not' +
'match number of vertical levels nz')
assert self.rhoi.size == self.nz, self.logger.error('size of rhoi does not' +
'match number of vertical levels nz')
assert self.Ubg.size == self.nz * self.ny, self.logger.error('size of Ubg does not' +
'match number of vertical levels nz')
assert self.Vbg.size == self.nz, self.logger.error('size of Vbg does not' +
'match number of vertical levels nz')
else:
self.f2gpi = np.array(self.rd**-2 *
(self.Hi[0]*self.Hi[1])/self.H)[np.newaxis]
self._initialize_stretching_matrix()
# the meridional PV gradients in each layer
self.Qy = self.beta - np.dot(self.S, self.Ubg) + np.gradient(np.gradient(self.Ubg, self.dy, axis=1), self.dy, axis=1)
self.Qx = np.dot(self.S,self.Vbg)
# complex versions, multiplied by k, speeds up computations to precompute
self.ikQy = self.Qy[:,:,np.newaxis]*1j*self.k
self.ilQx = self.Qx[:,np.newaxis,np.newaxis]*1j*self.l | [
"def",
"_initialize_background",
"(",
"self",
")",
":",
"self",
".",
"H",
"=",
"self",
".",
"Hi",
".",
"sum",
"(",
")",
"if",
"np",
".",
"asarray",
"(",
"self",
".",
"U",
")",
".",
"ndim",
"==",
"2",
":",
"self",
".",
"Ubg",
"=",
"self",
".",
"U",
"*",
"np",
".",
"ones",
"(",
"(",
"self",
".",
"ny",
")",
")",
"else",
":",
"self",
".",
"Ubg",
"=",
"np",
".",
"expand_dims",
"(",
"self",
".",
"U",
",",
"axis",
"=",
"1",
")",
"*",
"np",
".",
"ones",
"(",
"(",
"self",
".",
"ny",
")",
")",
"if",
"not",
"(",
"self",
".",
"nz",
"==",
"2",
")",
":",
"self",
".",
"gpi",
"=",
"self",
".",
"g",
"*",
"(",
"self",
".",
"rhoi",
"[",
"1",
":",
"]",
"-",
"self",
".",
"rhoi",
"[",
":",
"-",
"1",
"]",
")",
"/",
"self",
".",
"rhoi",
"[",
":",
"-",
"1",
"]",
"self",
".",
"f2gpi",
"=",
"(",
"self",
".",
"f2",
"/",
"self",
".",
"gpi",
")",
"[",
":",
",",
"np",
".",
"newaxis",
",",
"np",
".",
"newaxis",
"]",
"assert",
"self",
".",
"gpi",
".",
"size",
"==",
"self",
".",
"nz",
"-",
"1",
",",
"\"Invalid size of gpi\"",
"assert",
"np",
".",
"all",
"(",
"self",
".",
"gpi",
">",
"0.",
")",
",",
"\"Buoyancy jump has negative sign!\"",
"assert",
"self",
".",
"Hi",
".",
"size",
"==",
"self",
".",
"nz",
",",
"self",
".",
"logger",
".",
"error",
"(",
"'size of Hi does not'",
"+",
"'match number of vertical levels nz'",
")",
"assert",
"self",
".",
"rhoi",
".",
"size",
"==",
"self",
".",
"nz",
",",
"self",
".",
"logger",
".",
"error",
"(",
"'size of rhoi does not'",
"+",
"'match number of vertical levels nz'",
")",
"assert",
"self",
".",
"Ubg",
".",
"size",
"==",
"self",
".",
"nz",
"*",
"self",
".",
"ny",
",",
"self",
".",
"logger",
".",
"error",
"(",
"'size of Ubg does not'",
"+",
"'match number of vertical levels nz'",
")",
"assert",
"self",
".",
"Vbg",
".",
"size",
"==",
"self",
".",
"nz",
",",
"self",
".",
"logger",
".",
"error",
"(",
"'size of Vbg does not'",
"+",
"'match number of vertical levels nz'",
")",
"else",
":",
"self",
".",
"f2gpi",
"=",
"np",
".",
"array",
"(",
"self",
".",
"rd",
"**",
"-",
"2",
"*",
"(",
"self",
".",
"Hi",
"[",
"0",
"]",
"*",
"self",
".",
"Hi",
"[",
"1",
"]",
")",
"/",
"self",
".",
"H",
")",
"[",
"np",
".",
"newaxis",
"]",
"self",
".",
"_initialize_stretching_matrix",
"(",
")",
"# the meridional PV gradients in each layer",
"self",
".",
"Qy",
"=",
"self",
".",
"beta",
"-",
"np",
".",
"dot",
"(",
"self",
".",
"S",
",",
"self",
".",
"Ubg",
")",
"+",
"np",
".",
"gradient",
"(",
"np",
".",
"gradient",
"(",
"self",
".",
"Ubg",
",",
"self",
".",
"dy",
",",
"axis",
"=",
"1",
")",
",",
"self",
".",
"dy",
",",
"axis",
"=",
"1",
")",
"self",
".",
"Qx",
"=",
"np",
".",
"dot",
"(",
"self",
".",
"S",
",",
"self",
".",
"Vbg",
")",
"# complex versions, multiplied by k, speeds up computations to precompute",
"self",
".",
"ikQy",
"=",
"self",
".",
"Qy",
"[",
":",
",",
":",
",",
"np",
".",
"newaxis",
"]",
"*",
"1j",
"*",
"self",
".",
"k",
"self",
".",
"ilQx",
"=",
"self",
".",
"Qx",
"[",
":",
",",
"np",
".",
"newaxis",
",",
"np",
".",
"newaxis",
"]",
"*",
"1j",
"*",
"self",
".",
"l"
]
| 39.688889 | 29.422222 |
def clmixhess(obj, exe, arg1, arg2, delta=DELTA):
"""
Returns numerical mixed Hessian function of given class method
with respect to two class attributes
Input: obj, general object
exe (str), name of object method
arg1(str), name of object attribute
arg2(str), name of object attribute
delta(float, optional), finite difference step
Output: Hessian function object
"""
f, x = get_method_and_copy_of_attribute(obj, exe, arg1)
_, y = get_method_and_copy_of_attribute(obj, exe, arg2)
def hess_f(*args, **kwargs):
hess_val = numpy.zeros(x.shape + y.shape)
it = numpy.nditer(x, op_flags=['readwrite'], flags=['multi_index'])
for xi in it:
i = it.multi_index
jt = numpy.nditer(y, op_flags=['readwrite'], flags=['multi_index'])
for yj in jt:
j = jt.multi_index
xi += delta/2
yj += delta/2
fpp = f(*args, **kwargs)
yj -= delta
fpm = f(*args, **kwargs)
xi -= delta
fmm = f(*args, **kwargs)
yj += delta
fmp = f(*args, **kwargs)
xi += delta/2
yj -= delta/2
hess_val[i + j] = (fpp + fmm - fpm - fmp)/delta**2
return hess_val
return hess_f | [
"def",
"clmixhess",
"(",
"obj",
",",
"exe",
",",
"arg1",
",",
"arg2",
",",
"delta",
"=",
"DELTA",
")",
":",
"f",
",",
"x",
"=",
"get_method_and_copy_of_attribute",
"(",
"obj",
",",
"exe",
",",
"arg1",
")",
"_",
",",
"y",
"=",
"get_method_and_copy_of_attribute",
"(",
"obj",
",",
"exe",
",",
"arg2",
")",
"def",
"hess_f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"hess_val",
"=",
"numpy",
".",
"zeros",
"(",
"x",
".",
"shape",
"+",
"y",
".",
"shape",
")",
"it",
"=",
"numpy",
".",
"nditer",
"(",
"x",
",",
"op_flags",
"=",
"[",
"'readwrite'",
"]",
",",
"flags",
"=",
"[",
"'multi_index'",
"]",
")",
"for",
"xi",
"in",
"it",
":",
"i",
"=",
"it",
".",
"multi_index",
"jt",
"=",
"numpy",
".",
"nditer",
"(",
"y",
",",
"op_flags",
"=",
"[",
"'readwrite'",
"]",
",",
"flags",
"=",
"[",
"'multi_index'",
"]",
")",
"for",
"yj",
"in",
"jt",
":",
"j",
"=",
"jt",
".",
"multi_index",
"xi",
"+=",
"delta",
"/",
"2",
"yj",
"+=",
"delta",
"/",
"2",
"fpp",
"=",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"yj",
"-=",
"delta",
"fpm",
"=",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"xi",
"-=",
"delta",
"fmm",
"=",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"yj",
"+=",
"delta",
"fmp",
"=",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"xi",
"+=",
"delta",
"/",
"2",
"yj",
"-=",
"delta",
"/",
"2",
"hess_val",
"[",
"i",
"+",
"j",
"]",
"=",
"(",
"fpp",
"+",
"fmm",
"-",
"fpm",
"-",
"fmp",
")",
"/",
"delta",
"**",
"2",
"return",
"hess_val",
"return",
"hess_f"
]
| 38.628571 | 11.714286 |
def _check_value(self, ovsrec_row, column_value):
"""
:type column_value: tuple of column and value_json
"""
column, value_json = column_value
column_schema = ovsrec_row._table.columns[column]
value = ovs.db.data.Datum.from_json(
column_schema.type, value_json).to_python(ovs.db.idl._uuid_to_row)
datum = getattr(ovsrec_row, column)
if column_schema.type.is_map():
for k, v in value.items():
if k in datum and datum[k] == v:
return True
elif datum == value:
return True
return False | [
"def",
"_check_value",
"(",
"self",
",",
"ovsrec_row",
",",
"column_value",
")",
":",
"column",
",",
"value_json",
"=",
"column_value",
"column_schema",
"=",
"ovsrec_row",
".",
"_table",
".",
"columns",
"[",
"column",
"]",
"value",
"=",
"ovs",
".",
"db",
".",
"data",
".",
"Datum",
".",
"from_json",
"(",
"column_schema",
".",
"type",
",",
"value_json",
")",
".",
"to_python",
"(",
"ovs",
".",
"db",
".",
"idl",
".",
"_uuid_to_row",
")",
"datum",
"=",
"getattr",
"(",
"ovsrec_row",
",",
"column",
")",
"if",
"column_schema",
".",
"type",
".",
"is_map",
"(",
")",
":",
"for",
"k",
",",
"v",
"in",
"value",
".",
"items",
"(",
")",
":",
"if",
"k",
"in",
"datum",
"and",
"datum",
"[",
"k",
"]",
"==",
"v",
":",
"return",
"True",
"elif",
"datum",
"==",
"value",
":",
"return",
"True",
"return",
"False"
]
| 36.411765 | 11.705882 |
def delete_vault(self, vault_id):
"""Deletes a ``Vault``.
arg: vault_id (osid.id.Id): the ``Id`` of the ``Vault`` to
remove
raise: NotFound - ``vault_id`` not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinAdminSession.delete_bin_template
if self._catalog_session is not None:
return self._catalog_session.delete_catalog(catalog_id=vault_id)
collection = JSONClientValidated('authorization',
collection='Vault',
runtime=self._runtime)
if not isinstance(vault_id, ABCId):
raise errors.InvalidArgument('the argument is not a valid OSID Id')
for object_catalog in ['Authorization', 'Function', 'Qualifier', 'Vault']:
obj_collection = JSONClientValidated('authorization',
collection=object_catalog,
runtime=self._runtime)
if obj_collection.find({'assignedVaultIds': {'$in': [str(vault_id)]}}).count() != 0:
raise errors.IllegalState('catalog is not empty')
collection.delete_one({'_id': ObjectId(vault_id.get_identifier())}) | [
"def",
"delete_vault",
"(",
"self",
",",
"vault_id",
")",
":",
"# Implemented from template for",
"# osid.resource.BinAdminSession.delete_bin_template",
"if",
"self",
".",
"_catalog_session",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_catalog_session",
".",
"delete_catalog",
"(",
"catalog_id",
"=",
"vault_id",
")",
"collection",
"=",
"JSONClientValidated",
"(",
"'authorization'",
",",
"collection",
"=",
"'Vault'",
",",
"runtime",
"=",
"self",
".",
"_runtime",
")",
"if",
"not",
"isinstance",
"(",
"vault_id",
",",
"ABCId",
")",
":",
"raise",
"errors",
".",
"InvalidArgument",
"(",
"'the argument is not a valid OSID Id'",
")",
"for",
"object_catalog",
"in",
"[",
"'Authorization'",
",",
"'Function'",
",",
"'Qualifier'",
",",
"'Vault'",
"]",
":",
"obj_collection",
"=",
"JSONClientValidated",
"(",
"'authorization'",
",",
"collection",
"=",
"object_catalog",
",",
"runtime",
"=",
"self",
".",
"_runtime",
")",
"if",
"obj_collection",
".",
"find",
"(",
"{",
"'assignedVaultIds'",
":",
"{",
"'$in'",
":",
"[",
"str",
"(",
"vault_id",
")",
"]",
"}",
"}",
")",
".",
"count",
"(",
")",
"!=",
"0",
":",
"raise",
"errors",
".",
"IllegalState",
"(",
"'catalog is not empty'",
")",
"collection",
".",
"delete_one",
"(",
"{",
"'_id'",
":",
"ObjectId",
"(",
"vault_id",
".",
"get_identifier",
"(",
")",
")",
"}",
")"
]
| 53.678571 | 22.607143 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.