code | docstring
---|---
def rotate(self, angle, center=(0, 0)):
"""
Rotate this object.
Parameters
----------
angle : number
The angle of rotation (in *radians*).
center : array-like[2]
Center point for the rotation.
Returns
-------
out : ``PolygonSet``
This object.
"""
ca = numpy.cos(angle)
sa = numpy.sin(angle)
sa = numpy.array((-sa, sa))
c0 = numpy.array(center)
self.polygons = [(points - c0) * ca + (points - c0)[:, ::-1] * sa + c0
for points in self.polygons]
return self | Rotate this object.
Parameters
----------
angle : number
The angle of rotation (in *radians*).
center : array-like[2]
Center point for the rotation.
Returns
-------
out : ``PolygonSet``
This object. |
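The rotation above never builds a 2x2 matrix: pairing the column-swapped coordinates with (-sin, sin) yields (x*cos - y*sin, y*cos + x*sin) directly. A minimal standalone sketch of the same trick, assuming only numpy (the function name here is illustrative, not part of the library):

import numpy as np

def rotate_points(points, angle, center=(0, 0)):
    # Same vectorized trick as above: swap the x/y columns and scale by
    # (-sin, sin) instead of building a 2x2 rotation matrix.
    ca, sa = np.cos(angle), np.sin(angle)
    sa = np.array((-sa, sa))
    c0 = np.array(center)
    return (points - c0) * ca + (points - c0)[:, ::-1] * sa + c0

square = np.array([[1.0, 0.0], [0.0, 1.0]])
print(rotate_points(square, np.pi / 2))   # 90-degree rotation about the origin, ~[[0, 1], [-1, 0]]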
def groups_leave(self, room_id, **kwargs):
"""Causes the callee to be removed from the private group, if they’re part of it and are not the last owner."""
return self.__call_api_post('groups.leave', roomId=room_id, kwargs=kwargs) | Causes the callee to be removed from the private group, if they’re part of it and are not the last owner. |
def _recv_callback(self, msg):
"""
Method is called when there is a message coming from a Mongrel2 server.
This message should be a valid Request String.
"""
m2req = MongrelRequest.parse(msg[0])
MongrelConnection(m2req, self._sending_stream, self.request_callback,
no_keep_alive=self.no_keep_alive, xheaders=self.xheaders) | Method is called when there is a message coming from a Mongrel2 server.
This message should be a valid Request String. |
def add_positional_embedding(x, max_length, name=None, positions=None):
"""Adds positional embedding.
Args:
x: Tensor with shape [batch, length, depth].
max_length: int representing static maximum size of any dimension.
name: str representing name of the embedding tf.Variable.
positions: Tensor with shape [batch, length].
Returns:
Tensor of same shape as x.
"""
with tf.name_scope("add_positional_embedding"):
_, length, depth = common_layers.shape_list(x)
var = tf.cast(tf.get_variable(name, [max_length, depth]), x.dtype)
if positions is None:
pad_length = tf.maximum(0, length - max_length)
sliced = tf.cond(
tf.less(length, max_length),
lambda: tf.slice(var, [0, 0], [length, -1]),
lambda: tf.pad(var, [[0, pad_length], [0, 0]]))
return x + tf.expand_dims(sliced, 0)
else:
return x + tf.gather(var, tf.to_int32(positions)) | Adds positional embedding.
Args:
x: Tensor with shape [batch, length, depth].
max_length: int representing static maximum size of any dimension.
name: str representing name of the embedding tf.Variable.
positions: Tensor with shape [batch, length].
Returns:
Tensor of same shape as x. |
def unusedoptions(self, sections):
"""Lists options that have not been used to format other values in
their sections.
Good for finding out if the user has misspelled any of the options.
"""
unused = set([])
for section in _list(sections):
if not self.has_section(section):
continue
options = self.options(section)
raw_values = [self.get(section, option, raw=True) for option in options]
for option in options:
formatter = "%(" + option + ")s"
for raw_value in raw_values:
if formatter in raw_value:
break
else:
unused.add(option)
return list(unused) | Lists options that have not been used to format other values in
their sections.
Good for finding out if the user has misspelled any of the options. |
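The test above treats an option as "used" whenever the literal interpolation token %(option)s appears in another raw value of the same section. A self-contained sketch of the same idea against the standard-library configparser (function and section names are illustrative):

import configparser

def unused_options(parser, section):
    # An option is "used" when "%(name)s" appears in some raw value of the section.
    options = parser.options(section)
    raw_values = [parser.get(section, o, raw=True) for o in options]
    return [o for o in options
            if not any("%(" + o + ")s" in v for v in raw_values)]

cfg = configparser.ConfigParser()
cfg.read_string("[paths]\nroot = /srv\ndata = %(root)s/data\nunused = /tmp\n")
print(unused_options(cfg, "paths"))   # ['data', 'unused'] -- neither is referenced elsewhere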
def update(self,
message=None,
subject=None,
days=None,
downloads=None,
notify=None):
"""Update properties for a transfer.
:param message: updated message to recipient(s)
:param subject: updated subject for transfer
:param days: updated number of days the transfer is available
:param downloads: updated number of downloads allowed for the transfer
:param notify: whether to notify on downloads or not
:type message: ``str`` or ``unicode``
:type subject: ``str`` or ``unicode``
:type days: ``int``
:type downloads: ``int``
:type notify: ``bool``
:rtype: ``bool``
"""
method, url = get_URL('update')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken'),
'transferid': self.transfer_id,
}
data = {
'message': message or self.transfer_info.get('message'),
'subject': subject or self.transfer_info.get('subject'),
'days': days or self.transfer_info.get('days'),
'downloads': downloads or self.transfer_info.get('downloads'),
'notify': notify or self.transfer_info.get('notify')
}
payload.update(data)
res = getattr(self.session, method)(url, params=payload)
if res.ok:  # only update local info when the API call succeeded
self.transfer_info.update(data)
return True
hellraiser(res) | Update properties for a transfer.
:param message: updated message to recipient(s)
:param subject: updated subject for transfer
:param days: updated number of days the transfer is available
:param downloads: updated number of downloads allowed for the transfer
:param notify: whether to notify on downloads or not
:type message: ``str`` or ``unicode``
:type subject: ``str`` or ``unicode``
:type days: ``int``
:type downloads: ``int``
:type notify: ``bool``
:rtype: ``bool`` |
def get_assets(self):
"""Gets the asset list resulting from a search.
return: (osid.repository.AssetList) - the asset list
raise: IllegalState - the list has already been retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.AssetList(self._results, runtime=self._runtime) | Gets the asset list resulting from a search.
return: (osid.repository.AssetList) - the asset list
raise: IllegalState - the list has already been retrieved
*compliance: mandatory -- This method must be implemented.* |
def find_motif_disruptions(
position,
ref,
alt,
genome_fasta,
matrices,
):
"""
Determine whether there is a difference between the ref and alt
alleles for TF binding. Requires samtools in your path.
Parameters
----------
position : str
Zero based genomic coordinates of the reference allele of the form
chrom:start-end (chr5:100-101 for a SNV for instance). The value end -
start should equal the length of the ref allele.
ref : str
Reference allele. This should match the reference sequence at "position"
in genome_fasta.
alt : str
Alternate allele.
genome_fasta : str
Path to genome fasta file. This file should be indexed.
matrices : dict
Dict whose keys are motif names and whose values are pandas data frames
or numpy arrays containing PWMs with columns ACGT.
Returns
-------
out : pandas.DataFrame
Pandas data frame with motifs whose best matches overlapping the
variant differed between the reference and alternate sequences. A score
of zero and a strand of '' indicate that there was no match for the
motif on the given allele.
"""
import subprocess
import MOODS
# import pybedtools as pbt
max_motif_length = max([x.shape[0] for x in matrices.values()])
chrom, coords = position.split(':')
start,end = [int(x) for x in coords.split('-')]
s = '{}:{}-{}'.format(chrom, start - max_motif_length + 1, end +
max_motif_length - 1)
c = 'samtools faidx {} {}'.format(genome_fasta, s)
seq_lines = subprocess.check_output(c, shell=True).strip().split()
ref_seq = seq_lines[1]
alt_seq = ref_seq[0:max_motif_length - 1] + alt + ref_seq[max_motif_length + len(ref) - 1:]
ref_variant_start = max_motif_length - 1
ref_variant_end = max_motif_length - 1 + len(ref)
alt_variant_start = max_motif_length - 1
alt_variant_end = max_motif_length - 1 + len(alt)
ms = [matrices[x].T.values.tolist() for x in matrices.keys()]
ref_res = MOODS.search(ref_seq, ms, 0.001, both_strands=True,
bg=[0.25, 0.25, 0.25, 0.25])
ref_res = dict(zip(matrices.keys(), ref_res))
alt_res = MOODS.search(alt_seq, ms, 0.001, both_strands=True,
bg=[0.25, 0.25, 0.25, 0.25])
alt_res = dict(zip(matrices.keys(), alt_res))
# First we'll remove any motif matches that don't overlap the variant of interest (and thus
# can't be affected by the variant and will be the same for ref and alt). Then we'll get the
# best match for each motif for ref and alt.
rows = []
for motif in ref_res.keys():
ref_res[motif] = _filter_variant_motif_res(ref_res[motif], ref_variant_start, ref_variant_end,
matrices[motif].shape[0], ref_seq)
alt_res[motif] = _filter_variant_motif_res(alt_res[motif], alt_variant_start, alt_variant_end,
matrices[motif].shape[0], alt_seq)
if len(ref_res[motif]) > 0:
ref_pos, ref_score = sorted(ref_res[motif], key=lambda x: x[1], reverse=True)[0]
ref_strand = {True:'+', False:'-'}[ref_pos > 0]
else:
ref_score = 0
ref_strand = ''
if len(alt_res[motif]) > 0:
alt_pos, alt_score = sorted(alt_res[motif], key=lambda x: x[1], reverse=True)[0]
alt_strand = {True:'+', False:'-'}[alt_pos > 0]
else:
alt_score = 0
alt_strand = ''
if ref_score > 0 or alt_score > 0:
diff = ref_score - alt_score
rows.append([motif, ref_score, ref_strand, alt_score, alt_strand, diff])
out = pd.DataFrame(rows, columns=['motif', 'ref_score', 'ref_strand', 'alt_score',
'alt_strand', 'score_diff'])
out.index = out.motif
out = out.drop('motif', axis=1)
out = out[out.score_diff != 0]
return out | Determine whether there is a difference between the ref and alt
alleles for TF binding. Requires samtools in your path.
Parameters
----------
position : str
Zero based genomic coordinates of the reference allele of the form
chrom:start-end (chr5:100-101 for a SNV for instance). The value end -
start should equal the length of the ref allele.
ref : str
Reference allele. This should match the reference sequence at "position"
in genome_fasta.
alt : str
Alternate allele.
genome_fasta : str
Path to genome fasta file. This file should be indexed.
matrices : dict
Dict whose keys are motif names and whose values are pandas data frames
or numpy arrays containing PWMs with columns ACGT.
Returns
-------
out : pandas.DataFrame
Pandas data frame with motifs whose best matches overlapping the
variant differed between the reference and alternate sequences. A score
of zero and a strand of '' indicate that there was no match for the
motif on the given allele. |
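Before any motif scanning, the function pads the variant with max_motif_length - 1 reference bases on each side so every motif placement overlapping the variant gets scored, and it splices the alternate allele into that window. A sketch of just this coordinate handling, with a hand-supplied window sequence standing in for the samtools faidx call (all names here are illustrative, not part of the original module):

def window_and_alt_seq(position, ref, alt, ref_window_seq, max_motif_length):
    # Sketch of the coordinate handling only (no samtools/MOODS): the fetched
    # window extends max_motif_length - 1 bases on each side of the variant,
    # and the alt allele replaces the ref allele inside that window.
    chrom, coords = position.split(':')
    start, end = [int(x) for x in coords.split('-')]
    region = '{}:{}-{}'.format(chrom, start - max_motif_length + 1,
                               end + max_motif_length - 1)
    alt_seq = (ref_window_seq[:max_motif_length - 1] + alt +
               ref_window_seq[max_motif_length + len(ref) - 1:])
    return region, alt_seq

# SNV A->G at chr1:100-101 with 3 bp motifs; 'TTAGG' stands in for the fetched window.
print(window_and_alt_seq('chr1:100-101', 'A', 'G', 'TTAGG', 3))   # ('chr1:98-103', 'TTGGG')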
def write_table(self, table, rows, append=False, gzip=False):
"""
Encode and write out *table* to the profile directory.
Args:
table: The name of the table to write
rows: The rows to write to the table
append: If `True`, append the encoded rows to any existing
data.
gzip: If `True`, compress the resulting table with `gzip`.
The table's filename will have `.gz` appended.
"""
_write_table(self.root,
table,
rows,
self.table_relations(table),
append=append,
gzip=gzip,
encoding=self.encoding) | Encode and write out *table* to the profile directory.
Args:
table: The name of the table to write
rows: The rows to write to the table
append: If `True`, append the encoded rows to any existing
data.
gzip: If `True`, compress the resulting table with `gzip`.
The table's filename will have `.gz` appended. |
def modfacl(acl_type, acl_name='', perms='', *args, **kwargs):
'''
Add or modify a FACL for the specified file(s)
CLI Examples:
.. code-block:: bash
salt '*' acl.modfacl user myuser rwx /tmp/house/kitchen
salt '*' acl.modfacl default:group mygroup rx /tmp/house/kitchen
salt '*' acl.modfacl d:u myuser 7 /tmp/house/kitchen
salt '*' acl.modfacl g mygroup 0 /tmp/house/kitchen /tmp/house/livingroom
salt '*' acl.modfacl user myuser rwx /tmp/house/kitchen recursive=True
salt '*' acl.modfacl user myuser rwx /tmp/house/kitchen raise_err=True
'''
recursive = kwargs.pop('recursive', False)
raise_err = kwargs.pop('raise_err', False)
_raise_on_no_files(*args)
cmd = 'setfacl'
if recursive:
cmd += ' -R' # -R must come first as -m needs the acl_* arguments that come later
cmd += ' -m'
cmd = '{0} {1}:{2}:{3}'.format(cmd, _acl_prefix(acl_type), acl_name, perms)
for dentry in args:
cmd += ' "{0}"'.format(dentry)
__salt__['cmd.run'](cmd, python_shell=False, raise_err=raise_err)
return True | Add or modify a FACL for the specified file(s)
CLI Examples:
.. code-block:: bash
salt '*' acl.modfacl user myuser rwx /tmp/house/kitchen
salt '*' acl.modfacl default:group mygroup rx /tmp/house/kitchen
salt '*' acl.modfacl d:u myuser 7 /tmp/house/kitchen
salt '*' acl.modfacl g mygroup 0 /tmp/house/kitchen /tmp/house/livingroom
salt '*' acl.modfacl user myuser rwx /tmp/house/kitchen recursive=True
salt '*' acl.modfacl user myuser rwx /tmp/house/kitchen raise_err=True |
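For reference, a rough sketch of the setfacl command string the function assembles for a typical call; the prefix mapping imitates what the module's _acl_prefix() helper presumably returns (e.g. 'user' -> 'u'), so treat it as an assumption rather than the real implementation:

def build_setfacl_cmd(acl_type, acl_name, perms, *paths, recursive=False):
    # Rough sketch of the command assembled above; the prefix table is assumed.
    prefix = {'user': 'u', 'group': 'g', 'other': 'o',
              'default:user': 'd:u', 'default:group': 'd:g'}[acl_type]
    cmd = 'setfacl'
    if recursive:
        cmd += ' -R'          # -R must precede -m
    cmd += ' -m {}:{}:{}'.format(prefix, acl_name, perms)
    for path in paths:
        cmd += ' "{}"'.format(path)
    return cmd

print(build_setfacl_cmd('user', 'myuser', 'rwx', '/tmp/house/kitchen'))
# setfacl -m u:myuser:rwx "/tmp/house/kitchen"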
def _huffman_encode_char(cls, c):
# type: (Union[str, EOS]) -> Tuple[int, int]
""" huffman_encode_char assumes that the static_huffman_tree was
previously initialized
@param str|EOS c: a symbol to encode
@return (int, int): the bitstring of the symbol and its bitlength
@raise AssertionError
"""
if isinstance(c, EOS):
return cls.static_huffman_code[-1]
else:
assert(isinstance(c, int) or len(c) == 1)
return cls.static_huffman_code[orb(c)] | huffman_encode_char assumes that the static_huffman_tree was
previously initialized
@param str|EOS c: a symbol to encode
@return (int, int): the bitstring of the symbol and its bitlength
@raise AssertionError |
def get_proto(self):
"""
Return the prototype of the method
:rtype: string
"""
if self.proto_idx_value is None:
self.proto_idx_value = self.CM.get_proto(self.proto_idx)
return self.proto_idx_value | Return the prototype of the method
:rtype: string |
def open_channel(self):
"""Open a new channel with RabbitMQ.
When RabbitMQ responds that the channel is open, the on_channel_open
callback will be invoked by pika.
"""
logger.debug('Creating new channel')
self._connection.channel(on_open_callback=self.on_channel_open) | Open a new channel with RabbitMQ.
When RabbitMQ responds that the channel is open, the on_channel_open
callback will be invoked by pika. |
def fromBrdict(cls, master, brdict):
"""
Construct a new L{BuildRequest} from a dictionary as returned by
L{BuildRequestsConnectorComponent.getBuildRequest}.
This method uses a cache, which may result in return of stale objects;
for the most up-to-date information, use the database connector
methods.
@param master: current build master
@param brdict: build request dictionary
@returns: L{BuildRequest}, via Deferred
"""
cache = master.caches.get_cache("BuildRequests", cls._make_br)
return cache.get(brdict['buildrequestid'], brdict=brdict, master=master) | Construct a new L{BuildRequest} from a dictionary as returned by
L{BuildRequestsConnectorComponent.getBuildRequest}.
This method uses a cache, which may result in return of stale objects;
for the most up-to-date information, use the database connector
methods.
@param master: current build master
@param brdict: build request dictionary
@returns: L{BuildRequest}, via Deferred |
def create(server_):
'''
Create a single BareMetal server from a data dict.
'''
try:
# Check for required profile parameters before sending any API calls.
if server_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'scaleway',
server_['profile'],
vm_=server_) is False:
return False
except AttributeError:
pass
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(server_['name']),
args=__utils__['cloud.filter_event']('creating', server_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.info('Creating a BareMetal server %s', server_['name'])
access_key = config.get_cloud_config_value(
'access_key', get_configured_provider(), __opts__, search_global=False
)
commercial_type = config.get_cloud_config_value(
'commercial_type', server_, __opts__, default='C1'
)
key_filename = config.get_cloud_config_value(
'ssh_key_file', server_, __opts__, search_global=False, default=None
)
if key_filename is not None and not os.path.isfile(key_filename):
raise SaltCloudConfigError(
'The defined key_filename \'{0}\' does not exist'.format(
key_filename
)
)
ssh_password = config.get_cloud_config_value(
'ssh_password', server_, __opts__
)
kwargs = {
'name': server_['name'],
'organization': access_key,
'image': get_image(server_),
'commercial_type': commercial_type,
}
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(server_['name']),
args={
'kwargs': __utils__['cloud.filter_event']('requesting', kwargs, list(kwargs)),
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
try:
ret = create_node(kwargs)
except Exception as exc:
log.error(
'Error creating %s on Scaleway\n\n'
'The following exception was thrown when trying to '
'run the initial deployment: %s',
server_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return False
def __query_node_data(server_name):
''' Called to check if the server has a public IP address.
'''
data = show_instance(server_name, 'action')
if data and data.get('public_ip'):
return data
return False
try:
data = salt.utils.cloud.wait_for_ip(
__query_node_data,
update_args=(server_['name'],),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', server_, __opts__, default=10 * 60),
interval=config.get_cloud_config_value(
'wait_for_ip_interval', server_, __opts__, default=10),
)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
# It might be already up, let's destroy it!
destroy(server_['name'])
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(six.text_type(exc))
server_['ssh_host'] = data['public_ip']['address']
server_['ssh_password'] = ssh_password
server_['key_filename'] = key_filename
ret = __utils__['cloud.bootstrap'](server_, __opts__)
ret.update(data)
log.info('Created BareMetal server \'%s\'', server_['name'])
log.debug(
'\'%s\' BareMetal server creation details:\n%s',
server_['name'], pprint.pformat(data)
)
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(server_['name']),
args=__utils__['cloud.filter_event']('created', server_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret | Create a single BareMetal server from a data dict. |
def add_widgets_context(request, context):
"""
WIDGETS:
* Eighth signup (STUDENT)
* Eighth attendance (TEACHER or ADMIN)
* Bell schedule (ALL)
* Birthdays (ALL)
* Administration (ADMIN)
* Links (ALL)
* Seniors (STUDENT; graduation countdown if senior, link to destinations otherwise)
"""
user = context["user"]
if context["is_student"] or context["eighth_sponsor"]:
num_blocks = 6
surrounding_blocks = EighthBlock.objects.get_upcoming_blocks(num_blocks)
if context["is_student"]:
schedule, no_signup_today = gen_schedule(user, num_blocks, surrounding_blocks)
context.update({
"schedule": schedule,
"last_displayed_block": schedule[-1] if schedule else None,
"no_signup_today": no_signup_today,
"senior_graduation": settings.SENIOR_GRADUATION,
"senior_graduation_year": settings.SENIOR_GRADUATION_YEAR
})
if context["eighth_sponsor"]:
sponsor_date = request.GET.get("sponsor_date", None)
if sponsor_date:
sponsor_date = decode_date(sponsor_date)
if sponsor_date:
block = EighthBlock.objects.filter(date__gte=sponsor_date).first()
if block:
surrounding_blocks = [block] + list(block.next_blocks(num_blocks - 1))
else:
surrounding_blocks = []
sponsor_sch = gen_sponsor_schedule(user, context["eighth_sponsor"], num_blocks, surrounding_blocks, sponsor_date)
context.update(sponsor_sch)
# "sponsor_schedule", "no_attendance_today", "num_attendance_acts",
# "sponsor_schedule_cur_date", "sponsor_schedule_prev_date", "sponsor_schedule_next_date"
birthdays = find_birthdays(request)
context["birthdays"] = find_visible_birthdays(request, birthdays)
sched_ctx = schedule_context(request)
context.update(sched_ctx)
return context | WIDGETS:
* Eighth signup (STUDENT)
* Eighth attendance (TEACHER or ADMIN)
* Bell schedule (ALL)
* Birthdays (ALL)
* Administration (ADMIN)
* Links (ALL)
* Seniors (STUDENT; graduation countdown if senior, link to destinations otherwise) |
def ReadUnicodeTable(filename, nfields, doline):
"""Generic Unicode table text file reader.
The reader takes care of stripping out comments and also
parsing the two different ways that the Unicode tables specify
code ranges (using the .. notation and splitting the range across
multiple lines).
Each non-comment line in the table is expected to have the given
number of fields. The first field is known to be the Unicode value
and the second field its description.
The reader calls doline(codes, fields) for each entry in the table.
If doline raises an exception, the reader prints that exception,
prefixed with the file name and line number, and continues
processing the file. When done with the file, the reader re-raises
the first exception encountered during the file.
Arguments:
filename: the Unicode data file to read, or a file-like object.
nfields: the number of expected fields per line in that file.
doline: the function to call for each table entry.
Raises:
InputError: nfields is invalid (must be >= 2).
"""
if nfields < 2:
raise InputError("invalid number of fields %d" % (nfields,))
if type(filename) == str:
if filename.startswith("http://"):
fil = urllib2.urlopen(filename)
else:
fil = open(filename, "r")
else:
fil = filename
first = None # first code in multiline range
expect_last = None # tag expected for "Last" line in multiline range
lineno = 0 # current line number
for line in fil:
lineno += 1
try:
# Chop # comments and white space; ignore empty lines.
sharp = line.find("#")
if sharp >= 0:
line = line[:sharp]
line = line.strip()
if not line:
continue
# Split fields on ";", chop more white space.
# Must have the expected number of fields.
fields = [s.strip() for s in line.split(";")]
if len(fields) != nfields:
raise InputError("wrong number of fields %d %d - %s" %
(len(fields), nfields, line))
# The Unicode text files have two different ways
# to list a Unicode range. Either the first field is
# itself a range (0000..FFFF), or the range is split
# across two lines, with the second field noting
# the continuation.
codes = _URange(fields[0])
(name, cont) = _ParseContinue(fields[1])
if expect_last is not None:
# If the last line gave the First code in a range,
# this one had better give the Last one.
if (len(codes) != 1 or codes[0] <= first or
cont != "Last" or name != expect_last):
raise InputError("expected Last line for %s" %
(expect_last,))
codes = range(first, codes[0] + 1)
first = None
expect_last = None
fields[0] = "%04X..%04X" % (codes[0], codes[-1])
fields[1] = name
elif cont == "First":
# Otherwise, if this is the First code in a range,
# remember it and go to the next line.
if len(codes) != 1:
raise InputError("bad First line: range given")
expect_last = name
first = codes[0]
continue
doline(codes, fields)
except Exception, e:
print "%s:%d: %s" % (filename, lineno, e)
raise
if expect_last is not None:
raise InputError("expected Last line for %s; got EOF" %
(expect_last,)) | Generic Unicode table text file reader.
The reader takes care of stripping out comments and also
parsing the two different ways that the Unicode tables specify
code ranges (using the .. notation and splitting the range across
multiple lines).
Each non-comment line in the table is expected to have the given
number of fields. The first field is known to be the Unicode value
and the second field its description.
The reader calls doline(codes, fields) for each entry in the table.
If doline raises an exception, the reader prints that exception,
prefixed with the file name and line number, and continues
processing the file. When done with the file, the reader re-raises
the first exception encountered during the file.
Arguments:
filename: the Unicode data file to read, or a file-like object.
nfields: the number of expected fields per line in that file.
doline: the function to call for each table entry.
Raises:
InputError: nfields is invalid (must be >= 2). |
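The two range notations the docstring mentions are the inline form ("0041..005A") and the split form, where a "..., First>" line is paired with a later "..., Last>" line. A small sketch of how such fields can be parsed, using hypothetical stand-ins for the module's _URange and _ParseContinue helpers:

def parse_range(field):
    # Hypothetical stand-in for _URange: "0041..005A" -> inclusive range, "0041" -> single code.
    if ".." in field:
        lo, hi = field.split("..")
        return list(range(int(lo, 16), int(hi, 16) + 1))
    return [int(field, 16)]

def parse_continue(field):
    # Hypothetical stand-in for _ParseContinue: strips a ", First"/", Last" continuation tag.
    if field.endswith(", First>") or field.endswith(", Last>"):
        name, cont = field[:-1].rsplit(", ", 1)
        return name + ">", cont
    return field, None

print(parse_range("0041..005A")[:3])             # [65, 66, 67]
print(parse_continue("<CJK Ideograph, First>"))  # ('<CJK Ideograph>', 'First')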
def x_rolls(self, number, count=0, func=sum):
'''Iterator of number dice rolls.
:param count: [0] Return list of ``count`` sums
:param func: [sum] Apply func to list of individual die rolls func([])
'''
for x in range(number):
yield self.roll(count, func) | Iterator of number dice rolls.
:param count: [0] Return list of ``count`` sums
:param func: [sum] Apply func to list of individual die rolls func([]) |
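A usage sketch of the generator: each yielded value is one call to roll(). The Dice class below is a made-up stand-in, since the real class and its roll() semantics are not shown in this snippet:

import random

class Dice:
    # Illustrative stand-in for the class this method belongs to.
    def __init__(self, sides=6):
        self.sides = sides
    def roll(self, count=0, func=sum):
        rolls = [random.randint(1, self.sides) for _ in range(max(count, 1))]
        return func(rolls) if count else rolls[0]
    def x_rolls(self, number, count=0, func=sum):
        for _ in range(number):
            yield self.roll(count, func)

print(list(Dice().x_rolls(3)))           # e.g. [4, 1, 6] -- three single rolls
print(list(Dice().x_rolls(2, count=4)))  # e.g. [13, 17] -- two sums of four dice each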
def visible_object_layers(self):
""" This must return layer objects
This is not required for custom data formats.
:return: Sequence of pytmx object layers/groups
"""
return (layer for layer in self.tmx.visible_layers
if isinstance(layer, pytmx.TiledObjectGroup)) | This must return layer objects
This is not required for custom data formats.
:return: Sequence of pytmx object layers/groups |
def chebyshev_distance(point1, point2):
"""!
@brief Calculate Chebyshev distance between two vectors.
\f[
dist(a, b) = \max_{}i\left (\left | a_{i} - b_{i} \right |\right );
\f]
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
@return (double) Chebyshev distance between two vectors.
@see euclidean_distance_square, euclidean_distance, minkowski_distance
"""
distance = 0.0
dimension = len(point1)
for i in range(dimension):
distance = max(distance, abs(point1[i] - point2[i]))
return distance | !
@brief Calculate Chebyshev distance between two vectors.
\f[
dist(a, b) = \max_{}i\left (\left | a_{i} - b_{i} \right |\right );
\f]
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
@return (double) Chebyshev distance between two vectors.
@see euclidean_distance_square, euclidean_distance, minkowski_distance |
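Equivalently, the loop reduces to taking the maximum coordinate-wise difference; a worked example:

def chebyshev_distance(point1, point2):
    # max over |a_i - b_i|, same result as the explicit loop above
    return max(abs(a - b) for a, b in zip(point1, point2))

print(chebyshev_distance([1, 2, 3], [4, 0, 3]))   # 3, since |1 - 4| = 3 dominates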
def deconstruct(self):
"""Deconstruct method."""
name, path, args, kwargs = super().deconstruct()
if self.populate_from is not None:
kwargs['populate_from'] = self.populate_from
if self.unique_with != ():
kwargs['unique_with'] = self.unique_with
kwargs.pop('unique', None)
return name, path, args, kwargs | Deconstruct method. |
async def create_cred(
self,
cred_offer_json,
cred_req_json: str,
cred_attrs: dict,
rr_size: int = None) -> (str, str):
"""
Create credential as Issuer out of credential request and dict of key:value (raw, unencoded)
entries for attributes.
Return credential json, and if cred def supports revocation, credential revocation identifier.
Raise WalletState for closed wallet.
If the credential definition supports revocation, and the current revocation registry is full,
the processing creates a new revocation registry en passant. Depending on the revocation
registry size (by default starting at 64 and doubling iteratively through a maximum of 100000)
and the revocation registry builder posture (see RevRegBuilder.__init__()), this operation may
delay credential creation by several seconds. The use of an external revocation registry builder
runs a parallel process, skirting this delay, but is more costly at initialization.
:param cred_offer_json: credential offer json as created by Issuer
:param cred_req_json: credential request json as created by HolderProver
:param cred_attrs: dict mapping each attribute to its original value (the operation encodes it); e.g.,
::
{
'favourite_drink': 'martini',
'height': 180,
'last_visit_date': '2017-12-31',
'weaknesses': None
}
:param rr_size: size of new revocation registry (default as per RevRegBuilder.create_rev_reg()) if necessary
:return: tuple with newly issued credential json, credential revocation identifier (if cred def
supports revocation, None otherwise).
"""
LOGGER.debug(
'Issuer.create_cred >>> cred_offer_json: %s, cred_req_json: %s, cred_attrs: %s, rr_size: %s',
cred_offer_json,
cred_req_json,
cred_attrs,
rr_size)
if not self.wallet.handle:
LOGGER.debug('Issuer.create_cred <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
cd_id = json.loads(cred_offer_json)['cred_def_id']
if not ok_cred_def_id(cd_id):
LOGGER.debug('Issuer.create_cred <!< Bad cred def id %s', cd_id)
raise BadIdentifier('Bad cred def id {}'.format(cd_id))
cred_def = json.loads(await self.get_cred_def(cd_id)) # ensure cred def is in cache
if 'revocation' in cred_def['value']:
with REVO_CACHE.lock:
rr_id = Tails.current_rev_reg_id(self.dir_tails, cd_id)
tails = REVO_CACHE[rr_id].tails
assert tails # at (re)start, at cred def, Issuer sync_revoc_for_issue() sets this index in revo cache
try:
(cred_json, cred_revoc_id, _) = await anoncreds.issuer_create_credential( # issue by default to rr
self.wallet.handle,
cred_offer_json,
cred_req_json,
json.dumps({k: cred_attr_value(cred_attrs[k]) for k in cred_attrs}),
rr_id,
tails.reader_handle)
rv = (cred_json, cred_revoc_id)
except IndyError as x_indy:
if x_indy.error_code == ErrorCode.AnoncredsRevocationRegistryFullError:
(tag, rr_size_suggested) = Tails.next_tag(self.dir_tails, cd_id)
rr_id = rev_reg_id(cd_id, tag)
if self.rrbx:
await self._set_rev_reg(rr_id, rr_size)
else:
await self.rrb.create_rev_reg(rr_id, rr_size or rr_size_suggested)
await self._send_rev_reg_def(rr_id)
REVO_CACHE[rr_id].tails = await Tails(self.dir_tails, cd_id).open() # symlink OK now
return await self.create_cred(cred_offer_json, cred_req_json, cred_attrs)
LOGGER.debug('Issuer.create_cred <!< cannot create cred, indy error code %s', x_indy.error_code)
raise
else:
try:
(cred_json, _, _) = await anoncreds.issuer_create_credential(
self.wallet.handle,
cred_offer_json,
cred_req_json,
json.dumps({k: cred_attr_value(cred_attrs[k]) for k in cred_attrs}),
None,
None)
rv = (cred_json, None)
except IndyError as x_indy:
LOGGER.debug('Issuer.create_cred <!< cannot create cred, indy error code %s', x_indy.error_code)
raise
LOGGER.debug('Issuer.create_cred <<< %s', rv)
return rv | Create credential as Issuer out of credential request and dict of key:value (raw, unencoded)
entries for attributes.
Return credential json, and if cred def supports revocation, credential revocation identifier.
Raise WalletState for closed wallet.
If the credential definition supports revocation, and the current revocation registry is full,
the processing creates a new revocation registry en passant. Depending on the revocation
registry size (by default starting at 64 and doubling iteratively through a maximum of 100000)
and the revocation registry builder posture (see RevRegBuilder.__init__()), this operation may
delay credential creation by several seconds. The use of an external revocation registry builder
runs a parallel process, skirting this delay, but is more costly at initialization.
:param cred_offer_json: credential offer json as created by Issuer
:param cred_req_json: credential request json as created by HolderProver
:param cred_attrs: dict mapping each attribute to its original value (the operation encodes it); e.g.,
::
{
'favourite_drink': 'martini',
'height': 180,
'last_visit_date': '2017-12-31',
'weaknesses': None
}
:param rr_size: size of new revocation registry (default as per RevRegBuilder.create_rev_reg()) if necessary
:return: tuple with newly issued credential json, credential revocation identifier (if cred def
supports revocation, None otherwise). |
def bookmarks_changed(self):
"""Bookmarks list has changed."""
bookmarks = self.editor.get_bookmarks()
if self.editor.bookmarks != bookmarks:
self.editor.bookmarks = bookmarks
self.sig_save_bookmarks.emit(self.filename, repr(bookmarks)) | Bookmarks list has changed. |
def has_gradient(self):
"""Returns true if _backward and _forward_backward can be called
by an attack, False otherwise.
"""
try:
self.__model.gradient
self.__model.predictions_and_gradient
except AttributeError:
return False
else:
return True | Returns true if _backward and _forward_backward can be called
by an attack, False otherwise. |
def ws_url(self):
"""websocket url matching the current request
turns http[s]://host[:port] into
ws[s]://host[:port]
"""
proto = self.request.protocol.replace('http', 'ws')
host = self.application.ipython_app.websocket_host # default to config value
if host == '':
host = self.request.host # get from request
return "%s://%s" % (proto, host) | websocket url matching the current request
turns http[s]://host[:port] into
ws[s]://host[:port] |
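The protocol swap is a single replace(), which also maps https to wss because only the "http" prefix is rewritten. A tiny illustration (the hostname is made up):

for proto in ("http", "https"):
    # "http" -> "ws", "https" -> "wss"
    print(proto.replace("http", "ws") + "://example.org:8888/api/kernels")
# ws://example.org:8888/api/kernels
# wss://example.org:8888/api/kernels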
def open(server=None, url=None, ip=None, port=None, name=None, https=None, auth=None, verify_ssl_certificates=True,
proxy=None, cookies=None, verbose=True, _msgs=None):
r"""
Establish connection to an existing H2O server.
The connection is not kept alive, so what this method actually does is it attempts to connect to the
specified server, and checks that the server is healthy and responds to REST API requests. If the H2O server
cannot be reached, an :class:`H2OConnectionError` will be raised. On success this method returns a new
:class:`H2OConnection` object, and it is the only "official" way to create instances of this class.
There are 3 ways to specify the target to connect to (these settings are mutually exclusive):
* pass a ``server`` option,
* pass the full ``url`` for the connection,
* provide a triple of parameters ``ip``, ``port``, ``https``.
:param H2OLocalServer server: connect to the specified local server instance. There is a slight difference
between connecting to a local server by specifying its ip and address, and connecting through
an H2OLocalServer instance: if the server becomes unresponsive, then having access to its process handle
will allow us to query the server status through OS, and potentially provide snapshot of the server's
error log in the exception information.
:param url: full url of the server to connect to.
:param ip: target server's IP address or hostname (default "localhost").
:param port: H2O server's port (default 54321).
:param name: H2O cluster name.
:param https: if True then connect using https instead of http (default False).
:param verify_ssl_certificates: if False then SSL certificate checking will be disabled (default True). This
setting should rarely be disabled, as it makes your connection vulnerable to man-in-the-middle attacks. When
used, it will generate a warning from the requests library. Has no effect when ``https`` is False.
:param auth: authentication token for connecting to the remote server. This can be either a
(username, password) tuple, or an authenticator (AuthBase) object. Please refer to the documentation in
the ``requests.auth`` module.
:param proxy: url address of a proxy server. If you do not specify the proxy, then the requests module
will attempt to use a proxy specified in the environment (in HTTP_PROXY / HTTPS_PROXY variables). We
check for the presence of these variables and issue a warning if they are found. In order to suppress
that warning and use proxy from the environment, pass ``proxy="(default)"``.
:param cookies: Cookie (or list of) to add to requests
:param verbose: if True, then connection progress info will be printed to the stdout.
:param _msgs: custom messages to display during connection. This is a tuple (initial message, success message,
failure message).
:returns: A new :class:`H2OConnection` instance.
:raises H2OConnectionError: if the server cannot be reached.
:raises H2OServerError: if the server is in an unhealthy state (although this might be a recoverable error, the
client itself should decide whether it wants to retry or not).
"""
if server is not None:
assert_is_type(server, H2OLocalServer)
assert_is_type(ip, None, "`ip` should be None when `server` parameter is supplied")
assert_is_type(url, None, "`url` should be None when `server` parameter is supplied")
assert_is_type(name, None, "`name` should be None when `server` parameter is supplied")
if not server.is_running():
raise H2OConnectionError("Unable to connect to server because it is not running")
ip = server.ip
port = server.port
scheme = server.scheme
context_path = ''
elif url is not None:
assert_is_type(url, str)
assert_is_type(ip, None, "`ip` should be None when `url` parameter is supplied")
assert_is_type(name, str, None)
# We don't allow any Unicode characters in the URL. Maybe some day we will...
match = assert_matches(url, H2OConnection.url_pattern)
scheme = match.group(1)
ip = match.group(2)
port = int(match.group(3))
context_path = '' if match.group(4) is None else "%s" % (match.group(4))
else:
if ip is None: ip = str("localhost")
if port is None: port = 54321
if https is None: https = False
if is_type(port, str) and port.isdigit(): port = int(port)
assert_is_type(ip, str)
assert_is_type(port, int)
assert_is_type(name, str, None)
assert_is_type(https, bool)
assert_matches(ip, r"(?:[\w-]+\.)*[\w-]+")
assert_satisfies(port, 1 <= port <= 65535)
scheme = "https" if https else "http"
context_path = ''
if verify_ssl_certificates is None: verify_ssl_certificates = True
assert_is_type(verify_ssl_certificates, bool)
assert_is_type(proxy, str, None)
assert_is_type(auth, AuthBase, (str, str), None)
assert_is_type(cookies, str, [str], None)
assert_is_type(_msgs, None, (str, str, str))
conn = H2OConnection()
conn._verbose = bool(verbose)
conn._local_server = server
conn._base_url = "%s://%s:%d%s" % (scheme, ip, port, context_path)
conn._name = server.name if server else name
conn._verify_ssl_cert = bool(verify_ssl_certificates)
conn._auth = auth
conn._cookies = cookies
conn._proxies = None
if proxy and proxy != "(default)":
conn._proxies = {scheme: proxy}
elif not proxy:
# Give user a warning if there are any "*_proxy" variables in the environment. [PUBDEV-2504]
# To suppress the warning pass proxy = "(default)".
for name in os.environ:
if name.lower() == scheme + "_proxy":
warn("Proxy is defined in the environment: %s. "
"This may interfere with your H2O Connection." % name)
try:
retries = 20 if server else 5
conn._stage = 1
conn._timeout = 3.0
conn._cluster = conn._test_connection(retries, messages=_msgs)
# If a server is unable to respond within 1s, it should be considered a bug. However we disable this
# setting for now, for no good reason other than to ignore all those bugs :(
conn._timeout = None
# This is a good one! On the surface it registers a callback to be invoked when the script is about
# to finish, but it also has a side effect in that the reference to current connection will be held
# by the ``atexit`` service till the end -- which means it will never be garbage-collected.
atexit.register(lambda: conn.close())
except Exception:
# Reset _session_id so that we know the connection was not initialized properly.
conn._stage = 0
raise
return conn | r"""
Establish connection to an existing H2O server.
The connection is not kept alive, so what this method actually does is it attempts to connect to the
specified server, and checks that the server is healthy and responds to REST API requests. If the H2O server
cannot be reached, an :class:`H2OConnectionError` will be raised. On success this method returns a new
:class:`H2OConnection` object, and it is the only "official" way to create instances of this class.
There are 3 ways to specify the target to connect to (these settings are mutually exclusive):
* pass a ``server`` option,
* pass the full ``url`` for the connection,
* provide a triple of parameters ``ip``, ``port``, ``https``.
:param H2OLocalServer server: connect to the specified local server instance. There is a slight difference
between connecting to a local server by specifying its ip and address, and connecting through
an H2OLocalServer instance: if the server becomes unresponsive, then having access to its process handle
will allow us to query the server status through OS, and potentially provide snapshot of the server's
error log in the exception information.
:param url: full url of the server to connect to.
:param ip: target server's IP address or hostname (default "localhost").
:param port: H2O server's port (default 54321).
:param name: H2O cluster name.
:param https: if True then connect using https instead of http (default False).
:param verify_ssl_certificates: if False then SSL certificate checking will be disabled (default True). This
setting should rarely be disabled, as it makes your connection vulnerable to man-in-the-middle attacks. When
used, it will generate a warning from the requests library. Has no effect when ``https`` is False.
:param auth: authentication token for connecting to the remote server. This can be either a
(username, password) tuple, or an authenticator (AuthBase) object. Please refer to the documentation in
the ``requests.auth`` module.
:param proxy: url address of a proxy server. If you do not specify the proxy, then the requests module
will attempt to use a proxy specified in the environment (in HTTP_PROXY / HTTPS_PROXY variables). We
check for the presence of these variables and issue a warning if they are found. In order to suppress
that warning and use proxy from the environment, pass ``proxy="(default)"``.
:param cookies: Cookie (or list of) to add to requests
:param verbose: if True, then connection progress info will be printed to the stdout.
:param _msgs: custom messages to display during connection. This is a tuple (initial message, success message,
failure message).
:returns: A new :class:`H2OConnection` instance.
:raises H2OConnectionError: if the server cannot be reached.
:raises H2OServerError: if the server is in an unhealthy state (although this might be a recoverable error, the
client itself should decide whether it wants to retry or not). |
def extract_anomalies(y_true, smoothed_errors, window_size, batch_size, error_buffer):
"""
Extracts anomalies from the errors.
Args:
y_true ():
smoothed_errors ():
window_size (int):
batch_size (int):
error_buffer (int):
Returns:
"""
if len(y_true) <= batch_size * window_size:
raise ValueError("Window size (%s) larger than y_true (len=%s)."
% (batch_size, len(y_true)))
num_windows = int((len(y_true) - (batch_size * window_size)) / batch_size)
anomalies_indices = []
for i in range(num_windows + 1):
prev_index = i * batch_size
curr_index = (window_size * batch_size) + (i * batch_size)
if i == num_windows:  # the final window runs to the end of the series
curr_index = len(y_true)
window_smoothed_errors = smoothed_errors[prev_index:curr_index]
window_y_true = y_true[prev_index:curr_index]
epsilon, sd_threshold = compute_threshold(window_smoothed_errors, error_buffer)
window_anom_indices = get_anomalies(
window_smoothed_errors,
window_y_true,
sd_threshold,
i,
anomalies_indices,
error_buffer
)
# get anomalies from inverse of smoothed errors
# This was done in the implementation of NASA paper but
# wasn't referenced in the paper
# we get the inverse by flipping around the mean
mu = np.mean(window_smoothed_errors)
smoothed_errors_inv = [mu + (mu - e) for e in window_smoothed_errors]
epsilon_inv, sd_inv = compute_threshold(smoothed_errors_inv, error_buffer)
inv_anom_indices = get_anomalies(
smoothed_errors_inv,
window_y_true,
sd_inv,
i,
anomalies_indices,
len(y_true)
)
anomalies_indices = list(set(anomalies_indices + inv_anom_indices))
anomalies_indices.extend([i_a + i * batch_size for i_a in window_anom_indices])
# group anomalies
anomalies_indices = sorted(list(set(anomalies_indices)))
anomalies_groups = [list(group) for group in mit.consecutive_groups(anomalies_indices)]
anomaly_sequences = [(g[0], g[-1]) for g in anomalies_groups if not g[0] == g[-1]]
# generate "scores" for anomalies based on the max distance from epsilon for each sequence
anomalies_scores = []
for e_seq in anomaly_sequences:
denominator = np.mean(smoothed_errors) + np.std(smoothed_errors)
score = max([
abs(smoothed_errors[x] - epsilon) / denominator
for x in range(e_seq[0], e_seq[1])
])
anomalies_scores.append(score)
return anomaly_sequences, anomalies_scores | Extracts anomalies from the errors.
Args:
y_true ():
smoothed_errors ():
window_size (int):
batch_size (int):
error_buffer (int):
Returns: |
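The "inverse" errors mentioned in the comments are the smoothed errors mirrored around their mean, so abnormally low errors get flagged by the same thresholding as abnormally high ones. A small numpy illustration of the mirroring step:

import numpy as np

errors = np.array([0.1, 0.2, 0.15, 0.9, 0.05])
mu = errors.mean()
inverted = mu + (mu - errors)   # reflect each error around the mean
print(inverted)                 # the 0.9 spike becomes the low point and vice versa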
def _update_limits_from_api(self):
"""
Call the service's API action to retrieve limit/quota information, and
update AwsLimit objects in ``self.limits`` with this information.
"""
try:
self.connect()
resp = self.conn.get_send_quota()
except EndpointConnectionError as ex:
logger.warning('Skipping SES: %s', str(ex))
return
except ClientError as ex:
if ex.response['Error']['Code'] in ['AccessDenied', '503']:
logger.warning('Skipping SES: %s', ex)
return
raise
self.limits['Daily sending quota']._set_api_limit(resp['Max24HourSend']) | Call the service's API action to retrieve limit/quota information, and
update AwsLimit objects in ``self.limits`` with this information. |
def _get_quantile_ratio(self, X, y):
"""find the expirical quantile of the model
Parameters
----------
X : array-like, shape (n_samples, m_features)
Training vectors, where n_samples is the number of samples
and m_features is the number of features.
y : array-like, shape (n_samples,)
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
Returns
-------
ratio : float on [0, 1]
"""
y_pred = self.predict(X)
return (y_pred > y).mean() | find the empirical quantile of the model
Parameters
----------
X : array-like, shape (n_samples, m_features)
Training vectors, where n_samples is the number of samples
and m_features is the number of features.
y : array-like, shape (n_samples,)
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
Returns
-------
ratio : float on [0, 1] |
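The returned ratio is simply the fraction of training targets that the model's predictions exceed, so for a well-calibrated quantile model it should sit near the requested quantile. A quick numpy illustration of the (y_pred > y).mean() computation:

import numpy as np

y = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
y_pred = np.array([1.5, 1.5, 3.5, 4.5, 5.5])
print((y_pred > y).mean())   # 0.8 -- predictions exceed 4 of the 5 targets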
def top_priority_effect(effects):
"""
Given a collection of variant transcript effects,
return the top priority object. ExonicSpliceSite variants require special
treatment since they actually represent two effects -- the splicing modification
and whatever else would happen to the exonic sequence if nothing else gets
changed. In cases where multiple transcripts give rise to multiple
effects, use a variety of filtering and sorting heuristics to pick
the canonical transcript.
"""
if len(effects) == 0:
raise ValueError("List of effects cannot be empty")
effects = map(
select_between_exonic_splice_site_and_alternate_effect,
effects)
effects_grouped_by_gene = apply_groupby(
effects, fn=gene_id_of_associated_transcript, skip_none=False)
if None in effects_grouped_by_gene:
effects_without_genes = effects_grouped_by_gene.pop(None)
else:
effects_without_genes = []
# if we had any effects associated with genes then choose one of those
if len(effects_grouped_by_gene) > 0:
effects_with_genes = [
top_priority_effect_for_single_gene(gene_effects)
for gene_effects in effects_grouped_by_gene.values()
]
return max(effects_with_genes, key=multi_gene_effect_sort_key)
else:
# if all effects were without genes then choose the best among those
assert len(effects_without_genes) > 0
return max(effects_without_genes, key=multi_gene_effect_sort_key) | Given a collection of variant transcript effects,
return the top priority object. ExonicSpliceSite variants require special
treatment since they actually represent two effects -- the splicing modification
and whatever else would happen to the exonic sequence if nothing else gets
changed. In cases where multiple transcripts give rise to multiple
effects, use a variety of filtering and sorting heuristics to pick
the canonical transcript. |
def get_file(self, name, filename):
"""Saves the content of file named ``name`` to ``filename``.
Works like :meth:`get_stream`, but ``filename`` is the name of
a file which will be created (or overwritten).
Returns the full versioned name of the retrieved file.
"""
stream, vname = self.get_stream(name)
path, version = split_name(vname)
dir_path = os.path.dirname(filename)
if dir_path:
mkdir(dir_path)
with open(filename, 'wb') as f:
shutil.copyfileobj(stream, f)
return vname | Saves the content of file named ``name`` to ``filename``.
Works like :meth:`get_stream`, but ``filename`` is the name of
a file which will be created (or overwritten).
Returns the full versioned name of the retrieved file. |
def apply_strategy(self):
""" Apply deduplication with the configured strategy.
Transform strategy keyword into its method ID, and call it.
"""
method_id = self.conf.strategy.replace('-', '_')
if not hasattr(DuplicateSet, method_id):
raise NotImplementedError(
"DuplicateSet.{}() method.".format(method_id))
return getattr(self, method_id)() | Apply deduplication with the configured strategy.
Transform strategy keyword into its method ID, and call it. |
def do_create(marfile, files, compress, productversion=None, channel=None,
signing_key=None, signing_algorithm=None):
"""Create a new MAR file."""
with open(marfile, 'w+b') as f:
with MarWriter(f, productversion=productversion, channel=channel,
signing_key=signing_key,
signing_algorithm=signing_algorithm,
) as m:
for path in files:  # avoid shadowing 'f', the open MAR file handle
m.add(path, compress=compress) | Create a new MAR file. |
def run(self, clf):
""" run activity-based voxel selection
Sort the voxels based on the cross-validation accuracy
of their activity vectors within the searchlight
Parameters
----------
clf: classification function
the classifier to be used in cross validation
Returns
-------
result_volume: 3D array of accuracy numbers
contains the voxelwise accuracy numbers obtained via Searchlight
results: list of tuple (voxel_id, accuracy)
the accuracy numbers of all voxels, in accuracy descending order
the length of array equals the number of voxels
"""
rank = MPI.COMM_WORLD.Get_rank()
if rank == 0:
logger.info(
'running activity-based voxel selection via Searchlight'
)
self.sl.distribute([self.data], self.mask)
self.sl.broadcast((self.labels, self.num_folds, clf))
if rank == 0:
logger.info(
'data preparation done'
)
# obtain a 3D array with accuracy numbers
result_volume = self.sl.run_searchlight(_sfn)
# get result tuple list from the volume
result_list = result_volume[self.mask]
results = []
if rank == 0:
for idx, value in enumerate(result_list):
if value is None:
value = 0
results.append((idx, value))
# Sort the voxels
results.sort(key=lambda tup: tup[1], reverse=True)
logger.info(
'activity-based voxel selection via Searchlight is done'
)
return result_volume, results | run activity-based voxel selection
Sort the voxels based on the cross-validation accuracy
of their activity vectors within the searchlight
Parameters
----------
clf: classification function
the classifier to be used in cross validation
Returns
-------
result_volume: 3D array of accuracy numbers
contains the voxelwise accuracy numbers obtained via Searchlight
results: list of tuple (voxel_id, accuracy)
the accuracy numbers of all voxels, in accuracy descending order
the length of array equals the number of voxels |
def purge(self):
"""
Purge the stream. This removes all data and clears the calculated intervals
:return: None
"""
self.channel.purge_stream(self.stream_id, remove_definition=False, sandbox=None) | Purge the stream. This removes all data and clears the calculated intervals
:return: None |
def check(self, feature):
"""Check that fit can be called on reference data"""
mapper = feature.as_dataframe_mapper()
mapper.fit(self.X, y=self.y) | Check that fit can be called on reference data |
def isempty(path):
"""Returns True if the given file or directory path is empty.
**Examples**:
::
auxly.filesys.isempty("foo.txt") # Works on files...
auxly.filesys.isempty("bar") # ...or directories!
"""
if op.isdir(path):
return [] == os.listdir(path)
elif op.isfile(path):
return 0 == os.stat(path).st_size
return None | Returns True if the given file or directory path is empty.
**Examples**:
::
auxly.filesys.isempty("foo.txt") # Works on files...
auxly.filesys.isempty("bar") # ...or directories! |
def get_scaled(self, factor):
""" Get a new time unit, scaled by the given factor """
res = TimeUnit(self)
res._factor = self._factor * factor
res._unit = self._unit
return res | Get a new time unit, scaled by the given factor |
def load_essentiality(self, model):
"""Load and validate all data files."""
data = self.config.get("essentiality")
if data is None:
return
experiments = data.get("experiments")
if experiments is None or len(experiments) == 0:
return
path = self.get_path(data,
join("data", "experimental", "essentiality"))
for exp_id, exp in iteritems(experiments):
if exp is None:
exp = dict()
filename = exp.get("filename")
if filename is None:
filename = join(path, "{}.csv".format(exp_id))
elif not isabs(filename):
filename = join(path, filename)
experiment = EssentialityExperiment(
identifier=exp_id, obj=exp, filename=filename)
if experiment.medium is not None:
assert experiment.medium in self.media, \
"Experiment '{}' has an undefined medium '{}'.".format(
exp_id, experiment.medium)
experiment.medium = self.media[experiment.medium]
experiment.load()
experiment.validate(model)
self.essentiality[exp_id] = experiment | Load and validate all data files. |
def _get_nop_length(cls, insns):
"""
Calculate the total size of leading nop instructions.
:param insns: A list of capstone insn objects.
:return: Number of bytes of leading nop instructions.
:rtype: int
"""
nop_length = 0
if insns and cls._is_noop_insn(insns[0]):
# see where those nop instructions terminate
for insn in insns:
if cls._is_noop_insn(insn):
nop_length += insn.size
else:
break
return nop_length | Calculate the total size of leading nop instructions.
:param insns: A list of capstone insn objects.
:return: Number of bytes of leading nop instructions.
:rtype: int |
def convertDict2Attrs(self, *args, **kwargs):
"""The trick for iterable Mambu Objects comes here:
You iterate over each element of the responded List from Mambu,
and create a Mambu Task object for each one, initializing
them one at a time, and changing the attrs attribute (which just
holds a list of plain dictionaries) with a MambuTask just
created.
"""
for n,a in enumerate(self.attrs):
# ok ok, I'm modifying elements of a list while iterating it. BAD PRACTICE!
try:
params = self.params
except AttributeError as aerr:
params = {}
kwargs.update(params)
try:
task = self.mambutaskclass(urlfunc=None, entid=None, *args, **kwargs)
except AttributeError as ae:
self.mambutaskclass = MambuTask
task = self.mambutaskclass(urlfunc=None, entid=None, *args, **kwargs)
task.init(a, *args, **kwargs)
self.attrs[n] = task | The trick for iterable Mambu Objects comes here:
You iterate over each element of the responded List from Mambu,
and create a Mambu Task object for each one, initializing
them one at a time, and changing the attrs attribute (which just
holds a list of plain dictionaries) with a MambuTask just
created. |
def parse_deps(orig_doc, options={}):
"""Generate dependency parse in {'words': [], 'arcs': []} format.
doc (Doc): Document to parse.
RETURNS (dict): Generated dependency parse keyed by words and arcs.
"""
doc = Doc(orig_doc.vocab).from_bytes(orig_doc.to_bytes())
if not doc.is_parsed:
user_warning(Warnings.W005)
if options.get("collapse_phrases", False):
with doc.retokenize() as retokenizer:
for np in list(doc.noun_chunks):
attrs = {
"tag": np.root.tag_,
"lemma": np.root.lemma_,
"ent_type": np.root.ent_type_,
}
retokenizer.merge(np, attrs=attrs)
if options.get("collapse_punct", True):
spans = []
for word in doc[:-1]:
if word.is_punct or not word.nbor(1).is_punct:
continue
start = word.i
end = word.i + 1
while end < len(doc) and doc[end].is_punct:
end += 1
span = doc[start:end]
spans.append((span, word.tag_, word.lemma_, word.ent_type_))
with doc.retokenize() as retokenizer:
for span, tag, lemma, ent_type in spans:
attrs = {"tag": tag, "lemma": lemma, "ent_type": ent_type}
retokenizer.merge(span, attrs=attrs)
if options.get("fine_grained"):
words = [{"text": w.text, "tag": w.tag_} for w in doc]
else:
words = [{"text": w.text, "tag": w.pos_} for w in doc]
arcs = []
for word in doc:
if word.i < word.head.i:
arcs.append(
{"start": word.i, "end": word.head.i, "label": word.dep_, "dir": "left"}
)
elif word.i > word.head.i:
arcs.append(
{
"start": word.head.i,
"end": word.i,
"label": word.dep_,
"dir": "right",
}
)
return {"words": words, "arcs": arcs, "settings": get_doc_settings(orig_doc)} | Generate dependency parse in {'words': [], 'arcs': []} format.
doc (Doc): Document to parse.
RETURNS (dict): Generated dependency parse keyed by words and arcs. |
def post_async(self, url, data, callback=None, params=None, headers=None):
"""
Asynchronous POST request with the process pool.
"""
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, None)
self._authenticate(params, headers)
data = json.dumps(data, cls=JSONEncoder)
process_pool.apply_async(make_post_request,
args=(endpoint, data, params, headers),
callback=callback) | Asynchronous POST request with the process pool. |
def supports_object_type(self, object_type=None):
"""Tests if the given object type is supported.
arg: object_type (osid.type.Type): an object Type
return: (boolean) - ``true`` if the type is supported, ``false``
otherwise
raise: IllegalState - syntax is not an ``OBJECT``
raise: NullArgument - ``object_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.Metadata.supports_coordinate_type
from .osid_errors import IllegalState, NullArgument
if not object_type:
raise NullArgument('no input Type provided')
if self._kwargs['syntax'] not in ['``OBJECT``']:
raise IllegalState('syntax is not an OBJECT')
return object_type in self.get_object_types | Tests if the given object type is supported.
arg: object_type (osid.type.Type): an object Type
return: (boolean) - ``true`` if the type is supported, ``false``
otherwise
raise: IllegalState - syntax is not an ``OBJECT``
raise: NullArgument - ``object_type`` is ``null``
*compliance: mandatory -- This method must be implemented.* |
def mosh_args(conn):
"""Create SSH command for connecting specified server."""
I, = conn.identities
identity = I.identity_dict
args = []
if 'port' in identity:
args += ['-p', identity['port']]
if 'user' in identity:
args += [identity['user']+'@'+identity['host']]
else:
args += [identity['host']]
return args | Create SSH command for connecting to the specified server.
def allowance(self, filename):
"""Preconditions:
- our agent applies to this entry
- filename is URL decoded"""
for line in self.rulelines:
if line.applies_to(filename):
return line.allowance
return True | Preconditions:
- our agent applies to this entry
- filename is URL decoded |
def bgseq(code):
"""
Returns the background color terminal escape sequence for the given color code number.
"""
if isinstance(code, str):
code = nametonum(code)
if code == -1:
return ""
s = termcap.get('setab', code) or termcap.get('setb', code)
return s | Returns the background color terminal escape sequence for the given color code number. |
def consume_messages(self, max_next_messages):
""" Get messages batch from Kafka (list at output) """
# get messages list from kafka
if self.__next_messages == 0:
self.set_next_messages(min(1000, max_next_messages))
self.set_next_messages(min(self.__next_messages, max_next_messages))
mark = time.time()
for record in self._get_messages_from_consumer():
yield record.partition, record.offset, record.key, record.value
newmark = time.time()
if newmark - mark > 30:
self.set_next_messages(self.__next_messages / 2 or 1)
elif newmark - mark < 5:
self.set_next_messages(min(self.__next_messages + 100, max_next_messages)) | Get messages batch from Kafka (list at output) |
def _search_files(path):
"""Search a folder for data files
.. versionchanged:: 0.6.0
`path` is not searched recursively anymore
"""
path = pathlib.Path(path)
fifo = []
for fp in path.glob("*"):
if fp.is_dir():
continue
for fmt in formats:
# series data is not supported in SeriesFolder
if not fmt.is_series and fmt.verify(fp):
fifo.append((fp, fmt.__name__))
break
# ignore qpimage formats if multiple formats were
# detected.
theformats = [ff[1] for ff in fifo]
formset = set(theformats)
if len(formset) > 1:
fmts_qpimage = ["SingleHdf5Qpimage", "SeriesHdf5Qpimage"]
fifo = [ff for ff in fifo if ff[1] not in fmts_qpimage]
# ignore raw tif files if single_tif_phasics is detected
if len(formset) > 1 and "SingleTifPhasics" in theformats:
fmts_badtif = "SingleTifHolo"
fifo = [ff for ff in fifo if ff[1] not in fmts_badtif]
# otherwise, prevent multiple file formats
theformats2 = [ff[1] for ff in fifo]
formset2 = set(theformats2)
if len(formset2) > 1:
msg = "Qpformat does not support multiple different file " \
+ "formats within one directory: {}".format(formset2)
raise MultipleFormatsNotSupportedError(msg)
# sort the lists
fifo = sorted(fifo)
return fifo | Search a folder for data files
.. versionchanged:: 0.6.0
`path` is not searched recursively anymore |
def semiyearly(date=datetime.date.today()):
"""
Twice a year.
"""
return datetime.date(date.year, 1 if date.month < 7 else 7, 1) | Twice a year. |
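A minimal, self-contained check of the half-year mapping above; the sample dates are arbitrary.
import datetime

def semiyearly(date=datetime.date.today()):
    # Dates in January-June map to Jan 1, dates in July-December map to Jul 1.
    return datetime.date(date.year, 1 if date.month < 7 else 7, 1)

print(semiyearly(datetime.date(2020, 3, 15)))  # 2020-01-01
print(semiyearly(datetime.date(2020, 8, 15)))  # 2020-07-01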
def update(self, modelID, modelParams, modelParamsHash, metricResult,
completed, completionReason, matured, numRecords):
""" Insert a new entry or update an existing one. If this is an update
of an existing entry, then modelParams will be None
Parameters:
--------------------------------------------------------------------
modelID: globally unique modelID of this model
modelParams: params dict for this model, or None if this is just an update
of a model that it already previously reported on.
See the comments for the createModels() method for
a description of this dict.
modelParamsHash: hash of the modelParams dict, generated by the worker
that put it into the model database.
metricResult: value on the optimizeMetric for this model.
May be None if we have no results yet.
completed: True if the model has completed evaluation, False if it
is still running (and these are online results)
completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates
matured: True if this model has matured
numRecords: Number of records that have been processed so far by this
model.
retval: Canonicalized result on the optimize metric
"""
# The modelParamsHash must always be provided - it can change after a
# model is inserted into the models table if it got detected as an
# orphan
assert (modelParamsHash is not None)
# We consider a model metricResult as "final" if it has completed or
# matured. By default, assume anything that has completed has matured
if completed:
matured = True
# Get the canonicalized optimize metric results. For this metric, lower
# is always better
if metricResult is not None and matured and \
completionReason in [ClientJobsDAO.CMPL_REASON_EOF,
ClientJobsDAO.CMPL_REASON_STOPPED]:
# Canonicalize the error score so that lower is better
if self._hsObj._maximize:
errScore = -1 * metricResult
else:
errScore = metricResult
if errScore < self._bestResult:
self._bestResult = errScore
self._bestModelID = modelID
self._hsObj.logger.info("New best model after %d evaluations: errScore "
"%g on model %s" % (len(self._allResults), self._bestResult,
self._bestModelID))
else:
errScore = numpy.inf
# If this model completed with an unacceptable completion reason, set the
# errScore to infinite and essentially make this model invisible to
# further queries
if completed and completionReason in [ClientJobsDAO.CMPL_REASON_ORPHAN]:
errScore = numpy.inf
hidden = True
else:
hidden = False
# Update our set of erred models and completed models. These are used
# to determine if we should abort the search because of too many errors
if completed:
self._completedModels.add(modelID)
self._numCompletedModels = len(self._completedModels)
if completionReason == ClientJobsDAO.CMPL_REASON_ERROR:
self._errModels.add(modelID)
self._numErrModels = len(self._errModels)
# Are we creating a new entry?
wasHidden = False
if modelID not in self._modelIDToIdx:
assert (modelParams is not None)
entry = dict(modelID=modelID, modelParams=modelParams,
modelParamsHash=modelParamsHash,
errScore=errScore, completed=completed,
matured=matured, numRecords=numRecords, hidden=hidden)
self._allResults.append(entry)
entryIdx = len(self._allResults) - 1
self._modelIDToIdx[modelID] = entryIdx
self._paramsHashToIndexes[modelParamsHash] = entryIdx
swarmId = modelParams['particleState']['swarmId']
if not hidden:
# Update the list of particles in each swarm
if swarmId in self._swarmIdToIndexes:
self._swarmIdToIndexes[swarmId].append(entryIdx)
else:
self._swarmIdToIndexes[swarmId] = [entryIdx]
# Update number of particles at each generation in this swarm
genIdx = modelParams['particleState']['genIdx']
numPsEntry = self._swarmNumParticlesPerGeneration.get(swarmId, [0])
while genIdx >= len(numPsEntry):
numPsEntry.append(0)
numPsEntry[genIdx] += 1
self._swarmNumParticlesPerGeneration[swarmId] = numPsEntry
# Replacing an existing one
else:
entryIdx = self._modelIDToIdx.get(modelID, None)
assert (entryIdx is not None)
entry = self._allResults[entryIdx]
wasHidden = entry['hidden']
# If the paramsHash changed, note that. This can happen for orphaned
# models
if entry['modelParamsHash'] != modelParamsHash:
self._paramsHashToIndexes.pop(entry['modelParamsHash'])
self._paramsHashToIndexes[modelParamsHash] = entryIdx
entry['modelParamsHash'] = modelParamsHash
# Get the model params, swarmId, and genIdx
modelParams = entry['modelParams']
swarmId = modelParams['particleState']['swarmId']
genIdx = modelParams['particleState']['genIdx']
# If this particle just became hidden, remove it from our swarm counts
if hidden and not wasHidden:
assert (entryIdx in self._swarmIdToIndexes[swarmId])
self._swarmIdToIndexes[swarmId].remove(entryIdx)
self._swarmNumParticlesPerGeneration[swarmId][genIdx] -= 1
# Update the entry for the latest info
entry['errScore'] = errScore
entry['completed'] = completed
entry['matured'] = matured
entry['numRecords'] = numRecords
entry['hidden'] = hidden
# Update the particle best errScore
particleId = modelParams['particleState']['id']
genIdx = modelParams['particleState']['genIdx']
if matured and not hidden:
(oldResult, pos) = self._particleBest.get(particleId, (numpy.inf, None))
if errScore < oldResult:
pos = Particle.getPositionFromState(modelParams['particleState'])
self._particleBest[particleId] = (errScore, pos)
# Update the particle latest generation index
prevGenIdx = self._particleLatestGenIdx.get(particleId, -1)
if not hidden and genIdx > prevGenIdx:
self._particleLatestGenIdx[particleId] = genIdx
elif hidden and not wasHidden and genIdx == prevGenIdx:
self._particleLatestGenIdx[particleId] = genIdx-1
# Update the swarm best score
if not hidden:
swarmId = modelParams['particleState']['swarmId']
if not swarmId in self._swarmBestOverall:
self._swarmBestOverall[swarmId] = []
bestScores = self._swarmBestOverall[swarmId]
while genIdx >= len(bestScores):
bestScores.append((None, numpy.inf))
if errScore < bestScores[genIdx][1]:
bestScores[genIdx] = (modelID, errScore)
# Update the self._modifiedSwarmGens flags to support the
# getMaturedSwarmGenerations() call.
if not hidden:
key = (swarmId, genIdx)
if not key in self._maturedSwarmGens:
self._modifiedSwarmGens.add(key)
return errScore | Insert a new entry or update an existing one. If this is an update
of an existing entry, then modelParams will be None
Parameters:
--------------------------------------------------------------------
modelID: globally unique modelID of this model
modelParams: params dict for this model, or None if this is just an update
of a model that it already previously reported on.
See the comments for the createModels() method for
a description of this dict.
modelParamsHash: hash of the modelParams dict, generated by the worker
that put it into the model database.
metricResult: value on the optimizeMetric for this model.
May be None if we have no results yet.
completed: True if the model has completed evaluation, False if it
is still running (and these are online results)
completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates
matured: True if this model has matured
numRecords: Number of records that have been processed so far by this
model.
retval: Canonicalized result on the optimize metric |
def aliases(*names):
"""
Defines alternative command name(s) for given function (along with its
original name). Usage::
@aliases('co', 'check')
def checkout(args):
...
The resulting command will be available as ``checkout``, ``check`` and ``co``.
.. note::
This decorator only works with a recent version of argparse (see `Python
issue 9324`_ and `Python rev 4c0426`_). Such version ships with
**Python 3.2+** and may be available in other environments as a separate
package. Argh does not issue warnings and simply ignores aliases if
they are not supported. See :attr:`~argh.assembling.SUPPORTS_ALIASES`.
.. _Python issue 9324: http://bugs.python.org/issue9324
.. _Python rev 4c0426: http://hg.python.org/cpython/rev/4c0426261148/
.. versionadded:: 0.19
"""
def wrapper(func):
setattr(func, ATTR_ALIASES, names)
return func
return wrapper | Defines alternative command name(s) for given function (along with its
original name). Usage::
@aliases('co', 'check')
def checkout(args):
...
The resulting command will be available as ``checkout``, ``check`` and ``co``.
.. note::
This decorator only works with a recent version of argparse (see `Python
issue 9324`_ and `Python rev 4c0426`_). Such version ships with
**Python 3.2+** and may be available in other environments as a separate
package. Argh does not issue warnings and simply ignores aliases if
they are not supported. See :attr:`~argh.assembling.SUPPORTS_ALIASES`.
.. _Python issue 9324: http://bugs.python.org/issue9324
.. _Python rev 4c0426: http://hg.python.org/cpython/rev/4c0426261148/
.. versionadded:: 0.19 |
def get_channelstate_by_token_network_and_partner(
chain_state: ChainState,
token_network_id: TokenNetworkID,
partner_address: Address,
) -> Optional[NettingChannelState]:
""" Return the NettingChannelState if it exists, None otherwise. """
token_network = get_token_network_by_identifier(
chain_state,
token_network_id,
)
channel_state = None
if token_network:
channels = [
token_network.channelidentifiers_to_channels[channel_id]
for channel_id in token_network.partneraddresses_to_channelidentifiers[partner_address]
]
states = filter_channels_by_status(
channels,
[CHANNEL_STATE_UNUSABLE],
)
if states:
channel_state = states[-1]
return channel_state | Return the NettingChannelState if it exists, None otherwise. |
def dumps(obj, *args, **kwargs):
''' Typeless dump an object to json string '''
return json.dumps(obj, *args, cls=TypelessSONEncoder, ensure_ascii=False, **kwargs) | Typeless dump an object to json string |
def consistency(self):
"""
Get the percentage of fill between the min and max time over which the moc is defined.
A value near 0 indicates a sparse temporal moc (i.e. the moc does not cover a lot
of time and covers very distant times). A value near 1 means that the moc covers
a lot of time without big pauses.
Returns
-------
result : float
fill percentage (between 0 and 1.)
"""
result = self.total_duration.jd / (self.max_time - self.min_time).jd
return result | Get the percentage of fill between the min and max time over which the moc is defined.
A value near 0 indicates a sparse temporal moc (i.e. the moc does not cover a lot
of time and covers very distant times). A value near 1 means that the moc covers
a lot of time without big pauses.
Returns
-------
result : float
fill percentage (between 0 and 1.) |
def _make_plan(plan_dict):
""" Construct a Plan or ProfiledPlan from a dictionary of metadata values.
:param plan_dict:
:return:
"""
operator_type = plan_dict["operatorType"]
identifiers = plan_dict.get("identifiers", [])
arguments = plan_dict.get("args", [])
children = [_make_plan(child) for child in plan_dict.get("children", [])]
if "dbHits" in plan_dict or "rows" in plan_dict:
db_hits = plan_dict.get("dbHits", 0)
rows = plan_dict.get("rows", 0)
return ProfiledPlan(operator_type, identifiers, arguments, children, db_hits, rows)
else:
return Plan(operator_type, identifiers, arguments, children) | Construct a Plan or ProfiledPlan from a dictionary of metadata values.
:param plan_dict:
:return: |
def GetParametro(self, clave, clave1=None, clave2=None, clave3=None, clave4=None):
"Devuelve un parámetro de salida (establecido por llamada anterior)"
# útil para parámetros de salida (por ej. campos de TransaccionPlainWS)
valor = self.params_out.get(clave)
# busco datos "anidados" (listas / diccionarios)
for clave in (clave1, clave2, clave3, clave4):
if clave is not None and valor is not None:
if isinstance(clave, basestring) and clave.isdigit():
clave = int(clave)
try:
valor = valor[clave]
except (KeyError, IndexError):
valor = None
if valor is not None:
if isinstance(valor, basestring):
return valor
else:
return str(valor)
else:
return "" | Devuelve un parámetro de salida (establecido por llamada anterior) |
def build_job_configs(self, args):
"""Hook to build job configurations
"""
job_configs = {}
components = Component.build_from_yamlfile(args['comp'])
NAME_FACTORY.update_base_dict(args['data'])
ret_dict = make_diffuse_comp_info_dict(components=components,
library=args['library'],
basedir=NAME_FACTORY.base_dict['basedir'])
diffuse_comp_info_dict = ret_dict['comp_info_dict']
for diffuse_comp_info_key in sorted(diffuse_comp_info_dict.keys()):
diffuse_comp_info_value = diffuse_comp_info_dict[diffuse_comp_info_key]
for comp in components:
zcut = "zmax%i" % comp.zmax
key = comp.make_key('{ebin_name}_{evtype_name}')
if diffuse_comp_info_value.components is None:
sub_comp_info = diffuse_comp_info_value
else:
sub_comp_info = diffuse_comp_info_value.get_component_info(comp)
name_keys = dict(zcut=zcut,
sourcekey=sub_comp_info.sourcekey,
ebin=comp.ebin_name,
psftype=comp.evtype_name,
mktime='none',
coordsys=comp.coordsys,
irf_ver=NAME_FACTORY.irf_ver(),
fullpath=True)
outfile = NAME_FACTORY.srcmaps(**name_keys)
outfile_tokens = os.path.splitext(outfile)
infile_regexp = "%s_*.fits*" % outfile_tokens[0]
full_key = "%s_%s" % (sub_comp_info.sourcekey, key)
logfile = make_nfs_path(outfile.replace('.fits', '.log'))
job_configs[full_key] = dict(output=outfile,
args=infile_regexp,
hdu=sub_comp_info.source_name,
logfile=logfile)
return job_configs | Hook to build job configurations |
def delay_and_stop(duration, dll, device_number):
"""Stop vibration aka force feedback aka rumble on
Windows after duration miliseconds."""
xinput = getattr(ctypes.windll, dll)
time.sleep(duration/1000)
xinput_set_state = xinput.XInputSetState
xinput_set_state.argtypes = [
ctypes.c_uint, ctypes.POINTER(XinputVibration)]
xinput_set_state.restype = ctypes.c_uint
vibration = XinputVibration(0, 0)
xinput_set_state(device_number, ctypes.byref(vibration)) | Stop vibration aka force feedback aka rumble on
Windows after duration milliseconds.
def verify_merkle_path(merkle_root_hex, serialized_path, leaf_hash_hex, hash_function=bin_double_sha256):
"""
Verify a merkle path. The given path is the path from a leaf node to the root itself.
merkle_root_hex is a little-endian, hex-encoded hash.
serialized_path is the serialized merkle path
path_hex is a list of little-endian, hex-encoded hashes.
Return True if the path is consistent with the merkle root.
Return False if not.
"""
merkle_root = hex_to_bin_reversed(merkle_root_hex)
leaf_hash = hex_to_bin_reversed(leaf_hash_hex)
path = MerkleTree.path_deserialize(serialized_path)
path = [{'order': p['order'], 'hash': hex_to_bin_reversed(p['hash'])} for p in path]
if len(path) == 0:
raise ValueError("Empty path")
cur_hash = leaf_hash
for i in range(0, len(path)):
if path[i]['order'] == 'l':
# left sibling
cur_hash = hash_function(path[i]['hash'] + cur_hash)
elif path[i]['order'] == 'r':
# right sibling
cur_hash = hash_function(cur_hash + path[i]['hash'])
elif path[i]['order'] == 'm':
# merkle root
assert len(path) == 1
return cur_hash == path[i]['hash']
return cur_hash == merkle_root | Verify a merkle path. The given path is the path from a leaf node to the root itself.
merkle_root_hex is a little-endian, hex-encoded hash.
serialized_path is the serialized merkle path
path_hex is a list of little-endian, hex-encoded hashes.
Return True if the path is consistent with the merkle root.
Return False if not. |
def _get_job_results(query=None):
'''
Executes a query that requires a job for completion. This function will wait for the job to complete
and return the results.
'''
if not query:
raise CommandExecutionError("Query parameters cannot be empty.")
response = __proxy__['panos.call'](query)
# If the response contains a job, we will wait for the results
if 'result' in response and 'job' in response['result']:
jid = response['result']['job']
while get_job(jid)['result']['job']['status'] != 'FIN':
time.sleep(5)
return get_job(jid)
else:
return response | Executes a query that requires a job for completion. This function will wait for the job to complete
and return the results. |
def pace(self):
"""Average pace (mm:ss/km for the workout"""
secs_per_km = self.duration / (self.distance / 1000)
return time.strftime('%M:%S', time.gmtime(secs_per_km)) | Average pace (mm:ss/km) for the workout
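A standalone sketch of the pace formula above; the duration (seconds) and distance (metres) values are illustrative.
import time

duration, distance = 1800, 5000.0           # 30 minutes over 5 km (illustrative)
secs_per_km = duration / (distance / 1000)  # seconds needed per kilometre
print(time.strftime('%M:%S', time.gmtime(secs_per_km)))  # 06:00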
def cauldron_extras(self):
""" Yield extra tuples containing a field name and a callable that takes
a row
"""
for extra in super(Dimension, self).cauldron_extras:
yield extra
if self.formatters:
prop = self.id + '_raw'
else:
prop = self.id_prop
yield self.id + '_id', lambda row: getattr(row, prop) | Yield extra tuples containing a field name and a callable that takes
a row |
def add_edges(self):
"""
Draws all of the edges in the graph.
"""
for group, edgelist in self.edges.items():
for (u, v, d) in edgelist:
self.draw_edge(u, v, d, group) | Draws all of the edges in the graph. |
def _definition(self):
"""|HeaderPart| object containing content of this header."""
headerReference = self._sectPr.get_headerReference(self._hdrftr_index)
return self._document_part.header_part(headerReference.rId) | |HeaderPart| object containing content of this header. |
def update_layers_esri_mapserver(service, greedy_opt=False):
"""
Update layers for an ESRI REST MapServer.
Sample endpoint: https://gis.ngdc.noaa.gov/arcgis/rest/services/SampleWorldCities/MapServer/?f=json
"""
try:
esri_service = ArcMapService(service.url)
# set srs
# both mapserver and imageserver exposes just one srs at the service level
# not sure if other ones are supported, for now we just store this one
# not sure why this is needed, for now commenting out
# if wkt_text:
# params = {'exact': 'True', 'error': 'True', 'mode': 'wkt', 'terms': wkt_text}
# req = requests.get('http://prj2epsg.org/search.json', params=params)
# object = json.loads(req.content)
# srs = int(object['codes'][0]['code'])
srs_code = esri_service.spatialReference.wkid
srs, created = SpatialReferenceSystem.objects.get_or_create(code=srs_code)
service.srs.add(srs)
service.update_validity()
# check if it has a WMS interface
if 'supportedExtensions' in esri_service._json_struct and greedy_opt:
if 'WMSServer' in esri_service._json_struct['supportedExtensions']:
# we need to change the url
# http://cga1.cga.harvard.edu/arcgis/rest/services/ecuador/ecuadordata/MapServer?f=pjson
# http://cga1.cga.harvard.edu/arcgis/services/ecuador/
# ecuadordata/MapServer/WMSServer?request=GetCapabilities&service=WMS
wms_url = service.url.replace('/rest/services/', '/services/')
if '?f=pjson' in wms_url:
wms_url = wms_url.replace('?f=pjson', 'WMSServer?')
if '?f=json' in wms_url:
wms_url = wms_url.replace('?f=json', 'WMSServer?')
LOGGER.debug('This ESRI REST endpoint has an WMS interface to process: %s' % wms_url)
# import here as otherwise is circular (TODO refactor)
from utils import create_service_from_endpoint
create_service_from_endpoint(wms_url, 'OGC:WMS', catalog=service.catalog)
# now process the REST interface
layer_n = 0
total = len(esri_service.layers)
for esri_layer in esri_service.layers:
# in some case the json is invalid
# esri_layer._json_struct
# {u'currentVersion': 10.01,
# u'error':
# {u'message': u'An unexpected error occurred processing the request.', u'code': 500, u'details': []}}
if 'error' not in esri_layer._json_struct:
LOGGER.debug('Updating layer %s' % esri_layer.name)
layer, created = Layer.objects.get_or_create(
name=esri_layer.id,
service=service,
catalog=service.catalog
)
if layer.active:
layer.type = 'ESRI:ArcGIS:MapServer'
links = [[layer.type, service.url],
['OGC:WMTS', settings.SITE_URL.rstrip('/') + '/' + layer.get_url_endpoint()]]
layer.title = esri_layer.name
layer.abstract = esri_service.serviceDescription
layer.url = service.url
layer.page_url = layer.get_absolute_url
links.append([
'WWW:LINK',
settings.SITE_URL.rstrip('/') + layer.page_url
])
try:
layer.bbox_x0 = esri_layer.extent.xmin
layer.bbox_y0 = esri_layer.extent.ymin
layer.bbox_x1 = esri_layer.extent.xmax
layer.bbox_y1 = esri_layer.extent.ymax
except KeyError:
pass
try:
layer.bbox_x0 = esri_layer._json_struct['extent']['xmin']
layer.bbox_y0 = esri_layer._json_struct['extent']['ymin']
layer.bbox_x1 = esri_layer._json_struct['extent']['xmax']
layer.bbox_y1 = esri_layer._json_struct['extent']['ymax']
except Exception:
pass
layer.wkt_geometry = bbox2wktpolygon([layer.bbox_x0, layer.bbox_y0, layer.bbox_x1, layer.bbox_y1])
layer.xml = create_metadata_record(
identifier=str(layer.uuid),
source=service.url,
links=links,
format='ESRI:ArcGIS:MapServer',
type=layer.csw_type,
relation=service.id_string,
title=layer.title,
alternative=layer.title,
abstract=layer.abstract,
wkt_geometry=layer.wkt_geometry
)
layer.anytext = gen_anytext(layer.title, layer.abstract)
layer.save()
# dates
add_mined_dates(layer)
layer_n = layer_n + 1
# exits if DEBUG_SERVICES
LOGGER.debug("Updating layer n. %s/%s" % (layer_n, total))
if DEBUG_SERVICES and layer_n == DEBUG_LAYER_NUMBER:
return
except Exception as err:
message = "update_layers_esri_mapserver: {0}".format(
err
)
check = Check(
content_object=service,
success=False,
response_time=0,
message=message
)
check.save() | Update layers for an ESRI REST MapServer.
Sample endpoint: https://gis.ngdc.noaa.gov/arcgis/rest/services/SampleWorldCities/MapServer/?f=json |
def list_ec2(region, filter_by_kwargs):
"""List running ec2 instances."""
conn = boto.ec2.connect_to_region(region)
instances = conn.get_only_instances()
return lookup(instances, filter_by=filter_by_kwargs) | List running ec2 instances. |
def callprop(self, prop, *args):
'''Call a property prop as a method (this will be self).
NOTE: don't pass this and arguments here; these will be added
automatically!'''
if not isinstance(prop, basestring):
prop = prop.to_string().value
cand = self.get(prop)
if not cand.is_callable():
raise MakeError('TypeError',
'%s is not a function' % cand.typeof())
return cand.call(self, args) | Call a property prop as a method (this will be self).
NOTE: don't pass this and arguments here; these will be added
automatically! |
def jens_transformation_beta(graph: BELGraph) -> DiGraph:
"""Apply Jens' Transformation (Type 2) to the graph.
1. Induce a sub-graph over causal and correlative relations
2. Transform edges with the following rules:
- increases => backwards decreases
- decreases => decreases
- positive correlation => delete
- negative correlation => two way decreases
The resulting graph can be used to search for 3-cycles, which now symbolize stable triples where ``A -> B``,
``A -| C`` and ``B negativeCorrelation C``.
"""
result = DiGraph()
for u, v, d in graph.edges(data=True):
relation = d[RELATION]
if relation == NEGATIVE_CORRELATION:
result.add_edge(u, v)
result.add_edge(v, u)
elif relation in CAUSAL_INCREASE_RELATIONS:
result.add_edge(v, u)
elif relation in CAUSAL_DECREASE_RELATIONS:
result.add_edge(u, v)
return result | Apply Jens' Transformation (Type 2) to the graph.
1. Induce a sub-graph over causal and correlative relations
2. Transform edges with the following rules:
- increases => backwards decreases
- decreases => decreases
- positive correlation => delete
- negative correlation => two way decreases
The resulting graph can be used to search for 3-cycles, which now symbolize stable triples where ``A -> B``,
``A -| C`` and ``B negativeCorrelation C``. |
def readLiteralContextModes(self):
"""Read literal context modes.
LSB6: lower 6 bits of last char
MSB6: upper 6 bits of last char
UTF8: roughly dependent on categories:
upper 4 bits depend on category of last char:
control/whitespace/space/ punctuation/quote/%/open/close/
comma/period/=/digits/ VOWEL/CONSONANT/vowel/consonant
lower 2 bits depend on category of 2nd last char:
space/punctuation/digit or upper/lowercase
signed: hamming weight of last 2 chars
"""
print('Context modes'.center(60, '-'))
self.literalContextModes = []
for i in range(self.numberOfBlockTypes[L]):
self.literalContextModes.append(
self.verboseRead(LiteralContextMode(number=i))) | Read literal context modes.
LSB6: lower 6 bits of last char
MSB6: upper 6 bits of last char
UTF8: roughly dependent on categories:
upper 4 bits depend on category of last char:
control/whitespace/space/ punctuation/quote/%/open/close/
comma/period/=/digits/ VOWEL/CONSONANT/vowel/consonant
lower 2 bits depend on category of 2nd last char:
space/punctuation/digit or upper/lowercase
signed: hamming weight of last 2 chars |
def get_missing_simulations(self, param_list, runs=None):
"""
Return a list of the simulations among the required ones that are not
available in the database.
Args:
param_list (list): a list of dictionaries containing all the
parameters combinations.
runs (int): an integer representing how many repetitions are wanted
for each parameter combination, None if the dictionaries in
param_list already feature the desired RngRun value.
"""
params_to_simulate = []
if runs is not None: # Get next available runs from the database
next_runs = self.db.get_next_rngruns()
available_params = [r['params'] for r in self.db.get_results()]
for param_comb in param_list:
# Count how many param combinations we found, and remove them
# from the list of available_params for faster searching in the
# future
needed_runs = runs
for i, p in enumerate(available_params):
if param_comb == {k: p[k] for k in p.keys() if k != "RngRun"}:
needed_runs -= 1
new_param_combs = []
for needed_run in range(needed_runs):
# Here it's important that we make copies of the
# dictionaries, so that if we modify one we don't modify
# the others. This is necessary because after this step,
# typically, we will add the RngRun key which must be
# different for each copy.
new_param = deepcopy(param_comb)
new_param['RngRun'] = next(next_runs)
new_param_combs += [new_param]
params_to_simulate += new_param_combs
else:
for param_comb in param_list:
if not self.db.get_results(param_comb):
params_to_simulate += [param_comb]
return params_to_simulate | Return a list of the simulations among the required ones that are not
available in the database.
Args:
param_list (list): a list of dictionaries containing all the
parameters combinations.
runs (int): an integer representing how many repetitions are wanted
for each parameter combination, None if the dictionaries in
param_list already feature the desired RngRun value. |
def nnz_obs_groups(self):
""" get the observation groups that contain at least one non-zero weighted
observation
Returns
-------
nnz_obs_groups : list
a list of observation groups that contain at
least one non-zero weighted observation
"""
og = []
obs = self.observation_data
for g in self.obs_groups:
if obs.loc[obs.obgnme==g,"weight"].sum() > 0.0:
og.append(g)
return og | get the observation groups that contain at least one non-zero weighted
observation
Returns
-------
nnz_obs_groups : list
a list of observation groups that contain at
least one non-zero weighted observation |
def put(self, key, value):
'''Stores the object `value` named by `key`.
Serializes values on the way in, and stores the serialized data into the
``child_datastore``.
Args:
key: Key naming `value`
value: the object to store.
'''
value = self.serializedValue(value)
self.child_datastore.put(key, value) | Stores the object `value` named by `key`.
Serializes values on the way in, and stores the serialized data into the
``child_datastore``.
Args:
key: Key naming `value`
value: the object to store. |
def create_api_key(awsclient, api_name, api_key_name):
"""Create a new API key as reference for api.conf.
:param api_name:
:param api_key_name:
:return: api_key
"""
_sleep()
client_api = awsclient.get_client('apigateway')
print('create api key: %s' % api_key_name)
response = client_api.create_api_key(
name=api_key_name,
description='Created for ' + api_name,
enabled=True
)
#print(json2table(response))
print('Add this api key \'%s\' to your api.conf' % response['id'])
return response['id'] | Create a new API key as reference for api.conf.
:param api_name:
:param api_key_name:
:return: api_key |
def _execute(self, method_function, method_name, resource, **params):
"""
Generic TeleSign REST API request handler.
:param method_function: The Requests HTTP request function to perform the request.
:param method_name: The HTTP method name, as an upper case string.
:param resource: The partial resource URI to perform the request against, as a string.
:param params: Body params to perform the HTTP request with, as a dictionary.
:return: The RestClient Response object.
"""
resource_uri = "{api_host}{resource}".format(api_host=self.api_host, resource=resource)
url_encoded_fields = self._encode_params(params)
headers = RestClient.generate_telesign_headers(self.customer_id,
self.api_key,
method_name,
resource,
url_encoded_fields,
user_agent=self.user_agent)
if method_name in ['POST', 'PUT']:
payload = {'data': url_encoded_fields}
else:
payload = {'params': url_encoded_fields}
response = self.Response(method_function(resource_uri,
headers=headers,
timeout=self.timeout,
**payload))
return response | Generic TeleSign REST API request handler.
:param method_function: The Requests HTTP request function to perform the request.
:param method_name: The HTTP method name, as an upper case string.
:param resource: The partial resource URI to perform the request against, as a string.
:param params: Body params to perform the HTTP request with, as a dictionary.
:return: The RestClient Response object. |
def set_weekly(self, interval, *, days_of_week, first_day_of_week,
**kwargs):
""" Set to repeat every week on specified days for every x no. of days
:param int interval: no. of days to repeat at
:param str first_day_of_week: starting day for a week
:param list[str] days_of_week: list of days of the week to repeat
:keyword date start: Start date of repetition (kwargs)
:keyword date end: End date of repetition (kwargs)
:keyword int occurrences: no of occurrences (kwargs)
"""
self.set_daily(interval, **kwargs)
self.__days_of_week = set(days_of_week)
self.__first_day_of_week = first_day_of_week | Set to repeat every week on specified days for every x no. of days
:param int interval: no. of days to repeat at
:param str first_day_of_week: starting day for a week
:param list[str] days_of_week: list of days of the week to repeat
:keyword date start: Start date of repetition (kwargs)
:keyword date end: End date of repetition (kwargs)
:keyword int occurrences: no of occurrences (kwargs) |
def get_recover_position(gzfile, last_good_position):
# type: (gzip.GzipFile, int) -> int
"""
Return position of a next gzip stream in a GzipFile,
or -1 if it is not found.
XXX: caller must ensure that the same last_good_position
is not used multiple times for the same gzfile.
"""
with closing(mmap.mmap(gzfile.fileno(), 0, access=mmap.ACCESS_READ)) as m:
return m.find(GZIP_SIGNATURE, last_good_position + 1) | Return position of a next gzip stream in a GzipFile,
or -1 if it is not found.
XXX: caller must ensure that the same last_good_position
is not used multiple times for the same gzfile. |
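A standalone sketch of the same scan on an in-memory buffer; it assumes GZIP_SIGNATURE is the usual gzip magic plus the deflate method byte, which is an assumption, not taken from the snippet above.
import gzip

GZIP_SIGNATURE = b'\x1f\x8b\x08'  # assumed value of the constant

# Two concatenated gzip members; look for the start of the second one.
buf = gzip.compress(b'first member') + gzip.compress(b'second member')
next_stream = buf.find(GZIP_SIGNATURE, 0 + 1)
print(next_stream)  # typically the offset where the second member starts, -1 if not found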
def _parse(reactor, directory, pemdir, *args, **kwargs):
"""
Parse a txacme endpoint description.
:param reactor: The Twisted reactor.
:param directory: ``twisted.python.url.URL`` for the ACME directory to use
for issuing certs.
:param str pemdir: The path to the certificate directory to use.
"""
def colon_join(items):
return ':'.join([item.replace(':', '\\:') for item in items])
sub = colon_join(list(args) + ['='.join(item) for item in kwargs.items()])
pem_path = FilePath(pemdir).asTextMode()
acme_key = load_or_create_client_key(pem_path)
return AutoTLSEndpoint(
reactor=reactor,
directory=directory,
client_creator=partial(Client.from_url, key=acme_key, alg=RS256),
cert_store=DirectoryStore(pem_path),
cert_mapping=HostDirectoryMap(pem_path),
sub_endpoint=serverFromString(reactor, sub)) | Parse a txacme endpoint description.
:param reactor: The Twisted reactor.
:param directory: ``twisted.python.url.URL`` for the ACME directory to use
for issuing certs.
:param str pemdir: The path to the certificate directory to use. |
def closest_pixel_to_set(self, start, pixel_set, direction, w=13, t=0.5):
"""Starting at pixel, moves start by direction * t until there is a
pixel from pixel_set within a radius w of start. Then, returns start.
Parameters
----------
start : :obj:`numpy.ndarray` of float
The initial pixel location at which to start.
pixel_set : set of 2-tuples of float
The set of pixels to check set intersection with
direction : :obj:`numpy.ndarray` of float
The 2D direction vector in which to move pixel.
w : int
A circular diameter in which to check for pixels.
As soon as the current pixel has some non-zero pixel within a diameter
w of it, this function returns the current pixel location.
t : float
The step size with which to move pixel along direction.
Returns
-------
:obj:`numpy.ndarray` of float
The first pixel location along the direction vector at which there
exists some intersection with pixel_set within a radius w.
"""
# create circular structure for checking clearance
y, x = np.meshgrid(np.arange(w) - w / 2, np.arange(w) - w / 2)
cur_px_y = np.ravel(y + start[0]).astype(np.uint16)
cur_px_x = np.ravel(x + start[1]).astype(np.uint16)
# create comparison set, check set overlap
cur_px = set(zip(cur_px_y, cur_px_x))
includes = True
if np.all(
cur_px_y >= 0) and np.all(
cur_px_y < self.height) and np.all(
cur_px_x >= 0) and np.all(
cur_px_x < self.width):
includes = not cur_px.isdisjoint(pixel_set)
else:
return None
# Continue until out of bounds or sets overlap
while not includes:
start = start + t * direction
cur_px_y = np.ravel(y + start[0]).astype(np.uint16)
cur_px_x = np.ravel(x + start[1]).astype(np.uint16)
cur_px = set(zip(cur_px_y, cur_px_x))
if np.all(
cur_px_y >= 0) and np.all(
cur_px_y < self.height) and np.all(
cur_px_x >= 0) and np.all(
cur_px_x < self.width):
includes = not cur_px.isdisjoint(pixel_set)
else:
return None
return start | Starting at pixel, moves start by direction * t until there is a
pixel from pixel_set within a radius w of start. Then, returns start.
Parameters
----------
start : :obj:`numpy.ndarray` of float
The initial pixel location at which to start.
pixel_set : set of 2-tuples of float
The set of pixels to check set intersection with
direction : :obj:`numpy.ndarray` of float
The 2D direction vector in which to move pixel.
w : int
A circular diameter in which to check for pixels.
As soon as the current pixel has some non-zero pixel with a diameter
w of it, this function returns the current pixel location.
t : float
The step size with which to move pixel along direction.
Returns
-------
:obj:`numpy.ndarray` of float
The first pixel location along the direction vector at which there
exists some intersection with pixel_set within a radius w. |
def JoinKeyPath(path_segments):
"""Joins the path segments into key path.
Args:
path_segments (list[str]): Windows Registry key path segments.
Returns:
str: key path.
"""
# This is an optimized way to combine the path segments into a single path
# and combine multiple successive path separators to one.
# Split all the path segments based on the path (segment) separator.
path_segments = [
segment.split(definitions.KEY_PATH_SEPARATOR)
for segment in path_segments]
# Flatten the sublists into one list.
path_segments = [
element for sublist in path_segments for element in sublist]
# Remove empty path segments.
path_segments = filter(None, path_segments)
key_path = definitions.KEY_PATH_SEPARATOR.join(path_segments)
if not key_path.startswith('HKEY_'):
key_path = '{0:s}{1:s}'.format(definitions.KEY_PATH_SEPARATOR, key_path)
return key_path | Joins the path segments into key path.
Args:
path_segments (list[str]): Windows Registry key path segments.
Returns:
str: key path. |
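A self-contained sketch of the joining logic above; it assumes the key path separator is '\', which is not stated in the snippet.
KEY_PATH_SEPARATOR = '\\'  # assumed value of definitions.KEY_PATH_SEPARATOR

def join_key_path(path_segments):
    # Split on the separator, flatten, drop empty segments, then re-join once.
    parts = [p for segment in path_segments for p in segment.split(KEY_PATH_SEPARATOR)]
    parts = [p for p in parts if p]
    key_path = KEY_PATH_SEPARATOR.join(parts)
    if not key_path.startswith('HKEY_'):
        key_path = KEY_PATH_SEPARATOR + key_path
    return key_path

print(join_key_path(['HKEY_LOCAL_MACHINE\\', 'Software', 'Microsoft']))
# HKEY_LOCAL_MACHINE\Software\Microsoft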
def segment_snrs(filters, stilde, psd, low_frequency_cutoff):
""" This functions calculates the snr of each bank veto template against
the segment
Parameters
----------
filters: list of FrequencySeries
The list of bank veto templates filters.
stilde: FrequencySeries
The current segment of data.
psd: FrequencySeries
low_frequency_cutoff: float
Returns
-------
snr (list): List of snr time series.
norm (list): List of normalizations factors for the snr time series.
"""
snrs = []
norms = []
for bank_template in filters:
# For every template compute the snr against the stilde segment
snr, _, norm = matched_filter_core(
bank_template, stilde, h_norm=bank_template.sigmasq(psd),
psd=None, low_frequency_cutoff=low_frequency_cutoff)
# SNR time series stored here
snrs.append(snr)
# Template normalization factor stored here
norms.append(norm)
return snrs, norms | This function calculates the snr of each bank veto template against
the segment
Parameters
----------
filters: list of FrequencySeries
The list of bank veto templates filters.
stilde: FrequencySeries
The current segment of data.
psd: FrequencySeries
low_frequency_cutoff: float
Returns
-------
snr (list): List of snr time series.
norm (list): List of normalizations factors for the snr time series. |
def access_key(self, data):
"""
Set a new access code which will be required for future re-programmings of your YubiKey.
Supply data as either a raw string, or a hexlified string prefixed by 'h:'.
The result, after any hex decoding, must be 6 bytes.
"""
if data.startswith(b'h:'):
new = binascii.unhexlify(data[2:])
else:
new = data
if len(new) == 6:
self.access_code = new
else:
raise yubico_exception.InputError('Access key must be exactly 6 bytes') | Set a new access code which will be required for future re-programmings of your YubiKey.
Supply data as either a raw string, or a hexlified string prefixed by 'h:'.
The result, after any hex decoding, must be 6 bytes. |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'section_titles') and self.section_titles is not None:
_dict['section_titles'] = [
x._to_dict() for x in self.section_titles
]
if hasattr(self,
'leading_sentences') and self.leading_sentences is not None:
_dict['leading_sentences'] = [
x._to_dict() for x in self.leading_sentences
]
return _dict | Return a json dictionary representing this model. |
def transliterate(self, target_language="en"):
"""Transliterate the string to the target language."""
return WordList([w.transliterate(target_language) for w in self.words],
language=target_language, parent=self) | Transliterate the string to the target language. |
def SetSelected( self, node, point=None, propagate=True ):
"""Set the given node selected in the square-map"""
if node == self.selectedNode:
return
self.selectedNode = node
self.UpdateDrawing()
if node:
wx.PostEvent( self, SquareSelectionEvent( node=node, point=point, map=self ) ) | Set the given node selected in the square-map |
def get_operator(self, operator):
"""
Get a comparison suffix to be used in Django ORM & inversion flag for it
:param operator: string, DjangoQL comparison operator
:return: (suffix, invert) - a tuple with 2 values:
suffix - suffix to be used in ORM query, for example '__gt' for '>'
invert - boolean, True if this comparison needs to be inverted
"""
op = {
'=': '',
'>': '__gt',
'>=': '__gte',
'<': '__lt',
'<=': '__lte',
'~': '__icontains',
'in': '__in',
}.get(operator)
if op is not None:
return op, False
op = {
'!=': '',
'!~': '__icontains',
'not in': '__in',
}[operator]
return op, True | Get a comparison suffix to be used in Django ORM & inversion flag for it
:param operator: string, DjangoQL comparison operator
:return: (suffix, invert) - a tuple with 2 values:
suffix - suffix to be used in ORM query, for example '__gt' for '>'
invert - boolean, True if this comparison needs to be inverted |
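For context, a hedged sketch of how a (suffix, invert) pair from get_operator() could feed a Django ORM filter, assuming Django is installed; the model field 'age' and the sample values are hypothetical, not part of DjangoQL.
from django.db.models import Q

def build_q(field, suffix, invert, value):
    # e.g. get_operator('>')  -> ('__gt', False) gives Q(age__gt=21)
    #      get_operator('!=') -> ('', True)      gives ~Q(age=21)
    condition = Q(**{field + suffix: value})
    return ~condition if invert else condition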
def delete_contribution(self, url):
"""Delete the contribution with this identifier
:rtype: bool
:returns: True if the contribution was deleted, False otherwise (eg. if it didn't exist)
"""
# first validate that this is a real contrib
try:
result = self.api_request(url)
if 'url' in result and 'documents' in result:
self.api_request(result['url'], method='DELETE')
return True
except:
pass
return False | Delete the contribution with this identifier
:rtype: bool
:returns: True if the contribution was deleted, False otherwise (eg. if it didn't exist) |
def assign(self, partitions):
"""Manually assign a list of TopicPartitions to this consumer.
Arguments:
partitions (list of TopicPartition): Assignment for this instance.
Raises:
IllegalStateError: If consumer has already called
:meth:`~kafka.KafkaConsumer.subscribe`.
Warning:
It is not possible to use both manual partition assignment with
:meth:`~kafka.KafkaConsumer.assign` and group assignment with
:meth:`~kafka.KafkaConsumer.subscribe`.
Note:
This interface does not support incremental assignment and will
replace the previous assignment (if there was one).
Note:
Manual topic assignment through this method does not use the
consumer's group management functionality. As such, there will be
no rebalance operation triggered when group membership or cluster
and topic metadata change.
"""
self._subscription.assign_from_user(partitions)
self._client.set_topics([tp.topic for tp in partitions]) | Manually assign a list of TopicPartitions to this consumer.
Arguments:
partitions (list of TopicPartition): Assignment for this instance.
Raises:
IllegalStateError: If consumer has already called
:meth:`~kafka.KafkaConsumer.subscribe`.
Warning:
It is not possible to use both manual partition assignment with
:meth:`~kafka.KafkaConsumer.assign` and group assignment with
:meth:`~kafka.KafkaConsumer.subscribe`.
Note:
This interface does not support incremental assignment and will
replace the previous assignment (if there was one).
Note:
Manual topic assignment through this method does not use the
consumer's group management functionality. As such, there will be
no rebalance operation triggered when group membership or cluster
and topic metadata change. |
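A hedged usage sketch with kafka-python; the broker address and topic name are assumptions, and a running broker is required for the consumer to bootstrap.
from kafka import KafkaConsumer, TopicPartition

# Manual assignment instead of subscribe(); no consumer-group rebalancing happens.
consumer = KafkaConsumer(bootstrap_servers='localhost:9092')  # assumed broker
consumer.assign([TopicPartition('my-topic', 0), TopicPartition('my-topic', 1)])
print(consumer.assignment())  # set of assigned TopicPartitions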
def get_bound_form(self, noun, gender):
"""Return bound form of nound, given its gender."""
syllables = self.syllabifier.syllabify(noun)
stem = self.stemmer.get_stem(noun, gender)
cv_pattern = self.cv_patterner.get_cv_pattern(stem)
# Based on Huehnergard Appendix 6.C.1: base in -VC
if [letter[0] for letter in cv_pattern[-2:]] == ['V', 'C'] or stem in ['nakr']:
# a. 2-syllable
if len(syllables) > 2:
# awīlum > awīl, nakrum > naker
if stem in ['nakr']:
return 'naker'
else:
return stem
# b. 1-syllable
elif len(syllables) > 1:
# bēlum > bēl
return stem
# c. abum, aḫum
if stem in ['ab', 'aḫ']:
return stem + 'i'
# Appendix 6.C.2: base in -C₁C₁
if cv_pattern[-1][:2] == cv_pattern[-2][:2]:
# a. 1-syllable
if 3 > len(syllables) > 1:
return stem + 'i'
# b. 2-syllable, -tt
if len(syllables) > 2 and cv_pattern[-1][2] + cv_pattern[-2][2] == 'tt':
return stem + 'i'
# c. 2-syllable, other
if len(syllables) > 2:
return stem[:-1]
# Appendix 6.C.3: base in -C₁C₂, C₂ ≠ t, i.e. pVrs
if cv_pattern[-1][0] == cv_pattern[-2][0] and cv_pattern[-1][1] != cv_pattern[-2][1]:
return stem[:-1] + stem[1] + stem[-1]
# Appendix 6.C.4: base in -Ct (fem.)
if cv_pattern[-1][2] == 't' and cv_pattern[-2][0] == 'C':
if len(syllables) > 2:
return stem + 'i'
# Need to deal with fem. Ptcpl. māḫirtum -> māḫirat
if len(syllables) > 1:
# These are case by case
if stem in ['qīšt']:
return stem + 'i'
if stem in ['mārt']:
return stem[:-1] + stem[1] + stem[-1] | Return the bound form of a noun, given its gender.
def datatype(dbtype, description, cursor):
"""Google AppEngine Helper to convert a data type into a string."""
dt = cursor.db.introspection.get_field_type(dbtype, description)
if type(dt) is tuple:
return dt[0]
else:
return dt | Google AppEngine Helper to convert a data type into a string. |
def cudaMemcpy_htod(dst, src, count):
"""
Copy memory from host to device.
Copy data from host memory to device memory.
Parameters
----------
dst : ctypes pointer
Device memory pointer.
src : ctypes pointer
Host memory pointer.
count : int
Number of bytes to copy.
"""
status = _libcudart.cudaMemcpy(dst, src,
ctypes.c_size_t(count),
cudaMemcpyHostToDevice)
cudaCheckStatus(status) | Copy memory from host to device.
Copy data from host memory to device memory.
Parameters
----------
dst : ctypes pointer
Device memory pointer.
src : ctypes pointer
Host memory pointer.
count : int
Number of bytes to copy. |
def run(self, clock, generalLedger):
"""
Execute the component at the current clock cycle.
:param clock: The clock containing the current execution time and
period information.
:param generalLedger: The general ledger into which to create the
transactions.
"""
for c in self.components:
c.run(clock, generalLedger)
for a in self.activities:
a.run(clock, generalLedger) | Execute the component at the current clock cycle.
:param clock: The clock containing the current execution time and
period information.
:param generalLedger: The general ledger into which to create the
transactions. |
def IPNetwork(address, version=None, strict=False):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
version: An Integer, if set, don't try to automatically
determine what the IP address type is. important for things
like IPNetwork(1), which could be IPv4, '0.0.0.1/32', or IPv6,
'::1/128'.
Returns:
An IPv4Network or IPv6Network object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address. Or if a strict network was requested and a strict
network wasn't given.
"""
if version:
if version == 4:
return IPv4Network(address, strict)
elif version == 6:
return IPv6Network(address, strict)
try:
return IPv4Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
address) | Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
version: An Integer, if set, don't try to automatically
determine what the IP address type is. important for things
like IPNetwork(1), which could be IPv4, '0.0.0.1/32', or IPv6,
'::1/128'.
Returns:
An IPv4Network or IPv6Network object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address. Or if a strict network was requested and a strict
network wasn't given. |
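The same fall-through pattern, sketched with the standard library's ipaddress module as an analogue (the snippet above targets the older ipaddr API, so class names and exception types differ).
import ipaddress

def guess_network(address, version=None, strict=False):
    # Try IPv4 first, then IPv6, mirroring IPNetwork() above.
    if version == 4:
        return ipaddress.IPv4Network(address, strict=strict)
    if version == 6:
        return ipaddress.IPv6Network(address, strict=strict)
    for cls in (ipaddress.IPv4Network, ipaddress.IPv6Network):
        try:
            return cls(address, strict=strict)
        except ValueError:
            continue
    raise ValueError('%r does not appear to be an IPv4 or IPv6 network' % address)

print(guess_network('192.0.2.0/24'))   # 192.0.2.0/24
print(guess_network('2001:db8::/32'))  # 2001:db8::/32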
def safe_getattr(brain_or_object, attr, default=_marker):
"""Return the attribute value
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:param attr: Attribute name
:type attr: str
:returns: Attribute value
:rtype: obj
"""
try:
value = getattr(brain_or_object, attr, _marker)
if value is _marker:
if default is not _marker:
return default
fail("Attribute '{}' not found.".format(attr))
if callable(value):
return value()
return value
except Unauthorized:
if default is not _marker:
return default
fail("You are not authorized to access '{}' of '{}'.".format(
attr, repr(brain_or_object))) | Return the attribute value
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:param attr: Attribute name
:type attr: str
:returns: Attribute value
:rtype: obj |
def setval(self, varname, value):
"""
Set the value of the variable with the given name.
"""
if varname in self:
self[varname]['value'] = value
else:
self[varname] = Variable(self.default_type, value=value) | Set the value of the variable with the given name. |
def retrieve_pt(cls, request, service):
"""`request` should be the current HttpRequest object
`service` a string representing the service for witch we want to
retrieve a ticket.
The function return a Proxy Ticket or raise `ProxyError`
"""
try:
pgt = cls.objects.get(user=request.user, session_key=request.session.session_key).pgt
except cls.DoesNotExist:
raise ProxyError(
"INVALID_TICKET",
"No proxy ticket found for this HttpRequest object"
)
else:
client = get_cas_client(service_url=service, request=request)
try:
return client.get_proxy_ticket(pgt)
# change CASError to ProxyError nicely
except CASError as error:
raise ProxyError(*error.args)
# just embed other errors
except Exception as e:
raise ProxyError(e) | `request` should be the current HttpRequest object
`service` a string representing the service for which we want to
retrieve a ticket.
The function returns a Proxy Ticket or raises `ProxyError`
def chunks(l:Collection, n:int)->Iterable:
"Yield successive `n`-sized chunks from `l`."
for i in range(0, len(l), n): yield l[i:i+n] | Yield successive `n`-sized chunks from `l`. |
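A minimal usage sketch of chunks(); the list and chunk size are arbitrary, and the type hints are dropped here to keep the example self-contained.
def chunks(l, n):
    "Yield successive `n`-sized chunks from `l`."
    for i in range(0, len(l), n):
        yield l[i:i + n]

print(list(chunks(list(range(7)), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]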
def handle(self, source, target, app=None, **options):
""" command execution """
translation.activate(settings.LANGUAGE_CODE)
if app:
unpack = app.split('.')
if len(unpack) == 2:
models = [get_model(unpack[0], unpack[1])]
elif len(unpack) == 1:
models = get_models(get_app(unpack[0]))
else:
models = get_models()
for model in models:
if hasattr(model, 'localized_fields'):
model_full_name = '%s.%s' % (model._meta.app_label, model._meta.module_name)
update_instances = set()
messages = []
for instance in model.objects.all():
for field in model.localized_fields:
source_field = get_real_fieldname(field, source)
target_field = get_real_fieldname(field, target)
if hasattr(instance, source_field) and hasattr(instance, target_field):
source_field_value = getattr(instance, source_field)
target_field_value = getattr(instance, target_field)
if target_field_value in (None, u'')\
and source_field_value not in (None, u''):
setattr(instance, target_field, force_unicode(source_field_value))
update_instances.add(instance)
messages.append(u"%s %s %s will become %s" % (model_full_name, instance, target_field, force_unicode(source_field_value)))
if len(update_instances):
if self.ask_for_confirmation(messages, u'%s.%s' % (model._meta.app_label, model._meta.module_name)):
for update_instance in update_instances:
print u"saving %s" % update_instance
update_instance.save() | command execution |
def dollarfy(x):
"""Replaces Math elements in element list 'x' with a $-enclosed string.
stringify() passes through TeX math. Use dollarfy(x) first to replace
Math elements with math strings set in dollars. 'x' should be a deep copy
so that the underlying document is left untouched.
Returns 'x'."""
def _dollarfy(key, value, fmt, meta): # pylint: disable=unused-argument
"""Replaces Math elements"""
if key == 'Math':
return Str('$' + value[1] + '$')
return None
return walk(x, _dollarfy, '', {}) | Replaces Math elements in element list 'x' with a $-enclosed string.
stringify() passes through TeX math. Use dollarfy(x) first to replace
Math elements with math strings set in dollars. 'x' should be a deep copy
so that the underlying document is left untouched.
Returns 'x'. |
def _check_forest(self, sensors):
"""Validate that this sensor doesn't end up referencing itself."""
if self in sensors:
raise ValueError('Circular dependency in sensors: %s is its own '
'parent.' % (self.name,))
sensors.add(self)
for parent in self._parents:
parent._check_forest(sensors) | Validate that this sensor doesn't end up referencing itself. |