Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
---|---|---|
26,400 |
def list_all_quantities(self, include_native=False, with_info=False):
q = set(self._quantity_modifiers)
if include_native:
q.update(self._native_quantities)
return {k: self.get_quantity_info(k) for k in q} if with_info else list(q)
|
Return a list of all available quantities in this catalog.
If *include_native* is `True`, native quantities are included.
If *with_info* is `True`, a dict mapping quantity names to their info is returned instead.
See also: list_all_native_quantities
|
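A brief usage sketch for `list_all_quantities` above; `catalog` is a hypothetical, already-loaded catalog object, and the contents of the info dicts depend on the catalog.

```python
# Hypothetical usage; `catalog` stands in for a loaded catalog instance.
names = catalog.list_all_quantities()                           # list of quantity names
everything = catalog.list_all_quantities(include_native=True)   # native quantities included
info = catalog.list_all_quantities(with_info=True)              # dict: quantity name -> info dict
```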
26,401 |
def find_config(config_path: str) -> str:
if path.isdir(config_path):
config_path = path.join(config_path, CXF_CONFIG_FILE)
assert path.exists(config_path), 'Configuration file not found: {}'.format(config_path)  # message text is an assumption; the original literal was lost
return config_path
|
Derive configuration file path from the given path and check its existence.
The given path is expected to be either
1. path to the file
2. path to a dir, in such case the path is joined with ``CXF_CONFIG_FILE``
:param config_path: path to the configuration file or its parent directory
:return: validated configuration file path
|
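A minimal usage sketch for `find_config` above, assuming `CXF_CONFIG_FILE` is `'config.yaml'` (the real constant lives in the original module) and that the paths exist.

```python
from os import path

CXF_CONFIG_FILE = 'config.yaml'  # assumed value for this sketch

print(find_config('./experiments/baseline'))               # -> ./experiments/baseline/config.yaml
print(find_config('./experiments/baseline/config.yaml'))   # a file path is returned unchanged
```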
26,402 |
def create_essay_set(text, score, prompt_string, generate_additional=True):
x = EssaySet()
for i in xrange(0, len(text)):
x.add_essay(text[i], score[i])
if score[i] == min(score) and generate_additional == True:
x.generate_additional_essays(x._clean_text[len(x._clean_text) - 1], score[i])
x.update_prompt(prompt_string)
return x
|
Creates an essay set from given data.
Text should be a list of strings corresponding to essay text.
Score should be a list of scores where score[n] corresponds to text[n]
Prompt string is just a string containing the essay prompt.
Generate_additional indicates whether to generate additional essays at the minimum score point or not.
|
26,403 |
def fetch(self, end=values.unset, start=values.unset):
return self._proxy.fetch(end=end, start=start, )
|
Fetch a UsageInstance
:param unicode end: The end
:param unicode start: The start
:returns: Fetched UsageInstance
:rtype: twilio.rest.preview.wireless.sim.usage.UsageInstance
|
26,404 |
def adjust_frame(proc_obj, name, pos, absolute_pos):
if not proc_obj.curframe:
proc_obj.errmsg("No stack.")
return
if absolute_pos:
if pos >= 0:
pos = frame_num(proc_obj, pos)
else:
pos = -pos - 1
pass
else:
pos += proc_obj.curindex
pass
if pos < 0:
proc_obj.errmsg("Adjusting would put us beyond the oldest frame.")
return
elif pos >= len(proc_obj.stack):
proc_obj.errmsg("Adjusting would put us beyond the newest frame.")
return
proc_obj.curindex = pos
proc_obj.curframe = proc_obj.stack[proc_obj.curindex][0]
proc_obj.location()
proc_obj.list_lineno = None
proc_obj.list_offset = proc_obj.curframe.f_lasti
proc_obj.list_object = proc_obj.curframe
proc_obj.list_filename = proc_obj.curframe.f_code.co_filename
return
|
Adjust stack frame by pos positions. If absolute_pos then
pos is an absolute number. Otherwise it is a relative number.
A negative number indexes from the other end.
|
26,405 |
def update_team(self, slug):
if self._org:
if not self._org.has_team(slug):
return self._org.update()
return self._org.update_team(slug)
return False
|
Trigger update and cache invalidation for the team identified by the
given `slug`, if any. Returns `True` if the update was successful,
`False` otherwise.
:param slug: GitHub 'slug' name for the team to be updated.
|
26,406 |
def day_night_duration(
self,
daybreak: datetime.time = datetime.time(NORMAL_DAY_START_H),
nightfall: datetime.time = datetime.time(NORMAL_DAY_END_H)) \
-> Tuple[datetime.timedelta, datetime.timedelta]:
daytotal = datetime.timedelta()
nighttotal = datetime.timedelta()
startdate = self.start.date()
enddate = self.end.date()
ndays = (enddate - startdate).days + 1
for i in range(ndays):
date = startdate + datetime.timedelta(days=i)
component = self.component_on_date(date)
day = Interval.daytime(date, daybreak, nightfall)
daypart = component.intersection(day)
if daypart is not None:
daytotal += daypart.duration()
nighttotal += component.duration() - daypart.duration()
else:
nighttotal += component.duration()
return daytotal, nighttotal
|
Returns a ``(day, night)`` tuple of ``datetime.timedelta`` objects
giving the duration of this interval that falls into day and night
respectively.
|
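The day/night split above depends on the module's `Interval` helpers; the sketch below reproduces the same accounting for a single start/end pair using only the standard library, assuming daybreak/nightfall defaults of 07:00 and 19:00 in place of `NORMAL_DAY_START_H`/`NORMAL_DAY_END_H`.

```python
# Self-contained sketch of a day/night duration split for one datetime interval.
import datetime

def day_night_split(start, end,
                    daybreak=datetime.time(7), nightfall=datetime.time(19)):
    day = datetime.timedelta()
    d = start.date()
    while d <= end.date():
        # Overlap of [start, end] with this date's daytime window.
        day_start = max(start, datetime.datetime.combine(d, daybreak))
        day_end = min(end, datetime.datetime.combine(d, nightfall))
        if day_end > day_start:
            day += day_end - day_start
        d += datetime.timedelta(days=1)
    return day, (end - start) - day

day, night = day_night_split(datetime.datetime(2024, 1, 1, 6),
                             datetime.datetime(2024, 1, 1, 20))
print(day, night)   # 12:00:00 of daytime, 2:00:00 of night
```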
26,407 |
def hdr_vals_for_overscan(root):
with fits.open(root + ) as hdu:
spthdr = hdu[0].header
with fits.open(root + ) as hdu:
prihdr = hdu[0].header
xstart = spthdr[]
ystart = spthdr[]
xsize = spthdr[]
ysize = spthdr[]
ccdamp = prihdr[]
return ccdamp, xstart, ystart, xsize, ysize
|
Retrieve header keyword values from RAW and SPT
FITS files to pass on to :func:`check_oscntab` and
:func:`check_overscan`.
Parameters
----------
root : str
Rootname of the observation. Can be relative path
to the file excluding the type of FITS file and
extension, e.g., '/my/path/jxxxxxxxq'.
Returns
-------
ccdamp : str
Amplifiers used to read out the CCDs.
xstart : int
Starting column of the readout in detector
coordinates.
ystart : int
Starting row of the readout in detector
coordinates.
xsize : int
Number of columns in the readout.
ysize : int
Number of rows in the readout.
|
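`hdr_vals_for_overscan` above lost its file suffixes and header keyword names to extraction; the sketch below shows the general astropy pattern with placeholder names that are assumptions, not the original literals.

```python
# Sketch of the header-access pattern with astropy; the '_spt.fits'/'_raw.fits'
# suffixes and the keyword names are placeholders, not the original values.
from astropy.io import fits

def read_overscan_keywords(root):
    with fits.open(root + '_spt.fits') as hdul:      # assumed suffix
        spthdr = hdul[0].header
    with fits.open(root + '_raw.fits') as hdul:      # assumed suffix
        prihdr = hdul[0].header
    # Placeholder keyword names; substitute the instrument's actual keywords.
    xstart, ystart = spthdr['XSTART'], spthdr['YSTART']
    xsize, ysize = spthdr['XSIZE'], spthdr['YSIZE']
    ccdamp = prihdr['CCDAMP']
    return ccdamp, xstart, ystart, xsize, ysize
```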
26,408 |
def send_execute_request(self, socket, code, silent=True, subheader=None, ident=None):
if self._closed:
raise RuntimeError("Client cannot be used after its sockets have been closed")
subheader = subheader if subheader is not None else {}
if not isinstance(code, basestring):
raise TypeError("code must be text, not %s" % type(code))
if not isinstance(subheader, dict):
raise TypeError("subheader must be dict, not %s" % type(subheader))
content = dict(code=code, silent=bool(silent), user_variables=[], user_expressions={})
msg = self.session.send(socket, "execute_request", content=content, ident=ident,
subheader=subheader)
msg_id = msg['header']['msg_id']  # keys reconstructed from the IPython parallel message format; the literals were lost
self.outstanding.add(msg_id)
if ident:
if isinstance(ident, list):
ident = ident[-1]
if ident in self._engines.values():
self._outstanding_dict[ident].add(msg_id)
self.history.append(msg_id)
self.metadata[msg_id]['submitted'] = datetime.now()  # 'submitted' key reconstructed; the literal was lost
return msg
|
construct and send an execute request via a socket.
|
26,409 |
def sort_header(header_text):
lines = header_text.rstrip().split("\n")
rlens = {}
for ln in lines:
m = re.match(r'^@SQ\tSN:(\S+)\tLN:(\d+)', ln)  # pattern reconstructed from the @SQ lines emitted below
if m:
rlens[m.group(1)] = m.group(2)
output = ''
done_lens = False
for ln in lines:
if re.match(r'^@SQ', ln):  # reconstructed: matches any sequence-dictionary (@SQ) line
if not done_lens:
done_lens = True
for chr in sorted(rlens.keys()):
output += "@SQ\tSN:"+chr+"\tLN:"+str(rlens[chr])+"\n"
else:
output += ln.rstrip("\n")+"\n"
return output
|
sort the chromosomes in a header text
|
26,410 |
def logs_for_job(self, job_name, wait=False, poll=10):
description = self.sagemaker_client.describe_training_job(TrainingJobName=job_name)
print(secondary_training_status_message(description, None), end=)
instance_count = description[][]
status = description[]
stream_names = []
positions = {}
last_describe_job_call = time.time()
last_description = description
while True:
if len(stream_names) < instance_count:
try:
streams = client.describe_log_streams(logGroupName=log_group, logStreamNamePrefix=job_name + ,
orderBy=, limit=instance_count)
stream_names = [s[] for s in streams[]]
positions.update([(s, sagemaker.logs.Position(timestamp=0, skip=0))
for s in stream_names if s not in positions])
except ClientError as e:
|
Display the logs for a given training job, optionally tailing them until the
job is complete. If the output is a tty or a Jupyter cell, it will be color-coded
based on which instance the log entry is from.
Args:
job_name (str): Name of the training job to display the logs for.
wait (bool): Whether to keep looking for new log entries until the job completes (default: False).
poll (int): The interval in seconds between polling for new log entries and job completion (default: 10).
Raises:
ValueError: If waiting and the training job fails.
|
26,411 |
def deploy_docker(self, dockerfile_path, virtualbox_name=):
title = % self.__class__.__name__
input_fields = {
: dockerfile_path,
: virtualbox_name
}
for key, value in input_fields.items():
object_title = % (title, key, str(value))
self.fields.validate(value, % key, object_title)
if not self.subdomain:
raise Exception( % self.__class__.__name__)
from os import path
from labpack.platforms.docker import dockerClient
dockerClient(virtualbox_name, self.verbose)
if not path.exists(dockerfile_path):
raise Exception( % dockerfile_path)
dockerfile_root, dockerfile_node = path.split(dockerfile_path)
if dockerfile_node != :
raise Exception()
from os import devnull
from subprocess import check_output
self.printer(, flush=True)
sys_command =
heroku_plugins = check_output(sys_command, shell=True, stderr=open(devnull, )).decode()
if heroku_plugins.find() == -1 and heroku_plugins.find() == -1:
sys_command =
heroku_plugins = check_output(sys_command, shell=True, stderr=open(devnull, )).decode()
if heroku_plugins.find() == -1 and heroku_plugins.find() == -1:
self.printer()
raise Exception(
)
self.printer()
self.printer(, flush=True)
sys_command =
self._handle_command(sys_command)
self.printer()
self.printer()
sys_command = % (dockerfile_root, self.subdomain)
self._handle_command(sys_command, print_pipe=True)
sys_command = % (dockerfile_root, self.subdomain)
self._handle_command(sys_command, print_pipe=True)
self.printer()
return True
|
A method to deploy an app to Heroku using Docker.
|
26,412 |
def add_ruleclause_name(self, ns_name, rid) -> bool:
ns_name.parser_tree = parsing.Rule(self.value(rid))
return True
|
Create a tree.Rule
|
26,413 |
def to_comm(self, light_request=False):
data = None
if not light_request:
data = read_as_base64(self.file_pointer)
return Publication(
title=self.title,
author=self.author,
pub_year=self.pub_year,
isbn=self.isbn,
urnnbn=self.urnnbn,
uuid=self.uuid,
aleph_id=self.aleph_id,
producent_id=self.producent_id,
is_public=self.is_public,
filename=self.filename,
is_periodical=self.is_periodical,
path=self.path,
b64_data=data,
url=self.url,
file_pointer=self.file_pointer,
)
|
Convert `self` to :class:`.Publication`.
Returns:
obj: :class:`.Publication` instance.
|
26,414 |
def h2z(text, ignore='', kana=True, ascii=False, digit=False):
def _conv_dakuten(text):
# NOTE: the half-width (hankaku) sequences on the left were collapsed by text normalization; reconstructed here
text = text.replace("ｶﾞ", "ガ").replace("ｷﾞ", "ギ")
text = text.replace("ｸﾞ", "グ").replace("ｹﾞ", "ゲ")
text = text.replace("ｺﾞ", "ゴ").replace("ｻﾞ", "ザ")
text = text.replace("ｼﾞ", "ジ").replace("ｽﾞ", "ズ")
text = text.replace("ｾﾞ", "ゼ").replace("ｿﾞ", "ゾ")
text = text.replace("ﾀﾞ", "ダ").replace("ﾁﾞ", "ヂ")
text = text.replace("ﾂﾞ", "ヅ").replace("ﾃﾞ", "デ")
text = text.replace("ﾄﾞ", "ド").replace("ﾊﾞ", "バ")
text = text.replace("ﾋﾞ", "ビ").replace("ﾌﾞ", "ブ")
text = text.replace("ﾍﾞ", "ベ").replace("ﾎﾞ", "ボ")
text = text.replace("ﾊﾟ", "パ").replace("ﾋﾟ", "ピ")
text = text.replace("ﾌﾟ", "プ").replace("ﾍﾟ", "ペ")
return text.replace("ﾎﾟ", "ポ").replace("ｳﾞ", "ヴ")
if ascii:
if digit:
if kana:
h2z_map = H2Z_ALL
else:
h2z_map = H2Z_AD
elif kana:
h2z_map = H2Z_AK
else:
h2z_map = H2Z_A
elif digit:
if kana:
h2z_map = H2Z_DK
else:
h2z_map = H2Z_D
else:
h2z_map = H2Z_K
if kana:
text = _conv_dakuten(text)
if ignore:
h2z_map = _exclude_ignorechar(ignore, h2z_map.copy())
return _convert(text, h2z_map)
|
Convert Half-width (Hankaku) Katakana to Full-width (Zenkaku) Katakana
Parameters
----------
text : str
Half-width Katakana string.
ignore : str
Characters to be ignored in conversion.
kana : bool
Whether to convert Kana.
ascii : bool
Whether to convert ASCII characters.
digit : bool
Whether to convert digits.
Returns
-------
str
Full-width Katakana string.
Examples
--------
>>> print(jaconv.h2z('ﾃｨﾛﾌｨﾅｰﾚ'))
ティロフィナーレ
>>> print(jaconv.h2z('ﾃｨﾛﾌｨﾅｰﾚ', ignore='ｨ'))
テｨロフｨナーレ
>>> print(jaconv.h2z('abcd', ascii=True))
ａｂｃｄ
>>> print(jaconv.h2z('1234', digit=True))
１２３４
|
26,415 |
def get(ctx):
user, project_name, _job = get_job_or_local(ctx.obj.get(), ctx.obj.get())
try:
response = PolyaxonClient().job.get_job(user, project_name, _job)
cache.cache(config_manager=JobManager, response=response)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error(.format(_job))
Printer.print_error(.format(e))
sys.exit(1)
get_job_details(response)
|
Get job.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon job --job=1 get
```
\b
```bash
$ polyaxon job --job=1 --project=project_name get
```
|
26,416 |
def is_prime( n ):
global miller_rabin_test_count
miller_rabin_test_count = 0
if n <= smallprimes[-1]:
if n in smallprimes: return True
else: return False
if gcd( n, 2*3*5*7*11 ) != 1: return False
t = 40
n_bits = 1 + int( math.log( n, 2 ) )
for k, tt in ( ( 100, 27 ),
( 150, 18 ),
( 200, 15 ),
( 250, 12 ),
( 300, 9 ),
( 350, 8 ),
( 400, 7 ),
( 450, 6 ),
( 550, 5 ),
( 650, 4 ),
( 850, 3 ),
( 1300, 2 ),
):
if n_bits < k: break
t = tt
s = 0
r = n - 1
while ( r % 2 ) == 0:
s = s + 1
r = r // 2
for i in range( t ):
a = smallprimes[ i ]
y = modular_exp( a, r, n )
if y != 1 and y != n-1:
j = 1
while j <= s - 1 and y != n - 1:
y = modular_exp( y, 2, n )
if y == 1:
miller_rabin_test_count = i + 1
return False
j = j + 1
if y != n-1:
miller_rabin_test_count = i + 1
return False
return True
|
Return True if n is prime, False otherwise.
We use the Miller-Rabin test, as given in Menezes et al. p. 138.
This test is not exact: there are composite values n for which
it returns True.
In testing the odd numbers from 10000001 to 19999999,
about 66 composites got past the first test,
5 got past the second test, and none got past the third.
Since factors of 2, 3, 5, 7, and 11 were detected during
preliminary screening, the number of numbers tested by
Miller-Rabin was (19999999 - 10000001)*(2/3)*(4/5)*(6/7)
= 4.57 million.
|
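As a companion to the Miller-Rabin discussion above, here is a minimal, self-contained sketch of a single Miller-Rabin round using Python's built-in `pow` for modular exponentiation (the module itself uses `modular_exp`); the test values are illustrative.

```python
# One Miller-Rabin round: returns False only if `a` witnesses that n is composite.
def miller_rabin_round(n, a):
    # Write n - 1 as 2**s * r with r odd.
    s, r = 0, n - 1
    while r % 2 == 0:
        s += 1
        r //= 2
    y = pow(a, r, n)
    if y in (1, n - 1):
        return True           # n is probably prime for this base
    for _ in range(s - 1):
        y = pow(y, 2, n)
        if y == n - 1:
            return True
    return False              # n is definitely composite

assert all(miller_rabin_round(104729, a) for a in (2, 3, 5, 7))   # 104729 is prime
assert not miller_rabin_round(104730, 2)                          # even, hence composite
```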
26,417 |
def expand_tile(units, axis):
assert axis in (1, 2)
n_time_steps = K.int_shape(units)[1]
repetitions = [1, 1, 1, 1]
repetitions[axis] = n_time_steps
if axis == 1:
expanded = Reshape(target_shape=( (1,) + K.int_shape(units)[1:] ))(units)
else:
expanded = Reshape(target_shape=(K.int_shape(units)[1:2] + (1,) + K.int_shape(units)[2:]))(units)
return K.tile(expanded, repetitions)
|
Expand and tile a tensor along the given axis.
Args:
units: tf tensor with dimensions [batch_size, time_steps, n_input_features]
axis: axis along which to expand and tile; must be 1 or 2
|
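An illustrative NumPy analogue of the shape transformation performed by `expand_tile`, assuming the Keras version behaves like an expand_dims followed by a tile along `axis`:

```python
import numpy as np

units = np.zeros((4, 7, 16))             # [batch_size, time_steps, n_features]
expanded = np.expand_dims(units, 1)      # axis == 1 -> shape (4, 1, 7, 16)
tiled = np.tile(expanded, (1, 7, 1, 1))  # repeat time_steps times along the new axis
print(tiled.shape)                       # (4, 7, 7, 16)
```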
26,418 |
def _join_summary_file(data, summary_filename="msd_summary_file.h5"):
msd = h5py.File(summary_filename)
track_lookup = dict((t.encode("utf8"), i) for i, t in enumerate(data[].cat.categories))
track_info = np.empty(shape=(len(track_lookup), 4), dtype=np.object)
with tqdm.tqdm(total=len(track_info)) as progress:
for song in msd[][]:
trackid = song[17]
if trackid in track_lookup:
pos = track_lookup[trackid]
track_info[pos] = [x.decode("utf8") for x in (trackid, song[9], song[14], song[18])]
progress.update(1)
return track_info
|
Gets the track_info array by joining the taste profile data to the track summary file.
|
26,419 |
def get_code_indices(s: Union[str, ]) -> Dict[int, str]:
indices = {}
i = 0
codes = get_codes(s)
for code in codes:
codeindex = s.index(code)
realindex = i + codeindex
indices[realindex] = code
codelen = len(code)
i = realindex + codelen
s = s[codeindex + codelen:]
return indices
|
Retrieve a dict of {index: escape_code} for a given string.
If no escape codes are found, an empty dict is returned.
|
26,420 |
def decode(self, file_name):
try:
file_tag = self._filename_decoder_new.decode(file_name)
except:
try:
file_tag = self._filename_decoder_old.decode(file_name)
except:
file_tag = FileTag(0, 0, , , )
return file_tag
|
Parses the filename, creating a FileTag from it.
It will try both the old and the new conventions; if the filename does not
conform to either of them, an empty FileTag is returned.
:param file_name: filename to parse
:return: a FileTag instance
|
26,421 |
def route_present(name, address_prefix, next_hop_type, route_table, resource_group, next_hop_ip_address=None,
connection_auth=None, **kwargs):
ret = {
: name,
: False,
: ,
: {}
}
if not isinstance(connection_auth, dict):
ret[] =
return ret
route = __salt__[](
name,
route_table,
resource_group,
azurearm_log_level=,
**connection_auth
)
if not in route:
if address_prefix != route.get():
ret[][] = {
: route.get(),
: address_prefix
}
if next_hop_type.lower() != route.get(, ).lower():
ret[][] = {
: route.get(),
: next_hop_type
}
if next_hop_type.lower() == and next_hop_ip_address != route.get():
ret[][] = {
: route.get(),
: next_hop_ip_address
}
if not ret[]:
ret[] = True
ret[] = .format(name)
return ret
if __opts__[]:
ret[] = None
ret[] = .format(name)
return ret
else:
ret[] = {
: {},
: {
: name,
: address_prefix,
: next_hop_type,
: next_hop_ip_address
}
}
if __opts__[]:
ret[] = .format(name)
ret[] = None
return ret
route_kwargs = kwargs.copy()
route_kwargs.update(connection_auth)
route = __salt__[](
name=name,
route_table=route_table,
resource_group=resource_group,
address_prefix=address_prefix,
next_hop_type=next_hop_type,
next_hop_ip_address=next_hop_ip_address,
**route_kwargs
)
if not in route:
ret[] = True
ret[] = .format(name)
return ret
ret[] = .format(name, route.get())
return ret
|
.. versionadded:: 2019.2.0
Ensure a route exists within a route table.
:param name:
Name of the route.
:param address_prefix:
The destination CIDR to which the route applies.
:param next_hop_type:
The type of Azure hop the packet should be sent to. Possible values are: 'VirtualNetworkGateway', 'VnetLocal',
'Internet', 'VirtualAppliance', and 'None'.
:param next_hop_ip_address:
The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop
type is 'VirtualAppliance'.
:param route_table:
The name of the existing route table which will contain the route.
:param resource_group:
The resource group assigned to the route table.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure route exists:
azurearm_network.route_present:
- name: rt1_route2
- route_table: rt1
- resource_group: group1
- address_prefix: '192.168.0.0/16'
- next_hop_type: vnetlocal
- connection_auth: {{ profile }}
- require:
- azurearm_network: Ensure route table exists
|
26,422 |
def _create(archive, compression, cmd, format, verbosity, filenames):
if len(filenames) > 1:
raise util.PatoolError()
try:
with lzma.LZMAFile(archive, mode='wb', **_get_lzma_options(format, preset=9)) as lzmafile:  # 'wb' assumed; the mode literal was lost
filename = filenames[0]
with open(filename, 'rb') as srcfile:  # 'rb' assumed; the mode literal was lost
data = srcfile.read(READ_SIZE_BYTES)
while data:
lzmafile.write(data)
data = srcfile.read(READ_SIZE_BYTES)
except Exception as err:
msg = "error creating %s: %s" % (archive, err)
raise util.PatoolError(msg)
return None
|
Create an LZMA or XZ archive with the lzma Python module.
|
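A standalone sketch of the same streaming-compression pattern using only the standard-library `lzma` module; the chunk size, format, and file names are illustrative choices, not the original module's settings.

```python
import lzma

READ_SIZE_BYTES = 1024 * 1024  # illustrative chunk size

def compress_file(src_path: str, archive_path: str) -> None:
    # Stream the source file into an .xz archive one chunk at a time.
    with lzma.open(archive_path, mode="wb", format=lzma.FORMAT_XZ, preset=9) as dst:
        with open(src_path, "rb") as src:
            chunk = src.read(READ_SIZE_BYTES)
            while chunk:
                dst.write(chunk)
                chunk = src.read(READ_SIZE_BYTES)
```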
26,423 |
def example_bigbeds():
hits = []
d = data_dir()
for fn in os.listdir(d):
fn = os.path.join(d, fn)
if os.path.splitext(fn)[-1] == '.bb':  # extension literal lost; '.bb' assumed for bigBed files
hits.append(os.path.abspath(fn))
return hits
|
Returns list of example bigBed files
|
26,424 |
def iterable_source(iterable, target):
it = iter(iterable)
for item in it:
try:
target.send(item)
except StopIteration:
return prepend(item, it)
return empty_iter()
|
Convert an iterable into a stream of events.
Args:
iterable: A series of items which will be sent to the target one by one.
target: The target coroutine or sink.
Returns:
An iterator over any remaining items.
|
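A usage sketch for `iterable_source`, assuming it sits in the same module as the stand-ins below for its `prepend` and `empty_iter` helpers (the originals are not shown in this row):

```python
import itertools

def prepend(item, iterator):
    # Stand-in helper: yield `item` first, then the rest of `iterator`.
    return itertools.chain([item], iterator)

def empty_iter():
    # Stand-in helper: an iterator with no items.
    return iter(())

def take(n):
    # Toy sink coroutine that accepts at most n items before finishing.
    for _ in range(n):
        yield

sink = take(2)
next(sink)                                  # prime the coroutine
rest = iterable_source([1, 2, 3, 4], sink)
print(list(rest))                           # [2, 3, 4]: the item that closed the sink is handed back
```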
26,425 |
def send(vm, target, key=):
vm**
ret = {}
if key not in [, , ]:
ret[] =
return ret
if not os.path.isdir(target):
ret[] =
return ret
vm = lookup(.format(key, vm), one=True)
if in vm:
return vm
cmd = .format(
uuid=vm,
target=os.path.join(target, .format(vm))
)
res = __salt__[](cmd, python_shell=True)
retcode = res[]
if retcode != 0:
ret[] = res[] if in res else _exit_status(retcode)
return ret
vmobj = get(vm)
if not in vmobj:
return True
log.warning()
log.warning()
for dataset in vmobj[]:
name = dataset.split()
name = name[-1]
cmd = .format(
dataset=dataset,
target=os.path.join(target, .format(vm, name))
)
res = __salt__[](cmd, python_shell=True)
retcode = res[]
if retcode != 0:
ret[] = res[] if in res else _exit_status(retcode)
return ret
return True
|
Send a vm to a directory
vm : string
vm to be sent
target : string
target directory
key : string [uuid|alias|hostname]
value type of 'vm' parameter
CLI Example:
.. code-block:: bash
salt '*' vmadm.send 186da9ab-7392-4f55-91a5-b8f1fe770543 /opt/backups
salt '*' vmadm.send vm=nacl target=/opt/backups key=alias
|
26,426 |
def urls(model,form_class=None,fields=None,redirect=None,object_list=None,fail_if_empty=True):
if form_class is None and fields is None:
raise ImproperlyConfigured("Must define either `form_class` or `fields`.")
if object_list is None and redirect is None:
raise ImproperlyConfigured("Must define `redirect` when `object_list` is missing.")
prefix = model.__name__.lower()
if redirect is None: redirect = reverse_lazy(prefix + )
urlpatterns = patterns(,
url( + prefix + ,
CreateView.as_view(model=model,form_class=form_class,fields=fields,success_url=redirect),
name = prefix +
),
url( + prefix + ,
UpdateView.as_view(model=model,form_class=form_class,fields=fields,success_url=redirect),
name = prefix +
),
url( + prefix + ,
DeleteView.as_view(model=model,success_url=redirect),
name = prefix +
),
)
if object_list:
urlpatterns += patterns(,
url( + prefix + ,
ListView.as_view(model=model,object_list=object_list,fail_if_empty=fail_if_empty),
name = prefix +
),
url( + prefix + ,
FormsetView.as_view(model=model,form_class=form_class,fields=fields,object_list=object_list,fail_if_empty=fail_if_empty),
name = prefix +
),
)
return urlpatterns
|
Returns URL patterns for creating, updating and deleting models. Supports lists and formsets as well
model Model class
form_class Form class for use in create, update and formset views (default is None)
fields Required if form_class is not provided
redirect Redirection URL for create, update and delete views
object_list Queryset for list and formset. If absent, these views are not created
fail_if_empty Raise ImproperlyConfigured exception in formset and list views when object_list is empty
|
26,427 |
def _get_fuzzy_padding(self, lean):
result = relativedelta(0)
if self.year_ua:
result += appsettings.PADDING_YEAR_PRECISION * self.year_ua._get_multiplier()
if self.month_ua:
result += appsettings.PADDING_MONTH_PRECISION * self.month_ua._get_multiplier()
if self.day_ua:
result += appsettings.PADDING_DAY_PRECISION * self.day_ua._get_multiplier()
if self.year_month_ua:
result += appsettings.PADDING_YEAR_PRECISION * self.year_month_ua._get_multiplier()
result += appsettings.PADDING_MONTH_PRECISION * self.year_month_ua._get_multiplier()
if self.month_day_ua:
result += appsettings.PADDING_DAY_PRECISION * self.month_day_ua._get_multiplier()
result += appsettings.PADDING_MONTH_PRECISION * self.month_day_ua._get_multiplier()
if self.season_ua:
result += appsettings.PADDING_SEASON_PRECISION * self.season_ua._get_multiplier()
if self.all_ua:
multiplier = self.all_ua._get_multiplier()
if self.precision == PRECISION_DAY:
result += multiplier * appsettings.PADDING_DAY_PRECISION
result += multiplier * appsettings.PADDING_MONTH_PRECISION
result += multiplier * appsettings.PADDING_YEAR_PRECISION
elif self.precision == PRECISION_MONTH:
result += multiplier * appsettings.PADDING_MONTH_PRECISION
result += multiplier * appsettings.PADDING_YEAR_PRECISION
elif self.precision == PRECISION_YEAR:
result += multiplier * appsettings.PADDING_YEAR_PRECISION
return result
|
This is not a perfect interpretation, as fuzziness is introduced for
redundant uncertainty modifiers, e.g. (2006~)~ will get two sets of
fuzziness.
|
26,428 |
def getExpectedValue(distribution):
k = np.array(distribution.possibleValues)
return np.sum(k * distribution.pmf(k))
|
Calculates E[X] where X is a distribution.
|
26,429 |
def migrate(config):
webapp = WebApp(config.web_host, config.web_port,
custom_maintenance_file=config.web_custom_html)
webserver = WebServer(webapp)
webserver.daemon = True
webserver.start()
migration_parser = YamlParser.parse_from_file(config.migration_file)
migration = migration_parser.parse()
database = Database(config)
with database.connect() as lock_connection:
application_lock = ApplicationLock(lock_connection)
application_lock.start()
while not application_lock.acquired:
time.sleep(0.5)
else:
if application_lock.replica:
application_lock.stop = True
application_lock.join()
try:
table = MigrationTable(database)
runner = Runner(config, migration, database, table)
runner.perform()
finally:
application_lock.stop = True
application_lock.join()
|
Perform a migration according to config.
:param config: The configuration to be applied
:type config: Config
|
26,430 |
def handle_termination(cls, pid, is_cancel=True):
try:
main_proc = psutil.Process(pid=pid)
child_procs = main_proc.children(recursive=True)
for child_proc in child_procs:
try:
os.kill(child_proc.pid, signal.SIGKILL)
except (TypeError, OSError):
pass
os.kill(main_proc.pid, signal.SIGKILL)
except (TypeError, psutil.Error, OSError):
try:
os.kill(pid, signal.SIGKILL)
except (OSError):
pass
|
Internal method to terminate a subprocess spawned by `pexpect` representing an invocation of runner.
:param pid: the process id of the process running the job.
:param is_cancel: flag showing whether this termination is caused by
instance's cancel_flag.
|
26,431 |
def recover_and_supervise(recovery_file):
try:
logging.info("Attempting to recover Supervisor data from " + recovery_file)
with open(recovery_file) as rf:
recovery_data = json.load(rf)
monitor_data = recovery_data[]
dependencies = recovery_data[]
args = recovery_data[]
except:
logging.error("Could not recover monitor data, exiting...")
return 1
logging.info("Data successfully loaded, resuming Supervisor")
supervise_until_complete(monitor_data, dependencies, args, recovery_file)
|
Retrieve monitor data from recovery_file and resume monitoring
|
26,432 |
def qrcode(self, data, **kwargs):
barcode.validate_qrcode_args(**kwargs)
return self._qrcode_impl(data, **kwargs)
|
Render given ``data`` as `QRCode <http://www.qrcode.com/en/>`_.
|
26,433 |
async def pack_message(wallet_handle: int,
message: str,
recipient_verkeys: list,
sender_verkey: Optional[str]) -> bytes:
logger = logging.getLogger(__name__)
logger.debug("pack_message: >>> wallet_handle: %r, message: %r, recipient_verkeys: %r, sender_verkey: %r",
wallet_handle,
message,
recipient_verkeys,
sender_verkey)
def transform_cb(arr_ptr: POINTER(c_uint8), arr_len: c_uint32):
return bytes(arr_ptr[:arr_len]),
if not hasattr(pack_message, "cb"):
logger.debug("pack_message: Creating callback")
pack_message.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, POINTER(c_uint8), c_uint32), transform_cb)
c_wallet_handle = c_int32(wallet_handle)
msg_bytes = message.encode("utf-8")
c_msg_len = c_uint32(len(msg_bytes))
c_recipient_verkeys = c_char_p(json.dumps(recipient_verkeys).encode())
c_sender_vk = c_char_p(sender_verkey.encode()) if sender_verkey is not None else None
res = await do_call(,
c_wallet_handle,
msg_bytes,
c_msg_len,
c_recipient_verkeys,
c_sender_vk,
pack_message.cb)
logger.debug("pack_message: <<< res: %r", res)
return res
|
Packs a message by encrypting the message and serializes it in a JWE-like format (Experimental)
Note: to use DID keys with this function, call did.key_for_did to get the key id (verkey)
for a specific DID.
#Params
command_handle: command handle to map callback to user context.
wallet_handle: wallet handler (created by open_wallet)
message: the message being sent as a string. If it's JSON formatted it should be converted to a string
recipient_verkeys: a list of Strings which are recipient verkeys
sender_verkey: the sender's verkey as a string. -> When None is passed in this parameter, anoncrypt mode is used
returns an Agent Wire Message format as a byte array. See HIPE 0028 for detailed formats
|
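A hypothetical usage sketch for `pack_message`; the wallet handle and verkeys are placeholders that would come from the SDK's wallet and key-management calls.

```python
import json

async def pack_examples(wallet_handle, recipient_verkeys, my_verkey):
    payload = json.dumps({"content": "hello"})
    # Authcrypt mode: a sender verkey is supplied.
    authcrypted = await pack_message(wallet_handle, payload, recipient_verkeys, my_verkey)
    # Anoncrypt mode: passing None for the sender verkey, per the docstring above.
    anoncrypted = await pack_message(wallet_handle, payload, recipient_verkeys, None)
    return authcrypted, anoncrypted
```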
26,434 |
def from_series(self, series, add_index_column=True):
if series.name:
self.headers = [series.name]
else:
self.headers = ["value"]
self.type_hints = [self.__get_typehint_from_dtype(series.dtype)]
if add_index_column:
self.headers = [""] + self.headers
if self.type_hints:
self.type_hints = [None] + self.type_hints
self.value_matrix = [
[index] + [value] for index, value in zip(series.index.tolist(), series.tolist())
]
else:
self.value_matrix = [[value] for value in series.tolist()]
|
Set tabular attributes to the writer from :py:class:`pandas.Series`.
Following attributes are set by the method:
- :py:attr:`~.headers`
- :py:attr:`~.value_matrix`
- :py:attr:`~.type_hints`
Args:
series(pandas.Series):
Input pandas.Series object.
add_index_column(bool, optional):
If |True|, add a column of ``index`` of the ``series``.
Defaults to |True|.
|
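An illustrative call to `from_series`; `writer` stands in for a writer instance exposing this method (for example one of pytablewriter's writer classes, per the docstring above).

```python
import pandas as pd

series = pd.Series([1.1, 2.2, 3.3], name="value", index=["a", "b", "c"])
writer.from_series(series)
# headers      -> ["", "value"]
# value_matrix -> [["a", 1.1], ["b", 2.2], ["c", 3.3]]
```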
26,435 |
def extended_stats(G, connectivity=False, anc=False, ecc=False, bc=False, cc=False):
stats = {}
full_start_time = time.time()
G_dir = nx.DiGraph(G)
G_undir = nx.Graph(G)
G_strong = get_largest_component(G, strongly=True)
avg_neighbor_degree = nx.average_neighbor_degree(G)
stats[] = avg_neighbor_degree
stats[] = sum(avg_neighbor_degree.values())/len(avg_neighbor_degree)
avg_weighted_neighbor_degree = nx.average_neighbor_degree(G, weight=)
stats[] = avg_weighted_neighbor_degree
stats[] = sum(avg_weighted_neighbor_degree.values())/len(avg_weighted_neighbor_degree)
degree_centrality = nx.degree_centrality(G)
stats[] = degree_centrality
stats[] = sum(degree_centrality.values())/len(degree_centrality)
stats[] = nx.clustering(G_undir)
stats[] = nx.average_clustering(G_undir)
stats[] = nx.clustering(G_undir, weight=)
stats[] = nx.average_clustering(G_undir, weight=)
pagerank = nx.pagerank(G_dir, weight=)
stats[] = pagerank
pagerank_max_node = max(pagerank, key=lambda x: pagerank[x])
stats[] = pagerank_max_node
stats[] = pagerank[pagerank_max_node]
pagerank_min_node = min(pagerank, key=lambda x: pagerank[x])
stats[] = pagerank_min_node
stats[] = pagerank[pagerank_min_node]
if connectivity:
start_time = time.time()
stats[] = nx.node_connectivity(G_strong)
stats[] = nx.edge_connectivity(G_strong)
log(.format(time.time() - start_time))
if anc:
start_time = time.time()
stats[] = nx.average_node_connectivity(G)
log(.format(time.time() - start_time))
if ecc:
start_time = time.time()
sp = {source:dict(nx.single_source_dijkstra_path_length(G_strong, source, weight=)) for source in G_strong.nodes()}
log(.format(time.time() - start_time))
eccentricity = nx.eccentricity(G_strong, sp=sp)
stats[] = eccentricity
diameter = nx.diameter(G_strong, e=eccentricity)
stats[] = diameter
radius = nx.radius(G_strong, e=eccentricity)
stats[] = radius
center = nx.center(G_strong, e=eccentricity)
stats[] = center
periphery = nx.periphery(G_strong, e=eccentricity)
stats[] = periphery
if cc:
start_time = time.time()
closeness_centrality = nx.closeness_centrality(G, distance=)
stats[] = closeness_centrality
stats[] = sum(closeness_centrality.values())/len(closeness_centrality)
log(.format(time.time() - start_time))
if bc:
start_time = time.time()
betweenness_centrality = nx.betweenness_centrality(G, weight=)
stats[] = betweenness_centrality
stats[] = sum(betweenness_centrality.values())/len(betweenness_centrality)
log(.format(time.time() - start_time))
log(.format(time.time()-full_start_time))
return stats
|
Calculate extended topological stats and metrics for a graph.
Many of these algorithms have an inherently high time complexity. Global
topological analysis of large complex networks is extremely time consuming
and may exhaust computer memory. Consider using function arguments to not
run metrics that require computation of a full matrix of paths if they
will not be needed.
Parameters
----------
G : networkx multidigraph
connectivity : bool
if True, calculate node and edge connectivity
anc : bool
if True, calculate average node connectivity
ecc : bool
if True, calculate shortest paths, eccentricity, and topological metrics
that use eccentricity
bc : bool
if True, calculate node betweenness centrality
cc : bool
if True, calculate node closeness centrality
Returns
-------
stats : dict
dictionary of network measures containing the following elements (some
only calculated/returned optionally, based on passed parameters):
- avg_neighbor_degree
- avg_neighbor_degree_avg
- avg_weighted_neighbor_degree
- avg_weighted_neighbor_degree_avg
- degree_centrality
- degree_centrality_avg
- clustering_coefficient
- clustering_coefficient_avg
- clustering_coefficient_weighted
- clustering_coefficient_weighted_avg
- pagerank
- pagerank_max_node
- pagerank_max
- pagerank_min_node
- pagerank_min
- node_connectivity
- node_connectivity_avg
- edge_connectivity
- eccentricity
- diameter
- radius
- center
- periphery
- closeness_centrality
- closeness_centrality_avg
- betweenness_centrality
- betweenness_centrality_avg
|
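A hypothetical usage sketch for `extended_stats` with OSMnx (the place name and the set of enabled metrics are illustrative); the heavier metrics are opted into explicitly, as the docstring advises.

```python
import osmnx as ox

# Build a street network and compute the optional, more expensive metrics.
G = ox.graph_from_place("Piedmont, California, USA", network_type="drive")
stats = extended_stats(G, ecc=True, bc=True, cc=True)
print(stats["diameter"], stats["betweenness_centrality_avg"])
```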
26,436 |
def get(self, entity_id: EntityId, load: bool = False) -> Entity:
try:
entity = self.identity_map[entity_id]
except KeyError:
entity = Entity(entity_id, self)
self.identity_map[entity_id] = entity
if load:
entity.load()
return entity
|
Get a Wikidata entity by its :class:`~.entity.EntityId`.
:param entity_id: The :attr:`~.entity.Entity.id` of
the :class:`~.entity.Entity` to find.
:type entity_id: :class:`~.entity.EntityId`
:param load: Eager loading on :const:`True`.
Lazy loading (:const:`False`) by default.
:type load: :class:`bool`
:return: The found entity.
:rtype: :class:`~.entity.Entity`
.. versionadded:: 0.3.0
The ``load`` option.
|
26,437 |
def connect(self):
try:
self.telnet = Telnet(self.host, self.port)
time.sleep(1)
self.get()
self.get()
self.update()
except socket.gaierror:
self.telnet = None
LOGGER.error("Cannot connect to %s (%d)",
self.host, self.retries)
|
Simple connect
|
26,438 |
def diff_result_to_cell(item):
state = item[]
if state == :
new_cell = item[].data
old_cell = item[].data
new_cell[][] = state
new_cell[][] = old_cell
cell = new_cell
else:
cell = item[].data
cell[][] = state
return cell
|
diff.diff returns a dictionary with all the information we need,
but we want to extract the cell and change its metadata.
|
26,439 |
def _get_node_text(self, goid, goobj):
txt = []
txt.append(self.pltvars.fmthdr.format(
GO=goobj.id.replace("GO:", "GO"),
level=goobj.level,
depth=goobj.depth))
name = goobj.name.replace(",", "\n")
txt.append(name)
study_txt = self._get_study_txt(goid)
if study_txt is not None:
txt.append(study_txt)
return "\n".join(txt)
|
Return a string to be printed in a GO term box.
|
26,440 |
def write_to(self, f):
f.write(self.version + "\r\n")
for name, value in self.items():
name = name.title()
name = name.replace("Warc-", "WARC-").replace("-Ip-", "-IP-").replace("-Id", "-ID").replace("-Uri", "-URI")
f.write(name)
f.write(": ")
f.write(value)
f.write("\r\n")
f.write("\r\n")
|
Writes this header to a file, in the format specified by WARC.
|
26,441 |
def list_all_products(cls, **kwargs):
kwargs[] = True
if kwargs.get():
return cls._list_all_products_with_http_info(**kwargs)
else:
(data) = cls._list_all_products_with_http_info(**kwargs)
return data
|
List Products
Return a list of Products
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_products(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[Product]
If the method is called asynchronously,
returns the request thread.
|
26,442 |
def map(self, func):
if self._train_set:
self._train_set = map(func, self._train_set)
if self._valid_set:
self._valid_set = map(func, self._valid_set)
if self._test_set:
self._test_set = map(func, self._test_set)
|
Process all data with the given function.
The function should map (x, y) -> (x, y).
|
26,443 |
def add_entry(self, row):
var_call = VCFEntry(self.individuals)
var_call.parse_entry( row )
self.entries[(var_call.chrom, var_call.pos)] = var_call
return var_call
|
This will parse the VCF entry and store it within the VCFFile. It also
returns the VCFEntry.
|
26,444 |
def weekly_plots(
df,
variable,
renormalize = True,
plot = True,
scatter = False,
linestyle = "-",
linewidth = 1,
s = 1
):
if not "days_through_week" in df.columns:
log.error("field days_through_week not found in DataFrame")
return False
weeks = []
for group in df.groupby(df.index.week):
weeks.append(group[1])
scaler = MinMaxScaler()
plt.ylabel(variable);
for week in weeks:
if renormalize:
values = scaler.fit_transform(week[[variable]])
else:
values = week[variable]
if plot:
plt.plot(week["days_through_week"], values, linestyle = linestyle, linewidth = linewidth)
if scatter:
plt.scatter(week["days_through_week"], values, s = s)
plt.xticks(
[ 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5],
["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
)
|
Create weekly plots of a variable in a DataFrame, optionally renormalized.
It is assumed that the variable `days_through_week` exists.
|
26,445 |
def fetch(self):
params = values.of({})
payload = self._version.fetch(
,
self._uri,
params=params,
)
return AddOnResultInstance(
self._version,
payload,
account_sid=self._solution[],
reference_sid=self._solution[],
sid=self._solution[],
)
|
Fetch an AddOnResultInstance
:returns: Fetched AddOnResultInstance
:rtype: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultInstance
|
26,446 |
def cancel(self):
LOG.debug(, self)
self._cancelled = True
self.clear_callbacks()
while True:
try:
self._waitables.get_nowait().put_nowait(self.sentinel)
except queue.Empty:
break
|
Cancels the observer
No more notifications will be passed on
|
26,447 |
def ensure_sphinx_astropy_installed():
if LooseVersion(sys.version) < LooseVersion():
dist.fetch_build_eggs()
eggs = dist.fetch_build_eggs()
if eggs is not None:
for egg in eggs:
if egg.project_name == :
sphinx_astropy_version = egg.parsed_version.public
break
eggs_path = os.path.abspath()
for egg in glob.glob(os.path.join(eggs_path, )):
sys_path_inserts.append(egg)
return sphinx_astropy_version, sys_path_inserts
|
Make sure that sphinx-astropy is available, installing it temporarily if not.
This returns the available version of sphinx-astropy as well as any
paths that should be added to sys.path for sphinx-astropy to be available.
|
26,448 |
def _delete_extraneous_files(self):
if not self._spec.options.delete_extraneous_destination:
return
checked = set()
deleted = 0
for sa, container, vpath, dpath in self._get_destination_paths():
key = .join((sa.name, sa.endpoint, str(dpath)))
if key in checked:
continue
logger.debug(
.format(
key))
if (self._spec.options.mode ==
blobxfer.models.azure.StorageModes.File):
files = blobxfer.operations.azure.file.list_all_files(
sa.file_client, container)
for file in files:
try:
pathlib.Path(file).relative_to(vpath)
except ValueError:
continue
id = blobxfer.operations.upload.Uploader.\
create_destination_id(sa.file_client, container, file)
if id not in self._delete_exclude:
if self._general_options.dry_run:
logger.info(.format(
file))
else:
if self._general_options.verbose:
logger.debug(.format(file))
blobxfer.operations.azure.file.delete_file(
sa.file_client, container, file)
deleted += 1
else:
blobs = blobxfer.operations.azure.blob.list_all_blobs(
sa.block_blob_client, container)
for blob in blobs:
try:
pathlib.Path(blob.name).relative_to(vpath)
except ValueError:
continue
id = blobxfer.operations.upload.Uploader.\
create_destination_id(
sa.block_blob_client, container, blob.name)
if id not in self._delete_exclude:
if self._general_options.dry_run:
logger.info(.format(
blob.name))
else:
if self._general_options.verbose:
logger.debug(.format(
blob.name))
blobxfer.operations.azure.blob.delete_blob(
sa.block_blob_client, container, blob.name)
deleted += 1
checked.add(key)
logger.info(.format(deleted))
|
Delete extraneous files on the remote
:param Uploader self: this
|
26,449 |
def get_instance_field(self, field_name):
try:
field = super(ModelWithDynamicFieldMixin, self).get_instance_field(field_name)
except AttributeError:
dynamic_field = self._get_dynamic_field_for(field_name)
dynamic_field = self.get_field(dynamic_field.name)
field = self._add_dynamic_field_to_instance(dynamic_field, field_name)
return field
|
Add management of dynamic fields: if a normal field cannot be retrieved,
check if it can be a dynamic field and in this case, create a copy with
the given name and associate it to the instance.
|
26,450 |
def to_dict(self):
result = {
key[1:]: getattr(self, key)
for key in self.__slots__
if key[0] == and hasattr(self, key)
}
try:
colour = result.pop()
except KeyError:
pass
else:
if colour:
result[] = colour.value
try:
timestamp = result.pop()
except KeyError:
pass
else:
if timestamp:
result[] = timestamp.isoformat()
if self.type:
result[] = self.type
if self.description:
result[] = self.description
if self.url:
result[] = self.url
if self.title:
result[] = self.title
return result
|
Converts this embed object into a dict.
|
26,451 |
def sg_summary_audio(tensor, sample_rate=16000, prefix=None, name=None):
prefix = '' if prefix is None else prefix + '/'  # '' and '/' assumed from the prefix semantics; the literals were lost
name = prefix + _pretty_name(tensor) if name is None else prefix + name
if not tf.get_variable_scope().reuse:
tf.summary.audio(name + , tensor, sample_rate)
|
r"""Register `tensor` to summary report as audio
Args:
tensor: A `Tensor` to log as audio
sample_rate : An int. Sample rate to report. Default is 16000.
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None
|
26,452 |
def _proc_uri(self, request, result):
if result:
return
for prefix, version in self.uris:
if (request.path_info == prefix or
request.path_info.startswith(prefix + )):
result.set_version(version)
request.script_name += prefix
request.path_info = request.path_info[len(prefix):]
if not request.path_info:
request.path_info =
break
for format, ctype in self.formats.items():
if request.path_info.endswith(format):
result.set_ctype(ctype)
request.path_info = request.path_info[:-len(format)]
break
|
Process the URI rules for the request. Both the desired API
version and desired content type can be determined from those
rules.
:param request: The Request object provided by WebOb.
:param result: The Result object to store the results in.
|
26,453 |
def with_continuations(**c):
if len(c): keys, k = zip(*c.items())
else: keys, k = tuple([]), tuple([])
def d(f):
return C(
lambda kself, *conts:
lambda *args:
f(*args, self=kself, **dict(zip(keys, conts)))) (*k)
return d
|
A decorator for defining tail-call optimized functions.
Example
-------
@with_continuations()
def factorial(n, k, self=None):
return self(n-1, k*n) if n > 1 else k
@with_continuations()
def identity(x, self=None):
return x
@with_continuations(out=identity)
def factorial2(n, k, self=None, out=None):
return self(n-1, k*n) if n > 1 else out(k)
print(factorial(7,1))
print(factorial2(7,1))
|
26,454 |
def f_lock_parameters(self):
for par in self._parameters.values():
if not par.f_is_empty():
par.f_lock()
|
Locks all non-empty parameters
|
26,455 |
def generate_rss(self, path=, only_excerpt=True, https=False):
feed = russell.feed.get_rss_feed(self, only_excerpt=only_excerpt, https=https)
feed.rss_file(self._get_dist_path(path))
|
Generate the RSS feed.
Args:
path (str): Where to save the RSS file. Make sure that your jinja
templates refer to the same path using <link>.
only_excerpt (bool): If True (the default), don't include the full
body of posts in the RSS. Instead, include the first paragraph and
a "read more" link to your website.
https (bool): If True, links inside the RSS with relative scheme (e.g.
//example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP.
|
26,456 |
def instance_present(name, instance_name=None, instance_id=None, image_id=None,
image_name=None, tags=None, key_name=None,
security_groups=None, user_data=None, instance_type=None,
placement=None, kernel_id=None, ramdisk_id=None,
vpc_id=None, vpc_name=None, monitoring_enabled=None,
subnet_id=None, subnet_name=None, private_ip_address=None,
block_device_map=None, disable_api_termination=None,
instance_initiated_shutdown_behavior=None,
placement_group=None, client_token=None,
security_group_ids=None, security_group_names=None,
additional_info=None, tenancy=None,
instance_profile_arn=None, instance_profile_name=None,
ebs_optimized=None, network_interfaces=None,
network_interface_name=None,
network_interface_id=None,
attributes=None, target_state=None, public_ip=None,
allocation_id=None, allocate_eip=False, region=None,
key=None, keyid=None, profile=None):
else:
instance_id = instances[0]
if _create:
if __opts__[]:
ret[] = .format(name)
ret[] = None
return ret
if image_name:
args = {: image_name, : region, : key,
: keyid, : profile}
image_ids = __salt__[](**args)
if image_ids:
image_id = image_ids[0]
else:
image_id = image_name
r = __salt__[](image_id, instance_name if instance_name else name,
tags=tags, key_name=key_name,
security_groups=security_groups, user_data=user_data,
instance_type=instance_type, placement=placement,
kernel_id=kernel_id, ramdisk_id=ramdisk_id, vpc_id=vpc_id,
vpc_name=vpc_name, monitoring_enabled=monitoring_enabled,
subnet_id=subnet_id, subnet_name=subnet_name,
private_ip_address=private_ip_address,
block_device_map=block_device_map,
disable_api_termination=disable_api_termination,
instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior,
placement_group=placement_group, client_token=client_token,
security_group_ids=security_group_ids,
security_group_names=security_group_names,
additional_info=additional_info, tenancy=tenancy,
instance_profile_arn=instance_profile_arn,
instance_profile_name=instance_profile_name,
ebs_optimized=ebs_optimized, network_interfaces=network_interfaces,
network_interface_name=network_interface_name,
network_interface_id=network_interface_id,
region=region, key=key, keyid=keyid, profile=profile)
if not r or not in r:
ret[] = False
ret[] = .format(instance_name if instance_name else name)
return ret
instance_id = r[]
ret[] = {: {}, : {}}
ret[][][] = None
ret[][][] = instance_id
if allocate_eip:
if __opts__[]:
ret[] =
ret[] = None
return ret
domain = if vpc_id or vpc_name else None
r = __salt__[](
domain=domain, region=region, key=key, keyid=keyid,
profile=profile)
if not r:
ret[] = False
ret[] =
return ret
allocation_id = r[]
log.info("New EIP with address %s allocated.", r[])
else:
log.info("EIP not requested.")
if public_ip or allocation_id:
tries = 10
secs = 3
for t in range(tries):
r = __salt__[](
addresses=public_ip, allocation_ids=allocation_id,
region=region, key=key, keyid=keyid, profile=profile)
if r:
break
else:
log.info(
,
tries * secs, public_ip or allocation_id
)
time.sleep(secs)
if not r:
ret[] = False
ret[] = .format(public_ip or allocation_id)
return ret
ip = r[0][]
if r[0].get():
if r[0][] != instance_id:
ret[] = False
ret[] = (
.format(public_ip if public_ip else
allocation_id, r[0][]))
return ret
else:
if __opts__[]:
ret[] = .format(name)
ret[] = None
return ret
r = __salt__[](
instance_id=instance_id, public_ip=public_ip,
allocation_id=allocation_id, region=region, key=key,
keyid=keyid, profile=profile)
if r:
if not in ret[]:
ret[][] = {}
ret[][][] = ip
else:
ret[] = False
ret[] = .format(
instance_name if instance_name else name)
return ret
if attributes:
for k, v in six.iteritems(attributes):
curr = __salt__[](k, instance_id=instance_id, region=region, key=key,
keyid=keyid, profile=profile)
curr = {} if not isinstance(curr, dict) else curr
if curr.get(k) == v:
continue
else:
if __opts__[]:
changed_attrs[k] = {1}\{2}\.format(
k, curr.get(k), v)
continue
try:
r = __salt__[](attribute=k, attribute_value=v,
instance_id=instance_id, region=region,
key=key, keyid=keyid, profile=profile)
except SaltInvocationError as e:
ret[] = False
ret[] = .format(k, v, instance_name)
return ret
ret[] = ret[] if ret[] else {: {}, : {}}
ret[][][k] = curr.get(k)
ret[][][k] = v
if __opts__[]:
if changed_attrs:
ret[][] = changed_attrs
ret[] = None
else:
ret[] = .format(instance_name if instance_name else name)
ret[] = True
if tags and instance_id is not None:
tags = dict(tags)
curr_tags = dict(__salt__[](filters={: instance_id},
region=region, key=key, keyid=keyid, profile=profile).get(instance_id, {}))
current = set(curr_tags.keys())
desired = set(tags.keys())
remove = list(current - desired)
return ret
|
Ensure an EC2 instance is running with the given attributes and state.
name
(string) - The name of the state definition. Recommended that this
match the instance_name attribute (generally the FQDN of the instance).
instance_name
(string) - The name of the instance, generally its FQDN. Exclusive with
'instance_id'.
instance_id
(string) - The ID of the instance (if known). Exclusive with
'instance_name'.
image_id
(string) – The ID of the AMI image to run.
image_name
(string) – The name of the AMI image to run.
tags
(dict) - Tags to apply to the instance.
key_name
(string) – The name of the key pair with which to launch instances.
security_groups
(list of strings) – The names of the EC2 classic security groups with
which to associate instances
user_data
(string) – The Base64-encoded MIME user data to be made available to the
instance(s) in this reservation.
instance_type
(string) – The EC2 instance size/type. Note that only certain types are
compatible with HVM based AMIs.
placement
(string) – The Availability Zone to launch the instance into.
kernel_id
(string) – The ID of the kernel with which to launch the instances.
ramdisk_id
(string) – The ID of the RAM disk with which to launch the instances.
vpc_id
(string) - The ID of a VPC to attach the instance to.
vpc_name
(string) - The name of a VPC to attach the instance to.
monitoring_enabled
(bool) – Enable detailed CloudWatch monitoring on the instance.
subnet_id
(string) – The ID of the subnet within which to launch the instances for
VPC.
subnet_name
(string) – The name of the subnet within which to launch the instances
for VPC.
private_ip_address
(string) – If you’re using VPC, you can optionally use this parameter to
assign the instance a specific available IP address from the subnet
(e.g., 10.0.0.25).
block_device_map
(boto.ec2.blockdevicemapping.BlockDeviceMapping) – A BlockDeviceMapping
data structure describing the EBS volumes associated with the Image.
disable_api_termination
(bool) – If True, the instances will be locked and will not be able to
be terminated via the API.
instance_initiated_shutdown_behavior
(string) – Specifies whether the instance stops or terminates on
instance-initiated shutdown. Valid values are:
- 'stop'
- 'terminate'
placement_group
(string) – If specified, this is the name of the placement group in
which the instance(s) will be launched.
client_token
(string) – Unique, case-sensitive identifier you provide to ensure
idempotency of the request. Maximum 64 ASCII characters.
security_group_ids
(list of strings) – The IDs of the VPC security groups with which to
associate instances.
security_group_names
(list of strings) – The names of the VPC security groups with which to
associate instances.
additional_info
(string) – Specifies additional information to make available to the
instance(s).
tenancy
(string) – The tenancy of the instance you want to launch. An instance
with a tenancy of ‘dedicated’ runs on single-tenant hardware and can
only be launched into a VPC. Valid values are:”default” or “dedicated”.
NOTE: To use dedicated tenancy you MUST specify a VPC subnet-ID as well.
instance_profile_arn
(string) – The Amazon resource name (ARN) of the IAM Instance Profile
(IIP) to associate with the instances.
instance_profile_name
(string) – The name of the IAM Instance Profile (IIP) to associate with
the instances.
ebs_optimized
(bool) – Whether the instance is optimized for EBS I/O. This
optimization provides dedicated throughput to Amazon EBS and a tuned
configuration stack to provide optimal EBS I/O performance. This
optimization isn’t available with all instance types.
network_interfaces
(boto.ec2.networkinterface.NetworkInterfaceCollection) – A
NetworkInterfaceCollection data structure containing the ENI
specifications for the instance.
network_interface_name
(string) - The name of Elastic Network Interface to attach
.. versionadded:: 2016.11.0
network_interface_id
(string) - The id of Elastic Network Interface to attach
.. versionadded:: 2016.11.0
attributes
(dict) - Instance attributes and value to be applied to the instance.
Available options are:
- instanceType - A valid instance type (m1.small)
- kernel - Kernel ID (None)
- ramdisk - Ramdisk ID (None)
- userData - Base64 encoded String (None)
- disableApiTermination - Boolean (true)
- instanceInitiatedShutdownBehavior - stop|terminate
- blockDeviceMapping - List of strings - ie: [‘/dev/sda=false’]
- sourceDestCheck - Boolean (true)
- groupSet - Set of Security Groups or IDs
- ebsOptimized - Boolean (false)
- sriovNetSupport - String - ie: ‘simple’
target_state
(string) - The desired target state of the instance. Available options
are:
- running
- stopped
Note that this option is currently UNIMPLEMENTED.
public_ip:
(string) - The IP of a previously allocated EIP address, which will be
attached to the instance. EC2 Classic instances ONLY - for VCP pass in
an allocation_id instead.
allocation_id:
(string) - The ID of a previously allocated EIP address, which will be
attached to the instance. VPC instances ONLY - for Classic pass in
a public_ip instead.
allocate_eip:
(bool) - Allocate and attach an EIP on-the-fly for this instance. Note
you'll want to release this address when terminating the instance,
either manually or via the 'release_eip' flag to 'instance_absent'.
region
(string) - Region to connect to.
key
(string) - Secret key to be used.
keyid
(string) - Access key to be used.
profile
(variable) - A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
.. versionadded:: 2016.3.0
|
26,457 |
def status_for_all_orders(self):
url_fragment = .format(
venue=self.venue,
account=self.account,
)
url = urljoin(self.base_url, url_fragment)
return self.session.get(url).json()
|
Status for all orders
https://starfighter.readme.io/docs/status-for-all-orders
|
26,458 |
def with_path(self, path, *, encoded=False):
if not encoded:
path = self._PATH_QUOTER(path)
if self.is_absolute():
path = self._normalize_path(path)
if len(path) > 0 and path[0] != "/":
path = "/" + path
return URL(self._val._replace(path=path, query="", fragment=""), encoded=True)
|
Return a new URL with path replaced.
|
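Illustrative behavior, assuming this is yarl's `URL.with_path` (the URL value is arbitrary):

```python
from yarl import URL

url = URL("https://example.com/old/path?x=1#frag")
print(url.with_path("new/path"))   # https://example.com/new/path (query and fragment are dropped)
```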
26,459 |
def eventFilter( self, object, event ):
if ( event.type() != event.Resize ):
return False
size = event.size()
w = size.width()
h = size.height()
hpolicy = Qt.ScrollBarAlwaysOff
vpolicy = Qt.ScrollBarAlwaysOff
if ( self._minimumHeight != -1 and h < self._minimumHeight ):
h = self._minimumHeight
vpolicy = Qt.ScrollBarAsNeeded
if ( self._maximumHeight != -1 and self._maximumHeight < h ):
h = self._maximumHeight
vpolicy = Qt.ScrollBarAsNeeded
if ( self._minimumWidth != -1 and w < self._minimumWidth ):
w = self._minimumWidth
hpolicy = Qt.ScrollBarAsNeeded
if ( self._maximumWidth != -1 and self._maximumWidth < w ):
w = self._maximumWidth
hpolicy = Qt.ScrollBarAsNeeded
hruler = self.horizontalRuler()
vruler = self.verticalRuler()
hlen = hruler.minLength(Qt.Horizontal)
vlen = hruler.minLength(Qt.Vertical)
offset_w = 0
offset_h = 0
self.setSceneRect(0, 0, w - offset_w, h - offset_h)
object.setVerticalScrollBarPolicy(vpolicy)
object.setHorizontalScrollBarPolicy(hpolicy)
return False
|
Filters the chart widget for the resize event to modify this scene's
rect.
:param object | <QObject>
event | <QEvent>
|
26,460 |
def dropHistoricalTable(apps, schema_editor):
table_name =
if table_name in connection.introspection.table_names():
migrations.DeleteModel(
name=table_name,
)
|
Drops the historical sap_success_factors table named herein.
|
26,461 |
def update_os_image_from_image_reference(self, image_name, os_image):
_validate_not_none(, image_name)
_validate_not_none(, os_image)
return self._perform_put(self._get_image_path(image_name),
_XmlSerializer.update_os_image_to_xml(os_image), as_async=True
)
|
Updates metadata elements from a given OS image reference.
image_name:
The name of the image to update.
os_image:
An instance of OSImage class.
os_image.label: Optional. Specifies an identifier for the image.
os_image.description: Optional. Specifies the description of the image.
os_image.language: Optional. Specifies the language of the image.
os_image.image_family:
Optional. Specifies a value that can be used to group VM Images.
os_image.recommended_vm_size:
Optional. Specifies the size to use for the Virtual Machine that
is created from the VM Image.
os_image.eula:
Optional. Specifies the End User License Agreement that is
associated with the image. The value for this element is a string,
but it is recommended that the value be a URL that points to a EULA.
os_image.icon_uri:
Optional. Specifies the URI to the icon that is displayed for the
image in the Management Portal.
os_image.small_icon_uri:
Optional. Specifies the URI to the small icon that is displayed for
the image in the Management Portal.
os_image.privacy_uri:
Optional. Specifies the URI that points to a document that contains
the privacy policy related to the image.
os_image.published_date:
Optional. Specifies the date when the image was added to the image
repository.
os_image.media_link:
Required: Specifies the location of the blob in Windows Azure
blob store where the media for the image is located. The blob
location must belong to a storage account in the subscription
specified by the <subscription-id> value in the operation call.
Example:
http://example.blob.core.windows.net/disks/mydisk.vhd
os_image.name:
Specifies a name for the OS image that Windows Azure uses to
identify the image when creating one or more VM Roles.
os_image.os:
The operating system type of the OS image. Possible values are:
Linux, Windows
|
26,462 |
def _get_asset(self, asset_uid):
uri = self.uri + '/' + asset_uid
headers = self._get_headers()
return self.service._get(uri, headers=headers)
|
Returns the raw response for a given asset by its unique id.
|
26,463 |
def is_type(self):
if self.__is_type_result is not None:
return self.__is_type_result
self.__is_type_result = self.__is_type()
return self.__is_type_result
|
:return:
:rtype: bool
|
26,464 |
def update_rejection_permissions(portal):
updated = update_rejection_permissions_for(portal, "bika_ar_workflow",
"Reject Analysis Request")
if updated:
brains = api.search(dict(portal_type="AnalysisRequest"),
CATALOG_ANALYSIS_REQUEST_LISTING)
update_rolemappings_for(brains, "bika_ar_workflow")
updated = update_rejection_permissions_for(portal, "bika_sample_workflow",
"Reject Sample")
if updated:
brains = api.search(dict(portal_type="Sample"), "bika_catalog")
update_rolemappings_for(brains, "bika_sample_workflow")
|
Adds the permission 'Reject Analysis Request' and updates the permission
mappings accordingly.
|
26,465 |
def pos_development_directory(templates,
inventory,
context,
topics,
user,
item):
replacement_fields = replacement_fields_from_context(context)
binding = binding_from_item(inventory, item)
pattern = pattern_from_template(templates, binding)
positional_arguments = find_positional_arguments(pattern)
highest_argument = find_highest_position(positional_arguments)
highest_available = len(topics) - 1
if highest_available < highest_argument:
echo("Template for \"%s\" requires at least %i arguments" % (
item, highest_argument + 1))
sys.exit(USER_ERROR)
try:
return pattern.format(*topics, **replacement_fields).replace("\\", "/")
except KeyError as exc:
echo("TEMPLATE ERROR: %s is not an available key\n" % exc)
echo("Available tokens:")
for key in replacement_fields:
echo("\n- %s" % key)
sys.exit(TEMPLATE_ERROR)
|
Return absolute path to development directory
Arguments:
templates (dict): templates.yaml
inventory (dict): inventory.yaml
context (dict): The be context, from context()
topics (list): Arguments to `in`
user (str): Current `be` user
item (str): Item from template-binding address
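A minimal sketch of the final formatting step, with a made-up pattern and values (not the real templates.yaml contents):
pattern = '{cwd}/{0}/{1}/dev'              # hypothetical pattern resolved from templates.yaml
topics = ['thedeal', 'seq01']              # arguments given to `in`
replacement_fields = {'cwd': '/projects'}  # from replacement_fields_from_context()
print(pattern.format(*topics, **replacement_fields).replace('\\', '/'))
# /projects/thedeal/seq01/dev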
|
26,466 |
def detect_images_and_galleries(generators):
for generator in generators:
if isinstance(generator, ArticlesGenerator):
for article in itertools.chain(generator.articles, generator.translations, generator.drafts):
detect_image(generator, article)
detect_gallery(generator, article)
elif isinstance(generator, PagesGenerator):
for page in itertools.chain(generator.pages, generator.translations, generator.hidden_pages):
detect_image(generator, page)
detect_gallery(generator, page)
|
Runs generator on both pages and articles.
|
26,467 |
def invert(self):
result = defaultdict(set)
for k, val in self.items():
result[val].add(k)
return MultiDict(dict(result))
|
Invert by swapping each value with its key.
Returns
-------
MultiDict
Inverted multi-dict.
Examples
--------
>>> MultiDict({1: {1}, 2: {1,2,3}, 4: set()}).invert()
MultiDict({1: {1,2}, 2: {2}, 3: {2}})
|
26,468 |
def select(self, template_name):
return [t for t in self.templates if t.name == template_name][0]
|
Select a particular template from the tribe.
:type template_name: str
:param template_name: Template name to look-up
:return: Template
.. rubric:: Example
>>> tribe = Tribe(templates=[Template(name='c'), Template(name='b'),
... Template(name='a')])
>>> tribe.select('b') # doctest: +NORMALIZE_WHITESPACE
Template b:
0 channels;
lowcut: None Hz;
highcut: None Hz;
sampling rate None Hz;
filter order: None;
process length: None s
|
26,469 |
def applies(self, src, dst):
if self._src_pattern and (src is None or re.search(self._src_pattern, src) is None):
return False
elif self._dst_pattern and (dst is None or re.search(self._dst_pattern, dst) is None):
return False
return True
|
Checks if this rule applies to the given src and dst paths, based on the src pattern and
dst pattern given in the constructor.
If src pattern was None, this rule will apply to any given src path (same for dst).
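For illustration, the same check can be exercised standalone with hypothetical patterns (a rule that only applies to .py sources):
import re

src_pattern, dst_pattern = r'\.py$', None   # hypothetical rule patterns
def applies(src, dst):
    if src_pattern and (src is None or re.search(src_pattern, src) is None):
        return False
    if dst_pattern and (dst is None or re.search(dst_pattern, dst) is None):
        return False
    return True

print(applies('pkg/module.py', 'out/module.py'))   # True
print(applies('README.md', None))                  # False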
|
26,470 |
def get_account(self):
api = self._get_api(iam.DeveloperApi)
return Account(api.get_my_account_info(include="limits, policies"))
|
Get details of the current account.
:returns: an account object.
:rtype: Account
|
26,471 |
def container_query(self, query, quiet=False):
results = self._list_containers()
matches = []
for result in results:
for key,val in result.metadata.items():
if query in val and result not in matches:
matches.append(result)
if not quiet:
bot.info("[gs://%s] Found %s containers" %(self._bucket_name,len(matches)))
for image in matches:
size = round(image.size / (1024*1024.0))
bot.custom(prefix=image.name, color="CYAN")
# label strings and metadata keys below were lost in extraction and are reconstructed
bot.custom(prefix='id:', message=image.id)
bot.custom(prefix='uri:', message=image.metadata['name'])
bot.custom(prefix='updated:', message=image.updated)
bot.custom(prefix='size:', message='%s MB' % (size))
bot.custom(prefix='md5:', message=image.md5_hash)
if "public_url" in image.metadata:
public_url = image.metadata['public_url']
bot.custom(prefix='url:', message=public_url)
bot.newline()
return matches
|
Search for a specific container.
This function is similar to the general listing above, but filters the
results by the user-provided query.
|
26,472 |
def _classic_get_grouped_dicoms(dicom_input):
if [d for d in dicom_input if 'InstanceNumber' in d]:
dicoms = sorted(dicom_input, key=lambda x: x.InstanceNumber)
else:
dicoms = common.sort_dicoms(dicom_input)
grouped_dicoms = []
stack_position_tag = Tag(0x0020, 0x0012)
for index in range(0, len(dicoms)):
dicom_ = dicoms[index]
if stack_position_tag not in dicom_:
stack_index = 0
else:
stack_index = dicom_[stack_position_tag].value - 1
while len(grouped_dicoms) <= stack_index:
grouped_dicoms.append([])
grouped_dicoms[stack_index].append(dicom_)
return grouped_dicoms
|
Search all dicoms in the dicom directory, sort and validate them
fast_read = True will only read the headers not the data
|
26,473 |
def stage(self, name, pipeline_counter=None):
return Stage(
self.server,
pipeline_name=self.name,
stage_name=name,
pipeline_counter=pipeline_counter,
)
|
Helper to instantiate a :class:`gocd.api.stage.Stage` object
Args:
name: The name of the stage
pipeline_counter: The pipeline counter (run) to fetch the stage for.
Returns:
Stage: a :class:`gocd.api.stage.Stage` instance for the named stage.
|
26,474 |
def watch_docs(ctx):
try:
import sphinx_autobuild
except ImportError:
# messages and command string below were lost in extraction; reconstructed
print('ERROR: the watch task requires the sphinx-autobuild package.')
print('Install it with:')
print('    pip install sphinx-autobuild')
sys.exit(1)
docs(ctx)
ctx.run('sphinx-autobuild {0} {1}'.format(docs_dir, build_dir), pty=True)
|
Run build the docs when a file changes.
|
26,475 |
def format_time(time):
h, r = divmod(time / 1000, 3600)
m, s = divmod(r, 60)
return "%02d:%02d:%02d" % (h, m, s)
|
Formats the given time into HH:MM:SS
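For example, 3,661,000 milliseconds is one hour, one minute and one second:
>>> format_time(3661000)
'01:01:01'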
|
26,476 |
def _show_notification(self,
event, summary, message, icon,
*actions):
notification = self._notify(summary, message, icon)
timeout = self._get_timeout(event)
if timeout != -1:
notification.set_timeout(int(timeout * 1000))
for action in actions:
if action and self._action_enabled(event, action[0]):
self._add_action(notification, *action)
try:
notification.show()
except GLib.GError as exc:
self._log.error(_("Failed to show notification: {0}", exc_message(exc)))
self._log.debug(format_exc())
|
Show a notification.
:param str event: event name
:param str summary: notification title
:param str message: notification body
:param str icon: icon name
:param actions: each item is a tuple with parameters for _add_action
|
26,477 |
def register_app(self, app):
app.route(self.uri, methods=self.methods)(self.callable_obj)
return self
|
Register the route object to a `bottle.Bottle` app instance.
Args:
app (bottle.Bottle): the application instance to register the route on.
Returns:
Route instance (for chaining purposes)
|
26,478 |
def add_scalar(self, name, value, step):
self.writer.add_scalar(name, value, step)
|
Log a scalar variable.
|
26,479 |
def timezone(client, location, timestamp=None, language=None):
params = {
"location": convert.latlng(location),
"timestamp": convert.time(timestamp or datetime.utcnow())
}
if language:
params["language"] = language
return client._request( "/maps/api/timezone/json", params)
|
Get time zone for a location on the earth, as well as that location's
time offset from UTC.
:param location: The latitude/longitude value representing the location to
look up.
:type location: string, dict, list, or tuple
:param timestamp: Timestamp specifies the desired time as seconds since
midnight, January 1, 1970 UTC. The Time Zone API uses the timestamp to
determine whether or not Daylight Savings should be applied. Times
before 1970 can be expressed as negative values. Optional. Defaults to
``datetime.utcnow()``.
:type timestamp: int or datetime.datetime
:param language: The language in which to return results.
:type language: string
:rtype: dict
|
26,480 |
def qtrim_front(self, name, size=1):
size = get_positive_integer("size", size)
return self.execute_command('qtrim_front', name, size)
|
Remove up to ``size`` elements from the front of the queue named ``name``.
:param string name: the queue name
:param int size: the max length of removed elements
:return: the length of removed elements
:rtype: int
|
26,481 |
def expand(self, v):
# dict keys below were lost in extraction; reconstructed from the docstring
if not is_measure(v) or v['type'] not in ['baseline',
'position', 'uvw']:
raise TypeError("Can only expand baselines, positions, or uvw")
vw = v.copy()
vw['type'] = "uvw"
vw['refer'] = "J2000"
outm = _measures.expand(self, vw)
outm['xyz'] = dq.quantity(outm['xyz'])
outm['measures']['refer'] = v['refer']
outm['measures']['type'] = v['type']
return outm
|
Calculates the differences between a series of given measure values:
it calculates baseline values from position values.
:params v: a measure (of type 'baseline', 'position' or 'uvw')
:returns: a `dict` with the value for key `measures` being a measure
and the value for key `xyz` a quantity containing the
differences.
Example::
>>> from casacore.quanta import quantity
>>> x = quantity([10,50],'m')
>>> y = quantity([20,100],'m')
>>> z = quantity([30,150],'m')
>>> sb = dm.baseline('itrf', x, y, z)
>>> out = dm.expand(sb)
>>> print out['xyz']
[40.000000000000014, 80.0, 120.0] m
|
26,482 |
def volatility(tnet, distance_func_name='default', calc='global', communities=None, event_displacement=None):  # defaults reconstructed from the docstring
tnet, netinfo = process_input(tnet, [, , ])
distance_func_name = check_distance_funciton_input(
distance_func_name, netinfo)
if not isinstance(distance_func_name, str):
raise ValueError()
if netinfo[][1] == :
ind = np.triu_indices(tnet.shape[0], k=-tnet.shape[0])
elif netinfo[][1] == :
ind = np.triu_indices(tnet.shape[0], k=1)
if calc == :
communities = np.array(communities)
if len(communities) != netinfo[][0]:
raise ValueError(
)
if communities.min() < 0:
raise ValueError(
)
distance_func = getDistanceFunction(distance_func_name)
if calc == :
vol = np.mean([distance_func(tnet[ind[0], ind[1], t], tnet[ind[0], ind[1], t + 1])
for t in range(0, tnet.shape[-1] - 1)])
elif calc == :
vol = [distance_func(tnet[ind[0], ind[1], t], tnet[ind[0], ind[1], t + 1])
for t in range(0, tnet.shape[-1] - 1)]
elif calc == :
vol = [distance_func(tnet[ind[0], ind[1], event_displacement],
tnet[ind[0], ind[1], t]) for t in range(0, tnet.shape[-1])]
elif calc == or calc == :
vol = np.zeros([tnet.shape[0], tnet.shape[1]])
for i in ind[0]:
for j in ind[1]:
vol[i, j] = np.mean([distance_func(
tnet[i, j, t], tnet[i, j, t + 1]) for t in range(0, tnet.shape[-1] - 1)])
if netinfo[][1] == :
vol = vol + np.transpose(vol)
if calc == :
vol = np.mean(vol, axis=1)
elif calc == :
net_id = set(communities)
vol = np.zeros([max(net_id) + 1, max(net_id) +
1, netinfo[][-1] - 1])
for net1 in net_id:
for net2 in net_id:
if net1 != net2:
vol[net1, net2, :] = [distance_func(tnet[communities == net1][:, communities == net2, t].flatten(),
tnet[communities == net1][:, communities == net2, t + 1].flatten()) for t in range(0, tnet.shape[-1] - 1)]
else:
nettmp = tnet[communities ==
net1][:, communities == net2, :]
triu = np.triu_indices(nettmp.shape[0], k=1)
nettmp = nettmp[triu[0], triu[1], :]
vol[net1, net2, :] = [distance_func(nettmp[:, t].flatten(
), nettmp[:, t + 1].flatten()) for t in range(0, tnet.shape[-1] - 1)]
elif calc == :
withi = np.array([[ind[0][n], ind[1][n]] for n in range(
0, len(ind[0])) if communities[ind[0][n]] == communities[ind[1][n]]])
vol = [distance_func(tnet[withi[:, 0], withi[:, 1], t], tnet[withi[:, 0],
withi[:, 1], t + 1]) for t in range(0, tnet.shape[-1] - 1)]
elif calc == :
beti = np.array([[ind[0][n], ind[1][n]] for n in range(
0, len(ind[0])) if communities[ind[0][n]] != communities[ind[1][n]]])
vol = [distance_func(tnet[beti[:, 0], beti[:, 1], t], tnet[beti[:, 0],
beti[:, 1], t + 1]) for t in range(0, tnet.shape[-1] - 1)]
return vol
|
r"""
Volatility of temporal networks.
Volatility is the average distance between consecutive time points of graphlets (the difference is calculated either globally or per edge).
Parameters
----------
tnet : array or dict
temporal network input (graphlet or contact). Nettype: 'bu','bd','wu','wd'
distance_func_name : str
Distance function. Following options available: 'default', 'hamming', 'euclidean'. (Default implies hamming for binary networks, euclidean for weighted).
calc : str
Version of volatility to calculate. Possibilities include:
'global' - (default): the average distance of all nodes for each consecutive time point.
'edge' - average distance between consecutive time points for each edge. Takes considerably longer.
'node' - (i.e. returns the average per node output when calculating volatility per 'edge').
'time' - returns volatility per time point.
'communities' - returns volatility per community id (see communities). Also returned per time-point; this may be changed in the future (with additional options).
'event_displacement' - calculates the volatility from a specified point. Returns time-series.
communities : array
Array of indices for communities (either (node) or (node,time) dimensions).
event_displacement : int
If calc='event_displacement', specify the temporal index that all other time-points are calculated in relation to.
Notes
-----
Volatility calculates the difference between network snapshots.
.. math:: V_t = D(G_t,G_{t+1})
Where D is some distance function (e.g. Hamming distance for binary matrices).
V can be calculated for the entire network (global), but can also be calculated for individual edges, nodes or given a community vector.
Indexes of communities are returned "as is" with a shape of [max(communities)+1,max(communities)+1]. So if the indexes used are [1,2,3,5], V.shape==(6,6). The returned V[1,2] will correspond to indexes 1 and 2, and missing indexes (here 0 and 4) will be NaNs in rows and columns. If this behaviour is unwanted, call clean_community_indexes first. This will probably change.
Examples
--------
Import everything needed.
>>> import teneto
>>> import numpy
>>> np.random.seed(1)
>>> tnet = teneto.TemporalNetwork(nettype='bu')
Here we generate a binary network where edges have a 0.5 chance of going "on", and once on, a 0.2 chance of going "off"
>>> tnet.generatenetwork('rand_binomial', size=(3,10), prob=(0.5,0.2))
Calculate the volatility
>>> tnet.calc_networkmeasure('volatility', distance_func_name='hamming')
0.5555555555555556
If we change the probabilities so that edges instead disappear the time-point after they appeared:
>>> tnet.generatenetwork('rand_binomial', size=(3,10), prob=(0.5,1))
This will make a more volatile network
>>> tnet.calc_networkmeasure('volatility', distance_func_name='hamming')
0.1111111111111111
We can calculate the volatility per time instead
>>> vol_time = tnet.calc_networkmeasure('volatility', calc='time', distance_func_name='hamming')
>>> len(vol_time)
9
>>> vol_time[0]
0.3333333333333333
Or per node:
>>> vol_node = tnet.calc_networkmeasure('volatility', calc='node', distance_func_name='hamming')
>>> vol_node
array([0.07407407, 0.07407407, 0.07407407])
Here we see the volatility for each node was the same.
It is also possible to pass a community vector and the function will return volatility both within and between each community.
So the following has two communities:
>>> vol_com = tnet.calc_networkmeasure('volatility', calc='communities', communities=[0,1,1], distance_func_name='hamming')
>>> vol_com.shape
(2, 2, 9)
>>> vol_com[:,:,0]
array([[nan, 0.5],
[0.5, 0. ]])
And we see that, at time-point 0, there is some volatility between community 0 and 1 but no volatility within community 1. The reason for nan appearing is due to there only being 1 node in community 0.
Output
------
vol : array
|
26,483 |
def getPointOnLine(x1, y1, x2, y2, n):
x = ((x2 - x1) * n) + x1
y = ((y2 - y1) * n) + y1
return (x, y)
|
Returns the (x, y) tuple of the point that has progressed a proportion
n along the line defined by the two x, y coordinates.
Copied from pytweening module.
|
26,484 |
def checkBim(fileName, minNumber, chromosome):
nbMarkers = 0
with open(fileName, 'r') as inputFile:
for line in inputFile:
row = line.rstrip("\r\n").split("\t")
if row[0] == chromosome:
nbMarkers += 1
if nbMarkers < minNumber:
return False
return True
|
Checks the BIM file for chrN markers.
:param fileName:
:param minNumber:
:param chromosome:
:type fileName: str
:type minNumber: int
:type chromosome: str
:returns: ``True`` if there are at least ``minNumber`` markers on
chromosome ``chromosome``, ``False`` otherwise.
|
26,485 |
# default names and the doc template were lost in extraction; 'source' is confirmed by the
# docstring, the other defaults are reconstructed
def can_take(attrs_to_freeze=(), defaults=None, source_attr='source', instance_property_name='snapshot', inner_class_name='Snapshot'):
def wrapper(klass):
Snapshot = namedtuple(inner_class_name, tuple(attrs_to_freeze) + (source_attr,))
doc = 'Snapshot of attributes (%s) plus the originating object under %r.' % (', '.join(attrs_to_freeze), source_attr)
def instance_method(self):
return Snapshot(**dict({
k: (getattr(self, k, defaults(k)) if callable(defaults) else getattr(self, k)) for k in attrs_to_freeze
}, **{source_attr: self}))
instance_method.__doc__ = doc
setattr(klass, instance_property_name, property(instance_method))
setattr(klass, inner_class_name, Snapshot)
return klass
return wrapper
|
Decorator that allows a class's instances to generate
snapshots of themselves.
Decorates the class by allowing it to have:
* A custom class to serve each snapshot. Such class
will have a subset of attributes to serve from the object,
and a special designed attribute ('source', by default) to
serve the originating object. Such class will be stored
under custom name under the generating (decorated) class.
* An instance method (actually: property) which will yield
the snapshot for the instance.
|
26,486 |
def show_warning(self, index):
try:
val_size = index.model().sizes[index.row()]
val_type = index.model().types[index.row()]
except:
return False
# type names reconstructed; originals were lost in extraction
if val_type in ['list', 'set', 'tuple', 'dict'] and \
int(val_size) > 1e5:
return True
else:
return False
|
Decide if showing a warning when the user is trying to view
a big variable associated with a TableModel index.
This avoids getting the variables' value to know its
size and type, using instead those already computed by
the TableModel.
The problem is that when a variable is too big, it can take a
long time just to get its value.
|
26,487 |
def add_group(self, number, name, led_type):
group = group_factory(self, number, name, led_type)
self.groups.append(group)
return group
|
Add a group.
:param number: Group number (1-4).
:param name: Group name.
:param led_type: Either `RGBW`, `WRGB`, `RGBWW`, `WHITE`, `DIMMER` or `BRIDGE_LED`.
:returns: Added group.
|
26,488 |
def __parse(value):
match_result = _RE_DATETIME.match(value)
if match_result is None:
raise ValueError("date data has invalid format " % (value,))
date = _date_from_match(match_result)
time, round_up = _time_from_match(match_result)
tzinfo = _tzinfo_from_match(match_result)
value = datetime.datetime.combine(date, time)
value = value.replace(tzinfo=tzinfo)
if round_up:
value += datetime.timedelta(microseconds=1)
return value
|
Parse the string datetime.
Supports the subset of ISO8601 used by xsd:dateTime, but is lenient
with what is accepted, handling most reasonable syntax.
Subsecond information is rounded to microseconds due to a restriction
in the python datetime.datetime/time implementation.
@param value: A datetime string.
@type value: str
@return: A datetime object.
@rtype: B{datetime}.I{datetime}
|
26,489 |
def get_instance(self, payload):
return SipInstance(self._version, payload, account_sid=self._solution['account_sid'], )
|
Build an instance of SipInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.sip.SipInstance
:rtype: twilio.rest.api.v2010.account.sip.SipInstance
|
26,490 |
def _initialize_installation(self):
private_key_client = security.generate_rsa_private_key()
installation = core.Installation.create(
self,
security.public_key_to_string(private_key_client.publickey())
).value
token = installation.token.token
public_key_server_string = \
installation.server_public_key.server_public_key
public_key_server = RSA.import_key(public_key_server_string)
self._installation_context = InstallationContext(
token,
private_key_client,
public_key_server
)
|
:rtype: None
|
26,491 |
def initialize(self, **kwargs):
if not set(kwargs.keys()).issuperset(self.init_keys):
raise Exception("TransferFn needs to be initialized with %s"
% ', '.join(repr(el) for el in self.init_keys))
|
Transfer functions may need additional information before the
supplied numpy array can be modified in place. For instance,
transfer functions may have state which needs to be allocated
in memory with a certain size. In other cases, the transfer
function may need to know about the coordinate system
associated with the input data.
|
26,492 |
def _run_coro(self, value):
if self._options.mode is MODE.LAST_DISTINCT and \
value == self._last_emit:
self._future = None
return
self._last_emit = value
self.scheduled.notify(value)
values = value if self._options.unpack else (value,)
coro = self._options.coro(*values, *self._options.args,
**self._options.kwargs)
self._future = asyncio.ensure_future(coro)
self._future.add_done_callback(self._future_done)
|
Start the coroutine as task
|
26,493 |
def update(self, story, params={}, **options):
path = "/stories/%s" % (story)
return self.client.put(path, params, **options)
|
Updates the story and returns the full record for the updated story.
Only comment stories can have their text updated, and only comment stories and
attachment stories can be pinned. Only one of `text` and `html_text` can be specified.
Parameters
----------
story : {Id} Globally unique identifier for the story.
[data] : {Object} Data for the request
- [text] : {String} The plain text with which to update the comment.
- [html_text] : {String} The rich text with which to update the comment.
- [is_pinned] : {Boolean} Whether the story should be pinned on the resource.
|
26,494 |
def set_interval(self, start, end, value, compact=False):
for i, (s, e, v) in enumerate(self.iterperiods(start, end)):
if i == 0:
self.set(s, value, compact)
else:
del self[s]
self.set(end, v, compact)
|
Set the value for the time series on an interval. If compact is
True, only set the value if it's different from what it would
be anyway.
|
26,495 |
def filesampler(files, testsetsize = 0.1, devsetsize = 0, trainsetsize = 0, outputdir = '', encoding='utf-8'):  # defaults for outputdir/encoding reconstructed
if not isinstance(files, list):
files = list(files)
total = 0
for filename in files:
f = io.open(filename, 'r', encoding=encoding)
count = 0
for line in f:
count += 1
f.close()
if total == 0:
total = count
elif total != count:
raise Exception("Size mismatch, when multiple files are specified they must contain the exact same amount of lines! (" +str(count) + " vs " + str(total) +")")
if testsetsize < 1:
testsetsize = int(total * testsetsize)
if devsetsize < 1 and devsetsize > 0:
devsetsize = int(total * devsetsize)
if testsetsize >= total or devsetsize >= total or testsetsize + devsetsize >= total:
raise Exception("Test set and/or development set too large! No samples left for training set!")
trainset = {}
testset = {}
devset = {}
for i in range(1,total+1):
trainset[i] = True
for i in random.sample(trainset.keys(), int(testsetsize)):
testset[i] = True
del trainset[i]
if devsetsize > 0:
for i in random.sample(trainset.keys(), int(devsetsize)):
devset[i] = True
del trainset[i]
if trainsetsize > 0:
newtrainset = {}
for i in random.sample(trainset.keys(), int(trainsetsize)):
newtrainset[i] = True
trainset = newtrainset
for filename in files:
# file modes, path separators and the '.train'/'.test'/'.dev' suffixes were lost in extraction; reconstructed
if not outputdir:
ftrain = io.open(filename + '.train','w',encoding=encoding)
else:
ftrain = io.open(outputdir + '/' + os.path.basename(filename) + '.train','w',encoding=encoding)
if not outputdir:
ftest = io.open(filename + '.test','w',encoding=encoding)
else:
ftest = io.open(outputdir + '/' + os.path.basename(filename) + '.test','w',encoding=encoding)
if devsetsize > 0:
if not outputdir:
fdev = io.open(filename + '.dev','w',encoding=encoding)
else:
fdev = io.open(outputdir + '/' + os.path.basename(filename) + '.dev','w',encoding=encoding)
f = io.open(filename,'r',encoding=encoding)
for linenum, line in enumerate(f):
if linenum+1 in trainset:
ftrain.write(line)
elif linenum+1 in testset:
ftest.write(line)
elif devsetsize > 0 and linenum+1 in devset:
fdev.write(line)
f.close()
ftrain.close()
ftest.close()
if devsetsize > 0: fdev.close()
|
Extract a training set, test set and optionally a development set from one file, or multiple *interdependent* files (such as a parallel corpus). It is assumed each line contains one instance (such as a word or sentence).
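A usage sketch with placeholder file names, splitting a parallel corpus into 80/10/10 train/test/dev portions (both files are split on the same line numbers):
filesampler(['corpus.en', 'corpus.fr'],
            testsetsize=0.1,
            devsetsize=0.1,
            outputdir='splits',
            encoding='utf-8')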
|
26,496 |
def get_changes(self, dest_attr, new_name=None, resources=None,
task_handle=taskhandle.NullTaskHandle()):
changes = ChangeSet('Moving method <%s>' % self.method_name)  # change description reconstructed
if resources is None:
resources = self.project.get_python_files()
if new_name is None:
new_name = self.get_method_name()
resource1, start1, end1, new_content1 = \
self._get_changes_made_by_old_class(dest_attr, new_name)
collector1 = codeanalyze.ChangeCollector(resource1.read())
collector1.add_change(start1, end1, new_content1)
resource2, start2, end2, new_content2 = \
self._get_changes_made_by_new_class(dest_attr, new_name)
if resource1 == resource2:
collector1.add_change(start2, end2, new_content2)
else:
collector2 = codeanalyze.ChangeCollector(resource2.read())
collector2.add_change(start2, end2, new_content2)
result = collector2.get_changed()
import_tools = importutils.ImportTools(self.project)
new_imports = self._get_used_imports(import_tools)
if new_imports:
goal_pymodule = libutils.get_string_module(
self.project, result, resource2)
result = _add_imports_to_module(
import_tools, goal_pymodule, new_imports)
if resource2 in resources:
changes.add_change(ChangeContents(resource2, result))
if resource1 in resources:
changes.add_change(ChangeContents(resource1,
collector1.get_changed()))
return changes
|
Return the changes needed for this refactoring
Parameters:
- `dest_attr`: the name of the destination attribute
- `new_name`: the name of the new method; if `None` uses
the old name
- `resources` can be a list of `rope.base.resources.File`\s to
apply this refactoring on. If `None`, the restructuring
will be applied to all python files.
|
26,497 |
def _set_child(self, name, child):
if not isinstance(child, Parentable):
raise ValueError('Child must be a Parentable object, got {child!r}.'.format(child=child))  # message reconstructed
child._set_parent(self)
self._store_child(name, child)
|
Set child.
:param name: Child name.
:param child: Parentable object.
|
26,498 |
def _ParseVSSProcessingOptions(self, options):
vss_only = False
vss_stores = None
# option names, prefix and error message reconstructed; originals were lost in extraction
self._process_vss = not getattr(options, 'no_vss', False)
if self._process_vss:
vss_only = getattr(options, 'vss_only', False)
vss_stores = getattr(options, 'vss_stores', None)
if vss_stores:
try:
self._ParseVolumeIdentifiersString(vss_stores, prefix='vss')
except ValueError:
raise errors.BadConfigOption('Unsupported VSS stores')
self._vss_only = vss_only
self._vss_stores = vss_stores
|
Parses the VSS processing options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
|
26,499 |
def macro_list(self, args: argparse.Namespace) -> None:
if args.name:
for cur_name in utils.remove_duplicates(args.name):
if cur_name in self.macros:
self.poutput("macro create {} {}".format(cur_name, self.macros[cur_name].value))
else:
self.perror("Macro not found".format(cur_name), traceback_war=False)
else:
sorted_macros = utils.alphabetical_sort(self.macros)
for cur_macro in sorted_macros:
self.poutput("macro create {} {}".format(cur_macro, self.macros[cur_macro].value))
|
List some or all macros
|