| content (stringlengths 22–815k) | id (int64 0–4.91M) |
|---|---|
def apply_fixes(args, tmpdir):
"""Calls clang-apply-fixes on a given directory."""
invocation = [args.clang_apply_replacements_binary]
if args.format:
invocation.append('-format')
if args.style:
invocation.append('-style=' + args.style)
invocation.append(tmpdir)
subprocess.call(invocation)
| 5,351,900 |
def __get_ll_type__(ll_type):
"""
Given an lltype value, retrieve its definition.
"""
res = [llt for llt in __LL_TYPES__
if llt[1] == ll_type]
assert len(res) < 2, 'Duplicate linklayer types.'
if res:
return res[0]
else:
return None
| 5,351,901 |
def is_cloaked(path, names):
""" Return True if this is likely to be a cloaked encrypted post """
fname = unicoder(os.path.split(path)[1]).lower()
fname = os.path.splitext(fname)[0]
for name in names:
name = os.path.split(name.lower())[1]
name, ext = os.path.splitext(unicoder(name))
if ext == u'.rar' and fname.startswith(name) and (len(fname) - len(name)) < 8 and len(names) < 3 and not RE_SUBS.search(fname):
logging.debug('File %s is probably encrypted due to RAR with same name inside this RAR', fname)
return True
elif 'password' in name:
logging.debug('RAR %s is probably encrypted: "password" in filename %s', fname, name)
return True
return False
| 5,351,902 |
def dump_recarray(filename, recarray):
"""
    Dump a recarray to an ESV file.
"""
ESV.from_recarray(recarray).dump_file(filename)
| 5,351,903 |
def warm_restart(ctx, redis_unix_socket_path):
"""warm_restart-related configuration tasks"""
# Note: redis_unix_socket_path is a path string, and the ground truth is now from database_config.json.
# We only use it as a bool indicator on either unix_socket_path or tcp port
use_unix_socket_path = bool(redis_unix_socket_path)
config_db = ConfigDBConnector(use_unix_socket_path=use_unix_socket_path)
config_db.connect(wait_for_init=False)
# warm restart enable/disable config is put in stateDB, not persistent across cold reboot, not saved to config_DB.json file
state_db = SonicV2Connector(use_unix_socket_path=use_unix_socket_path)
state_db.connect(state_db.STATE_DB, False)
TABLE_NAME_SEPARATOR = '|'
prefix = 'WARM_RESTART_ENABLE_TABLE' + TABLE_NAME_SEPARATOR
ctx.obj = {'db': config_db, 'state_db': state_db, 'prefix': prefix}
| 5,351,904 |
def parse_email_body(email_body, client_path):
"""
Parses email body to extract magnet link. Once the link is found,
the torrent client is launched and begins downloading the torrent.
After the download completes, a confirmation SMS message is sent.
:param str email_body: body of the email
:param Path client_path: path to torrent client
"""
lines = email_body.split('\n')
for line in lines:
if line.startswith('magnet:?'):
torrent_process = subprocess.Popen([client_path, line])
torrent_process.wait()
# removes trackers with torrent info from shared magnet link
text_myself(message=f'Finished downloading the following torrent: '
f'\n\n{line[0:line.find("&")]}')
| 5,351,905 |
def slice_node(node, split):
"""Splits a node up into two sides.
For text nodes, this will return two text nodes.
For text elements, this will return two of the source nodes with children
distributed on either side. Children that live on the split will be
split further.
Parameters
----------
node : docutils.nodes.Text or docutils.nodes.TextElement
split : int
Location of the represented text to split at.
Returns
-------
(left, right) : (type(node), type(node))
"""
if isinstance(node, Text):
return Text(node[:split]), Text(node[split:])
elif isinstance(node, docutils.nodes.TextElement):
if split < 0:
split = len(node.astext())+split
right = node.deepcopy()
left = node.deepcopy()
left.clear()
offset = 0
while offset < split:
try:
child = right.pop(0)
except IndexError:
break
child_strlen = len(child.astext())
if offset+child_strlen < split:
left.append(child)
offset += child_strlen
continue
elif offset+child_strlen != split:
child_left, child_right = slice_node(child, split-offset)
left.append(child_left)
right.insert(0, child_right)
offset += child_strlen
return left, right
else:
raise ValueError('Cannot split {}'.format(repr(node)))
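# Hedged usage sketch (not part of the original module): split a docutils
# paragraph after its fifth character. Assumes `Text` above is docutils.nodes.Text.
import docutils.nodes

_para = docutils.nodes.paragraph(text='Hello world')
_left, _right = slice_node(_para, 5)
assert _left.astext() == 'Hello'
assert _right.astext() == ' world'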
| 5,351,906 |
def main(args):
"""
Use FindRouteLeaks and print leaks detected on stdout or save to file.
"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("pfx_file",
help="full path of file with prefixes data - "
"from prepare_data.prepare module")
parser.add_argument("cfl_file",
help="full path of file with conflicts data - "
"from prepare_data.prepare module")
parser.add_argument("--out", default=None,
help="full path of file where results will be saved - default stdout")
parser.add_argument("--pfx_peak_min_value", default=None, help="heuristics parameter")
parser.add_argument("--cfl_peak_min_value", default=None, help="heuristics parameter")
parser.add_argument("--max_nb_peaks", default=None, help="heuristics parameter")
parser.add_argument("--percent_similarity", default=None, help="heuristics parameter")
parser.add_argument("--percent_std", default=None, help="heuristics parameter")
parser.add_argument("--fit_params", action="store_true", default=False,
help="if specified, best params will be calculated (when not given) before "
"running the detection.")
args = parser.parse_args(args)
    if not os.path.isfile(args.pfx_file):
        raise AssertionError("Prefixes file %s not found" % args.pfx_file)
    if not os.path.isfile(args.cfl_file):
        raise AssertionError("Conflicts file %s not found" % args.cfl_file)
params = {}
if args.pfx_peak_min_value is not None:
params["pfx_peak_min_value"] = float(args.pfx_peak_min_value)
if args.cfl_peak_min_value is not None:
params["cfl_peak_min_value"] = float(args.cfl_peak_min_value)
if args.max_nb_peaks is not None:
params["max_nb_peaks"] = float(args.max_nb_peaks)
if args.percent_similarity is not None:
params["percent_similarity"] = float(args.percent_similarity)
if args.percent_std is not None:
params["percent_std"] = float(args.percent_std)
params["data_already_processed"] = True
if args.fit_params:
finder = FittedFindRouteLeaks(args.pfx_file, args.cfl_file, **params)
else:
finder = FindRouteLeaks(args.pfx_file, args.cfl_file, **params)
leaks = finder.get_route_leaks()
if args.out:
with open(args.out, "w") as f:
f.write(json.dumps(leaks))
else:
for elt in leaks:
            print(json.dumps({elt: leaks[elt]["leaks"]}))
| 5,351,907 |
async def geo(ctx, *, ip):
"""looks up an ip address"""
#above is the description for the command
#runs the command
try:
#gets ip address
ip_address = socket.gethostbyname(ip)
#sends the info about the ip
await ctx.send(await lookup_ip(ip_address))
    # sent if a socket error occurs, i.e. there is no such IP or domain
    except socket.gaierror:
        await ctx.send('There is no such IP or domain')
    # if some other kind of error occurs
    except Exception as e:
        await ctx.send('An error has occurred!')
        print(f'{e}\nAn error has occurred!')
| 5,351,908 |
def cascade_visibility_down(element, visibility_mode):
"""Sets visibility for all descendents of an element. (cascades down)."""
# Does nothing to given element.
# Find all related objects, and set them all to the appropriate visibility mode.
links = [rel.get_accessor_name() for rel in element._meta.get_all_related_objects()]
for link in links:
objects = getattr(element, link).all()
for object in objects:
try:
if visibility_mode == 'private':
if object.public:
object.public = False
object.save()
elif visibility_mode == 'public':
if not object.public:
object.public = True
object.save()
except Exception as e:
# Must not be a public/ private object
#print("Can't set object private:", object, e)
pass
# Check if this object has related objects, if so continue cascading.
if object._meta.get_all_related_objects():
cascade_visibility_down(object, visibility_mode)
| 5,351,909 |
def k_radius(x,centroids):
"""
Maximal distance between centroids and corresponding samples in partition
"""
labels = partition_labels(x,centroids)
radii = []
for idx in range(centroids.shape[0]):
mask = labels == idx
radii.append(
np.max(
np.linalg.norm(x[mask]-centroids[idx],axis=-1))
)
return np.asarray(radii)
| 5,351,910 |
def build_feature_df(data, default=True, custom_features={}):
"""
Computes the feature matrix for the dataset of components.
Args:
        data (dataset): A mapping of {ic_id: IC}. Compatible with the dataset representation produced by load_dataset().
        default (bool, optional): Determines whether to compute a standard selection of features for the dataset. Defaults to True.
        custom_features (dict, optional): A mapping of custom features that will be computed for the dataset.
            The format is {feature_name: compute_feature} where compute_feature is a function with the only argument IC. Defaults to {}.
    Returns:
        pd.DataFrame: The feature matrix for the dataset.
"""
feature_df = pd.DataFrame(index=data.keys())
    def get_iter():
        # The standard features are included when `default` is True; any
        # user-supplied custom features are always computed as well.
        if default:
            return chain(default_features.items(), custom_features.items())
        return iter(custom_features.items())
features = [feature_name for feature_name, _ in get_iter()]
idx = []
rows = []
for ic_id, ic in data.items():
row = []
idx.append(ic_id)
for feature_name, compute_feature in get_iter():
row.append(compute_feature(ic))
rows.append(row)
feature_df = pd.DataFrame(rows, index=idx, columns=features)
return feature_df
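# Hedged usage sketch (illustrative only; `dataset` and the `n_pins` attribute of
# IC objects are hypothetical, not taken from the original code):
#
#   custom = {"n_pins": lambda ic: ic.n_pins}
#   features = build_feature_df(dataset, default=True, custom_features=custom)
#   # -> DataFrame indexed by ic_id with the default features plus "n_pins"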
| 5,351,911 |
def update_uid_digests_cache(uid, digest):
"""
Updates uid_digest cache, also updates rd_digest and rd_digest_dict cache also.
"""
debug = False
try:
if debug:
            print('\n debug -- Entered update_uid_digests_cache...')
dump_dict(digest, debug)
# Get the cache; If cache exists, update the 'uid_digests' cache.
uid_digests = cache.get('uid_digests')
        if uid_digests:
            if uid in uid_digests:
                if debug: print('\n debug -- uid (%s) in uid_digests...' % uid)
uid_digests[uid] = digest
uid_digests_cache_update(uid_digests)
# Update rd_digests and rd_digests dict cache.
if not update_rd_digests_cache(uid):
message = '********* Failed to update rd_digests cache **********'
current_app.logger.info(message)
return
except Exception as err:
message = str(err)
current_app.logger.info(message)
return None
| 5,351,912 |
def decode_complex(data, complex_names=(None, None)):
""" Decodes possibly complex data read from an HDF5 file.
Decodes possibly complex datasets read from an HDF5 file. HDF5
doesn't have a native complex type, so they are stored as
H5T_COMPOUND types with fields such as 'r' and 'i' for the real and
imaginary parts. As there is no standardization for field names, the
field names have to be given explicitly, or the fieldnames in `data`
analyzed for proper decoding to figure out the names. A variety of
reasonably expected combinations of field names are checked and used
if available to decode. If decoding is not possible, it is returned
as is.
Parameters
----------
data : arraylike
The data read from an HDF5 file, that might be complex, to
decode into the proper Numpy complex type.
complex_names : tuple of 2 str and/or Nones, optional
``tuple`` of the names to use (in order) for the real and
imaginary fields. A ``None`` indicates that various common
field names should be tried.
Returns
-------
c : decoded data or data
If `data` can be decoded into a complex type, the decoded
complex version is returned. Otherwise, `data` is returned
unchanged.
See Also
--------
encode_complex
Notes
-----
Currently looks for real field names of ``('r', 're', 'real')`` and
imaginary field names of ``('i', 'im', 'imag', 'imaginary')``
ignoring case.
"""
# Now, complex types are stored in HDF5 files as an H5T_COMPOUND type
# with fields along the lines of ('r', 're', 'real') and ('i', 'im',
# 'imag', 'imaginary') for the real and imaginary parts, which most
# likely won't be properly extracted back into making a Python
# complex type unless the proper h5py configuration is set. Since we
# can't depend on it being set and adjusting it is hazardous (the
# setting is global), it is best to just decode it manually. These
# fields are obtained from the fields of its dtype. Obviously, if
# there are no fields, then there is nothing to do.
if data.dtype.fields is None:
return data
fields = list(data.dtype.fields)
# If there aren't exactly two fields, then it can't be complex.
if len(fields) != 2:
return data
# We need to grab the field names for the real and imaginary
# parts. This will be done by seeing which list, if any, each field
# is and setting variables to the proper name if it is in it (they
# are initialized to None so that we know if one isn't found).
real_fields = ['r', 're', 'real']
imag_fields = ['i', 'im', 'imag', 'imaginary']
cnames = list(complex_names)
for s in fields:
if s.lower() in real_fields:
cnames[0] = s
elif s.lower() in imag_fields:
cnames[1] = s
# If the real and imaginary fields were found, construct the complex
# form from the fields. This is done by finding the complex type
# that they cast to, making an array, and then setting the
# parts. Otherwise, return what we were given because it isn't in
# the right form.
if cnames[0] is not None and cnames[1] is not None:
cdata = np.result_type(data[cnames[0]].dtype, \
data[cnames[1]].dtype, 'complex64').type(data[cnames[0]])
cdata.imag = data[cnames[1]]
return cdata
else:
return data
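# Hedged usage sketch (not part of the original module): build a compound array
# the way HDF5 stores complex data and decode it back to a NumPy complex type.
import numpy as np

_raw = np.zeros(3, dtype=[('r', '<f8'), ('i', '<f8')])
_raw['r'] = [1.0, 2.0, 3.0]
_raw['i'] = [0.5, -0.5, 0.0]
_decoded = decode_complex(_raw)
assert _decoded.dtype == np.complex128
assert _decoded[1] == 2.0 - 0.5j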
| 5,351,913 |
def CLYH(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-05-29", **kwargs
) -> Graph:
"""Return CLYH graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "2020-05-29"
Version to retrieve
The available versions are:
- 2020-05-29
"""
return AutomaticallyRetrievedGraph(
"CLYH", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
| 5,351,914 |
def mock_exception_run_publish_pricing_update(
**kwargs):
"""mock_exception_run_publish_pricing_update
:param kwargs: keyword args dict
"""
raise Exception(
'test throwing mock_exception_run_publish_pricing_update')
| 5,351,915 |
def tif_to_array(
filename,
image_descriptions=False,
verbose=False,
):
"""Load a tif into memory and return it as a numpy array.
This is primarily a tool we use to interact with ImageJ, so that's
the only case it's really been debugged for. I bet somebody made
nice python bindings for LibTIFF, if you want a more general purpose
reader.
"""
ifds, endian = parse_tif(filename, verbose)
"""
Ensure that the various IFD's are consistent: same length, width,
bit depth, data format, etc.
Also check that our assumptions about other tags are true.
"""
width = ifds[0]['ImageWidth']
length = ifds[0]['ImageLength']
bit_depth = ifds[0]['BitsPerSample']
data_format = ifds[0].get('SampleFormat', 1) #Default to unsigned int
for d in ifds:
try:
assert width == d['ImageWidth']
assert length == d['ImageLength']
assert bit_depth == d['BitsPerSample']
assert data_format == d.get('SampleFormat', 1)
except AssertionError:
print("To load a TIF as a numpy array, the IFDs all have to match.")
print("IFD A:", ifds[0])
print("IFD B:", d)
raise UserWarning("The TIF we're trying to load has mismatched IFD's")
try:
assert d.get('SamplesPerPixel', 1) == 1
assert d.get('NewSubFileType', 0) == 0
assert d.get('Compression', 1) == 1
assert d.get('PhotometricInterpretation', 0) in (0, 1)
except AssertionError:
print("Offending IFD:", d)
raise UserWarning(
"The TIF we're trying to load" +
" uses options that np_tif doesn't support.")
"""
Collect the strip offsets and the strip byte counts
"""
strip_offsets = []
strip_byte_counts = []
for d in ifds:
try: #Just one strip per IFD
strip_offsets.append(int(d['StripOffsets']))
strip_byte_counts.append(int(d['StripByteCounts']))
except TypeError: #Many strips per IFD
strip_offsets.extend(int(x) for x in d['StripOffsets'])
strip_byte_counts.extend(int(x) for x in d['StripByteCounts'])
assert len(strip_offsets) == len(strip_byte_counts)
"""
Allocate our numpy array, and load data into our array from disk,
one strip at a time.
"""
data = np.zeros(sum(strip_byte_counts), dtype=np.ubyte)
data_offset = 0
with open(filename, 'rb') as f:
for i in range(len(strip_offsets)):
file_offset = strip_offsets[i]
num_bytes = strip_byte_counts[i]
data[data_offset:data_offset + num_bytes] = np.frombuffer(
get_bytes_from_file(f, file_offset, num_bytes),
dtype=np.ubyte)
data_offset += num_bytes
"""
Determine the numpy data type from the TIF bit depth and data
format, and reshape based on width, height, and number of ifd's:
"""
data_type = {
1: 'uint',
2: 'int',
3: 'float',
4: 'undefined',
}[data_format] + ascii(bit_depth)
try:
data_type = getattr(np, data_type)
except AttributeError:
raise UserWarning("Unsupported data format: " + data_type)
data = data.view(data_type)
if endian == 'big':
data = data.byteswap()
data = data.reshape(len(ifds), length, width)
"""
Optionally, return the image descriptions.
"""
if image_descriptions:
image_descriptions = [d.get('ImageDescription', '') for d in ifds]
for desc in image_descriptions:
if desc != image_descriptions[0]:
break
else:
image_descriptions = image_descriptions[0:1]
return data, image_descriptions
return data
| 5,351,916 |
def lineParPlot(parDict, FigAx=None, **kwargs):
"""
Plot the results of lineParameters().
Parameters
----------
parDict : dict
The relevant parameters:
xPerc : tuple, (xPerc1, xPerc2)
Left and right x-axis values of the line profile at perc% of the peak flux.
Xc : float
The center of x-axis value calculated at perc% of the peak flux.
Fperc : float
Fpeak * perc / 100.
FigAx : tuple (optional)
The tuple of (fig, ax) of the figure.
**kwargs : dict
The keywords for the plotting.
Returns
-------
FigAx : tuple
The tuple of (fig, ax) of the figure.
"""
if FigAx is None:
fig = plt.figure(figsize=(8, 4))
ax = plt.gca()
else:
fig, ax = FigAx
x1, x2 = parDict["xPerc"]
xc = parDict["Xc"]
yperc = parDict["Fperc"]
ax.axvline(x=x1, **kwargs)
kwargs["label"] = None
ax.axvline(x=x2, **kwargs)
ax.axhline(y=yperc, **kwargs)
kwargs["ls"] = "-"
ax.axvline(x=xc, **kwargs)
return (fig, ax)
| 5,351,917 |
def prepare_data_arrays(tr_df, te_df, target):
"""
tr_df: train dataset made by "prepare_dataset" function
te_df: test dataset made by "prepare_dataset" function
target: name of target y
return: (numpy array of train dataset),
(numpy array of test dataset: y will be filled with NaN),
(column ID of y)
"""
col_to_id = {k: v for v, k in enumerate(tr_df.columns)}
train_array = np.array(tr_df)
test_array = np.array(te_df)
target_id = col_to_id[target]
# fill target values with nan
test_array[:, target_id] = np.nan
return train_array, test_array, target_id
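# Hedged usage sketch (column names are illustrative, not from the original code):
# the target column of the returned test array comes back filled with NaN.
import numpy as np
import pandas as pd

_tr = pd.DataFrame({'x': [1.0, 2.0], 'y': [0.0, 1.0]})
_te = pd.DataFrame({'x': [3.0], 'y': [1.0]})
_train_arr, _test_arr, _y_id = prepare_data_arrays(_tr, _te, target='y')
assert np.isnan(_test_arr[0, _y_id])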
| 5,351,918 |
def read_input(files):
"""Print all FILES file names."""
for filename in files:
click.echo(filename)
| 5,351,919 |
def revoke_task(task):
    """
    Recursively revoke a task and all of its children.
    """
    if task.children:
        for child in task.children:
            revoke_task(child)
    # Terminate the task if it has not been executed yet
    # if not task.ready():
    #     task.revoke(terminate=True)
    try:
        task.revoke(terminate=True)
    except Exception:
        pass
| 5,351,920 |
def download_git_repo(repo: str):
"""
Download remote git repo
"""
local_filename = repo.split('/')[-1]
class CloneProgress(RemoteProgress):
def update(self, op_code, cur_count, max_count=None, message=''):
if message:
print(message)
td = tempfile.mkdtemp()
repo_local_path = os.path.join(td, local_filename)
git.Repo.clone_from(repo, repo_local_path,
branch='master', progress=CloneProgress(), depth=1)
return repo_local_path
| 5,351,921 |
def _logMe():
"""Add log entries to test the handler."""
logger.info("Solange Norwegen nicht untergeht,")
logger.warning("gilt hier warten, und weiter rufen:")
logger.debug("LAND IN SICHT!")
logger.error("Du kannst nicht bleiben, kannst nicht gehen,")
logger.critical("dich nicht ertragen, gerade, wenn mir die Worte fehlen.")
| 5,351,922 |
def export_model(model, clf_path = '../models/disaster_response_clf.pkl'):
"""
Function: save model as pickle file
Args:
model (GridSearch obj): trained and tuned classifier model
clf_path (str): path of pickle file destination
Return:
None
"""
with open(clf_path, 'wb') as f:
pickle.dump(model, f)
| 5,351,923 |
def unique_bytestring_gen():
"""Generates unique sequences of bytes.
"""
characters = (b"abcdefghijklmnopqrstuvwxyz"
b"0123456789")
characters = [characters[i:i + 1] for i in irange(len(characters))]
rng = random.Random()
while True:
letters = [rng.choice(characters) for i in irange(10)]
yield b''.join(letters)
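# Hedged usage sketch (assumes the module-level `irange` helper behaves like
# `range`, as in the original package's compatibility shim).
_gen = unique_bytestring_gen()
_name = next(_gen)
assert isinstance(_name, bytes) and len(_name) == 10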
| 5,351,924 |
def move_vvol_shadow_vm_from_aggr_to_aggr(ds_info, aggr1, aggr2, lun, vm):
"""
suggest a move of a vm from one na aggr to another and adjust aggr and ds usage values accordingly
"""
# IMPORTANT: we can only keep track about the state for the aggr and the ds
# and not for the fvol as we do not know onto which fvol the na
# will move the lun
# remove vm from source aggr
source_aggr_usage_before = aggr1.usage
aggr1.remove_shadow_vm_lun(lun)
source_aggr_usage_after = aggr1.usage
# add the vm to the target aggr
target_aggr_usage_before = aggr2.usage
aggr2.add_shadow_vm_lun(lun)
target_aggr_usage_after = aggr2.usage
# get the vc ds names based on the na aggr names
ds1 = ds_info.get_by_name(aggr_name_to_ds_name(aggr1.host, aggr1.name))
ds2 = ds_info.get_by_name(aggr_name_to_ds_name(aggr2.host, aggr2.name))
# check that both really exist in the vcenter
if not ds1:
log.warning("- WARN - the aggr {} seems to be not connected in the vc (no ds)".format(aggr1.name))
return
if not ds2:
log.warning("- WARN - the aggr {} seems to be not connected in the vc (no ds)".format(aggr2.name))
return
# remove vm from source ds
source_ds_usage_before = ds1.usage
ds1.remove_shadow_vm(vm)
source_ds_usage_after = ds1.usage
# add the vm to the target ds
target_ds_usage_before = ds2.usage
ds2.add_shadow_vm(vm)
target_ds_usage_after = ds2.usage
# for now just print out the move . later: do the actual move
log.info(
"- INFO - move vm {} ({:.0f}G) from aggr {} to aggr {}".format(vm.name, lun.used / 1024**3, aggr1.name, aggr2.name))
log.info(
"- INFO - move vm {} ({:.0f}G) from ds {} to ds {}".format(vm.name, vm.get_total_disksize() / 1024**3, ds1.name, ds2.name))
log.info(
"- INFO - source aggr: {:.1f}% -> {:.1f}% target aggr: {:.1f}% -> {:.1f}%".format(source_aggr_usage_before, source_aggr_usage_after, target_aggr_usage_before, target_aggr_usage_after))
log.info(
"- INFO - source ds: {:.1f}% -> {:.1f}% target ds: {:.1f}% -> {:.1f}%".format(source_ds_usage_before, source_ds_usage_after, target_ds_usage_before, target_ds_usage_after))
log.info("- CMND - svmotion_cinder_v2.py {} {}".format(vm.name, ds2.name))
| 5,351,925 |
def plot(model_set, actual_mdot=True, qnuc=0.0, verbose=True, ls='-', offset=True,
bprops=('rate', 'fluence', 'peak'), display=True, grid_version=0):
"""Plot predefined set of mesa model comparisons
model_set : int
ID for set of models (defined below)
"""
mesa_info = get_mesa_set(model_set)
if actual_mdot:
mdots = mesa_info['mdots_actual']
else:
mdots = mesa_info['mdots']
mesa_info['params']['qnuc'] = qnuc
fig, ax = plot_compare(mesa_runs=mesa_info['runs'], display=display,
mesa_mdots=mdots, bprops=bprops,
params=mesa_info['params'], verbose=verbose,
grid_version=grid_version, ls=ls, offset=offset)
return fig, ax
| 5,351,926 |
def test_cliargs_0(tmp_path_):
"""Test default parameters."""
parser = mtscomp_parser()
args = ['somefile']
pargs, config = _args_to_config(parser, args)
assert config.algorithm == 'zlib'
assert config.check_after_compress
assert config.check_after_decompress
assert config.do_time_diff
assert not config.do_spatial_diff
pargs, config = _args_to_config(parser, args + ['-p 3'])
assert config.n_threads == 3
assert config.check_after_compress
assert config.check_after_decompress
pargs, config = _args_to_config(parser, args + ['-c 2', '-s 10000', '-n 123', '-d uint8'])
assert config.chunk_duration == 2
assert config.sample_rate == 10000
assert config.n_channels == 123
assert config.dtype == 'uint8'
assert config.check_after_compress
assert config.check_after_decompress
assert not pargs.debug
pargs, config = _args_to_config(parser, args + ['-c 2', '-nc', '--debug'])
assert not config.check_after_compress
assert config.check_after_decompress
assert pargs.debug
| 5,351,927 |
def main():
"""Main documentation builder script."""
parser = ArgumentParser(
description="build GGRC documentation",
)
parser.add_argument(
'-c', '--clean',
action='store_true',
default=False,
help='clean cache before build',
dest='clean',
)
parser.add_argument(
'-s', '--strict',
action='store_true',
default=False,
help='treat warnings as errors',
dest='strict',
)
args = parser.parse_args()
docs_src = os.path.join(DOCS_DIR, 'source')
docs_build = os.path.join(DOCS_DIR, 'build')
builder.build('API', os.path.join(docs_src, 'api'))
if args.clean:
shutil.rmtree(docs_build, ignore_errors=True)
if not os.path.isdir(docs_build):
os.mkdir(docs_build)
sphinx = Sphinx(
srcdir=docs_src,
confdir=docs_src,
outdir=os.path.join(docs_build, 'html'),
doctreedir=os.path.join(docs_build, 'doctrees'),
buildername='html',
warningiserror=args.strict,
)
sphinx.build()
return sphinx.statuscode
| 5,351,928 |
def preprocessing_fn(batch):
"""
Standardize, then normalize sound clips
"""
processed_batch = []
for clip in batch:
signal = clip.astype(np.float64)
# Signal normalization
signal = signal / np.max(np.abs(signal))
# get pseudorandom chunk of fixed length (from SincNet's create_batches_rnd)
signal_length = len(signal)
if signal_length < WINDOW_LENGTH:
signal = np.concatenate((signal, np.zeros(WINDOW_LENGTH-signal_length)))
else:
np.random.seed(signal_length)
signal_start = np.random.randint(0, signal_length-WINDOW_LENGTH)
signal_stop = signal_start + WINDOW_LENGTH
signal = signal[signal_start:signal_stop]
processed_batch.append(signal)
return np.array(processed_batch)
| 5,351,929 |
def Py_Main(space, argc, argv):
"""The main program for the standard interpreter. This is made available for
programs which embed Python. The argc and argv parameters should be
prepared exactly as those which are passed to a C program's main()
function (converted to wchar_t according to the user's locale). It is
important to note that the argument list may be modified (but the contents of
the strings pointed to by the argument list are not). The return value will
be 0 if the interpreter exits normally (i.e., without an exception),
1 if the interpreter exits due to an exception, or 2 if the parameter
list does not represent a valid Python command line.
Note that if an otherwise unhandled SystemExit is raised, this
function will not return 1, but exit the process, as long as
Py_InspectFlag is not set."""
raise NotImplementedError
| 5,351,930 |
def generate_random_bond_list(atom_count, bond_count, seed=0):
"""
Generate a random :class:`BondList`.
"""
np.random.seed(seed)
# Create random bonds between atoms of
# a potential atom array of length ATOM_COUNT
bonds = np.random.randint(atom_count, size=(bond_count, 3))
# Clip bond types to allowed BondType values
bonds[:, 2] %= len(struc.BondType)
# Remove bonds of atoms to itself
bonds = bonds[bonds[:,0] != bonds[:,1]]
assert len(bonds) > 0
return struc.BondList(atom_count, bonds)
| 5,351,931 |
def open_cosmos_files():
"""
This function opens files related to the COSMOS field.
    Returns:
        The ZFIRE COSMOS master table; the ZFOURGE photometric, EAZY and FAST
        catalogues; rest-frame U, V, J colours for the spec-z and photo-z
        samples; U-V and V-J colours from Lee's catalogue; UV+IR SFRs; the
        MOSDEF and VUDS cross-match tables and VUDS extra info; and the
        ZFIRE+MOSDEF and ZFIRE+VUDS rest-frame colours (22 tables in total,
        in the order of the return statement).
"""
COSMOS_mastertable = pd.read_csv('data/zfire/zfire_cosmos_master_table_dr1.1.csv',index_col='Nameobj')
ZF_cat = ascii.read('data/zfourge/spitler2014/cosmos.v0.10.7.a.cat')
ZF_EAZY = ascii.read('data/zfourge/spitler2014/cosmos.v0.10.7.a.zout')
ZF_FAST = ascii.read('data/zfourge/spitler2014/cosmos.v0.10.7.a.fout')
#load in colours using spec-z
#only ZFIRE
U_spec = ascii.read('data/zfourge/uvj/specz_zfire/cosmos.v0.10.7.a.153.rf')
V_spec = ascii.read('data/zfourge/uvj/specz_zfire/cosmos.v0.10.7.a.155.rf')
J_spec = ascii.read('data/zfourge/uvj/specz_zfire/cosmos.v0.10.7.a.161.rf')
#load in colours using photo-z
U_photo = ascii.read('data/zfourge/uvj/photoz/cosmos.v0.10.7.a.153.rf')
V_photo = ascii.read('data/zfourge/uvj/photoz/cosmos.v0.10.7.a.155.rf')
J_photo = ascii.read('data/zfourge/uvj/photoz/cosmos.v0.10.7.a.161.rf')
#galaxy colours derived by Lee's catalogue
#This uses the older EAZY method of fitting colours
UV_lee = ascii.read('data/zfourge/spitler2014/cosmos.v0.10.7.a.153-155.rf')
VJ_lee = ascii.read('data/zfourge/spitler2014/cosmos.v0.10.7.a.155-161.rf')
UV_IR_SFRs = ascii.read('data/zfourge/sfrs/cosmos.sfr.v0.5.cat')
MOSDEF_ZFOURGE = ascii.read('data/catalogue_crossmatch/MOSDEF_COSMOS.dat')
#ZFIRE and MOSDEF colours
U_ZM = ascii.read('data/zfourge/uvj/specz_zfire_mosdef/cosmos.v0.10.7.a.153.rf')
V_ZM = ascii.read('data/zfourge/uvj/specz_zfire_mosdef/cosmos.v0.10.7.a.155.rf')
J_ZM = ascii.read('data/zfourge/uvj/specz_zfire_mosdef/cosmos.v0.10.7.a.161.rf')
VUDS_ZFOURGE = ascii.read('data/catalogue_crossmatch/VUDS_COSMOS.dat')
VUDS_extra = ascii.read('data/vuds/cesam_vuds_spectra_dr1_cosmos_catalog_additional_info.txt')
#ZFIRE and VUDS colours
U_ZV = ascii.read('data/zfourge/uvj/specz_vuds/cosmos.v0.10.7.a.153.rf')
V_ZV = ascii.read('data/zfourge/uvj/specz_vuds/cosmos.v0.10.7.a.155.rf')
J_ZV = ascii.read('data/zfourge/uvj/specz_vuds/cosmos.v0.10.7.a.161.rf')
return COSMOS_mastertable, ZF_cat, ZF_EAZY, ZF_FAST, U_spec, V_spec, J_spec,\
U_photo, V_photo, J_photo, UV_lee, VJ_lee, UV_IR_SFRs, MOSDEF_ZFOURGE,\
U_ZM,V_ZM, J_ZM, VUDS_ZFOURGE, VUDS_extra, U_ZV, V_ZV, J_ZV
| 5,351,932 |
def convert_image_points_to_points(image_positions, distances):
"""Convert image points to 3d points.
    Returns:
        positions: (n, 3) array of 3d positions, one row per image point.
"""
hypotenuse_small = numpy.sqrt(
image_positions[:, 0]**2 +
image_positions[:, 1]**2 + 1.0)
ratio = distances / hypotenuse_small
n = image_positions.shape[0]
positions = numpy.zeros([n, 3])
positions[:, 0] = -image_positions[:, 0] * ratio
positions[:, 1] = ratio
positions[:, 2] = -image_positions[:, 1] * ratio
return positions
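# Hedged usage sketch (not part of the original module): map two image-plane
# points at unit range into 3-D coordinates.
import numpy

_image_pts = numpy.array([[0.0, 0.0], [1.0, 1.0]])
_distances = numpy.array([1.0, 1.0])
_points = convert_image_points_to_points(_image_pts, _distances)
assert _points.shape == (2, 3)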
| 5,351,933 |
def W(i, j):
"""The Wilson functions.
:func:`W` corresponds to formula (2) on page 16 in `the technical paper`_
defined as:
.. math::
W(t, u_j)= \\
e^{-UFR\cdot (t+u_j)}\cdot \\
\left\{ \\
\\alpha\cdot\min(t, u_j) \\
-0.5\cdot e^{-\\alpha\cdot\max(t, u_j)}\cdot( \\
e^{\\alpha\cdot\min(t, u_j)} \\
-e^{-\\alpha\cdot\min(t, u_j)} \\
) \\
\\right\}
where :math:`t = u_i`.
Args:
i(int): Time index (1, 2, ..., :attr:`N`)
j(int): Time index (1, 2, ..., :attr:`N`)
"""
t = u[i]
uj = u[j]
return exp(-UFR * (t+uj)) * (
alpha * min(t, uj) - 0.5 * exp(-alpha * max(t, uj)) * (
exp(alpha*min(t, uj)) - exp(-alpha*min(t, uj))
))
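# Hedged usage sketch (comment only): W() reads module-level Smith-Wilson inputs
# such as `u`, `UFR` and `alpha`, so realistic values would be needed, e.g.:
#
#   u = [1, 2, 3]              # hypothetical maturities
#   UFR, alpha = 0.036, 0.128295
#   w_12 = W(0, 1)             # W(u_1, u_2), assuming 0-based indexing into `u`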
| 5,351,934 |
def set_boot_flag(dev_path, use_mbr=False):
"""Set modern or legacy boot flag."""
cmd = [
'sudo',
'parted', dev_path,
'set', '1',
'boot' if use_mbr else 'legacy_boot',
'on',
]
run_program(cmd)
| 5,351,935 |
def get_id(ctx):
"""
Get METS id if any
"""
workspace = Workspace(ctx.resolver, directory=ctx.directory, mets_basename=basename(ctx.mets_url))
ID = workspace.mets.unique_identifier
if ID:
print(ID)
| 5,351,936 |
def reddening_fm(wave, ebv=None, a_v=None, r_v=3.1, model='f99'):
"""Determines a Fitzpatrick & Massa reddening curve.
Parameters
----------
wave: ~numpy.ndarray
wavelength in Angstroms
ebv: float
E(B-V) differential extinction; specify either this or a_v.
a_v: float
A(V) extinction; specify either this or ebv.
r_v: float, optional
defaults to standard Milky Way average of 3.1
model: {'f99', 'fm07'}, optional
* 'f99' is the default Fitzpatrick (1999) [1]_
* 'fm07' is Fitzpatrick & Massa (2007) [2]_. Currently not R dependent.
Returns
-------
reddening_curve: ~numpy.ndarray
Multiply to deredden flux, divide to redden.
Notes
-----
Uses Fitzpatrick (1999) [1]_ by default, which relies on the UV
parametrization of Fitzpatrick & Massa (1990) [2]_ and spline fitting in the
optical and IR. This function is defined from 910 A to 6 microns, but note
the claimed validity goes down only to 1150 A. The optical spline points are
not taken from F99 Table 4, but rather updated versions from E. Fitzpatrick
(this matches the Goddard IDL astrolib routine FM_UNRED).
The fm07 model uses the Fitzpatrick & Massa (2007) [3]_ parametrization,
which has a slightly different functional form. That paper claims it
    preferable, although it is unclear if significantly (Gordon et al. 2009)
[4]_. It is not the literature standard, so not default here.
References
----------
[1] Fitzpatrick, E. L. 1999, PASP, 111, 63
    [2] Fitzpatrick, E. L. & Massa, D. 1990, ApJS, 72, 163
    [3] Fitzpatrick, E. L. & Massa, D. 2007, ApJ, 663, 320
[4] Gordon, K. D., Cartledge, S., & Clayton, G. C. 2009, ApJ, 705, 1320
"""
from scipy.interpolate import interp1d
model = model.lower()
if model not in ['f99','fm07']:
raise ValueError('model must be f99 or fm07')
if (a_v is None) and (ebv is None):
raise ValueError('Must specify either a_v or ebv')
if (a_v is not None) and (ebv is not None):
raise ValueError('Cannot specify both a_v and ebv')
if a_v is not None:
ebv = a_v / r_v
if model == 'fm07':
raise ValueError('TEMPORARY: fm07 currently not properly R dependent')
x = 1e4 / wave # inverse microns
k = np.zeros(x.size)
if any(x < 0.167) or any(x > 11):
raise ValueError('fm_dered valid only for wavelengths from 910 A to '+
'6 microns')
# UV region
uvsplit = 10000. / 2700. # Turn 2700A split into inverse microns.
uv_region = (x >= uvsplit)
y = x[uv_region]
k_uv = np.zeros(y.size)
# Fitzpatrick (1999) model
if model == 'f99':
x0, gamma = 4.596, 0.99
c3, c4 = 3.23, 0.41
c2 = -0.824 + 4.717 / r_v
c1 = 2.030 - 3.007 * c2
D = y**2 / ((y**2-x0**2)**2 + y**2 * gamma**2)
F = np.zeros(y.size)
valid = (y >= 5.9)
F[valid] = 0.5392 * (y[valid]-5.9)**2 + 0.05644 * (y[valid]-5.9)**3
k_uv = c1 + c2*y + c3*D + c4*F
# Fitzpatrick & Massa (2007) model
if model == 'fm07':
x0, gamma = 4.592, 0.922
c1, c2, c3, c4, c5 = -0.175, 0.807, 2.991, 0.319, 6.097
D = y**2 / ((y**2-x0**2)**2 + y**2 * gamma**2)
valid = (y <= c5)
k_uv[valid] = c1 + c2*y[valid] + c3*D[valid]
valid = (y > c5)
k_uv[valid] = c1 + c2*y[valid] + c3*D[valid] + c4*(y[valid]-c5)**2
k[uv_region] = k_uv
# Calculate values for UV spline points to anchor OIR fit
x_uv_spline = 10000. / np.array([2700., 2600.])
D = x_uv_spline**2 / ((x_uv_spline**2-x0**2)**2 + x_uv_spline**2 * gamma**2)
k_uv_spline = c1 + c2*x_uv_spline +c3*D
# Optical / IR
OIR_region = (x < uvsplit)
y = x[OIR_region]
k_OIR = np.zeros(y.size)
# Fitzpatrick (1999) model
if model == 'f99':
# The OIR anchors are up from IDL astrolib, not F99.
anchors_extinction = np.array([0, 0.26469*r_v/3.1, 0.82925*r_v/3.1, # IR
-0.422809 + 1.00270*r_v + 2.13572e-04*r_v**2, # optical
-5.13540e-02 + 1.00216*r_v - 7.35778e-05*r_v**2,
0.700127 + 1.00184*r_v - 3.32598e-05*r_v**2,
(1.19456 + 1.01707*r_v - 5.46959e-03*r_v**2 + 7.97809e-04*r_v**3 +
-4.45636e-05*r_v**4)])
anchors_k = np.append(anchors_extinction-r_v, k_uv_spline)
# Note that interp1d requires that the input abscissa is monotonically
# _increasing_. This is opposite the usual ordering of a spectrum, but
# fortunately the _output_ abscissa does not have the same requirement.
anchors_x = 1e4 / np.array([26500., 12200., 6000., 5470., 4670., 4110.])
anchors_x = np.append(0., anchors_x) # For well-behaved spline.
anchors_x = np.append(anchors_x, x_uv_spline)
OIR_spline = interp1d(anchors_x, anchors_k, kind='cubic')
k_OIR = OIR_spline(y)
# Fitzpatrick & Massa (2007) model
if model == 'fm07':
anchors_k_opt = np.array([0., 1.322, 2.055])
IR_wave = np.array([float('inf'), 4., 2., 1.333, 1.])
anchors_k_IR = (-0.83 + 0.63*r_v) * IR_wave**-1.84 - r_v
anchors_k = np.append(anchors_k_IR, anchors_k_opt)
anchors_k = np.append(anchors_k, k_uv_spline)
anchors_x = np.array([0., 0.25, 0.50, 0.75, 1.]) # IR
opt_x = 1e4 / np.array([5530., 4000., 3300.]) # optical
anchors_x = np.append(anchors_x, opt_x)
anchors_x = np.append(anchors_x, x_uv_spline)
OIR_spline = interp1d(anchors_x, anchors_k, kind='cubic')
k_OIR = OIR_spline(y)
k[OIR_region] = k_OIR
reddening_curve = 10**(0.4 * ebv * (k+r_v))
return reddening_curve
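# Hedged usage sketch (not from the original module): deredden a flat optical
# spectrum for E(B-V) = 0.1 with the default Fitzpatrick (1999) curve. Assumes
# numpy is imported as np at module level, as the function itself requires.
import numpy as np

_wave = np.linspace(4000.0, 7000.0, 50)   # Angstroms
_flux = np.ones_like(_wave)
_curve = reddening_fm(_wave, ebv=0.1)
_dereddened = _flux * _curve              # multiply to deredden, divide to redden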
| 5,351,937 |
def cross_entropy(Y, P):
"""A function that takes as input two lists Y, P,
and returns the float corresponding to their cross-entropy.
"""
| 5,351,938 |
def test_cummax_multi_dims(data_type):
"""
Feature: Op Cummax
Description: test Cummax operator with multiple dimension.
Expectation: the result match expectation.
"""
op = "Cummax"
axis = 1
x = [[[6, 11, 4, 9, 15], [1, 2, 14, 13, 15], [15, 10, 6, 13, 6], [9, 4, 11, 10, 11]],
[[5, 1, 5, 13, 7], [19, 4, 14, 11, 14], [5, 15, 6, 20, 0], [6, 2, 4, 15, 16]],
[[17, 4, 16, 13, 3], [15, 15, 14, 9, 13], [11, 0, 2, 19, 17], [20, 18, 13, 15, 17]]]
cummax_output = ([[[6, 11, 4, 9, 15], [6, 11, 14, 13, 15], [15, 11, 14, 13, 15], [15, 11, 14, 13, 15]],
[[5, 1, 5, 13, 7], [19, 4, 14, 13, 14], [19, 15, 14, 20, 14], [19, 15, 14, 20, 16]],
[[17, 4, 16, 13, 3], [17, 15, 16, 13, 13], [17, 15, 16, 19, 17], [20, 18, 16, 19, 17]]],
[[[0, 0, 0, 0, 0], [0, 0, 1, 1, 1], [2, 0, 1, 2, 1], [2, 0, 1, 2, 1]],
[[0, 0, 0, 0, 0], [1, 1, 1, 0, 1], [1, 2, 1, 2, 1], [1, 2, 1, 2, 3]],
[[0, 0, 0, 0, 0], [0, 1, 0, 0, 1], [0, 1, 0, 2, 2], [3, 3, 0, 2, 3]]])
cum_minmax_compare(op, x, cummax_output, axis, data_type)
| 5,351,939 |
def render_web_page(file_directory, ihl_config_file_path, current_date):
"""
    Write one html file for each IHL.
    Checks all the IHL names and file paths, and loads the configuration from
    ihlconfig.json, which contains details of the IHLs.
    :param file_directory: directory where the html files are written
    :param ihl_config_file_path: path to the ihlconfig.json file
    :param current_date: datetime.date
    :return: None
"""
first_date = datetime.date(day=1, month=current_date.month, year=current_date.year)
if current_date.month == 12:
last_date = datetime.date(day=31, month=current_date.month, year=current_date.year)
else:
last_date = datetime.date(day=1, month=current_date.month + 1, year=current_date.year) - datetime.timedelta(1)
month_words = current_date.strftime('%b')
year = current_date.strftime('%Y')
ihl_configuration = json.load(open(ihl_config_file_path))
ihl_names = [ihl.upper() for ihl in ihl_configuration if ihl != 'etlr']
print(ihl_names)
for ihl_name in ihl_names:
ihl_name_lower_case = ihl_name.lower()
with open(os.path.join(file_directory, '{}.html'.format(ihl_name_lower_case)), 'w') as html_out:
html_out.write(ihl_utilisation_web_page_template(ihl_name, month_words, year, first_date, last_date))
print("Finished writing html files for all IHLs!")
| 5,351,940 |
def list_registered_stateful_ops_without_inputs():
"""Returns set of registered stateful ops that do not expect inputs.
This list is used to identify the ops to be included in the state-graph and
that are subsequently fed into the apply-graphs.
Returns:
A set of strings.
"""
return set([
name
for name, op in op_def_registry.get_registered_ops().items()
if op.is_stateful and not op.input_arg
])
| 5,351,941 |
def load_frame_from_video(path: str, frame_index: int) -> np.ndarray:
"""load a full trajectory video file and return a single frame from it"""
vid = load_video(path)
img = vid[frame_index]
return img
| 5,351,942 |
def get_settings_patterns(project_id: int) -> Dict[str, str]:
"""Returning project patterns settings"""
track_patterns: List[Dict[str, str]] = ProjectSettings.objects.get(project_id=project_id).trackPatterns
return {pattern['pattern']: pattern['regex'] for pattern in track_patterns}
| 5,351,943 |
def video_to_array(filepath):
"""Process the video into an array."""
cap = cv2.VideoCapture(filepath)
num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
channel = 3
frame_buffer = np.empty((num_frames, height, width, channel), dtype=np.float32)
frame_num = 0
returned = True
while (frame_num < num_frames and returned):
returned, frame = cap.read()
if frame is not None:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = frame.astype(np.float32)
frame = frame / 255.0
if np.sum(frame) > 0.0:
frame_buffer[frame_num] = frame
frame_num += 1
cap.release()
return frame_buffer
| 5,351,944 |
def test__Ticker_history(requests_mock):
"""Ticker instance history."""
COLUMNS = ["Open", "High", "Low", "Close", "Volume"]
tid = "_yf_history_IBM"
resp, data, params = fetchTestData(yf.endpoints.responses.bundle.responses, tid)
# params = {'period': 'max', 'auto_adjust': True, 'back_adjust': False}
params = {
"period": "max",
"auto_adjust": False,
"back_adjust": True,
"actions": True,
}
tid = "_je_history_backadjust"
resp = fetchFullResponse(tid)
# now hack the class, not the instance!
setattr(yf.endpoints.History, "DOMAIN", API_URL)
r = yf.endpoints.History("IBM", params=params)
rawdata = fetchRawData("yahoo_history.raw")
requests_mock.register_uri("GET", "{}/{}".format(API_URL, r), text=rawdata)
ticker = yf.Ticker("IBM")
respDF = pd.DataFrame(resp["ohlcdata"]).set_index("timestamp")
respDF.drop(columns=["adjclose"], inplace=True)
respDF.index = pd.to_datetime(respDF.index, unit="s")
# rename open->Open etc, fabricate the columns dict
respDF = respDF.rename(columns=dict(zip([c.lower() for c in COLUMNS], COLUMNS)))
respDF = respDF[COLUMNS]
TH = ticker.history(**params)
TH = TH[COLUMNS]
assert TH.equals(respDF)
tid = "_je_history_backadjust"
resp = fetchFullResponse(tid)
assert (
sorted(list(ticker.splits.values))
== sorted([s["numerator"] / s["denominator"] for s in resp["splits"]])
and sorted(list(ticker.dividends.values))
== sorted([d["amount"] for d in resp["dividends"]])
and ticker.actions.to_json()
== pd.DataFrame(pd.concat([ticker.dividends, ticker.splits], axis=1))
.replace(np.NaN, 0.0)
.to_json()
)
| 5,351,945 |
async def test_10_request(requests_mock: Mock) -> None:
"""Test `async request()`."""
result = {"result": "the result"}
rpc = RestClient("http://test", "passkey", timeout=0.1)
def response(req: PreparedRequest, ctx: object) -> bytes: # pylint: disable=W0613
assert req.body is not None
_ = json_decode(req.body)
return json_encode(result).encode("utf-8")
requests_mock.post("/test", content=response)
ret = await rpc.request("POST", "test", {})
assert requests_mock.called
auth_parts = requests_mock.last_request.headers['Authorization'].split(' ', 1)
assert auth_parts[0].lower() == 'bearer'
assert auth_parts[1] == 'passkey'
assert ret == result
result2 = {"result2": "the result 2"}
def response2(req: PreparedRequest, ctx: object) -> bytes: # pylint: disable=W0613
assert req.body is not None
_ = json_decode(req.body)
return json_encode(result2).encode("utf-8")
requests_mock.post("/test2", content=response2)
ret = await rpc.request("POST", "/test2")
assert requests_mock.called
assert ret == result2
| 5,351,946 |
def ncvue(ncfile='', miss=np.nan):
"""
The main function to start the data frame GUI.
Parameters
----------
ncfile : str, optional
Name of netcdf file (default: '').
miss : float, optional
Add value to list of missing values: _FillValue, missing_value,
and the standard netCDF missing value for current datatype from
netcdf4.default_fillvals (default: np.nan).
"""
ios = platform.system() # Windows, Darwin, Linux
if ios == 'Windows':
# make Windows aware of high resolution displays
# https://stackoverflow.com/questions/41315873/attempting-to-resolve-blurred-tkinter-text-scaling-on-windows-10-high-dpi-disp
from ctypes import windll
windll.shcore.SetProcessDpiAwareness(1)
# Pyinstaller sets _MEIPASS if macOS app
bundle_dir = getattr(sys, '_MEIPASS',
os.path.abspath(os.path.dirname(__file__)))
top = tk.Tk()
top.withdraw()
# top.option_add("*Font", "Helvetica 10")
# Check light/dark mode
# https://stackoverflow.com/questions/65294987/detect-os-dark-mode-in-python
# style = ttk.Style()
# print(style.theme_names(), style.theme_use())
if ios == 'Darwin':
theme = 'aqua'
style = ttk.Style()
style.theme_use(theme)
elif ios == 'Windows':
top.option_add("*Font", "Helvetica 10")
plt.rc('font', size=13)
# standard Windows themes
# ('winnative', 'clam', 'alt', 'default', 'classic', 'vista', 'xpnative')
# theme = 'vista'
# style = ttk.Style()
# style.theme_use(theme)
# style packages
# Download from https://sourceforge.net/projects/tcl-awthemes/
# top.tk.call('lappend', 'auto_path',
# bundle_dir + '/themes/awthemes-10.3.2')
# theme = 'awdark' # 'awlight', 'awdark'
# top.tk.call('package', 'require', theme)
# style = ttk.Style()
# style.theme_use(theme)
# single file styles
# 'azure' and 'azure-dark' v1.x, 'Breeze'
# top.tk.call('source', bundle_dir + '/themes/breeze/breeze.tcl')
# theme = 'Breeze'
# top.tk.call('source', bundle_dir + '/themes/azure-1.3/azure.tcl')
# theme = 'azure'
# top.tk.call('source', bundle_dir + '/themes/azure-1.3/azure-dark.tcl')
# theme = 'azure-dark'
# style = ttk.Style()
# style.theme_use(theme)
# 'azure' v2.x, 'sun-valley', 'forest' of rdbende
top.tk.call('source', bundle_dir + '/themes/azure-2.0/azure.tcl')
# top.tk.call('source', bundle_dir + '/themes/sun-valley-1.0/sun-valley.tcl')
theme = 'light' # light, dark
top.tk.call("set_theme", theme)
elif ios == 'Linux':
# standard Linux schemes
# theme = 'clam' # 'clam', 'alt', 'default', 'classic'
# style = ttk.Style()
# style.theme_use(theme)
# 'azure' v2.x, 'sun-valley', 'forest' of rdbende
top.tk.call('source', bundle_dir + '/themes/azure-2.0/azure.tcl')
theme = 'light' # light, dark
top.tk.call("set_theme", theme)
# set titlebar and taskbar icon only if "standalone",
# i.e. not ipython or jupyter
try:
whichpy = get_ipython().__class__.__name__
except NameError:
whichpy = ''
if not whichpy:
icon = tk.PhotoImage(file=bundle_dir + '/images/ncvue_icon.png')
top.iconphoto(True, icon) # True: apply to all future toplevels
else:
icon = None
root = tk.Toplevel()
root.name = 'ncvOne'
root.title("ncvue " + ncfile)
root.geometry('1000x800+100+100')
# Connect netcdf file and extracted information to top
top.os = ios # operating system
top.theme = theme # current theme
top.icon = icon # app icon
top.fi = ncfile # file name or file handle
top.miss = miss # extra missing value
top.dunlim = '' # name of unlimited dimension
top.time = None # datetime variable
top.tname = '' # datetime variable name
top.tvar = '' # datetime variable name in netcdf file
top.dtime = None # decimal year
top.latvar = '' # name of latitude variable
top.lonvar = '' # name of longitude variable
top.latdim = '' # name of latitude dimension
top.londim = '' # name of longitude dimension
top.maxdim = 1 # maximum number of dimensions of all variables
# > 0 so that dimension spinboxes present
top.cols = [] # variable list
if ncfile:
top.fi = nc.Dataset(ncfile, 'r')
# Analyse netcdf file
analyse_netcdf(top)
root.top = top
def on_closing():
if top.fi:
top.fi.close()
top.quit()
top.destroy()
root.protocol("WM_DELETE_WINDOW", on_closing)
# 1st plotting window
main_frame = ncvMain(root)
top.mainloop()
| 5,351,947 |
def entmax15(X, axis=-1, k=None):
"""1.5-entmax: normalizing sparse transform (a la softmax).
Solves the optimization problem:
max_p <x, p> - H_1.5(p) s.t. p >= 0, sum(p) == 1.
where H_1.5(p) is the Tsallis alpha-entropy with alpha=1.5.
Parameters
----------
X : paddle.Tensor
The input tensor.
    axis : int
        The dimension along which to apply 1.5-entmax; must be the last axis.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
Returns
-------
P : paddle tensor, same shape as X
The projection result, such that P.sum(axis=axis) == 1 elementwise.
"""
assert axis in [-1, X.ndim - 1]
return Entmax15Function.apply(X, axis, k)
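# Hedged usage sketch (requires PaddlePaddle and the module's Entmax15Function):
# each row of the result is a sparse probability distribution summing to one.
import paddle

_logits = paddle.to_tensor([[0.1, 1.5, 3.0, -2.0]])
_probs = entmax15(_logits, axis=-1)
assert abs(float(_probs.sum()) - 1.0) < 1e-4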
| 5,351,948 |
def aprint(artname, number=1):
"""
Print 1-line art.
:param artname: artname
:type artname : str
:param number: number of repeats
:type number: int
:return: None
"""
try:
if artname == "UnicodeEncodeError":
raise UnicodeEncodeError(
'test', u"", 42, 43, 'test unicode-encode-error')
print(art(artname=artname, number=number))
except UnicodeEncodeError:
print(ART_ENVIRONMENT_WARNING.format(artname))
| 5,351,949 |
def test_cholesky_inverse():
"""Checks that our Cholesky inverse matches `torch.cholesky_inverse()`."""
torch.autograd.set_detect_anomaly(True)
batch_dims = (5,)
matrix_dim = 3
L = fannypack.utils.tril_from_vector(
torch.randn(
batch_dims + (fannypack.utils.tril_count_from_matrix_dim(matrix_dim),),
)
)
for i, our_inverse in enumerate(fannypack.utils.cholesky_inverse(L)):
torch.testing.assert_allclose(our_inverse, torch.cholesky_inverse(L[i]))
| 5,351,950 |
def build(target, method: list, dataset_name, limit: int, number_of_topics):
"""
Build page.
    :param target: Target file
    :param method: List of methods to use.
    :param dataset_name: Name of the dataset module to load.
    :param limit: Limit processing into N candidates.
    :param number_of_topics: Number of topics for text analysis; -1 falls back to the configured default.
"""
click.echo("Loading dataset ... ", nl=False)
dataset = importlib.import_module(f".{dataset_name}", "agora_analytica.data")
df = dataset.load_dataset()
if limit < 2:
raise click.BadParameter("Build should include more than 2 candidates.", param_hint="--limit")
preferred_list_file = settings.get("build", "preferred_candidates", fallback=None)
if preferred_list_file:
with open(preferred_list_file) as fp:
# Fetch all preferred candidates by row, skipping ones beginning with `#`
preferred_candidates = filter(lambda x: x != "" and x[0] != "#", map(str.strip, fp.readlines()))
# Slice preferred candidates
preferred_filter = df["name"].isin(preferred_candidates)
preferred = df[preferred_filter]
# Fill to a required ammount with sampled data
df = preferred.append(df[~preferred_filter].sample(clamp(df.shape[0] - preferred.shape[0], limit - preferred.shape[0], 0)))
del preferred, preferred_filter
# sample to a correct size
df = df.sample(min(limit, df.shape[0]))
click.echo("[DONE]")
click.echo("Calculating distances ... ", nl=False)
distances = measure_distances(df, methods=method)
click.echo("[DONE]")
click.echo("Analyzing text ... ", nl=False)
if number_of_topics == -1:
# Using squareroot seems to provide pretty good default
number_of_topics = settings.getint("build", "number_of_topics", fallback=np.sqrt(limit))
number_of_topics = int(number_of_topics)
settings.set("build", "number_of_topics", str(number_of_topics))
click.echo(f"Topics: {number_of_topics} ", nl=False)
texts_df = df.text_answers().sort_index()
visualization = settings.getboolean('build', 'generate_visualization', fallback=debug)
topics = TextTopics(texts_df, number_topics=number_of_topics, generate_visualization=visualization)
words = {}
n = texts_df.shape[0]
talkinpoints = {}
for a in range(n):
a_idx = texts_df.index[a]
for b in range(a + 1, n):
b_idx = texts_df.index[b]
r = topics.compare_rows(texts_df, a_idx, b_idx)
if r:
words[(a_idx, b_idx)] = r[0][1]
words[(b_idx, a_idx)] = r[1][1]
talkinpoints[a_idx] = topics.find_talkingpoint(texts_df.loc[a_idx])
click.echo("[DONE]")
click.echo("Generating structures ... ", nl=False)
data_nodes = [{
"id": int(idx),
"name": row.get("name"),
"party": row.get("party"),
"image": row.get("image", None),
"constituency": row.get("constituency"),
"number": int(row.get("number", -1)),
"talkinpoint": talkinpoints.get(int(idx), None)
} for idx, row in df.replace(np.NaN, None).iterrows()]
data_links = [{
"source": int(i),
"source_term": words.get((i, l), None),
"distance": float(d),
"target_term": words.get((l, i), None),
"target": int(l)
} for i, d, l in distances.values]
click.echo("[DONE]")
# Build static pages
_build_pages(target / "pages")
click.echo("Writing data ... ", nl=False)
_write("nodes", data_nodes, target)
_write("links", data_links, target)
cfg = instance_path() / "app.cfg"
with cfg.open('w') as f:
settings.write(f, space_around_delimiters=True)
click.echo("[DONE]")
| 5,351,951 |
def get_dependency_graph(node, targets=None):
"""Returns the dependent nodes and the edges for the passed in node.
:param str node: The node to get dependencies for.
:param list targets: A list with the modules that are used as targets.
:return: The dependency graph info.
:rtype: GraphInfo
"""
g = _make_graph()
edges, direct_dependencies = _all_dependencies(node, g)
if targets:
targets = set(targets)
affected_targets = []
if not edges:
return graph_info.GraphInfo(
graph=g,
nodes=[],
edges=[],
direct_dependencies=[],
affected_targets=[]
)
all_nodes = set()
for n1, n2 in edges:
all_nodes.add(n1)
all_nodes.add(n2)
node_to_info = {}
for index, node_name in enumerate(all_nodes):
if node_name not in node_to_info:
node_id = index + 1
node_to_info[node_name] = {
"id": node_id,
"label": "",
"title": node_name,
"value": 1,
"color": "blue"
}
if targets and node_name in targets:
node_to_info[node_name]["color"] = 'orange'
node_to_info[node_name]["value"] = 3
affected_targets.append(node_name)
node_to_info[node]['color'] = 'red'
node_to_info[node]['value'] = 3
edges_representation = []
for n1, n2 in edges:
index1 = node_to_info[n1]["id"]
index2 = node_to_info[n2]["id"]
edge_color = 'gray'
value = 1
if n1 == node:
node_to_info[n2]['color'] = 'green'
node_to_info[n2]['value'] = 2
edge_color = 'green'
value = 2
if n2 == node:
node_to_info[n1]['color'] = 'green'
node_to_info[n1]['value'] = 2
edge_color = 'green'
value = 2
edges_representation.append(
{
"from": index1,
"to": index2,
"color": edge_color,
"value": value
},
)
info = graph_info.GraphInfo(
graph=g,
nodes=list(node_to_info.values()),
edges=edges_representation,
direct_dependencies=sorted(direct_dependencies),
affected_targets=affected_targets
)
return info
| 5,351,952 |
def is_submodule_repo(p: Path) -> bool:
"""
"""
if p.is_file() and '.git/modules' in p.read_text():
return True
return False
| 5,351,953 |
def shift_contig(df2, remove):
"""
    Append cluster fragments shifted by the sort_cluster_seq
    function back onto the DataFrame.
    Parameters
    ----------
    df2 : pandas DataFrame
        DataFrame with an NRPS cluster fragment.
    remove : list
        List of cluster fragments that were removed and should be appended back.
Returns
-------
df2 : pandas DataFrame
Corrected DataFrame with NRPS meta information.
"""
for gen in remove:
df2 = df2.append(gen)
return df2
| 5,351,954 |
def repos(repo_mapping = {}):
"""Adds external repositories/archives needed by eventuals (phase 1).
Args:
repo_mapping: passed through to all other functions that expect/use
repo_mapping, e.g., 'git_repository'
"""
stout_atomic_backoff_repos(
external = False,
repo_mapping = repo_mapping,
)
stout_stateful_tally_repos(
external = False,
repo_mapping = repo_mapping,
)
stout_borrowed_ptr_repos(
external = False,
repo_mapping = repo_mapping,
)
stout_flags_repos(
external = False,
repo_mapping = repo_mapping,
)
stout_notification_repos(
external = False,
repo_mapping = repo_mapping,
)
stout_repos(
external = False,
repo_mapping = repo_mapping,
)
pyprotoc_plugin_repos(
external = False,
repo_mapping = repo_mapping,
)
maybe(
http_archive,
name = "rules_foreign_cc",
url = "https://github.com/bazelbuild/rules_foreign_cc/archive/0.5.1.tar.gz",
sha256 = "33a5690733c5cc2ede39cb62ebf89e751f2448e27f20c8b2fbbc7d136b166804",
strip_prefix = "rules_foreign_cc-0.5.1",
repo_mapping = repo_mapping,
)
maybe(
git_repository,
name = "com_github_3rdparty_bazel_rules_asio",
remote = "https://github.com/3rdparty/bazel-rules-asio",
commit = "257c93cbaf94703f1b0668b7693267bebea52b37",
shallow_since = "1650559794 +0200",
repo_mapping = repo_mapping,
)
maybe(
git_repository,
name = "com_github_3rdparty_bazel_rules_curl",
remote = "https://github.com/3rdparty/bazel-rules-curl",
commit = "5748da4b2594fab9410db9b5e6619b47cb5688e0",
shallow_since = "1651700487 +0300",
repo_mapping = repo_mapping,
)
maybe(
git_repository,
name = "com_github_3rdparty_bazel_rules_jemalloc",
remote = "https://github.com/3rdparty/bazel-rules-jemalloc",
commit = "c82c0c3856f07d53c1b76e89beeb8abab8c2d0ad",
shallow_since = "1634918242 -0700",
repo_mapping = repo_mapping,
)
maybe(
git_repository,
name = "com_github_3rdparty_bazel_rules_libuv",
remote = "https://github.com/3rdparty/bazel-rules-libuv",
commit = "f8aeba82e40cda94d6227c67d114ecc732b30be5",
shallow_since = "1638359550 +0300",
repo_mapping = repo_mapping,
)
maybe(
http_archive,
name = "com_github_grpc_grpc",
urls = ["https://github.com/grpc/grpc/archive/refs/tags/v1.45.0.tar.gz"],
strip_prefix = "grpc-1.45.0",
sha256 = "ec19657a677d49af59aa806ec299c070c882986c9fcc022b1c22c2a3caf01bcd",
)
maybe(
http_archive,
name = "bazel_skylib",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.2.1/bazel-skylib-1.2.1.tar.gz",
"https://github.com/bazelbuild/bazel-skylib/releases/download/1.2.1/bazel-skylib-1.2.1.tar.gz",
],
sha256 = "f7be3474d42aae265405a592bb7da8e171919d74c16f082a5457840f06054728",
)
| 5,351,955 |
def main():
"""Join the functions."""
input_identification()
zip_extract()
file_format()
| 5,351,956 |
def get_color_cycle(n=None):
"""Return the matplotlib color cycle.
:param Optional[int] n:
if given, return a list with exactly n elements formed by repeating
the color cycle as necessary.
Usage::
blue, green, red = get_color_cycle(3)
"""
import matplotlib as mpl
cycle = mpl.rcParams["axes.prop_cycle"].by_key()["color"]
if n is None:
return it.cycle(cycle)
return list(it.islice(it.cycle(cycle), n))
| 5,351,957 |
def test_database_connection(test_data: dict):
"""
Test database connection using the database connection string.
:param test_data: Database test data.
:type test_data: dict
"""
check_connection(test_data)
| 5,351,958 |
def _bocs_consistency_mapping(x):
"""
This is for the comparison with BOCS implementation
:param x:
:return:
"""
horizontal_ind = [0, 2, 4, 7, 9, 11, 14, 16, 18, 21, 22, 23]
vertical_ind = sorted([elm for elm in range(24) if elm not in horizontal_ind])
return x[horizontal_ind].reshape((ISING_GRID_H, ISING_GRID_W - 1)), x[vertical_ind].reshape((ISING_GRID_H - 1, ISING_GRID_W))
| 5,351,959 |
def get_document(name, key):
"""Get document from Database"""
constructor = Constructor()
inst_coll = constructor.factory(kind='Collection', name=name)
inst_doc = Document(inst_coll)
doc = inst_doc.get_document(key)
return doc
| 5,351,960 |
def numero_3():
"""numero_3"""
check50.run("python3 numeros_introescos.py").stdin("999\n1000", prompt=False).stdout("0", regex=False).exit(0)
| 5,351,961 |
def predict() -> str:
"""predict the movie genres based on the request data"""
cur = db_connection.cursor()
try:
input_params = __process_input(request.data)
input_vec = vectorizer.transform(input_params)
prediction = classifier.predict(input_vec)
predictions = binarizer.inverse_transform(prediction)
for count, i in enumerate(input_params):
pred = ", ".join(predictions[count])
cur.execute(
f"INSERT INTO prediction(input, output, time) VALUES('{i}', '{pred}', '{datetime.datetime.now()}' )"
)
db_connection.commit()
except Exception as e:
response = app.response_class(
            response=json.dumps({"error": f"{e.__class__} occurred"}), status=400
)
return response
response = app.response_class(
        response=json.dumps({"predictions": binarizer.inverse_transform(prediction)}),
status=200,
)
return response
| 5,351,962 |
def SignificanceWeights(serializer, decay):
"""Multiplies a binary mask with a symbol significance mask."""
def significance_weights(mask):
# (repr,) -> (batch, length, repr)
# significance = [0, 1, 2]
significance = serializer.significance_map
assert significance.shape[0] == mask.shape[2]
# significance = batch_size * [0, 1, 2]
significance = jnp.repeat(
significance[np.newaxis, ...], repeats=mask.shape[0], axis=0)
# significance = batch_size * [0, 1, 2] * mask.shape[1]
significance = jnp.repeat(
significance[..., jnp.newaxis], repeats=mask.shape[1], axis=2)
# significance = batch_size * mask.shape[1] * [0, 1, 2]
significance = jnp.swapaxes(significance, 1, 2)
assert significance.shape == mask.shape
sig_weights = mask * decay ** significance
return sig_weights
return tl.Fn('SignificanceWeights', significance_weights)
| 5,351,963 |
def lint(ctx):
"""Validate the code style (e.g. undefined names)"""
try:
importlib.import_module("flake8")
except ImportError:
sys.exit("You need to ``pip install flake8`` to lint")
# We use flake8 with minimal settings
# http://pep8.readthedocs.io/en/latest/intro.html#error-codes
cmd = [sys.executable, "-m", "flake8"] + PY_PATHS + ["--select=F,E11"]
ret_code = subprocess.call(cmd, cwd=ROOT_DIR)
if ret_code == 0:
print("No style errors found")
else:
sys.exit(ret_code)
| 5,351,964 |
def check_fun_inter_allocation(fun_inter, data, **kwargs):
"""Check allocation rules for fun_inter then returns objects if check"""
out = None
check_allocation_fun_inter = get_allocation_object(data, kwargs['xml_fun_inter_list'])
if check_allocation_fun_inter is None:
check_fe = check_fun_elem_data_consumption(
data, fun_inter,
kwargs['xml_fun_elem_list'],
kwargs['xml_function_list'],
kwargs['xml_consumer_function_list'],
kwargs['xml_producer_function_list'])
if all(i for i in check_fe):
out = [fun_inter, data]
fun_inter.add_allocated_data(data.id)
elif True in check_fe:
if check_fe[0] is True:
print(f"Data {data.name} has only consumer(s) "
f"allocated to a functional element exposing "
f"{fun_inter.name}, {data.name} not "
f"allocated to {fun_inter.name}")
elif check_fe[1] is True:
print(f"Data {data.name} has only producer(s) "
f"allocated to a functional element exposing "
f"{fun_inter.name}, {data.name} not "
f"allocated to {fun_inter.name}")
else:
print(f"Data {data.name} has no producer(s) nor "
f"consumer(s) allocated to functional elements "
f"exposing {fun_inter.name}, {data.name} not "
f"allocated to {fun_inter.name}")
return out
| 5,351,965 |
def kernel_bw_lookup(
compute_device: str,
compute_kernel: str,
caching_ratio: Optional[float] = None,
) -> Optional[float]:
"""
Calculates the device bandwidth based on given compute device, compute kernel, and
caching ratio.
Args:
        compute_device (str): compute device.
        compute_kernel (str): compute kernel.
caching_ratio (Optional[float]): caching ratio used to determine device bandwidth
if UVM caching is enabled.
Returns:
float: the device bandwidth.
"""
caching_ratio = caching_ratio if caching_ratio else UVM_CACHING_RATIO
lookup = {
# CPU
("cpu", EmbeddingComputeKernel.DENSE.value): 0.35 * DDR_MEM_BW,
("cpu", EmbeddingComputeKernel.SPARSE.value): 0.35 * DDR_MEM_BW,
("cpu", EmbeddingComputeKernel.BATCHED_DENSE.value): 0.5 * DDR_MEM_BW,
("cpu", EmbeddingComputeKernel.BATCHED_FUSED.value): 1 * DDR_MEM_BW,
("cpu", EmbeddingComputeKernel.BATCHED_QUANT.value): 1 * DDR_MEM_BW,
# CUDA
("cuda", EmbeddingComputeKernel.DENSE.value): 0.35 * HBM_MEM_BW,
("cuda", EmbeddingComputeKernel.SPARSE.value): 0.35 * HBM_MEM_BW,
("cuda", EmbeddingComputeKernel.BATCHED_DENSE.value): 0.5 * HBM_MEM_BW,
("cuda", EmbeddingComputeKernel.BATCHED_FUSED.value): 1 * HBM_MEM_BW,
("cuda", EmbeddingComputeKernel.BATCHED_FUSED_UVM.value): DDR_MEM_BW / 10,
("cuda", EmbeddingComputeKernel.BATCHED_FUSED_UVM_CACHING.value): (
caching_ratio * HBM_MEM_BW + (1 - caching_ratio) * DDR_MEM_BW
)
/ 10,
("cuda", EmbeddingComputeKernel.BATCHED_QUANT.value): 1 * HBM_MEM_BW,
("cuda", EmbeddingComputeKernel.BATCHED_QUANT_UVM.value): DDR_MEM_BW / 10,
("cuda", EmbeddingComputeKernel.BATCHED_QUANT_UVM_CACHING.value): (
caching_ratio * HBM_MEM_BW + (1 - caching_ratio) * DDR_MEM_BW
)
/ 10,
}
return lookup.get((compute_device, compute_kernel))
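
# A minimal usage sketch, assuming the module-level EmbeddingComputeKernel enum
# and the HBM_MEM_BW / DDR_MEM_BW constants referenced above are in scope.

# Batched-fused table on GPU: full HBM bandwidth.
hbm_bw = kernel_bw_lookup("cuda", EmbeddingComputeKernel.BATCHED_FUSED.value)

# UVM caching: a caching_ratio-weighted blend of HBM and DDR bandwidth, divided by 10.
uvm_bw = kernel_bw_lookup(
    "cuda",
    EmbeddingComputeKernel.BATCHED_FUSED_UVM_CACHING.value,
    caching_ratio=0.2,
)

# Unknown (device, kernel) combinations fall through to None.
assert kernel_bw_lookup("tpu", "dense") is None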
| 5,351,966 |
def spkltc(targ, et, ref, abcorr, stobs):
"""
Return the state (position and velocity) of a target body
relative to an observer, optionally corrected for light time,
expressed relative to an inertial reference frame.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkltc_c.html
:param targ: Target body.
:type targ: int
:param et: Observer epoch.
:type et: float
:param ref: Inertial reference frame of output state.
:type ref: str
:param abcorr: Aberration correction flag.
:type abcorr: str
:param stobs: State of the observer relative to the SSB.
:type stobs: 6-Element Array of floats
    :return:
        Corrected state of the target relative to the observer,
        One way light time between observer and target,
        Derivative of light time with respect to time
:rtype: tuple
"""
assert len(stobs) == 6
targ = stypes.c_int(targ)
et = ctypes.c_double(et)
ref = stypes.stringToCharP(ref)
abcorr = stypes.stringToCharP(abcorr)
stobs = stypes.toDoubleVector(stobs)
starg = stypes.emptyDoubleVector(6)
lt = ctypes.c_double()
dlt = ctypes.c_double()
libspice.spkltc_c(targ, et, ref, abcorr, stobs, starg, ctypes.byref(lt),
ctypes.byref(dlt))
return stypes.cVectorToPython(starg), lt.value, dlt.value
| 5,351,967 |
async def test_availability_without_topic(hass, mqtt_mock):
"""Test availability without defined availability topic."""
await help_test_availability_without_topic(
hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG
)
| 5,351,968 |
def test_mime_db_conversion(testname):
"""Test if the convert process of mime-db structure
to new one is performed correctly.
"""
mime_db, expected = CONVERSION_TESTCASES[testname]
result = convert_mime_db(mime_db)
assert result == expected
| 5,351,969 |
def create_container(
container_image: str,
name: str = None,
volumes: t.List[str] = None,
) -> str:
"""Create a new working container from provided container image.
Args:
container_image (str): The container image to start from.
name (str, optional): The container name.
volumes (t.List[str], optional): Any volumes to bind into the container.
Returns:
str: The container name/id used for further manipulation.
"""
args = []
if name:
args.extend(["--name", name])
if volumes:
args.extend(_unwind_list("--volume", volumes))
command = ["buildah", "from"] + args + [container_image]
result = platform_utils.run_command(command, capture_stdout=True)
container = result.stdout.strip()
logger.success(f"Created '{container}' from image '{container_image}'")
return container
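
# A hypothetical usage sketch: the image name, container name, and volume path are
# placeholders, and buildah plus the module's platform_utils helper must be available.
container = create_container(
    "docker.io/library/alpine:3.16",
    name="scratch-build",
    volumes=["/tmp/build-cache:/var/cache"],  # host:container bind passed via --volume
)
# Further buildah steps (run/copy/commit) would target the returned container id.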
| 5,351,970 |
def single_data_path(client, node_id):
"""
In order for a shrink to work, it should be on a single filesystem, as
shards cannot span filesystems. Return `True` if the node has a single
filesystem, and `False` otherwise.
:arg client: An :class:`elasticsearch.Elasticsearch` client object
:rtype: bool
"""
return len(client.nodes.stats()['nodes'][node_id]['fs']['data']) == 1
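
# A usage sketch, assuming an Elasticsearch cluster is reachable at the placeholder endpoint.
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder endpoint
node_id = next(iter(es.nodes.stats()["nodes"]))
if single_data_path(es, node_id):
    print("node", node_id, "has a single data path and is shrink-eligible")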
| 5,351,971 |
def main():
"""Make a jazz noise here"""
args = get_args()
items = args.items
sortList = args.sorted
i = len(items) - 1
if sortList:
items.sort()
if len(items) == 1:
print("You are bringing {}.".format(items[0]))
elif len(items) == 2:
print("You are bringing {} and {}.".format(items[0], items[1]))
else:
print("You are bringing {}, and {}.".format(', '.join(items[:i]), items[-1]))
| 5,351,972 |
def sortorder(obj):
"""
    Tries to smartly determine the sort order for this object ``obj``
"""
if hasattr(obj, 'last'):
return obj.last.timestamp()
if isinstance(obj, str):
# First assume pure numeric
try:
return float(obj)
except ValueError:
pass
# Assume it is of the form
        # AB [N.M] PPP words
try:
return float(obj.split('[')[1].split(']')[0])
except (IndexError, ValueError):
return strip_tags(obj).strip()
return None
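
# A usage sketch as a sort key. Mixing numeric and purely textual entries would
# yield incomparable keys in Python 3, so this example only uses values that
# resolve to numbers.
entries = ["XY [10.2] beta", "2.5", "XY [3.1] alpha"]
print(sorted(entries, key=sortorder))
# ['2.5', 'XY [3.1] alpha', 'XY [10.2] beta']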
| 5,351,973 |
def get_more_details_of_post(post_url: str) -> json:
"""
:param post_url: the url of an imgur post
:return: Details like Virality-score, username etc in JSON format
"""
details = {}
try:
        request = HTMLSession().get(post_url)
        # sometimes the request isn't properly made, hence call again.
        if len(request.html.find('script')) < 18:
            request = HTMLSession().get(post_url)
            # handle when it's not there at all
            if len(request.html.find('script')) < 18:
                return details
regex = 'item: ({.+} )' # regex to isolate the `item` dict.
# 18th script tag has the `item` dict. this is tested on more than 1500 links.
matched = re.search(regex, request.html.find(
'script')[18].text).group(0)
item = json.loads(matched[5:])
details['username'] = item['account_url']
details['comment_count'] = item['comment_count']
details['downs'] = item['downs']
details['ups'] = item['ups']
details['points'] = item['points']
details['score'] = item['score']
details['timestamp'] = item['timestamp']
details['views'] = item['views']
details['favorite_count'] = item['favorite_count']
details['hot_datetime'] = item['hot_datetime']
details['nsfw'] = item['nsfw']
        details['platform'] = 'Not Detected' if item['platform'] is None else item['platform']
details['virality'] = item['virality']
except Exception as e:
print(e)
return details
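
# A hypothetical call: the post URL is a placeholder and network access is required.
details = get_more_details_of_post("https://imgur.com/gallery/example123")
if details:
    print(details["username"], details["views"], details["virality"])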
| 5,351,974 |
def save_totals(
year_range,
totals,
entry_list,
count,
category_list,
category_txt,
category_txt2,
filename
):
"""
Write out a bunch of report data to a spreadsheet report.
Report will be a 2D matrix:
- X-Axis = school year
- Y-Axis = 'Category' (FIPS Code, District, etc...)
Notes:
- idxes contains the data
- worksheets is a list of XLS worksheets, one per report in idxes
Arguments:
year_range - list of years for the given totals
totals - data set
entry_list - entries per datum row
count - how many of the datum to output
category_list - list of groupings (district IDs, state FIPS numbers, etc...)
category_txt - txt for groupings (district names, state names, etc...)
category_txt2 - additional txt for the groups (district state, etc...)
filename - output filename
"""
wb = Workbook()
percentages = wb.add_sheet('Student Percentages')
worksheets = [percentages]
y_offset=2
# Create the headers/labels row/col
for ws in worksheets:
ws.write(0, 0, "Agency Name")
for j, st in enumerate(category_list):
if j < count:
if len(category_txt[st]) == 2: # Don't change caps for State abbr.
ws.write(j+y_offset, 0, category_txt[st])
else:
ws.write(j+y_offset, 0, category_txt[st].title())
x_offset = 1
if category_txt2:
for ws in worksheets:
ws.write(0, 1, "State")
for j, st in enumerate(category_list):
if j < count:
ws.write(j+y_offset, 1, fips_to_st[category_txt2[st]][0])
x_offset = 2
# Print out the data
for i, year in enumerate(year_range):
        print("Write Report for: %d" % year)
for ws in worksheets:
ws.write_merge(0, 0, (i*len(entry_list))+x_offset, ((i+1)*len(entry_list))+x_offset-1, year)
for j, entry in enumerate(entry_list):
ws.write(1, (i*len(entry_list))+j+x_offset, entry)
for j, st in enumerate(category_list):
if j < count:
for k, total in enumerate([totals]):
for l, entry in enumerate(entry_list):
try:
if totals[i][st][entry] < 0.001:
worksheets[k].write(j+y_offset, i*len(entry_list)+l+x_offset, "")
else:
worksheets[k].write(j+y_offset, i*len(entry_list)+l+x_offset, totals[i][st][entry])
except KeyError:
worksheets[k].write(j+y_offset, i+x_offset, "")
wb.save(filename)
| 5,351,975 |
def subkey_public_pair_chain_code_pair(public_pair, chain_code_bytes, i):
"""
Yield info for a child node for this node.
public_pair:
base public pair
chain_code:
base chain code
i:
the index for this node.
Returns a pair (new_public_pair, new_chain_code)
"""
i_as_bytes = struct.pack(">l", i)
sec = public_pair_to_sec(public_pair, compressed=True)
data = sec + i_as_bytes
I64 = hmac.HMAC(key=chain_code_bytes, msg=data, digestmod=hashlib.sha512).digest()
I_left_as_exponent = from_bytes_32(I64[:32])
x, y = public_pair
the_point = I_left_as_exponent * ecdsa.generator_secp256k1 + \
ecdsa.Point(ecdsa.generator_secp256k1.curve(), x, y, ORDER)
if the_point == INFINITY:
logger.critical(_SUBKEY_VALIDATION_LOG_ERR_FMT)
raise DerivationError('K_{} == {}'.format(i, the_point))
I_left_as_exponent = from_bytes_32(I64[:32])
if I_left_as_exponent >= ORDER:
logger.critical(_SUBKEY_VALIDATION_LOG_ERR_FMT)
raise DerivationError('I_L >= {}'.format(ORDER))
new_public_pair = the_point.pair()
new_chain_code = I64[32:]
return new_public_pair, new_chain_code
| 5,351,976 |
def get_registry_description(metaprefix: str) -> Optional[str]:
"""Get the description for the registry, if available.
:param metaprefix: The metaprefix of the registry
:return: The description for the registry, if available, otherwise ``None``.
>>> get_registry_description('prefixcommons')
'A registry of commonly used prefixes in the life sciences and linked data'
>>> get_registry_description('missing')
None
"""
registry = get_registry(metaprefix)
if registry is None:
return None
return registry.description
| 5,351,977 |
def evaluate(model, valid_exe, valid_ds, valid_prog, dev_count, metric):
"""evaluate """
acc_loss = 0
acc_top1 = 0
cc = 0
for feed_dict in tqdm.tqdm(
multi_device(valid_ds.generator(), dev_count), desc='evaluating'):
if dev_count > 1:
loss, top1 = valid_exe.run(
feed=feed_dict,
fetch_list=[model.metrics[0].name, model.metrics[1].name])
loss = np.mean(loss)
top1 = np.mean(top1)
else:
loss, top1 = valid_exe.run(
valid_prog,
feed=feed_dict,
fetch_list=[model.metrics[0].name, model.metrics[1].name])
acc_loss += loss
acc_top1 += top1
cc += 1
ret = {"loss": float(acc_loss / cc), "top1": float(acc_top1 / cc)}
return ret
| 5,351,978 |
def _get_rank(player):
"""Get the rank of a player"""
cursor = _DB.cursor()
try:
cursor.execute("SELECT score FROM scores WHERE player = ?", (player.lower(),))
rows = cursor.fetchall()
if not rows:
return 0
ps = rows[0][0]
cursor.execute("SELECT count(*) FROM scores WHERE score > ?", (ps,))
rows = cursor.fetchall()
return 1+rows[0][0]
finally:
cursor.close()
| 5,351,979 |
def ensure_branch(c, branch, repo, remote=None, fork=None, base=None):
"""
:param fork: used to validate existing remote branch is correct
using None will assume that there is no fork and branch is on 'remote'
"""
ensure_cloned_repo(c, repo)
if remote is not None:
ensure_remote(c, repo, owner=remote)
with c.cd(repo):
branches = c.run("git branch -vv", hide="out").stdout.splitlines()
branches = (re.match(
r". ([_\.\-\w]*) +\w* (\[([/_\.\-\w]*)[ \w:]*\])?.*",
b).groups() for b in branches)
branches = [(t[0], t[2]) for t in branches]
# check if there is no local branch, which means we'll create one
if not any(b for b in branches if b[0] == branch):
# if there is no remote to track, then create from base
if remote is None:
if base is None:
base = "master"
c.run("git checkout {base}".format(base=base), hide="out")
c.run("git pull".format(base=base), hide="out")
c.run("git checkout -b {branch}".format(
branch=branch, base=base), hide="out")
# else create remote tracking branch
else:
if base is None:
base = branch
c.run("git checkout -b {branch} {remote}/{base}".format(
branch=branch, remote=remote, base=base), hide="out")
return
# we now know we have a branch of that name already
# get the remote for the branch
_, tracked_branch = next(b for b in branches if b[0] == branch)
# this is a remote base, so default base to the local branch name
if base is None:
base = branch
if fork is None:
fork = remote
# check that we are tracking remote properly (if a tracking branch)
if tracked_branch is None and remote is not None:
raise Exception("{}, a local branch, is not tracking {}/{}".format(
branch, fork, base))
# if it's already created the remote tracking branch is fork/branch
if tracked_branch is not None and tracked_branch != (
fork + "/" + branch):
raise Exception(
"{} is not tracking {}/{}, it is tracking {}".format(
branch, fork, branch, tracked_branch))
# proper branch is already local, check it out
c.run("git checkout {}".format(branch), hide="both")
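
# A usage sketch with an invoke context; the repo, branch, and remote names are
# placeholders and the repository is assumed to be cloned locally.
from invoke import Context

c = Context()
# Create or check out my-feature, tracking the same-named branch on the upstream remote.
ensure_branch(c, branch="my-feature", repo="my-repo", remote="upstream")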
| 5,351,980 |
def getDataFromFileList(filedir):
"""
Reads all data from each file to one big data set ordered as:
[[info],[[residue],[data]]]
"""
data = []
    filelist = os.listdir(filedir)
print("Loading data from data dir\n")
if len(filelist)>0:
print("DataFiles included:\n ----------------------------------")
else:
print("No data files found\n")
return data
for f in filelist:
if "cpmg" not in f :
continue
d1 ={}
d1["info"]= getFileInfo(filedir+f)
if not d1["info"]:
return data
if not d1["info"]["type"] =="fit" and not d1["info"]["type"] == "exp": #This is to avoid additional files that have the same name
continue
else:
print("| "+f+" |")
d1["resdata"]=readExpFile(filedir+f,d1["info"]["type"])
data.append(d1)
print(" ----------------------------------\n")
return data
| 5,351,981 |
def spec(func):
"""return a string with Python function specification"""
doc = pydoc.plain(pydoc.render_doc(func))
return doc.splitlines()[2]
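
# For example (the exact signature text can vary between Python versions):
print(spec(len))    # e.g. 'len(obj, /)'
print(spec(spec))   # 'spec(func)'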
| 5,351,982 |
def brm_weights(P, r, Γ, X):
"""Bellman-residual minimization fixed-point weights.
TODO: Need to actually go through the details to make sure this is right
"""
assert linalg.is_stochastic(P)
assert X.ndim == 2
assert len(X) == len(P)
ns = len(P)
    Γ = as_diag(Γ, ns)
# Calculate intermediate quantities
I = np.eye(ns)
dist = linalg.stationary(P)
D = np.diag(dist)
Π = X @ pinv(X.T @ D @ X) @ X.T @ D
A = (X - P @ Γ @ Π @ X).T @ (X - P @ Γ @ Π @ X)
b = (X - P @ Γ @ Π @ X).T @ r
return pinv(A) @ b
| 5,351,983 |
def file_age(file_name):
"""
Returns the age of a file in seconds from now. -1 if the file does not exist.
:param file_name: file name
.. versionadded:: 9.3.1
"""
if not os.path.exists(file_name):
return -1
return time.time() - os.path.getmtime(file_name)
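
# A small usage sketch; the temporary file exists only for illustration.
import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp_path = tmp.name

print(file_age(tmp_path))         # a small positive number of seconds
print(file_age("/no/such/file"))  # -1 for missing files
os.remove(tmp_path)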
| 5,351,984 |
def test_stage_exceptions(t, l, i, b, se):
"""
***Purpose***: Test if correct exceptions are raised when attributes are
assigned unacceptable values.
"""
s = Stage()
data_type = [t, l, i, b, se]
for data in data_type:
        print('Using: %s, %s' % (data, type(data)))
if not isinstance(data, str):
with pytest.raises(TypeError):
s.name = data
with pytest.raises(TypeError):
s.tasks = data
with pytest.raises(TypeError):
s.add_tasks(data)
| 5,351,985 |
def IsVirus(mi, log):
"""Test: a virus is any message with an attached executable
    I've also noticed the viruses come in as wav and midi attachments
so I trigger on those as well.
This is a very paranoid detector, since someone might send me a
binary for valid reasons. I white-list everyone who's sent me
email before so it doesn't affect me.
"""
for part in mi.msg.walk():
if part.get_main_type() == 'multipart':
continue
filename = part.get_filename()
if filename is None:
if part.get_type() in ["application/x-msdownload",
"audio/x-wav", "audio/x-midi"]:
# Only viruses send messages to me with these types
log.pass_test(VIRUS)
return ("it has a virus-like content-type (%s)" %
part.get_type())
else:
extensions = "bat com exe pif ref scr vbs wsh".split()
base, ext = posixpath.splitext(filename)
if ext[1:].lower() in extensions:
log.pass_test(VIRUS)
return "it has a virus-like attachment (%s)" % ext[1:]
return False
| 5,351,986 |
def action_reaction():
"""Run experiments with and without the action-reaction assumption."""
folder = 'action_reaction/'
env = HardSpheres(num_obj=15, width=250)
for asymmetric in (True, False):
exp = FullArch(env, steps=3000, lr=0.1, asymmetric=asymmetric)
losses, coll_losses = exp.run()
exp_name = 'asymm' if asymmetric else 'symm'
plot_coll_loss(coll_losses.cpu(), folder, exp_name)
plot_state_loss(losses, folder, exp_name)
| 5,351,987 |
def to_curl(request, compressed=False, verify=True):
"""
Returns string with curl command by provided request object
Parameters
----------
    request : object
        Request-like object exposing ``method``, ``headers``, ``body`` and
        ``url`` attributes (e.g. ``requests.PreparedRequest``)
    compressed : bool
        If `True` then `--compressed` argument will be added to result
    verify : bool
        If `False` then `--insecure` argument will be added to result
"""
parts = [
('curl', None),
('-X', request.method),
]
for k, v in sorted(request.headers.items()):
parts += [('-H', '{0}: {1}'.format(k, v))]
if request.body:
body = request.body
if isinstance(body, bytes):
body = body.decode('utf-8')
parts += [('-d', body)]
if compressed:
parts += [('--compressed', None)]
if not verify:
parts += [('--insecure', None)]
parts += [(None, request.url)]
flat_parts = []
for k, v in parts:
if k:
flat_parts.append(quote(k))
if v:
flat_parts.append(quote(v).replace("\n", "\\n"))
return ' '.join(flat_parts)
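
# A usage sketch with a requests.PreparedRequest, which exposes the method, headers,
# body, and url attributes read above (the httpbin URL is just an example).
import requests

prepared = requests.Request(
    "POST",
    "https://httpbin.org/post",
    headers={"Content-Type": "application/json"},
    data='{"answer": 42}',
).prepare()

print(to_curl(prepared, compressed=True, verify=False))
# -> a single shell-quoted line, roughly:
# curl -X POST -H 'Content-Length: 14' -H 'Content-Type: application/json' -d '{"answer": 42}' --compressed --insecure https://httpbin.org/post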
| 5,351,988 |
def prox_trace_indicator(a, lamda):
"""Time-varying latent variable graphical lasso prox."""
es, Q = np.linalg.eigh(a)
xi = np.maximum(es - lamda, 0)
return np.linalg.multi_dot((Q, np.diag(xi), Q.T))
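
# A small numerical sketch: the prox soft-thresholds the eigenvalues of a symmetric matrix.
import numpy as np

a = np.array([[3.0, 1.0],
              [1.0, 3.0]])          # eigenvalues 2 and 4
shrunk = prox_trace_indicator(a, lamda=2.5)
print(np.linalg.eigvalsh(shrunk))   # approximately [0.0, 1.5]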
| 5,351,989 |
def get_gamma_non_jitted(esys):
"""Get log gamma
Returns
-------
float[:]
"""
if isinstance(esys.species[0].logc, float):
v = np.empty(len(esys.species))
else:
v = np.empty(len(esys.species), dtype=object)
for i, sp in enumerate(esys.species):
v[i] = 10.0 ** (sp.logg)
return v
# return np.array([10.0**(sp.logg) for sp in self.species])
# v = np.empty(len(self.species))
# for i, sp in enumerate(self.species):
# v[i] = 10.0**(sp.logg)
# return v
| 5,351,990 |
def active_matrices_from_extrinsic_euler_angles(
basis1, basis2, basis3, e, out=None):
"""Compute active rotation matrices from extrinsic Euler angles.
Parameters
----------
basis1 : int
Basis vector of first rotation. 0 corresponds to x axis, 1 to y axis,
and 2 to z axis.
basis2 : int
Basis vector of second rotation. 0 corresponds to x axis, 1 to y axis,
and 2 to z axis.
basis3 : int
Basis vector of third rotation. 0 corresponds to x axis, 1 to y axis,
and 2 to z axis.
e : array-like, shape (..., 3)
Euler angles
out : array, shape (..., 3, 3), optional (default: new array)
Output array to which we write the result
Returns
-------
Rs : array, shape (..., 3, 3)
Rotation matrices
"""
e = np.asarray(e)
R_shape = e.shape + (3,)
R_alpha = active_matrices_from_angles(basis1, e[..., 0].flat)
R_beta = active_matrices_from_angles(basis2, e[..., 1].flat)
R_gamma = active_matrices_from_angles(basis3, e[..., 2].flat)
if out is None:
out = np.empty(R_shape)
out[:] = np.einsum(
"nij,njk->nik", np.einsum("nij,njk->nik", R_gamma, R_beta),
R_alpha).reshape(R_shape)
return out
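
# A usage sketch, assuming the companion active_matrices_from_angles helper from the
# same module is in scope (0/1/2 select the x/y/z basis axes).
import numpy as np

euler_angles = np.array([
    [0.0, 0.0, np.pi / 2.0],   # 90 degrees about z
    [np.pi / 2.0, 0.0, 0.0],   # 90 degrees about x
])
Rs = active_matrices_from_extrinsic_euler_angles(0, 1, 2, euler_angles)
print(Rs.shape)  # (2, 3, 3)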
| 5,351,991 |
def save(image, parent=None):
""" Save an image with appropriate dialogs (file selector)
Return the chosen save path or None
parent : parent wx Window for dialogs
"""
dialog = ImageFileDialog(parent, style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
if dialog.ShowModal() != wx.ID_OK :
return None
path = dialog.GetPath()
if isinstance(image, list) :
periodic_progress_dialog = PeriodicProgressDialog(0.2, "Saving files", "Saving ...")
worker_thread = WorkerThread(periodic_progress_dialog,target=medipy.io.save_serie, args=(image,path,))
worker_thread.start()
periodic_progress_dialog.start()
worker_thread.join()
periodic_progress_dialog.Destroy()
else :
try :
medipy.io.save(image, path)
return path
except :
# Try by adding a ".nii" suffix
try :
path += ".nii"
medipy.io.save(image, path)
return path
except :
exc_info = sys.exc_info()
logging.error("".join(traceback.format_exception(*exc_info)))
wx.MessageBox("Could not save image to %s: %s"%(path, exc_info[1]),
"Could not save image", wx.OK|wx.ICON_ERROR)
| 5,351,992 |
def runQuery(scenarioID):
"""
Run a query that aquires the data from the lrs for one specific dialoguetrainer scenario
\n
:param scenarioID: The id of the scenario to request the data from \t
:type scenarioID: int \n
:returns: The data for that scenario or error \t
:rtype: [Dict<string, mixed>] | {error} \n
"""
return (
lrs.Query()
.where(lrs.Attr.ACTIVITY, lrs.IS, f"https://en.dialoguetrainer.app/scenario/play/{scenarioID}")
.where(lrs.Attr.VERB, lrs.IS, "https://adlnet.gov/expapi/verbs/completed")
.select(lrs.Attr.ACTOR, lrs.Attr.RESULT)
.execute()
)
| 5,351,993 |
def is_spaceafter_yes(line):
"""
SpaceAfter="Yes" extracted from line
"""
if line[-1] == "_":
return False
for ddd in line[MISC].split("|"):
kkk, vvv = ddd.split("=")
if kkk == "SpaceAfter":
return vvv == "Yes"
raise ValueError
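
# A usage sketch on CoNLL-U style token rows, assuming the module's MISC constant
# is the usual column index 9.
MISC = 9  # assumed value of the module-level column index

token = ["1", "Hello", "hello", "INTJ", "UH", "_", "0", "root", "_", "SpaceAfter=Yes"]
print(is_spaceafter_yes(token))  # True

bare = ["2", ",", ",", "PUNCT", ",", "_", "1", "punct", "_", "_"]
print(is_spaceafter_yes(bare))   # False: a bare '_' MISC field short-circuits to False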
| 5,351,994 |
def linear_scheduler(optimizer, warmup_steps, training_steps, last_epoch=-1):
"""linear_scheduler with warmup from huggingface"""
def lr_lambda(current_step):
if current_step < warmup_steps:
return float(current_step) / float(max(1, warmup_steps))
return max(
0.0,
float(training_steps - current_step)
/ float(max(1, training_steps - warmup_steps)),
)
return LambdaLR(optimizer, lr_lambda, last_epoch)
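
# A usage sketch with a PyTorch optimizer. The model, learning rate, and step counts
# are placeholders, and LambdaLR is assumed to be imported from torch.optim.lr_scheduler
# alongside this helper.
import torch

model = torch.nn.Linear(16, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=3e-4)
scheduler = linear_scheduler(optimizer, warmup_steps=100, training_steps=1000)

for step in range(1000):
    # forward / backward passes would go here
    optimizer.step()
    scheduler.step()  # LR ramps up for 100 steps, then decays linearly to 0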
| 5,351,995 |
def MajorityVoteN(qubits,
nrounds,
prep=[],
meas_delay=1e-6,
add_cals=False,
calRepeats=2):
"""
Majority vote across multiple measurement results (same or different qubits)
Parameters
----------
qubits : Channels.LogicalChannel tuple
A hashable (immutable) tuple of qubits for majority vote
nrounds: int
Number of consecutive measurements
prep : boolean iterable, optional
Array of binary values mapping X(q) pulses to the list of qubits
        provided. Ex: (q1,q2), prep=(1,0) -> would apply a pi pulse to q1
before the majority vote measurement. Default = []
    meas_delay : int/float, optional
Delay between syndrome check rounds (seconds)
add_cals : boolean, optional
Whether to append calibration pulses to the end of the sequence
calRepeats : int, optional
How many times to repeat calibration scalings (default 2)
Returns
-------
metafile : string
Path to a json metafile with details about the sequences and paths to
compiled machine files
Examples
--------
>>> mf = MajorityVoteN((q1, q2, q3), 10);
Compiled 1 sequences.
o INVALIDATE(channel=None, addr=0x1, mask=0x0)
o WRITEADDR(channel=None, addr=0x1, value=0xfffff)
MAJORITYMASK(in_addr=1, out_addr=0)
o INVALIDATE(channel=None, addr=0xa, mask=0xfffff)
o INVALIDATE(channel=None, addr=0xb, mask=0x1)
MAJORITY(in_addr=a, out_addr=b)
>>> mf
'/path/to/exp/exp-meta.json'
"""
nqubits = len(qubits)
seqs = [MajorityMask(1, 0, nrounds*nqubits),
Invalidate(10, nrounds*nqubits),
Invalidate(11, 1)]
if prep:
seqs += [reduce(operator.mul,
[X(q) for n,q in enumerate(qubits) if prep[n]])]
for n in range(nrounds):
seqs += [reduce(operator.mul,
[MEASA(q, (10, nqubits*n+m)) for m,q in enumerate(qubits)]),
Id(qubits[0],meas_delay)]
seqs+=MajorityVote(10,11, nrounds*nqubits)
seqs+=qwait("RAM", 11)
seqs+=[Id(qubits[0],100e-9)]
seqs+=qif(1,[X(qubits[0])]) # placeholder for any conditional operation
seqs=[seqs]
if add_cals:
seqs += create_cal_seqs(qubits,
calRepeats)
metafile = compile_to_hardware(seqs,
'MajorityVote/MajorityVote',
tdm_seq=True)
return metafile
| 5,351,996 |
def numerox_example():
"""
Example of how to prepare a submission for the Numerai tournament.
It uses Numerox which you can install with: pip install numerox
For more information see: https://github.com/kwgoodman/numerox
"""
# download dataset from numerai, save it and then load it
data = nx.download('numerai_dataset.zip')
# we will use logistic regression; you will want to write your own model
model = nx.logistic()
# fit model with train data and make predictions for tournament data
prediction = nx.production(model, data, tournament='bernie')
# save predictions to csv file
prediction.to_csv('logistic.csv', verbose=True)
# upload predictions to Numerai to enter the tournament
# create the public_id and secret_key on the Numerai website
#
# nx.upload('logistic.csv', tournament='bernie', public_id, secret_key)
| 5,351,997 |
def getRatios(vect1, vect2):
"""Assumes: vect1 and vect2 are equal length lists of numbers
Returns: a list containing the meaningful values of
vect1[i]/vect2[i]"""
ratios = []
for index in range(len(vect1)):
try:
ratios.append(vect1[index]/vect2[index])
except ZeroDivisionError:
ratios.append(float('nan')) #nan = Not a Number
except:
raise ValueError('getRatios called with bad arguments')
return ratios
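
# For example:
print(getRatios([1.0, 2.0, 7.0], [1.0, 2.0, 0.0]))  # [1.0, 1.0, nan]
try:
    getRatios([1.0, 2.0], [3.0])
except ValueError as err:
    print(err)  # getRatios called with bad arguments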
| 5,351,998 |
def sampleset():
"""Return list with 50 positive and 10 negative samples"""
pos = [(0, i) for i in range(50)]
neg = [(1, i) for i in range(10)]
return pos + neg
| 5,351,999 |