content (stringlengths 35-762k) | sha1 (stringlengths 40-40) | id (int64 0-3.66M) |
---|---|---|
import itertools as it
import numpy as np
import pandas as pd
from pandas import Categorical
def VD_A_DF(data, val_col: str = None, group_col: str = None, sort=True):
"""
:param data: pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
:param val_col: str, optional
        Must be specified if `data` is a pandas DataFrame object.
Name of the column that contains values.
:param group_col: str, optional
        Must be specified if `data` is a pandas DataFrame object.
Name of the column that contains group names.
    :param sort: bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
:return: stats : pandas DataFrame of effect sizes
Stats summary ::
'A' : Name of first measurement
'B' : Name of second measurement
'estimate' : effect sizes
'magnitude' : magnitude
"""
x = data.copy()
if sort:
x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
groups = x[group_col].unique()
# Pairwise combinations
g1, g2 = np.array(list(it.combinations(np.arange(groups.size), 2))).T
# Compute effect size for each combination
ef = np.array([VD_A(list(x[val_col][x[group_col] == groups[i]].values),
list(x[val_col][x[group_col] == groups[j]].values)) for i, j in zip(g1, g2)])
return pd.DataFrame({
'A': np.unique(data[group_col])[g1],
'B': np.unique(data[group_col])[g2],
'estimate': ef[:, 0],
'magnitude': ef[:, 1]
}) | ee4b94c9a47d8e15e182c010ffdb954f2ccec4bb | 2,192 |
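A minimal usage sketch for the function above, assuming `VD_A` (the pairwise Vargha-Delaney A measure it calls) is importable from the same module; the toy data below is made up for illustration.
import pandas as pd

toy = pd.DataFrame({
    "group": ["a"] * 5 + ["b"] * 5,
    "score": [0.91, 0.87, 0.90, 0.88, 0.93, 0.71, 0.74, 0.69, 0.75, 0.72],
})
# One row per pair of groups, with the A estimate and its magnitude label.
print(VD_A_DF(toy, val_col="score", group_col="group"))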
def getR2(y, y_fitted, chi=None):
"""
calculates the coefficient of determination R^2 for `y_fitted` as prediction for `y` over a region marked by chi>0 defined by
R^2=1 - S_res/S_tot
    with S_res=int(chi*(y-y_fitted)**2), S_tot=int(chi*(y-m(y))**2), m(y)=int(chi*y)/int(chi)
    If R^2=1 then `y_fitted` predicts `y` exactly. If R^2=0 then `y_fitted` does not make a better prediction than the mean.
:param y: target distribution
:type y: `esys.escript.Scalar`
:param y_fitted: fitted distribution
:type y_fitted: `esys.escript.Scalar`
:param chi: marker/weighting for region of interest
:type chi: `esys.escript.Scalar` or None
:rtype: `float`
"""
if chi is None:
chi=Scalar(1., Function(y_fitted.getFunctionSpace().getDomain()))
ybar=integrate(chi*y)/integrate(chi)
S_res=integrate(chi*(y-y_fitted)**2)
S_tot=integrate(chi*(y-ybar)**2)
if S_tot > 0:
R2=1-S_res/S_tot
else:
if S_res > 0:
R2=0.
else:
R2=1.
return R2 | 8ec0837d2d8443279af4142c8b8407b0b03af06a | 2,193 |
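The weighted R^2 definition in the docstring can be illustrated with plain numpy arrays in place of escript `Scalar` objects; this is an illustrative analogue of the formula, not the escript implementation.
import numpy as np

def r2_weighted(y, y_fitted, chi=None):
    # Integrals over the region become chi-weighted sums over the samples.
    y, y_fitted = np.asarray(y, float), np.asarray(y_fitted, float)
    chi = np.ones_like(y) if chi is None else np.asarray(chi, float)
    ybar = np.sum(chi * y) / np.sum(chi)
    s_res = np.sum(chi * (y - y_fitted) ** 2)
    s_tot = np.sum(chi * (y - ybar) ** 2)
    if s_tot > 0:
        return 1.0 - s_res / s_tot
    return 0.0 if s_res > 0 else 1.0

print(r2_weighted([1.0, 2.0, 3.0], [1.1, 1.9, 3.2]))  # close to 1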
def basis(d, point_distribution='uniform', symbolic=True):
"""
    Return all local basis functions phi as functions of the
local point X in a 1D element with d+1 nodes.
If symbolic=True, return symbolic expressions, else
return Python functions of X.
point_distribution can be 'uniform' or 'Chebyshev'.
"""
X = sym.symbols('X')
if d == 0:
phi_sym = [1]
else:
if point_distribution == 'uniform':
if symbolic:
h = sym.Rational(1, d) # node spacing
nodes = [2*i*h - 1 for i in range(d+1)]
else:
nodes = np.linspace(-1, 1, d+1)
elif point_distribution == 'Chebyshev':
# Just numeric nodes
nodes = Chebyshev_nodes(-1, 1, d)
phi_sym = [Lagrange_polynomial(X, r, nodes)
for r in range(d+1)]
# Transform to Python functions
phi_num = [sym.lambdify([X], phi_sym[r], modules='numpy')
for r in range(d+1)]
return phi_sym if symbolic else phi_num | 0f369ab22a12588e10826e894142a1dd115a5aa9 | 2,194 |
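The helpers `Lagrange_polynomial` and `Chebyshev_nodes` used above are not shown in the snippet; below is a plausible sketch of the Lagrange cardinal polynomial helper (an assumption about its form, not the original implementation).
import sympy as sym

def lagrange_polynomial_sketch(X, r, nodes):
    # The r-th cardinal polynomial: equal to 1 at nodes[r] and 0 at every other node.
    p = 1
    for s, node in enumerate(nodes):
        if s != r:
            p = p * (X - node) / (nodes[r] - node)
    return sym.simplify(p)

X = sym.symbols('X')
print(lagrange_polynomial_sketch(X, 0, [-1, 0, 1]))  # e.g. X*(X - 1)/2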
def provide_batch_fn():
""" The provide_batch function to use. """
return dataset_factory.provide_batch | 9ec34fb430dab0a17461f3002f1acbbd94b6e637 | 2,195 |
def mergeSort(li):
"""Sorts a list by splitting it to smaller and smaller pieces (until they
only have one or less elements) and then merges it back using the function
``merge()``.
>>> mergeSort([1, 2, 3, 4, 5])
[1, 2, 3, 4, 5]
>>> mergeSort([5, 4, 3, 2, 1])
[1, 2, 3, 4, 5]
>>> mergeSort([3, 2, 6, 1, 4, 2, 3, 1, 1, 5, 6, -2, 2.3])
[-2, 1, 1, 1, 2, 2, 2.3, 3, 3, 4, 5, 6, 6]
"""
n = len(li)
if n < 2:
return li
return merge(mergeSort(li[:n//2]), mergeSort(li[n//2:])) | c0f38ff6779bb24ebb081b5b76661189fa2767bc | 2,196 |
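The `merge()` helper referenced in the docstring is not included in the snippet; a minimal sketch of what it presumably does (merging two already-sorted lists) could look like this.
def merge(left, right):
    # Combine two already-sorted lists into one sorted list.
    result = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    result.extend(left[i:])
    result.extend(right[j:])
    return result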
def test_colour_ranges(fake_readme, monkeypatch):
"""
Whatever number we provide as coverage should produce the appropriate colour
"""
readme_file = "README"
def fake_readme_location(*args, **kwargs):
return os.path.join(TESTS_DIR, readme_file)
monkeypatch.setattr(__main__, "readme_location", fake_readme_location)
for total, colour in (
("97", "brightgreen"),
("93", "green"),
("80", "yellowgreen"),
("65", "yellow"),
("45", "orange"),
("15", "red"),
("n/a", "lightgrey"),
):
__main__.get_total = lambda: total
__main__.main([])
assert __main__.get_colour(total) == colour | 0614cfa9d33e1d5f3112a79198c7fd2e762f4e3d | 2,198 |
def remove_partitions(
cube, store, conditions=None, ktk_cube_dataset_ids=None, metadata=None
):
"""
Remove given partition range from cube using a transaction.
Remove the partitions selected by ``conditions``. If no ``conditions`` are given,
remove all partitions. For each considered dataset, only the subset of
``conditions`` that refers to the partition columns of the respective dataset
is used. In particular, a dataset that is not partitioned at all is always considered
selected by ``conditions``.
Parameters
----------
cube: kartothek.core.cube.cube.Cube
Cube spec.
store: Union[simplekv.KeyValueStore, Callable[[], simplekv.KeyValueStore]]
Store.
conditions: Union[None, Condition, Iterable[Condition], Conjunction]
Select the partitions to be removed. Must be a condition only on partition columns.
ktk_cube_dataset_ids: Optional[Union[Iterable[Union[Str, Bytes]], Union[Str, Bytes]]]
        Ktk_cube dataset IDs to apply the remove action to, optional. Defaults to "all".
metadata: Optional[Dict[str, Dict[str, Any]]]
        Metadata for all datasets, optional. Only given keys are updated/replaced. Deletion of
metadata keys is not possible.
Returns
-------
datasets: Dict[str, kartothek.core.dataset.DatasetMetadata]
Datasets, updated.
"""
if callable(store):
store_instance = store()
store_factory = store
else:
store_instance = store
def store_factory():
return store
existing_datasets = discover_datasets(cube, store)
for (
ktk_cube_dataset_id,
(ds, mp, delete_scope),
) in prepare_metapartitions_for_removal_action(
cube=cube,
store=store_instance,
conditions=conditions,
ktk_cube_dataset_ids=ktk_cube_dataset_ids,
existing_datasets=existing_datasets,
).items():
mp = mp.store_dataframes(
store=store_instance,
dataset_uuid=ds.uuid,
df_serializer=KTK_CUBE_DF_SERIALIZER,
)
ds_factory = metadata_factory_from_dataset(
ds, with_schema=True, store=store_factory
)
existing_datasets[ktk_cube_dataset_id] = update_dataset_from_partitions(
mp,
store_factory=store_factory,
dataset_uuid=ds.uuid,
ds_factory=ds_factory,
metadata=prepare_ktk_metadata(cube, ktk_cube_dataset_id, metadata),
metadata_merger=None,
delete_scope=delete_scope,
)
return existing_datasets | 0bede6d99e34edce32f42d9f78104ee3fdc45456 | 2,199 |
def update_t_new_docker_image_names(main, file):
""" Updates the names of the docker images from lasote to conanio
"""
docker_mappings = {
"lasote/conangcc49": "conanio/gcc49",
"lasote/conangcc5": "conanio/gcc5",
"lasote/conangcc6": "conanio/gcc6",
"lasote/conangcc7": "conanio/gcc7",
"lasote/conangcc8": "conanio/gcc8",
"lasote/conanclang39": "conanio/clang39",
"lasote/conanclang40": "conanio/clang40",
"lasote/conanclang50": "conanio/clang50",
"lasote/conanclang60": "conanio/clang60",
}
found_old_name = False
for old, new in docker_mappings.items():
if main.file_contains(file, old):
main.replace_in_file(file, old, new)
found_old_name = True
if found_old_name:
main.output_result_update(title="Travis: Update Docker image names from lasote/ to conanio/")
return True
return False | 6d1a1dd0f254252cf73d7a89c926dc2476fc89e8 | 2,200 |
def fit(kern, audio, file_name, max_par, fs):
"""Fit kernel to data """
# time vector for kernel
n = kern.size
xkern = np.linspace(0., (n - 1.) / fs, n).reshape(-1, 1)
# initialize parameters
if0 = gpitch.find_ideal_f0([file_name])[0]
init_f, init_v = gpitch.init_cparam(y=audio, fs=fs, maxh=max_par, ideal_f0=if0, scaled=False)[0:2]
init_l = np.array([0., 1.])
# optimization
p0 = np.hstack((init_l, init_v, init_f)) # initialize params
pstar = optimize_kern(x=xkern, y=kern, p0=p0)
# compute initial and learned kernel
kern_init = approximate_kernel(p0, xkern)
kern_approx = approximate_kernel(pstar, xkern)
# get kernel hyperparameters
    npartials = (pstar.size - 2) // 2  # integer division so the slices below get an int index
lengthscale = pstar[1]
variance = pstar[2: npartials + 2]
frequency = pstar[npartials + 2:]
params = [lengthscale, variance, frequency]
return params, kern_init, kern_approx | e4b6519f1d9439e3d8ea20c545664cf152ce6a89 | 2,201 |
def find_next_open_date(location_pid, date):
"""Finds the next day where this location is open."""
location = current_app_ils.location_record_cls.get_record_by_pid(
location_pid
)
_infinite_loop_guard = date + timedelta(days=365)
while date < _infinite_loop_guard:
if _is_open_on(location, date):
return date
date += _ONE_DAY_INCREMENT
# Termination is normally guaranteed if there is at least one weekday open
raise IlsException(
description="Cannot find any date for which the location %s is open after the given date %s."
"Please check opening/closures dates."
% (location_pid, date.isoformat())
) | 74a38c39d2e03a2857fa6bbc7e18dc45c0f4e48a | 2,202 |
def _rowcorr(a, b):
"""Correlations between corresponding matrix rows"""
cs = np.zeros((a.shape[0]))
for idx in range(a.shape[0]):
cs[idx] = np.corrcoef(a[idx], b[idx])[0, 1]
return cs | 21df87b6f3bba58285cac0242b7c9e72b534f762 | 2,204 |
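A small self-contained example of the row-wise correlation helper above, using made-up matrices:
import numpy as np

a = np.array([[1.0, 2.0, 3.0], [1.0, 0.0, -1.0]])
b = np.array([[2.0, 4.0, 6.0], [3.0, 2.0, 1.0]])
print(_rowcorr(a, b))  # [1. 1.] -- each row of b is a linear function of the matching row of a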
def gff_to_dict(f_gff, feat_type, idattr, txattr, attributes, input_type):
"""
It reads only exonic features because not all GFF files contain gene and trascript features. From the exonic
features it extracts gene names, biotypes, start and end positions. If any of these attributes do not exit
then they are set to NA.
"""
annotation = defaultdict(lambda: defaultdict(lambda: 'NA'))
exon_pos = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
tx_info = defaultdict(lambda: defaultdict(str))
with open(f_gff) as gff_handle:
for rec in GFF.parse(gff_handle, limit_info=dict(gff_type=[feat_type]), target_lines=1):
for sub_feature in rec.features:
start = sub_feature.location.start
end = sub_feature.location.end
strand = strandardize(sub_feature.location.strand)
try:
geneid = sub_feature.qualifiers[idattr][0]
except KeyError:
print("No '" + idattr + "' attribute found for the feature at position "
+ rec.id + ":" + str(start) + ":" + str(end) + ". Please check your GTF/GFF file.")
continue
annotation[geneid]['chr'] = rec.id
annotation[geneid]['strand'] = strand
if annotation[geneid]['start'] == 'NA' or start <= int(annotation[geneid]['start']):
annotation[geneid]['start'] = start
if annotation[geneid]['end'] == 'NA' or end >= int(annotation[geneid]['end']):
annotation[geneid]['end'] = end
for attr in attributes:
if attr in annotation[geneid]:
continue
try:
annotation[geneid][attr] = sub_feature.qualifiers[attr][0]
except KeyError:
annotation[geneid][attr] = 'NA'
# extract exon information only in case of dexseq output
if input_type != "dexseq":
continue
try:
txid = sub_feature.qualifiers[txattr][0]
tx_info[txid]['chr'] = rec.id
tx_info[txid]['strand'] = strand
exon_pos[txid][int(start)][int(end)] = 1
except KeyError:
print("No '" + txattr + "' attribute found for the feature at position " + rec.id + ":" + str(
start) + ":" + str(end) + ". Please check your GTF/GFF file.")
pass
bed_entries = []
# create BED lines only for deseq output
if input_type == "dexseq":
for txid in exon_pos.keys():
starts = sorted(exon_pos[txid])
strand = tx_info[txid]['strand']
if strand == '-':
starts = reversed(starts)
for c, start in enumerate(starts, 1):
ends = sorted(exon_pos[txid][start])
if strand == '-':
ends = reversed(ends)
for end in ends:
bed_entries.append('\t'.join([tx_info[txid]['chr'], str(start), str(end),
txid + ':' + str(c), '0', strand]))
return annotation, bed_entries | f1574cabb40f09f4f7a3b6a4f0e1b0a11d5c585d | 2,205 |
def _subtract_the_mean(point_cloud):
"""
Subtract the mean in point cloud and return its zero-mean version.
Args:
point_cloud (numpy.ndarray of size [N,3]): point cloud
Returns:
(numpy.ndarray of size [N,3]): point cloud with zero-mean
"""
point_cloud = point_cloud - np.mean(point_cloud, axis=0)
return point_cloud | 94866087a31b8268b06250d052fd12983222a066 | 2,206 |
def load_source_dataframe(method, sourcename, source_dict,
download_FBA_if_missing, fbsconfigpath=None):
"""
Load the source dataframe. Data can be a FlowbyActivity or
FlowBySector parquet stored in flowsa, or a FlowBySector
formatted dataframe from another package.
:param method: dictionary, FBS method
:param sourcename: str, The datasource name
:param source_dict: dictionary, The datasource parameters
:param download_FBA_if_missing: Bool, if True will download FBAs from
Data Commons. Default is False.
:param fbsconfigpath, str, optional path to an FBS method outside flowsa
repo
:return: df of identified parquet
"""
if source_dict['data_format'] == 'FBA':
# if yaml specifies a geoscale to load, use parameter
# to filter dataframe
if 'source_fba_load_scale' in source_dict:
geo_level = source_dict['source_fba_load_scale']
else:
geo_level = None
vLog.info("Retrieving Flow-By-Activity for datasource %s in year %s",
sourcename, str(source_dict['year']))
flows_df = flowsa.getFlowByActivity(
datasource=sourcename,
year=source_dict['year'],
flowclass=source_dict['class'],
geographic_level=geo_level,
download_FBA_if_missing=download_FBA_if_missing)
elif source_dict['data_format'] == 'FBS':
vLog.info("Retrieving flowbysector for datasource %s", sourcename)
flows_df = flowsa.getFlowBySector(sourcename)
elif source_dict['data_format'] == 'FBS_outside_flowsa':
vLog.info("Retrieving flowbysector for datasource %s", sourcename)
fxn = source_dict.get("FBS_datapull_fxn")
if callable(fxn):
flows_df = fxn(source_dict, method, fbsconfigpath)
elif fxn:
raise flowsa.exceptions.FBSMethodConstructionError(
error_type='fxn_call')
else:
raise flowsa.exceptions.FBSMethodConstructionError(
message="Data format not specified in method "
f"file for {sourcename}")
return flows_df | 9a928441d790bb35acd0d32efeea105eeb3082c8 | 2,208 |
import json
def unpack_nwchem_basis_block(data):
"""Unserialize a NWChem basis data block and extract components
@param data: a JSON of basis set data, perhaps containing many types
@type data : str
@return: unpacked data
@rtype : dict
"""
unpacked = json.loads(data)
return unpacked | dfa920f80ae8f0caf15441c354802410c8add690 | 2,209 |
def starify(name):
"""
Replace any ints in a dotted key with stars. Used when applying defaults and widgets to fields
"""
newname = []
for key in name.split('.'):
if is_int(key):
newname.append('*')
else:
newname.append(key)
name = '.'.join(newname)
return name | f7c8baf602ecc4cd12088d4ba70524b2a316875e | 2,210 |
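`starify` relies on an `is_int` helper that is not part of the snippet; a plausible sketch of that helper and a usage example (both assumptions) follow.
def is_int(value):
    # Treat a key segment as an index if it parses as an integer.
    try:
        int(value)
        return True
    except ValueError:
        return False

print(starify('items.0.name'))   # 'items.*.name'
print(starify('profile.email'))  # 'profile.email'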
def hydrogens(atom: Atom) -> int:
"""Total number of hydrogen atoms (int).
"""
return atom.GetTotalNumHs() | f36e04d2c67eaf81b031651f78bb64db3a1c614c | 2,212 |
def to_field(field_tuple):
"""Create a dataframe_field from a tuple"""
return dataframe_field(*field_tuple) | 6863cc33b8eea458223b9f4b1d4432104784d245 | 2,213 |
def compute_subjobs_for_build(build_id, job_config, project_type):
"""
Calculate subjobs for a build.
:type build_id: int
:type job_config: JobConfig
:param project_type: the project_type that the build is running in
:type project_type: project_type.project_type.ProjectType
:rtype: list[Subjob]
"""
# Users can override the list of atoms to be run in this build. If the atoms_override
# was specified, we can skip the atomization step and use those overridden atoms instead.
if project_type.atoms_override is not None:
atoms_string_list = project_type.atoms_override
atoms_list = [Atom(atom_string_value) for atom_string_value in atoms_string_list]
else:
atoms_list = job_config.atomizer.atomize_in_project(project_type)
# Group the atoms together using some grouping strategy
timing_file_path = project_type.timing_file_path(job_config.name)
grouped_atoms = _grouped_atoms(
atoms_list,
job_config.max_executors,
timing_file_path,
project_type.project_directory
)
# Generate subjobs for each group of atoms
subjobs = []
for subjob_id, subjob_atoms in enumerate(grouped_atoms):
# The atom id isn't calculated until the atom has been grouped into a subjob.
for atom_id, atom in enumerate(subjob_atoms):
atom.id = atom_id
subjobs.append(Subjob(build_id, subjob_id, project_type, job_config, subjob_atoms))
return subjobs | caa16899755fbb19530c27004db3515e3eeab9d6 | 2,214 |
def pymodbus_mocked(mocker):
"""Patch pymodbus to deliver results."""
class ResponseContent:
"""Fake a response."""
registers = [0]
class WriteStatus:
"""Mock a successful response."""
@staticmethod
def isError():
# pylint: disable=invalid-name,missing-function-docstring
return False
# Patch connection function
mocker.patch("pymodbus.client.sync.ModbusTcpClient.connect")
mocker.patch(
"pymodbus.client.sync.ModbusTcpClient.read_holding_registers",
return_value=ResponseContent,
)
mocker.patch(
"pymodbus.client.sync.ModbusTcpClient.write_registers", return_value=WriteStatus
) | fdee663d9a8a80496ab6678aacb0b820251c83e1 | 2,215 |
def user_can_view_assessments(user, **kwargs):
""" Return True iff given user is allowed to view the assessments """
return not appConfig.settings.LOGIN_REQUIRED or user.is_authenticated | 1ef3f41ee311a6504d6e0cff0f5cad68135e0527 | 2,216 |
from typing import List
def get_hashes(root_hash: str) -> List[str]:
""" Return a list with the commits since `root_hash` """
cmd = f"git rev-list --ancestry-path {root_hash}..HEAD"
proc = run(cmd)
return proc.stdout.splitlines() | c0fdb996cf43066b87040b6647b75f42e8f7360f | 2,217 |
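The `run()` helper used by `get_hashes` is not shown; one plausible shape for it, assuming it captures stdout as text so `.stdout.splitlines()` works, is sketched below.
import subprocess

def run(cmd: str) -> subprocess.CompletedProcess:
    # Execute the command and capture stdout/stderr as text.
    return subprocess.run(cmd.split(), capture_output=True, text=True, check=True)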
import zipfile
def unzip_file(zip_src, dst_dir):
"""
    Unzip a zip file.
    :param zip_src: full path of the zip file
    :param dst_dir: destination folder to extract into
:return:
"""
r = zipfile.is_zipfile(zip_src)
if r:
fz = zipfile.ZipFile(zip_src, "r")
for file in fz.namelist():
fz.extract(file, dst_dir)
else:
return "请上传zip类型压缩文件" | 8b89f41f38cc688f6e0473a77215ae72b163654a | 2,218 |
def abort_multipart_upload(resource, bucket_name, object_name, upload_id):
"""Abort in-progress multipart upload"""
mpupload = resource.MultipartUpload(bucket_name, object_name, upload_id)
return mpupload.abort() | 93535c2404db98e30bd29b2abbda1444ae4d0e8a | 2,219 |
def double(n):
"""
Takes a number n and doubles it
"""
return n * 2 | 8efeee1aa09c27d679fa8c5cca18d4849ca7e205 | 2,221 |
import torch
def Normalize(tensor, mean, std, inplace=False):
"""Normalize a float tensor image with mean and standard deviation.
This transform does not support PIL Image.
.. note::
        This transform acts out of place by default, i.e., it does not mutate the input tensor.
See :class:`~torchvision.transforms.Normalize` for more details.
Args:
tensor (Tensor): Float tensor image of size (C, H, W) or (B, C, H, W) to be normalized.
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
inplace(bool,optional): Bool to make this operation inplace.
Returns:
Tensor: Normalized Tensor image.
"""
if not isinstance(tensor, torch.Tensor):
raise TypeError(
'Input tensor should be a torch tensor. Got {}.'.format(type(tensor)))
if not tensor.is_floating_point():
raise TypeError(
'Input tensor should be a float tensor. Got {}.'.format(tensor.dtype))
if tensor.ndim < 3:
raise ValueError('Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = '
'{}.'.format(tensor.size()))
if not inplace:
tensor = tensor.clone()
dtype = tensor.dtype
mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)
std = torch.as_tensor(std, dtype=dtype, device=tensor.device)
if (std == 0).any():
raise ValueError(
'std evaluated to zero after conversion to {}, leading to division by zero.'.format(dtype))
if mean.ndim == 1:
mean = mean.view(-1, 1, 1)
if std.ndim == 1:
std = std.view(-1, 1, 1)
tensor.sub_(mean).div_(std)
return tensor | 1312955d0db28ae66f6dadd9c29f3034aa21e648 | 2,222 |
def _tagged_mosc_id(kubeconfig, version, arch, private) -> str:
"""determine what the most recently tagged machine-os-content is in given imagestream"""
base_name = rgp.default_imagestream_base_name(version)
base_namespace = rgp.default_imagestream_namespace_base_name()
name, namespace = rgp.payload_imagestream_name_and_namespace(base_name, base_namespace, arch, private)
stdout, _ = exectools.cmd_assert(
f"oc --kubeconfig '{kubeconfig}' --namespace '{namespace}' get istag '{name}:machine-os-content'"
" --template '{{.image.dockerImageMetadata.Config.Labels.version}}'",
retries=3,
pollrate=5,
strip=True,
)
return stdout if stdout else None | 1fa4ebf736b763fb97d644c0d55aa3923020107d | 2,225 |
def load_household_size_by_municipality():
"""Return dataframe, index 'Gemeente', column 'HHsize'."""
dfhh = pd.read_csv('data/huishoudens_samenstelling_gemeentes.csv', comment='#')
dfhh.sort_values('Gemeente', inplace=True)
dfhh.set_index('Gemeente', inplace=True)
# remove rows for nonexistent municipalites
dfhh.drop(index=dfhh.index[dfhh['nHH'].isna()], inplace=True)
# rename municipalities
rename_muns = {
'Beek (L.)': 'Beek',
'Hengelo (O.)': 'Hengelo',
'Laren (NH.)': 'Laren',
'Middelburg (Z.)': 'Middelburg',
'Rijswijk (ZH.)': 'Rijswijk',
'Stein (L.)': 'Stein',
'Groningen (gemeente)': 'Groningen',
'Utrecht (gemeente)': 'Utrecht',
"'s-Gravenhage (gemeente)": "'s-Gravenhage",
}
dfhh.rename(index=rename_muns, inplace=True)
return dfhh | 9e78345a00209135ec4185a806a70deeb10d5bea | 2,226 |
from typing import Dict
from typing import Any
from typing import List
def gcp_iam_service_account_delete_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Delete service account key.
Args:
client (Client): GCP API client.
args (dict): Command arguments from XSOAR.
Returns:
CommandResults: outputs, readable outputs and raw response for XSOAR.
"""
service_account_name = argToList(args.get('service_account_name'))
command_results_list: List[CommandResults] = []
for account in service_account_name:
try:
client.gcp_iam_service_account_delete_request(account)
command_results_list.append(CommandResults(
readable_output=f'Service account {account} deleted successfully.'
))
except Exception as exception:
error = CommandResults(
readable_output=f'An error occurred while trying to delete {account}.\n {exception}'
)
command_results_list.append(error)
return command_results_list | eeea44d6cc96b430c63168346849ac30a1247cad | 2,227 |
from typing import Union
from typing import Tuple
def couple_to_string(couple: Union[Span, Tuple[int, int]]) -> str:
"""Return a deduplicated string representation of the given couple or span.
Examples:
>>> couple_to_string((12, 15))
"12-15"
>>> couple_to_string((12, 12))
"12"
>>> couple_to_string(Span(12, 15))
"12-15"
"""
return f"{couple[0]}" + ("" if couple[0] == couple[1] else f"-{couple[1]}") | 8aaa0e2b7dfbdd58e4f9765a56f9057dd2b612f3 | 2,228 |
def create_study(X, y,
storage=None, # type: Union[None, str, storages.BaseStorage]
sample_method=None,
metrics=None,
study_name=None, # type: Optional[str]
direction='maximize', # type: str
load_cache=False, # type: bool
is_autobin=False,
bin_params=dict(),
sample_params=dict(),
trials_list=list(),
export_model_path=None,
precision=np.float64,
):
# type: (...) -> Study
"""Create a new :class:`~diego.study.Study`.
Args:
storage:
Database URL. If this argument is set to None, in-memory storage is used, and the
:class:`~diego.study.Study` will not be persistent.
sampler:
A sampler object that implements background algorithm for value suggestion. See also
:class:`~diego.samplers`.
study_name:
Study's name. If this argument is set to None, a unique name is generated
automatically.
is_auto_bin: do autobinning
bin_params: binning method
precision {[np.dtype]} -- precision:
np.dtypes, float16, float32, float64 for data precision to reduce memory size. (default: {np.float64})
Returns:
A :class:`~diego.study.Study` object.
"""
X, y = check_X_y(X, y, accept_sparse='csr')
storage = get_storage(storage)
try:
study_id = storage.create_new_study_id(study_name)
except basic.DuplicatedStudyError:
        # In-memory studies should not share the same name; an existing Study can be loaded. The data lives in the storage.
# if load_if_exists:
# assert study_name is not None
# logger = logging.get_logger(__name__)
# logger.info("Using an existing study with name '{}' instead of "
# "creating a new one.".format(study_name))
# study_id = storage.get_study_id_from_name(study_name)
# else:
raise
study_name = storage.get_study_name_from_id(study_id)
study = Study(
study_name=study_name,
storage=storage,
sample_method=sample_method,
is_autobin=is_autobin,
bin_params=bin_params,
export_model_path=export_model_path,
precision=precision,
metrics=metrics)
if direction == 'minimize':
_direction = basic.StudyDirection.MINIMIZE
elif direction == 'maximize':
_direction = basic.StudyDirection.MAXIMIZE
else:
raise ValueError(
'Please set either \'minimize\' or \'maximize\' to direction.')
if metrics in ['logloss']:
_direction = basic.StudyDirection.MINIMIZE
X = X.astype(dtype=precision, copy=False)
study.storage.direction = _direction
study.storage.set_train_storage(X, y)
return study | bf8ea9c5280c06c4468e9ba015e62401e65ad870 | 2,229 |
import inspect
def test_no_access_to_class_property(db):
"""Ensure the implementation doesn't access class properties or declared
attrs while inspecting the unmapped model.
"""
class class_property:
def __init__(self, f):
self.f = f
def __get__(self, instance, owner):
return self.f(owner)
class Duck(db.Model):
id = db.Column(db.Integer, primary_key=True)
class ns:
is_duck = False
floats = False
class Witch(Duck):
@declared_attr
def is_duck(self):
# declared attrs will be accessed during mapper configuration,
# but make sure they're not accessed before that
info = inspect.getouterframes(inspect.currentframe())[2]
assert info[3] != "_should_set_tablename"
ns.is_duck = True
@class_property
def floats(self):
ns.floats = True
assert ns.is_duck
assert not ns.floats | ab24f6405675cba44af570282afa60bd1e600dcf | 2,231 |
import re
def get_gb_version(backbone_top_cmake_path):
"""
Find the game backbone version number by searching the top level CMake file
"""
with open(backbone_top_cmake_path, 'r') as file:
cmake_text = file.read()
regex_result = re.search(gb_version_regex, cmake_text)
return regex_result.group(1) | a4855c5fd82579b5b1f4f777ee0c040227432947 | 2,232 |
from typing import Optional
from typing import List
async def get_processes(name: Optional[str] = None) -> List[Process]:
"""
Get all processes.
Args:
name (Optional[str], optional): Filter by process name. Defaults to None.
Returns:
List[Process]: A list of processes.
"""
if name:
return get_processes_by_name(name)
return get_all_processes() | 5133a81dcda079e6b5ed649443f9befed72be953 | 2,233 |
def get_rucio_redirect_url(lfn, scope):
"""
get_rucio_redirect_url: assemble Rucio redirect URL
@params: lfn ... one filename
e.g. user.gangarbt.62544955._2108356106.log.tgz
scope ... scope of the file with lfn
e.g. user.gangarbt, or valid1
returns: the Rucio redirect URL
"""
redirectUrl = ''
    ### compose the redirectURL
redirectUrl = '%(redirecthost)s/redirect/%(scope)s/%(filename)s%(suffix)s' % \
{\
'redirecthost': get_rucio_redirect_host(), \
'scope': scope, \
'filename': lfn, \
'suffix': '' \
}
_logger.info('get_rucio_redirect_url: redirectUrl=(%s)' % redirectUrl)
### return the redirectURL
return redirectUrl | 26547c18d9699d0ab3d6d963382bf14c067b982f | 2,235 |
async def _getRequest(websession, url):
"""Send a GET request."""
async with websession.get(url, headers=HEADER) as response:
if response.status == 200:
data = await response.json(content_type=None)
else:
raise Exception('Bad response status code: {}'.format(response.status))
return data | 6511926b2ce753f5233778c702a11142e6cad5a3 | 2,236 |
def interval_seconds():
"""returns the time interval in seconds
Returns:
int
"""
return int(interval_to_milliseconds(interval())/1000) | 831c24cb113dab2b39fc068c397c41c3cc1131b5 | 2,237 |
from typing import Optional
from typing import Sequence
def get_autonomous_db_versions(compartment_id: Optional[str] = None,
db_workload: Optional[str] = None,
filters: Optional[Sequence[pulumi.InputType['GetAutonomousDbVersionsFilterArgs']]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAutonomousDbVersionsResult:
"""
This data source provides the list of Autonomous Db Versions in Oracle Cloud Infrastructure Database service.
Gets a list of supported Autonomous Database versions.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_autonomous_db_versions = oci.database.get_autonomous_db_versions(compartment_id=var["compartment_id"],
db_workload=var["autonomous_db_version_db_workload"])
```
:param str compartment_id: The compartment [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
:param str db_workload: A filter to return only autonomous database resources that match the specified workload type.
"""
__args__ = dict()
__args__['compartmentId'] = compartment_id
__args__['dbWorkload'] = db_workload
__args__['filters'] = filters
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:database/getAutonomousDbVersions:getAutonomousDbVersions', __args__, opts=opts, typ=GetAutonomousDbVersionsResult).value
return AwaitableGetAutonomousDbVersionsResult(
autonomous_db_versions=__ret__.autonomous_db_versions,
compartment_id=__ret__.compartment_id,
db_workload=__ret__.db_workload,
filters=__ret__.filters,
id=__ret__.id) | e10770e9db891079dda251578f851bdb7a0ade8e | 2,239 |
def azimuthal_average(image, center=None, stddev=True, binsize=0.5, interpnan=False):
"""
Calculate the azimuthally averaged radial profile.
Modified based on https://github.com/keflavich/image_tools/blob/master/image_tools/radialprofile.py
Parameters:
        image (numpy 2-D array): image array.
center (list): [x, y] pixel coordinates. If None, use image center.
Note that x is horizontal and y is vertical, y, x = image.shape.
        stddev (bool): if True, the stddev of the profile will also be returned.
binsize (float): size of the averaging bin. Can lead to strange results if
non-binsize factors are used to specify the center and the binsize is
too large.
interpnan (bool): Interpolate over NAN values, i.e. bins where there is no data?
Returns:
:
        If `stddev == True`, it will return [radius, profile, stddev];
else, it will return [radius, profile].
"""
# Calculate the indices from the image
y, x = np.indices(image.shape)
if center is None:
center = np.array([(x.max() - x.min()) / 2.0, (y.max() - y.min()) / 2.0])
r = np.hypot(x - center[0], y - center[1])
# The 'bins' as initially defined are lower/upper bounds for each bin
# so that values will be in [lower,upper)
nbins = int(np.round(r.max() / binsize) + 1)
maxbin = nbins * binsize
bins = np.linspace(0, maxbin, nbins + 1)
# We're probably more interested in the bin centers than their left or right sides...
bin_centers = (bins[1:] + bins[:-1]) / 2.0
# There are never any in bin 0, because the lowest index returned by digitize is 1
nr = np.histogram(r, bins)[0] # nr is how many pixels are within each bin
# Radial profile itself
nan_flag = np.isnan(image) # get rid of nan
#profile = np.histogram(r, bins, weights=image)[0] / nr
profile = np.histogram(r[~nan_flag], bins, weights=image[~nan_flag])[0] / nr
if interpnan:
profile = np.interp(bin_centers, bin_centers[~np.isnan(profile)],
profile[~np.isnan(profile)])
if stddev:
# Find out which radial bin each point in the map belongs to
# recall that bins are from 1 to nbins
whichbin = np.digitize(r.ravel(), bins)
profile_std = np.array([np.nanstd(image.ravel()[whichbin == b]) for b in range(1, nbins + 1)])
profile_std /= np.sqrt(nr) # Deviation of the mean!
return [bin_centers, profile, profile_std]
else:
return [bin_centers, profile] | f5b5e5b4b21af71c50f0a0a9947d3a5cc203bdf0 | 2,240 |
from typing import OrderedDict
def get_setindices(header, setnames):
"""From header like ---ID, coverage, set1_q-value set2_q-value---
this returns indices for different sets {'q-value': {'set1': 2, 'set2: 3}}
"""
setindices = OrderedDict()
for index, field in enumerate(header):
for setname in setnames:
if field.startswith('{}_'.format(setname)):
fieldname = field[len(setname) + 1:]
try:
setindices[fieldname][setname] = index
except KeyError:
setindices[fieldname] = {setname: index}
return setindices | 1bdbda0528098a55438b4cb24ca22358fae7e682 | 2,241 |
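A short usage example of `get_setindices` on a made-up header:
header = ['ID', 'coverage', 'set1_q-value', 'set2_q-value']
print(get_setindices(header, ['set1', 'set2']))
# OrderedDict([('q-value', {'set1': 2, 'set2': 3})])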
def _base_and_stride(freqstr):
"""
Return base freq and stride info from string representation
Example
-------
    _base_and_stride('5Min') -> ('Min', 5)
"""
groups = opattern.match(freqstr)
if groups.lastindex != 2:
raise ValueError("Could not evaluate %s" % freqstr)
stride = groups.group(1)
if len(stride):
stride = int(stride)
else:
stride = 1
base = groups.group(2)
return (base, stride) | a562b0a49f2e5a1d4f49119a424dbfd1588d9c97 | 2,242 |
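The module-level `opattern` regex is not part of the snippet; one plausible definition that makes the docstring example work (an assumption) is:
import re

# Optional integer stride followed by the base frequency string.
opattern = re.compile(r'^(\d*)\s*(\w+)$')

print(_base_and_stride('5Min'))  # ('Min', 5)
print(_base_and_stride('D'))     # ('D', 1)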
def generate_label(input_x,threshold):
"""
    generate one-hot labels from the input: a row is labelled 1 if its sum exceeds ``threshold``, else 0
    :param input_x: shape of [batch_size, sequence_length]
    :return: y: one-hot labels of shape [batch_size, 2]
"""
batch_size,sequence_length=input_x.shape
y=np.zeros((batch_size,2))
for i in range(batch_size):
input_single=input_x[i]
sum=np.sum(input_single)
if i == 0:print("sum:",sum,";threshold:",threshold)
y_single=1 if sum>threshold else 0
if y_single==1:
y[i]=[0,1]
else: # y_single=0
y[i]=[1,0]
return y | 0091fb0d4c92884af1cf07d8afd248e2afeb92b2 | 2,243 |
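A quick example of `generate_label` on a made-up batch (note the function also prints the sum of the first row):
import numpy as np

x = np.array([[1, 0, 1, 0], [1, 1, 1, 1]])
print(generate_label(x, threshold=2))
# [[1. 0.]
#  [0. 1.]]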
from skimage import io, transform
def sf_imread(
img_path,
plot=True,
):
"""
Thin wrapper around `skimage.io.imread` that rotates the image if it is
to be used for plotting, but does not if it is to be used for measurements.
Parameters
----------
img_path : str
Path to image
plot : bool
Determines whether or not image will be rotated 90 degrees
Returns
-------
np.array
"""
img_in = io.imread(img_path)
if plot:
img_in = transform.rotate(img_in, -90) # show images going left-right
return img_in | 3eb17fcb5bee144f7c822cfa23d5057c5fecc109 | 2,244 |
def test_plugin_ws_url_attributes(spf, path, query, expected_url):
"""Note, this doesn't _really_ test websocket functionality very well."""
app = spf._app
test_plugin = TestPlugin()
async def handler(request):
return text('OK')
test_plugin.websocket(path)(handler)
spf.register_plugin(test_plugin)
test_client = app.test_client
request, response = test_client.get(path + '?{}'.format(query))
try:
# Sanic 20.3.0 and above
p = test_client.port
except AttributeError:
p = testing.PORT or 0
assert request.url == expected_url.format(testing.HOST, str(p))
parsed = urlparse(request.url)
assert parsed.scheme == request.scheme
assert parsed.path == request.path
assert parsed.query == request.query_string
assert parsed.netloc == request.host | f6e1f28f1df1e712ab399db48c8a4e0058d11d11 | 2,245 |
def less_than(x, y, force_cpu=None, cond=None, name=None):
"""
${comment}
Args:
x(Tensor): ${x_comment}.
y(Tensor): ${y_comment}.
force_cpu(${force_cpu_type}): ${force_cpu_comment}.
cond(Tensor, optional): Optional output which can be any created Tensor
that meets the requirements to store the result of *less_than*.
if cond is None, a new Tensor will be created to store the result.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
${out_comment}.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3, 4], dtype='float32')
y = paddle.to_tensor([2, 2, 1, 3], dtype='float32')
result = paddle.less_than(x, y)
print(result) # [True, False, False, False]
"""
check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
"less_than")
check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"],
"less_than")
if cond is not None:
check_type(cond, "cond", Variable, "less_than")
    if force_cpu is not None:
check_type(force_cpu, "force_cpu", bool, "less_than")
helper = LayerHelper("less_than", **locals())
if cond is None:
cond = helper.create_variable_for_type_inference(dtype='bool')
cond.stop_gradient = True
attrs = dict()
if force_cpu is not None:
attrs['force_cpu'] = force_cpu
helper.append_op(
type='less_than',
inputs={'X': [x],
'Y': [y]},
outputs={'Out': [cond]},
attrs=attrs)
return cond | 676cdc38b83c2155bbf166f01f50554c1751585e | 2,246 |
import re
def get_page_namespace(url_response):
"""
:type element: Tag
:rtype: int
"""
keyword = '"wgNamespaceNumber"'
text = url_response
if keyword in text:
beginning = text[text.find(keyword) + len(keyword):]
ending = beginning[:beginning.find(',')]
        ints = re.findall(r'\d+', ending)
if len(ints) > 0:
return int(ints[0]) | f4e61d4a927401995f2435a94170ca691ff9119e | 2,247 |
def batch_to_seq(h, nbatch, nsteps, flat=False):
"""
Assumes Time major data!!
x.shape = [nsteps, nbatch, *obs_shape]
h = x.reshape([-1, *x.shape[2:]]))
"""
if flat:
h = tf.reshape(h, [nsteps, nbatch])
else:
h = tf.reshape(h, [nsteps, nbatch, -1])
return [tf.squeeze(v, [0]) for v in tf.split(axis=0, num_or_size_splits=nsteps, value=h)] | a39bf967110b802063a37260147c98ecdf8925bb | 2,248 |
def get_site_camera_data(site_no):
"""An orchestration method that fetches camera data and returns the site dictionary"""
json_raw = get_json_camera_data()
camera = json_raw_to_dictionary(json_raw)
return find_site_in_cameras(site_no, camera) | c9d9febebe8c80dd9de18ece8085853224140d3b | 2,249 |
def find_contam(df, contaminant_prevalence=0.5, use_mad_filter=False):
"""Flag taxa that occur in too many samples."""
taxa_counts = {}
for taxa in df['taxa_name']:
taxa_counts[taxa] = 1 + taxa_counts.get(taxa, 0)
thresh = max(2, contaminant_prevalence * len(set(df['sample_name'])))
contaminants = {taxa for taxa, count in taxa_counts.items() if count >= thresh}
if not use_mad_filter or df.shape[0] <= 2:
return df[~df['taxa_name'].isin(contaminants)]
return median_filter(df, contaminants) | 6f5976d97d7585d0dff60b90b2d6e5d6e22b6353 | 2,251 |
def has_level_or_node(level: int, *auth_nodes: str) -> Rule:
"""
    :param level: required group permission level
    :param auth_nodes: required permission nodes
    :return: allowed if the group permission level is greater than or equal to the required level, or if the permission node is granted; a node set to deny is rejected
"""
async def _has_level_or_node(bot: Bot, event: Event, state: T_State) -> bool:
auth_node = '.'.join(auth_nodes)
detail_type = event.dict().get(f'{event.get_type()}_type')
group_id = event.dict().get('group_id')
user_id = event.dict().get('user_id')
# level检查部分
if detail_type != 'group':
level_checker = False
else:
level_res = await DBGroup(group_id=group_id).permission_level()
if level_res.result >= level:
level_checker = True
else:
level_checker = False
# node检查部分
if detail_type == 'private':
user_auth = DBAuth(auth_id=user_id, auth_type='user', auth_node=auth_node)
user_tag_res = await user_auth.tags_info()
allow_tag = user_tag_res.result[0]
deny_tag = user_tag_res.result[1]
elif detail_type == 'group':
group_auth = DBAuth(auth_id=group_id, auth_type='group', auth_node=auth_node)
group_tag_res = await group_auth.tags_info()
allow_tag = group_tag_res.result[0]
deny_tag = group_tag_res.result[1]
else:
allow_tag = 0
deny_tag = 0
if allow_tag == 1 and deny_tag == 0:
return True
elif allow_tag == -2 and deny_tag == -2:
return level_checker
else:
return False
return Rule(_has_level_or_node) | a755b959eeb93caf113c157aa3551c45c1644216 | 2,252 |
import time
import random
import copy
def solve_problem(problem, max_iter_num=MAX_ITER_NUM, max_iter_num_without_adding=MAX_ITER_NUM_WITHOUT_ADDITIONS, iter_num_to_revert_removal=ITER_NUM_TO_REVERT_REMOVAL, remove_prob=ITEM_REMOVAL_PROBABILITY, consec_remove_prob=CONSECUTIVE_ITEM_REMOVAL_PROBABILITY, ignore_removed_item_prob=IGNORE_REMOVED_ITEM_PROBABILITY, modify_prob=PLACEMENT_MODIFICATION_PROBABILITY, calculate_times=False, return_value_evolution=False):
"""Find and return a solution to the passed problem, using an reversible strategy"""
# create an initial solution with no item placed in the container
solution = Solution(problem)
# determine the bounds of the container
min_x, min_y, max_x, max_y = get_bounds(problem.container.shape)
start_time = 0
sort_time = 0
item_discarding_time = 0
item_selection_time = 0
addition_time = 0
removal_time = 0
modification_time = 0
value_evolution_time = 0
if calculate_times:
start_time = time.time()
if return_value_evolution:
value_evolution = list()
else:
value_evolution = None
if calculate_times:
value_evolution_time += get_time_since(start_time)
if calculate_times:
start_time = time.time()
# sort items by weight, to speed up their discarding (when they would cause the capacity to be exceeded)
items_by_weight = sorted(list(problem.items.items()), key=lambda index_item_tuple: index_item_tuple[1].weight)
if calculate_times:
sort_time += get_time_since(start_time)
iter_count_since_addition = 0
iter_count_since_removal = 0
solution_before_removal = None
if calculate_times:
start_time = time.time()
# discard the items that would make the capacity of the container to be exceeded
items_by_weight = items_by_weight[:get_index_after_weight_limit(items_by_weight, problem.container.max_weight)]
ignored_item_index = -1
if calculate_times:
item_discarding_time += get_time_since(start_time)
# placements can only be possible with capacity and valid items
if problem.container.max_weight and items_by_weight:
# try to add items to the container, for a maximum number of iterations
for i in range(max_iter_num):
if calculate_times:
start_time = time.time()
# perform a random choice of the next item to try to place
list_index, item_index = select_item(items_by_weight)
if calculate_times:
item_selection_time += get_time_since(start_time)
if calculate_times:
start_time = time.time()
# try to add the item in a random position and with a random rotation; if it is valid, remove the item from the pending list
if solution.add_item(item_index, (random.uniform(min_x, max_x), random.uniform(min_y, max_y)), random.uniform(0, 360)):
if calculate_times:
addition_time += get_time_since(start_time)
# find the weight that can still be added
remaining_weight = problem.container.max_weight - solution.weight
# stop early if the capacity has been exactly reached
if not remaining_weight:
break
# remove the placed item from the list of pending items
items_by_weight.pop(list_index)
if calculate_times:
start_time = time.time()
# discard the items that would make the capacity of the container to be exceeded
items_by_weight = items_by_weight[:get_index_after_weight_limit(items_by_weight, remaining_weight)]
if calculate_times:
item_discarding_time += get_time_since(start_time)
# stop early if it is not possible to place more items, because all have been placed or all the items outside would cause the capacity to be exceeded
if not items_by_weight:
break
# reset the potential convergence counter, since an item has been added
iter_count_since_addition = 0
else:
if calculate_times:
addition_time += get_time_since(start_time)
# register the fact of being unable to place an item this iteration
iter_count_since_addition += 1
# stop early if there have been too many iterations without changes
if iter_count_since_addition == max_iter_num_without_adding:
break
if calculate_times:
start_time = time.time()
# if there are items in the container, try to remove an item with a certain probability (different if there was a recent removal)
if solution.weight > 0 and random.uniform(0., 1.) < (consec_remove_prob if solution_before_removal else remove_prob):
# if there is no solution prior to a removal with pending re-examination
if not solution_before_removal:
# save the current solution before removing, just in case in needs to be restored later
solution_before_removal = copy.deepcopy(solution)
# reset the counter of iterations since removal, to avoid reverting earlier than needed
iter_count_since_removal = 0
# get the index of the removed item, which is randomly chosen
removed_index = solution.remove_random_item()
# with a certain probability, only if not ignoring any item yet, ignore placing again the removed item until the operation gets reverted or permanently accepted
if ignored_item_index < 0 and items_by_weight and random.uniform(0., 1.) < ignore_removed_item_prob:
ignored_item_index = removed_index
# otherwise, add the removed item to the weight-sorted list of pending-to-add items
else:
items_by_weight.insert(get_index_after_weight_limit(items_by_weight, problem.items[removed_index].weight), (removed_index, problem.items[removed_index]))
# if there is a recent removal to be confirmed or discarded after some time
if solution_before_removal:
# re-examine a removal after a certain number of iterations
if iter_count_since_removal == iter_num_to_revert_removal:
# if the value in the container has improved since removal, accept the operation in a definitive way
if solution.value > solution_before_removal.value:
# if an item had been ignored, make it available for placement again
if ignored_item_index >= 0:
items_by_weight.insert(get_index_after_weight_limit(items_by_weight, problem.items[ignored_item_index].weight), (ignored_item_index, problem.items[ignored_item_index]))
# otherwise, revert the solution to the pre-removal state
else:
solution = solution_before_removal
# after reverting a removal, have some margin to try to add items
iter_count_since_addition = 0
# reset removal data
solution_before_removal = None
iter_count_since_removal = 0
ignored_item_index = -1
# the check will be done after more iterations
else:
iter_count_since_removal += 1
if calculate_times:
removal_time += get_time_since(start_time)
if calculate_times:
start_time = time.time()
# if there are still items in the container (maybe there was a removal), modify existing placements with a certain probability
if solution.weight > 0 and random.uniform(0., 1.) < modify_prob:
# perform a random choice of the item to try to affect
_, item_index = select_item(items_by_weight)
# move to a random position of the container with a probability of 50%
if random.uniform(0., 1.) < 0.5:
solution.move_item_to(item_index, (random.uniform(min_x, max_x), random.uniform(min_y, max_y)))
# otherwise, perform a random rotation
else:
solution.rotate_item_to(item_index, random.uniform(0, 360))
if calculate_times:
modification_time += get_time_since(start_time)
if return_value_evolution:
if calculate_times:
start_time = time.time()
value_evolution.append(solution.value)
if calculate_times:
value_evolution_time += get_time_since(start_time)
# in the end, revert the last unconfirmed removal if it did not improve the container's value
if solution_before_removal and solution.value < solution_before_removal.value:
solution = solution_before_removal
if return_value_evolution:
if calculate_times:
start_time = time.time()
value_evolution[-1] = solution.value
if calculate_times:
value_evolution_time += get_time_since(start_time)
# encapsulate all times informatively in a dictionary
if calculate_times:
approx_total_time = sort_time + item_selection_time + item_discarding_time + addition_time + removal_time + modification_time + value_evolution_time
time_dict = {"Weight-sort": (sort_time, sort_time / approx_total_time), "Stochastic item selection": (item_selection_time, item_selection_time / approx_total_time), "Item discarding": (item_discarding_time, item_discarding_time / approx_total_time), "Addition (with geometric validation)": (addition_time, addition_time / approx_total_time), "Removal and reverting-removal": (removal_time, removal_time / approx_total_time), "Placement modification (with geometric validation)": (modification_time, modification_time / approx_total_time), "Keeping value of each iteration": (value_evolution_time, value_evolution_time / approx_total_time)}
if return_value_evolution:
return solution, time_dict, value_evolution
return solution, time_dict
if return_value_evolution:
return solution, value_evolution
return solution | b240e0129e35c4066ec46d9dde68b012a821e319 | 2,253 |
import PIL
from PIL import ImageDraw
def _pil_apply_edit_steps_mask(image, mask, edit_steps, inplace=False):
"""
Apply edit steps from unmasking method on a PIL image.
Args:
image (PIL.Image): The input image.
mask (Union[int, tuple[int, int, int], PIL.Image]): The mask to apply on the image, could be a single grey
            scale intensity [0, 255], an RGB tuple or a PIL Image.
edit_steps (list[EditStep]): Edit steps to be drawn.
inplace (bool): True to draw on the input image, otherwise draw on a cloned image.
Returns:
PIL.Image, the result image.
"""
if not inplace:
image = image.copy()
if isinstance(mask, PIL.Image.Image):
for step in edit_steps:
box = step.to_coord_box()
cropped = mask.crop(box)
image.paste(cropped, box=box)
else:
if isinstance(mask, int):
mask = (mask, mask, mask)
draw = ImageDraw.Draw(image)
for step in edit_steps:
draw.rectangle(step.to_coord_box(), fill=mask)
return image | c2bf05c282039ab5ff7eebefd8d9b3b635e9f74c | 2,254 |
def do_let_form(expressions, env):
"""Evaluate a let form."""
check_form(expressions, 2)
let_env = make_let_frame(expressions.first, env)
return eval_all(expressions.second, let_env) | e291880a21c99fcc05d8203dfd73dccc9084a72b | 2,255 |
def bubble(n_categories=5,n=10,prefix='category',mode=None):
"""
Returns a DataFrame with the required format for
a bubble plot
Parameters:
-----------
n_categories : int
Number of categories
n : int
Number of points for each category
prefix : string
Name for each category
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
categories=[]
for i in range(n_categories):
categories.extend([prefix+str(i+1)]*n)
return pd.DataFrame({'x':np.random.randn(n*n_categories),
'y':np.random.randn(n*n_categories),
'size':np.random.randint(1,100,n*n_categories),
'text':getName(n*n_categories,mode=mode),
'categories':categories}) | 77ba74f9bf6c09c49db8c4faa5934bd502995a5b | 2,256 |
def get_coherence(model, token_lists, measure='c_v'):
"""
Get model coherence from gensim.models.coherencemodel
:param model: Topic_Model object
:param token_lists: token lists of docs
:param measure: coherence metrics
:return: coherence score
"""
if model.method == 'LDA':
cm = CoherenceModel(model=model.ldamodel, texts=token_lists, corpus=model.corpus, dictionary=model.dictionary,
coherence=measure)
else:
topics = get_topic_words(token_lists, model.cluster_model.labels_)
cm = CoherenceModel(topics=topics, texts=token_lists, corpus=model.corpus, dictionary=model.dictionary,
coherence=measure)
return cm.get_coherence() | 1e8632eb901fc5219a4070d8e0b8e390612f7338 | 2,257 |
def run_vscode_command(
command_id: str,
*args: str,
wait_for_finish: bool = False,
return_command_output: bool = False,
):
"""Runs a VSCode command, using command server if available
Args:
command_id (str): The ID of the VSCode command to run
wait_for_finish (bool, optional): Whether to wait for the command to finish before returning. Defaults to False.
return_command_output (bool, optional): Whether to return the output of the command. Defaults to False.
Raises:
Exception: If there is an issue with the file-based communication, or
VSCode raises an exception
Returns:
Object: The response from the command, if requested.
"""
# NB: This is a hack to work around the fact that talon doesn't support
# variable argument lists
args = [x for x in args if x is not NotSet]
communication_dir_path = get_communication_dir_path()
if not communication_dir_path.exists():
if args or return_command_output:
raise Exception("Must use command-server extension for advanced commands")
print("Communication dir not found; falling back to command palette")
run_vscode_command_by_command_palette(command_id)
return
request_path = communication_dir_path / "request.json"
response_path = communication_dir_path / "response.json"
# Generate uuid that will be mirrored back to us by command server for
# sanity checking
uuid = str(uuid4())
request = Request(
command_id=command_id,
args=args,
wait_for_finish=wait_for_finish,
return_command_output=return_command_output,
uuid=uuid,
)
# First, write the request to the request file, which makes us the sole
# owner because all other processes will try to open it with 'x'
write_request(request, request_path)
# We clear the response file if it does exist, though it shouldn't
if response_path.exists():
print("WARNING: Found old response file")
robust_unlink(response_path)
# Then, perform keystroke telling VSCode to execute the command in the
# request file. Because only the active VSCode instance will accept
# keypresses, we can be sure that the active VSCode instance will be the
# one to execute the command.
actions.user.trigger_command_server_command_execution()
try:
decoded_contents = read_json_with_timeout(response_path)
finally:
# NB: We remove response file first because we want to do this while we
# still own the request file
robust_unlink(response_path)
robust_unlink(request_path)
if decoded_contents["uuid"] != uuid:
raise Exception("uuids did not match")
for warning in decoded_contents["warnings"]:
print(f"WARNING: {warning}")
if decoded_contents["error"] is not None:
raise Exception(decoded_contents["error"])
actions.sleep("25ms")
return decoded_contents["returnValue"] | 0bf1cfed5d2e02cf618ba8c7c9347d0408ef0ee3 | 2,258 |
from imucal.management import find_calibration_info_for_sensor # noqa: F401
from typing import Optional
from typing import Callable
from typing import List
from pathlib import Path
def find_calibrations_for_sensor(
sensor_id: str,
folder: Optional[path_t] = None,
recursive: bool = True,
filter_cal_type: Optional[str] = None,
custom_validator: Optional[Callable[["CalibrationInfo"], bool]] = None,
ignore_file_not_found: Optional[bool] = False,
) -> List[Path]:
"""Find possible calibration files based on the filename.
As this only checks the filenames, this might return false positives depending on your folder structure and naming.
Parameters
----------
sensor_id :
The for 4 letter/digit identifier of a sensor_type, as obtained from
:py:meth:`nilspodlib.header.Header.sensor_id`
folder :
Basepath of the folder to search. If None, tries to find a default calibration
recursive :
If the folder should be searched recursive or not.
filter_cal_type :
Whether only files obtain with a certain calibration type should be found.
This will look for the `CalType` inside the json file and could cause performance issues with many calibration
files.
If None, all found files will be returned.
For possible values, see the `imucal` library.
custom_validator :
A custom function that will be called with the CalibrationInfo object of each potential match.
This needs to load the json file of each match and could cause performance issues with many calibration files.
ignore_file_not_found :
If True this function will not raise an error, but rather return an empty list, if no
calibration files were found for the specific sensor_type.
Returns
-------
list_of_cals
List of paths pointing to available calibration objects.
"""
if not folder:
folder = _check_ref_cal_folder()
return find_calibration_info_for_sensor(
sensor_id=sensor_id,
folder=folder,
recursive=recursive,
filter_cal_type=filter_cal_type,
custom_validator=custom_validator,
ignore_file_not_found=ignore_file_not_found,
) | b28a6deb1348fd86c93ae6fd0b292626a99c2149 | 2,260 |
def reward_displacement(navenv):
""" Reward = distance to previous position"""
r = dist(navenv.current_pos, navenv.old_pos)
return r | f2d9f5bf78a93967c6e74a4fba0e25109fa1fb3b | 2,261 |
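The `dist()` helper assumed by `reward_displacement` is not shown; a plausible Euclidean-distance sketch (an assumption) would be:
import numpy as np

def dist(a, b):
    # Euclidean distance between two positions.
    return float(np.linalg.norm(np.asarray(a, dtype=float) - np.asarray(b, dtype=float)))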
import uuid
def MakeLinuxFirmware(save=True, **kwargs):
"""Create and return a LinuxFirmware for test."""
defaults = {
'manufacturer': 'Lonovo',
'serial': 'blah',
'password': '123456789',
'machine_uuid': str(uuid.uuid4()).upper(),
'owner': 'someone',
'asset_tags': ['12345'],
'hostname': 'zerocool.example.com',
}
defaults.update(kwargs)
entity = firmware.LinuxFirmwarePassword(**defaults)
if save:
entity.put()
return entity | 2b4381035ae55ffc5996d06e5a9455c7ca148a85 | 2,262 |
def get_unity_filesystem_parameters():
"""This method provide parameters required for the ansible filesystem
module on Unity"""
return dict(
filesystem_name=dict(required=False, type='str'),
filesystem_id=dict(required=False, type='str'),
nas_server_name=dict(required=False, type='str'),
nas_server_id=dict(required=False, type='str'),
description=dict(required=False, type='str'),
pool_name=dict(required=False, type='str'),
pool_id=dict(required=False, type='str'),
size=dict(required=False, type='int'),
cap_unit=dict(required=False, type='str', choices=['GB', 'TB']),
is_thin=dict(required=False, type='bool'),
data_reduction=dict(required=False, type='bool'),
supported_protocols=dict(required=False, type='str',
choices=['NFS', 'CIFS', 'MULTIPROTOCOL']),
smb_properties=dict(type='dict', options=dict(
is_smb_sync_writes_enabled=dict(type='bool'),
is_smb_notify_on_access_enabled=dict(type='bool'),
is_smb_op_locks_enabled=dict(type='bool'),
is_smb_notify_on_write_enabled=dict(type='bool'),
smb_notify_on_change_dir_depth=dict(type='int')
)),
access_policy=dict(required=False, type='str',
choices=['NATIVE', 'UNIX', 'WINDOWS']),
locking_policy=dict(required=False, type='str',
choices=['ADVISORY', 'MANDATORY']),
tiering_policy=dict(required=False, type='str', choices=[
'AUTOTIER_HIGH', 'AUTOTIER', 'HIGHEST', 'LOWEST']),
snap_schedule_name=dict(required=False, type='str'),
snap_schedule_id=dict(required=False, type='str'),
quota_config=dict(required=False, type='dict', options=dict(
grace_period=dict(required=False, type='int'),
grace_period_unit=dict(required=False, type='str', choices=['minutes', 'hours', 'days']),
default_hard_limit=dict(required=False, type='int'),
default_soft_limit=dict(required=False, type='int'),
is_user_quota_enabled=dict(required=False, type='bool'),
quota_policy=dict(required=False, type='str', choices=['FILE_SIZE', 'BLOCKS']),
cap_unit=dict(required=False, type='str', choices=['MB', 'GB', 'TB']),
), mutually_exclusive=[['is_user_quota_enabled', 'quota_policy']]),
state=dict(required=True, type='str', choices=['present', 'absent'])
) | 2cbbe284a8345341abf80948d659c2f2625b6e8f | 2,263 |
def get_time_difference(row, start_col, end_col, start_format, end_format, unit='days'):
"""
    Returns the time difference between `start_col` and `end_col` for a row
    (intended for use with ``DataFrame.apply``).
    Unit can be 'days' or 'years'.
"""
start_date = row[start_col]
end_date = row[end_col]
if pd.isnull(start_date) or pd.isnull(end_date):
return np.nan
else:
time_delta = get_time_delta(start_date, end_date, start_format, end_format)
if unit == 'days':
return time_delta.days
elif unit == 'years':
return float(time_delta.days)/365 | a73fea6bebc777ec8ff4ff89118e940f8dfdfcf1 | 2,264 |
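# A minimal sketch of the underlying computation, assuming get_time_delta (defined
# elsewhere) simply parses both dates with datetime.strptime and subtracts them:
from datetime import datetime

start = datetime.strptime("2020-01-01", "%Y-%m-%d")
end = datetime.strptime("2021-07-01", "%Y-%m-%d")
delta = end - start
print(delta.days)               # 547
print(float(delta.days) / 365)  # ~1.499 years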
def get_account(account_name, password):
"""Displays account data from the wallet.
--- Definitions ---
{"name": "account_name", "prompt": "Alias of account", "default": "Myaccount"}
{"name": "password", "prompt": "Password to decrypt private key", "default": "Mypassword"}
"""
db = get_wallet_db()
account = db.execute(
'SELECT * FROM testaccount WHERE name = ?', (account_name,)
).fetchone()
if account is None:
return None
private_key = Account.decrypt(account["privatekey"], password)
acc = Account.from_key(private_key)
return acc | 09791696c4a6c3ddfc0ceae20e50abf7f15893f5 | 2,265 |
def invert(img):
"""
Function to invert colors of an image
"""
r, g, b, a = colorsys_getRGBA(img) # Get r, g, b, a
r, g, b = 255 - r, 255 - g, 255 - b # Invert all colors
img_arr = np.dstack((r, g, b, a))
return img_arr | 013d708fb434b450404879346f25ad0d7088e3cf | 2,266 |
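# Equivalent inversion on a raw RGBA array with plain NumPy (a sketch; the
# colorsys_getRGBA helper that splits an image into channels is assumed to be
# defined elsewhere in this codebase):
import numpy as np

rgba = np.zeros((2, 2, 4), dtype=np.uint8)
rgba[..., 3] = 255  # fully opaque black pixels
r, g, b, a = (rgba[..., i] for i in range(4))
inverted = np.dstack((255 - r, 255 - g, 255 - b, a))
print(inverted[..., 0])  # every red value becomes 255 (white)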
def spearman_kendall_test(df, item, alpha=0.05, increasing=True,
rank_in='Rank',
category_in='category',
dataset_in='dataset',
userid_in='userid'
):
"""
    Run Spearman's and Kendall's tests for an increasing or decreasing trend.
    :param df: dataframe, it should include both column 'item' and column 'ranking'
    :param item: string, column of target's label
    :param rank_in: string, column of rank's label
:param category_in: string, column of category's label
:param userid_in: string, column of userid's label
:param dataset_in: string, column of dataset's label
:param alpha: significant level
:param increasing: bool, test for increasing trend or decreasing trend
:return: dataframe filled in all test results
"""
category = sorted(list(set(df[category_in].tolist())))
dataset = sorted(list(set(df[dataset_in].tolist())))
test_result = []
for ds in dataset:
for cat in category:
count_sm, count_kd = 0, 0
df_temp = df[(df[dataset_in] == ds) & (df[category_in] == cat)]
ur_ds = df_temp[userid_in].unique().tolist()
for user in ur_ds:
rank = df_temp[df_temp[userid_in] == user][rank_in].tolist()
item_specify = df_temp[df_temp[userid_in] == user][item].tolist()
coef_sm, p_sm = spearmanr(rank, item_specify)
coef_kd, p_kd = kendalltau(rank, item_specify)
if increasing:
if (coef_sm > 0) & (p_sm < alpha):
count_sm += 1
if (coef_kd > 0) & (p_kd < alpha):
count_kd += 1
else:
if (coef_sm < 0) & (p_sm < alpha):
count_sm += 1
if (coef_kd < 0) & (p_kd < alpha):
count_kd += 1
test_result.append([ds, cat,
count_sm, count_sm / len(ur_ds),
count_kd, count_kd / len(ur_ds),
len(ur_ds)]
)
stats_test = pd.DataFrame(test_result, columns=[dataset_in,
category_in,
'SpN', 'SpP', 'Kn', 'Kp',
'total']
).sort_values([dataset_in, category_in])
return stats_test | d8c85f20866a68a7070be89c57cdb18d2b33c828 | 2,267 |
def array2string(array, _depth=0):
"""
Recursively create a initializer list style string from an iterable with
multiple dimensions.
Args:
array (iterable): input iterable which is expected to have elements that
can be converted to strings with `str()`.
_depth (int): variable tracking the current recursion depth
"""
if hasattr(array, 'name'):
return array.name
elif not hasattr(array, '__len__'):
return float_nsf(array)
else:
string = ''
array_len = len(array)
for i in range(array_len):
string += array2string(array[i], _depth=_depth + 1) + ', '
if (array_len > 1) or (_depth == 0) :
return '{' + string[0:-2] + '}'
else:
return string[0:-2] | 948bfe9e63f16c588001707125afe8d2867ff6b6 | 2,268 |
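# Hypothetical usage, stubbing the module's float_nsf helper (assumed to format a
# float to a string) with plain str() for illustration:
float_nsf = str
print(array2string([[1.0, 2.0], [3.0, 4.0]]))  # {{1.0, 2.0}, {3.0, 4.0}}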
def rail_help_wrapper(prog):
""" So formatter_class's max_help_position can be changed. """
return RailHelpFormatter(prog, max_help_position=40) | aa821f68ea1587a051be59f52187dbf9b1dd2d91 | 2,269 |
def project_dashboard(request):
"""
    The function rendering the Project Dashboard page.
:param request:
:return:
"""
global all_vuln, \
total_web, \
all_high, \
total_network, \
all_medium, \
all_low, \
all_web_high, \
all_web_medium, \
all_network_medium, \
all_web_low, \
all_network_low, \
all_network_high
all_project = project_db.objects.all()
return render(request,
'project_dashboard.html',
{'all_project': all_project}) | 0459776f846a8f089fefd528605d2e850aeced5c | 2,270 |
def getSelfRole(store):
"""
Retrieve the Role which corresponds to the user to whom the given store
belongs.
"""
return getAccountRole(store, userbase.getAccountNames(store)) | d548ba6406ec92df7777498574f86cf44737ba8b | 2,271 |
def forwardCOMDQ(robot, m = 0, symbolic = False):
"""
    Using Dual Quaternions, this function computes forward kinematics to the m-th center of mass, given joint positions in radians. The robot's kinematic parameters have to be set before using this function.
robot: object (robot.jointsPositions, robot.linksLengths)
m: int
"""
framesDQ, fkDQ = forwardDQ(robot, m = m, symbolic = symbolic)
# Initial conditions
framesCOMDQ = [np.array([[1], [0], [0], [0], [0], [0], [0], [0]]) if not symbolic else Matrix([1, 0, 0, 0, 0, 0, 0, 0])]
# Gets Denavit - Hartenberg Matrix
if not symbolic:
if not robot.dhParametersCOM:
comDH = dh.centersOfMass(robot)
else:
comDH = np.array(robot.dhParameters([float(q) for q in robot.jointsPositions], [float(Lcom) for Lcom in robot.centersOfMass]))
else:
comDH = robot.symbolicDHParametersCOM
i = 1
for frame in comDH[1 : , :]:
if i > m:
break
else:
if not symbolic:
# Center of Mass Homogeneous Transformation Matrix
COM = dq.leftOperator(dq.Rz(frame[0])).dot(dq.rightOperator(dq.Rx(frame[3]))).dot(dq.rightOperator(dq.Tx(frame[2]))).dot(dq.Tz(frame[1]))
# Rigid body's Dual Quaternion
B = dq.leftOperator(dq.conjugate(framesDQ[i - 1])).dot(framesDQ[i])
# Forward kinematics to Center of Mass
fkCOMDQ = dq.leftOperator(framesDQ[i]).dot(dq.rightOperator(COM)).dot(dq.conjugate(B))
else:
# Center of Mass Homogeneous Transformation Matrix
COM = dq.symbolicLeftOperator(dq.symbolicRz(frame[0])) * dq.symbolicRightOperator(dq.symbolicRx(frame[3])) * dq.symbolicRightOperator(dq.symbolicTx(frame[2])) * dq.symbolicTz(frame[1])
# Rigid body's Dual Quaternion
B = dq.symbolicLeftOperator(dq.symbolicConjugate(framesDQ[i - 1])) * framesDQ[i]
# Forward kinematics to Center of Mass
fkCOMDQ = nsimplify(simplify(dq.symbolicLeftOperator(framesDQ[i]) * dq.symbolicRightOperator(COM) * dq.symbolicConjugate(B)), tolerance = 1e-10, rational = False)
framesCOMDQ.append(fkCOMDQ)
i += 1
return framesCOMDQ, fkCOMDQ | e197839d98cdbb8b15449d33c8677adc5f9d3e8a | 2,272 |
def gen_mode():
"""获取玩家想要考试的模式"""
while True:
mode = input("如何考试?\n输入1顺序考试\n输入2乱序考试\n>>")
if mode in ("1", "2"):
return mode
else:
print()
print("非法输入,请输入\"1\"或\"2\"")
print("你不需要输入双引号")
print("--------------------------------") | eb3ff4a0812fe088f3acb1302730f5f48c6fbcda | 2,273 |
def find_movers(threshold, timeframe: Timeframe, increasing=True, decreasing=False, max_price=None):
"""
Return a dataframe with row index set to ASX ticker symbols and the only column set to
the sum over all desired dates for percentage change in the stock price. A negative sum
implies a decrease, positive an increase in price over the observation period.
"""
assert threshold >= 0.0
# NB: missing values will be imputed here, for now.
cip = company_prices(all_stocks(), timeframe, fields="change_in_percent", missing_cb=None)
movements = cip.sum(axis=0)
results = movements[movements.abs() >= threshold]
print("Found {} movers before filtering: {} {}".format(len(results), increasing, decreasing))
if not increasing:
results = results.drop(results[results > 0.0].index)
if not decreasing:
results = results.drop(results[results < 0.0].index)
#print(results)
if max_price is not None:
ymd = latest_quotation_date('ANZ')
stocks_lte_max_price = [q.asx_code for q in valid_quotes_only(ymd) if q.last_price <= max_price]
results = results.filter(stocks_lte_max_price)
print("Reporting {} movers after filtering".format(len(results)))
return results | ff1524d74dfe76630fb45b24119f57c3289ba355 | 2,274 |
from bigdl.nano.deps.automl.hpo_api import create_optuna_pl_pruning_callback
def create_pl_pruning_callback(*args, **kwargs):
"""Create PyTorchLightning Pruning Callback. Optuna Only."""
return create_optuna_pl_pruning_callback(*args, **kwargs) | 0698e800ed110d430422b7a3fcc72e100ed87658 | 2,275 |
import requests
def get_all_token_volume_by_direction(chain:str, direction:str):
"""
chain: Allowed: ethereum ┃ avalanche ┃ bsc ┃ polygon ┃ arbitrum ┃ fantom ┃ harmony ┃ boba ┃ optimism ┃ moonriver ┃ aurora
direction: Allowed: in ┃ out
"""
chain = chain.lower()
direction = direction.lower()
chains = ["ethereum", "avalanche", "bsc", "polygon", "arbitrum", "fantom", "harmony", "boba", "optimism", "moonriver", "aurora"]
directions = ["in", "out"]
params_ok = False
if chain in chains and direction in directions:
params_ok = True
if params_ok:
endpoint = f"{server}/api/v1/analytics/volume/{chain}/{direction}"
data = requests.get(endpoint)
if data.status_code == 200:
return data.json()["data"]
else:
print("request failed")
else:
print("wrong parameters") | be3285e4e4ac65075a98aeb853e68b1b5f6658cb | 2,276 |
def _interactively_fix_missing_variables(project, result):
"""Return True if we need to re-prepare."""
if project.problems:
return False
if not console_utils.stdin_is_interactive():
return False
# We don't ask the user to manually enter CONDA_PREFIX
# (CondaEnvRequirement) because it's a bizarre/confusing
# thing to ask.
can_ask_about = [status
for status in result.statuses
if (not status and isinstance(status.requirement, EnvVarRequirement) and not isinstance(
status.requirement, CondaEnvRequirement))]
if can_ask_about:
print("(Use Ctrl+C to quit.)")
start_over = False
values = dict()
for status in can_ask_about:
reply = console_utils.console_input("Value for " + status.requirement.env_var + ": ",
encrypted=status.requirement.encrypted)
if reply is None:
return False # EOF
reply = reply.strip()
if reply == '':
start_over = True
break
values[status.requirement.env_var] = reply
if len(values) > 0:
status = project_ops.set_variables(project, result.env_spec_name, values.items(), result)
if status:
return True
else:
console_utils.print_status_errors(status)
return False
else:
return start_over | 73393e453043132fc340be10411cb83569becf8c | 2,277 |
def build_node_descr(levels, switch=False):
"""
Produces a node description of the above binary trees
"""
num_parents = sum([2**i for i in range(levels-1)])
parents, children = tee(character_iterator(switch))
next(children)
node_descr = []
for parent_ident in islice(parents, num_parents):
node_descr.append((parent_ident, next(children), "L"))
node_descr.append((parent_ident, next(children), "R"))
return node_descr | c682cf5e5946b614563dda31ace76f259df02d47 | 2,278 |
import random
def random_sources(xSize, ySize, zSize, number):
""" returns a list of random positions in the grid where the sources of nutrients (blood vessels) will be """
src = []
for _ in range(number):
x = random.randint(0, xSize-1)
y = random.randint(0, ySize-1)
z = random.randint(0, zSize-1)
if (x, y, z) not in src:
src.append((x,y,z))
return src | 17dab43ea2468a11e3720ff0f7eb33b605371496 | 2,279 |
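# Quick usage sketch: pick up to 5 source positions in a 10x10x10 grid. Note that
# duplicate draws are skipped rather than re-drawn, so the list may hold fewer
# than `number` positions:
print(random_sources(10, 10, 10, 5))  # e.g. [(3, 7, 1), (0, 2, 9), ...]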
import numpy
def sqeuclidean(
x_mat: 'Tensor', y_mat: 'Tensor', device: str = 'cpu'
) -> 'numpy.ndarray':
"""Squared euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: tensorflow array with ndim=2
:param y_mat: tensorflow array with ndim=2
    :param device: the computational device to run on, either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
device = tf.device('/GPU:0') if device == 'cuda' else tf.device('/CPU:0')
with _get_tf_device(device):
return tf.reduce_sum(
(tf.expand_dims(x_mat, 1) - tf.expand_dims(y_mat, 0)) ** 2, 2
).numpy() | 3265a062d5a0eece85a3c83e99ad2f9ab27c62cb | 2,280 |
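# Sanity check of the distance definition with plain NumPy (just a sketch; the
# function itself requires TensorFlow and the module-level _get_tf_device helper):
import numpy as np

x = np.array([[0.0, 0.0], [1.0, 1.0]])
y = np.array([[3.0, 4.0]])
print(((x[:, None, :] - y[None, :, :]) ** 2).sum(-1))  # [[25.], [13.]]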
from typing import Optional
def get(*, db_session, tag_id: int) -> Optional[Tag]:
"""Gets a tag by its id."""
return db_session.query(Tag).filter(Tag.id == tag_id).one_or_none() | 752da542eef22ebd977b27c922341690bac2f5ab | 2,281 |
def plot_lines(axes, xdata, ydata, yerrors=None, cdata=None, cmap=None, line_spec='-o', *args, **kwargs):
"""
Plot lines on given matplotlib axes subplot
Uses matplotlib.plot or matplotlib.errorbar if yerrors is not None
:param axes: matplotlib figure or subplot axes, None uses current axes
:param xdata: array[n] data on x axis
:param ydata: list[n] of array[n] data on y axis
:param yerrors: list[m] of array[n] errors on y axis (or None)
:param cdata: list[n] of values to define line colour
:param cmap: name of colormap to generate colour variation in lines
:param line_spec: str or list[m] of str matplotlib.plot line_spec
:param args: additional arguments
:param kwargs: additional arguments
:return: output of plt.plot [line], or plt.errorbar [line, xerrors, yerrors]
"""
if axes is None:
axes = plt.gca()
nplots = len(ydata)
if xdata is None:
xdata = [range(len(y)) for y in ydata]
elif len(xdata) != nplots:
xdata = [xdata] * nplots
if yerrors is None:
yerrors = [None] * nplots
elif len(yerrors) != nplots:
yerrors = [yerrors] * nplots
if cmap is None:
cmap = 'viridis'
if cdata is None:
cdata = np.arange(nplots)
else:
cdata = np.asarray(cdata)
cnorm = cdata - cdata.min()
cnorm = cnorm / cnorm.max()
cols = plt.get_cmap(cmap)(cnorm)
line_spec = fn.liststr(line_spec)
if len(line_spec) != nplots:
line_spec = line_spec * nplots
lines = []
for n in range(nplots):
lines += plot_line(axes, xdata[n], ydata[n], yerrors[n], line_spec[n], c=cols[n], *args, **kwargs)
return lines | 5c618745ba503a206d594bbac9cbe831d1124625 | 2,282 |
import math
import numpy as np
def conv_binary_prevent_overflow(array, structure):
"""
Make sure structure array has great enough positive bitdepth
to be convolved with binary primary array.
Parameters
----------
array : ndarray of bool or int, 2D
Primary integer array to convolve.
Must be a binary array of only zero/False and one/True.
structure : ndarray of bool or int, 2D
Secondary, smaller integer array to convolve with `array`.
Must be a binary array of only zero/False and one/True.
Returns
-------
structure : ndarray, possible uint cast of `structure`
Either the same `structure` array or a cast or `structure`
to a uint data type with more positive bitdepth than the
input array.
"""
# Get upper bound on minimum positive bitdepth for convolution.
conv_bitdepth_pos = math.log(np.prod(structure.shape)+1, 2)
dtype_bitdepths_pos = (1, 7, 8, 15, 16, 31, 32, 63, 64)
for b in dtype_bitdepths_pos:
if conv_bitdepth_pos <= b:
conv_bitdepth_pos = b
break
# Parse input array and structure data type for bitdepth.
input_bitdepth_pos = 0
for arr in (array, structure):
arr_dtype = arr.dtype
        if arr_dtype == np.bool_:
            arr_posbits = 1
        elif np.issubdtype(arr_dtype, np.signedinteger):
            arr_posbits = int(str(arr_dtype).replace('int', '')) - 1
        elif np.issubdtype(arr_dtype, np.unsignedinteger):
            arr_posbits = int(str(arr_dtype).replace('uint', ''))
elif np.issubdtype(arr_dtype, np.floating):
arr_posbits = np.inf
else:
arr_posbits = 0
input_bitdepth_pos = max(input_bitdepth_pos, arr_posbits)
if input_bitdepth_pos == 0:
# Handle unknown data type by casting structure to
# maximum possible bitdepth.
structure = structure.astype(np.uint64)
else:
# If maximum positive bitdepth from inputs is too low,
# cast structure to minimum positive bitdepth for conovlution.
if input_bitdepth_pos < conv_bitdepth_pos:
if (conv_bitdepth_pos % 2) != 0:
conv_bitdepth_pos += 1
structure = structure.astype(eval('np.uint{}'.format(conv_bitdepth_pos)))
return structure | dd82382c1109e2ce9d15bf0abd563f32b8e8b585 | 2,283 |
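# Usage sketch (assuming numpy is imported as np): a 17x18 structuring element has
# 306 cells, which exceeds the positive range of uint8, so it is up-cast to uint16:
mask = np.zeros((50, 50), dtype=bool)
kernel = np.ones((17, 18), dtype=np.uint8)
print(conv_binary_prevent_overflow(mask, kernel).dtype)  # uint16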
def filter_freq_and_csq(mt: hl.MatrixTable, data_type: str, max_freq: float, least_consequence: str):
"""
Filters MatrixTable to include variants that:
1. Have a global AF <= `max_freq`
2. Have a consequence at least as severe as `least_consequence` (based on ordering from CSQ_ORDER)
:param MatrixTable mt: Input MT
:param str data_type: One of 'exomes' or 'genomes'
:param float max_freq: Max. AF to keep
:param str least_consequence: Least consequence to keep.
:return: Filtered MT
:rtype: MatrixTable
"""
vep_ht = hl.read_table(annotations_ht_path(data_type, 'vep'))
freq = hl.read_table(annotations_ht_path(data_type, 'frequencies'))
mt = mt.select_rows(
vep=vep_genes_expr(vep_ht[mt.row_key].vep, least_consequence),
af=hl.float32(freq[mt.row_key].freq[0].AF)
)
mt = mt.filter_rows(hl.is_defined(mt.vep) & (hl.len(mt.vep) > 0) & (mt.af > 0) & (mt.af <= max_freq))
mt = mt.explode_rows(mt.vep)
mt = mt.rename({'vep': 'gene_id'})
return mt | a20796ccb6fc4db1f93c91f07f5756f390099bee | 2,284 |
def get_regular_intervals(
pre_sfes: list,
post_sfes: list,
pre_keep_flag: bool,
post_keep_flag: bool,
) -> list:
"""
Calculates the intervals for the "regular" egg laying epoch. If pre_keep_flag,
the "regular" epoch is the pre-breakpoint region. If post_keep_flag, the
"regular" epoch is the post-breakpoint region. If both flags are True,
the whole egg-laying trajectory is considered "regular".
Args:
pre_sfes (list): list of pre region SFES
post_sfes (list): list of post region SFES
pre_keep_flag (bool): True if the pre region intervals are considered regular
post_keep_flag (bool): True if the post region intervals are considered regular
Returns:
A list of intervals considered regular
"""
reg_intervals = []
if pre_keep_flag:
pre_sfes_sec = [(x * 60 * 60) for x in pre_sfes]
pre_intervals = np.diff(pre_sfes_sec, n=1)
pre_intervals = normalize_tiny_intervals(pre_intervals)
reg_intervals.extend(pre_intervals)
if post_keep_flag:
post_sfes_sec = [(x * 60 * 60) for x in post_sfes]
post_intervals = np.diff(post_sfes_sec, n=1)
post_intervals = normalize_tiny_intervals(post_intervals)
reg_intervals.extend(post_intervals)
return reg_intervals | 7d92006fc286e3b8c2977c023070388581f192fa | 2,285 |
async def async_setup(hass, config):
"""Platform setup, do nothing."""
if DOMAIN not in config:
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=dict(config[DOMAIN])
)
)
return True | be7c81bc7c91251c1c02696d8daa7eaac1f1d326 | 2,286 |
def get_new_bucket(target=None, name=None, headers=None):
"""
Get a bucket that exists and is empty.
Always recreates a bucket from scratch. This is useful to also
reset ACLs and such.
"""
if target is None:
target = targets.main.default
connection = target.connection
if name is None:
name = get_new_bucket_name()
# the only way for this to fail with a pre-existing bucket is if
# someone raced us between setup nuke_prefixed_buckets and here;
# ignore that as astronomically unlikely
bucket = connection.create_bucket(name, location=target.conf.api_name, headers=headers)
return bucket | d46156ee36304b6a13ebd2d467373c7873b8b075 | 2,287 |
def angle_rms(ang, axis=None, period=2*np.pi):
"""returns the rms of angles, uses the property that rms(x)**2 = mean(x)**2 + std(x)**2"""
    # rms(x)**2 = mean(x)**2 + std(x)**2, since E[X**2] = E[X]**2 + E[(X - E[X])**2]
    # so rms(x) = hypot(mean(x), std(x))
m,s = angle_mean_std(ang,axis,period)
return np.hypot(m, s) | 5b2c8fc865762b7856fc6a8c68e901dbf367690d | 2,288 |
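# Numerical check of the identity used above, rms(x)**2 == mean(x)**2 + std(x)**2
# (plain NumPy; angle_mean_std is assumed to return the circular mean and std elsewhere):
import numpy as np

x = np.array([0.1, 0.5, 1.2, 2.0])
rms = np.sqrt(np.mean(x ** 2))
print(np.isclose(rms, np.hypot(np.mean(x), np.std(x))))  # True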
import functools
def check_units(*units_by_pos, **units_by_name):
"""Create a decorator to check units of function arguments."""
def dec(func):
# Match the signature of the function to the arguments given to the decorator
sig = signature(func)
bound_units = sig.bind_partial(*units_by_pos, **units_by_name)
# Convert our specified dimensionality (e.g. "[pressure]") to one used by
# pint directly (e.g. "[mass] / [length] / [time]**2). This is for both efficiency
# reasons and to ensure that problems with the decorator are caught at import,
# rather than runtime.
dims = {name: (orig, units.get_dimensionality(orig.replace('dimensionless', '')))
for name, orig in bound_units.arguments.items()}
defaults = {name: sig.parameters[name].default for name in sig.parameters
if sig.parameters[name].default is not Parameter.empty}
@functools.wraps(func)
def wrapper(*args, **kwargs):
# Match all passed in value to their proper arguments so we can check units
bound_args = sig.bind(*args, **kwargs)
bad = list(_check_argument_units(bound_args.arguments, defaults, dims))
# If there are any bad units, emit a proper error message making it clear
# what went wrong.
if bad:
msg = f'`{func.__name__}` given arguments with incorrect units: '
msg += ', '.join(f'`{arg}` requires "{req}" but given "{given}"'
for arg, given, req in bad)
if 'none' in msg:
msg += ('\nAny variable `x` can be assigned a unit as follows:\n'
' from metpy.units import units\n'
' x = x * units.meter / units.second')
raise ValueError(msg)
return func(*args, **kwargs)
return wrapper
return dec | b7bd0c78d1339032a442c4e3a354cc1fa9e804b2 | 2,289 |
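# Hypothetical usage sketch (requires the metpy unit registry plus the module's
# private _check_argument_units helper, so shown as comments only):
#
#     @check_units('[pressure]', '[temperature]')
#     def dry_lapse(pressure, temperature):
#         ...
#
# Calling dry_lapse(850 * units.hPa, 15 * units.degC) would pass the dimensionality
# check, while dry_lapse(850, 15) would raise a ValueError describing the expected units.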
def check_nifti_dim(fname, data, dim=4):
"""
Remove extra dimensions.
Parameters
----------
fname : str
The name of the file representing `data`
data : np.ndarray
The data which dimensionality needs to be checked
dim : int, optional
The amount of dimensions expected/desired in the data.
Returns
-------
np.ndarray
If `len(data.shape)` = `dim`, returns data.
If `len(data.shape)` > `dim`, returns a version of data without the
dimensions above `dim`.
Raises
------
ValueError
If `data` has less dimensions than `dim`
"""
if len(data.shape) < dim:
raise ValueError(f'{fname} does not seem to be a {dim}D file. '
                         f'Please provide a {dim}D nifti file.')
if len(data.shape) > dim:
LGR.warning(f'{fname} has more than {dim} dimensions. Removing D > {dim}.')
for ax in range(dim, len(data.shape)):
data = np.delete(data, np.s_[1:], axis=ax)
return np.squeeze(data) | 0f814cc3eaca7242bf3393a1e749df5ebe7b128a | 2,290 |
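# Usage sketch (assumes numpy is imported as np and LGR is a module-level logger):
# a well-formed 4-D array passes through unchanged apart from squeezing singleton
# axes; arrays with more than `dim` dimensions would be trimmed with a warning.
import numpy as np

vol = np.random.rand(4, 4, 4, 10)
print(check_nifti_dim("func.nii.gz", vol).shape)  # (4, 4, 4, 10)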
def bar_chart(x_data=None, y_data=None, title="Chart Title", x_label=None, y_label=None,
color="blue", figsize=(10,5)):
"""
This function requires two Pandas data series for x and y data.
Optionally: the x label, y label, color, title, and size may be set.
This function returns a bar chart with the specified parameters.
"""
if x_data is None or y_data is None:
print("No data passed.")
return None
if x_label is None:
x_label = x_data.name
if y_label is None:
y_label = y_data.name
fig = plt.figure(figsize=figsize) #Sets size of the bar chart.
plt.bar(x_data, y_data, color=color) #Plots x and y and set the color.
plt.title(title) #Sets title of the chart.
plt.xlabel(x_label) #Sets x-axis label.
plt.ylabel(y_label) #Sets y-axis label.
plt.xticks(x_data, rotation='45') #Setting x-tick labels and rotating 45 degrees.
return plt | 23b8d0c1a50ec4909d8b46c29ac75a483b2e221c | 2,291 |
def get_qtobject_for_uipath(pathstr):
""" Returns the QtObject for a Maya UI path.
Ensure that the path starts from the Maya main window and that there are no \
empty elements in it as this will fail.
"""
split_pathstr = pathstr.split("|")
return _find_qobject(get_maya_main_window(), split_pathstr) | fc39ffe792a25af33663c7843a934045df4a91b0 | 2,292 |
def parse_study_with_run(soup):
"""Given a BeautifulSoup object representing a study, parse out relevant
information.
:param soup: a BeautifulSoup object representing a study
:type soup: bs4.BeautifulSoup
:return: a dictionary containing study information and run information
:rtype: dict
"""
accession = soup.find('PRIMARY_ID', text=PROJECT_PARSER).text
title = soup.find('STUDY_TITLE').text
abstract = soup.find('STUDY_ABSTRACT').text
# Returns all of the runs associated with a study
runs = []
run_parsed = soup.find('ID', text=RUN_PARSER)
if run_parsed:
run_ranges = run_parsed.text.split(",")
for run_range in run_ranges:
if '-' in run_range:
runs += parse_run_range(run_range)
else:
runs.append(run_range)
else:
logger.warning(
'Failed to parse run information from ENA XML. Falling back to '
'ENA search...'
)
# Sometimes the SRP does not contain a list of runs (for whatever reason).
# A common trend with such projects is that they use ArrayExpress.
# In the case that no runs could be found from the project XML,
# fallback to ENA SEARCH.
runs = search_ena_study_runs(accession)
return {
'accession': accession,
'title': title,
'abstract': abstract,
'runlist': runs
} | 44405ed14b67c03a44fef876d3d9ed5afc703489 | 2,293 |
def create_embed(**kwargs) -> Embed:
"""Creates a discord embed object."""
embed_type = kwargs.get('type', Embed.Empty)
title = kwargs.get('title', Embed.Empty)
description = kwargs.get('description', Embed.Empty)
color = kwargs.get('color', get_default_color())
timestamp = kwargs.get('timestamp', Embed.Empty)
url = kwargs.get('url', Embed.Empty)
return Embed(
type=embed_type,
title=title,
description=description,
url=url,
color=color,
timestamp=timestamp
) | 4396d03eab15ccc05ceff7cc8cfcd1b93e85894a | 2,294 |
from typing import Any
from typing import List
from typing import Dict
def _safe_types(
*, test_data: Any, cached_data: Any, key_rules: List[KeyRule],
) -> Dict:
"""Convert data and key_rules to safe data types for diffing.
Args:
test_data: data to compare
cached_data: data to compare
key_rules: list of key rules to apply
Returns:
Dict: safe keyword args for diff_with_rules
"""
wrapped_key_rules = []
for key_rule in key_rules:
if isinstance(cached_data, list):
key_rule.pattern = [_WRAP_KEY] + key_rule.pattern
wrapped_key_rules.append(key_rule)
return {
'old_dict': _wrap_data(cached_data),
'new_dict': _wrap_data(test_data),
'key_rules': wrapped_key_rules,
} | bf44aa091fcc751247fbc4ae92e826746c226cfc | 2,296 |
def _flatten_output(attr_dict, skip: list=[]):
"""
    Flatten the output dict node.
    node_collection is a list to accumulate the nodes that are not unfolded.
    :param skip: a list of keys (formatted as parent_key.key) of Dict names that
        will not be collected into the json file.
For output nodes not being expanded, write down the uuid and datatype for future query.
"""
# do_not_unfold = ["band_parameters", "scf_parameters", "seekpath_parameters"]
for key, value in attr_dict.items():
if key in skip:
continue
if isinstance(value, AttributeDict):
# keep on unfold if it is a namespace
_flatten_output(value, skip)
elif isinstance(value, orm.Dict):
attr_dict[key] = value.get_dict()
elif isinstance(value, orm.Int):
attr_dict[key] = value.value
else:
# node type not handled attach uuid
attr_dict[key] = {
'uuid': value.uuid,
'datatype': type(value),
}
# print(archive_uuids)
return attr_dict | 75e2f41440b819ba939eb1e12c36a9ff6d894708 | 2,297 |
def get_tpu_estimator(
working_dir,
model_fn,
iterations_per_loop=320,
keep_checkpoint_max=20,
use_tpu=False,
train_batch_size=64):
"""Obtain an TPU estimator from a directory.
Args:
working_dir: the directory for holding checkpoints.
model_fn: an estimator model function.
iterations_per_loop: number of steps to run on TPU before outfeeding
metrics to the CPU. If the number of iterations in the loop would exceed
the number of train steps, the loop will exit before reaching
--iterations_per_loop. The larger this value is, the higher
the utilization on the TPU. For CPU-only training, this flag is equal to
`num_epochs * num_minibatches`.
keep_checkpoint_max: the maximum number of checkpoints to save in checkpoint
directory.
use_tpu: if True, training happens on TPU.
train_batch_size: minibatch size for training which is equal to total number
of data // number of batches.
Returns:
Returns a TPU estimator.
"""
# If `TPUConfig.per_host_input_for_training` is `True`, `input_fn` is
# invoked per host rather than per core. In this case, a global batch size
# is transformed a per-host batch size in params for `input_fn`,
# but `model_fn` still gets per-core batch size.
run_config = tf.estimator.tpu.RunConfig(
master=FLAGS.master,
evaluation_master=FLAGS.master,
model_dir=working_dir,
save_checkpoints_steps=iterations_per_loop,
save_summary_steps=iterations_per_loop,
keep_checkpoint_max=keep_checkpoint_max,
session_config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=True),
tpu_config=tf.estimator.tpu.TPUConfig(
iterations_per_loop=iterations_per_loop,
per_host_input_for_training=True,
tpu_job_name=FLAGS.tpu_job_name))
return tf.estimator.tpu.TPUEstimator(
use_tpu=use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=train_batch_size) | 33f78f97aba4011fd8f637ff71a64e8716d5713a | 2,298 |
def rho_err(coeffs, rho, z, density_func):
"""
Returns the difference between the estimated and actual data
"""
soln = density_func(z, coeffs)
return rho - soln | 4a2d7c7243cad062d8568ab72599b4d8be26f874 | 2,299 |
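# Example residual evaluation with a simple linear density model rho(z) = a*z + b
# (a hypothetical density_func; the real one is supplied by the caller):
import numpy as np

density = lambda z, coeffs: coeffs[0] * z + coeffs[1]
z = np.array([0.0, 1.0, 2.0])
rho = np.array([1.0, 3.0, 5.0])
print(rho_err([2.0, 1.0], rho, z, density))  # [0. 0. 0.] -- a perfect fit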
def ui_form_stations():
"""
This function lists all stations
"""
# get _all_ the stations
stations = station_get(0)
# render stations in HTML template
return render_template("stations.html", result=stations) | 07f53e694e135e0612c871c279e63432eeedf966 | 2,300 |
def fitcreds():
"""
returns the ['credentials'] dictionary
:return: dictionary or None
"""
return fitcfg().get('credentials', None) | d8c30b43ae3c91fc7f08d2a47b401416da8b7d4b | 2,302 |
import re
def brace_expand(str):
"""Perform brace expansion, a lá bash."""
match = re.search('{(.+?)(,.*?)?}', str)
if match:
strings = brace_expand(replace_range(str,
match.start(),
match.end(),
match.group(1)))
if match.group(2):
strings.extend(brace_expand(replace_range(str,
match.start(),
match.end(),
match.group(2)[1:])))
return strings
else: # No braces were in the string.
return [str] | a4426eb8d1ecfc3ac8d9b9ecff57c8364b372042 | 2,303 |
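# Usage sketch. replace_range is defined elsewhere in this module; a hypothetical
# stand-in matching the call sites above would be:
def replace_range(s, start, end, replacement):
    return s[:start] + replacement + s[end:]

print(brace_expand("file.{txt,md}"))  # ['file.txt', 'file.md']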
def sort_terms(node, parent_children, hierarchy):
"""Recursively create a list of nodes grouped by category."""
for c in parent_children.get(node, []):
hierarchy.append(c)
sort_terms(c, parent_children, hierarchy)
return hierarchy | 5ae737206f3859c01da6b8e9475db688e53a8d13 | 2,305 |
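# Example: flatten a parent -> children mapping into a depth-first ordering under "root":
tree = {"root": ["a", "b"], "a": ["a1", "a2"]}
print(sort_terms("root", tree, []))  # ['a', 'a1', 'a2', 'b']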
def sumPm(mirror):
"""Returns sum of all mechanical power from active machines"""
sysPm = 0.0
# for each area
for area in mirror.Area:
# reset current sum
area.cv['Pm'] = 0.0
# sum each active machine Pm to area agent
for mach in area.Machines:
if mach.cv['St'] == 1:
area.cv['Pm'] += mach.cv['Pm']
# sum area agent totals to system
sysPm += area.cv['Pm']
return sysPm | 996891a5386f59fb111b5726552537d67e9c419c | 2,306 |