# Source files from the sccloud 0.14.0 package (PyPI).
import numpy as np
from sccloud import io, tools, demuxEM
def run_demuxEM_pipeline(input_adt_file, input_rna_file, output_name, **kwargs):
# load input data
adt = io.read_input(input_adt_file, genome="_ADT_")
print("ADT file is loaded.")
data = io.read_input(input_rna_file, genome=kwargs["genome"], concat_matrices=True)
print("RNA file is loaded.")
# Filter the RNA matrix
data.obs["n_genes"] = data.X.getnnz(axis=1)
data.obs["n_counts"] = data.X.sum(axis=1).A1
obs_index = np.logical_and.reduce(
(
data.obs["n_genes"] >= kwargs["min_num_genes"],
data.obs["n_counts"] >= kwargs["min_num_umis"],
)
)
data._inplace_subset_obs(obs_index)
data.var["robust"] = True
# run demuxEM
demuxEM.estimate_background_probs(adt, random_state=kwargs["random_state"])
print("Background probability distribution is estimated.")
demuxEM.demultiplex(
data,
adt,
min_signal=kwargs["min_signal"],
alpha=kwargs["alpha"],
n_threads=kwargs["n_jobs"],
)
print("Demultiplexing is done.")
# annotate raw matrix with demuxEM results
genome_indexed_raw_data = io.read_input(
input_rna_file, return_type="MemData", concat_matrices=False
)
for keyword in genome_indexed_raw_data.listKeys():
array2d = genome_indexed_raw_data.getData(keyword)
barcodes = array2d.barcode_metadata.index
idx = barcodes.isin(data.obs_names)
selected = barcodes[idx]
demux_type = np.empty(barcodes.size, dtype="object")
demux_type[:] = ""
demux_type[idx] = data.obs.loc[selected, "demux_type"]
array2d.barcode_metadata["demux_type"] = demux_type
assignment = np.empty(barcodes.size, dtype="object")
assignment[:] = ""
assignment[idx] = data.obs.loc[selected, "assignment"]
array2d.barcode_metadata["assignment"] = assignment
if "assignment.dedup" in data.obs:
assignment_dedup = np.empty(barcodes.size, dtype="object")
assignment_dedup[:] = ""
assignment_dedup[idx] = data.obs.loc[selected, "assignment.dedup"]
array2d.barcode_metadata["assignment.dedup"] = assignment_dedup
print("Demultiplexing results are added to raw expression matrices.")
# generate plots
if kwargs["gen_plots"]:
demuxEM.plot_adt_hist(
adt, "hto_type", output_name + ".ambient_hashtag.hist.pdf", alpha=1.0
)
demuxEM.plot_bar(
adt.uns["background_probs"],
adt.var_names,
"Sample ID",
"Background probability",
output_name + ".background_probabilities.bar.pdf",
)
demuxEM.plot_adt_hist(
adt, "rna_type", output_name + ".real_content.hist.pdf", alpha=0.5
)
demuxEM.plot_rna_hist(data, output_name + ".rna_demux.hist.pdf")
print("Diagnostic plots are generated.")
if len(kwargs["gen_gender_plot"]) > 0:
tools.log_norm(data, 1e5)
for gene_name in kwargs["gen_gender_plot"]:
demuxEM.plot_violin(
data,
{"gene": gene_name},
"{output_name}.{gene_name}.violin.pdf".format(
output_name=output_name, gene_name=gene_name
),
title="{gene_name}: a gender-specific gene".format(gene_name=gene_name),
)
print("Gender-specific gene expression violin plots are generated.")
# output results
io.write_output(adt, output_name + "_ADTs.h5ad")
print(
"Hashtag count information is written to {output_name}_ADTs.h5ad .".format(
output_name=output_name
)
)
io.write_output(data, output_name + "_demux.h5ad")
print(
"Demutiplexed RNA expression information is written to {output_name}_demux.h5ad .".format(
output_name=output_name
)
)
io.write_output(genome_indexed_raw_data, output_name + "_demux")
print(
"Raw sccloud-format hdf5 file with demultiplexing results is written to {output_name}_demux.h5sc .".format(
output_name=output_name
)
)
# output summary statistics
print("\nSummary statistics:")
print("total\t{}".format(data.shape[0]))
for name, value in data.obs["demux_type"].value_counts().iteritems():
print("{}\t{}".format(name, value))
# Source: /sccloud-0.14.0.tar.gz/sccloud-0.14.0/scCloud/pipeline/demuxEM_pipeline.py
import numpy as np
import pandas as pd
import time
from natsort import natsorted
import multiprocessing
from sklearn.cluster import KMeans
from typing import List
from anndata import AnnData
def estimate_background_probs(adt: AnnData, random_state: int = 0):
"""For cell-hashing data, estimate antibody background probability using EM algorithm.
Parameters
----------
adt: ``anndata.AnnData``
Annotated data matrix for antibody.
random_state: ``int``, optional, default: ``0``
Random seed set for reproducing results.
Returns
-------
``None``
Update ``adt.uns``:
* ``adt.uns["background_probs"]``: estimated antibody background probability.
Example
-------
>>> scc.estimate_background_probs(adt)
"""
adt.obs["counts"] = adt.X.sum(axis=1).A1 if adt.shape[1] > 1 else adt.X
counts_log10 = np.log10(adt.obs["counts"].values.reshape(-1, 1))
kmeans = KMeans(n_clusters=2, random_state=random_state).fit(counts_log10)
signal = 0 if kmeans.cluster_centers_[0] > kmeans.cluster_centers_[1] else 1
adt.obs["hto_type"] = "background"
adt.obs.loc[kmeans.labels_ == signal, "hto_type"] = "signal"
idx = np.isin(adt.obs["hto_type"], "background")
pvec = (
adt.X[idx,].sum(axis=0).A1 if adt.shape[1] > 1 else np.array(adt.X[idx,].sum())
)
pvec /= pvec.sum()
adt.uns["background_probs"] = pvec
def estimate_probs(arr, pvec, alpha, alpha_noise, tol):
probs = np.zeros(pvec.size + 1)
old_probs = np.zeros(pvec.size + 1)
z = np.zeros(pvec.size + 1)
noise = pvec.size
# Estimate MLE without Generalized Dirichlet prior
probs_mle = arr / arr.sum()
probs[noise] = (probs_mle / pvec).min() + 0.01
probs[:-1] = np.maximum(probs_mle - probs[noise] * pvec, 0.01)
probs = probs / probs.sum()
# EM algorithm
i = 0
eps = 1.0
while eps > tol:
i += 1
old_probs[:] = probs[:]
# E step
z[:-1] = alpha - 1.0
z[noise] = alpha_noise - 1.0
for j in range(pvec.size):
if arr[j] > 0:
p = probs[j] / (probs[noise] * pvec[j] + probs[j])
z[j] += arr[j] * p
z[noise] += arr[j] * (1.0 - p)
# M step
idx = z > 0.0
probs[idx] = z[idx] / z[idx].sum()
probs[~idx] = 0.0
eps = np.linalg.norm(probs - old_probs, ord=1)
# print ("i = {}, eps = {:.2g}.".format(i, eps))
return probs
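# get_droplet_info calls a droplet a singlet when exactly one sample reaches a
# normalized probability of at least 0.1, and a doublet otherwise. calc_demux first
# computes each droplet's estimated signal (total hashtag counts times its non-noise
# probability) and leaves droplets below min_signal marked as "unknown".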
def get_droplet_info(probs, sample_names):
ids = np.nonzero(probs >= 0.1)[0]
ids = ids[np.argsort(probs[ids])[::-1]]
return (
"singlet" if ids.size == 1 else "doublet",
",".join([sample_names[i] for i in ids]),
)
def calc_demux(data, adt, nsample, min_signal, probs="raw_probs"):
demux_type = np.full(data.shape[0], "unknown", dtype="object")
assignments = np.full(data.shape[0], "", dtype="object")
signals = adt.obs["counts"].reindex(data.obs_names, fill_value=0.0).values * (
1.0 - data.obsm[probs][:, nsample]
)
idx = signals >= min_signal
tmp = data.obsm[probs][idx,]
norm_probs = tmp[:, 0:nsample] / (1.0 - tmp[:, nsample])[:, None]
values1 = []
values2 = []
for i in range(norm_probs.shape[0]):
droplet_type, droplet_id = get_droplet_info(norm_probs[i,], adt.var_names)
values1.append(droplet_type)
values2.append(droplet_id)
demux_type[idx] = values1
data.obs["demux_type"] = pd.Categorical(
demux_type, categories=["singlet", "doublet", "unknown"]
)
assignments[idx] = values2
data.obs["assignment"] = pd.Categorical(
assignments, categories=natsorted(np.unique(assignments))
)
def has_duplicate_names(names: List[str]) -> bool:
for name in names:
if name.find(".#~") >= 0:
return True
return False
def remove_suffix(assigns: List[str]) -> List[str]:
assigns = assigns.astype(str)
results = []
for value in assigns:
fields = value.split(",")
for i, item in enumerate(fields):
pos = item.find(".#~")
if pos >= 0:
fields[i] = item[:pos]
results.append(",".join(fields))
results = np.array(results)
return pd.Categorical(results, categories=natsorted(np.unique(results)))
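# has_duplicate_names and remove_suffix above handle sample names that map to
# several feature barcodes: duplicated names are expected to carry a suffix starting
# with ".#~", which "assignment.dedup" preserves and "assignment" strips.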
def demultiplex(
data: AnnData,
adt: AnnData,
min_signal: float = 10.0,
alpha: float = 0.0,
alpha_noise: float = 1.0,
tol: float = 1e-6,
n_threads: int = 1,
):
"""Demultiplexing cell-hashing data, using the estimated antibody background probability calculated in ``scc.estimate_background_probs``.
Parameters
----------
data: ``anndata.AnnData``
Annotated data matrix for gene expression matrix.
adt: ``anndata.AnnData``
Annotated data matrix for antibody count matrix.
min_signal: ``float``, optional, default: ``10.0``
Any cell/nucleus with fewer than ``min_signal`` hashtag counts attributable to signal will be marked as ``unknown``.
alpha: ``float``, optional, default: ``0.0``
The Dirichlet prior concentration parameter (alpha) on samples. An alpha value < 1.0 will make the prior sparse.
alpha_noise: ``float``, optional, default: ``1.0``
The Dirichlet prior concentration parameter on the background noise component.
tol: ``float``, optional, default: ``1e-6``
Convergence tolerance for the EM algorithm: iteration stops once the L1 change of the estimated probability vector falls below ``tol``.
n_threads: ``int``, optional, default: ``1``
Number of threads to use. Must be a positive integer.
Returns
-------
``None``
Update ``data.obs``:
* ``data.obs["demux_type"]``: Demultiplexed types of the cells. Either ``singlet``, ``doublet``, or ``unknown``.
* ``data.obs["assignment"]``: Assigned samples of origin for each cell barcode.
* ``data.obs["assignment.dedup"]``: Only exist if one sample name can correspond to multiple feature barcodes. In this array, each feature barcode is assigned a unique sample name.
Examples
--------
>>> scc.demultiplex(adata, adt)
"""
start = time.time()
nsample = adt.shape[1]
data.uns["background_probs"] = adt.uns["background_probs"]
idx_df = data.obs_names.isin(adt.obs_names)
adt.obs["rna_type"] = "background"
adt.obs.loc[data.obs_names[idx_df], "rna_type"] = "signal"
if nsample == 1:
print("Warning: detect only one barcode, no need to demultiplex!")
data.obsm["raw_probs"] = np.zeros((data.shape[0], nsample + 1))
data.obsm["raw_probs"][:, 0] = 1.0
data.obsm["raw_probs"][:, 1] = 0.0
data.obs["demux_type"] = "singlet"
data.obs["assignment"] = adt.var_names[0]
else:
if nsample == 2:
print(
"Warning: detect only two barcodes, demultiplexing accuracy might be affected!"
)
ncalc = idx_df.sum()
if ncalc < data.shape[0]:
nzero = data.shape[0] - ncalc
print(
"Warning: {} cells do not have ADTs, percentage = {:.2f}%.".format(
nzero, nzero * 100.0 / data.shape[0]
)
)
adt_small = adt[data.obs_names[idx_df],].X.toarray()
data.obsm["raw_probs"] = np.zeros((data.shape[0], nsample + 1))
data.obsm["raw_probs"][:, nsample] = 1.0
iter_array = [
(adt_small[i,], adt.uns["background_probs"], alpha, alpha_noise, tol)
for i in range(ncalc)
]
with multiprocessing.Pool(n_threads) as pool:
data.obsm["raw_probs"][idx_df, :] = pool.starmap(estimate_probs, iter_array)
calc_demux(data, adt, nsample, min_signal)
if has_duplicate_names(adt.var_names):
data.obs["assignment.dedup"] = data.obs["assignment"]
data.obs["assignment"] = remove_suffix(data.obs["assignment"].values)
end = time.time()
print("demuxEM.demultiplex is finished. Time spent = {:.2f}s.".format(end - start))
# Source: /sccloud-0.14.0.tar.gz/sccloud-0.14.0/scCloud/demuxEM/demuxEM.py
import numpy as np
import pandas as pd
import json
import logging
from sys import stdout
from natsort import natsorted
from typing import List, Dict, Union
from anndata import AnnData
logger = logging.getLogger("sccloud")
class CellType:
def __init__(self, name: str, ignore_nonde: bool = False):
self.name = name
self.score = self.avgp = 0.0
self.weak_support = []
self.strong_support = []
self.subtypes = None
self.ignore_nonde = ignore_nonde
def evaluate(
self,
obj: "json object",
de_up: pd.DataFrame,
de_down: pd.DataFrame,
thre: float,
):
""" Calculate score for matching a cluster with a putative cell type.
"""
self.score = self.avgp = 0.0
self.weak_support = []
self.strong_support = []
nump = 0
for marker_set in obj["markers"]:
numer = 0.0
denom = len(marker_set["genes"]) * 2.0
if denom == 0.0:
continue
for marker in marker_set["genes"]:
sign = marker[-1]
gsym = marker[:-1]
if sign == "+":
if gsym in de_up.index:
fc = de_up.at[gsym, "fc"]
percent = de_up.at[gsym, "percent"]
self.avgp += percent
nump += 1
if fc >= thre:
numer += 2.0
self.strong_support.append(
(marker, "{0:.2f}%".format(percent))
)
else:
numer += 1.0 + (fc - 1.0) / (thre - 1.0)
self.weak_support.append(
(marker, "{0:.2f}%".format(percent))
)
else:
assert sign == "-"
if gsym not in de_up.index:
if gsym in de_down.index:
fc = (
(1.0 / de_down.at[gsym, "fc"])
if de_down.at[gsym, "fc"] > 0.0
else np.inf
)
percent = de_down.at[gsym, "percent"]
if fc >= thre:
numer += 2.0
self.strong_support.append(
(marker, "{0:.2f}%".format(percent))
)
else:
numer += 1.0 + (fc - 1.0) / (thre - 1.0)
self.weak_support.append(
(marker, "{0:.2f}%".format(percent))
)
elif not self.ignore_nonde:
numer += 1.0
self.weak_support.append((marker, "N/A"))
self.score += numer / denom * marker_set["weight"]
self.score = (
self.score / obj["denominator"] if obj["denominator"] > 0.0 else 0.0
)
if nump > 0:
self.avgp /= nump
def __repr__(self):
res = "name: {0}; score: {1:.2f}; average marker percentage: {2:.2f}%".format(
self.name, self.score, self.avgp
)
if len(self.strong_support) > 0:
res += "; strong support: {0}".format(
",".join(["({0},{1})".format(x[0], x[1]) for x in self.strong_support])
)
if len(self.weak_support) > 0:
res += "; weak support: {0}".format(
",".join(["({0},{1})".format(x[0], x[1]) for x in self.weak_support])
)
return res
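# Scoring scheme used by CellType.evaluate above: each marker can contribute up to
# 2 points out of a per-marker-set denominator of 2 * len(genes) -- 2 points for a
# strong hit (fold change >= thre), roughly 1-2 points for a weak hit, and 1 point
# for a "-" marker that is simply not differentially expressed (unless ignore_nonde
# is set). Marker-set fractions are weighted and divided by the cell type's
# recalibrated denominator, so the final score lies in [0, 1].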
class Annotator:
def __init__(self, marker_file: Union[str, Dict], genes: List[str]) -> None:
if type(marker_file) != dict:
with open(marker_file) as fin:
self.object = json.load(fin)
else:
self.object = marker_file
self.recalibrate(self.object, genes)
def recalibrate(self, obj: "json object", genes: List[str]) -> None:
""" Remove markers that are not expressed (not in genes) and calculate partial weights for existing genes.
"""
for celltype in obj["cell_types"]:
denom = 0.0
for marker_set in celltype["markers"]:
markers = marker_set["genes"]
s = len(markers)
marker_set["genes"] = [x for x in markers if x[:-1] in genes]
new_s = len(marker_set["genes"])
marker_set["weight"] = marker_set["weight"] / s * new_s
denom += marker_set["weight"]
celltype["denominator"] = denom
sub_obj = celltype.get("subtypes", None)
if sub_obj is not None:
self.recalibrate(sub_obj, genes)
def evaluate(
self,
de_up: pd.DataFrame,
de_down: pd.DataFrame,
thre: float = 1.5,
ignore_nonde: bool = False,
obj: "json object" = None,
):
""" Evaluate a cluster to determine its putative cell type.
"""
if obj is None:
obj = self.object
results = []
for celltype in obj["cell_types"]:
ct = CellType(celltype["name"], ignore_nonde=ignore_nonde)
ct.evaluate(celltype, de_up, de_down, thre)
if ct.score >= 0.5:
sub_obj = celltype.get("subtypes", None)
if sub_obj is not None:
ct.subtypes = self.evaluate(
de_up,
de_down,
thre=thre,
ignore_nonde=ignore_nonde,
obj=sub_obj,
)
results.append(ct)
results.sort(key=lambda x: x.score, reverse=True)
return results
def report(
self,
fout: "output stream",
ct_list: List["CellType"],
thre: float,
space: int = 4,
) -> None:
""" Write putative cell type reports to fout.
"""
for ct in ct_list:
if ct.score >= thre:
fout.write(" " * space + str(ct) + "\n")
if ct.subtypes is not None:
self.report(fout, ct.subtypes, 0.5, space + 4)
def infer_cluster_names(
cell_type_dict: Dict[str, List["CellType"]], threshold: float = 0.5
) -> List[str]:
"""Decide cluster names based on cell types automatically.
Parameters
----------
cell_type_dict: ``Dict[str, List["CellType"]]``
Python dictionary of cell type list for each cluster. This is the output of ``scc.infer_cell_types``.
threshold: ``float``, optional, default: ``0.5``
A threshold for cell type result reported. It should be a real number between ``0.0`` and ``1.0``.
Returns
-------
``List[str]``
A list of cluster names decided by their corresponding cell types. The order is consistent with clusters.
Examples
--------
>>> cell_type_dict = scc.infer_cell_types(adata, markers = 'human_immune', de_test = 't')
>>> cluster_names = scc.infer_cluster_names(cell_type_dict)
"""
cluster_ids = natsorted(cell_type_dict.keys())
names = []
name_dict = dict()
for cluster_id in cluster_ids:
ct_list = cell_type_dict[cluster_id]
if len(ct_list) == 0 or ct_list[0].score < threshold:
cell_name = cluster_id
else:
ct = ct_list[0]
while ct.subtypes is not None and ct.subtypes[0].score >= threshold:
ct = ct.subtypes[0]
cell_name = ct.name
if cell_name in name_dict:
name_dict[cell_name] += 1
cell_name = cell_name + "-" + str(name_dict[cell_name])
else:
name_dict[cell_name] = 1
names.append(cell_name)
return names
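# Note: when several clusters resolve to the same cell type name, the first cluster
# keeps the plain name and subsequent ones get "-2", "-3", ... suffixes appended.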
def infer_cell_types(
data: AnnData,
markers: Union[str, Dict],
de_test: str,
de_alpha: float = 0.05,
de_key: str = "de_res",
threshold: float = 0.5,
ignore_nonde: bool = False,
output_file: str = None,
) -> Dict[str, List["CellType"]]:
"""Infer putative cell types for each cluster using legacy markers.
Parameters
----------
data : ``anndata.AnnData``
AnnData object of count matrix and DE analysis results.
markers : ``str`` or ``Dict``
* If ``str``, it
* either refers to a JSON file containing legacy markers, or
* ``'human_immune'`` for predefined sccloud markers on human immune cells;
* ``'mouse_immune'`` for mouse immune cells;
* ``'human_brain'`` for human brain cells;
* ``'mouse_brain'`` for mouse brain cells.
* If ``Dict``, it refers to a Python dictionary describing the markers.
de_test: ``str``
sccloud determines cell types using DE test results. This argument indicates which DE test result to use, can be either ``'t'``, ``'fisher'`` or ``'mwu'``.
de_alpha: ``float``, optional, default: ``0.05``
False discovery rate for controlling family-wise error.
de_key : ``str``, optional, default: ``"de_res"``
The keyword in ``data.varm`` that stores DE analysis results.
threshold : ``float``, optional, default: ``0.5``
Only report putative cell types with a score larger than or equal to ``threshold``.
ignore_nonde: ``bool``, optional, default: ``False``
Do not consider non DE genes as weak negative markers.
output_file: ``str``, optional, default: ``None``
File name of output cluster annotation. If ``None``, do not write to any file.
Returns
-------
``Dict[str, List["CellType"]]``
Python dictionary with cluster IDs as keys and their corresponding cell type lists, sorted by score, as values.
Examples
--------
>>> cell_type_dict = scc.infer_cell_types(adata, markers = 'human_immune', de_test = 't')
"""
if output_file is not None:
fout = open(output_file, "w")
predefined_markers = dict(
human_immune="human_immune_cell_markers.json",
mouse_immune="mouse_immune_cell_markers.json",
mouse_brain="mouse_brain_cell_markers.json",
human_brain="human_brain_cell_markers.json",
)
if markers in predefined_markers:
import pkg_resources
markers = pkg_resources.resource_filename(
"sccloud.annotate_cluster", predefined_markers[markers]
)
anno = Annotator(markers, data.var_names)
clusts = natsorted(
[
x.rpartition(":")[2]
for x in data.varm[de_key].dtype.names
if x.startswith("WAD_score:")
]
)
cell_type_results = {}
for clust_id in clusts:
idx = data.varm[de_key]["{0}_qval:{1}".format(de_test, clust_id)] <= de_alpha
idx_up = idx & (data.varm[de_key]["log_fold_change:{0}".format(clust_id)] > 0.0)
idx_down = idx & (
data.varm[de_key]["log_fold_change:{0}".format(clust_id)] < 0.0
)
assert idx_up.sum() + idx_down.sum() == idx.sum()
cols = [
"{0}:{1}".format(x, clust_id)
for x in [
"percentage_fold_change" if de_test == "fisher" else "log_fold_change",
"percentage",
]
]
de_up = pd.DataFrame(
data=data.varm[de_key][cols][idx_up], index=data.var_names[idx_up]
)
de_up.rename(columns={cols[0]: "fc", cols[1]: "percent"}, inplace=True)
de_down = pd.DataFrame(
data=data.varm[de_key][cols][idx_down], index=data.var_names[idx_down]
)
de_down.rename(columns={cols[0]: "fc", cols[1]: "percent"}, inplace=True)
if de_test != "fisher":
de_up["fc"] = np.exp(de_up["fc"])
de_down["fc"] = np.exp(de_down["fc"])
results = anno.evaluate(de_up, de_down, ignore_nonde=ignore_nonde)
if output_file is not None:
fout.write("Cluster {}:\n".format(clust_id))
anno.report(fout, results, threshold)
cell_type_results[clust_id] = results
if output_file is not None:
fout.close()
return cell_type_results
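# infer_cell_types above assumes data.varm[de_key] is a structured array whose field
# names follow the "<statistic>:<cluster id>" pattern produced by the DE analysis
# step, e.g. "WAD_score:1", "t_qval:1", "log_fold_change:1" and "percentage:1".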
def annotate(
data: AnnData, name: str, based_on: str, anno_dict: Dict[str, str]
) -> None:
"""Add annotation to AnnData obj.
Parameters
----------
data : `AnnData`
AnnData object.
name : `str`
Name of the new annotation in data.obs.
based_on : `str`
Name of the attribute the cluster IDs come from.
anno_dict : `Dict[str, str]`
Dictionary mapping from cluster id to cell type.
Returns
-------
None
Examples
--------
>>> annotate_cluster.annotate(data, 'anno', 'spectral_louvain_labels', {'1': 'T cell', '2': 'B cell'})
"""
data.obs[name] = [anno_dict[x] for x in data.obs[based_on]]
def run_annotate_cluster(
input_file: str,
output_file: str,
marker_file: str,
de_test: str,
de_alpha: float = 0.05,
de_key: str = "de_res",
threshold: float = 0.5,
ignore_nonde: bool = False,
) -> None:
""" For command line use.
"""
import time
from sccloud.io import read_input
start = time.time()
data = read_input(input_file, h5ad_mode="r")
infer_cell_types(
data,
marker_file,
de_test,
de_alpha=de_alpha,
de_key=de_key,
threshold=threshold,
ignore_nonde=ignore_nonde,
output_file=output_file,
)
data.file.close()
end = time.time()
logger.info("Time spent for annotating clusters is {:.2f}s.".format(end - start))
def annotate_anndata_object(input_file: str, annotation: str) -> None:
""" For command line use.
annotation: anno_name:clust_name:cell_type1;...cell_typen
"""
from sccloud.io import read_input, write_output
data = read_input(input_file, h5ad_mode="r+")
anno_name, clust_name, anno_str = annotation.split(":")
anno_dict = {str(i + 1): x for i, x in enumerate(anno_str.split(";"))}
annotate(data, anno_name, clust_name, anno_dict)
write_output(data, input_file, whitelist = ["obs"])
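# Illustrative command-line style call (hypothetical labels): the annotation string
# maps cluster "1" to the first listed cell type, "2" to the second, and so on.
#
# >>> annotate_anndata_object("example.h5ad", "anno:louvain_labels:T cell;B cell;NK cell")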
# Source: /sccloud-0.14.0.tar.gz/sccloud-0.14.0/scCloud/annotate_cluster/annotate_cluster.py
import time
import numpy as np
import pandas as pd
import os.path
from scipy.io import mmread
from scipy.sparse import csr_matrix, issparse
import tables
import gzip
from typing import List, Tuple
from . import Array2D, MemData
import anndata
import logging
logger = logging.getLogger("sccloud")
def load_10x_h5_file_v2(h5_in: "tables.File", fn: str, ngene: int = None) -> "MemData":
"""Load 10x v2 format matrix from hdf5 file
Parameters
----------
h5_in : tables.File
An instance of tables.File class that is connected to a 10x v2 formatted hdf5 file.
fn : `str`
File name, can be used to indicate channel-specific name prefix.
ngene : `int`, optional (default: None)
Minimum number of genes to keep a barcode. Default is to keep all barcodes.
Returns
-------
An MemData object containing genome-Array2D pair per genome.
Examples
--------
>>> io.load_10x_h5_file_v2(h5_in)
"""
data = MemData()
for group in h5_in.list_nodes("/", "Group"):
genome = group._v_name
M, N = h5_in.get_node("/" + genome + "/shape").read()
mat = csr_matrix(
(
h5_in.get_node("/" + genome + "/data").read(),
h5_in.get_node("/" + genome + "/indices").read(),
h5_in.get_node("/" + genome + "/indptr").read(),
),
shape=(N, M),
)
barcodes = h5_in.get_node("/" + genome + "/barcodes").read().astype(str)
ids = h5_in.get_node("/" + genome + "/genes").read().astype(str)
names = h5_in.get_node("/" + genome + "/gene_names").read().astype(str)
array2d = Array2D(
{"barcodekey": barcodes}, {"featurekey": ids, "featurename": names}, mat
)
array2d.filter(ngene=ngene)
array2d.separate_channels(fn)
data.addData(genome, array2d)
return data
def load_10x_h5_file_v3(h5_in: "tables.File", fn: str, ngene: int = None) -> "MemData":
"""Load 10x v3 format matrix from hdf5 file
Parameters
----------
h5_in : tables.File
An instance of tables.File class that is connected to a 10x v3 formatted hdf5 file.
fn : `str`
File name, can be used to indicate channel-specific name prefix.
ngene : `int`, optional (default: None)
Minimum number of genes to keep a barcode. Default is to keep all barcodes.
Returns
-------
An MemData object containing genome-Array2D pair per genome.
Examples
--------
>>> io.load_10x_h5_file_v3(h5_in)
"""
M, N = h5_in.get_node("/matrix/shape").read()
bigmat = csr_matrix(
(
h5_in.get_node("/matrix/data").read(),
h5_in.get_node("/matrix/indices").read(),
h5_in.get_node("/matrix/indptr").read(),
),
shape=(N, M),
)
barcodes = h5_in.get_node("/matrix/barcodes").read().astype(str)
genomes = h5_in.get_node("/matrix/features/genome").read().astype(str)
ids = h5_in.get_node("/matrix/features/id").read().astype(str)
names = h5_in.get_node("/matrix/features/name").read().astype(str)
data = MemData()
for genome in np.unique(genomes):
idx = genomes == genome
barcode_metadata = {"barcodekey": barcodes}
feature_metadata = {"featurekey": ids[idx], "featurename": names[idx]}
mat = bigmat[:, idx].copy()
array2d = Array2D(barcode_metadata, feature_metadata, mat)
array2d.filter(ngene)
array2d.separate_channels(fn)
data.addData(genome, array2d)
return data
def load_10x_h5_file(input_h5: str, ngene: int = None) -> "MemData":
"""Load 10x format matrix (either v2 or v3) from hdf5 file
Parameters
----------
input_h5 : `str`
The matrix in 10x v2 or v3 hdf5 format.
ngene : `int`, optional (default: None)
Minimum number of genes to keep a barcode. Default is to keep all barcodes.
Returns
-------
An MemData object containing genome-Array2D pair per genome.
Examples
--------
>>> io.load_10x_h5_file('example_10x.h5')
"""
fn = os.path.basename(input_h5)[:-3]
data = None
with tables.open_file(input_h5) as h5_in:
try:
node = h5_in.get_node("/matrix")
data = load_10x_h5_file_v3(h5_in, fn, ngene)
except tables.exceptions.NoSuchNodeError:
data = load_10x_h5_file_v2(h5_in, fn, ngene)
return data
def determine_file_name(
path: str, names: List[str], errmsg: str, fname: str = None, exts: List[str] = None
) -> str:
""" Try several file name options and determine which one is correct.
"""
for name in names:
file_name = os.path.join(path, name)
if os.path.isfile(file_name):
return file_name
if fname is not None:
for ext in exts:
file_name = fname + ext
if os.path.isfile(file_name):
return file_name
raise ValueError(errmsg)
def load_one_mtx_file(path: str, ngene: int = None, fname: str = None) -> "Array2D":
"""Load one gene-count matrix in mtx format into an Array2D object
"""
mtx_file = determine_file_name(
path,
["matrix.mtx.gz", "matrix.mtx"],
"Expression matrix in mtx format is not found",
fname=fname,
exts=[".mtx"],
)
mat = csr_matrix(mmread(mtx_file).T)
barcode_file = determine_file_name(
path,
["cells.tsv.gz", "barcodes.tsv.gz", "barcodes.tsv"],
"Barcode metadata information is not found",
fname=fname,
exts=["_barcode.tsv", ".cells.tsv"],
)
feature_file = determine_file_name(
path,
["genes.tsv.gz", "features.tsv.gz", "genes.tsv"],
"Feature metadata information is not found",
fname=fname,
exts=["_gene.tsv", ".genes.tsv"],
)
barcode_base = os.path.basename(barcode_file)
feature_base = os.path.basename(feature_file)
if barcode_base == "cells.tsv.gz" and feature_base == "genes.tsv.gz":
format_type = "HCA DCP"
elif barcode_base == "barcodes.tsv.gz" and feature_base == "features.tsv.gz":
format_type = "10x v3"
elif barcode_base == "barcodes.tsv" and feature_base == "genes.tsv":
format_type = "10x v2"
elif barcode_base.endswith("_barcode.tsv") and feature_base.endswith("_gene.tsv"):
format_type = "scumi"
elif barcode_base.endswith(".cells.tsv") and feature_base.endswith(".genes.tsv"):
format_type = "dropEst"
else:
raise ValueError("Unknown format type")
if format_type == "HCA DCP":
barcode_metadata = pd.read_csv(barcode_file, sep="\t", header=0)
assert "cellkey" in barcode_metadata
barcode_metadata.rename(columns={"cellkey": "barcodekey"}, inplace=True)
feature_metadata = pd.read_csv(feature_file, sep="\t", header=0)
else:
barcode_metadata = pd.read_csv(
barcode_file, sep="\t", header=None, names=["barcodekey"]
)
if format_type == "10x v3":
feature_metadata = pd.read_csv(
feature_file,
sep="\t",
header=None,
names=["featurekey", "featurename", "featuretype"],
)
elif format_type == "10x v2":
feature_metadata = pd.read_csv(
feature_file, sep="\t", header=None, names=["featurekey", "featurename"]
)
elif format_type == "scumi":
values = (
pd.read_csv(feature_file, sep="\t", header=None)
.iloc[:, 0]
.values.astype(str)
)
arr = np.array(np.char.split(values, sep="_", maxsplit=1).tolist())
feature_metadata = pd.DataFrame(
data={"featurekey": arr[:, 0], "featurename": arr[:, 1]}
)
elif format_type == "dropEst":
feature_metadata = pd.read_csv(
feature_file, sep="\t", header=None, names=["featurekey"]
)
feature_metadata["featurename"] = feature_metadata["featurekey"]
else:
raise ValueError("Unknown format type")
array2d = Array2D(barcode_metadata, feature_metadata, mat)
array2d.filter(ngene=ngene)
if format_type == "10x v3" or format_type == "10x v2":
array2d.separate_channels("") # fn == '' refers to 10x mtx format
return array2d
def load_mtx_file(path: str, genome: str = None, ngene: int = None) -> "MemData":
"""Load gene-count matrix from Market Matrix files (10x v2, v3 and HCA DCP formats)
Parameters
----------
path : `str`
Path to mtx files. The directory implied by path should either contain matrix, feature, and barcode information, or folders containing such information.
genome : `str`, optional (default: None)
Genome name of the matrix. If None, genome will be inferred from path.
ngene : `int`, optional (default: None)
Minimum number of genes to keep a barcode. Default is to keep all barcodes.
Returns
-------
An MemData object containing a genome-Array2D pair.
Examples
--------
>>> io.load_mtx_file('example_mtx_folder', genome = 'GRCh38')
"""
orig_file = None
if not os.path.isdir(path):
orig_file = path
path = os.path.dirname(path)
data = MemData()
if (
os.path.isfile(os.path.join(path, "matrix.mtx.gz"))
or os.path.isfile(os.path.join(path, "matrix.mtx"))
or (orig_file is not None and os.path.isfile(orig_file))
):
if genome is None:
genome = os.path.basename(path)
data.addData(
genome,
load_one_mtx_file(
path,
ngene=ngene,
fname=None if orig_file is None else os.path.splitext(orig_file)[0],
),
)
else:
for dir_entry in os.scandir(path):
if dir_entry.is_dir():
data.addData(
dir_entry.name, load_one_mtx_file(dir_entry.path, ngene=ngene)
)
return data
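# Note: when the mtx path is a directory of sub-directories, each sub-directory is
# loaded as a separate genome whose name is the sub-directory's name.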
def load_csv_file(
input_csv: str, genome: str, sep: str = ",", ngene: int = None
) -> "MemData":
"""Load count matrix from a CSV-style file, such as CSV file or DGE style tsv file.
Parameters
----------
input_csv : `str`
The CSV file, gzipped or not, containing the count matrix.
genome : `str`
The genome reference.
sep: `str`, optional (default: ',')
Separator between fields, either ',' or '\t'.
ngene : `int`, optional (default: None)
Minimum number of genes to keep a barcode. Default is to keep all barcodes.
Returns
-------
An MemData object containing a genome-Array2D pair.
Examples
--------
>>> io.load_csv_file('example_ADT.csv', genome = 'GRCh38')
>>> io.load_csv_file('example.umi.dge.txt.gz', genome = 'GRCh38', sep = '\t')
"""
path = os.path.dirname(input_csv)
base = os.path.basename(input_csv)
is_hca_csv = base == "expression.csv"
if sep == "\t":
# DGE format: columns are cells (typically a few thousand), so pandas.read_csv is fast enough
df = pd.read_csv(input_csv, header=0, index_col=0, sep=sep)
mat = csr_matrix(df.values.T)
barcode_metadata = {"barcodekey": df.columns.values}
feature_metadata = {
"featurekey": df.index.values,
"featurename": df.index.values,
}
else:
# For CSV files, wide columns prevent fast pd.read_csv loading
converter = (
float if base.startswith("expression") else int
) # If expression -> float otherwise int
barcodes = []
names = []
stacks = []
with (
gzip.open(input_csv, mode="rt")
if input_csv.endswith(".gz")
else open(input_csv)
) as fin:
barcodes = next(fin).strip().split(sep)[1:]
for line in fin:
fields = line.strip().split(sep)
names.append(fields[0])
stacks.append([converter(x) for x in fields[1:]])
mat = csr_matrix(np.stack(stacks, axis=1 if not is_hca_csv else 0))
barcode_metadata = {"barcodekey": barcodes}
feature_metadata = {"featurekey": names, "featurename": names}
if is_hca_csv:
barcode_file = os.path.join(path, "cells.csv")
if os.path.exists(barcode_file):
barcode_metadata = pd.read_csv(barcode_file, sep=",", header=0)
assert "cellkey" in barcode_metadata
barcode_metadata.rename(columns={"cellkey": "barcodekey"}, inplace=True)
feature_file = os.path.join(path, "genes.csv")
if os.path.exists(feature_file):
feature_metadata = pd.read_csv(feature_file, sep=",", header=0)
data = MemData()
array2d = Array2D(barcode_metadata, feature_metadata, mat)
array2d.filter(ngene=ngene)
data.addData(genome, array2d)
return data
def load_loom_file(input_loom: str, genome: str, ngene: int = None) -> "MemData":
"""Load count matrix from a LOOM file. Currently only support HCA DCP Loom spec.
Parameters
----------
input_loom : `str`
The LOOM file, containing the count matrix.
genome : `str`
The genome reference.
ngene : `int`, optional (default: None)
Minimum number of genes to keep a barcode. Default is to keep all barcodes.
Returns
-------
An MemData object containing a genome-Array2D pair.
Examples
--------
>>> io.load_loom_file('example.loom', genome = 'GRCh38', ngene = 200)
"""
import loompy
col_trans = {"CellID": "barcodekey"}
row_trans = {"Accession": "featurekey", "Gene": "featurename"}
data = MemData()
with loompy.connect(input_loom) as ds:
mat = csr_matrix(ds.sparse().T)
barcode_metadata = {}
for keyword, values in ds.col_attrs.items():
keyword = col_trans.get(keyword, keyword)
barcode_metadata[keyword] = values
feature_metadata = {}
for keyword, values in ds.row_attrs.items():
keyword = row_trans.get(keyword, keyword)
feature_metadata[keyword] = values
array2d = Array2D(barcode_metadata, feature_metadata, mat)
array2d.filter(ngene=ngene)
data.addData(genome, array2d)
return data
def load_scCloud_h5_file(
input_h5: str, ngene: int = None, select_singlets: bool = False
) -> "MemData":
"""Load matrices from sccloud-format hdf5 file
Parameters
----------
input_h5 : `str`
sccloud-format hdf5 file.
ngene : `int`, optional (default: None)
Minimum number of genes to keep a barcode. Default is to keep all barcodes.
select_singlets: `bool`, optional (default: False)
Whether to only load singlets.
Returns
-------
An MemData object containing genome-Array2D pair per genome.
Examples
--------
>>> io.load_scCloud_h5_file('example.sccloud.h5')
"""
cite_seq_name = None
selected_barcodes = None
data = MemData()
with tables.open_file(input_h5) as h5_in:
for group in h5_in.list_nodes("/", "Group"):
genome = group._v_name
M, N = h5_in.get_node("/" + genome + "/shape").read()
mat = csr_matrix(
(
h5_in.get_node("/" + genome + "/data").read(),
h5_in.get_node("/" + genome + "/indices").read(),
h5_in.get_node("/" + genome + "/indptr").read(),
),
shape=(N, M),
)
barcode_metadata = {}
for node in h5_in.walk_nodes("/" + genome + "/_barcodes", "Array"):
values = node.read()
if values.dtype.kind == "S":
values = values.astype(str)
barcode_metadata[node.name] = values
feature_metadata = {}
for node in h5_in.walk_nodes("/" + genome + "/_features", "Array"):
values = node.read()
if values.dtype.kind == "S":
values = values.astype(str)
feature_metadata[node.name] = values
array2d = Array2D(barcode_metadata, feature_metadata, mat)
if genome.startswith("CITE_Seq"):
cite_seq_name = genome
else:
array2d.filter(ngene, select_singlets)
selected_barcodes = array2d.get_metadata("barcodekey")
data.addData(genome, array2d)
if (cite_seq_name is not None) and (selected_barcodes is not None):
array2d = data.getData(cite_seq_name)
selected = array2d.get_metadata("barcodekey").isin(selected_barcodes)
array2d.trim(selected)
return data
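# Note: genomes whose name starts with "CITE_Seq" skip the gene-count filter above
# and are instead trimmed afterwards to the barcodes that survived filtering in the
# other genomes.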
def infer_file_format(input_file: str) -> Tuple[str, str, str]:
""" Infer file format from input_file name
This function infers the file format by inspecting the file name.
Parameters
----------
input_file : `str`
Input file name.
Returns
-------
`str`
File format, choosing from 'sccloud', '10x', 'h5ad', 'mtx', 'dge', and 'csv'.
`str`
The path covering all input files. Most of the time this is the same as input_file, but for HCA mtx and csv formats it is the parent directory.
`str`
Type of the path, either 'file' or 'directory'.
"""
file_format = None
copy_path = input_file
copy_type = "file"
if input_file.endswith(".h5"):
file_format = "10x"
elif input_file.endswith(".h5sc"):
file_format = "sccloud"
elif input_file.endswith(".h5ad"):
file_format = "h5ad"
elif input_file.endswith(".loom"):
file_format = "loom"
elif (
input_file.endswith(".mtx")
or input_file.endswith(".mtx.gz")
or os.path.splitext(input_file)[1] == ""
):
file_format = "mtx"
if os.path.splitext(input_file)[1] != "":
copy_path = os.path.dirname(input_file)
copy_type = "directory"
elif input_file.endswith("dge.txt.gz"):
file_format = "dge"
elif input_file.endswith(".csv") or input_file.endswith(".csv.gz"):
file_format = "csv"
if os.path.basename(input_file) == "expression.csv":
copy_path = os.path.dirname(input_file)
copy_type = "directory"
else:
raise ValueError("Unrecognized file type for file {}!".format(input_file))
return file_format, copy_path, copy_type
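# A few illustrative mappings, derived from the rules above (paths are hypothetical):
#
# >>> infer_file_format("sample.h5sc")         # ('sccloud', 'sample.h5sc', 'file')
# >>> infer_file_format("sample.h5")           # ('10x', 'sample.h5', 'file')
# >>> infer_file_format("outs/matrix.mtx.gz")  # ('mtx', 'outs', 'directory')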
def read_input(
input_file: str,
genome: str = None,
return_type: str = "AnnData",
concat_matrices: bool = False,
h5ad_mode: str = "a",
ngene: int = None,
select_singlets: bool = False,
channel_attr: str = None,
black_list: List[str] = [],
) -> "MemData or AnnData or List[AnnData]":
"""Load data into memory.
This function is used to load input data into memory. Inputs can be in 10x Genomics v2 & v3 formats (hdf5 or mtx), HCA DCP mtx and csv formats, Drop-seq dge format, CSV format, as well as sccloud .h5sc, .h5ad and .loom files.
Parameters
----------
input_file : `str`
Input file name.
genome : `str`, optional (default: None)
A string containing comma-separated genome names. sccloud will read all matrices matching the genome names. If genome is None, all matrices will be considered.
return_type : `str`
Return object type, can be either 'MemData' or 'AnnData'.
concat_matrices : `boolean`, optional (default: False)
If the input file contains multiple matrices, whether to concatenate them into one AnnData object or return a list of AnnData objects.
h5ad_mode : `str`, optional (default: `a`)
If input is in h5ad format, the backed mode for loading the data. mode can be 'a', 'r', or 'r+', where 'a' refers to loading everything into memory.
ngene : `int`, optional (default: None)
Minimum number of genes to keep a barcode. Default is to keep all barcodes.
select_singlets : `bool`, optional (default: False)
Whether to only keep DemuxEM-predicted singlets when loading data.
channel_attr : `str`, optional (default: None)
Use channel_attr to represent different samples. This will set a 'Channel' column field with channel_attr.
black_list : `List[str]`, optional (default: [])
Attributes in the black list will be popped out.
Returns
-------
`MemData` object or `anndata` object or a list of `anndata` objects
An `MemData` object or `anndata` object or a list of `anndata` objects containing the count matrices.
Examples
--------
>>> adata = io.read_input('example_10x.h5', genome = 'mm10')
>>> adata = io.read_input('example.h5ad', h5ad_mode = 'r+')
>>> adata = io.read_input('example_ADT.csv')
"""
start = time.time()
input_file = os.path.expanduser(os.path.expandvars(input_file))
file_format, _, _ = infer_file_format(input_file)
if file_format == "sccloud":
data = load_scCloud_h5_file(
input_file, ngene=ngene, select_singlets=select_singlets
)
elif file_format == "10x":
data = load_10x_h5_file(input_file, ngene=ngene)
elif file_format == "h5ad":
data = anndata.read_h5ad(
input_file, backed=(None if h5ad_mode == "a" else h5ad_mode)
)
elif file_format == "mtx":
data = load_mtx_file(input_file, genome, ngene=ngene)
elif file_format == "loom":
assert genome is not None
data = load_loom_file(input_file, genome, ngene=ngene)
else:
assert (file_format == "dge" or file_format == "csv") and (genome is not None)
data = load_csv_file(
input_file, genome, sep=("\t" if file_format == "dge" else ","), ngene=ngene
)
if file_format != "h5ad":
data.restrain_keywords(genome)
if return_type == "AnnData":
data = data.convert_to_anndata(concat_matrices=concat_matrices, channel_attr=channel_attr, black_list=black_list)
else:
assert (return_type == "AnnData") and (channel_attr is None) and (black_list == [])
end = time.time()
logger.info("Read input is finished. Time spent = {:.2f}s.".format(end - start))
return data
def _parse_whitelist(whitelist: List[str]):
parse_results = {}
for value in whitelist:
tokens = value.split("/")
curr_dict = parse_results
for i in range(len(tokens) - 1):
if tokens[i] not in curr_dict:
curr_dict[tokens[i]] = dict()
curr_dict = curr_dict[tokens[i]]
if curr_dict is None:
break
if curr_dict is not None:
curr_dict[tokens[-1]] = None
return parse_results
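# Example of the parsed structure (derived from the code above):
# _parse_whitelist(["obs", "uns/Groups", "obsm/PCA"]) returns
# {"obs": None, "uns": {"Groups": None}, "obsm": {"PCA": None}}, i.e. a nested dict
# whose None leaves mark fields that should be rewritten.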
def _update_backed_h5ad(group: "hdf5 group", dat: dict, whitelist: dict):
import h5py
from collections.abc import Mapping
for key, value in dat.items():
if not isinstance(key, str):
logging.warning(
"Dictionary key {} is transformed to str upon writing to h5,"
"using string keys is recommended".format(key)
)
key = str(key)
if whitelist is None or key in whitelist:
if isinstance(value, Mapping):
subgroup = (
group[key] if key in group.keys() else group.create_group(key)
)
assert isinstance(subgroup, h5py.Group)
_update_backed_h5ad(
subgroup, value, whitelist[key] if whitelist is not None else None
)
else:
if key in group.keys():
del group[key]
if issparse(value):
sparse_mat = group.create_group(key)
sparse_mat.attrs["h5sparse_format"] = value.format
sparse_mat.attrs["h5sparse_shape"] = np.array(value.shape)
sparse_mat.create_dataset("data", data=value.data, compression="gzip")
sparse_mat.create_dataset("indices", data=value.indices, compression="gzip")
sparse_mat.create_dataset("indptr", data=value.indptr, compression="gzip")
else:
value = np.array(value) if np.ndim(value) > 0 else np.array([value])
sdt = h5py.special_dtype(vlen=str)
if value.dtype.kind in {"U", "O"} :
value = value.astype(sdt)
if value.dtype.names is not None:
new_dtype = value.dtype.descr
convert_type = False
for i in range(len(value.dtype)):
if value.dtype[i].kind in {"U", "O"}:
new_dtype[i] = (new_dtype[i][0], sdt)
convert_type = True
if convert_type:
value = value.astype(new_dtype)
group.create_dataset(key, data=value, compression="gzip")
def write_output(
data: "MemData or AnnData", output_file: str, whitelist: List = ["obs", "obsm", "uns", "var", "varm"]
) -> None:
""" Write data back to disk.
This function is used to write data back to disk.
Parameters
----------
data : `MemData` or `AnnData`
data to write back, can be either an MemData or AnnData object.
output_file : `str`
Output file name. If data is MemData, output_file should end with the suffix '.h5sc'. Otherwise, output_file can end with either '.h5ad' or '.loom'. If output_file ends with '.loom', a LOOM file will be generated. If no suffix is detected, an appropriate one will be appended.
whitelist : `list`, optional, default = ["obs", "obsm", "uns", "var", "varm"]
List that indicates changed fields when writing h5ad file in backed mode. For example,
['uns/Groups', 'obsm/PCA'] will only write Groups in uns, and PCA in obsm; the rest of the fields will be unchanged.
Returns
-------
None
Examples
--------
>>> io.write_output(adata, 'test.h5ad')
"""
start = time.time()
if (not isinstance(data, MemData)) and (not isinstance(data, anndata.AnnData)):
raise ValueError("data is neither an MemData nor AnnData object!")
# Identify and correct file suffix
file_name, _, suffix = output_file.rpartition(".")
if file_name == "":
file_name = output_file
suffix = "h5sc" if isinstance(data, MemData) else "h5ad"
if isinstance(data, MemData) and suffix != "h5sc":
logging.warning(
"Detected file suffix for this MemData object is not .h5sc. We will assume output_file is a file name and append .h5sc suffix."
)
file_name = output_file
suffix = "h5sc"
if isinstance(data, anndata.AnnData) and (suffix not in ["h5ad", "loom"]):
logging.warning(
"Detected file suffix for this AnnData object is neither .h5ad or .loom. We will assume output_file is a file name and append .h5ad suffix."
)
file_name = output_file
suffix = "h5ad"
output_file = file_name + "." + suffix
# Eliminate objects starting with fmat_ from uns
if isinstance(data, anndata.AnnData):
keys = list(data.uns)
for keyword in keys:
if keyword.startswith("fmat_"):
data.uns.pop(keyword)
# Write outputs
if suffix == "h5sc":
data.write_h5_file(output_file)
elif suffix == "loom":
data.write_loom(output_file, write_obsm_varm=True)
elif not data.isbacked or (data.isbacked and data.file._file.h5f.mode != "r+"):
data.write(output_file, compression="gzip")
else:
assert data.file._file.h5f.mode == "r+"
import h5py
h5_file = data.file._file.h5f
# Fix old h5ad files in which obsm/varm were stored as compound datasets
for key in ["obsm", "varm"]:
if key in h5_file.keys() and isinstance(h5_file[key], h5py.Dataset):
del h5_file[key]
whitelist.append(key)
_update_backed_h5ad(
h5_file, data._to_dict_fixed_width_arrays(), _parse_whitelist(whitelist)
)
h5_file.close()
end = time.time()
logger.info("Write output is finished. Time spent = {:.2f}s.".format(end - start))
# Source: /sccloud-0.14.0.tar.gz/sccloud-0.14.0/scCloud/io/io.py
import time
import numpy as np
import pandas as pd
from anndata import AnnData
from joblib import Parallel, delayed, effective_n_jobs
from natsort import natsorted
import ctypes
import ctypes.util
try:
import louvain as louvain_module
except ImportError:
print("Need louvain!")
try:
import leidenalg
except ImportError:
print("Need leidenalg!")
from sklearn.cluster import KMeans
from typing import List
from sccloud.tools import construct_graph
import logging
logger = logging.getLogger("sccloud")
def louvain(
data: AnnData,
rep: str = "pca",
resolution: float = 1.3,
random_state: int = 0,
class_label: str = "louvain_labels",
) -> None:
"""Cluster the cells using Louvain algorithm.
Parameters
----------
data: ``anndata.AnnData``
Annotated data matrix with rows for cells and columns for genes.
rep: ``str``, optional, default: ``"pca"``
The embedding representation used for clustering. Keyword ``'X_' + rep`` must exist in ``data.obsm``. By default, use PCA coordinates.
resolution: ``float``, optional, default: ``1.3``
Resolution factor. Higher resolution tends to find more clusters with smaller sizes.
random_state: ``int``, optional, default: ``0``
Random seed for reproducing results.
class_label: ``str``, optional, default: ``"louvain_labels"``
Key name for storing cluster labels in ``data.obs``.
Returns
-------
``None``
Update ``data.obs``:
* ``data.obs[class_label]``: Cluster labels of cells as categorical data.
Examples
--------
>>> scc.louvain(adata)
"""
start = time.time()
rep_key = "W_" + rep
if rep_key not in data.uns:
raise ValueError("Cannot find affinity matrix. Please run neighbors first!")
W = data.uns[rep_key]
G = construct_graph(W)
partition_type = louvain_module.RBConfigurationVertexPartition
partition = partition_type(G, resolution_parameter=resolution, weights="weight")
optimiser = louvain_module.Optimiser()
optimiser.set_rng_seed(random_state)
diff = optimiser.optimise_partition(partition)
labels = np.array([str(x + 1) for x in partition.membership])
categories = natsorted(np.unique(labels))
data.obs[class_label] = pd.Categorical(values=labels, categories=categories)
end = time.time()
logger.info("Louvain clustering is done. Time spent = {:.2f}s.".format(end - start))
def leiden(
data: AnnData,
rep: str = "pca",
resolution: float = 1.3,
n_iter: int = -1,
random_state: int = 0,
class_label: str = "leiden_labels",
) -> None:
"""Cluster the data using Leiden algorithm.
Parameters
----------
data: ``anndata.AnnData``
Annotated data matrix with rows for cells and columns for genes.
rep: ``str``, optional, default: ``"pca"``
The embedding representation used for clustering. Keyword ``'X_' + rep`` must exist in ``data.obsm``. By default, use PCA coordinates.
resolution: ``float``, optional, default: ``1.3``
Resolution factor. Higher resolution tends to find more clusters.
n_iter: ``int``, optional, default: ``-1``
Number of iterations that Leiden algorithm runs. If ``-1``, run the algorithm until reaching its optimal clustering.
random_state: ``int``, optional, default: ``0``
Random seed for reproducing results.
class_label: ``str``, optional, default: ``"leiden_labels"``
Key name for storing cluster labels in ``data.obs``.
Returns
-------
``None``
Update ``data.obs``:
* ``data.obs[class_label]``: Cluster labels of cells as categorical data.
Examples
--------
>>> scc.leiden(adata)
"""
start = time.time()
rep_key = "W_" + rep
if rep_key not in data.uns:
raise ValueError("Cannot find affinity matrix. Please run neighbors first!")
W = data.uns[rep_key]
G = construct_graph(W)
partition_type = leidenalg.RBConfigurationVertexPartition
partition = leidenalg.find_partition(
G,
partition_type,
seed=random_state,
weights="weight",
resolution_parameter=resolution,
n_iterations=n_iter,
)
labels = np.array([str(x + 1) for x in partition.membership])
categories = natsorted(np.unique(labels))
data.obs[class_label] = pd.Categorical(values=labels, categories=categories)
end = time.time()
logger.info("Leiden clustering is done. Time spent = {:.2f}s.".format(end - start))
def set_numpy_thread_to_one():
library_type = None
library_obj = None
previous_num = None
mkl_loc = ctypes.util.find_library("mkl_rt")
if mkl_loc is not None:
mkl_lib = ctypes.cdll.LoadLibrary(mkl_loc)
library_type = "mkl"
library_obj = mkl_lib
previous_num = mkl_lib.mkl_get_max_threads()
mkl_lib.mkl_set_num_threads(ctypes.byref(ctypes.c_int(1)))
else:
openblas_loc = ctypes.util.find_library("openblas")
if openblas_loc is not None:
openblas_lib = ctypes.cdll.LoadLibrary(openblas_loc)
library_type = "openblas"
library_obj = openblas_lib
previous_num = openblas_lib.openblas_get_num_threads()
openblas_lib.openblas_set_num_threads(1)
else:
import os
import glob
files = glob.glob(
os.path.join(os.path.dirname(np.__file__), ".libs", "libopenblas*.so")
)
if len(files) == 1:
path, openblas_loc = os.path.split(files[0])
part2 = (
":" + os.environ["LD_LIBRARY_PATH"]
if "LD_LIBRARY_PATH" in os.environ
else ""
)
os.environ["LD_LIBRARY_PATH"] = path + part2
openblas_lib = ctypes.cdll.LoadLibrary(openblas_loc)
library_type = "openblas"
library_obj = openblas_lib
previous_num = openblas_lib.openblas_get_num_threads()
openblas_lib.openblas_set_num_threads(1)
return library_type, library_obj, previous_num
def recover_numpy_thread(library_type: str, library_obj: object, value: int):
if library_type == "mkl":
library_obj.mkl_set_num_threads(ctypes.byref(ctypes.c_int(value)))
elif library_type == "openblas":
library_obj.openblas_set_num_threads(value)
def run_one_instance_of_kmeans(n_clusters: int, X: "np.array", seed: int) -> List[str]:
library_type, library_obj, value = set_numpy_thread_to_one()
km = KMeans(n_clusters=n_clusters, n_init=1, n_jobs=1, random_state=seed)
km.fit(X)
recover_numpy_thread(library_type, library_obj, value)
return km.labels_
def run_multiple_kmeans(
data: AnnData,
rep: "str",
n_jobs: int,
n_clusters: int,
n_init: int,
random_state: int,
temp_folder: None,
) -> List[str]:
""" Spectral clustering in parallel
"""
start = time.time()
n_jobs = effective_n_jobs(n_jobs)
rep_key = "X_" + rep
X = data.obsm[rep_key].astype("float64")
np.random.seed(random_state)
seeds = np.random.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, max_nbytes=1e7, temp_folder=temp_folder)(
delayed(run_one_instance_of_kmeans)(n_clusters, X, seed) for seed in seeds
) # Note that if n_jobs == 1, joblib will not fork a new process.
labels = list(zip(*results))
uniqs = np.unique(labels, axis=0)
transfer_dict = {tuple(k): v for k, v in zip(uniqs, range(uniqs.shape[0]))}
labels = [transfer_dict[x] for x in labels]
end = time.time()
logger.info("run_multiple_kmeans finished in {:.2f}s.".format(end - start))
return labels
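# run_multiple_kmeans combines its n_init KMeans runs by treating each cell's tuple
# of labels across runs as one consensus label; each distinct tuple becomes an
# integer label that is later used as the initial membership for Louvain/Leiden.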
def spectral_louvain(
data: AnnData,
rep: str = "pca",
resolution: float = 1.3,
rep_kmeans: str = "diffmap",
n_clusters: int = 30,
n_init: int = 20,
n_jobs: int = -1,
random_state: int = 0,
temp_folder: str = None,
class_label: str = "spectral_louvain_labels",
) -> None:
""" Cluster the data using Spectral Louvain algorithm.
Parameters
----------
data: ``anndata.AnnData``
Annotated data matrix with rows for cells and columns for genes.
rep: ``str``, optional, default: ``"pca"``
The embedding representation used for clustering. Keyword ``'X_' + rep`` must exist in ``data.obsm``. By default, use PCA coordinates.
resolution: ``float``, optional, default: ``1.3``
Resolution factor. Higher resolution tends to find more clusters with smaller sizes.
rep_kmeans: ``str``, optional, default: ``"diffmap"``
The embedding representation on which the KMeans runs. Keyword must exist in ``data.obsm``. By default, use Diffusion Map coordinates. If diffmap is not calculated, use PCA coordinates instead.
n_clusters: ``int``, optional, default: ``30``
The number of clusters set for the KMeans.
n_init: ``int``, optional, default: ``20``
Size of random seeds at initialization.
n_jobs: ``int``, optional, default: ``-1``
Number of threads to use. If ``-1``, use all available threads.
random_state: ``int``, optional, default: ``0``
Random seed for reproducing results.
temp_folder: ``str``, optional, default: ``None``
Temporary folder name for joblib to use during the computation.
class_label: ``str``, optional, default: ``"spectral_louvain_labels"``
Key name for storing cluster labels in ``data.obs``.
Returns
-------
``None``
Update ``data.obs``:
* ``data.obs[class_label]``: Cluster labels for cells as categorical data.
Examples
--------
>>> scc.spectral_louvain(adata)
"""
start = time.time()
if "X_" + rep_kmeans not in data.obsm.keys():
logger.warning("{} is not calculated, switch to pca instead.".format(rep_kmeans))
rep_kmeans = "pca"
if "X_" + rep_kmeans not in data.obsm.keys():
raise ValueError("Please run {} first!".format(rep_kmeans))
if "W_" + rep not in data.uns:
raise ValueError("Cannot find affinity matrix. Please run neighbors first!")
labels = run_multiple_kmeans(
data, rep_kmeans, n_jobs, n_clusters, n_init, random_state, temp_folder
)
W = data.uns["W_" + rep]
G = construct_graph(W)
partition_type = louvain_module.RBConfigurationVertexPartition
partition = partition_type(
G, resolution_parameter=resolution, weights="weight", initial_membership=labels
)
partition_agg = partition.aggregate_partition()
optimiser = louvain_module.Optimiser()
optimiser.set_rng_seed(random_state)
diff = optimiser.optimise_partition(partition_agg)
partition.from_coarse_partition(partition_agg)
labels = np.array([str(x + 1) for x in partition.membership])
categories = natsorted(np.unique(labels))
data.obs[class_label] = pd.Categorical(values=labels, categories=categories)
end = time.time()
logger.info(
"Spectral Louvain clustering is done. Time spent = {:.2f}s.".format(end - start)
)
def spectral_leiden(
data: AnnData,
rep: str = "pca",
resolution: float = 1.3,
rep_kmeans: str = "diffmap",
n_clusters: int = 30,
n_init: int = 20,
n_jobs: int = -1,
random_state: int = 0,
temp_folder: str = None,
class_label: str = "spectral_leiden_labels",
) -> None:
"""Cluster the data using Spectral Leiden algorithm.
Parameters
----------
data: ``anndata.AnnData``
Annotated data matrix with rows for cells and columns for genes.
rep: ``str``, optional, default: ``"pca"``
The embedding representation used for clustering. Keyword ``'X_' + rep`` must exist in ``data.obsm``. By default, use PCA coordinates.
resolution: ``float``, optional, default: ``1.3``
Resolution factor. Higher resolution tends to find more clusters.
rep_kmeans: ``str``, optional, default: ``"diffmap"``
The embedding representation on which the KMeans runs. Keyword must exist in ``data.obsm``. By default, use Diffusion Map coordinates. If diffmap is not calculated, use PCA coordinates instead.
n_clusters: ``int``, optional, default: ``30``
The number of clusters set for the KMeans.
n_init: ``int``, optional, default: ``20``
Size of random seeds at initialization.
n_jobs: ``int``, optional, default: ``-1``
Number of threads to use. If ``-1``, use all available threads.
random_state: ``int``, optional, default: ``0``
Random seed for reproducing results.
temp_folder: ``str``, optional, default: ``None``
Temporary folder name for joblib to use during the computation.
class_label: ``str``, optional, default: ``"spectral_leiden_labels"``
Key name for storing cluster labels in ``data.obs``.
Returns
-------
``None``
Update ``data.obs``:
* ``data.obs[class_label]``: Cluster labels for cells as categorical data.
Examples
--------
>>> scc.spectral_leiden(adata)
"""
start = time.time()
if "X_" + rep_kmeans not in data.obsm.keys():
logger.warning("{} is not calculated, switch to pca instead.".format(rep_kmeans))
rep_kmeans = "pca"
if "X_" + rep_kmeans not in data.obsm.keys():
raise ValueError("Please run {} first!".format(rep_kmeans))
if "W_" + rep not in data.uns:
raise ValueError("Cannot find affinity matrix. Please run neighbors first!")
labels = run_multiple_kmeans(
data, rep_kmeans, n_jobs, n_clusters, n_init, random_state, temp_folder
)
W = data.uns["W_" + rep]
G = construct_graph(W)
partition_type = leidenalg.RBConfigurationVertexPartition
partition = partition_type(
G, resolution_parameter=resolution, weights="weight", initial_membership=labels
)
partition_agg = partition.aggregate_partition()
optimiser = leidenalg.Optimiser()
optimiser.set_rng_seed(random_state)
diff = optimiser.optimise_partition(partition_agg, -1)
partition.from_coarse_partition(partition_agg)
labels = np.array([str(x + 1) for x in partition.membership])
categories = natsorted(np.unique(labels))
data.obs[class_label] = pd.Categorical(values=labels, categories=categories)
end = time.time()
logger.info(
"Spectral Leiden clustering is done. Time spent = {:.2f}s.".format(
end - start
)
)
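# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# scCloud source). `adata` is a hypothetical AnnData object; the neighbors()
# and diffmap() calls are assumed prerequisites, as suggested by the error
# messages raised above when "W_<rep>" or "X_diffmap" are missing.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # import sccloud as scc
    # scc.neighbors(adata, rep="pca")     # builds the affinity matrix W_pca
    # scc.diffmap(adata)                  # provides X_diffmap for KMeans seeding
    # spectral_leiden(adata, rep="pca", rep_kmeans="diffmap", resolution=1.3)
    # print(adata.obs["spectral_leiden_labels"].value_counts())
    pass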
| path: /sccloud-0.14.0.tar.gz/sccloud-0.14.0/scCloud/tools/clustering.py | quality_prob: 0.790328 | learning_prob: 0.448306 | filename: clustering.py | kind: pypi |
import time
import numpy as np
import pandas as pd
from scipy.sparse import issparse
from sklearn.decomposition import PCA
from typing import Tuple
from anndata import AnnData
import logging
logger = logging.getLogger("sccloud")
def qc_metrics(
data: AnnData,
mito_prefix: str = "MT-",
min_genes: int = 500,
max_genes: int = 6000,
min_umis: int = 100,
max_umis: int = 600000,
percent_mito: float = 10.0,
percent_cells: float = 0.05,
) -> None:
"""Generate Quality Control (QC) metrics on the dataset.
Parameters
----------
data: ``anndata.AnnData``
Annotated data matrix with rows for cells and columns for genes.
mito_prefix: ``str``, optional, default: ``"MT-"``
Prefix for mitochondrial genes.
min_genes: ``int``, optional, default: ``500``
Only keep cells with at least ``min_genes`` genes.
max_genes: ``int``, optional, default: ``6000``
Only keep cells with less than ``max_genes`` genes.
min_umis: ``int``, optional, default: ``100``
Only keep cells with at least ``min_umis`` UMIs.
max_umis: ``int``, optional, default: ``600000``
Only keep cells with less than ``max_umis`` UMIs.
percent_mito: ``float``, optional, default: ``10.0``
Only keep cells with percent mitochondrial genes less than ``percent_mito`` % of total counts.
percent_cells: ``float``, optional, default: ``0.05``
Only assign genes to be ``robust`` that are expressed in at least ``percent_cells`` % of cells.
Returns
-------
``None``
Update ``data.obs``:
* ``n_genes``: Total number of genes for each cell.
* ``n_counts``: Total number of counts for each cell.
* ``percent_mito``: Percent of mitochondrial genes for each cell.
* ``passed_qc``: Boolean type indicating if a cell passes the QC process based on the QC metrics.
Update ``data.var``:
* ``n_cells``: Total number of cells in which each gene is measured.
* ``percent_cells``: Percent of cells in which each gene is measured.
* ``robust``: Boolean type indicating if a gene is robust based on the QC metrics.
* ``highly_variable_features``: Boolean type indicating if a gene is a highly variable feature. By default, set all robust genes as highly variable features.
Examples
--------
>>> scc.qc_metrics(adata)
"""
data.obs["passed_qc"] = False
data.obs["n_genes"] = data.X.getnnz(axis=1)
data.obs["n_counts"] = data.X.sum(axis=1).A1
mito_prefixes = mito_prefix.split(",")
def startswith(name):
for prefix in mito_prefixes:
if name.startswith(prefix):
return True
return False
mito_genes = data.var_names.map(startswith).values.nonzero()[0]
data.obs["percent_mito"] = (data.X[:, mito_genes].sum(axis=1).A1 / np.maximum(
data.obs["n_counts"].values, 1.0
)) * 100
# Assign passed_qc
filters = [
data.obs["n_genes"] >= min_genes,
data.obs["n_genes"] < max_genes,
data.obs["n_counts"] >= min_umis,
data.obs["n_counts"] < max_umis,
data.obs["percent_mito"] < percent_mito,
]
data.obs.loc[np.logical_and.reduce(filters), "passed_qc"] = True
var = data.var
data = data[
data.obs["passed_qc"]
] # compute gene stats in space of filtered cells only
var["n_cells"] = data.X.getnnz(axis=0)
var["percent_cells"] = (var["n_cells"] / data.shape[0]) * 100
var["robust"] = var["percent_cells"] >= percent_cells
var["highly_variable_features"] = var[
"robust"
] # default all robust genes are "highly" variable
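# Illustrative QC workflow (added sketch; not part of the original source).
# Assumes a hypothetical AnnData `adata` whose `obs` contains a "Channel"
# column, which get_filter_stats() below groups by.
if __name__ == "__main__":
    # qc_metrics(adata, mito_prefix="MT-", min_genes=500, percent_mito=10.0)
    # df_cells, df_genes = get_filter_stats(adata)   # per-channel filtration stats
    # filter_data(adata)                             # drop cells/genes flagged by QC
    pass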
def get_filter_stats(data: AnnData) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Calculate filtration stats on cell barcodes and genes, respectively.
Parameters
----------
data: ``anndata.AnnData``
Annotated data matrix with rows for cells and columns for genes.
Returns
-------
df_cells: ``pandas.DataFrame``
Data frame of stats on cell filtration.
df_genes: ``pandas.DataFrame``
Data frame of stats on gene filtration.
Examples
--------
>>> scc.get_filter_stats(adata)
"""
# cell stats
gb1 = data.obs.groupby("Channel")
df_before = gb1.median()
df_before = df_before.assign(total=gb1.size())
df_before.rename(
columns={
"n_genes": "median_n_genes_before",
"n_counts": "median_n_umis_before",
"percent_mito": "median_percent_mito_before",
},
inplace=True,
)
data = data[data.obs["passed_qc"]] # focusing only on filtered cells
gb2 = data.obs.groupby("Channel")
df_after = gb2.median()
df_after = df_after.assign(kept=gb2.size())
df_after.rename(
columns={
"n_genes": "median_n_genes",
"n_counts": "median_n_umis",
"percent_mito": "median_percent_mito",
},
inplace=True,
)
df_cells = pd.concat((df_before, df_after), axis=1, sort=False)
df_cells.fillna(0, inplace=True)
df_cells["kept"] = df_cells["kept"].astype(int)
df_cells["filt"] = df_cells["total"] - df_cells["kept"]
df_cells = df_cells[
[
"kept",
"median_n_genes",
"median_n_umis",
"median_percent_mito",
"filt",
"total",
"median_n_genes_before",
"median_n_umis_before",
"median_percent_mito_before",
]
]
df_cells.sort_values("kept", inplace=True)
# gene stats
idx = data.var["robust"] == False
df_genes = pd.DataFrame(
{
"n_cells": data.var.loc[idx, "n_cells"],
"percent_cells": data.var.loc[idx, "percent_cells"],
}
)
df_genes.index.name = "gene"
df_genes.sort_values("n_cells", ascending=False, inplace=True)
return df_cells, df_genes
def filter_data(data: AnnData) -> None:
""" Filter data based on qc_metrics calculated in ``scc.qc_metrics``.
Parameters
----------
data: ``anndata.AnnData``
Annotated data matrix with rows for cells and columns for genes.
Returns
-------
``None``
Update ``data`` with cells and genes after filtration.
Examples
--------
>>> scc.filter_data(adata)
"""
assert "passed_qc" in data.obs
data._inplace_subset_obs(data.obs["passed_qc"].values)
data._inplace_subset_var((data.var["n_cells"] > 0).values)
logger.info(
"After filteration, {nc} cells and {ng} genes are kept. Among {ng} genes, {nrb} genes are robust.".format(
nc=data.shape[0], ng=data.shape[1], nrb=data.var["robust"].sum()
)
)
def generate_filter_plots(
data: AnnData, plot_filt: str, plot_filt_figsize: str = None
) -> None:
""" This function generates filtration plots, only used in command line.
"""
df_plot_before = data.obs[["Channel", "n_genes", "n_counts", "percent_mito"]].copy()
df_plot_before.reset_index(drop=True, inplace=True)
df_plot_before["status"] = "original"
data = data[data.obs["passed_qc"]] # focusing only on filtered cells
df_plot_after = data.obs[["Channel", "n_genes", "n_counts", "percent_mito"]].copy()
df_plot_after.reset_index(drop=True, inplace=True)
df_plot_after["status"] = "filtered"
df_plot = pd.concat((df_plot_before, df_plot_after), axis=0)
from sccloud.plotting import plot_qc_violin
figsize = None
if plot_filt_figsize is not None:
width, height = plot_filt_figsize.split(",")
figsize = (int(width), int(height))
plot_qc_violin(
df_plot,
"count",
plot_filt + ".filt.UMI.pdf",
xattr="Channel",
hue="status",
xlabel="Channel",
split=True,
linewidth=0,
figsize=figsize,
)
plot_qc_violin(
df_plot,
"gene",
plot_filt + ".filt.gene.pdf",
xattr="Channel",
hue="status",
xlabel="Channel",
split=True,
linewidth=0,
figsize=figsize,
)
plot_qc_violin(
df_plot,
"mito",
plot_filt + ".filt.mito.pdf",
xattr="Channel",
hue="status",
xlabel="Channel",
split=True,
linewidth=0,
figsize=figsize,
)
logger.info("Filtration plots are generated.")
def run_filter_data(
data: AnnData,
output_filt: str = None,
plot_filt: str = None,
plot_filt_figsize: Tuple[int, int] = None,
mito_prefix: str = "MT-",
min_genes: int = 500,
max_genes: int = 6000,
min_umis: int = 100,
max_umis: int = 600000,
percent_mito: float = 10.0,
percent_cells: float = 0.05,
) -> None:
""" This function is for command line use.
"""
start = time.time()
qc_metrics(
data,
mito_prefix,
min_genes,
max_genes,
min_umis,
max_umis,
percent_mito,
percent_cells,
)
if output_filt is not None:
writer = pd.ExcelWriter(output_filt + ".filt.xlsx", engine="xlsxwriter")
df_cells, df_genes = get_filter_stats(data)
df_cells.to_excel(writer, sheet_name="Cell filtration stats")
df_genes.to_excel(writer, sheet_name="Gene filtration stats")
writer.save()
logger.info("Filtration results are written.")
if plot_filt is not None:
generate_filter_plots(data, plot_filt, plot_filt_figsize)
filter_data(data)
end = time.time()
logger.info("filter_data is finished. Time spent = {:.2f}s.".format(end - start))
def log_norm(data: AnnData, norm_count: float = 1e5) -> None:
"""Normalization, and then apply natural logarithm to the data.
Parameters
----------
data: ``anndata.AnnData``
Annotated data matrix with rows for cells and columns for genes.
norm_count: ``float``, optional, default: ``1e5``.
Total count per cell after normalization.
Returns
-------
``None``
Update ``data.X`` with count matrix after log-normalization.
Examples
--------
>>> scc.log_norm(adata)
"""
start = time.time()
assert issparse(data.X)
mat = data.X[:, data.var["robust"].values]
scale = norm_count / mat.sum(axis=1).A1
data.X.data *= np.repeat(scale, np.diff(data.X.indptr))
data.X = data.X.log1p()
end = time.time()
logger.info("Normalization is finished. Time spent = {:.2f}s.".format(end - start))
def select_features(data: AnnData, features: str = None) -> str:
""" Subset the features and store the resulting matrix in dense format in data.uns with `'fmat_'` prefix. `'fmat_*'` will be removed before writing out the disk.
Parameters
----------
data: ``anndata.AnnData``
Annotated data matrix with rows for cells and columns for genes.
features: ``str``, optional, default: ``None``.
a keyword in ``data.var``, which refers to a boolean array. If ``None``, all features will be selected.
Returns
-------
keyword: ``str``
The keyword in ``data.uns`` referring to the features selected.
Update ``data.uns``:
* ``data.uns[keyword]``: A submatrix of the data containing features selected.
Examples
--------
>>> scc.select_features(adata)
"""
keyword = "fmat_" + str(features) # fmat: feature matrix
if keyword not in data.uns:
if features is not None:
assert features in data.var
fmat = data.X[:, data.var[features].values]
else:
fmat = data.X
if issparse(fmat):
data.uns[keyword] = fmat.toarray()
else:
data.uns[keyword] = fmat.copy()
return keyword
def pca(
data: AnnData,
n_components: int = 50,
features: str = "highly_variable_features",
standardize: bool = True,
max_value: float = 10,
random_state: int = 0,
) -> None:
"""Perform Principle Component Analysis (PCA) to the data.
The calculation uses *scikit-learn* implementation.
Parameters
----------
data: ``anndata.AnnData``
Annotated data matrix with rows for cells and columns for genes.
n_components: ``int``, optional, default: ``50``.
Number of Principal Components to get.
features: ``str``, optional, default: ``"highly_variable_features"``.
Keyword in ``data.var`` to specify features used for PCA.
standardize: ``bool``, optional, default: ``True``.
Whether to scale the data to unit variance and zero mean or not.
max_value: ``float``, optional, default: ``10``.
The threshold to truncate data after scaling. If ``None``, do not truncate.
random_state: ``int``, optional, default: ``0``.
Random seed to be set for reproducing result.
Returns
-------
``None``.
Update ``data.obsm``:
* ``data.obsm["X_pca"]``: PCA matrix of the data.
Update ``data.uns``:
* ``data.uns["PCs"]``: The principal components containing the loadings.
* ``data.uns["pca_variance"]``: Explained variance, i.e. the eigenvalues of the covariance matrix.
* ``data.uns["pca_variance_ratio"]``: Ratio of explained variance.
Examples
--------
>>> scc.pca(adata)
"""
keyword = select_features(data, features)
start = time.time()
X = data.uns[keyword]
if standardize:
# scaler = StandardScaler(copy=False)
# scaler.fit_transform(X)
m1 = X.mean(axis=0)
psum = np.multiply(X, X).sum(axis=0)
std = ((psum - X.shape[0] * (m1 ** 2)) / (X.shape[0] - 1.0)) ** 0.5
std[std == 0] = 1
X -= m1
X /= std
if max_value is not None:
X[X > max_value] = max_value
X[X < -max_value] = -max_value
pca = PCA(n_components=n_components, random_state=random_state)
X_pca = pca.fit_transform(X)
data.obsm["X_pca"] = X_pca
data.uns[
"PCs"
] = pca.components_.T # cannot be varm because numbers of features are not the same
data.uns["pca"] = {}
data.uns["pca"]["variance"] = pca.explained_variance_
data.uns["pca"]["variance_ratio"] = pca.explained_variance_ratio_
end = time.time()
logger.info("PCA is done. Time spent = {:.2f}s.".format(end - start))
| path: /sccloud-0.14.0.tar.gz/sccloud-0.14.0/scCloud/tools/preprocessing.py | quality_prob: 0.916025 | learning_prob: 0.538073 | filename: preprocessing.py | kind: pypi |
import time
import numpy as np
import pandas as pd
from scipy.sparse import issparse
from collections import defaultdict
from joblib import Parallel, delayed
import skmisc.loess as sl
from typing import List
from anndata import AnnData
import logging
logger = logging.getLogger("sccloud")
def estimate_feature_statistics(data: AnnData, consider_batch: bool) -> None:
""" Estimate feature (gene) statistics per channel, such as mean, var etc.
"""
assert issparse(data.X)
if consider_batch:
start = time.time()
if "Channels" not in data.uns:
data.uns["Channels"] = data.obs["Channel"].unique()
if "Group" not in data.obs:
data.obs["Group"] = "one_group"
if "Groups" not in data.uns:
data.uns["Groups"] = data.obs["Group"].unique()
channels = data.uns["Channels"]
groups = data.uns["Groups"]
ncells = np.zeros(channels.size)
means = np.zeros((data.shape[1], channels.size))
partial_sum = np.zeros((data.shape[1], channels.size))
group_dict = defaultdict(list)
for i, channel in enumerate(channels):
idx = np.isin(data.obs["Channel"], channel)
mat = data.X[idx].astype(np.float64)
ncells[i] = mat.shape[0]
if ncells[i] == 0:
continue
if ncells[i] == 1:
means[:, i] = mat.toarray()[0]
else:
means[:, i] = mat.mean(axis=0).A1
m2 = mat.power(2).sum(axis=0).A1
partial_sum[:, i] = m2 - ncells[i] * (means[:, i] ** 2)
group = data.obs["Group"][idx.nonzero()[0][0]]
group_dict[group].append(i)
partial_sum[partial_sum < 1e-6] = 0.0
overall_means = np.dot(means, ncells) / data.shape[0]
batch_adjusted_vars = np.zeros(data.shape[1])
c2gid = np.zeros(channels.size, dtype=int)
gncells = np.zeros(groups.size)
gmeans = np.zeros((data.shape[1], groups.size))
gstds = np.zeros((data.shape[1], groups.size))
for i, group in enumerate(groups):
gchannels = group_dict[group]
c2gid[gchannels] = i
gncells[i] = ncells[gchannels].sum()
gmeans[:, i] = np.dot(means[:, gchannels], ncells[gchannels]) / gncells[i]
gstds[:, i] = (
partial_sum[:, gchannels].sum(axis=1) / gncells[i]
) ** 0.5 # calculate std
if groups.size > 1:
batch_adjusted_vars += gncells[i] * (
(gmeans[:, i] - overall_means) ** 2
)
data.varm["means"] = means
data.varm["partial_sum"] = partial_sum
data.uns["ncells"] = ncells
data.varm["gmeans"] = gmeans
data.varm["gstds"] = gstds
data.uns["gncells"] = gncells
data.uns["c2gid"] = c2gid
data.var["mean"] = overall_means
data.var["var"] = (batch_adjusted_vars + partial_sum.sum(axis=1)) / (
data.shape[0] - 1.0
)
end = time.time()
logger.info(
"Estimation on feature statistics per channel is finished. Time spent = {:.2f}s.".format(
end - start
)
)
else:
mean = data.X.mean(axis=0).A1
m2 = data.X.power(2).sum(axis=0).A1
var = (m2 - data.X.shape[0] * (mean ** 2)) / (data.X.shape[0] - 1)
data.var["mean"] = mean
data.var["var"] = var
def select_hvf_scCloud(
data: AnnData, consider_batch: bool, n_top: int = 2000, span: float = 0.02
) -> None:
""" Select highly variable features using the sccloud method
"""
if "robust" not in data.var:
raise ValueError("Please run `qc_metrics` to identify robust genes")
estimate_feature_statistics(data, consider_batch)
robust_idx = data.var["robust"].values
hvf_index = np.zeros(robust_idx.sum(), dtype=bool)
mean = data.var.loc[robust_idx, "mean"]
var = data.var.loc[robust_idx, "var"]
lobj = sl.loess(mean, var, span=span, degree=2)
lobj.fit()
rank1 = np.zeros(hvf_index.size, dtype=int)
rank2 = np.zeros(hvf_index.size, dtype=int)
delta = var - lobj.outputs.fitted_values
fc = var / lobj.outputs.fitted_values
rank1[np.argsort(delta)[::-1]] = range(hvf_index.size)
rank2[np.argsort(fc)[::-1]] = range(hvf_index.size)
hvf_rank = rank1 + rank2
hvf_index[np.argsort(hvf_rank)[:n_top]] = True
data.var["hvf_loess"] = 0.0
data.var.loc[robust_idx, "hvf_loess"] = lobj.outputs.fitted_values
data.var["hvf_rank"] = -1
data.var.loc[robust_idx, "hvf_rank"] = hvf_rank
data.var["highly_variable_features"] = False
data.var.loc[robust_idx, "highly_variable_features"] = hvf_index
def select_hvf_seurat_single(
X: "csr_matrix",
n_top: int,
min_disp: float,
max_disp: float,
min_mean: float,
max_mean: float,
) -> List[int]:
""" HVF selection for one channel using Seurat method
"""
X = X.copy().expm1()
mean = X.mean(axis=0).A1
m2 = X.power(2).sum(axis=0).A1
var = (m2 - X.shape[0] * (mean ** 2)) / (X.shape[0] - 1)
dispersion = np.full(X.shape[1], np.nan)
idx_valid = (mean > 0.0) & (var > 0.0)
dispersion[idx_valid] = var[idx_valid] / mean[idx_valid]
mean = np.log1p(mean)
dispersion = np.log(dispersion)
df = pd.DataFrame({"log_dispersion": dispersion, "bin": pd.cut(mean, bins=20)})
log_disp_groups = df.groupby("bin")["log_dispersion"]
log_disp_mean = log_disp_groups.mean()
log_disp_std = log_disp_groups.std(ddof=1)
log_disp_zscore = (
df["log_dispersion"].values - log_disp_mean.loc[df["bin"]].values
) / log_disp_std.loc[df["bin"]].values
log_disp_zscore[np.isnan(log_disp_zscore)] = 0.0
hvf_rank = np.full(X.shape[1], -1, dtype=int)
ords = np.argsort(log_disp_zscore)[::-1]
if n_top is None:
hvf_rank[ords] = range(X.shape[1])
idx = np.logical_and.reduce(
(
mean > min_mean,
mean < max_mean,
log_disp_zscore > min_disp,
log_disp_zscore < max_disp,
)
)
hvf_rank[~idx] = -1
else:
hvf_rank[ords[:n_top]] = range(n_top)
return hvf_rank
def select_hvf_seurat_multi(
X: "csr_matrix",
channels: List[str],
cell2channel: List[str],
n_top: int,
n_jobs: int,
min_disp: float,
max_disp: float,
min_mean: float,
max_mean: float,
) -> List[int]:
Xs = []
for channel in channels:
Xs.append(X[np.isin(cell2channel, channel)])
from joblib import effective_n_jobs
n_jobs = effective_n_jobs(n_jobs)
res_arr = np.array(
Parallel(n_jobs=n_jobs)(
delayed(select_hvf_seurat_single)(
Xs[i], n_top, min_disp, max_disp, min_mean, max_mean
)
for i in range(channels.size)
)
)
selected = res_arr >= 0
shared = selected.sum(axis=0)
cands = (shared > 0).nonzero()[0]
import numpy.ma as ma
median_rank = ma.median(ma.masked_array(res_arr, mask=~selected), axis=0).data
cands = sorted(cands, key=lambda x: median_rank[x])
cands = sorted(cands, key=lambda x: shared[x], reverse=True)
hvf_rank = np.full(X.shape[1], -1, dtype=int)
hvf_rank[cands[:n_top]] = range(n_top)
return hvf_rank
def select_hvf_seurat(
data: AnnData,
consider_batch: bool,
n_top: int,
min_disp: float,
max_disp: float,
min_mean: float,
max_mean: float,
n_jobs: int,
) -> None:
""" Select highly variable features using Seurat method.
"""
robust_idx = data.var["robust"].values
X = data.X[:, robust_idx]
hvf_rank = (
select_hvf_seurat_multi(
X,
data.uns["Channels"],
data.obs["Channel"],
n_top,
n_jobs=n_jobs,
min_disp=min_disp,
max_disp=max_disp,
min_mean=min_mean,
max_mean=max_mean,
)
if consider_batch
else select_hvf_seurat_single(
X,
n_top=n_top,
min_disp=min_disp,
max_disp=max_disp,
min_mean=min_mean,
max_mean=max_mean,
)
)
hvf_index = hvf_rank >= 0
data.var["hvf_rank"] = -1
data.var.loc[robust_idx, "hvf_rank"] = hvf_rank
data.var["highly_variable_features"] = False
data.var.loc[robust_idx, "highly_variable_features"] = hvf_index
def highly_variable_features(
data: AnnData,
consider_batch: bool,
flavor: str = "sccloud",
n_top: int = 2000,
span: float = 0.02,
min_disp: float = 0.5,
max_disp: float = np.inf,
min_mean: float = 0.0125,
max_mean: float = 7,
n_jobs: int = -1,
) -> None:
""" Highly variable features (HVF) selection. The input data should be logarithmized.
Parameters
----------
data: ``anndata.AnnData``
Annotated data matrix with rows for cells and columns for genes.
consider_batch: ``bool``.
Whether to consider batch effects or not.
flavor: ``str``, optional, default: ``"sccloud"``
The HVF selection method to use. Available choices are ``"sccloud"`` or ``"Seurat"``.
n_top: ``int``, optional, default: ``2000``
Number of genes to be selected as HVF. If ``None``, the ``"sccloud"`` flavor keeps all robust genes, while the ``"Seurat"`` flavor keeps genes passing the dispersion and mean cutoffs.
span: ``float``, optional, default: ``0.02``
Only applicable when ``flavor`` is ``"sccloud"``. The smoothing factor used by the *scikit-misc* loess model in the sccloud HVF selection method.
min_disp: ``float``, optional, default: ``0.5``
Minimum normalized dispersion.
max_disp: ``float``, optional, default: ``np.inf``
Maximum normalized dispersion. Set it to ``np.inf`` for infinity bound.
min_mean: ``float``, optional, default: ``0.0125``
Minimum mean.
max_mean: ``float``, optional, default: ``7``
Maximum mean.
n_jobs: ``int``, optional, default: ``-1``
Number of threads to be used during calculation. If ``-1``, all available threads will be used.
Returns
-------
``None``
Update ``data.var``:
* ``highly_variable_features``: Boolean type indicating if a gene is a highly variable feature.
* ``hvf_rank``: Rank of a gene among the candidate HVFs; ``-1`` if the gene is not ranked.
Examples
--------
>>> scc.highly_variable_features(adata, consider_batch = False)
"""
start = time.time()
if "Channels" not in data.uns:
if "Channel" not in data.obs:
data.obs["Channel"] = ""
data.uns["Channels"] = data.obs["Channel"].unique()
if data.uns["Channels"].size == 1 and consider_batch:
consider_batch = False
logger.warning(
"Warning: only contains one channel, no need to consider batch for selecting highly variable features."
)
if flavor == "sccloud":
select_hvf_scCloud(data, consider_batch, n_top=n_top, span=span)
else:
assert flavor == "Seurat"
select_hvf_seurat(
data,
consider_batch,
n_top=n_top,
min_disp=min_disp,
max_disp=max_disp,
min_mean=min_mean,
max_mean=max_mean,
n_jobs=n_jobs,
)
end = time.time()
logger.info(
"{tot} highly variable features have been selected. Time spent = {time:.2f}s.".format(
tot=data.var["highly_variable_features"].sum(), time=end - start
)
)
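# Illustrative call patterns (added; not part of the original source).
# `adata` is a hypothetical log-normalized AnnData with qc_metrics() already run.
if __name__ == "__main__":
    # highly_variable_features(adata, consider_batch=False)                 # loess-based sccloud flavor
    # highly_variable_features(adata, consider_batch=True, flavor="Seurat",
    #                          n_top=2000, n_jobs=4)                        # per-channel Seurat flavor
    pass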
| path: /sccloud-0.14.0.tar.gz/sccloud-0.14.0/scCloud/tools/hvf_selection.py | quality_prob: 0.698021 | learning_prob: 0.510192 | filename: hvf_selection.py | kind: pypi |
import numpy as np
import anndata
from typing import List
import logging
logger = logging.getLogger("sccloud")
def parse_subset_selections(subset_selections):
subsets_dict = {}
for subset_str in subset_selections:
attr, value_str = subset_str.split(":")
if attr in subsets_dict:
subsets_dict[attr].extend(value_str.split(","))
else:
subsets_dict[attr] = value_str.split(",")
return subsets_dict
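# Added demo (not part of the original source) of the restriction-string format
# consumed by parse_subset_selections().
if __name__ == "__main__":
    print(parse_subset_selections(["louvain_labels:1,3", "louvain_labels:5"]))
    # -> {'louvain_labels': ['1', '3', '5']}
    print(parse_subset_selections(["Condition:healthy"]))
    # -> {'Condition': ['healthy']}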
def get_anndata_for_subclustering(data: "AnnData", subset_selections: List[str]):
obs_index = np.full(data.shape[0], True)
subsets_dict = parse_subset_selections(subset_selections)
for key, value in subsets_dict.items():
logger.info("{} in {}".format(str(key), str(value)))
obs_index = obs_index & np.isin(data.obs[key], value)
data = data[obs_index, :]
obs_dict = {"obs_names": data.obs_names.values}
for attr in data.obs.columns:
if attr != "pseudotime":
if attr.find("_labels") < 0:
obs_dict[attr] = data.obs[attr].values
else:
obs_dict["parent_" + attr] = data.obs[attr].values
var_dict = {
"var_names": data.var_names.values,
"gene_ids": data.var["gene_ids"].values,
"robust": data.var["robust"].values,
}
newdata = anndata.AnnData(X=data.X, obs=obs_dict, var=var_dict)
newdata.var["n_cells"] = newdata.X.getnnz(axis=0)
newdata.var["robust"] = (
newdata.var["robust"].values & (newdata.var["n_cells"] > 0).values
)
newdata.var["highly_variable_features"] = newdata.var[
"robust"
] # default all robust genes are "highly" variable
if "Channels" in data.uns:
newdata.uns["Channels"] = data.uns["Channels"]
if "Groups" in data.uns:
newdata.uns["Groups"] = data.uns["Groups"]
if "plus" in data.varm.keys():
newdata.varm["means"] = data.varm["plus"]
if "muls" in data.varm.keys():
newdata.varm["muls"] = data.varm["muls"]
logger.info("{0} cells are selected.".format(newdata.shape[0]))
return newdata
| path: /sccloud-0.14.0.tar.gz/sccloud-0.14.0/scCloud/tools/subcluster_utils.py | quality_prob: 0.488527 | learning_prob: 0.379637 | filename: subcluster_utils.py | kind: pypi |
import time
import numpy as np
from scipy.sparse import issparse
from anndata import AnnData
import logging
logger = logging.getLogger("sccloud")
from sccloud.tools import estimate_feature_statistics, select_features
def set_group_attribute(data: AnnData, attribute_string: str) -> None:
"""Set group attributes used in batch correction.
Batch correction assumes the differences in gene expression between channels are due to batch effects.
However, in many cases, we know that channels can be partitioned into several groups and each group is
biologically different from others. In this case, *sccloud* will only perform batch correction for channels within each group.
Parameters
----------
data: ``anndata.AnnData``
Annotated data matrix with rows for cells and columns for genes.
attribute_string: ``str``
Attributes used to construct groups:
* If ``None``, assume all channels are from one group.
* ``attr``, where ``attr`` is a keyword in ``data.obs``.
So the groups are defined by this sample attribute.
* ``attr1+attr2+...+attrn``, where ``attr1`` to ``attrn`` are keywords in ``data.obs``.
So the groups are defined by the Cartesian product of these *n* attributes.
* ``attr=value_11,...value_1n_1;value_21,...value_2n_2;...;value_m1,...,value_mn_m``, where ``attr`` is a keyword in ``data.obs``.
In this form, there will be *(m+1)* groups. A cell belongs to group *i* (*i > 1*) if and only if
its sample attribute ``attr`` has a value among ``value_i1``, ... ``value_in_i``.
A cell belongs to group 0 if it does not belong to any other groups.
Returns
-------
``None``
Update ``data.obs``:
* ``data.obs["Group"]``: Group ID for each cell.
Examples
--------
>>> scc.set_group_attribute(adata, attribute_string = "Individual")
>>> scc.set_group_attribute(adata, attribute_string = "Individual+assignment")
>>> scc.set_group_attribute(adata, attribute_string = "Channel=1,3,5;2,4,6,8")
"""
if attribute_string.find("=") >= 0:
attr, value_str = attribute_string.split("=")
assert attr in data.obs.columns
values = value_str.split(";")
data.obs["Group"] = "0"
for group_id, value in enumerate(values):
vals = value.split(",")
idx = np.isin(data.obs[attr], vals)
data.obs.loc[idx, "Group"] = str(group_id + 1)
elif attribute_string.find("+") >= 0:
attrs = attribute_string.split("+")
assert np.isin(attrs, data.obs.columns).sum() == len(attrs)
data.obs["Group"] = data.obs[attrs].apply(lambda x: "+".join(x), axis=1)
else:
assert attribute_string in data.obs.columns
data.obs["Group"] = data.obs[attribute_string]
def estimate_adjustment_matrices(data: AnnData) -> bool:
""" Estimate adjustment matrices
"""
if ("gmeans" not in data.varm) or ("gstds" not in data.varm):
estimate_feature_statistics(data, True)
if data.uns["Channels"].size == 1:
logger.warning(
"Warning: data only contains 1 channel. Batch correction disabled!"
)
return False
nchannel = data.uns["Channels"].size
plus = np.zeros((data.shape[1], nchannel))
muls = np.zeros((data.shape[1], nchannel))
ncells = data.uns["ncells"]
means = data.varm["means"]
partial_sum = data.varm["partial_sum"]
gmeans = data.varm["gmeans"]
gstds = data.varm["gstds"]
c2gid = data.uns["c2gid"]
for i in range(data.uns["Channels"].size):
if ncells[i] > 1:
muls[:, i] = (partial_sum[:, i] / (ncells[i] - 1.0)) ** 0.5
outliers = muls[:, i] < 1e-6
normals = np.logical_not(outliers)
muls[outliers, i] = 1.0
muls[normals, i] = gstds[normals, c2gid[i]] / muls[normals, i]
plus[:, i] = gmeans[:, c2gid[i]] - muls[:, i] * means[:, i]
data.varm["plus"] = plus
data.varm["muls"] = muls
return True
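# Added one-gene illustration (not part of the original source) of the affine
# correction estimated above: for a channel with statistics (mean_c, std_c)
# inside a group with statistics (mean_g, std_g), mul = std_g / std_c and
# plus = mean_g - mul * mean_c, so x * mul + plus maps the channel mean onto
# the group mean while rescaling its spread.
if __name__ == "__main__":
    mean_c, std_c = 2.0, 0.5
    mean_g, std_g = 3.0, 1.0
    mul = std_g / std_c
    plus_val = mean_g - mul * mean_c
    assert np.isclose(mean_c * mul + plus_val, mean_g)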
def correct_batch_effects(data: AnnData, keyword: str, features: str = None) -> None:
""" Apply calculated plus and muls to correct batch effects for a dense matrix
"""
X = data.uns[keyword]
m = X.shape[1]
if features is not None:
selected = data.var[features].values
plus = data.varm["plus"][selected, :]
muls = data.varm["muls"][selected, :]
else:
selected = np.ones(data.shape[1], dtype=bool)
plus = data.varm["plus"]
muls = data.varm["muls"]
for i, channel in enumerate(data.uns["Channels"]):
idx = np.isin(data.obs["Channel"], channel)
if idx.sum() == 0:
continue
X[idx] = X[idx] * np.reshape(muls[:, i], newshape=(1, m)) + np.reshape(
plus[:, i], newshape=(1, m)
)
# X[X < 0.0] = 0.0
def correct_batch(data: AnnData, features: str = None) -> None:
"""Batch correction on data.
Parameters
----------
data: ``anndata.AnnData``
Annotated data matrix with rows for cells and columns for genes.
features: `str`, optional, default: ``None``
Features to be included in batch correction computation. If ``None``, simply consider all features.
Returns
-------
``None``
Update ``data.X`` by the corrected count matrix.
Examples
--------
>>> scc.correct_batch(adata, features = "highly_variable_features")
"""
tot_seconds = 0.0
# estimate adjustment parameters
start = time.time()
can_correct = estimate_adjustment_matrices(data)
end = time.time()
tot_seconds += end - start
logger.info("Adjustment parameters are estimated.")
# select dense matrix
keyword = select_features(data, features)
logger.info("Features are selected.")
if can_correct:
start = time.time()
correct_batch_effects(data, keyword, features)
end = time.time()
tot_seconds += end - start
logger.info(
"Batch correction is finished. Time spent = {:.2f}s.".format(tot_seconds)
)
| path: /sccloud-0.14.0.tar.gz/sccloud-0.14.0/scCloud/tools/batch_correction.py | quality_prob: 0.837786 | learning_prob: 0.548855 | filename: batch_correction.py | kind: pypi |
import time
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances
try:
import igraph
except ImportError as error:
print("Need python-igraph!")
from collections import deque
from typing import List
from anndata import AnnData
import logging
logger = logging.getLogger("sccloud")
def calc_pseudotime(data: AnnData, roots: List[str]) -> None:
"""Calculate Pseudotime based on Diffusion Map.
Parameters
----------
data: ``anndata.AnnData``
Annotated data matrix with rows for cells and columns for genes.
roots: ``List[str]``
List of cell barcodes used as root cells for the pseudotime calculation. Must be a subset of ``data.obs_names``.
Returns
-------
``None``
Update ``data.obs``:
* ``data.obs["pseudotime"]``: Pseudotime result.
Examples
--------
>>> scc.calc_pseudotime(adata, roots = list(adata.obs_names[0:100]))
"""
start = time.time()
if not isinstance(roots, list):
roots = [roots]
if "X_diffmap" not in data.obsm.keys():
raise ValueError("Please run diffmap first!")
data.uns["roots"] = roots
mask = np.isin(data.obs_names, data.uns["roots"])
distances = np.mean(
euclidean_distances(data.obsm["X_diffmap"][mask, :], data.obsm["X_diffmap"]),
axis=0,
)
dmin = distances.min()
dmax = distances.max()
data.obs["pseudotime"] = (distances - dmin) / (dmax - dmin)
end = time.time()
logger.info("calc_pseudotime finished. Time spent = {:.2f}s".format(end - start))
def calc_diffmap_dis(data: AnnData, source: str, t: int, save_to: str) -> None:
mask = np.isin(data.obs_names, source)
diffmap = data.obsm["X_phi"] * (data.uns["diffmap_evals"] ** t)
dis = euclidean_distances(diffmap[mask, :], diffmap)[0,:]
data.obs[save_to] = 1.0 - dis
def construct_knn_graph(indices, distances):
G = igraph.Graph(directed=False)
G.add_vertices(indices.shape[0])
edges = []
w = []
for i in range(indices.shape[0]):
for j in range(indices.shape[1]):
edges.append((i, indices[i][j]))
w.append(distances[i][j])
G.add_edges(edges)
G.es["weight"] = w
return G
def bfs_on_mst(G, root_id):
mst = G.spanning_tree(weights="weight")
myiter = mst.bfsiter(root_id, advanced=True)
n = G.vcount()
parents = np.full(n, -1, dtype=int)
for value in myiter:
if value[2] is not None:
parents[value[0].index] = value[2].index
return parents
def infer_path(data: AnnData, cluster: str, clust_id, path_name: str, k: int = 10):
"""Inference on path of a cluster.
Parameters
----------
data: ``anndata.AnnData``
Annotated data matrix with rows for cells and columns for genes.
cluster: ``str``
Cluster name. Must exist in ``data.obs``.
clust_id
Cluster label. Must be a value of ``data.obs[cluster]``.
path_name: ``str``
Key name of the resulting path information.
k: ``int``, optional, default: ``10``
Number of nearest neighbors on Diffusion Map coordinates used in path inference.
Returns
-------
``None``
Update ``data.obs``:
* ``data.obs[path_name]``: The inferred path information on Diffusion Map about a specific cluster.
Examples
--------
>>> scc.infer_path(adata, cluster = 'leiden_labels', clust_id = '1', path_name = 'leiden_1_path')
"""
assert "roots" in data.uns and len(data.uns["roots"]) == 1
root_id = int(np.isin(data.obs_names, data.uns["roots"][0]).nonzero()[0][0])
indices = data.uns["diffmap_knn_indices"]
distances = data.uns["diffmap_knn_distances"]
G = construct_knn_graph(indices, distances)
parents = bfs_on_mst(G, root_id)
inpath = np.zeros(data.shape[0], dtype=bool)
idx = np.isin(data.obs[cluster], clust_id)
inpath[idx] = True
qsize = idx.sum()
queue = deque(idx.nonzero()[0])
while qsize > 0:
vid = queue.popleft()
qsize -= 1
if parents[vid] >= 0 and not inpath[parents[vid]]:
inpath[parents[vid]] = True
queue.append(parents[vid])
qsize += 1
for vid in np.nonzero(inpath & ~idx)[0]:
inpath[indices[vid, 0:k]] = True
data.obs[path_name] = inpath.astype(str)
| path: /sccloud-0.14.0.tar.gz/sccloud-0.14.0/scCloud/tools/pseudotime.py | quality_prob: 0.761804 | learning_prob: 0.556339 | filename: pseudotime.py | kind: pypi |
import numpy as np
import pandas as pd
import time
from scipy.sparse import issparse
from sccloud.io import read_input
def scp_write_coords(data, output_name):
cluster_labels = []
for col_name in data.obs.columns:
if col_name.find("labels") >= 0:
cluster_labels.append(col_name)
df_labels = data.obs[cluster_labels]
clu_str = group_str = ""
if len(cluster_labels) > 0:
clu_str = "".join(["\t" + x for x in cluster_labels])
group_str = "".join(["\tgroup"] * len(cluster_labels))
basis_set = set(data.obsm_keys())
for basis in ["X_tsne", "X_fitsne", "X_umap", "X_diffmap_pca", "X_fle", "X_net_tsne", "X_net_umap", "X_net_fle"]:
if basis in basis_set:
coords = ["X", "Y"] if basis != "X_diffmap_pca" else ["X", "Y", "Z"]
coo_str = "\t".join(coords)
num_str = "\t".join(["numeric"] * len(coords))
coord_file = "{}.scp.{}.coords.txt".format(output_name, basis)
with open(coord_file, "w") as fout:
fout.write("NAME\t{coo}{clu}\n".format(coo=coo_str, clu=clu_str))
fout.write("TYPE\t{coo}{clu}\n".format(coo=num_str, clu=group_str))
df_out = pd.DataFrame(
data.obsm[basis][:, 0 : len(coords)],
columns=coords,
index=data.obs_names,
)
df_out = pd.concat([df_out, df_labels], axis=1)
df_out.to_csv(coord_file, sep="\t", header=False, mode="a")
print("Coordinate file {} is written.".format(coord_file))
def scp_write_metadata(data, output_name):
ban = ["n_genes", "n_counts", "percent_mito", "pseudotime"]
meta = []
for col_name in data.obs.columns:
if (col_name not in ban) and (col_name.find("labels") < 0):
meta.append(col_name)
meta_str = "".join(["\t" + x for x in meta])
group_str = "".join(["\tgroup"] * len(meta))
metadata_file = "{}.scp.metadata.txt".format(output_name)
with open(metadata_file, "w") as fout:
fout.write("NAME{meta}\n".format(meta=meta_str))
fout.write("TYPE{meta}\n".format(meta=group_str))
data.obs[meta].to_csv(metadata_file, sep="\t", header=False, mode="a")
print("Metadata file {} is written.".format(metadata_file))
def write_market_matrix(mtx_file, X, round_to):
with open(mtx_file, "w") as fout:
fmt_str = "{{}} {{}} {{:.{}f}}\n".format(round_to)
fout.write("%%MatrixMarket matrix coordinate real general\n%\n")
if issparse(X):
X = X.tocoo()
fout.write("{} {} {}\n".format(X.shape[0], X.shape[1], (X.data != 0).sum()))
for x, y, value in zip(X.row, X.col, X.data):
if value != 0:
fout.write(fmt_str.format(x + 1, y + 1, value))
else:
fout.write("{} {} {}\n".format(X.shape[0], X.shape[1], np.count_nonzero(X)))
for x, y in zip(*X.nonzero()):
fout.write(fmt_str.format(x + 1, y + 1, X[x, y]))
def write_dense_matrix(expr_file, data, round_to):
fmt_str = "{{:.{}f}}".format(round_to)
exprmat = data.X.transpose()
if issparse(exprmat):
exprmat = exprmat.toarray()
with open(expr_file, "w") as fout:
fout.write("GENE\t" + "\t".join(data.obs_names) + "\n")
for i in range(exprmat.shape[0]):
fout.write(
data.var_names[i]
+ "\t"
+ "\t".join(
[fmt_str.format(x) if x > 0.0 else "0" for x in exprmat[i, :]]
)
+ "\n"
)
print("Expression matrix {} is written.".format(expr_file))
def scp_write_expression(data, output_name, is_sparse=True, round_to=2):
if is_sparse:
barcode_file = "{}.scp.barcodes.tsv".format(output_name)
with open(barcode_file, "w") as fout:
fout.write("\n".join(data.obs_names) + "\n")
print("Barcode file {} is written.".format(barcode_file))
gene_file = "{}.scp.features.tsv".format(output_name)
df = pd.DataFrame(
{"gene_names": data.var_names, "gene_ids": data.var["gene_ids"]}
)[["gene_ids", "gene_names"]]
with open(gene_file, "w") as fout:
df.to_csv(fout, sep=" ", header=False, index=False)
print("Features file {} is written.".format(gene_file))
mtx_file = "{}.scp.matrix.mtx".format(output_name)
write_market_matrix(mtx_file, data.X.transpose().tocsr(), round_to)
print("Matrix file {} is written.".format(mtx_file))
else:
expr_file = "{}.scp.expr.txt".format(output_name)
write_dense_matrix(expr_file, data, round_to)
def run_scp_output(
input_h5ad_file: str, output_name: str, is_sparse: bool = True, round_to: int = 2
):
"""Generate outputs for single cell portal.
Parameters
----------
input_h5ad_file: ``str``
Input h5ad file name.
output_name: ``str``
Name prefix for output files.
is_sparse: ``bool``, optional, default: ``True``
If ``True``, enforce the count matrix to be sparse after written into files.
round_to: ``int``, optional, default: ``2``
Round numbers to ``round_to`` decimal places.
Returns
-------
``None``
Generate several files:
* ``output_name.scp.basis.coords.txt``, one file per embedding ``basis`` found in ``adata.obsm``.
* ``output_name.scp.metadata.txt``.
* Gene expression files:
* If in sparse format:
* ``output_name.scp.features.tsv``, information on genes;
* ``output_name.scp.barcodes.tsv``, information on cell barcodes;
* ``output_name.scp.matrix.mtx``, count matrix.
* If not in sparse:
* ``output_name.scp.expr.txt``.
Examples
--------
>>> scc.run_scp_output("result.h5ad", output_name = "scp_result")
"""
adata = read_input(input_h5ad_file, h5ad_mode="a")
start = time.time()
scp_write_coords(adata, output_name)
scp_write_metadata(adata, output_name)
scp_write_expression(adata, output_name, is_sparse, round_to)
end = time.time()
print("Time spent for generating SCP outputs is {:.2f}s.".format(end - start))
| path: /sccloud-0.14.0.tar.gz/sccloud-0.14.0/scCloud/tools/scp_output.py | quality_prob: 0.5 | learning_prob: 0.333123 | filename: scp_output.py | kind: pypi |
import time
import numpy as np
import logging
from scipy.sparse import issparse
from scipy.sparse.csgraph import connected_components
from scipy.sparse.linalg import eigsh
from scipy.stats import entropy
from sklearn.decomposition import PCA
from sklearn.utils.extmath import randomized_svd
from typing import List, Tuple
from anndata import AnnData
from sccloud.tools import update_rep, W_from_rep
logger = logging.getLogger("sccloud")
def calculate_normalized_affinity(
W: "csr_matrix"
) -> Tuple["csr_matrix", "np.array", "np.array"]:
diag = W.sum(axis=1).A1
diag_half = np.sqrt(diag)
W_norm = W.tocoo(copy=True)
W_norm.data /= diag_half[W_norm.row]
W_norm.data /= diag_half[W_norm.col]
W_norm = W_norm.tocsr()
return W_norm, diag, diag_half
def calc_von_neumann_entropy(lambdas: List[float], t: float) -> float:
etas = 1.0 - lambdas ** t
etas = etas / etas.sum()
return entropy(etas)
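# Added sketch (not part of the original source): the von Neumann entropy of
# the rescaled spectrum eta_i = 1 - lambda_i**t typically increases toward
# log(n) as t grows; find_knee_point() below picks the elbow of that curve to
# choose the diffusion time t.
if __name__ == "__main__":
    lambdas_demo = np.array([0.99, 0.9, 0.5, 0.1])
    for t_demo in (1.0, 10.0, 100.0):
        print(t_demo, calc_von_neumann_entropy(lambdas_demo, t_demo))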
def find_knee_point(x: List[float], y: List[float]) -> int:
""" Return the knee point, which is defined as the point furthest from line between two end points
"""
p1 = np.array((x[0], y[0]))
p2 = np.array((x[-1], y[-1]))
length_p12 = np.linalg.norm(p2 - p1)
max_dis = 0.0
knee = 0
for cand_knee in range(1, len(x) - 1):
p3 = np.array((x[cand_knee], y[cand_knee]))
dis = np.linalg.norm(np.cross(p2 - p1, p3 - p1)) / length_p12
if max_dis < dis:
max_dis = dis
knee = cand_knee
return knee
def calculate_diffusion_map(
W: "csr_matrix", n_components: int, solver: str, random_state: int, max_t: int
) -> Tuple["np.array", "np.array", "np.array"]:
assert issparse(W)
nc, labels = connected_components(W, directed=True, connection="strong")
logger.info("Calculating connected components is done.")
assert nc == 1
W_norm, diag, diag_half = calculate_normalized_affinity(W)
logger.info("Calculating normalized affinity matrix is done.")
if solver == "eigsh":
np.random.seed(random_state)
v0 = np.random.uniform(-1.0, 1.0, W_norm.shape[0])
Lambda, U = eigsh(W_norm, k=n_components, v0=v0)
Lambda = Lambda[::-1]
U = U[:, ::-1]
else:
assert solver == "randomized"
U, S, VT = randomized_svd(
W_norm, n_components=n_components, random_state=random_state
)
signs = np.sign((U * VT.transpose()).sum(axis=0)) # get eigenvalue signs
Lambda = signs * S # get eigenvalues
# remove the first eigen value and vector
Lambda = Lambda[1:]
U = U[:, 1:]
Phi = U / diag_half[:, np.newaxis]
if max_t == -1:
Lambda_new = Lambda / (1.0 - Lambda)
else:
# Find the knee point
x = np.array(range(1, max_t + 1), dtype = float)
y = np.array([calc_von_neumann_entropy(Lambda, t) for t in x])
t = x[find_knee_point(x, y)]
logger.info("Detected knee point at t = {:.0f}.".format(t))
# U_df = U * Lambda #symmetric diffusion component
Lambda_new = Lambda * ((1.0 - Lambda ** t) / (1.0 - Lambda))
Phi_pt = Phi * Lambda_new # asym pseudo component
return Phi_pt, Lambda, Phi # , U_df, W_norm
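# Added check (not part of the original source): the rescaling applied above,
# Lambda * (1 - Lambda**t) / (1 - Lambda), is the finite geometric series
# sum_{s=1..t} Lambda**s; with max_t == -1 the infinite-sum limit
# Lambda / (1 - Lambda) is used instead.
if __name__ == "__main__":
    lam, t_steps = 0.8, 5
    finite_sum = sum(lam ** s for s in range(1, t_steps + 1))
    assert np.isclose(lam * (1.0 - lam ** t_steps) / (1.0 - lam), finite_sum)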
def diffmap(
data: AnnData,
n_components: int = 100,
rep: str = "pca",
solver: str = "eigsh",
random_state: int = 0,
max_t: float = 5000,
) -> None:
"""Calculate Diffusion Map.
Parameters
----------
data: ``anndata.AnnData``
Annotated data matrix with rows for cells and columns for genes.
n_components: ``int``, optional, default: ``100``
Number of diffusion components to calculate.
rep: ``str``, optional, default: ``"pca"``
Embedding Representation of data used for calculating the Diffusion Map. By default, use PCA coordinates.
solver: ``str``, optional, default: ``"eigsh"``
Solver for eigen decomposition:
* ``"eigsh"``: default setting. Use *scipy* `eigsh <https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.eigsh.html>`_ as the solver to find eigenvalus and eigenvectors using the Implicitly Restarted Lanczos Method.
* ``"randomized"``: Use *scikit-learn* `randomized_svd <https://scikit-learn.org/stable/modules/generated/sklearn.utils.extmath.randomized_svd.html>`_ as the solver to calculate a truncated randomized SVD.
random_state: ``int``, optional, default: ``0``
Random seed set for reproducing results.
max_t: ``float``, optional, default: ``5000``
scCloud determines the optimal diffusion time ``t`` within ``[1, max_t]`` using the knee point of the von Neumann entropy curve. If ``-1``, the infinite-sum limit is used instead.
Returns
-------
``None``
Update ``data.obsm``:
* ``data.obsm["X_diffmap"]``: Diffusion Map matrix of the data.
Update ``data.uns``:
* ``data.uns["diffmap_evals"]``: Eigenvalues corresponding to Diffusion Map matrix.
Examples
--------
>>> scc.diffmap(adata)
"""
start = time.time()
rep = update_rep(rep)
Phi_pt, Lambda, Phi = calculate_diffusion_map(
W_from_rep(data, rep),
n_components=n_components,
solver=solver,
random_state=random_state,
max_t = max_t,
)
data.obsm["X_diffmap"] = Phi_pt
data.uns["diffmap_evals"] = Lambda
data.obsm["X_phi"] = Phi
# data.uns['W_norm'] = W_norm
# data.obsm['X_dmnorm'] = U_df
end = time.time()
logger.info("diffmap finished. Time spent = {:.2f}s.".format(end - start))
def reduce_diffmap_to_3d(data: AnnData, random_state: int = 0) -> None:
"""Reduce high-dimensional Diffusion Map matrix to 3-dimentional.
Parameters
----------
data: ``anndata.AnnData``
Annotated data matrix with rows for cells and columns for genes.
random_state: ``int``, optional, default: ``0``
Random seed set for reproducing results.
Returns
-------
``None``
Update ``data.obsm``:
* ``data.obsm["X_diffmap_pca"]``: 3D Diffusion Map matrix of data.
Examples
--------
>>> scc.reduce_diffmap_to_3d(adata)
"""
start = time.time()
if "X_diffmap" not in data.obsm.keys():
raise ValueError("Please run diffmap first!")
pca = PCA(n_components=3, random_state=random_state)
data.obsm["X_diffmap_pca"] = pca.fit_transform(data.obsm["X_diffmap"])
end = time.time()
logger.info(
"Reduce diffmap to 3D is done. Time spent = {:.2f}s.".format(end - start)
)
| path: /sccloud-0.14.0.tar.gz/sccloud-0.14.0/scCloud/tools/diffusion_map.py | quality_prob: 0.862207 | learning_prob: 0.513607 | filename: diffusion_map.py | kind: pypi |
import numpy as np
import pandas as pd
import os
import time
from subprocess import check_call
from typing import List
from anndata import AnnData
from sccloud.io import infer_file_format, read_input, write_output, MemData
def find_digits(value):
pos = len(value) - 1
while pos >= 0 and value[pos].isdigit():
pos -= 1
pos += 1
assert pos < len(value)
return (value[:pos], int(value[pos:]))
def parse_restriction_string(rstr):
pos = rstr.index(":")
name = rstr[:pos]
isin = True
if rstr[pos + 1] == "~":
isin = False
pos += 1
content = set()
for item in rstr[pos + 1 :].split(","):
values = item.split("-")
if len(values) == 1:
content.add(values[0])
else:
prefix, fr = find_digits(values[0])
assert values[1].isdigit()
to = int(values[1]) + 1
for i in range(fr, to):
content.add(prefix + str(i))
return (name, isin, content)
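# Added demo (not part of the original source) of the restriction-string
# syntax parsed above: "name:value,..." selects values, "name:~value,..."
# excludes them, and numeric ranges like "1-3" are expanded.
if __name__ == "__main__":
    print(parse_restriction_string("Donor:1-3,5"))   # ('Donor', True, {'1', '2', '3', '5'})
    print(parse_restriction_string("Source:~BM"))    # ('Source', False, {'BM'})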
def aggregate_matrices(
csv_file: str,
what_to_return: str = AnnData,
restrictions: List[str] = [],
attributes: List[str] = [],
google_cloud: bool = False,
select_singlets: bool = False,
ngene: int = None,
concat_matrices: bool = False,
) -> "None or AnnData or MemData":
"""Aggregate channel-specific count matrices into one big count matrix.
This function takes a csv_file as input, which must contain at least 2 columns: Sample, the sample name, and Location, the file that contains the count matrices (e.g. filtered_gene_bc_matrices_h5.h5). It merges matrices from the same genome together. Depending on what_to_return, it can output the merged results into a sccloud-formatted HDF5 file or return them as an AnnData or MemData object.
Parameters
----------
csv_file : `str`
The CSV file containing information about each channel.
what_to_return : `str`, optional (default: 'AnnData')
If this value is equal to 'AnnData' or 'MemData', an AnnData or MemData object will be returned. Otherwise, results will be written into 'what_to_return.sccloud.h5' file and None is returned.
restrictions : `list[str]`, optional (default: [])
A list of restrictions used to select channels, each restriction takes the format of name:value,…,value or name:~value,..,value, where ~ refers to not.
attributes : `list[str]`, optional (default: [])
A list of attributes need to be incorporated into the output count matrix.
google_cloud : `bool`, optional (default: False)
If the channel-specific count matrices are stored in a google bucket.
select_singlets : `bool`, optional (default: False)
If we have demultiplexed data, turning on this option will make sccloud only include barcodes that are predicted as singlets.
ngene : `int`, optional (default: None)
The minimum number of expressed genes to keep one barcode.
concat_matrices : `bool`, optional (default: False)
Whether to concatenate multiple matrices. If so, return only one AnnData object; otherwise, a list of AnnData objects might be returned.
Returns
-------
None
Examples
--------
>>> tools.aggregate_matrices('example.csv', what_to_return = 'example_10x.h5', restrictions = ['Source:pbmc', 'Donor:1'], attributes = ['Source', 'Platform', 'Donor'])
"""
df = pd.read_csv(csv_file, header=0, index_col="Sample")
df["Sample"] = df.index
# Select channels
rvec = [parse_restriction_string(x) for x in restrictions]
idx = pd.Series([True] * df.shape[0], index=df.index, name="selected")
for name, isin, content in rvec:
assert name in df.columns
if isin:
idx = idx & df[name].isin(content)
else:
idx = idx & (~(df[name].isin(content)))
df = df.loc[idx]
if df.shape[0] == 0:
raise ValueError("No channels pass the restrictions!")
# Load channels
tot = 0
aggrData = MemData()
dest_paths = []
for sample_name, row in df.iterrows():
input_file = os.path.expanduser(
os.path.expandvars(row["Location"].rstrip(os.sep))
)
file_format, copy_path, copy_type = infer_file_format(input_file)
if google_cloud:
base_name = os.path.basename(copy_path)
dest_path = sample_name + "_tmp_" + base_name
if copy_type == "directory":
check_call(["mkdir", "-p", dest_path])
call_args = ["gsutil", "-m", "cp", "-r", copy_path, dest_path]
else:
call_args = ["gsutil", "-m", "cp", copy_path, dest_path]
check_call(call_args)
dest_paths.append(dest_path)
input_file = dest_path
if file_format == "csv" and copy_type == "directory":
input_file = os.path.join(dest_path, os.path.basename(input_file))
genome = None
if file_format in ["dge", "csv", "mtx", "loom"]:
assert "Reference" in row
genome = row["Reference"]
data = read_input(
input_file,
genome=genome,
return_type="MemData",
ngene=ngene,
select_singlets=select_singlets,
)
data.update_barcode_metadata_info(sample_name, row, attributes)
aggrData.addAggrData(data)
tot += 1
print("Processed {}.".format(input_file))
# Delete temporary file
for dest_path in dest_paths:
check_call(["rm", "-rf", dest_path])
# Merge channels
t1 = time.time()
aggrData.aggregate()
t2 = time.time()
print("Data aggregation is finished in {:.2f}s.".format(t2 - t1))
if what_to_return == "AnnData":
aggrData = aggrData.convert_to_anndata(concat_matrices)
elif what_to_return != "MemData":
write_output(aggrData, what_to_return)
aggrData = None
print("Aggregated {tot} files.".format(tot=tot))
return aggrData
| path: /sccloud-0.14.0.tar.gz/sccloud-0.14.0/scCloud/tools/data_aggregation.py | quality_prob: 0.7181 | learning_prob: 0.656163 | filename: data_aggregation.py | kind: pypi |
import pandas as pd
from matplotlib import pyplot as pl
from sccloud.io import read_input
from .plot_utils import transform_basis
from .plot_qc import plot_qc_violin
from . import plot_library, iplot_library
pop_list = {
"composition": {
"basis",
"attrs",
"apply_to_all",
"group",
"genes",
"gene",
"nrows",
"ncols",
"alpha",
"legend_fontsize",
"use_raw",
"showzscore",
"title",
"showall",
"show_background",
"qc_type",
"qc_xtick_font",
"qc_xtick_rotation",
"qc_line_width",
},
"scatter": {
"cluster",
"attr",
"group",
"genes",
"gene",
"style",
"stacked",
"logy",
"use_raw",
"showzscore",
"title",
"showall",
"qc_type",
"qc_xtick_font",
"qc_xtick_rotation",
"qc_line_width",
},
"scatter_groups": {
"attr",
"attrs",
"apply_to_all",
"genes",
"gene",
"style",
"stacked",
"logy",
"use_raw",
"showzscore",
"title",
"show_background",
"qc_type",
"qc_xtick_font",
"qc_xtick_rotation",
"qc_line_width",
},
"scatter_genes": {
"cluster",
"attr",
"attrs",
"restrictions",
"apply_to_all",
"group",
"gene",
"style",
"stacked",
"logy",
"legend_fontsize",
"showzscore",
"title",
"showall",
"show_background",
"qc_type",
"qc_xtick_font",
"qc_xtick_rotation",
"qc_line_width",
},
"scatter_gene_groups": {
"cluster",
"attr",
"attrs",
"restrictions",
"apply_to_all",
"genes",
"style",
"stacked",
"logy",
"legend_fontsize",
"showzscore",
"title",
"showall",
"show_background",
"qc_type",
"qc_xtick_font",
"qc_xtick_rotation",
"qc_line_width",
},
"heatmap": {
"attr",
"basis",
"attrs",
"restrictions",
"apply_to_all",
"group",
"gene",
"style",
"stacked",
"logy",
"nrows",
"ncols",
"subplot_size",
"left",
"bottom",
"wspace",
"hspace",
"alpha",
"legend_fontsize",
"showall",
"show_background",
"qc_type",
"qc_xtick_font",
"qc_xtick_rotation",
"qc_line_width",
},
}
def make_static_plots(input_file, plot_type, output_file, dpi=500, **kwargs):
adata = read_input(input_file, h5ad_mode="r")
if plot_type == "qc_violin":
if kwargs["attr"] is None:
plot_qc_violin(
adata,
kwargs["qc_type"],
output_file,
xattr=kwargs["cluster"],
xlabel=kwargs["cluster"],
xtick_font=kwargs["qc_xtick_font"],
xtick_rotation=kwargs["qc_xtick_rotation"],
figsize=kwargs["subplot_size"],
linewidth=kwargs["qc_line_width"],
)
else:
plot_qc_violin(
adata,
kwargs["qc_type"],
output_file,
xattr=kwargs["cluster"],
hue=kwargs["attr"],
xlabel=kwargs["cluster"],
xtick_font=kwargs["qc_xtick_font"],
xtick_rotation=kwargs["qc_xtick_rotation"],
split=True,
figsize=kwargs["subplot_size"],
linewidth=kwargs["qc_line_width"],
)
else:
assert plot_type in pop_list
pop_set = pop_list[plot_type].copy()
for key, value in kwargs.items():
if value is None:
pop_set.add(key)
for key in pop_set:
kwargs.pop(key)
fig = getattr(plot_library, "plot_" + plot_type)(adata, **kwargs)
fig.savefig(output_file, dpi=dpi)
print(output_file + " is generated.")
adata.file.close()
def make_interactive_plots(input_file, plot_type, output_file, **kwargs):
adata = read_input(input_file, h5ad_mode="r")
basis = transform_basis(plot_type)
if plot_type == "diffmap" or plot_type == "diffmap_pca":
df = pd.DataFrame(
adata.obsm["X_{}".format(plot_type)][:, 0:3],
index=adata.obs.index,
columns=[basis + i for i in ["1", "2", "3"]],
)
if kwargs["isgene"]:
coln = adata.var.index.get_loc(kwargs["attr"])
df.insert(0, "Annotation", adata.X[:, coln].toarray().ravel())
else:
df.insert(0, "Annotation", adata.obs[kwargs["attr"]])
if not kwargs["isreal"]:
iplot_library.scatter3d(df, output_file)
else:
iplot_library.scatter3d_real(df, output_file, kwargs["log10"])
else:
df = pd.DataFrame(
adata.obsm["X_{}".format(plot_type)],
index=adata.obs.index,
columns=[basis + i for i in ["1", "2"]],
)
if kwargs["isgene"]:
coln = adata.var.index.get_loc(kwargs["attr"])
df.insert(0, "Annotation", adata.X[:, coln].toarray().ravel())
else:
df.insert(0, "Annotation", adata.obs[kwargs["attr"]])
if not kwargs["isreal"]:
iplot_library.scatter(df, output_file)
else:
iplot_library.scatter_real(df, output_file, kwargs["log10"])
print(output_file + " is generated.")
adata.file.close()
| path: /sccloud-0.14.0.tar.gz/sccloud-0.14.0/scCloud/plotting/run_plotting.py | quality_prob: 0.543711 | learning_prob: 0.408159 | filename: run_plotting.py | kind: pypi |
import matplotlib as mpl
mpl.use("Agg")
import numpy as np
import pandas as pd
import seaborn as sns
from natsort import natsorted
import matplotlib.pyplot as plt
# plot_type: gene, count, mito
def plot_qc_violin(
data,
plot_type,
out_file,
xattr="Channel",
hue=None,
inner=None,
dpi=500,
figsize=None,
xlabel=None,
xtick_font=None,
xtick_rotation=False,
split=False,
linewidth=None,
):
pt2attr = {"gene": "n_genes", "count": "n_counts", "mito": "percent_mito"}
pt2ylab = {
"gene": "Number of expressed genes",
"count": "Number of UMIs",
"mito": "Percentage of mitochondrial UMIs",
}
yattr = pt2attr[plot_type]
tmp_df = data if isinstance(data, pd.core.frame.DataFrame) else data.obs
df = (tmp_df[[xattr, yattr]] if hue is None else tmp_df[[xattr, yattr, hue]]).copy()
df[xattr] = pd.Categorical(
df[xattr].values, categories=natsorted(np.unique(df[xattr].values))
)
if hue is not None:
df[hue] = pd.Categorical(
df[hue].values, categories=natsorted(np.unique(df[hue].values))
)
sns.violinplot(
x=xattr,
y=yattr,
hue=hue,
data=df,
inner=inner,
split=split,
linewidth=linewidth,
cut=0,
)
ax = plt.gca()
ax.grid(False)
if xlabel is not None:
ax.set_xlabel(xlabel)
ax.set_ylabel(pt2ylab[plot_type])
for tick in ax.xaxis.get_major_ticks():
if xtick_font is not None:
tick.label.set_fontsize(xtick_font)
if xtick_rotation:
tick.label.set_rotation("vertical")
ax.legend(loc="center left", bbox_to_anchor=(1, 0.5), fontsize=xtick_font)
if figsize is not None:
plt.gcf().set_size_inches(*figsize)
plt.tight_layout()
plt.savefig(out_file, dpi=dpi)
plt.close()
def plot_hvf(x, y, fitted, hvg_index, out_file, dpi=500, markersize=5, linewidth=2):
ax = plt.gca()
ax.plot(x[hvg_index], y[hvg_index], "b.", markersize=markersize)
ax.plot(x[~hvg_index], y[~hvg_index], "k.", markersize=markersize)
order = np.argsort(x)
ax.plot(x[order], fitted[order], "r-", linewidth=linewidth)
ax.set_xlabel("Mean log expression")
ax.set_ylabel("Variance of log expression")
plt.savefig(out_file, dpi=dpi)
plt.close()
| path: /sccloud-0.14.0.tar.gz/sccloud-0.14.0/scCloud/plotting/plot_qc.py | quality_prob: 0.505127 | learning_prob: 0.367355 | filename: plot_qc.py | kind: pypi |
from .Base import Base
from sccloud.tools import aggregate_matrices
class AggregateMatrix(Base):
"""
Aggregate 10x matrices from each channel into one big matrix.
Usage:
sccloud aggregate_matrix <csv_file> <output_name> [--restriction <restriction>... --attributes <attributes> --google-cloud --select-only-singlets --minimum-number-of-genes <ngene>]
sccloud aggregate_matrix -h
Arguments:
csv_file Input csv-formatted file containing information of each scRNA-Seq run. Each row must contain at least 2 columns --- Sample, sample name and Location, location of the channel-specific count matrix in either 10x v2/v3, DGE, mtx, csv or loom format. If matrix is in DGE, mtx or csv format, an addition Reference column is required.
output_name The output file name.
Options:
--restriction <restriction>... Select channels that satisfy all restrictions. Each restriction takes the format of name:value,...,value or name:~value,..,value, where ~ refers to not. You can specify multiple restrictions by setting this option multiple times.
--attributes <attributes> Specify a comma-separated list of attributes to output. These attributes should be column names in the csv file.
--google-cloud Use this option if files are stored in Google Cloud; it assumes the Google Cloud SDK is installed.
--select-only-singlets If we have demultiplexed data, turning on this option will make sccloud only include barcodes that are predicted as singlets.
--minimum-number-of-genes <ngene> Only keep barcodes with at least <ngene> expressed genes.
-h, --help Print out help information.
Outputs:
output_name.h5sc A sccloud-formatted HDF5 file containing the count matrices and associated attributes.
Examples:
sccloud aggregate_matrix --restriction Source:BM,CB --restriction Individual:1-8 --attributes Source,Platform Manton_count_matrix.csv manton_bm_cb
"""
def execute(self):
aggregate_matrices(
self.args["<csv_file>"],
what_to_return=self.args["<output_name>"],
restrictions=self.args["--restriction"],
attributes=self.split_string(self.args["--attributes"]),
google_cloud=self.args["--google-cloud"],
select_singlets=self.args["--select-only-singlets"],
ngene=self.convert_to_int(self.args["--minimum-number-of-genes"]),
)
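# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of the original
# AggregateMatrix.py). It writes a hypothetical CSV matching the format
# described in the docstring above (Sample, Location, plus optional
# attribute columns) and calls aggregate_matrices directly with the same
# keyword arguments used by execute(). All file names and attribute values
# below are made up for demonstration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import pandas as pd

    pd.DataFrame(
        {
            "Sample": ["bm_1", "bm_2", "cb_1"],
            "Source": ["BM", "BM", "CB"],
            "Platform": ["10x_v3", "10x_v3", "10x_v3"],
            "Location": [
                "bm_1/raw_feature_bc_matrix.h5",
                "bm_2/raw_feature_bc_matrix.h5",
                "cb_1/raw_feature_bc_matrix.h5",
            ],
        }
    ).to_csv("demo_count_matrix.csv", index=False)

    aggregate_matrices(
        "demo_count_matrix.csv",
        what_to_return="demo_aggr",  # writes demo_aggr.h5sc per the docstring
        restrictions=["Source:BM,CB"],
        attributes=["Source", "Platform"],
        google_cloud=False,
        select_singlets=False,
        ngene=100,
    )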
|
/sccloud-0.14.0.tar.gz/sccloud-0.14.0/scCloud/commands/AggregateMatrix.py
| 0.784649 | 0.282181 |
AggregateMatrix.py
|
pypi
|
import os
from .Base import Base
from sccloud.plotting import make_static_plots
class Plotting(Base):
"""
Generate cluster composition plots.
Usage:
sccloud plot [options] [--restriction <restriction>...] <plot_type> <input_h5ad_file> <output_file>
sccloud plot -h
Arguments:
plot_type Only 2D plots, chosen from 'composition', 'scatter', 'scatter_groups', 'scatter_genes', 'scatter_gene_groups', 'heatmap', and 'qc_violin'.
input_h5ad_file Single cell data in h5ad file format with clustering done by 'sccloud cluster'.
output_file Output image file.
Options:
--dpi <dpi> DPI value for the figure. [default: 500]
--cluster-labels <attr> Use <attr> as cluster labels. This option is used in 'composition', 'scatter_groups', 'heatmap', and 'qc_violin'.
--attribute <attr> Plot <attr> against cluster labels. This option is only used in 'composition' and 'qc_violin'.
--basis <basis> Basis for 2D plotting, chosen from 'tsne', 'fitsne', 'umap', 'pca', 'rpca', 'fle', 'diffmap_pca', 'net_tsne', 'net_fitsne', 'net_umap' or 'net_fle'. If CITE-Seq data is used, basis can also be 'citeseq_fitsne'. This option is used in 'scatter', 'scatter_groups', 'scatter_genes', and 'scatter_gene_groups'. [default: fitsne]
--attributes <attrs> <attrs> is a comma-separated list of attributes to color the basis. This option is only used in 'scatter'.
--restriction <restriction>... Set restrictions if you only want to plot a subset of data. Multiple <restriction> strings are allowed. Each <restriction> takes the format of 'attr:value,value' or 'attr:~value,value,...', where a leading '~' means excluding the listed values. This option is used in 'composition' and 'scatter'.
--apply-to-each-figure Indicate that the <restriction> strings are not applied to all attributes but only to specific ones. The string's 'attr' value should match the attribute you want to restrict.
--show-background Show points that are not selected as gray.
--group <attr> <attr> is used to make group plots. In group plots, the first plot contains all components in the group and the following plots show each component separately. This option is used in 'scatter_groups' and 'scatter_gene_groups'. If <attr> is a semi-colon-separated string, parse the string as groups.
--genes <genes> <genes> is a comma-separated list of gene names to visualize. This option is used in 'scatter_genes' and 'heatmap'.
--gene <gene> Visualize <gene> in group plots. This option is only used in 'scatter_gene_groups'.
--style <style> Composition plot styles. Can be either 'frequency', 'count', or 'normalized'. [default: frequency]
--not-stacked Do not stack bars in composition plot.
--log-y Plot y axis in log10 scale for composition plot.
--nrows <nrows> Number of rows in the figure. If not set, sccloud will figure it out automatically.
--ncols <ncols> Number of columns in the figure. If not set, sccloud will figure it out automatically.
--subplot-size <sizes> Sub-plot size in inches, w x h, separated by comma. Note that margins are not counted in the sizes. For composition, default is (6, 4). For scatter plots, default is (4, 4).
--left <left> Figure's left margin in fraction with respect to subplot width.
--bottom <bottom> Figure's bottom margin in fraction with respect to subplot height.
--wspace <wspace> Horizontal space between subplots in fraction with respect to subplot width.
--hspace <hspace> Vertical space between subplots in fraction with respect to subplot height.
--alpha <alpha> Point transparency parameter. Can be a comma-separated list of values.
--legend-fontsize <fontsize> Legend font size.
--use-raw Use the raw expression matrix stored in anndata. Only used by 'scatter_genes' and 'scatter_gene_groups'.
--do-not-show-all Do not show all components in group for scatter_groups.
--show-zscore Show z-scores in the heatmap.
--heatmap-title <title> Title for heatmap.
--qc-type <type> Plot qc_violin by annotation, <type> can be either 'gene', 'count' (UMI), or 'mito' (mitochondrial rate). [default: gene]
--qc-xtick-font <font> X tick font for qc_violin. [default: 5]
--qc-xtick-rotation Rotate x-axis tick labels.
--qc-line-width <width> Line width for qc_violin. [default: 0.5]
-h, --help Print out help information.
Examples:
sccloud plot composition --cluster-labels louvain_labels --attribute Individual --style normalized --not-stacked Manton_BM.h5ad test.pdf
sccloud plot scatter --basis tsne --attributes louvain_labels,Individual Manton_BM.h5ad test.pdf
sccloud plot scatter_groups --cluster-labels louvain_labels --group Individual Manton_BM.h5ad test.pdf
sccloud plot scatter_genes --genes CD8A,CD4,CD3G,MS4A1,NCAM1,CD14,ITGAX,IL3RA,CD38,CD34,PPBP Manton_BM.h5ad test.pdf
sccloud plot scatter_gene_groups --gene CD8A --group Individual Manton_BM.h5ad test.pdf
sccloud plot heatmap --cluster-labels louvain_labels --genes CD8A,CD4,CD3G,MS4A1,NCAM1,CD14,ITGAX,IL3RA,CD38,CD34,PPBP --heatmap-title 'markers' Manton_BM.h5ad test.pdf
sccloud plot qc_violin --qc-type gene --cluster-labels louvain_labels --attribute Channel --subplot-size 7,5 --qc-xtick-font 5 --qc-line-width 0.5 Manton_BM.h5ad test.pdf
"""
def execute(self):
kwargs = {
"cluster": self.args["--cluster-labels"],
"attr": self.args["--attribute"],
"restrictions": self.args["--restriction"],
"apply_to_all": not self.args["--apply-to-each-figure"],
"show_background": self.args["--show-background"],
"basis": self.args["--basis"],
"attrs": self.split_string(self.args["--attributes"]),
"group": self.args["--group"],
"genes": self.split_string(self.args["--genes"]),
"gene": self.args["--gene"],
"style": self.args["--style"],
"stacked": not self.args["--not-stacked"],
"logy": self.args["--log-y"],
"nrows": int(self.args["--nrows"])
if self.args["--nrows"] is not None
else None,
"ncols": int(self.args["--ncols"])
if self.args["--ncols"] is not None
else None,
"subplot_size": [float(x) for x in self.args["--subplot-size"].split(",")]
if self.args["--subplot-size"] is not None
else None,
"left": float(self.args["--left"])
if self.args["--left"] is not None
else None,
"bottom": float(self.args["--bottom"])
if self.args["--bottom"] is not None
else None,
"wspace": float(self.args["--wspace"])
if self.args["--wspace"] is not None
else None,
"hspace": float(self.args["--hspace"])
if self.args["--hspace"] is not None
else None,
"legend_fontsize": float(self.args["--legend-fontsize"])
if self.args["--legend-fontsize"] is not None
else None,
"use_raw": self.args["--use-raw"],
"showall": not self.args["--do-not-show-all"],
"showzscore": self.args["--show-zscore"],
"title": self.args["--heatmap-title"],
"qc_type": self.args["--qc-type"],
"qc_xtick_font": int(self.args["--qc-xtick-font"]),
"qc_xtick_rotation": self.args["--qc-xtick-rotation"],
"qc_line_width": float(self.args["--qc-line-width"]),
}
if self.args["--alpha"] is None:
kwargs["alpha"] = None
else:
values = [float(x) for x in self.args["--alpha"].split(",")]
if len(values) == 1:
kwargs["alpha"] = values[0]
else:
kwargs["alpha"] = values
make_static_plots(
self.args["<input_h5ad_file>"],
self.args["<plot_type>"],
self.args["<output_file>"],
dpi=int(self.args["--dpi"]),
**kwargs
)
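# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not the scCloud implementation).
# One plausible way to turn --restriction strings of the form
# 'attr:value,...,value' or 'attr:~value,...,value' (a leading '~' excludes
# the listed values) into a boolean cell mask over an AnnData .obs table.
# The helper name is hypothetical and exists only for this demo.
# ---------------------------------------------------------------------------
def _demo_restriction_mask(obs, restrictions):
    """Return a boolean mask selecting the rows of `obs` that satisfy all restrictions."""
    import numpy as np

    mask = np.ones(obs.shape[0], dtype=bool)
    for rest in restrictions:
        attr, value_str = rest.split(":", 1)
        negate = value_str.startswith("~")
        values = (value_str[1:] if negate else value_str).split(",")
        hit = obs[attr].astype(str).isin(values).values
        mask &= ~hit if negate else hit
    return mask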
|
/sccloud-0.14.0.tar.gz/sccloud-0.14.0/scCloud/commands/Plotting.py
| 0.769167 | 0.419945 |
Plotting.py
|
pypi
|
import os
from .Base import Base
from sccloud.tools import run_de_analysis
class DeAnalysis(Base):
"""
Perform DE analysis.
Usage:
sccloud de_analysis [options] <input_h5ad_file> <output_spreadsheet>
sccloud de_analysis -h
Arguments:
input_h5ad_file Single cell data with clustering calculated. DE results would be written back.
output_spreadsheet Output spreadsheet with DE results.
Options:
-p <threads> Use <threads> threads. [default: 1]
--labels <attr> <attr> used as cluster labels. [default: louvain_labels]
--result-key <key> Store DE results into AnnData varm with key = <key>. [default: de_res]
--auc Calculate area under ROC (AUROC) and area under Precision-Recall (AUPR).
--t Calculate Welch's t-test.
--fisher Calculate Fisher's exact test.
--mwu Calculate Mann-Whitney U test.
--temp-folder <temp_folder> Joblib temporary folder for memmapping numpy arrays.
--alpha <alpha> Control false discovery rate at <alpha>. [default: 0.05]
--ndigits <ndigits> Round values other than p-values and q-values to <ndigits> decimal places in the Excel output. [default: 3]
--quiet Do not show detailed intermediate outputs.
-h, --help Print out help information.
Outputs:
input_h5ad_file DE results would be written back to the 'varm' field with name set by --result-key <key>.
output_spreadsheet An excel spreadsheet containing DE results. Each cluster has two tabs in the spreadsheet. One is for up-regulated genes and the other is for down-regulated genes.
Examples:
sccloud de_analysis -p 26 --labels louvain_labels --auc --t --fisher --mwu manton_bm.h5ad manton_bm_de.xlsx
"""
def execute(self):
run_de_analysis(
self.args["<input_h5ad_file>"],
self.args["<output_spreadsheet>"],
self.args["--labels"],
result_key=self.args["--result-key"],
n_jobs=int(self.args["-p"]),
auc=self.args["--auc"],
t=self.args["--t"],
fisher=self.args["--fisher"],
mwu=self.args["--mwu"],
temp_folder=self.args["--temp-folder"],
verbose=not self.args["--quiet"],
alpha=float(self.args["--alpha"]),
ndigits=int(self.args["--ndigits"]),
)
|
/sccloud-0.14.0.tar.gz/sccloud-0.14.0/scCloud/commands/DeAnalysis.py
| 0.514156 | 0.260331 |
DeAnalysis.py
|
pypi
|
from .Base import Base
from sccloud.pipeline import run_pipeline
class SubClustering(Base):
"""
Run sccloud to obtain subclusters.
Usage:
sccloud subcluster [options] --subset-selection <subset-selection>... <input_file> <output_name>
sccloud subcluster -h
Arguments:
input_file Single cell data with clustering done in h5ad format.
output_name Output file name. All outputs will use it as the prefix.
Options:
--subset-selection <subset-selection>... Specify which cells will be included in the subcluster analysis. Each <subset_selection> string takes the format of 'attr:value,...,value', which selects cells whose attr is among the listed values. If multiple <subset_selection> strings are specified, the selected subset of cells is the intersection of these selections.
-p <number>, --threads <number> Number of threads. [default: 1]
--correct-batch-effect Correct for batch effects for subclustering task.
--batch-group-by <expression> Batch correction assumes the differences in gene expression between channels are due to batch effects. However, in many cases, we know that channels can be partitioned into several groups and each group is biologically different from others. In this case, we will only perform batch correction for channels within each group. This option defines the groups. If <expression> is None, we assume all channels are from one group. Otherwise, groups are defined according to <expression>. <expression> takes the form of either 'attr', or 'attr1+attr2+...+attrn', or 'attr=value11,...,value1n_1;value21,...,value2n_2;...;valuem1,...,valuemn_m'. In the first form, 'attr' should be an existing sample attribute, and groups are defined by 'attr'. In the second form, 'attr1',...,'attrn' are n existing sample attributes and groups are defined by the Cartesian product of these n attributes. In the last form, there will be m + 1 groups. A cell belongs to group i (i > 0) if and only if its sample attribute 'attr' has a value among valuei1,...,valuein_i. A cell belongs to group 0 if it does not belong to any other groups.
--output-loom Output loom-formatted file.
--select-hvf-flavor <flavor> Highly variable feature selection method. <flavor> can be 'sccloud' or 'Seurat'. [default: sccloud]
--select-hvf-ngenes <nfeatures> Select top <nfeatures> highly variable features. If <flavor> is 'Seurat' and <nfeatures> is 'None', select HVGs with z-score cutoff at 0.5. [default: 2000]
--no-select-hvf Do not select highly variable features.
--plot-hvf Plot highly variable feature selection.
--random-state <seed> Random number generator seed. [default: 0]
--temp-folder <temp_folder> Joblib temporary folder for memmapping numpy arrays.
--nPC <number> Number of principal components. [default: 50]
--knn-K <number> Number of nearest neighbors for building kNN graph. [default: 100]
--knn-full-speed For the sake of reproducibility, we only run one thread for building kNN indices. Turning this option on allows multiple threads to be used for index building. However, it also reduces reproducibility due to race conditions between threads.
--kBET Calculate kBET.
--kBET-batch <batch> kBET batch keyword.
--kBET-alpha <alpha> kBET rejection alpha. [default: 0.05]
--kBET-K <K> kBET K. [default: 25]
--diffmap Calculate diffusion maps.
--diffmap-ndc <number> Number of diffusion components. [default: 100]
--diffmap-solver <solver> Solver for eigen decomposition, either 'eigsh' or 'randomized'. [default: eigsh]
--diffmap-maxt <max_t> Maximum time stamp to search for the knee point. [default: 5000]
--diffmap-to-3d Map the diffusion map into 3D space using PCA.
--calculate-pseudotime <roots> Calculate diffusion-based pseudotimes based on <roots>. <roots> should be a comma-separated list of cell barcodes.
--louvain Run louvain clustering algorithm.
--louvain-resolution <resolution> Resolution parameter for the louvain clustering algorithm. [default: 1.3]
--louvain-class-label <label> Louvain cluster label name in AnnData. [default: louvain_labels]
--leiden Run leiden clustering algorithm.
--leiden-resolution <resolution> Resolution parameter for the leiden clustering algorithm. [default: 1.3]
--leiden-niter <niter> Number of iterations of running the Leiden algorithm. If <niter> is negative, run Leiden iteratively until no improvement. [default: -1]
--leiden-class-label <label> Leiden cluster label name in AnnData. [default: leiden_labels]
--spectral-louvain Run spectral-louvain clustering algorithm.
--spectral-louvain-basis <basis> Basis used for KMeans clustering. Can be 'pca' or 'diffmap'. If 'diffmap' is not calculated, use 'pca' instead. [default: diffmap]
--spectral-louvain-nclusters <number> Number of clusters for Kmeans initialization. [default: 30]
--spectral-louvain-ninit <number> Number of Kmeans tries. [default: 20]
--spectral-louvain-resolution <resolution> Resolution parameter for louvain. [default: 1.3]
--spectral-louvain-class-label <label> Spectral-louvain label name in AnnData. [default: spectral_louvain_labels]
--spectral-leiden Run spectral-leiden clustering algorithm.
--spectral-leiden-basis <basis> Basis used for KMeans clustering. Can be 'pca' or 'diffmap'. If 'diffmap' is not calculated, use 'pca' instead. [default: diffmap]
--spectral-leiden-nclusters <number> Number of clusters for Kmeans initialization. [default: 30]
--spectral-leiden-ninit <number> Number of Kmeans tries. [default: 20]
--spectral-leiden-resolution <resolution> Resolution parameter for leiden. [default: 1.3]
--spectral-leiden-class-label <label> Spectral-leiden label name in AnnData. [default: spectral_leiden_labels]
--tsne Run multi-core t-SNE for visualization.
--fitsne Run FIt-SNE for visualization.
--tsne-perplexity <perplexity> t-SNE's perplexity parameter, used by tSNE, FIt-SNE, net-tSNE and net-FIt-SNE. [default: 30]
--umap Run umap for visualization.
--umap-K <K> K neighbors for umap. [default: 15]
--umap-min-dist <number> Umap parameter. [default: 0.5]
--umap-spread <spread> Umap parameter. [default: 1.0]
--fle Run force-directed layout embedding.
--fle-K <K> K neighbors for building graph for FLE. [default: 50]
--fle-target-change-per-node <change> Target change per node to stop forceAtlas2. [default: 2.0]
--fle-target-steps <steps> Maximum number of iterations before stopping the forceAtlas2 algorithm. [default: 5000]
--fle-memory <memory> Memory size in GB for the Java FA2 component. [default: 8]
--net-down-sample-fraction <frac> Down sampling fraction for net-related visualization. [default: 0.1]
--net-down-sample-K <K> Use <K> neighbors to estimate local density for each data point for down sampling. [default: 25]
--net-down-sample-alpha <alpha> Weighted down sample, proportional to radius^alpha. [default: 1.0]
--net-regressor-L2-penalty <value> L2 penalty parameter for the deep net regressor. [default: 0.1]
--net-tsne Run net tSNE for visualization.
--net-tsne-polish-learning-frac <frac> After running the deep regressor to predict new coordinates, use <frac> * nsample as the learning rate when polishing the coordinates. [default: 0.33]
--net-tsne-polish-niter <niter> Number of iterations for polishing tSNE run. [default: 150]
--net-tsne-out-basis <basis> Output basis for net-tSNE. [default: net_tsne]
--net-umap Run net umap for visualization.
--net-umap-polish-learning-rate <rate> After running the deep regressor to predict new coordinates, the learning rate used to polish the coordinates for UMAP. [default: 1.0]
--net-umap-polish-nepochs <nepochs> Number of iterations for polishing UMAP run. [default: 40]
--net-umap-out-basis <basis> Output basis for net-UMAP. [default: net_umap]
--net-fle Run net FLE.
--net-fle-polish-target-steps <steps> After running the deep regressor to predict new coordinates, the number of ForceAtlas2 iterations to run. [default: 1500]
--net-fle-out-basis <basis> Output basis for net-FLE. [default: net_fle]
-h, --help Print out help information.
Outputs:
output_name.h5ad Output file in h5ad format. The clustering results are stored in the 'obs' field (e.g. 'louvain_labels' for louvain cluster labels). The PCA, t-SNE and diffusion map coordinates are stored in the 'obsm' field.
output_name.loom Optional output. Only exists if '--output-loom' is set. output_name.h5ad in loom format for visualization.
Examples:
sccloud subcluster -p 20 --correct-batch-effect --subset-selection louvain_labels:3,6 --subset-selection Condition:CB_nonmix --tsne --louvain manton_bm.h5ad manton_bm_subset
"""
def execute(self):
kwargs = {
"processed": True,
"subcluster": True,
"cite_seq": False,
"select_singlets": False,
"subset_selections": self.args["--subset-selection"],
"n_jobs": int(self.args["--threads"]),
"genome": None,
"channel_attr": None,
"black_list": None,
"batch_correction": self.args["--correct-batch-effect"],
"group_attribute": self.args["--batch-group-by"],
"output_loom": self.args["--output-loom"],
"select_hvf": not self.args["--no-select-hvf"],
"hvf_flavor": self.args["--select-hvf-flavor"],
"hvf_ngenes": int(self.args["--select-hvf-ngenes"])
if self.args["--select-hvf-ngenes"] != "None"
else None,
"plot_hvf": self.args["<output_name>"] if self.args["--plot-hvf"] else None,
"random_state": int(self.args["--random-state"]),
"temp_folder": self.args["--temp-folder"],
"nPC": int(self.args["--nPC"]),
"K": int(self.args["--knn-K"]),
"full_speed": self.args["--knn-full-speed"],
"kBET": self.args["--kBET"],
"kBET_batch": self.args["--kBET-batch"],
"kBET_alpha": float(self.args["--kBET-alpha"]),
"kBET_K": int(self.args["--kBET-K"]),
"diffmap": self.args["--diffmap"],
"diffmap_ndc": int(self.args["--diffmap-ndc"]),
"diffmap_maxt": int(self.args["--diffmap-maxt"]),
"diffmap_solver": self.args["--diffmap-solver"],
"diffmap_to_3d": self.args["--diffmap-to-3d"],
"pseudotime": self.split_string(self.args["--calculate-pseudotime"]),
"louvain": self.args["--louvain"],
"louvain_resolution": float(self.args["--louvain-resolution"]),
"louvain_class_label": self.args["--louvain-class-label"],
"leiden": self.args["--leiden"],
"leiden_resolution": float(self.args["--leiden-resolution"]),
"leiden_niter": int(self.args["--leiden-niter"]),
"leiden_class_label": self.args["--leiden-class-label"],
"spectral_louvain": self.args["--spectral-louvain"],
"spectral_louvain_basis": self.args["--spectral-louvain-basis"],
"spectral_louvain_nclusters": int(
self.args["--spectral-louvain-nclusters"]
),
"spectral_louvain_ninit": int(self.args["--spectral-louvain-ninit"]),
"spectral_louvain_resolution": float(
self.args["--spectral-louvain-resolution"]
),
"spectral_louvain_class_label": self.args["--spectral-louvain-class-label"],
"spectral_leiden": self.args["--spectral-leiden"],
"spectral_leiden_basis": self.args["--spectral-leiden-basis"],
"spectral_leiden_nclusters": int(self.args["--spectral-leiden-nclusters"]),
"spectral_leiden_ninit": int(self.args["--spectral-leiden-ninit"]),
"spectral_leiden_resolution": float(
self.args["--spectral-leiden-resolution"]
),
"spectral_leiden_class_label": self.args["--spectral-leiden-class-label"],
"tsne": self.args["--tsne"],
"fitsne": self.args["--fitsne"],
"tsne_perplexity": float(self.args["--tsne-perplexity"]),
"umap": self.args["--umap"],
"umap_K": int(self.args["--umap-K"]),
"umap_min_dist": float(self.args["--umap-min-dist"]),
"umap_spread": float(self.args["--umap-spread"]),
"fle": self.args["--fle"],
"fle_K": int(self.args["--fle-K"]),
"fle_target_change_per_node": float(
self.args["--fle-target-change-per-node"]
),
"fle_target_steps": int(self.args["--fle-target-steps"]),
"fle_memory": int(self.args["--fle-memory"]),
"net_ds_frac": float(self.args["--net-down-sample-fraction"]),
"net_ds_K": int(self.args["--net-down-sample-K"]),
"net_ds_alpha": float(self.args["--net-down-sample-alpha"]),
"net_l2": float(self.args["--net-regressor-L2-penalty"]),
"net_tsne": self.args["--net-tsne"],
"net_tsne_polish_learing_frac": float(
self.args["--net-tsne-polish-learning-frac"]
),
"net_tsne_polish_niter": int(self.args["--net-tsne-polish-niter"]),
"net_tsne_basis": self.args["--net-tsne-out-basis"],
"net_umap": self.args["--net-umap"],
"net_umap_polish_learing_rate": float(
self.args["--net-umap-polish-learning-rate"]
),
"net_umap_polish_nepochs": int(self.args["--net-umap-polish-nepochs"]),
"net_umap_basis": self.args["--net-umap-out-basis"],
"net_fle": self.args["--net-fle"],
"net_fle_polish_target_steps": int(
self.args["--net-fle-polish-target-steps"]
),
"net_fle_basis": self.args["--net-fle-out-basis"],
}
run_pipeline(self.args["<input_file>"], self.args["<output_name>"], **kwargs)
|
/sccloud-0.14.0.tar.gz/sccloud-0.14.0/scCloud/commands/SubClustering.py
| 0.748628 | 0.553143 |
SubClustering.py
|
pypi
|
from .Base import Base
from sccloud.pipeline import run_demuxEM_pipeline
class DemuxEM(Base):
"""
Run the demuxEM pipeline for cell-hashing/nuclei-hashing data.
Usage:
sccloud demuxEM [options] <input_adt_csv_file> <input_raw_gene_bc_matrices_h5.h5> <output_name>
sccloud demuxEM -h
Arguments:
input_adt_csv_file Input ADT (antibody tag) count matrix in CSV format.
input_raw_gene_bc_matrices_h5.h5 Input raw RNA expression matrix in 10x hdf5 format.
output_name Output name. All outputs will use it as the prefix.
Options:
-p <number>, --threads <number> Number of threads. [default: 1]
--genome <genome> Reference genome name. If not provided, we will infer it from the expression matrix file.
--alpha-on-samples <alpha> The Dirichlet prior concentration parameter (alpha) on samples. An alpha value < 1.0 will make the prior sparse. [default: 0.0]
--min-num-genes <number> We only demultiplex cells/nuclei with at least <number> expressed genes. [default: 100]
--min-num-umis <number> We only demultiplex cells/nuclei with at least <number> UMIs. [default: 100]
--min-signal-hashtag <count> Any cell/nucleus with less than <count> hashtags from the signal will be marked as unknown. [default: 10.0]
--random-state <seed> The random seed used in the KMeans algorithm to separate empty ADT droplets from others. [default: 0]
--generate-diagnostic-plots Generate a series of diagnostic plots, including the background/signal split of HTO counts, estimated background probabilities, HTO distributions of cells and non-cells, etc.
--generate-gender-plot <genes> Generate violin plots using gender-specific genes (e.g. Xist). <genes> is a comma-separated list of gene names.
-h, --help Print out help information.
Outputs:
output_name_demux.h5sc RNA expression matrix with demultiplexed sample identities in scCloud HDF5 format.
output_name_ADTs.h5ad Antibody tag matrix in h5ad format.
output_name_demux.h5ad Demultiplexed RNA count matrix in h5ad format.
output_name.ambient_hashtag.hist.pdf Optional output. A histogram plot depicting hashtag distributions of empty droplets and non-empty droplets.
output_name.background_probabilities.bar.pdf Optional output. A bar plot visualizing the estimated hashtag background probability distribution.
output_name.real_content.hist.pdf Optional output. A histogram plot depicting hashtag distributions of not-real-cells and real-cells as defined by total number of expressed genes in the RNA assay.
output_name.rna_demux.hist.pdf Optional output. A histogram plot depicting RNA UMI distribution for singlets, doublets and unknown cells.
output_name.gene_name.violin.pdf Optional outputs. Violin plots depicting gender-specific gene expression across samples. We can have multiple plots if a gene list is provided in '--generate-gender-plot' option.
Examples:
sccloud demuxEM -p 8 --generate-diagnostic-plots sample_adt.csv sample_raw_gene_bc_matrices_h5.h5 sample_output
"""
def execute(self):
kwargs = {
"n_jobs": int(self.args["--threads"]),
"genome": self.args["--genome"],
"alpha": float(self.args["--alpha-on-samples"]),
"min_num_genes": int(self.args["--min-num-genes"]),
"min_num_umis": int(self.args["--min-num-umis"]),
"min_signal": float(self.args["--min-signal-hashtag"]),
"random_state": int(self.args["--random-state"]),
"gen_plots": self.args["--generate-diagnostic-plots"],
"gen_gender_plot": self.split_string(self.args["--generate-gender-plot"]),
}
run_demuxEM_pipeline(
self.args["<input_adt_csv_file>"],
self.args["<input_raw_gene_bc_matrices_h5.h5>"],
self.args["<output_name>"],
**kwargs
)
|
/sccloud-0.14.0.tar.gz/sccloud-0.14.0/scCloud/commands/DemuxEM.py
| 0.849316 | 0.509642 |
DemuxEM.py
|
pypi
|
import os
from .Base import Base
from sccloud.annotate_cluster import run_annotate_cluster, annotate_anndata_object
class AnnotateCluster(Base):
"""
Annotate potential cell types for each cluster. This command has two forms: the first form generates putative annotations and the second form writes annotations into the h5ad object.
Usage:
sccloud annotate_cluster [--marker-file <file> --de-test <test> --de-alpha <alpha> --de-key <key> --minimum-report-score <score> --do-not-use-non-de-genes] <input_h5ad_file> <output_file>
sccloud annotate_cluster --annotation <annotation_string> <input_h5ad_file>
sccloud annotate_cluster -h
Arguments:
input_h5ad_file Single cell data with DE analysis done by sccloud de_analysis.
output_file Output annotation file.
Options:
--marker-file <file> JSON file for markers. Could also be human_immune/mouse_immune/mouse_brain/human_brain, which makes sccloud use the marker lists included in the package. [default: human_immune]
--de-test <test> DE test to use to infer cell types. [default: t]
--de-alpha <alpha> Control the false discovery rate at <alpha>. [default: 0.05]
--de-key <key> Keyword where the DE results store in varm. [default: de_res]
--minimum-report-score <score> Minimum cell type score to report a potential cell type. [default: 0.5]
--do-not-use-non-de-genes Do not count non DE genes as down-regulated.
--annotation <annotation_string> Write cell type annotations in <annotation_string> into <input_h5ad_file>. <annotation_string> has this format: 'anno_name:clust_name:anno_1;anno_2;...;anno_n'. 'anno_name' is the annotation attribute in the h5ad object, 'clust_name' is the attribute with cluster ids, and anno_i is the annotation for cluster i.
-h, --help Print out help information.
Outputs:
output_file This is a text file. For each cluster, all its putative cell types are listed in descending order of the cell type score. For each putative cell type, all markers supporting this cell type are listed. If one putative cell type has cell subtypes, all subtypes will be listed under this cell type.
Examples:
sccloud annotate_cluster manton_bm.h5ad manton_bm.anno.txt
sccloud annotate_cluster --annotation "anno:T cells;B cells;NK cells;Monocytes" manton_bm.h5ad
"""
def execute(self):
if self.args["<output_file>"] is not None:
run_annotate_cluster(
self.args["<input_h5ad_file>"],
self.args["<output_file>"],
self.args["--marker-file"],
de_test=self.args["--de-test"],
de_alpha=float(self.args["--de-alpha"]),
de_key=self.args["--de-key"],
threshold=float(self.args["--minimum-report-score"]),
ignore_nonde=self.args["--do-not-use-non-de-genes"],
)
else:
annotate_anndata_object(
self.args["<input_h5ad_file>"], self.args["--annotation"]
)
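# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not the scCloud implementation
# of annotate_anndata_object). One plausible reading of the --annotation
# format 'anno_name:clust_name:anno_1;...;anno_n', mapping the i-th cluster
# label (assumed to be the string str(i), 1-based) to the i-th annotation.
# The helper name and the "Unknown" fallback are assumptions for this demo.
# ---------------------------------------------------------------------------
def _demo_apply_annotation(adata, annotation_string):
    anno_name, clust_name, anno_str = annotation_string.split(":", 2)
    mapping = {str(i + 1): anno for i, anno in enumerate(anno_str.split(";"))}
    adata.obs[anno_name] = [
        mapping.get(str(label), "Unknown") for label in adata.obs[clust_name]
    ]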
|
/sccloud-0.14.0.tar.gz/sccloud-0.14.0/scCloud/commands/AnnotateCluster.py
| 0.619817 | 0.237333 |
AnnotateCluster.py
|
pypi
|
from .Base import Base
from sccloud.pipeline import run_pipeline
class Clustering(Base):
"""
Run sccloud.pipeline to obtain top-level clusters.
Usage:
sccloud cluster [options] <input_file> <output_name>
sccloud cluster -h
Arguments:
input_file Input HDF5 file in 10x or sccloud format. If a first-pass analysis has been performed but you want to run some additional analysis, you can also pass an h5ad-formatted file.
output_name Output file name. All outputs will use it as the prefix.
Options:
-p <number>, --threads <number> Number of threads. [default: 1]
--processed Input file is processed and thus no PCA & diffmap will be run.
--genome <genome> A comma-separated list of genome names. sccloud will read all groups associated with genome names in the list from the hdf5 file. If genome is None, all groups will be considered.
--channel <channel_attr> Use <channel_attr> to represent different samples. This will set a 'Channel' column field with <channel_attr>.
--black-list <black_list> Cell barcode attributes in the black list will be removed. Format is "attr1,attr2,...,attrn".
--min-genes-on-raw <number> If the input is a raw 10x matrix, which includes all barcodes, perform a pre-filtration step to keep the data size small. In this step, only keep cells with at least <number> expressed genes. [default: 100]
--select-singlets Only select DemuxEM-predicted singlets for analysis.
--cite-seq Data are CITE-Seq data. sccloud will perform analyses on the RNA count matrix first. Then it will attach the ADT matrix to the RNA matrix, with all antibody names changed to 'AD-' + antibody_name. Lastly, it will embed the antibody expression using FIt-SNE (the basis used for plotting is 'citeseq_fitsne').
--cite-seq-capping <percentile> For CITE-Seq surface protein expression, cap values above the <percentile> percentile at the value of that percentile to smooth outliers. Set <percentile> to 100.0 to turn this option off. [default: 99.99]
--output-filtration-results Output filtration results as a spreadsheet.
--plot-filtration-results Plot filtration results as PDF files.
--plot-filtration-figsize <figsize> Figure size for filtration plots. <figsize> is a comma-separated list of two numbers, the width and height of the figure (e.g. 6,4).
--output-seurat-compatible Output seurat-compatible h5ad file. Caution: File size might be large, do not turn this option on for large data sets.
--output-loom Output loom-formatted file.
--min-genes <number> Only keep cells with at least <number> genes. [default: 500]
--max-genes <number> Only keep cells with fewer than <number> genes. [default: 6000]
--min-umis <number> Only keep cells with at least <number> UMIs. [default: 100]
--max-umis <number> Only keep cells with fewer than <number> UMIs. [default: 600000]
--mito-prefix <prefix> Prefix for mitochondrial genes. If multiple prefixes are provided, separate them by comma (e.g. "MT-,mt-"). [default: MT-]
--percent-mito <percent> Only keep cells with mitochondrial percent less than <percent>%. [default: 10.0]
--gene-percent-cells <percent> Only use genes that are expressed in at least <percent>% of cells to select variable genes. [default: 0.05]
--counts-per-cell-after <number> Total counts per cell after normalization. [default: 1e5]
--select-hvf-flavor <flavor> Highly variable feature selection method. <flavor> can be 'sccloud' or 'Seurat'. [default: sccloud]
--select-hvf-ngenes <nfeatures> Select top <nfeatures> highly variable features. If <flavor> is 'Seurat' and <nfeatures> is 'None', select HVGs with z-score cutoff at 0.5. [default: 2000]
--no-select-hvf Do not select highly variable features.
--plot-hvf Plot highly variable feature selection.
--correct-batch-effect Correct for batch effects.
--batch-group-by <expression> Batch correction assumes the differences in gene expression between channels are due to batch effects. However, in many cases, we know that channels can be partitioned into several groups and each group is biologically different from others. In this case, we will only perform batch correction for channels within each group. This option defines the groups. If <expression> is None, we assume all channels are from one group. Otherwise, groups are defined according to <expression>. <expression> takes the form of either 'attr', or 'attr1+attr2+...+attrn', or 'attr=value11,...,value1n_1;value21,...,value2n_2;...;valuem1,...,valuemn_m'. In the first form, 'attr' should be an existing sample attribute, and groups are defined by 'attr'. In the second form, 'attr1',...,'attrn' are n existing sample attributes and groups are defined by the Cartesian product of these n attributes. In the last form, there will be m + 1 groups. A cell belongs to group i (i > 0) if and only if its sample attribute 'attr' has a value among valuei1,...,valuein_i. A cell belongs to group 0 if it does not belong to any other groups.
--random-state <seed> Random number generator seed. [default: 0]
--temp-folder <temp_folder> Joblib temporary folder for memmapping numpy arrays.
--nPC <number> Number of principal components. [default: 50]
--knn-K <number> Number of nearest neighbors for building kNN graph. [default: 100]
--knn-full-speed For the sake of reproducibility, we only run one thread for building kNN indices. Turning this option on allows multiple threads to be used for index building. However, it also reduces reproducibility due to race conditions between threads.
--kBET Calculate kBET.
--kBET-batch <batch> kBET batch keyword.
--kBET-alpha <alpha> kBET rejection alpha. [default: 0.05]
--kBET-K <K> kBET K. [default: 25]
--diffmap Calculate diffusion maps.
--diffmap-ndc <number> Number of diffusion components. [default: 100]
--diffmap-solver <solver> Solver for eigen decomposition, either 'eigsh' or 'randomized'. [default: eigsh]
--diffmap-maxt <max_t> Maximum time stamp to search for the knee point. [default: 5000]
--diffmap-to-3d Map the diffusion map into 3D space using PCA.
--calculate-pseudotime <roots> Calculate diffusion-based pseudotimes based on <roots>. <roots> should be a comma-separated list of cell barcodes.
--louvain Run louvain clustering algorithm.
--louvain-resolution <resolution> Resolution parameter for the louvain clustering algorithm. [default: 1.3]
--louvain-class-label <label> Louvain cluster label name in AnnData. [default: louvain_labels]
--leiden Run leiden clustering algorithm.
--leiden-resolution <resolution> Resolution parameter for the leiden clustering algorithm. [default: 1.3]
--leiden-niter <niter> Number of iterations of running the Leiden algorithm. If <niter> is negative, run Leiden iteratively until no improvement. [default: -1]
--leiden-class-label <label> Leiden cluster label name in AnnData. [default: leiden_labels]
--spectral-louvain Run spectral-louvain clustering algorithm.
--spectral-louvain-basis <basis> Basis used for KMeans clustering. Can be 'pca' or 'diffmap'. If 'diffmap' is not calculated, use 'pca' instead. [default: diffmap]
--spectral-louvain-nclusters <number> Number of clusters for Kmeans initialization. [default: 30]
--spectral-louvain-ninit <number> Number of Kmeans tries. [default: 20]
--spectral-louvain-resolution <resolution> Resolution parameter for louvain. [default: 1.3]
--spectral-louvain-class-label <label> Spectral-louvain label name in AnnData. [default: spectral_louvain_labels]
--spectral-leiden Run spectral-leiden clustering algorithm.
--spectral-leiden-basis <basis> Basis used for KMeans clustering. Can be 'pca' or 'diffmap'. If 'diffmap' is not calculated, use 'pca' instead. [default: diffmap]
--spectral-leiden-nclusters <number> Number of clusters for Kmeans initialization. [default: 30]
--spectral-leiden-ninit <number> Number of Kmeans tries. [default: 20]
--spectral-leiden-resolution <resolution> Resolution parameter for leiden. [default: 1.3]
--spectral-leiden-class-label <label> Spectral-leiden label name in AnnData. [default: spectral_leiden_labels]
--tsne Run multi-core t-SNE for visualization.
--fitsne Run FIt-SNE for visualization.
--tsne-perplexity <perplexity> t-SNE's perplexity parameter, used by tSNE, FIt-SNE and net-tSNE. [default: 30]
--umap Run umap for visualization.
--umap-K <K> K neighbors for umap. [default: 15]
--umap-min-dist <number> Umap parameter. [default: 0.5]
--umap-spread <spread> Umap parameter. [default: 1.0]
--fle Run force-directed layout embedding.
--fle-K <K> K neighbors for building graph for FLE. [default: 50]
--fle-target-change-per-node <change> Target change per node to stop forceAtlas2. [default: 2.0]
--fle-target-steps <steps> Maximum number of iterations before stopping the forceAtlas2 algorithm. [default: 5000]
--fle-memory <memory> Memory size in GB for the Java FA2 component. [default: 8]
--net-down-sample-fraction <frac> Down sampling fraction for net-related visualization. [default: 0.1]
--net-down-sample-K <K> Use <K> neighbors to estimate local density for each data point for down sampling. [default: 25]
--net-down-sample-alpha <alpha> Weighted down sample, proportional to radius^alpha. [default: 1.0]
--net-regressor-L2-penalty <value> L2 penalty parameter for the deep net regressor. [default: 0.1]
--net-tsne Run net tSNE for visualization.
--net-tsne-polish-learning-frac <frac> After running the deep regressor to predict new coordinates, use <frac> * nsample as the learning rate when polishing the coordinates. [default: 0.33]
--net-tsne-polish-niter <niter> Number of iterations for polishing tSNE run. [default: 150]
--net-tsne-out-basis <basis> Output basis for net-tSNE. [default: net_tsne]
--net-umap Run net umap for visualization.
--net-umap-polish-learning-rate <rate> After running the deep regressor to predict new coordinates, the learning rate used to polish the coordinates for UMAP. [default: 1.0]
--net-umap-polish-nepochs <nepochs> Number of iterations for polishing UMAP run. [default: 40]
--net-umap-out-basis <basis> Output basis for net-UMAP. [default: net_umap]
--net-fle Run net FLE.
--net-fle-polish-target-steps <steps> After running the deep regressor to predict new coordinates, the number of ForceAtlas2 iterations to run. [default: 1500]
--net-fle-out-basis <basis> Output basis for net-FLE. [default: net_fle]
-h, --help Print out help information.
Outputs:
output_name.h5ad Output file in h5ad format. To load this file in python, use ``import sccloud; data = sccloud.tools.read_input('output_name.h5ad', mode = 'a')``. The log-normalized expression matrix is stored in ``data.X`` as a CSR-format sparse matrix. The ``obs`` field contains cell related attributes, including clustering results. For example, ``data.obs_names`` records cell barcodes; ``data.obs['Channel']`` records the channel each cell comes from; ``data.obs['n_genes']``, ``data.obs['n_counts']``, and ``data.obs['percent_mito']`` record the number of expressed genes, total UMI count, and mitochondrial rate for each cell respectively; ``data.obs['louvain_labels']`` and ``data.obs['approx_louvain_labels']`` record each cell's cluster labels using different clustering algorithms; ``data.obs['pseudo_time']`` records the inferred pseudotime for each cell. The ``var`` field contains gene related attributes. For example, ``data.var_names`` records gene symbols, ``data.var['gene_ids']`` records Ensembl gene IDs, and ``data.var['selected']`` records selected variable genes. The ``obsm`` field records embedding coordinates. For example, ``data.obsm['X_pca']`` records PCA coordinates, ``data.obsm['X_tsne']`` records tSNE coordinates, ``data.obsm['X_umap']`` records UMAP coordinates, ``data.obsm['X_diffmap']`` records diffusion map coordinates, ``data.obsm['X_diffmap_pca']`` records the first 3 PCs by projecting the diffusion components using PCA, and ``data.obsm['X_fle']`` records the force-directed layout coordinates from the diffusion components. The ``uns`` field stores other related information, such as reference genome (``data.uns['genome']``). If '--output-seurat-compatible' is on, this file can be loaded into R and converted into a Seurat object.
output_name.seurat.h5ad Optional output. Only exists if '--output-seurat-compatible' is set. 'output_name.h5ad' in seurat-compatible manner. This file can be loaded into R and converted into a Seurat object.
output_name.filt.xlsx Optional output. Only exists if '--output-filtration-results' is set. This file has two sheets --- Cell filtration stats and Gene filtration stats. The first sheet records cell filtering results and it has 10 columns: Channel, channel name; kept, number of cells kept; median_n_genes, median number of expressed genes in kept cells; median_n_umis, median number of UMIs in kept cells; median_percent_mito, median mitochondrial rate (UMIs in mitochondrial genes over UMIs in all genes) in kept cells; filt, number of cells filtered out; total, total number of cells before filtration; if the input contains all barcodes, this number is the number of cells left after '--min-genes-on-raw' filtration; median_n_genes_before, median expressed genes per cell before filtration; median_n_umis_before, median UMIs per cell before filtration; median_percent_mito_before, median mitochondrial rate per cell before filtration. The channels are sorted in ascending order with respect to the number of kept cells per channel. The second sheet records genes that failed to pass the filtering. This sheet has 3 columns: gene, gene name; n_cells, number of cells in which this gene is expressed; percent_cells, the fraction of cells in which this gene is expressed. Genes are ranked in ascending order according to the number of cells in which the gene is expressed. Note that only genes not expressed in any cell are removed from the data. Other filtered genes are marked as non-robust and not used for TPM-like normalization.
output_name.filt.gene.pdf Optional output. Only exists if '--plot-filtration-results' is set. This file contains violin plots contrasting gene count distributions before and after filtration per channel.
output_name.filt.UMI.pdf Optional output. Only exists if '--plot-filtration-results' is set. This file contains violin plots contrasting UMI count distributions before and after filtration per channel.
output_name.filt.mito.pdf Optional output. Only exists if '--plot-filtration-results' is set. This file contains violin plots contrasting mitochondrial rate distributions before and after filtration per channel.
output_name.hvf.pdf Optional output. Only exists if '--plot-hvf' is set. This file contains a scatter plot describing the highly variable gene selection procedure.
output_name.loom Optional output. Only exists if '--output-loom' is set. 'output_name.h5ad' in loom format for visualization.
Examples:
sccloud cluster -p 20 --correct-batch-effect --louvain --tsne manton_bm_10x.h5 manton_bm
"""
def execute(self):
kwargs = {
"n_jobs": int(self.args["--threads"]),
"processed": self.args["--processed"],
"genome": self.args["--genome"],
"channel_attr": self.args["--channel"],
"black_list": self.args["--black-list"],
"subcluster": False,
"min_genes_on_raw": int(self.args["--min-genes-on-raw"]),
"select_singlets": self.args["--select-singlets"],
"cite_seq": self.args["--cite-seq"],
"cite_seq_capping": float(self.args["--cite-seq-capping"]),
"output_filt": self.args["<output_name>"]
if self.args["--output-filtration-results"]
else None,
"plot_filt": self.args["<output_name>"]
if self.args["--plot-filtration-results"]
else None,
"plot_filt_figsize": self.args["--plot-filtration-figsize"],
"seurat_compatible": self.args["--output-seurat-compatible"],
"output_loom": self.args["--output-loom"],
"min_genes": int(self.args["--min-genes"]),
"max_genes": int(self.args["--max-genes"]),
"min_umis": int(self.args["--min-umis"]),
"max_umis": int(self.args["--max-umis"]),
"mito_prefix": self.args["--mito-prefix"],
"percent_mito": float(self.args["--percent-mito"]),
"percent_cells": float(self.args["--gene-percent-cells"]),
"norm_count": float(self.args["--counts-per-cell-after"]),
"select_hvf": not self.args["--no-select-hvf"],
"hvf_flavor": self.args["--select-hvf-flavor"],
"hvf_ngenes": int(self.args["--select-hvf-ngenes"])
if self.args["--select-hvf-ngenes"] != "None"
else None,
"plot_hvf": self.args["<output_name>"] if self.args["--plot-hvf"] else None,
"batch_correction": self.args["--correct-batch-effect"],
"group_attribute": self.args["--batch-group-by"],
"random_state": int(self.args["--random-state"]),
"temp_folder": self.args["--temp-folder"],
"nPC": int(self.args["--nPC"]),
"K": int(self.args["--knn-K"]),
"full_speed": self.args["--knn-full-speed"],
"kBET": self.args["--kBET"],
"kBET_batch": self.args["--kBET-batch"],
"kBET_alpha": float(self.args["--kBET-alpha"]),
"kBET_K": int(self.args["--kBET-K"]),
"diffmap": self.args["--diffmap"],
"diffmap_ndc": int(self.args["--diffmap-ndc"]),
"diffmap_maxt": int(self.args["--diffmap-maxt"]),
"diffmap_solver": self.args["--diffmap-solver"],
"diffmap_to_3d": self.args["--diffmap-to-3d"],
"pseudotime": self.split_string(self.args["--calculate-pseudotime"]),
"louvain": self.args["--louvain"],
"louvain_resolution": float(self.args["--louvain-resolution"]),
"louvain_class_label": self.args["--louvain-class-label"],
"leiden": self.args["--leiden"],
"leiden_resolution": float(self.args["--leiden-resolution"]),
"leiden_niter": int(self.args["--leiden-niter"]),
"leiden_class_label": self.args["--leiden-class-label"],
"spectral_louvain": self.args["--spectral-louvain"],
"spectral_louvain_basis": self.args["--spectral-louvain-basis"],
"spectral_louvain_nclusters": int(
self.args["--spectral-louvain-nclusters"]
),
"spectral_louvain_ninit": int(self.args["--spectral-louvain-ninit"]),
"spectral_louvain_resolution": float(
self.args["--spectral-louvain-resolution"]
),
"spectral_louvain_class_label": self.args["--spectral-louvain-class-label"],
"spectral_leiden": self.args["--spectral-leiden"],
"spectral_leiden_basis": self.args["--spectral-leiden-basis"],
"spectral_leiden_nclusters": int(self.args["--spectral-leiden-nclusters"]),
"spectral_leiden_ninit": int(self.args["--spectral-leiden-ninit"]),
"spectral_leiden_resolution": float(
self.args["--spectral-leiden-resolution"]
),
"spectral_leiden_class_label": self.args["--spectral-leiden-class-label"],
"tsne": self.args["--tsne"],
"fitsne": self.args["--fitsne"],
"tsne_perplexity": float(self.args["--tsne-perplexity"]),
"umap": self.args["--umap"],
"umap_K": int(self.args["--umap-K"]),
"umap_min_dist": float(self.args["--umap-min-dist"]),
"umap_spread": float(self.args["--umap-spread"]),
"fle": self.args["--fle"],
"fle_K": int(self.args["--fle-K"]),
"fle_target_change_per_node": float(
self.args["--fle-target-change-per-node"]
),
"fle_target_steps": int(self.args["--fle-target-steps"]),
"fle_memory": int(self.args["--fle-memory"]),
"net_ds_frac": float(self.args["--net-down-sample-fraction"]),
"net_ds_K": int(self.args["--net-down-sample-K"]),
"net_ds_alpha": float(self.args["--net-down-sample-alpha"]),
"net_l2": float(self.args["--net-regressor-L2-penalty"]),
"net_tsne": self.args["--net-tsne"],
"net_tsne_polish_learing_frac": float(
self.args["--net-tsne-polish-learning-frac"]
),
"net_tsne_polish_niter": int(self.args["--net-tsne-polish-niter"]),
"net_tsne_basis": self.args["--net-tsne-out-basis"],
"net_umap": self.args["--net-umap"],
"net_umap_polish_learing_rate": float(
self.args["--net-umap-polish-learning-rate"]
),
"net_umap_polish_nepochs": int(self.args["--net-umap-polish-nepochs"]),
"net_umap_basis": self.args["--net-umap-out-basis"],
"net_fle": self.args["--net-fle"],
"net_fle_polish_target_steps": int(
self.args["--net-fle-polish-target-steps"]
),
"net_fle_basis": self.args["--net-fle-out-basis"],
}
run_pipeline(self.args["<input_file>"], self.args["<output_name>"], **kwargs)
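# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not the scCloud implementation).
# One plausible way to derive batch-correction groups from the
# --batch-group-by expression forms described above: a single attribute, a
# '+'-joined combination of attributes, or 'attr=v11,v12;v21,v22' value
# lists (cells matching no list fall into group "0"). The helper name is
# hypothetical and exists only for this demo.
# ---------------------------------------------------------------------------
def _demo_assign_groups(obs, expression):
    import numpy as np

    if expression is None:
        # One group containing every cell.
        return np.repeat("0", obs.shape[0])
    if "=" in expression:
        # Third form: explicit value lists separated by ';'.
        attr, lists = expression.split("=", 1)
        groups = np.repeat("0", obs.shape[0]).astype(object)
        for i, value_str in enumerate(lists.split(";")):
            hit = obs[attr].astype(str).isin(value_str.split(",")).values
            groups[hit] = str(i + 1)
        return groups
    # First/second form: one attribute or a Cartesian product of attributes.
    attrs = expression.split("+")
    groups = obs[attrs[0]].astype(str)
    for a in attrs[1:]:
        groups = groups + "+" + obs[a].astype(str)
    return groups.values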
|
/sccloud-0.14.0.tar.gz/sccloud-0.14.0/scCloud/commands/Clustering.py
| 0.742422 | 0.503418 |
Clustering.py
|
pypi
|
import numpy as np
import pandas as pd
from typing import List
from anndata import AnnData
from sccloud.io import read_input
def search_genes(
data: AnnData,
gene_list: List[str],
rec_key: str = "de_res",
measure: str = "percentage",
) -> pd.DataFrame:
"""Extract and display gene expressions for each cluster from an `anndata` object.
This function helps to see marker expressions in clusters via the interactive python environment.
Parameters
----------
data: ``anndata.AnnData``
Annotated data matrix containing the expression matrix and differential expression results.
gene_list: ``List[str]``
A list of gene symbols.
rec_key: ``str``, optional, default: ``"de_res"``
Keyword of DE analysis result stored in ``data.varm``.
measure : ``str``, optional, default: ``"percentage"``
Can be either ``"percentage"`` or ``"mean_logExpr"``:
* ``percentage`` shows the percentage of cells expressing the genes;
* ``mean_logExpr`` shows the mean log expression.
Returns
-------
``pandas.DataFrame``
A data frame containing marker expressions in each cluster.
Examples
--------
>>> results = scc.search_genes(adata, ['CD3E', 'CD4', 'CD8'])
"""
columns = [x for x in data.varm[rec_key].dtype.names if x.startswith(measure + ":")]
df = pd.DataFrame(data=data.varm[rec_key][columns], index=data.var_names)
return df.reindex(index=gene_list)
def search_de_genes(
data: AnnData,
gene_list: List[str],
rec_key: str = "de_res",
de_test: str = "fisher",
de_alpha: float = 0.05,
thre: float = 1.5,
) -> pd.DataFrame:
"""Extract and display differential expression analysis results of markers for each cluster.
This function helps to see if markers are up or down regulated in each cluster via the interactive python environment:
* ``++`` indicates up-regulated and fold change >= threshold;
* ``+`` indicates up-regulated but fold change < threshold;
* ``--`` indicates down-regulated and fold change <= 1 / threshold;
* ``-`` indicates down-regulated but fold change > 1 / threshold;
* ``?`` indicates not differentially expressed.
Parameters
----------
data: ``anndata.Anndata``
Annotated data matrix containing the expression matrix and differential expression results.
gene_list: ``List[str]``
A list of gene symbols.
rec_key: ``str``, optional, default: ``"de_res"``
Keyword of DE analysis result stored in ``data.varm``.
de_test : ``str``, optional, default: ``"fisher"``
Differential expression test to look at, could be either ``t``, ``fisher`` or ``mwu``.
de_alpha : ``float``, optional, default: ``0.05``
False discovery rate.
thre : ``float``, optional, default: ``1.5``
Fold change threshold to determine if the marker is a strong DE (``++`` or ``--``) or weak DE (``+`` or ``-``).
Returns
-------
``pandas.DataFrame``
A data frame containing marker differential expression results for each cluster.
Examples
--------
>>> df = sccloud.misc.search_de_genes(adata, ['CD3E', 'CD4', 'CD8'], thre = 2.0)
"""
columns = [
x for x in data.varm[rec_key].dtype.names if x.startswith(de_test + "_qval:")
]
df_de = pd.DataFrame(data.varm[rec_key][columns], index=data.var_names)
df_de = df_de.reindex(index=gene_list)
columns = [
x
for x in data.varm[rec_key].dtype.names
if (
x.startswith("percentage_fold_change:")
if de_test == "fisher"
else x.startswith("log_fold_change:")
)
]
df_fc = pd.DataFrame(data.varm[rec_key][columns], index=data.var_names)
df_fc = df_fc.reindex(index=gene_list)
if de_test != "fisher":
df_fc = np.exp(df_fc)
results = np.zeros((len(gene_list), len(columns)), dtype=np.dtype("U4"))
results[:] = "?"
results[np.isnan(df_de)] = "NaN"
results[(df_de <= de_alpha).values & (df_fc > 1.0).values] = "+"
results[(df_de <= de_alpha).values & (df_fc >= thre).values] = "++"
results[(df_de <= de_alpha).values & (df_fc < 1.0).values] = "-"
results[(df_de <= de_alpha).values & (df_fc <= 1.0 / thre).values] = "--"
clusts = [x.rpartition(":")[2] for x in columns]
df = pd.DataFrame(data=results, index=gene_list, columns=clusts)
return df
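# ---------------------------------------------------------------------------
# Illustrative helper (added for exposition; not part of the original
# misc.py). It spells out, on single values, the symbol convention used by
# search_de_genes above: the q-value against `alpha` decides significance,
# and the fold change against 1.0 and `thre` decides the strength of the call.
# ---------------------------------------------------------------------------
def _demo_de_symbol(qval, fold_change, alpha=0.05, thre=1.5):
    if np.isnan(qval):
        return "NaN"
    if qval > alpha or fold_change == 1.0:
        return "?"
    if fold_change > 1.0:
        return "++" if fold_change >= thre else "+"
    return "--" if fold_change <= 1.0 / thre else "-"
# e.g. _demo_de_symbol(0.01, 2.0) -> '++', _demo_de_symbol(0.01, 0.8) -> '-',
#      _demo_de_symbol(0.20, 2.0) -> '?'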
def show_attributes(
input_file: str,
show_attributes: bool,
show_gene_attributes: bool,
show_values_for_attributes: str,
) -> None:
""" Show data attributes. For command line use.
"""
data = read_input(input_file, h5ad_mode="r")
if show_attributes:
print(
"Available sample attributes in input dataset: {0}".format(
", ".join(data.obs.columns.values)
)
)
if show_gene_attributes:
print(
"Available gene attributes in input dataset: {0}".format(
", ".join(data.var.columns.values)
)
)
    if show_values_for_attributes is not None:
for attr in show_values_for_attributes.split(","):
print(
"Available values for attribute {0}: {1}.".format(
attr, ", ".join(np.unique(data.obs[attr]))
)
)
def perform_oneway_anova(
data: AnnData,
glist: List[str],
restriction_vec: List[str],
group_str: str,
fdr_alpha: float = 0.05,
res_key: str = None,
) -> pd.DataFrame:
"""Perform one way ANOVA on a subset of cells (restricted by restriction_vec) grouped by group_str and control FDR at fdr_alpha.
Parameters
----------
data : `anndata` object
An `anndata` object containing the expression matrix.
glist : `list[str]`
A list of gene symbols.
restriction_vec : `list[str]`
A vector of restrictions for selecting cells. Each restriction takes the format of attr:value,value,value
group_str : `str`
How to group selected cells for ANOVA analysis. If group_str is for pseudotime, it has two formats: 1) 'pseudotime:time:n', which divides cells into bins of equal pseudotime interval; 2) 'pseudotime:size:n', which divides cells into bins with equal numbers of cells.
fdr_alpha : `float`, optional (default: 0.05)
False discovery rate.
res_key : `str`, optional (default: None)
Store results into data using res_key; the grouping information is stored in obs and the results are stored in uns.
Returns
-------
`pandas.DataFrame`
Results for genes that pass FDR control.
Examples
--------
>>> results = misc.perform_oneway_anova(data, ['CD3E', 'CD4', 'CD8'], [], 'pseudotime:size:10')
"""
from scipy.stats import f_oneway
from statsmodels.stats.multitest import fdrcorrection as fdr
selected = np.ones(data.shape[0], dtype=bool)
for rest_str in restriction_vec:
attr, value_str = rest_str.split(":")
values = value_str.split(",")
selected = selected & np.isin(data.obs[attr], values)
gene_list = np.array(glist)
gene_list = gene_list[np.isin(gene_list, data.var_names)]
ngene = gene_list.size
newdat = data[selected, :][:, gene_list].copy()
newdat.X = newdat.X.toarray()
group_values = group_str.split(":")
group_names = []
col_names = []
ngr = 0
group_idx = None
if group_values[0] == "pseudotime":
assert len(group_values) == 3
div_by = group_values[1]
ngr = int(group_values[2])
group_idx = np.zeros((ngr, newdat.shape[0]), dtype=bool)
pseudotimes = newdat.obs["pseudotime"].values
min_t = pseudotimes.min()
max_t = pseudotimes.max()
if div_by == "time":
interval = (max_t - min_t) / ngr
left = min_t - 1e-5
for i in range(ngr):
right = min_t + interval * (i + 1)
name = "({:.2f}, {:.2f}]".format(left if left >= 0 else 0.0, right)
group_names.append(name)
group_idx[i] = (pseudotimes > left) & (pseudotimes <= right)
left = right
else:
assert div_by == "size"
ords = np.argsort(pseudotimes)
quotient = ords.size // ngr
            residual = ords.size % ngr
            fr = 0
            for i in range(ngr):
                to = fr + quotient + (i < residual)
name = "[{:.2f}, {:.2f}]".format(
pseudotimes[ords[fr]], pseudotimes[ords[to - 1]]
)
group_names.append(name)
group_idx[i][ords[fr:to]] = True
fr = to
else:
assert len(group_values) == 2
group_attr = group_values[0]
tmp_str = group_values[1]
groups_str = tmp_str.split(";")
ngr = len(groups_str)
group_idx = np.zeros((ngr, newdat.shape[0]), dtype=bool)
for i, gstr in enumerate(groups_str):
name, values = gstr.split("~")
group_names.append(name)
group_idx[i] = np.isin(newdat.obs[group_attr], values.split(","))
for i in range(ngr):
print("Group {} has {} cells.".format(group_names[i], group_idx[i].sum()))
    import warnings
    warnings.filterwarnings("ignore")
stats = np.zeros((ngene, 3 + ngr * 2))
for i in range(ngene):
arr_list = []
for j in range(ngr):
arr = newdat.X[group_idx[j], i]
stats[i, 3 + j * 2] = arr.mean()
stats[i, 3 + j * 2 + 1] = (arr > 0).sum() * 100.0 / arr.size
arr_list.append(arr)
stats[i, 0], stats[i, 1] = f_oneway(*arr_list)
if np.isnan(stats[i, 0]):
stats[i, 0] = 0.0
stats[i, 1] = 1.0
passed, stats[:, 2] = fdr(stats[:, 1])
cols = ["fstat", "pval", "qval"]
for i in range(ngr):
cols.extend([group_names[i] + "_mean", group_names[i] + "_percent"])
raw_results = pd.DataFrame(stats, columns=cols, index=gene_list)
results = raw_results[raw_results["qval"] <= fdr_alpha]
results = results.sort_values("qval")
if res_key is not None:
data.uns[res_key] = raw_results
data.obs[res_key] = "background"
for i in range(ngr):
idx = np.zeros(data.shape[0], dtype=bool)
idx[selected] = group_idx[i]
data.obs.loc[idx, res_key] = group_names[i]
return results
|
/sccloud-0.14.0.tar.gz/sccloud-0.14.0/scCloud/misc/misc.py
| 0.91351 | 0.756313 |
misc.py
|
pypi
|
import argparse
import importlib.metadata
import logging
import time
import adafruit_scd30 # type: ignore
import board # type: ignore
import busio # type: ignore
from prometheus_client import Gauge, Summary, start_http_server
logger = logging.getLogger(__name__)
METRIC_MEASUREMENT_TIME = Summary(
"scd30_measurement_seconds", "Time spent processing performing measurement"
)
METRIC_SENSOR_CO2 = Gauge("scd30_sensor_co2", "CO2 (PPM)", ["sensor"])
METRIC_SENSOR_TEMPERATURE = Gauge(
"scd30_sensor_temperature", "Temperature (degrees C)", ["sensor"]
)
METRIC_SENSOR_HUMIDITY = Gauge("scd30_sensor_humidity", "Humidity (%rH)", ["sensor"])
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
prog="scd30-exporter",
description=importlib.metadata.metadata("scd30-exporter")["Summary"],
)
parser.add_argument("name", help="sensor name")
parser.add_argument(
"--log-level",
default="WARNING",
choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
help="log level",
)
parser.add_argument(
"--interval", type=int, default=10, help="interval in seconds between readings"
)
parser.add_argument("--port", type=int, default=8000, help="exporter port")
return parser.parse_args()
@METRIC_MEASUREMENT_TIME.time()
def read_data(scd: adafruit_scd30.SCD30) -> tuple:
while True:
if scd.data_available:
return scd.CO2, scd.temperature, scd.relative_humidity
time.sleep(0.5)
def main() -> None:
args = parse_args()
logging.basicConfig(
format="%(asctime)s %(levelname)s %(message)s",
level=args.log_level,
datefmt="%Y-%m-%d %H:%M:%S",
)
start_http_server(args.port)
    # The SCD-30 has temperamental I2C with clock stretching; the datasheet
    # recommends starting at 50 kHz
i2c = busio.I2C(board.SCL, board.SDA, frequency=50000)
scd = adafruit_scd30.SCD30(i2c)
while True:
co2, temp, humidity = read_data(scd)
logger.debug(
f"CO2: {co2:.2f} PPM, Temperature: {temp:.2f} °C, Humidity: {humidity:.2f} %%rH"
)
METRIC_SENSOR_CO2.labels(args.name).set(co2)
METRIC_SENSOR_TEMPERATURE.labels(args.name).set(temp)
METRIC_SENSOR_HUMIDITY.labels(args.name).set(humidity)
time.sleep(args.interval)
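# Illustrative addition (not in the original module): allow running this file
# directly with `python cli.py`; the installed package presumably exposes a
# console-script entry point instead.
if __name__ == "__main__":
    main()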
|
/scd30_exporter-0.2.0-py3-none-any.whl/scd30_exporter/cli.py
| 0.502441 | 0.1602 |
cli.py
|
pypi
|
from datetime import timedelta
import logging
import smbus2
import struct
import time
def interpret_as_float(integer: int):
return struct.unpack('!f', struct.pack('!I', integer))[0]
class SCD30:
"""Python I2C driver for the SCD30 CO2 sensor."""
def __init__(self):
self._i2c_addr = 0x61
self._i2c = smbus2.SMBus(1)
def _pretty_hex(self, data):
"""Formats an I2C message in an easily readable format.
Parameters:
data: either None, int, or a list of ints.
Returns:
a string '<none>' or hex-formatted data (singular or list).
"""
if data is None:
return "<none>"
if type(data) is int:
data = [data]
if len(data) == 0:
return "<none>"
if len(data) == 1:
value = "{:02x}".format(data[0])
if len(value) % 2:
value = "0" + value
return "0x" + value
return "[" + ", ".join("0x{:02x}".format(byte) for byte in data) + "]"
def _check_word(self, word: int, name: str = "value"):
"""Checks that a word is a valid two-byte value and throws otherwise.
Parameters:
word: integer value to check.
name (optional): name of the variable to include in the error.
"""
if not 0 <= word <= 0xFFFF:
raise ValueError(
f"{name} outside valid two-byte word range: {word}")
def _word_or_none(self, response: list):
"""Unpacks an I2C response as either a single 2-byte word or None.
Parameters:
response: None or a single-value list.
Returns:
None or the single value inside 'response'.
"""
return next(iter(response or []), None)
def _crc8(self, word: int):
"""Computes the CRC-8 checksum as per the SCD30 interface description.
Parameters:
word: two-byte integer word value to compute the checksum over.
Returns:
single-byte integer CRC-8 checksum.
Polynomial: x^8 + x^5 + x^4 + 1 (0x31, MSB)
Initialization: 0xFF
Algorithm adapted from:
https://en.wikipedia.org/wiki/Computation_of_cyclic_redundancy_checks
"""
self._check_word(word, "word")
polynomial = 0x31
rem = 0xFF
word_bytes = word.to_bytes(2, "big")
for byte in word_bytes:
rem ^= byte
for _ in range(8):
if rem & 0x80:
rem = (rem << 1) ^ polynomial
else:
rem = rem << 1
rem &= 0xFF
return rem
def _send_command(self, command: int, num_response_words: int = 1,
arguments: list = []):
"""Sends the provided I2C command and reads out the results.
Parameters:
command: the two-byte command code, e.g. 0x0010.
num_response_words: number of two-byte words in the result.
arguments (optional): list of two-byte arguments to the command.
Returns:
list of num_response_words two-byte int values from the sensor.
"""
self._check_word(command, "command")
logging.debug(f"Executing command {self._pretty_hex(command)} with "
f"arguments: {self._pretty_hex(arguments)}")
raw_message = list(command.to_bytes(2, "big"))
for argument in arguments:
self._check_word(argument, "argument")
raw_message.extend(argument.to_bytes(2, "big"))
raw_message.append(self._crc8(argument))
logging.debug(
f"Sending raw I2C data block: {self._pretty_hex(raw_message)}")
# self._i2c.write_i2c_block_data(self._i2c_addr, command, arguments)
write_txn = smbus2.i2c_msg.write(self._i2c_addr, raw_message)
self._i2c.i2c_rdwr(write_txn)
# The interface description suggests a >3ms delay between writes and
# reads for most commands.
time.sleep(timedelta(milliseconds=5).total_seconds())
if num_response_words == 0:
return []
read_txn = smbus2.i2c_msg.read(self._i2c_addr, num_response_words * 3)
self._i2c.i2c_rdwr(read_txn)
# raw_response = self._i2c.read_i2c_block_data(
# self._i2c_addr, command, 3 * num_response_words)
raw_response = list(read_txn)
logging.debug("Received raw I2C response: " +
self._pretty_hex(raw_response))
if len(raw_response) != 3 * num_response_words:
logging.error(f"Wrong response length: {len(raw_response)} "
f"(expected {3 * num_response_words})")
# Data is returned as a sequence of num_response_words 2-byte words
# (big-endian), each with a CRC-8 checksum:
# [MSB0, LSB0, CRC0, MSB1, LSB1, CRC1, ...]
response = []
for i in range(num_response_words):
# word_with_crc contains [MSB, LSB, CRC] for the i-th response word
word_with_crc = raw_response[3 * i: 3 * i + 3]
word = int.from_bytes(word_with_crc[:2], "big")
response_crc = word_with_crc[2]
computed_crc = self._crc8(word)
if (response_crc != computed_crc):
logging.error(
f"CRC verification for word {self._pretty_hex(word)} "
f"failed: received {self._pretty_hex(response_crc)}, "
f"computed {self._pretty_hex(computed_crc)}")
return None
response.append(word)
logging.debug(f"CRC-verified response: {self._pretty_hex(response)}")
return response
def get_firmware_version(self):
"""Reads the firmware version from the sensor.
Returns:
two-byte integer version number
"""
return self._word_or_none(self._send_command(0xD100))
def get_data_ready(self):
return self._word_or_none(self._send_command(0x0202))
def start_periodic_measurement(self, ambient_pressure: int = 0):
"""Starts periodic measurement of CO2 concentration, humidity and temp.
Parameters:
ambient_pressure (optional): external pressure reading in millibars.
The enable status of periodic measurement is stored in non-volatile
memory onboard the sensor module and will persist after shutdown.
ambient_pressure may be set to either 0 to disable ambient pressure
compensation, or between [700; 1400] mBar.
"""
if ambient_pressure and not 700 <= ambient_pressure <= 1400:
raise ValueError("Ambient pressure must be set to either 0 or in "
"the range [700; 1400] mBar")
self._send_command(0x0010, num_response_words=0,
arguments=[ambient_pressure])
def stop_periodic_measurement(self):
"""Stops periodic measurement of CO2 concentration, humidity and temp.
The enable status of periodic measurement is stored in non-volatile
memory onboard the sensor module and will persist after shutdown.
"""
self._send_command(0x0104, num_response_words=0)
def get_measurement_interval(self):
"""Gets the interval used for periodic measurements.
Returns:
measurement interval in seconds or None.
"""
interval = self._word_or_none(self._send_command(0x4600, 1))
if interval is None or not 2 <= interval <= 1800:
logging.error("Failed to read measurement interval, received: " +
self._pretty_hex(interval))
return interval
def set_measurement_interval(self, interval=2):
"""Sets the interval used for periodic measurements.
Parameters:
interval: the interval in seconds within the range [2; 1800].
The interval setting is stored in non-volatile memory and persists
after power-off.
"""
if not 2 <= interval <= 1800:
raise ValueError("Interval must be in the range [2; 1800] (sec)")
self._send_command(0x4600, 1, [interval])
def read_measurement(self):
"""Reads out a CO2, temperature and humidity measurement.
Must only be called if a measurement is available for reading, i.e.
get_data_ready() returned 1.
Returns:
tuple of measurement values (CO2 ppm, Temp 'C, RH %) or None.
"""
data = self._send_command(0x0300, num_response_words=6)
if data is None or len(data) != 6:
logging.error("Failed to read measurement, received: " +
self._pretty_hex(data))
return None
co2_ppm = interpret_as_float((data[0] << 16) | data[1])
temp_celsius = interpret_as_float((data[2] << 16) | data[3])
rh_percent = interpret_as_float((data[4] << 16) | data[5])
return (co2_ppm, temp_celsius, rh_percent)
def set_auto_self_calibration(self, active: bool):
"""(De-)activates the automatic self-calibration feature.
Parameters:
active: True to enable, False to disable.
The setting is persisted in non-volatile memory.
"""
arg = 1 if active else 0
self._send_command(0x5306, num_response_words=0, arguments=[arg])
def get_auto_self_calibration_active(self):
"""Gets the automatic self-calibration feature status.
Returns:
1 if ASC is active, 0 if inactive, or None upon error.
"""
return self._word_or_none(self._send_command(0x5306))
def get_temperature_offset(self):
"""Gets the currently active temperature offset.
The temperature offset is used to compensate for reading bias caused by
heat generated by nearby electrical components or the SCD30 itself.
See the documentation of set_temperature_offset for more details on
calculating the offset value correctly.
The temperature offset is stored in non-volatile memory and persists
across shutdowns.
Returns:
Temperature offset floating-point value in degrees Celsius.
"""
offset_ticks = self._word_or_none(self._send_command(0x5403))
if offset_ticks is None:
return None
return offset_ticks / 100.0
def set_temperature_offset(self, offset: float):
"""Sets a new temperature offset.
The correct temperature offset will vary depending on the installation
of the sensor as well as its configuration; different measurement
intervals will result in different power draw, and thus, different
amounts of electrical heating.
To compute the offset value for any fixed configuration:
1. Install and configure the sensor as needed.
2. Start continuous measurement and let it run for at least 10
minutes or until a stable temperature equilibrium is reached.
3. Get the previous temperature offset value T_offset_old from
the SCD30 using get_temperature_offset.
4. Get a temperature reading T_measured from the SCD30 using
read_measurement.
5. Measure the ambient temperature T_ambient using a *different*
sensor, away from the immediate proximity of the SCD30.
6. Compute the new offset to set as follows:
T_offset_new = (T_measured + T_offset_old) - T_ambient
After setting a new value, allow the sensor readings to stabilize,
which will happen slowly and gradually.
For more details, see the documentation on the project page.
Arguments:
offset: temperature offset floating-point value in degrees Celsius.
"""
offset_ticks = int(offset * 100)
return self._send_command(0x5403, 0, [offset_ticks])
def soft_reset(self):
"""Resets the sensor without the need to disconnect power.
This restarts the onboard system controller and forces the sensor
back to its power-up state.
"""
self._send_command(0xD304, num_response_words=0)
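# --- Illustrative usage sketch (not part of the original driver) -------------
# A minimal polling loop built only from the methods defined above; the
# 2-second measurement interval is an arbitrary example value.
if __name__ == "__main__":
    scd30 = SCD30()
    scd30.set_measurement_interval(2)
    scd30.start_periodic_measurement()
    try:
        while True:
            if scd30.get_data_ready() == 1:
                reading = scd30.read_measurement()
                if reading is not None:
                    co2, temp, rh = reading
                    print(f"CO2: {co2:.1f} ppm, T: {temp:.2f} C, RH: {rh:.1f} %")
            time.sleep(1)
    except KeyboardInterrupt:
        scd30.stop_periodic_measurement()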
|
/scd30_i2c-0.0.6-py3-none-any.whl/scd30_i2c/__init__.py
| 0.873431 | 0.337163 |
__init__.py
|
pypi
|
____ ____ ____ _ _____ _____
/ ___\/ _\/ _ \/ \/ // /
| \| / | | \|| || __\| __\
\___ || \__| |_/|| || | | |
\____/\____/\____/\_/\_/ \_/
[](https://travis-ci.org/phoenixding/scdiff)
[](https://opensource.org/licenses/MIT)
# INTRODUCTION
<div style="text-align: justify">
Most existing single-cell trajectory inference methods have relied primarily on the assumption that descendant cells are similar to their parents in terms of gene expression levels.
These assumptions do not always hold for in-vivo studies which often include infrequently sampled, un-synchronized and diverse cell populations.
Thus, additional information may be needed to determine the correct ordering and branching of progenitor cells and the set of transcription factors (TFs)
that are active during advancing stages of organogenesis. To enable such modeling we developed scdiff,
which integrates expression similarity with regulatory information to reconstruct the dynamic developmental cell trajectories.
SCDIFF is a package written in Python and JavaScript, designed to analyze cell differentiation trajectories
using time-series single-cell RNA-seq data. It is able to predict the
transcription factors and differential genes associated with the cell differentiation trajectories.
It also visualizes the trajectories using an interactive tree-structured graph, in which nodes
represent different sub-populations of cells (clusters).
</div>

# PREREQUISITES
* python (python 2 and python 3 are both supported)
It is installed by default on most Linux distributions and on macOS.
If not, please check [https://www.python.org/downloads/](https://www.python.org/downloads/) for installation
instructions.
* Python packages dependencies:
-- scikit-learn
-- scipy
-- numpy
-- matplotlib
-- pydiffmap
-- imbalanced_learn
The python setup.py script (or pip) will try to install these packages automatically.
However, please install them manually if, for any reason, the automatic
installation fails.
# INSTALLATION
There are 3 options to install scdiff.
* __Option 1: Install from download directory__
cd to the downloaded scdiff package root directory
```shell
$cd scdiff
```
run python setup to install
```shell
$python setup.py install
```
macOS or Linux users might need sudo/root access to install.
Users without the root access can install the package using the pip/easy_install with a --user parameter ([install python libraries without root](https://stackoverflow.com/questions/7465445/how-to-install-python-modules-without-root-access)).
```shell
$sudo python setup.py install
```
Use python3 instead of python in the above commands if you are installing with Python 3.
* __Option 2: Install from Github__ (recommended):
python 2:
```shell
$sudo pip install --upgrade https://github.com/phoenixding/scdiff/zipball/master
```
python 3:
```shell
$sudo pip3 install --upgrade https://github.com/phoenixding/scdiff/zipball/master
```
* __Option 3: Install from PyPI__ :
python2:
```
$sudo pip install --upgrade scdiff
```
python 3:
```
$sudo pip3 install --upgrade scdiff
```
The above pip installation options should work for Linux, Windows and macOS systems.
For macOS users, it's recommended to use the python3 installation. The default python2 on macOS has
some compatibility issues with a few dependent libraries. Users would have to install their own
version of python2 (e.g. via [Anaconda](https://anaconda.org/)) if they prefer to use python2 on macOS.
# USAGE
```shell
usage: scdiff [-h] -i INPUT -t TF_DNA -k CLUSTERS -o OUTPUT [-s SPEEDUP] [-l LargeType]
[-d DSYNC][-a VIRTUALANCESTOR]
-h, --help show this help message and exit
-i INPUT, --input INPUT, required
input single cell RNA-seq expression data
-t TF_DNA, --tf_dna TF_DNA, required
TF-DNA interactions used in the analysis
-k CLUSTERS, --clusters CLUSTERS, required
how to learn the number of clusters for each time
point? user-defined or auto? if user-defined, please
specify the configuration file path. If set as "auto"
scdiff will learn the parameters automatically.
-o OUTPUT, --output OUTPUT, required
output folder to store all results
-s SPEEDUP, --speedup SPEEDUP(1/None), optional
If set as 'True' or '1', scdiff will speed up the run
by reducing the number of iterations.
-l LARGETYPE, --largetype LARGETYPE (1/None), optional
if specified as 'True' or '1', scdiff will use LargeType mode to
improve the running efficiency (both memory and time).
As spectral clustering is not scalable to large data,
PCA+K-Means clustering was used instead. The running speed is improved
significantly but the performance is slightly worse. If there are
more than 2k cells at each time point on average, it is highly
recommended to use this parameter to improve time and memory efficiency.
-d DSYNC, --dsync DSYNC (1/None), optional
If specified as 'True' or '1', the cell synchronization will be disabled.
If the users believe that cells at the same time point are similar in terms of
differentiation/development. The synchronization can be disabled.
-a VIRTUALANCESTOR, --virtualAncestor VIRTUALANCESTOR (1/None), optional
scdiff requires a 'Ancestor' node (the starting node,
all other nodes are descendants). By default,
the 'Ancestor' node is set as the first time point. The hypothesis behind is :
The cells at first time points are not differentiated yet
( or at the very early stage of differentiation and thus no clear sub-groups,
all Cells at the first time point belong to the same cluster).
If it is not the case, users can set -a as 'True' or '1' to enable
a virtual ancestor before the first time point. The expression of the
virtual ancestor is the median expression of all cells at first time point.
-f LOG2FOLDCHANGECUT, --log2foldchangecut LOG2FOLDCHANGECUT (Float), optional
By default, scdiff uses log2 Fold change 1(=>2^1=2)
as the cutoff for differential genes (together with t-test p-value cutoff 0.05).
However, users are allowed to customize the cutoff based on their
application scenario (e.g. log2 fold change 1.5).
-e ETFLISTFILE, --etfListFile ETFLISTFILE (String), optional
By default, scdiff recognizes 1.6k
TFs (we collected in human and mouse). Users are able
to provide a customized list of TFs instead using this
option. It specifies the path to the TF list file, in
which each line is a TF name. Here, it does not require
the targets information for the TFs, which will be used to infer
eTFs (TFs predicted based on the expression of themselves instead of their targets).
```
# INPUTS AND PRE-PROCESSING
scdiff takes the two required input files (-i/--input and -t/--tf_dna), two optional files (-k/--cluster, -e/--etfListFile) and a few other optional parameters.
* __-i/--input__
(<span style="color:red">Note: The gene names in the expression file must be consistent with those in [TF_DNA](tf_dna/Human_TF_targets.txt) file. If using the provided [TF_DNA](tf_dna/Human_TF_targets.txt) file, [gene symbols](https://ghr.nlm.nih.gov/about/gene-names-symbols) must be used to represent the genes in the expression file.</span>)
This specifies the single cell RNA-Seq expression data.
If the RNA-Seq data is not yet processed, instructions on how to calculate expression from raw RNA-Seq reads can be found in many other studies, e.g (https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4728800/).
For example, users can use TopHat + Cufflinks to calculate gene expression in terms of FPKM. Please refer to the corresponding tools for instructions.
Once we have the RNA-Seq gene expression, the expression data should be transformed to log space, for example by log2(x+1), where x could represent the gene expression in terms of RPKM, FPKM, TPM or UMI count, depending
on what tools are used to process the RNA-Seq expression data.
Note: For large expression datasets (e.g. >1Gb), it's recommended to filter out genes with very low variance to speed up the run and save memory.
We provide a script [utils/filterGenes.py](utils/filterGenes.py) in the utils folder for this purpose (please use the "--help" parameter to show the usage information).
The top 10,000 genes are enough for most cases, as the expression of many genes is quite stable (or all zeros/very small values for non-expressing genes) and thus non-informative (>80% agreement of the cell assignments with the results using all genes, as tested on multiple datasets).
```
//To keep the top 10,000 genes with the largest variance
$ python filterGenes.py -i example/example.E -n 10000
```
The input file has the following formatting requirements (a small format-check sketch in Python is given at the end of this section):
* __Header Row__
First 3 columns are "Cells","Time","Label" and the remaining columns are gene names.
* __Data Rows__
* __1st column__: Cell ID, represents the ID for the cell.
* __2nd column__: Cell time, Integer, represents the measurement time of the cell.
* __3rd column__: Cell label, represents the label of the cell (e.g cell type if known). In most cases, we don't have any prior knowledge of the cell type. In this case, use "NA" instead.
Or, you can use any name you want to label each cell. We don't use this information in our model and it's only used to mark the cells with
the same attributes (any known attributes users are interested in, for example, cell type, time point, WT/Treatment, etc.) in the visualization.
Please avoid too many different labels, which will make the visualization very crowded. It's recommended to use <20 different labels.
If, however, users want to use more labels, please use the 'NA' as the label for all cells and use the cell IDs to infer the label composition of each node.
* __4th- columns__: Gene expression values.
Example input:
[example.E](example/example.E)
* __-t/--tf_dna__
This specifies the TF-gene interaction data. In other words, it specifies the TF targets.
Under the tf_dna directory, we provided a [human TF-gene interaction file](tf_dna/Human_TF_targets.txt) and a [mouse TF-gene interaction file](tf_dna/Mouse_TF_targets.txt) inferred using the strategy in our previous study (https://www.ncbi.nlm.nih.gov/pubmed/20219943).
Although these TF-gene interactions were collected in human and mouse, they should also be applicable to other closely related species.
Besides, in our previous work DREM (http://sb.cs.cmu.edu/drem/), we collected TF-gene interactions for common species including human, mouse, fly, E. coli, yeast and Arabidopsis.
Please refer to http://sb.cs.cmu.edu/drem/DREMmanual.pdf appendix B for complete details.
Those TF-gene interaction files can be downloaded from our DREM software (https://github.com/phoenixding/idrem/tree/master/TFInput).
You might need to unzip and re-format the file to satisfy the requirements. The TF-gene interaction file has the following formatting requirements:
* __Header Row__
```
TF Gene Input
```
* __Data Rows__
* __1st column__: TF ID (gene symbol)
* __2nd column__: gene ID (gene symbol)
* __3rd column__: Input, optional, the interaction strength between TF and target gene. If missing, by default it's 1.
This column is not used in scdiff.
Example file:
[example TF gene interaction file](tf_dna/Human_TF_targets.txt)
* __-k/--cluster__
This specifies the clustering parameter (String).
It needs to be either 'auto' or the path to a 'config' file. Here, 'auto' denotes that the clustering parameters will be learned automatically.
The path to the 'config' file specifies a file with customized initial clustering parameters. This is **highly recommended** when users have some prior knowledge.
For example, if we know how many sub-populations there are within each time point,
we can just directly specify the clustering parameters (K, # of clusters) using the 'config' file.
Please note that this config file is optional; users can set -k to "auto" and scdiff will learn the clustering parameters automatically.
Config file format:
* __1st column__: Time point
* __2nd column__: # of clusters(sub cell-types) at this time point.
```
14 1
16 2
18 5
```
please note that if using a customized config file for clustering, the second column (# of sub cell-types) of the first line (which represents the first time point) must be 1.
The first line denotes the ancestor time point and all cells at later time points are descendants. However, if the first time point is already differentiated and there
are already multiple sub-types, users need to use VirtualAncestor option (-a 1) to generate a virtual ancestor node (virtual root node). In the config file,
a virtual ancestor line also needs to be added before the first time point. The virtual ancestor time point should be FirstTimePoint(Integer)-1. For example,
if the first time point is 14, the virtual ancestor time point would be 13.
Example config file:
[example.config](example/example.config)
* __-e/--etfListFile__
This specifies the path to the TF list file. By default, scdiff recognizes the 1.6k TFs we collected in human and mouse.
If users want to use their own list of TFs, please use this parameter to specify the path to the file.
Format of the TF List file:
each row represents a standard TF name, matching the gene names used in the expression data.
We require that the predicted TFs must be expressed (based on the expression data).
An example of the TF List file can be found under the "tf_list" folder [HumanTFList.txt](tf_list/Human_mouse_TFList.txt).
For other scdiff optional parameters, please refer to the [usage](#usage) section.
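As a quick sanity check of the expression file format described under __-i/--input__ above, the following is a minimal Python sketch (not part of scdiff; it assumes pandas is installed and uses the example file name from this README):
```python
import pandas as pd

# Load the tab-delimited expression file (cells in rows, genes from the 4th column on).
expr = pd.read_csv("example.E", sep="\t")

# The first three columns should be the cell ID, measurement time and cell label.
cell_ids, times, labels = expr.iloc[:, 0], expr.iloc[:, 1], expr.iloc[:, 2]
print("time points:", sorted(times.unique()))
print("labels:", labels.unique())

# The remaining columns are log-space gene expression values.
genes = expr.columns[3:]
print(f"{expr.shape[0]} cells x {len(genes)} genes")
```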
# RECOMMENDED PIPELINE
Please follow the following steps to analyze the single-cell data.
* (1) Expression file Pre-processing (__MUST__):
Please convert the expression file into the format as described above.
For large dataset (e.g. >1Gb), please remove low variance genes using the provided [utils/filterGenes.py](utils/filterGenes.py) script.
Note: the expression __MUST__ be in log space.
* (2) Estimate number of Clusters K for each time point in a user-defined or semi-automatic way (__Not Required But Recommended__)
If users have prior knowledge about how many clusters K approximately (e.g. based on markers) for each time, please specify using the "-k config_file" parameter as described above.
If no prior knowledge is available, users can choose to estimate the K automatically or semi-automatically with the provided [utils/semiAutomaticK.py](utils/semiAutomaticK.py) script.
Although scdiff can estimate the number of clusters K for each time point in a fully automatic way with the "-k auto" option, in many cases users are able to get a better estimate
by eye with the help of a visualization (e.g., a TSNE plot). The semiAutomaticK.py script draws the TSNE plots to help users determine a plausible number of clusters K for each time point.
Then users can run scdiff with the option "-k config_file". The TSNE plot will be output to a pdf file (*.tsne.pdf).
```
$python semiAutomaticK.py -i example.E
```

* (3) Run scdiff as described in USAGE section.
# RESULTS AND VISUALIZATION
The results are given under the specified directory.
The predicted model was provided as a json file, which is visualized by the
provided JavaScript. Please use *Chrome/FireFox/Safari* browser for best experience.

The following is the manual for the visualization page.
**Visualization Config (Left panel)**:
* **GLOBAL Config**:

* **RESET**: It restores all configs to default. The slider below resizes the visualization.
* **Enable/Disable tooltip**: By enabling the tooltip, users are able to see the percentages of each type of cell label in the node.
* **Set Color**: Users are allowed to customize the background, text and edge colors using the Set Background/Text/Edge color buttons.
* **CELL PLOTS**:

* **Plot Cells**: show PCA/TSNE/ISOMAP plots for all cells and the clusters (use the radio button to select the dimension reduction method : PCA/T-SNE/ISOMAP/Diffusion Map for visualization).

By clicking the cluster labels on the top, users are able to show/hide the cells from that cluster.
* **TF CONFIG** :

* **Show/Hide TF** : display/hide the regulating TFs (TFs predicted based on the expression of their targets) for each path.
If the targets of a TF x are significantly differentially expressed along an edge e,
then TF x is predicted to regulate edge e.
* **Explore TF** : highlight the regulating paths for given TF; show the expression profile of the input TF and the average fold change of the TF targets.

* **Show/Hide eTF**: display/hide the regulating eTFs (TFs predicted based on their own expression).
eTFs are the TFs predicted based on their own expression instead of the expression
of their targets. If the expression of a TF x in one node (cluster y) is **significantly** different compared to both
the parent (z) and all the siblings, the TF will be predicted as an eTF regulating the edge between the nodes (y->z).
eTFs are very good complements to the target-based TFs, as the TF target information is
often limited and incomplete. We may miss important TFs, which have no or very limited known targets,
using only target-based methods.
* **Explore eTF**: highlight the regulating paths for a given eTF; show the expression profile of the input eTF and the average fold change of the eTF targets.
* **GENE CONFIG**:

* **Show/Hide DE**: display/hide the differentially expressed genes for each edge.
* **Explore DE gene** : highlight the paths with those DE genes and also show the expression profile of the input gene.
* **Find DE genes between** : find the differentially expressed genes between two specified nodes. Use the dropdown menu to specify two nodes for comparison.
On the results page, click the "functional analysis" button to perform a gene enrichment analysis using PANTHER (or ToppGene by shift+click).

* **Download**:

* **Generate Figure**: download the visualization figure.
* **Generate Json download file**: download the json file, which contains all information of the model (a small loading sketch in Python is shown after this list).
```
Json format:
{
"GeneList": [List of gene symbols],
"CellList": [ // list of all cells
{
// a cell instance
"TE": [x0,y0], // cell coordinates in 2D space projected by T-SNE
"PE": [x1,y1], // cell coordinates in 2D space projected by PCA
"IE": [x2,y2], // cell coordinates in 2D space projected by ISOMAP
"ME": [x3,y3], // cell coordinates in 2D space projected by Diffusion map
"typeLabel": cell Label, // label of the cell if known
"ID": cell ID,
"T": cell measure time
},
...
],
"NodeList":[ // list of all nodes in Graph
{
// a node instance
"E": [list of gene expression in node, please refer to GeneList for gene symbols],
"parent": parent node index,
"children": list of children node indexes,
"CELL": list of cell indexes, // all the cells assigned to the node
"T": node time //estimated node level
"ID": node ID
"eTF": list of TFs predicted based on their own expressions
},
...
],
"EdgeList":[ // list of all edges in Graph
{
// an edge instance
"to": edge target node index,
"from": edge source node index,
"de": List of differential genes along the edge,
"etf": List of regulating TFs for the edge
},
...
]
}
```
* **Generate TF download File** : download regulating TFs for all paths.
* **Generate DE download file**: download DE genes for all paths.
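As a minimal illustration (not part of scdiff), the JSON file described above can be inspected with a few lines of Python; the file name below is a placeholder:
```python
import json

# Load the model JSON exported via "Generate Json download file".
with open("model.json") as f:  # placeholder file name
    model = json.load(f)

print(len(model["CellList"]), "cells,",
      len(model["NodeList"]), "nodes,",
      len(model["EdgeList"]), "edges")

# Differential genes and regulating TFs along each edge.
for edge in model["EdgeList"]:
    print(f"edge {edge['from']} -> {edge['to']}:",
          f"{len(edge['de'])} DE genes, TFs: {edge['etf']}")
```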
**Visualization Canvas (Right Panel)**:
* mouse over each node to show the pie chart of cell types within each node (need to enable the tooltip).
* left click each node to show:
* Table 0: Cell IDs (with labels) within the node.
* Table 1: Regulating TFs for the edge ending at the node.
* Table 2: Regulating eTFs for the edge ending at the node.
* Table 3: DE genes (up-regulated) for the edge ending at the node.
* Table 4: DE genes (down-regulated) for the edge ending at the node.
# EXAMPLES
Run scdiff on given time-series single cell RNA-seq data.
An example script [exampleRun.py](example/exampleRun.py) is provided under the example directory.
**1) Run with automatic config**
```shell
$ scdiff -i example.E -t example.tf_dna -k auto -o example_out
```
* **-i/--input**:
**example.E** is the single cell RNA-seq dataset with following format (tab delimited)
```
cell_ID time cell_label ex_gene1 ex_gene2 ... ex_geneN
```
* cell_ID: ID of the cell.
* time: measure time of the cell RNA-seq.
* cell_label: known label for the cell (if available); if not, denote it as NA.
* ex_genei: expression of gene i (log2 gene expression value). Gene expression can be FPKM, RPM or any other acceptable gene expression measurement.
Please read **example.E** for an example of acceptable time-series single cell dataset format.
* **-t/--tfdna**:
**example.tf_dna** provides the TF-DNA interaction information used in this study (TF target information) in tab-delimited format.
```
TF TF_target Score
```
For example:
```
ZNF238 TIMM8B 0.9
SOX9 TIMM8B 0.9
ZEB1 TIMM8B 0.9
GATA4 TIMM8B 0.9
CEBPA RAB30 0.9
NKX2-1 RAB30 0.9
SRF RAB30 0.9
SOX5 RAB30 0.9
SRY RAB30 0.9
POU1F1 RAB30 0.9
POU2F1 RAB30 0.9
NFKB1 KRI1 0.9
E2F1 C11ORF35 0.9
DSP C11ORF35 0.9
ELSPBP1 C11ORF35 0.9
EGR2 C11ORF35 0.9
EGR1 C11ORF35 0.9
NR2F2 C11ORF35 0.9
LMO2 C11ORF35 0.9
ESR2 C11ORF35 0.9
HNF1A C11ORF35 0.9
EGR3 C11ORF35 0.9
```
The TF-DNA directory provides the TF-DNA interaction file used in this study.
* **-k/--clusters**:
This specifies the clustering parameter (String). It needs to be either 'auto' or the path to a 'config' file.
Here, 'auto' denotes the clustering parameters will be learned automatically.
* **-o/--output**:
**example_out** is the specified output directory.
**2) Run with user-defined config**
```shell
$scdiff -i example.E -t example.tf_dna -k example.config -o example_out
```
The format of example.E and example.tf_dna are the same as described above.
**example.config** specifies the custom initial clustering parameters. This is used when we have some prior knowledge.
For example, if we know how many sub-populations there are within each time point, we can just directly specify the clustering parameters using
the example.config file, which provides better performance.
example.config format(tab delimited)
```
time #_of_clusters
```
For example:
```
14 1
16 2
18 5
```
However, if we don't have any prior knowledge about the sub-populations within each time point, we just use the automatic initial clustering:
-k auto.
**3) Run scdiff on large single cell dataset**
```shell
$scdiff -i example.E -t example.tf_dna -k auto -o example_out -l True -s True
```
-i, -t, -k, -o parameters were discussed above.
For very large datasets (e.g., more than 20k cells), it's recommended to filter out genes with very low variance.
It significantly cuts down the memory cost and running time.
* **-l/--large (optional)**
String, if specified as 'True' or '1', scdiff will use LargeType mode to improve the running efficiency (both memory and time).
The performance will be sacrificed to some extent. K-means will be used for Clustering instead of Spectral clustering.
* **-s/--speedup (optional)**
Speed up the convergence; it will reduce the running time significantly. This is highly recommended for large datasets.
Based on testing on a lung single-cell dataset (Treutlein 2014), the speedup performance is only slightly worse (2 more cells were mis-assigned)
**(4) Run scdiff on large single cell dataset with synchronization disabled and virtual ancestor**
```shell
$scdiff -i example.E -t example.tf_dna -k auto -o example_out -l True -s True -d True -a True
```
-i, -t , -k, -o, -l ,-s parameters were defined above.
* **-d/--dsync (optional)**
If set as 'True' or '1', the cell synchronization will be disabled. By default, the cell synchronization is enabled.
For large datasets, users can disable the synchronization to speed things up. If the users have prior knowledge that the cells are
already relatively well synchronized, they can also disable the synchronization.
* **-a/--virtualAncestor (optional)**
If set as 'True' or '1', the virtual ancestor node will be built. By default, the ancestor node is the first time point (all cells at the first time point).
**5) example running result**
The following link presents the results of an example run.
[example_out](http://www.cs.cmu.edu/~jund/scdiff/result/treutlein2014_lung/treutlein2014_lung.html)
# MODULES & FUNCTIONS
## scdiff module
This Python module is used to perform the single-cell differentiation analysis and build the differentiation graph. Users can use the modules by
importing the scdiff package in their programs. Besides the description below, we also provide a module testing example inside the example directory under the name [moduleTestExample.py](example/moduleTestExample.py).
**[scdiff.Cell(Cell_ID, TimePoint, Expression,typeLabel,GeneList)](#cell)<a id="cell"></a>**
This class defines the cell.
**Parameters**:
* **Cell_ID**: String
The ID of the cell.
* **TimePoint**: Integer
Measurement TimePoint of the cell, Integer.
* **Expression**: List of float
Expression of all genes.
* **Cell_Label**: String
The true label for the cell if available, 'NA' if not available. (Note, we don't need this information for the model, but it's useful when
analyzing the result).
* **GeneList** : List of String
List of gene symbols expressed in the cell.
**Output**:
A Cell class instance (with all information regarding to a cell)
**Attributes**:
* **ID**: String
Cell ID
* **T**: Integer
Cell Time
* **GL**: List of String
List of gene names
* **E** : List of float
List of gene expression
**Example**:
```python
import scdiff
from scdiff.scdiff import *
# reading example cells ...
AllCells=[]
print("reading cells...")
with open("example.E","r") as f:
line_ct=0
for line in f:
if line_ct==0:
GL=line.strip().split("\t")[3:]
else:
line=line.strip().split("\t")
iid=line[0]
ti=float(line[1])
li=line[2]
ei=[round(float(item),2) for item in line[3:]]
ci=scdiff.Cell(iid,ti,ei,li,GL)
AllCells.append(ci)
line_ct+=1
print('cell:'+str(line_ct))
```
**[scdiff.Graph(Cells, tfdna, kc, largeType=None, dsync=None, virtualAncestor=None, fChangeCut=1.0, etfile=None)](#graph) <a id="graph"></a>**
This class defines the differentiation graph.
**Parameters**:
* **Cells**: Cell instances
Please read [cell](#cell) Class definition for details.
* **tfdna**: String
It specifies the path to the TF-gene interaction file.
* **kc**: String
clustering config. It's a string with either 'auto' or clustering configure file path (-k parameter).
* **largeType**: None(default) or String
whether the single-cell data is a 'largeType' (largeType denotes that the number of cells in the dataset is very large, typically >2k cells).
In such cases, the performance will be sacrificed to improve the running efficiency (e.g. using K-means instead of spectral clustering).
If not set (**None**), the dataset will be regarded as normal; if set as 'True', the dataset will be treated as largeType.
* **dsync**: None(default) or String ('True' or '1')
whether disable the cell time synchronization. By default, the cell time synchronization is enabled. If dsync set as "1" or "True",
this function will be disabled. No cell time synchronization will be made.
* **virtualAncestor**: None (default) or String ('True' or '1')
By default, all cells at the first time point will be regarded as the starting ancestor for all cells at later time points.
If users believe that the cells are already significantly differentiated and there is already more than one group at the first time point,
then a virtual ancestor needs to be used by setting virtualAncestor to "True" or "1".
* **fChangeCut**: 1.0 (default) or any other float(e.g. 1.5)
By default, we used 1.0 as the cutoff (together with t-test p-value cutoff 0.05) to
find DE genes. Users are allowed to choose other cutoffs using this parameter.
* **etfile**: None(default) or String (path to the TF List file).
By default, we used the 1.6k TFs collected in human and mouse.
Users are allowed to choose their own TF list using this parameter.
**Output**:
A graph instance with all nodes and edges, which represents the differentiation structure for given inputs.
**Attributes**:
* **Cells**: List of Cell instances
* **Nodes**: List of Cluster instances (each cluster represents a node), all nodes in the graph.
* **Edges**: List of Path instances (each represents an edge), all edges in the graph.
* **dTD,dTG,dMb**:
They are all dictionaries about TF-gene interactions.
dTD-> key: TF, value: List of target genes
dTG-> key: gene, value: List of regulating TFs
dMb-> key: TF, value: List of target genes, which are expressing (non-zero) in the given single cell expression dataset.
**Example**:
```python
import scdiff
from scdiff.scdiff import *
print("testing scdiff.Graph module ...")
# creating graph using scdiff.Graph module and examples cells build above
g1=scdiff.Graph(AllCells,"example.tf_dna",'auto')
```
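Continuing the example above, the TF-gene dictionaries documented under **dTD, dTG, dMb** can be inspected directly. This is a small illustrative snippet (not from the scdiff documentation); it just picks an arbitrary TF from the graph built above:
```python
# g1 is the Graph instance built in the example above
some_tf = next(iter(g1.dTD))          # pick an arbitrary TF
print(some_tf, "->", len(g1.dTD[some_tf]), "target genes")
print("number of TFs with expressed targets:", len(g1.dMb))
```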
**[scdiff.Clustering(Cells, kc,largeType=None)](#graph)**
This class represents the clustering.
**Parameters**:
* **Cells**: List of Cell
Please read [Cell](#cell) Class definition for details.
* **kc**: String
clustering config. It's a string with either 'auto' or clustering configure file path (-k parameter).
* **largeType**: None(default) or String
whether the single-cell data is a 'largeType' (largeType denotes that the number of cells in the dataset is very large, typically >2k cells).
In such cases, the performance will be sacrificed to improve the running efficiency (e.g. using PCA + K-means instead of spectral clustering).
If not set (**None**), the dataset will be regarded as normal; if set as 'True' or '1', the dataset will be treated as largeType.
**Method**: **[getClusteringPars()](#clustering_getClusteringPars)**
* **Output**:
Parameters needed for clustering-dCK and dBS. This function can be used to learn the
clustering parameters.
* **dCK**: dictionary
key:timePoint, value: K (number of clusters, Integer) , e.g {14:1, 16:2, 18:5}
number of clusters for clustering at each time point.
* **dBS**: dictionary
key: timePoint, value: seed (Integer), e.g. {14:0, 16:0, 18:1}
clustering seed for each time point
* **Example**:
```python
import scdiff
from scdiff import *
Clustering_example=scdiff.Clustering(AllCells,'auto',None)
[dCK,dBS]=Clustering_example.getClusteringPars()
```
**Method**: **[performClustering()](#clustering_performClustering)**
* **Output**: Clusters instances (Clustering results), please check [Cluster](#cluster)
for details. This function is used to cluster all the nodes into clusters(Graph nodes).
* **Example**:
```python
import scdiff
from scdiff import *
Clustering_example=scdiff.Clustering(AllCells,'auto',None)
Clusters=Clustering_example.performClustering()
```
**[scdiff.Cluster(Cells,TimePoint,Cluster_ID)](#cluster)<a id="cluster"></a>**
This class defines the node in the differentiation graph.
**Parameters**:
* **Cells**: List of Cell
[Cell](#cell) instances.
* **TimePoint**: Integer
Initial Time Point for Cluster, it's the dominant measurement time for
all cells within the cluster.
* **Cluster_ID**: String
Cluster ID.
**Output**: List of float, this function calculates the average
gene expression of all cells in cluster.
**Attributes**:
* **cells**: List of Cell instances
* **T**: Cluster time (Integer/float)
* **ID**: Cluster ID (String)
* **P**: Parent Cluster (Cluster instance)
* **C**: Children Clusters (List of Cluster instances)
* **E**: Mean gene expression of the Cluster (List of float)
* **R**: Gene expression variance of the Cluster (List of float)
* **GL**: List of gene names (List of String )
**Example**:
```python
import scdiff
from scdiff import *
cluster1=scdiff.Cluster([item for item in AllCells if item.T==14],14,'C1')
```
**[scdiff.Path(fromNode,toNode,Nodes,dTD,dTG,dMb)](#path)**
This class defines the edge in the differentiation graph.
**Parameters**:
* **fromNode**: Cluster
The beginning end of an edge, Cluster instance
* **toNode**: Cluster
The ending end of an edge, Cluster instance
* **Nodes**: List of Cluster
All Nodes in Graph.
* **dTD,dTG,dMb**:
The same as described in the scdiff.Graph class.
**Output**:
Graph edge instance.
**Attributes**:
* **fromNode**: Cluster instance (source node of the edge).
* **toNode**: Cluster instance (target node of the edge).
* **diffG**: differentially expressed genes on the edge.
* **atf**: regulating TFs on the edge.
**Example**:
```python
import scdiff
from scdiff import *
g1=scdiff.Graph(AllCells,"example.tf_dna",'auto')
p1=scdiff.Path(g1.Nodes[0],g1.Nodes[1],g1.Nodes,g1.dTD,g1.dTG,g1.dMb)
```
## viz module
This module is designed to visualize the differentiation graph structure using JavaScript.
**[scdiff.viz(exName,Graph,output)](#viz)**
**Parameters**:
* **exName**: String
The name of the output visualization result.
* **Graph**: Graph
Graph instance, please refer [Graph](#graph).
* **output**: output directory
**Output**:
a visualization folder with HTML page, JavaScript Code and Graph Structure in JSON format.
**Example**:
```python
import os
import scdiff
from scdiff import *
print ("testing scdiff.viz module ...")
# visualizing graph using scdiff.viz module
os.mkdir("e1_out")
scdiff.viz("example",g1,"e1_out")
```
Then, you will find the visualized result page in HTML under 'e1_out' directory.
# CREDITS
This software was developed by the ZIV systems biology group @ Carnegie Mellon University.
Implemented by Jun Ding.
Please cite our paper [Reconstructing differentiation networks and their regulation from time series single cell expression data](https://genome.cshlp.org/content/early/2018/01/09/gr.225979.117).
# LICENSE
This software is under MIT license.
see the LICENSE.txt file for details.
# CONTACT
zivbj at cs.cmu.edu
jund at cs.cmu.edu
|
/scdiff-1.1.16.tar.gz/scdiff-1.1.16/README.md
| 0.645902 | 0.864654 |
README.md
|
pypi
|
import matplotlib.pyplot as plt
import matplotlib
import math
from .. import utils
from ._plot_dimensions import PlotDimensions
from ._style_spines import style_spines
from typing import List
# -- Main class: ---------------------------------------------------------------
class Plot(utils.ABCParse):
def __init__(
self,
nplots: int =1,
ncols: int =1,
scale: float =None,
width: float =1,
height: float =1,
hspace: float =0,
wspace: float =0,
width_ratios: List[float]=None,
height_ratios: List[float]=None,
*args,
**kwargs,
):
super().__init__()
self.__parse__(locals(), private = [None])
self._configure_plot_size()
def _configure_plot_size(self):
if not self._is_none(self.scale):
self.height = self.width = self.scale
self._plot_dim_config = PlotDimensions(self.ncols, self.nrows, self.width, self.height)
self.height, self.width = self._plot_dim_config()
@property
def nrows(self):
return math.ceil(self.nplots / self.ncols)
@property
def gridspec(self):
return matplotlib.gridspec.GridSpec(
nrows=self.nrows,
ncols=self.ncols,
width_ratios=self.width_ratios,
height_ratios=self.height_ratios,
hspace=self.hspace,
wspace=self.wspace,
)
@property
def figure(self):
return plt.figure(figsize=(self.width, self.height))
def rm_ticks(self):
for ax in self.axes:
ax.set_xticks([])
ax.set_yticks([])
def linearize_axes(self):
axes = []
for i, row in self.AxesDict.items():
for j, col in row.items():
axes.append(self.AxesDict[i][j])
return axes
def __call__(
self,
linearize: bool = True,
rm_ticks: bool = False,
):
plot_count = 0
self.AxesDict = {}
self.fig = self.figure
gridspec = self.gridspec
for ax_i in range(self.nrows):
self.AxesDict[ax_i] = {}
for ax_j in range(self.ncols):
plot_count += 1
self.AxesDict[ax_i][ax_j] = self.fig.add_subplot(gridspec[ax_i, ax_j])
if plot_count >= self.nplots:
break
if not linearize:
return self.fig, self.AxesDict
else:
self.axes = self.linearize_axes()
if rm_ticks:
self.rm_ticks()
return self.fig, self.axes
# -- API-facing function: ------------------------------------------------------
def plot(
nplots: int = 1,
ncols: int = 1,
scale: float = None,
width: float = 1,
height: float = 1,
hspace: float = 0,
wspace: float = 0,
width_ratios: List[float] = None,
height_ratios: List[float] = None,
linearize=True,
rm_ticks=False,
color=[None],
move=[0],
xy_spines=False,
delete_spines=[[]],
color_spines=[[]],
move_spines=[[]],
):
"""
Parameters:
-----------
Returns:
--------
fig, axes
"""
plot_obj = Plot(
nplots=nplots,
ncols=ncols,
scale=scale,
width=width,
height=height,
hspace=hspace,
wspace=wspace,
width_ratios=width_ratios,
height_ratios=height_ratios,
)
fig, axes = plot_obj(linearize=linearize, rm_ticks=rm_ticks)
color = color * nplots
move = move * nplots
if xy_spines:
delete_spines = [["top", "right"]] * nplots
elif delete_spines == True:
delete_spines = [["top", "bottom", "right", "left"]] * nplots
else:
delete_spines = delete_spines * nplots
color_spines = color_spines * nplots
move_spines = move_spines * nplots
# styling in function requires linearization
if linearize:
for n, ax in enumerate(axes):
style_spines(
ax,
color=color[n],
move=move[n],
delete_spines=delete_spines[n],
color_spines=color_spines[n],
move_spines=move_spines[n],
)
return fig, axes
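# --- Illustrative usage sketch (not part of the original module) -------------
# Typical call pattern after importing `plot` from this module; all argument
# values below are arbitrary examples:
#
#   fig, axes = plot(nplots=4, ncols=2, scale=2, hspace=0.2, wspace=0.2,
#                    rm_ticks=True, xy_spines=True)
#   for ax in axes:
#       ax.plot([0, 1], [0, 1])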
|
/scdiffeq_plots-0.0.1rc1.tar.gz/scdiffeq_plots-0.0.1rc1/scdiffeq_plots/core/_plot.py
| 0.810479 | 0.494446 |
_plot.py
|
pypi
|
# Scellseg
A style-aware cell instance segmentation tool with pre-training and contrastive fine-tuning
### **Description**<img src="./logo.svg" width="160" title="scellseg" alt="scellseg" align="right" vspace = "30">
We proposed a "pre-trained + fine-tuning" pipeline for cell instance segmentation. To make Scellseg easy to use, we also developed a graphical user interface integrated with functions of annotation, fine-tuning and inference. Biologists can specialize their own cell segmentation model to conduct single-cell image analysis.
### Install
Operating system: It has been tested on Windows 10. Theoretically, it can work on any system that can run Python.
Programing language: Python.
Hardware: >= 8G memory, equipped with a CPU with Core i5 or above.
Our Environment: Python --3.7.4, CUDA --10.1.243, GPU:Nvidia 2080Ti.
Before installation, please check whether you can use conda environment
```
conda create --name scellseg_env python=3.7
activate scellseg_env
pip install scellseg --default-timeout=10000
```
If you get a "Timeout" error, increase the value of --default-timeout and try again, for example:
```
pip install scellseg --default-timeout=100000
```
After installing scellseg successfully, you can start the GUI through:
```
activate scellseg_env
python -m scellseg
```
If you have a GPU device and "Use GPU" in the GUI is disabled, you should check the version of "torch" and re-install the correct torch version suitable for your CUDA version (use "nvcc -V" to check your CUDA version), for example:
```
nvcc -V
pip install torch==1.7.1+cu101 -f https://download.pytorch.org/whl/cu101/torch_stable.html
```
### How to use GUI
#### **1. Annotation**
Besides the basic function of Cellpose,
a) You can modify the mask of an instance directly at the pixel level without deleting it and drawing it from scratch. Check "Edit mask" or press [E]; in this mode, you first need to select the mask you want to edit (the selected mask will be highlighted), then use right-click to add pixels and Shift+right-click to delete pixels
b) You can also take an overall look at the masks you have labelled with a list for each image; each index corresponds to an instance, which you can select and add notes to. The list can be saved and loaded next time
c) Dragging an image/mask or a folder is supported; for an image, we autoload its parent directory, and for a mask, we autoload its corresponding image and its parent directory. You can use [ctrl+←/→] to cycle through images in the current directory
d) You can save the masks in ".png" format
#### 2. Fine-tuning
a) You should prepare your data in one folder named after your experiment, like "mito-20211116"; here we call it the <b>parent folder</b>. This folder should contain a **shot subfolder** and a **query subfolder**. The shot subfolder contains images and the corresponding labelled masks; the query subfolder contains the images you want to segment. In the shot subfolder, images should be named with the **"\_img" suffix** and masks with the **"\_masks" suffix**. Apart from the suffix, the names of an image and its mask should be identical, for example 001_img and 001_masks; notably, 001_masks should not be named 001_cp_masks or 001_img_cp_masks (you should rename your masks after annotation). In the query subfolder, images should be named with the **"\_img" suffix** (a small layout-check sketch is given at the end of this section)
b) Click "Dataset path" to choose the parent folder of your dataset, such as "mito-20211116"
c) Set the channel you want to segment, you can also provide a chan2 like nuclei channel for better learning
d) Set the number of epochs you want to run; the default value is 100, which was used in our paper. You can increase the number for adequate training
e) Set the batch size according to your own GPU, the default value is 100, which was used in our paper
f) You can select different pre-trained model ("Scellseg", "Cellpose", or "Hover") and fine-tuning strategy ("contrastive fine-tuning" or "classic fine-tuning")
g) Click "Start fine-tuning" button to start fine-tuning. After fine-tuning, it will show the saved path of the model file in the bottom of display window (saved at a subfolder in parent folder named "fine-tune", mito-20211116/fine-tune")
#### 3. Inference
a) There are two modes for inference, (1) run segmentation for image in window (2) batch segmentation
b) If you want to conduct batch segmentation, click "Data path" to choose the parent folder of your dataset, such as "mito-20211116", and set an adequate batch size according to your own GPU
c) You can choose your own model file for inference, the default is the pre-trained Scellseg model file
d) The default "model match threshold" is set to 0.4 and "cellprob threshold" is set to 0.5, which was used in our paper, you can change it for better performance
e) Set the channel you want to segment; you can also provide a chan2 (e.g. a nuclei channel) for better learning. You should use the same settings as in the fine-tuning process
f) You can get each instance image after inference: click "Data path" to choose the query folder of your dataset, such as "mito-20211116/query"; the output files will be saved in a subfolder of the parent folder named "single", e.g. mito-20211116/single
### **Declaration**
Our pipeline is heavily based on [Cellpose](https://github.com/MouseLand/cellpose) and we also referred to the following projects:
Hover-Net: https://github.com/vqdang/hover_net
Attention-Unet: https://github.com/ozan-oktay/Attention-Gated-Networks
|
/scellseg-0.1.8.tar.gz/scellseg-0.1.8/README.md
| 0.500244 | 0.955734 |
README.md
|
pypi
|
.. _tutorial_input_formats:
---------------------
Source Format Details
---------------------
HDF5 Input
==========
For HDF5 input (no conversion necessary), you can do your analysis with `scanpy <http://scanpy.rtfd.io>`__ to create an anndata object ``ad``. SCelVis will use embedding coordinates from ``ad.obsm``, cell annotation from ``ad.obs`` and expression data directly from ``ad.raw.X`` (if present) or ``ad.X``; this matrix should contain normalized and log-transformed expression values for all genes and should be sparse, otherwise performance will suffer.
If present, information about the dataset will be extracted from strings stored in ``ad.uns['about_title']``, ``ad.uns['about_short_title']`` and ``ad.uns['about_readme']`` (assumed to be Markdown).
Information about marker genes will be taken from entries starting with ``marker_`` in ``ad.uns``: entries called ``marker_gene`` (required!), ``marker_cluster``, ``marker_padj``, ``marker_LFC`` will create a table with the columns ``gene``, ``cluster``, ``padj``, and ``LFC``. ``SCelVis`` will also extract marker information from ``ad.uns['rank_genes_groups']``. However, certain datatypes in ``ad.uns`` together with version mismatches of ``scanpy``, ``h5py`` and ``anndata`` can lead to ``.h5ad`` files that are not readable by ``SCelVis`` (see `issue #832 <https://github.com/theislab/scanpy/issues/832>`__). To be on the safe side, it's recommended to delete unnecessary slots in ``ad.uns`` (e.g., ``del ad.uns['rank_genes_groups']``). Also, ``ad.obsm['X_pca']``, ``ad.varm['PCs']`` and entries in ``ad.obsp`` are likely dispensable.
If you prepared your data with ``Seurat`` (v2), you can use ``Convert(from = sobj, to = "anndata", filename = "data.h5ad")`` to get an HDF5 file.
Alternatively, you can use `sceasy <https://github.com/cellgeni/sceasy>`__ to convert your objects into ``anndata`` HDF5 format.
Text Input
==========
For "raw" text input, you need to prepare a file with expression values, cell meta data and coordinates, and potentially information about this dataset as well as cluster markers.
- normalized expression values for each gene (rows) in each cell (columns) can be given either as tab-separated file (dense) or in matrix-market format:
- if your file is called ``expression.tsv.gz``, ``SCelVis`` expects a tab-separated file, e.g., like this::
. cell_1 cell_2 cell_3 ...
gene_1 0.13 0.0 1.5 ...
gene_2 0.0 3.1 0.3 ...
gene_3 0.0 0.0 0.0 ...
- if your file is called ``expression.mtx``, ``SCelVis`` expects this to be a sparse matrix-market file and additional files called ``barcodes.tsv`` (containing a list of cell names / barcodes, one per line, no header or row names) and ``genes.tsv`` (containing a list of gene names, one per line, no header or row names) to be present.
- annotations for each cell can be provided in a tab-separated file called ``annotation.tsv``, e.g., like this::
. cluster genotype ...
cell_1 cluster_1 WT ...
cell_2 cluster_2 KO ...
- embedding coordinates for each cell can be provided in a tab-separated file called ``coords.tsv``, e.g., like this::
. tSNE_1 tSNE_2 UMAP_1 UMAP_2 ...
cell_1 20.53 -10.05 3.9 2.4 ...
cell_2 -5.34 13.94 -1.3 3.4 ...
- an optional tab-separated file called ``markers.tsv`` can contain information about marker genes. It **needs** to have a column named ``gene``, e.g., like this::
gene cluster log2FC adj_pval ...
gene_1 cluster_1 3.4 1.5e-6 ...
gene_2 cluster_1 1.3 0.00004 ...
gene_3 cluster_2 2.1 5.3e-9 ...
- finally, a markdown file (e.g., ``text_input.md``) can provide information about this dataset::
----
title: An Optional Long Data Set Title
short_title: optional short title
----
A verbose description of the data in Markdown format.
Conversion to ``.h5ad`` is then performed like so:
.. code-block:: shell
$ scelvis convert --input-dir text_input --output data/text_input.h5ad --about-md text_input.md
In ``examples/dummy_raw.zip`` and ``examples/dummy_about.md`` we provide raw data for a simulated dummy dataset.
If you prepared your data with ``Seurat``, you can export to raw text like this:
.. code-block:: r
writeMM(sobj@assays$RNA@data, file = 'expression.mtx')
write.table(Cells(sobj), file = 'barcodes.tsv', col.names = FALSE, row.names = FALSE, sep = ',')
write.table(row.names(sobj@assays$RNA@data), file = 'genes.tsv', col.names = FALSE, row.names = FALSE, sep = ',')
sobj@meta.data$cluster <- paste0('cluster_', sobj@meta.data$seurat_clusters)
write.table(sobj@meta.data, file = 'annotation.tsv', sep = '\t')
write.table(sobj@reductions$tsne@cell.embeddings, file = 'coords.tsv', sep = '\t')
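If your analysis lives in a ``scanpy``/``anndata`` object instead, a rough Python sketch for writing the same files could look like this (it assumes ``ad.X`` holds the normalized expression matrix and ``ad.obsm['X_tsne']`` the embedding coordinates):

.. code-block:: python

    import pandas as pd
    import scipy.io
    import scipy.sparse

    # sparse expression matrix in matrix-market format (genes in rows, cells in columns)
    scipy.io.mmwrite("expression.mtx", scipy.sparse.csr_matrix(ad.X).T)
    pd.Series(ad.obs_names).to_csv("barcodes.tsv", index=False, header=False)
    pd.Series(ad.var_names).to_csv("genes.tsv", index=False, header=False)

    # cell annotation and embedding coordinates
    ad.obs.to_csv("annotation.tsv", sep="\t")
    pd.DataFrame(
        ad.obsm["X_tsne"], index=ad.obs_names, columns=["tSNE_1", "tSNE_2"]
    ).to_csv("coords.tsv", sep="\t")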
Loom Input
==========
For `loompy <http://loompy.org>`__ or `loomR <https://github.com/mojaveazure/loomR>`__ input, you can convert your data like this:
.. code-block:: shell
$ scelvis convert --i input.loom -m markers.tsv -a about.md -o loom_input.h5ad
if you prepared your data with ``Seurat`` (v3), you can use ``as.loom(sobj, filename = "output.loom")`` to get a ``.loom`` file and then convert to ``.h5ad`` with the above command (this is quite slow, however, and exact format specifications for ``.loom`` and ``.h5ad`` are not always compatible between versions)
CellRanger Input
================
Alternatively, the output directory of ``CellRanger`` can be used. This is the directory called ``outs`` containing either a file called ``filtered_gene_bc_matrices_h5.h5`` (version 2) or a file called ``filtered_feature_bc_matrix.h5`` (version 3), and a folder ``analysis`` with clustering, embedding and differential expression results. This will not do any further processing except log-normalization. Additionally, a markdown file can provide meta information about the dataset (see above):
.. code-block:: shell
$ mkdir -p data
$ cat <<EOF > data/cellranger.md
----
title: My Project
short_title: my_project
----
This is my project data.
EOF
$ scelvis convert --input-dir cellranger-out --output data/cellranger_input.h5ad --about-md cellranger.md
In ``examples/hgmm_1k_raw`` we provide ``CellRanger`` output for the 1k 1:1 human mouse mix.
Specifically, from the ``outs`` folder we selected
- ``filtered_feature_bc_matrix.h5``
- tSNE and PCA projections from ``analysis/tsne`` and ``analysis/pca``
- clustering from ``analysis/clustering/graphclust`` and
- markers from ``analysis/diffexp/graphclust``
``examples/hgmm_1k_about.md`` contains information about this dataset.
|
/scelvis-0.8.9.tar.gz/scelvis-0.8.9/docs_manual/tutorial_input_formats.rst
| 0.941432 | 0.726159 |
tutorial_input_formats.rst
|
pypi
|
.. _tutorial_analysis:
=================
Analysis Tutorial
=================
This tutorial describes the basics of performing the analysis of data using SCelVis.
For this tutorial, we will use the public demo instance at https://scelvis-demo.bihealth.org.
.. note::
Data to be visualized can either be uploaded into a SCelVis server or it can be defined when the SCelVis server starts.
When using a remote SCelVis server such as the public instance at `scelvis-demo.bihealth.org <https://scelvis-demo.bihealth.org>`_, you will most likely upload your data as shown below.
However, the server can also be started with the path or URL to the data.
This way, computational staff can provide predefined datasets to non-computational staff.
See :ref:`tutorial_cli` for more information.
First, download the file `pbmc.h5ad <https://files.figshare.com/18037739/pbmc.h5ad>`_, which is a published dataset of ~14000 IFN-beta treated and control PBMCs from 8 donors (`GSE96583 <https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE96583>`_; see `Kang et al. <https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE96583>`_). For a simpler dataset, you could also use `hgmm_1k.h5ad <https://github.com/bihealth/scelvis/raw/master/examples/hgmm_1k.h5ad>`_, containing data for 1000 cells from a 1:1 Mixture of Fresh Frozen Human (HEK293T) and Mouse (NIH3T3) Cells (10X v3 chemistry).
-----------
Upload Data
-----------
Then, use the menu on the top right to access the data upload screen :guilabel:`Go To --> Upload Data`.
.. figure:: figures/scelvis_goto_upload.png
:width: 80%
:align: center
Accessing the upload screen.
On the next screen click :guilabel:`Choose File`, select the *pbmc.h5ad* file, and click :guilabel:`Upload`.
The file upload will take a while and return a link to the data analysis screen shown in the next section.
.. note::
Alternatively, you can use the example data set available from the top-right menu: :guilabel:`Go To --> Data Sets --> PBMC`.
------------------------
Cell Annotation Analysis
------------------------
In the beginning of each analysis, the cell annotation screen from the figure below is shown.
.. figure:: figures/scelvis_cell_scatter.png
:width: 80%
:align: center
The cell annotation scatter plot.
1. **Analysis selection tab.**
This control allows you to switch between
About
A textual description attached to the dataset.
Cell Annotation
The cell annotation analysis screen that you are looking at.
Gene Expression
The gene expression analysis screen.
2. **Plot type selection.**
This allows you to select the plot type for the cell-based analysis.
Note that changing the plot type will change the subsequent controls.
3. **Cell filter button.**
Using this control, you can filter cells based on various properties, see Section :ref:`tutorial_cell_filtering`.
4. **Axis and color control.**
This allows you to select the dimensions to display along the horizontal and vertical axes as well as the
colouring.
5. **Differential expression button.**
This allows you to run a differential expression between two groups of cells, see Section :ref:`tutorial_cell_differential_expression`.
6. **The cell scatter plot.**
This is a dynamic scatter plot. Each dot corresponds to one cell, colored by what is selected from the :guilabel:`select coloring` list.
7. **Plot controls.**
When you move your mouse cursor over the plot then some buttons will appear on the top right.
These are explained in detail in Section :ref:`tutorial_plot_ui_controls` together with some other tricks.
8. **Download data for this plot.**
Download a CSV file with the data for reproducing the plot outside of SCelVis.
Scatter Plot
============
Usually, you would choose embedding coordinates (e.g., tSNE_1 and tSNE_2) for :guilabel:`select x axis` and :guilabel:`select y axis` to create a standard tSNE or UMAP plot. :guilabel:`select coloring` allows you to color cells by different cell annotation attributes, e.g., *cluster* for the cluster annotation or *n_counts* for the # of UMIs per cell. The available choices depend on how the dataset was preprocessed. Categorical variables will be shown with a discrete color scale, numerical variables with a gradient.
Alternatively, you could also plot, e.g., # of UMIs vs. # of genes for QC.
Violin and Box Plot
===================
When selecting :guilabel:`violin plot` or :guilabel:`box plot` in the :guilabel:`select plot type` control you can draw violin or box plots. For example, selecting *n_counts* and *n_genes* in :guilabel:`select variable(s)` and *orig.ident* in :guilabel:`select coloring` will display the *n_genes* value distribution in the upper panel, and the *n_counts* value distribution in the lower panel for the individual samples of this dataset.
You can use the :guilabel:`select split` list to select whether you want to further split the grouping, e.g., by cluster identity. Hovering the mouse over the violin or box shapes will show you various statistical summaries of the distribution.
Bar Plots
=========
With :guilabel:`bar plot`, you can display summary statistics, such as the number of cells per cluster by selecting *cluster* in :guilabel:`select grouping`. With :guilabel:`select split`, you can further investigate how clusters are populated in the different samples or the different donors. Checking :guilabel:`normalized` will switch from cell numbers to fractions, :guilabel:`stacked` will use stacked bars.
------------------------
Gene Expression Analysis
------------------------
When clicking the :guilabel:`Gene Expression` tab, you can investigate gene expression.
As for the :guilabel:`Cell Annotation` analysis, it starts with the :guilabel:`scatter plot` type in **(1)**. The main difference is that all plot types will use the same list of genes selected in :guilabel:`select gene(s)` in **(4)**.
.. figure:: figures/scelvis_gene_scatter.png
:width: 80%
:align: center
The gene expression scatter plot for a monocyte (CCL2) and a T cell marker (SELL).
1. **Plot type selection.**
This allows you to select the plot type for the gene expression analysis.
Note that changing the plot type will change the subsequent controls.
2. **Cell filter button.**
Using this control, you can filter cells based on various properties, see Section :ref:`tutorial_cell_filtering`.
3. **Axis control.**
This allows you to select the dimensions to display along the horizontal and vertical axes.
4. **Selecting genes.**
Select one or multiple genes from this list or enter them by hand.
5. **Show tables.**
Check these boxes to display tables with log2-fold change, p-values and other information for marker genes or differential expression results (if available).
6. **Table selection.**
Genes can be selected from the table by checking the boxes to the left and clicking :guilabel:`use selected genes` to add them to the list.
Scatter Plot
============
Scatter plots for one or multiple genes will be shown in a grid, with expression values rescaled to the same range.
Violin or Box Plot
==================
Violin and Box plots will show one gene per row, with one violin or box per category selected in :guilabel:`select grouping`, or multiple violins/boxes if :guilabel:`select split` is used.
Dot Plot
========
Dot plots summarise gene expression by size (fraction of expressing cells) and color (expression value), with genes in columns and groups (use :guilabel:`select grouping`) in rows. Dots can be subdivided using :guilabel:`select split`.
.. _tutorial_plot_ui_controls:
Plot Interface Controls
=======================
.. figure:: figures/scelvis_movie_plot_controls.gif
:width: 80%
:align: center
A short demonstration of the plot controls in SCelVis.
The buttons on the top right of the plot (standard features of the `Plotly <https://plot.ly/>`_ library) are as follows:
Save plot as image
The plot will be saved in PNG format.
Zoom
After clicking this button, you can select a rectangular area to zoom into.
Pan
Drag and drop the drawing area to move around in the plot.
Box Select, Lasso Select
After clicking this button, you can select a rectangular area on the plot or draw a free-form shape to select dots. This will be useful for differential expression analysis.
Zoom In / Zoom Out
Zoom into or out of plot.
Autoscale / Reset Axes
This will reset the scaling to the original area.
You can obtain the same behaviour by double-clicking on a white spot in the plot.
Toggle Spike Lines
Enable horizontal and vertical lines when hovering over a point.
Show Closest / Compare Data on Hover
Change the spike lines behaviour.
Note that you can enable/disable individual groups by clicking their label in the legend.
You can disable all but one group by double-clicking the label.
.. _tutorial_cell_filtering:
Cell Filtering
==============
.. figure:: figures/scelvis_movie_filtering.gif
:width: 80%
:align: center
A short demonstration of cell filtering in SCelVis.
The cell filtering works as follows:
1. Click the :guilabel:`filter cells` button to open the control panel.
2. Select a criterion by which cells should be filtered. Depending on how the data were preprocessed, the list will include cluster annotation, # of UMIs or genes per cell, etc. It is also possible to filter cells by expression of genes.
3. For categorical variables (e.g., cluster identity), checkboxes will appear and specific clusters can be (un)checked in order to include or exclude them from the analysis. For numerical variables (e.g., n_counts or gene expression), a range slider will appear: move the big circles inwards to remove cells outside the selected range.
4. Hit :guilabel:`update plot` to apply these filters to the current plot.
5. Filters will be combined with AND logic; active filters are listed above the :guilabel:`update plot` button.
6. Click :guilabel:`reset filters` to reset all filters and :guilabel:`update plot` to include all cells in the current plot.
7. Note that current filter criteria will be applied to all subsequent plots of the current dataset, both in the :guilabel:`Cell Annotation` and the :guilabel:`Gene Expression` tabs.
.. _tutorial_cell_differential_expression:
Differential Expression Analysis
================================
.. figure:: figures/scelvis_movie_differential_expression.gif
:width: 80%
:align: center
A short demonstration of differential expression analysis in SCelVis.
The differential expression analysis is available only when a scatter plot is displayed in the :guilabel:`Cell Annotation` tab. It works as follows:
1. Click the :guilabel:`differential expression` button, opening the controls panel.
2. Then, use either the box or lasso select tool of the plot for selecting cells in the scatter plot.
For example, click the lasso select button at the top right of the plot.
Move your mouse cursor to the position you want to start selecting at.
Keep the left mouse button pressed and draw a shape around the cells that you are interested in.
Release the mouse button, then click :guilabel:`group A`.
3. Repeat step 2 but click :guilabel:`group B`.
4. Click :guilabel:`run` to perform the analysis.
The result could read something like *200 DE genes at 5% FDR* (a maximum of 200 genes will be displayed).
You can click :guilabel:`view groups` to show the groups in the scatter plot, or click :guilabel:`table` to see the resulting DE genes in the :guilabel:`Gene Expression` tab table.
You can also download the :guilabel:`results` or the :guilabel:`parameters` that were used for the DE gene analysis.
Clicking :guilabel:`reset` allows you to start a new DE gene analysis.
-------
The End
-------
This is the end of the data analysis tutorial.
You might want to learn about the conversion of data into the HDF5 format next by reading Section :ref:`tutorial_convert`.
|
/scelvis-0.8.9.tar.gz/scelvis-0.8.9/docs_manual/tutorial_analysis.rst
| 0.963412 | 0.930015 |
tutorial_analysis.rst
|
pypi
|
import calendar
import logging
import os
import pandas as pd
from reegis import config as cfg
from reegis import demand_elec
from reegis import demand_heat
def get_heat_profiles_deflex(
deflex_geo, year, time_index=None, weather_year=None, keep_unit=False
):
"""
Parameters
----------
year
deflex_geo
time_index
weather_year
keep_unit
Returns
-------
"""
# separate_regions=keep all demand connected to the region
separate_regions = cfg.get_list("creator", "separate_heat_regions")
    # Add lower- and upper-case variants so the matching is not case sensitive
separate_regions = [x.upper() for x in separate_regions] + [
x.lower() for x in separate_regions
]
# add second fuel to first
# combine_fuels = cfg.get_dict("combine_heat_fuels")
combine_fuels = {"natural gas": "gas"}
# fuels to be dissolved per region
region_fuels = cfg.get_list("creator", "local_fuels")
fn = os.path.join(
cfg.get("paths", "demand"),
"heat_profiles_{year}_{map}".format(year=year, map=deflex_geo.name),
)
demand_region = (
demand_heat.get_heat_profiles_by_region(
deflex_geo, year, to_csv=fn, weather_year=weather_year
)
.groupby(level=[0, 1], axis=1)
.sum()
)
# Decentralised demand is combined to a nation-wide demand if not part
# of region_fuels.
regions = list(
set(demand_region.columns.get_level_values(0).unique())
- set(separate_regions)
)
# If region_fuels is 'all' fetch all fuels to be local.
if "all" in region_fuels:
region_fuels = demand_region.columns.get_level_values(1).unique()
for fuel in demand_region.columns.get_level_values(1).unique():
demand_region["DE", fuel] = 0
for region in regions:
for f1, f2 in combine_fuels.items():
demand_region[region, f1] += demand_region[region, f2]
demand_region.drop((region, f2), axis=1, inplace=True)
cols = list(set(demand_region[region].columns) - set(region_fuels))
for col in cols:
demand_region["DE", col] += demand_region[region, col]
demand_region.drop((region, col), axis=1, inplace=True)
if time_index is not None:
demand_region.index = time_index
if not keep_unit:
msg = (
"The unit of the source is 'TJ'. "
"Will be divided by {0} to get 'MWh'."
)
converter = 0.0036
demand_region = demand_region.div(converter)
logging.debug(msg.format(converter))
demand_region.sort_index(1, inplace=True)
for c in demand_region.columns:
if demand_region[c].sum() == 0:
demand_region.drop(c, axis=1, inplace=True)
return demand_region
def scenario_demand(regions, year, name, opsd_version=None, weather_year=None):
"""
Parameters
----------
regions
year
name
opsd_version
weather_year
Returns
-------
Examples
--------
>>> from reegis import geometries # doctest: +SKIP
>>> fs=geometries.get_federal_states_polygon() # doctest: +SKIP
>>> my_demand=scenario_demand(regions, 2014, "de21") # doctest: +SKIP
>>> int(my_demand["DE01", "district heating"].sum()) # doctest: +SKIP
18639262
>>> int(my_demand["DE05", "all"].sum()) # doctest: +SKIP
10069304
"""
demand_series = {
"electricity demand series": scenario_elec_demand(
pd.DataFrame(),
regions,
year,
name,
weather_year=weather_year,
version=opsd_version,
)
}
if cfg.get("creator", "heat"):
demand_series["heat demand series"] = scenario_heat_demand(
regions, year, weather_year=weather_year
).reset_index(drop=True)
return demand_series
def scenario_heat_demand(regions, year, weather_year=None):
"""
Parameters
----------
regions
year
weather_year
Returns
-------
"""
return get_heat_profiles_deflex(
regions, year, weather_year=weather_year
).sort_index(1)
def scenario_elec_demand(
table, regions, year, name, version=None, weather_year=None
):
"""
Parameters
----------
table
regions
year
name
weather_year
Returns
-------
"""
if weather_year is None:
demand_year = year
else:
demand_year = weather_year
df = demand_elec.get_entsoe_profile_by_region(
regions, demand_year, name, annual_demand="bmwi", version=version
)
df = pd.concat([df], axis=1, keys=["all"]).swaplevel(0, 1, 1)
df = df.reset_index(drop=True)
if not calendar.isleap(year) and len(df) > 8760:
df = df.iloc[:8760]
return pd.concat([table, df], axis=1).sort_index(1)
if __name__ == "__main__":
pass
|
/scenario-builder-0.0.2.tar.gz/scenario-builder-0.0.2/src/scenario_builder/demand.py
| 0.767603 | 0.419767 |
demand.py
|
pypi
|
import logging
from warnings import warn
import pandas as pd
from reegis import commodity_sources
from reegis import config as cfg
from scenario_builder import data
def scenario_commodity_sources(year):
"""
Parameters
----------
year
Returns
-------
Examples
--------
>>> from reegis import geometries
>>> from scenario_builder import powerplants
>>> fs=geometries.get_federal_states_polygon() # doctest: +SKIP
>>> pp=powerplants.scenario_powerplants(dict(), fs, 2014, "federal_states"
... ) # doctest: +SKIP
>>> src=scenario_commodity_sources(pp) # doctest: +SKIP
>>> round(src.loc[("DE", "hard coal"), "costs"], 2) # doctest: +SKIP
12.53
>>> round(src.loc[("DE", "natural gas"), "emission"], 2) # doctest: +SKIP
201.0
"""
if cfg.get("creator", "costs_source") == "reegis":
commodity_src = create_commodity_sources_reegis(year)
elif cfg.get("creator", "costs_source") == "ewi":
commodity_src = create_commodity_sources_ewi()
else:
commodity_src = None
# Add region level to be consistent to other tables
commodity_src.index = pd.MultiIndex.from_product(
[["DE"], commodity_src.index]
)
if cfg.get("creator", "use_CO2_costs") is False:
commodity_src["co2_price"] = 0
commodity_src["annual limit"] = "inf"
return commodity_src
def create_commodity_sources_ewi():
"""
Returns
-------
"""
ewi = data.get_ewi_data()
df = pd.DataFrame()
df["costs"] = ewi.fuel_costs["value"] + ewi.transport_costs["value"]
df["emission"] = ewi.emission["value"].multiply(1000)
df["co2_price"] = float(ewi.co2_price["value"])
missing = "bioenergy"
msg = (
"Costs/Emission for {0} in ewi is missing.\n"
"Values for {0} are hard coded! Use with care."
)
warn(msg.format(missing), UserWarning)
df.loc[missing, "emission"] = 7.2
df.loc[missing, "costs"] = 20
df.loc[missing, "co2_price"] = df.loc["natural gas", "co2_price"]
return df
def create_commodity_sources_reegis(year, use_znes_2014=True):
"""
Parameters
----------
year
use_znes_2014
Returns
-------
"""
msg = (
"The unit for {0} of the source is '{1}'. "
"Will multiply it with {2} to get '{3}'."
)
converter = {
"costs": ["costs", "EUR/J", 1e9 * 3.6, "EUR/MWh"],
"emission": ["emission", "g/J", 1e6 * 3.6, "kg/MWh"],
}
cs = commodity_sources.get_commodity_sources()
rename_cols = {
key.lower(): value
for key, value in cfg.get_dict("source_names").items()
}
cs = cs.rename(columns=rename_cols)
cs_year = cs.loc[year]
if use_znes_2014:
before = len(cs_year[cs_year.isnull()])
cs_year = cs_year.fillna(cs.loc[2014])
after = len(cs_year[cs_year.isnull()])
if before - after > 0:
logging.warning("Values were replaced with znes2014 data.")
cs_year = cs_year.sort_index().unstack()
# convert units
for key in converter.keys():
cs_year[key] = cs_year[key].multiply(converter[key][2])
logging.warning(msg.format(*converter[key]))
return cs_year
|
/scenario-builder-0.0.2.tar.gz/scenario-builder-0.0.2/src/scenario_builder/commodity.py
| 0.797517 | 0.396477 |
commodity.py
|
pypi
|
import calendar
import configparser
import pandas as pd
from reegis import config as cfg
from reegis import mobility
def scenario_mobility(year, table):
"""
Parameters
----------
year
table
Returns
-------
Examples
--------
>>> my_table = scenario_mobility(2015, {})
>>> my_table["mobility_mileage"]["DE"].sum()
diesel 3.769021e+11
petrol 3.272263e+11
other 1.334462e+10
dtype: float64
>>> my_table["mobility_spec_demand"]["DE"].loc["passenger car"]
diesel 0.067
petrol 0.079
other 0.000
Name: passenger car, dtype: float64
>>> my_table["mobility_energy_content"]["DE"]["diesel"]
energy_per_liter [MJ/l] 34.7
Name: diesel, dtype: float64
"""
if calendar.isleap(year):
hours_of_the_year = 8784
else:
hours_of_the_year = 8760
try:
other = cfg.get("creator", "mobility_other")
except configparser.NoSectionError:
other = cfg.get("general", "mobility_other")
mobility_mileage = mobility.get_mileage_by_type_and_fuel(year)
# fetch table of specific demand by fuel and vehicle type (from 2011)
mobility_spec_demand = (
pd.DataFrame(
cfg.get_dict_list("fuel consumption"),
index=["diesel", "petrol", "other"],
)
.astype(float)
.transpose()
)
mobility_spec_demand["other"] = mobility_spec_demand[other]
fuel_usage = mobility_spec_demand.mul(mobility_mileage).sum()
# fetch the energy content of the different fuel types
mobility_energy_content = pd.DataFrame(
cfg.get_dict("energy_per_liter"), index=["energy_per_liter [MJ/l]"]
)[["diesel", "petrol", "other"]]
mobility_energy_content["other"] = mobility_energy_content[other]
    # Convert to MW????? PLEASE BE EXACT!!!
energy_usage = fuel_usage.mul(mobility_energy_content).div(3600)
s = energy_usage.div(hours_of_the_year).transpose()[
"energy_per_liter [MJ/l]"
]
table["mobility demand series"] = pd.DataFrame(
index=range(hours_of_the_year), columns=energy_usage.columns
).fillna(1)
table["mobility demand series"] = table["mobility demand series"].mul(
s, axis=1
)
table["mobility demand series"][other] += table["mobility demand series"][
"other"
]
table["mobility demand series"].drop("other", axis=1, inplace=True)
table["mobility demand series"] = (
table["mobility demand series"].astype(float).round().astype(int)
)
table["mobility"] = pd.DataFrame(
index=["diesel", "petrol", "electricity"],
columns=["efficiency", "source", "source region"],
)
for col in table["mobility"].columns:
for idx in table["mobility"].index:
section = "mobility: " + idx
table["mobility"].loc[idx, col] = cfg.get(section, col)
# Add "DE" as region level to be consistent to other tables
table["mobility"].index = pd.MultiIndex.from_product(
[["DE"], table["mobility"].index]
)
table["mobility demand series"].columns = pd.MultiIndex.from_product(
[["DE"], table["mobility demand series"].columns]
)
return table
|
/scenario-builder-0.0.2.tar.gz/scenario-builder-0.0.2/src/scenario_builder/mobility.py
| 0.638948 | 0.354126 |
mobility.py
|
pypi
|
import datetime
import numpy as np
import pandas as pd
import numpyro
import numpyro.distributions as dist
from numpyro.contrib.control_flow import scan
from numpyro.infer import MCMC, NUTS, Predictive
import jax.numpy as jnp
import jax.random as random
import scenario_generator.utils as u
# https://num.pyro.ai/en/stable/tutorials/time_series_forecasting.html
def sgt(y, seasonality, future=0):
"""
    Seasonal, Global Trend (SGT) time-series model, adapted from the numpyro
    time-series forecasting tutorial linked above.
"""
# heuristically, standard derivation of Cauchy prior depends on
# the max value of data
cauchy_sd = jnp.max(y) / 150
# NB: priors' parameters are taken from
# https://github.com/cbergmeir/Rlgt/blob/master/Rlgt/R/rlgtcontrol.R
nu = numpyro.sample("nu", dist.Uniform(2, 20))
powx = numpyro.sample("powx", dist.Uniform(0, 1))
sigma = numpyro.sample("sigma", dist.HalfCauchy(cauchy_sd))
offset_sigma = numpyro.sample(
"offset_sigma", dist.TruncatedCauchy(low=1e-10, loc=1e-10, scale=cauchy_sd)
)
coef_trend = numpyro.sample("coef_trend", dist.Cauchy(0, cauchy_sd))
pow_trend_beta = numpyro.sample("pow_trend_beta", dist.Beta(1, 1))
# pow_trend takes values from -0.5 to 1
pow_trend = 1.5 * pow_trend_beta - 0.5
pow_season = numpyro.sample("pow_season", dist.Beta(1, 1))
level_sm = numpyro.sample("level_sm", dist.Beta(1, 2))
s_sm = numpyro.sample("s_sm", dist.Uniform(0, 1))
init_s = numpyro.sample("init_s", dist.Cauchy(0, y[:seasonality] * 0.3))
def transition_fn(carry, t):
level, s, moving_sum = carry
season = s[0] * level**pow_season
exp_val = level + coef_trend * level**pow_trend + season
exp_val = jnp.clip(exp_val, a_min=0)
        # use the expected value when forecasting
y_t = jnp.where(t >= N, exp_val, y[t])
moving_sum = (
moving_sum + y[t] - jnp.where(t >= seasonality, y[t - seasonality], 0.0)
)
level_p = jnp.where(t >= seasonality, moving_sum / seasonality, y_t - season)
level = level_sm * level_p + (1 - level_sm) * level
level = jnp.clip(level, a_min=0)
new_s = (s_sm * (y_t - level) / season + (1 - s_sm)) * s[0]
# repeat s when forecasting
new_s = jnp.where(t >= N, s[0], new_s)
s = jnp.concatenate([s[1:], new_s[None]], axis=0)
omega = sigma * exp_val**powx + offset_sigma
y_ = numpyro.sample("y", dist.StudentT(nu, exp_val, omega))
return (level, s, moving_sum), y_
N = y.shape[0]
level_init = y[0]
s_init = jnp.concatenate([init_s[1:], init_s[:1]], axis=0)
moving_sum = level_init
with numpyro.handlers.condition(data={"y": y[1:]}):
_, ys = scan(
transition_fn, (level_init, s_init, moving_sum), jnp.arange(1, N + future)
)
if future > 0:
numpyro.deterministic("y_forecast", ys[-future:])
def mcmc_predict(y_train: jnp.array,
forecast_length: int,
num_warmup_mcmc: int = 500,
num_samples_mcmc: int = 100,
seasonality_mcmc: int = 1000,
num_chains_mcmc: int = 2,
verbose: bool = True):
kernel = NUTS(sgt)
mcmc = MCMC(kernel, num_warmup=num_warmup_mcmc, num_samples=num_samples_mcmc, num_chains=num_chains_mcmc)
mcmc.run(random.PRNGKey(0), y_train, seasonality=seasonality_mcmc)
if verbose:
mcmc.print_summary()
samples = mcmc.get_samples()
predictive = Predictive(sgt, samples, return_sites=["y_forecast"])
predict_dist = predictive(random.PRNGKey(1), y_train, seasonality=seasonality_mcmc, future=forecast_length)[
"y_forecast"
]
return predict_dist
def forecast_rb_onboard_power(train_start_date: datetime.date,
train_end_date: datetime.date,
forecast_length: int,
num_warmup_mcmc: int = 500,
num_samples_mcmc: int = 100,
seasonality_mcmc: int = 1000,
num_chains_mcmc: int = 2):
u.sanity_check_date(train_start_date, err_msg="Specified train_start_date is after today!")
u.sanity_check_date(train_end_date, err_msg="Specified train_end_date is after today!")
x, y = u.get_historical_daily_onboarded_power(train_start_date, train_end_date)
y_train = jnp.array(y)
u.err_check_train_data(y_train)
# y_scale = y_train.max()
y_scale = 1
rb_onboard_power_pred = mcmc_predict(y_train/y_scale, forecast_length,
num_warmup_mcmc=num_warmup_mcmc,
num_samples_mcmc=num_samples_mcmc,
seasonality_mcmc=seasonality_mcmc,
num_chains_mcmc=num_chains_mcmc)
rb_onboard_power_pred *= y_scale
forecast_start_date = train_end_date + datetime.timedelta(days=1)
forecast_date_vec = u.make_forecast_date_vec(forecast_start_date, forecast_length)
return forecast_date_vec, rb_onboard_power_pred, x, y
"""
TODO: check on this, for now we use offline data
####### forecast renewal rate
sector_expirations_df = query_starboard_sector_expirations(start_date, current_date)
df_extend_subset = sector_expirations_df.copy()
### predict extensions
y_train_extend = jnp.clip(jnp.array(df_extend_subset['extended_rb'].values), a_min=0.01, a_max=None)[-num_days_train:]
forecast_extend = mcmc_predict(y_train_extend, forecast_length)
####### forecast expirations
y_train_expire = jnp.clip(jnp.array(df_extend_subset['expired_rb'].values), a_min=0.01, a_max=None)[-num_days_train:]
forecast_expire = mcmc_predict(y_train_expire, forecast_length)
"""
def forecast_extensions(train_start_date: datetime.date,
train_end_date: datetime.date,
forecast_length: int,
num_warmup_mcmc: int = 500,
num_samples_mcmc: int = 100,
seasonality_mcmc: int = 1000,
num_chains_mcmc: int = 2):
u.sanity_check_date(train_start_date, err_msg="Specified train_start_date is after today!")
u.sanity_check_date(train_end_date, err_msg="Specified train_end_date is after today!")
x, y = u.get_historical_extensions(train_start_date, train_end_date)
y_train = jnp.array(y)
u.err_check_train_data(y_train)
extensions_pred = mcmc_predict(y_train, forecast_length,
num_warmup_mcmc=num_warmup_mcmc,
num_samples_mcmc=num_samples_mcmc,
seasonality_mcmc=seasonality_mcmc,
num_chains_mcmc=num_chains_mcmc)
forecast_start_date = train_end_date + datetime.timedelta(days=1)
forecast_date_vec = u.make_forecast_date_vec(forecast_start_date, forecast_length)
return forecast_date_vec, extensions_pred, x, y
def forecast_expirations(train_start_date: datetime.date,
train_end_date: datetime.date,
forecast_length: int,
num_warmup_mcmc: int = 500,
num_samples_mcmc: int = 100,
seasonality_mcmc: int = 1000,
num_chains_mcmc: int = 2):
u.sanity_check_date(train_start_date, err_msg="Specified train_start_date is after today!")
u.sanity_check_date(train_end_date, err_msg="Specified train_end_date is after today!")
x, y = u.get_historical_expirations(train_start_date, train_end_date)
y_train = jnp.array(y)
u.err_check_train_data(y_train)
expire_pred = mcmc_predict(y_train, forecast_length,
num_warmup_mcmc=num_warmup_mcmc,
num_samples_mcmc=num_samples_mcmc,
seasonality_mcmc=seasonality_mcmc,
num_chains_mcmc=num_chains_mcmc)
forecast_start_date = train_end_date + datetime.timedelta(days=1)
forecast_date_vec = u.make_forecast_date_vec(forecast_start_date, forecast_length)
return forecast_date_vec, expire_pred, x, y
def forecast_renewal_rate(train_start_date: datetime.date,
train_end_date: datetime.date,
forecast_length: int,
num_warmup_mcmc: int = 500,
num_samples_mcmc: int = 100,
seasonality_mcmc: int = 1000,
num_chains_mcmc: int = 2):
u.sanity_check_date(train_start_date, err_msg="Specified train_start_date is after today!")
u.sanity_check_date(train_end_date, err_msg="Specified train_end_date is after today!")
forecast_date_vec, extensions_pred, x_extend, y_extend = forecast_extensions(train_start_date,
train_end_date,
forecast_length,
num_warmup_mcmc = num_warmup_mcmc,
num_samples_mcmc = num_samples_mcmc,
seasonality_mcmc = seasonality_mcmc,
num_chains_mcmc = num_chains_mcmc)
_, expire_pred, x_expire, y_expire = forecast_expirations(train_start_date,
train_end_date,
forecast_length,
num_warmup_mcmc = num_warmup_mcmc,
num_samples_mcmc = num_samples_mcmc,
seasonality_mcmc = seasonality_mcmc,
num_chains_mcmc = num_chains_mcmc)
if not x_extend.equals(x_expire):
raise ValueError("Unable to get the same amount of data for extensions and expirations!")
renewal_rate_historical = y_extend / (y_extend + y_expire)
renewal_rate_pred = extensions_pred / (extensions_pred + expire_pred)
return forecast_date_vec, renewal_rate_pred, x_extend, renewal_rate_historical
def forecast_filplus_rate(train_start_date: datetime.date,
train_end_date: datetime.date,
forecast_length: int,
num_warmup_mcmc: int = 500,
num_samples_mcmc: int = 100,
seasonality_mcmc: int = 1000,
num_chains_mcmc: int = 2):
"""
1. forecast deal_onboard --> deal_onboard_dist
2. find cc_onboard = rawbyte_onboard - deal_onboard
3. forecast cc_onboard --> cc_onboard_dist
4. find fil_plus_rate_dist = deal_onboard_dist / (cc_onboard_dist + deal_onboard_dist)
"""
u.sanity_check_date(train_start_date, err_msg="Specified train_start_date is after today!")
u.sanity_check_date(train_end_date, err_msg="Specified train_end_date is after today!")
x_deal_onboard_train, y = u.get_historical_deals_onboard(train_start_date, train_end_date)
y_deal_onboard_train = jnp.array(y)
u.err_check_train_data(y_deal_onboard_train)
x_rb_onboard_train, y_rb_onboard_train = \
u.get_historical_daily_onboarded_power(train_start_date, train_end_date)
train_start_date = pd.to_datetime(max(x_deal_onboard_train.values[0], x_rb_onboard_train.values[0]))
train_end_date = pd.to_datetime(min(x_deal_onboard_train.values[-1], x_rb_onboard_train.values[-1]))
ii_start = np.where(train_start_date==x_rb_onboard_train.values)[0][0]
ii_end = np.where(train_end_date==x_rb_onboard_train.values)[0][0]
x_rb_onboard_train = x_rb_onboard_train[ii_start:ii_end]
y_rb_onboard_train = y_rb_onboard_train[ii_start:ii_end]
ii_start = np.where(train_start_date==x_deal_onboard_train.values)[0][0]
ii_end = np.where(train_end_date==x_deal_onboard_train.values)[0][0]
x_deal_onboard_train = x_deal_onboard_train[ii_start:ii_end]
y_deal_onboard_train = y_deal_onboard_train[ii_start:ii_end]
# y_deal_onboard_scale = y_deal_onboard_train.max()
y_deal_onboard_scale = 1
deal_onboard_pred = mcmc_predict(y_deal_onboard_train/y_deal_onboard_scale, forecast_length,
num_warmup_mcmc=num_warmup_mcmc,
num_samples_mcmc=num_samples_mcmc,
seasonality_mcmc=seasonality_mcmc,
num_chains_mcmc=num_chains_mcmc)
forecast_start_date = train_end_date + datetime.timedelta(days=1)
forecast_date_vec = u.make_forecast_date_vec(forecast_start_date, forecast_length)
deal_onboard_pred *= y_deal_onboard_scale
y_cc_onboard_train = jnp.array(y_rb_onboard_train - y_deal_onboard_train)
# y_cc_onboard_scale = y_cc_onboard_train.max()
y_cc_onboard_scale = 1
cc_onboard_pred = mcmc_predict(y_cc_onboard_train/y_cc_onboard_scale, forecast_length,
num_warmup_mcmc=num_warmup_mcmc,
num_samples_mcmc=num_samples_mcmc,
seasonality_mcmc=seasonality_mcmc,
num_chains_mcmc=num_chains_mcmc)
cc_onboard_pred *= y_cc_onboard_scale
xx = x_rb_onboard_train
yy = y_deal_onboard_train / (y_cc_onboard_train + y_deal_onboard_train)
filplus_rate_pred = deal_onboard_pred / (cc_onboard_pred + deal_onboard_pred)
return forecast_date_vec, filplus_rate_pred, xx, yy
|
/scenario_generator-0.1.0.tar.gz/scenario_generator-0.1.0/scenario_generator/mcmc_forecast.py
| 0.549641 | 0.509764 |
mcmc_forecast.py
|
pypi
|
# Scenario Gym - a scenario-centric lightweight simulator


### Accepted to [International Conference on Intelligent Transport Systems 2023](https://2023.ieee-itsc.org/)
Scenario Gym will be presented at the special session Simulation Verification of Autonomous Driving Technologies at ITSC 2023.
## Overview
Scenario Gym is a universal autonomous driving simulation tool that allows fast execution of unconfined, complex scenarios containing a range of road users. It allows rich insight via customised metrics and includes a framework for designing intelligent agents for reactive simulation. It can be used for a variety of tasks relevant to AV development, such as agent modelling, controller parameter tuning and deep reinforcement learning.
<p align="center">
<img src="https://raw.githubusercontent.com/driskai/scenario_gym/main/docs/source/_static/gym-ex1.gif" width="20%" />
<img src="https://raw.githubusercontent.com/driskai/scenario_gym/main/docs/source/_static/gym-ex2.gif" width="20%" />
<img src="https://raw.githubusercontent.com/driskai/scenario_gym/main/docs/source/_static/gym-ex3.gif" width="20%" />
</p>
## Overview
Scenario Gym defines a flexible in-memory scenario representation that is compatible with the OpenSCENARIO description language and OpenDRIVE road network representation. Entities can adopt predefined trajectories, or control themselves intelligently with a high-level goal (e.g. reach a target position) or via a complex trained policy. Scenarios are simulated synchronously in discrete time steps within which each agent selects an action and the pose of each entity is updated before moving to the next step.
Intelligent agents interact with the environment through a simple sensor-agent-controller architecture. This streamlines the agent design by splitting it into three components that emulate the design of autonomous agent systems. The sensor component produces a logical observation for the agent from the current global state of the environment. The agent then selects an action and passes it to the controller. The controller manages the physical model of the agent e.g. converting steering and acceleration commands into a new pose. This modular architecture provides reusability and quick iteration of agent designs, not only for vehicular agents but also pedestrians, bicycles and other entity types.
Custom metrics can be implemented to allow quick and specific yet comprehensive insights. Through the scenario representation these can be constructed to efficiently track statistics such as speeds and distances, to record events such as collisions and near misses or to capture more compound measures such as safe distances and risk measures.
<p align="center">
<img src="https://raw.githubusercontent.com/driskai/scenario_gym/main/docs/source/_static/system_overview.svg" width="80%">
</p>
## Installation
Install with `pip`:
```
pip install scenario_gym
```
To install extras for specific integrations or development requirements:
```
pip install scenario_gym[extra]
```
## Getting started
To run a scenario in OpenSCENARIO format:
```python
from scenario_gym import ScenarioGym
gym = ScenarioGym()
gym.load_scenario("path_to_xosc")
gym.rollout()
```
Or load a scenario directly into memory:
```python
from scenario_gym.xosc_interface import import_scenario
scenario = import_scenario("path_to_xosc")
scenario.plot()
```
Several example scenarios are given in the `tests/input_files/Scenarios` directory.
## Intelligent Agents
Agents are defined by a subclass of `Agent` as well as a `Sensor` and a `Controller`. They implement the `_step` method to produce actions from the observations; these actions are then passed to the controller.
```python
from scenario_gym import Agent
from scenario_gym.sensor import RasterizedMapSensor
from scenario_gym.controller import VehicleController
class ExampleAgent(Agent):
def __init__(self, entity):
controller = VehicleController(entity)
        sensor = RasterizedMapSensor(entity)
super().__init__(
entity,
controller,
sensor,
)
def _step(self, observation):
action = ...
return action
```
To run scenarios with intelligent agents we just define a `create_agent` method which will assign agents to each entity in the scenario. This is passed to the gym instance when loading a scenario. The function must take arguments `scenario` and `entity` and optionally return agents. If an agent is not returned for an entity then the entity will simply follow its predefined trajectory. For example, here we use the `ExampleAgent` implemented above for the ego only:
```python
def create_agent(scenario, entity):
if entity.ref == "ego":
return ExampleAgent(entity)
gym.load_scenario("path_to_xosc", create_agent=create_agent)
gym.rollout()
```
## Metrics
To track performance statistics or record events the `Metric` class can be used. Metrics implement the `_reset` and `_step` methods to maintain an internal state across the scenario and the `get_state` method to return their recorded data. A selection of metrics is already implemented and can be run by passing them to the `ScenarioGym`:
```python
from scenario_gym.metrics import CollisionMetric, EgoAvgSpeed
gym = ScenarioGym(metrics=[CollisionMetric(), EgoAvgSpeed()])
gym.load_scenario("path_to_xosc")
gym.rollout()
gym.get_metrics()
```
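A custom metric could look something like the sketch below. The `_reset`/`_step`/`get_state` method names follow the description above, but the base-class import path, the method signatures and the way the ego entity is looked up are assumptions and may differ from the actual API:

```python
import numpy as np

from scenario_gym.metrics import Metric  # assumed import path for the base class


class EgoMaxSpeed(Metric):
    """Track the maximum speed reached by the ego entity (illustrative sketch only)."""

    def _reset(self, state):
        # called once at the start of each scenario rollout
        self.ego = next(e for e in state.scenario.entities if e.ref == "ego")  # assumption
        self.max_speed = 0.0

    def _step(self, state):
        # called after every simulation step
        speed = float(np.linalg.norm(state.velocities[self.ego][:2]))
        self.max_speed = max(self.max_speed, speed)

    def get_state(self) -> float:
        # value reported by gym.get_metrics()
        return self.max_speed
```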
## Deep reinforcement learning
For reinforcement learning applications Scenario Gym supports an OpenAI Gym compatible implementation. When creating the environment we need to specify the observation and action spaces used by the ego agent as well as our `create_agent` function. The observation from the ego agent's sensor will be returned by the environment and the action passed to `step` will be passed to the agent's controller.
```python
from scenario_gym.integrations.openaigym import ScenarioGym
env = ScenarioGym(
observation_space=observation_space,
action_space=action_space,
create_agent=create_agent,
)
obs = env.reset()
action = model(obs)
obs, reward, done, info = env.step(action)
```
For more code examples please see the `examples` directory.
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/README.md
| 0.785966 | 0.994677 |
README.md
|
pypi
|
from collections import OrderedDict
from dataclasses import dataclass
from typing import Any, Optional, Tuple, Type
import numpy as np
from scenario_gym.entity import Entity
@dataclass
class Observation:
"""Base class for an observation."""
pass
@dataclass
class SingleEntityObservation(Observation):
"""Data for a single entity."""
entity: Entity
t: float
next_t: float
pose: np.ndarray
velocity: np.ndarray
distance_travelled: float
recorded_poses: np.ndarray
entity_state: Any
def combine_observations(
*obs: Tuple[Type[Observation], ...],
prefixes: Optional[Tuple[Optional[str], ...]] = None,
) -> Type[Observation]:
"""
Create a class to combine multiple observations.
The class will inherit the type annotations from the input classes and create
    a dataclass from them. If duplicate field names are found, the `prefixes`
    argument can be used to create unique names by specifying a prefix used
    for arguments from each observation. The created class can be instantiated
    from instances of the input observations via its `from_obs` classmethod.
"""
if prefixes is not None and len(prefixes) != len(obs):
raise ValueError
annots = OrderedDict()
maps = OrderedDict()
for idx, ob in enumerate(obs):
try:
fields = ob.__dataclass_fields__
except AttributeError as e:
            raise TypeError(f"Observation {ob} is not a dataclass.") from e
for f in fields.values():
name = f.name
if name in annots:
if prefixes is None:
continue
else:
pre = prefixes[idx]
name = f"{pre}_{name}"
if name in annots:
raise ValueError(
f"Prefix {pre} still leads to duplicate name for "
f"{name}."
)
annots[name] = f.type
maps[name] = (idx, name)
@classmethod
def from_obs(cls, *obs):
"""Create the class from observation instances."""
args = []
for (i, name) in maps.values():
val = getattr(obs[i], name)
args.append(val)
return cls(*args)
return dataclass(
type(
"CombinedObservation",
(Observation,),
{
"__annotations__": annots,
"from_obs": from_obs,
},
)
)
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/observation.py
| 0.960491 | 0.528898 |
observation.py
|
pypi
|
from typing import Optional
import numpy as np
class Action:
    """Base class for actions that agents communicate to controllers."""
pass
class TeleportAction(Action):
    """An action consisting of desired coordinates for the next pose."""
def __init__(
self,
x: float = 0.0,
y: float = 0.0,
z: float = 0.0,
h: float = 0.0,
r: float = 0.0,
p: float = 0.0,
pose: Optional[np.ndarray] = None,
):
"""
Teleport action from coordinates or a pose.
Parameters
----------
x : float
The x coordinate of the action.
y : float
The y coordinate of the action.
z : float
The z coordinate of the action.
h : float
The h coordinate of the action.
r : float
The r coordinate of the action.
p : float
The p coordinate of the action.
pose : Optional[np.ndarray]
The whole pose as a numpy array of shape (6,). Will
overwrite any other coordinates passed.
"""
self.x = pose[0] if pose is not None else x
self.y = pose[1] if pose is not None else y
self.z = pose[2] if pose is not None else z
self.h = pose[3] if pose is not None else h
self.r = pose[4] if pose is not None else r
self.p = pose[5] if pose is not None else p
@property
def pose(self) -> np.ndarray:
"""Return a pose representation of the action as an array of shape (6,)."""
return np.array([self.x, self.y, self.z, self.h, self.r, self.p])
class VehicleAction(Action):
"""An acceleration and a steering update."""
def __init__(self, accel: float, steer: float):
"""
Vehicle action from acceleration and steering.
Parameters
----------
accel : float
The acceleration of the vehicle.
steer : float
The steering angle of angular velocity of the vehicle.
"""
self.acceleration = accel
self.steering = steer
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/action.py
| 0.978036 | 0.685976 |
action.py
|
pypi
|
from abc import ABC, abstractmethod
from typing import Optional, Union
import numpy as np
from scenario_gym.action import Action, TeleportAction, VehicleAction
from scenario_gym.entity import Entity
from scenario_gym.state import State
from scenario_gym.utils import ArrayLike
class Controller(ABC):
"""
Base class for the controller. Takes the agent's action and returns the pose.
When implementing a controller the _step method should return the
new pose for the entity (as an np.ndarray). It may modify the state
in other ways but the controller's step method will update the
    entity's pose. This is to avoid errors with immutable arrays.
"""
def __init__(self, entity: Entity):
"""Construct the controller from the entity."""
self.entity = entity
def reset(self, state: State) -> None:
"""Reset the controller parameters."""
self._reset(state)
def step(self, state: State, action: Action) -> ArrayLike:
"""Return the agent's next pose from the action."""
return self._step(state, action)
@abstractmethod
def _reset(self, state: State) -> None:
"""Reset the controller parameters."""
pass
@abstractmethod
def _step(self, state: State, action: Action) -> ArrayLike:
"""Return the agent's next pose from the action."""
pass
class ReplayTrajectoryController(Controller):
"""A controller to replay preset trajectories."""
def _reset(self, state: State) -> None:
"""Reset the controller parameters."""
pass
def _step(self, state: State, action: TeleportAction) -> ArrayLike:
"""Return the agent's next pose from the action."""
return action.pose
class VehicleController(Controller):
"""
A vehicle controller using a simple physical model.
Allows acceleration and steering in a given range.
"""
def __init__(
self,
entity: Entity,
max_steer: float = 0.7,
max_accel: float = 5.0,
max_speed: Optional[float] = None,
allow_reverse: bool = False,
):
"""
Construct the controller from the entity.
Parameters
----------
entity : Entity
The entity for the controller.
max_steer : float
The max allowed (absolute) steering angle.
max_accel : float
The max allowed (absolute) acceleration.
max_speed : Optional[float]
If given then the entity is limited to this max speed.
allow_reverse : bool
Allow the vehicle to move backwards. If False then the
vehicle's speed is forced >= 0.
"""
super().__init__(entity)
self.max_steer = max_steer
self.max_accel = max_accel
self.allow_reverse = allow_reverse
self.max_speed = max_speed
def _reset(self, state: State) -> None:
"""Reset the controller parameters."""
self.speed = np.linalg.norm(state.velocities[self.entity][:2])
self.l = self.entity.catalog_entry.bounding_box.length
def _step(
self, state: State, action: Union[VehicleAction, np.ndarray]
) -> ArrayLike:
"""
Return the agent's next pose from the action.
Updates the heading based on the steering angle. Then calculates
the new speed to return the new velocity.
"""
if isinstance(action, VehicleAction):
accel, steer = action.acceleration, action.steering
else:
accel, steer = action
accel = np.clip(accel, -self.max_accel, self.max_accel)
steer = np.clip(steer, -self.max_steer, self.max_steer)
pose = state.poses[self.entity].copy()
dt = state.next_t - state.t
h = pose[3]
dx = self.speed * np.cos(h)
dy = self.speed * np.sin(h)
dh = self.speed * np.tan(steer) / self.l
pose[[0, 1]] += np.array([dx, dy]) * dt
pose[3] += dh * dt
speed = self.speed + accel * dt
if not self.allow_reverse:
speed = np.maximum(0.0, speed)
if self.max_speed is not None:
speed = np.minimum(self.max_speed, speed)
self.speed = speed
return pose
class PIDController(VehicleController):
"""
A PID controller for scenario gym agents.
Selects acceleration and steering to get to a given waypoint. These
are computed using a very simple PID controller for the steering
and acceleration. The acceleration error and steering errors are based
    on the lateral and longitudinal error from the target in the vehicle's
local frame.
"""
def __init__(
self,
entity: Entity,
steer_Kp: float = 0.03054,
steer_Kd: float = 1.5709,
accel_Kp: float = 0.3753,
accel_Kd: float = 1.8970,
accel_Ki: float = 0.0204,
**kwargs,
):
"""
Construct the controller from the entity.
entity : Entity
The entity for the controller.
steer_Kp : float
The steering proportionality parameter.
steer_Kd : float
The steering derivative parameter.
accel_Kp : float
The acceleration proportionality parameter.
accel_Kd : float
The acceleration derivative parameter.
accel_Ki : float
The acceleration integral parameter.
kwargs:
Keyword arguments for the underlying vehicle model.
"""
super(self.__class__, self).__init__(
entity,
**kwargs,
)
self.steer_Kp = steer_Kp
self.steer_Kd = steer_Kd
self.accel_Kp = accel_Kp
self.accel_Ki = accel_Ki
self.accel_Kd = accel_Kd
def _reset(self, state: State) -> None:
"""Reset the controller parameters."""
self.e_lon_prev = 0.0
self.e_lon_int = 0.0
self.e_lat_prev = 0.0
super(self.__class__, self)._reset(state)
def _step(self, state: State, action: TeleportAction) -> ArrayLike:
"""
Return the agent's next pose from the action.
Calculates the longitudinal and lateral error to use as error
values for the acceleration and steering. Then the parameters
are applied to produce vehicle action values for the vehicle
controller.
"""
# current and target positions
target = action.pose[:2]
pose = state.poses[self.entity].copy()
cur, h = pose[:2], pose[3]
speed = self.speed
# error and derivatives
e = target - cur # (2,)
R = np.array(
[
[np.cos(h), np.sin(h)],
[-np.sin(h), np.cos(h)],
]
)
e_lon, e_lat = R.dot(e)
# steering
if speed > 5.0 and speed <= 15:
gain_adj = 1.0 - 0.9 * ((speed - 5.0)) / 10.0
elif speed > 15:
gain_adj = 0.1
else:
gain_adj = 1.0
e_lat_D = (e_lat - self.e_lat_prev) / state.dt
steer_Kp = self.steer_Kp * gain_adj
steer_Kd = self.steer_Kd * gain_adj
steer = steer_Kp * e_lat + steer_Kd * e_lat_D
# acceleration
e_lon_D = (e_lon - self.e_lon_prev) / state.dt
e_lon_I = self.e_lon_int + e_lon * state.dt
if abs(e_lon) > 0.1:
accel = (
self.accel_Kp * e_lon
+ self.accel_Kd * e_lon_D
+ self.accel_Ki * e_lon_I
)
else:
accel = 0.0
self.e_lat_prev = e_lat
self.e_lon_prev = e_lon
self.e_lon_int = e_lon_I
return super(self.__class__, self)._step(state, VehicleAction(accel, steer))
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/controller.py
| 0.961362 | 0.711177 |
controller.py
|
pypi
|
from abc import ABC, abstractclassmethod
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Union
from lxml.etree import Element
from scenariogeneration import xosc
from scenario_gym.utils import ArgsKwargs, load_properties_from_xml
@dataclass(frozen=True)
class Catalog:
"""A catalog for catalog entries."""
name: str
group_name: str
@classmethod
def from_dict(cls, data: Dict[str, Any]):
"""Load the catalog from a dictionary."""
return cls(data["name"], data["group_name"])
def to_dict(self) -> Dict[str, Any]:
"""Write the catalog to a dictionary."""
return {"name": self.name, "group_name": self.group_name}
class CatalogObject(ABC):
"""
Base class for objects loaded from catalogs.
    Subclasses should implement the `load_data_from_xml` class method which takes
    the specific xml element that contains the data and returns the arguments
and keyword arguments for the class constructor. It should not return the
class itself since this way the methods can make use of loading methods from
parent classes.
    The attribute xosc_names can be set to the element names that the object
represents. For example, if `xosc_names = ["Vehicle"]` then any elements with
the tag `Vehicle` will be loaded by this entry. If not set then the class name
will be used.
"""
xosc_names: Optional[List[str]] = None
@classmethod
def from_xml(cls, element: Element, catalog: Optional[Catalog] = None):
"""Create the class from an xml element."""
args, kwargs = cls.load_data_from_xml(element, catalog=catalog)
return cls(*args, **kwargs)
@abstractclassmethod
def load_data_from_xml(
cls,
element: Element,
catalog: Optional[Catalog] = None,
) -> ArgsKwargs:
"""Load the object from an xml element."""
raise NotImplementedError
@classmethod
def from_dict(cls, data: Dict[str, Any]):
"""
Create the object from a dictionary.
Must be implemented to allow json serialization.
"""
raise NotImplementedError
def to_dict(self) -> Dict[str, Any]:
"""
Write the object to a dictionary.
Must be implemented to allow json serialization.
"""
raise NotImplementedError
def to_xosc(self) -> xosc.VersionBase:
"""Write the object to an xosc object."""
raise NotImplementedError
@dataclass
class BoundingBox(CatalogObject):
"""A bounding box defined by its length, width and center."""
width: float
length: float
center_x: float
center_y: float
@classmethod
def load_data_from_xml(
cls,
element: Element,
catalog: Optional[Catalog] = None,
) -> ArgsKwargs:
"""Load the bounding box data form an xml element."""
if element.tag != "BoundingBox":
raise TypeError(f"Expected BoundingBox element not {element.tag}.")
bb_center = element.find("Center")
bb_dimensions = element.find("Dimensions")
return (
float(bb_dimensions.attrib["width"]),
float(bb_dimensions.attrib["length"]),
float(bb_center.attrib["x"]),
float(bb_center.attrib["y"]),
), {}
@classmethod
def from_dict(cls, data: Dict[str, float]):
"""Load the bounding box from a dictionary."""
return cls(
data["width"],
data["length"],
data["center_x"],
data["center_y"],
)
def to_dict(self) -> Dict[str, float]:
"""Write the bounding box to a jsonable dictionary."""
return {
"width": self.width,
"length": self.length,
"center_x": self.center_x,
"center_y": self.center_y,
}
def to_xosc(self) -> xosc.BoundingBox:
"""Write the bounding box to an xosc bounding box."""
return xosc.BoundingBox(
self.width,
self.length,
0.0,
self.center_x,
self.center_y,
0.0,
)
@dataclass
class CatalogEntry(CatalogObject):
"""
A single catalog entry. Holds catalog information and a bounding box.
Parameters
----------
catalog : Optional[Catalog]
The catalog from which the entry is loaded.
catalog_entry : str
The name of the specific entry.
catalog_category : Optional[str]
The category of the entry.
catalog_type : str
The catalog type, e.g. Vehicle or Pedestrian.
bounding_box : BoundingBox
The bounding box of the entry.
properties : Dict[str, Union[float, str]]
Any properties associated with the element.
files: List[str]
A list of filepaths for external files.
"""
catalog: Optional[Catalog]
catalog_entry: str
catalog_category: Optional[str]
catalog_type: str
bounding_box: BoundingBox
properties: Dict[str, Union[float, str]]
files: List[str]
@classmethod
def load_data_from_xml(
cls,
element: Element,
catalog: Optional[Catalog] = None,
) -> ArgsKwargs:
"""Load the catalog entry from an xml element."""
entry_name = element.attrib["name"]
cname = element.tag.lower() + "Category"
category = element.attrib[cname] if cname in element.attrib else None
bb = element.find("BoundingBox")
bb = BoundingBox.from_xml(bb, catalog=catalog)
properties, files = load_properties_from_xml(element)
return (
catalog,
entry_name,
category,
element.tag,
bb,
properties,
files,
), {}
@classmethod
def from_dict(cls, data: Dict[str, Any]):
"""Load the catalog entry from a dictionary."""
catalog = data.get("catalog", None)
if catalog is not None:
catalog = Catalog.from_dict(catalog)
return cls(
catalog,
data["catalog_entry"],
data["catalog_category"],
data["catalog_type"],
BoundingBox.from_dict(data["bounding_box"]),
data.get("properties", {}),
data.get("files", []),
)
def to_dict(self) -> Dict[str, Any]:
"""Write the catalog entry to a dictionary."""
return {
"catalog": self.catalog.to_dict() if self.catalog else None,
"catalog_entry": self.catalog_entry,
"catalog_category": self.catalog_category,
"catalog_type": self.catalog_type,
"bounding_box": self.bounding_box.to_dict(),
"properties": self.properties,
"files": self.files,
}
def to_xosc(self) -> xosc.VersionBase:
"""Create an xosc entity object from the catalog entry."""
obj = xosc.MiscObject(
self.catalog_entry,
1.0,
getattr(
xosc.MiscObjectCategory,
self.catalog_category,
xosc.MiscObjectCategory.none,
),
self.catalog_category,
self.bounding_box.to_xosc(),
)
for k, v in self.properties.items():
obj.add_property(k, v)
for f in self.files:
obj.add_property_file(f)
return obj
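# Editor's note: a small usage sketch (not part of the library). The XML snippet
# is illustrative; the attribute names follow what BoundingBox.load_data_from_xml
# reads above.
from lxml import etree
_example_xml = (
    '<BoundingBox>'
    '<Center x="1.4" y="0.0" z="0.9"/>'
    '<Dimensions width="2.0" length="4.5" height="1.8"/>'
    '</BoundingBox>'
)
_example_bb = BoundingBox.from_xml(etree.fromstring(_example_xml))
# _example_bb.to_dict() -> {"width": 2.0, "length": 4.5, "center_x": 1.4, "center_y": 0.0}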
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/catalog_entry.py
| 0.970479 | 0.443841 |
catalog_entry.py
|
pypi
|
from typing import Optional
from scenario_gym.action import Action, TeleportAction
from scenario_gym.controller import (
Controller,
PIDController,
ReplayTrajectoryController,
)
from scenario_gym.entity import Entity
from scenario_gym.observation import Observation
from scenario_gym.scenario import Scenario
from scenario_gym.sensor import EgoLocalizationSensor, Sensor
from scenario_gym.state import State
from scenario_gym.trajectory import Trajectory
from scenario_gym.utils import ArrayLike
class Agent:
"""Base agent class. Processes observations to select an action."""
def __init__(self, entity: Entity, controller: Controller, sensor: Sensor):
"""
Construct an agent given an entity, controller and sensor.
Parameters
----------
entity : Entity
The entity that the agent will control.
controller : Controller
The controller for the agent.
sensor : Sensor
The sensor module for the agent.
"""
self.entity = entity
self.controller = controller
self.sensor = sensor
self._last_action: Optional[Action] = None
self._last_reward: Optional[float] = None
self._trajectory: Optional[Trajectory] = None
def reset(self, state: State) -> None:
"""Reset the agent state at the start of the scenario."""
self.last_action = None
self.last_reward = None
self.sensor.reset(state)
self.controller.reset(state)
self._reset()
def step(self, state: State) -> ArrayLike:
"""Select an action from the current observation."""
obs = self.sensor.step(state)
action = self._step(obs)
self.last_action = action
return self.controller.step(state, action)
def _reset(self) -> None:
"""Reset the agent state at the start of the scenario."""
pass
def _step(self, observation: Observation) -> Action:
"""Select an action from the current observation."""
pass
def finish(self, state: State) -> None:
"""Process the end of the scenario."""
pass
@property
def trajectory(self) -> Trajectory:
"""
Return the trajectory of the agent.
By default this is the underlying entity's trajectory but it can be overridden.
"""
return (
self._trajectory
if self._trajectory is not None
else self.entity.trajectory
)
@trajectory.setter
def trajectory(self, trajectory: Trajectory):
self._trajectory = trajectory
@property
def last_action(self) -> Action:
"""Return the previous action selected by the agent."""
return self._last_action
@last_action.setter
def last_action(self, action: Action) -> None:
self._last_action = action
def reward(self, state: State) -> Optional[float]:
"""Return and cache the reward for the agent from the current state."""
r = self._reward(state)
if r is not None:
self.last_reward = r
return r
def _reward(self, state: State) -> Optional[float]:
"""Return the reward for the agent from the current state."""
pass
@property
def last_reward(self) -> Optional[float]:
"""Get the last reward."""
return self._last_reward
@last_reward.setter
def last_reward(self, reward: Optional[float]) -> None:
self._last_reward = reward
class ReplayTrajectoryAgent(Agent):
"""Agent for entities that follow trajectories predefined in the scenario."""
def _reset(self) -> None:
"""Reset the agent state at the start of the scenario."""
pass
def _step(self, observation: Observation) -> Action:
"""Return the agent's next pose."""
new_pose = self.trajectory.position_at_t(observation.next_t)
return TeleportAction(pose=new_pose)
class PIDAgent(Agent):
"""Agent following a specified trajectory with a PID controller."""
def __init__(self, entity: Entity, **controller_kwargs):
super().__init__(
entity,
PIDController(entity, **controller_kwargs),
EgoLocalizationSensor(entity),
)
def _reset(self) -> None:
"""Reset the agent state at the start of the scenario."""
pass
def _step(self, observation: Observation) -> TeleportAction:
"""Get the next waypoint from the agent's trajectory."""
pos = self.trajectory.position_at_t(observation.next_t)
return TeleportAction(x=pos[0], y=pos[1], z=pos[2])
def _create_agent(scenario: Scenario, entity: Entity) -> Optional[Agent]:
"""
Return a replay trajectory agent.
This is the default create agent function used by the gym.
Parameters
----------
scenario : Scenario
The scenario object.
entity : Entity
The specific entity within the scenario.
"""
if entity.ref == "ego":
controller = ReplayTrajectoryController(entity)
sensor = EgoLocalizationSensor(entity)
return ReplayTrajectoryAgent(entity, controller, sensor)
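# Editor's note: an illustrative create_agent hook (not part of the library),
# mirroring _create_agent above but attaching the PID-based agent to the ego.
# It assumes PIDController's default gains are acceptable for the scenario.
def create_pid_ego_agent(scenario: Scenario, entity: Entity) -> Optional[Agent]:
    """Attach a PIDAgent to the ego; other entities replay their trajectories."""
    if entity.ref == "ego":
        return PIDAgent(entity)
    return None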
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/agent.py
| 0.965908 | 0.629917 |
agent.py
|
pypi
|
from __future__ import annotations
from copy import copy
from typing import Callable, List, Optional, Tuple, Union
import numpy as np
from scipy.interpolate import interp1d
from scenario_gym.utils import ArrayLike, NDArray, cached_property
class Trajectory:
"""
A Scenario Gym representation of a trajectory.
Note that trajectories consist of immutable arrays. To modify a trajectory
one must copy the data and init a new one:
```
new_data = trajectory.data.copy()
# apply changes
new_t = Trajectory(new_data)
```
"""
_fields = ("t", "x", "y", "z", "h", "p", "r")
t: Optional[NDArray] = None
x: Optional[NDArray] = None
y: Optional[NDArray] = None
z: Optional[NDArray] = None
h: Optional[NDArray] = None
p: Optional[NDArray] = None
r: Optional[NDArray] = None
def __init__(self, data: NDArray, fields: Tuple[str] = _fields):
"""
Trajectory constructor.
Parameters
----------
data : np.ndarray
The trajectory data as a numpy array of (num_points, num_fields).
By default the columns should be t, x, y, z, h, p, r otherwise the
fields argument should be passed.
fields : List[str]
The field names for each column of data. Must contain t, x and y and
must be a subset of _fields.
"""
if not all(f in fields for f in ("t", "x", "y")):
raise ValueError("Trajectory cannot be created with t, x and y values.")
if data.ndim != 2 or data.shape[1] != len(fields):
raise ValueError(
f"Invalid shape: {data.shape}. Expected: (N, {len(fields)}). Either"
" pass `fields` to specify the columns given or ensure that columns"
f" for all of {self._fields} are provided."
)
perm = [fields.index(f) for f in self._fields if f in fields]
data = data[:, perm]
data = data[np.unique(data[:, 0], return_index=True)[1]]
n = data.shape[0]
_data: List[NDArray] = []
for f in self._fields:
d = data[:, perm.index(fields.index(f))] if f in fields else np.zeros(n)
if f not in fields or (f in fields and np.isfinite(d).sum() != n):
if f == "h" and n == 1:
d = np.zeros(1)
elif f == "h" and n > 1:
t = _data[0]
fn = interp1d(
t,
np.array(_data[1:3]).T,
axis=0,
fill_value="extrapolate",
)
d = np.arctan2(*np.flip(fn(t + 1e-2) - fn(t - 1e-2), axis=1).T)
d = _resolve_heading(d)
elif f in ("z", "p", "r"):
d = np.zeros(n)
else:
raise ValueError(
f"Invalid values found for {f}. Values required for xyt."
)
elif f == "h":
d = _resolve_heading(d)
_data.append(d)
setattr(self, f, d)
# we will make the data readonly
self._data = np.array(_data).T.copy()
self._data.flags.writeable = False
self._interpolated: Optional[Callable[[ArrayLike], NDArray]] = None
self._interpolated_s: Optional[Callable[[ArrayLike], NDArray]] = None
self._grad_fn = None
@property
def data(self) -> NDArray:
"""
Get the underlying trajectory data.
Note this property has no setter. To modify the trajectory data one must
copy the data and init a new trajectory:
```
new_data = trajectory.data.copy()
# apply changes
new_t = Trajectory(new_data)
```
"""
return self._data
def __len__(self) -> int:
"""Return the number of points in the trajectory."""
return len(self.data)
def __getitem__(self, idx: int) -> NDArray:
"""Get the idx'th point in the trajectory."""
return self.data[idx]
@cached_property
def min_t(self) -> float:
"""Return the first timestamp of the trajectory."""
return self.t.min()
@cached_property
def max_t(self) -> float:
"""Return the final timestamp of the trajectory."""
return self.t.max()
@cached_property
def s(self) -> NDArray:
"""Return the distance travelled at each point."""
ds = np.linalg.norm(np.diff(self.data[:, [1, 2]], axis=0), axis=1).cumsum()
return np.hstack([[0.0], ds])
@cached_property
def arclength(self) -> float:
"""Total distance travelled."""
return self.s[-1]
def position_at_t(
self,
t: Union[float, ArrayLike],
extrapolate: Union[bool, Tuple[bool, bool]] = (False, False),
) -> Optional[NDArray]:
"""
Compute the position of the entity at time t.
Can vectorise over t.
Parameters
----------
t : float
The time at which the position is returned. Linearly interpolates
the trajectory control points to find the position.
extrapolate : Union[bool, Tuple[bool, bool]]
Whether to extrapolate the trajectory if the time given is outside
of the range of the trajectory. If False then None will be returned
for such times. If a tuple is given then the first and second elements
correspond to whether to extrapolate before and after the trajectory
respectively; times outside the range are otherwise clamped to the endpoints.
Returns
-------
Optional[np.ndarray]
The position as a numpy array. If the time given is outside of the
range of the trajectory and extrapolate is False then None is returned.
"""
t = np.array(t)
if self._interpolated is None:
data = self.data
if data.shape[0] == 1:
data = np.repeat(data, 2, axis=0)
data[-1, 0] += 1e-3
self._interpolated = interp1d(
data[:, 0],
data[:, 1:],
bounds_error=False,
fill_value="extrapolate",
axis=0,
)
if isinstance(extrapolate, tuple):
ext_bck, ext_fwd = extrapolate
extrapolate = True
else:
ext_bck = ext_fwd = extrapolate
if t.ndim == 0:
if not extrapolate and (t < self.min_t or t > self.max_t):
return None
elif t < self.min_t and not ext_bck:
return self.data[0, 1:]
elif t > self.max_t and not ext_fwd:
return self.data[-1, 1:]
return self._interpolated(t)
poses = self._interpolated(t)
if not ext_bck:
poses = np.where(t[:, None] < self.min_t, self.data[0, None, 1:], poses)
if not ext_fwd:
poses = np.where(
t[:, None] > self.max_t, self.data[-1, None, 1:], poses
)
return poses
def position_at_s(self, s: float) -> NDArray:
"""
Compute the position of the entity at distance travelled s.
Parameters
----------
s : float
The arclength at which the position is returned. Linearly
interpolates the trajectory control points to find the position.
Returns
-------
np.ndarray
The position as a numpy array.
"""
if self._interpolated_s is None:
data = self.data
s_ = self.s
s_, idx = np.unique(s_, return_index=True)
data = data[idx]
if data.shape[0] == 1:
data = np.repeat(data, 2, axis=0)
data[-1, 0] += 1e-3
s_ = np.hstack([s_[0] - 1e-3, s_[0]])
self._interpolated_s = interp1d(
s_,
data,
bounds_error=False,
fill_value=(data[0, :], data[-1, :]),
axis=0,
)
out = self._interpolated_s(s)
out[..., 0] = np.where(s == 0, 0, out[..., 0])
return out
def velocity_at_t(
self, t: Union[float, ArrayLike], eps: float = 1e-4
) -> NDArray:
"""
Compute the velocity of the entity at time t.
Parameters
----------
t : float
The time at which the velocity is returned.
eps : float
The epsilon used to compute the velocity.
Returns
-------
np.ndarray
The velocity as a numpy array.
"""
t = np.array(t)
inside = np.logical_and(self.min_t <= t, t <= self.max_t)
v_in = (
self.position_at_t(t + eps / 2, extrapolate=True)
- self.position_at_t(t - eps / 2, extrapolate=True)
) / eps
v_out = np.zeros(t.shape + (6,))
if t.ndim >= 1:
inside = inside.reshape(-1, 1)
return np.where(inside, v_in, v_out)
def is_stationary(self) -> bool:
"""Return True if the trajectory is stationary."""
return is_stationary(self.data)
def __copy__(self) -> Trajectory:
"""Create a copy of the trajectory."""
return self.__class__(self.data.copy())
def copy(self) -> Trajectory:
"""Create a copy of the trajectory."""
return copy(self)
def translate(self, x: np.ndarray) -> Trajectory:
"""
Create a new trajectory by translating the current by x.
Parameters
----------
x : np.ndarray
Translation quantity. Must broadcast to the data so must be
a matrix, vector or scalar.
Returns
-------
Trajectory
The translated trajectory.
"""
if x.ndim == 1:
x = x[None, :]
return self.__class__(self.data + x)
def rotate(self, h: float) -> Trajectory:
"""
Create a new trajectory by rotating the current one by h about its first point.
Parameters
----------
h : float
The angle of rotation (about the trajectory's first control point).
Returns
-------
Trajectory
The rotated trajectory.
"""
new_data = self.data.copy()
xy = new_data[None, 0, [1, 2]]
new_data[:, [1, 2]] = (new_data[:, [1, 2]] - xy).dot(
np.array(
[
[np.cos(h), np.sin(h)],
[-np.sin(h), np.cos(h)],
]
)
) + xy
new_data[:, 4] = (new_data[:, 4] + h) % (2.0 * np.pi)
return self.__class__(new_data)
def smooth_headings(self) -> Trajectory:
"""
Create a new trajectory by smoothing the existing headings.
Returns
-------
Trajectory
The smoothed trajectory.
"""
s = self.s
d = np.arctan2(
*np.flip(
self.position_at_s(s + 1e-2)[:, 1:3]
- self.position_at_s(s - 1e-2)[:, 1:3],
axis=1,
).T
)
d = _resolve_heading(d)
new_data = self.data.copy()
new_data[:, 4] = d
return self.__class__(new_data)
def subsample(
self,
points_per_s: Optional[float] = None,
points_per_t: Optional[float] = None,
curvature: bool = False,
**kwargs,
) -> Trajectory:
"""
Create a new trajectory with a given frequency of control points.
The control points can either be equally spaced across time or across arc by
passing the keyword arguments `points_per_t` or `points_per_s` respectively.
Exactly one of these keywords must be passed.
Parameters
----------
points_per_s : Optional[float]
Number of control points per unit of arc.
points_per_t : Optional[float]
Number of control points per unit of time.
curvature : bool
If True, curvature-based sampling will be used to subsample the trajectory.
"""
if (points_per_s is None) == (points_per_t is None):
raise ValueError(
"Exactly one of `points_per_s` or `points_per_t` must be supplied."
)
if curvature:
return self.curvature_subsample(
points_per_s=points_per_s,
points_per_t=points_per_t,
**kwargs,
)
if points_per_t:
n = int(max(1, np.ceil((self.max_t - self.min_t) * points_per_t)))
ts = np.linspace(self.min_t, self.max_t, n)
data = self.position_at_t(ts)
return self.__class__(np.concatenate([ts[:, None], data], axis=1))
n = int(max(1, np.ceil(self.arclength * points_per_s)))
ss = np.linspace(0, self.arclength, n)
data = self.position_at_s(ss)
return self.__class__(data)
def curvature_subsample(
self,
points_per_s: Optional[float] = None,
points_per_t: Optional[float] = None,
eps: float = 1e-3,
weight: float = 5.0,
) -> np.ndarray:
"""
Subsample by sampling points around high-curvature areas.
Parameters
----------
points_per_s : Optional[float]
Number of control points per unit of arc.
points_per_t : Optional[float]
Number of control points per unit of time.
eps : float
Epsilon parameter for computing gradients of the trajectory.
weight : float
Temperature for sampling distribution. Higher values give points more
densely sampled around high-curvature areas.
"""
if points_per_s is not None:
n = int(np.maximum(1, points_per_s * self.arclength))
elif points_per_t is not None:
n = int(np.maximum(1, points_per_t * self.max_t))
else:
raise ValueError(
"Exactly one of `points_per_s` or `points_per_t` must be supplied."
)
s = self.s
if self._grad_fn is None:
fn = self.position_at_s
grads = (fn(s + eps)[:, [1, 2]] - fn(s - eps)[:, [1, 2]]) / (2 * eps)
self._grad_fn = interp1d(s, grads, axis=0, fill_value="extrapolate")
grad_fn = self._grad_fn
second_grad = (grad_fn(s[1:-1] + eps) - grad_fn(s[1:-1] - eps)) / (2 * eps)
curv = np.linalg.norm(second_grad, axis=1)
dist = np.exp(weight * curv) / np.exp(weight * curv).sum()
num_points = int(np.clip(n - 2, 1, dist.shape[0]))
idxs = np.random.choice(
dist.shape[0],
size=(num_points,),
replace=False,
p=dist,
)
s_vals = s[np.hstack([[0], 1 + np.sort(idxs), [s.shape[0] - 1]])]
return self.__class__(fn(s_vals))
def to_json(self) -> List[List[float]]:
"""Write the trajectory to a jsonable list."""
return self.data.tolist()
def _resolve_heading(h: NDArray) -> NDArray:
"""Update heading so that there are no large jumps."""
deltas = np.diff(h) % (2 * np.pi)
deltas = np.where(deltas > np.pi, deltas - 2 * np.pi, deltas)
return np.hstack([h[0], deltas]).cumsum()
def is_stationary(data: np.ndarray) -> bool:
"""
Check if an entity is stationary for the entire scenario.
Any nan values are replaced with 0s.
"""
return (
len(
np.unique(
np.where(
np.isnan(data[:, 1:]),
0.0,
data[:, 1:],
),
axis=0,
)
)
<= 1
)
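# Editor's note: a small usage sketch (not part of the library). Only t, x, y are
# supplied via `fields`; z, h, p, r are filled in by the constructor.
_example_traj = Trajectory(
    np.array(
        [
            [0.0, 0.0, 0.0],
            [1.0, 1.0, 0.0],
            [2.0, 2.0, 1.0],
        ]
    ),
    fields=("t", "x", "y"),
)
# _example_traj.position_at_t(0.5) -> interpolated pose (x, y, z, h, p, r) at t = 0.5
# _example_traj.arclength -> total distance travelled along the xy path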
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/trajectory.py
| 0.955121 | 0.844152 |
trajectory.py
|
pypi
|
from contextlib import suppress
from functools import lru_cache
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
from lxml.etree import Element
from shapely.geometry import Polygon
from shapely.strtree import STRtree
try:
from functools import cached_property
except ImportError:
def cached_property(fn):
"""Replace cached_property with a size 1 lru cache."""
return property(lru_cache(maxsize=1)(fn))
try:
from numpy.typing import ArrayLike, NDArray
except ImportError:
ArrayLike = NDArray = np.ndarray
ArgsKwargs = Tuple[Tuple[Any, ...], Dict[str, Any]]
def detect_geom_collisions(
geoms: List[Polygon],
others: Optional[List[Polygon]] = None,
) -> Dict[Polygon, List[Polygon]]:
"""
Detect collisions between polygons.
Parameters
----------
geoms : List[Polygon]
The geometries to use.
others : Optional[List[Polygon]]
Additional geometries to include when checking for collisions
with each geometry in geoms.
Returns
-------
Dict[Polygon, List[Polygon]]
A dictionary that maps each polygon in geoms to the polygons which it
intersects.
"""
all_geoms = geoms if others is None else geoms + others
tree = STRtree(all_geoms)
return {
g: [
g_prime
for g_prime in tree.geometries.take(
tree.query(g, predicate="intersects").tolist()
)
if g != g_prime
]
for g in geoms
}
def load_properties_from_xml(
element: Element,
) -> Tuple[Dict[str, Union[str, float]], List[str]]:
"""
Load properties from the xml element.
These can be either `Property` or `File` elements. `Property` elements are
given with `name` and `value` attributes (`name` must be unique for the
entry) and are returned as a dict of values indexed by `name`. The value
will be converted to a float if it can otherwise the string will be
returned. `File` elements must have a `filepath` attribute which will
be parsed. Multiple files can be stored with one entity.
Returns
-------
properties : Dict[str, Union[str, float]]
A dictionary of properties indexed by name.
files : List[str]
A list of filepaths for external files.
"""
files = []
properties = {}
prop = element.find("Properties")
if prop is not None:
for child in prop.findall("Property"):
try:
v = child.attrib["value"]
with suppress(ValueError):
v = float(v)
properties[child.attrib["name"]] = v
except KeyError as e:
raise RuntimeError(
"Property could not be loaded without `value` key."
) from e
for file in prop.findall("File"):
files.append(file.attrib["filepath"])
return properties, files
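# Editor's note: a usage sketch for detect_geom_collisions (not part of the
# library): two overlapping unit squares and one disjoint square.
_a = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
_b = Polygon([(0.5, 0), (1.5, 0), (1.5, 1), (0.5, 1)])
_c = Polygon([(5, 5), (6, 5), (6, 6), (5, 6)])
_collisions = detect_geom_collisions([_a, _b, _c])
# _collisions -> {_a: [_b], _b: [_a], _c: []}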
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/utils.py
| 0.945682 | 0.571826 |
utils.py
|
pypi
|
import inspect
import os
import warnings
from argparse import ArgumentParser
from typing import Any, Dict, List, Optional, Type, Union
import yaml
from scenario_gym.agent import Agent, ReplayTrajectoryAgent
from scenario_gym.controller import ReplayTrajectoryController
from scenario_gym.entity import Entity
from scenario_gym.metrics import Metric
from scenario_gym.scenario import Scenario
from scenario_gym.scenario_gym import ScenarioGym
from scenario_gym.sensor import EgoLocalizationSensor
def load_keywords(obj: Type, exclude: Optional[List[str]] = None) -> Dict[str, Any]:
"""Find keyword arguments of the object."""
sig = inspect.signature(obj.__init__)
return {
k: v.default
for k, v in sig.parameters.items()
if v.default != inspect._empty and (exclude is None or k not in exclude)
}
class ScenarioManager:
"""Provides functionality to manage running large numbers of scenarios."""
GYM_PARAMETERS = load_keywords(ScenarioGym, exclude=["metrics"])
PARAMETERS = {}
@classmethod
def generate_parser(cls) -> ArgumentParser:
"""Generate an argument parser for the manager."""
parser = ArgumentParser(description=f"CLI for {cls.__name__}.")
params = {
**cls.GYM_PARAMETERS,
**cls.VIEWER_PARAMETERS,
**cls.PARAMETERS,
}
for k, v in params.items():
if isinstance(v, bool):
parser.add_argument(
f"--{k.replace(' ', '_')}",
action="store_false" if v else "store_true",
)
elif isinstance(v, (str, int, float)):
parser.add_argument(
f"--{k.replace(' ', '_')}",
default=v,
type=type(v),
)
elif isinstance(v, (list, tuple)):
parser.add_argument(
f"--{k.replace(' ', '_')}",
default=v,
nargs="+",
)
elif v is None:
parser.add_argument(
f"--{k.replace(' ', '_')}",
default=v,
type=float,
)
else:
warnings.warn(f"Type {type(v)} not supported.")
return parser
@classmethod
def from_cli(cls, **kwargs):
"""Construct the manager from command line arguments."""
parser = cls.generate_parser()
args = parser.parse_args()
return cls(
**{
k: v
for k, v in args.__dict__.items()
if k in cls.PARAMETERS and v is not None
},
**kwargs,
)
def __init__(
self,
config_path: Optional[str] = None,
metrics: Optional[List[Metric]] = None,
viewer_params: Optional[Dict[str, Any]] = None,
**kwargs,
):
"""
Construct the manager from input parameters.
Parameters
----------
config_path : Optional[str]
A path to a yaml file of the parameters to be used.
metrics : Optional[List[Metric]]
List of metrics to measure.
viewer_params : Optional[Dict[str, Any]]
Dictionary of parameters to use for the Viewer. Since the viewer can be
a custom class its parameters should be passed as a dictionary so the
manager will know which parameters should be passed for it.
**kwargs:
Parameters given as keywords. Will override any parameters
from a config file.
"""
self.load_params(config_path=config_path, **kwargs)
self.metrics = metrics.copy() if metrics is not None else []
self.viewer_params = viewer_params.copy() if viewer_params else {}
def load_params(self, config_path: Optional[str] = None, **kwargs) -> None:
"""Load all parameters required and set them as attributes."""
params = yaml.safe_load(open(config_path, "r")) if config_path else {}
self.PARAMETERS = self.PARAMETERS.copy()
self.combined_config = {
**self.GYM_PARAMETERS,
**self.PARAMETERS,
**params,
**kwargs,
}
for k, v in self.combined_config.items():
if not hasattr(self, k):
setattr(self, k, v.copy() if isinstance(v, (list, dict)) else v)
@property
def parameter_names(self) -> List[str]:
"""Return the names of all parameters."""
return self.gym_parameter_names + self.viewer_parameter_names
@property
def parameters(self) -> Dict[str, Any]:
"""Return all the parameters for the gym and viewer."""
return {**self.gym_parameters, **self.viewer_parameters}
@property
def gym_parameter_names(self) -> List[str]:
"""Return the names of all gym parameters."""
return list(self.GYM_PARAMETERS)
@property
def gym_parameters(self) -> Dict[str, Any]:
"""Return the parameters needed for the ScenarioGym constructor."""
return {k: getattr(self, k) for k in self.GYM_PARAMETERS}
@property
def viewer_parameter_names(self) -> List[str]:
"""Return the names of all viewer parameters."""
return list(self.viewer_params)
@property
def viewer_parameters(self) -> Dict[str, Any]:
"""Return the parameters needed for the rendering module."""
return self.viewer_params
def make_gym(self, **kwargs) -> ScenarioGym:
"""Create a gym instance with the given config."""
return ScenarioGym(
metrics=self.metrics,
**self.gym_parameters,
**self.viewer_parameters,
**kwargs,
)
def create_agent(self, scenario: Scenario, entity: Entity) -> Agent:
"""
Construct the agents when loading a scenario.
Parameters
----------
scenario : Scenario
The scenario object.
entity : Entity
The specific entity within the scenario.
"""
if entity.ref == "ego":
controller = ReplayTrajectoryController(entity)
sensor = EgoLocalizationSensor(entity)
return ReplayTrajectoryAgent(entity, controller, sensor)
def add_metric(self, m: Metric) -> None:
"""Add a metric to the manager."""
self.metrics.append(m)
def on_rollout_start(self, gym: ScenarioGym) -> None:
"""Run before the rollout when running scenarios."""
pass
def on_rollout_end(self, gym: ScenarioGym) -> None:
"""Run after the rollout when running scenarios."""
pass
def run_scenario(
self,
scenario: Union[str, Scenario],
render: bool = False,
record: bool = False,
**kwargs,
) -> List[Any]:
"""
Run a single scenario in the gym.
Parameters
----------
scenario : Union[str, Scenario],
The filepath of the OpenScenario file for the scenario or
the scenario object.
render : bool
Whether to render the scenario.
record : bool
Whether to record the scenario to OpenScenario.
"""
gym = self.make_gym()
if record:
gym.record()
if isinstance(scenario, str):
gym.load_scenario(scenario, create_agent=self.create_agent)
elif isinstance(scenario, Scenario):
gym.set_scenario(scenario, create_agent=self.create_agent)
else:
raise ValueError(f"{scenario}: should be a scenario or a file.")
self.on_rollout_start(gym)
gym.rollout(render=render, **kwargs)
self.on_rollout_end(gym)
if record:
gym.recorder.get_state()
return [m.get_state() for m in self.metrics]
def run_scenarios(
self,
scenarios: List[str],
render: bool = False,
record: bool = False,
**kwargs,
) -> List[List[Any]]:
"""
Run a batch of scenarios.
Parameters
----------
scenarios : List[Union[str, Scenario]]
The filepaths of the OpenScenario files for the scenarios or
the raw scenario objects.
render : bool
Whether to render each scenario.
record : bool
Whether to record each scenario to OpenScenario.
Returns
-------
List[List[Any]]
The values for each metric after each scenario.
"""
results = []
gym = self.make_gym()
if record:
gym.record()
for scenario in scenarios:
if isinstance(scenario, str):
gym.load_scenario(scenario, create_agent=self.create_agent)
elif isinstance(scenario, Scenario):
gym.set_scenario(scenario, create_agent=self.create_agent)
else:
raise ValueError(f"{scenario}: should be a scenario or a file.")
gym.rollout(render=render, **kwargs)
if record:
gym.recorder.get_state()
results.append([m.get_state() for m in self.metrics])
return results
def save_config(self, path: str = "./params.yml") -> None:
"""
Write the config parameters to a yaml file.
Parameters
----------
path : str
The filepath for the output.
"""
path = os.path.splitext(path)[0] + ".yml"
with open(path, "w") as f:
yaml.dump(self.combined_config, f)
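# Editor's note: an illustrative usage sketch (not part of the library). The
# scenario path is hypothetical; `timestep` is forwarded to ScenarioGym via
# GYM_PARAMETERS and any PARAMETERS entries become attributes of the manager.
_manager = ScenarioManager(timestep=0.1)
_results = _manager.run_scenarios(["./scenarios/example.xosc"])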
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/manager.py
| 0.907907 | 0.320848 |
manager.py
|
pypi
|
import os
from typing import Any, Callable, Dict, List, Optional, Type, Union
from scenario_gym.agent import Agent, _create_agent
from scenario_gym.entity import Entity
from scenario_gym.metrics import Metric
from scenario_gym.scenario import Scenario
from scenario_gym.state import State
from scenario_gym.viewer import Viewer
from scenario_gym.xosc_interface import import_scenario
class ScenarioGym:
"""The main class that loads and runs scenarios."""
@classmethod
def run_scenarios(
cls,
paths: List[str],
render: bool = False,
**kwargs,
) -> None:
"""Rollout the scenarios in paths."""
gym = cls(**kwargs)
for path in paths:
gym.load_scenario(path)
gym.rollout(render=render)
def __init__(
self,
timestep: float = 1.0 / 30.0,
persist: bool = False,
viewer_class: Optional[Type[Viewer]] = None,
terminal_conditions: Optional[
List[Union[str, Callable[[State], bool]]]
] = None,
state_callbacks: Optional[List[Callable[[State], None]]] = None,
metrics: Optional[List[Metric]] = None,
**viewer_parameters,
):
"""
Init the gym.
All arguments for the constructor of the viewer class should be
passed as keyword arguments which will be stored.
Parameters
----------
timestep: float
Time between steps in the gym.
persist: bool
If True then entities will persist for the entire scenario.
viewer_class: Type[Viewer]
Class type of the viewer that will be initialised.
terminal_conditions : Optional[List[Union[str, Callable[[State], bool]]]]
Conditions that if any are met will end the scenario.
state_callbacks: Optional[List[Callable[[State], None]]]
Additional methods to be called on the state after every step.
metrics: List[Metric]
List of metrics to measure.
viewer_parameters:
Keyword arguments for viewer_class.
"""
self.timestep = timestep
self.persist = persist
if viewer_class is None and "fps" not in viewer_parameters:
viewer_parameters["fps"] = int(1.0 / self.timestep)
self.viewer_parameters = viewer_parameters.copy()
if terminal_conditions is None:
terminal_conditions = ["max_length"]
self.terminal_conditions = terminal_conditions
if state_callbacks is None:
state_callbacks = []
self.state_callbacks = state_callbacks
if viewer_class is None:
self._get_viewer()
else:
self.viewer_class = viewer_class
self._render_enabled = True
self.state: Optional[State] = None
self.viewer: Optional[Viewer] = None
self.reset_gym()
if metrics is not None:
self.add_metrics(metrics)
def _get_viewer(self) -> None:
"""Get the viewer if it is not provided."""
try:
from scenario_gym.viewer.opencv import OpenCVViewer
self.viewer_class = OpenCVViewer
self._render_enabled = True
except ImportError:
self._render_enabled = False
self.viewer_class = None
def reset_gym(self) -> None:
"""
Reset the state of the gym.
Closes the viewer, removes any metrics and unloads the scenario.
"""
self.close()
self.state = None
self.metrics = []
def add_metrics(self, metrics: List[Metric]) -> None:
"""Add metrics to the gym."""
self.metrics.extend(metrics)
def load_scenario(
self,
scenario_path: str,
create_agent: Callable[[Scenario, Entity], Optional[Agent]] = _create_agent,
relabel: bool = False,
**kwargs,
) -> None:
"""
Load a scenario from a file.
Parameters
----------
scenario_path : str
The scenario file to be loaded. Can be OpenSCENARIO or JSON.
create_agent : Callable[[str, Entity], Optional[Agent]]
A function that returns an agent to control a given entity.
relabel : bool
If True, all entities will be relabeled to ego, vehicle_1,
vehicle_2, ..., pedestrian_1, ..., entity_1, ...
"""
if scenario_path.endswith(".json"):
scenario = Scenario.from_json(scenario_path, **kwargs)
else:
scenario = import_scenario(
scenario_path,
relabel=relabel,
**kwargs,
)
self.set_scenario(
scenario, scenario_path=scenario_path, create_agent=create_agent
)
def set_scenario(
self,
scenario: Scenario,
scenario_path: Optional[str] = None,
create_agent: Callable[[Scenario, Entity], Optional[Agent]] = _create_agent,
) -> None:
"""
Update the current scenario and create agents.
Parameters
----------
scenario : Scenario
The scenario object.
scenario_path : Optional[str]
The path to the scenario file if it was loaded from one.
create_agent : Callable[[str, Entity], Optional[Agent]]
A function that returns an agent to control a given entity.
"""
self.state = State(
scenario,
scenario_path=scenario_path,
persist=self.persist,
conditions=self.terminal_conditions,
state_callbacks=self.state_callbacks,
)
self.create_agents(create_agent=create_agent)
self.reset_scenario()
def create_agents(
self,
create_agent: Callable[[Scenario, Entity], Optional[Agent]] = _create_agent,
) -> None:
"""
Create the agents for the scenario.
Parameters
----------
create_agent : Callable[[str, Entity], Optional[Agent]]
A function that will return an agent for the given entity or
return None if that entity does not need an agent. All entities
without agents will replay their trajectories.
"""
non_agents, non_agent_trajs = [], []
for entity in self.state.scenario.entities:
agent = create_agent(self.state.scenario, entity)
if agent is not None:
self.state.agents[entity] = agent
else:
non_agents.append(entity)
non_agent_trajs.append(entity.trajectory)
self.state.non_agents.add_entities(non_agents, non_agent_trajs)
def get_start_time(self, scenario: Scenario) -> float:
"""Get the start time of the scenario."""
return max((0.0, scenario.ego.trajectory.min_t))
def reset_scenario(self) -> None:
"""Reset the state to the beginning of the current scenario."""
self.close()
if not (self.state is None or self.state.scenario is None):
t0 = self.get_start_time(self.state.scenario)
if self.state.t != t0:
self.state.reset(t0)
for m in self.metrics:
m.reset(self.state)
def step(self) -> None:
"""Process a single step in the environment."""
self.state.next_t = self.state.t + self.timestep
# get the new poses
new_poses = {}
for entity, agent in self.state.agents.items():
if entity in self.state.poses:
pose = agent.step(self.state)
if pose is not None:
new_poses[entity] = pose
elif self.persist:
new_poses[entity] = self.state.poses[entity]
elif entity.trajectory.min_t >= self.state.t:
# the agent is initialised at its start position
new_poses[entity] = entity.trajectory.position_at_t(
self.state.next_t
)
new_poses.update(self.state.non_agents.step(self.state))
# update the poses and current time
self.state.step(new_poses)
# metrics and rendering
for m in self.metrics:
m.step(self.state)
if self.viewer is not None:
self.state.last_keystroke = self.render()
def rollout(
self, render: bool = False, video_path: Optional[str] = None
) -> None:
"""Rollout the current scenario fully."""
self.reset_scenario()
if render:
self.state.last_keystroke = self.render(video_path=video_path)
while not self.state.is_done:
self.step()
for agent in self.state.agents.values():
agent.finish(self.state)
self.close()
def render(self, video_path: Optional[str] = None) -> None:
"""Render the state of the gym."""
if self.viewer is None:
self.reset_viewer(video_path=video_path)
return self.viewer.render(self.state)
def reset_viewer(self, video_path: Optional[str] = None) -> None:
"""Reset the viewer at the start of a new rollout."""
if self.viewer is None:
if not self._render_enabled:
raise ValueError(
"Rendering is disabled since no `viewer_class` was provided "
"and the default viewer could not be imported. Perhaps OpenCV "
"is not installed?"
)
self.viewer = self.viewer_class(**self.viewer_parameters)
else:
self.viewer.close()
if video_path is None:
path = self.state.scenario_path
video_dir = os.path.join(os.path.dirname(path), "../Recordings")
if os.path.exists(video_dir):
video_path = os.path.join(
video_dir,
os.path.splitext(
os.path.basename(path),
)[0]
+ ".mp4",
)
else:
video_path = os.path.splitext(self.state.scenario_path)[0] + ".mp4"
self.viewer.reset(video_path)
def close(self) -> None:
"""Close the gym."""
if self.viewer is not None:
self.viewer.close()
self.viewer = None
def get_metrics(self) -> Dict[str, Any]:
"""Get the current metric states."""
values = {}
for metric in self.metrics:
value = metric.get_state()
if isinstance(value, dict):
for k, v in value.items():
if isinstance(k, str):
values[f"{metric.name}_{k}"] = v
elif value is not None:
values[metric.name] = value
return values
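# Editor's note: a minimal end-to-end sketch (not part of the library). The
# scenario path is hypothetical; rendering additionally requires the optional
# OpenCV viewer.
_gym = ScenarioGym(timestep=1.0 / 30.0, terminal_conditions=["max_length"])
_gym.load_scenario("./scenarios/example.xosc")
_gym.rollout(render=False)
# _gym.get_metrics() -> {} unless metrics were registered via add_metrics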
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/scenario_gym.py
| 0.89556 | 0.508971 |
scenario_gym.py
|
pypi
|
from __future__ import annotations
from copy import copy
from inspect import getfullargspec
from typing import Any, Dict, Optional, Type
import numpy as np
from shapely.geometry import Polygon
from scenario_gym.catalog_entry import BoundingBox, CatalogEntry
from scenario_gym.trajectory import Trajectory
from scenario_gym.utils import ArrayLike, NDArray
class Entity:
"""
An entity in the gym.
An entity consists of a catalog entry and a pose. Note that poses
are immutable arrays. Once a pose is set to the entity it cannot
be changed, only overwritten. It could be modified e.g. by calling
pose.copy() and then modifying the copied array and setting
that back to the pose.
"""
@classmethod
def _catalog_entry_type(cls) -> Type[CatalogEntry]:
"""Get the type of catalog entry that is used for this entity."""
args = getfullargspec(cls.__init__)
ce = args.args[1]
try:
ce_type = args.annotations[ce]
except KeyError as e:
raise NotImplementedError(
f"Subclass {cls.__name__} has no type annotation for catalog entry."
) from e
if isinstance(ce_type, str) and ce_type == "CatalogEntry":
ce_type = CatalogEntry
elif isinstance(ce_type, str) or not issubclass(ce_type, CatalogEntry):
raise TypeError("Catalog entry type must be a catalog entry subclass.")
return ce_type
def __init__(
self,
catalog_entry: CatalogEntry,
trajectory: Optional[Trajectory] = None,
ref: Optional[str] = None,
):
"""
Construct an entity.
Parameters
----------
catalog_entry: CatalogEntry
The catalog entry used for the entity.
trajectory : trajectory
The trajectory for the entity.
ref : Optional[str]
The unique reference for the entity from the OpenScenario file.
"""
self.ref = ref
self.catalog_entry = catalog_entry
self._trajectory = trajectory
@property
def trajectory(self) -> Trajectory:
"""Get the trajectory for the entity."""
return self._trajectory
@trajectory.setter
def trajectory(self, trajectory: Trajectory) -> None:
"""Set the trajectory for the entity."""
self._trajectory = trajectory
@property
def bounding_box(self) -> BoundingBox:
"""Get the bounding box of the entity from its catalog entry."""
return self.catalog_entry.bounding_box
@property
def type(self) -> Optional[str]:
"""Get the catalog type of the entity. E.g. Vehicle, Pedestrian."""
return self.catalog_entry.catalog_type.replace("Catalogs", "")
def __copy__(self) -> Entity:
"""Create a copy of an entity without copying the catalog_entry."""
return self.__class__(
self.catalog_entry,
trajectory=None if self.trajectory is None else self.trajectory.copy(),
ref=self.ref,
)
def copy(self) -> Entity:
"""Create a copy of an entity without copying the catalog_entry."""
return copy(self)
def get_bounding_box_points(self, pose: ArrayLike) -> NDArray:
"""
Compute the bounding box coordinates in the global frame for the given pose.
Returns in the order: RR, FR, FL, RL.
Parameters
----------
pose : Optional[ArrayLike]
An array of the entity's pose. May broadcast (..., [x, y, (z), h, ...]).
"""
ref_xy, h = pose[..., :2], pose[..., 3 if pose.shape[-1] > 3 else 2]
n = h.ndim
R = np.array([[np.cos(h), np.sin(h)], [-np.sin(h), np.cos(h)]]).transpose(
*(tuple(i + 2 for i in range(n)) + (0, 1))
)
points = np.array(
[
[
self.bounding_box.center_x - 0.5 * self.bounding_box.length,
self.bounding_box.center_y + 0.5 * self.bounding_box.width,
],
[
self.bounding_box.center_x + 0.5 * self.bounding_box.length,
self.bounding_box.center_y + 0.5 * self.bounding_box.width,
],
[
self.bounding_box.center_x + 0.5 * self.bounding_box.length,
self.bounding_box.center_y - 0.5 * self.bounding_box.width,
],
[
self.bounding_box.center_x - 0.5 * self.bounding_box.length,
self.bounding_box.center_y - 0.5 * self.bounding_box.width,
],
]
)
points = ref_xy[..., None, :] + np.einsum("ij,...jk->...ik", points, R)
return points
def get_bounding_box_geom(self, pose: Optional[ArrayLike]) -> Polygon:
"""
Return a Polygon representing the bounding box in the global frame.
Returns cached values if the pose is None and cached values are available.
Parameters
----------
pose : Optional[ArrayLike]
An array of the entity's pose. Only one may be given.
"""
return Polygon(self.get_bounding_box_points(pose))
def is_static(self) -> bool:
"""Return True if the entity is static."""
return self.trajectory.data.shape[0] == 1
def to_dict(self) -> Dict[str, Any]:
"""Write the entity to a jsonable dictionary."""
return {
"ref": self.ref,
"trajectory": self.trajectory.to_json(),
"catalog_entry": self.catalog_entry.to_dict(),
"entity_class": self.__class__.__name__,
}
@classmethod
def from_dict(cls, data: Dict[str, Any]):
"""Create the entity from the json dictionary."""
return cls(
cls._catalog_entry_type().from_dict(data["catalog_entry"]),
trajectory=Trajectory(np.array(data["trajectory"])),
ref=data.get("ref"),
)
class StaticEntity(Entity):
"""Used for entities with only one control point."""
@Entity.trajectory.setter
def trajectory(self, trajectory: Trajectory) -> None:
"""Check that the trajectory is static."""
if trajectory.data.shape[0] != 1:
raise ValueError(
"Recieved multiple control points for static entity: {self.ref}"
)
self._trajectory = trajectory
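# Editor's note: a usage sketch for get_bounding_box_points (not part of the
# library); the catalog entry values below are illustrative.
_example_entity = Entity(
    CatalogEntry(
        catalog=None,
        catalog_entry="example_car",
        catalog_category="car",
        catalog_type="Vehicle",
        bounding_box=BoundingBox(2.0, 4.5, 1.4, 0.0),
        properties={},
        files=[],
    ),
    ref="ego",
)
_pose = np.array([10.0, 5.0, 0.0, np.pi / 2])  # x, y, z, heading
_corners = _example_entity.get_bounding_box_points(_pose)  # (4, 2) array: RR, FR, FL, RL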
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/entity/base.py
| 0.953329 | 0.513303 |
base.py
|
pypi
|
from typing import Dict, List, Optional, TypeVar
import numpy as np
from scipy.interpolate import interp1d
from scenario_gym.trajectory import Trajectory
from scenario_gym.utils import ArrayLike
from .base import Entity
State = TypeVar("State")
class BatchReplayEntity:
"""
A single object used to represent multiple entities.
Will replay exact trajectories from OpenScenario files. Computation is
vectorized for efficiency.
"""
def __init__(
self,
timestep: Optional[float] = None,
persist: bool = False,
):
"""Init the batch entity with no assigned entities."""
self.entities: List[Entity] = []
self.trajectories: List[Trajectory] = []
self.persist = persist
self.timestep = timestep
self.max_t = 0.0
def step(self, state: State) -> Dict[Entity, ArrayLike]:
"""
Take a single step in the gym.
Returns the pose of each entity at the next timestamp. Unless `persist` is
True, only entities whose trajectories span the current time (or static
entities) are returned.
"""
t = state.next_t
new_poses = {}
if len(self.entities) > 0:
pos = self.fn(t) # (m, num_ents)
for e, p in zip(self.entities, pos):
if (
self.persist
or e.is_static()
or (t >= e.trajectory.min_t and t <= e.trajectory.max_t)
):
new_poses[e] = p
return new_poses
def add_entities(
self,
entities: List[Entity],
trajs: List[Trajectory],
) -> None:
"""
Add entities that are to be batched together.
This will reset the entities in the scenario so all entities
must be passed at once.
Parameters
----------
entities : List[Entity]
The entities to be used.
trajs : List[Trajectory]
The trajectory for each entity.
"""
self.entities.clear()
self.trajectories.clear()
self.max_t = 0.0
if entities:
self.entities.extend(entities)
self.trajectories.extend(trajs)
num_ents = len(self.entities)
datas = []
for t in self.trajectories:
d = np.nan_to_num(t.data)
if d.shape[0] == 1:
d = np.repeat(d, 2, axis=0)
d[-1, 0] += 1e-1 # to prevent nan
datas.append(d)
m = datas[0].shape[1] - 1
ts = np.array(
sorted(list(set([t for d in datas for t in d[:, 0]])))
) # (N,)
self.max_t = ts[-1]
interpd = []
for d in datas:
x = interp1d(
d[:, 0],
d[:, 1:].T,
bounds_error=False,
fill_value=(d[0, 1:], d[-1, 1:]),
)(
ts
).T # (N, m)
interpd.append(x)
X = np.concatenate(interpd, axis=1) # (N, num_ents * m)
if self.timestep:
all_ts = np.arange(0.0, self.max_t, self.timestep)
all_Xs = interp1d(
ts,
X.T,
bounds_error=False,
fill_value=(X[0], X[-1]),
)(all_ts).T
self.fn = lambda t: all_Xs[np.abs(all_ts - t).argmin()].reshape(
num_ents, m
)
else:
interp = interp1d(
ts,
X.T,
bounds_error=False,
fill_value=(X[0], X[-1]),
)
self.fn = lambda t: interp(t).reshape(num_ents, m)
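# Editor's note: a small sketch of how poses are queried from the batch entity
# (not part of the library). The catalog values are illustrative and a
# SimpleNamespace stands in for the gym State, of which only `next_t` is used.
from types import SimpleNamespace
from scenario_gym.catalog_entry import BoundingBox, CatalogEntry
_walker = Entity(
    CatalogEntry(None, "walker", "pedestrian", "Pedestrian",
                 BoundingBox(0.6, 0.6, 0.0, 0.0), {}, []),
    ref="pedestrian_1",
)
_traj = Trajectory(np.array([[0.0, 0.0, 0.0], [5.0, 10.0, 0.0]]), fields=("t", "x", "y"))
_batch = BatchReplayEntity()
_batch.add_entities([_walker], [_traj])
_poses = _batch.step(SimpleNamespace(next_t=2.5))  # {_walker: pose at t = 2.5}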
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/entity/batch.py
| 0.917363 | 0.473779 |
batch.py
|
pypi
|
from dataclasses import dataclass
from typing import Any, Dict, Optional
from lxml.etree import Element
from scenariogeneration import xosc
from scenario_gym.catalog_entry import (
ArgsKwargs,
BoundingBox,
Catalog,
CatalogEntry,
)
from scenario_gym.entity.base import Entity
from scenario_gym.trajectory import Trajectory
@dataclass
class MiscObjectCatalogEntry(CatalogEntry):
"""Catalog entry for a pedestrian."""
mass: Optional[float]
xosc_names = ["MiscObject"]
@classmethod
def load_data_from_xml(
cls,
element: Element,
catalog: Optional[Catalog] = None,
) -> ArgsKwargs:
"""Load the vehicle from an xml element."""
base_args, _ = super().load_data_from_xml(element, catalog=catalog)
mass = element.attrib.get("mass")
if mass is not None:
mass = float(mass)
return base_args + (mass,), {}
@classmethod
def from_dict(cls, data: Dict[str, Any]):
"""Load the pedestrian from a dictionary."""
catalog = (
Catalog.from_dict(data["catalog"])
if data.get("catalog") is not None
else None
)
return cls(
catalog,
data["catalog_entry"],
data["catalog_category"],
data["catalog_type"],
BoundingBox.from_dict(data["bounding_box"]),
data.get("properties", {}),
data.get("files", []),
data.get("mass"),
)
def to_dict(self) -> Dict[str, Any]:
"""Write the pedestrian to a dictionary."""
data = super().to_dict()
data["mass"] = self.mass
return data
def to_xosc(self) -> xosc.MiscObject:
"""Write the pedestrian to xosc."""
obj = xosc.MiscObject(
self.catalog_entry,
self.mass,
getattr(
xosc.MiscObjectCategory,
self.catalog_category,
xosc.MiscObjectCategory.none,
),
self.bounding_box.to_xosc(),
)
for k, v in self.properties.items():
obj.add_property(k, v)
for f in self.files:
obj.add_property_file(f)
return obj
class MiscObject(Entity):
"""Entity class for pedestrians."""
def __init__(
self,
catalog_entry: MiscObjectCatalogEntry,
trajectory: Optional[Trajectory] = None,
ref: Optional[str] = None,
):
super().__init__(catalog_entry, trajectory=trajectory, ref=ref)
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/entity/misc.py
| 0.940742 | 0.248568 |
misc.py
|
pypi
|
from dataclasses import dataclass
from typing import Any, Dict, Optional
from lxml.etree import Element
from scenariogeneration import xosc
from scenario_gym.catalog_entry import (
ArgsKwargs,
BoundingBox,
Catalog,
CatalogEntry,
)
from scenario_gym.entity.base import Entity
from scenario_gym.trajectory import Trajectory
@dataclass
class PedestrianCatalogEntry(CatalogEntry):
"""Catalog entry for a pedestrian."""
mass: Optional[float]
xosc_names = ["Pedestrian"]
@classmethod
def load_data_from_xml(
cls,
element: Element,
catalog: Optional[Catalog] = None,
) -> ArgsKwargs:
"""Load the vehicle from an xml element."""
base_args, _ = super().load_data_from_xml(element, catalog=catalog)
mass = element.attrib.get("mass")
if mass is not None:
mass = float(mass)
return base_args + (mass,), {}
@classmethod
def from_dict(cls, data: Dict[str, Any]):
"""Load the pedestrian from a dictionary."""
catalog = (
Catalog.from_dict(data["catalog"])
if data.get("catalog") is not None
else None
)
return cls(
catalog,
data["catalog_entry"],
data["catalog_category"],
data["catalog_type"],
BoundingBox.from_dict(data["bounding_box"]),
data.get("properties", {}),
data.get("files", []),
data.get("mass"),
)
def to_dict(self) -> Dict[str, Any]:
"""Write the pedestrian to a dictionary."""
data = super().to_dict()
data["mass"] = self.mass
return data
def to_xosc(self) -> xosc.Pedestrian:
"""Write the pedestrian to xosc."""
obj = xosc.Pedestrian(
self.catalog_entry,
self.mass,
self.bounding_box.to_xosc(),
getattr(
xosc.PedestrianCategory,
self.catalog_category,
xosc.PedestrianCategory.pedestrian,
),
)
for k, v in self.properties.items():
obj.add_property(k, v)
for f in self.files:
obj.add_property_file(f)
return obj
class Pedestrian(Entity):
"""Entity class for pedestrians."""
def __init__(
self,
catalog_entry: PedestrianCatalogEntry,
trajectory: Optional[Trajectory] = None,
ref: Optional[str] = None,
):
super().__init__(catalog_entry, trajectory=trajectory, ref=ref)
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/entity/pedestrian.py
| 0.943854 | 0.243755 |
pedestrian.py
|
pypi
|
from dataclasses import dataclass
from typing import Any, Dict, Optional
from lxml.etree import Element
from scenariogeneration import xosc
from scenario_gym.catalog_entry import (
ArgsKwargs,
BoundingBox,
Catalog,
CatalogEntry,
CatalogObject,
)
from scenario_gym.entity.base import Entity
from scenario_gym.trajectory import Trajectory
@dataclass
class Axle(CatalogObject):
"""A front or rear axle of a vehicle."""
max_steering: float
wheel_diameter: float
track_width: float
position_x: float
position_z: float
@classmethod
def load_data_from_xml(
cls,
element: Element,
catalog: Optional[Catalog] = None,
) -> ArgsKwargs:
"""Load the bounding box data form an xml element."""
return (
float(element.attrib["maxSteering"]),
float(element.attrib["wheelDiameter"]),
float(element.attrib["trackWidth"]),
float(element.attrib["positionX"]),
float(element.attrib["positionZ"]),
), {}
def to_dict(self) -> Dict[str, float]:
"""Write the axle to a dictionary."""
return {
"max_steering": self.max_steering,
"wheel_diameter": self.wheel_diameter,
"track_width": self.track_width,
"position_x": self.position_x,
"position_z": self.position_z,
}
@classmethod
def from_dict(cls, data: Dict[str, Any]):
"""Load the vehicle catalog entry from a dictionary."""
return cls(
data.get("max_steering"),
data.get("wheel_diameter"),
data.get("track_width"),
data.get("position_x"),
data.get("position_z"),
)
def to_xosc(self) -> xosc.Axle:
"""Write the vehicle catalog entry to an xosc element."""
return xosc.Axle(
self.max_steering,
self.wheel_diameter,
self.track_width,
self.position_x,
self.position_z,
)
@dataclass
class VehicleCatalogEntry(CatalogEntry):
"""Catalog entry for a vehicle."""
mass: Optional[float]
max_speed: Optional[float]
max_deceleration: Optional[float]
max_acceleration: Optional[float]
front_axle: Optional[Axle]
rear_axle: Optional[Axle]
xosc_names = ["Vehicle"]
@classmethod
def load_data_from_xml(
cls,
element: Element,
catalog: Optional[Catalog] = None,
) -> ArgsKwargs:
"""Load the vehicle from an xml element."""
base_args, _ = super().load_data_from_xml(element, catalog=catalog)
performance = element.find("Performance")
front_axle = element.find("Axles/FrontAxle")
rear_axle = element.find("Axles/RearAxle")
mass = float(element.attrib["mass"]) if "mass" in element.attrib else None
if performance is not None:
max_speed = float(performance.attrib["maxSpeed"])
max_dec = float(performance.attrib["maxDeceleration"])
max_acc = float(performance.attrib["maxAcceleration"])
else:
max_speed = max_dec = max_acc = None
front_axle = (
Axle.from_xml(front_axle, catalog=catalog)
if front_axle is not None
else None
)
rear_axle = (
Axle.from_xml(rear_axle, catalog=catalog)
if rear_axle is not None
else None
)
veh_args = (
mass,
max_speed,
max_dec,
max_acc,
front_axle,
rear_axle,
)
return base_args + veh_args, {}
@classmethod
def from_dict(cls, data: Dict[str, Any]):
"""Load the vehicle from a dictionary."""
catalog = (
Catalog.from_dict(data["catalog"])
if data.get("catalog") is not None
else None
)
return cls(
catalog,
data["catalog_entry"],
data["catalog_category"],
data["catalog_type"],
BoundingBox.from_dict(data["bounding_box"]),
data.get("properties", {}),
data.get("files", []),
data.get("mass"),
data.get("max_speed"),
data.get("max_deceleration"),
data.get("max_acceleration"),
Axle.from_dict(data["front_axle"])
if data.get("front_axle") is not None
else None,
Axle.from_dict(data["rear_axle"])
if data.get("rear_axle") is not None
else None,
)
def to_dict(self) -> Dict[str, Any]:
"""Write the scenario to a dictionary."""
data = super().to_dict()
data.update(
{
"mass": self.mass,
"max_speed": self.max_speed,
"max_deceleration": self.max_deceleration,
"max_acceleration": self.max_acceleration,
"front_axle": self.front_axle.to_dict()
if self.front_axle is not None
else None,
"rear_axle": self.rear_axle.to_dict()
if self.rear_axle is not None
else None,
}
)
return data
def to_xosc(self) -> xosc.Vehicle:
"""Create an xosc entity object from the catalog entry."""
obj = xosc.Vehicle(
self.catalog_entry,
getattr(
xosc.VehicleCategory,
self.catalog_category,
xosc.VehicleCategory.car,
),
self.bounding_box.to_xosc(),
self.front_axle.to_xosc(),
self.rear_axle.to_xosc(),
self.max_speed,
self.max_acceleration,
self.max_deceleration,
mass=self.mass,
)
for k, v in self.properties.items():
obj.add_property(k, v)
for f in self.files:
obj.add_property_file(f)
return obj
class Vehicle(Entity):
"""Class for vehicles."""
def __init__(
self,
catalog_entry: VehicleCatalogEntry,
trajectory: Optional[Trajectory] = None,
ref: Optional[str] = None,
):
super().__init__(catalog_entry, trajectory=trajectory, ref=ref)
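# Editor's note: a usage sketch for building a vehicle entry from a plain
# dictionary (not part of the library); all values are illustrative. Missing
# optional fields (axles, performance limits) default to None.
_example_vehicle = VehicleCatalogEntry.from_dict(
    {
        "catalog_entry": "example_car",
        "catalog_category": "car",
        "catalog_type": "Vehicle",
        "bounding_box": {"width": 2.0, "length": 4.5, "center_x": 1.4, "center_y": 0.0},
        "mass": 1500.0,
        "max_speed": 55.0,
    }
)
_car = Vehicle(_example_vehicle, ref="ego")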
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/entity/vehicle.py
| 0.946621 | 0.336304 |
vehicle.py
|
pypi
|
import os
import warnings
from contextlib import suppress
from typing import Dict, List, Optional, Type
import numpy as np
from lxml import etree
from lxml.etree import Element
from scenario_gym.entity import Entity, Pedestrian, Vehicle
from scenario_gym.road_network import RoadNetwork
from scenario_gym.scenario import Scenario, ScenarioAction
from scenario_gym.scenario.actions import UserDefinedAction
from scenario_gym.trajectory import Trajectory
from scenario_gym.utils import load_properties_from_xml
from .catalogs import load_object, read_catalog
def import_scenario(
osc_file: str,
relabel: bool = True,
entity_types: Optional[List[Type[Entity]]] = None,
) -> Scenario:
"""
Import a scenario from an OpenScenario file.
Parameters
----------
osc_file : str
The filepath to the OpenScenario file.
relabel : bool
Whether to relabel entities after loading.
entity_types : Optional[List[Type[Entity]]]
Additional entity types to use when loading the scenario. Can be used to
allow custom entities to be loaded from OpenSCENARIO.
"""
if not os.path.exists(osc_file):
raise FileNotFoundError
cwd = os.path.dirname(osc_file)
et = etree.parse(osc_file)
osc_root = et.getroot()
entities = {}
# Read catalogs:
catalogs: Dict[str, Dict[str, Entity]] = {}
for catalog_location in osc_root.iterfind("CatalogLocations/"):
rel_catalog_path = catalog_location.find("Directory").attrib["path"]
if not os.path.isabs(rel_catalog_path):
catalog_path = os.path.join(cwd, rel_catalog_path)
else:
catalog_path = rel_catalog_path
for catalog_file in os.listdir(catalog_path):
if catalog_file.endswith(".xosc"):
catalog, entries = read_catalog(
os.path.join(catalog_path, catalog_file),
entity_types=entity_types,
)
catalogs[catalog.name] = entries
# Import road network:
rn_path = None
scene_graph_file = osc_root.find("RoadNetwork/SceneGraphFile")
if scene_graph_file is not None:
rn_path = scene_graph_file.attrib["filepath"]
else:
logic_file = osc_root.find("RoadNetwork/LogicFile")
if logic_file is not None:
rn_path = logic_file.attrib["filepath"]
road_network = None
if rn_path is not None:
if not os.path.isabs(rn_path):
filepath = os.path.join(cwd, rn_path)
else:
filepath = rn_path
extension = os.path.splitext(filepath)[1]
if extension == "":
filepath = f"{filepath}.json"
with suppress(FileNotFoundError):
road_network = RoadNetwork.create_from_file(filepath)
# add the entities to the scenario
for scenario_object in osc_root.iterfind("Entities/ScenarioObject"):
entity_ref = scenario_object.attrib["name"]
cat_ref = scenario_object.find("CatalogReference")
if cat_ref is None:
ent = None
for element in scenario_object.getchildren():
ent = load_object(element)
if ent is None:
warnings.warn(
"Could not find a catalog reference or entry for entity "
f"{ent.tag}.Perhaps you need to add an entity type to "
"`entity_types`."
)
else:
ent.ref = entity_ref
entities[entity_ref] = ent
else:
catalog_name = cat_ref.attrib["catalogName"]
entry_name = cat_ref.attrib["entryName"]
try:
entity = catalogs[catalog_name][entry_name].copy()
entity.ref = entity_ref
entities[entity_ref] = entity
except KeyError as e:
if catalog_name not in catalogs:
warnings.warn(f"Could not find catalog: {catalog_name}")
elif entry_name not in catalogs[catalog_name]:
warnings.warn(
f"Could not find entry {entry_name} in catalog "
f"{catalog_name}."
)
else:
raise e
# Read init actions:
for private in osc_root.iterfind("Storyboard/Init/Actions/Private"):
entity_ref = private.attrib["entityRef"]
for wp in private.iterfind(
"PrivateAction/TeleportAction/Position/WorldPosition"
):
tp = traj_point_from_time_and_position(0, wp)
# Add a single-waypoint trajectory:
if entity_ref in entities:
entities[entity_ref].trajectory = Trajectory(np.stack([tp], axis=0))
# Read maneuver actions:
actions = []
for man_group in osc_root.iterfind("Storyboard/Story/Act/ManeuverGroup"):
entity_ref = man_group.find("Actors/EntityRef")
assert (
entity_ref is not None
), "Could not find entity reference in maneuver group."
entity_ref = entity_ref.attrib["entityRef"]
entity = entities.get(entity_ref)
if entity is None:
continue
for event in man_group.findall("Maneuver/Event"):
traj_action = event.find(
"Action/PrivateAction/RoutingAction/FollowTrajectoryAction"
)
if traj_action is not None:
trajectory = read_trajectory_event(
traj_action,
road_network=road_network,
)
if trajectory is not None:
entity.trajectory = trajectory
continue
user_action = event.find("Action/UserDefinedAction")
start_trigger = event.find("StartTrigger")
if user_action is not None:
actions.extend(
load_user_defined_action(
entity,
user_action,
start_trigger=start_trigger,
)
)
header = osc_root.find("FileHeader")
if header is not None:
properties, files = load_properties_from_xml(header)
if files and "files" not in properties:
properties["files"] = files
else:
properties = {}
scenario = Scenario(
list(entities.values()),
name=os.path.splitext(os.path.basename(osc_file))[0],
road_network=road_network,
properties=properties,
actions=actions,
)
if relabel:
scenario = relabel_scenario(scenario)
return scenario
def read_trajectory_event(
trajectory_action: Element,
road_network: Optional[RoadNetwork] = None,
) -> Optional[Trajectory]:
"""Read a trajectory event from a ManeuverGroup."""
# trajectory points
trajectory_points = []
vertices = trajectory_action.findall(
"TrajectoryRef/Trajectory/Shape/Polyline/Vertex"
)
vertices.extend(trajectory_action.findall("Trajectory/Shape/Polyline/Vertex"))
if not vertices:
return None
for vertex in vertices:
t = float(vertex.attrib["time"])
wp = vertex.find("Position/WorldPosition")
trajectory_points.append(traj_point_from_time_and_position(t, wp))
traj_data = np.stack(trajectory_points, axis=0)
if (np.isnan(traj_data[:, 3]).sum() > 0) and (road_network is not None):
traj_data[:, 3] = road_network.elevation_at_point(
traj_data[:, 1], traj_data[:, 2]
)
return Trajectory(traj_data)
def load_user_defined_action(
entity: Entity,
user_action: Element,
start_trigger: Optional[Element] = None,
) -> List[ScenarioAction]:
"""Load a user-defined action from an OpenSCENARIO file."""
    cond = (
        start_trigger.find(
            "ConditionGroup/Condition/ByValueCondition/SimulationTimeCondition"
        )
        if start_trigger is not None
        else None
    )
    t = float(cond.attrib.get("value")) if cond is not None else 0.0
acts = []
for child in user_action.getchildren():
acts.append(
UserDefinedAction(
t,
child.tag,
entity.ref,
{k: v for k, v in child.attrib.items()},
)
)
return acts
def relabel_scenario(scenario: Scenario) -> Scenario:
"""
Relabel the entities of the scenario.
    Entities are relabelled to ego, vehicle_0, vehicle_1, ...,
    pedestrian_0, ..., other_0, ...
"""
vehicles, pedestrians, others = 0, 0, 0
scenario.entities[0].ref = "ego"
old_to_new = {}
for e in scenario.entities[1:]:
cur = e.ref
with suppress(KeyError):
scenario._ref_to_entity.pop(cur)
if isinstance(e, Vehicle):
e.ref = f"vehicle_{vehicles}"
vehicles += 1
elif isinstance(e, Pedestrian):
e.ref = f"pedestrian_{pedestrians}"
pedestrians += 1
else:
e.ref = f"other_{others}"
others += 1
scenario._ref_to_entity[e.ref] = e
old_to_new[cur] = e.ref
for action in scenario.actions:
if action.entity_ref in old_to_new:
action.entity_ref = old_to_new[action.entity_ref]
return scenario
def traj_point_from_time_and_position(t, world_position) -> np.ndarray:
"""Return the trajectory point as an array [t, x, y, z, h, p, r]."""
return np.array(
[
t,
float(world_position.attrib["x"]),
float(world_position.attrib["y"]),
float(world_position.attrib.get("z", np.NaN)),
float(world_position.attrib.get("h", np.NaN)),
float(world_position.attrib.get("p", np.NaN)),
float(world_position.attrib.get("r", np.NaN)),
],
)
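# Usage sketch (illustrative only): importing a scenario from an OpenSCENARIO
# file with `import_scenario`. The path "scenarios/example.xosc" is an
# assumption for illustration; catalogs and the road network referenced by the
# file are resolved relative to its directory, as implemented above.
if __name__ == "__main__":
    scenario = import_scenario("scenarios/example.xosc", relabel=True)
    print(scenario.name)
    for entity in scenario.entities:
        print(entity.ref, type(entity).__name__)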
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/xosc_interface/read.py
| 0.761627 | 0.252321 |
read.py
|
pypi
|
import os
from typing import List, Optional
from xml.etree import ElementTree as ET
import numpy as np
from scenariogeneration import xosc
from scenario_gym.entity import Entity
from scenario_gym.scenario import Scenario
from scenario_gym.trajectory import is_stationary
def write_scenario(
scenario: Scenario,
filepath: str,
base_road_network_path: str = "../Road_Networks",
    road_network_extension: str = "json",
base_catalog_path: str = "../Catalogs",
use_catalog_references: bool = True,
osc_minor_version: int = 2,
) -> None:
"""
Write a scenario to an OpenScenario file.
Parameters
----------
scenario : Scenario
The scenario object.
filepath : str
The desired filepath.
base_road_network_path : str
Base path to the road networks.
    road_network_extension : str
The extension of the road network file.
base_catalog_path : str
Base relative path to the catalogs.
use_catalog_references : bool
Whether to use catalog references for entities that have catalogs.
osc_minor_version : int
The OpenScenario minor version.
"""
name = (
scenario.name
if scenario.name is not None
        else os.path.splitext(os.path.basename(filepath))[0]
)
rn_name = (
scenario.road_network.name
        if scenario.road_network is not None
else None
)
scenegraph = os.path.join(
base_road_network_path,
f"{rn_name}.{road_network_extenstion}",
)
rn = xosc.RoadNetwork("", scenegraph)
entities = xosc.Entities()
catalog = xosc.Catalog()
for e in scenario.entities:
ce = e.catalog_entry
if use_catalog_references and ce.catalog is not None:
if ce.catalog_type not in catalog.catalogs:
catalog_dir = os.path.join(
base_catalog_path,
ce.catalog.group_name,
f"{ce.catalog_type}Catalogs",
)
catalog.add_catalog(f"{ce.catalog_type}Catalog", catalog_dir)
obj = xosc.CatalogReference(ce.catalog.name, ce.catalog_entry)
else:
obj = ce.to_xosc()
entities.add_scenario_object(e.ref, obj)
init = xosc.Init()
for e in scenario.entities:
if is_stationary(e.trajectory.data[:, 1:]):
pose = e.trajectory.data[0, 1:]
if not np.isfinite(pose[3]):
raise ValueError(f"Heading must be finite but is {pose[2]}.")
action = xosc.TeleportAction(
xosc.WorldPosition(
*(float(p) if np.isfinite(p) else None for p in pose)
)
)
init.add_init_action(e.ref, action)
act = xosc.Act(
filepath.replace(".xosc", ""),
get_simulation_time_trigger(0),
)
maneuver_groups = []
for idx, e in enumerate(scenario.entities):
m_group = get_maneuver_group(
e, osc_minor_version=osc_minor_version, check_stationary=(idx > 0)
)
if m_group:
maneuver_groups.append(m_group)
act.add_maneuver_group(m_group)
story = xosc.Story(name)
story.add_act(act)
sb = xosc.StoryBoard(init)
sb.add_story(story)
properties = xosc.Properties()
for k, v in scenario.properties.items():
if k == "files" and isinstance(v, list):
for f in v:
properties.add_file(f)
else:
properties.add_property(k, str(v))
desc = (
f"Scenario {name} recorded in the dRISK Scenario Gym subject to the dRISK "
"License Agreement (https://drisk.ai/license/)."
)
s = xosc.Scenario(
desc,
"∂RISK",
xosc.ParameterDeclarations(),
entities=entities,
storyboard=sb,
roadnetwork=rn,
catalog=catalog,
osc_minor_version=osc_minor_version,
header_properties=properties,
)
element = ET.Element("OpenSCENARIO")
element.extend(
(
s.header.get_element(),
s.parameters.get_element(),
s.catalog.get_element(),
s.roadnetwork.get_element(),
s.entities.get_element(),
s.storyboard.get_element(),
)
)
s.write_xml(filepath)
def get_simulation_time_trigger(t: float, delay: float = 0.0) -> xosc.ValueTrigger:
"""Get a simulation time trigger."""
return xosc.ValueTrigger(
"startSimTrigger",
delay=delay,
conditionedge=xosc.ConditionEdge.rising,
valuecondition=xosc.SimulationTimeCondition(
value=t, rule=xosc.Rule.greaterThan
),
)
def get_follow_trajectory_event(
e: Entity,
osc_minor_version: int = 0,
check_stationary: bool = True,
) -> Optional[xosc.Event]:
"""Get a follow trajectory event for an entity."""
if check_stationary and is_stationary(e.trajectory.data):
return None
ts, poses = e.trajectory.t, e.trajectory.data[:, 1:]
positions = [
xosc.WorldPosition(*(float(p) if np.isfinite(p) else None for p in pose))
for pose in poses
]
polyline = xosc.Polyline(ts.tolist(), positions)
traj = xosc.Trajectory(f"{e.ref}_trajectory", False)
traj.add_shape(polyline)
follow_trajectory_action = xosc.FollowTrajectoryAction(
traj,
following_mode=xosc.FollowingMode.position,
reference_domain=xosc.ReferenceContext.absolute,
scale=1,
offset=0,
)
follow_trajectory_action.version_minor = osc_minor_version
follow_trajectory_event = xosc.Event(
f"{e.ref}_follow_trajectory_event",
xosc.Priority.override,
)
follow_trajectory_event.add_action(
"follow_trajectory_action",
follow_trajectory_action,
)
follow_trajectory_event.add_trigger(get_simulation_time_trigger(0))
return follow_trajectory_event
def get_events(
e: Entity, osc_minor_version: int = 0, check_stationary: bool = True
) -> List[xosc.Event]:
"""Get events for the given entity."""
events = []
follow_trajectory_event = get_follow_trajectory_event(
e,
osc_minor_version=osc_minor_version,
check_stationary=check_stationary,
)
if follow_trajectory_event:
events.append(follow_trajectory_event)
# NOTE: ignoring non-trajectory actions for now
return events
def get_maneuver(
e: Entity, osc_minor_version: int = 0, check_stationary: bool = True
) -> Optional[xosc.Maneuver]:
"""Get maneuvers for the given entity."""
events = get_events(
e, osc_minor_version=osc_minor_version, check_stationary=check_stationary
)
if events:
maneuver = xosc.Maneuver(f"{e.ref}_maneuver")
for event in events:
maneuver.add_event(event)
return maneuver
def get_maneuver_group(
e: Entity, osc_minor_version: int = 0, check_stationary: bool = True
) -> Optional[xosc.ManeuverGroup]:
"""Get the maneuver group for the given entity."""
maneuver = get_maneuver(
e, osc_minor_version=osc_minor_version, check_stationary=check_stationary
)
if maneuver:
mangrp = xosc.ManeuverGroup(f"{e.ref}_maneuver_group")
mangrp.add_actor(e.ref)
mangrp.add_maneuver(maneuver)
return mangrp
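# Usage sketch (illustrative only): a round trip that reads a scenario and
# writes it back out with catalog references. Both file paths are assumptions
# for illustration, and the imported scenario is assumed to reference a road
# network.
if __name__ == "__main__":
    from scenario_gym.xosc_interface.read import import_scenario

    scenario = import_scenario("scenarios/example.xosc")
    write_scenario(
        scenario,
        "scenarios/example_rewritten.xosc",
        use_catalog_references=True,
        osc_minor_version=2,
    )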
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/xosc_interface/write.py
| 0.856122 | 0.284677 |
write.py
|
pypi
|
import os
from collections import defaultdict
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Type
from lxml import etree
from lxml.etree import Element
from scenariogeneration import xosc
from scenario_gym.catalog_entry import Catalog, CatalogEntry
from scenario_gym.entity import DEFAULT_ENTITY_TYPES, Entity
def load_object(
entry: Element,
catalog: Optional[Catalog] = None,
entity_types: Tuple[Type[Entity]] = DEFAULT_ENTITY_TYPES,
catalog_objects: Optional[List[Type[CatalogEntry]]] = None,
) -> Optional[Entity]:
"""Try to load a catalog entry with given catalog objects."""
if catalog_objects is None:
catalog_objects = [Ent._catalog_entry_type() for Ent in entity_types]
for Ent, Obj in zip(entity_types, catalog_objects):
types = Obj.xosc_names if Obj.xosc_names is not None else [Obj.__name__]
if entry.tag in types:
obj = Obj.from_xml(entry, catalog=catalog)
return Ent(obj)
@lru_cache(maxsize=None)
def read_catalog(
catalog_file: str,
entity_types: Optional[Tuple[Type[Entity]]] = None,
) -> Tuple[Catalog, Dict[str, Entity]]:
"""
    Read a catalog and return the catalog object and a dictionary of entities.
Parameters
----------
catalog_file : str
Filepath of the catalog file.
    entity_types : Optional[Tuple[Type[Entity]]]
        Tuple of extra Entity subclasses that will be used when reading
        catalogs. Can be used for reading custom objects from catalog files.
        Must be a tuple (not a list) so that the arguments stay hashable for
        lru_cache.
Returns
-------
catalog : Catalog
The catalog.
entries : Dict[str, Entity]
A dictionary mapping each entry name to an entity with that catalog entry.
These can then be cloned to create entities using the chosen catalog entry.
"""
if entity_types is None:
entity_types = DEFAULT_ENTITY_TYPES
else:
entity_types = entity_types + DEFAULT_ENTITY_TYPES
catalog_objects = [Ent._catalog_entry_type() for Ent in entity_types]
et = etree.parse(catalog_file)
osc_root = et.getroot()
catalog_element = osc_root.find("Catalog")
try:
catalog_group_name = catalog_file.split(os.sep)[-3]
except IndexError:
catalog_group_name = "Catalog"
catalog = Catalog(catalog_element.attrib["name"], catalog_group_name)
entries = {}
for element in catalog_element.getchildren():
entry = load_object(
element,
catalog=catalog,
entity_types=entity_types,
catalog_objects=catalog_objects,
)
if entry is None:
entry = Entity(CatalogEntry.from_xml(element, catalog=catalog))
entries[entry.catalog_entry.catalog_entry] = entry
return catalog, entries
def write_catalogs(dirname: str, entries: List[CatalogEntry]) -> None:
"""Create catalog files for a list of catalog entries."""
c_to_e = defaultdict(list)
for entry in entries:
c_to_e[entry.catalog].append(entry)
group_to_c = defaultdict(list)
for catalog in c_to_e:
group_to_c[catalog.group_name].append(catalog)
for group_name, catalogs in group_to_c.items():
os.mkdir(os.path.join(dirname, group_name))
for catalog in catalogs:
c_entries = c_to_e[catalog]
catalog_type = f"{c_entries[0].catalog_type}Catalogs"
catalog_dir = os.path.join(dirname, group_name, catalog_type)
if not os.path.exists(catalog_dir):
os.mkdir(catalog_dir)
catalog_file = os.path.join(catalog_dir, f"{catalog.name}.xosc")
catalog_obj = xosc.CatalogFile()
catalog_obj.create_catalog(
catalog_file,
catalog.name,
(
f"This {c_entries[0].catalog_type}.lower() catalog was "
"generated by scenario_gym."
),
"scenario_gym",
)
for entry in c_entries:
catalog_obj.add_to_catalog(entry.to_xosc())
catalog_obj.dump()
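# Usage sketch (illustrative only): reading a catalog file and re-writing its
# entries into a fresh directory with `write_catalogs`. The catalog path is an
# assumption for illustration. Note that `read_catalog` is cached, so any
# `entity_types` argument must be a hashable tuple.
if __name__ == "__main__":
    import tempfile

    catalog, entries = read_catalog("Catalogs/VehicleCatalogs/ExampleCatalog.xosc")
    print(catalog.name, list(entries))
    with tempfile.TemporaryDirectory() as tmp_dir:
        write_catalogs(tmp_dir, [e.catalog_entry for e in entries.values()])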
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/xosc_interface/catalogs.py
| 0.800458 | 0.290327 |
catalogs.py
|
pypi
|
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import numpy as np
from shapely.geometry import LineString, Polygon
from scenario_gym.utils import ArgsKwargs
from .base import RoadGeometry, RoadLike
class LaneType(Enum):
"""Enumerates OpenDrive standard lane types."""
none = 0
driving = 1
HOV = 2
bidirectional = 3
biking = 4
border = 5
bus = 6
connectingRamp = 7
curb = 8
entry = 9
exit = 10
median = 11
mwyEntry = 12
mwyExit = 13
offRamp = 14
onRamp = 15
parking = 16
rail = 17
restricted = 18
roadWorks = 19
shoulder = 20
sidewalk = 21
special1 = 22
special2 = 23
special3 = 24
stop = 25
taxi = 26
tram = 27
class Lane(RoadLike):
"""
A lane object.
    The lane has a center and a boundary and holds information about its
    successor and predecessor lanes.
"""
walkable = False
@classmethod
def load_data_from_dict(cls, l: Dict[str, Any]) -> ArgsKwargs:
"""Create from dictionary."""
args, kwargs = super().load_data_from_dict(l)
typ = l["type"] if "type" in l else "driving"
lane_type = LaneType[typ if typ in LaneType.__members__ else "driving"]
return (
*args,
list(set(l["successors"])) if "successors" in l else [],
list(set(l["predecessors"])) if "predecessors" in l else [],
lane_type,
), kwargs
def __init__(
self,
id: str,
boundary: Polygon,
center: LineString,
successors: List[str],
predecessors: List[str],
_type: Union[str, LaneType],
elevation: Optional[np.ndarray] = None,
):
super().__init__(id, boundary, center, elevation=elevation)
self.successors = successors
self.predecessors = predecessors
if isinstance(_type, str):
if _type not in LaneType.__members__:
raise ValueError(
f"{self.type} is not a valid lane type. "
"Check objects.py to see all valid lane types.",
)
else:
_type = LaneType[_type]
self._type = _type
@property
def type(self) -> LaneType:
"""Get the type of the lane."""
return self._type
def to_dict(self) -> Dict[str, Any]:
"""Return a dictionary of the objects data."""
data = super().to_dict()
data.update(
{
"successors": self.successors,
"predecessors": self.predecessors,
}
)
if self.type is not None:
data["type"] = self.type.name
return data
class Road(RoadLike):
"""
A road object.
The road has a center and a boundary and lanes.
"""
walkable = False
@classmethod
def load_data_from_dict(cls, r: Dict[str, Any]) -> ArgsKwargs:
"""Create from dictionary."""
args, kwargs = super().load_data_from_dict(r)
lanes = [Lane.from_dict(l) for l in r["lanes" if "lanes" in r else "Lanes"]]
return (*args, lanes), kwargs
def __init__(
self,
id: str,
boundary: Polygon,
center: LineString,
lanes: List[Lane],
elevation: Optional[np.ndarray] = None,
):
super().__init__(id, boundary, center, elevation=elevation)
self.lanes = lanes
def to_dict(self) -> Dict[str, Any]:
"""Return a dictionary of the objects data."""
data = super().to_dict()
data["lanes"] = [l.to_dict() for l in self.lanes]
return data
class Intersection(RoadGeometry):
"""
An intersection object.
The intersection has a boundary, connecting lanes and the ids of
the roads it connects.
"""
driveable = True
walkable = False
@classmethod
def load_data_from_dict(cls, i: Dict[str, Any]) -> ArgsKwargs:
"""Create from dictionary."""
args, kwargs = super().load_data_from_dict(i)
lanes = [Lane.from_dict(l) for l in i["lanes" if "lanes" in i else "Lanes"]]
return (*args, lanes, i["connecting_roads"]), kwargs
def __init__(
self,
id: str,
boundary: Polygon,
lanes: List[Lane],
connecting_roads: List[str],
elevation: Optional[np.ndarray] = None,
):
super().__init__(id, boundary, elevation=elevation)
self.lanes = lanes
self.connecting_roads = connecting_roads
def to_dict(self) -> Dict[str, Any]:
"""Return a dictionary of the objects data."""
data = super().to_dict()
data.update(
{
"lanes": [l.to_dict() for l in self.lanes],
"connecting_roads": self.connecting_roads,
}
)
return data
class Pavement(RoadLike):
"""
A pavement object.
The pavement has a boundary and a center.
"""
driveable = False
class Crossing(RoadLike):
"""
A crossing object.
The crossing has a boundary and center and ids of pavements it connects.
"""
driveable = False
@classmethod
def load_data_from_dict(cls, c: Dict[str, Any]) -> ArgsKwargs:
"""Create from dictionary."""
args, kwargs = super().load_data_from_dict(c)
return (
*args,
c["pavements" if "pavements" in c else "Pavements"],
), kwargs
def __init__(
self,
id: str,
boundary: Polygon,
center: LineString,
pavements: List[str],
elevation: Optional[np.ndarray] = None,
):
super().__init__(id, boundary, center, elevation=elevation)
self.pavements = pavements
def to_dict(self) -> Dict[str, Any]:
"""Return a dictionary of the objects data."""
data = super().to_dict()
data["pavements"] = self.pavements
return data
class Building(RoadGeometry):
"""
A geometry describing the area of a building.
These are modelled as solid blocks that cannot be
entered by vehicles or pedestrians.
"""
driveable = False
impenetrable = True
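# Usage sketch (illustrative only): constructing a Lane and a Road directly.
# The ids and geometry are invented for illustration; real road networks are
# usually built via RoadNetwork.create_from_dict / create_from_file, which use
# the `from_dict` constructors defined above.
if __name__ == "__main__":
    from shapely.geometry import LineString, Polygon

    boundary = Polygon([(0, -2), (10, -2), (10, 2), (0, 2)])
    center = LineString([(0, 0), (10, 0)])
    lane = Lane("lane_0", boundary, center, ["lane_1"], [], LaneType.driving)
    road = Road("road_0", boundary, center, [lane])
    print(road.to_dict().keys())
    print(lane.type.name, lane.successors)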
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/road_network/objects.py
| 0.930261 | 0.417271 |
objects.py
|
pypi
|
from typing import Any, Dict, Optional
import numpy as np
from shapely.geometry import LineString, Polygon
from shapely.validation import make_valid
from scenario_gym.utils import ArgsKwargs
from .utils import load_road_geometry_from_json, polygon_to_data
class RoadObject:
"""
Base class for an object in the road network.
All objects have an id attribute and implement __eq__ and
__hash__ methods.
"""
@classmethod
def from_dict(cls, data: Dict[str, Any]):
"""Create from dictionary."""
args, kwargs = cls.load_data_from_dict(data)
return cls(*args, **kwargs)
@classmethod
def load_data_from_dict(cls, data: Dict[str, Any]) -> ArgsKwargs:
"""Load raw data from dictionary."""
return (data["Id" if "Id" in data else "id"],), {}
def __init__(self, id: str):
self.id = id
def __eq__(self, other: Any) -> bool:
"""Check if another road object is the same as the current object."""
if isinstance(other, str):
return self.id == other
return hasattr(other, "id") and (other.id == self.id)
def __hash__(self) -> int:
"""Return a hash of the id."""
return hash(self.id)
def __repr__(self) -> str:
"""Return a repr string with the object type and its id."""
return f"{self.__class__.__name__}(id={self.id})"
def to_dict(self) -> Dict[str, Any]:
"""Return a dictionary with id."""
return {"id": self.id}
class RoadGeometry(RoadObject):
"""
A geometric object in the road.
These objects have a boundary given by a shapely polygon.
The driveable variable indicates if vehicles should use
the geometry. Similarly the walkable surface variable affects
whether the geometry is included in the walkable surface. The
impenetrable variable indicates if a geometry may not be entered
by any entity. An instance or subclass my overwrite these variables.
"""
driveable = True
walkable = True
impenetrable = False
@classmethod
def load_data_from_dict(cls, data: Dict[str, Any]) -> ArgsKwargs:
"""Load raw data from dictionary."""
(obj_id,), _ = super().load_data_from_dict(data)
boundary, _ = load_road_geometry_from_json(data)
if "Elevation" in data and data["Elevation"] is not None:
elevation = np.array(data["Elevation"])
else:
elevation = None
return (obj_id, boundary), {"elevation": elevation}
def __init__(
self, id: str, boundary: Polygon, elevation: Optional[np.ndarray] = None
):
super().__init__(id)
self.boundary = self._fix_boundary(boundary)
if elevation is not None:
assert (
elevation.ndim == 2 and elevation.shape[1] == 3
), "Invalid shape for elevation profile."
self.elevation = elevation
def _fix_boundary(
self,
boundary: Polygon,
maxiter: int = 2000,
tol: float = 1e-3,
) -> Polygon:
"""
Fix the boundary if it is invalid.
If the boundary is not a polygon or is not a valid geometry then tries to
fix it with the `make_valid` function or by repeatedly buffering it with a
small tolerance. If this fails then a ValueError is raised.
"""
if isinstance(boundary, Polygon) and boundary.is_valid:
return boundary
if not boundary.is_valid:
new = make_valid(boundary)
if isinstance(new, Polygon) and new.is_valid:
return new
new = boundary.buffer(0.0)
for _ in range(maxiter):
new = new.buffer(tol)
if new.is_valid and isinstance(new, Polygon):
return new
raise ValueError("Invalid geometry.")
def to_dict(self) -> Dict[str, Any]:
"""Return a dictionary with id and boundary."""
data = super().to_dict()
data["Boundary"] = polygon_to_data(self.boundary)
data["Elevation"] = (
self.elevation.tolist() if self.elevation is not None else None
)
return data
class RoadLike(RoadGeometry):
"""
A geometry with a center line.
Used for roads, lanes, pavements, etc.
"""
@classmethod
def load_data_from_dict(cls, data: Dict[str, Any]) -> ArgsKwargs:
"""Load raw data from dictionary."""
boundary, center = load_road_geometry_from_json(data)
if "Elevation" in data and data["Elevation"] is not None:
elevation = np.array(data["Elevation"])
else:
elevation = None
return (data["Id" if "Id" in data else "id"], boundary, center), {
"elevation": elevation
}
def __init__(
self,
id: str,
boundary: Polygon,
center: LineString,
elevation: Optional[np.ndarray] = None,
):
super().__init__(id, boundary, elevation=elevation)
self.center = center
def to_dict(self) -> Dict[str, Any]:
"""Return a dictionary with id, boundary and center."""
data = super().to_dict()
data["Center"] = [
{"x": float(x), "y": float(y)} for x, y in self.center.coords
]
return data
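# Usage sketch (illustrative only): RoadGeometry repairs an invalid
# self-intersecting boundary on construction via `_fix_boundary`. The
# "bowtie" polygon below is invented for illustration.
if __name__ == "__main__":
    from shapely.geometry import Polygon

    bowtie = Polygon([(0, 0), (2, 2), (2, 0), (0, 2)])  # invalid (self-intersecting)
    geom = RoadGeometry("geom_0", bowtie)
    print(bowtie.is_valid, geom.boundary.is_valid)  # False True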
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/road_network/base.py
| 0.964355 | 0.602705 |
base.py
|
pypi
|
import json
from contextlib import suppress
from functools import _lru_cache_wrapper, lru_cache, partial
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import numpy as np
from pyxodr.road_objects.network import RoadNetwork as xodrRoadNetwork
from scipy.interpolate import LinearNDInterpolator, NearestNDInterpolator
from scipy.spatial import Delaunay
from shapely.geometry import MultiPolygon, Point, Polygon
from shapely.ops import unary_union
from scenario_gym.utils import ArrayLike, NDArray, cached_property
from .base import RoadGeometry, RoadObject
from .objects import (
Building,
Crossing,
Intersection,
Lane,
LaneType,
Pavement,
Road,
)
from .xodr import xodr_to_sg_roads
class RoadNetwork:
"""
A collection of roads, intersections, etc that form a road network.
The road network implements layers that give different objects in
    the network. Default layers can be seen in the object_names attribute.
    Any list of RoadObject or RoadGeometry subclass instances can be passed
as keywords to add custom objects to the road network.
"""
_default_object_names: Dict[str, Type[RoadObject]] = {
"roads": Road,
"intersections": Intersection,
"lanes": Lane,
"pavements": Pavement,
"crossings": Crossing,
"buildings": Building,
}
@classmethod
def create_from_file(cls, filepath: str):
"""
Create the road network from a file.
Parameters
----------
filepath : str
The path to the file.
"""
path = Path(filepath).absolute()
if not path.exists():
raise FileNotFoundError(f"File not found at: {path}.")
if path.suffix in (".json", ""):
return cls.create_from_json(filepath)
elif path.suffix == ".xodr":
return cls.create_from_xodr(filepath)
raise ValueError(f"Unknown file type: {path.suffix}.")
@classmethod
@lru_cache(maxsize=15)
def create_from_json(cls, filepath: str):
"""
Create the road network from a json file.
Parameters
----------
filepath : str
The path to the json file.
"""
with open(filepath) as f:
data = json.load(f)
return cls.create_from_dict(data, name=Path(filepath).stem)
@classmethod
@lru_cache(maxsize=15)
def create_from_xodr(
cls,
filepath: str,
resolution: float = 0.1,
simplify_tolerance: float = 0.2,
ignored_lane_types: Optional[Tuple[str]] = None,
):
"""
Import a road network from an OpenDRIVE file.
Will first parse the road network and then convert it to
a scenario_gym road network. Every lane section of the file
is converted to a road and each lane within the section is
converted to a lane. Connectivity information is stored in
the lanes. Any lane of type None is ignored.
Parameters
----------
filepath : str
The filepath to the xodr file.
resolution : float
Resolution for importing the base OpenDRIVE file.
simplify_tolerance : float
Points per m for simplifying center and boundary lines.
ignored_lane_types : Tuple[str], optional
A tuple of lane types that should be ignored from the
OpenDRIVE file. If unspecified, no types are ignored.
"""
path = Path(filepath).absolute()
if not path.exists():
raise FileNotFoundError(f"File not found at: {path}.")
if ignored_lane_types is not None:
ignored_lane_types = set(ignored_lane_types)
# parse OpenDRIVE file
xodr_network = xodrRoadNetwork(
str(path),
resolution=resolution,
ignored_lane_types=ignored_lane_types,
)
roads = xodr_to_sg_roads(
xodr_network,
simplify_tolerance,
)
return cls(roads=roads, name=path.stem)
@classmethod
def create_from_dict(cls, data: Dict, **kwargs):
"""
        Create a road network from a dictionary of road data.
The dictionary must have keys 'Roads' and 'Intersections' and
optionally with keys for other road objects. Each of these must hold
a list of dicts with the data for each object. These should hold their
required fields e.g. Center, Boundary, successors, predecessors.
"""
assert (
"Roads" in data or "roads" in data
), "Json data must contain road information."
assert (
"Intersections" in data or "intersections" in data
), "Json data must contain intersection information."
objects = {}
for obj, obj_cls in cls._default_object_names.items():
if obj in data:
key = obj
elif obj.capitalize() in data:
key = obj.capitalize()
else:
continue
objects[obj] = [obj_cls.from_dict(obj_data) for obj_data in data[key]]
properties = data.get("properties")
if "name" not in kwargs and "name" in data:
kwargs["name"] = data["name"]
return cls(**kwargs, properties=properties, **objects)
def __init__(
self,
name: Optional[str] = None,
properties: Optional[Dict[str, Any]] = None,
**road_objects: Dict[str, List[RoadObject]],
):
"""
Construct the road network.
This takes lists of road objects as keywords. The keyword used determines
how the objects will be stored. E.g. `roads=[...]` will define the `roads`
attribute. This way custom road objects can be passed e.g. passing
`road_markings=[...]` will mean a `road_markings` attribute is created.
Every object in the list must be a subclass of `RoadObject`. `roads` and
`intersections` must be passed even if they are empty lists.
Parameters
----------
name: Optional[str]
Optional name for the road network.
properties: Optional[Dict[str, Any]]
Optional properties for the road network.
road_objects : Dict[str, List[RoadObject]]
The road objects as keywords. `roads` and `intersections` must be
passed.
"""
self.name = name
self.properties = properties if properties is not None else {}
# cached elevation interpolation functions
self._hull = None
self._inside_fn = None
        self._outside_fn = None
self._lane_parents: Dict[Lane, Optional[Union[Road, Intersection]]] = {}
self.object_names = self._default_object_names.copy()
self.object_classes = {v: k for k, v in self.object_names.items()}
all_object_names = list(
set(self.object_names.keys())
.union(road_objects.keys())
.difference(["roads", "intersections"])
)
for object_name in ["roads", "intersections"] + all_object_names:
objects = (
road_objects[object_name] if object_name in road_objects else []
)
assert all((isinstance(obj, RoadObject) for obj in objects)), (
"Only lists of RoadObject subclasses should be provided not:"
f"{object_name}."
)
if object_name not in self.object_names:
self.object_names[object_name] = (
objects[0].__class__ if objects else RoadObject
)
self.add_new_road_object(objects, object_name)
def add_new_road_object(
self, objs: Union[RoadObject, List[RoadObject]], obj_name: str
) -> None:
"""
Add a new object type to the road network.
This will add an attribute for the raw objects as a list as well
as a public attribute if it does not already exist. It will also add
an add_{obj_name} method to add new objects to the list.
"""
if hasattr(self, f"_{obj_name}"):
raise ValueError(
f"Road network already has {obj_name}. Use self.add_{obj_name}."
)
setattr(self, f"_{obj_name}", objs)
try:
getattr(self, obj_name)
except AttributeError:
setattr(self, obj_name, objs)
try:
getattr(self, f"add_{obj_name}")
except AttributeError:
setattr(
self,
f"add_{obj_name}",
partial(self._add_obj, obj_name=obj_name),
)
def _add_obj(self, objs: List[RoadObject], obj_name: Optional[str] = None):
if obj_name is None:
raise ValueError("Must provide obj_name")
getattr(self, f"_{obj_name}").extend(
objs if isinstance(objs, list) else [objs]
)
self.clear_cache()
@cached_property
def roads(self) -> List[Road]:
"""Get all roads in the road network."""
return self._roads
@cached_property
def intersections(self) -> List[Intersection]:
"""Get all intersections in the road network."""
return self._intersections
@cached_property
def lanes(self) -> List[Lane]:
"""Get all lanes in the road network."""
return list(
set(sum([x.lanes for x in self.roads + self.intersections], [])).union(
self._lanes
)
)
@cached_property
def road_network_objects(self) -> List[RoadObject]:
"""Get all the road objects in the network."""
return [
obj for obj_name in self.object_names for obj in getattr(self, obj_name)
]
@cached_property
def road_network_geometries(self) -> List[RoadGeometry]:
"""Get all road geometries in the network."""
geoms = []
for obj_name, obj_class in self.object_names.items():
if issubclass(obj_class, RoadGeometry):
geoms.extend(getattr(self, obj_name))
return geoms
@cached_property
def driveable_surface(self) -> MultiPolygon:
"""Get the union of boundaries of driveable geometries."""
merged = unary_union(
[g.boundary for g in self.road_network_geometries if g.driveable]
)
return MultiPolygon([merged]) if isinstance(merged, Polygon) else merged
@cached_property
def walkable_surface(self) -> MultiPolygon:
"""Get the union of boundaries of non-driveable geometries."""
merged = unary_union(
[g.boundary for g in self.road_network_geometries if g.walkable]
)
return MultiPolygon([merged]) if isinstance(merged, Polygon) else merged
@cached_property
def impenetrable_surface(self) -> MultiPolygon:
"""Get the union of all impenetrable geometries."""
merged = unary_union(
[g.boundary for g in self.road_network_geometries if g.impenetrable]
)
return MultiPolygon([merged]) if isinstance(merged, Polygon) else merged
def object_by_id(self, i: str) -> RoadObject:
"""Get the object with the given id."""
return self._object_by_id[i]
@cached_property
def _object_by_id(self) -> Dict[str, RoadObject]:
"""Return a dict indexing all objects by id."""
return {x.id: x for x in self.road_network_objects}
@cached_property
def driveable_lanes(self) -> List[Lane]:
"""Get all driveable lanes in the network."""
return [l for l in self.lanes if l.type is LaneType["driving"]]
@cached_property
def _lanes_by_id(self) -> Dict[str, Lane]:
"""Return a dict indexing all lanes by id."""
return {l.id: l for l in self.lanes}
def get_successor_lanes(self, l: Lane) -> List[Lane]:
"""Get lanes that succeed the given lane."""
return [self._lanes_by_id[l_] for l_ in l.successors]
def get_predecessor_lanes(self, l: Lane) -> List[Lane]:
"""Get lanes that predecess the given lane."""
return [self._lanes_by_id[l_] for l_ in l.predecessors]
def get_connecting_roads(self, i: Intersection) -> List[Road]:
"""Get roads that connect to the given intersection."""
return [r for r in self.roads if r in i.connecting_roads]
def get_intersections(self, r: Road) -> List[Intersection]:
"""Get intersections that connect to the given road."""
return [i for i in self.intersections if r in i.connecting_roads]
def get_lane_parent(self, l: Lane) -> Optional[Union[Road, Intersection]]:
"""Get the object that the lane belongs to."""
if l not in self._lane_parents:
for x in self.roads + self.intersections:
if l in x.lanes:
self._lane_parents[l] = x
return x
self._lane_parents[l] = None
return self._lane_parents[l]
def get_geometries_at_point(
self,
x: float,
y: float,
) -> Tuple[List[str], List[RoadGeometry]]:
"""
Get all geometries at a given xy point.
TODO: Move to a spatial index for speed.
Parameters
----------
x : float
The x-coordinate at the point.
y : float
The y-coordinate at the point.
Returns
-------
Tuple[List[str], List[RoadObject]]
A list of string identifiers of the geometry (e.g. Road, Lane)
and the actual objects.
"""
p = Point(x, y)
names, geoms = [], []
        for geom in self.road_network_geometries:
            if geom.boundary.contains(p):
                names.append(geom.__class__.__name__)
                geoms.append(geom)
return names, geoms
def to_dict(self) -> Dict[str, List[Dict[str, Any]]]:
"""Return a dict representation of the road network."""
data = {"name": self.name, "properties": self.properties}
for obj_name in self.object_names:
data[obj_name] = [obj.to_dict() for obj in getattr(self, obj_name)]
return data
def to_json(self, filepath: str) -> None:
"""Save the road network to json file."""
data = self.to_dict()
with open(filepath, "w") as f:
json.dump(data, f)
def clear_cache(self) -> None:
"""Clear the cached properties and lru cache methods."""
self._lane_parents.clear()
self._hull = None
self._inside_fn = None
        self._outside_fn = None
for method in dir(self.__class__):
obj = getattr(self.__class__, method)
if isinstance(obj, _lru_cache_wrapper):
getattr(self, method).__func__.cache_clear()
elif (
isinstance(cached_property, type)
and isinstance(obj, cached_property)
and (method in self.__dict__)
):
del self.__dict__[method]
else:
with suppress(AttributeError):
func = obj.__func__
if isinstance(func, _lru_cache_wrapper) and (
obj.__self__ is self
):
func.cache_clear()
def elevation_at_point(self, x: ArrayLike, y: ArrayLike) -> NDArray:
"""Estimate the elevation at (x, y) by interpolating."""
x = np.array(x)
y = np.array(y)
if self._hull is None:
self._interpolate_elevation()
x_ndim, y_ndim = x.ndim, y.ndim
if x_ndim not in (0, 1) or y_ndim not in (0, 1):
raise ValueError("x and y must be 0 or 1 dimensional.")
if x_ndim == 0:
x = np.array([x])
if y_ndim == 0:
y = np.array([y])
if x.shape[0] == 1 and y.shape[0] > 1:
x = np.repeat(x, y.shape[0])
elif y.shape[0] == 1 and x.shape[0] > 1:
y = np.repeat(y, x.shape[0])
xy = np.column_stack((x, y))
inside = self._hull.find_simplex(xy) >= 0
res = np.empty(xy.shape[0])
if np.any(inside):
zs_in = self._inside_fn(xy[inside])
res[inside] = zs_in
if np.any(~inside):
zs_out = self._outside_fn(xy[~inside])
res[~inside] = zs_out
if x_ndim == y_ndim == 1:
res = res.squeeze()
return res
def _interpolate_elevation(self) -> None:
"""Interpolate the elevation values of the geometries."""
elevs = [
geom.elevation
for geom in self.road_network_geometries
if geom.elevation is not None
]
if not elevs:
            # no elevation data: fall back to a flat zero-elevation surface
            # built from a few dummy points (keeps the interpolators picklable)
elevation_values = np.array(
[
[0, 1, 0],
[1, 0, 0],
[1, 1, 0],
[0, 0, 0],
]
)
else:
elevation_values = np.concatenate(elevs, axis=0)
if elevation_values.shape[0] > 5000:
n = np.ceil(elevation_values.shape[0] / 5000)
elevation_values = elevation_values[:: int(n)]
self._hull = Delaunay(elevation_values[:, :2])
self._inside_fn = LinearNDInterpolator(
elevation_values[:, :2],
elevation_values[:, 2],
)
self._outside_fn = NearestNDInterpolator(
elevation_values[:, :2],
elevation_values[:, 2],
)
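# Usage sketch (illustrative only): building a small RoadNetwork directly from
# in-memory objects and querying it. The geometry is invented for illustration;
# real networks are normally loaded with RoadNetwork.create_from_file
# (.json or .xodr).
if __name__ == "__main__":
    from shapely.geometry import LineString, Polygon

    boundary = Polygon([(0, -2), (50, -2), (50, 2), (0, 2)])
    center = LineString([(0, 0), (50, 0)])
    lane = Lane("lane_0", boundary, center, [], [], LaneType.driving)
    road = Road("road_0", boundary, center, [lane])
    network = RoadNetwork(name="toy_network", roads=[road], intersections=[])
    print(network.driveable_surface.area)
    print(network.get_geometries_at_point(25.0, 0.0)[0])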
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/road_network/road_network.py
| 0.901759 | 0.442817 |
road_network.py
|
pypi
|
from typing import Dict, List, Optional, Tuple
import numpy as np
from pyxodr.road_objects.lane import Lane as xodrLane
from pyxodr.road_objects.network import RoadNetwork as xodrRoadNetwork
from shapely.geometry import LineString, Polygon
from scenario_gym.road_network import Lane, LaneType, Road
def xodr_lane_to_sg(
lane: xodrLane,
simplify_tolerance: float,
) -> Optional[Lane]:
"""Convert an OpenDRIVE lane to a scenario_gym lane."""
if lane.type is None:
return None
lane_traffic_flow_line = LineString(lane.traffic_flow_line[:, :2])
if simplify_tolerance is not None:
lane_traffic_flow_line = lane_traffic_flow_line.simplify(simplify_tolerance)
elevation = lane.traffic_flow_line
boundary = np.vstack(
[
lane.boundary_line,
np.flip(lane.lane_offset_line, axis=0),
]
)
lane_boundary = Polygon(boundary)
try:
lane_type = LaneType[lane.type]
except KeyError as e:
raise KeyError(
f"Lane {lane.id} has an invalid lane type: {lane.type}. Allowed lane "
f"types are {list(LaneType.__members__)}."
) from e
return Lane(
repr(lane),
lane_boundary,
lane_traffic_flow_line,
[],
[],
lane_type,
elevation=elevation,
)
def road_to_sg(
xodr_road,
simplify_tolerance: float,
) -> Tuple[List[Road], Dict[xodrLane, Lane]]:
    """Convert an OpenDRIVE road into scenario_gym roads and a lane mapping."""
roads, old_to_new_lanes = [], {}
for _, xodr_lane_section in enumerate(xodr_road.lane_sections):
if simplify_tolerance is not None:
(
x_boundary,
y_boundary,
) = xodr_lane_section.boundary.exterior.simplify(simplify_tolerance).xy
else:
x_boundary, y_boundary = xodr_lane_section.boundary.exterior.xy
road_boundary = Polygon(list(zip(x_boundary, y_boundary)))
xyz_centre = xodr_lane_section.get_offset_line()
road_center = LineString(xyz_centre[:, :2])
if simplify_tolerance is not None:
road_center = road_center.simplify(simplify_tolerance)
road_elevation = xyz_centre
lanes = []
for lane in xodr_lane_section.lanes:
sg_lane = xodr_lane_to_sg(lane, simplify_tolerance)
if sg_lane is not None:
lanes.append(sg_lane)
old_to_new_lanes[lane] = sg_lane
road = Road(
repr(xodr_lane_section),
road_boundary,
road_center,
lanes=lanes,
elevation=road_elevation,
)
roads.append(road)
return roads, old_to_new_lanes
def add_connection(conn: Tuple[Lane, Lane]) -> None:
"""Connect a pair of successive lanes."""
pre, suc = conn
    if pre.id not in suc.predecessors:
        suc.predecessors.append(pre.id)
    if suc.id not in pre.successors:
        pre.successors.append(suc.id)
def xodr_to_sg_roads(
road_network: xodrRoadNetwork,
simplify_tolerance: float,
) -> List[Road]:
"""
Convert a pyxodr road network to a list of roads.
Parameters
----------
road_network : xodrRoadNetwork
Imported OpenDRIVE file.
simplify_tolerance : float
Points per m for simplifying center and boundary lines.
"""
xodr_id_to_sg_road_objects: Dict[int, List[Road]] = {}
xodr_lane_to_sg_lane: Dict[xodrLane, Lane] = {}
for road in road_network.get_roads():
roads, old_to_new_lanes = road_to_sg(road, simplify_tolerance)
xodr_id_to_sg_road_objects[road.id] = roads
xodr_lane_to_sg_lane.update(old_to_new_lanes)
for xodr_lane, sg_lane in xodr_lane_to_sg_lane.items():
successor_lanes = xodr_lane.traffic_flow_successors
for successor_lane in successor_lanes:
try:
successor_sg_lane = xodr_lane_to_sg_lane[successor_lane]
except KeyError:
raise KeyError(
f"Could not find successor lane {successor_lane} in "
+ "OpenDRIVE to Scenario Gym dict; one of the successors of "
+ f"{xodr_lane}."
)
add_connection((sg_lane, successor_sg_lane))
return list(set().union(*xodr_id_to_sg_road_objects.values()))
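# Usage sketch (illustrative only): converting an OpenDRIVE file into
# scenario_gym roads with pyxodr. The file path and parameter values are
# assumptions for illustration; RoadNetwork.create_from_xodr wraps this same
# conversion.
if __name__ == "__main__":
    xodr_network = xodrRoadNetwork("maps/example.xodr", resolution=0.1)
    roads = xodr_to_sg_roads(xodr_network, simplify_tolerance=0.2)
    print(len(roads), "roads converted")
    for road in roads[:3]:
        print(road.id, len(road.lanes))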
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/road_network/xodr.py
| 0.934813 | 0.550668 |
xodr.py
|
pypi
|
from abc import abstractmethod
from types import MethodType
from typing import Any, Callable, Optional, Tuple
from scenario_gym.agent import Agent
from scenario_gym.scenario_gym import ScenarioGym
try:
from dm_env import Environment, TimeStep, restart, termination, transition
except ImportError:
raise ImportError(
"dm_env is required for this module. Install it with `pip install dm_env`."
)
class ScenarioGym(ScenarioGym, Environment):
"""
Scenario Gym subclass compatible with dm_env.
    This is still an abstract class which requires implementation of the methods
`observation_spec` and `action_spec`. This is because these are required as
methods for `dm_env.Environment` but are not known by `ScenarioGym` until the
ego agent is defined. Therefore subclasses should implement the specific specs
required for the ego agent for the chosen experiment.
"""
def __init__(
self,
*args,
update_scenario: Optional[Callable[[ScenarioGym], None]] = None,
**kwargs,
):
super().__init__(*args, **kwargs)
self.ego_agent: Optional[Agent] = None
if update_scenario is not None:
self.update_scenario = MethodType(update_scenario, self)
def update_scenario(self) -> None:
"""Update the loaded scenario when reset is called."""
pass
def reset(self) -> TimeStep:
"""Reset the environment."""
self.update_scenario()
obs = self._reset()
return restart(obs)
def _reset(self) -> Any:
"""Reset the environment."""
if self.state.scenario is None:
raise ValueError("No scenario has been set.")
self.reset_scenario()
try:
self.ego_agent = self.state.agents[self.state.scenario.ego]
except KeyError:
raise KeyError("No agent named ego.")
self.state.next_t = self.state.t + self.timestep
return self.ego_agent.sensor.step(self.state)
def step(self, action) -> TimeStep:
"""Process an action and get the next timetsep."""
if (
self.state.scenario is None
or self.state.is_done
or self.ego_agent is None
):
return self.reset()
obs, reward = self._step(action)
if self.state.is_done:
return termination(reward, obs)
return transition(reward, obs)
def _step(self, action) -> Tuple[Any, float]:
"""Process the given action."""
new_poses = {}
for agent in self.state.agents.values():
if agent is self.ego_agent:
agent.last_action = action
new_poses[agent.entity] = agent.controller.step(self.state, action)
else:
new_poses[agent.entity] = agent.step(self.state)
new_poses.update(self.state.non_agents.step(self.state))
# update the poses and current time
self.state.step(new_poses)
# get reward of next state
reward = self.ego_agent.reward(self.state)
# rendering and metrics
for m in self.metrics:
m.step(self.state)
if self.viewer is not None:
self.state.last_keystroke = self.render()
# process ego part of next state
self.state.next_t = self.state.t + self.timestep
ego_obs = self.ego_agent.sensor.step(self.state)
if self.state.is_done:
            for agent in self.state.agents.values():
agent.finish(self.state)
return ego_obs, reward
@abstractmethod
def observation_spec(self) -> Any:
"""Return the observation spec for the environment."""
raise NotImplementedError()
@abstractmethod
def action_spec(self) -> Any:
"""Return the action spec for the environment."""
raise NotImplementedError()
def rollout(self, *args, **kwargs) -> None:
"""Raise an error if rollout is called with this env."""
raise NotImplementedError("Rollout is not supported for this environment.")
def reset_scenario(self) -> None:
"""Reset scenario and reference to old ego agent."""
super().reset_scenario()
self.ego_agent = None
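# Usage sketch (illustrative only): a concrete subclass supplying the two
# abstract spec methods required by dm_env. The spec shapes and dtypes below
# are assumptions for illustration and should match the ego agent's sensor and
# controller in a real experiment.
if __name__ == "__main__":
    import numpy as np
    from dm_env import specs

    class ExampleGym(ScenarioGym):
        """dm_env-compatible gym with fixed example specs."""

        def observation_spec(self):
            return specs.Array(shape=(2, 128, 128), dtype=np.float32)

        def action_spec(self):
            return specs.BoundedArray(
                shape=(2,), dtype=np.float32, minimum=-1.0, maximum=1.0
            )

    env = ExampleGym()  # assumes the base ScenarioGym can be built with defaults
    print(env.observation_spec(), env.action_spec())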
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/integrations/deepmind_env.py
| 0.903598 | 0.54468 |
deepmind_env.py
|
pypi
|
from __future__ import annotations
from math import inf
from types import MethodType
from typing import Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from packaging import version
from scenario_gym.action import Action
from scenario_gym.agent import Agent
from scenario_gym.controller import VehicleController
from scenario_gym.entity import Entity
from scenario_gym.observation import Observation
from scenario_gym.scenario import Scenario
from scenario_gym.scenario_gym import ScenarioGym, _create_agent
from scenario_gym.sensor.map import RasterizedMapSensor
from scenario_gym.state import TERMINAL_CONDITIONS, State
try:
import gym
from gym import Env
from gym.spaces import Box, Space
except ImportError as e:
raise ImportError(
"gym is required for this module. Install it with `pip install gym`."
) from e
class ScenarioGym(ScenarioGym, Env):
"""
An OpenAI gym compatible version of the Scenario Gym.
Provides an explicit interface to the observations of the
ego agent. The agent must implement the reward method of the
Agent class.
"""
metadata = {"render_modes": []}
_new_reset = version.parse(gym.__version__) >= version.parse("0.22.0")
def __init__(
self,
action_space: Optional[Space] = None,
observation_space: Optional[Space] = None,
reward_range: Tuple[float, float] = (-inf, inf),
terminal_conditions: Optional[
List[Union[str, Callable[[State], bool]]]
] = None,
timestep: float = 0.1,
create_agent: Optional[
Callable[[Scenario, Entity], Optional[Agent]]
] = None,
select_scenario: Optional[
Callable[[ScenarioGym], Union[Scenario, str]]
] = None,
**kwargs,
):
"""
Construct the ScenarioGym environment.
Parameters
----------
action_space : Optional[Space]
The action space for the ego agent. If not given a Box space is
assumed for acceleration and steering actions.
observation_space : Optional[Space]
The observation space for the ego agent. If not given a Box space is
assumed for a rasterized map.
reward_range : Tuple[float, float]
Optional reward range parameter for gym.
terminal_conditions : Optional[List[Union[str, Callable[[State], bool]]]]
            Terminal conditions for the scenario gym. If not given then max_length,
ego_collision and ego_off_road are used.
timestep : float
Timestep for the scenario_gym.
create_agent : Optional[Callable[[Scenario, Entity], Optional[Agent]]]
Create agent function for the gym. Should return an agent for the
ego entity. If not given then `ScenarioGym.create_agent` will be used.
        select_scenario : Optional[Callable[[ScenarioGym], Union[Scenario, str]]]
Function that selects the scenario to be run each time `reset()` is
called. Takes just self as argument and should return either the xosc
filepath or the scenario object. If not given then
`ScenarioGym.select_scenario` will be used.
"""
if terminal_conditions is None:
terminal_conditions = ["max_length", "ego_collision", "ego_off_road"]
super().__init__(
terminal_conditions=terminal_conditions,
timestep=timestep,
**kwargs,
)
if action_space is None:
action_space = Box(
low=np.array([-5.0, -0.9]).astype(np.float32),
high=np.array([5.0, 0.9]).astype(np.float32),
shape=(2,),
)
if observation_space is None:
observation_space = Box(
low=np.float32(0.0),
high=np.float32(1.0),
shape=(2, 128, 128),
)
self.action_space = action_space
self.observation_space = observation_space
self.reward_range = reward_range
if create_agent is not None:
self.create_agent = create_agent
if select_scenario is not None:
self.select_scenario = MethodType(select_scenario, self)
def on_reset(self) -> None:
"""Run just before the reset is executed."""
pass
def after_reset(self) -> None:
"""Run just after the reset is executed."""
pass
def reset(
self,
seed: Optional[int] = None,
return_info: bool = False,
options: Optional[Dict] = None,
) -> Union[Observation, Tuple[Observation, Dict]]:
"""
Reset the environment.
Resets the state and computes the observation for the ego agent.
Possible options:
scenario : Union[Scenario, str]
A chosen scenario object or filepath to the xosc to be used.
"""
self.on_reset()
if self._new_reset:
super().reset(seed=seed)
else:
super().seed(seed)
if (options is not None) and ("scenario" in options):
s = options["scenario"]
else:
s = self.select_scenario()
if s is not None:
if isinstance(s, Scenario):
self.set_scenario(s)
else:
self.load_scenario(s)
elif self.state.scenario is None:
raise ValueError("No scenario has been set.")
else:
self.reset_scenario()
self.state.next_t = self.state.t + self.timestep
ego_obs = self.ego_agent.sensor.step(self.state)
self.after_reset()
return (ego_obs, {}) if return_info else ego_obs
def step(self, action: Action) -> Tuple[Observation, float, bool, Dict]:
"""
Run one timestep of the environment.
The action for the ego is processed and `step` is called on
        all other agents/entities. Then the state is processed and the
        reward and next observation for the ego agent are computed.
Returns
-------
next_state : Observation
Observation of the next state for the ego.
reward : float
The reward returned from the next state.
done : bool
Whether the next state is terminal.
info : Dict
Additional info.
"""
if self.state.is_done:
raise ValueError("Step called when state is terminal.")
new_poses = {}
for agent in self.state.agents.values():
if agent is self.ego_agent:
agent.last_action = action
new_poses[agent.entity] = agent.controller.step(self.state, action)
else:
new_poses[agent.entity] = agent.step(self.state)
new_poses.update(self.state.non_agents.step(self.state))
# update the poses and current time
self.state.step(new_poses)
# get reward of next state
reward = self.ego_agent.reward(self.state)
# rendering and metrics
if self.viewer is not None:
self.state.last_keystroke = self.render()
for m in self.metrics:
m.step(self.state)
# process ego part of next state
self.state.next_t = self.state.t + self.timestep
ego_obs = self.ego_agent.sensor.step(self.state)
if self.state.is_done:
for agent in self.state.agents.values():
agent.finish(self.state)
return ego_obs, reward, self.state.is_done, {}
def rollout(self, *args, **kwargs):
"""Raise an error if rollout is called with this env."""
raise NotImplementedError("Rollout is not supported for this environment.")
def render(
self,
mode: None = None,
video_path: Optional[str] = None,
) -> Optional[int]:
"""Render the environment."""
return super().render(video_path=video_path)
def load_scenario(
self, *args, create_agent: Optional[Callable] = None, **kwargs
) -> None:
"""
Load a scenario from an OpenScenario file.
Sets the default argument of `create_agent` to `self.create_agent`.
"""
if create_agent is None:
create_agent = self.create_agent
super().load_scenario(*args, create_agent=create_agent, **kwargs)
def set_scenario(
self, *args, create_agent: Optional[Callable] = None, **kwargs
) -> None:
"""
Set the scenario explicitly.
Sets the default argument of `create_agent` to `self.create_agent`.
"""
if create_agent is None:
create_agent = self.create_agent
super().set_scenario(*args, create_agent=create_agent, **kwargs)
def select_scenario(self) -> Optional[Union[str, Scenario]]:
"""Update the scenario when reset is called."""
return None
def create_agents(
self,
create_agent: Callable[[Scenario, Entity], Optional[Agent]] = _create_agent,
) -> None:
"""Check there is an ego agent."""
super().create_agents(create_agent=create_agent)
try:
self.ego_agent = self.state.agents[self.state.scenario.ego]
except KeyError as e:
raise KeyError("No agent for ego.") from e
@staticmethod
def create_agent(scenario: Scenario, entity: Entity) -> Optional[Agent]:
"""Create the agents for the scenario."""
if entity.ref == "ego":
return RLAgent(
entity,
VehicleController(entity, max_steer=0.9, max_accel=5.0),
MapOnlySensor(
entity, channels_first=True, height=30, width=30, n=128
),
)
class MapOnlySensor(RasterizedMapSensor):
"""Sensor returning only the rasterized map."""
def _step(self, state: State) -> np.ndarray:
"""Get the map from the base sensor's observation."""
return super()._step(state).map
class RLAgent(Agent):
"""Example agent recieving negative rewards for collisions going off road."""
def reward(self, state: State) -> Optional[float]:
"""Return the reward for the agent from the current state."""
if state.is_done:
if TERMINAL_CONDITIONS["ego_off_road"](state):
return -1.0
elif TERMINAL_CONDITIONS["ego_collision"](state):
return -1.0
return 0.01
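# Usage sketch (illustrative only): a standard gym interaction loop with the
# OpenAI-gym-compatible ScenarioGym. The scenario path is an assumption for
# illustration; `select_scenario` or the reset `options` dict can also be used
# to choose scenarios.
if __name__ == "__main__":
    env = ScenarioGym(timestep=0.1)
    obs = env.reset(options={"scenario": "scenarios/example.xosc"})
    done = False
    total_reward = 0.0
    while not done:
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
        total_reward += reward
    print("episode return:", total_reward)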
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/integrations/openaigym.py
| 0.962691 | 0.595316 |
openaigym.py
|
pypi
|
import json
from contextlib import suppress
from pathlib import Path
from typing import Dict
import numpy as np
from shapely.geometry import LineString, Polygon
from scenario_gym.catalog_entry import BoundingBox, Catalog, CatalogEntry
from scenario_gym.entity import Entity
from scenario_gym.road_network import (
Lane,
LaneType,
Road,
RoadGeometry,
RoadNetwork,
)
from scenario_gym.scenario import Scenario
from scenario_gym.trajectory import Trajectory
try:
import pandas as pd
except ImportError:
raise ImportError(
"""
\tPandas is required for this integration.
\tInstall by `pip install pandas`.
"""
)
class Lane(Lane):
"""Add the argoverse information to the lane object."""
def __init__(
self,
is_intersection: bool,
left_neighbour_id: str,
right_neighbour_id: str,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.is_intersection = is_intersection
self.left_neighbour_id = left_neighbour_id
self.right_neighbour_id = right_neighbour_id
track_types = [
"VEHICLE",
"PEDESTRIAN",
"MOTORCYCLIST",
"CYCLIST",
"BUS",
"STATIC",
"BACKGROUND",
"CONSTRUCTION",
"RIDERLESS_BICYCLE",
"UNKNOWN",
]
class Catalogs:
"""
Catalogs used in the argoverse dataset.
Using the dimensions defined in their scenario_visualization.py
"""
vehicle_box = BoundingBox(1.8, 3.8, 0.0, 0.0)
argoverse_catalog = Catalog("ArgoverseCatalog", "ArgoverseCatalogs")
vehicle = CatalogEntry(
argoverse_catalog,
"vehicle",
"car",
"Vehicle",
vehicle_box,
{},
[],
)
pedestrian_box = BoundingBox(0.4, 0.4, 0.0, 0.0)
pedestrian = CatalogEntry(
argoverse_catalog,
"pedestrian",
"pedestrian",
"Pedestrian",
pedestrian_box,
{},
[],
)
motorbike_box = BoundingBox(0.2, 0.8, 0.0, 0.0)
motorcyclist = CatalogEntry(
argoverse_catalog,
"motorcyclist",
"motorbike",
"Vehicle",
motorbike_box,
{},
[],
)
cyclist_box = BoundingBox(0.7, 2.0, 0.0, 0.0)
    cyclist = CatalogEntry(
        argoverse_catalog, "cyclist", "bicycle", "Vehicle", cyclist_box, {}, []
    )
bus_box = BoundingBox(2.8, 11.0, 0.0, 0.0)
bus = CatalogEntry(argoverse_catalog, "bus", "bus", "Vehicle", bus_box, {}, [])
riderless_bicycle_box = BoundingBox(0.3, 1.5, 0.0, 0.0)
riderless_bicycle = CatalogEntry(
argoverse_catalog,
"riderless_bicycle",
"obstacle",
"Vehicle",
riderless_bicycle_box,
{},
[],
)
def import_argoverse_scenario(path: str) -> Scenario:
"""
Import a recorded scenario from the argoverse data.
This assumes fixed bounding box sizes for each entity. For now
ignoring object types: background, construction,
    static and unknown.
"""
path = Path(path)
scenario_id = path.parts[-1]
pq_path = Path(path, f"scenario_{scenario_id}.parquet")
main_df = pd.read_parquet(pq_path).sort_values("timestep")
dfs = list(main_df.groupby("track_id"))
all_ids = sorted(main_df["track_id"].unique())
assert "AV" in all_ids, "No AV found to use as ego."
all_ids.remove("AV")
entities = []
for track_id, df in dfs:
if track_id != "AV" and not df["observed"].any():
continue
# get catalog
object_type = df["object_type"].iloc[0]
        catalog_entry = getattr(Catalogs, object_type, None)
        if catalog_entry is None:
            # skip object types without a catalog entry
            # (background, construction, static, unknown)
            continue
# get start and end in seconds
start = df["start_timestamp"].iloc[0] / 1e9
end = df["end_timestamp"].iloc[0] / 1e9
num = df["num_timestamps"].iloc[0] - 1
t_scale = (end - start) / num
# build trajectory
traj_data = df[
[
"timestep",
"position_x",
"position_y",
"heading",
]
].to_numpy()
traj_data[:, 0] = t_scale * traj_data[:, 0]
v0 = df[["velocity_x", "velocity_y"]].iloc[0].to_numpy()
t_pre = np.array(
[
-0.1,
*(traj_data[0, [1, 2]] - 0.1 * v0),
traj_data[0, 3],
]
)
traj_data = np.concatenate(
[
t_pre[None],
traj_data,
],
axis=0,
)
trajectory = Trajectory(traj_data, fields=["t", "x", "y", "h"])
entity_ref = (
f"entity_{1+all_ids.index(track_id)}" if track_id != "AV" else "ego"
)
entity = Entity(catalog_entry, ref=entity_ref)
entity.trajectory = trajectory
entities.append(entity)
ego = None
for e in entities:
if e.ref == "ego":
ego = e
break
if ego is not None:
entities.remove(ego)
entities.insert(0, ego)
    with open(Path(path, f"log_map_archive_{scenario_id}.json"), "r") as f:
        road_network_data = json.load(f)
road_network = create_argoverse_road_network(road_network_data)
scenario = Scenario(
entities,
name=scenario_id,
road_network=road_network,
)
return scenario
def create_argoverse_road_network(data: Dict) -> RoadNetwork:
"""Create a road network from the argoverse log map."""
driveable_areas = []
for area in data["drivable_areas"].values():
poly = Polygon([[v["x"], v["y"]] for v in area["area_boundary"]])
driveable_areas.append(
RoadGeometry(
area["id"],
poly,
)
)
roads = []
all_lanes = set([l["id"] for l in data["lane_segments"].values()])
for l_data in data["lane_segments"].values():
center = LineString([[d["x"], d["y"]] for d in l_data["centerline"]])
boundary = center.buffer(1.75, cap_style=2)
lane = Lane(
l_data["is_intersection"],
l_data["left_neighbor_id"],
l_data["right_neighbor_id"],
l_data["id"],
boundary,
center,
list(set(l_data["successors"]).intersection(all_lanes)),
list(set(l_data["predecessors"]).intersection(all_lanes)),
LaneType.driving, # data["lane_type"],
)
roads.append(
Road(
f"road_{l_data['id']}",
boundary,
center,
[lane],
)
)
# TODO pedestrian crossings
return RoadNetwork(
roads=roads,
intersections=[],
driveable_areas=driveable_areas,
)
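# Hedged usage sketch: the directory layout assumed by import_argoverse_scenario
# is <dir>/scenario_<id>.parquet and <dir>/log_map_archive_<id>.json (Argoverse 2
# motion forecasting format). The path below is a placeholder.
if __name__ == "__main__":
    example_scenario = import_argoverse_scenario("/path/to/argoverse/scenario_dir")
    print(example_scenario.name, len(example_scenario.entities))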
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/integrations/argoverse.py
| 0.849535 | 0.433742 |
argoverse.py
|
pypi
|
from dataclasses import dataclass, field
from random import choice
from typing import Dict, Optional, Tuple
import numpy as np
from nuscenes import NuScenes
from nuscenes.prediction import PredictHelper
from nuscenes.prediction.input_representation.static_layers import load_all_maps
from scipy.spatial.transform import Rotation
from shapely.geometry import LineString
from scenario_gym.catalog_entry import BoundingBox, Catalog, CatalogEntry
from scenario_gym.entity import Entity
from scenario_gym.road_network import Lane, LaneType, Road, RoadNetwork
from scenario_gym.scenario import Scenario
from scenario_gym.trajectory import Trajectory
@dataclass
class NuScenesInstanceData:
"""Class for storing data for NuScenes "instances" (entities)."""
category_name: str
trajectory: list = field(default_factory=list)
times: list = field(default_factory=list)
sizes: list = field(default_factory=list)
rotations: list = field(default_factory=list)
class Catalogs:
"""
Catalogs used in the nuScenes dataset.
Using the dimensions defined "at runtime" from the bounding box data.
"""
nuScenes_catalog = Catalog("nuScenesCatalog", "nuScenesCatalogs")
class NuScenesImporter:
"""
Class for importing nuScenes scenes into scenario gym scenarios.
Parameters
----------
data_root : str
Dataset root path to be passed into the NuScenes constructor.
dataset : str, optional
Dataset name to be passed into the NuScenes constructor, by default
"v1.0-mini"
map_radius_multiplier : float, optional
When including a map around a scene, a centrepoint will be computed as the
        average position of all trajectories; a radius will be computed as the
        maximum of the x and y coordinate ranges. All lanes within this radius,
        scaled by map_radius_multiplier, of the centrepoint will be included. By
        default 1.5.
pre_loaded_data : NuScenes, optional
Pre-loaded (indexed) data object, for much faster init if the NuScenes
dataset has been pre-loaded already, by default None.
"""
def __init__(
self,
data_root: str,
dataset: str = "v1.0-mini",
map_radius_multiplier: float = 1.5,
pre_loaded_data: Optional[NuScenes] = None,
):
self.data_root = data_root
self.dataset = dataset
if pre_loaded_data is not None:
self.data = pre_loaded_data
else:
self.data = NuScenes(self.dataset, dataroot=self.data_root)
self.predict_helper = PredictHelper(self.data)
self.maps = load_all_maps(self.predict_helper)
self.map_radius_multiplier = map_radius_multiplier
def _convert_nuScenes_map_to_road_network(
self, map_name, centre_coordinate: np.ndarray, map_radius: float
) -> RoadNetwork:
map = self.maps[map_name]
lane_records = map.get_records_in_radius(
*centre_coordinate, map_radius, ["lane", "lane_connector"]
)
roads = []
lane_centres = map.discretize_lanes(
lane_records["lane"], 0.1
) | map.discretize_lanes(lane_records["lane_connector"], 0.1)
lane_keys_and_records = [("lane", l) for l in lane_records["lane"]] + [
("lane_connector", l) for l in lane_records["lane_connector"]
]
lane_ids = set([l[1] for l in lane_keys_and_records])
for lane_key, lane_record in lane_keys_and_records:
lane = map.get(lane_key, lane_record)
bounding_poly = map.extract_polygon(lane["polygon_token"])
lane_centre = LineString(np.array(lane_centres[lane_record])[:, :2])
sg_lane = Lane(
lane_record,
bounding_poly,
lane_centre,
[
l_id
for l_id in map.get_outgoing_lane_ids(lane_record)
if l_id in lane_ids
],
[
l_id
for l_id in map.get_incoming_lane_ids(lane_record)
if l_id in lane_ids
],
LaneType.driving,
elevation=np.array(lane_centres[lane_record]),
)
roads.append(
Road(
f"road_{lane_key}_{lane_record}",
bounding_poly,
lane_centre,
[sg_lane],
)
)
road_network = RoadNetwork(
roads=roads,
intersections=[],
)
return road_network
def convert_instance_sample_token_to_gym(
self,
ego_instance_token: str,
sample_token: str,
seconds_history: float = 2.0,
seconds_future: float = 6.0,
) -> Scenario:
"""
Convert an (instance token, sample token) pair to a scenario gym scenario.
Note in the resulting scenario, the sample token will occur at t==0. Since
rendering begins at t==0, to render the entire scenario, translate all
entities in time.
Parameters
----------
ego_instance_token : str
Instance token. This instance (entity) will be treated as the ego.
sample_token : str
Sample token to treat as t == 0. Later and earlier sample tokens will
be queried to create the scenario.
seconds_history : float, optional
Seconds before the provided sample_token to query samples for, by
default 2.0
seconds_future : float, optional
Seconds after the provided sample_token to query samples for, by
default 6.0
Returns
-------
Scenario
Scenario gym scenario corresponding to this NuScenes data.
"""
# Link from instance IDs to relevant instance (entity) data
# We will use the current sample token as t = 0.
# Note scenario simulation starts at t=0, so to simulate the whole scenario
# including past, the scenario.translate method should be used.
instance_token_to_data: Dict[str, NuScenesInstanceData] = {}
instance_token_to_past_data = self.predict_helper.get_past_for_sample(
sample_token,
seconds=seconds_history,
in_agent_frame=False,
just_xy=False,
)
instance_token_to_current_data = {
d["instance_token"]: [d]
for d in self.predict_helper.get_annotations_for_sample(sample_token)
}
instance_token_to_future_data = self.predict_helper.get_future_for_sample(
sample_token,
seconds=seconds_future,
in_agent_frame=False,
just_xy=False,
)
for instance_token in (
instance_token_to_past_data.keys()
| instance_token_to_future_data.keys()
| instance_token_to_current_data.keys()
):
past_data = instance_token_to_past_data.get(instance_token, [])
current_data = instance_token_to_current_data.get(instance_token, [])
future_data = instance_token_to_future_data.get(instance_token, [])
past_times = np.linspace(
-0.5,
-0.5 * (len(past_data)),
len(past_data),
)
future_times = np.linspace(
0.5,
0.5 * (len(future_data)),
len(future_data),
)
combined_times = list(past_times) + [0.0] + list(future_times)
combined_data = past_data + current_data + future_data
assert len(combined_data) == len(combined_times)
trajectory = [annotation["translation"] for annotation in combined_data]
sizes = [annotation["size"] for annotation in combined_data]
rotations = [annotation["rotation"] for annotation in combined_data]
instance_token_to_data[instance_token] = NuScenesInstanceData(
combined_data[0]["category_name"],
trajectory=trajectory,
times=combined_times,
sizes=sizes,
rotations=rotations,
)
map_name = self.predict_helper.get_map_name_from_sample_token(sample_token)
entities, road_network = self._convert_to_entities_road_network(
instance_token_to_data, map_name, ego_instance_token=ego_instance_token
)
scenario = Scenario(
entities,
name="_".join(((ego_instance_token, sample_token))),
road_network=road_network,
)
return scenario
def convert_scene_to_gym(
self, scene_token: str, ego_instance_token: Optional[str] = None
) -> Scenario:
"""
Convert a complete nuScenes scene to a scenario gym scenario.
Where ego_instance_token is provided, the instance (agent) which corresponds
to that token will be treated as the ego. Otherwise, a random car will be
chosen.
See https://www.nuscenes.org/nuscenes#data-format for nuScenes schema.
Parameters
----------
scene_token : str
Unique identifier for a nuScenes scene
ego_instance_token : Optional[str], optional
Identifier for the instance to be used as the scenario gym ego, by
default None
Returns
-------
Scenario
Converted scenario gym scenario corresponding to this nuScenes scene.
Raises
------
KeyError
If the provided ego_instance_token is not found in the scene.
ValueError
If no ego_instance_token is provided and there are no cars in the scene
to be chosen as the ego.
"""
scene_data = self.data.get("scene", scene_token)
sample_annotations: list[list[dict]] = []
first_sample_token = scene_data["first_sample_token"]
last_sample_token = scene_data["last_sample_token"]
current_sample_token = first_sample_token
while current_sample_token != last_sample_token:
if current_sample_token is None or current_sample_token == "":
print(
"WARNING: Got an unexpected sample token of "
+ str(current_sample_token)
)
break
sample_annotations.append(
self.predict_helper.get_annotations_for_sample(current_sample_token)
)
current_sample_token = self.data.get("sample", current_sample_token)[
"next"
]
        # nuScenes is sampled at 2 Hz, so consecutive samples are 0.5 s apart
times = np.linspace(
0.0,
0.5 * (len(sample_annotations) - 1),
len(sample_annotations),
)
# Now link from instance IDs to relevant instance (entity) data
instance_token_to_data: Dict[str, NuScenesInstanceData] = {}
for sample, time in zip(sample_annotations, times):
for annotation in sample:
instance_token = annotation["instance_token"]
if instance_token not in instance_token_to_data.keys():
instance_token_to_data[instance_token] = NuScenesInstanceData(
annotation["category_name"]
)
instance_token_to_data[instance_token].trajectory.append(
annotation["translation"]
)
instance_token_to_data[instance_token].times.append(time)
instance_token_to_data[instance_token].sizes.append(
annotation["size"]
)
instance_token_to_data[instance_token].rotations.append(
annotation["rotation"]
)
map_name = self.predict_helper.get_map_name_from_sample_token(
first_sample_token
)
(entities, road_network,) = self._convert_to_entities_road_network(
instance_token_to_data, map_name, ego_instance_token=ego_instance_token
)
scenario = Scenario(
entities,
name=scene_token,
road_network=road_network,
)
return scenario
def _convert_to_entities_road_network(
self,
instance_token_to_data,
map_name,
ego_instance_token: Optional[str] = None,
) -> Tuple[list[Entity], RoadNetwork]:
if ego_instance_token is not None:
if ego_instance_token not in instance_token_to_data.keys():
raise KeyError("Ego instance token not found in scene.")
else:
potential_ego_instance_tokens = [
i
for i, d in instance_token_to_data.items()
if "vehicle.car" in d.category_name
]
if len(potential_ego_instance_tokens) == 0:
raise ValueError("No potential ego vehicles in scene (no cars).")
ego_instance_token = choice(potential_ego_instance_tokens)
print(f"Chose ego instance token {ego_instance_token}")
        entities: list[Entity] = []
instance_tokens = set(instance_token_to_data.keys()) - set(
[ego_instance_token]
)
instance_tokens = [ego_instance_token] + list(instance_tokens)
for instance_token in instance_tokens:
instance_data = instance_token_to_data[instance_token]
entity_type = (
"Pedestrian"
if instance_data.category_name.split(".")[0]
in {"human", "pedestrian"}
else "Vehicle"
)
entity_category = "_".join(instance_data.category_name.split(".")[1:])
# The scenario gym works in a slightly different way from the nuScenes
# dataset. Bounding boxes are set at the catalog level and are fixed in
# time.
bounding_box = BoundingBox(
*np.array(instance_data.sizes).mean(axis=0)[:2], 0.0, 0.0
)
setattr(
Catalogs,
instance_token,
CatalogEntry(
Catalogs.nuScenes_catalog,
instance_data.category_name,
entity_category,
entity_type,
bounding_box,
{},
[],
),
)
rotations = Rotation.from_quat(instance_data.rotations).as_euler("xyz")[
:, 0
]
sg_rotations = np.pi * np.ones_like(rotations) - rotations
traj_data = np.vstack(
[
np.array(instance_data.times),
np.array(instance_data.trajectory).T[:2],
sg_rotations,
]
).T
trajectory = Trajectory(traj_data, fields=["t", "x", "y", "h"])
entity_ref = (
f"entity_{instance_token}"
if instance_token != ego_instance_token
else "ego"
)
entity = Entity(getattr(Catalogs, instance_token), ref=entity_ref)
entity.trajectory = trajectory
entities.append(entity)
all_trajectory_data = np.vstack(
[np.array(d.trajectory) for d in instance_token_to_data.values()]
)
x_range, y_range, _ = np.ptp(all_trajectory_data, axis=0)
radius = max([x_range, y_range]) * self.map_radius_multiplier
centre_coordinate = np.mean(all_trajectory_data, axis=0)[:2]
road_network = self._convert_nuScenes_map_to_road_network(
map_name, centre_coordinate, radius
)
return entities, road_network
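# Hedged usage sketch: converts the first scene of a locally downloaded
# v1.0-mini split. The data root is a placeholder; `data.scene` is the list of
# scene records exposed by the nuscenes-devkit.
if __name__ == "__main__":
    importer = NuScenesImporter("/path/to/nuscenes", dataset="v1.0-mini")
    first_scene_token = importer.data.scene[0]["token"]
    example_scenario = importer.convert_scene_to_gym(first_scene_token)
    print(example_scenario.name, len(example_scenario.entities))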
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/integrations/nuScenes.py
| 0.940803 | 0.425128 |
nuScenes.py
|
pypi
|
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple
import numpy as np
from shapely.geometry import MultiPolygon
from shapely.ops import unary_union
from shapely.prepared import prep
from shapely.vectorized import contains
from scenario_gym.entity import Entity
from scenario_gym.observation import SingleEntityObservation
from scenario_gym.road_network import RoadNetwork
from scenario_gym.state import State
from scenario_gym.utils import ArrayLike, NDArray
from .base import Sensor
@dataclass
class MapObservation(SingleEntityObservation):
"""Observation with a raster map."""
map: np.ndarray
class RasterizedMapSensor(Sensor):
"""
Returns a rasterized semantic map as a 2d grid of vectors.
Additional custom layers may be implemented by subclassing this sensor
and implementing a perpare and a get method for the new layer.
The prepare method should be called `_prepare_{}_layer` where {} is replaced
with the string layer name. This method is called once when the road network
is first seen and can be used to prepare any required data e.g. using prep on
any shapely objects to improve performance.
The get method should be called `_{}_layer` where {} is replaced with the
string layer name. This method is called each step with the state and the map
    coordinates to return the value of the map at each coordinate.
"""
_all_layers: List[str] = [
"entity",
"driveable_surface",
"road",
"intersection",
"lane",
"walkable_surface",
"pavement",
"crossing",
]
def __init__(
self,
entity: Entity,
layers: Optional[List[str]] = None,
height: float = 20.0,
width: float = 20.0,
freq: Optional[float] = 1.0,
n: Optional[int] = None,
channels_first: bool = False,
):
"""
Init the sensor.
Parameters
----------
entity : Entity
The entity.
layers : Optional[List[str]]
The layers to be observed as string names. The available layers can be
found in `RasterizedMapSensor._all_layers`. The order in which these
are returned will be used for the output array.
height : float
The length of the box around the entity in the y-direction in the
local frame.
width : float
The length of the box around the entity in the x-direction in the
local frame.
        freq : Optional[float]
The frequency of sampling points for the rasterized map. Only one of
`freq` and `n` should be passed.
n : Optional[int]
The number of sampling points for the rasterized map. Only one of
`freq` and `n` should be passed.
channels_first : bool
If given returns (C, W, H) rather than (W, H, C)
"""
super().__init__(entity)
self.layers = (
layers if layers is not None else ["entity", "driveable_surface"]
)
self.check_layers()
self.height = height
self.width = width
self.channels_first = channels_first
if n is None:
assert freq is not None, "At least one of n and freq must be provided."
self.nw, self.nh = int(freq * width), int(freq * height)
else:
self.nw = self.nh = n
self.X = np.array(
np.meshgrid(
np.linspace(-self.width / 2, self.width / 2, self.nw),
np.linspace(-self.height / 2, self.height / 2, self.nh),
)
).transpose(1, 2, 0)
def check_layers(self) -> None:
"""Check that all layers are implemented correctly."""
for layer in self.layers:
try:
getattr(self, f"_{layer}_layer")
getattr(self, f"_prepare_{layer}_layer")
except AttributeError:
raise NotImplementedError(
f"Layer {layer} does not have a get and/or a prepare method."
)
def _reset(self, state: State) -> MapObservation:
"""Reset the sensor at the start of the scenario."""
self._road_network: Optional[RoadNetwork] = None
return self._step(state)
def _step(self, state: State) -> MapObservation:
"""Return the rasterized map around the entity."""
if self._road_network is None:
self._prepare_layers(state)
pose = state.poses[self.entity]
coords = self._get_coords(pose).reshape(-1, 2)
layers = [getattr(self, f"_{l}_layer")(state, coords) for l in self.layers]
        obs_map = np.array(layers).reshape(len(layers), self.nw, self.nh)
return MapObservation(
self.entity,
*state.get_entity_data(self.entity),
obs_map if self.channels_first else obs_map.transpose(1, 2, 0),
)
@property
def output_shape(self) -> Tuple[int, int, int]:
"""Return the output shape of the rasterized map."""
if self.channels_first:
return (len(self.layers), self.nw, self.nh)
return (self.nw, self.nh, len(self.layers))
def _get_coords(self, pose: ArrayLike) -> NDArray:
"""Get the coordinates at which the map should be constructed."""
X = self.X # (nw, nh, 2)
xy, theta = pose[[0, 1]], pose[3] + math.pi / 2
R = np.array(
[
[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)],
]
)
return (X @ R.T) + xy[None, None, :]
def _prepare_layers(self, state: State) -> None:
"""Get all data needed to compute the map at future timesteps."""
self._road_network = state.scenario.road_network
for layer in self.layers:
getattr(self, f"_prepare_{layer}_layer")(state)
def _prepare_entity_layer(self, state: State) -> None:
"""Prepare the entity layer."""
pass
def _entity_layer(self, state: State, coords: ArrayLike) -> NDArray:
"""
Check which points are occupied by the bounding box of an entity.
Note: this includes the sensor's own entity.
"""
entities = prep(
MultiPolygon(
[e.get_bounding_box_geom(pose) for e, pose in state.poses.items()]
)
)
return contains(entities, coords[:, 0], coords[:, 1])
def _prepare_driveable_surface_layer(self, state: State) -> None:
"""Prepare the driveable surface layer."""
self._driveable_surface = prep(self._road_network.driveable_surface)
def _driveable_surface_layer(self, state: State, coords: ArrayLike) -> NDArray:
"""Check which of the given points lie in the driveable surface."""
return contains(self._driveable_surface, coords[:, 0], coords[:, 1])
def _prepare_road_layer(self, state: State) -> None:
"""Prepare the road layer."""
self._roads = prep(
unary_union(
[r.boundary for r in self._road_network.roads],
)
)
def _road_layer(self, state: State, coords: ArrayLike) -> ArrayLike:
"""Check which points lie in a road."""
return contains(self._roads, coords[:, 0], coords[:, 1])
def _prepare_intersection_layer(self, state: State) -> None:
"""Prepare the intersection layer."""
self._intersections = prep(
unary_union(
[i.boundary for i in self._road_network.intersections],
)
)
def _intersection_layer(self, state: State, coords: ArrayLike) -> NDArray:
"""Check which points lie in an intersection."""
return contains(self._intersections, coords[:, 0], coords[:, 1])
def _prepare_lane_layer(self, state: State) -> None:
"""Prepare the lane layer."""
self._lanes = prep(
unary_union(
[l.boundary for r in self._road_network.roads for l in r.lanes],
)
)
def _lane_layer(self, state: State, coords: ArrayLike) -> NDArray:
"""Check which points lie in a lane."""
return contains(self._lanes, coords[:, 0], coords[:, 1])
def _prepare_walkable_surface_layer(self, state: State) -> None:
"""Prepare the walkable surface layer."""
self._walkable_surface = prep(self._road_network.walkable_surface)
def _walkable_surface_layer(self, state: State, coords: ArrayLike) -> NDArray:
"""Check which points lie in a walkable surface."""
return contains(self._walkable_surface, coords[:, 0], coords[:, 1])
def _prepare_pavement_layer(self, state: State) -> None:
"""Prepare the pavement layer."""
self._pavements = prep(
unary_union([p.boundary for p in self._road_network.pavements])
)
def _pavement_layer(self, state: State, coords: ArrayLike) -> NDArray:
"""Check which points lie in a pavement."""
return contains(self._pavements, coords[:, 0], coords[:, 1])
def _prepare_crossing_layer(self, state: State) -> None:
"""Prepare the crossing layer."""
self._crossings = prep(
unary_union([c.boundary for c in self._road_network.crossings])
)
def _crossing_layer(self, state: State, coords: ArrayLike) -> NDArray:
"""Check which points lie in a pedestrian crossing."""
return contains(self._crossings, coords[:, 0], coords[:, 1])
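# Hedged sketch of the subclassing pattern described in the class docstring: a
# custom "off_road" layer is added by implementing the paired
# _prepare_off_road_layer / _off_road_layer methods. The layer name and its
# semantics are illustrative and not part of the package.
# Usage would be e.g. OffRoadMapSensor(entity, layers=["entity", "off_road"]).
class OffRoadMapSensor(RasterizedMapSensor):
    """Rasterized map sensor with an extra layer marking non-driveable points."""
    _all_layers: List[str] = RasterizedMapSensor._all_layers + ["off_road"]
    def _prepare_off_road_layer(self, state: State) -> None:
        """Reuse the prepared driveable surface for the complement layer."""
        self._prepare_driveable_surface_layer(state)
    def _off_road_layer(self, state: State, coords: ArrayLike) -> NDArray:
        """Mark the points that do not lie on the driveable surface."""
        return ~contains(self._driveable_surface, coords[:, 0], coords[:, 1])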
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/sensor/map.py
| 0.951762 | 0.747846 |
map.py
|
pypi
|
from dataclasses import dataclass
from typing import Dict, List, Optional
import numpy as np
from scenario_gym.entity import Entity
from scenario_gym.observation import (
Observation,
SingleEntityObservation,
combine_observations,
)
from scenario_gym.state import State, detect_collisions
from .base import Sensor
class CombinedSensor(Sensor):
"""Combines different observations into one."""
def __init__(self, entity: Entity, *sensors: Sensor):
"""Init the sensor."""
        assert all(s.entity == entity for s in sensors)
super().__init__(entity)
self.sensors = sensors
self.obs_class = None
def _reset(self, state: State) -> Observation:
"""Reset all sensors."""
init_obs = [s.reset(state) for s in self.sensors]
self.obs_class = combine_observations(*(obs.__class__ for obs in init_obs))
return self.obs_class.from_obs(*init_obs)
def _step(self, state: State) -> Observation:
"""Get observations from all sensors."""
return self.obs_class.from_obs(*(s.step(state) for s in self.sensors))
class EgoLocalizationSensor(Sensor):
"""Observation containing just the base entity information."""
def _reset(self, state: State) -> SingleEntityObservation:
"""Return the entity observation."""
return self._step(state)
def _step(self, state: State) -> SingleEntityObservation:
"""Return the entity observation."""
return SingleEntityObservation(
self.entity, *state.get_entity_data(self.entity)
)
@dataclass
class FutureCollisionObservation(SingleEntityObservation):
"""Observation with future collision information."""
future_collision: bool
class FutureCollisionDetector(Sensor):
"""
Detects any future collisions with the ego in the scenario.
    Entity trajectories are used to obtain their future positions
    and compare them against the sensor's entity.
"""
def __init__(self, entity: Entity, horizon: float = 5.0):
"""
Init the sensor.
Parameters
----------
entity : Entity
The entity.
horizon : float
The time horizon over which to look for collisions.
"""
super().__init__(entity)
self.horizon = horizon
def _reset(self, state: State) -> FutureCollisionObservation:
"""Return future collisions."""
return self._step(state)
def _step(self, state: State) -> FutureCollisionObservation:
"""Return future collisions."""
ents = {e: None for e in state.scenario.entities if e != self.entity}
# check for collisions over the horizon
future_collision = False
for t in np.linspace(state.t, state.t + self.horizon, 10):
ego_pose = self.entity.trajectory.position_at_t(t)
for e in ents:
ents[e] = e.trajectory.position_at_t(t)
collisions = detect_collisions({self.entity: ego_pose}, ents)
if len(collisions[self.entity]) > 0:
future_collision = True
return FutureCollisionObservation(
self.entity,
*state.get_entity_data(self.entity),
future_collision,
)
@dataclass
class CollisionObservation(SingleEntityObservation):
"""Observation with detected collisions."""
collisions: Dict[Entity, List[Entity]]
class GlobalCollisionDetector(Sensor):
"""Returns collisions observed in the scene."""
def _reset(self, state: State) -> CollisionObservation:
"""Return the collision observation."""
return self._step(state)
def _step(self, state: State) -> CollisionObservation:
"""Return the collision observation."""
return CollisionObservation(
self.entity,
*state.get_entity_data(self.entity),
state.collisions(),
)
@dataclass
class KeyboardObservation(SingleEntityObservation):
"""Observation with detected collisions."""
last_keystroke: Dict[Entity, List[Entity]]
class KeyboardInputDetector(Sensor):
"""Detects keyboard input."""
def _reset(self, state: State) -> KeyboardObservation:
"""Return the collision observation."""
return self._step(state)
def _step(self, state: State) -> KeyboardObservation:
"""Return the keyboard observation."""
return KeyboardObservation(
self.entity,
*state.get_entity_data(self.entity),
state.last_keystroke,
)
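# Hedged usage sketch: combining the localization and future-collision sensors
# for an ego entity. The horizon value is illustrative; `ego` is a placeholder
# for an Entity taken from a loaded scenario.
def make_example_ego_sensor(ego: Entity) -> CombinedSensor:
    """Build a combined ego sensor; a minimal sketch, not a package default."""
    return CombinedSensor(
        ego,
        EgoLocalizationSensor(ego),
        FutureCollisionDetector(ego, horizon=3.0),
    )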
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/sensor/common.py
| 0.969971 | 0.661554 |
common.py
|
pypi
|
import random
from itertools import chain
from typing import Dict, List, Optional, Tuple
import numpy as np
from scenario_gym.road_network import RoadNetwork
class RouteFinder:
"""Find routes along walkable areas in the road network."""
def __init__(self, rn: RoadNetwork):
"""Construct the graph representation."""
self.rn = rn
(
self.graph,
self.node_to_idx,
self.node_data,
) = make_pedestrian_connection_graph(rn)
def find_route(
self, start: np.ndarray, finish: np.ndarray
) -> Optional[List[Tuple[float, float]]]:
"""Find the route or return None if one cannot be found."""
return find_route(self.graph, self.node_data, start, finish)
def generate_route(
self,
n: int,
start: Optional[np.ndarray] = None,
no_repeat: bool = False,
) -> List[Tuple[float, float]]:
"""Generate a route by a random walk."""
if start is not None:
n0 = min(
self.node_data,
key=lambda x: np.linalg.norm(self.node_data[x] - start),
)
route = [n0]
else:
            route = [random.choice(list(self.graph))]
while len(route) < n:
suc = self.graph[route[-1]]
if no_repeat:
suc = list(set(suc).difference(route))
if not suc:
break
route.append(random.choice(suc))
return [self.node_data[i] for i in route]
def make_pedestrian_connection_graph(
rn: RoadNetwork,
) -> Tuple[Dict[int, List[int]], Dict[str, int], Dict[int, Tuple[float, float]]]:
"""
Build a graph representing the walkable surface of the road network.
Nodes are positions in the pavements and crossings with edges connecting
nodes if pedestrians can walk between them.
Parameters
----------
rn : RoadNetwork
The road network.
Returns
-------
graph : Dict[int, List[int]]
The graph represented as a dictionary with integer node indexes and
lists of neighbours of each node as values.
node_to_idx : Dict[str, int]
A dictionary giving a string identifier of each node index. The string
identifier is '{road_object_id}_{index_in_object}'.
node_data : Dict[int, Tuple[float, float]]
A dictionary giving the xy coordinates of each node.
"""
graph = {}
node_to_idx = {}
node_data = {}
pavement_coords = {}
for p in rn.pavements:
pavement_coords[p.id] = np.array(
[
np.array(p.center.interpolate(x, normalized=False).xy).squeeze()
for x in np.linspace(0.0, p.center.length, int(p.center.length))
]
)
crossing_coords = {}
for c in rn.crossings:
crossing_coords[c.id] = np.array(
[
np.array(c.center.interpolate(x, normalized=False).xy).squeeze()
for x in np.linspace(0.0, c.center.length, int(c.center.length))
]
)
for obj, coords in chain(pavement_coords.items(), crossing_coords.items()):
for i, (x, y) in enumerate(coords):
node_to_idx[f"{obj}_{i}"] = len(node_to_idx)
graph[node_to_idx[f"{obj}_{i}"]] = []
node_data[node_to_idx[f"{obj}_{i}"]] = (x, y)
for obj, coords in chain(pavement_coords.items(), crossing_coords.items()):
for i in range(len(coords) - 1):
graph[node_to_idx[f"{obj}_{i}"]].append(node_to_idx[f"{obj}_{i+1}"])
graph[node_to_idx[f"{obj}_{i+1}"]].append(node_to_idx[f"{obj}_{i}"])
for c in rn.crossings:
for p in c.pavements:
c_coords, p_coords = crossing_coords[c.id], pavement_coords[p]
c_idx, p_idx = np.unravel_index(
np.linalg.norm(
c_coords[:, None, :] - p_coords[None, :, :], axis=-1
).argmin(),
(c_coords.shape[0], p_coords.shape[0]),
)
graph[node_to_idx[f"{c.id}_{c_idx}"]].append(
node_to_idx[f"{p}_{p_idx}"]
)
graph[node_to_idx[f"{p}_{p_idx}"]].append(
node_to_idx[f"{c.id}_{c_idx}"]
)
return graph, node_to_idx, node_data
def shortest_path(
graph: Dict[int, List[int]],
start: int,
goal: int,
) -> Optional[List[int]]:
"""
Find the shortest path between two nodes in the graph.
Returns
-------
path : Optional[List[int]]
The path as a list of nodes. Will return None if the start and goal are
not path connected.
"""
explored = []
queue = [[start]]
if start == goal:
return [start]
while queue:
path = queue.pop(0)
node = path[-1]
if node not in explored:
neighbours = graph[node]
for neighbour in neighbours:
new_path = path.copy()
new_path.append(neighbour)
queue.append(new_path)
if neighbour == goal:
return new_path
explored.append(node)
def find_route(
graph: Dict[int, List[int]],
node_data: Dict[int, Tuple[float, float]],
start: np.ndarray,
finish: np.ndarray,
) -> Optional[np.ndarray]:
"""
Find the shortest path in the walkable area given by the graph.
First finds the closest nodes to the start and finish then connects
them with the shortest path in the graph. Will return None if the
start and finish are not path connected.
"""
if not node_data:
return np.array([start] + [finish])
start_node = min(
node_data, key=lambda n: np.linalg.norm(np.array(node_data[n]) - start)
)
end_node = min(
node_data,
key=lambda n: np.linalg.norm(np.array(node_data[n]) - finish),
)
route = shortest_path(graph, start_node, end_node)
if route is None:
return None
xy = [list(node_data[n]) for n in route]
return np.array([start] + xy + [finish])
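# Hedged sketch of the graph helpers on a toy graph: four nodes on a line, with
# node data mirroring the structures returned by make_pedestrian_connection_graph.
if __name__ == "__main__":
    toy_graph = {0: [1], 1: [0, 2], 2: [1, 3], 3: [2]}
    toy_nodes = {0: (0.0, 0.0), 1: (1.0, 0.0), 2: (2.0, 0.0), 3: (3.0, 0.0)}
    print(shortest_path(toy_graph, 0, 3))  # [0, 1, 2, 3]
    print(find_route(toy_graph, toy_nodes, np.array([0.1, 0.0]), np.array([2.9, 0.0])))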
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/pedestrian/route.py
| 0.858881 | 0.527438 |
route.py
|
pypi
|
from typing import Tuple, Union
import numpy as np
from shapely.geometry import MultiPolygon, Point, Polygon
from shapely.ops import nearest_points
from scenario_gym.agent import Agent
from scenario_gym.entity import Entity
from scenario_gym.pedestrian.behaviour import PedestrianBehaviour
from scenario_gym.pedestrian.observation import PedestrianObservation
from scenario_gym.pedestrian.random_walk import RandomWalkParameters
from scenario_gym.utils import NDArray
from scenario_gym.viewer import rotate_coords
class SocialForceParameters(RandomWalkParameters):
"""Parameters for the social force model."""
distance_threshold = 3
sight_weight = 0.5
sight_weight_use = True
sight_angle = 200
relaxation_time = 1.5
ped_repulse_V = 1.0
ped_repulse_sigma = 1.0
ped_attract_C = 0.0
boundary_repulse_U = 10.0
boundary_repulse_R = 0.2
imp_boundary_repulse_U = 2.0
imp_boundary_repulse_R = 0.1
class SocialForce(PedestrianBehaviour):
"""Social force model."""
def __init__(self, params: SocialForceParameters):
"""Init the behaviour model."""
super().__init__(params)
self.bias_lon = params.bias_lon
self.bias_lat = params.bias_lat
self.std_lon = params.std_lon
self.std_lat = params.std_lat
def _step(self, observation: PedestrianObservation, agent: Agent) -> Tuple:
"""Return the new speed and heading using the social force model."""
# Start with attraction force to goal point
force_sum = self._force_to_goal(
observation,
agent.route[agent.goal_idx],
agent.speed_desired,
)
for (
pedestrian,
pose,
vel,
) in observation.near_peds: # Forces from other pedestrians
# Vector of agent's sight (velocity angle + head angle)
view_dir_vector = rotate_coords(vel[[0, 1]], observation.head_rot_angle)
view_dir_unit_vector = view_dir_vector / (
np.linalg.norm(view_dir_vector) + 0.0000000001
)
force_repulsion = self._force_pedestrian_repulsion(
observation, (pedestrian, pose, vel)
)
force_attraction = self._force_pedestrian_attraction(
observation, (pedestrian, pose, vel)
)
if self.params.sight_weight_use:
force_sum += (
self._sight_weight(force_repulsion, view_dir_unit_vector)
* force_repulsion
)
force_sum += (
self._sight_weight(force_attraction, view_dir_unit_vector)
* force_attraction
)
else:
force_sum += force_attraction
force_sum += force_repulsion
# get current position
point = Point(*observation.pose[:2])
# Force from closest walkable boundary
if observation.walkable_surface.area > 0:
if observation.walkable_surface.contains(point):
force_sum += self._force_boundary(
observation,
observation.walkable_surface,
self.params.boundary_repulse_R,
self.params.boundary_repulse_U,
)
# Force from immovable boundary
if observation.impenetrable_surface.area > 0:
sign = 1 - 2 * observation.impenetrable_surface.contains(point)
force_sum += sign * self._force_boundary(
observation,
observation.impenetrable_surface,
self.params.imp_boundary_repulse_R,
self.params.imp_boundary_repulse_U,
)
# Random fluctuations
speed_rand = np.random.normal(self.bias_lon, self.std_lon)
heading_rand = np.random.normal(self.bias_lat, self.std_lat)
speed = min(
np.linalg.norm(force_sum) + speed_rand,
agent.speed_desired * self.max_speed_factor,
) # Limit to max speed
heading = np.arctan2(force_sum[1], force_sum[0]) + heading_rand
agent.force = force_sum
return speed, heading
def _force_to_goal(
self,
obs: PedestrianObservation,
goal_point: NDArray,
speed_desired: float,
) -> np.ndarray:
"""Compute the attraction force from the goal."""
agent_pos = obs.pose[[0, 1]]
agent_vel = obs.velocity[[0, 1]]
dir_vector = goal_point - agent_pos
dir_vector_norm = np.linalg.norm(dir_vector)
if dir_vector_norm == 0:
dir_vector_norm += 0.000000001
unit_dir_vector = dir_vector / dir_vector_norm
force_vector = (
1
/ self.params.relaxation_time
* (speed_desired * unit_dir_vector - agent_vel)
)
return force_vector
def _force_pedestrian_repulsion(
self,
obs: PedestrianObservation,
other_pedestrian: Tuple[Entity, NDArray, NDArray],
) -> NDArray:
"""Compute the repulsion force from other pedestrians."""
agent_pos = obs.pose[[0, 1]]
other_ped, other_pose, other_v = other_pedestrian
other_pos = other_pose[[0, 1]]
other_dir = other_v[[0, 1]]
# Vector to other agent
r_ao = agent_pos - other_pos
r_ao_norm = np.linalg.norm(r_ao)
# Auxiliary calculations
v_vel_magnitude = np.linalg.norm(other_dir) + 0.0000000001
unit_other_dir = other_dir / v_vel_magnitude
other_step = v_vel_magnitude * (obs.next_t - obs.t)
r_ao_other = r_ao - other_step * unit_other_dir
r_ao_other_norm = np.linalg.norm(r_ao_other) + 0.0000000001
# Ellipse semi-minor axis b
b = (1 / 2) * np.sqrt((r_ao_norm + r_ao_other_norm) ** 2 - other_step**2)
db = (
(1 / 4)
* (1 / b)
* (r_ao_norm + r_ao_other_norm)
* (r_ao / r_ao_norm + r_ao_other / r_ao_other_norm)
)
force_vector = (
self.params.ped_repulse_V
/ self.params.ped_repulse_sigma
* np.exp(-b / self.params.ped_repulse_sigma)
* db
) # gradient
return force_vector
def _force_pedestrian_attraction(
self,
obs: PedestrianObservation,
other_pedestrian: Tuple[Entity, NDArray, NDArray],
) -> NDArray:
"""Compute the attraction force from other pedestrians."""
agent_pos = obs.pose[[0, 1]]
other_pos = other_pedestrian[1][[0, 1]]
# Vector to other agent
r_ao = agent_pos - other_pos
return 2 * self.params.ped_attract_C * r_ao # gradient
def _force_boundary(
self,
obs: PedestrianObservation,
object: Union[Polygon, MultiPolygon],
param_r: float,
param_u: float,
) -> NDArray:
"""
Compute the force from the boundary of an object.
Can be an attractive or a repulsive force.
"""
agent_pos = obs.pose[[0, 1]]
agent_point = Point(*agent_pos)
closest_point, _ = nearest_points(object, agent_point)
closest_pos = np.array(closest_point.xy).squeeze()
r_aB = agent_pos - closest_pos
r_aB_norm = np.linalg.norm(r_aB)
r_aB_unit = r_aB / (r_aB_norm + 0.0000000001)
return (
param_u / param_r * r_aB_unit * np.exp(-r_aB_norm / param_r)
) # gradient
def _sight_weight(
self, force_vector: np.ndarray, view_dir_unit_vector: np.ndarray
) -> float:
"""Compute the weight force depending on angle of sight."""
dot_dir = np.dot(view_dir_unit_vector, force_vector) / (
np.linalg.norm(force_vector) + 0.0000000001
)
if dot_dir >= np.cos(self.params.sight_angle / 2 * np.pi / 180):
return 1.0
return self.params.sight_weight
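# Hedged numerical sketch of the goal-attraction term used in _force_to_goal,
# written with plain numpy so it can be checked by hand:
# F = (1 / relaxation_time) * (speed_desired * unit(goal - pos) - velocity).
def _goal_force_example() -> NDArray:
    """Compute the goal force for made-up numbers; illustration only."""
    pos, vel = np.array([0.0, 0.0]), np.array([0.5, 0.0])
    goal = np.array([4.0, 3.0])  # 5 m away, unit direction (0.8, 0.6)
    speed_desired, relaxation_time = 1.5, 1.5
    unit_dir = (goal - pos) / np.linalg.norm(goal - pos)
    return (speed_desired * unit_dir - vel) / relaxation_time  # approx [0.47, 0.6]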
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/pedestrian/social_force.py
| 0.947174 | 0.527256 |
social_force.py
|
pypi
|
from typing import List
from scenario_gym.entity import Entity, Pedestrian
from scenario_gym.pedestrian.observation import PedestrianObservation
from scenario_gym.sensor import Sensor
from scenario_gym.state import State
class PedestrianSensor(Sensor):
"""Returns observation (complete state) for pedestrian entities."""
def __init__(
self,
entity: Entity,
head_rot_angle: float = 0.0,
distance_threshold: float = 1.0,
):
"""
Init the pedestrian sensor.
Parameters
----------
entity : Entity
The pedestrian entity.
head_rot_angle : float
            Rotation angle of the pedestrian's head in rad with respect to the heading
(0 means looking forward).
distance_threshold: float
Only pedestrians within this distance of the entity will be considered.
"""
super().__init__(entity)
self.head_rot_angle = head_rot_angle
self.distance_threshold = distance_threshold
def _reset(self, state: State) -> PedestrianObservation:
"""Reset the sensor."""
return self._step(state)
def _step(self, state: State) -> PedestrianObservation:
"""Produce the pedestrian observation."""
near_peds = self.get_nearby_pedestrians(state)
return PedestrianObservation(
self.entity,
*state.get_entity_data(self.entity),
self.head_rot_angle,
near_peds,
state.scenario.road_network.walkable_surface,
state.scenario.road_network.impenetrable_surface,
)
def get_nearby_pedestrians(self, state: State) -> List[Entity]:
"""Get other pedestrians within a radius of the entity."""
return [
(e, state.poses[e], state.velocities[e])
for e in state.get_entities_in_radius(
*state.poses[self.entity][:2],
self.distance_threshold,
)
if (isinstance(e, Pedestrian) or (e.type == "Pedestrian"))
and (e != self.entity)
]
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/pedestrian/sensor.py
| 0.959592 | 0.694665 |
sensor.py
|
pypi
|
from typing import List
import numpy as np
from shapely.geometry import LineString, Point
from scenario_gym.agent import Agent
from scenario_gym.entity import Entity
from scenario_gym.pedestrian.action import PedestrianAction
from scenario_gym.pedestrian.behaviour import PedestrianBehaviour
from scenario_gym.pedestrian.controller import PedestrianController
from scenario_gym.pedestrian.observation import PedestrianObservation
from scenario_gym.pedestrian.sensor import PedestrianSensor
class PedestrianAgent(Agent):
"""A pedestrian agent with a behaviour model."""
def __init__(
self,
entity: Entity,
route: List[np.array],
speed_desired: float,
behaviour: PedestrianBehaviour,
max_speed: float = 5.0,
head_rot_angle: float = 0.0,
distance_threshold: float = 1.0,
):
"""Init the agent."""
super().__init__(
entity,
PedestrianController(entity, max_speed=max_speed),
PedestrianSensor(
entity,
head_rot_angle=head_rot_angle,
distance_threshold=distance_threshold,
),
)
self.goal_idx = 0
self.speed_desired = speed_desired
self.behaviour = behaviour
self.force = np.array([0.0, 0.0])
self.route = route
self.route_geom = LineString(route)
self.route_arcs = np.hstack(
[[0.0], np.linalg.norm(np.diff(route, axis=0), axis=1).cumsum()]
)
def _step(self, observation: PedestrianObservation) -> PedestrianAction:
"""
Produce the next action.
Parameters
----------
observation : PedestrianObservation
All info from environment within perception radius.
"""
        # Change goal point to the next one on the route if close enough
if self.goal_idx <= len(self.route) - 1:
s = self.route_geom.project(Point(*observation.pose[:2]))
self.goal_idx = np.argwhere(self.route_arcs <= s).max() + 1
if self.goal_idx <= len(self.route) - 1:
speed, heading = self.behaviour.step(observation, self)
else: # reached goal
speed = 0
heading = 0
self.force[:] = 0
return PedestrianAction(speed, heading)
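# Hedged construction sketch: wiring a pedestrian agent from a route found with
# RouteFinder (scenario_gym.pedestrian.route). `scenario`, `ped_entity` and
# `behaviour` are placeholders for a loaded scenario, one of its pedestrian
# entities and an already-constructed PedestrianBehaviour instance.
def make_example_pedestrian_agent(
    scenario, ped_entity: Entity, behaviour: PedestrianBehaviour
) -> PedestrianAgent:
    """Build a route-following pedestrian agent; a minimal sketch only."""
    from scenario_gym.pedestrian.route import RouteFinder
    finder = RouteFinder(scenario.road_network)
    start_xy = ped_entity.trajectory.position_at_t(0.0)[:2]
    route = finder.generate_route(10, start=start_xy)
    return PedestrianAgent(
        ped_entity,
        route,
        speed_desired=1.4,
        behaviour=behaviour,
    )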
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/pedestrian/agent.py
| 0.947697 | 0.495239 |
agent.py
|
pypi
|
from __future__ import annotations
import warnings
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, TypeVar, Union
import numpy as np
from shapely.geometry import MultiPolygon, Point, Polygon
from shapely.vectorized import contains
from scenario_gym.callback import StateCallback
from scenario_gym.entity import BatchReplayEntity, Entity
from scenario_gym.road_network import RoadObject
from scenario_gym.scenario import Scenario, ScenarioAction
from scenario_gym.state.utils import detect_collisions
from scenario_gym.trajectory import Trajectory, is_stationary
Agent = TypeVar("Agent")
class State:
"""
The global state of the gym.
Holds the current time, the terminal state and the positions and velocities of
all the entities.
Can also be parameterised with different end conditions for the scenario.
E.g. to end when the recorded scenario ends or if a collision occurs.
Additional information may be provided through custom methods passed
as state_callbacks.
"""
def __init__(
self,
scenario: Scenario,
scenario_path: Optional[str] = None,
persist: bool = False,
conditions: Optional[List[Union[str, Callable[[State], bool]]]] = None,
state_callbacks: Optional[Dict[str, StateCallback]] = None,
):
"""
Init the state.
Parameters
----------
scenario : Scenario
The scenario to be simulated.
scenario_path : Optional[str]
The path to the scenario file if loaded from one.
persist : bool
Whether entities should persist in the simulation.
conditions : Optional[List[Union[str, Callable[[State], bool]]]]
Terminal conditions that will end the scenario if any is met. May be a
string referencing an entry of the TERMINAL_CONDITIONS dictionary.
state_callbacks : Optional[List[StateCallback]]
Methods to be called on the state when the timestep is updated.
            Can be used to add additional information to the state that is then
            accessible by all agents.
"""
self._scenario = scenario
self.scenario_path = scenario_path
self.persist = persist
if conditions is None:
self.terminal_conditions = [TERMINAL_CONDITIONS["max_length"]]
else:
self.terminal_conditions = [
cond if callable(cond) else TERMINAL_CONDITIONS[cond]
for cond in conditions
]
self.state_callbacks = [] if state_callbacks is None else state_callbacks
self.next_t: Optional[float] = None
self._t: Optional[float] = None
self._prev_t: Optional[float] = None
self.is_done = False
self.last_keystroke: Optional[int] = None
self._collisions: Optional[Dict[Entity, List[Entity]]] = None
self._callbacks: Dict[Type[StateCallback], StateCallback] = {}
self.unapplied_actions: List[ScenarioAction]
self.action_apply_times: Dict[ScenarioAction, float]
self.all_entities: List[Entity]
self.poses: Dict[Entity, np.ndarray]
self.prev_poses: Dict[Entity, np.ndarray]
self.velocities: Dict[Entity, np.ndarray]
self.distances: Dict[Entity, float]
self.entity_state: Dict[Entity, Any]
self._recorded_poses: Dict[Entity, List[Tuple[float, np.ndarray]]]
self.agents: Dict[Entity, Agent] = {}
self.non_agents = BatchReplayEntity(persist=persist)
@property
def scenario(self) -> Scenario:
"""Get the current scenario."""
return self._scenario
def reset(self, t_0: float) -> None:
"""
Reset the state to the initial timestep.
Parameters
----------
t_0 : float
Initial timestep.
"""
self._reset_data()
self.is_done = False
# set initial poses
# always use extrapolation for previous poses to get the initial velocity
velocities, poses = {}, {}
for entity in self.all_entities:
pose = entity.trajectory.position_at_t(
t_0,
extrapolate=(
entity.is_static()
or ((False, False) if self.persist else False)
),
)
if pose is not None:
poses[entity] = pose
velocities[entity] = entity.trajectory.velocity_at_t(t_0)
self.update_poses(t_0, poses)
self.velocities.update(velocities)
self.prev_t = t_0 - 0.1
self.update_actions()
for cb in self.state_callbacks:
cb.reset(self)
self.update_callbacks()
for agent in self.agents.values():
agent.reset(self)
def _reset_data(self) -> None:
"""Reset stored simulation data."""
self.next_t: Optional[float] = None
self._t: Optional[float] = None
self._prev_t: Optional[float] = None
self.unapplied_actions = self.scenario.actions.copy()
self.action_apply_times = {
a: float("nan") for a in self.scenario.actions.copy()
}
self.all_entities = self.scenario.entities.copy()
self.poses: Dict[Entity, np.ndarray] = {}
self.prev_poses: Dict[Entity, np.ndarray] = {}
self.velocities: Dict[Entity, np.ndarray] = {}
self.distances: Dict[Entity, float] = dict.fromkeys(self.all_entities, 0.0)
self.entity_state: Dict[Entity, Any] = dict.fromkeys(self.all_entities)
self._recorded_poses: Dict[Entity, List[Tuple[float, np.ndarray]]] = {}
for entity in self.all_entities:
self._recorded_poses[entity] = []
def step(self, new_poses: Dict[Entity, np.ndarray]) -> None:
"""Update by one timestep."""
self._clear_cache()
self.update_poses(self.next_t, new_poses.copy())
self.update_actions()
self.update_callbacks()
self.is_done = self.check_terminal()
def _clear_cache(self) -> None:
"""Clear cached data on step."""
self._collisions = None
self._callbacks = {}
@property
def t(self):
"""Get the time in seconds (s)."""
return self._t
@t.setter
def t(self, t: float) -> None:
self.prev_t = self._t
self._t = t
@property
def prev_t(self) -> float:
"""Get the previous time (s)."""
return self._prev_t
@prev_t.setter
def prev_t(self, prev_t: float) -> None:
self._prev_t = prev_t
@property
def dt(self) -> float:
"""Get the previous timestep."""
return self.t - self.prev_t
def update_poses(self, t: float, new_poses: Dict[Entity, np.ndarray]) -> None:
"""
Update poses of all entities.
The poses dictionary will be replaced with the new_poses and previous poses
will be updated for those entities in new_poses. For an entity in new_poses
which is not in the current poses (i.e. a new entity) the previous pose will
be estimated using trajectory extrapolation (to correctly calculate the
initial velocity).
"""
self.t = t
prev_poses = {}
for e in new_poses:
if e in self.poses:
prev_poses[e] = self.poses[e]
elif self.prev_t is not None:
prev_poses[e] = e.trajectory.position_at_t(
self.prev_t, extrapolate=True
)
self.prev_poses = prev_poses
self.poses = new_poses
if self.prev_t is not None:
self.update_statistics()
for entity, pose in self.poses.items():
self._recorded_poses[entity].append((self.t, pose))
def update_statistics(self) -> None:
"""Update entity velocities and distance travelled."""
self.velocities = {}
for entity in self.poses:
self.velocities[entity] = (
self.poses[entity] - self.prev_poses[entity]
) / self.dt
self.distances[entity] += np.linalg.norm(
(self.poses[entity] - self.prev_poses[entity])[:3]
)
def update_actions(self) -> None:
"""Update state actions."""
unapplied: List[ScenarioAction] = []
for act in self.unapplied_actions:
if act.trigger_condition(self):
self.apply_action(act)
self.action_apply_times[act] = self.t
else:
unapplied.append(act)
self.unapplied_actions = unapplied
def apply_action(self, action: ScenarioAction) -> None:
"""Apply an action to the state."""
entity = self.scenario.entity_by_name(action.entity_ref)
if entity is None:
warnings.warn(
f"No entity with name {action.entity_ref} was found for action "
f"{action.__class__.__name__}."
)
else:
action.apply(self, entity)
def update_callbacks(self) -> None:
"""Update all state callbacks."""
for m in self.state_callbacks:
m(self)
def check_terminal(self) -> bool:
"""Check if the state is terminal."""
return any(cond(self) for cond in self.terminal_conditions)
def recorded_poses(
self,
entity: Optional[Entity] = None,
) -> Union[np.ndarray, Dict[Entity, np.ndarray]]:
"""Get recorded poses for each or a given entity."""
if entity is not None:
poses = self._recorded_poses.get(entity, None)
if not poses:
return np.empty((0, 7))
ts, poses = map(np.array, zip(*poses))
return np.concatenate([ts[:, None], poses], axis=1)
data: Dict[Entity, np.ndarray] = {}
for ent, poses in self._recorded_poses.items():
if not poses:
data[ent] = np.empty((0, 7))
else:
ts, poses = map(np.array, zip(*poses))
data[ent] = np.concatenate([ts[:, None], poses], axis=1)
return data
def get_entity_data(
self, entity: Entity
) -> Tuple[float, float, np.ndarray, np.ndarray, float, np.ndarray, Any]:
"""Get state data for a specific entity."""
return (
self.t,
self.next_t,
self.poses.get(entity, None),
self.velocities.get(entity, None),
self.distances.get(entity, None),
self.recorded_poses(entity=entity),
self.entity_state.get(entity, None),
)
def collisions(self) -> Dict[Entity, List[Entity]]:
"""Return collisions between entities at the current time."""
if self._collisions is None:
self._collisions = detect_collisions(self.poses)
return self._collisions
def get_callback(
self, Callback: Type[StateCallback]
) -> Optional[StateCallback]:
"""Get a particular type of callback."""
if Callback not in self._callbacks:
for callback in self.state_callbacks:
if isinstance(callback, Callback):
self._callbacks[Callback] = callback
return self._callbacks.get(Callback)
def get_entity_box_points(self, e: Entity) -> np.ndarray:
"""Get the coordinates of the bounding box of an entity."""
return e.get_bounding_box_points(self.poses[e])
def get_entity_box_geom(self, e: Entity) -> Polygon:
"""Get the geometry of the bounding box of an entity."""
return e.get_bounding_box_geom(self.poses[e])
def get_road_info_at_entity(
self, e: Entity
) -> Tuple[List[str], List[RoadObject]]:
"""Return the road network information at the entities location."""
if not self.scenario.road_network:
return [], []
return self.scenario.road_network.get_geometries_at_point(
*self.poses[e][:2]
)
def get_entities_in_area(
self, area: Union[MultiPolygon, Polygon]
) -> List[Entity]:
"""
        Return all entities whose center point is within an area.
Parameters
----------
area : Union[MultiPolygon, Polygon]
A shapely geometry covering the chosen area.
"""
pos = np.array([pose[:2] for pose in self.poses.values()])
in_area = contains(area, pos[:, 0], pos[:, 1])
return [e for e, b in zip(self.poses, in_area) if b]
def get_entities_in_radius(self, x: float, y: float, r: float) -> List[Entity]:
"""
Get entities with center point within a circle.
Parameters
----------
x : float
The x-coordinate of the center.
y : float
            The y-coordinate of the center.
        r : float
            The radius of the circle.
"""
return self.get_entities_in_area(Point(x, y).buffer(r))
def to_scenario(self, name: Optional[str] = None) -> Scenario:
"""Create a scenario from the historical data in the state."""
if name is None:
name = (
f"Simulation of {self.scenario.name}"
                if self.scenario.name is not None
else None
)
entities = []
for entity, poses in self.recorded_poses().items():
new_entity = deepcopy(entity)
if is_stationary(poses):
poses = poses[None, 0]
new_entity.trajectory = Trajectory(poses)
entities.append(new_entity)
return Scenario(
entities,
name=name,
road_network=self.scenario.road_network,
actions=self.scenario.actions,
)
TERMINAL_CONDITIONS = {
"max_length": lambda s: s.t + s.dt > s.scenario.length,
"collision": lambda s: any(len(l) > 0 for l in s.collisions().values()),
"ego_collision": lambda s: len(s.collisions()[s.scenario.entities[0]]) > 0,
"ego_off_road": lambda s: not (
s.scenario.road_network.driveable_surface.contains(
Point(*s.poses[s.scenario.entities[0]][:2])
)
if s.scenario.entities[0] in s.poses
else False
),
}
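# Hedged usage sketch: terminal conditions can be given by name (keys of
# TERMINAL_CONDITIONS above) or as callables on the state. The timeout value is
# illustrative.
def make_example_state(scenario: Scenario, timeout: float = 30.0) -> State:
    """Build a State that ends on ego collision, off-road, or a fixed timeout."""
    return State(
        scenario,
        conditions=["ego_collision", "ego_off_road", lambda s: s.t >= timeout],
    )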
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/state/state.py
| 0.946929 | 0.589303 |
state.py
|
pypi
|
import math
from enum import Enum
from typing import List, Optional, Tuple
import numpy as np
from shapely.geometry import Polygon
from scenario_gym.entity import Entity
from scenario_gym.metrics.base import Metric
from scenario_gym.state import State
def angle_between(x: float, a_low: float, a_high: float) -> bool:
"""Return True if angle x is between a_low and a_high."""
x = x % (math.pi * 2)
a_low = a_low % (math.pi * 2)
a_high = a_high % (math.pi * 2)
return (
((a_low < x) or (x <= a_high))
if (a_low >= a_high)
else (a_low <= x < a_high)
)
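# Hedged sanity check for the wrap-around behaviour of angle_between: the
# interval [7*pi/4, pi/4) crosses zero, so a heading of 0 lies inside it while
# pi does not. Purely illustrative.
def _angle_between_example() -> None:
    assert angle_between(0.0, 7 * math.pi / 4, math.pi / 4)
    assert not angle_between(math.pi, 7 * math.pi / 4, math.pi / 4)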
class CollisionTypes(Enum):
"""Enumerates possible collision types."""
other = 0
t_bone = 1
head_on = 2
rear_end = 3
side_swipe = 4
non_vehicle = 5
class CollisionPoints(Enum):
"""Enumerates possible collision points around a bounding box."""
front = 0
front_corner = 1
side = 2
back = 3
back_corner = 4
class CollisionMetric(Metric):
"""
Detects and classifies collisions with the ego.
Records all collisions between entities and the ego. If the
hazard entity is a vehicle then the collision is classified
into t_bone, head_on, rear_end or side_swipe. If not then it
is recorded as other.
"""
name = "collisions"
def __init__(self, c_tol: float = 0.4, name: Optional[str] = None):
self.ego: Optional[Entity] = None
self.collisions: List[Tuple[float, str, CollisionTypes]] = []
self.c_tol = c_tol
super().__init__(name=name)
def _reset(self, state: State) -> None:
"""Reset the ego and recorded collisions."""
self.ego = state.scenario.ego
self.collisions: List[Tuple[float, str, CollisionTypes]] = []
self.last_timestep: List[Entity] = []
def _step(self, state: State) -> None:
"""Update recorded collisions."""
for e_other in state.collisions()[self.ego]:
if e_other not in self.last_timestep:
self.collisions.append(self.record_collision(state, e_other))
self.last_timestep = state.collisions()[self.ego].copy()
def get_state(self) -> List[Tuple[float, str, str]]:
"""Return the recorded collisions."""
return [(t, ref, c.name) for t, ref, c in self.collisions]
def record_collision(
self, state: State, hazard: Entity
) -> Tuple[float, str, CollisionTypes]:
"""Classify the collision and record it."""
if hazard.catalog_entry.catalog_type != "Vehicle":
return (state.t, hazard.ref, CollisionTypes.non_vehicle)
ego_box = self.ego.get_bounding_box_geom(state.poses[self.ego])
hazard_box = hazard.get_bounding_box_geom(state.poses[hazard])
collision_point = np.array(
ego_box.intersection(hazard_box).centroid.xy
).squeeze()
collision_angle = (hazard.pose[3] - self.ego.pose[3]) % (math.pi * 2)
ego_angle = (
np.arctan2(*np.flip(collision_point - self.ego.pose[:2]))
- self.ego.pose[3]
) % (math.pi * 2)
hazard_angle = (
np.arctan2(*np.flip(collision_point - hazard.pose[:2])) - hazard.pose[3]
) % (math.pi * 2)
ego_point = self.get_collision_point(ego_box, ego_angle, self.ego.pose[3])
hazard_point = self.get_collision_point(
hazard_box, hazard_angle, hazard.pose[3]
)
ego_front = ego_point in (
CollisionPoints.front,
CollisionPoints.front_corner,
)
ego_back = ego_point in (CollisionPoints.back, CollisionPoints.back_corner)
hazard_front = hazard_point in (
CollisionPoints.front,
CollisionPoints.front_corner,
)
hazard_back = hazard_point in (
CollisionPoints.back,
CollisionPoints.back_corner,
)
if ego_front and hazard_front:
if angle_between(
collision_angle,
math.pi / 4,
3 * math.pi / 4,
) or angle_between(
collision_angle,
5 * math.pi / 4,
7 * math.pi / 4,
):
ctype = CollisionTypes.t_bone
elif angle_between(
collision_angle,
7 * math.pi / 4,
math.pi / 4,
):
ctype = CollisionTypes.side_swipe
else:
ctype = CollisionTypes.head_on
elif (ego_front or ego_back) and (hazard_front or hazard_back):
if angle_between(
collision_angle,
math.pi / 4,
3 * math.pi / 4,
) or angle_between(
collision_angle,
5 * math.pi / 4,
7 * math.pi / 4,
):
ctype = CollisionTypes.t_bone
else:
ctype = CollisionTypes.rear_end
elif any([ego_front, ego_back, hazard_front, hazard_back]):
if angle_between(
collision_angle,
math.pi / 4,
3 * math.pi / 4,
) or angle_between(
collision_angle,
5 * math.pi / 4,
7 * math.pi / 4,
):
ctype = CollisionTypes.t_bone
else:
ctype = CollisionTypes.side_swipe
else:
ctype = CollisionTypes.side_swipe
return state.t, hazard.ref, ctype
def get_collision_point(
self,
box: Polygon,
angle: float,
heading: float,
) -> CollisionPoints:
"""Classify the collision point of the angle given the bounding box."""
c_tol = self.c_tol
corners = (
np.arctan2( # corners are BL, FL, FR, BR
*np.flip(
np.array(box.exterior.coords[:-1]).T
- np.array(box.centroid.xy),
axis=0,
),
)
- heading
)
if (angle_between(angle, corners[1] - c_tol, corners[1] + c_tol)) or (
angle_between(angle, corners[2] - c_tol, corners[2] + c_tol)
):
return CollisionPoints.front_corner
elif (angle_between(angle, corners[0] - c_tol, corners[0] + c_tol)) or (
angle_between(angle, corners[3] - c_tol, corners[3] + c_tol)
):
return CollisionPoints.back_corner
elif angle_between(angle, corners[0] + c_tol, corners[3] - c_tol):
return CollisionPoints.back
elif angle_between(angle, corners[2] - c_tol, corners[1] + c_tol):
return CollisionPoints.front
return CollisionPoints.side
class CollisionPointMetric(Metric):
"""
    Finds the coordinates and relative angles of collision.
Records the position of any collisions that
occur between entities and the ego. Collects
the (x, y) coordinate and relative angle [rad]
between the entities at the collision point.
"""
name = "collision_points"
def __init__(self, name: Optional[str] = None):
self.ego: Optional[Entity] = None
self.collisions: List[Tuple[str, np.ndarray, float]] = []
super().__init__(name=name)
def _reset(self, state: State) -> None:
"""Reset the ego and recorded collisions."""
self.ego = state.scenario.ego
self.collisions: List[Tuple[str, np.ndarray, float]] = []
self.last_timestep: List[Entity] = []
def _step(self, state: State) -> None:
"""Update recorded collision angle and position."""
for e_other in state.collisions()[self.ego]:
if e_other not in self.last_timestep:
self.collisions.append(
self.record_collision_position(state, e_other)
)
self.last_timestep = state.collisions()[self.ego].copy()
def get_state(self) -> List[Tuple[str, np.ndarray, float]]:
"""Return the entity reference, coordinates and angle of collisions."""
return self.collisions
def record_collision_position(
self, state: State, hazard: Entity
) -> Tuple[str, np.ndarray, float]:
"""Calculate the coordinate and relative angle of entities at collision."""
ego_box = self.ego.get_bounding_box_geom(state.poses[self.ego])
hazard_box = hazard.get_bounding_box_geom(state.poses[hazard])
collision_point = np.array(
ego_box.intersection(hazard_box).centroid.xy
).squeeze()
collision_angle = (hazard.pose[3] - self.ego.pose[3]) % (math.pi * 2)
return hazard.ref, collision_point, collision_angle
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/metrics/collision.py
| 0.936052 | 0.625038 |
collision.py
|
pypi
|
from abc import ABC, abstractmethod
from typing import Any, List, Optional, Type
from scenario_gym.callback import StateCallback
from scenario_gym.state import State
class Metric(ABC):
"""
Base class for a metric in scenario_gym.
All metrics implement reset and step methods to update internal states during
scenario rollout and the get_state method to return the metric value.
The `required_callbacks` attribute can be set to a list of StateCallback
subclasses. At reset the state will be checked to make sure each is present and
the instance of each will be stored in `self.callbacks` in the same order as
required callbacks.
"""
name: Optional[str] = None
required_callbacks: List[Type[StateCallback]] = []
def __init__(self, name: Optional[str] = None):
"""
Construct metric and set the name.
Parameters
----------
name : Optional[str]
If not given then the name attribute will be used or
the class name if that is None also.
"""
if name is not None:
self.name = name
elif self.name is None:
self.name = self.__class__.__name__
self.callbacks: List[StateCallback] = []
def reset(self, state: State) -> None:
"""Reset the metric at the start of a new scenario."""
self.callbacks.clear()
for CB in self.required_callbacks:
cb = state.get_callback(CB)
if cb is None:
raise ValueError(
"Cannot run metric {} without callback {}.".format(
self.__class__.__name__,
CB.__name__,
)
)
self.callbacks.append(cb)
self._reset(state)
def step(self, state: State) -> None:
"""Update the metric after one timestep."""
self._step(state)
@abstractmethod
def _reset(self, state: State) -> None:
"""Reset the metric at the start of a new scenario."""
raise NotImplementedError
@abstractmethod
def _step(self, state: State) -> None:
"""Update the metric after one timestep."""
raise NotImplementedError
@abstractmethod
def get_state(self) -> Any:
"""Return the current value of the metric."""
raise NotImplementedError
def cache_metric(Met: Type[Metric]) -> Type[Metric]:
"""Wrap _step to cache the value whenver the state is terminal."""
prev_step = Met._step
Met.previous_value = None
def new_step(self, state):
prev_step(self, state)
if state.is_done:
self.previous_value = self.get_state()
Met._step = new_step
return Met
def cache_mean(Met: Type[Metric]) -> Type[Metric]:
"""Wrap _step to keep a running mean of the value."""
def previous_value(self):
val = self._previous_value
self._previous_value = 0.0
self._prev_count = 0
return val
prev_step = Met._step
Met._previous_value = 0.0
Met._prev_count = 0
Met.previous_value = property(previous_value)
def new_step(self, state):
prev_step(self, state)
if state.is_done:
self._prev_count += 1
self._previous_value += (
self.get_state() - self._previous_value
) / self._prev_count
Met._step = new_step
return Met
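# Hedged usage sketch (illustration only, not part of the library): applying
# `cache_mean` to a toy metric so that `previous_value` holds a running mean
# over episodes. `_DummyState` is a stand-in exposing only the attributes that
# `Metric.reset`/`step` and the wrapped `_step` actually touch.
@cache_mean
class _EpisodeTime(Metric):
    """Toy metric returning the final timestamp of each episode."""
    def _reset(self, state) -> None:
        self.t = 0.0
    def _step(self, state) -> None:
        self.t = state.t
    def get_state(self) -> float:
        return self.t
class _DummyState:
    def __init__(self, t: float, is_done: bool):
        self.t = t
        self.is_done = is_done
if __name__ == "__main__":
    metric = _EpisodeTime()
    for final_t in (1.0, 3.0):
        metric.reset(_DummyState(0.0, False))
        metric.step(_DummyState(final_t, True))
    assert metric.previous_value == 2.0  # running mean of 1.0 and 3.0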
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/metrics/base.py
| 0.966553 | 0.494507 |
base.py
|
pypi
|
from typing import Dict, Iterable, List, Tuple
import numpy as np
from numpy.linalg import norm
def inverse_direction(vector: Iterable, normalised: bool = True) -> List[float]:
"""
    Return the clockwise perpendicular of a 2D vector, Iterable -> Iterable.
Uses clockwise-rotating sign convention: (x, y) --> (y, -x)
Optional parameter normalised (default True):
-- True: returns vector of unit length
-- False: returns vector of same length as the input vector
"""
assert len(vector) == 2, "Invalid vector dimension: {0}".format(len(vector))
if normalised:
n = norm([vector[1], vector[0]])
return [vector[1] / n, -vector[0] / n]
return [vector[1], -vector[0]]
def coord_change(
vector: List[float],
direction: List[float],
centre: Tuple[float, float] = (0, 0),
) -> List[float]:
"""
Change vector coordinates to new frame of reference.
    Applies a translation and rotation to change the components of <vector> to a
    new coordinate system defined by an origin at <centre> and with components
    parallel and perpendicular to <direction>
Optional argument centre (default [0, 0])
"""
assert len(vector) == 2, (
"coord_change is implemented to work in 2D. Passed vector dimension: "
+ str(len(vector))
)
vector = np.array(vector)
centre = np.array(centre)
inv_dir = inverse_direction(direction)
return [np.dot((vector - centre), inv_dir), np.dot(vector - centre, direction)]
def acceleration(
entity_poses: List[List[float]],
dt: float,
parallel_velocity: bool = False,
i: int = 0,
) -> List[float]:
"""
Find entity acceleration from three consecutive poses.
Optional argument parallel_velocity (default False):
-- True: acceleration is resolved in coords para and perp to velocity
-- False: acceleration remains in pose coords
"""
try:
entity_pose_2 = entity_poses[i + 2][1:3]
entity_pose_1 = entity_poses[i + 1][1:3]
entity_pose_0 = entity_poses[i][1:3]
except IndexError:
# If too few poses received, default with accel = [0, 0]
return [0.0, 0.0]
velocity_1 = (entity_pose_1 - entity_pose_2) / dt
velocity_0 = (entity_pose_0 - entity_pose_1) / dt
accel = np.array((velocity_0 - velocity_1) / dt)
if not parallel_velocity:
return accel
# Resolve acceleration vector into [para to velocity, perp to velocity]
return [
np.dot(velocity_1, accel) / norm(velocity_1),
np.dot([-velocity_1[1], velocity_1[0]] / norm(velocity_1), accel),
]
def ahead(ego: Dict, haz: Dict) -> bool:
"""
Determine if ego is ahead of hazard.
Takes two entities (ego and hazard) and returns True if the ego is ahead
of the hazard when using ego's preferential frame of reference such that:
-- ego is at position [0.0, 0.0]
-- ego direction is [0, 1] ...
-- ... requiring ego velocity to be parallel to y axis
"""
ego_position = ego["position"][1]
haz_position = haz["position"][1]
return ego_position > haz_position
def direction(heading: float) -> list:
"""
Turn heading into normalised direction vector.
angle (rad) --> [x component, y component]
Where 0 rad corresponds to positive x unit vector, and a positively
increasing angle moves in counter-clockwise direction.
"""
return [np.cos(heading), np.sin(heading)]
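# Hedged usage sketch (illustration only): resolving a point into an ego-centred
# frame with the helpers above. All numbers are made up for the example.
if __name__ == "__main__":
    ego_heading = direction(np.pi / 2)  # ego facing +y -> approximately [0, 1]
    ego_position = [10.0, 5.0]
    hazard_xy = [12.0, 9.0]
    # In the ego frame the first component is lateral, the second longitudinal.
    lateral, longitudinal = coord_change(hazard_xy, ego_heading, ego_position)
    # inverse_direction gives the unit vector perpendicular to the heading.
    perp = inverse_direction(ego_heading)
    print(lateral, longitudinal, perp)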
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/metrics/rss/rss_utils.py
| 0.919159 | 0.893913 |
rss_utils.py
|
pypi
|
import warnings
from collections import OrderedDict
from typing import Dict, List, Tuple
import numpy as np
from numpy.linalg import norm
from shapely.geometry import LineString, Polygon
from scenario_gym.callback import StateCallback
from scenario_gym.entity import Entity
from scenario_gym.metrics.rss.rss_utils import (
acceleration,
ahead,
coord_change,
direction,
inverse_direction,
)
from scenario_gym.state import State
class RSSParameters:
"""RSS parameters."""
RESPONSE_TIME = 0.6 # SECONDS
MIN_LONG_ACCEL = 1.2 * 9.81 # METRES PER SECONDS SQUARED
MAX_LONG_ACCEL = 1.2 * 9.81 # METRES PER SECONDS SQUARED
MIN_SAFE_CLEARANCE = 0.1 # METRES
SHADOW_LENGTH = 100 # METRES
VISIBLE_RADIUS = 50 # METRES
LANE_ANGLE_VARIATION = 0.985 # COS(ANGLE)
TIME_HORIZON = 3 # SECONDS
class RSSDistances(StateCallback):
"""
Determine if the ego-entity distance has become unsafe.
This is the per-timestep pre-computation for the first two rules of the
RSS metric. Raw values are calculated and attached to the global state or
entity attributes. Unsafe distances are flagged per entity in the global
state, which the metric uses to return a boolean value per rule.
"""
def _reset(self, state: State) -> None:
"""Reset callback and declares variables."""
self.ego = state.scenario.ego
self.entities = state.scenario.entities
# Initialise default callback parameters
self.ego_params = {}
self.entity_params = {e: {} for e in self.entities[1:]}
self.safe_distances = {e: [0.0, 0.0] for e in self.entities[1:]}
self.intersect = {e: ["safe"] for e in self.entities[1:]}
self.entity_safe_ratios = {
entity: [float("inf"), float("inf")] for entity in self.entities
}
def __call__(self, state: State) -> None:
"""
Label each entity to specify if its distances to the ego are unsafe.
Collates current timestep data for each entity, and determines the
safety of the ego with respect to its longitudinal and lateral distance
to each entity.
-- Creates entity parameter dictionary from current pose and bounding box
-- Calculates safe lateral and longitudinal distance to each entity
-- Calculates safe distance ratios for rss-based colour-coding
-- Determines, for each entity, if its position intersects the
corresponding ego safe buffer, and if so, which direction should be
flagged as unsafe
"""
if state.t == 0.0:
# Require at least two poses to calculate velocity
return
# Establish Ego parameters
ego_heading = direction(state.poses[self.ego][3])
ego_inverse_heading = inverse_direction(list(ego_heading))
entity_params = OrderedDict()
ego_position = state.poses[self.ego][0:2]
# Create a dictionary for each entity of form:
# {position, heading, velocity, acceleration, box_points, length, width}
# With directional and positional parameters defined with respect to ego
# frame.
for entity in state.poses:
entity_dictionary = self.get_entity_parameters(
state,
entity,
ego_heading,
ego_inverse_heading,
ego_position,
state.dt,
)
if entity_dictionary is not None:
entity_params[entity] = entity_dictionary
# Calculate safe distances between ego and all other entities
ego_params = entity_params.pop(self.ego)
safe_distances = OrderedDict()
for entity, params in entity_params.items():
safe_long = abs(self.safe_longitudinal_distance(ego_params, params))
safe_lat = abs(self.safe_lateral_distance(ego_params, params))
safe_distances[entity] = [safe_lat, safe_long]
self.ego_params = ego_params
self.entity_params = entity_params
self.safe_distances = safe_distances
# Check for each entity if there is an intersection of safe distance buffer
# and assign safe distance ratios to entity as an attribute
for e in entity_params:
self.entity_safe_ratios[e] = self.safe_ratios(
self.ego, ego_params, entity_params[e], safe_distances[e]
)
self.intersect[e].append(
self.unsafe_distance(
ego_params,
entity_params[e],
self.intersect[e],
safe_distances[e],
)
)
@staticmethod
def safe_ratios(
ego_entity: Entity,
ego: Dict,
haz: Dict,
safe_distances: Dict[Entity, float],
) -> List[float]:
"""
Attach safe_distance ratios to entity.
        The safe ratio is defined as actual_distance / safe_distance, so a larger ratio is safer.
"""
if ego_entity in safe_distances:
safe_lat = safe_distances[ego_entity] + 0.5 * abs(
np.dot(
[haz["width"], haz["length"]], inverse_direction(haz["heading"])
)
)
safe_long = safe_distances[ego_entity] + 0.5 * abs(
np.dot([haz["width"], haz["length"]], haz["heading"])
)
else:
safe_lat = 0.5 * ego["width"]
safe_long = 0.5 * ego["length"]
actual_lat = max(
1e-6,
abs(haz["position"][0])
- 0.5 * ego["width"]
- 0.5
* abs(
np.dot(
[haz["width"], haz["length"]], inverse_direction(haz["heading"])
)
),
)
actual_long = max(
1e-6,
abs(haz["position"][1])
- 0.5 * ego["length"]
- 0.5 * abs(np.dot([haz["width"], haz["length"]], haz["heading"])),
)
return [abs(actual_lat / safe_lat), abs(actual_long / safe_long)]
@staticmethod
def unsafe_distance(
ego: Dict, haz: Dict, intersect: List[str], safe_distances: List[float]
) -> str:
"""
Determine if longitudinal or lateral distance is hazardous to ego.
This method is called per timestep per entity, and returns a flag to be
attached to the global state.
-- Laterally unsafe if both longitudinal and lateral distances are unsafe,
and the lateral distance became unsafe after the longitudinal distance.
-- Longitudinally unsafe if both longitudinal and lateral distances are
unsafe, and the longitudinal distance became unsafe after the lateral
distance.
"""
if "unsafe_lateral" in intersect or "unsafe_longitudinal" in intersect:
# Already found
            return "found"
buffer, lengths, widths = RSSDistances.generate_buffer(ego, safe_distances)
assert (
buffer.area > 0.0
), "safe_longitudinal: buffer constructed as a 'Z' rather than as a '[]'"
hazard_area = Polygon(haz["box_points"])
if hazard_area.intersects(buffer):
# Find which direction became unsafe last: this is the unsafe direction
for j in range(len(intersect), 0, -1):
try:
if intersect[j - 1] == "lateral":
# longitudinal intersection last: longitudinally unsafe
return "unsafe_longitudinal"
elif intersect[j - 1] == "longitudinal":
# lateral intersection last: laterally unsafe
return "unsafe_lateral"
except IndexError:
break
# default, if no previous intersection found.
if j == 1:
ego_dim = [ego["width"], ego["length"]]
if abs(
abs(haz["position"][0])
- abs(np.dot(haz["position"], ego_dim))
) / safe_distances[0] > abs(
abs(
haz["position"][1]
- np.dot(
haz["position"],
inverse_direction(ego_dim),
)
)
/ safe_distances[1]
):
return "unsafe_longitudinal"
else:
return "unsafe_lateral"
# This entity does not intersect buffer, so check if a dimension intersects
return RSSDistances.write_intersections(lengths, widths, haz)
@staticmethod
def safe_longitudinal_distance(ego: Dict, haz: Dict) -> float:
"""Determine if the longitudinal distance is safe."""
MAX_LONG_ACCEL = RSSParameters.MAX_LONG_ACCEL
MIN_LONG_ACCEL = RSSParameters.MIN_LONG_ACCEL
MIN_SAFE_CLEARANCE = RSSParameters.MIN_SAFE_CLEARANCE
RESPONSE_TIME = RSSParameters.RESPONSE_TIME
ego_direction = ego["heading"]
hazard_direction = haz["heading"]
ego_velocity = ego["velocity"]
hazard_velocity = haz["velocity"]
max_long_accel = abs(
MAX_LONG_ACCEL * np.dot(ego_direction, hazard_direction)
)
if np.dot(ego_direction, hazard_direction) > 0:
# Moving in the same direction (longitudinal component)
if ahead(ego, haz):
vf = norm(ego_velocity)
vr = np.dot(hazard_velocity, ego_direction)
else:
vf = np.dot(hazard_velocity, ego_direction)
vr = norm(ego_velocity)
if vr == 0.0:
# If rear car is stationary, safe
return MIN_SAFE_CLEARANCE + 0.5 * ego["length"]
d0 = RSSDistances.long_dist_same_direction(
vf, vr, max_long_accel, RESPONSE_TIME, MIN_LONG_ACCEL
)
else:
# Moving in the opposite direction (longitudinal component)
v1 = abs(np.dot(ego_velocity, ego_direction))
v2 = -abs(np.dot(hazard_velocity, ego_direction))
# Makes no distinction between driver in correct or incorrect lane,
# where RSS is specific
if np.sign(haz["position"][1]) == np.sign(haz["velocity"][1]):
return MIN_SAFE_CLEARANCE + 0.5 * ego["length"]
d0 = RSSDistances.long_dist_opp_direction(
v1, v2, max_long_accel, RESPONSE_TIME, MIN_LONG_ACCEL
)
return d0 + MIN_SAFE_CLEARANCE + 0.5 * ego["length"]
@staticmethod
def safe_lateral_distance(ego: Dict, haz: Dict) -> float:
"""Determine if the lateral distance is safe."""
MAX_LONG_ACCEL = RSSParameters.MAX_LONG_ACCEL
MIN_LONG_ACCEL = RSSParameters.MIN_LONG_ACCEL
MIN_SAFE_CLEARANCE = RSSParameters.MIN_SAFE_CLEARANCE
RESPONSE_TIME = RSSParameters.RESPONSE_TIME
haz_position = np.array(haz["position"])
        # Velocity components are expressed in the ego frame: [lateral, longitudinal]
        # Define the lateral velocity as the hazard's velocity component
        # perpendicular to the ego's heading
v = haz["velocity"][0] # component perpendicular to ego's heading
max_lat_accel = MAX_LONG_ACCEL * abs(
np.dot(inverse_direction(ego["heading"]), haz["heading"])
)
min_lat_accel = MIN_LONG_ACCEL * abs(
np.dot(inverse_direction(ego["heading"]), haz["heading"])
)
if np.sign(-haz_position[0]) == np.sign(v):
# lateral convergence
v = abs(v)
if v == 0.0:
# Driving parallel, safe for sufficient constant distance.
return MIN_SAFE_CLEARANCE + 0.5 * ego["width"]
d0 = RSSDistances.lat_dist(
v, max_lat_accel, min_lat_accel, RESPONSE_TIME
)
else:
# lateral divergence: safe distance is the avg of car widths
# plus min_safe_distance
d0 = 0
return d0 + MIN_SAFE_CLEARANCE + 0.5 * ego["width"]
@staticmethod
def write_intersections(
buffer_lengths: List[LineString],
buffer_widths: List[LineString],
haz_dict: Dict,
) -> str:
"""
Flag buffer intersection direction.
For ego and another entity, check if the entity has a smaller distance in
the longitudinal or lateral direction than the corresponding safe distance,
and appends this to the recorded list.
"""
haz_area = Polygon(haz_dict["box_points"])
lat_inter = False
long_inter = False
if haz_area.intersects(buffer_lengths[0]) or haz_area.intersects(
buffer_lengths[1]
):
lat_inter = True
if haz_area.intersects(buffer_widths[0]) or haz_area.intersects(
buffer_widths[1]
):
long_inter = True
if lat_inter and long_inter:
new_record = "both"
elif lat_inter:
new_record = "lateral"
elif long_inter:
new_record = "longitudinal"
else:
new_record = "safe"
return new_record
@staticmethod
def get_entity_parameters(
state: State,
entity: Entity,
ego_heading: List[float],
ego_inverse_heading: List[float],
ego_position: List[float],
dt: float,
) -> Dict:
"""Calculate entity parameters and returns these as a dictionary."""
entity_pose = state.poses[entity]
entity_velocity = state.velocities[entity]
if len(entity_pose) != 6:
warnings.warn(
"Entity pose should have six elements, [x, y, z, h, r, p]. "
"Received {0} elements.".format(len(entity_pose))
)
return
ego_position = np.array(ego_position)
entity_heading = direction(entity_pose[3])
entity_acceleration = acceleration(state.recorded_poses(entity), dt)
# All vectors take form [lateral, longitudinal] / [x, y]
entity_dictionary = {
"position": coord_change(entity_pose[0:2], ego_heading, ego_position),
"heading": [
np.dot(entity_heading, ego_inverse_heading),
np.dot(entity_heading, ego_heading),
],
"velocity": [
np.dot(entity_velocity[:2], ego_inverse_heading),
np.dot(entity_velocity[:2], ego_heading),
],
"accel": [
np.dot(
entity_acceleration,
ego_inverse_heading,
),
np.dot(entity_acceleration, ego_heading),
],
"box_points": [
coord_change(point, ego_heading, ego_position)
for point in entity.get_bounding_box_points(entity_pose)
],
"length": entity.catalog_entry.bounding_box.length,
"width": entity.catalog_entry.bounding_box.width,
}
return entity_dictionary
@staticmethod
def generate_buffer(
ego: Dict, safe_distances: List
    ) -> Tuple[Polygon, List[LineString], List[LineString]]:
"""
Generate ego safe buffer corresponding to entity's safe distances.
Generates a rectangular buffer around the entity with length and width
corresponding to safe longitudinal and lateral distances with respect to
the other entity. Returns the Polygon of this buffer along with a list of
linestrings of its lengths and widths.
"""
assert ego["position"] == [0.0, 0.0], ego["position"]
try:
safe_longitudinal_distance = safe_distances[1]
safe_lateral_distance = safe_distances[0]
except IndexError:
# Safe distances not calculated
warnings.warn(
"RSSDistances generate_buffer: Safe distances not calculated: "
"Buffer cannot be instantiated. Default safe distances as 3 "
"metres lateral, 5 metres safe longitudinal"
)
safe_longitudinal_distance = 5
safe_lateral_distance = 3
buffer_vector = [
np.array([0, safe_longitudinal_distance]),
np.array([safe_lateral_distance, 0]),
]
buffer = [
np.array(buffer_vector[0] + buffer_vector[1]),
np.array(buffer_vector[0] - buffer_vector[1]),
np.array(-buffer_vector[0] - buffer_vector[1]),
np.array(-buffer_vector[0] + buffer_vector[1]),
]
widths = [
LineString(
[
[100 * buffer[0][0], buffer[0][1]],
[100 * buffer[1][0], buffer[1][1]],
]
),
LineString(
[
[100 * buffer[2][0], buffer[2][1]],
[100 * buffer[3][0], buffer[3][1]],
]
),
]
lengths = [
LineString(
[
[buffer[0][0], 100 * buffer[0][1]],
[buffer[2][0], 100 * buffer[2][1]],
]
),
LineString(
[
[buffer[1][0], 100 * buffer[1][1]],
[buffer[3][0], 100 * buffer[3][1]],
]
),
]
return Polygon(buffer), lengths, widths
@staticmethod
def long_dist_same_direction(
vf: float,
vr: float,
max_long_accel: float,
RESPONSE_TIME: float,
MIN_LONG_ACCEL: float,
) -> float:
"""Return the minimum safe longitudinal distance for same directions."""
return max(
0,
vr * RESPONSE_TIME
+ min(
vf**2 / (2 * max_long_accel),
0.5 * max_long_accel * RESPONSE_TIME**2,
)
+ (vr + RESPONSE_TIME * max_long_accel) ** 2 / (2 * MIN_LONG_ACCEL)
- vf**2 / (2 * max_long_accel),
)
@staticmethod
def long_dist_opp_direction(
v1: float,
v2: float,
max_long_accel: float,
RESPONSE_TIME: float,
MIN_LONG_ACCEL: float,
) -> float:
"""Return the minimum safe longitudinal distance for opposing directions."""
return max(
0,
(
(2 * v1 + RESPONSE_TIME * max_long_accel) * RESPONSE_TIME / 2
+ (v1 + RESPONSE_TIME * max_long_accel) ** 2 / (2 * MIN_LONG_ACCEL)
+ (2 * abs(v2) + RESPONSE_TIME * max_long_accel) * RESPONSE_TIME / 2
+ (abs(v2) + RESPONSE_TIME * max_long_accel) ** 2
/ (2 * MIN_LONG_ACCEL)
),
)
@staticmethod
def lat_dist(
v: float, max_lat_accel: float, min_lat_accel: float, RESPONSE_TIME: float
):
"""Return the minimum safe lateral distance between the entity and ego."""
return max(
0,
0.5 * RESPONSE_TIME * (2 * v + RESPONSE_TIME * max_lat_accel)
+ (v + RESPONSE_TIME * max_lat_accel) ** 2 / (2 * min_lat_accel)
- 0.5 * RESPONSE_TIME**2 * max_lat_accel
- (RESPONSE_TIME * max_lat_accel) ** 2 / (2 * min_lat_accel),
)
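# Hedged worked example (illustration only): evaluating the minimum safe gap for
# two vehicles travelling in the same direction using the static helper above
# with the RSSParameters defaults. The speeds are made-up values.
if __name__ == "__main__":
    safe_gap = RSSDistances.long_dist_same_direction(
        vf=10.0,  # front vehicle speed [m/s]
        vr=15.0,  # rear vehicle speed [m/s]
        max_long_accel=RSSParameters.MAX_LONG_ACCEL,
        RESPONSE_TIME=RSSParameters.RESPONSE_TIME,
        MIN_LONG_ACCEL=RSSParameters.MIN_LONG_ACCEL,
    )
    # safe_longitudinal_distance above adds MIN_SAFE_CLEARANCE and half the ego
    # length on top of this value.
    print(round(safe_gap, 2))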
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/metrics/rss/callback.py
| 0.918251 | 0.594728 |
callback.py
|
pypi
|
from enum import Enum
from typing import Dict, List
from scenario_gym.metrics import Metric
from scenario_gym.road_network import road_network
from scenario_gym.state import State
from .callback import RSSDistances
class Rules(Enum):
"""Enumerate the five rules."""
safe_longitudinal = 0
safe_lateral = 1
class RSSBehaviourDetection:
"""
    Find behaviours corresponding to the implemented RSS rules.
Instantiates class based on current timestep state and
parameters. Calls each of the behaviour methods, one per
RSS rule. These methods are lightweight, with most computation
performed in the rss callback. Each method returns a bool
value corresponding to whether or not the rule is obeyed at each
timestep -- if the rule has not already been failed.
"""
# Initialise and call the behaviour functions
def __init__(
self,
metrics: Dict,
ego: Dict,
entities: List[Dict],
safe_distances: List[List[float]],
road_network: road_network,
dt: float,
intersect: List[List[str]],
collisions,
):
self.metrics = metrics
self.ego = ego
self.entities = entities
self.safe_distances = safe_distances
self.road_network = road_network
self.dt = dt
self.intersect = intersect
self.collisions = collisions
def __call__(self):
"""Call behaviour methods for current timestep."""
outcomes = {}
for rule in Rules:
outcome = getattr(self, rule.name)
outcomes[rule.name] = outcome()
intersect = self.intersect
return outcomes, intersect
# Behaviour functions
def safe_longitudinal(self) -> bool:
"""
Rule 1: ego at a safe longitudinal distance.
        Returns False if the longitudinal distance is less than the minimum
        safe distance while the lateral distance is also unsafe, with the safe
        longitudinal distance crossed last. Returns True otherwise.
"""
if not self.metrics["safe_longitudinal"]:
# Already found
return True
# Check if any entities have been flagged as being unsafe longitudinally.
for entity_record in self.intersect.values():
if "unsafe_longitudinal" in entity_record:
return False
return True
def safe_lateral(self) -> bool:
"""
Rule 2: ego at a safe lateral distance.
        Returns False if the lateral distance is less than the minimum
        safe distance while the longitudinal distance is also unsafe, with the
        safe lateral distance crossed last. Returns True otherwise.
"""
if not self.metrics["safe_lateral"]:
# Already found
return True
# Check if any entities have been flagged as being unsafe laterally.
for entity_record in self.intersect.values():
if "unsafe_lateral" in entity_record:
return False
return True
class RSS(Metric):
"""
    Determine if the ego follows the implemented RSS rules.
_reset() resets state with each rule set to True by default
_step() calls RSSBehaviourDetection to check if the ego is
obeying the RSS rules at the current timestep. rss/callback
    is responsible for the bulk of the computation and is called prior
to the metric call at each timestep.
get_state() returns dictionary of bools, one per rule
-- True: the rule is obeyed at every timestep
-- False: the rule is disobeyed in at least one timestep
    (may not be the fault of the ego - a separate metric handles blame)
"""
required_callbacks = [RSSDistances]
def _reset(self, state: State) -> None:
"""Reset behaviour."""
self.rss_callback = self.callbacks[0]
self.behaviour = None
self.ego = state.scenario.ego
self.metrics_ = {rule.name: True for rule in Rules}
def _step(self, state: State) -> None:
"""Update the metric to find behaviour at the point of interest."""
if state.t == 0.0:
# Require at least two poses to calculate velocity
return
ego, entities, safe_distances, intersect = (
self.rss_callback.ego_params,
self.rss_callback.entity_params,
self.rss_callback.safe_distances,
self.rss_callback.intersect,
)
rules = RSSBehaviourDetection(
metrics=self.metrics_,
ego=ego,
entities=entities,
safe_distances=safe_distances,
road_network=state.scenario.road_network,
dt=state.dt,
intersect=self.rss_callback.intersect,
collisions=state.collisions(),
)
outcomes, intersect = rules()
self.intersect = intersect
for rule, outcome in outcomes.items():
if outcome is False:
self.metrics_[rule] = outcome
def get_state(self) -> Dict[str, bool]:
"""Return state."""
return self.metrics_
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/metrics/rss/rss.py
| 0.933104 | 0.676339 |
rss.py
|
pypi
|
from __future__ import annotations
import json
import warnings
from contextlib import suppress
from copy import copy
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Type
import matplotlib.pyplot as plt
import numpy as np
from scenario_gym.entity import Entity, MiscObject, Pedestrian, Vehicle
from scenario_gym.road_network import RoadNetwork
from scenario_gym.scenario.actions import ScenarioAction, UpdateStateVariableAction
from scenario_gym.trajectory import Trajectory
from scenario_gym.utils import cached_property
class Scenario:
"""
The scenario_gym representation of a scenario.
A scenario consists of a set of entities and a road network. The entities have
trajectories and catalog entries and may have additional entity specific
properties. The scenario also may have a list of actions which can specify
events that occur. E.g. a traffic light changing color.
"""
def __init__(
self,
entities: List[Entity],
name: Optional[str] = None,
road_network: Optional[RoadNetwork] = None,
actions: Optional[List[ScenarioAction]] = None,
properties: Optional[Dict[Any, Any]] = None,
):
self._entities = entities
self._ref_to_entity: Dict[str, Entity] = {e.ref: e for e in entities}
self.name = name
self.road_network = road_network
self.actions = actions if actions is not None else []
self.properties = properties if properties is not None else {}
self._vehicles: Optional[List[Entity]] = None
self._pedestrians: Optional[List[Entity]] = None
@property
def entities(self) -> List[Entity]:
"""Get the entities in the scenario."""
return self._entities
@property
def ego(self) -> Entity:
"""
Get the ego entity.
The ego entity is defined as the entity with the ref "ego" or the first
entity if no entity has the ref "ego".
"""
ego = self.entity_by_name("ego")
if ego is not None:
return ego
return self.entities[0]
@property
def vehicles(self) -> List[Entity]:
"""Get the entities that have vehicle catalogs."""
if self._vehicles is None:
self._vehicles = [e for e in self.entities if isinstance(e, Vehicle)]
return self._vehicles
@property
def pedestrians(self) -> List[Entity]:
"""Get the entities that have pedestrian catalogs."""
if self._pedestrians is None:
self._pedestrians = [
e for e in self.entities if isinstance(e, Pedestrian)
]
return self._pedestrians
@property
def trajectories(self) -> Dict[str, Trajectory]:
"""Return a dictionary mapping entity references to the trajectory."""
return {e.ref: e.trajectory for e in self.entities}
@cached_property
def length(self) -> float:
"""Return the length of the scenario in seconds."""
return max([t.max_t for t in self.trajectories.values()])
def entity_by_name(self, e_ref: str) -> Optional[Entity]:
"""Return an entity given a unique reference."""
with suppress(KeyError):
return self._ref_to_entity[e_ref]
def __copy__(self) -> Scenario:
"""Create a copy of a scenario without copying the road network."""
return self.__class__(
name=f"Copy of {self.name}" if self.name is not None else None,
road_network=self.road_network,
actions=[a.copy() for a in self.actions],
entities=[e.copy() for e in self.entities],
properties=self.properties,
)
def copy(self) -> Scenario:
"""Create a copy of the scenario."""
return copy(self)
def add_entity(self, e: Entity, inplace: bool = False) -> Scenario:
"""Create a new scenario with the entity added."""
if e.ref in self._ref_to_entity:
i = 0
while True:
new_ref = f"{e.ref}_{i}"
if new_ref not in self._ref_to_entity:
break
i += 1
old_ref = e.ref
e.ref = new_ref
warnings.warn(
f"An entity with ref {old_ref} exists. Adding with ref {new_ref}."
)
scenario = self.copy() if not inplace else self
scenario._entities.append(e)
scenario._ref_to_entity[e.ref] = e
scenario._vehicles = None
scenario._pedestrians = None
return scenario
def remove_entity(self, e: Entity, inplace: bool = False) -> Scenario:
"""Create a new scenario with the entity added."""
idx = self._entities.index(e)
scenario = self.copy() if not inplace else self
scenario._entities.pop(idx)
scenario._ref_to_entity.pop(e.ref)
scenario._vehicles = None
scenario._pedestrians = None
return scenario
def make_ego(self, e: Entity, inplace: bool = False) -> Scenario:
"""Set e to the ego entity."""
try:
idx = self._entities.index(e)
except ValueError:
idx = None
scenario = self.copy() if not inplace else self
if idx is not None:
e = scenario._entities.pop(idx)
else:
            scenario._ref_to_entity[e.ref] = e
scenario._entities.insert(0, e)
        scenario._vehicles = None
        scenario._pedestrians = None
return scenario
def add_action(self, action: ScenarioAction, inplace: bool = False) -> Scenario:
"""Add an action to the scenario."""
scenario = self.copy() if not inplace else self
scenario.actions.append(action)
return scenario
def translate(self, x: np.ndarray, inplace: bool = False) -> Scenario:
"""Return a new scenario with all entities translated."""
scenario = self.copy() if not inplace else self
for e in scenario.entities:
e.trajectory = e.trajectory.translate(x)
actions = []
for a in scenario.actions:
actions.append(a.translate(x, inplace=inplace))
scenario.actions = actions
return scenario
def reset_start(self, entity: Optional[Entity] = None) -> Scenario:
"""Reset the start time to the start of an entity's trajectory."""
if entity is None:
entity = self.ego
start_time = entity.trajectory.min_t
return self.translate(np.array([-start_time, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]))
@classmethod
def from_dict(
cls,
data: Dict[str, Any],
e_classes: Tuple[Type[Entity]] = (Vehicle, Pedestrian, Entity),
a_classes: Tuple[Type[ScenarioAction]] = (UpdateStateVariableAction,),
):
"""Load the scenario from a dictionary."""
entities = []
for e_data in data["entities"]:
for Ent in e_classes:
                if Ent.__name__ == e_data.get("entity_class", "Entity"):
break
entities.append(Ent.from_dict(e_data))
road_network = data.get("road_network")
if road_network is not None:
if road_network.get("path") is not None:
path = Path(road_network["path"])
if path.exists():
road_network = RoadNetwork.create_from_file(path)
elif road_network.get("name") is not None:
road_network = RoadNetwork(name=road_network["name"])
else:
road_network = None
else:
road_network = RoadNetwork.create_from_dict(road_network)
actions = []
for a_data in data.get("actions", ()):
for Act in a_classes:
if Act.__name__ == a_data.get(
"action_class", "UpdateStateVariableAction"
):
break
actions.append(Act.from_dict(a_data))
return cls(
entities,
name=data.get("name"),
road_network=road_network,
actions=actions,
properties=data.get("properties", {}),
)
def to_dict(
self,
road_network_path: Optional[str] = "../Road_Networks",
) -> Dict[str, Any]:
"""Write the scenario to a dictionary."""
if self.road_network is None:
road_network = None
elif road_network_path is not None:
if not Path(road_network_path).is_file():
road_network_path = str(
Path(
road_network_path,
f"{self.road_network.name}.json",
)
)
road_network = {
"path": road_network_path,
"name": self.road_network.name,
}
else:
road_network = self.road_network.to_dict()
return {
"entities": [e.to_dict() for e in self.entities],
"name": self.name,
"actions": [act.to_dict() for act in self.actions],
"road_network": road_network,
"properties": self.properties,
}
@classmethod
def from_json(
cls,
path: str,
road_network_dir: Optional[str] = None,
e_classes: Tuple[Type[Entity]] = (Vehicle, Pedestrian, Entity),
a_classes: Tuple[Type[ScenarioAction]] = (UpdateStateVariableAction,),
):
"""
Load the scenario from a json file.
Parameters
----------
path : str
The path to the json file.
road_network_dir : str, optional
The directory to search for the road network file. If the road network
path in the json is absolute then this will be ignored. Otherwise the
path used will be (road_network_dir / road network path) if the
road_network_dir is relative otherwise it will be the `directory of
path / road_network_dir / road network path`.
e_classes : Tuple[Type[Entity]], optional
The classes to use for loading entities.
a_classes : Tuple[Type[ScenarioAction]], optional
The classes to use for loading actions.
"""
with open(path, "r") as f:
data = json.load(f)
rn = data.get("road_network")
if rn is not None and rn.get("path") is not None:
rn_path = Path(data["road_network"]["path"])
if not rn_path.is_absolute():
if road_network_dir is None:
rn_path = Path(path).parent / rn_path
elif Path(road_network_dir).is_absolute():
rn_path = Path(road_network_dir) / rn_path
else:
rn_path = Path(
Path(path).parent,
road_network_dir,
rn_path,
)
data["road_network"]["path"] = str(rn_path)
return cls.from_dict(
data,
e_classes=e_classes,
a_classes=a_classes,
)
def to_json(
self, path, road_network_path: Optional[str] = "../Road_Networks"
) -> None:
"""Write the scenario to a json file."""
data = self.to_dict(road_network_path=road_network_path)
with open(path, "w") as f:
json.dump(data, f)
def describe(self) -> None:
"""Generate a text overview of the scenario."""
rn = self.road_network.name if self.road_network is not None else "None"
name = (
self.name.replace(".xosc", "") if self.name is not None else "scenario"
)
title = f"Scenario: {name}"
under_header = "=" * len(title)
        entity_header = "Entity".ljust(10) + "Type".ljust(10) + "Category".ljust(10)
entities = ""
for e in self.entities:
entities += (
f"{e.ref}".ljust(10)
+ f"{e.type}".ljust(10)
+ f"{e.catalog_entry.catalog_category}".ljust(10)
+ "\n"
)
print(
f"""
{title}
{under_header}
Road network: {rn}
Number of entities: {len(self.entities)}
Total duration: {self.length:.4}s
Entities
--------
{entity_header}
{entities}
"""
)
def plot(self, figsize: Tuple[int, int] = (10, 10), show: bool = True) -> None:
"""
Visualise the scenario.
Parameters
----------
figsize : Tuple[int, int]
The figure size.
show : bool
If set to False will not call `plt.show` so the figure can be modified
or saved.
"""
name = self.name if self.name is not None else "Scenario"
plt.figure(figsize=figsize)
if self.road_network is not None:
for geom in self.road_network.driveable_surface.geoms:
plt.fill(*geom.exterior.xy, c="gray", alpha=0.25)
for i in geom.interiors:
plt.fill(*i.xy, c="white")
for r in self.road_network.roads:
plt.plot(*r.center.xy, c="white")
for i, e in enumerate(self.entities):
if i == 0:
c = "r"
elif isinstance(e, Pedestrian):
c = "g"
elif isinstance(e, MiscObject):
c = "gray"
else:
c = "b"
plt.plot(*e.trajectory.data[:, [1, 2]].T, c=c, label=e.ref)
plt.plot(*e.trajectory.data[0, [1, 2]].T, c=c, marker="o")
data = np.vstack([e.trajectory.data[:, [1, 2]] for e in self.entities])
b_min, b_max = data.min(0), data.max(0)
plt.axis("equal")
plt.xlim(b_min[0] - 10.0, b_max[0] + 10.0)
plt.ylim(b_min[1] - 10.0, b_max[1] + 10.0)
plt.legend()
plt.title(name)
if show:
plt.show()
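# Hedged usage sketch (illustration only): loading a scenario from JSON,
# printing an overview and saving a translated copy. The file paths below are
# placeholders, not files shipped with the package.
if __name__ == "__main__":
    scenario = Scenario.from_json("scenarios/example.json")
    scenario.describe()
    # Shift every trajectory and action 5 metres along x. Index 0 of the
    # translation vector is time, matching reset_start above; the remaining
    # components follow the trajectory layout.
    shifted = scenario.translate(np.array([0.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0]))
    shifted.to_json("scenarios/example_shifted.json")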
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/scenario/scenario.py
| 0.910279 | 0.542742 |
scenario.py
|
pypi
|
from abc import ABC, abstractmethod
from copy import deepcopy
from typing import Any, Dict, Optional, TypeVar
import numpy as np
from scenario_gym.entity import Entity
State = TypeVar("State")
class ScenarioAction(ABC):
"""
Base class for scenario actions.
Actions are applied at the first timestamp with time greater than or equal to
the action time. They are applied with the _apply method which must be
implemented.
"""
def __init__(
self,
action_class: str,
entity_ref: str,
action_variables: Dict[str, Any],
):
"""
Create the action.
action_class : str
The name of the action class.
entity_ref : str
Reference of the entity to which the action applies.
action_variables : Dict[str, Any]
Dictionary of action variables.
"""
self.action_class = action_class
self.entity_ref = entity_ref
self.action_variables = action_variables
def apply(self, state: State, entity: Optional[Entity]) -> None:
"""Apply the action to the environment state."""
self._apply(state, entity)
@abstractmethod
def _apply(self, state: State, entity: Optional[Entity]) -> None:
"""Apply the action to the environment state."""
raise NotImplementedError
@abstractmethod
def trigger_condition(self, state: State) -> bool:
"""Condition for when to apply the action."""
raise NotImplementedError
def copy(self):
"""Return a copy of the action."""
return deepcopy(self)
def translate(self, x: np.ndarray, inplace: bool = False):
"""Translate the action."""
return self.copy() if not inplace else self
def to_dict(self) -> Dict[str, Any]:
"""Write the action to a dictionary."""
return {
"action_class": self.action_class,
"entity_ref": self.entity_ref,
"action_variables": self.action_variables,
}
@classmethod
def from_dict(cls, data: Dict[str, Any]):
"""Load the action from a dictionary."""
return cls(
data["action_class"],
data["entity_ref"],
data["action_variables"],
)
class FixedTAction(ScenarioAction):
"""Action that is applied at a fixed time."""
def __init__(self, t: float, *args, **kwargs):
"""
Create the action.
Parameters
----------
t : float
The time at which the action should be applied.
"""
super().__init__(*args, **kwargs)
self.t = t
def trigger_condition(self, state: State) -> bool:
"""Update when the state time is greater than action time."""
return state.t >= self.t
def translate(self, x: np.ndarray, inplace: bool = False):
"""Translate the action."""
act = self.copy() if not inplace else self
act.t += x[0]
return act
def to_dict(self) -> Dict[str, Any]:
"""Write the action to a dictionary."""
data = super().to_dict()
data["t"] = self.t
return data
@classmethod
def from_dict(cls, data: Dict[str, Any]):
"""Load the action from a dictionary."""
return cls(
data["t"],
data["action_class"],
data["entity_ref"],
data["action_variables"],
)
class UserDefinedAction(FixedTAction):
"""Custom action provided by the user."""
def _apply(self, state: State, entity: Optional[Entity]) -> None:
"""Apply the user-defined action."""
pass
class UpdateStateVariableAction(FixedTAction):
"""Action that sets state variables for the entity."""
def _apply(self, state: State, entity: Optional[Entity]) -> None:
"""Update the entity with action variables."""
if entity is not None:
if state.entity_state[entity] is None:
state.entity_state[entity] = {}
for k, v in self.action_variables.items():
state.entity_state[entity][k] = v
def trigger_condition(self, state: State) -> bool:
"""Update when the state time is greater than action time."""
return state.t > self.t
def to_dict(self) -> Dict[str, Any]:
"""Write the action to a dictionary."""
return {
"t": self.t,
"action_class": self.action_class,
"entity_ref": self.entity_ref,
"action_variables": self.action_variables,
}
@classmethod
def from_dict(cls, data: Dict[str, Any]):
"""Load the action from a dictionary."""
return cls(
data["t"],
data["action_class"],
data["entity_ref"],
data["action_variables"],
)
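# Hedged usage sketch (illustration only): building an UpdateStateVariableAction
# and round-tripping it through its dictionary form. The entity reference "ego"
# and the variable name "traffic_light_state" are made-up example values.
if __name__ == "__main__":
    action = UpdateStateVariableAction(
        5.0,  # t: applied at the first step with state.t > 5.0
        "UpdateStateVariableAction",
        "ego",
        {"traffic_light_state": "red"},
    )
    data = action.to_dict()
    restored = UpdateStateVariableAction.from_dict(data)
    assert restored.t == action.t
    assert restored.action_variables == action.action_variables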
|
/scenario_gym-0.4.5.tar.gz/scenario_gym-0.4.5/scenario_gym/scenario/actions.py
| 0.945349 | 0.512632 |
actions.py
|
pypi
|
import logging
from .prepare.constraints import calculate_constraint_values_and_bounds
from .linear_programming_optimiser import optimise_scenarios_with_linear_programming
from .linear_programming_optimiser import errors
from .linear_interpolator.interpolate_scenarios import \
(linear_interpolate, interpolate_all_metrics)
from .gradient_optimiser.optimise_gradient import optimise_scenarios_with_gradient_method
from .mixed_integer_programming.mip_optimiser import optimise_scenarios_with_mip
log = logging.getLogger(__name__)
def optimise_scenarios(scenarios, settings, method=None):
"""
    Optimisation function that, if no method is specified, first tries
    a linear programming solution. If the constraint vs maximise metrics do not
    form a concave function (so no diminishing returns), a gradient method will
    be used to find an optimal solution instead.
    The function returns the results, which include a data frame with the
    interpolated values of all provided metrics. A usage sketch is included at
    the end of this module.
    :param scenarios:
        Dictionary of numpy arrays containing estimates of metrics in different
        scenarios
    :param settings:
        Dictionary containing at least the keys for the constraint_metric,
        maximise_metric and identifier, so they can be selected from the
        scenarios, and the constraint_value
    :param method:
        Indicate whether to use linear programming ('linear'), the gradient
        method ('gradient') or mixed integer programming ('mip'); if None the
        method is chosen automatically
    :return: result
        Dictionary with the result of the optimisation: a message, total
        optimised value, success status, constraint metric values and a data
        frame containing the metric values per identifier as a result of the
        optimisation
"""
_validate_inputs_settings(scenarios, settings, method)
constraint_metric_name = settings.get('constraint_metric')
constraint_value = settings.get('constraint_value')
constraint = calculate_constraint_values_and_bounds(
scenarios.get(constraint_metric_name), constraint_value)
if constraint.get('status') == 'ok':
optimised = _run_optimiser(scenarios, settings, method)
else:
optimised = _get_bounded_results(scenarios, settings, constraint)
optimised['data_frame'] = _interpolate_results(scenarios, settings,
optimised)
optimised['method'] = method
return optimised
def _validate_inputs_settings(scenarios, settings, method):
methods = (None, 'linear', 'gradient', 'mip')
if method not in methods:
raise KeyError('Method %s is not supported' % method)
if not isinstance(scenarios, dict):
raise TypeError(
'Scenarios should be provided as a dictionary with numpy arrays')
if not isinstance(settings, dict):
raise TypeError('Settings should be provided as a dictionary')
mandatory_settings = [
'constraint_metric', 'constraint_value', 'maximise_metric',
'identifier'
]
missing_settings = [
sett for sett in mandatory_settings if sett not in settings.keys()
]
if missing_settings:
raise KeyError('Missing following settings: %s' % missing_settings)
mandatory_scenarios_keys = [
settings[key] for key in mandatory_settings
if key != 'constraint_value'
]
scenario_keys = list(scenarios.keys())
missing_keys = [
key for key in mandatory_scenarios_keys if key not in scenario_keys
]
if missing_keys:
raise KeyError(
'Missing the following keys in scenarios: %s' % missing_keys)
return True
def _get_optimisation_input(scenarios, settings):
maximise_metric_key = settings.get('maximise_metric')
constraint_metric_key = settings.get('constraint_metric')
constraint_value = settings.get('constraint_value')
to_maximise_metric = scenarios[maximise_metric_key]
constraint_metric = scenarios[constraint_metric_key]
return {
'to_maximise_metric': to_maximise_metric,
'constraint_metric': constraint_metric,
'constraint_value': constraint_value
}
def _run_optimiser(scenarios, settings, method):
if method == 'linear':
log.info('Optimising using linear programming...')
return optimise_scenarios_with_linear_programming(scenarios, settings)
if method == 'gradient':
log.info('Optimising using gradient method...')
return optimise_scenarios_with_gradient_method(scenarios, settings)
if method == 'mip':
log.info('Optimising using mixed integer programming...')
return optimise_scenarios_with_mip(scenarios, settings)
try:
log.info('Trying linear programming solution')
return optimise_scenarios_with_linear_programming(scenarios, settings)
except errors.ScenariosNotConcaveError as e:
log.info(e)
log.info('Optimise using gradient method...')
return optimise_scenarios_with_gradient_method(scenarios, settings)
def _get_bounded_results(scenarios, settings, constraint):
from_metric = settings.get('constraint_metric')
to_metric = settings.get('maximise_metric')
constraint_metric_values = constraint.get('bounded_values')
maximised_result = linear_interpolate(scenarios, from_metric, to_metric,
constraint_metric_values)
return {
'message': constraint.get('message'),
'maximised_result': maximised_result.sum(),
'success': False,
'constraint_metric_values': constraint_metric_values,
'iterations': 0
}
def _interpolate_results(scenarios, settings, optimised):
return interpolate_all_metrics(scenarios, settings['constraint_metric'],
settings['identifier'],
optimised['constraint_metric_values'])
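# Hedged usage sketch (illustration only): calling optimise_scenarios with two
# identifiers and three scenario points each. The metric names 'cost'/'revenue'
# and the identifier key 'campaign' are made-up example values, not defaults.
def _example_optimise():
    import numpy as np
    scenarios = {
        'campaign': np.array([['a'], ['b']]),
        'cost': np.array([[0.0, 10.0, 20.0], [0.0, 5.0, 10.0]]),
        'revenue': np.array([[0.0, 8.0, 12.0], [0.0, 6.0, 9.0]]),
    }
    settings = {
        'constraint_metric': 'cost',
        'maximise_metric': 'revenue',
        'constraint_value': 15.0,
        'identifier': 'campaign',
    }
    return optimise_scenarios(scenarios, settings, method='linear')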
|
/scenario-optimiser-0.2.4.tar.gz/scenario-optimiser-0.2.4/src/scenario_optimiser/optimise.py
| 0.87339 | 0.581689 |
optimise.py
|
pypi
|
from copy import deepcopy
import numpy as np
from scipy.optimize import minimize, LinearConstraint
def transform_to_concave(scenarios, constraint_metric, optimise_metric):
concave_scenarios = deepcopy(scenarios)
concave_optimise_metric, mse_deviation = _calculate_concave_scenarios(
scenarios[constraint_metric], scenarios[optimise_metric]
)
concave_scenarios[optimise_metric] = concave_optimise_metric
return {
"concave_scenarios": concave_scenarios,
"mse_deviation": mse_deviation,
}
def _calculate_concave_scenarios(constraint_metric_values, to_maximise_metric_values):
n_rows, n_cols = constraint_metric_values.shape
concave_values = np.zeros(to_maximise_metric_values.shape)
mse_deviation = np.zeros((n_rows,))
for row_idx in range(n_rows):
result = _calculate_concave_scenario(
constraint_metric_values[row_idx, :], to_maximise_metric_values[row_idx, :]
)
concave_values[row_idx, :] = _add_concave_values(
n_cols, result["concave_values"]
)
mse_deviation[row_idx] = result["mse_deviation"]
return concave_values, mse_deviation
def _calculate_concave_scenario(x_values, y_values):
x_trimmed, y_trimmed = _trim(x_values, y_values)
y_diff = np.array([0])
if _too_few_bid_landscape_points(x_trimmed):
return {"concave_values": y_values, "mse_deviation": y_diff}
result = _compute_concave_points(x_trimmed, y_trimmed)
concave_y_values = _get_untrimmed_values(x_values, x_trimmed, result)
return {"concave_values": concave_y_values, "mse_deviation": round(result.fun, 2)}
def _trim(x_values, y_values):
x_unique, unique_indices = np.unique(x_values, return_index=True)
return x_unique, y_values[unique_indices]
def _too_few_bid_landscape_points(x_trimmed):
return x_trimmed.shape[0] < 3
def _compute_concave_points(x, y):
y_pred_0 = np.zeros(y.shape)
return minimize(
lambda y_pred: _objective(y_pred, y),
y_pred_0,
constraints=[
_compute_increase_constraint(y_pred_0),
_compute_concave_constraint(x),
],
)
def _objective(y_pred, y):
return np.mean(np.square(y_pred - y))
def _compute_concave_constraint(constraint_metric_values):
a = constraint_metric_values.copy()
n_columns = a.shape[0]
n_rows = n_columns - 2
new_array = np.zeros((n_rows, n_columns))
for row_idx in range(n_rows):
new_array[row_idx, row_idx] = -1 / (a[row_idx + 1] - a[row_idx])
new_array[row_idx, row_idx + 1] = 1 / (a[row_idx + 1] - a[row_idx]) + 1 / (
a[row_idx + 2] - a[row_idx + 1]
)
new_array[row_idx, row_idx + 2] = -1 / (a[row_idx + 2] - a[row_idx + 1])
return LinearConstraint(new_array, [0] * n_rows, [np.inf] * n_rows)
def _compute_increase_constraint(x):
n_columns = x.shape[0]
n_rows = n_columns - 1
new_array = np.zeros((n_rows, n_columns))
for row_idx in range(n_rows):
new_array[row_idx, row_idx] = -1
new_array[row_idx, row_idx + 1] = 1
return LinearConstraint(new_array, [0] * n_rows, [np.inf] * n_rows)
def _add_concave_values(n_cols, concave_values):
n_values = concave_values.shape[0]
last_value = concave_values[-1]
if n_values < n_cols:
extra_values = np.repeat(last_value, n_cols - n_values)
return np.concatenate((concave_values, extra_values))
return concave_values
def _get_untrimmed_values(x_values, x_trimmed, result):
concave_values = []
trimmed_concave_values = result.x
for x_value in x_values:
mask = x_trimmed == x_value
concave_values.append(trimmed_concave_values[mask][0])
return np.array(concave_values)
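# Hedged usage sketch (illustration only): projecting a non-concave revenue
# curve onto the closest concave, non-decreasing curve. The metric names
# "cost" and "revenue" are made-up example values.
if __name__ == "__main__":
    scenarios = {
        "cost": np.array([[0.0, 1.0, 2.0, 3.0]]),
        "revenue": np.array([[0.0, 1.0, 5.0, 6.0]]),  # slope rises then falls
    }
    result = transform_to_concave(scenarios, "cost", "revenue")
    print(result["concave_scenarios"]["revenue"])  # concave approximation
    print(result["mse_deviation"])  # per-row deviation from the original curve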
|
/scenario-optimiser-0.2.4.tar.gz/scenario-optimiser-0.2.4/src/scenario_optimiser/linear_programming_optimiser/transform_to_concave.py
| 0.726329 | 0.478894 |
transform_to_concave.py
|
pypi
|
import logging
import numpy as np
from scipy.optimize import linprog
from .intercepts_slopes import calculate_intercepts_and_slopes, scenarios_concave
from .errors import ScenariosNotConcaveError
log = logging.getLogger(__name__)
def optimise_scenarios_with_linear_programming(scenarios, settings):
"""
x = vector of constraint metric values and objective metric values
Objective: minimize c transposed * x
Subject to: Ax <= b and x between x_min and x_max
"""
intercepts, slopes = calculate_intercepts_and_slopes(
scenarios, settings["constraint_metric"], settings["maximise_metric"]
)
if not scenarios_concave(slopes).all():
raise ScenariosNotConcaveError("Scenarios are not concave")
log.debug("Scenarios are concave, proceeding with linear programming solution")
rows, cols = slopes.shape
c = _get_linear_objective_coefficients(rows)
A_ub = _calculate_inequality_matrix(slopes, rows, cols)
b_ub = _calculate_inequality_vector(scenarios, settings, intercepts, rows, cols)
A_eq = _get_equality_constraint_params(rows)
b_eq = settings.get("constraint_value")
result = linprog(c, A_ub, b_ub, A_eq, b_eq)
return {
"message": result["message"],
"maximised_result": -result["fun"],
"success": result["success"],
"constraint_metric_values": result["x"][:rows],
"iterations": result["nit"],
}
def _get_linear_objective_coefficients(rows):
return np.concatenate((np.zeros((rows,)), -np.ones((rows,))), axis=0)
def _calculate_inequality_matrix(slopes, rows, cols):
identity_matrix = np.identity(rows)
left_upper = np.concatenate((identity_matrix, -identity_matrix), axis=0)
right_upper = np.zeros((2 * rows, rows))
left_bottom, right_bottom = _calculate_inequality_parameters(slopes, rows, cols)
upper = np.concatenate((left_upper, right_upper), axis=1)
lower = np.concatenate((left_bottom, right_bottom), axis=1)
return np.concatenate((upper, lower), axis=0)
def _calculate_inequality_parameters(slopes, rows, cols):
scenarios_slope_params = np.zeros((rows * cols, rows))
scenarios_intercept_params = np.zeros((rows * cols, rows))
for r in np.arange(0, rows):
for c in np.arange(0, cols):
row = r * cols + c
scenarios_slope_params[row, r] = -slopes[r, c]
scenarios_intercept_params[row, r] = 1
return scenarios_slope_params, scenarios_intercept_params
def _calculate_inequality_vector(scenarios, settings, intercepts, rows, cols):
x_min, x_max = _calculate_min_and_max_values_constraint_metric(scenarios, settings)
top = np.concatenate((x_max, -x_min), axis=0)
intercepts_vector = np.zeros((rows * cols, 1))
for k in np.arange(0, rows):
for j in np.arange(0, cols):
row = k * cols + j
intercepts_vector[row] = intercepts[k, j]
return np.concatenate((top, intercepts_vector), axis=0)
def _calculate_min_and_max_values_constraint_metric(scenarios, settings):
constraint_metric = settings.get("constraint_metric")
return (
np.min(scenarios.get(constraint_metric), axis=1, keepdims=True),
np.max(scenarios.get(constraint_metric), axis=1, keepdims=True),
)
def _get_equality_constraint_params(rows):
return np.concatenate((np.ones((1, rows)), np.zeros((1, rows))), axis=1)
def _get_constraint_value(settings):
return np.array([settings.get("constraint_value")])
|
/scenario-optimiser-0.2.4.tar.gz/scenario-optimiser-0.2.4/src/scenario_optimiser/linear_programming_optimiser/optimise_linear.py
| 0.634317 | 0.50891 |
optimise_linear.py
|
pypi
|
from collections import namedtuple
import numpy as np
from mip import Model, xsum, maximize, BINARY, CONTINUOUS, LinExpr
from scenario_optimiser.mixed_integer_programming.piecewise_linear import (
PiecewiseLinearFunction,
)
def optimise_scenarios_with_mip(scenarios, settings):
constraint_metric = settings.get("constraint_metric")
optimise_metric = settings.get("maximise_metric")
constraint_value = settings.get("constraint_value")
plfs = _to_piecewise_linear_funcs(scenarios, constraint_metric, optimise_metric)
plfs = list(plfs)
optimum = maximize_sum(plfs, constraint_value)
return _summarize_optimized_result(optimum)
def _to_piecewise_linear_funcs(scenarios, constraint_metric, optimise_metric):
constraint_metric_values = scenarios.get(constraint_metric)
optimise_metric_values = scenarios.get(optimise_metric)
n_rows = constraint_metric_values.shape[0]
for row_idx in range(n_rows):
row = (constraint_metric_values[row_idx, :], optimise_metric_values[row_idx, :])
constraint_values, optimise_values = _get_unique_values(row)
yield PiecewiseLinearFunction(constraint_values, optimise_values)
def _get_unique_values(row):
"""The algorithm goes much faster if we remove any duplicate values in the
bid landscapes (needed to have the same number of columns per identifier to
support the gradient method)"""
unique_constraint_values = np.unique(row[0])
if len(unique_constraint_values) < 2:
return row[0][:2], row[1][:2]
indices = [int(np.where(value == unique_constraint_values)[0]) for value in row[0]]
unique_indices = list(set(indices))
unique_maximise_values = row[1][unique_indices]
return unique_constraint_values, unique_maximise_values
def _summarize_optimized_result(optimum):
opt_total_result = sum([line.revenue for line in optimum])
opt_constraint_metric_values = np.array([line.cost for line in optimum])
return {
"message": "optimisation completed successfully",
"maximised_result": opt_total_result,
"success": True,
"constraint_metric_values": opt_constraint_metric_values,
"iterations": 1,
}
Line = namedtuple("Line", ["cost", "revenue", "plf"])
def maximize_sum(plfs, max_costs, print_logs=False):
# pylint: disable=too-many-locals
m = Model("bid-landscapes")
m.solver.set_verbose(1 if print_logs else 0)
costs = LinExpr()
objective = LinExpr()
xs = []
ws = []
    for plf in plfs:
k = len(plf)
w = [m.add_var(var_type=CONTINUOUS) for _ in range(0, k)]
x = [m.add_var(var_type=BINARY) for _ in range(0, k - 1)]
xs.append(x)
ws.append(w)
m += xsum(w[i] for i in range(0, k)) == 1
for i in range(0, k):
m += w[i] >= 0
m += w[0] <= x[0]
for i in range(1, k - 1):
m += w[i] <= x[i - 1] + x[i]
m += w[k - 1] <= x[k - 2]
        m += xsum(x[j] for j in range(0, k - 1)) == 1
for i in range(0, k):
costs.add_term(w[i] * plf.a[i])
objective.add_term(w[i] * plf.b[i])
m += costs <= max_costs
m.objective = maximize(objective)
m.optimize()
optimum = []
for (i, plf) in enumerate(plfs):
k = len(plf)
u_i = sum(ws[i][j].x * plf.a[j] for j in range(0, k))
v_i = sum(ws[i][j].x * plf.b[j] for j in range(0, k))
optimum.append(Line(cost=u_i, revenue=v_i, plf=plf))
return optimum
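# --- hedged usage sketch (not part of the original module) ---
# Assumed example of the public entry point: `scenarios` is a dict of 2-D numpy
# arrays with one row per bid landscape, and the CBC solver bundled with
# python-mip is assumed to be available.
if __name__ == '__main__':
    scenarios = {
        'cost': np.array([[0.0, 1.0, 2.0], [0.0, 2.0, 4.0]]),
        'revenue': np.array([[0.0, 3.0, 4.0], [0.0, 1.0, 5.0]]),
    }
    settings = {'constraint_metric': 'cost', 'maximise_metric': 'revenue', 'constraint_value': 3.0}
    result = optimise_scenarios_with_mip(scenarios, settings)
    print(result['maximised_result'], result['constraint_metric_values'])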
|
/scenario-optimiser-0.2.4.tar.gz/scenario-optimiser-0.2.4/src/scenario_optimiser/mixed_integer_programming/mip_optimiser.py
| 0.767864 | 0.525369 |
mip_optimiser.py
|
pypi
|
import numpy as np
class LinearInterpolator:
"""
Linear interpolator for a 2d-array
"""
def __init__(self, scenarios):
self.scenarios = scenarios
def get(self, from_metric, metric_values, to_metric):
verified_metric_values = self._verify_metric_values_are_in_scenario_range(
from_metric, metric_values)
positions = self._look_up_positions_in_scenarios(
from_metric, metric_values)
scenarios = self._calculate_scenarios_values(
from_metric, verified_metric_values, positions, to_metric)
return self._calculate_to_metric_values(from_metric, to_metric,
scenarios)
def _verify_metric_values_are_in_scenario_range(self, metric, values):
scenarios = self.scenarios.get(metric)
max_values = np.max(scenarios, axis=1, keepdims=True)
min_values = np.min(scenarios, axis=1, keepdims=True)
return np.minimum(max_values, np.maximum(values, min_values))
def _look_up_positions_in_scenarios(self, metric, values):
values = values.reshape(-1, 1)
scenarios = self.scenarios.get(metric)
mask = scenarios <= values
return np.sum(mask, axis=1, keepdims=True) - 1
def _calculate_scenarios_values(self, from_metric, from_metric_values,
positions, to_metric):
positions_higher = self._get_positions_above_current(
from_metric, positions)
scenarios = dict()
for metric in [to_metric, from_metric]:
metric_scenarios = self.scenarios.get(metric)
scenarios_low = self._get_values_by_position(metric_scenarios, positions)
scenarios_high = self._get_values_by_position(
metric_scenarios, positions_higher)
scenarios[metric + '_low'] = scenarios_low
scenarios[metric + '_range'] = scenarios_high - scenarios_low
scenarios[from_metric +
'_position'] = np.squeeze(from_metric_values) - scenarios_low
return scenarios
def _get_positions_above_current(self, metric, positions):
scenarios = self.scenarios.get(metric)
columns = scenarios.shape[1]
positions_higher = positions + 1
positions_higher[positions_higher > columns - 1] = columns - 1
return positions_higher
@staticmethod
def _get_values_by_position(scenarios, positions):
row_indices = list(np.arange(scenarios.shape[0]))
column_indices = list(np.squeeze(positions, axis=1))
return scenarios[row_indices, column_indices]
def _calculate_to_metric_values(self, from_metric, to_metric, scenarios):
base_values = self._get_lowest_values(scenarios, to_metric)
slopes = self._calculate_slopes_of_to_metric(scenarios, from_metric,
to_metric)
positions = self._get_from_metric_positions(scenarios, from_metric)
return base_values + slopes * positions
@staticmethod
def _get_lowest_values(scenarios, to_metric):
return scenarios[to_metric + '_low']
@staticmethod
def _calculate_slopes_of_to_metric(scenarios, from_metric, to_metric):
from_metric_scenario_range = scenarios[from_metric + '_range']
to_metric_scenario_range = scenarios[to_metric + '_range']
no_zeros_from_metric_scenario_range = from_metric_scenario_range.copy()
no_zeros_from_metric_scenario_range[from_metric_scenario_range ==
0] = 1
slopes = to_metric_scenario_range / no_zeros_from_metric_scenario_range
slopes[from_metric_scenario_range == 0] = 0
return slopes
@staticmethod
def _get_from_metric_positions(scenarios, from_metric):
return scenarios[from_metric + '_position']
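# --- hedged usage sketch (not part of the original module) ---
# Interpolate `revenue` at given `cost` values for two scenario rows; the
# scenario container is assumed to be a plain dict of 2-D numpy arrays.
if __name__ == '__main__':
    demo_scenarios = {
        'cost': np.array([[0.0, 1.0, 2.0], [0.0, 2.0, 4.0]]),
        'revenue': np.array([[0.0, 10.0, 15.0], [0.0, 5.0, 20.0]]),
    }
    interpolator = LinearInterpolator(demo_scenarios)
    cost_values = np.array([[0.5], [3.0]])
    print(interpolator.get('cost', cost_values, 'revenue'))  # approx. [5.0, 12.5]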
|
/scenario-optimiser-0.2.4.tar.gz/scenario-optimiser-0.2.4/src/scenario_optimiser/linear_interpolator/interpolate_linear.py
| 0.913121 | 0.677887 |
interpolate_linear.py
|
pypi
|
# ScenarIO
[](https://isocpp.org)
[][scenario]
[][gym-ignition]
[](https://github.com/robotology/gym-ignition/actions)
[][pypi]
[][pypi]
[][pypi]
[][pypi]
[][pypi]
[pypi]: https://pypi.org/project/scenario/
[gym-ignition]: https://github.com/robotology/gym-ignition
[scenario]: https://github.com/robotology/gym-ignition/tree/master/scenario
**SCEN**e interf**A**ces for **R**obot **I**nput / **O**utput.
||||
|:---:|:---:|:---:|
| ![][pendulum] | ![][panda] | ![][icub] |
[icub]: https://user-images.githubusercontent.com/469199/99262746-9e021a80-281e-11eb-9df1-d70134b0801a.png
[panda]: https://user-images.githubusercontent.com/469199/99263111-0cdf7380-281f-11eb-9cfe-338b2aae0503.png
[pendulum]: https://user-images.githubusercontent.com/469199/99262383-321fb200-281e-11eb-89cc-cc31f590daa3.png
## Description
**ScenarIO** is a C++ abstraction layer to interact with simulated and real robots.
It mainly provides the following
[C++ interfaces](https://github.com/robotology/gym-ignition/tree/master/scenario/core/include/scenario/core):
- `scenario::core::World`
- `scenario::core::Model`
- `scenario::core::Link`
- `scenario::core::Joint`
These interfaces can be implemented to operate on different scenarios,
including robots operating either in simulated worlds or in real time.
ScenarIO currently fully implements **Gazebo ScenarIO**,
a simulated back-end that interacts with [Ignition Gazebo](https://ignitionrobotics.org).
The result allows stepping the simulator programmatically, ensuring fully reproducible behaviour.
It relates closely to other projects like
[pybullet](https://github.com/bulletphysics/bullet3) and [mujoco-py](https://github.com/openai/mujoco-py).
A real-time backend that interacts with the [YARP](https://github.com/robotology/yarp) middleware is under development.
ScenarIO can be used either from C++ ([APIs](https://robotology.github.io/gym-ignition/master/breathe/core.html))
or from Python ([APIs](https://robotology.github.io/gym-ignition/master/apidoc/scenario/scenario.bindings.html)).
If you're interested in why we started developing ScenarIO and why we selected Ignition Gazebo
for our simulations, visit the _Motivations_ section of the
[website][website].
## Installation
ScenarIO only supports a single distribution of the Ignition suite.
Visit our [Support Policy](https://robotology.github.io/gym-ignition/master/installation/support_policy.html)
to check the distribution currently supported.
Then, install the supported Ignition suite following the
[official instructions](https://ignitionrobotics.org/docs/fortress).
### Python
Execute, preferably in a [virtual environment](https://docs.python.org/3.8/tutorial/venv.html):
```bash
pip install scenario
```
### C++
You can either clone and install the standalone project:
```bash
git clone https://github.com/robotology/gym-ignition
cd gym-ignition/scenario
cmake -S . -B build/
cmake --build build/ --target install
```
or include it in your CMake project with
[`FetchContent`](https://cmake.org/cmake/help/latest/module/FetchContent.html).
## Usage
You can find some examples that show the usage of ScenarIO in the _Getting Started_ section of the
[website][website].
## Contributing
Please visit the _Limitations_ section of the [website][website] and check the
[`good first issue`](https://github.com/robotology/gym-ignition/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)
and
[`help wanted`](https://github.com/robotology/gym-ignition/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22)
issues.
You can visit our community forum hosted on [GitHub Discussions](https://github.com/robotology/gym-ignition/discussions).
Even without coding skills, replying to users' questions is a great way of contributing.
If you use ScenarIO in your application and want to show it off, visit the
[Show and tell](https://github.com/robotology/gym-ignition/discussions/categories/show-and-tell) section!
Pull requests are welcome.
For major changes, please open a [discussion](https://github.com/robotology/gym-ignition/discussions)
first to propose what you would like to change.
## Citation
```bibtex
@INPROCEEDINGS{ferigo2020gymignition,
title={Gym-Ignition: Reproducible Robotic Simulations for Reinforcement Learning},
author={D. {Ferigo} and S. {Traversaro} and G. {Metta} and D. {Pucci}},
booktitle={2020 IEEE/SICE International Symposium on System Integration (SII)},
year={2020},
pages={885-890},
doi={10.1109/SII46433.2020.9025951}
}
```
## License
[LGPL v2.1](https://choosealicense.com/licenses/lgpl-2.1/) or any later version.
We vendor some resources from the Ignition code base.
For this reason, Gazebo ScenarIO is double-licensed with the
[Apache License](https://choosealicense.com/licenses/apache-2.0/).
[website]: https://robotology.github.io/gym-ignition
|
/scenario-1.3.1.tar.gz/scenario-1.3.1/README.md
| 0.750644 | 0.917562 |
README.md
|
pypi
|
import os
from typing import Optional, List
from kcu import sh, kpath
def create_scenes(
in_path: str,
output_folder_path: str,
threshold: float=0.5,
min_scene_duration: float=1.5,
max_scene_duration: float=30,
debug: bool=False
) -> Optional[List[str]]:
os.makedirs(output_folder_path, exist_ok=True)
timestamps_path = os.path.join(output_folder_path, 'timestamps')
scene_paths = []
if __create_timestamp_file(in_path, timestamps_path, threshold=threshold, debug=debug):
timestamps = __get_timestamps_from_file(timestamps_path)
if timestamps:
timestamps.insert(0, 0)
for index, start_ts in enumerate(timestamps[:-1]):
start_ts += 0.05
                duration = timestamps[index + 1] - start_ts - 0.05
if duration < min_scene_duration or duration > max_scene_duration:
continue
scene_path = os.path.join(output_folder_path, str(index) + 'video.mp4')
__create_scene(in_path, scene_path, start_ts, duration, debug=debug)
scene_paths.append(scene_path)
os.remove(timestamps_path)
return scene_paths
return None
# Threshold - the scene change detection score values are between [0-1].
# PRIVATE METHODS
def __create_timestamp_file(in_path: str, out_path: str, threshold: float, debug: bool=False) -> bool:
sh.sh(
'ffmpeg -y -i {} -filter:v "select=\'gt(scene,{})\',showinfo" -f null - 2> {}'.format(in_path, threshold, out_path),
debug=debug
)
return os.path.exists(out_path)
def __get_timestamps_from_file(in_path: str) -> Optional[List[float]]:
with open(in_path, 'r') as file:
video_data = file.read().replace('\n', '')
return [float(x.split(' ')[0]) for x in video_data.split('pts_time:')[1:]]
def __create_scene(in_path: str, out_path: str, start_ts: float, duration: float, debug: bool=False) -> bool:
sh.sh(
'ffmpeg -y -ss {} -t {} -i {} {} -async 1'.format(start_ts, duration, in_path, out_path),
debug=debug
)
return os.path.exists(out_path)
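# --- hedged usage sketch (not part of the original module) ---
# Cut 'input.mp4' into scene clips under './scenes_out'; requires ffmpeg on the
# PATH, and the file names shown here are placeholders.
if __name__ == '__main__':
    clip_paths = create_scenes('input.mp4', './scenes_out', threshold=0.4, debug=True)
    print(clip_paths)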
|
/scene_cutter-0.0.9-py3-none-any.whl/scene_cutter/scene_cutter.py
| 0.721449 | 0.276275 |
scene_cutter.py
|
pypi
|
import trimesh
import numpy as np
import os
from scene_graph_predictor_pc.src.model.model import MMGNet
from scene_graph_predictor_pc.src.utils.config import Config
from scene_graph_predictor_pc.src.utils import util, define, op_utils, util_ply
from itertools import product
import torch
import torch.nn as nn
def load_config():
r"""loads model config
"""
local_file_path = os.path.dirname(os.path.abspath(__file__))
config_path = os.path.join(local_file_path, 'config/vlsat.json')
# load config file
config = Config(config_path)
return config
def generate_data(plydata):
# get file path
local_file_path = os.path.dirname(os.path.abspath(__file__))
classNames = util.read_txt_to_list(os.path.join(local_file_path, 'data/classes.txt'))
# read relationship class
relationNames = util.read_relationships(os.path.join(local_file_path, 'data/relationships.txt'))
points = np.array(plydata.vertices)
instances = util_ply.read_labels(plydata).flatten()
nodes = list(np.unique(instances))
if 0 in nodes: # remove background
nodes.remove(0)
edge_indices = list(product(list(range(len(nodes))), list(range(len(nodes)))))
edge_indices = [i for i in edge_indices if i[0]!=i[1]]
instances_box = dict()
dim_point = points.shape[-1]
obj_points = torch.zeros([len(nodes), 128, dim_point])
descriptor = torch.zeros([len(nodes), 11])
for i, instance_id in enumerate(nodes):
        # get the points belonging to this instance
        obj_pointset = points[np.where(instances == instance_id)[0]]
min_box = np.min(obj_pointset[:,:3], 0) - 0.2
max_box = np.max(obj_pointset[:,:3], 0) + 0.2
instances_box[instance_id] = (min_box,max_box)
choice = np.random.choice(len(obj_pointset), 128, replace=True)
obj_pointset = obj_pointset[choice, :]
descriptor[i] = op_utils.gen_descriptor(torch.from_numpy(obj_pointset)[:,:3])
obj_pointset = torch.from_numpy(obj_pointset.astype(np.float32))
obj_pointset[:,:3] = zero_mean(obj_pointset[:,:3])
obj_points[i] = obj_pointset
return obj_points, descriptor, edge_indices, classNames, relationNames, nodes
def zero_mean(point):
mean = torch.mean(point, dim=0)
point -= mean.unsqueeze(0)
return point
class SceneGraphPredictor(nn.Module):
def __init__(self) -> None:
super().__init__()
config = load_config()
self.model = MMGNet(config)
def load(self, path: str) -> None:
self.model.load(path)
@torch.no_grad()
    def inference(self, plydata: trimesh.Trimesh, topk: int) -> list:
# inference
obj_points, descriptor, edge_indices, classNames, relationNames, instance_ids = generate_data(plydata)
res = self.model.inference(obj_points, descriptor, edge_indices, topk)
res_list = []
for i in res:
res_list.append({
"object_id":instance_ids[i[0].item()],
"object_class":classNames[i[1].item()],
"subject_id":instance_ids[i[2].item()],
"subject_class":classNames[i[3].item()],
"relation_class":relationNames[i[4].item()],
"confidence":i[5].item()
})
return res_list
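# --- hedged usage sketch (not part of the original module) ---
# 'model.pth' and 'scene_with_instance_labels.ply' are placeholder paths; the
# ply is assumed to carry per-vertex instance labels readable by util_ply.
if __name__ == '__main__':
    predictor = SceneGraphPredictor()
    predictor.load('model.pth')
    mesh = trimesh.load('scene_with_instance_labels.ply')
    for triplet in predictor.inference(mesh, topk=5):
        print(triplet)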
|
/scene_graph_predictor_pc-0.1.1-py3-none-any.whl/scene_graph_predictor_pc/inference.py
| 0.445771 | 0.193986 |
inference.py
|
pypi
|
from torch.optim.lr_scheduler import _LRScheduler, EPOCH_DEPRECATION_WARNING
import warnings,types
class BatchMultiplicativeLR(_LRScheduler):
"""Multiply the learning rate of each parameter group by the factor given
in the specified function. When last_epoch=-1, sets initial lr as lr.
Args:
optimizer (Optimizer): Wrapped optimizer.
lr_lambda (function or list): A function which computes a multiplicative
factor given an integer parameter epoch, or a list of such
functions, one for each group in optimizer.param_groups.
last_epoch (int): The index of last epoch. Default: -1.
Example:
>>> lmbda = lambda epoch: 0.95
>>> scheduler = MultiplicativeLR(optimizer, lr_lambda=lmbda)
>>> for epoch in range(100):
>>> train(...)
>>> validate(...)
>>> scheduler.step()
"""
def __init__(self, optimizer, lr_lambda, last_epoch=-1):
self.optimizer = optimizer
if not isinstance(lr_lambda, list) and not isinstance(lr_lambda, tuple):
self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups)
else:
if len(lr_lambda) != len(optimizer.param_groups):
raise ValueError("Expected {} lr_lambdas, but got {}".format(
len(optimizer.param_groups), len(lr_lambda)))
self.lr_lambdas = list(lr_lambda)
self.last_epoch = last_epoch
super().__init__(optimizer, last_epoch)
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the optimizer.
The learning rate lambda functions will only be saved if they are callable objects
and not if they are functions or lambdas.
"""
state_dict = {key: value for key, value in self.__dict__.items() if key not in ('optimizer', 'lr_lambdas')}
state_dict['lr_lambdas'] = [None] * len(self.lr_lambdas)
for idx, fn in enumerate(self.lr_lambdas):
if not isinstance(fn, types.FunctionType):
state_dict['lr_lambdas'][idx] = fn.__dict__.copy()
return state_dict
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
Arguments:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
lr_lambdas = state_dict.pop('lr_lambdas')
self.__dict__.update(state_dict)
# Restore state_dict keys in order to prevent side effects
# https://github.com/pytorch/pytorch/issues/32756
state_dict['lr_lambdas'] = lr_lambdas
for idx, fn in enumerate(lr_lambdas):
if fn is not None:
self.lr_lambdas[idx].__dict__.update(fn)
def step(self, batchsize=None, epoch=None):
# Raise a warning if old pattern is detected
# https://github.com/pytorch/pytorch/issues/20124
if self._step_count == 1:
if not hasattr(self.optimizer.step, "_with_counter"):
warnings.warn("Seems like `optimizer.step()` has been overridden after learning rate scheduler "
"initialization. Please, make sure to call `optimizer.step()` before "
"`lr_scheduler.step()`. See more details at "
"https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
# Just check if there were two first lr_scheduler.step() calls before optimizer.step()
elif self.optimizer._step_count < 1:
warnings.warn("Detected call of `lr_scheduler.step()` before `optimizer.step()`. "
"In PyTorch 1.1.0 and later, you should call them in the opposite order: "
"`optimizer.step()` before `lr_scheduler.step()`. Failure to do this "
"will result in PyTorch skipping the first value of the learning rate schedule. "
"See more details at "
"https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
self._step_count += 1
class _enable_get_lr_call:
def __init__(self, o):
self.o = o
def __enter__(self):
self.o._get_lr_called_within_step = True
return self
def __exit__(self, type, value, traceback):
self.o._get_lr_called_within_step = False
with _enable_get_lr_call(self):
if epoch is None:
self.last_epoch += 1
self.batchsize = batchsize
values = self.get_lr()
# print(values)
else:
warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)
self.last_epoch = epoch
if hasattr(self, "_get_closed_form_lr"):
values = self._get_closed_form_lr()
else:
values = self.get_lr()
for param_group, lr in zip(self.optimizer.param_groups, values):
param_group['lr'] = lr
self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
if self.last_epoch > 0:
return [lr * lmbda(self.last_epoch, self.batchsize)
for lr, lmbda in zip(self.base_lrs, self.lr_lambdas)]
else:
return list(self.base_lrs)
def update_lr(epoch,batchsize):
return batchsize
if __name__ == '__main__':
import torch
class model(torch.nn.Module):
def __init__(self):
super().__init__()
self.nn = torch.nn.Linear(3, 5)
def forward(self,x):
return self.nn(x)
m = model()
optim = torch.optim.Adam(m.parameters())
# lmbda = lambda epoch, batchsize: epoch * 0.95
scheduler = BatchMultiplicativeLR(optim,update_lr)
optim.step()
scheduler.step(batchsize=2)
optim.step()
scheduler.step(batchsize=4)
|
/scene_graph_predictor_pc-0.1.1-py3-none-any.whl/scene_graph_predictor_pc/src/utils/optimizer.py
| 0.946966 | 0.402979 |
optimizer.py
|
pypi
|
import os,sys,time,math,torch
import numpy as np
from torch_geometric.nn.conv import MessagePassing
def read_txt_to_list(file):
output = []
with open(file, 'r') as f:
for line in f:
entry = line.rstrip().lower()
output.append(entry)
return output
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = np.asarray(axis)
axis = axis / math.sqrt(np.dot(axis, axis))
a = math.cos(theta / 2.0)
b, c, d = -axis * math.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
def rotation_matrix_from_vectors(vec1, vec2):
""" Find the rotation matrix that aligns vec1 to vec2
:param vec1: A 3d "source" vector
:param vec2: A 3d "destination" vector
:return mat: A transform matrix (3x3) which when applied to vec1, aligns it with vec2.
"""
a, b = (vec1 / np.linalg.norm(vec1)).reshape(3), (vec2 / np.linalg.norm(vec2)).reshape(3)
v = np.cross(a, b)
c = np.dot(a, b)
s = np.linalg.norm(v)
kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
rotation_matrix = np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2))
return rotation_matrix
def gen_descriptor(pts: torch.Tensor):
'''
centroid_pts,std_pts,segment_dims,segment_volume,segment_lengths
[3, 3, 3, 1, 1]
'''
assert pts.ndim==2
assert pts.shape[-1]==3
# centroid [n, 3]
centroid_pts = pts.mean(0)
# # std [n, 3]
std_pts = pts.std(0)
# dimensions [n, 3]
segment_dims = pts.max(dim=0)[0] - pts.min(dim=0)[0]
# volume [n, 1]
segment_volume = (segment_dims[0]*segment_dims[1]*segment_dims[2]).unsqueeze(0)
# length [n, 1]
segment_lengths = segment_dims.max().unsqueeze(0)
return torch.cat([centroid_pts,std_pts,segment_dims,segment_volume,segment_lengths],dim=0)
class Gen_edge_descriptor(MessagePassing):#TODO: move to model
""" A sequence of scene graph convolution layers """
def __init__(self, flow="source_to_target"):
super().__init__(flow=flow)
def forward(self, descriptor, edges_indices):
size = self.__check_input__(edges_indices, None)
coll_dict = self.__collect__(self.__user_args__,edges_indices,size, {"x":descriptor})
msg_kwargs = self.inspector.distribute('message', coll_dict)
edge_feature = self.message(**msg_kwargs)
return edge_feature
def message(self, x_i, x_j):
# source_to_target
# (j, i)
# 0-2: centroid, 3-5: std, 6-8:dims, 9:volume, 10:length
# to
# 0-2: offset centroid, 3-5: offset std, 6-8: dim log ratio, 9: volume log ratio, 10: length log ratio
edge_feature = torch.zeros_like(x_i)
# centroid offset
edge_feature[:,0:3] = x_i[:,0:3]-x_j[:,0:3]
# std offset
edge_feature[:,3:6] = x_i[:,3:6]-x_j[:,3:6]
# dim log ratio
edge_feature[:,6:9] = torch.log(x_i[:,6:9] / x_j[:,6:9])
# volume log ratio
edge_feature[:,9] = torch.log( x_i[:,9] / x_j[:,9])
# length log ratio
edge_feature[:,10] = torch.log( x_i[:,10] / x_j[:,10])
# edge_feature, *_ = self.ef(edge_feature.unsqueeze(-1))
return edge_feature.unsqueeze(-1)
def pytorch_count_params(model, trainable=True):
"count number trainable parameters in a pytorch model"
s = 0
for p in model.parameters():
if trainable:
if not p.requires_grad: continue
try:
s += p.numel()
except:
pass
return s
class Progbar(object):
"""Displays a progress bar.
Arguments:
target: Total number of steps expected, None if unknown.
width: Progress bar width on screen.
verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
stateful_metrics: Iterable of string names of metrics that
should *not* be averaged over time. Metrics in this list
will be displayed as-is. All others will be averaged
by the progbar before display.
interval: Minimum visual progress update interval (in seconds).
"""
def __init__(self, target, width=25, verbose=1, interval=0.05,
stateful_metrics=None):
self.target = target
self.width = width
self.verbose = verbose
self.interval = interval
if stateful_metrics:
self.stateful_metrics = set(stateful_metrics)
else:
self.stateful_metrics = set()
self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and
sys.stdout.isatty()) or
'ipykernel' in sys.modules or
'posix' in sys.modules)
self._total_width = 0
self._seen_so_far = 0
# We use a dict + list to avoid garbage collection
# issues found in OrderedDict
self._values = {}
self._values_order = []
self._start = time.time()
self._last_update = 0
def update(self, current, values=None, silent=False):
"""Updates the progress bar.
Arguments:
current: Index of current step.
values: List of tuples:
`(name, value_for_last_step)`.
If `name` is in `stateful_metrics`,
`value_for_last_step` will be displayed as-is.
Else, an average of the metric over time will be displayed.
"""
values = values or []
for k, v in values:
if k not in self._values_order:
self._values_order.append(k)
if k not in self.stateful_metrics:
if k not in self._values:
self._values[k] = [v * (current - self._seen_so_far),
current - self._seen_so_far]
else:
self._values[k][0] += v * (current - self._seen_so_far)
self._values[k][1] += (current - self._seen_so_far)
else:
self._values[k] = v
self._seen_so_far = current
now = time.time()
info = ' - %.0fs' % (now - self._start)
if self.verbose == 1:
if (now - self._last_update < self.interval and
self.target is not None and current < self.target):
return
prev_total_width = self._total_width
if not silent:
if self._dynamic_display:
sys.stdout.write('\b' * prev_total_width)
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
if self.target is not None:
numdigits = int(np.floor(np.log10(self.target))) + 1
barstr = '%%%dd/%d [' % (numdigits, self.target)
bar = barstr % current
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
else:
bar = '%7d/Unknown' % current
self._total_width = len(bar)
if not silent:
sys.stdout.write(bar)
if current:
time_per_unit = (now - self._start) / current
else:
time_per_unit = 0
if self.target is not None and current < self.target:
eta = time_per_unit * (self.target - current)
if eta > 3600:
eta_format = '%d:%02d:%02d' % (eta // 3600,
(eta % 3600) // 60,
eta % 60)
elif eta > 60:
eta_format = '%d:%02d' % (eta // 60, eta % 60)
else:
eta_format = '%ds' % eta
info = ' - ETA: %s' % eta_format
else:
if time_per_unit >= 1:
info += ' %.0fs/step' % time_per_unit
elif time_per_unit >= 1e-3:
info += ' %.0fms/step' % (time_per_unit * 1e3)
else:
info += ' %.0fus/step' % (time_per_unit * 1e6)
for k in self._values_order:
info += ' - %s:' % k
if isinstance(self._values[k], list):
avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
else:
info += ' %s' % self._values[k]
self._total_width += len(info)
if prev_total_width > self._total_width:
info += (' ' * (prev_total_width - self._total_width))
if self.target is not None and current >= self.target:
info += '\n'
if not silent:
sys.stdout.write(info)
sys.stdout.flush()
elif self.verbose == 2:
if self.target is None or current >= self.target:
for k in self._values_order:
info += ' - %s:' % k
avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
if avg > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
info += '\n'
if not silent:
sys.stdout.write(info)
sys.stdout.flush()
self._last_update = now
def add(self, n, values=None,silent=False):
self.update(self._seen_so_far + n, values,silent=silent)
def check(x,y):
x = x if isinstance(x, list) or isinstance(x, tuple) else [x]
y = y if isinstance(y, list) or isinstance(y, tuple) else [y]
[np.testing.assert_allclose(x[i].flatten(), y[i].flatten(), rtol=1e-03, atol=1e-05) for i in range(len(x))]
def export(model:torch.nn.Module, inputs:list,pth:str, input_names:list, output_names:list, dynamic_axes:dict):
import onnxruntime as ort
inputs = inputs if isinstance(inputs, list) or isinstance(inputs, tuple) else [inputs]
torch.onnx.export(model = model, args = tuple(inputs), f=pth,
verbose=False,export_params=True,
do_constant_folding=True,
input_names=input_names, output_names=output_names,
dynamic_axes=dynamic_axes,opset_version=12)
with torch.no_grad():
model.eval()
sess = ort.InferenceSession(pth)
x = model(*inputs)
ins = {input_names[i]: inputs[i].numpy() for i in range(len(inputs))}
y = sess.run(None, ins)
check(x,y)
inputs = [torch.cat([input,input],dim=0) for input in inputs]
x = model(*inputs)
ins = {input_names[i]: inputs[i].numpy() for i in range(len(inputs))}
y = sess.run(None, ins)
check(x,y)
def get_tensorboard_logs(pth_log):
for (dirpath, dirnames, filenames) in os.walk(pth_log):
break
l = list()
for filename in filenames:
if filename.find('events') >= 0: l.append(filename)
return l
def create_dir(dir):
from pathlib import Path
Path(dir).mkdir(parents=True, exist_ok=True)
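# --- hedged usage sketch (not part of the original module) ---
# The 11-D descriptor layout is [centroid(3), std(3), dims(3), volume(1), length(1)].
if __name__ == '__main__':
    demo_pts = torch.rand(128, 3)
    print(gen_descriptor(demo_pts).shape)  # torch.Size([11])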
|
/scene_graph_predictor_pc-0.1.1-py3-none-any.whl/scene_graph_predictor_pc/src/utils/op_utils.py
| 0.655115 | 0.629945 |
op_utils.py
|
pypi
|
import torch
from scene_graph_predictor_pc.src.model.model_utils.model_base import BaseModel
from scene_graph_predictor_pc.src.utils import op_utils
from scene_graph_predictor_pc.src.utils.eval_utils import inference_triplet
from scene_graph_predictor_pc.src.model.model_utils.network_GNN import GraphEdgeAttenNetworkLayers
from scene_graph_predictor_pc.src.model.model_utils.network_PointNet import PointNetfeat, PointNetCls, PointNetRelCls, PointNetRelClsMulti
class Baseline(BaseModel):
"""
512 + 256 baseline
"""
def __init__(self, config, num_obj_class, num_rel_class, dim_descriptor=11):
super().__init__('Mmgnet', config)
self.mconfig = mconfig = config.MODEL
with_bn = mconfig.WITH_BN
dim_point = 3
if mconfig.USE_RGB:
dim_point +=3
if mconfig.USE_NORMAL:
dim_point +=3
dim_f_spatial = dim_descriptor
dim_point_rel = dim_f_spatial
self.dim_point=dim_point
self.dim_edge=dim_point_rel
self.num_class=num_obj_class
self.num_rel=num_rel_class
self.flow = 'target_to_source'
self.clip_feat_dim = self.config.MODEL.clip_feat_dim
dim_point_feature = 512
if self.mconfig.USE_SPATIAL:
dim_point_feature -= dim_f_spatial-3 # ignore centroid
# Object Encoder
self.obj_encoder = PointNetfeat(
global_feat=True,
batch_norm=with_bn,
point_size=dim_point,
input_transform=False,
feature_transform=mconfig.feature_transform,
out_size=dim_point_feature)
# Relationship Encoder
self.rel_encoder = PointNetfeat(
global_feat=True,
batch_norm=with_bn,
point_size=dim_point_rel,
input_transform=False,
feature_transform=mconfig.feature_transform,
out_size=mconfig.edge_feature_size)
self.gcn = GraphEdgeAttenNetworkLayers(512,
256,
self.mconfig.DIM_ATTEN,
self.mconfig.N_LAYERS,
self.mconfig.NUM_HEADS,
self.mconfig.GCN_AGGR,
flow=self.flow,
attention=self.mconfig.ATTENTION,
use_edge=self.mconfig.USE_GCN_EDGE,
DROP_OUT_ATTEN=self.mconfig.DROP_OUT_ATTEN)
self.obj_predictor = PointNetCls(num_obj_class, in_size=512,
batch_norm=with_bn, drop_out=True)
if mconfig.multi_rel_outputs:
self.rel_predictor = PointNetRelClsMulti(
num_rel_class,
in_size=mconfig.edge_feature_size,
batch_norm=with_bn,drop_out=True)
else:
self.rel_predictor = PointNetRelCls(
num_rel_class,
in_size=mconfig.edge_feature_size,
batch_norm=with_bn,drop_out=True)
def forward(self, obj_points, edge_indices, descriptor=None, batch_ids=None, istrain=False):
obj_feature = self.obj_encoder(obj_points)
tmp = descriptor[:,3:].clone()
tmp[:,6:] = tmp[:,6:].log() # only log on volume and length
obj_feature = torch.cat([obj_feature, tmp],dim=1)
''' Create edge feature '''
with torch.no_grad():
edge_feature = op_utils.Gen_edge_descriptor(flow=self.flow)(descriptor, edge_indices)
rel_feature = self.rel_encoder(edge_feature)
gcn_obj_feature, gcn_rel_feature, _ = self.gcn(obj_feature, rel_feature, edge_indices)
rel_cls = self.rel_predictor(gcn_rel_feature)
obj_logits = self.obj_predictor(gcn_obj_feature)
return obj_logits, rel_cls
def inference(self, obj_points, descriptor, edge_indices, topk):
edge_indices = torch.tensor(edge_indices).long()
obj_points = obj_points.permute(0,2,1).contiguous()
with torch.no_grad():
obj_pred, rel_pred = self(obj_points, edge_indices.t().contiguous(), descriptor, istrain=False)
print("Start inference")
predicts = inference_triplet(obj_pred, rel_pred, edge_indices, topk)
return predicts
|
/scene_graph_predictor_pc-0.1.1-py3-none-any.whl/scene_graph_predictor_pc/src/model/vlsat/model.py
| 0.622689 | 0.195825 |
model.py
|
pypi
|
import torch.nn as nn
class BaseNetwork(nn.Module):
def __init__(self):
super(BaseNetwork, self).__init__()
def init_weights(self, init_type='normal', gain=0.02, bias_value=0.0,
target_op = None):
'''
initialize network's weights
        init_type: normal | xavier_normal | kaiming | orthogonal | xavier_uniform
https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/9451e70673400885567d08a9e97ade2524c700d0/models/networks.py#L39
'''
def init_func(m):
classname = m.__class__.__name__
if target_op is not None:
if classname.find(target_op) == -1:
return False
if hasattr(m, 'param_inited'):
return
# print('classname',classname)
if hasattr(m, 'weight'):# and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
nn.init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier_normal':
nn.init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight.data, gain=gain)
                elif init_type == 'xavier_uniform':
nn.init.xavier_uniform_(m.weight.data, gain=gain)
elif init_type == 'constant':
nn.init.constant_(m.weight.data, gain)
else:
raise NotImplementedError()
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias.data, bias_value)
m.param_inited = True
self.init_apply(init_func)
def getParamList(self,x):
return list(x.parameters())
def init_apply(self, fn):
for m in self.children():
if hasattr(m, 'param_inited'):
if m.param_inited is False:
m.init_apply(fn)
else:
m.apply(fn)
fn(self)
return self
class mySequential(nn.Sequential, BaseNetwork):
def __init__(self, *args):
super(mySequential, self).__init__(*args)
def forward(self, *inputs):
for module in self._modules.values():
if type(inputs) == tuple:
inputs = module(*inputs)
else:
inputs = module(inputs)
return inputs
if __name__ == "__main__":
pass
|
/scene_graph_predictor_pc-0.1.1-py3-none-any.whl/scene_graph_predictor_pc/src/model/model_utils/networks_base.py
| 0.765111 | 0.29908 |
networks_base.py
|
pypi
|
import torch
from torch_geometric.nn.conv import MessagePassing
from scene_graph_predictor_pc.src.model.model_utils.networks_base import mySequential
def MLP(channels: list, do_bn=False, on_last=False, drop_out=None):
""" Multi-layer perceptron """
n = len(channels)
layers = []
offset = 0 if on_last else 1
for i in range(1, n):
layers.append(
torch.nn.Conv1d(channels[i - 1], channels[i], kernel_size=1, bias=True))
if i < (n-offset):
if do_bn:
layers.append(torch.nn.BatchNorm1d(channels[i]))
layers.append(torch.nn.ReLU())
if drop_out is not None:
layers.append(torch.nn.Dropout(drop_out))
return mySequential(*layers)
def build_mlp(dim_list, activation='relu', do_bn=False,
dropout=0, on_last=False):
layers = []
for i in range(len(dim_list) - 1):
dim_in, dim_out = dim_list[i], dim_list[i + 1]
layers.append(torch.nn.Linear(dim_in, dim_out))
final_layer = (i == len(dim_list) - 2)
if not final_layer or on_last:
if do_bn:
layers.append(torch.nn.BatchNorm1d(dim_out))
if activation == 'relu':
layers.append(torch.nn.ReLU())
elif activation == 'leakyrelu':
layers.append(torch.nn.LeakyReLU())
if dropout > 0:
layers.append(torch.nn.Dropout(p=dropout))
return torch.nn.Sequential(*layers)
class Gen_Index(MessagePassing):
""" A sequence of scene graph convolution layers """
def __init__(self,flow="target_to_source"):
super().__init__(flow=flow)
def forward(self, x, edges_indices):
size = self.__check_input__(edges_indices, None)
coll_dict = self.__collect__(self.__user_args__,edges_indices,size, {"x":x})
msg_kwargs = self.inspector.distribute('message', coll_dict)
x_i, x_j = self.message(**msg_kwargs)
return x_i, x_j
def message(self, x_i, x_j):
return x_i,x_j
class Aggre_Index(MessagePassing):
def __init__(self,aggr='add', node_dim=-2,flow="source_to_target"):
super().__init__(aggr=aggr, node_dim=node_dim, flow=flow)
def forward(self, x, edge_index,dim_size):
size = self.__check_input__(edge_index, None)
coll_dict = self.__collect__(self.__user_args__, edge_index, size,{})
coll_dict['dim_size'] = dim_size
aggr_kwargs = self.inspector.distribute('aggregate', coll_dict)
x = self.aggregate(x, **aggr_kwargs)
return x
if __name__ == '__main__':
flow = 'source_to_target'
# flow = 'target_to_source'
g = Gen_Index(flow = flow)
edge_index = torch.LongTensor([[0,1,2],
[2,1,0]])
x = torch.zeros([3,5])
x[0,:] = 0
x[1,:] = 1
x[2,:] = 2
x_i,x_j = g(x,edge_index)
print('x_i',x_i)
print('x_j',x_j)
tmp = torch.zeros_like(x_i)
tmp = torch.zeros([5,2])
edge_index = torch.LongTensor([[0,1,2,1,0],
[2,1,1,1,1]])
for i in range(5):
tmp[i] = -i
aggr = Aggre_Index(flow=flow,aggr='max')
xx = aggr(tmp, edge_index,dim_size=x.shape[0])
print(x)
print(xx)
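    # Hedged addition (not in the original file): quick shape checks for the two
    # MLP helpers defined above.
    conv_mlp = MLP([8, 16, 4])                     # Conv1d-based, expects [B, C, L]
    print(conv_mlp(torch.rand(2, 8, 10)).shape)    # torch.Size([2, 4, 10])
    lin_mlp = build_mlp([8, 16, 4])                # Linear-based, expects [B, C]
    print(lin_mlp(torch.rand(2, 8)).shape)         # torch.Size([2, 4])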
|
/scene_graph_predictor_pc-0.1.1-py3-none-any.whl/scene_graph_predictor_pc/src/model/model_utils/network_util.py
| 0.824885 | 0.401101 |
network_util.py
|
pypi
|
import torch
import torch.nn as nn
from scene_graph_predictor_pc.src.model.model_utils.network_util import build_mlp, Gen_Index, Aggre_Index, MLP
from scene_graph_predictor_pc.src.model.model_utils.networks_base import BaseNetwork
import inspect
from collections import OrderedDict
import os
from scene_graph_predictor_pc.src.utils import op_utils
class TripletEdgeNet(torch.nn.Module):
def __init__(self,dim_node,dim_edge,use_bn=False):
super().__init__()
self.name = 'TripletEdgeNet'
self.dim_node=dim_node
self.dim_edge=dim_edge
self.nn = build_mlp([dim_node*2+dim_edge,2*(dim_node+dim_edge),dim_edge],
do_bn= use_bn, on_last=False)
def forward(self, x_i, edge_feature,x_j):
x_ = torch.cat([x_i,edge_feature,x_j],dim=1)#.view(b, -1, 1)
return self.nn(x_)
def trace(self, pth = './tmp',name_prefix=''):
params = inspect.signature(self.forward).parameters
params = OrderedDict(params)
names_i = [name for name in params.keys()]
names_o = ['y']
x_1 = torch.rand(1, self.dim_node)
e = torch.rand(1, self.dim_edge)
x_2 = torch.rand(1, self.dim_node)
self(x_1,e,x_2)
name = name_prefix+'_'+self.name
op_utils.export(self, (x_1,e,x_2), os.path.join(pth, name),
input_names=names_i, output_names=names_o,
dynamic_axes = {names_i[0]:{0:'n_edge'},
names_i[1]:{0:'n_edge'},
names_i[2]:{0:'n_edge'}})
names = dict()
names['model_'+name] = dict()
names['model_'+name]['path'] = name
names['model_'+name]['input']=names_i
names['model_'+name]['output']=names_o
return names
class MultiHeadedEdgeAttention(torch.nn.Module):
def __init__(self, num_heads: int, dim_node: int, dim_edge: int, dim_atten: int, use_bn=False,
attention = 'fat', use_edge:bool = True, **kwargs):
super().__init__()
assert dim_node % num_heads == 0
assert dim_edge % num_heads == 0
assert dim_atten % num_heads == 0
self.name = 'MultiHeadedEdgeAttention'
self.dim_node=dim_node
self.dim_edge=dim_edge
self.d_n = d_n = dim_node // num_heads
self.d_e = d_e = dim_edge // num_heads
self.d_o = d_o = dim_atten // num_heads
self.num_heads = num_heads
self.use_edge = use_edge
self.nn_edge = build_mlp([dim_node*2+dim_edge,(dim_node+dim_edge),dim_edge],
do_bn= use_bn, on_last=False)
DROP_OUT_ATTEN = None
if 'DROP_OUT_ATTEN' in kwargs:
DROP_OUT_ATTEN = kwargs['DROP_OUT_ATTEN']
# print('drop out in',self.name,'with value',DROP_OUT_ATTEN)
self.attention = attention
assert self.attention in ['fat']
if self.attention == 'fat':
if use_edge:
self.nn = MLP([d_n+d_e, d_n+d_e, d_o],do_bn=use_bn,drop_out = DROP_OUT_ATTEN)
else:
self.nn = MLP([d_n, d_n*2, d_o],do_bn=use_bn,drop_out = DROP_OUT_ATTEN)
self.proj_edge = build_mlp([dim_edge,dim_edge])
self.proj_query = build_mlp([dim_node,dim_node])
self.proj_value = build_mlp([dim_node,dim_atten])
else:
raise NotImplementedError('')
def forward(self, query, edge, value):
batch_dim = query.size(0)
edge_feature = self.nn_edge( torch.cat([query,edge,value],dim=1) )#.view(b, -1, 1)
if self.attention == 'fat':
value = self.proj_value(value)
query = self.proj_query(query).view(batch_dim, self.d_n, self.num_heads)
edge = self.proj_edge(edge).view(batch_dim, self.d_e, self.num_heads)
if self.use_edge:
prob = self.nn(torch.cat([query,edge],dim=1)) # b, dim, head
else:
prob = self.nn(query) # b, dim, head
prob = prob.softmax(1)
x = torch.einsum('bm,bm->bm', prob.reshape_as(value), value)
return x, edge_feature, prob
def trace(self, pth = './tmp',name_prefix=''):
params = inspect.signature(self.forward).parameters
params = OrderedDict(params)
names_i = [name for name in params.keys()]
names_o = ['w_'+names_i[0], 'prob']
x1 = torch.rand(1, self.dim_node)
e = torch.rand(1, self.dim_edge)
x2 = torch.rand(1, self.dim_node)
self(x1,e,x2)
name = name_prefix+'_'+self.name
op_utils.export(self, (x1,e,x2), os.path.join(pth, name),
input_names=names_i, output_names=names_o,
dynamic_axes = {names_i[0]:{0:'n_edge'},
names_i[1]:{0:'n_edge'},
names_i[2]:{0:'n_edge'}})
names = dict()
names['model_'+name] = dict()
names['model_'+name]['path'] = name
names['model_'+name]['input']=names_i
names['model_'+name]['output']=names_o
return names
class GraphEdgeAttenNetwork(BaseNetwork):
def __init__(self, num_heads, dim_node, dim_edge, dim_atten, aggr= 'max', use_bn=False,
flow='target_to_source',attention = 'fat',use_edge:bool=True, **kwargs):
super().__init__() # "Max" aggregation.
self.name = 'edgeatten'
self.dim_node=dim_node
self.dim_edge=dim_edge
self.index_get = Gen_Index(flow=flow)
self.index_aggr = Aggre_Index(aggr=aggr,flow=flow)
self.attention = attention
assert self.attention in [ 'fat']
if self.attention == 'fat':
self.edgeatten = MultiHeadedEdgeAttention(
dim_node=dim_node,dim_edge=dim_edge,dim_atten=dim_atten,
num_heads=num_heads,use_bn=use_bn,attention=attention,use_edge=use_edge, **kwargs)
self.prop = build_mlp([dim_node+dim_atten, dim_node+dim_atten, dim_node],
do_bn= use_bn, on_last=False)
else:
raise NotImplementedError('')
def forward(self, x, edge_feature, edge_index):
assert x.ndim == 2
assert edge_feature.ndim == 2
x_i, x_j = self.index_get(x, edge_index)
xx, gcn_edge_feature, prob = self.edgeatten(x_i,edge_feature,x_j)
xx = self.index_aggr(xx, edge_index, dim_size = x.shape[0])
xx = self.prop(torch.cat([x,xx],dim=1))
return xx, gcn_edge_feature, prob
def trace(self, pth = './tmp', name_prefix=''):
n_node=2
n_edge=4
x = torch.rand(n_node, self.dim_node)
edge_feature = torch.rand(n_edge, self.dim_edge)
edge_index = torch.randint(0, n_node-1, [2,n_edge])
edge_index[0] = torch.zeros([n_edge])
edge_index[1] = torch.ones([n_edge])
self.eval()
self(x,edge_feature,edge_index)
x_i, x_j = self.index_get(x, edge_index)
xx, edge_feature, prob = self.edgeatten(x_i,edge_feature,x_j)
xx = self.index_aggr(xx, edge_index, dim_size = x.shape[0])
# y = self.prop(torch.cat([x,xx],dim=1))
names_i = ['x_in']
names_o = ['x_out']
name_nn = name_prefix+'_'+self.name+'_prop'
cated=torch.cat([x, xx], dim=1)
op_utils.export(self.prop, (cated), os.path.join(pth, name_nn),
input_names=names_i, output_names=names_o,
dynamic_axes = {names_i[0]:{0:'n_node'}})
names_nn = dict()
names_nn['model_'+name_nn] = dict()
names_nn['model_'+name_nn]['path'] = name_nn
names_nn['model_'+name_nn]['input']=names_i
names_nn['model_'+name_nn]['output']=names_o
name = name_prefix+'_'+self.name
names_atten = self.edgeatten.trace(pth, name)
names = dict()
names[name] = dict()
names[name]['atten'] = names_atten
names[name]['prop'] = names_nn
return names
class GraphEdgeAttenNetworkLayers(torch.nn.Module):
""" A sequence of scene graph convolution layers """
def __init__(self, dim_node, dim_edge, dim_atten, num_layers, num_heads=1, aggr= 'max',
use_bn=False,flow='target_to_source',attention = 'fat', use_edge:bool=True, **kwargs):
super().__init__()
self.num_layers = num_layers
self.num_heads = num_heads
self.gconvs = torch.nn.ModuleList()
self.drop_out = None
if 'DROP_OUT_ATTEN' in kwargs:
self.drop_out = torch.nn.Dropout(kwargs['DROP_OUT_ATTEN'])
for _ in range(self.num_layers):
self.gconvs.append(GraphEdgeAttenNetwork(num_heads,dim_node,dim_edge,dim_atten,aggr,
use_bn=use_bn,flow=flow,attention=attention,use_edge=use_edge, **kwargs))
def forward(self, node_feature, edge_feature, edges_indices):
probs = list()
for i in range(self.num_layers):
gconv = self.gconvs[i]
node_feature, edge_feature, prob = gconv(node_feature, edge_feature, edges_indices)
if i < (self.num_layers-1) or self.num_layers==1:
node_feature = torch.nn.functional.relu(node_feature)
edge_feature = torch.nn.functional.relu(edge_feature)
if self.drop_out:
node_feature = self.drop_out(node_feature)
edge_feature = self.drop_out(edge_feature)
if prob is not None:
probs.append(prob.cpu().detach())
else:
probs.append(None)
return node_feature, edge_feature, probs
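# --- hedged usage sketch (not part of the original module) ---
# Run a 2-layer attention GNN on a toy graph with 3 nodes and 2 directed edges;
# all dimensions are arbitrary but divisible by the number of heads.
if __name__ == '__main__':
    gnn = GraphEdgeAttenNetworkLayers(dim_node=32, dim_edge=16, dim_atten=32,
                                      num_layers=2, num_heads=4)
    nodes = torch.rand(3, 32)
    edges = torch.rand(2, 16)
    edge_index = torch.LongTensor([[0, 1], [1, 2]])
    out_nodes, out_edges, probs = gnn(nodes, edges, edge_index)
    print(out_nodes.shape, out_edges.shape, len(probs))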
|
/scene_graph_predictor_pc-0.1.1-py3-none-any.whl/scene_graph_predictor_pc/src/model/model_utils/network_GNN.py
| 0.764452 | 0.36815 |
network_GNN.py
|
pypi
|
from inspect import ismodule, ismethod
from typing import Optional, Set, Union, List
from aiogram import Dispatcher
from scene_manager import StorageSettings
from scene_manager.loader import utils
from scene_manager.loader.models import HandlersStorage, SceneModel
from scene_manager.loader.utils import get_class_attr
from scene_manager.scenes.base import BaseScene
from scene_manager.storages import redis
from scene_manager.storages.base import BaseStorage
from loguru import logger
from aiogram.utils.mixins import ContextInstanceMixin
from functools import lru_cache
@lru_cache
def get_user_attr(user_class) -> Set[str]:
all_dir = get_class_attr(user_class)
return all_dir - get_class_attr(BaseScene)
class Loader(ContextInstanceMixin):
def __init__(
self,
*,
dispatcher: Optional[Dispatcher] = None,
storage: Optional[BaseStorage] = None,
path_to_scenes: Optional[Union[str, List[str]]] = None,
) -> None:
self._dispatcher = dispatcher
self.data_storage = storage or redis.RedisStorage(StorageSettings())
self.handlers_storage = HandlersStorage()
self.is_scenes_loaded = False
if path_to_scenes is None:
path_to_scenes = ["./scenes"]
elif isinstance(path_to_scenes, str):
path_to_scenes = [path_to_scenes]
self._path_to_scenes = path_to_scenes
self.set_current(self)
def load_scenes(self) -> None:
self.is_scenes_loaded = True
self._class_distribution()
def _class_distribution(self) -> None:
logger.debug("Start load scenes")
        for user_class in self._loading_classes():
            scene_instance = user_class(self.data_storage, dispatcher=self._dispatcher)
            self._recording_scene(scene_instance)
def _recording_scene(self, user_class) -> None:
scene_storage = self._find_scene_storage(user_class)
for user_method in get_user_attr(user_class):
user_attr = getattr(user_class, user_method)
if not ismethod(user_attr):
continue
scene_model = SceneModel(
scene_name=user_method, handler=user_attr, link_to_object=user_class, config=user_class.config
)
self.handlers_storage.set_scene(scene_storage, scene_model)
logger.debug(f"Add scene {scene_storage}, {user_method}, {scene_model}")
def _find_scene_storage(self, user_class):
for scenes_type in self.handlers_storage.scenes_storages.keys():
if not isinstance(user_class, scenes_type):
continue
return scenes_type
def _loading_classes(self) -> list:
classes = list()
files_path = list()
for directory in self._path_to_scenes:
files_path.extend(utils.recursive_load_files(directory))
for file_path in files_path:
module = utils.load_module(file_path)
classes.extend(self._get_classes(module))
return classes
def _get_classes(self, module) -> list:
user_classes = list()
module_dirs = utils.get_class_attr(module)
for module_dir in module_dirs:
user_class = getattr(module, module_dir)
try:
if not ismodule(user_class) and user_class not in self.handlers_storage.scenes_storages.keys():
user_classes.append(user_class)
except Exception as e:
logger.exception(f"Error in module check: {e}")
return user_classes
|
/scene_manager-0.1.0.tar.gz/scene_manager-0.1.0/scene_manager/loader/loader.py
| 0.786131 | 0.182426 |
loader.py
|
pypi
|
from functools import wraps
from typing import Callable, List, Union, Optional
from aiogram import types
from aiogram.types import ContentType
from scene_manager.utils import content_type_checker
from abc import ABC, abstractmethod
def context_types_filter(
context_types: List[Union[ContentType, str]], otherwise_handler: Optional[Callable] = None
) -> Callable:
def decorator(func: Callable):
@wraps(func)
async def wrapper(self, message: types.Message) -> Callable:
if content_type_checker(message, context_types):
return await func(self, message)
elif otherwise_handler is not None:
return await otherwise_handler(self, message)
return wrapper
return decorator
def query_data_filter(data: List[str], otherwise_handler: Optional[Callable] = None) -> Callable:
def decorator(func: Callable):
@wraps(func)
async def wrapper(self, query: types.CallbackQuery) -> Callable:
if isinstance(query.data, str) and query.data in data:
return await func(self, query)
elif isinstance(query.data, list) and set(query.data).issubset(set(data)):
return await func(self, query)
elif otherwise_handler is not None:
return await otherwise_handler(self, query)
return wrapper
return decorator
class BaseFilter(ABC):
@abstractmethod
def __call__(self) -> bool:
raise NotImplementedError
@abstractmethod
def otherwise_handler(self):
raise NotImplementedError
def filter_manager(filter_class) -> Callable:
    def decorator(func: Callable):
        @wraps(func)
        async def wrapper(self, ctx) -> Callable:
            filter_obj = filter_class(ctx)
filter_result = await filter_obj()
if filter_result:
return await func(self, ctx)
elif getattr(filter_obj, "otherwise_handler", None) is not None:
return await filter_obj.otherwise_handler()
return wrapper
return decorator
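# --- hedged usage sketch (not part of the original module) ---
# query_data_filter only forwards callback queries whose `data` is in the
# allowed list; the dummy query object below stands in for types.CallbackQuery.
if __name__ == '__main__':
    import asyncio
    from types import SimpleNamespace

    class _Demo:
        @query_data_filter(['open', 'close'])
        async def on_query(self, query):
            return query.data

    demo = _Demo()
    print(asyncio.run(demo.on_query(SimpleNamespace(data='open'))))   # 'open'
    print(asyncio.run(demo.on_query(SimpleNamespace(data='other'))))  # None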
|
/scene_manager-0.1.0.tar.gz/scene_manager-0.1.0/scene_manager/tools/filters.py
| 0.901864 | 0.281751 |
filters.py
|
pypi
|
import argparse
import glob
import logging
from math import atan2, degrees, fabs, sin, radians, cos
import numpy as np
import os
import cv2
from scene_text.detector import EASTDetector
from scene_text.recognizer import MORANRecognizer
log = logging.getLogger(__name__)
def sort_poly(p):
min_axis = np.argmin(np.sum(p, axis=1))
p = p[[min_axis, (min_axis+1)%4, (min_axis+2)%4, (min_axis+3)%4]]
if abs(p[0, 0] - p[1, 0]) > abs(p[0, 1] - p[1, 1]):
return p
else:
return p[[0, 3, 2, 1]]
def dumpRotateImage(img, degree, pt1, pt2, pt3, pt4):
height, width = img.shape[:2]
heightNew = int(width * fabs(sin(radians(degree))) + height * fabs(cos(radians(degree))))
widthNew = int(height * fabs(sin(radians(degree))) + width * fabs(cos(radians(degree))))
matRotation = cv2.getRotationMatrix2D((width / 2, height / 2), degree, 1)
matRotation[0, 2] += (widthNew - width) / 2
matRotation[1, 2] += (heightNew - height) / 2
imgRotation = cv2.warpAffine(img, matRotation, (widthNew, heightNew), borderValue=(255, 255, 255))
pt1 = list(pt1)
pt3 = list(pt3)
[[pt1[0]], [pt1[1]]] = np.dot(matRotation, np.array([[pt1[0]], [pt1[1]], [1]]))
[[pt3[0]], [pt3[1]]] = np.dot(matRotation, np.array([[pt3[0]], [pt3[1]], [1]]))
imgOut = imgRotation[int(pt1[1]):int(pt3[1]), int(pt1[0]):int(pt3[0])]
height, width = imgOut.shape[:2]
return imgOut
class AllWordsRecognizer:
"""Pipeline for detection and recognition of all words in an image"""
def __init__(self, text_direction='ltr'):
self.text_direction = text_direction
self.detector = EASTDetector()
self.recognizer = MORANRecognizer()
def get_all_words(self, image):
"""Return lists of words and corresponding boxes"""
log.debug("start processing image of shape {}".format(image.shape))
boxes = self.detector.detect(image)
words = []
if boxes is not None:
log.debug("detected {} words".format(len(boxes)))
for box in boxes:
                # skip degenerate boxes that are too small to contain text
box = sort_poly(box.astype(np.int32))
if np.linalg.norm(box[0] - box[1]) < 5 or np.linalg.norm(box[3]-box[0]) < 5:
continue
pt1 = (box[0, 0], box[0, 1])
pt2 = (box[1, 0], box[1, 1])
pt3 = (box[2, 0], box[2, 1])
pt4 = (box[3, 0], box[3, 1])
word_img = dumpRotateImage(image, degrees(atan2(pt2[1] - pt1[1], pt2[0] - pt1[0])), pt1, pt2, pt3, pt4)
word = (self.recognizer.recognize(word_img))[self.text_direction]
words.append(word)
log.debug("completed recognition")
return words, boxes
def main():
logging.basicConfig(level=logging.DEBUG
, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
font = cv2.FONT_HERSHEY_COMPLEX
fontScale = 0.5
fontColor = (0,69,255)
lineType = 1
def get_image_paths(image_path):
types = ('.jpg', '.png', '.jpeg', '.JPG')
if os.path.isfile(image_path) and image_path.endswith(types):
return [image_path]
files = []
for t in types:
files.extend(glob.glob(os.path.join(image_path, '*' + t)))
return files
parser = argparse.ArgumentParser()
parser.add_argument('input_image_path', type=str)
parser.add_argument('output_path', type=str)
FLAGS = parser.parse_args()
pipeline = AllWordsRecognizer()
img_list = get_image_paths(FLAGS.input_image_path)
logging.info('Found {} images'.format(len(img_list)))
for img_file in img_list:
img = cv2.imread(img_file)[:, :, ::-1]
        words, boxes = pipeline.get_all_words(img)
        if words:
            # draw on a contiguous BGR copy; OpenCV drawing functions cannot
            # write into the reversed-channel view of the RGB array
            img_bgr = img[:, :, ::-1].copy()
            res_file = os.path.join(
                FLAGS.output_path,
                '{}.txt'.format(
                    os.path.basename(img_file).split('.')[0]))
            with open(res_file, 'w') as f:
                for idx, box in enumerate(boxes):
                    bottomLeftCornerOfText = (int(box[0, 0]), int(box[0, 1]))
                    cv2.putText(img_bgr, words[idx],
                                bottomLeftCornerOfText,
                                font,
                                fontScale,
                                fontColor,
                                lineType)
                    f.write('{:.2f},{:.2f},{:.2f},{:.2f},{:.2f},{:.2f},{:.2f},{:.2f},{}\r\n'.format(
                        box[0, 0], box[0, 1], box[1, 0], box[1, 1], box[2, 0], box[2, 1], box[3, 0], box[3, 1], words[idx]
                    ))
                    cv2.polylines(img_bgr, [box.astype(np.int32).reshape((-1, 1, 2))], True, color=(255, 255, 0), thickness=1)
            img_path = os.path.join(FLAGS.output_path, os.path.basename(img_file))
            cv2.imwrite(img_path, img_bgr)
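# Hedged addition (not in the original file): allow running the pipeline as a
# plain script; the package may also expose `main` through an entry point.
if __name__ == '__main__':
    main()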
|
/scene-text-0.2.3.tar.gz/scene-text-0.2.3/scene_text/scene_text.py
| 0.497315 | 0.3975 |
scene_text.py
|
pypi
|
from collections import OrderedDict
import logging
import os
import cv2
from PIL import Image
import torch
from torch.autograd import Variable
from .MORAN_v2.tools import utils
from .MORAN_v2.tools import dataset
from .MORAN_v2.models.moran import MORAN
log = logging.getLogger(__name__)
class MORANRecognizer:
def __init__(self
, model_path = os.path.join(os.path.dirname(__file__), 'MORAN_v2/demo.pth')):
alphabet = '0:1:2:3:4:5:6:7:8:9:a:b:c:d:e:f:g:h:i:j:k:l:m:n:o:p:q:r:s:t:u:v:w:x:y:z:$'
self.cuda_flag = False
if torch.cuda.is_available():
self.cuda_flag = True
self.MORAN = MORAN(1, len(alphabet.split(':')), 256, 32, 100, BidirDecoder=True, CUDA=self.cuda_flag)
self.MORAN = self.MORAN.cuda()
else:
self.MORAN = MORAN(1, len(alphabet.split(':')), 256, 32, 100, BidirDecoder=True, inputDataType='torch.FloatTensor', CUDA=self.cuda_flag)
if not os.path.isfile(model_path):
log.info('loading model from Google Drive URL')
from scene_text.util import download_file_from_google_drive
download_file_from_google_drive('1IDvT51MXKSseDq3X57uPjOzeSYI09zip',
model_path)
log.info('loading pretrained model from %s' % model_path)
if self.cuda_flag:
state_dict = torch.load(model_path)
else:
state_dict = torch.load(model_path, map_location='cpu')
MORAN_state_dict_rename = OrderedDict()
for k, v in state_dict.items():
name = k.replace("module.", "") # remove `module.`
MORAN_state_dict_rename[name] = v
self.MORAN.load_state_dict(MORAN_state_dict_rename)
for p in self.MORAN.parameters():
p.requires_grad = False
self.MORAN.eval()
self.converter = utils.strLabelConverterForAttention(alphabet, ':')
self.transformer = dataset.resizeNormalize((100, 32))
def recognize(self, cv2_img):
cv2_im = cv2.cvtColor(cv2_img, cv2.COLOR_BGR2RGB)
pil_im = Image.fromarray(cv2_im)
image = pil_im.convert('L')
image = self.transformer(image)
if self.cuda_flag:
image = image.cuda()
image = image.view(1, *image.size())
image = Variable(image)
text = torch.LongTensor(1 * 5)
length = torch.IntTensor(1)
text = Variable(text)
length = Variable(length)
max_iter = 20
t, l = self.converter.encode('0'*max_iter)
utils.loadData(text, t)
utils.loadData(length, l)
output = self.MORAN(image, length, text, text, test=True, debug=True)
preds, preds_reverse = output[0]
demo = output[1]
_, preds = preds.max(1)
_, preds_reverse = preds_reverse.max(1)
sim_preds = self.converter.decode(preds.data, length.data)
sim_preds = sim_preds.strip().split('$')[0]
sim_preds_reverse = self.converter.decode(preds_reverse.data, length.data)
sim_preds_reverse = sim_preds_reverse.strip().split('$')[0]
# cv2.imshow("demo", demo)
# cv2.waitKey()
return {'ltr': sim_preds, 'rtl': sim_preds_reverse }
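# --- hedged usage sketch (not part of the original module) ---
# 'word_crop.png' is a placeholder path to a cropped word image; the pretrained
# weights are downloaded on first use if missing.
if __name__ == '__main__':
    recognizer = MORANRecognizer()
    word_img = cv2.imread('word_crop.png')
    print(recognizer.recognize(word_img))  # {'ltr': '...', 'rtl': '...'}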
|
/scene-text-0.2.3.tar.gz/scene-text-0.2.3/scene_text/recognizer/moran.py
| 0.582254 | 0.167389 |
moran.py
|
pypi
|
import torch
from torch.autograd import Variable
import tools.utils as utils
import tools.dataset as dataset
from PIL import Image
from collections import OrderedDict
import cv2
from models.moran import MORAN
model_path = './demo.pth'
img_path = './demo/gtsoukas/6.png'
alphabet = '0:1:2:3:4:5:6:7:8:9:a:b:c:d:e:f:g:h:i:j:k:l:m:n:o:p:q:r:s:t:u:v:w:x:y:z:$'
cuda_flag = False
if torch.cuda.is_available():
cuda_flag = True
MORAN = MORAN(1, len(alphabet.split(':')), 256, 32, 100, BidirDecoder=True, CUDA=cuda_flag)
MORAN = MORAN.cuda()
else:
MORAN = MORAN(1, len(alphabet.split(':')), 256, 32, 100, BidirDecoder=True, inputDataType='torch.FloatTensor', CUDA=cuda_flag)
print('loading pretrained model from %s' % model_path)
if cuda_flag:
state_dict = torch.load(model_path)
else:
state_dict = torch.load(model_path, map_location='cpu')
MORAN_state_dict_rename = OrderedDict()
for k, v in state_dict.items():
name = k.replace("module.", "") # remove `module.`
MORAN_state_dict_rename[name] = v
MORAN.load_state_dict(MORAN_state_dict_rename)
for p in MORAN.parameters():
p.requires_grad = False
MORAN.eval()
converter = utils.strLabelConverterForAttention(alphabet, ':')
transformer = dataset.resizeNormalize((100, 32))
image = Image.open(img_path).convert('L')
image = transformer(image)
if cuda_flag:
image = image.cuda()
image = image.view(1, *image.size())
image = Variable(image)
text = torch.LongTensor(1 * 5)
length = torch.IntTensor(1)
text = Variable(text)
length = Variable(length)
max_iter = 20
t, l = converter.encode('0'*max_iter)
utils.loadData(text, t)
utils.loadData(length, l)
output = MORAN(image, length, text, text, test=True, debug=True)
preds, preds_reverse = output[0]
demo = output[1]
_, preds = preds.max(1)
_, preds_reverse = preds_reverse.max(1)
sim_preds = converter.decode(preds.data, length.data)
sim_preds = sim_preds.strip().split('$')[0]
sim_preds_reverse = converter.decode(preds_reverse.data, length.data)
sim_preds_reverse = sim_preds_reverse.strip().split('$')[0]
print('\nResult:\n' + 'Left to Right: ' + sim_preds + '\nRight to Left: ' + sim_preds_reverse + '\n\n')
cv2.imshow("demo", demo)
cv2.waitKey()
|
/scene-text-0.2.3.tar.gz/scene-text-0.2.3/scene_text/recognizer/MORAN_v2/demo.py
| 0.422505 | 0.341363 |
demo.py
|
pypi
|
import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from scene_text.recognizer.MORAN_v2.models.fracPickup import fracPickup
class BidirectionalLSTM(nn.Module):
def __init__(self, nIn, nHidden, nOut):
super(BidirectionalLSTM, self).__init__()
self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True, dropout=0.3)
self.embedding = nn.Linear(nHidden * 2, nOut)
def forward(self, input):
recurrent, _ = self.rnn(input)
T, b, h = recurrent.size()
t_rec = recurrent.view(T * b, h)
output = self.embedding(t_rec) # [T * b, nOut]
output = output.view(T, b, -1)
return output
class AttentionCell(nn.Module):
def __init__(self, input_size, hidden_size, num_embeddings=128, CUDA=True):
super(AttentionCell, self).__init__()
self.i2h = nn.Linear(input_size, hidden_size,bias=False)
self.h2h = nn.Linear(hidden_size, hidden_size)
self.score = nn.Linear(hidden_size, 1, bias=False)
self.rnn = nn.GRUCell(input_size+num_embeddings, hidden_size)
self.hidden_size = hidden_size
self.input_size = input_size
self.num_embeddings = num_embeddings
self.fracPickup = fracPickup(CUDA=CUDA)
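    # One decoding step: score each encoder time step against the previous hidden state,
    # softmax over time, optionally smooth the attention with fracPickup during training,
    # then feed the attended context plus the current character embedding into a GRU cell.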
def forward(self, prev_hidden, feats, cur_embeddings, test=False):
nT = feats.size(0)
nB = feats.size(1)
nC = feats.size(2)
hidden_size = self.hidden_size
feats_proj = self.i2h(feats.view(-1,nC))
prev_hidden_proj = self.h2h(prev_hidden).view(1,nB, hidden_size).expand(nT, nB, hidden_size).contiguous().view(-1, hidden_size)
emition = self.score(F.tanh(feats_proj + prev_hidden_proj).view(-1, hidden_size)).view(nT,nB)
alpha = F.softmax(emition, 0) # nB * nT
if not test:
alpha_fp = self.fracPickup(alpha.unsqueeze(1).unsqueeze(2)).squeeze()
context = (feats * alpha_fp.view(nT,nB,1).expand(nT, nB, nC)).sum(0).squeeze(0) # nB * nC
if len(context.size()) == 1:
context = context.unsqueeze(0)
context = torch.cat([context, cur_embeddings], 1)
cur_hidden = self.rnn(context, prev_hidden)
return cur_hidden, alpha_fp
else:
context = (feats * alpha.view(nT,nB,1).expand(nT, nB, nC)).sum(0).squeeze(0) # nB * nC
if len(context.size()) == 1:
context = context.unsqueeze(0)
context = torch.cat([context, cur_embeddings], 1)
cur_hidden = self.rnn(context, prev_hidden)
return cur_hidden, alpha
class Attention(nn.Module):
def __init__(self, input_size, hidden_size, num_classes, num_embeddings=128, CUDA=True):
super(Attention, self).__init__()
self.attention_cell = AttentionCell(input_size, hidden_size, num_embeddings, CUDA=CUDA)
self.input_size = input_size
self.hidden_size = hidden_size
self.generator = nn.Linear(hidden_size, num_classes)
self.char_embeddings = Parameter(torch.randn(num_classes+1, num_embeddings))
self.num_embeddings = num_embeddings
self.num_classes = num_classes
self.cuda = CUDA
# targets is nT * nB
def forward(self, feats, text_length, text, test=False):
nT = feats.size(0)
nB = feats.size(1)
nC = feats.size(2)
hidden_size = self.hidden_size
input_size = self.input_size
assert(input_size == nC)
assert(nB == text_length.numel())
num_steps = text_length.data.max()
num_labels = text_length.data.sum()
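        # Training branch: teacher forcing with ground-truth characters shifted one step;
        # test branch: greedy decoding that feeds the argmax prediction back as input.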
if not test:
targets = torch.zeros(nB, num_steps+1).long()
if self.cuda:
targets = targets.cuda()
start_id = 0
for i in range(nB):
targets[i][1:1+text_length.data[i]] = text.data[start_id:start_id+text_length.data[i]]+1
start_id = start_id+text_length.data[i]
targets = Variable(targets.transpose(0,1).contiguous())
output_hiddens = Variable(torch.zeros(num_steps, nB, hidden_size).type_as(feats.data))
hidden = Variable(torch.zeros(nB,hidden_size).type_as(feats.data))
for i in range(num_steps):
cur_embeddings = self.char_embeddings.index_select(0, targets[i])
hidden, alpha = self.attention_cell(hidden, feats, cur_embeddings, test)
output_hiddens[i] = hidden
new_hiddens = Variable(torch.zeros(num_labels, hidden_size).type_as(feats.data))
b = 0
start = 0
for length in text_length.data:
new_hiddens[start:start+length] = output_hiddens[0:length,b,:]
start = start + length
b = b + 1
probs = self.generator(new_hiddens)
return probs
else:
hidden = Variable(torch.zeros(nB,hidden_size).type_as(feats.data))
targets_temp = Variable(torch.zeros(nB).long().contiguous())
probs = Variable(torch.zeros(nB*num_steps, self.num_classes))
if self.cuda:
targets_temp = targets_temp.cuda()
probs = probs.cuda()
for i in range(num_steps):
cur_embeddings = self.char_embeddings.index_select(0, targets_temp)
hidden, alpha = self.attention_cell(hidden, feats, cur_embeddings, test)
hidden2class = self.generator(hidden)
probs[i*nB:(i+1)*nB] = hidden2class
_, targets_temp = hidden2class.max(1)
targets_temp += 1
probs = probs.view(num_steps, nB, self.num_classes).permute(1, 0, 2).contiguous()
probs = probs.view(-1, self.num_classes).contiguous()
probs_res = Variable(torch.zeros(num_labels, self.num_classes).type_as(feats.data))
b = 0
start = 0
for length in text_length.data:
probs_res[start:start+length] = probs[b*num_steps:b*num_steps+length]
start = start + length
b = b + 1
return probs_res
class Residual_block(nn.Module):
def __init__(self, c_in, c_out, stride):
super(Residual_block, self).__init__()
self.downsample = None
flag = False
if isinstance(stride, tuple):
if stride[0] > 1:
self.downsample = nn.Sequential(nn.Conv2d(c_in, c_out, 3, stride, 1),nn.BatchNorm2d(c_out, momentum=0.01))
flag = True
else:
if stride > 1:
self.downsample = nn.Sequential(nn.Conv2d(c_in, c_out, 3, stride, 1),nn.BatchNorm2d(c_out, momentum=0.01))
flag = True
if flag:
self.conv1 = nn.Sequential(nn.Conv2d(c_in, c_out, 3, stride, 1),
nn.BatchNorm2d(c_out, momentum=0.01))
else:
self.conv1 = nn.Sequential(nn.Conv2d(c_in, c_out, 1, stride, 0),
nn.BatchNorm2d(c_out, momentum=0.01))
self.conv2 = nn.Sequential(nn.Conv2d(c_out, c_out, 3, 1, 1),
nn.BatchNorm2d(c_out, momentum=0.01))
self.relu = nn.ReLU()
def forward(self,x):
residual = x
conv1 = self.conv1(x)
conv2 = self.conv2(conv1)
if self.downsample is not None:
residual = self.downsample(residual)
return self.relu(residual + conv2)
class ResNet(nn.Module):
def __init__(self,c_in):
super(ResNet,self).__init__()
self.block0 = nn.Sequential(nn.Conv2d(c_in, 32, 3, 1, 1),nn.BatchNorm2d(32, momentum=0.01))
self.block1 = self._make_layer(32, 32, 2, 3)
self.block2 = self._make_layer(32, 64, 2, 4)
self.block3 = self._make_layer(64, 128, (2,1), 6)
self.block4 = self._make_layer(128, 256, (2,1), 6)
self.block5 = self._make_layer(256, 512, (2,1), 3)
def _make_layer(self,c_in,c_out,stride,repeat=3):
layers = []
layers.append(Residual_block(c_in, c_out, stride))
for i in range(repeat - 1):
layers.append(Residual_block(c_out, c_out, 1))
return nn.Sequential(*layers)
def forward(self,x):
block0 = self.block0(x)
block1 = self.block1(block0)
block2 = self.block2(block1)
block3 = self.block3(block2)
block4 = self.block4(block3)
block5 = self.block5(block4)
return block5
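# ASRN = ResNet-style CNN encoder + two-layer bidirectional LSTM + attention decoder(s);
# with BidirDecoder=True it produces both left-to-right and right-to-left predictions.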
class ASRN(nn.Module):
def __init__(self, imgH, nc, nclass, nh, BidirDecoder=False, CUDA=True):
super(ASRN, self).__init__()
assert imgH % 16 == 0, 'imgH must be a multiple of 16'
self.cnn = ResNet(nc)
self.rnn = nn.Sequential(
BidirectionalLSTM(512, nh, nh),
BidirectionalLSTM(nh, nh, nh),
)
self.BidirDecoder = BidirDecoder
if self.BidirDecoder:
self.attentionL2R = Attention(nh, nh, nclass, 256, CUDA=CUDA)
self.attentionR2L = Attention(nh, nh, nclass, 256, CUDA=CUDA)
else:
self.attention = Attention(nh, nh, nclass, 256, CUDA=CUDA)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal(m.weight, mode='fan_out', a=0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant(m.weight, 1)
nn.init.constant(m.bias, 0)
def forward(self, input, length, text, text_rev, test=False):
# conv features
conv = self.cnn(input)
b, c, h, w = conv.size()
assert h == 1, "the height of conv must be 1"
conv = conv.squeeze(2)
conv = conv.permute(2, 0, 1).contiguous() # [w, b, c]
# rnn features
rnn = self.rnn(conv)
if self.BidirDecoder:
outputL2R = self.attentionL2R(rnn, length, text, test)
outputR2L = self.attentionR2L(rnn, length, text_rev, test)
return outputL2R, outputR2L
else:
output = self.attention(rnn, length, text, test)
return output
|
/scene-text-0.2.3.tar.gz/scene-text-0.2.3/scene_text/recognizer/MORAN_v2/models/asrn_res.py
| 0.906928 | 0.502136 |
asrn_res.py
|
pypi
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
class MORN(nn.Module):
def __init__(self, nc, targetH, targetW, inputDataType='torch.cuda.FloatTensor', maxBatch=256, CUDA=True):
super(MORN, self).__init__()
self.targetH = targetH
self.targetW = targetW
self.inputDataType = inputDataType
self.maxBatch = maxBatch
self.cuda = CUDA
self.cnn = nn.Sequential(
nn.MaxPool2d(2, 2),
nn.Conv2d(nc, 64, 3, 1, 1), nn.BatchNorm2d(64), nn.ReLU(True), nn.MaxPool2d(2, 2),
nn.Conv2d(64, 128, 3, 1, 1), nn.BatchNorm2d(128), nn.ReLU(True), nn.MaxPool2d(2, 2),
nn.Conv2d(128, 64, 3, 1, 1), nn.BatchNorm2d(64), nn.ReLU(True),
nn.Conv2d(64, 16, 3, 1, 1), nn.BatchNorm2d(16), nn.ReLU(True),
nn.Conv2d(16, 1, 3, 1, 1), nn.BatchNorm2d(1)
)
self.pool = nn.MaxPool2d(2, 1)
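        # Build an identity sampling grid in normalized [-1, 1] coordinates, tiled for the
        # maximum batch size; grid_x / grid_y are sliced out so only y receives offsets.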
h_list = np.arange(self.targetH)*2./(self.targetH-1)-1
w_list = np.arange(self.targetW)*2./(self.targetW-1)-1
grid = np.meshgrid(
w_list,
h_list,
indexing='ij'
)
grid = np.stack(grid, axis=-1)
grid = np.transpose(grid, (1, 0, 2))
grid = np.expand_dims(grid, 0)
grid = np.tile(grid, [maxBatch, 1, 1, 1])
grid = torch.from_numpy(grid).type(self.inputDataType)
if self.cuda:
grid = grid.cuda()
self.grid = Variable(grid, requires_grad=False)
self.grid_x = self.grid[:, :, :, 0].unsqueeze(3)
self.grid_y = self.grid[:, :, :, 1].unsqueeze(3)
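    # forward() predicts per-pixel vertical offsets with a small CNN and resamples the input
    # via grid_sample to rectify curved or slanted text; `enhance` repeats the estimation at
    # test time and `debug` additionally returns a visualization of the offset field.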
def forward(self, x, test, enhance=1, debug=False):
if not test and np.random.random() > 0.5:
return nn.functional.upsample(x, size=(self.targetH, self.targetW), mode='bilinear')
if not test:
enhance = 0
assert x.size(0) <= self.maxBatch
assert x.data.type() == self.inputDataType
grid = self.grid[:x.size(0)]
grid_x = self.grid_x[:x.size(0)]
grid_y = self.grid_y[:x.size(0)]
x_small = nn.functional.upsample(x, size=(self.targetH, self.targetW), mode='bilinear')
offsets = self.cnn(x_small)
offsets_posi = nn.functional.relu(offsets, inplace=False)
offsets_nega = nn.functional.relu(-offsets, inplace=False)
offsets_pool = self.pool(offsets_posi) - self.pool(offsets_nega)
offsets_grid = nn.functional.grid_sample(offsets_pool, grid)
offsets_grid = offsets_grid.permute(0, 2, 3, 1).contiguous()
offsets_x = torch.cat([grid_x, grid_y + offsets_grid], 3)
x_rectified = nn.functional.grid_sample(x, offsets_x)
for iteration in range(enhance):
offsets = self.cnn(x_rectified)
offsets_posi = nn.functional.relu(offsets, inplace=False)
offsets_nega = nn.functional.relu(-offsets, inplace=False)
offsets_pool = self.pool(offsets_posi) - self.pool(offsets_nega)
offsets_grid += nn.functional.grid_sample(offsets_pool, grid).permute(0, 2, 3, 1).contiguous()
offsets_x = torch.cat([grid_x, grid_y + offsets_grid], 3)
x_rectified = nn.functional.grid_sample(x, offsets_x)
if debug:
offsets_mean = torch.mean(offsets_grid.view(x.size(0), -1), 1)
offsets_max, _ = torch.max(offsets_grid.view(x.size(0), -1), 1)
offsets_min, _ = torch.min(offsets_grid.view(x.size(0), -1), 1)
import matplotlib.pyplot as plt
from colour import Color
from torchvision import transforms
import cv2
alpha = 0.7
density_range = 256
color_map = np.empty([self.targetH, self.targetW, 3], dtype=int)
cmap = plt.get_cmap("rainbow")
blue = Color("blue")
hex_colors = list(blue.range_to(Color("red"), density_range))
rgb_colors = [[rgb * 255 for rgb in color.rgb] for color in hex_colors][::-1]
to_pil_image = transforms.ToPILImage()
for i in range(x.size(0)):
img_small = x_small[i].data.cpu().mul_(0.5).add_(0.5)
img = to_pil_image(img_small)
img = np.array(img)
if len(img.shape) == 2:
img = cv2.merge([img.copy()]*3)
img_copy = img.copy()
v_max = offsets_max.data[i]
v_min = offsets_min.data[i]
img_offsets = (offsets_grid[i]).view(1, self.targetH, self.targetW).data.cpu().add_(-v_min).mul_(1./(v_max-v_min))
img_offsets = to_pil_image(img_offsets)
img_offsets = np.array(img_offsets)
color_map = np.empty([self.targetH, self.targetW, 3], dtype=int)
for h_i in range(self.targetH):
for w_i in range(self.targetW):
color_map[h_i][w_i] = rgb_colors[int(img_offsets[h_i, w_i]/256.*density_range)]
color_map = color_map.astype(np.uint8)
cv2.addWeighted(color_map, alpha, img_copy, 1-alpha, 0, img_copy)
img_processed = x_rectified[i].data.cpu().mul_(0.5).add_(0.5)
img_processed = to_pil_image(img_processed)
img_processed = np.array(img_processed)
if len(img_processed.shape) == 2:
img_processed = cv2.merge([img_processed.copy()]*3)
total_img = np.ones([self.targetH, self.targetW*3+10, 3], dtype=int)*255
total_img[0:self.targetH, 0:self.targetW] = img
total_img[0:self.targetH, self.targetW+5:2*self.targetW+5] = img_copy
total_img[0:self.targetH, self.targetW*2+10:3*self.targetW+10] = img_processed
total_img = cv2.resize(total_img.astype(np.uint8), (300, 50))
# cv2.imshow("Input_Offsets_Output", total_img)
# cv2.waitKey()
return x_rectified, total_img
return x_rectified
|
/scene-text-0.2.3.tar.gz/scene-text-0.2.3/scene_text/recognizer/MORAN_v2/models/morn.py
| 0.897288 | 0.59302 |
morn.py
|
pypi
|
import random
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
from torch.utils.data import sampler
import lmdb
import six
import sys
from PIL import Image
import numpy as np
class lmdbDataset(Dataset):
def __init__(self, root=None, transform=None, reverse=False, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'):
self.env = lmdb.open(
root,
max_readers=1,
readonly=True,
lock=False,
readahead=False,
meminit=False)
if not self.env:
            print('cannot create lmdb from %s' % (root))
sys.exit(0)
with self.env.begin(write=False) as txn:
nSamples = int(txn.get('num-samples'.encode()))
self.nSamples = nSamples
self.transform = transform
self.alphabet = alphabet
self.reverse = reverse
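    # Samples live under 1-based LMDB keys 'image-%09d' / 'label-%09d'; labels are filtered
    # to the alphabet and terminated with '$' (plus a reversed copy when reverse=True).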
def __len__(self):
return self.nSamples
def __getitem__(self, index):
assert index <= len(self), 'index range error'
index += 1
with self.env.begin(write=False) as txn:
img_key = 'image-%09d' % index
imgbuf = txn.get(img_key.encode())
buf = six.BytesIO()
buf.write(imgbuf)
buf.seek(0)
try:
img = Image.open(buf).convert('L')
except IOError:
print('Corrupted image for %d' % index)
return self[index + 1]
label_key = 'label-%09d' % index
label = str(txn.get(label_key.encode()).decode('utf-8'))
label = ''.join(label[i] if label[i].lower() in self.alphabet else ''
for i in range(len(label)))
if len(label) <= 0:
return self[index + 1]
if self.reverse:
label_rev = label[-1::-1]
label_rev += '$'
label += '$'
if self.transform is not None:
img = self.transform(img)
if self.reverse:
return (img, label, label_rev)
else:
return (img, label)
class resizeNormalize(object):
def __init__(self, size, interpolation=Image.BILINEAR):
self.size = size
self.interpolation = interpolation
self.toTensor = transforms.ToTensor()
def __call__(self, img):
img = img.resize(self.size, self.interpolation)
img = self.toTensor(img)
img.sub_(0.5).div_(0.5)
return img
class randomSequentialSampler(sampler.Sampler):
def __init__(self, data_source, batch_size):
self.num_samples = len(data_source)
self.batch_size = batch_size
def __len__(self):
return self.num_samples
def __iter__(self):
n_batch = len(self) // self.batch_size
tail = len(self) % self.batch_size
index = torch.LongTensor(len(self)).fill_(0)
for i in range(n_batch):
random_start = random.randint(0, len(self) - self.batch_size)
batch_index = random_start + torch.arange(0, self.batch_size)
index[i * self.batch_size:(i + 1) * self.batch_size] = batch_index
# deal with tail
if tail:
random_start = random.randint(0, len(self) - self.batch_size)
tail_index = random_start + torch.arange(0, tail)
index[(i + 1) * self.batch_size:] = tail_index
return iter(index)
|
/scene-text-0.2.3.tar.gz/scene-text-0.2.3/scene_text/recognizer/MORAN_v2/tools/dataset.py
| 0.52829 | 0.296349 |
dataset.py
|
pypi
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import collections
class strLabelConverterForAttention(object):
"""Convert between str and label.
NOTE:
        Insert `EOS` into the alphabet for attention.
Args:
alphabet (str): set of the possible characters.
        ignore_case (bool, default=True): whether or not to ignore case.
"""
def __init__(self, alphabet, sep):
self._scanned_list = False
self._out_of_list = ''
self._ignore_case = True
self.sep = sep
self.alphabet = alphabet.split(sep)
self.dict = {}
for i, item in enumerate(self.alphabet):
# NOTE: 0 is reserved for 'blank' required by wrap_ctc
self.dict[item] = i
def scan(self, text):
# print(text)
text_tmp = text
text = []
for i in range(len(text_tmp)):
text_result = ''
for j in range(len(text_tmp[i])):
chara = text_tmp[i][j].lower() if self._ignore_case else text_tmp[i][j]
if chara not in self.alphabet:
if chara in self._out_of_list:
continue
else:
self._out_of_list += chara
file_out_of_list = open("out_of_list.txt", "a+")
file_out_of_list.write(chara + "\n")
file_out_of_list.close()
print('" %s " is not in alphabet...' % chara)
continue
else:
text_result += chara
text.append(text_result)
text_result = tuple(text)
self._scanned_list = True
return text_result
def encode(self, text, scanned=True):
"""Support batch or single str.
Args:
text (str or list of str): texts to convert.
Returns:
torch.IntTensor [length_0 + length_1 + ... length_{n - 1}]: encoded texts.
torch.IntTensor [n]: length of each text.
"""
self._scanned_list = scanned
if not self._scanned_list:
text = self.scan(text)
if isinstance(text, str):
text = [
self.dict[char.lower() if self._ignore_case else char]
for char in text
]
length = [len(text)]
elif isinstance(text, collections.Iterable):
length = [len(s) for s in text]
text = ''.join(text)
text, _ = self.encode(text)
return (torch.LongTensor(text), torch.LongTensor(length))
def decode(self, t, length):
"""Decode encoded texts back into strs.
Args:
torch.IntTensor [length_0 + length_1 + ... length_{n - 1}]: encoded texts.
torch.IntTensor [n]: length of each text.
Raises:
            AssertionError: when the texts and their lengths do not match.
Returns:
text (str or list of str): texts to convert.
"""
if length.numel() == 1:
length = length[0]
assert t.numel() == length, "text with length: {} does not match declared length: {}".format(t.numel(), length)
return ''.join([self.alphabet[i] for i in t])
else:
# batch mode
assert t.numel() == length.sum(), "texts with length: {} does not match declared length: {}".format(t.numel(), length.sum())
texts = []
index = 0
for i in range(length.numel()):
l = length[i]
texts.append(
self.decode(
t[index:index + l], torch.LongTensor([l])))
index += l
return texts
class averager(object):
"""Compute average for `torch.Variable` and `torch.Tensor`. """
def __init__(self):
self.reset()
def add(self, v):
if isinstance(v, Variable):
count = v.data.numel()
v = v.data.sum()
elif isinstance(v, torch.Tensor):
count = v.numel()
v = v.sum()
self.n_count += count
self.sum += v
def reset(self):
self.n_count = 0
self.sum = 0
def val(self):
res = 0
if self.n_count != 0:
res = self.sum / float(self.n_count)
return res
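# Copy `data` into an existing tensor/Variable in place, resizing the destination first.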
def loadData(v, data):
v.data.resize_(data.size()).copy_(data)
|
/scene-text-0.2.3.tar.gz/scene-text-0.2.3/scene_text/recognizer/MORAN_v2/tools/utils.py
| 0.734596 | 0.353456 |
utils.py
|
pypi
|
import logging
import math
import numpy as np
import os
import time
import cv2
import tensorflow as tf
from keras.models import load_model, model_from_json
from .EAST import locality_aware_nms as nms_locality
from .EAST import lanms as lanms
from .EAST.model import *
from .EAST.losses import *
from .EAST.data_processor import restore_rectangle
log = logging.getLogger(__name__)
def resize_image(im, max_side_len=2400):
'''
    resize the image to a size that is a multiple of 32, as required by the network
    :param im: the input image
:param max_side_len: limit of max image size to avoid out of memory in gpu
:return: the resized image and the resize ratio
'''
h, w, _ = im.shape
resize_w = w
resize_h = h
# limit the max side
if max(resize_h, resize_w) > max_side_len:
ratio = float(max_side_len) / resize_h if resize_h > resize_w else float(max_side_len) / resize_w
else:
ratio = 1.
resize_h = int(resize_h * ratio)
resize_w = int(resize_w * ratio)
resize_h = resize_h if resize_h % 32 == 0 else (resize_h // 32 - 1) * 32
resize_w = resize_w if resize_w % 32 == 0 else (resize_w // 32 - 1) * 32
im = cv2.resize(im, (int(resize_w), int(resize_h)))
ratio_h = resize_h / float(h)
ratio_w = resize_w / float(w)
return im, (ratio_h, ratio_w)
def detect(score_map, geo_map, timer, score_map_thresh=0.8, box_thresh=0.1, nms_thres=0.2):
'''
restore text boxes from score map and geo map
:param score_map:
:param geo_map:
:param timer:
    :param score_map_thresh: threshold for score map
    :param box_thresh: threshold for boxes
:param nms_thres: threshold for nms
:return:
'''
if len(score_map.shape) == 4:
score_map = score_map[0, :, :, 0]
geo_map = geo_map[0, :, :, ]
# filter the score map
xy_text = np.argwhere(score_map > score_map_thresh)
# sort the text boxes via the y axis
xy_text = xy_text[np.argsort(xy_text[:, 0])]
# restore
start = time.time()
text_box_restored = restore_rectangle(xy_text[:, ::-1]*4, geo_map[xy_text[:, 0], xy_text[:, 1], :]) # N*4*2
log.debug('{} text boxes before nms'.format(text_box_restored.shape[0]))
boxes = np.zeros((text_box_restored.shape[0], 9), dtype=np.float32)
boxes[:, :8] = text_box_restored.reshape((-1, 8))
boxes[:, 8] = score_map[xy_text[:, 0], xy_text[:, 1]]
timer['restore'] = time.time() - start
# nms part
start = time.time()
# boxes = nms_locality.nms_locality(boxes.astype(np.float64), nms_thres)
boxes = lanms.merge_quadrangle_n9(boxes.astype('float32'), nms_thres)
timer['nms'] = time.time() - start
if boxes.shape[0] == 0:
return None, timer
    # here we filter some low-score boxes by the average score map; this is different from the original paper
for i, box in enumerate(boxes):
mask = np.zeros_like(score_map, dtype=np.uint8)
cv2.fillPoly(mask, box[:8].reshape((-1, 4, 2)).astype(np.int32) // 4, 1)
boxes[i, 8] = cv2.mean(score_map, mask)[0]
boxes = boxes[boxes[:, 8] > box_thresh]
return boxes, timer
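# Keras/TensorFlow EAST detector: downloads pretrained weights on first use and returns
# quadrilateral text boxes (four corner points each) in original-image coordinates.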
class EASTDetector:
    def __init__(self, gpu_list='0',
                 model_path=os.path.join(os.path.dirname(__file__), 'EAST/model/EAST_IC15+13_model.h5')):
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list
if not os.path.isfile(model_path):
log.info('loading pretrained model from Google Drive URL')
            os.makedirs(os.path.dirname(model_path), exist_ok=True)
from scene_text.util import download_file_from_google_drive
download_file_from_google_drive('1hfIzGuQn-xApDYiucMDZvOCosyAVwvku',
model_path)
download_file_from_google_drive('1gnkdCToYQfdU3ssaOareFTBr0Nz6u4rr',
os.path.join(os.path.dirname(model_path), 'model.json'))
log.info('loading model from %s' % model_path)
json_file = open('/'.join(model_path.split('/')[0:-1]) + '/model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
self.model = model_from_json(loaded_model_json, custom_objects={'tf': tf, 'RESIZE_FACTOR': RESIZE_FACTOR})
self.model.load_weights(model_path)
def detect(self, image):
start_time = time.time()
img_resized, (ratio_h, ratio_w) = resize_image(image)
img_resized = (img_resized / 127.5) - 1
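        # Scale pixels to [-1, 1], the same normalization the EAST eval script applies.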
# feed image into model
score_map, geo_map = self.model.predict(img_resized[np.newaxis, :, :, :])
timer = {'net': 0, 'restore': 0, 'nms': 0}
boxes, timer = detect(score_map=score_map, geo_map=geo_map, timer=timer)
if boxes is not None:
boxes = boxes[:, :8].reshape((-1, 4, 2))
boxes[:, :, 0] /= ratio_w
boxes[:, :, 1] /= ratio_h
return boxes
|
/scene-text-0.2.3.tar.gz/scene-text-0.2.3/scene_text/detector/east.py
| 0.608827 | 0.286918 |
east.py
|
pypi
|
import cv2
import time
import math
import os
import argparse
import numpy as np
import tensorflow as tf
from keras.models import load_model, model_from_json
import detector.EAST.locality_aware_nms as nms_locality
import detector.EAST.lanms as lanms
parser = argparse.ArgumentParser()
parser.add_argument('--test_data_path', type=str, default='./data')
parser.add_argument('--gpu_list', type=str, default='0')
parser.add_argument('--model_path', type=str, default='model/EAST_IC15+13_model.h5')
parser.add_argument('--output_dir', type=str, default='tmp/eval/east_icdar2015_resnet_v1_50_rbox/')
FLAGS = parser.parse_args()
from detector.EAST.model import *
from detector.EAST.losses import *
from detector.EAST.data_processor import restore_rectangle
def get_images():
'''
find image files in test data path
:return: list of files found
'''
files = []
exts = ['jpg', 'png', 'jpeg', 'JPG']
for parent, dirnames, filenames in os.walk(FLAGS.test_data_path):
for filename in filenames:
for ext in exts:
if filename.endswith(ext):
files.append(os.path.join(parent, filename))
break
    print('Found {} images'.format(len(files)))
return files
def resize_image(im, max_side_len=2400):
'''
    resize the image to a size that is a multiple of 32, as required by the network
    :param im: the input image
:param max_side_len: limit of max image size to avoid out of memory in gpu
:return: the resized image and the resize ratio
'''
h, w, _ = im.shape
resize_w = w
resize_h = h
# limit the max side
if max(resize_h, resize_w) > max_side_len:
ratio = float(max_side_len) / resize_h if resize_h > resize_w else float(max_side_len) / resize_w
else:
ratio = 1.
resize_h = int(resize_h * ratio)
resize_w = int(resize_w * ratio)
resize_h = resize_h if resize_h % 32 == 0 else (resize_h // 32 - 1) * 32
resize_w = resize_w if resize_w % 32 == 0 else (resize_w // 32 - 1) * 32
im = cv2.resize(im, (int(resize_w), int(resize_h)))
ratio_h = resize_h / float(h)
ratio_w = resize_w / float(w)
return im, (ratio_h, ratio_w)
def detect(score_map, geo_map, timer, score_map_thresh=0.8, box_thresh=0.1, nms_thres=0.2):
'''
restore text boxes from score map and geo map
:param score_map:
:param geo_map:
:param timer:
    :param score_map_thresh: threshold for score map
    :param box_thresh: threshold for boxes
:param nms_thres: threshold for nms
:return:
'''
if len(score_map.shape) == 4:
score_map = score_map[0, :, :, 0]
geo_map = geo_map[0, :, :, ]
# filter the score map
xy_text = np.argwhere(score_map > score_map_thresh)
# sort the text boxes via the y axis
xy_text = xy_text[np.argsort(xy_text[:, 0])]
# restore
start = time.time()
text_box_restored = restore_rectangle(xy_text[:, ::-1]*4, geo_map[xy_text[:, 0], xy_text[:, 1], :]) # N*4*2
print('{} text boxes before nms'.format(text_box_restored.shape[0]))
boxes = np.zeros((text_box_restored.shape[0], 9), dtype=np.float32)
boxes[:, :8] = text_box_restored.reshape((-1, 8))
boxes[:, 8] = score_map[xy_text[:, 0], xy_text[:, 1]]
timer['restore'] = time.time() - start
# nms part
start = time.time()
# boxes = nms_locality.nms_locality(boxes.astype(np.float64), nms_thres)
boxes = lanms.merge_quadrangle_n9(boxes.astype('float32'), nms_thres)
timer['nms'] = time.time() - start
if boxes.shape[0] == 0:
return None, timer
    # here we filter some low-score boxes by the average score map; this is different from the original paper
for i, box in enumerate(boxes):
mask = np.zeros_like(score_map, dtype=np.uint8)
cv2.fillPoly(mask, box[:8].reshape((-1, 4, 2)).astype(np.int32) // 4, 1)
boxes[i, 8] = cv2.mean(score_map, mask)[0]
boxes = boxes[boxes[:, 8] > box_thresh]
return boxes, timer
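# Reorder the four corners so the top-left point comes first and the box is written
# in a consistent orientation.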
def sort_poly(p):
min_axis = np.argmin(np.sum(p, axis=1))
p = p[[min_axis, (min_axis+1)%4, (min_axis+2)%4, (min_axis+3)%4]]
if abs(p[0, 0] - p[1, 0]) > abs(p[0, 1] - p[1, 1]):
return p
else:
return p[[0, 3, 2, 1]]
def main(argv=None):
import os
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_list
try:
os.makedirs(FLAGS.output_dir)
except OSError as e:
if e.errno != 17:
raise
# load trained model
json_file = open('/'.join(FLAGS.model_path.split('/')[0:-1]) + '/model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json, custom_objects={'tf': tf, 'RESIZE_FACTOR': RESIZE_FACTOR})
model.load_weights(FLAGS.model_path)
img_list = get_images()
for img_file in img_list:
img = cv2.imread(img_file)[:, :, ::-1]
start_time = time.time()
img_resized, (ratio_h, ratio_w) = resize_image(img)
img_resized = (img_resized / 127.5) - 1
timer = {'net': 0, 'restore': 0, 'nms': 0}
start = time.time()
# feed image into model
score_map, geo_map = model.predict(img_resized[np.newaxis, :, :, :])
timer['net'] = time.time() - start
boxes, timer = detect(score_map=score_map, geo_map=geo_map, timer=timer)
print('{} : net {:.0f}ms, restore {:.0f}ms, nms {:.0f}ms'.format(
img_file, timer['net']*1000, timer['restore']*1000, timer['nms']*1000))
if boxes is not None:
boxes = boxes[:, :8].reshape((-1, 4, 2))
boxes[:, :, 0] /= ratio_w
boxes[:, :, 1] /= ratio_h
duration = time.time() - start_time
print('[timing] {}'.format(duration))
# save to file
if boxes is not None:
res_file = os.path.join(
FLAGS.output_dir,
'{}.txt'.format(
os.path.basename(img_file).split('.')[0]))
with open(res_file, 'w') as f:
for box in boxes:
# to avoid submitting errors
box = sort_poly(box.astype(np.int32))
if np.linalg.norm(box[0] - box[1]) < 5 or np.linalg.norm(box[3]-box[0]) < 5:
continue
f.write('{},{},{},{},{},{},{},{}\r\n'.format(
box[0, 0], box[0, 1], box[1, 0], box[1, 1], box[2, 0], box[2, 1], box[3, 0], box[3, 1],
))
cv2.polylines(img[:, :, ::-1], [box.astype(np.int32).reshape((-1, 1, 2))], True, color=(255, 255, 0), thickness=1)
img_path = os.path.join(FLAGS.output_dir, os.path.basename(img_file))
cv2.imwrite(img_path, img[:, :, ::-1])
if __name__ == '__main__':
main()
|
/scene-text-0.2.3.tar.gz/scene-text-0.2.3/scene_text/detector/EAST/eval.py
| 0.583678 | 0.241735 |
eval.py
|
pypi
|
from keras.optimizers import Optimizer
from keras import backend as K
import six
import copy
from six.moves import zip
from keras.utils.generic_utils import serialize_keras_object
from keras.utils.generic_utils import deserialize_keras_object
from keras.legacy import interfaces
class AdamW(Optimizer):
"""Adam optimizer.
Default parameters follow those provided in the original paper.
# Arguments
lr: float >= 0. Learning rate.
beta_1: float, 0 < beta < 1. Generally close to 1.
beta_2: float, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor.
decay: float >= 0. Learning rate decay over each update.
weight_decay: float >= 0. Decoupled weight decay over each update.
# References
- [Adam - A Method for Stochastic Optimization](http://arxiv.org/abs/1412.6980v8)
- [Optimization for Deep Learning Highlights in 2017](http://ruder.io/deep-learning-optimization-2017/index.html)
- [Fixing Weight Decay Regularization in Adam](https://arxiv.org/abs/1711.05101)
"""
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, weight_decay=1e-4, # decoupled weight decay (1/4)
epsilon=1e-8, decay=0., **kwargs):
super(AdamW, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
self.decay = K.variable(decay, name='decay')
self.wd = K.variable(weight_decay, name='weight_decay') # decoupled weight decay (2/4)
self.epsilon = epsilon
self.initial_decay = decay
@interfaces.legacy_get_updates_support
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
wd = self.wd # decoupled weight decay (3/4)
lr = self.lr
if self.initial_decay > 0:
lr *= (1. / (1. + self.decay * K.cast(self.iterations,
K.dtype(self.decay))))
t = K.cast(self.iterations, K.floatx()) + 1
lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
(1. - K.pow(self.beta_1, t)))
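        # Bias-corrected Adam step size; the decoupled weight decay below is applied directly
        # to the parameters instead of being folded into the gradients (AdamW).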
ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
self.weights = [self.iterations] + ms + vs
for p, g, m, v in zip(params, grads, ms, vs):
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon) - lr * wd * p # decoupled weight decay (4/4)
self.updates.append(K.update(m, m_t))
self.updates.append(K.update(v, v_t))
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'decay': float(K.get_value(self.decay)),
'weight_decay': float(K.get_value(self.wd)),
'epsilon': self.epsilon}
base_config = super(AdamW, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
/scene-text-0.2.3.tar.gz/scene-text-0.2.3/scene_text/detector/EAST/adamw.py
| 0.919077 | 0.519521 |
adamw.py
|
pypi
|