ext (string, 9 classes) | sha (string, 40 chars) | content (string, 3 to 1.04M chars)
---|---|---|
py | 7df98ea0fd0be04550eb56c66e69646d8f29cf42 | #coding:utf-8
from dao.session.session import SessionDao
from model.session import Session
"""Session Repository
"""
class SessionRepository(object):
def __init__(self):
pass
def find_by_session_id(self, session_id):
session = Session({"session_id":session_id})
dao = SessionDao(session)
result = dao.get()
return Session(result)
def create(self, session):
dao = SessionDao(session)
dao.create()
def update(self, session):
dao = SessionDao(session)
dao.update()
def delete(self, session):
dao = SessionDao(session)
dao.delete()
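# Hedged usage sketch, not part of the original module: assumes SessionDao.get()
# returns a dict of session attributes and that Session accepts such a dict, as the
# repository methods above imply; the session id value is hypothetical.
if __name__ == "__main__":
    repo = SessionRepository()
    repo.create(Session({"session_id": "example-session-id"}))
    found = repo.find_by_session_id("example-session-id")
    repo.update(found)
    repo.delete(found)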
|
py | 7df98f96c007eb84e7e66e49b61713250bb1563b | # os
import os
# argument parsing helper
import fire
# these are custom-written functions, part of this library,
# which you can find in the src/directional_clustering folder
# JSON file directory
from directional_clustering import JSON
# extended version of Mesh
from directional_clustering.mesh import MeshPlus
# clustering algorithms factory
from directional_clustering.clustering import ClusteringFactory
# vector field
from directional_clustering.fields import VectorField
#plotters
from directional_clustering.plotters import PlyPlotter
# ==============================================================================
# Main function: directional_clustering
# ==============================================================================
def results_plotting(filename="perimeter_supported_slab_m_1_variational kmeans_5",
vectorfield_tag="m_1",
plot_faces=True,
paint_clusters=True,
plot_mesh_edges=False,
plot_vector_fields=True,
plot_original_field=False,
plot_cones=False):
"""
Plot clustering results stored in a JSON file.
Parameters
----------
filename : `str`
The name of the JSON file that stores the clustering results w.r.t. a certain
\n mesh, attribute, algorithm and number of clusters.
\nAll JSON files must reside in this repo's data/json folder.
Defaults to "perimeter_supported_slab_m_1_variational kmeans_5".
vectorfield_tag : `str`
The name of the vector field on which clustering has been done; it should correspond
to the `filename`.
Defaults to "m_1".
plot_faces : `bool`
Plots the faces of the input mesh.
\nDefaults to True.
paint_clusters : `bool`
Colors the faces according to their cluster.
\nDefaults to True.
plot_mesh_edges : `bool`
Plots the edges of the input mesh.
\nDefaults to False.
plot_vector_fields : `bool`
Plots the clustered vector field atop the input mesh.
\nDefaults to True.
plot_original_field : `bool`
Plots the original vector field, before clustering, atop the input mesh.
\nDefaults to False.
plot_cones : `bool`
Plots the cones atop the input mesh.
\nDefaults to False.
"""
# ============================================================================
# Plot stuff
# ============================================================================
# there is a lot of potential work to do for visualization!
# below there is the simplest snippet, but you can see more stuff
# in the scripts/visualization folder
# load the results back from the JSON file
name_in = filename + ".json"
json_in = os.path.abspath(os.path.join(JSON, name_in))
mesh_to_plot = MeshPlus.from_json(json_in)
# PlyPlotter is a custom wrapper around a Plotly graph object (Figure)
# that handles formatting and adjustments to the data structure.
plotter = PlyPlotter()
# color up the faces of the mesh according to their cluster
if plot_faces:
plotter.plot_trimesh(mesh_to_plot, paint_clusters, plot_mesh_edges)
# plot vector fields on mesh as lines
if plot_vector_fields:
clustered_field_name = vectorfield_tag + "_clustered"
clustered_field_to_plot = mesh_to_plot.vector_field(
clustered_field_name)
plotter.plot_vector_field_lines(mesh_to_plot, clustered_field_to_plot,
(0, 0, 255), True, 0.07)
if plot_original_field:
vectors = mesh_to_plot.vector_field(vectorfield_tag)
plotter.plot_vector_field_lines(mesh_to_plot, vectors, (50, 50, 50),
True, 0.07)
# plot cones
if plot_cones:
vectors = mesh_to_plot.vector_field(vectorfield_tag)
plotter.plot_vector_field_cones(mesh_to_plot, vectors)
# set title, this will also set the final aspect ratio according to the data
plotter.set_title(title="Example 01 Directional Clustering")
# show to screen
plotter.show()
if __name__ == '__main__':
fire.Fire(results_plotting)
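# Hedged usage sketch: because the function is exposed through fire.Fire, it can be
# driven from the command line; the script name below is an assumption, and the flag
# values simply echo the defaults documented above.
#
#   python plot_results.py \
#       --filename="perimeter_supported_slab_m_1_variational kmeans_5" \
#       --vectorfield_tag="m_1" \
#       --plot_vector_fields=True \
#       --plot_mesh_edges=False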
|
py | 7df98ff4366a034d6c8f9805b81d90e28bd0f25c | import unittest
from test import support
from test.support import import_module
# Skip test if _thread or _tkinter wasn't built, or idlelib is missing,
# or if the tcl/tk version is before 8.5, which is needed for ttk widgets.
import_module('threading') # imported by PyShell, imports _thread
tk = import_module('tkinter') # imports _tkinter
idlelib = import_module('idlelib')
idlelib.testing = True # Avoid locale-changed test error
# Without test_main present, test.libregrtest.runtest.runtest_inner
# calls (line 173) unittest.TestLoader().loadTestsFromModule(module)
# which calls load_tests() if it finds it. (Unittest.main does the same.)
from idlelib.idle_test import load_tests
if __name__ == '__main__':
unittest.main(verbosity=2, exit=False)
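# Hedged usage sketch: assuming this module is installed as test.test_idle in a
# CPython checkout (an assumption based on the imports above), the suite can be run
# directly with:
#
#   python -m test.test_idle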
|
py | 7df990f8722a0bfc6ab9ef46d343014d8d7d97ca | # Copyright (c) 2022 Ed Harry, Wellcome Sanger Institute, Genome Research Limited
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import os
import sys
import warnings
from concurrent.futures import ThreadPoolExecutor as TPE
from dataclasses import dataclass
from enum import Enum, auto
from functools import partial
from importlib import import_module
from importlib.metadata import version as get_version
from io import StringIO
from itertools import chain, groupby, tee
from pathlib import Path
from threading import Thread
import click as ck
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tqdm import tqdm as _tqdm
from tqdm.contrib.concurrent import thread_map as _tqdm_map
mpl.use("agg")
mpl.rc("font", **{"family": "sans", "weight": "normal", "size": 14})
mpl.rcParams["agg.path.chunksize"] = 10000
plt.style.use("ggplot")
plt.rcParams["figure.figsize"] = (10.0, 7.0)
import seaborn as sb
sb.set(style="darkgrid", color_codes=True)
NAME = __name__.split(".")[0]
VERSION = get_version(NAME)
DESCRIPTION = "Collect and process statistics from aligned linked-reads."
LICENCE = (
"Copyright (c) 2022 Ed Harry, Wellcome Sanger Institute, Genome Research Limited."
)
sam_parser = getattr(import_module(NAME + "._" + NAME + "_C"), "_" + NAME)
def create_logger_handle(stream, typeid, level):
class LogFilter(logging.Filter):
def __init__(self, level):
super().__init__()
self.__level = level
def filter(self, record):
return record.levelno == self.__level
handle = logging.StreamHandler(stream=stream)
handle.setLevel(level=level)
handle.setFormatter(
logging.Formatter("[%(name)s {id}] :: %(message)s".format(id=typeid))
)
handle.addFilter(LogFilter(level=level))
return handle
LOGGER = logging.getLogger(NAME)
LOGGER.setLevel(logging.INFO)
LOGGER.addHandler(
create_logger_handle(stream=sys.stderr, typeid="status", level=logging.INFO)
)
LOGGER.addHandler(
create_logger_handle(stream=sys.stderr, typeid="error", level=logging.ERROR)
)
LOGGER.addHandler(
create_logger_handle(stream=sys.stderr, typeid="warning", level=logging.WARNING)
)
class LoggerHandle:
def __init__(self, id):
self.threads_and_handles = []
self.info = self.add_logger(log_func=LOGGER.info, id=id)
self.error = self.add_logger(log_func=LOGGER.error, id=id)
def add_logger(self, log_func, id):
read, write = os.pipe()
def _thread_func():
with os.fdopen(read, encoding="utf-8", errors="replace") as file:
for line in file:
log_func(f"({id}) {line[:-1]}")
thread = Thread(target=_thread_func)
thread.start()
self.threads_and_handles.append((thread, write))
return write
@dataclass
class LogHandles:
info: int
error: int
def __enter__(self):
return self.LogHandles(info=self.info, error=self.error)
def __exit__(self, exc_type, exc_val, exc_tb):
for thread, handle in self.threads_and_handles:
os.close(handle)
thread.join()
def _showwarning(message, category, filename, lineno, file=None, line=None):
LOGGER.warning(
f"[{filename} {lineno}] {message}"
if line is None
else f"[{filename} {lineno}] {message} {line}"
)
warnings.showwarning = _showwarning
warnings.filterwarnings("ignore", message="divide by zero")
warnings.filterwarnings("ignore", message="invalid value encountered in double_scalars")
warnings.filterwarnings("ignore", message="invalid value encountered in true_divide")
class TqdmToLogger(StringIO):
buf = ""
def __init__(self):
super(TqdmToLogger, self).__init__()
def write(self, buf):
self.buf = buf.strip("\r\n")
return len(self.buf)
def flush(self):
LOGGER.info(self.buf)
def close(self):
LOGGER.info("")
super(TqdmToLogger, self).close()
tqdm = partial(_tqdm, file=TqdmToLogger())
tqdm_map = partial(_tqdm_map, file=TqdmToLogger())
MOL_DATA_HEADER = (
"Sample Name",
"Molecule Length",
"No. Reads",
"MI",
"BX",
"Reference",
"Mean Read Depth",
"Mean MapQ",
"No. Gaps",
"Mean Gap Size",
"Max Gap Size",
)
COV_DATA_HEADER = ("Sample Name", "Reference", "Gap Length")
MOL_LEN_HIST_DATA_HEADER = (
"PDF",
"CDF",
"Molecule Length",
"Sample Name",
"Min No. Reads",
)
COV_GAP_HIST_DATA_HEADER = ("PDF", "CDF", "Coverage Gap Length", "Sample Name")
def ReadCSV(path, header):
df = pd.read_csv(path)
if len(df.columns.values) == len(header) and np.all(df.columns.values == header):
return df
else:
LOGGER.error(
f"Error reading '{path}'; expected header:'{header}', got:'{df.columns.values}'"
)
return pd.DataFrame()
class CallBack:
class Types(Enum):
AF = auto()
MD = auto()
CD = auto()
MLHD = auto()
CGHD = auto()
CP = auto()
PP = auto()
def __init__(self, cb_type):
self.cb_type = cb_type
@property
def is_AF(self):
return self.cb_type == self.Types.AF
@property
def is_MD(self):
return self.cb_type == self.Types.MD
@property
def is_MLHD(self):
return self.cb_type == self.Types.MLHD
@property
def is_CGHD(self):
return self.cb_type == self.Types.CGHD
@property
def is_CP(self):
return self.cb_type == self.Types.CP
@property
def is_PP(self):
return self.cb_type == self.Types.PP
@property
def is_CD(self):
return self.cb_type == self.Types.CD
class AlignmentFile(CallBack):
def __init__(self, file, ref, use_mi_tags, cluster_threshold, name=None):
super().__init__(self.Types.AF)
self.file = file
self.ref = ref
self.use_mi_tags = use_mi_tags
self.cluster_threshold = cluster_threshold
self.name = name
@property
def stem_name(self):
return self.file.stem if str(self.file) != "-" else "<stdin>"
class Data(CallBack):
def __init__(self, file, cb_type):
super().__init__(cb_type)
self.file = file
class MoleculeData(Data):
def __init__(self, file):
super().__init__(file, self.Types.MD)
class CoverageData(Data):
def __init__(self, file):
super().__init__(file, self.Types.CD)
class MolLenHistData(Data):
def __init__(self, file):
super().__init__(file, self.Types.MLHD)
class CovGapHistData(Data):
def __init__(self, file):
super().__init__(file, self.Types.CGHD)
class Prefix(CallBack):
def __init__(self, prefix, cb_type):
super().__init__(cb_type)
self.prefix = prefix
class CSVPrefix(Prefix):
def __init__(
self, prefix, save_summ, save_mol, save_cov, save_mol_hist, save_cov_hist
):
super().__init__(prefix, self.Types.CP)
self.save_summ = save_summ
self.save_mol = save_mol
self.save_cov = save_cov
self.save_mol_hist = save_mol_hist
self.save_cov_hist = save_cov_hist
@property
def any_set(self):
return (
self.save_summ
or self.save_mol
or self.save_cov
or self.save_mol_hist
or self.save_cov_hist
)
class PlotPrefix(Prefix):
def __init__(self, prefix):
super().__init__(prefix, self.Types.PP)
def ConcatDF(it):
def iter_then_empty():
yield from it
yield pd.DataFrame()
return pd.concat(iter_then_empty(), ignore_index=True)
def GetAllStats(alignment_files, molecular_data, coverage_data, min_reads, threads):
alignment_files = tuple(alignment_files)
def GetAllStatsFromAFs():
@dataclass(frozen=True, eq=True)
class BasicStats:
median_insert_size: int
total_read_length: int
total_alignments: int
total_dup: int
total_qcf: int
total_unm: int
total_nomi: int
total_nobx: int
total_zeromq: int
def ReadSAM(alignment_file, threads):
with LoggerHandle(id=f"Read {alignment_file.stem_name}") as handles:
(
genome_length,
ref_names,
basic_stats,
molecule_data,
coverage_gaps,
) = sam_parser(
log=handles.info,
error=handles.error,
num_threads=threads,
group_cutoff_dis=alignment_file.cluster_threshold,
sam_file_name=str(alignment_file.file),
fasta_file_name=alignment_file.ref,
override_name=alignment_file.name,
fallback_name=alignment_file.stem_name,
use_mi=alignment_file.use_mi_tags,
)
return (
genome_length,
{name: BasicStats(*stats) for name, stats in basic_stats},
tuple(
(name, tuple((ref_names[tid], t2) for tid, t2 in t1))
for name, t1 in molecule_data
),
tuple(
(name, tuple((ref_names[tid], t2) for tid, t2 in t1))
for name, t1 in coverage_gaps
),
)
def GetStats(alignment_file, threads):
genome_length, basic_stats, molecule_data, coverage_gaps = ReadSAM(
alignment_file, threads
)
molecule_data = pd.DataFrame(
(
(
name,
max(0, pos_max - pos_min),
n_reads,
mi,
bx,
reference_name,
total_read_length / max(1, pos_max - pos_min),
total_mapping_quality / n_reads,
len(gaps),
np.mean(gaps) if len(gaps) > 0 else 0,
max(gaps) if len(gaps) > 0 else 0,
)
for name, a in molecule_data
for reference_name, b in a
for (bx, _), c in b
for n_reads, mi, total_mapping_quality, pos_min, pos_max, total_read_length, gaps in c
),
columns=MOL_DATA_HEADER,
)
coverage_data = pd.DataFrame(
(
(name, reference_name, c)
for name, a in coverage_gaps
for reference_name, b in a
for c in b
),
columns=COV_DATA_HEADER,
)
def n_stats(data, ns):
d = np.cumsum((0,) + tuple(np.sort(data)[::-1]))
return tuple(
d[x] - d[x - 1]
for n in ns
for x in (np.where(d >= (d[-1] * n / 100))[0].min(),)
)
return (
molecule_data,
coverage_data,
pd.DataFrame(
(
(
(
name,
genome_length,
bs.total_alignments,
bs.total_dup / bs.total_alignments,
bs.total_qcf / bs.total_alignments,
bs.total_unm / bs.total_alignments,
(bs.total_alignments - bs.total_unm)
/ bs.total_alignments,
bs.total_nobx / bs.total_alignments,
bs.total_nomi / bs.total_alignments,
bs.total_zeromq / bs.total_alignments,
)
+ n_stats(data["No. Reads"], (50, 90))
+ (
(data["No. Reads"] ** 2).sum()
/ data["No. Reads"].sum(),
)
+ tuple(
chain.from_iterable(
(
(
(
q.shape[0],
q.mean(),
d.mean(),
)
+ n_stats(
d,
(50, 90),
)
+ ((d ** 2).sum() / d.sum(),)
)
for m in min_reads
for s in (data[data["No. Reads"] >= m],)
for d in (s["Molecule Length"],)
for q in (s["Mean MapQ"],)
)
)
)
+ (
bs.median_insert_size,
bs.total_read_length / genome_length,
)
+ tuple(
chain.from_iterable(
(
(
s["Mean Read Depth"].mean(),
s["Molecule Length"].sum() / genome_length,
)
for m in min_reads
for s in (data[data["No. Reads"] >= m],)
)
)
)
)
for name in molecule_data["Sample Name"].unique()
for bs in (basic_stats[name],)
for data in (
molecule_data[molecule_data["Sample Name"] == name],
)
),
columns=(
"Sample Name",
"Genome Length",
"Total Alignments",
"Duplicates",
"QCFail",
"Unmapped",
"Mapped",
"No BX",
"No MI",
"Zero MapQ",
"N50 Reads Per Molecule",
"N90 Reads Per Molecule",
"auN Reads Per Molecule",
)
+ tuple(
chain.from_iterable(
(
(
f"No. Molecules (No. Reads >= {m})",
f"Mean Read MapQ Per Molecule (No. Reads >= {m})",
f"Mean Molecule Length (No. Reads >= {m})",
f"N50 Molecule Length (No. Reads >= {m})",
f"N90 Molecule Length (No. Reads >= {m})",
f"auN Molecule Length (No. Reads >= {m})",
)
for m in min_reads
)
)
)
+ ("Median Insert Size", "Mean Short Read Depth")
+ tuple(
chain.from_iterable(
(
(
f"Mean Short Read Depth Per Molecule (No. Reads >= {m})",
f"Molecule Read Depth (No. Reads >= {m})",
)
for m in min_reads
)
)
),
),
)
max_workers = max(min(threads, len(alignment_files)), 1)
with TPE(max_workers=max_workers) as exe:
return exe.map(
partial(GetStats, threads=max(threads // max_workers, 1)),
alignment_files,
)
def GetAllStatsFromCSVs(data, name, header):
files = tuple(mol.file for mol in data)
return (
iter(
tqdm_map(
partial(ReadCSV, header=header),
files,
max_workers=threads,
desc=f"Read CSVs ({name})",
unit=" CSV files",
unit_scale=True,
)
)
if len(files) > 0
else ()
)
summary_dfs = []
cov_dfs = []
def yield_all():
for df, cov_df, summ_df in GetAllStatsFromAFs():
summary_dfs.append(summ_df)
cov_dfs.append(cov_df)
yield df
yield from GetAllStatsFromCSVs(
molecular_data, "molecular data", MOL_DATA_HEADER
)
return (
ConcatDF(yield_all()),
ConcatDF(
chain(
cov_dfs,
GetAllStatsFromCSVs(coverage_data, "coverage data", COV_DATA_HEADER),
)
),
ConcatDF(summary_dfs),
)
def GetAllMolLenHists(df, hist_data, min_reads, threads):
def GetMolLenHist(args):
MAX_BINS = 1024
sample_name, min_reads = args
data = df[(df["Sample Name"] == sample_name) & (df["No. Reads"] >= min_reads)][
"Molecule Length"
]
prob, length = np.histogram(
data,
bins=np.interp(
np.linspace(
0,
len(data),
np.clip(
len(np.histogram_bin_edges(data, bins="auto")) - 1, 1, MAX_BINS
)
+ 1,
),
np.arange(len(data)),
np.sort(data),
),
density=True,
)
select = ~np.isnan(prob)
return pd.DataFrame(
{
"PDF": prob[select],
"CDF": np.cumsum(prob[select]) / prob[select].sum(),
"Molecule Length": ((length[:-1] + length[1:]) / 2)[select],
"Sample Name": sample_name,
"Min No. Reads": str(min_reads),
}
)
def yield_all():
if df.shape[0] > 0:
yield from iter(
tqdm_map(
GetMolLenHist,
tuple(
(name, n)
for name in df["Sample Name"].unique()
for n in min_reads
),
max_workers=threads,
desc="Generate Molecule Length Histogram Data",
unit=" Data-Sets",
unit_scale=True,
)
)
hist_files = tuple(hist.file for hist in hist_data)
yield from (
iter(
tqdm_map(
partial(ReadCSV, header=MOL_LEN_HIST_DATA_HEADER),
hist_files,
max_workers=threads,
desc="Read CSVs (molecule length histogram data)",
unit=" CSV files",
unit_scale=True,
)
)
if len(hist_files) > 0
else ()
)
return ConcatDF(yield_all())
def GetAllCovGapHists(df, hist_data, threads):
def GetMolLenHist(sample_name):
MAX_BINS = 1024
data = df[(df["Sample Name"] == sample_name)]["Gap Length"]
prob, length = np.histogram(
data,
bins=np.interp(
np.linspace(
0,
len(data),
np.clip(
len(np.histogram_bin_edges(data, bins="auto")) - 1, 1, MAX_BINS
)
+ 1,
),
np.arange(len(data)),
np.sort(data),
),
density=True,
)
select = ~np.isnan(prob)
return pd.DataFrame(
{
"PDF": prob[select],
"CDF": np.cumsum(prob[select]) / prob[select].sum(),
"Coverage Gap Length": ((length[:-1] + length[1:]) / 2)[select],
"Sample Name": sample_name,
}
)
def yield_all():
if df.shape[0] > 0:
yield from iter(
tqdm_map(
GetMolLenHist,
tuple(df["Sample Name"].unique()),
max_workers=threads,
desc="Generate Coverage Gap Histogram Data",
unit=" Data-Sets",
unit_scale=True,
)
)
hist_files = tuple(hist.file for hist in hist_data)
yield from (
iter(
tqdm_map(
partial(ReadCSV, header=COV_GAP_HIST_DATA_HEADER),
hist_files,
max_workers=threads,
desc="Read CSVs (coverage gap histogram data)",
unit=" CSV files",
unit_scale=True,
)
)
if len(hist_files) > 0
else ()
)
return ConcatDF(yield_all())
def documenter(docstring):
def inner_documenter(f):
f.__doc__ = docstring
return f
return inner_documenter
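# Hedged illustration, not part of the original module: `documenter` exists because an
# f-string literal is not picked up as a docstring, so the rendered text is assigned to
# __doc__ explicitly instead. The function name below is hypothetical.
#
#   @documenter(f"{NAME} {VERSION} example command")
#   def example_command():
#       pass
#
#   assert example_command.__doc__ == f"{NAME} {VERSION} example command"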
@ck.group(chain=True)
@ck.option(
"-t",
"--threads",
type=ck.IntRange(1, None, clamp=True),
default=4,
help="Number of threads to use. Default=4.",
)
@ck.option(
"-m",
"--min_reads",
type=ck.IntRange(1, None, clamp=True),
multiple=True,
default=(1, 3, 5, 10),
help="Minimum reads per molecule for analysis, multiple values possible. Default=(1, 3, 5, 10).",
)
@ck.version_option()
@documenter(
f"""
{NAME} {VERSION}
\b
{DESCRIPTION}
\b
{LICENCE}
\b
\b
Usage Example, read SAM/BAM/CRAM from <stdin> and save the summary and molecule data in csv format. Analyse molecules grouped by 5 and 10 minimum reads per molecule.
-------------
...<sam/bam/cram> | LinkStats -t 16 -m 5 -m 10 sam-data - save-csvs results/csvs/
\b
Usage Example, combine histogram data from multiple sources into summary plots.
-------------
LinkStats -t 16 hist-data results/dataset_1_molecular_length_histograms.csv.bz2 hist-data results/dataset_2_molecular_length_histograms.csv.bz2 hist-data results/dataset_3_molecular_length_histograms.csv.bz2 save-plots results/plots/
"""
)
def cli(threads, min_reads):
pass
@cli.command()
@ck.argument("path", type=ck.Path(readable=True, path_type=Path))
@ck.option(
"-r",
"--reference",
type=ck.Path(exists=True),
help="FASTA reference for CRAM decoding.",
)
@ck.option(
"-n", "--name", type=str, help="Sample name, overrides name from SM or RG tags."
)
@ck.option(
"--mi/--no-mi",
default=False,
help="Group by MI:I as well as BX:Z SAM tags. Default=False.",
)
@ck.option(
"-t",
"--threshold",
type=int,
default=50000,
help="Maximum allowed separation between alignments grouped to the same molecule.",
)
@documenter(
"""
Read SAM/BAM/CRAM data from PATH.
\b
Creates summary and molecular data-sets for each sample-name (SM:Z tag or RG:Z SAM tag).
\b
Alignments must have BX:Z (barcode) SAM tags.
"""
)
def sam_data(path, mi, threshold, reference=None, name=None):
return AlignmentFile(
file=path, ref=reference, name=name, use_mi_tags=mi, cluster_threshold=threshold
)
@cli.command()
@ck.argument("file", type=ck.Path(readable=True, path_type=Path))
@documenter(
"""
Read in molecular data from a CSV FILE.
\b
Use to re-calculate histogram data.
"""
)
def molecule_data(file):
return MoleculeData(file=file)
@cli.command()
@ck.argument("file", type=ck.Path(readable=True, path_type=Path))
@documenter(
"""
Read in coverage gap data from a CSV FILE.
\b
Use to re-calculate histogram data.
"""
)
def coverage_data(file):
return CoverageData(file=file)
@cli.command()
@ck.argument("file", type=ck.Path(readable=True, path_type=Path))
@documenter(
"""
Read in molecule length histogram data from a CSV FILE.
\b
Use to re-generate or create combined plots.
"""
)
def mol_len_hist_data(file):
return MolLenHistData(file=file)
@cli.command()
@ck.argument("file", type=ck.Path(readable=True, path_type=Path))
@documenter(
"""
Read in coverage gap histogram data from a CSV FILE.
\b
Use to re-generate or create combined plots.
"""
)
def cov_gap_hist_data(file):
return CovGapHistData(file=file)
@cli.command()
@ck.argument("prefix", type=ck.Path(readable=True, path_type=Path))
@ck.option(
"--summ/--no-summ", default=True, help="Save summary data table. Default=True."
)
@ck.option(
"--mol/--no-mol", default=False, help="Save molecule data table. Default=False."
)
@ck.option(
"--cov/--no-cov", default=False, help="Save coverage data table. Default=False."
)
@ck.option(
"--mol-hist/--no-mol-hist",
default=False,
help="Save molecular-length histogram data table. Default=False.",
)
@ck.option(
"--cov-hist/--no-cov-hist",
default=False,
help="Save coverage-gap histogram data table. Default=False.",
)
@documenter(
"""
Saves summary, molecule or histogram data to CSV files at PREFIX_.
\b
By default, only summary data is saved.
"""
)
def save_csvs(prefix, summ, mol, cov, mol_hist, cov_hist):
return CSVPrefix(
prefix,
save_summ=summ,
save_mol=mol,
save_cov=cov,
save_mol_hist=mol_hist,
save_cov_hist=cov_hist,
)
@cli.command()
@ck.argument("prefix", type=ck.Path(readable=True, path_type=Path))
@documenter(
"""
Generates plots from any histogram data and saves them at PREFIX_.
"""
)
def save_plots(prefix):
return PlotPrefix(prefix)
@cli.result_callback()
def run(callbacks, threads, min_reads):
def Error(msg):
LOGGER.error(msg)
sys.exit(1)
LOGGER.info("Starting...")
LOGGER.info("")
csv = tuple(cp for cp in callbacks if cp.is_CP)
if len(csv) == 0:
csv = None
else:
if len(csv) > 1:
warnings.warn(
f"More than one CSV prefix specified, using last one: {csv[-1].prefix}"
)
csv = csv[-1]
if csv and csv.save_summ and len(tuple(af for af in callbacks if af.is_AF)) == 0:
warnings.warn("No SAM data input, cannot output summary data")
csv.save_summ = False
if csv and not csv.any_set:
Error("CSV prefix specified, but no data set to be saved")
plot = tuple(pp.prefix for pp in callbacks if pp.is_PP)
if len(plot) == 0:
plot = None
else:
if len(plot) > 1:
warnings.warn(
f"More than one Plot prefix specified, using last one: {plot[-1]}"
)
plot = plot[-1]
if not (csv or plot):
Error("Neither CSV nor Plot prefix specified, nothing to do")
mol_data, cov_data, summary_data = GetAllStats(
(af for af in callbacks if af.is_AF),
(md for md in callbacks if md.is_MD),
(cd for cd in callbacks if cd.is_CD),
min_reads,
threads,
)
if summary_data.shape[0] > 0:
LOGGER.info("")
for line in str(summary_data).split("\n"):
LOGGER.info(line)
LOGGER.info("")
mol_hist_data = (
GetAllMolLenHists(
mol_data, (hd for hd in callbacks if hd.is_MLHD), min_reads, threads
)
if ((csv and csv.save_mol_hist) or plot)
else None
)
cov_hist_data = (
GetAllCovGapHists(cov_data, (hd for hd in callbacks if hd.is_CGHD), threads)
if ((csv and csv.save_cov_hist) or plot)
else None
)
def base_get_path(f, prefix):
return prefix / f if prefix.is_dir() else Path(str(prefix) + "_" + f)
generated = []
if csv:
csv.prefix.parent.mkdir(parents=True, exist_ok=True)
get_path = partial(base_get_path, prefix=csv.prefix)
def save_csv(args):
df, name = args
name = get_path(name)
df.to_csv(name, index=False)
return name
generated.append(
iter(
tqdm_map(
save_csv,
(
(
((summary_data, "summary_data.csv"),)
if (csv.save_summ and summary_data.shape[0] > 0)
else ()
)
+ (
((mol_data, "molecular_data.csv.bz2"),)
if csv.save_mol
else ()
)
+ (
((cov_data, "coverage_data.csv.bz2"),)
if csv.save_cov
else ()
)
+ (
((mol_hist_data, "molecular_length_histograms.csv.bz2"),)
if csv.save_mol_hist
else ()
)
+ (
((cov_hist_data, "coverage_gap_histograms.csv.bz2"),)
if csv.save_cov_hist
else ()
)
),
max_workers=threads,
desc="Saving CSV data",
unit=" Data-Sets",
)
)
)
if plot:
plot.parent.mkdir(parents=True, exist_ok=True)
get_path = partial(base_get_path, prefix=plot)
if mol_hist_data.shape[0] > 0:
def save_mol_plots(col, hue, n, typ):
name = get_path(f"molecular_length_{typ}s_{n}.png")
sb.relplot(
kind="line",
data=mol_hist_data,
col=col,
hue=hue,
x="Molecule Length",
y=typ,
).set(
xscale="log", yscale=("log" if typ == "PDF" else "linear")
).savefig(
name,
dpi=200,
bbox_inches="tight",
)
return name
generated.append(
(
save_mol_plots(col, hue, i + 1, typ)
for i, col, hue, typ in tqdm(
tuple(
(i, col, hue, typ)
for i, (col, hue) in enumerate(
(col, hue)
for colhue in (("Sample Name", "Min No. Reads"),)
for col, hue in (colhue, colhue[::-1])
)
for typ in ("PDF", "CDF")
),
desc="Saving Molecular Length Plots",
unit=" Plots",
)
)
)
if cov_hist_data.shape[0] > 0:
def save_cov_plots(typ):
name = get_path(f"coverage_gap_{typ}s.png")
sb.relplot(
kind="line",
data=cov_hist_data,
hue="Sample Name",
x="Coverage Gap Length",
y=typ,
).set(
xscale="log", yscale=("log" if typ == "PDF" else "linear")
).savefig(
name,
dpi=200,
bbox_inches="tight",
)
return name
generated.append(
(
save_cov_plots(typ)
for typ in tqdm(
("PDF", "CDF"),
desc="Saving Coverage Gap Plots",
unit=" Plots",
)
)
)
generated = tuple(("\t" + str(n)) for n in chain.from_iterable(generated))
LOGGER.info("")
LOGGER.info("Generated files:")
for line in generated:
LOGGER.info(line)
LOGGER.info("")
LOGGER.info("Done")
|
py | 7df990fa0e5dd1b8c9046fd8d73d91b216543a9a | """Component to monitor plants.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/plant/
"""
import logging
import asyncio
from datetime import datetime, timedelta
from collections import deque
import voluptuous as vol
from homeassistant.const import (
STATE_OK, STATE_PROBLEM, STATE_UNKNOWN, TEMP_CELSIUS, ATTR_TEMPERATURE,
CONF_SENSORS, ATTR_UNIT_OF_MEASUREMENT)
from homeassistant.components import group
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.core import callback
from homeassistant.helpers.event import async_track_state_change
from homeassistant.components.recorder.util import session_scope, execute
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'plant'
READING_BATTERY = 'battery'
READING_TEMPERATURE = ATTR_TEMPERATURE
READING_MOISTURE = 'moisture'
READING_CONDUCTIVITY = 'conductivity'
READING_BRIGHTNESS = 'brightness'
ATTR_PROBLEM = 'problem'
ATTR_SENSORS = 'sensors'
PROBLEM_NONE = 'none'
ATTR_MAX_BRIGHTNESS_HISTORY = 'max_brightness'
# we're not returning only one value, we're returning a dict here. So we need
# to have a separate literal for it to avoid confusion.
ATTR_DICT_OF_UNITS_OF_MEASUREMENT = 'unit_of_measurement_dict'
CONF_MIN_BATTERY_LEVEL = 'min_' + READING_BATTERY
CONF_MIN_TEMPERATURE = 'min_' + READING_TEMPERATURE
CONF_MAX_TEMPERATURE = 'max_' + READING_TEMPERATURE
CONF_MIN_MOISTURE = 'min_' + READING_MOISTURE
CONF_MAX_MOISTURE = 'max_' + READING_MOISTURE
CONF_MIN_CONDUCTIVITY = 'min_' + READING_CONDUCTIVITY
CONF_MAX_CONDUCTIVITY = 'max_' + READING_CONDUCTIVITY
CONF_MIN_BRIGHTNESS = 'min_' + READING_BRIGHTNESS
CONF_MAX_BRIGHTNESS = 'max_' + READING_BRIGHTNESS
CONF_CHECK_DAYS = 'check_days'
CONF_SENSOR_BATTERY_LEVEL = READING_BATTERY
CONF_SENSOR_MOISTURE = READING_MOISTURE
CONF_SENSOR_CONDUCTIVITY = READING_CONDUCTIVITY
CONF_SENSOR_TEMPERATURE = READING_TEMPERATURE
CONF_SENSOR_BRIGHTNESS = READING_BRIGHTNESS
SCHEMA_SENSORS = vol.Schema({
vol.Optional(CONF_SENSOR_BATTERY_LEVEL): cv.entity_id,
vol.Optional(CONF_SENSOR_MOISTURE): cv.entity_id,
vol.Optional(CONF_SENSOR_CONDUCTIVITY): cv.entity_id,
vol.Optional(CONF_SENSOR_TEMPERATURE): cv.entity_id,
vol.Optional(CONF_SENSOR_BRIGHTNESS): cv.entity_id,
})
PLANT_SCHEMA = vol.Schema({
vol.Required(CONF_SENSORS): vol.Schema(SCHEMA_SENSORS),
vol.Optional(CONF_MIN_BATTERY_LEVEL): cv.positive_int,
vol.Optional(CONF_MIN_TEMPERATURE): vol.Coerce(float),
vol.Optional(CONF_MAX_TEMPERATURE): vol.Coerce(float),
vol.Optional(CONF_MIN_MOISTURE): cv.positive_int,
vol.Optional(CONF_MAX_MOISTURE): cv.positive_int,
vol.Optional(CONF_MIN_CONDUCTIVITY): cv.positive_int,
vol.Optional(CONF_MAX_CONDUCTIVITY): cv.positive_int,
vol.Optional(CONF_MIN_BRIGHTNESS): cv.positive_int,
vol.Optional(CONF_MAX_BRIGHTNESS): cv.positive_int,
vol.Optional(CONF_CHECK_DAYS): cv.positive_int,
})
DOMAIN = 'plant'
DEPENDENCIES = ['zone', 'group']
GROUP_NAME_ALL_PLANTS = 'all plants'
ENTITY_ID_ALL_PLANTS = group.ENTITY_ID_FORMAT.format('all_plants')
CONFIG_SCHEMA = vol.Schema({
DOMAIN: {
cv.string: PLANT_SCHEMA
},
}, extra=vol.ALLOW_EXTRA)
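# Hedged configuration sketch, not part of the original component: a plant entry in
# configuration.yaml that satisfies PLANT_SCHEMA above. Entity ids and thresholds are
# hypothetical.
#
#   plant:
#     lemon_tree:
#       sensors:
#         moisture: sensor.lemon_tree_moisture
#         battery: sensor.lemon_tree_battery
#         temperature: sensor.lemon_tree_temperature
#         conductivity: sensor.lemon_tree_conductivity
#         brightness: sensor.lemon_tree_brightness
#       min_moisture: 20
#       max_moisture: 60
#       min_battery: 17
#       min_conductivity: 500
#       check_days: 3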
# Flag for enabling/disabling the loading of the history from the database.
# This feature is turned off right now as its tests are not 100% stable.
ENABLE_LOAD_HISTORY = False
@asyncio.coroutine
def async_setup(hass, config):
"""Set up the Plant component."""
component = EntityComponent(_LOGGER, DOMAIN, hass,
group_name=GROUP_NAME_ALL_PLANTS)
entities = []
for plant_name, plant_config in config[DOMAIN].items():
_LOGGER.info("Added plant %s", plant_name)
entity = Plant(plant_name, plant_config)
sensor_entity_ids = list(plant_config[CONF_SENSORS].values())
_LOGGER.debug("Subscribing to entity_ids %s", sensor_entity_ids)
async_track_state_change(hass, sensor_entity_ids, entity.state_changed)
entities.append(entity)
yield from component.async_add_entities(entities)
return True
class Plant(Entity):
"""Plant monitors the well-being of a plant.
It also checks the measurements against
configurable min and max values.
"""
READINGS = {
READING_BATTERY: {
ATTR_UNIT_OF_MEASUREMENT: '%',
'min': CONF_MIN_BATTERY_LEVEL,
},
READING_TEMPERATURE: {
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
'min': CONF_MIN_TEMPERATURE,
'max': CONF_MAX_TEMPERATURE,
},
READING_MOISTURE: {
ATTR_UNIT_OF_MEASUREMENT: '%',
'min': CONF_MIN_MOISTURE,
'max': CONF_MAX_MOISTURE,
},
READING_CONDUCTIVITY: {
ATTR_UNIT_OF_MEASUREMENT: 'µS/cm',
'min': CONF_MIN_CONDUCTIVITY,
'max': CONF_MAX_CONDUCTIVITY,
},
READING_BRIGHTNESS: {
ATTR_UNIT_OF_MEASUREMENT: 'lux',
'min': CONF_MIN_BRIGHTNESS,
'max': CONF_MAX_BRIGHTNESS,
}
}
def __init__(self, name, config):
"""Initialize the Plant component."""
self._config = config
self._sensormap = dict()
self._readingmap = dict()
self._unit_of_measurement = dict()
for reading, entity_id in config['sensors'].items():
self._sensormap[entity_id] = reading
self._readingmap[reading] = entity_id
self._state = STATE_UNKNOWN
self._name = name
self._battery = None
self._moisture = None
self._conductivity = None
self._temperature = None
self._brightness = None
self._problems = PROBLEM_NONE
self._conf_check_days = 3 # default check interval: 3 days
if CONF_CHECK_DAYS in self._config:
self._conf_check_days = self._config[CONF_CHECK_DAYS]
self._brightness_history = DailyHistory(self._conf_check_days)
@callback
def state_changed(self, entity_id, _, new_state):
"""Update the sensor status.
This callback is triggered when the sensor state changes.
"""
value = new_state.state
_LOGGER.debug("Received callback from %s with value %s",
entity_id, value)
if value == STATE_UNKNOWN:
return
reading = self._sensormap[entity_id]
if reading == READING_MOISTURE:
self._moisture = int(float(value))
elif reading == READING_BATTERY:
self._battery = int(float(value))
elif reading == READING_TEMPERATURE:
self._temperature = float(value)
elif reading == READING_CONDUCTIVITY:
self._conductivity = int(float(value))
elif reading == READING_BRIGHTNESS:
self._brightness = int(float(value))
self._brightness_history.add_measurement(self._brightness,
new_state.last_updated)
else:
_LOGGER.error("Unknown reading from sensor %s: %s",
entity_id, value)
if ATTR_UNIT_OF_MEASUREMENT in new_state.attributes:
self._unit_of_measurement[reading] = \
new_state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
self._update_state()
def _update_state(self):
"""Update the state of the class based sensor data."""
result = []
for sensor_name in self._sensormap.values():
params = self.READINGS[sensor_name]
value = getattr(self, '_{}'.format(sensor_name))
if value is not None:
if sensor_name == READING_BRIGHTNESS:
result.append(self._check_min(
sensor_name, self._brightness_history.max, params))
else:
result.append(self._check_min(sensor_name, value, params))
result.append(self._check_max(sensor_name, value, params))
result = [r for r in result if r is not None]
if result:
self._state = STATE_PROBLEM
self._problems = ', '.join(result)
else:
self._state = STATE_OK
self._problems = PROBLEM_NONE
_LOGGER.debug("New data processed")
self.async_schedule_update_ha_state()
def _check_min(self, sensor_name, value, params):
"""If configured, check the value against the defined minimum value."""
if 'min' in params and params['min'] in self._config:
min_value = self._config[params['min']]
if value < min_value:
return '{} low'.format(sensor_name)
def _check_max(self, sensor_name, value, params):
"""If configured, check the value against the defined maximum value."""
if 'max' in params and params['max'] in self._config:
max_value = self._config[params['max']]
if value > max_value:
return '{} high'.format(sensor_name)
return None
@asyncio.coroutine
def async_added_to_hass(self):
"""After being added to hass, load from history."""
if ENABLE_LOAD_HISTORY and 'recorder' in self.hass.config.components:
# only use the database if it's configured
self.hass.async_add_job(self._load_history_from_db)
@asyncio.coroutine
def _load_history_from_db(self):
"""Load the history of the brightness values from the database.
This only needs to be done once during startup.
"""
from homeassistant.components.recorder.models import States
start_date = datetime.now() - timedelta(days=self._conf_check_days)
entity_id = self._readingmap.get(READING_BRIGHTNESS)
if entity_id is None:
_LOGGER.debug("not reading the history from the database as "
"there is no brightness sensor configured.")
return
_LOGGER.debug("initializing values for %s from the database",
self._name)
with session_scope(hass=self.hass) as session:
query = session.query(States).filter(
(States.entity_id == entity_id.lower()) &
(States.last_updated > start_date)
).order_by(States.last_updated.asc())
states = execute(query)
for state in states:
# filter out all None, NaN and "unknown" states
# only keep real values
try:
self._brightness_history.add_measurement(
int(state.state), state.last_updated)
except ValueError:
pass
_LOGGER.debug("initializing from database completed")
self.async_schedule_update_ha_state()
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the entity."""
return self._state
@property
def state_attributes(self):
"""Return the attributes of the entity.
Provide the individual measurements from the
sensor in the attributes of the device.
"""
attrib = {
ATTR_PROBLEM: self._problems,
ATTR_SENSORS: self._readingmap,
ATTR_DICT_OF_UNITS_OF_MEASUREMENT: self._unit_of_measurement,
}
for reading in self._sensormap.values():
attrib[reading] = getattr(self, '_{}'.format(reading))
if self._brightness_history.max is not None:
attrib[ATTR_MAX_BRIGHTNESS_HISTORY] = self._brightness_history.max
return attrib
class DailyHistory(object):
"""Stores one measurement per day for a maximum number of days.
At the moment only the maximum value per day is kept.
"""
def __init__(self, max_length):
"""Create new DailyHistory with a maximum length of the history."""
self.max_length = max_length
self._days = None
self._max_dict = dict()
self.max = None
def add_measurement(self, value, timestamp=None):
"""Add a new measurement for a certain day."""
timestamp = timestamp if timestamp is not None else datetime.now()
day = timestamp.date()
if value is None:
return
if self._days is None:
self._days = deque()
self._add_day(day, value)
else:
current_day = self._days[-1]
if day == current_day:
self._max_dict[day] = max(value, self._max_dict[day])
elif day > current_day:
self._add_day(day, value)
else:
_LOGGER.warning('received old measurement, not storing it!')
self.max = max(self._max_dict.values())
def _add_day(self, day, value):
"""Add a new day to the history.
Deletes the oldest day, if the queue becomes too long.
"""
if len(self._days) == self.max_length:
oldest = self._days.popleft()
del self._max_dict[oldest]
self._days.append(day)
self._max_dict[day] = value
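# Hedged usage sketch, not part of the original component: DailyHistory itself has no
# Home Assistant dependencies, so it can be exercised standalone; dates and values are
# hypothetical.
if __name__ == "__main__":
    history = DailyHistory(max_length=3)
    history.add_measurement(100, datetime(2021, 1, 1, 12, 0))
    history.add_measurement(250, datetime(2021, 1, 1, 18, 0))  # same day, higher value kept
    history.add_measurement(80, datetime(2021, 1, 2, 9, 0))    # a new day is appended
    assert history.max == 250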
|
py | 7df9938bbedde58d3d803f2ff6f9aa41cfc78317 | # https://adventofcode.com/2019/day/7
import itertools
from intcode import IntcodeComputer
def build_amps(code, feedback_mode=False, n=5):
return [IntcodeComputer(code, True, feedback_mode) for _ in range(n)]
def test_phase_setting(amps, phases):
assert len(amps) == len(phases)
signal = 0
for amp, phase in zip(amps, phases):
amp.reset()
signal = amp.run([phase, signal])
while not amps[0].has_halted:
for amp in amps:
new_signal = amp.run([signal])
if new_signal is None:
break
signal = new_signal
return signal
def iterate_phases(amps, phase_range):
max_signal = 0
for phase_settings in itertools.permutations(phase_range):
max_signal = max(
test_phase_setting(amps, phase_settings),
max_signal
)
return max_signal
with open("../../input/2019-07-input.txt") as file:
program = [int(i) for i in file.read().split(",")]
# part 1:
test_code_1 = build_amps((3, 15, 3, 16, 1002, 16, 10, 16,
1, 16, 15, 15, 4, 15, 99, 0, 0))
assert test_phase_setting(test_code_1, (4, 3, 2, 1, 0)) == 43210
test_code_2 = build_amps((3, 23, 3, 24, 1002, 24, 10, 24, 1002, 23, -1, 23,
101, 5, 23, 23, 1, 24, 23, 23, 4, 23, 99, 0, 0))
assert test_phase_setting(test_code_2, (0, 1, 2, 3, 4)) == 54321
test_code_3 = build_amps((3, 31, 3, 32, 1002, 32, 10, 32, 1001, 31, -2, 31,
1007, 31, 0, 33, 1002, 33, 7, 33, 1, 33, 31, 31,
1, 32, 31, 31, 4, 31, 99, 0, 0, 0))
assert test_phase_setting(test_code_3, (1, 0, 4, 3, 2)) == 65210
amplifiers = build_amps(program)
print(iterate_phases(amplifiers, range(5))) # 21000
# part 2:
test_code_4 = build_amps((3, 26, 1001, 26, -4, 26, 3, 27, 1002, 27, 2, 27, 1,
27, 26, 27, 4, 27, 1001, 28, -1, 28, 1005, 28, 6, 99,
0, 0, 5), feedback_mode=True)
assert test_phase_setting(test_code_4, (9, 8, 7, 6, 5)) == 139629729
test_code_5 = build_amps((3, 52, 1001, 52, -5, 52, 3, 53, 1, 52, 56, 54, 1007,
54, 5, 55, 1005, 55, 26, 1001, 54, -5, 54, 1105, 1,
12, 1, 53, 54, 53, 1008, 54, 0, 55, 1001, 55, 1, 55,
2, 53, 55, 53, 4, 53, 1001, 56, -1, 56, 1005, 56, 6,
99, 0, 0, 0, 0, 10), feedback_mode=True)
assert test_phase_setting(test_code_5, (9, 7, 8, 5, 6)) == 18216
amplifiers = build_amps(program, feedback_mode=True)
print(iterate_phases(amplifiers, range(5, 10))) # 61379886
|
py | 7df9946c1ffe5a369b4d488f35b0c796ee3c73f1 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Vision operator."""
from copy import deepcopy
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple, Union
from google.api_core.exceptions import AlreadyExists
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.vision_v1.types import (
AnnotateImageRequest,
FieldMask,
Image,
Product,
ProductSet,
ReferenceImage,
)
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.vision import CloudVisionHook
if TYPE_CHECKING:
from airflow.utils.context import Context
MetaData = Sequence[Tuple[str, str]]
class CloudVisionCreateProductSetOperator(BaseOperator):
"""
Creates a new ProductSet resource.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionCreateProductSetOperator`
:param product_set: (Required) The ProductSet to create. If a dict is provided, it must be of the same
form as the protobuf message `ProductSet`.
:param location: (Required) The region where the ProductSet should be created. Valid regions
(as of 2019-02-05) are: us-east1, us-west1, europe-west1, asia-east1
:param project_id: (Optional) The project in which the ProductSet should be created. If set to None or
missing, the default project_id from the Google Cloud connection is used.
:param product_set_id: (Optional) A user-supplied resource id for this ProductSet.
If set, the server will attempt to use this value as the resource id. If it is
already in use, an error is returned with code ALREADY_EXISTS. Must be at most
128 characters long. It cannot contain the character /.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_productset_create_template_fields]
template_fields: Sequence[str] = (
"location",
"project_id",
"product_set_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_productset_create_template_fields]
def __init__(
self,
*,
product_set: Union[dict, ProductSet],
location: str,
project_id: Optional[str] = None,
product_set_id: Optional[str] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.project_id = project_id
self.product_set = product_set
self.product_set_id = product_set_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
return hook.create_product_set(
location=self.location,
project_id=self.project_id,
product_set=self.product_set,
product_set_id=self.product_set_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
self.log.info(
"Product set with id %s already exists. Exiting from the create operation.",
self.product_set_id,
)
return self.product_set_id
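# Hedged usage sketch, not part of the provider module: wiring the operator into a DAG.
# The dag id, schedule, location and display name are hypothetical.
#
#   from datetime import datetime
#   from airflow import DAG
#
#   with DAG("vision_product_set_example", start_date=datetime(2022, 1, 1), schedule_interval=None) as dag:
#       create_product_set = CloudVisionCreateProductSetOperator(
#           task_id="create_product_set",
#           location="europe-west1",
#           product_set=ProductSet(display_name="Example product set"),
#       )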
class CloudVisionGetProductSetOperator(BaseOperator):
"""
Gets information associated with a ProductSet.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionGetProductSetOperator`
:param location: (Required) The region where the ProductSet is located. Valid regions (as of 2019-02-05)
are: us-east1, us-west1, europe-west1, asia-east1
:param product_set_id: (Required) The resource id of this ProductSet.
:param project_id: (Optional) The project in which the ProductSet is located. If set
to None or missing, the default `project_id` from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_productset_get_template_fields]
template_fields: Sequence[str] = (
'location',
'project_id',
'product_set_id',
'gcp_conn_id',
'impersonation_chain',
)
# [END vision_productset_get_template_fields]
def __init__(
self,
*,
location: str,
product_set_id: str,
project_id: Optional[str] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: MetaData = (),
gcp_conn_id: str = 'google_cloud_default',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.project_id = project_id
self.product_set_id = product_set_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.get_product_set(
location=self.location,
product_set_id=self.product_set_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class CloudVisionUpdateProductSetOperator(BaseOperator):
"""
Makes changes to a `ProductSet` resource. Only display_name can be updated currently.
.. note:: To locate the `ProductSet` resource, its `name` in the form
`projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID` is necessary.
You can provide the `name` directly as an attribute of the `product_set` object.
However, you can leave it blank and provide `location` and `product_set_id` instead
(and optionally `project_id` - if not present, the connection default will be used)
and the `name` will be created by the operator itself.
This mechanism exists for your convenience, to allow leaving the `project_id` empty
and having Airflow use the connection default `project_id`.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionUpdateProductSetOperator`
:param product_set: (Required) The ProductSet resource which replaces the one on the
server. If a dict is provided, it must be of the same form as the protobuf
message `ProductSet`.
:param location: (Optional) The region where the ProductSet is located. Valid regions (as of 2019-02-05)
are: us-east1, us-west1, europe-west1, asia-east1
:param product_set_id: (Optional) The resource id of this ProductSet.
:param project_id: (Optional) The project in which the ProductSet should be created. If set to None or
missing, the default project_id from the Google Cloud connection is used.
:param update_mask: (Optional) The `FieldMask` that specifies which fields to update. If update_mask
isn’t specified, all mutable fields are to be updated. Valid mask path is display_name. If a dict is
provided, it must be of the same form as the protobuf message `FieldMask`.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_productset_update_template_fields]
template_fields: Sequence[str] = (
'location',
'project_id',
'product_set_id',
'gcp_conn_id',
'impersonation_chain',
)
# [END vision_productset_update_template_fields]
def __init__(
self,
*,
product_set: Union[Dict, ProductSet],
location: Optional[str] = None,
product_set_id: Optional[str] = None,
project_id: Optional[str] = None,
update_mask: Optional[Union[Dict, FieldMask]] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: MetaData = (),
gcp_conn_id: str = 'google_cloud_default',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.product_set = product_set
self.update_mask = update_mask
self.location = location
self.project_id = project_id
self.product_set_id = product_set_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.update_product_set(
location=self.location,
product_set_id=self.product_set_id,
project_id=self.project_id,
product_set=self.product_set,
update_mask=self.update_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class CloudVisionDeleteProductSetOperator(BaseOperator):
"""
Permanently deletes a `ProductSet`. `Products` and `ReferenceImages` in the
`ProductSet` are not deleted. The actual image files are not deleted from Google
Cloud Storage.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionDeleteProductSetOperator`
:param location: (Required) The region where the ProductSet is located. Valid regions (as of 2019-02-05)
are: us-east1, us-west1, europe-west1, asia-east1
:param product_set_id: (Required) The resource id of this ProductSet.
:param project_id: (Optional) The project in which the ProductSet should be created.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_productset_delete_template_fields]
template_fields: Sequence[str] = (
'location',
'project_id',
'product_set_id',
'gcp_conn_id',
'impersonation_chain',
)
# [END vision_productset_delete_template_fields]
def __init__(
self,
*,
location: str,
product_set_id: str,
project_id: Optional[str] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: MetaData = (),
gcp_conn_id: str = 'google_cloud_default',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.project_id = project_id
self.product_set_id = product_set_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.delete_product_set(
location=self.location,
product_set_id=self.product_set_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class CloudVisionCreateProductOperator(BaseOperator):
"""
Creates and returns a new product resource.
Possible errors regarding the `Product` object provided:
- Returns `INVALID_ARGUMENT` if `display_name` is missing or longer than 4096 characters.
- Returns `INVALID_ARGUMENT` if `description` is longer than 4096 characters.
- Returns `INVALID_ARGUMENT` if `product_category` is missing or invalid.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionCreateProductOperator`
:param location: (Required) The region where the Product should be created. Valid regions
(as of 2019-02-05) are: us-east1, us-west1, europe-west1, asia-east1
:param product: (Required) The product to create. If a dict is provided, it must be of the same form as
the protobuf message `Product`.
:param project_id: (Optional) The project in which the Product should be created. If set to None or
missing, the default project_id from the Google Cloud connection is used.
:param product_id: (Optional) A user-supplied resource id for this Product.
If set, the server will attempt to use this value as the resource id. If it is
already in use, an error is returned with code ALREADY_EXISTS. Must be at most
128 characters long. It cannot contain the character /.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_product_create_template_fields]
template_fields: Sequence[str] = (
'location',
'project_id',
'product_id',
'gcp_conn_id',
'impersonation_chain',
)
# [END vision_product_create_template_fields]
def __init__(
self,
*,
location: str,
        product: Union[Dict, Product],
project_id: Optional[str] = None,
product_id: Optional[str] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: MetaData = (),
gcp_conn_id: str = 'google_cloud_default',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.product = product
self.project_id = project_id
self.product_id = product_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
return hook.create_product(
location=self.location,
product=self.product,
project_id=self.project_id,
product_id=self.product_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
self.log.info(
'Product with id %s already exists. Exiting from the create operation.', self.product_id
)
return self.product_id
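def _example_create_product_task():
    """Illustrative sketch only (not part of the original provider module): shows how this
    operator might be instantiated inside a DAG file. The task_id, location and product
    values are hypothetical placeholders, not values taken from the source."""
    return CloudVisionCreateProductOperator(
        task_id='create_product_example',
        location='europe-west1',
        product={'display_name': 'example-product', 'product_category': 'toys'},
    )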
class CloudVisionGetProductOperator(BaseOperator):
"""
Gets information associated with a `Product`.
Possible errors:
- Returns `NOT_FOUND` if the `Product` does not exist.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionGetProductOperator`
:param location: (Required) The region where the Product is located. Valid regions (as of 2019-02-05) are:
us-east1, us-west1, europe-west1, asia-east1
:param product_id: (Required) The resource id of this Product.
:param project_id: (Optional) The project in which the Product is located. If set to
None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_product_get_template_fields]
template_fields: Sequence[str] = (
'location',
'project_id',
'product_id',
'gcp_conn_id',
'impersonation_chain',
)
# [END vision_product_get_template_fields]
def __init__(
self,
*,
location: str,
product_id: str,
project_id: Optional[str] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.product_id = product_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.get_product(
location=self.location,
product_id=self.product_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class CloudVisionUpdateProductOperator(BaseOperator):
"""
Makes changes to a Product resource. Only the display_name, description, and labels fields can be
updated right now.
If labels are updated, the change will not be reflected in queries until the next index time.
.. note:: To locate the `Product` resource, its `name` in the form
`projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID` is necessary.
You can provide the `name` directly as an attribute of the `product` object. However, you can leave it
blank and provide `location` and `product_id` instead (and optionally `project_id` - if not present,
the connection default will be used) and the `name` will be created by the operator itself.
This mechanism exists for your convenience, to allow leaving the `project_id` empty and having Airflow
use the connection default `project_id`.
Possible errors related to the provided `Product`:
- Returns `NOT_FOUND` if the Product does not exist.
- Returns `INVALID_ARGUMENT` if `display_name` is present in update_mask but is missing from the request
or longer than 4096 characters.
- Returns `INVALID_ARGUMENT` if `description` is present in update_mask but is longer than 4096
characters.
- Returns `INVALID_ARGUMENT` if `product_category` is present in update_mask.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionUpdateProductOperator`
:param product: (Required) The Product resource which replaces the one on the server. product.name is
immutable. If a dict is provided, it must be of the same form as the protobuf message `Product`.
:param location: (Optional) The region where the Product is located. Valid regions (as of 2019-02-05) are:
us-east1, us-west1, europe-west1, asia-east1
:param product_id: (Optional) The resource id of this Product.
:param project_id: (Optional) The project in which the Product is located. If set to None or
missing, the default project_id from the Google Cloud connection is used.
:param update_mask: (Optional) The `FieldMask` that specifies which fields to update. If update_mask
isn’t specified, all mutable fields are to be updated. Valid mask paths include product_labels,
display_name, and description. If a dict is provided, it must be of the same form as the protobuf
message `FieldMask`.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_product_update_template_fields]
template_fields: Sequence[str] = (
'location',
'project_id',
'product_id',
'gcp_conn_id',
'impersonation_chain',
)
# [END vision_product_update_template_fields]
def __init__(
self,
*,
product: Union[Dict, Product],
location: Optional[str] = None,
product_id: Optional[str] = None,
project_id: Optional[str] = None,
        update_mask: Optional[Union[Dict, FieldMask]] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: MetaData = (),
gcp_conn_id: str = 'google_cloud_default',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.product = product
self.location = location
self.product_id = product_id
self.project_id = project_id
self.update_mask = update_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.update_product(
product=self.product,
location=self.location,
product_id=self.product_id,
project_id=self.project_id,
update_mask=self.update_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
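def _example_update_product_display_name_task():
    """Illustrative sketch only (not part of the original provider module): renames an existing
    product by sending only ``display_name`` together with a ``FieldMask`` in dict form, as the
    docstring above describes. All identifiers are hypothetical placeholders."""
    return CloudVisionUpdateProductOperator(
        task_id='update_product_example',
        location='europe-west1',
        product_id='example-product-id',
        product={'display_name': 'renamed-example-product'},
        update_mask={'paths': ['display_name']},
    )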
class CloudVisionDeleteProductOperator(BaseOperator):
"""
Permanently deletes a product and its reference images.
Metadata of the product and all its images will be deleted right away, but search queries against
ProductSets containing the product may still work until all related caches are refreshed.
Possible errors:
- Returns `NOT_FOUND` if the product does not exist.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionDeleteProductOperator`
:param location: (Required) The region where the Product is located. Valid regions (as of 2019-02-05) are:
us-east1, us-west1, europe-west1, asia-east1
:param product_id: (Required) The resource id of this Product.
:param project_id: (Optional) The project in which the Product is located. If set to None or
missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_product_delete_template_fields]
template_fields: Sequence[str] = (
'location',
'project_id',
'product_id',
'gcp_conn_id',
'impersonation_chain',
)
# [END vision_product_delete_template_fields]
def __init__(
self,
*,
location: str,
product_id: str,
project_id: Optional[str] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: MetaData = (),
gcp_conn_id: str = 'google_cloud_default',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.product_id = product_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.delete_product(
location=self.location,
product_id=self.product_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class CloudVisionImageAnnotateOperator(BaseOperator):
"""
Run image detection and annotation for an image or a batch of images.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionImageAnnotateOperator`
:param request: (Required) Annotation request for image or a batch.
If a dict is provided, it must be of the same form as the protobuf
message class:`google.cloud.vision_v1.types.AnnotateImageRequest`
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_annotate_image_template_fields]
template_fields: Sequence[str] = (
'request',
'gcp_conn_id',
'impersonation_chain',
)
# [END vision_annotate_image_template_fields]
def __init__(
self,
*,
request: Union[Dict, AnnotateImageRequest],
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
gcp_conn_id: str = 'google_cloud_default',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.request = request
self.retry = retry
self.timeout = timeout
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if not isinstance(self.request, list):
response = hook.annotate_image(request=self.request, retry=self.retry, timeout=self.timeout)
else:
response = hook.batch_annotate_images(
requests=self.request, retry=self.retry, timeout=self.timeout
)
return response
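def _example_annotate_image_task():
    """Illustrative sketch only (not part of the original provider module): runs logo detection
    on a single GCS-hosted image. The bucket path and task_id are hypothetical placeholders, and
    the ``type_`` key assumes the proto-plus based google-cloud-vision client (older clients
    spell the field ``type``)."""
    from google.cloud.vision_v1 import Feature  # local import keeps the sketch self-contained
    return CloudVisionImageAnnotateOperator(
        task_id='annotate_image_example',
        request={
            'image': {'source': {'image_uri': 'gs://my-example-bucket/logo.png'}},
            'features': [{'type_': Feature.Type.LOGO_DETECTION}],
        },
    )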
class CloudVisionCreateReferenceImageOperator(BaseOperator):
"""
Creates and returns a new ReferenceImage ID resource.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionCreateReferenceImageOperator`
:param location: (Required) The region where the Product is located. Valid regions (as of 2019-02-05) are:
us-east1, us-west1, europe-west1, asia-east1
:param reference_image: (Required) The reference image to create. If an image ID is specified, it is
ignored.
If a dict is provided, it must be of the same form as the protobuf message
:class:`google.cloud.vision_v1.types.ReferenceImage`
:param reference_image_id: (Optional) A user-supplied resource id for the ReferenceImage to be added.
If set, the server will attempt to use this value as the resource id. If it is already in use, an
error is returned with code ALREADY_EXISTS. Must be at most 128 characters long. It cannot contain
the character `/`.
:param product_id: (Optional) The resource id of this Product.
:param project_id: (Optional) The project in which the Product is located. If set to None or
missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_reference_image_create_template_fields]
template_fields: Sequence[str] = (
"location",
"reference_image",
"product_id",
"reference_image_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_reference_image_create_template_fields]
def __init__(
self,
*,
location: str,
reference_image: Union[Dict, ReferenceImage],
product_id: str,
reference_image_id: Optional[str] = None,
project_id: Optional[str] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: MetaData = (),
gcp_conn_id: str = 'google_cloud_default',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.product_id = product_id
self.reference_image = reference_image
self.reference_image_id = reference_image_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
try:
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.create_reference_image(
location=self.location,
product_id=self.product_id,
reference_image=self.reference_image,
reference_image_id=self.reference_image_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
self.log.info(
"ReferenceImage with id %s already exists. Exiting from the create operation.",
                self.reference_image_id,
)
return self.reference_image_id
class CloudVisionDeleteReferenceImageOperator(BaseOperator):
"""
Deletes a ReferenceImage ID resource.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionDeleteReferenceImageOperator`
:param location: (Required) The region where the Product is located. Valid regions (as of 2019-02-05) are:
us-east1, us-west1, europe-west1, asia-east1
    :param reference_image_id: (Required) The resource id of the ReferenceImage to delete.
:param product_id: (Optional) The resource id of this Product.
:param project_id: (Optional) The project in which the Product is located. If set to None or
missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
    # [START vision_reference_image_delete_template_fields]
template_fields: Sequence[str] = (
"location",
"product_id",
"reference_image_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
    # [END vision_reference_image_delete_template_fields]
def __init__(
self,
*,
location: str,
product_id: str,
reference_image_id: str,
project_id: Optional[str] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: MetaData = (),
gcp_conn_id: str = 'google_cloud_default',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.product_id = product_id
self.reference_image_id = reference_image_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.delete_reference_image(
location=self.location,
product_id=self.product_id,
reference_image_id=self.reference_image_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class CloudVisionAddProductToProductSetOperator(BaseOperator):
"""
Adds a Product to the specified ProductSet. If the Product is already present, no change is made.
One Product can be added to at most 100 ProductSets.
Possible errors:
- Returns `NOT_FOUND` if the Product or the ProductSet doesn’t exist.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionAddProductToProductSetOperator`
:param product_set_id: (Required) The resource id for the ProductSet to modify.
:param product_id: (Required) The resource id of this Product.
:param location: (Required) The region where the ProductSet is located. Valid regions (as of 2019-02-05)
are: us-east1, us-west1, europe-west1, asia-east1
:param project_id: (Optional) The project in which the Product is located. If set to None or
missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_add_product_to_product_set_template_fields]
template_fields: Sequence[str] = (
"location",
"product_set_id",
"product_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_add_product_to_product_set_template_fields]
def __init__(
self,
*,
product_set_id: str,
product_id: str,
location: str,
project_id: Optional[str] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.product_set_id = product_set_id
self.product_id = product_id
self.location = location
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.add_product_to_product_set(
product_set_id=self.product_set_id,
product_id=self.product_id,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class CloudVisionRemoveProductFromProductSetOperator(BaseOperator):
"""
Removes a Product from the specified ProductSet.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionRemoveProductFromProductSetOperator`
:param product_set_id: (Required) The resource id for the ProductSet to modify.
:param product_id: (Required) The resource id of this Product.
:param location: (Required) The region where the ProductSet is located. Valid regions (as of 2019-02-05)
are: us-east1, us-west1, europe-west1, asia-east1
:param project_id: (Optional) The project in which the Product is located. If set to None or
missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_remove_product_from_product_set_template_fields]
template_fields: Sequence[str] = (
"location",
"product_set_id",
"product_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_remove_product_from_product_set_template_fields]
def __init__(
self,
*,
product_set_id: str,
product_id: str,
location: str,
project_id: Optional[str] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.product_set_id = product_set_id
self.product_id = product_id
self.location = location
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.remove_product_from_product_set(
product_set_id=self.product_set_id,
product_id=self.product_id,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class CloudVisionDetectTextOperator(BaseOperator):
"""
Detects Text in the image
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionDetectTextOperator`
:param image: (Required) The image to analyze. See more:
https://googleapis.github.io/google-cloud-python/latest/vision/gapic/v1/types.html#google.cloud.vision_v1.types.Image
:param max_results: (Optional) Number of results to return.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: Number of seconds before timing out.
:param language_hints: List of languages to use for TEXT_DETECTION.
In most cases, an empty value yields the best results since it enables automatic language detection.
For languages based on the Latin alphabet, setting language_hints is not needed.
:param web_detection_params: Parameters for web detection.
:param additional_properties: Additional properties to be set on the AnnotateImageRequest. See more:
:class:`google.cloud.vision_v1.types.AnnotateImageRequest`
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_detect_text_set_template_fields]
template_fields: Sequence[str] = (
"image",
"max_results",
"timeout",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_detect_text_set_template_fields]
def __init__(
self,
image: Union[Dict, Image],
max_results: Optional[int] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
language_hints: Optional[Union[str, List[str]]] = None,
web_detection_params: Optional[Dict] = None,
additional_properties: Optional[Dict] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.image = image
self.max_results = max_results
self.retry = retry
self.timeout = timeout
self.gcp_conn_id = gcp_conn_id
self.kwargs = kwargs
self.additional_properties = prepare_additional_parameters(
additional_properties=additional_properties,
language_hints=language_hints,
web_detection_params=web_detection_params,
)
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.text_detection(
image=self.image,
max_results=self.max_results,
retry=self.retry,
timeout=self.timeout,
additional_properties=self.additional_properties,
)
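def _example_detect_text_task():
    """Illustrative sketch only (not part of the original provider module): runs TEXT_DETECTION
    on a GCS-hosted image with an explicit English language hint. The bucket path, task_id and
    max_results value are hypothetical placeholders."""
    return CloudVisionDetectTextOperator(
        task_id='detect_text_example',
        image={'source': {'image_uri': 'gs://my-example-bucket/receipt.png'}},
        language_hints=['en'],
        max_results=5,
    )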
class CloudVisionTextDetectOperator(BaseOperator):
"""
Detects Document Text in the image
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionTextDetectOperator`
:param image: (Required) The image to analyze. See more:
https://googleapis.github.io/google-cloud-python/latest/vision/gapic/v1/types.html#google.cloud.vision_v1.types.Image
:param max_results: Number of results to return.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: Number of seconds before timing out.
:param language_hints: List of languages to use for TEXT_DETECTION.
In most cases, an empty value yields the best results since it enables automatic language detection.
For languages based on the Latin alphabet, setting language_hints is not needed.
:param web_detection_params: Parameters for web detection.
:param additional_properties: Additional properties to be set on the AnnotateImageRequest. See more:
https://googleapis.github.io/google-cloud-python/latest/vision/gapic/v1/types.html#google.cloud.vision_v1.types.AnnotateImageRequest
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_document_detect_text_set_template_fields]
template_fields: Sequence[str] = (
"image",
"max_results",
"timeout",
"gcp_conn_id",
"impersonation_chain",
    )
# [END vision_document_detect_text_set_template_fields]
def __init__(
self,
image: Union[Dict, Image],
max_results: Optional[int] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
language_hints: Optional[Union[str, List[str]]] = None,
web_detection_params: Optional[Dict] = None,
additional_properties: Optional[Dict] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.image = image
self.max_results = max_results
self.retry = retry
self.timeout = timeout
self.gcp_conn_id = gcp_conn_id
self.additional_properties = prepare_additional_parameters(
additional_properties=additional_properties,
language_hints=language_hints,
web_detection_params=web_detection_params,
)
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.document_text_detection(
image=self.image,
max_results=self.max_results,
retry=self.retry,
timeout=self.timeout,
additional_properties=self.additional_properties,
)
class CloudVisionDetectImageLabelsOperator(BaseOperator):
"""
    Detects Labels in the image
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionDetectImageLabelsOperator`
:param image: (Required) The image to analyze. See more:
https://googleapis.github.io/google-cloud-python/latest/vision/gapic/v1/types.html#google.cloud.vision_v1.types.Image
:param max_results: Number of results to return.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: Number of seconds before timing out.
:param additional_properties: Additional properties to be set on the AnnotateImageRequest. See more:
https://googleapis.github.io/google-cloud-python/latest/vision/gapic/v1/types.html#google.cloud.vision_v1.types.AnnotateImageRequest
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_detect_labels_template_fields]
template_fields: Sequence[str] = (
"image",
"max_results",
"timeout",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_detect_labels_template_fields]
def __init__(
self,
image: Union[Dict, Image],
max_results: Optional[int] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
additional_properties: Optional[Dict] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.image = image
self.max_results = max_results
self.retry = retry
self.timeout = timeout
self.gcp_conn_id = gcp_conn_id
self.additional_properties = additional_properties
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.label_detection(
image=self.image,
max_results=self.max_results,
retry=self.retry,
timeout=self.timeout,
additional_properties=self.additional_properties,
)
class CloudVisionDetectImageSafeSearchOperator(BaseOperator):
"""
    Detects Safe Search properties of the image
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionDetectImageSafeSearchOperator`
:param image: (Required) The image to analyze. See more:
https://googleapis.github.io/google-cloud-python/latest/vision/gapic/v1/types.html#google.cloud.vision_v1.types.Image
:param max_results: Number of results to return.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: Number of seconds before timing out.
:param additional_properties: Additional properties to be set on the AnnotateImageRequest. See more:
https://googleapis.github.io/google-cloud-python/latest/vision/gapic/v1/types.html#google.cloud.vision_v1.types.AnnotateImageRequest
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_detect_safe_search_template_fields]
template_fields: Sequence[str] = (
"image",
"max_results",
"timeout",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_detect_safe_search_template_fields]
def __init__(
self,
image: Union[Dict, Image],
max_results: Optional[int] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
additional_properties: Optional[Dict] = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.image = image
self.max_results = max_results
self.retry = retry
self.timeout = timeout
self.gcp_conn_id = gcp_conn_id
self.additional_properties = additional_properties
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.safe_search_detection(
image=self.image,
max_results=self.max_results,
retry=self.retry,
timeout=self.timeout,
additional_properties=self.additional_properties,
)
def prepare_additional_parameters(
additional_properties: Optional[Dict], language_hints: Any, web_detection_params: Any
) -> Optional[Dict]:
"""
Creates additional_properties parameter based on language_hints, web_detection_params and
additional_properties parameters specified by the user
"""
if language_hints is None and web_detection_params is None:
return additional_properties
if additional_properties is None:
return {}
merged_additional_parameters = deepcopy(additional_properties)
if 'image_context' not in merged_additional_parameters:
merged_additional_parameters['image_context'] = {}
merged_additional_parameters['image_context']['language_hints'] = merged_additional_parameters[
'image_context'
].get('language_hints', language_hints)
merged_additional_parameters['image_context']['web_detection_params'] = merged_additional_parameters[
'image_context'
].get('web_detection_params', web_detection_params)
return merged_additional_parameters
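def _example_prepare_additional_parameters():
    """Illustrative sketch only (not part of the original provider module): demonstrates how
    user-supplied language hints are merged into the ``image_context`` of an existing
    ``additional_properties`` dict. The values shown are hypothetical."""
    merged = prepare_additional_parameters(
        additional_properties={'image_context': {'crop_hints_params': {'aspect_ratios': [1.0]}}},
        language_hints=['en'],
        web_detection_params=None,
    )
    # merged['image_context'] now holds 'crop_hints_params', 'language_hints' == ['en'] and
    # 'web_detection_params' == None.
    return merged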
|
py | 7df994865da206c55ad8e90b49cd3c0edb3842de | import cv2, numpy as np
from Area import areaThreshold_by_havg, areaThreshold_by_top
from threshold import otsu_threshold
from _8connected import get_8connected_v2
from util_ import *
import warnings
import traceback
from eclipes_test import elliptic_fourier_descriptors, efd
warnings.filterwarnings("error")
import time
color = {i: np.random.randint(20, 255, 3) for i in range(5, 5000)}
color[1] = [255, 255, 255]
color[2] = [0, 0, 255]
def make_border(points, shape, bval=255):
# h,w = shape
boundry = np.zeros(shape, dtype=np.uint8)
# boundry = padding2D_zero(boundry,2)
boundry[points[0][0],points[0][1]] = bval
i=0
x,y = points[0]
while i < len(points)-1:
try:
boundry[x, y] = bval
except IndexError:
x1=x;y1=y
if x >= boundry.shape[0]:
x1 = boundry.shape[0]-1
if y >= boundry.shape[1]:
y1 = boundry.shape[1]-1
boundry[x1, y1] = bval
# traceback.print_exc()
if abs(points[i+1][0] - x) <=1 and abs(points[i+1][1] - y) <=1:
i+=1
x,y = points[i]
elif abs(points[i+1][0] - x) > 1:
x ,y = int(x + (points[i+1][0] - x)/abs(points[i+1][0] - x)), y
# x ,y = int(x + 1), y
elif abs(points[i+1][1] - y) > 1:
x ,y = x, int(y + (points[i+1][1] - y)/abs(points[i+1][1] - y))
# x ,y = x, int(y + 1)
# boundry = remove_padding2D_zero(boundry, 2)
return boundry
def mask_by_border(boundry, ival):
h,w = boundry.shape
inside = 0
b1=np.int0(boundry)
for i in range(h):
# try:
val = np.ones(np.argmax(b1[i,:])) * 2
b1[i,:len(val)] = val
val1 = np.ones(np.argmax(b1[i,::-1])) *2
b1[i,w-len(val1):] = val1
for i in range(w):
val = np.ones(np.argmax(b1[::-1,i])) * 2
b1[h-len(val):,i] = val
val = np.ones(np.argmax(b1[:,i])) * 2
b1[:len(val),i] = val
b1 = ((b1 - boundry)/-2 + 1) * ival
return b1
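def _example_border_fill():
    """Illustrative sketch only (not part of the original script): traces a small square contour
    with make_border and fills its interior with label value 7 via mask_by_border. The
    coordinates are hypothetical."""
    pts = [(2, 2), (2, 7), (7, 7), (7, 2), (2, 2)]
    border = make_border(pts, (10, 10), bval=255)
    filled = mask_by_border(border, 7)
    # 'filled' now holds 7 on and inside the square and 0 outside it.
    return filled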
def L2_segmentation_2(iimg, T, index):
h, w, _ = iimg.shape
# cv2.imshow('image', iimg)
t0 = time.time()
gray = iimg[:, :, 2]
# cv2.imshow('gray', gray)
thresh = np.array([[0 if pixel < T else 255 for pixel in row] for row in gray], dtype=np.uint8)
sober = sober_operation(gray)
# cv2.imshow('sober', sober)
# print("\tsober operation", time.time() - t0)
# t0 = time.time()
sober = cv2.fastNlMeansDenoising(sober, None, h=2, templateWindowSize=3, searchWindowSize=5)
# cv2.imshow('sober cleaned', sober)
# print("\tnoise operation", time.time() - t0)
# t0 = time.time()
T= otsu_threshold(sober)
# print("\tsober threshold", time.time() - t0)
# t0 = time.time()
sthresh = np.array([[0 if pixel < T else 255 for pixel in row] for row in sober], dtype=np.uint8)
# cv2.imshow('sober Threshold', sthresh)
# print("\tcalc threshold", time.time() - t0)
# t0 = time.time()
diluted = cv2.dilate(sthresh, kernel=np.ones((5,5), np.uint8), iterations=1)
# cv2.imshow('dilutated2 ', diluted)
# print("\tdilation operation", time.time() - t0)
# t0 = time.time()
thresh2 = np.where((thresh == 0) * (diluted == 255), 0, thresh-diluted)
# cv2.imshow('Thresh - dilute ', thresh2)
mask = get_8connected_v2(thresh=thresh2, mcount=index)
# display_mask("Diluted mask", mask)
# print("\tmask foamation", time.time() - t0)
# t0 = time.time()
    # Calculating the grain segments using the mask image
s = cal_segment_area(mask)
# print("\tcalc area seg", time.time() - t0)
# t0 = time.time()
rmcount = 0
if len(s) < 2:
        # fewer than two candidate segments were found, so there is nothing to split
return None
low_Tarea, up_Tarea = areaThreshold_by_top(s, 3)
slist = list(s)
for i in slist:
area = (s[i][0] - s[i][1]) * (s[i][2] - s[i][3])
if area < low_Tarea:
s.pop(i)
rmcount += 1
if len(s) < 2:
        # after the area filter fewer than two segments remain, so there is nothing to split
return None
# print("\tselecting area", time.time() - t0)
# t0 = time.time()
# removing unwanted masks
mask = np.array([[0 if pixel not in s else pixel for pixel in row] for row in mask])
# print("\tremoving unwanted mask opeation", time.time() - t0)
# t0 = time.time()
# Adding boundry mask
boundry = get_boundry_img_matrix(thresh, 1)
# print("\tgetting boundry", time.time() - t0)
# t0 = time.time()
mask = np.where(boundry == 1, 1, mask)
# print("\tadding boundry to mask opeation", time.time() - t0)
# t0 = time.time()
# display_mask('boundried mask', mask)
# using mask fill the mask values in boundry
mask = flood_filling(mask)
# print("\tflood filling opeation", time.time() - t0)
# t0 = time.time()
# display_mask('flood fill', mask)
# replace boundry by respective mask value
mask = boundry_fill(mask)
# print("\tfilling opeation", time.time() - t0)
# t0 = time.time()
# cv2.waitKey()
masks =[]
for ii in s:
img = get_mask_value_area(gray, mask, ii)
# b1 = get_boundry_img_matrix(img)
# b2 = get_boundry_img_matrix(get_mask_value_area(boundry, mask, i),bval=255)
# img = b1-b2
points = get_boundry_as_points(img)
img = get_boundry_img_matrix(img, bval=255)
# cv2.imshow("img %d" % (ii), img)
coff = elliptic_fourier_descriptors(points,order=5)
if coff is None:
print("Ellipsis not work")
return None
x, y = np.int0(efd(coff, contour_1=points, locus=np.mean(points, axis=0)))
coordinates = [(x[i], y[i]) for i in range(len(x))]
boundry = make_border(coordinates, img.shape, bval=255)
# cv2.imshow("border %d"%(ii), boundry)
mask1 = mask_by_border(boundry, ii)
# display_mask("mask %d" % (ii), mask1)
masks.append(mask1)
# print("\telliptical fitting operation", time.time() - t0,'\n')
# cv2.waitKey()
# cv2.destroyAllWindows()
return masks, rmcount |
py | 7df9974fcc4f1c245c27d76fedf58f97a001d470 | """``:help`` command implementation."""
from ansible_navigator.content_defs import ContentFormat
from ..action_base import ActionBase
from ..app_public import AppPublic
from ..configuration_subsystem import ApplicationConfiguration
from ..ui_framework import Interaction
from ..utils.compatibility import importlib_resources
from . import _actions as actions
@actions.register
class Action(ActionBase):
"""``:help`` command implementation."""
KEGEX = r"^h(?:elp)?$"
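    # A quick illustration (assumption: the leading ':' is stripped before matching): the
    # anchored pattern above accepts exactly 'h' or 'help', so 'helpme' would not trigger
    # this action.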
def __init__(self, args: ApplicationConfiguration):
"""Initialize the ``:help`` action.
:param args: The current settings for the application
"""
super().__init__(args=args, logger_name=__name__, name="help")
def run(self, interaction: Interaction, app: AppPublic) -> Interaction:
"""Execute the ``:help`` request.
:param interaction: The interaction from the user
:param app: The app instance
:returns: The pending :class:`~ansible_navigator.ui_framework.ui.Interaction`
"""
self._logger.debug("help requested")
self._prepare_to_run(app, interaction)
with importlib_resources.open_text("ansible_navigator.package_data", "help.md") as fh:
help_md = fh.read()
while True:
interaction = interaction.ui.show(
obj=help_md,
content_format=ContentFormat.MARKDOWN,
)
app.update()
if interaction.name != "refresh":
break
self._prepare_to_exit(interaction)
return interaction
|
py | 7df9975145e98fe8ab142f386bc0ca0f0460094a | from tabpy_server.app.app import TabPyApp
def main():
app = TabPyApp()
app.run()
if __name__ == '__main__':
main()
|
py | 7df9983b1f250e9e5aca0a6b47d13eeccede8d96 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Implements a source for reading Avro files."""
import cStringIO
import os
import zlib
import avro
from avro import datafile
from avro import io as avroio
from avro import schema
import apache_beam as beam
from apache_beam.io import filebasedsource
from apache_beam.io import filebasedsink
from apache_beam.io import iobase
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.iobase import Read
from apache_beam.transforms import PTransform
__all__ = ['ReadFromAvro', 'WriteToAvro']
class ReadFromAvro(PTransform):
"""A ``PTransform`` for reading avro files."""
def __init__(self, file_pattern=None, min_bundle_size=0, validate=True):
"""Initializes ``ReadFromAvro``.
Uses source '_AvroSource' to read a set of Avro files defined by a given
file pattern.
If '/mypath/myavrofiles*' is a file-pattern that points to a set of Avro
files, a ``PCollection`` for the records in these Avro files can be created
in the following manner.
p = df.Pipeline(argv=pipeline_args)
records = p | 'Read' >> df.io.ReadFromAvro('/mypath/myavrofiles*')
Each record of this ``PCollection`` will contain a single record read from a
source. Records that are of simple types will be mapped into corresponding
Python types. Records that are of Avro type 'RECORD' will be mapped to
Python dictionaries that comply with the schema contained in the Avro file
that contains those records. In this case, keys of each dictionary
will contain the corresponding field names and will be of type ``string``
while the values of the dictionary will be of the type defined in the
corresponding Avro schema.
For example, if schema of the Avro file is the following.
{"namespace": "example.avro","type": "record","name": "User","fields":
[{"name": "name", "type": "string"},
{"name": "favorite_number", "type": ["int", "null"]},
{"name": "favorite_color", "type": ["string", "null"]}]}
Then records generated by ``AvroSource`` will be dictionaries of the
following form.
{u'name': u'Alyssa', u'favorite_number': 256, u'favorite_color': None}).
Args:
file_pattern: the set of files to be read.
min_bundle_size: the minimum size in bytes, to be considered when
splitting the input into bundles.
validate: flag to verify that the files exist during the pipeline
creation time.
"""
super(ReadFromAvro, self).__init__()
self._source = _AvroSource(file_pattern, min_bundle_size, validate=validate)
def expand(self, pvalue):
return pvalue.pipeline | Read(self._source)
def display_data(self):
return {'source_dd': self._source}
class _AvroUtils(object):
@staticmethod
def read_meta_data_from_file(f):
"""Reads metadata from a given Avro file.
Args:
f: Avro file to read.
Returns:
a tuple containing the codec, schema, and the sync marker of the Avro
file.
Raises:
ValueError: if the file does not start with the byte sequence defined in
the specification.
"""
if f.tell() > 0:
f.seek(0)
decoder = avroio.BinaryDecoder(f)
header = avroio.DatumReader().read_data(datafile.META_SCHEMA,
datafile.META_SCHEMA, decoder)
if header.get('magic') != datafile.MAGIC:
      raise ValueError('Not an Avro file. File header should start with %s but '
                       'started with %s instead.'
                       % (datafile.MAGIC, header.get('magic')))
meta = header['meta']
if datafile.CODEC_KEY in meta:
codec = meta[datafile.CODEC_KEY]
else:
codec = 'null'
schema_string = meta[datafile.SCHEMA_KEY]
sync_marker = header['sync']
return codec, schema_string, sync_marker
@staticmethod
def read_block_from_file(f, codec, schema, expected_sync_marker):
"""Reads a block from a given Avro file.
Args:
f: Avro file to read.
codec: The codec to use for block-level decompression.
Supported codecs: 'null', 'deflate', 'snappy'
schema: Avro Schema definition represented as JSON string.
expected_sync_marker: Avro synchronization marker. If the block's sync
marker does not match with this parameter then ValueError is thrown.
Returns:
A single _AvroBlock.
Raises:
ValueError: If the block cannot be read properly because the file doesn't
match the specification.
"""
offset = f.tell()
decoder = avroio.BinaryDecoder(f)
num_records = decoder.read_long()
block_size = decoder.read_long()
block_bytes = decoder.read(block_size)
sync_marker = decoder.read(len(expected_sync_marker))
if sync_marker != expected_sync_marker:
      raise ValueError('Unexpected sync marker (actual "%s" vs expected "%s"). '
                       'Maybe the underlying avro file is corrupted?'
                       % (sync_marker, expected_sync_marker))
size = f.tell() - offset
return _AvroBlock(block_bytes, num_records, codec, schema, offset, size)
@staticmethod
def advance_file_past_next_sync_marker(f, sync_marker):
buf_size = 10000
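    # Worked illustration (hypothetical numbers): with a 16-byte sync marker found at
    # pos == 4000 of a full 10000-byte buffer, backtrack == 10000 - 4000 - 16 == 5984 and
    # the seek in the branch below rewinds the handle to the first byte *after* the marker.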
data = f.read(buf_size)
while data:
pos = data.find(sync_marker)
if pos >= 0:
# Adjusting the current position to the ending position of the sync
# marker.
backtrack = len(data) - pos - len(sync_marker)
f.seek(-1 * backtrack, os.SEEK_CUR)
return True
else:
if f.tell() >= len(sync_marker):
# Backtracking in case we partially read the sync marker during the
# previous read. We only have to backtrack if there are at least
# len(sync_marker) bytes before current position. We only have to
# backtrack (len(sync_marker) - 1) bytes.
f.seek(-1 * (len(sync_marker) - 1), os.SEEK_CUR)
data = f.read(buf_size)
class _AvroBlock(object):
"""Represents a block of an Avro file."""
def __init__(self, block_bytes, num_records, codec, schema_string,
offset, size):
# Decompress data early on (if needed) and thus decrease the number of
# parallel copies of the data in memory at any given in time during
# block iteration.
self._decompressed_block_bytes = self._decompress_bytes(block_bytes, codec)
self._num_records = num_records
self._schema = schema.parse(schema_string)
self._offset = offset
self._size = size
def size(self):
return self._size
def offset(self):
return self._offset
@staticmethod
def _decompress_bytes(data, codec):
if codec == 'null':
return data
elif codec == 'deflate':
# zlib.MAX_WBITS is the window size. '-' sign indicates that this is
# raw data (without headers). See zlib and Avro documentations for more
# details.
return zlib.decompress(data, -zlib.MAX_WBITS)
elif codec == 'snappy':
# Snappy is an optional avro codec.
# See Snappy and Avro documentation for more details.
try:
import snappy
except ImportError:
raise ValueError('Snappy does not seem to be installed.')
# Compressed data includes a 4-byte CRC32 checksum which we verify.
# We take care to avoid extra copies of data while slicing large objects
# by use of a buffer.
result = snappy.decompress(buffer(data)[:-4])
avroio.BinaryDecoder(cStringIO.StringIO(data[-4:])).check_crc32(result)
return result
else:
raise ValueError('Unknown codec: %r', codec)
def num_records(self):
return self._num_records
def records(self):
decoder = avroio.BinaryDecoder(
cStringIO.StringIO(self._decompressed_block_bytes))
reader = avroio.DatumReader(
writers_schema=self._schema, readers_schema=self._schema)
current_record = 0
while current_record < self._num_records:
yield reader.read(decoder)
current_record += 1
class _AvroSource(filebasedsource.FileBasedSource):
"""A source for reading Avro files.
``_AvroSource`` is implemented using the file-based source framework available
in module 'filebasedsource'. Hence please refer to module 'filebasedsource'
to fully understand how this source implements operations common to all
file-based sources such as file-pattern expansion and splitting into bundles
for parallel processing.
"""
def read_records(self, file_name, range_tracker):
next_block_start = -1
def split_points_unclaimed(stop_position):
if next_block_start >= stop_position:
# Next block starts at or after the suggested stop position. Hence
# there will not be split points to be claimed for the range ending at
# suggested stop position.
return 0
return iobase.RangeTracker.SPLIT_POINTS_UNKNOWN
range_tracker.set_split_points_unclaimed_callback(split_points_unclaimed)
start_offset = range_tracker.start_position()
if start_offset is None:
start_offset = 0
with self.open_file(file_name) as f:
codec, schema_string, sync_marker = _AvroUtils.read_meta_data_from_file(
f)
# We have to start at current position if previous bundle ended at the
# end of a sync marker.
start_offset = max(0, start_offset - len(sync_marker))
f.seek(start_offset)
_AvroUtils.advance_file_past_next_sync_marker(f, sync_marker)
while range_tracker.try_claim(f.tell()):
block = _AvroUtils.read_block_from_file(f, codec, schema_string,
sync_marker)
next_block_start = block.offset() + block.size()
for record in block.records():
yield record
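# A hedged usage sketch (the file pattern and pipeline object are assumptions,
# not from this module): the source can be read with the generic Read
# transform, e.g.
#   records = p | beam.io.Read(_AvroSource('/tmp/users*.avro'))
# Typical pipelines would use the module's public read transform instead.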
class WriteToAvro(beam.transforms.PTransform):
"""A ``PTransform`` for writing avro files."""
def __init__(self,
file_path_prefix,
schema,
codec='deflate',
file_name_suffix='',
num_shards=0,
shard_name_template=None,
mime_type='application/x-avro'):
"""Initialize a WriteToAvro transform.
Args:
file_path_prefix: The file path to write to. The files written will begin
with this prefix, followed by a shard identifier (see num_shards), and
end in a common extension, if given by file_name_suffix. In most cases,
only this argument is specified and num_shards, shard_name_template, and
file_name_suffix use default values.
schema: The schema to use, as returned by avro.schema.parse
codec: The codec to use for block-level compression. Any string supported
by the Avro specification is accepted (for example 'null').
file_name_suffix: Suffix for the files written.
num_shards: The number of files (shards) used for output. If not set, the
service will decide on the optimal number of shards.
Constraining the number of shards is likely to reduce
the performance of a pipeline. Setting this value is not recommended
unless you require a specific number of output files.
shard_name_template: A template string containing placeholders for
the shard number and shard count. When constructing a filename for a
particular shard number, the upper-case letters 'S' and 'N' are
replaced with the 0-padded shard number and shard count respectively.
This argument can be '' in which case it behaves as if num_shards was
set to 1 and only one file will be generated. The default pattern used
is '-SSSSS-of-NNNNN' if None is passed as the shard_name_template.
mime_type: The MIME type to use for the produced files, if the filesystem
supports specifying MIME types.
Returns:
A WriteToAvro transform usable for writing.
"""
self._sink = _AvroSink(file_path_prefix, schema, codec, file_name_suffix,
num_shards, shard_name_template, mime_type)
def expand(self, pcoll):
return pcoll | beam.io.iobase.Write(self._sink)
def display_data(self):
return {'sink_dd': self._sink}
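# A minimal usage sketch of the WriteToAvro transform documented above. This
# helper is not part of the original module; the labels, element values and
# output prefix are illustrative assumptions.
def _example_write_to_avro(pipeline, schema):
  return (pipeline
          | 'create_users' >> beam.Create([{'name': 'alice'}, {'name': 'bob'}])
          | 'write_users' >> WriteToAvro('/tmp/users', schema,
                                         codec='deflate',
                                         file_name_suffix='.avro'))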
class _AvroSink(filebasedsink.FileBasedSink):
"""A sink to avro files."""
def __init__(self,
file_path_prefix,
schema,
codec,
file_name_suffix,
num_shards,
shard_name_template,
mime_type):
super(_AvroSink, self).__init__(
file_path_prefix,
file_name_suffix=file_name_suffix,
num_shards=num_shards,
shard_name_template=shard_name_template,
coder=None,
mime_type=mime_type,
# Compression happens at the block level using the supplied codec, and
# not at the file level.
compression_type=CompressionTypes.UNCOMPRESSED)
self._schema = schema
self._codec = codec
def open(self, temp_path):
file_handle = super(_AvroSink, self).open(temp_path)
return avro.datafile.DataFileWriter(
file_handle, avro.io.DatumWriter(), self._schema, self._codec)
def write_record(self, writer, value):
writer.append(value)
def display_data(self):
res = super(self.__class__, self).display_data()
res['codec'] = str(self._codec)
res['schema'] = str(self._schema)
return res
|
py | 7df9989045c6a21df563df59f78374dc39445e32 | import numpy as np
import openmdao.api as om
from ...transcriptions.grid_data import GridData
from ...utils.lgl import lgl
class VandermondeControlInterpComp(om.ExplicitComponent):
"""
A component which interpolates control values in 1D using Vandermonde interpolation.
Takes training values for control variables at given _input_ nodes,
    broadcasts them to _discretization_ nodes, and then interpolates the discretization values
to provide a control variable at a given segment tau or phase tau.
For dynamic controls, the current segment is given as a discrete input and the interpolation is
a smooth polynomial along the given segment.
OpenMDAO assumes sizes of variables at setup time, and we don't want to need to change the
size of the control input nodes when we evaluate different segments. Instead, this component
will take in the control values of all segments and internally use the appropriate one.
Parameters
----------
grid_data : GridData
A GridData instance that details information on how the control input and discretization
        nodes are laid out.
control_options : dict of {str: ControlOptionsDictionary}
A mapping that maps the name of each control to a ControlOptionsDictionary of its options.
polynomial_control_options : dict of {str: PolynomialControlOptionsDictionary}
A mapping that maps the name of each polynomial control to an OptionsDictionary of its options.
time_units : str
The time units pertaining to the control rates.
standalone_mode : bool
If True, this component runs its configuration steps during setup. This is useful for
unittests in which the component does not exist in a larger group.
**kwargs
Keyword arguments passed to ExplicitComponent.
"""
def __init__(self, grid_data, control_options=None, polynomial_control_options=None,
time_units=None, standalone_mode=False, **kwargs):
self._grid_data = grid_data
self._control_options = {} if control_options is None else control_options
self._polynomial_control_options = {} if polynomial_control_options is None else polynomial_control_options
self._time_units = time_units
self._standalone_mode = standalone_mode
# Storage for the Vandermonde matrix and its inverse for each control
self._V_hat = {}
self._V_hat_inv = {}
# Storage for factors used in the derivatives of Vandermonde matrices.
self._fac = {}
# Cache formatted strings: { control_name : (input_name, output_name) }
self._control_io_names = {}
# The Lagrange interpolation matrix L_id maps control values as given at the input nodes
# to values at the discretization nodes.
num_disc_nodes = grid_data.subset_num_nodes['control_disc']
num_input_nodes = grid_data.subset_num_nodes['control_input']
self._L_id = np.zeros((num_disc_nodes, num_input_nodes), dtype=float)
self._L_id[np.arange(num_disc_nodes, dtype=int),
self._grid_data.input_maps['dynamic_control_input_to_disc']] = 1.0
super().__init__(**kwargs)
def initialize(self):
"""
Declare component options.
"""
self.options.declare('segment_index', types=int, desc='index of the current segment')
self.options.declare('vec_size', types=int, default=1,
                             desc='number of points at which the control will be evaluated. This is not '
'necessarily the same as the number of nodes in the GridData.')
def _configure_controls(self):
vec_size = self.options['vec_size']
gd = self._grid_data
self._V_hat = {}
self._V_hat_inv = {}
self._disc_node_idxs_by_segment = []
self._input_node_idxs_by_segment = []
if not self._control_options:
return
first_disc_node_in_seg = 0
for seg_idx in range(gd.num_segments):
# Number of control discretization nodes per segment
ncdnps = gd.subset_num_nodes_per_segment['control_disc'][seg_idx]
ar_control_disc_nodes = np.arange(ncdnps, dtype=int)
disc_idxs_in_seg = first_disc_node_in_seg + ar_control_disc_nodes
first_disc_node_in_seg += ncdnps
# The indices of the discretization node u vector pertaining to the given segment
self._disc_node_idxs_by_segment.append(disc_idxs_in_seg)
# The indices of the input u vector pertaining to the given segment
self._input_node_idxs_by_segment.append(gd.input_maps['dynamic_control_input_to_disc'][disc_idxs_in_seg])
# Indices of the control disc nodes belonging to the current segment
control_disc_seg_idxs = gd.subset_segment_indices['control_disc'][seg_idx]
# Segment tau values for the control disc nodes in the phase
control_disc_stau = gd.node_stau[gd.subset_node_indices['control_disc']]
# Segment tau values for the control disc nodes in the given segment
control_disc_seg_stau = control_disc_stau[control_disc_seg_idxs[0]:
control_disc_seg_idxs[1]]
seg_control_order = gd.transcription_order[seg_idx] - 1
if seg_control_order not in self._V_hat:
self._V_hat[seg_control_order] = np.vander(control_disc_seg_stau, increasing=True)
self._V_hat_inv[seg_control_order] = np.linalg.inv(self._V_hat[seg_control_order])
if seg_control_order + 1 not in self._fac:
self._fac[seg_control_order + 1] = np.arange(seg_control_order + 1, dtype=int)
num_uhat_nodes = gd.subset_num_nodes['control_input']
ar = np.arange(vec_size, dtype=int)
for control_name, options in self._control_options.items():
shape = options['shape']
units = options['units']
input_name = f'controls:{control_name}'
output_name = f'control_values:{control_name}'
rate_name = f'control_rates:{control_name}_rate'
rate2_name = f'control_rates:{control_name}_rate2'
uhat_shape = (num_uhat_nodes,) + shape
output_shape = (vec_size,) + shape
self.add_input(input_name, shape=uhat_shape, units=units)
self.add_output(output_name, shape=output_shape, units=units)
self.add_output(rate_name, shape=output_shape, units=units)
self.add_output(rate2_name, shape=output_shape, units=units)
self._control_io_names[control_name] = (input_name, output_name, rate_name, rate2_name)
self.declare_partials(of=output_name, wrt=input_name)
self.declare_partials(of=output_name, wrt='stau', rows=ar, cols=ar)
self.declare_partials(of=rate_name, wrt=input_name)
self.declare_partials(of=rate_name, wrt='stau', rows=ar, cols=ar)
self.declare_partials(of=rate_name, wrt='dstau_dt')
self.declare_partials(of=rate2_name, wrt=input_name)
self.declare_partials(of=rate2_name, wrt='stau', rows=ar, cols=ar)
self.declare_partials(of=rate2_name, wrt='dstau_dt')
def _configure_polynomial_controls(self):
vec_size = self.options['vec_size']
ar = np.arange(vec_size, dtype=int)
for pc_name, options in self._polynomial_control_options.items():
order = options['order']
shape = options['shape']
units = options['units']
input_name = f'polynomial_controls:{pc_name}'
output_name = f'polynomial_control_values:{pc_name}'
rate_name = f'polynomial_control_rates:{pc_name}_rate'
rate2_name = f'polynomial_control_rates:{pc_name}_rate2'
input_shape = (order + 1,) + shape
output_shape = (vec_size,) + shape
self.add_input(input_name, shape=input_shape, units=units)
self.add_output(output_name, shape=output_shape, units=units)
self.add_output(rate_name, shape=output_shape, units=units)
self.add_output(rate2_name, shape=output_shape, units=units)
self._control_io_names[pc_name] = (input_name, output_name, rate_name, rate2_name)
self.declare_partials(of=output_name, wrt=input_name)
self.declare_partials(of=output_name, wrt='ptau', rows=ar, cols=ar)
self.declare_partials(of=rate_name, wrt=input_name)
self.declare_partials(of=rate_name, wrt='ptau', rows=ar, cols=ar)
self.declare_partials(of=rate_name, wrt='t_duration')
self.declare_partials(of=rate2_name, wrt=input_name)
self.declare_partials(of=rate2_name, wrt='ptau', rows=ar, cols=ar)
self.declare_partials(of=rate2_name, wrt='t_duration')
if order not in self._V_hat:
pc_disc_seg_ptau, _ = lgl(order + 1)
self._V_hat[order] = np.vander(pc_disc_seg_ptau, increasing=True)
self._V_hat_inv[order] = np.linalg.inv(self._V_hat[order])
if order + 1 not in self._fac:
self._fac[order + 1] = np.arange(order + 1, dtype=int)
def setup(self):
"""
Perform the I/O creation if operating in _standalone_mode.
"""
if self._standalone_mode:
self.configure_io()
def configure_io(self):
"""
I/O creation is delayed until configure so we can determine shape and units for the controls.
"""
vec_size = self.options['vec_size']
self._V_hat = {}
self._V_hat_inv = {}
# self.add_discrete_input('segment_index', val=0, desc='index of the segment')
self.add_input('stau', shape=(vec_size,), units=None)
self.add_input('dstau_dt', val=1.0, units=f'1/{self._time_units}')
self.add_input('t_duration', val=1.0, units=self._time_units)
self.add_input('ptau', shape=(vec_size,), units=None)
self._configure_controls()
self._configure_polynomial_controls()
def _dvander(self, v):
"""
Return the derivative of a Vandermonde matrix wrt the independent variable _in increasing order_.
Parameters
----------
v : np.array
The Vandermonde matrix for which the derivatives are requested.
Returns
-------
        dv : np.array
The derivative of v with respect to the independent variable.
dv2 : np.array
The second derivative of v wrt the independent variable.
dv3 : np.array
The third derivative of v wrt the independent variable.
"""
p, n = v.shape
dv = np.zeros_like(v)
dv2 = dv.copy()
dv3 = dv.copy()
dv[:, 1:] = v[:, :-1]
dv2[:, 2:] = v[:, :-2]
dv3[:, 3:] = v[:, :-3]
fac = self._fac[n]
fac2 = fac[:-1]
fac3 = fac[:-2]
dv[:, :] = dv * fac[np.newaxis, :]
dv2[:, 1:] = dv2[:, 1:] * fac2[np.newaxis, :] * fac[np.newaxis, 1:]
dv3[:, 2:] = dv3[:, 2:] * fac3[np.newaxis, :] * fac2[np.newaxis, 1:] * fac[np.newaxis, 2:]
return dv, dv2, dv3
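    # Illustration of _dvander (added annotation, not part of dymos): for a
    # single point t with n = 3 columns, v = [[1, t, t**2]], so the returned
    # matrices are dv = [[0, 1, 2*t]], dv2 = [[0, 0, 2]] and dv3 = [[0, 0, 0]],
    # i.e. the first, second and third derivatives of the monomial basis
    # evaluated at t.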
def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):
"""
Compute interpolated control values and rates.
Parameters
----------
inputs : `Vector`
`Vector` containing inputs.
outputs : `Vector`
`Vector` containing outputs.
discrete_inputs : `Vector`
`Vector` containing discrete_inputs.
discrete_outputs : `Vector`
`Vector` containing discrete_outputs.
"""
seg_idx = self.options['segment_index']
n = self._grid_data.transcription_order[seg_idx]
stau = inputs['stau']
dstau_dt = inputs['dstau_dt']
ptau = inputs['ptau']
dptau_dt = 2 / inputs['t_duration']
if self._control_options:
seg_order = n - 1
disc_node_idxs = self._disc_node_idxs_by_segment[seg_idx]
input_node_idxs = self._input_node_idxs_by_segment[seg_idx]
V_stau = np.vander(stau, N=n, increasing=True)
dV_stau, dV2_stau, _ = self._dvander(V_stau)
L_seg = self._L_id[disc_node_idxs[0]:disc_node_idxs[0] + len(disc_node_idxs),
input_node_idxs[0]:input_node_idxs[0] + len(input_node_idxs)]
for control_name, options in self._control_options.items():
input_name, output_name, rate_name, rate2_name = self._control_io_names[control_name]
u_hat = np.dot(L_seg, inputs[input_name][input_node_idxs])
a = np.atleast_2d(self._V_hat_inv[seg_order] @ u_hat)
outputs[output_name] = V_stau @ a
outputs[rate_name] = dstau_dt * (dV_stau @ a)
outputs[rate2_name] = dstau_dt**2 * (dV2_stau @ a)
for pc_name, options in self._polynomial_control_options.items():
input_name, output_name, rate_name, rate2_name = self._control_io_names[pc_name]
order = options['order']
V_ptau = np.vander(ptau, N=order+1, increasing=True)
dV_ptau, dV2_ptau, _ = self._dvander(V_ptau)
a = np.atleast_2d(self._V_hat_inv[order] @ inputs[input_name])
outputs[output_name] = V_ptau @ a
outputs[rate_name] = dptau_dt * (dV_ptau @ a)
outputs[rate2_name] = dptau_dt**2 * (dV2_ptau @ a)
def compute_partials(self, inputs, partials, discrete_inputs=None):
"""
        Compute derivatives of interpolated control values and rates wrt the inputs.
Parameters
----------
inputs : Vector
Unscaled, dimensional input variables read via inputs[key].
partials : Jacobian
Subjac components written to partials[output_name, input_name].
discrete_inputs : Vector
Unscaled, discrete input variables keyed by variable name.
"""
seg_idx = self.options['segment_index']
n = self._grid_data.transcription_order[seg_idx]
stau = inputs['stau'].real
dstau_dt = inputs['dstau_dt'].real
ptau = inputs['ptau'].real
t_duration = inputs['t_duration'].real
dptau_dt = 2.0 / t_duration
ddptau_dt_dtduration = -2.0 / t_duration**2
if self._control_options:
u_idxs = self._input_node_idxs_by_segment[seg_idx]
seg_order = self._grid_data.transcription_order[seg_idx] - 1
V_stau = np.vander(stau, N=n, increasing=True)
dV_stau, dV2_stau, dV3_stau = self._dvander(V_stau)
disc_node_idxs = self._disc_node_idxs_by_segment[seg_idx]
input_node_idxs = self._input_node_idxs_by_segment[seg_idx]
L_seg = self._L_id[disc_node_idxs[0]:disc_node_idxs[0] + len(disc_node_idxs),
input_node_idxs[0]:input_node_idxs[0] + len(input_node_idxs)]
for control_name, options in self._control_options.items():
input_name, output_name, rate_name, rate2_name = self._control_io_names[control_name]
u_hat = np.dot(L_seg, inputs[input_name][input_node_idxs].real)
a = self._V_hat_inv[seg_order] @ u_hat
da_duhat = self._V_hat_inv[seg_order] @ L_seg
dV_a = dV_stau @ a
dV2_a = dV2_stau @ a
dV3_a = dV3_stau @ a
partials[output_name, input_name][...] = 0.0
partials[output_name, input_name][..., u_idxs] = V_stau @ da_duhat
partials[output_name, 'stau'] = dV_a.ravel()
pudot_pa = dstau_dt * dV_stau
pa_puhat = self._V_hat_inv[seg_order]
partials[rate_name, input_name][...] = 0.0
partials[rate_name, input_name][..., u_idxs] = pudot_pa @ pa_puhat
partials[rate_name, 'dstau_dt'][...] = dV_a
partials[rate_name, 'stau'][...] = dV2_a.ravel()
pu2dot_pa = dstau_dt**2 * dV2_stau
partials[rate2_name, input_name][...] = 0.0
partials[rate2_name, input_name][..., u_idxs] = pu2dot_pa @ pa_puhat
partials[rate2_name, 'dstau_dt'][...] = 2 * dstau_dt * dV2_a
partials[rate2_name, 'stau'][...] = dV3_a.ravel()
for pc_name, options in self._polynomial_control_options.items():
input_name, output_name, rate_name, rate2_name = self._control_io_names[pc_name]
order = options['order']
V_ptau = np.vander(ptau, N=order+1, increasing=True)
dV_ptau, dV2_ptau, dV3_ptau = self._dvander(V_ptau)
u_hat = inputs[input_name].real
a = self._V_hat_inv[order] @ u_hat
dV_a = dV_ptau @ a
dV2_a = dV2_ptau @ a
dV3_a = dV3_ptau @ a
da_duhat = self._V_hat_inv[order]
partials[output_name, input_name][...] = V_ptau @ da_duhat
partials[output_name, 'ptau'][...] = dV_a.ravel()
pudot_pa = dptau_dt * dV_ptau
pa_puhat = self._V_hat_inv[order]
partials[rate_name, input_name][...] = pudot_pa @ pa_puhat
partials[rate_name, 't_duration'][...] = ddptau_dt_dtduration * dV_a
partials[rate_name, 'ptau'][...] = dptau_dt * dV2_a.ravel()
pu2dot_pa = dptau_dt ** 2 * dV2_ptau
partials[rate2_name, input_name][...] = pu2dot_pa @ pa_puhat
partials[rate2_name, 't_duration'][...] = 2 * dptau_dt * ddptau_dt_dtduration * dV2_a
partials[rate2_name, 'ptau'][...] = dptau_dt**2 * dV3_a.ravel()
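# A minimal standalone sketch (not part of dymos) of the Vandermonde
# interpolation used by VandermondeControlInterpComp above: solve for monomial
# coefficients from control values at the segment-tau nodes, then evaluate the
# polynomial at arbitrary points. The argument names are assumptions.
def _example_vandermonde_interp(node_stau, u_hat, stau_eval):
    V_hat = np.vander(node_stau, increasing=True)
    a = np.linalg.solve(V_hat, u_hat)  # polynomial coefficients
    V_eval = np.vander(stau_eval, N=len(node_stau), increasing=True)
    return V_eval @ a  # interpolated control values at stau_eval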
|
py | 7df998cfb7a463fdb2be86bef3465225561101ac | import os, sys, numpy as np, ast
import init_paths
import load_models
from lib.utils import benchmark_utils, util
import tensorflow as tf
import cv2, time, scipy, scipy.misc as scm, sklearn.cluster, skimage.io as skio, numpy as np, argparse
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN
def mean_shift(points_, heat_map, iters=5):
points = np.copy(points_)
kdt = scipy.spatial.cKDTree(points)
eps_5 = np.percentile(scipy.spatial.distance.cdist(points, points, metric='euclidean'), 10)
for epis in range(iters):
for point_ind in range(points.shape[0]):
point = points[point_ind]
nearest_inds = kdt.query_ball_point(point, r=eps_5)
points[point_ind] = np.mean(points[nearest_inds], axis=0)
val = []
for i in range(points.shape[0]):
val.append(kdt.count_neighbors(scipy.spatial.cKDTree(np.array([points[i]])), r=eps_5))
mode_ind = np.argmax(val)
ind = np.nonzero(val == np.max(val))
return np.mean(points[ind[0]], axis=0).reshape(heat_map.shape[0], heat_map.shape[1])
def centroid_mode(heat_map):
eps_thresh = np.percentile(heat_map, 10)
k = heat_map <= eps_thresh
    # Gets the max centroid
num_affinities = np.sum(k, axis=(2, 3))
x = np.nonzero(num_affinities >= np.max(num_affinities))
if type(x) is tuple:
ind1 = x[0][0]
ind2 = x[1][0]
else:
ind1 = x[0]
ind2 = x[1]
assert np.max(num_affinities) == num_affinities[ind1, ind2]
return heat_map[ind1, ind2]
def normalized_cut(res):
sc = sklearn.cluster.SpectralClustering(n_clusters=2, n_jobs=-1,
affinity="precomputed")
out = sc.fit_predict(res.reshape((res.shape[0] * res.shape[1], -1)))
vis = out.reshape((res.shape[0], res.shape[1]))
return vis
def process_response_no_resize(response):
return 255 * plt.cm.jet(response)[:,:,:3]
def process_response(response):
size = get_resized_shape(response)
im = 255 * plt.cm.jet(response)[:,:,:3]
return scm.imresize(im, size)# , interp='nearest')
def get_resized_shape(im, max_im_dim=400):
ratio = float(max_im_dim) / np.max(im.shape)
return (int(im.shape[0] * ratio), int(im.shape[1] * ratio), 3)
def process_image(im):
size = get_resized_shape(im)
return scm.imresize(im, size) #, interp='nearest')
def norm(response):
res = response - np.min(response)
return res/np.max(res)
def apply_mask(im, mask):
mask = scipy.misc.imresize(mask, (im.shape[0], im.shape[1])) / 255.
mask = mask.reshape(im.shape[0], im.shape[1], 1)
mask = mask * 0.8 + 0.2
return mask * im
def aff_fn(v1, v2):
return np.mean((v1 * v2 + (1 - v1)*(1 - v2)))
def ssd_distance(results, with_inverse=True):
def ssd(x, y):
# uses mean instead
return np.mean(np.square(x - y))
results = np.array(results)
results = np.concatenate([results, 1.0 - results], axis=0)
dist_matrix = np.zeros((len(results), len(results)))
for i, r_x in enumerate(results):
for j, r_y in enumerate(results):
score = ssd(r_x, r_y)
dist_matrix[i][j] = score
return dist_matrix, results
def dbscan_consensus(results, eps_range=(0.1, 0.5), eps_sample=10, dbscan_sample=4):
"""
    Sweeps the DBSCAN epsilon over a range and clusters the responses, using
    the pairwise SSD between responses as the distance.
    The best prediction is the cluster with the lowest spread, where spread is
    the average per-pixel standard deviation within the cluster; the cluster
    is then combined using its per-pixel median.
    When no cluster is found, returns the response with the smallest median
    distance to the other responses.
"""
dist_matrix, results = ssd_distance(results, with_inverse=True)
debug = False #True
lowest_spread = 100.0
best_pred = None
for eps in np.linspace(eps_range[0], eps_range[1], eps_sample):
db = DBSCAN(eps=eps, min_samples=dbscan_sample).fit(dist_matrix)
labels = set(db.labels_)
if debug:
print('DBSCAN with epsilon %.3f' % eps)
print('Found %i labels' % len(labels))
try:
labels.remove(-1)
except:
pass
if debug:
print('%i Unique cluster' % len(labels))
labels = np.array(list(labels))
if len(labels) < 2:
if debug:
print('Not enough cluster found')
continue
clusters = {l:np.argwhere(db.labels_ == l) for l in labels}
cluster_spreads = {}
cluster_preds = {}
for lbl, cluster_indices in clusters.items():
if debug:
print('Cluster %i with %i samples' % (lbl, len(cluster_indices)))
cluster_indices = np.squeeze(cluster_indices)
cluster_results = [results[i] for i in cluster_indices]
#mean_result = np.mean(cluster_results, axis=0)
median_result = np.median(cluster_results, axis=0)
# Average Per pixel deviation
average_spread = np.mean(np.std(cluster_results, axis=0))
cluster_spreads[lbl] = average_spread
cluster_preds[lbl] = median_result
#print average_spread
if average_spread < lowest_spread:
lowest_spread = average_spread
best_pred = median_result
best_lbl, avg_spread = util.sort_dict(cluster_spreads)[0]
if debug:
print('Cluster spread %.3f' % avg_spread)
plt.imshow(cluster_preds[best_lbl], cmap='jet', vmin=0.0, vmax=1.0)
plt.show()
if best_pred is None:
# Uses a sample that has the median minimum distance between all predicted sample
print('Failed to find DBSCAN cluster')
compact_dist_matrix = dist_matrix[:len(dist_matrix)//2, :len(dist_matrix)//2]
avg_dist = np.median(compact_dist_matrix, axis=0)
best_pred = results[np.argmin(avg_dist)]
if debug:
plt.figure()
plt.imshow(best_pred, cmap='jet', vmin=0.0, vmax=1.0)
return best_pred, lowest_spread
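# Hedged usage sketch for dbscan_consensus (shapes are assumptions): given a
# list of response maps, each an HxW float array in [0, 1],
#   consensus, spread = dbscan_consensus([resp_0, resp_1, resp_2])
# returns a single HxW consensus map and the spread of the selected cluster
# (100.0 when no cluster was found).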
def run_vote_no_threads(image, solver, exif_to_use, n_anchors=1, num_per_dim=None,
patch_size=None, batch_size=None, sample_ratio=3.0, override_anchor=False):
"""
solver: exif_solver module. Must be initialized and have a network connected.
    exif_to_use: list of EXIF attributes to extract responses for. If None,
      the result is taken from the classification output cls_pred.
n_anchors: number of anchors to use.
num_per_dim: number of patches to use along the largest dimension.
patch_size: size of the patch. If None, uses the one specified in solver.net
batch_size: size of the batch. If None, uses the one specified in solver.net
sample_ratio: The ratio of overlap between patches. num_per_dim must be None
to be useful.
"""
h, w = np.shape(image)[:2]
if patch_size is None:
patch_size = solver.net.im_size
if batch_size is None:
batch_size = solver.net.batch_size
if num_per_dim is None:
num_per_dim = int(np.ceil(sample_ratio * (max(h,w)/float(patch_size))))
if exif_to_use is None:
not_exif = True
exif_to_use = ['out']
else:
not_exif = False
exif_map = {e: np.squeeze(np.argwhere(np.array(solver.net.train_runner.tags) == e)) for e in exif_to_use}
responses = {e:np.zeros((n_anchors, h, w)) for e in exif_to_use}
vote_counts = {e:1e-6 * np.ones((n_anchors, h, w)) for e in exif_to_use}
if np.min(image) < 0.0:
# already preprocessed
processed_image = image
else:
processed_image = util.process_im(image)
ones = np.ones((patch_size, patch_size))
anchor_indices = []
# select n anchors
for anchor_idx in range(n_anchors):
if override_anchor is False:
_h, _w = np.random.randint(0, h - patch_size), np.random.randint(0, w - patch_size)
else:
assert len(override_anchor) == 2, override_anchor
_h, _w = override_anchor
anchor_indices.append((_h, _w))
anchor_patch = processed_image[_h:_h+patch_size, _w:_w+patch_size, :]
batch_a = np.tile([anchor_patch], [batch_size, 1, 1, 1])
batch_b, batch_b_coord = [], []
prev_batch = None
for i in np.linspace(0, h - patch_size, num_per_dim).astype(int):
for j in np.linspace(0, w - patch_size, num_per_dim).astype(int):
compare_patch = processed_image[i:i+patch_size, j:j+patch_size]
batch_b.append(compare_patch)
batch_b_coord.append((i,j))
if len(batch_b) == batch_size:
if not_exif:
pred = solver.sess.run(solver.net.cls_pred,
feed_dict={solver.net.im_a:batch_a,
solver.net.im_b:batch_b,
solver.net.is_training:False})
else:
pred = solver.sess.run(solver.net.pred,
feed_dict={solver.net.im_a:batch_a,
solver.net.im_b:batch_b,
solver.net.is_training:False})
for p_vec, (_i, _j) in zip(pred, batch_b_coord):
for e in exif_to_use:
if not_exif:
p = p_vec[0]
else:
p = p_vec[int(exif_map[e])]
responses[e][anchor_idx, _i:_i+patch_size, _j:_j+patch_size] += (p * ones)
vote_counts[e][anchor_idx, _i:_i+patch_size, _j:_j+patch_size] += ones
prev_batch = batch_b
batch_b, batch_b_coord = [], []
if len(batch_b) > 0:
batch_b_len = len(batch_b)
to_pad = np.array(prev_batch)[:batch_size - batch_b_len]
batch_b = np.concatenate([batch_b, to_pad], axis=0)
if not_exif:
pred = solver.sess.run(solver.net.cls_pred,
feed_dict={solver.net.im_a:batch_a,
solver.net.im_b:batch_b,
solver.net.is_training:False})
else:
pred = solver.sess.run(solver.net.pred,
feed_dict={solver.net.im_a:batch_a,
solver.net.im_b:batch_b,
solver.net.is_training:False})
for p_vec, (_i, _j) in zip(pred, batch_b_coord):
for e in exif_to_use:
if not_exif:
p = p_vec[0]
else:
p = p_vec[int(exif_map[e])]
responses[e][anchor_idx, _i:_i+patch_size, _j:_j+patch_size] += (p * ones)
vote_counts[e][anchor_idx, _i:_i+patch_size, _j:_j+patch_size] += ones
return {e: {'responses':(responses[e] / vote_counts[e]), 'anchors':anchor_indices} for e in exif_to_use}
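# Hedged usage sketch for run_vote_no_threads (sizes are assumptions): with
# exif_to_use=None the classification output is used and keyed under 'out',
# mirroring how Demo.run_vote calls it below, e.g.
#   res = run_vote_no_threads(image, solver, None, n_anchors=1,
#                             patch_size=128, batch_size=64)
#   heat_map = res['out']['responses'][0]  # HxW averaged vote map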
class Demo():
def __init__(self, ckpt_path='/data/scratch/minyoungg/ckpt/exif_medifor/exif_medifor.ckpt', use_gpu=0,
quality=3.0, patch_size=128, num_per_dim=30):
self.quality = quality # sample ratio
self.solver, nc, params = load_models.initialize_exif(ckpt=ckpt_path, init=False, use_gpu=use_gpu)
params["im_size"] = patch_size
self.im_size = patch_size
tf.reset_default_graph()
im = np.zeros((256, 256, 3))
self.bu = benchmark_utils.EfficientBenchmark(self.solver, nc, params, im, auto_close_sess=False,
mirror_pred=False, dense_compute=False, stride=None, n_anchors=10,
patch_size=patch_size, num_per_dim=num_per_dim)
return
def run(self, im, gt=None, show=False, save=False,
blue_high=False, use_ncuts=False):
# run for every new image
self.bu.reset_image(im)
res = self.bu.precomputed_analysis_vote_cls(num_fts=4096)
#print('result shape', np.shape(res))
ms = mean_shift(res.reshape((-1, res.shape[0] * res.shape[1])), res)
if np.mean(ms > .5) > .5:
# majority of the image is above .5
if blue_high:
ms = 1 - ms
if use_ncuts:
ncuts = normalized_cut(res)
if np.mean(ncuts > .5) > .5:
# majority of the image is white
# flip so spliced is white
ncuts = 1 - ncuts
out_ncuts = cv2.resize(ncuts.astype(np.float32), (im.shape[1], im.shape[0]),
interpolation=cv2.INTER_LINEAR)
out_ms = cv2.resize(ms, (im.shape[1], im.shape[0]), interpolation=cv2.INTER_LINEAR)
if use_ncuts:
return out_ms, out_ncuts
return out_ms
def run_vote(self, im, num_per_dim=3, patch_size=128):
h,w = np.shape(im)[:2]
all_results = []
for hSt in np.linspace(0, h - patch_size, num_per_dim).astype(int):
for wSt in np.linspace(0, w - patch_size, num_per_dim).astype(int):
res = run_vote_no_threads(im, self.solver, None, n_anchors=1, num_per_dim=None,
patch_size=128, batch_size=96, sample_ratio=self.quality,
override_anchor=(hSt, wSt))['out']['responses'][0]
all_results.append(res)
return dbscan_consensus(all_results)
def __call__(self, url, dense=False):
"""
@Args
url: This can either be a web-url or directory
dense: If False, runs the new DBSCAN clustering.
Using dense will be low-res and low-variance.
@Returns
output of the clustered response
"""
if type(url) is not str:
im = url
else:
if url.startswith('http'):
im = util.get(url)
else:
im = cv2.imread(url)[:,:,[2,1,0]]
#print('Image shape:', np.shape(im))
assert min(np.shape(im)[:2]) > self.im_size, 'image dimension too small'
if not dense:
# Runs default dense clustering
out, _ = self.run_vote(im, num_per_dim=3, patch_size=self.im_size)
else:
# Runs DBSCAN
out = self.run(im)
return im, out
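# Hedged usage sketch (the paths mirror the __main__ block below and are
# otherwise assumptions):
#   exif_demo = Demo(ckpt_path='./ckpt/exif_final/exif_final.ckpt', use_gpu=0)
#   im, response = exif_demo('./images/demo.png', dense=False)
#   # `response` is an HxW map localizing the suspected spliced region.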
import scipy.misc
from PIL import Image
import imageio
if __name__ == '__main__':
plt.switch_backend('agg')
import glob
import os
im_path_results=glob.glob("./fake_images_result/*.png")
import re
for j in range (0, len(im_path_results)):
im_path_results[j] = re.sub('_result.png$', '.jpg', im_path_results[j])
im_path_results[j]=im_path_results[j].replace('fake_images_result','images')
print(len(im_path_results))
im_path = glob.glob("./images/*")
filepaths=set(im_path) - set(im_path_results)
filepaths=list(filepaths)
# Re-populate list with filename, size tuples
for i in range(len(filepaths)):
filepaths[i] = (filepaths[i], os.path.getsize(filepaths[i]))
# Sort list by file size
# If reverse=True sort from largest to smallest
# If reverse=False sort from smallest to largest
filepaths.sort(key=lambda filename: filename[1], reverse=True)
# Re-populate list with just filenames
for i in range(len(filepaths)):
filepaths[i] = filepaths[i][0]
print(filepaths)
print(len(filepaths))
print("loading model")
ckpt_path = './ckpt/exif_final/exif_final.ckpt'
exif_demo = Demo(ckpt_path=ckpt_path, use_gpu=0, quality=2.0, num_per_dim=20)
print("model loaded")
#assert os.path.exists(cfg.im_path)
for i in filepaths:
try:
imid = i.split('/')[-1].split('.')[0]
save_path = os.path.join('./fake_images_result', imid + '_result.png')
print('Running image %s' % i)
ms_st = time.time()
#im_path = './images/demo.png'
im, res = exif_demo(i, dense=True)
print (res.shape)
#scipy.misc.toimage(image_array, cmin=0.0, cmax=...).save('out_demo.jpg')
#imageio.imwrite('image_name_out.png', res)
print('MeanShift run time: %.3f' % (time.time() - ms_st))
plt.subplots(figsize=(16, 8))
plt.subplot(1, 3, 1)
plt.title('Input Image')
plt.imshow(im)
plt.axis('off')
plt.subplot(1, 3, 2)
plt.title('Cluster w/ MeanShift')
plt.axis('off')
if np.mean(res > 0.5) > 0.5:
res = 1.0 - res
plt.imshow(res, cmap='jet', vmin=0.0, vmax=1.0)
import matplotlib
matplotlib.image.imsave(save_path, res, cmap='jet')
#plt.savefig(save_path)
print('Result saved %s' % save_path)
except Exception as e:
print(e)
pass |
py | 7df99914592b88d7cce5115aa2a6f9de39bb5083 | import unittest
import ibllib.time
import datetime
import pandas as pd
class TestUtils(unittest.TestCase):
def test_format_date_range(self):
date_range = ['2018-03-01', '2018-03-24']
date_range_out = ['2018-03-01', '2018-03-24']
# test the string input
self.assertTrue(ibllib.time.format_date_range(date_range) == date_range_out)
# test the date input
date_range = [datetime.datetime.strptime(d, '%Y-%m-%d') for d in date_range]
self.assertTrue(ibllib.time.format_date_range(date_range) == date_range_out)
# test input validation
date_range[-1] = date_range_out[-1] # noqa [datetime, str]
with self.assertRaises(ValueError):
ibllib.time.format_date_range(date_range)
def test_isostr2date(self):
# test the full string
a = ibllib.time.isostr2date('2018-03-01T12:34:56.99999')
self.assertTrue(a == datetime.datetime(2018, 3, 1, 12, 34, 56, 999990))
# test UTC offset
# a = ibllib.time.isostr2date('2018-03-01T12:34:56+02:00') # FAILS!
# if ms is rounded, test without the F field
b = ibllib.time.isostr2date('2018-03-01T12:34:56')
self.assertTrue(b == datetime.datetime(2018, 3, 1, 12, 34, 56))
# test a mixed list input
c = ['2018-03-01T12:34:56.99999', '2018-03-01T12:34:56']
d = ibllib.time.isostr2date(c)
self.assertTrue((d[0] == a) and (d[1] == b))
# test with pandas series
e = ibllib.time.isostr2date(pd.Series(c))
self.assertTrue((e[0] == a) and (e[1] == b))
def test_date2isostr(self):
expected = '2018-08-14T00:00:00'
day = datetime.date(2018, 8, 14)
self.assertEqual(expected, ibllib.time.date2isostr(day))
dt = datetime.datetime(2018, 8, 14)
self.assertEqual(expected, ibllib.time.date2isostr(dt))
if __name__ == "__main__":
unittest.main(exit=False, verbosity=2)
|
py | 7df999d5b70fc31299995b176e6d92657e07df48 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This serves as a handler for PubSub push for builds."""
import base64
import json
import logging
import re
import urlparse
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from google.protobuf import json_format
from google.protobuf.field_mask_pb2 import FieldMask
from gae_libs import appengine_util
from gae_libs.handlers.base_handler import BaseHandler
from gae_libs.handlers.base_handler import Permission
from common import constants
from common.waterfall.buildbucket_client import GetV2Build
from model.isolated_target import IsolatedTarget
_PROP_NAME_REGEX = re.compile(
r'swarm_hashes_(?P<ref>.*)\(at\)\{\#(?P<cp>[0-9]+)\}'
r'(?P<suffix>(_with(out)?_patch))?')
# Builds from such LUCI projects should be intercepted by Findit v2.
# It doesn't necessarily mean build failures will be analyzed in v2 though.
_FINDIT_V2_INTERCEPT_PROJECTS = ['chromium', 'chromeos']
class CompletedBuildPubsubIngestor(BaseHandler):
"""Adds isolate targets to the index when pubsub notifies of completed build.
"""
PERMISSION_LEVEL = Permission.ANYONE # Protected with login:admin.
def HandlePost(self):
build_id = None
build_result = None
status = None
project = None
bucket = None
builder_name = None
try:
envelope = json.loads(self.request.body)
version = envelope['message']['attributes'].get('version')
if version and version != 'v1':
logging.info('Ignoring versions other than v1')
return
build_id = envelope['message']['attributes']['build_id']
build = json.loads(base64.b64decode(envelope['message']['data']))['build']
build_result = build.get('result')
status = build['status']
project = build['project']
bucket = build['bucket']
parameters_json = json.loads(build['parameters_json'])
builder_name = parameters_json['builder_name']
except (ValueError, KeyError) as e:
# Ignore requests with invalid message.
logging.debug('build_id: %r', build_id)
logging.error('Unexpected PubSub message format: %s', e.message)
logging.debug('Post body: %s', self.request.body)
return
if status == 'COMPLETED':
      # Checks if the build is accessible.
bb_build = GetV2Build(build_id)
if not bb_build:
logging.error('Failed to download build for %s/%r.', builder_name,
build_id)
return
_HandlePossibleCodeCoverageBuild(int(build_id))
if project in _FINDIT_V2_INTERCEPT_PROJECTS:
_HandlePossibleFailuresInBuild(project, bucket, builder_name,
int(build_id), build_result)
if project == 'chromium':
# Only ingests chromium builds.
return _IngestProto(int(build_id))
# We don't care about pending or non-supported builds, so we accept the
# notification by returning 200, and prevent pubsub from retrying it.
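# Hedged illustration of the PubSub push envelope that HandlePost parses above
# (field values are assumptions). The 'data' attribute is base64-encoded JSON
# whose 'build' dict carries result, status, project, bucket and
# parameters_json (itself a JSON string containing builder_name):
#   {"message": {"attributes": {"version": "v1", "build_id": "1234"},
#                "data": <base64 of '{"build": {"status": "COMPLETED",
#                         "result": "FAILURE", "project": "chromium",
#                         "bucket": "luci.chromium.ci", "parameters_json":
#                         "{...builder_name...}"}}'>}}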
def _HandlePossibleCodeCoverageBuild(build_id): # pragma: no cover
"""Schedules a taskqueue task to process the code coverage data."""
# https://cloud.google.com/appengine/docs/standard/python/taskqueue/push/creating-tasks#target
try:
taskqueue.add(
name='coveragedata-%s' % build_id, # Avoid duplicate tasks.
url='/coverage/task/process-data/build/%s' % build_id,
target='code-coverage-backend', # Always use the default version.
queue_name='code-coverage-process-data')
except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError):
logging.warning('Build %s was already scheduled to be processed', build_id)
def _HandlePossibleFailuresInBuild(project, bucket, builder_name, build_id,
build_result): # pragma: no cover
"""Schedules a taskqueue task to process a completed failed build."""
try:
taskqueue.add(
name='buildfailure-%s' % build_id, # Avoid duplicate tasks.
url='/findit/internal/v2/task/build-completed',
payload=json.dumps({
'project': project,
'bucket': bucket,
'builder_name': builder_name,
'build_id': build_id,
'build_result': build_result,
}),
target=appengine_util.GetTargetNameForModule('findit-backend'),
queue_name='failure-detection-queue')
except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError):
logging.warning('Build %s was already scheduled to be processed', build_id)
def _DecodeSwarmingHashesPropertyName(prop):
"""Extracts ref, commit position and patch status from property name.
Args:
prop(str): The property name is expected to be in the following format:
      swarm_hashes_<ref>(at){#<commit_position>}<optional suffix>
"""
matches = _PROP_NAME_REGEX.match(prop)
with_patch = matches.group('suffix') == '_with_patch'
return matches.group('ref'), int(matches.group('cp')), with_patch
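# Hedged example of _DecodeSwarmingHashesPropertyName (the property name is
# hypothetical): 'swarm_hashes_refs/heads/main(at){#12345}_with_patch' decodes
# to ('refs/heads/main', 12345, True); without the suffix, with_patch is False.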
def _IngestProto(build_id):
"""Process a build described in a proto, i.e. buildbucket v2 api format."""
assert build_id
build = GetV2Build(
build_id,
fields=FieldMask(
paths=['id', 'output.properties', 'input', 'status', 'builder']))
if not build:
logging.error(
'Could not retrieve build #%d from buildbucket, '
'acknowledging to avoid retries', build_id)
return
# Sanity check.
assert build_id == build.id
commit = build.input.gitiles_commit
patches = build.input.gerrit_changes
# Convert the Struct to standard dict, to use .get, .iteritems etc.
input_properties = json_format.MessageToDict(build.input.properties)
output_properties = json_format.MessageToDict(build.output.properties)
swarm_hashes_properties = {}
for k, v in output_properties.iteritems():
if _PROP_NAME_REGEX.match(k):
swarm_hashes_properties[k] = v
if not swarm_hashes_properties:
logging.debug('Build %d does not have swarm_hashes property', build_id)
return
# TODO(https://crbug.com/1109276) Once all builders use builder_group
# property, do not check the mastername property
master_name = (
output_properties.get('target_builder_group') or
input_properties.get('target_builder_group') or
output_properties.get('target_mastername') or
input_properties.get('target_mastername') or
output_properties.get('builder_group') or
input_properties.get('builder_group') or
output_properties.get('mastername') or input_properties.get('mastername'))
if not master_name:
logging.error('Build %d does not have expected "mastername" property',
build_id)
return
luci_project = build.builder.project
luci_bucket = build.builder.bucket
luci_builder = output_properties.get(
'target_buildername') or build.builder.builder
if commit.host:
gitiles_host = commit.host
gitiles_project = commit.project
gitiles_ref = commit.ref or 'refs/heads/main'
else:
# Non-ci build, use 'repository' property instead to get base revision
# information.
repo_url = urlparse.urlparse(output_properties.get('repository', ''))
gitiles_host = repo_url.hostname or ''
gitiles_project = repo_url.path or ''
# Trim "/" prefix so that "/chromium/src" becomes
# "chromium/src", also remove ".git" suffix if present.
if gitiles_project.startswith('/'): # pragma: no branch
gitiles_project = gitiles_project[1:]
if gitiles_project.endswith('.git'): # pragma: no branch
gitiles_project = gitiles_project[:-len('.git')]
gitiles_ref = output_properties.get('gitiles_ref', 'refs/heads/master')
gerrit_patch = None
if len(patches) > 0:
gerrit_patch = '/'.join(
map(str, [patches[0].host, patches[0].change, patches[0].patchset]))
entities = []
for prop_name, swarm_hashes in swarm_hashes_properties.iteritems():
ref, commit_position, with_patch = _DecodeSwarmingHashesPropertyName(
prop_name)
for target_name, isolated_hash in swarm_hashes.items():
entities.append(
IsolatedTarget.Create(
build_id=build_id,
luci_project=luci_project,
bucket=luci_bucket,
master_name=master_name,
builder_name=luci_builder,
gitiles_host=gitiles_host,
gitiles_project=gitiles_project,
gitiles_ref=gitiles_ref or ref,
gerrit_patch=gerrit_patch if with_patch else '',
target_name=target_name,
isolated_hash=isolated_hash,
commit_position=commit_position,
revision=output_properties.get('got_revision')))
result = [key.pairs() for key in ndb.put_multi(entities)]
return {'data': {'created_rows': result}}
def _TriggerV1AnalysisForChromiumBuildIfNeeded(bucket, builder_name, build_id,
build_result):
"""Temporary solution of triggering v1 analysis until v2 is ready."""
if not bucket.endswith('ci'):
return
if build_result != 'FAILURE':
logging.debug('Build %d is not a failure', build_id)
return
assert build_id
build = GetV2Build(
build_id,
fields=FieldMask(
paths=['id', 'number', 'output.properties', 'input.properties']))
# Sanity check.
assert build, 'Failed to download build for {}.'.format(build_id)
assert build_id == build.id, (
'Build id {} is different from the requested id {}.'.format(
build.id, build_id))
assert build.number, 'No build_number for chromium build {}'.format(build_id)
# Converts the Struct to standard dict, to use .get, .iteritems etc.
input_properties = json_format.MessageToDict(build.input.properties)
output_properties = json_format.MessageToDict(build.output.properties)
# TODO(https://crbug.com/1109276) Once all builders use builder_group
# property, do not check the mastername property
master_name = (
output_properties.get('target_builder_group') or
input_properties.get('target_builder_group') or
output_properties.get('target_mastername') or
input_properties.get('target_mastername') or
output_properties.get('builder_group') or
input_properties.get('builder_group') or
output_properties.get('mastername') or input_properties.get('mastername'))
if not master_name:
logging.error('Build %d does not have expected "mastername" property',
build_id)
return
build_info = {
'master_name': master_name,
'builder_name': builder_name,
'build_number': build.number,
}
logging.info('Triggering v1 analysis for chromium build %d', build_id)
target = appengine_util.GetTargetNameForModule(constants.WATERFALL_BACKEND)
payload = json.dumps({'builds': [build_info]})
taskqueue.add(
url=constants.WATERFALL_PROCESS_FAILURE_ANALYSIS_REQUESTS_URL,
payload=payload,
target=target,
queue_name=constants.WATERFALL_FAILURE_ANALYSIS_REQUEST_QUEUE)
|
py | 7df99a6a571c46ce17b9ac4002be5f01e274357e | # -*- coding: utf-8 -*-
"""
test_basic_logic
~~~~~~~~~~~~~~~~
Test the basic logic of the h2 state machines.
"""
import random
import sys
import hyperframe
import pytest
import h2.config
import h2.connection
import h2.errors
import h2.events
import h2.exceptions
import h2.frame_buffer
import h2.settings
import h2.stream
import helpers
from hypothesis import given
from hypothesis.strategies import integers
IS_PYTHON3 = sys.version_info >= (3, 0)
class TestBasicClient(object):
"""
Basic client-side tests.
"""
example_request_headers = [
(u':authority', u'example.com'),
(u':path', u'/'),
(u':scheme', u'https'),
(u':method', u'GET'),
]
bytes_example_request_headers = [
(b':authority', b'example.com'),
(b':path', b'/'),
(b':scheme', b'https'),
(b':method', b'GET'),
]
example_response_headers = [
(u':status', u'200'),
(u'server', u'fake-serv/0.1.0')
]
bytes_example_response_headers = [
(b':status', b'200'),
(b'server', b'fake-serv/0.1.0')
]
def test_begin_connection(self, frame_factory):
"""
Client connections emit the HTTP/2 preamble.
"""
c = h2.connection.H2Connection()
expected_settings = frame_factory.build_settings_frame(
c.local_settings
)
expected_data = (
b'PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n' + expected_settings.serialize()
)
events = c.initiate_connection()
assert not events
assert c.data_to_send() == expected_data
def test_sending_headers(self):
"""
Single headers frames are correctly encoded.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
# Clear the data, then send headers.
c.clear_outbound_data_buffer()
events = c.send_headers(1, self.example_request_headers)
assert not events
assert c.data_to_send() == (
b'\x00\x00\r\x01\x04\x00\x00\x00\x01'
b'A\x88/\x91\xd3]\x05\\\x87\xa7\x84\x87\x82'
)
def test_sending_data(self):
"""
Single data frames are encoded correctly.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(1, self.example_request_headers)
# Clear the data, then send some data.
c.clear_outbound_data_buffer()
events = c.send_data(1, b'some data')
assert not events
data_to_send = c.data_to_send()
assert (
data_to_send == b'\x00\x00\t\x00\x00\x00\x00\x00\x01some data'
)
buffer = h2.frame_buffer.FrameBuffer(server=False)
buffer.max_frame_size = 65535
buffer.add_data(data_to_send)
data_frame = list(buffer)[0]
sanity_check_data_frame(
data_frame=data_frame,
expected_flow_controlled_length=len(b'some data'),
expect_padded_flag=False,
expected_data_frame_pad_length=0
)
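    # Added annotation (not part of the original test suite): the wire bytes
    # asserted in test_sending_data above decode per the HTTP/2 frame header
    # layout as 3-byte length 0x000009, 1-byte type 0x00 (DATA), 1-byte flags
    # 0x00, 4-byte stream identifier 0x00000001, followed by the 9-byte
    # payload b'some data'.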
def test_sending_data_in_memoryview(self):
"""
Support memoryview for sending data.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(1, self.example_request_headers)
# Clear the data, then send some data.
c.clear_outbound_data_buffer()
events = c.send_data(1, memoryview(b'some data'))
assert not events
data_to_send = c.data_to_send()
assert (
data_to_send == b'\x00\x00\t\x00\x00\x00\x00\x00\x01some data'
)
def test_sending_data_with_padding(self):
"""
Single data frames with padding are encoded correctly.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(1, self.example_request_headers)
# Clear the data, then send some data.
c.clear_outbound_data_buffer()
events = c.send_data(1, b'some data', pad_length=5)
assert not events
data_to_send = c.data_to_send()
assert data_to_send == (
b'\x00\x00\x0f\x00\x08\x00\x00\x00\x01'
b'\x05some data\x00\x00\x00\x00\x00'
)
buffer = h2.frame_buffer.FrameBuffer(server=False)
buffer.max_frame_size = 65535
buffer.add_data(data_to_send)
data_frame = list(buffer)[0]
sanity_check_data_frame(
data_frame=data_frame,
expected_flow_controlled_length=len(b'some data') + 1 + 5,
expect_padded_flag=True,
expected_data_frame_pad_length=5
)
def test_sending_data_with_zero_length_padding(self):
"""
Single data frames with zero-length padding are encoded
correctly.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(1, self.example_request_headers)
# Clear the data, then send some data.
c.clear_outbound_data_buffer()
events = c.send_data(1, b'some data', pad_length=0)
assert not events
data_to_send = c.data_to_send()
assert data_to_send == (
b'\x00\x00\x0a\x00\x08\x00\x00\x00\x01'
b'\x00some data'
)
buffer = h2.frame_buffer.FrameBuffer(server=False)
buffer.max_frame_size = 65535
buffer.add_data(data_to_send)
data_frame = list(buffer)[0]
sanity_check_data_frame(
data_frame=data_frame,
expected_flow_controlled_length=len(b'some data') + 1,
expect_padded_flag=True,
expected_data_frame_pad_length=0
)
@pytest.mark.parametrize("expected_error,pad_length", [
(None, 0),
(None, 255),
(None, None),
(ValueError, -1),
(ValueError, 256),
(TypeError, 'invalid'),
(TypeError, ''),
(TypeError, '10'),
(TypeError, {}),
(TypeError, ['1', '2', '3']),
(TypeError, []),
(TypeError, 1.5),
(TypeError, 1.0),
(TypeError, -1.0),
])
def test_sending_data_with_invalid_padding_length(self,
expected_error,
pad_length):
"""
``send_data`` with a ``pad_length`` parameter that is an integer
outside the range of [0, 255] throws a ``ValueError``, and a
``pad_length`` parameter which is not an ``integer`` type
throws a ``TypeError``.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(1, self.example_request_headers)
c.clear_outbound_data_buffer()
if expected_error is not None:
with pytest.raises(expected_error):
c.send_data(1, b'some data', pad_length=pad_length)
else:
c.send_data(1, b'some data', pad_length=pad_length)
def test_closing_stream_sending_data(self, frame_factory):
"""
We can close a stream with a data frame.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(1, self.example_request_headers)
f = frame_factory.build_data_frame(
data=b'some data',
flags=['END_STREAM'],
)
# Clear the data, then send some data.
c.clear_outbound_data_buffer()
events = c.send_data(1, b'some data', end_stream=True)
assert not events
assert c.data_to_send() == f.serialize()
def test_receiving_a_response(self, frame_factory):
"""
When receiving a response, the ResponseReceived event fires.
"""
config = h2.config.H2Configuration(header_encoding='utf-8')
c = h2.connection.H2Connection(config=config)
c.initiate_connection()
c.send_headers(1, self.example_request_headers, end_stream=True)
# Clear the data
f = frame_factory.build_headers_frame(
self.example_response_headers
)
events = c.receive_data(f.serialize())
assert len(events) == 1
event = events[0]
assert isinstance(event, h2.events.ResponseReceived)
assert event.stream_id == 1
assert event.headers == self.example_response_headers
def test_receiving_a_response_bytes(self, frame_factory):
"""
When receiving a response, the ResponseReceived event fires with bytes
headers if the encoding is set appropriately.
"""
config = h2.config.H2Configuration(header_encoding=False)
c = h2.connection.H2Connection(config=config)
c.initiate_connection()
c.send_headers(1, self.example_request_headers, end_stream=True)
# Clear the data
f = frame_factory.build_headers_frame(
self.example_response_headers
)
events = c.receive_data(f.serialize())
assert len(events) == 1
event = events[0]
assert isinstance(event, h2.events.ResponseReceived)
assert event.stream_id == 1
assert event.headers == self.bytes_example_response_headers
def test_receiving_a_response_change_encoding(self, frame_factory):
"""
When receiving a response, the ResponseReceived event fires with bytes
headers if the encoding is set appropriately, but if this changes then
the change reflects it.
"""
config = h2.config.H2Configuration(header_encoding=False)
c = h2.connection.H2Connection(config=config)
c.initiate_connection()
c.send_headers(1, self.example_request_headers, end_stream=True)
f = frame_factory.build_headers_frame(
self.example_response_headers
)
events = c.receive_data(f.serialize())
assert len(events) == 1
event = events[0]
assert isinstance(event, h2.events.ResponseReceived)
assert event.stream_id == 1
assert event.headers == self.bytes_example_response_headers
c.send_headers(3, self.example_request_headers, end_stream=True)
c.config.header_encoding = 'utf-8'
f = frame_factory.build_headers_frame(
self.example_response_headers,
stream_id=3,
)
events = c.receive_data(f.serialize())
assert len(events) == 1
event = events[0]
assert isinstance(event, h2.events.ResponseReceived)
assert event.stream_id == 3
assert event.headers == self.example_response_headers
def test_end_stream_without_data(self, frame_factory):
"""
Ending a stream without data emits a zero-length DATA frame with
END_STREAM set.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(1, self.example_request_headers, end_stream=False)
# Clear the data
c.clear_outbound_data_buffer()
f = frame_factory.build_data_frame(b'', flags=['END_STREAM'])
events = c.end_stream(1)
assert not events
assert c.data_to_send() == f.serialize()
def test_cannot_send_headers_on_lower_stream_id(self):
"""
Once stream ID x has been used, cannot use stream ID y where y < x.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(3, self.example_request_headers, end_stream=False)
with pytest.raises(h2.exceptions.StreamIDTooLowError) as e:
c.send_headers(1, self.example_request_headers, end_stream=True)
assert e.value.stream_id == 1
assert e.value.max_stream_id == 3
def test_receiving_pushed_stream(self, frame_factory):
"""
Pushed streams fire a PushedStreamReceived event, followed by
ResponseReceived when the response headers are received.
"""
config = h2.config.H2Configuration(header_encoding='utf-8')
c = h2.connection.H2Connection(config=config)
c.initiate_connection()
c.send_headers(1, self.example_request_headers, end_stream=False)
f1 = frame_factory.build_headers_frame(
self.example_response_headers
)
f2 = frame_factory.build_push_promise_frame(
stream_id=1,
promised_stream_id=2,
headers=self.example_request_headers,
flags=['END_HEADERS'],
)
f3 = frame_factory.build_headers_frame(
self.example_response_headers,
stream_id=2,
)
data = b''.join(x.serialize() for x in [f1, f2, f3])
events = c.receive_data(data)
assert len(events) == 3
stream_push_event = events[1]
response_event = events[2]
assert isinstance(stream_push_event, h2.events.PushedStreamReceived)
assert isinstance(response_event, h2.events.ResponseReceived)
assert stream_push_event.pushed_stream_id == 2
assert stream_push_event.parent_stream_id == 1
assert (
stream_push_event.headers == self.example_request_headers
)
assert response_event.stream_id == 2
assert response_event.headers == self.example_response_headers
def test_receiving_pushed_stream_bytes(self, frame_factory):
"""
Pushed headers are not decoded if the header encoding is set to False.
"""
config = h2.config.H2Configuration(header_encoding=False)
c = h2.connection.H2Connection(config=config)
c.initiate_connection()
c.send_headers(1, self.example_request_headers, end_stream=False)
f1 = frame_factory.build_headers_frame(
self.example_response_headers
)
f2 = frame_factory.build_push_promise_frame(
stream_id=1,
promised_stream_id=2,
headers=self.example_request_headers,
flags=['END_HEADERS'],
)
f3 = frame_factory.build_headers_frame(
self.example_response_headers,
stream_id=2,
)
data = b''.join(x.serialize() for x in [f1, f2, f3])
events = c.receive_data(data)
assert len(events) == 3
stream_push_event = events[1]
response_event = events[2]
assert isinstance(stream_push_event, h2.events.PushedStreamReceived)
assert isinstance(response_event, h2.events.ResponseReceived)
assert stream_push_event.pushed_stream_id == 2
assert stream_push_event.parent_stream_id == 1
assert (
stream_push_event.headers == self.bytes_example_request_headers
)
assert response_event.stream_id == 2
assert response_event.headers == self.bytes_example_response_headers
def test_cannot_receive_pushed_stream_when_enable_push_is_0(self,
frame_factory):
"""
If we have set SETTINGS_ENABLE_PUSH to 0, receiving PUSH_PROMISE frames
triggers the connection to be closed.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.local_settings.enable_push = 0
c.send_headers(1, self.example_request_headers, end_stream=False)
f1 = frame_factory.build_settings_frame({}, ack=True)
f2 = frame_factory.build_headers_frame(
self.example_response_headers
)
f3 = frame_factory.build_push_promise_frame(
stream_id=1,
promised_stream_id=2,
headers=self.example_request_headers,
flags=['END_HEADERS'],
)
c.receive_data(f1.serialize())
c.receive_data(f2.serialize())
c.clear_outbound_data_buffer()
with pytest.raises(h2.exceptions.ProtocolError):
c.receive_data(f3.serialize())
expected_frame = frame_factory.build_goaway_frame(
0, h2.errors.ErrorCodes.PROTOCOL_ERROR
)
assert c.data_to_send() == expected_frame.serialize()
def test_receiving_response_no_body(self, frame_factory):
"""
Receiving a response without a body fires two events, ResponseReceived
and StreamEnded.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(1, self.example_request_headers, end_stream=True)
f = frame_factory.build_headers_frame(
self.example_response_headers,
flags=['END_STREAM']
)
events = c.receive_data(f.serialize())
assert len(events) == 2
response_event = events[0]
end_stream = events[1]
assert isinstance(response_event, h2.events.ResponseReceived)
assert isinstance(end_stream, h2.events.StreamEnded)
def test_oversize_headers(self):
"""
Sending headers that are oversized generates a stream of CONTINUATION
frames.
"""
all_bytes = [chr(x) for x in range(0, 256)]
if IS_PYTHON3:
all_bytes = [x.encode('latin1') for x in all_bytes]
large_binary_string = b''.join(
random.choice(all_bytes) for _ in range(0, 256)
)
test_headers = [
(':authority', 'example.com'),
(':path', '/'),
(':method', 'GET'),
(':scheme', 'https'),
('key', large_binary_string)
]
c = h2.connection.H2Connection()
# Greatly shrink the max frame size to force us over.
c.max_outbound_frame_size = 48
c.initiate_connection()
c.send_headers(1, test_headers, end_stream=True)
# Use the frame buffer here, because we don't care about decoding
# the headers. Don't send all the data in because that will force the
# frame buffer to stop caching the CONTINUATION frames, so instead
# send all but one byte.
buffer = h2.frame_buffer.FrameBuffer(server=True)
buffer.max_frame_size = 65535
data = c.data_to_send()
buffer.add_data(data[:-1])
# Drain the buffer, confirming that it only provides a single frame
# (the settings frame)
assert len(list(buffer)) == 1
# Get the cached frames.
frames = buffer._headers_buffer
# Split the frames up.
headers_frame = frames[0]
continuation_frames = frames[1:]
assert isinstance(headers_frame, hyperframe.frame.HeadersFrame)
assert all(
map(
lambda f: isinstance(f, hyperframe.frame.ContinuationFrame),
continuation_frames)
)
assert all(
map(lambda f: len(f.data) <= c.max_outbound_frame_size, frames)
)
assert frames[0].flags == {'END_STREAM'}
buffer.add_data(data[-1:])
headers = list(buffer)[0]
assert isinstance(headers, hyperframe.frame.HeadersFrame)
def test_handle_stream_reset(self, frame_factory):
"""
Streams being remotely reset fires a StreamReset event.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(1, self.example_request_headers, end_stream=True)
c.clear_outbound_data_buffer()
f = frame_factory.build_rst_stream_frame(
stream_id=1,
error_code=h2.errors.ErrorCodes.STREAM_CLOSED,
)
events = c.receive_data(f.serialize())
assert len(events) == 1
event = events[0]
assert isinstance(event, h2.events.StreamReset)
assert event.stream_id == 1
assert event.error_code is h2.errors.ErrorCodes.STREAM_CLOSED
assert isinstance(event.error_code, h2.errors.ErrorCodes)
assert event.remote_reset
    def test_handle_stream_reset_with_unknown_error_code(self, frame_factory):
"""
Streams being remotely reset with unknown error codes behave exactly as
they do with known error codes, but the error code on the event is an
int, instead of being an ErrorCodes.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(1, self.example_request_headers, end_stream=True)
c.clear_outbound_data_buffer()
f = frame_factory.build_rst_stream_frame(stream_id=1, error_code=0xFA)
events = c.receive_data(f.serialize())
assert len(events) == 1
event = events[0]
assert isinstance(event, h2.events.StreamReset)
assert event.stream_id == 1
assert event.error_code == 250
assert not isinstance(event.error_code, h2.errors.ErrorCodes)
assert event.remote_reset
def test_can_consume_partial_data_from_connection(self):
"""
We can do partial reads from the connection.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
assert len(c.data_to_send(2)) == 2
assert len(c.data_to_send(3)) == 3
assert 0 < len(c.data_to_send(500)) < 500
assert len(c.data_to_send(10)) == 0
assert len(c.data_to_send()) == 0
def test_we_can_update_settings(self, frame_factory):
"""
Updating the settings emits a SETTINGS frame.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.clear_outbound_data_buffer()
new_settings = {
h2.settings.SettingCodes.HEADER_TABLE_SIZE: 52,
h2.settings.SettingCodes.ENABLE_PUSH: 0,
}
events = c.update_settings(new_settings)
assert not events
f = frame_factory.build_settings_frame(new_settings)
assert c.data_to_send() == f.serialize()
def test_settings_get_acked_correctly(self, frame_factory):
"""
When settings changes are ACKed, they contain the changed settings.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
new_settings = {
h2.settings.SettingCodes.HEADER_TABLE_SIZE: 52,
h2.settings.SettingCodes.ENABLE_PUSH: 0,
}
c.update_settings(new_settings)
f = frame_factory.build_settings_frame({}, ack=True)
events = c.receive_data(f.serialize())
assert len(events) == 1
event = events[0]
assert isinstance(event, h2.events.SettingsAcknowledged)
assert len(event.changed_settings) == len(new_settings)
for setting, value in new_settings.items():
assert event.changed_settings[setting].new_value == value
def test_cannot_create_new_outbound_stream_over_limit(self, frame_factory):
"""
When the number of outbound streams exceeds the remote peer's
MAX_CONCURRENT_STREAMS setting, attempting to open new streams fails.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
f = frame_factory.build_settings_frame(
{h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 1}
)
c.receive_data(f.serialize())[0]
c.send_headers(1, self.example_request_headers)
with pytest.raises(h2.exceptions.TooManyStreamsError):
c.send_headers(3, self.example_request_headers)
def test_can_receive_trailers(self, frame_factory):
"""
When two HEADERS blocks are received in the same stream from a
server, the second set are trailers.
"""
config = h2.config.H2Configuration(header_encoding='utf-8')
c = h2.connection.H2Connection(config=config)
c.initiate_connection()
c.send_headers(1, self.example_request_headers)
f = frame_factory.build_headers_frame(self.example_response_headers)
c.receive_data(f.serialize())
# Send in trailers.
trailers = [('content-length', '0')]
f = frame_factory.build_headers_frame(
trailers,
flags=['END_STREAM'],
)
events = c.receive_data(f.serialize())
assert len(events) == 2
event = events[0]
assert isinstance(event, h2.events.TrailersReceived)
assert event.headers == trailers
assert event.stream_id == 1
def test_reject_trailers_not_ending_stream(self, frame_factory):
"""
When trailers are received without the END_STREAM flag being present,
this is a ProtocolError.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(1, self.example_request_headers)
f = frame_factory.build_headers_frame(self.example_response_headers)
c.receive_data(f.serialize())
# Send in trailers.
c.clear_outbound_data_buffer()
trailers = [('content-length', '0')]
f = frame_factory.build_headers_frame(
trailers,
flags=[],
)
with pytest.raises(h2.exceptions.ProtocolError):
c.receive_data(f.serialize())
expected_frame = frame_factory.build_goaway_frame(
last_stream_id=0, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR,
)
assert c.data_to_send() == expected_frame.serialize()
def test_can_send_trailers(self, frame_factory):
"""
        When a second set of headers is sent, it is treated as trailers.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.clear_outbound_data_buffer()
c.send_headers(1, self.example_request_headers)
# Now send trailers.
trailers = [('content-length', '0')]
c.send_headers(1, trailers, end_stream=True)
frame_factory.refresh_encoder()
f1 = frame_factory.build_headers_frame(
self.example_request_headers,
)
f2 = frame_factory.build_headers_frame(
trailers,
flags=['END_STREAM'],
)
assert c.data_to_send() == f1.serialize() + f2.serialize()
def test_trailers_must_have_end_stream(self, frame_factory):
"""
A set of trailers must carry the END_STREAM flag.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
# Send headers.
c.send_headers(1, self.example_request_headers)
# Now send trailers.
trailers = [('content-length', '0')]
with pytest.raises(h2.exceptions.ProtocolError):
c.send_headers(1, trailers)
def test_headers_are_lowercase(self, frame_factory):
"""
When headers are sent, they are forced to lower-case.
"""
weird_headers = self.example_request_headers + [
('ChAnGiNg-CaSe', 'AlsoHere'),
('alllowercase', 'alllowercase'),
('ALLCAPS', 'ALLCAPS'),
]
expected_headers = self.example_request_headers + [
('changing-case', 'AlsoHere'),
('alllowercase', 'alllowercase'),
('allcaps', 'ALLCAPS'),
]
c = h2.connection.H2Connection()
c.initiate_connection()
c.clear_outbound_data_buffer()
c.send_headers(1, weird_headers)
expected_frame = frame_factory.build_headers_frame(
headers=expected_headers
)
assert c.data_to_send() == expected_frame.serialize()
@given(frame_size=integers(min_value=2**14, max_value=(2**24 - 1)))
def test_changing_max_frame_size(self, frame_factory, frame_size):
"""
When the user changes the max frame size and the change is ACKed, the
remote peer is now bound by the new frame size.
"""
# We need to refresh the encoder because hypothesis has a problem with
# integrating with py.test, meaning that we use the same frame factory
# for all tests.
# See https://github.com/HypothesisWorks/hypothesis-python/issues/377
frame_factory.refresh_encoder()
c = h2.connection.H2Connection()
c.initiate_connection()
# Set up the stream.
c.send_headers(1, self.example_request_headers, end_stream=True)
headers_frame = frame_factory.build_headers_frame(
headers=self.example_response_headers,
)
c.receive_data(headers_frame.serialize())
# Change the max frame size.
c.update_settings(
{h2.settings.SettingCodes.MAX_FRAME_SIZE: frame_size}
)
settings_ack = frame_factory.build_settings_frame({}, ack=True)
c.receive_data(settings_ack.serialize())
# Greatly increase the flow control windows: we're not here to test
# flow control today.
c.increment_flow_control_window(increment=(2 * frame_size) + 1)
c.increment_flow_control_window(
increment=(2 * frame_size) + 1, stream_id=1
)
# Send one DATA frame that is exactly the max frame size: confirm it's
# fine.
data = frame_factory.build_data_frame(
data=(b'\x00' * frame_size),
)
events = c.receive_data(data.serialize())
assert len(events) == 1
assert isinstance(events[0], h2.events.DataReceived)
assert events[0].flow_controlled_length == frame_size
# Send one that is one byte too large: confirm a protocol error is
# raised.
data.data += b'\x00'
with pytest.raises(h2.exceptions.ProtocolError):
c.receive_data(data.serialize())
def test_cookies_are_joined_on_push(self, frame_factory):
"""
RFC 7540 Section 8.1.2.5 requires that we join multiple Cookie headers
in a header block together when they're received on a push.
"""
# This is a moderately varied set of cookie headers: some combined,
# some split.
cookie_headers = [
('cookie',
'username=John Doe; expires=Thu, 18 Dec 2013 12:00:00 UTC'),
('cookie', 'path=1'),
('cookie', 'test1=val1; test2=val2')
]
expected = (
'username=John Doe; expires=Thu, 18 Dec 2013 12:00:00 UTC; '
'path=1; test1=val1; test2=val2'
)
config = h2.config.H2Configuration(header_encoding='utf-8')
c = h2.connection.H2Connection(config=config)
c.initiate_connection()
c.send_headers(1, self.example_request_headers, end_stream=True)
f = frame_factory.build_push_promise_frame(
stream_id=1,
promised_stream_id=2,
headers=self.example_request_headers + cookie_headers
)
events = c.receive_data(f.serialize())
assert len(events) == 1
e = events[0]
cookie_fields = [(n, v) for n, v in e.headers if n == 'cookie']
assert len(cookie_fields) == 1
_, v = cookie_fields[0]
assert v == expected
def test_cookies_arent_joined_without_normalization(self, frame_factory):
"""
If inbound header normalization is disabled, cookie headers aren't
joined.
"""
# This is a moderately varied set of cookie headers: some combined,
# some split.
cookie_headers = [
('cookie',
'username=John Doe; expires=Thu, 18 Dec 2013 12:00:00 UTC'),
('cookie', 'path=1'),
('cookie', 'test1=val1; test2=val2')
]
config = h2.config.H2Configuration(
client_side=True,
normalize_inbound_headers=False,
header_encoding='utf-8'
)
c = h2.connection.H2Connection(config=config)
c.initiate_connection()
c.send_headers(1, self.example_request_headers, end_stream=True)
f = frame_factory.build_push_promise_frame(
stream_id=1,
promised_stream_id=2,
headers=self.example_request_headers + cookie_headers
)
events = c.receive_data(f.serialize())
assert len(events) == 1
e = events[0]
received_cookies = [(n, v) for n, v in e.headers if n == 'cookie']
assert len(received_cookies) == 3
assert cookie_headers == received_cookies
class TestBasicServer(object):
"""
Basic server-side tests.
"""
example_request_headers = [
(u':authority', u'example.com'),
(u':path', u'/'),
(u':scheme', u'https'),
(u':method', u'GET'),
]
bytes_example_request_headers = [
(b':authority', b'example.com'),
(b':path', b'/'),
(b':scheme', b'https'),
(b':method', b'GET'),
]
example_response_headers = [
(':status', '200'),
('server', 'hyper-h2/0.1.0')
]
server_config = h2.config.H2Configuration(
client_side=False, header_encoding='utf-8'
)
def test_ignores_preamble(self):
"""
The preamble does not cause any events or frames to be written.
"""
c = h2.connection.H2Connection(config=self.server_config)
preamble = b'PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n'
events = c.receive_data(preamble)
assert not events
assert not c.data_to_send()
@pytest.mark.parametrize("chunk_size", range(1, 24))
def test_drip_feed_preamble(self, chunk_size):
"""
        The preamble can be received in chunks rather than in a single buffer.
"""
c = h2.connection.H2Connection(config=self.server_config)
preamble = b'PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n'
events = []
for i in range(0, len(preamble), chunk_size):
events += c.receive_data(preamble[i:i+chunk_size])
assert not events
assert not c.data_to_send()
def test_initiate_connection_sends_server_preamble(self, frame_factory):
"""
For server-side connections, initiate_connection sends a server
preamble.
"""
c = h2.connection.H2Connection(config=self.server_config)
expected_settings = frame_factory.build_settings_frame(
c.local_settings
)
expected_data = expected_settings.serialize()
events = c.initiate_connection()
assert not events
assert c.data_to_send() == expected_data
def test_headers_event(self, frame_factory):
"""
When a headers frame is received a RequestReceived event fires.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
f = frame_factory.build_headers_frame(self.example_request_headers)
data = f.serialize()
events = c.receive_data(data)
assert len(events) == 1
event = events[0]
assert isinstance(event, h2.events.RequestReceived)
assert event.stream_id == 1
assert event.headers == self.example_request_headers
def test_headers_event_bytes(self, frame_factory):
"""
When a headers frame is received a RequestReceived event fires with
bytes headers if the encoding is set appropriately.
"""
config = h2.config.H2Configuration(
client_side=False, header_encoding=False
)
c = h2.connection.H2Connection(config=config)
c.receive_data(frame_factory.preamble())
f = frame_factory.build_headers_frame(self.example_request_headers)
data = f.serialize()
events = c.receive_data(data)
assert len(events) == 1
event = events[0]
assert isinstance(event, h2.events.RequestReceived)
assert event.stream_id == 1
assert event.headers == self.bytes_example_request_headers
def test_data_event(self, frame_factory):
"""
Test that data received on a stream fires a DataReceived event.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
f1 = frame_factory.build_headers_frame(
self.example_request_headers, stream_id=3
)
f2 = frame_factory.build_data_frame(
b'some request data',
stream_id=3,
)
data = b''.join(map(lambda f: f.serialize(), [f1, f2]))
events = c.receive_data(data)
assert len(events) == 2
event = events[1]
assert isinstance(event, h2.events.DataReceived)
assert event.stream_id == 3
assert event.data == b'some request data'
assert event.flow_controlled_length == 17
def test_data_event_with_padding(self, frame_factory):
"""
Test that data received on a stream fires a DataReceived event that
accounts for padding.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
f1 = frame_factory.build_headers_frame(
self.example_request_headers, stream_id=3
)
f2 = frame_factory.build_data_frame(
b'some request data',
stream_id=3,
padding_len=20
)
data = b''.join(map(lambda f: f.serialize(), [f1, f2]))
events = c.receive_data(data)
assert len(events) == 2
event = events[1]
assert isinstance(event, h2.events.DataReceived)
assert event.stream_id == 3
assert event.data == b'some request data'
assert event.flow_controlled_length == 17 + 20 + 1
def test_receiving_ping_frame(self, frame_factory):
"""
Ping frames should be immediately ACKed.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
ping_data = b'\x01' * 8
sent_frame = frame_factory.build_ping_frame(ping_data)
expected_frame = frame_factory.build_ping_frame(
ping_data, flags=["ACK"]
)
expected_data = expected_frame.serialize()
c.clear_outbound_data_buffer()
events = c.receive_data(sent_frame.serialize())
assert len(events) == 1
event = events[0]
assert isinstance(event, h2.events.PingReceived)
assert event.ping_data == ping_data
assert c.data_to_send() == expected_data
def test_receiving_settings_frame_event(self, frame_factory):
"""
Settings frames should cause a RemoteSettingsChanged event to fire.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
f = frame_factory.build_settings_frame(
settings=helpers.SAMPLE_SETTINGS
)
events = c.receive_data(f.serialize())
assert len(events) == 1
event = events[0]
assert isinstance(event, h2.events.RemoteSettingsChanged)
assert len(event.changed_settings) == len(helpers.SAMPLE_SETTINGS)
def test_acknowledging_settings(self, frame_factory):
"""
        Acknowledging settings causes an appropriate SETTINGS frame to be emitted.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
received_frame = frame_factory.build_settings_frame(
settings=helpers.SAMPLE_SETTINGS
)
expected_frame = frame_factory.build_settings_frame(
settings={}, ack=True
)
expected_data = expected_frame.serialize()
c.clear_outbound_data_buffer()
events = c.receive_data(received_frame.serialize())
assert len(events) == 1
assert c.data_to_send() == expected_data
def test_close_connection(self, frame_factory):
"""
Closing the connection with no error code emits a GOAWAY frame with
error code 0.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
f = frame_factory.build_goaway_frame(last_stream_id=0)
expected_data = f.serialize()
c.clear_outbound_data_buffer()
events = c.close_connection()
assert not events
assert c.data_to_send() == expected_data
@pytest.mark.parametrize("error_code", h2.errors.ErrorCodes)
def test_close_connection_with_error_code(self, frame_factory, error_code):
"""
Closing the connection with an error code emits a GOAWAY frame with
that error code.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
f = frame_factory.build_goaway_frame(
error_code=error_code, last_stream_id=0
)
expected_data = f.serialize()
c.clear_outbound_data_buffer()
events = c.close_connection(error_code)
assert not events
assert c.data_to_send() == expected_data
@pytest.mark.parametrize("last_stream_id,output", [
(None, 23),
(0, 0),
(42, 42)
])
def test_close_connection_with_last_stream_id(self, frame_factory,
last_stream_id, output):
"""
Closing the connection with last_stream_id set emits a GOAWAY frame
with that value.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
headers_frame = frame_factory.build_headers_frame(
[
(':authority', 'example.com'),
(':path', '/'),
(':scheme', 'https'),
(':method', 'GET'),
],
stream_id=23)
c.receive_data(headers_frame.serialize())
f = frame_factory.build_goaway_frame(
last_stream_id=output
)
expected_data = f.serialize()
c.clear_outbound_data_buffer()
events = c.close_connection(last_stream_id=last_stream_id)
assert not events
assert c.data_to_send() == expected_data
@pytest.mark.parametrize("additional_data,output", [
(None, b''),
(b'', b''),
(b'foobar', b'foobar')
])
def test_close_connection_with_additional_data(self, frame_factory,
additional_data, output):
"""
Closing the connection with additional debug data emits a GOAWAY frame
with that data attached.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
f = frame_factory.build_goaway_frame(
last_stream_id=0, additional_data=output
)
expected_data = f.serialize()
c.clear_outbound_data_buffer()
events = c.close_connection(additional_data=additional_data)
assert not events
assert c.data_to_send() == expected_data
def test_reset_stream(self, frame_factory):
"""
Resetting a stream with no error code emits a RST_STREAM frame with
error code 0.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
f = frame_factory.build_headers_frame(self.example_request_headers)
c.receive_data(f.serialize())
c.clear_outbound_data_buffer()
expected_frame = frame_factory.build_rst_stream_frame(stream_id=1)
expected_data = expected_frame.serialize()
events = c.reset_stream(stream_id=1)
assert not events
assert c.data_to_send() == expected_data
@pytest.mark.parametrize("error_code", h2.errors.ErrorCodes)
def test_reset_stream_with_error_code(self, frame_factory, error_code):
"""
Resetting a stream with an error code emits a RST_STREAM frame with
that error code.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
f = frame_factory.build_headers_frame(
self.example_request_headers,
stream_id=3
)
c.receive_data(f.serialize())
c.clear_outbound_data_buffer()
expected_frame = frame_factory.build_rst_stream_frame(
stream_id=3, error_code=error_code
)
expected_data = expected_frame.serialize()
events = c.reset_stream(stream_id=3, error_code=error_code)
assert not events
assert c.data_to_send() == expected_data
def test_cannot_reset_nonexistent_stream(self, frame_factory):
"""
Resetting nonexistent streams raises NoSuchStreamError.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
f = frame_factory.build_headers_frame(
self.example_request_headers,
stream_id=3
)
c.receive_data(f.serialize())
with pytest.raises(h2.exceptions.NoSuchStreamError) as e:
c.reset_stream(stream_id=1)
assert e.value.stream_id == 1
with pytest.raises(h2.exceptions.NoSuchStreamError) as e:
c.reset_stream(stream_id=5)
assert e.value.stream_id == 5
def test_basic_sending_ping_frame_logic(self, frame_factory):
"""
Sending ping frames serializes a ping frame on stream 0 with
        appropriate opaque data.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
c.clear_outbound_data_buffer()
ping_data = b'\x01\x02\x03\x04\x05\x06\x07\x08'
expected_frame = frame_factory.build_ping_frame(ping_data)
expected_data = expected_frame.serialize()
events = c.ping(ping_data)
assert not events
assert c.data_to_send() == expected_data
@pytest.mark.parametrize(
'opaque_data',
[
b'',
b'\x01\x02\x03\x04\x05\x06\x07',
u'abcdefgh',
b'too many bytes',
]
)
def test_ping_frame_opaque_data_must_be_length_8_bytestring(self,
frame_factory,
opaque_data):
"""
Sending a ping frame only works with 8-byte bytestrings.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
with pytest.raises(ValueError):
c.ping(opaque_data)
def test_receiving_ping_acknowledgement(self, frame_factory):
"""
Receiving a PING acknowledgement fires a PingAckReceived event.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
ping_data = b'\x01\x02\x03\x04\x05\x06\x07\x08'
f = frame_factory.build_ping_frame(
ping_data, flags=['ACK']
)
events = c.receive_data(f.serialize())
assert len(events) == 1
event = events[0]
assert isinstance(event, h2.events.PingAckReceived)
assert isinstance(event, h2.events.PingAcknowledged) # deprecated
assert event.ping_data == ping_data
def test_stream_ended_remotely(self, frame_factory):
"""
When the remote stream ends with a non-empty data frame a DataReceived
event and a StreamEnded event are fired.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
f1 = frame_factory.build_headers_frame(
self.example_request_headers, stream_id=3
)
f2 = frame_factory.build_data_frame(
b'some request data',
flags=['END_STREAM'],
stream_id=3,
)
data = b''.join(map(lambda f: f.serialize(), [f1, f2]))
events = c.receive_data(data)
assert len(events) == 3
data_event = events[1]
stream_ended_event = events[2]
assert isinstance(data_event, h2.events.DataReceived)
assert isinstance(stream_ended_event, h2.events.StreamEnded)
        assert stream_ended_event.stream_id == 3
def test_can_push_stream(self, frame_factory):
"""
Pushing a stream causes a PUSH_PROMISE frame to be emitted.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
f = frame_factory.build_headers_frame(
self.example_request_headers
)
c.receive_data(f.serialize())
frame_factory.refresh_encoder()
expected_frame = frame_factory.build_push_promise_frame(
stream_id=1,
promised_stream_id=2,
headers=self.example_request_headers,
flags=['END_HEADERS'],
)
c.clear_outbound_data_buffer()
c.push_stream(
stream_id=1,
promised_stream_id=2,
request_headers=self.example_request_headers
)
assert c.data_to_send() == expected_frame.serialize()
def test_cannot_push_streams_when_disabled(self, frame_factory):
"""
When the remote peer has disabled stream pushing, we should fail.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
f = frame_factory.build_settings_frame(
{h2.settings.SettingCodes.ENABLE_PUSH: 0}
)
c.receive_data(f.serialize())
f = frame_factory.build_headers_frame(
self.example_request_headers
)
c.receive_data(f.serialize())
with pytest.raises(h2.exceptions.ProtocolError):
c.push_stream(
stream_id=1,
promised_stream_id=2,
request_headers=self.example_request_headers
)
def test_settings_remote_change_header_table_size(self, frame_factory):
"""
Acknowledging a remote HEADER_TABLE_SIZE settings change causes us to
change the header table size of our encoder.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
assert c.encoder.header_table_size == 4096
received_frame = frame_factory.build_settings_frame(
{h2.settings.SettingCodes.HEADER_TABLE_SIZE: 80}
)
c.receive_data(received_frame.serialize())[0]
assert c.encoder.header_table_size == 80
def test_settings_local_change_header_table_size(self, frame_factory):
"""
The remote peer acknowledging a local HEADER_TABLE_SIZE settings change
does not cause us to change the header table size of our decoder.
For an explanation of why this test is this way around, see issue #37.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
assert c.decoder.header_table_size == 4096
expected_frame = frame_factory.build_settings_frame({}, ack=True)
c.update_settings(
{h2.settings.SettingCodes.HEADER_TABLE_SIZE: 80}
)
c.receive_data(expected_frame.serialize())
c.clear_outbound_data_buffer()
assert c.decoder.header_table_size == 4096
def test_restricting_outbound_frame_size_by_settings(self, frame_factory):
"""
The remote peer can shrink the maximum outbound frame size using
settings.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.initiate_connection()
c.receive_data(frame_factory.preamble())
f = frame_factory.build_headers_frame(self.example_request_headers)
c.receive_data(f.serialize())
c.clear_outbound_data_buffer()
with pytest.raises(h2.exceptions.FrameTooLargeError):
c.send_data(1, b'\x01' * 17000)
received_frame = frame_factory.build_settings_frame(
{h2.settings.SettingCodes.MAX_FRAME_SIZE: 17001}
)
c.receive_data(received_frame.serialize())
c.send_data(1, b'\x01' * 17000)
assert c.data_to_send()
def test_restricting_inbound_frame_size_by_settings(self, frame_factory):
"""
We throw ProtocolErrors and tear down connections if oversize frames
are received.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
h = frame_factory.build_headers_frame(self.example_request_headers)
c.receive_data(h.serialize())
c.clear_outbound_data_buffer()
data_frame = frame_factory.build_data_frame(b'\x01' * 17000)
with pytest.raises(h2.exceptions.ProtocolError):
c.receive_data(data_frame.serialize())
expected_frame = frame_factory.build_goaway_frame(
last_stream_id=1, error_code=h2.errors.ErrorCodes.FRAME_SIZE_ERROR
)
assert c.data_to_send() == expected_frame.serialize()
def test_cannot_receive_new_streams_over_limit(self, frame_factory):
"""
When the number of inbound streams exceeds our MAX_CONCURRENT_STREAMS
setting, their attempt to open new streams fails.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
c.update_settings(
{h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 1}
)
f = frame_factory.build_settings_frame({}, ack=True)
c.receive_data(f.serialize())
f = frame_factory.build_headers_frame(
stream_id=1,
headers=self.example_request_headers,
)
c.receive_data(f.serialize())
c.clear_outbound_data_buffer()
f = frame_factory.build_headers_frame(
stream_id=3,
headers=self.example_request_headers,
)
with pytest.raises(h2.exceptions.TooManyStreamsError):
c.receive_data(f.serialize())
expected_frame = frame_factory.build_goaway_frame(
last_stream_id=1, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR,
)
assert c.data_to_send() == expected_frame.serialize()
def test_can_receive_trailers(self, frame_factory):
"""
When two HEADERS blocks are received in the same stream from a
client, the second set are trailers.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
f = frame_factory.build_headers_frame(self.example_request_headers)
c.receive_data(f.serialize())
# Send in trailers.
trailers = [('content-length', '0')]
f = frame_factory.build_headers_frame(
trailers,
flags=['END_STREAM'],
)
events = c.receive_data(f.serialize())
assert len(events) == 2
event = events[0]
assert isinstance(event, h2.events.TrailersReceived)
assert event.headers == trailers
assert event.stream_id == 1
def test_reject_trailers_not_ending_stream(self, frame_factory):
"""
When trailers are received without the END_STREAM flag being present,
this is a ProtocolError.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
f = frame_factory.build_headers_frame(self.example_request_headers)
c.receive_data(f.serialize())
# Send in trailers.
c.clear_outbound_data_buffer()
trailers = [('content-length', '0')]
f = frame_factory.build_headers_frame(
trailers,
flags=[],
)
with pytest.raises(h2.exceptions.ProtocolError):
c.receive_data(f.serialize())
expected_frame = frame_factory.build_goaway_frame(
last_stream_id=1, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR,
)
assert c.data_to_send() == expected_frame.serialize()
def test_can_send_trailers(self, frame_factory):
"""
        When a second set of headers is sent, it is treated as trailers.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
f = frame_factory.build_headers_frame(self.example_request_headers)
c.receive_data(f.serialize())
# Send headers.
c.clear_outbound_data_buffer()
c.send_headers(1, self.example_response_headers)
# Now send trailers.
trailers = [('content-length', '0')]
c.send_headers(1, trailers, end_stream=True)
frame_factory.refresh_encoder()
f1 = frame_factory.build_headers_frame(
self.example_response_headers,
)
f2 = frame_factory.build_headers_frame(
trailers,
flags=['END_STREAM'],
)
assert c.data_to_send() == f1.serialize() + f2.serialize()
def test_trailers_must_have_end_stream(self, frame_factory):
"""
A set of trailers must carry the END_STREAM flag.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
f = frame_factory.build_headers_frame(self.example_request_headers)
c.receive_data(f.serialize())
# Send headers.
c.send_headers(1, self.example_response_headers)
# Now send trailers.
trailers = [('content-length', '0')]
with pytest.raises(h2.exceptions.ProtocolError):
c.send_headers(1, trailers)
@pytest.mark.parametrize("frame_id", range(12, 256))
def test_unknown_frames_are_ignored(self, frame_factory, frame_id):
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
c.clear_outbound_data_buffer()
f = frame_factory.build_data_frame(data=b'abcdefghtdst')
f.type = frame_id
events = c.receive_data(f.serialize())
assert not c.data_to_send()
assert len(events) == 1
assert isinstance(events[0], h2.events.UnknownFrameReceived)
assert isinstance(events[0].frame, hyperframe.frame.ExtensionFrame)
def test_can_send_goaway_repeatedly(self, frame_factory):
"""
We can send a GOAWAY frame as many times as we like.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
c.clear_outbound_data_buffer()
c.close_connection()
c.close_connection()
c.close_connection()
f = frame_factory.build_goaway_frame(last_stream_id=0)
assert c.data_to_send() == (f.serialize() * 3)
def test_receiving_goaway_frame(self, frame_factory):
"""
Receiving a GOAWAY frame causes a ConnectionTerminated event to be
fired and transitions the connection to the CLOSED state, and clears
the outbound data buffer.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.initiate_connection()
c.receive_data(frame_factory.preamble())
f = frame_factory.build_goaway_frame(
last_stream_id=5, error_code=h2.errors.ErrorCodes.SETTINGS_TIMEOUT
)
events = c.receive_data(f.serialize())
assert len(events) == 1
event = events[0]
assert isinstance(event, h2.events.ConnectionTerminated)
assert event.error_code == h2.errors.ErrorCodes.SETTINGS_TIMEOUT
assert isinstance(event.error_code, h2.errors.ErrorCodes)
assert event.last_stream_id == 5
assert event.additional_data is None
assert c.state_machine.state == h2.connection.ConnectionState.CLOSED
assert not c.data_to_send()
def test_receiving_multiple_goaway_frames(self, frame_factory):
"""
Multiple GOAWAY frames can be received at once, and are allowed. Each
one fires a ConnectionTerminated event.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
c.clear_outbound_data_buffer()
f = frame_factory.build_goaway_frame(last_stream_id=0)
events = c.receive_data(f.serialize() * 3)
assert len(events) == 3
assert all(
isinstance(event, h2.events.ConnectionTerminated)
for event in events
)
def test_receiving_goaway_frame_with_additional_data(self, frame_factory):
"""
        A GOAWAY frame can contain additional debug data, which should be
        made available via the ConnectionTerminated event.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.initiate_connection()
c.receive_data(frame_factory.preamble())
additional_data = b'debug data'
f = frame_factory.build_goaway_frame(last_stream_id=0,
additional_data=additional_data)
events = c.receive_data(f.serialize())
assert len(events) == 1
event = events[0]
assert isinstance(event, h2.events.ConnectionTerminated)
assert event.additional_data == additional_data
def test_receiving_goaway_frame_with_unknown_error(self, frame_factory):
"""
Receiving a GOAWAY frame with an unknown error code behaves exactly the
same as receiving one we know about, but the code is reported as an
integer instead of as an ErrorCodes.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.initiate_connection()
c.receive_data(frame_factory.preamble())
f = frame_factory.build_goaway_frame(
last_stream_id=5, error_code=0xFA
)
events = c.receive_data(f.serialize())
assert len(events) == 1
event = events[0]
assert isinstance(event, h2.events.ConnectionTerminated)
assert event.error_code == 250
assert not isinstance(event.error_code, h2.errors.ErrorCodes)
assert event.last_stream_id == 5
assert event.additional_data is None
assert c.state_machine.state == h2.connection.ConnectionState.CLOSED
assert not c.data_to_send()
def test_cookies_are_joined(self, frame_factory):
"""
RFC 7540 Section 8.1.2.5 requires that we join multiple Cookie headers
in a header block together.
"""
# This is a moderately varied set of cookie headers: some combined,
# some split.
cookie_headers = [
('cookie',
'username=John Doe; expires=Thu, 18 Dec 2013 12:00:00 UTC'),
('cookie', 'path=1'),
('cookie', 'test1=val1; test2=val2')
]
expected = (
'username=John Doe; expires=Thu, 18 Dec 2013 12:00:00 UTC; '
'path=1; test1=val1; test2=val2'
)
c = h2.connection.H2Connection(config=self.server_config)
c.initiate_connection()
c.receive_data(frame_factory.preamble())
f = frame_factory.build_headers_frame(
self.example_request_headers + cookie_headers
)
events = c.receive_data(f.serialize())
assert len(events) == 1
e = events[0]
cookie_fields = [(n, v) for n, v in e.headers if n == 'cookie']
assert len(cookie_fields) == 1
_, v = cookie_fields[0]
assert v == expected
def test_cookies_arent_joined_without_normalization(self, frame_factory):
"""
If inbound header normalization is disabled, cookie headers aren't
joined.
"""
# This is a moderately varied set of cookie headers: some combined,
# some split.
cookie_headers = [
('cookie',
'username=John Doe; expires=Thu, 18 Dec 2013 12:00:00 UTC'),
('cookie', 'path=1'),
('cookie', 'test1=val1; test2=val2')
]
config = h2.config.H2Configuration(
client_side=False,
normalize_inbound_headers=False,
header_encoding='utf-8'
)
c = h2.connection.H2Connection(config=config)
c.initiate_connection()
c.receive_data(frame_factory.preamble())
f = frame_factory.build_headers_frame(
self.example_request_headers + cookie_headers
)
events = c.receive_data(f.serialize())
assert len(events) == 1
e = events[0]
received_cookies = [(n, v) for n, v in e.headers if n == 'cookie']
assert len(received_cookies) == 3
assert cookie_headers == received_cookies
def test_stream_repr(self):
"""
Ensure stream string representation is appropriate.
"""
s = h2.stream.H2Stream(4, None, 12, 14)
assert repr(s) == "<H2Stream id:4 state:<StreamState.IDLE: 0>>"
def sanity_check_data_frame(data_frame,
expected_flow_controlled_length,
expect_padded_flag,
expected_data_frame_pad_length):
"""
    Assert that ``data_frame`` is a frame of type
    ``hyperframe.frame.DataFrame``, and that its ``flags`` and
    ``flow_controlled_length`` match expectations.
"""
assert isinstance(data_frame, hyperframe.frame.DataFrame)
assert data_frame.flow_controlled_length == expected_flow_controlled_length
if expect_padded_flag:
assert 'PADDED' in data_frame.flags
else:
assert 'PADDED' not in data_frame.flags
assert data_frame.pad_length == expected_data_frame_pad_length
|
py | 7df99a7219207eed120b617776fb7d5a514277eb | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) Merchise Autrement [~º/~] and Contributors
# All rights reserved.
#
# This is free software; you can do what the LICENCE file allows you to.
#
from .db import * # noqa
|
py | 7df99a9ff30a9fb609bf7293bbbd0d213e275637 | # https://ibb.co/JHjWxss
class Solution(object):
    def numDecodings(self, s):
        """
        :type s: str
        :rtype: int
        """
        # dp[i] holds the number of ways to decode the prefix s[:i+1].
        if not s or s[0] == '0':
            return 0
        n = len(s)
        dp = [0] * n
        dp[0] = 1
        for i in range(1, n):
            # Decode s[i] as a single digit (valid only when it is not '0').
            if s[i] != '0':
                dp[i] += dp[i - 1]
            # Decode s[i-1:i+1] as a two-digit number (valid only for 10..26).
            two_digit = int(s[i - 1:i + 1])
            if 10 <= two_digit <= 26:
                dp[i] += dp[i - 2] if i >= 2 else 1
        return dp[-1]
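# A quick sanity check, not part of the original submission; it only assumes
# the Solution class defined above.
if __name__ == "__main__":
    assert Solution().numDecodings("226") == 3  # "2 2 6", "22 6", "2 26"
    assert Solution().numDecodings("10") == 1   # only "10"
    assert Solution().numDecodings("06") == 0   # a leading zero cannot be decoded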
|
py | 7df99b1aecc4e6a0d287c045957c8139c533cbef | from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('core.urls')),
path('members/', include('members.urls')),
path('projects/', include('projects.urls')),
path('bugs/', include('bugs.urls')),
]
|
py | 7df99bd7fe82d527fb5e383ce3e161a0a6f533f3 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-02 08:10
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('user', '0001_initial'),
('ip', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='ip',
name='user_profile',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.UserProfile'),
),
migrations.AlterUniqueTogether(
name='ip',
unique_together=set([('user_profile', 'address', 'vhost')]),
),
]
|
py | 7df99bfbc5b4af4de3eeff70d2dbbf4c14e6c5ef | from typing import Any, Callable, Dict, Iterable, List, Set, Tuple, Union
from collections import defaultdict
import datetime
import logging
import pytz
from django.db.models import Q, QuerySet
from django.template import loader
from django.conf import settings
from django.utils.timezone import now as timezone_now
from confirmation.models import one_click_unsubscribe_link
from zerver.lib.notifications import build_message_list
from zerver.lib.send_email import send_future_email, FromAddress
from zerver.lib.url_encoding import encode_stream
from zerver.models import UserProfile, UserMessage, Recipient, Stream, \
Subscription, UserActivity, get_active_streams, get_user_profile_by_id, \
Realm, Message
from zerver.context_processors import common_context
from zerver.lib.queue import queue_json_publish
from zerver.lib.logging_util import log_to_file
logger = logging.getLogger(__name__)
log_to_file(logger, settings.DIGEST_LOG_PATH)
VALID_DIGEST_DAY = 1 # Tuesdays
DIGEST_CUTOFF = 5
# Digests accumulate 4 types of interesting traffic for a user:
# 1. Missed PMs
# 2. New streams
# 3. New users
# 4. Interesting stream traffic, as determined by the longest and most
#    diversely commented-upon topics.
def inactive_since(user_profile: UserProfile, cutoff: datetime.datetime) -> bool:
# Hasn't used the app in the last DIGEST_CUTOFF (5) days.
most_recent_visit = [row.last_visit for row in
UserActivity.objects.filter(
user_profile=user_profile)]
if not most_recent_visit:
# This person has never used the app.
return True
last_visit = max(most_recent_visit)
return last_visit < cutoff
def should_process_digest(realm_str: str) -> bool:
if realm_str in settings.SYSTEM_ONLY_REALMS:
# Don't try to send emails to system-only realms
return False
return True
# Changes to this should also be reflected in
# zerver/worker/queue_processors.py:DigestWorker.consume()
def queue_digest_recipient(user_profile: UserProfile, cutoff: datetime.datetime) -> None:
# Convert cutoff to epoch seconds for transit.
event = {"user_profile_id": user_profile.id,
"cutoff": cutoff.strftime('%s')}
queue_json_publish("digest_emails", event)
def enqueue_emails(cutoff: datetime.datetime) -> None:
if not settings.SEND_DIGEST_EMAILS:
return
if timezone_now().weekday() != VALID_DIGEST_DAY:
return
for realm in Realm.objects.filter(deactivated=False, digest_emails_enabled=True):
if not should_process_digest(realm.string_id):
continue
user_profiles = UserProfile.objects.filter(
realm=realm, is_active=True, is_bot=False, enable_digest_emails=True)
for user_profile in user_profiles:
if inactive_since(user_profile, cutoff):
queue_digest_recipient(user_profile, cutoff)
logger.info("%s is inactive, queuing for potential digest" % (
user_profile.email,))
def gather_hot_conversations(user_profile: UserProfile, stream_ums: QuerySet) -> List[Dict[str, Any]]:
# Gather stream conversations of 2 types:
# 1. long conversations
# 2. conversations where many different people participated
#
# Returns a list of dictionaries containing the templating
# information for each hot conversation.
# stream_ums is a list of UserMessage rows for a single
# user, so the list of messages is distinct here.
messages = [um.message for um in stream_ums]
conversation_length = defaultdict(int) # type: Dict[Tuple[int, str], int]
conversation_messages = defaultdict(list) # type: Dict[Tuple[int, str], List[Message]]
conversation_diversity = defaultdict(set) # type: Dict[Tuple[int, str], Set[str]]
for message in messages:
key = (message.recipient.type_id,
message.topic_name())
conversation_messages[key].append(message)
if not message.sent_by_human():
# Don't include automated messages in the count.
continue
conversation_diversity[key].add(
message.sender.full_name)
conversation_length[key] += 1
diversity_list = list(conversation_diversity.items())
diversity_list.sort(key=lambda entry: len(entry[1]), reverse=True)
length_list = list(conversation_length.items())
length_list.sort(key=lambda entry: entry[1], reverse=True)
# Get up to the 4 best conversations from the diversity list
# and length list, filtering out overlapping conversations.
hot_conversations = [elt[0] for elt in diversity_list[:2]]
for candidate, _ in length_list:
if candidate not in hot_conversations:
hot_conversations.append(candidate)
if len(hot_conversations) >= 4:
break
# There was so much overlap between the diversity and length lists that we
# still have < 4 conversations. Try to use remaining diversity items to pad
# out the hot conversations.
num_convos = len(hot_conversations)
if num_convos < 4:
hot_conversations.extend([elt[0] for elt in diversity_list[num_convos:4]])
hot_conversation_render_payloads = []
for h in hot_conversations:
users = list(conversation_diversity[h])
count = conversation_length[h]
messages = conversation_messages[h]
# We'll display up to 2 messages from the conversation.
first_few_messages = messages[:2]
teaser_data = {"participants": users,
"count": count - len(first_few_messages),
"first_few_messages": build_message_list(
user_profile, first_few_messages)}
hot_conversation_render_payloads.append(teaser_data)
return hot_conversation_render_payloads
def gather_new_users(user_profile: UserProfile, threshold: datetime.datetime) -> Tuple[int, List[str]]:
# Gather information on users in the realm who have recently
# joined.
if not user_profile.can_access_all_realm_members():
new_users = [] # type: List[UserProfile]
else:
new_users = list(UserProfile.objects.filter(
realm=user_profile.realm, date_joined__gt=threshold,
is_bot=False))
user_names = [user.full_name for user in new_users]
return len(user_names), user_names
def gather_new_streams(user_profile: UserProfile,
threshold: datetime.datetime) -> Tuple[int, Dict[str, List[str]]]:
if user_profile.can_access_public_streams():
new_streams = list(get_active_streams(user_profile.realm).filter(
invite_only=False, date_created__gt=threshold))
else:
new_streams = []
base_url = "%s/#narrow/stream/" % (user_profile.realm.uri,)
streams_html = []
streams_plain = []
for stream in new_streams:
narrow_url = base_url + encode_stream(stream.id, stream.name)
stream_link = "<a href='%s'>%s</a>" % (narrow_url, stream.name)
streams_html.append(stream_link)
streams_plain.append(stream.name)
return len(new_streams), {"html": streams_html, "plain": streams_plain}
def enough_traffic(unread_pms: str, hot_conversations: str, new_streams: int, new_users: int) -> bool:
if unread_pms or hot_conversations:
# If you have any unread traffic, good enough.
return True
if new_streams and new_users:
# If you somehow don't have any traffic but your realm did get
# new streams and users, good enough.
return True
return False
def handle_digest_email(user_profile_id: int, cutoff: float,
render_to_web: bool = False) -> Union[None, Dict[str, Any]]:
user_profile = get_user_profile_by_id(user_profile_id)
    # We are disabling digest emails for soft-deactivated users for the time being.
# TODO: Find an elegant way to generate digest emails for these users.
if user_profile.long_term_idle:
return None
# Convert from epoch seconds to a datetime object.
cutoff_date = datetime.datetime.fromtimestamp(int(cutoff), tz=pytz.utc)
all_messages = UserMessage.objects.filter(
user_profile=user_profile,
message__pub_date__gt=cutoff_date
).select_related('message').order_by("message__pub_date")
context = common_context(user_profile)
# Start building email template data.
context.update({
'realm_name': user_profile.realm.name,
'name': user_profile.full_name,
'unsubscribe_link': one_click_unsubscribe_link(user_profile, "digest")
})
# Gather recent missed PMs, re-using the missed PM email logic.
# You can't have an unread message that you sent, but when testing
# this causes confusion so filter your messages out.
pms = all_messages.filter(
~Q(message__recipient__type=Recipient.STREAM) &
~Q(message__sender=user_profile))
# Show up to 4 missed PMs.
pms_limit = 4
context['unread_pms'] = build_message_list(
user_profile, [pm.message for pm in pms[:pms_limit]])
    context['remaining_unread_pms_count'] = max(0, len(pms) - pms_limit)
home_view_recipients = [sub.recipient for sub in
Subscription.objects.filter(
user_profile=user_profile,
active=True,
in_home_view=True)]
stream_messages = all_messages.filter(
message__recipient__type=Recipient.STREAM,
message__recipient__in=home_view_recipients)
# Gather hot conversations.
context["hot_conversations"] = gather_hot_conversations(
user_profile, stream_messages)
# Gather new streams.
new_streams_count, new_streams = gather_new_streams(
user_profile, cutoff_date)
context["new_streams"] = new_streams
context["new_streams_count"] = new_streams_count
# Gather users who signed up recently.
new_users_count, new_users = gather_new_users(
user_profile, cutoff_date)
context["new_users"] = new_users
if render_to_web:
return context
# We don't want to send emails containing almost no information.
if enough_traffic(context["unread_pms"], context["hot_conversations"],
new_streams_count, new_users_count):
logger.info("Sending digest email for %s" % (user_profile.email,))
# Send now, as a ScheduledEmail
send_future_email('zerver/emails/digest', user_profile.realm, to_user_ids=[user_profile.id],
from_name="Zulip Digest", from_address=FromAddress.NOREPLY,
context=context)
return None
|
py | 7df99c775364ad35052e9c81644db8334ad5c77f | #-
# Copyright (c) 2011 Robert N. M. Watson
# Copyright (c) 2011 Steven J. Murdoch
# Copyright (c) 2013 Michael Roe
# Copyright (c) 2015 SRI International
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from beritest_tools import attr
class test_cp2_cllb(BaseBERITestCase):
EXPECTED_EXCEPTIONS = 1
@attr('llsc')
@attr('cached')
@attr('capabilities')
def test_cp2_cllb_1(self):
'''That an uninterrupted cllb+cscb succeeds'''
self.assertRegisterEqual(self.MIPS.a0, 1, "Uninterrupted cllb+cscb failed")
@attr('llsc')
@attr('cached')
@attr('capabilities')
def test_cp2_cllb_2(self):
'''That an uninterrupted cllb+cscb stored the right value'''
self.assertRegisterEqual(self.MIPS.a1, 0xffffffffffffffff, "Uninterrupted cllb+cscb stored wrong value")
@attr('llsc')
@attr('cached')
@attr('capabilities')
def test_cp2_cllb_4(self):
'''That an uninterrupted cllb+add+cscb succeeds'''
self.assertRegisterEqual(self.MIPS.a2, 1, "Uninterrupted cllb+add+cscb failed")
@attr('llsc')
@attr('cached')
@attr('capabilities')
def test_cp2_cllb_5(self):
'''That an uninterrupted cllb+add+cscb stored the right value'''
self.assertRegisterEqual(self.MIPS.a3, 0, "Uninterrupted cllb+add+cscb stored wrong value")
@attr('llsc')
@attr('cached')
@attr('capabilities')
def test_cp2_cllb_8(self):
'''That an cllb+cscb spanning a trap fails'''
self.assertRegisterEqual(self.MIPS.a4, 0, "Interrupted cllb+tnei+cscb succeeded")
@attr('llsc')
@attr('cached')
@attr('capabilities')
def test_s0(self):
'''Test signed-extended capability load linked byte from double word'''
self.assertRegisterEqual(self.MIPS.s0, 0xfffffffffffffffe, "Sign-extended capability load linked byte from double word failed")
@attr('llsc')
@attr('cached')
@attr('capabilities')
def test_s1(self):
'''Test signed-extended positive capability load linked byte'''
self.assertRegisterEqual(self.MIPS.s1, 0x7f, "Sign-extended positive byte capability load linked failed")
@attr('llsc')
@attr('cached')
@attr('capabilities')
def test_s2(self):
'''Test signed-extended negative capability load linked byte'''
self.assertRegisterEqual(self.MIPS.s2, 0xffffffffffffffff, "Sign-extended negative byte capability load linked failed")
@attr('llsc')
@attr('cached')
@attr('capabilities')
def test_s3(self):
'''Test unsigned positive capability load linked byte'''
self.assertRegisterEqual(self.MIPS.s3, 0x7f, "Unsigned positive byte capability load linked failed")
@attr('llsc')
@attr('cached')
@attr('capabilities')
def test_s4(self):
'''Test unsigned negative capability load linked byte'''
self.assertRegisterEqual(self.MIPS.s4, 0xff, "Unsigned negative byte capability load linked failed")
|
py | 7df99da37b2d6935b24f5619195e6af8bb17b613 | import os
import argparse
from PIL import Image
import numpy as np
import math
from tqdm import tqdm
# generate the divisors of n; one is later chosen at random to divide the image's height
def gen_divisors(n):
for i in range(2, math.floor(math.sqrt(n))):
if n % i == 0:
yield i
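# Illustrative example (not in the original script): the generator only scans
# 2 .. floor(sqrt(n)) - 1, so for a 100-pixel-tall image
# list(gen_divisors(100)) == [2, 4, 5], and one of these values is later
# chosen at random as the stripe height for pixel sorting.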
# generate the extended, padded image
def pad_to_square(imgobj):
"""Pad to the nearest 100th"""
square = Image.new('RGB', ((imgobj.width // 100 + 1) * 100,(imgobj.height // 100 + 1) * 100), (0, 0, 0))
# pasting the original image object over the extended black padded background and returning it
square.paste(imgobj, imgobj.getbbox())
return square
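# Illustrative example (not in the original script): a 250x180 input is pasted
# onto a 300x200 black canvas; exact multiples of 100 are still bumped up, so a
# 200x200 image becomes 300x300.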
# the function returns the glitched image with given step size
def pixel_sort(imgobj, step_size=8):
    # pad the image so its dimensions are multiples of 100
padded = pad_to_square(imgobj)
# image to array
data = np.array(padded)
# dividing the image into stripes
stripes = np.split(data, data.shape[0] // step_size, axis=0)
sorted_data = []
# sort by rows
for stripe in tqdm(stripes):
sorted_data.append(np.sort(stripe, axis=0))
sorted_arr = np.array(sorted_data)
# shaping the sorted array into the padded image shape
sorted_arr = sorted_arr.reshape(padded.height, padded.width, 3)
# remove padding
sorted_arr = sorted_arr[:imgobj.height, :imgobj.width, :]
# generating image from array
return Image.fromarray(sorted_arr)
def glitch_image(fname):
"""
Return a glitched image object
"""
    original, step_size = None, None
original = Image.open(fname)
# randomly generate a step size that divides the image's height
divisors = list(gen_divisors(original.height))
# randomly choosing the index for divisors
idx = np.random.choice(len(divisors))
step_size = divisors[idx]
try:
return pixel_sort(original, step_size=step_size)
except:
        print('Dimension error processing ' + fname + '. Please try again.')
return None
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', type=str)
args = parser.parse_args()
glitched = glitch_image(args.file)
if glitched is not None:
glitched.show()
dirname, fname = os.path.split(args.file)
glitched.save(dirname + '/glitched_' + fname)
if __name__ == '__main__':
main() |
py | 7df99dff468654832ddbe910bfbc199be61097a1 | from slacker import Slacker
from utils import Utils
class SlackBot:
def __init__(self, channel, debug_channel, slack_api_token):
self.slacker = Slacker(slack_api_token)
self.channel = channel
self.debug_channel = debug_channel
def post_message(self, message, is_debug=False):
channel = self.debug_channel if is_debug else self.channel
try:
self.slacker.chat.post_message(channel, message)
return True, "post message to {}".format(channel)
except:
return False, "FAIL to post message to {}".format(channel)
|
py | 7df99ea65089f8f1351d48fb7bb590b4b303d8d8 | import pandas
import numpy as np
#Function to convert the file into csv
def convert(imgf, labelf, outf, n):
f = open(imgf, "rb")
o = open(outf, "w")
l = open(labelf, "rb")
f.read(16)
l.read(8)
images = []
for i in range(n):
image = [ord(l.read(1))]
for j in range(28*28):
image.append(ord(f.read(1)))
images.append(image)
for image in images:
o.write(",".join(str(pix) for pix in image)+"\n")
f.close()
o.close()
l.close()
convert("train-images.idx3-ubyte", "train-labels.idx1-ubyte",
"mnist_train.csv", 60000)
convert("t10k-images.idx3-ubyte", "t10k-labels.idx1-ubyte",
"mnist_test.csv", 10000)
# filepath = "mnist_train.csv"
# dataset= pandas.read_csv(filepath)
# print dataset.shape
# (59999, 785)
#sigmoid to be used as activation function
def sigmoid(x):
return 1/(1+ np.exp(-x))
pass
# neural network class definition
class NeuralNetwork:
# initialise the neural network
def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
# set number of nodes in each input, hidden, output layer
self.layer0 = inputnodes
self.layer1 = hiddennodes
self.layer2 = outputnodes
self.lr = learningrate
# weights connecting l0 to l1
self.w1 = 2*np.random.random((self.layer1,self.layer0)) - 1
# weights connecting l1 to l2
self.w2 = 2*np.random.random((self.layer2,self.layer1)) - 1
pass
# train the simple neural network
def train(self, pixel_list, label_list):
# convert inputs list to 2d array
pixels = np.array(pixel_list, ndmin=2).T
labels = np.array(label_list, ndmin=2).T
# values into layer 1
l1_inputs = np.dot(self.w1, pixels)
# values emerging from layer1
l1_outputs = sigmoid(l1_inputs)
# values into layer2
l2_inputs = np.dot(self.w2, l1_outputs)
# Values emerging from layer2
l2_outputs = sigmoid(l2_inputs)
# output layer error is the (target - actual)
l2_errors = labels - l2_outputs
# hidden layer error is the output_errors, split by weights, recombined at hidden nodes
l1_errors = np.dot(self.w2.T, l2_errors)
# update weights for hidden and output layers
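# note: the factor l2_outputs * (1.0 - l2_outputs) below is the derivative of the sigmoid activation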
self.w2 += self.lr * np.dot((l2_errors * l2_outputs * (1.0 - l2_outputs)), np.transpose(l1_outputs))
# update weights for input and hidden layers
self.w1 += self.lr * np.dot((l1_errors * l1_outputs * (1.0 - l1_outputs)), np.transpose(pixels))
pass
# test the simple neural network
def test(self, pixel_list):
# inputs list to 2d array
pixels = np.array(pixel_list, ndmin=2).T
l1_inputs = np.dot(self.w1, pixels)
l1_outputs = sigmoid(l1_inputs)
l2_inputs = np.dot(self.w2, l1_outputs)
l2_outputs = sigmoid(l2_inputs)
return l2_outputs
# Values
input_nodes = 784
hidden_nodes = 200
output_nodes = 10
# learning rate
learning_rate = 0.02
# create instance of neural network
n = NeuralNetwork(input_nodes,hidden_nodes,output_nodes, learning_rate)
# load the mnist training data CSV file into a list
training_data_file = open("mnist_train.csv", 'r')
training_data_list = training_data_file.readlines()
training_data_file.close()
# A single epoch is used here only to keep the processing time down (temporary).
epochs = 1
for e in range(epochs):
for record in training_data_list:
all_values = record.split(',')
# Normalisation of pixels 0 - 255 to range of 0 to 1
pixels = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
# the label output values (all 0.01, except the desired label which is 0.99)
labels = np.zeros(output_nodes) + 0.01
labels[int(all_values[0])] = 0.99
n.train(pixels, labels)
pass
print ("Training Done on {0} Epoch").format(e)
pass
# ---------------------------------------- 4. Checking the accuracy of the Network -----------------------------------------------------
# loading the mnist_test csv file
test_data_file = open("mnist_test.csv", 'r')
test_data_list = test_data_file.readlines()
test_data_file.close()
#if predicted right accuracy = 1 if not accuracy = 0
accuracy = []
for record in test_data_list:
all_values = record.split(',')
correct_label = int(all_values[0])
# Normalisation of pixels to range 0 to 1
inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
# run the simple neural network on test data
outputs = n.test(inputs)
label = np.argmax(outputs)
if (label == correct_label):
accuracy.append(1)
else:
accuracy.append(0)
pass
pass
accuracy_array = np.asarray(accuracy)
correct= float(accuracy_array.sum())
total = float(accuracy_array.size)
print "total number of correct predictions", correct
print "Total predictions", total
per = (correct/total)
print "performance = ", per
|
py | 7df99efdc89576a20334237b765f3725c8a35e4a | #!/usr/bin/env python3
# vim : nospell filetype=py ts=2 sw=2 sts=2 et :
from lib import *
from thing import Num,Sym
class Cols(Pretty):
def __init__(i,inits=[]):
i.all = []
i.nums = []
i.syms = []
i.names = inits
i.indep = []
i.klass = None
i.xnums = []
i.xsyms = []
[i.add(pos,txt) for pos,txt in enumerate(inits)]
def klassp(i,x):
return THE.char.klass in x
def nump(i,x):
for y in [THE.char.less, THE.char.more, THE.char.num]:
if y in x: return True
return False
def dep(i,x):
for y in [THE.char.less, THE.char.more, THE.char.klass]:
if y in x: return True
return False
def weight(i,x):
return -1 if THE.char.less in x else 1
def add(i,pos,txt):
klass = Num if i.nump(txt) else Sym
tmp = klass(txt=txt, pos=pos, w=i.weight(txt))
i.all += [tmp]
if i.klassp(txt): i.klass=tmp
what = i.nums if i.nump(txt) else i.syms
what += [tmp]
if not i.dep(txt):
i.indep += [tmp]
what = i.xnums if i.nump(txt) else i.xsyms
what += [tmp]
|
py | 7df99f1179105b2b70bb022e9ee25e5f60f25c38 | from .. settings import MONGO_SETTINGS
from pymongo import MongoClient
class MongoRouter(object):
def __init__(self,
connect_to="local"):
# ToDo: At some point in the future, make a dedicated mongo machine and make router differentiate
# between prod and local machines
self.connect_to = connect_to
self.client = MongoClient(MONGO_SETTINGS.get(connect_to, ("localhost", 27017)))
if connect_to == "local":
self.client = MongoClient()
else:
raise NotImplementedError("Other environments are not implemented yet.")
def route(self, desired_collection):
if desired_collection == "users":
return self.client["users_db"]["users_db"]
elif desired_collection == "test":
return self.client["test_db"]["test_db"]
import unittest
class TestMongoRouter(unittest.TestCase):
def setUp(self):
self.test_collection = MongoClient()["test_db"]["test_db"]
def tearDown(self):
self.test_collection.drop()
def test_route(self):
self.test_collection.insert_one({
"test_id": "tid",
"test": "success"
})
router = MongoRouter()
self.assertEquals(
"success",
router.route("test").find_one({"test_id": "tid"}).get("test", None)
)
router.route("test").insert_one({"test_id": "tid_2", "test": "success"})
self.assertEquals(
"success",
router.route("test").find_one({"test_id": "tid_2"}).get("test", None)
) |
py | 7df99f86159915917eb75eead44782b1a4938c19 | #
# Basic Single Particle Model (SPM)
#
import pybamm
from .base_lithium_ion_model import BaseModel
class BasicSPM(BaseModel):
"""Single Particle Model (SPM) model of a lithium-ion battery, from [2]_.
This class differs from the :class:`pybamm.lithium_ion.SPM` model class in that it
shows the whole model in a single class. This comes at the cost of flexibility in
combining different physical effects, and in general the main SPM class should be
used instead.
Parameters
----------
name : str, optional
The name of the model.
References
----------
.. [2] SG Marquis, V Sulzer, R Timms, CP Please and SJ Chapman. “An asymptotic
derivation of a single particle model with electrolyte”. Journal of The
Electrochemical Society, 166(15):A3693–A3706, 2019
**Extends:** :class:`pybamm.lithium_ion.BaseModel`
"""
def __init__(self, name="Single Particle Model"):
super().__init__({}, name)
pybamm.citations.register("marquis2019asymptotic")
# `param` is a class containing all the relevant parameters and functions for
# this model. These are purely symbolic at this stage, and will be set by the
# `ParameterValues` class when the model is processed.
param = self.param
######################
# Variables
######################
# Variables that depend on time only are created without a domain
Q = pybamm.Variable("Discharge capacity [A.h]")
# Variables that vary spatially are created with a domain
c_s_n = pybamm.Variable(
"X-averaged negative particle concentration", domain="negative particle"
)
c_s_p = pybamm.Variable(
"X-averaged positive particle concentration", domain="positive particle"
)
# Constant temperature
T = param.T_init
######################
# Other set-up
######################
# Current density
i_cell = param.current_with_time
j_n = i_cell / param.l_n
j_p = -i_cell / param.l_p
######################
# State of Charge
######################
I = param.dimensional_current_with_time
# The `rhs` dictionary contains differential equations, with the key being the
# variable in the d/dt
self.rhs[Q] = I * param.timescale / 3600
# Initial conditions must be provided for the ODEs
self.initial_conditions[Q] = pybamm.Scalar(0)
######################
# Particles
######################
# The div and grad operators will be converted to the appropriate matrix
# multiplication at the discretisation stage
N_s_n = -param.D_n(c_s_n, T) * pybamm.grad(c_s_n)
N_s_p = -param.D_p(c_s_p, T) * pybamm.grad(c_s_p)
self.rhs[c_s_n] = -(1 / param.C_n) * pybamm.div(N_s_n)
self.rhs[c_s_p] = -(1 / param.C_p) * pybamm.div(N_s_p)
# Surf takes the surface value of a variable, i.e. its boundary value on the
# right side. This is also accessible via `boundary_value(x, "right")`, with
# "left" providing the boundary value of the left side
c_s_surf_n = pybamm.surf(c_s_n)
c_s_surf_p = pybamm.surf(c_s_p)
# Boundary conditions must be provided for equations with spatial derivatives
self.boundary_conditions[c_s_n] = {
"left": (pybamm.Scalar(0), "Neumann"),
"right": (
-param.C_n * j_n / param.a_n / param.D_n(c_s_surf_n, T),
"Neumann",
),
}
self.boundary_conditions[c_s_p] = {
"left": (pybamm.Scalar(0), "Neumann"),
"right": (
-param.C_p * j_p / param.a_p / param.gamma_p / param.D_p(c_s_surf_p, T),
"Neumann",
),
}
# c_n_init and c_p_init are functions, but for the SPM we evaluate them at x=0
# and x=1 since there is no x-dependence in the particles
self.initial_conditions[c_s_n] = param.c_n_init(0)
self.initial_conditions[c_s_p] = param.c_p_init(1)
# Events specify points at which a solution should terminate
self.events += [
pybamm.Event(
"Minimum negative particle surface concentration",
pybamm.min(c_s_surf_n) - 0.01,
),
pybamm.Event(
"Maximum negative particle surface concentration",
(1 - 0.01) - pybamm.max(c_s_surf_n),
),
pybamm.Event(
"Minimum positive particle surface concentration",
pybamm.min(c_s_surf_p) - 0.01,
),
pybamm.Event(
"Maximum positive particle surface concentration",
(1 - 0.01) - pybamm.max(c_s_surf_p),
),
]
# Note that the SPM does not have any algebraic equations, so the `algebraic`
# dictionary remains empty
######################
# (Some) variables
######################
# Interfacial reactions
j0_n = param.j0_n(1, c_s_surf_n, T) / param.C_r_n
j0_p = param.gamma_p * param.j0_p(1, c_s_surf_p, T) / param.C_r_p
eta_n = (2 / param.ne_n) * pybamm.arcsinh(j_n / (2 * j0_n))
eta_p = (2 / param.ne_p) * pybamm.arcsinh(j_p / (2 * j0_p))
phi_s_n = 0
phi_e = -eta_n - param.U_n(c_s_surf_n, T)
phi_s_p = eta_p + phi_e + param.U_p(c_s_surf_p, T)
V = phi_s_p
whole_cell = ["negative electrode", "separator", "positive electrode"]
# The `variables` dictionary contains all variables that might be useful for
# visualising the solution of the model
# Primary broadcasts are used to broadcast scalar quantities across a domain
# into a vector of the right shape, for multiplying with other vectors
self.variables = {
"Negative particle surface concentration": pybamm.PrimaryBroadcast(
c_s_surf_n, "negative electrode"
),
"Electrolyte concentration": pybamm.PrimaryBroadcast(1, whole_cell),
"Positive particle surface concentration": pybamm.PrimaryBroadcast(
c_s_surf_p, "positive electrode"
),
"Current [A]": I,
"Negative electrode potential": pybamm.PrimaryBroadcast(
phi_s_n, "negative electrode"
),
"Electrolyte potential": pybamm.PrimaryBroadcast(phi_e, whole_cell),
"Positive electrode potential": pybamm.PrimaryBroadcast(
phi_s_p, "positive electrode"
),
"Terminal voltage": V,
}
self.events += [
pybamm.Event("Minimum voltage", V - param.voltage_low_cut),
pybamm.Event("Maximum voltage", V - param.voltage_high_cut),
]
|
py | 7df9a015a801dde913be83c3911ecc1e97cd926d | """
The complete list of localized-orbital SCF wavefunction quantities.
"""
localized_wavefunction = {}
# Orbitals
localized_wavefunction["localized_orbitals_a"] = {
"type": "array",
"description": "Localized alpha-spin orbitals in the AO basis. "
"All nmo orbitals are included, even if only a subset were localized.",
"items": {"type": "number"},
"shape": ["nao", "nmo"]
}
localized_wavefunction["localized_orbitals_b"] = {
"type": "array",
"description": "Localized beta-spin orbitals in the AO basis. "
"All nmo orbitals are included, even if only a subset were localized.",
"items": {"type": "number"},
"shape": ["nao", "nmo"]
}
# Fock matrix
localized_wavefunction["localized_fock_a"] = {
"type": "array",
"description": "Alpha-spin Fock matrix in the localized molecular orbital basis. "
"All nmo orbitals are included, even if only a subset were localized.",
"items": {"type": "number"},
"shape": ["nmo", "nmo"]
}
localized_wavefunction["localized_fock_b"] = {
"type": "array",
"description": "Beta-spin Fock matrix in the localized molecular orbital basis. "
"All nmo orbitals are included, even if only a subset were localized.",
"items": {"type": "number"},
"shape": ["nmo", "nmo"]
}
# Note that localized density, eigenvalues, and occupations are not included since they are the same as the SCF density
|
py | 7df9a05a73be605947d0a8d7c3b5570a8b0ba9e4 | # test some extreme cases of allocating exceptions and tracebacks
import os
import micropython
# SKIP LOPY1 for limitations
if os.uname().sysname == 'LoPy':
print("SKIP")
import sys
sys.exit()
# Check for stackless build, which can't call functions without
# allocating a frame on the heap.
try:
def stackless(): pass
micropython.heap_lock(); stackless(); micropython.heap_unlock()
except RuntimeError:
print("SKIP")
raise SystemExit
# some ports need to allocate heap for the emergency exception
try:
micropython.alloc_emergency_exception_buf(256)
except AttributeError:
pass
def main():
# create an exception with many args while heap is locked
# should revert to empty tuple for args
micropython.heap_lock()
e = Exception(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
micropython.heap_unlock()
print(repr(e))
# create an exception with a long formatted error message while heap is locked
# should use emergency exception buffer and truncate the message
def f():
pass
micropython.heap_lock()
try:
f(abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=1)
except Exception as er:
e = er
micropython.heap_unlock()
print(repr(e)[:10])
# create an exception with a long formatted error message while heap is low
# should use the heap and truncate the message
lst = []
while 1:
try:
lst = [lst]
except MemoryError:
break
try:
f(abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=1)
except Exception as er:
e = er
lst[0] = None
lst = None
print(repr(e)[:10])
# raise a deep exception with the heap locked
# should use emergency exception and be unable to resize traceback array
def g():
g()
micropython.heap_lock()
try:
g()
except Exception as er:
e = er
micropython.heap_unlock()
print(repr(e)[:13])
# create an exception on the heap with some traceback on the heap, but then
# raise it with the heap locked so it can't allocate any more traceback
exc = Exception('my exception')
try:
raise exc
except:
pass
def h(e):
raise e
micropython.heap_lock()
try:
h(exc)
except Exception as er:
e = er
micropython.heap_unlock()
print(repr(e))
main()
|
py | 7df9a29f96abe708d071dee5ca72a679cc10b62f | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-24 22:33
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import heltour.tournament.models
class Migration(migrations.Migration):
dependencies = [
('tournament', '0079_loneplayerscore_perf_rating'),
]
operations = [
migrations.AlterModelOptions(
name='player',
options={'ordering': ['lichess_username']},
),
migrations.AddField(
model_name='teampairing',
name='black_wins',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='teampairing',
name='white_wins',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='teamscore',
name='games_won',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='teamscore',
name='head_to_head',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='teamscore',
name='sb_score',
field=heltour.tournament.models.ScoreField(default=0),
),
migrations.AlterField(
model_name='player',
name='lichess_username',
field=models.CharField(max_length=255, validators=[django.core.validators.RegexValidator('^[\\w-]+$')]),
),
]
|
py | 7df9a3c33ddc4623fb8f07432769c1256567a0b6 | """withered_shadow_31459 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Withered Shadow"
admin.site.site_title = "Withered Shadow Admin Portal"
admin.site.index_title = "Withered Shadow Admin"
# swagger
api_info = openapi.Info(
title="Withered Shadow API",
default_version="v1",
description="API documentation for Withered Shadow App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
|
py | 7df9a605408e523262abb2a8edf74a3137c15f0a | # -*- coding: utf-8 -*-
from odoo import models, fields, api, _
from odoo.addons.onesphere_assembly_industry.constants import ASSEMBLY_TOOLS_TECH_NAME
class MaintenanceEquipment(models.Model):
_inherit = 'maintenance.equipment'
def create_group_tool(self):
for workcenter_group in self.workcenter_id.group_ids:
val = {
"workgroup_id": workcenter_group.id,
"workcenter_id": self.workcenter_id.id,
"tightening_tool_id": self.id,
}
self.env['mrp.workcenter.group.tightening.tool'].sudo().create(val)
# When a tightening-tool device is modified and its work center changes, update the tightening tool group table
def write(self, vals):
ret = super(MaintenanceEquipment, self).write(vals)
if 'workcenter_id' not in vals:
return ret
for tool_id in self:
if tool_id.category_id.technical_name not in ASSEMBLY_TOOLS_TECH_NAME:
continue
need_unlink_recs = self.env['mrp.workcenter.group.tightening.tool'].search(
[('tightening_tool_id', '=', tool_id.id)])
need_unlink_recs.sudo().unlink()
tool_id.create_group_tool()
return ret
# When a tightening tool is created, generate the corresponding tightening tool group records
@api.model
def create(self, vals):
ret = super(MaintenanceEquipment, self).create(vals)
if 'workcenter_id' not in vals:
return ret
for tool_id in ret:
if tool_id.category_id.technical_name not in ASSEMBLY_TOOLS_TECH_NAME:
continue
tool_id.create_group_tool()
return ret
|
py | 7df9a634be0912c3de80b6aab512d0fa119aa59e | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 9 19:42:11 2022
@author: Mohammad ASif Zaman
A program to read an image file (pixel art) and convert it to a numpy matrix.
Different color regions of the image file will be segmented as different regions.
The indices corresponding to each region (with respec to a meshgrid array) will
be calculated. The function returns the indices of each region as a list file.
% Output variables
Nx : Number of elements in the x direction
Ny : Number of elements in the y direction
n_regions : Number of identified regions
regions_indices : list of indices of each region. (e.g. regions_indices[1] = indices of all elements that fall in region 1)
regions_indices_u : list of indices in unravel (flattened) form
"""
import numpy as np
#==============================================================================
def structure_definition(im):
Nx,Ny = im.size
# Dummy variables having the same size as the actual spatial variables but spanning from -1 to 1
# The boundary indices found for these dummy spatial variables are the same as the boundary indices
# of the actual spatial variables.
x = np.linspace(-1,1,Nx) # x variables in 1D
y = np.linspace(-1,1,Ny) # y variable in 1D
# Meshgrid of the dummy spatial variables
X,Y = np.meshgrid(x,y) # 2D meshgrid
# 1D indexing of the dummy spatial variables
Xu = X.ravel() # Unravel 2D meshgrid to 1D array
Yu = Y.ravel()
# Read the image file in a numpy array
strct_map = np.flipud(np.array(im)) # the flipud is necessary to match the numpy array coordinate with the figure pixel coordinates
# Map the structure in 1D (unravel)
strct_map_u = strct_map.ravel()
# Find the unique elements of structure array. Each unique element referst to a specific color.
# When drawing the image, different boundary regions should be drawn with different colors.
# All regions having the same color would be assigned the same boundary value (or will be assigned as unknown)
regions = np.unique(strct_map_u)
n_regions = np.size(regions) # The number of unique regions
print('Number of regions identified = %d \n' % (n_regions) )
# Defining empty lists. They will be populated by indices of different boundary regions
regions_indices = [];
regions_indices_u = [];
for m in range(n_regions):
ind_temp = np.where(strct_map==regions[m]) # indices for the meshgrid
ind_u_temp = np.squeeze(np.where(strct_map_u==regions[m])) # indices for the unravel case
regions_indices.append(ind_temp) # store meshgrid indices
regions_indices_u.append(ind_u_temp) # store unravel indices
return Nx,Ny,n_regions, regions_indices, regions_indices_u
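# Example usage (a sketch; the file name is hypothetical and Pillow is assumed):
# from PIL import Image
# im = Image.open('pixel_art.png')
# Nx, Ny, n_regions, regions_indices, regions_indices_u = structure_definition(im)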
#============================================================================== |
py | 7df9a6d96946b68aed3e229bacd15a46c8294570 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
#pylint: disable=
"""
File : utils.py
Author : Valentin Kuznetsov <vkuznet AT gmail dot com>
Description:
"""
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
# system modules
import cgi
import json
import time
import hashlib
import cherrypy
from urllib.error import URLError
def tstamp():
"Generic time stamp"
return time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
def gen_color(val):
"Generate unique color code for given string value"
keyhash = hashlib.md5()
keyhash.update(val)
col = '#%s' % keyhash.hexdigest()[:6]
return col
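# e.g. gen_color('vk') returns '#' followed by the first six hex digits of md5('vk')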
def quote(data):
"""
Sanitize the data using cgi.escape.
"""
if isinstance(data, int) or isinstance(data, float):
res = data
elif isinstance(data, dict):
res = data
elif isinstance(data, list):
res = data
elif isinstance(data, long) or isinstance(data, int) or\
isinstance(data, float):
res = data
else:
try:
if data:
res = cgi.escape(data, quote=True)
else:
res = ""
except Exception as exc:
print("Unable to cgi.escape(%s, quote=True)" % data)
res = ""
return res
def json2form(jsondata, indent=2, keep_first_value=True):
"Convert input json dict into one used by HTML form"
if keep_first_value:
for key, val in jsondata.items():
if isinstance(val, list):
if len(val) == 0:
jsondata[key] = ""
else:
jsondata[key] = val[0]
return json.dumps(jsondata, indent=indent)
def json2table(jsondata, web_ui_map, visible_attrs=None, selected={}):
"""
Convert input json dict into HTML table based on assumtion that
input json is in a simple key:value form.
"""
table = """<table class="table-bordered width-100">\n"""
table += "<thead><tr><th>Field</th><th>Value</th></tr></thead>\n"
keys = sorted(jsondata.keys())
# move up keys whose values have REPLACE prefix
priority_keys = []
rest_keys = []
for key in keys:
val = jsondata[key]
if isinstance(val, basestring) and val.startswith('REPLACE-'):
priority_keys.append(key)
else:
rest_keys.append(key)
cells = {}
for key in priority_keys+rest_keys:
val = jsondata[key]
if isinstance(val, list) and not val: # empty list replace with input text tag
val = ""
if isinstance(val, list):
if not visible_attrs:
sel = '<textarea name="%s" class="width-100">%s</textarea>' \
% (key, json.dumps(val))
else:
MULTI_SELECTION_KEYS = ['SiteWhitelist', 'SiteBlacklist', 'AutoApproveSubscriptionSites']
if key in MULTI_SELECTION_KEYS:
sel = "<select class=\"width-100\" name=\"%s\" multiple>" % key
else:
sel = "<select class=\"width-100\" name=\"%s\">" % key
if key in selected:
values = val
else:
values = sorted(val)
if key in ['CMSSWVersion', 'ScramArch']:
values.reverse()
# when there is no value to be selected
if key in selected and not selected[key]:
sel += "<option selected disabled>--select an option--</option>"
for item in values:
if key in selected and item in selected[key]:
sel += "<option value=\"%s\" selected=\"selected\">%s</option>" % (item, item)
else:
sel += "<option value=\"%s\">%s</option>" % (item, item)
sel += "</select>"
val = sel
elif isinstance(val, basestring):
if val.startswith('REPLACE-'):
val = '<input type="text" name="%s" placeholder="%s" class="width-100">'\
% (key, val)
elif len(val) < 80:
val = '<input type="text" name="%s" value="%s" class="width-100" />' % (key, val)
else:
val = '<textarea name="%s" class="width-100">%s</textarea>' % (key, val)
elif isinstance(val, (dict, list)):
val = '<textarea name="%s" class="width-100">%s</textarea>' % (key, json.dumps(val))
else:
val = '<input type="text" name="%s" value="%s" class="width-100" />' % (key, val)
if key in web_ui_map:
kname = web_ui_map[key]
else:
# use original key
kname = key
cells[key] = (kname, val)
if visible_attrs and isinstance(visible_attrs, list):
for attr in visible_attrs:
key, val = cells.pop(attr)
if key in web_ui_map:
kname = web_ui_map[key]
else:
# use original key
kname = key
val = val.replace('width-100', 'width-100 visible_input')
table += "<tr><td>%s</td><td class=\"visible\">%s</td></tr>\n" % (kname, val)
for key, pair in cells.items():
kname, val = pair
if not visible_attrs:
val = val.replace('<input', '<input readonly')
val = val.replace('<textarea', '<textarea readonly')
val = val.replace('<select', '<select disabled')
val = val.replace('width-100', 'width-100 invisible_input')
table += "<tr><td>%s</td><td>%s</td></tr>\n" % (kname, val)
table += "</table>"
return table
def genid(kwds):
"Generate id for given field"
if isinstance(kwds, dict):
record = dict(kwds)
data = json.JSONEncoder(sort_keys=True).encode(record)
else:
data = str(kwds)
keyhash = hashlib.md5()
keyhash.update(data)
return keyhash.hexdigest()
def checkarg(kwds, arg):
"""Check arg in a dict that it has str/unicode type"""
data = kwds.get(arg, None)
cond = data and (isinstance(data, str) or isinstance(data, unicode))
return cond
def checkargs(supported):
"""
Decorator to check arguments in provided supported list
"""
def wrap(func):
"""Wrap input function"""
def require_string(val):
"""Check that provided input is a string"""
if not (isinstance(val, str) or isinstance(val, unicode)):
code = web_code('Invalid input')
raise URLError('code=%s' % code)
def wrapped_f(self, *args, **kwds):
"""Wrap function arguments"""
# check request headers. For methods POST/PUT
# we need to read request body to get parameters
if cherrypy.request.method == 'POST' or\
cherrypy.request.method == 'PUT':
try:
body = cherrypy.request.body.read()
except:
body = None
if args and kwds:
code = web_code('Misleading request')
raise URLError('code=%s' % code)
if body:
jsondict = json.loads(body, encoding='latin-1')
else:
jsondict = kwds
for key, val in jsondict.iteritems():
kwds[str(key)] = str(val)
if not kwds:
if args:
kwds = args[-1]
keys = []
if not isinstance(kwds, dict):
code = web_code('Unsupported kwds')
raise URLError('code=%s' % code)
if kwds:
keys = [i for i in kwds.keys() if i not in supported]
if keys:
code = web_code('Unsupported key')
raise URLError('code=%s' % code)
if checkarg(kwds, 'status'):
if kwds['status'] not in \
['new', 'assigned']:
code = web_code('Unsupported view')
raise URLError('code=%s' % code)
data = func (self, *args, **kwds)
return data
wrapped_f.__doc__ = func.__doc__
wrapped_f.__name__ = func.__name__
wrapped_f.exposed = True
return wrapped_f
wrap.exposed = True
return wrap
WEB_CODES = [
(0 , 'N/A'),
(1 , 'Unsupported key'),
(2 , 'Unsupported value'),
(3 , 'Unsupported method'),
(4 , 'Unsupported collection'),
(5 , 'Unsupported database'),
(6 , 'Unsupported view'),
(7 , 'Unsupported format'),
(8 , 'Wrong type'),
(9 , 'Misleading request'),
(10 , 'Invalid query'),
(11 , 'Exception'),
(12 , 'Invalid input'),
(13 , 'Unsupported expire value'),
(14 , 'Unsupported order value'),
(15 , 'Unsupported skey value'),
(16 , 'Unsupported idx value'),
(17 , 'Unsupported limit value'),
(18 , 'Unsupported dir value'),
(19 , 'Unsupported sort value'),
(20 , 'Unsupported ajax value'),
(21 , 'Unsupported show value'),
(22 , 'Unsupported dasquery value'),
(23 , 'Unsupported dbcoll value'),
(24 , 'Unsupported msg value'),
(25 , 'Unable to start DASCore'),
(26 , 'No file id'),
(27 , 'Unsupported id value'),
(28 , 'Server error'),
(29 , 'Query is not suitable for this view'),
(30 , 'Parser error'),
(31 , 'Unsupported pid value'),
(32 , 'Unsupported interval value'),
(33 , 'Unsupported kwds'),
]
def decode_code(code):
"""Return human readable string for provided code ID"""
for idx, msg in WEB_CODES:
if code == idx:
return msg
return 'N/A'
def web_code(error):
"""Return WEB code for provided error string"""
for idx, msg in WEB_CODES:
if msg.lower() == error.lower():
return idx
return -1
def sort(docs, sortby):
"Sort given documents by sortby attribute"
for doc in docs:
yield doc
def reorder_list(org_list, selected):
"""
if the first is in the list.
move the first in front of the list
if not, add first to the list
"""
if isinstance(selected, list) and len(selected) == 0:
return org_list, selected
if not isinstance(selected, list):
selected = [selected]
new_list = list(org_list)
for item in selected:
try:
new_list.remove(item)
except ValueError:
pass
updated_list = list(selected)
updated_list.extend(new_list)
return updated_list, selected
|
py | 7df9a784c6d6d1cc1843593aed8f59ce262c49cf | #!/usr/bin/env python
from __future__ import print_function
import argparse
import torch
from dortmund_utils import build_dortmund_model
from laia.data import ImageDataLoader
from laia.data import TextImageFromTextTableDataset
from laia.common.arguments import add_argument, add_defaults, args
from laia.utils import ImageToTensor
from laia.utils.symbols_table import SymbolsTable
from tqdm import tqdm
import math
def phoc_lattice(img_ids, outputs, fileout):
for img_id, output in zip(img_ids, outputs):
output = output.cpu()
print(img_id, file=fileout)
for t in range(output.size(0)):
lp1 = float(output[t])
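# lp1 is treated as the log-probability of the active label; -expm1(lp1) = 1 - exp(lp1) gives p0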
p0 = -math.expm1(lp1)
lp0 = math.log(p0) if p0 > 0 else float('-inf')
for k, p in enumerate([lp0, lp1], 1):
if not math.isinf(p):
print('{:d}\t{:d}\t{:d}\t0,{:.10g},{:d}'.format(
t, t + 1, k, -float(p), k), file=fileout)
print(output.size(0), file=fileout)
print('', file=fileout)
if __name__ == '__main__':
add_defaults('gpu')
add_argument('--phoc_levels', type=int, default=[1, 2, 3, 4, 5], nargs='+',
help='PHOC levels used to encode the transcript')
add_argument('--add_sigmoid', action='store_true')
add_argument('syms', help='Symbols table mapping from strings to integers')
add_argument('img_dir', help='Directory containing word images')
add_argument('gt_file', help='')
add_argument('checkpoint', help='')
add_argument('output', type=argparse.FileType('w'))
args = args()
# Build neural network
syms = SymbolsTable(args.syms)
phoc_size = sum(args.phoc_levels) * len(syms)
model = build_dortmund_model(phoc_size)
# Load checkpoint
ckpt = torch.load(args.checkpoint)
if 'model' in ckpt and 'optimizer' in ckpt:
model.load_state_dict(ckpt['model'])
else:
model.load_state_dict(ckpt)
# Ensure parameters are in the correct device
model.eval()
if args.gpu > 0:
model = model.cuda(args.gpu - 1)
else:
model = model.cpu()
dataset = TextImageFromTextTableDataset(
args.gt_file, args.img_dir,
img_transform=ImageToTensor())
dataset_loader = ImageDataLoader(dataset=dataset,
image_channels=1,
num_workers=8)
with torch.cuda.device(args.gpu - 1):
for batch in tqdm(dataset_loader):
if args.gpu > 0:
x = batch['img'].data.cuda(args.gpu - 1)
else:
x = batch['img'].data.cpu()
y = model(torch.autograd.Variable(x))
if args.add_sigmoid:
y = torch.nn.functional.logsigmoid(y)
phoc_lattice(batch['id'], y, args.output)
|
py | 7df9a89a842d0c4fa985ae0dc598983e9ab2cc95 | from __future__ import print_function
import sys
import re
import binascii
import struct
from .. import database
from .utils import format_message_by_frame_id
# Matches 'candump' output, i.e. "vcan0 1F0 [8] 00 00 00 00 00 00 1B C1".
RE_CANDUMP = re.compile(r'^.* ([0-9A-F]+) \[\d+\]\s*([0-9A-F ]*)$')
def _mo_unpack(mo):
frame_id = mo.group(1)
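# left-pad the hexadecimal id to 8 characters so it unpacks as a big-endian 32-bit unsigned integer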
frame_id = '0' * (8 - len(frame_id)) + frame_id
frame_id = binascii.unhexlify(frame_id)
frame_id = struct.unpack('>I', frame_id)[0]
data = mo.group(2)
data = data.replace(' ', '')
data = binascii.unhexlify(data)
return frame_id, data
def _do_decode(args):
dbase = database.load_file(args.database,
encoding=args.encoding,
frame_id_mask=args.frame_id_mask,
strict=not args.no_strict)
decode_choices = not args.no_decode_choices
while True:
line = sys.stdin.readline()
# Break at EOF.
if not line:
break
line = line.strip('\r\n')
mo = RE_CANDUMP.match(line)
if mo:
frame_id, data = _mo_unpack(mo)
line += ' ::'
line += format_message_by_frame_id(dbase,
frame_id,
data,
decode_choices,
args.single_line)
print(line)
def add_subparser(subparsers):
decode_parser = subparsers.add_parser(
'decode',
description=('Decode "candump" CAN frames read from standard input '
'and print them in a human readable format.'))
decode_parser.add_argument(
'-c', '--no-decode-choices',
action='store_true',
help='Do not convert scaled values to choice strings.')
decode_parser.add_argument(
'-s', '--single-line',
action='store_true',
help='Print the decoded message on a single line.')
decode_parser.add_argument(
'-e', '--encoding',
default='utf-8',
help='File encoding (default: utf-8).')
decode_parser.add_argument(
'--no-strict',
action='store_true',
help='Skip database consistency checks.')
decode_parser.add_argument(
'-m', '--frame-id-mask',
type=lambda x: int(x, 0),
help=('Only compare selected frame id bits to find the message in the '
'database. By default the candump and database frame ids must '
'be equal for a match.'))
decode_parser.add_argument(
'database',
help='Database file.')
decode_parser.set_defaults(func=_do_decode)
|
py | 7df9aba36e531897e2d440e54b448ee0f4936b98 | from .event import EventTree
from .staged import StagedTree |
py | 7df9ac3624c09079108800f92365ce24e17cc6cb | #!/usr/bin/env python3
import os
import sys
import time
import select
import logging
import argparse
import threading
import subprocess
ETCD_IMAGE = "quay.io/coreos/etcd:v3.3.5"
LOG_LEVELS = {
"critical": logging.CRITICAL,
"error": logging.ERROR,
"warning": logging.WARNING,
"info": logging.INFO,
"debug": logging.DEBUG,
}
LOG_COLORS = {
"critical": "\x1b[31;1m",
"error": "\x1b[31;1m",
"warning": "\x1b[33;1m",
}
log = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(levelname)s:%(message)s"))
log.addHandler(handler)
class ExcThread(threading.Thread):
def __init__(self, target):
super().__init__(target=target)
self.error = None
def run(self):
try:
self._target()
except Exception as e:
self.error = e
def join(*targets):
threads = []
for target in targets:
t = ExcThread(target)
t.start()
threads.append(t)
for t in threads:
t.join()
for t in threads:
if t.error is not None:
raise Exception("Thread error") from t.error
class Output:
def __init__(self, pipe, level):
self.pipe = pipe
self.level = level
self.lines = []
class ProcessResult:
def __init__(self, rc, stdout, stderr):
self.rc = rc
self.stdout = stdout
self.stderr = stderr
class DefaultDriver:
def available(self):
return True
def clear(self):
if run("yes | pachctl delete all", shell=True, raise_on_error=False).rc != 0:
log.error("could not call `pachctl delete all`; most likely this just means that a pachyderm cluster hasn't been setup, but may indicate a bad state")
def start(self):
pass
def inspect(self):
pass
def push_images(self, deploy_version, dash_image):
pass
def wait(self):
while suppress("pachctl", "version") != 0:
log.info("Waiting for pachyderm to come up...")
time.sleep(1)
class MinikubeDriver(DefaultDriver):
def available(self):
return run("which", "minikube", raise_on_error=False).rc == 0
def clear(self):
run("minikube", "delete")
def start(self):
run("minikube", "start")
while suppress("minikube", "status") != 0:
log.info("Waiting for minikube to come up...")
time.sleep(1)
def push_images(self, deploy_version, dash_image):
run("./etc/kube/push-to-minikube.sh", "pachyderm/pachd:{}".format(deploy_version))
run("./etc/kube/push-to-minikube.sh", "pachyderm/worker:{}".format(deploy_version))
run("./etc/kube/push-to-minikube.sh", ETCD_IMAGE)
run("./etc/kube/push-to-minikube.sh", dash_image)
class MicroK8sDriver(DefaultDriver):
def available(self):
return run("which", "microk8s.kubectl", raise_on_error=False).rc == 0
def clear(self):
# `microk8s.reset` doesn't clear out cluster pods, so we'll go ahead
# and do that through pachctl functionality if possible
if run("yes | pachctl delete all --no-port-forwarding", shell=True, raise_on_error=False).rc != 0:
log.error("could not call `pachctl delete all`; most likely this just means that a pachyderm cluster hasn't been setup, but may indicate a bad state")
run("microk8s.stop")
def start(self):
# starting microk8s immediately after stopping it can fail, so try a
# few times
for i in range(5):
if run("microk8s.start", raise_on_error=False).rc == 0:
break
time.sleep(1)
# `microk8s.reset` has a couple of issues:
# 1) it can fail when called immediately after `microk8s.start`
# 2) it doesn't always output a proper return code when there's an error
stderr = None
for i in range(5):
stderr = run("microk8s.reset").stderr
if len(stderr) == 0:
break
time.sleep(1)
if len(stderr) > 0:
raise Exception("reset failed")
while suppress("microk8s.status") != 0:
log.info("Waiting for microk8s to come up...")
time.sleep(1)
def inspect(self):
# get output of `microk8s.inspect`, as it may include a warning about
# firewall rules that need to be changed in order for it to work
run("microk8s.inspect")
def push_images(self, deploy_version, dash_image):
run("./etc/kube/push-to-microk8s.sh", "pachyderm/pachd:{}".format(deploy_version))
run("./etc/kube/push-to-microk8s.sh", "pachyderm/worker:{}".format(deploy_version))
run("./etc/kube/push-to-microk8s.sh", ETCD_IMAGE)
run("./etc/kube/push-to-microk8s.sh", dash_image)
def wait(self):
while suppress("pachctl", "version", "--no-port-forwarding") != 0:
log.info("Waiting for pachyderm to come up...")
time.sleep(1)
def parse_log_level(s):
try:
return LOG_LEVELS[s]
except KeyError:
raise Exception("Unknown log level: {}".format(s))
def redirect_to_logger(stdout, stderr):
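# forward any lines ready on the child's stdout/stderr pipes to the logger,
# using each stream's configured log level (lines are also recorded in Output.lines)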
for io in select.select([stdout.pipe, stderr.pipe], [], [], 1000)[0]:
line = io.readline().decode().rstrip()
if line == "":
continue
dest = stdout if io == stdout.pipe else stderr
log.log(LOG_LEVELS[dest.level], "{}{}\x1b[0m".format(LOG_COLORS.get(dest.level, ""), line))
dest.lines.append(line)
def run(cmd, *args, raise_on_error=True, shell=False, stdout_log_level="info", stderr_log_level="error"):
log.debug("Running `%s %s`", cmd, " ".join(args))
proc = subprocess.Popen([cmd, *args], shell=shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = Output(proc.stdout, stdout_log_level)
stderr = Output(proc.stderr, stderr_log_level)
while proc.poll() is None:
redirect_to_logger(stdout, stderr)
redirect_to_logger(stdout, stderr)
rc = proc.wait()
if raise_on_error and rc != 0:
raise Exception("Unexpected return code for `{} {}`: {}".format(cmd, " ".join(args), rc))
return ProcessResult(rc, "\n".join(stdout.lines), "\n".join(stderr.lines))
def capture(cmd, *args, shell=False):
return run(cmd, *args, shell=shell, stdout_log_level="debug").stdout
def suppress(cmd, *args):
return run(cmd, *args, stdout_log_level="debug", stderr_log_level="debug", raise_on_error=False).rc
def get_pachyderm(deploy_version):
log.info("Deploying pachd:{}".format(deploy_version))
should_download = suppress("which", "pachctl") != 0 \
or capture("pachctl", "version", "--client-only") != deploy_version
if should_download:
release_url = "https://github.com/pachyderm/pachyderm/releases/download/v{}/pachctl_{}_linux_amd64.tar.gz".format(deploy_version, deploy_version)
outpath = os.path.join(os.environ["GOPATH"], "bin")
filepath = "pachctl_{}_linux_amd64/pachctl".format(deploy_version)
run("curl -L {} | tar -C \"{}\" --strip-components=1 -xzf - {}".format(release_url, outpath, filepath), shell=True)
run("docker", "pull", "pachyderm/pachd:{}".format(deploy_version))
run("docker", "pull", "pachyderm/worker:{}".format(deploy_version))
def main():
parser = argparse.ArgumentParser(description="Recompiles pachyderm tooling and restarts the cluster with a clean slate.")
parser.add_argument("--no-deploy", default=False, action="store_true", help="Disables deployment")
parser.add_argument("--deploy-args", default="", help="Arguments to be passed into `pachctl deploy`")
parser.add_argument("--deploy-version", default="local", help="Sets the deployment version")
parser.add_argument("--log-level", default="info", type=parse_log_level, help="Sets the log level; defaults to 'info', other options include 'critical', 'error', 'warning', and 'debug'")
args = parser.parse_args()
log.setLevel(args.log_level)
if "GOPATH" not in os.environ:
log.critical("Must set GOPATH")
sys.exit(1)
if not args.no_deploy and "PACH_CA_CERTS" in os.environ:
log.critical("Must unset PACH_CA_CERTS\nRun:\nunset PACH_CA_CERTS", file=sys.stderr)
sys.exit(1)
if args.deploy_version == "local" and not os.getcwd().startswith(os.path.join(os.environ["GOPATH"], "src", "github.com", "pachyderm", "pachyderm")):
log.critical("Must be in a Pachyderm client", file=sys.stderr)
sys.exit(1)
if MinikubeDriver().available():
log.info("using the minikube driver")
driver = MinikubeDriver()
elif MicroK8sDriver().available():
log.info("using the microk8s driver")
driver = MicroK8sDriver()
else:
log.info("using the k8s for docker driver")
log.warning("with this driver, it's not possible to fully reset the cluster")
driver = DefaultDriver()
driver.clear()
gopath = os.environ["GOPATH"]
if args.deploy_version == "local":
os.chdir(os.path.join(gopath, "src", "github.com", "pachyderm", "pachyderm"))
try:
os.remove(os.path.join(gopath, "bin", "pachctl"))
except:
pass
join(
driver.start,
lambda: run("make", "install"),
lambda: run("make", "docker-build"),
)
else:
join(
driver.start,
lambda: get_pachyderm(args.deploy_version),
)
driver.inspect()
version = capture("pachctl", "version", "--client-only")
log.info("Deploy pachyderm version v{}".format(version))
while suppress("pachctl", "version", "--client-only") != 0:
log.info("Waiting for pachctl to build...")
time.sleep(1)
run("which", "pachctl")
dash_image = capture("pachctl deploy local -d --dry-run | jq -r '.. | select(.name? == \"dash\" and has(\"image\")).image'", shell=True)
grpc_proxy_image = capture("pachctl deploy local -d --dry-run | jq -r '.. | select(.name? == \"grpc-proxy\").image'", shell=True)
run("docker", "pull", dash_image)
run("docker", "pull", grpc_proxy_image)
run("docker", "pull", ETCD_IMAGE)
driver.push_images(args.deploy_version, dash_image)
if not args.no_deploy:
if args.deploy_version == "local":
run("pachctl deploy local -d {}".format(args.deploy_args), shell=True)
else:
run("pachctl deploy local -d {} --dry-run | sed \"s/:local/:{}/g\" | kubectl create -f -".format(args.deploy_args, args.deploy_version), shell=True)
driver.wait()
run("killall", "kubectl", raise_on_error=False)
if __name__ == "__main__":
main()
|
py | 7df9ad83e307c6c2c6fb5ff77860ab92208d0f00 | import numpy as np
import sys
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.01
set_session(tf.Session(config=config))
from keras.models import Sequential, Model
from keras.layers import Input, Embedding, LSTM, GRU, SimpleRNN, Dense, Lambda
import keras.backend as K
from keras.callbacks import ModelCheckpoint
import keras.layers as layers
from keras import optimizers
adam_half = optimizers.Adam(lr=0.0005)
from keras.preprocessing import sequence
from keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape
from keras.layers.embeddings import Embedding
from random import random
from numpy import array
from numpy import cumsum
from keras.layers import TimeDistributed
from keras.layers import Bidirectional
from keras.callbacks import ModelCheckpoint
from keras.layers.normalization import BatchNormalization
print('\n\n\n\n\n\n\n\n\n\n\n')
print('###############################################################\n# #\n# Demonstration : Real-time Automatic Word Segmentation (K/E) #\n# #\n###############################################################')
import fasttext
import re
print('\nImporting dictionaries...')
dic_kor = fasttext.load_model('vectors/model_kor.bin')
def loadvector(File):
f = open(File,'r')
model = {}
for line in f:
splitLine = line.split()
word = splitLine[0]
embedding = np.array([float(val) for val in splitLine[1:]])
model[word] = embedding
return model
dic_eng = loadvector('vectors/model_eng.txt')
import string
idchar = {}
for i in range(len(string.ascii_lowercase)):
idchar.update({string.ascii_lowercase[i]:i})
for i in range(10):
idchar.update({i:i+26})
idchar.update({'#':36})
big = re.compile(r"[A-Z]")
small= re.compile(r"[a-z]")
num = re.compile(r"[0-9]")
print('Loading models...')
from keras.models import load_model
model_kor = load_model('model/model_kor.hdf5')
model_eng = load_model('model/model_eng.hdf5')
print('\nEnter "bye" to quit\n')
## Functions_KOR
threshold_kor=0.5
overlap=30
def pred_correction_rnn(sent,model,dic,maxlen,wdim):
conv = np.zeros((1,maxlen,wdim,1))
rnn = np.zeros((1,maxlen,wdim))
charcount = -1
for j in range(len(sent)):
if j<maxlen and sent[j]!=' ':
charcount=charcount+1
conv[0][charcount,:,0]=dic[sent[j]]
rnn[0][charcount,:]=dic[sent[j]]
z = model.predict([conv,rnn])[0]
sent_raw = ''
count_char=-1
lastpoint=-1
lastchar=-1
for j in range(len(sent)):
if sent[j]!=' ':
count_char=count_char+1
sent_raw = sent_raw+sent[j]
if z[count_char]>threshold_kor:
sent_raw = sent_raw+' '
if j<overlap:
lastpoint=len(sent_raw)
lastchar=j
return sent_raw, lastpoint, lastchar
def kor_spacing(s):
if len(s)<overlap:
temp,lp,lc = pred_correction_rnn(s,model_kor,dic_kor,100,100)
z = temp+"\n"
else:
z=''
start=0
while start<len(s):
if start+overlap<len(s):
temp,lp,lc =pred_correction_rnn(s[start:start+2*overlap],model_kor,dic_kor,100,100)
temp=temp[:lp]
else:
temp,lp,lc =pred_correction_rnn(s[start:],model_kor,dic_kor,100,100)
lc = overlap
z = z+temp
start=start+lc+1
z = z+"\n"
print('>> Output:',z)
return z
## Function_ENG
def underscore(hashtag):
result=''
for i in range(len(hashtag)):
if i>0:
if hashtag[i].isalpha()==True:
result = result+hashtag[i]
else:
result = result+' '
return result
def split_hashtag(hashtagestring):
fo = re.compile(r'#[A-Z]{2,}(?![a-z])|[A-Z][a-z]+')
fi = fo.findall(hashtagestring)
result = ''
for var in fi:
result += var + ' '
return result
threshold=0.35
def hash_pred(sent,model,dic1,dic2,maxlen,wdim):
conv = np.zeros((1,maxlen,wdim,1))
rnn = np.zeros((1,maxlen,len(dic2)))
charcount=-1
lastpoint=-1
lastchar=-1
for j in range(len(sent)):
if charcount<maxlen-1 and sent[j]!=' ':
charcount=charcount+1
if sent[j] in dic1:
conv[0][charcount,:,0]=dic1[sent[j]]
if sent[j] in dic2:
rnn[0][charcount,dic2[sent[j]]]=1
z = model.predict([conv,rnn])[0]
sent_raw = ''
count_char=-1
for j in range(len(sent)):
if sent[j]!=' ':
count_char=count_char+1
sent_raw = sent_raw+sent[j]
if z[count_char]>threshold:
sent_raw = sent_raw+' '
if j<overlap:
lastpoint=len(sent_raw)
lastchar=j
return sent_raw, z[:count_char], count_char, lastpoint, lastchar
def hash_space(tag):
tag_re = ''
for i in range(len(tag)):
if tag[i].isalpha() == True:
tag_re = tag_re+tag[i].lower()
else:
tag_re = tag_re+tag[i]
sent_raw, z, count_char, lastpoint, lastchar = hash_pred(tag_re,model_eng,dic_eng,idchar,100,100)
return sent_raw, lastpoint, lastchar
def eng_spacing(s):
if len(s)<overlap:
temp,lp,lc = hash_space(s)
z = temp+"\n"
else:
z=''
start=0
while start<len(s):
if start+overlap<len(s):
temp,lp,lc =hash_space(s[start:start+2*overlap])
temp=temp[:lp]
else:
temp,lp,lc =hash_space(s[start:])
lc = overlap
z = z+temp
start=start+lc+1
z = z+"\n"
print('>> Output:',z)
return z
def eng_hashsegment(hashtag):
if '_' in hashtag:
print('>> output:',underscore(hashtag))
return underscore(hashtag)
else:
if re.search(big,hashtag) and re.search(small,hashtag):
print('>> output:',split_hashtag(hashtag))
return split_hashtag(hashtag)
else:
return eng_spacing(hashtag[1:])
print('\nEnter "k" for Korean spacing\nEnter "e" for English segmentation\nEnter "bye" to quit\n\n')
## Demonstration
def KOR():
print('\nAutomatic Korean spacing ...\nEnter "e" to activate English segmentation\nEnter "bye" to quit\n')
while 1:
s = input('>> You say: ')
if s == 'bye':
sys.exit()
elif s == 'e':
ENG()
else:
kor_spacing(s)
def ENG():
print('\nAutomatic English segmentation ...\nEnter "k" to activate identification mode\nEnter "bye" to quit\n')
while 1:
s = input('>> You say: ')
if s == 'bye':
sys.exit()
elif s == 'k':
KOR()
else:
eng_spacing(s)
while 1:
s = input(' Choose: ')
if s == 'k':
KOR()
elif s == 'e':
ENG()
elif s == 'bye':
sys.exit()
|
py | 7df9ae0cf21d8fab8d05a64b12840ac9fc6736d9 |
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
channels = [1, 4, 16, 32]
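# Chomp1d trims the trailing padding added by the causal convolutions so the
# output length matches the input length (the standard TCN building block).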
class Chomp1d(nn.Module):
def __init__(self, chomp_size):
super(Chomp1d, self).__init__()
self.chomp_size = chomp_size
def forward(self, x):
return x[:, :, :-self.chomp_size].contiguous()
class TemporalBlock(nn.Module):
def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):
super(TemporalBlock, self).__init__()
self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
stride=stride, padding=padding, dilation=dilation))
self.chomp1 = Chomp1d(padding)
self.relu1 = nn.ReLU()
self.dropout1 = nn.Dropout(dropout)
self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size,
stride=stride, padding=padding, dilation=dilation))
self.chomp2 = Chomp1d(padding)
self.relu2 = nn.ReLU()
self.dropout2 = nn.Dropout(dropout)
self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,
self.conv2, self.chomp2, self.relu2, self.dropout2)
self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
self.relu = nn.ReLU()
self.init_weights()
def init_weights(self):
self.conv1.weight.data.normal_(0, 0.01)
self.conv2.weight.data.normal_(0, 0.01)
if self.downsample is not None:
self.downsample.weight.data.normal_(0, 0.01)
def forward(self, x):
out = self.net(x)
res = x if self.downsample is None else self.downsample(x)
return self.relu(out + res)
class TemporalConvNet(nn.Module):
def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
super(TemporalConvNet, self).__init__()
layers = []
num_levels = len(num_channels)
for i in range(num_levels):
dilation_size = 2 ** i
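# dilation doubles at every level, so the receptive field grows exponentially with depth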
in_channels = num_inputs if i == 0 else num_channels[i-1]
out_channels = num_channels[i]
layers += [TemporalBlock(in_channels, out_channels, kernel_size, stride=1, dilation=dilation_size,
padding=(kernel_size-1) * dilation_size, dropout=dropout)]
self.network = nn.Sequential(*layers)
def forward(self, x):
return self.network(x)
class TCN(nn.Module):
def __init__(self, num_channels, embedding_dim=128, kernel_size=2, dropout=0.2):
super(TCN, self).__init__()
self.tcn = TemporalConvNet(
1, num_channels, kernel_size=kernel_size, dropout=dropout)
self.dropout = nn.Dropout(dropout)
self.decoder = nn.Linear(num_channels[-1], embedding_dim)
def forward(self, x):
x = self.tcn(x)
x = x[:, :, -1]
x = self.dropout(x)
x = self.decoder(x)
return x
#return self.decoder(self.dropout(self.tcn(x)[:, :, -1])) |
py | 7df9af6717d64d9643ba56f40f4de7f1346a421f | # Copyright (c) 2013 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from u2flib_server.attestation.metadata import MetadataProvider, Transport
from u2flib_server.attestation.resolvers import create_resolver
from u2flib_server.attestation.data import YUBICO
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from base64 import b64decode
import json
import unittest
ATTESTATION_CERT = b64decode(b"""
MIICGzCCAQWgAwIBAgIEdaP2dTALBgkqhkiG9w0BAQswLjEsMCoGA1UEAxMjWXViaWNvIFUyRiBS
b290IENBIFNlcmlhbCA0NTcyMDA2MzEwIBcNMTQwODAxMDAwMDAwWhgPMjA1MDA5MDQwMDAwMDBa
MCoxKDAmBgNVBAMMH1l1YmljbyBVMkYgRUUgU2VyaWFsIDE5NzM2Nzk3MzMwWTATBgcqhkjOPQIB
BggqhkjOPQMBBwNCAAQZo35Damtpl81YdmcbhEuXKAr7xDcQzAy5n3ftAAhtBbu8EeGU4ynfSgLo
nckqX6J2uXLBppTNE3v2bt+Yf8MLoxIwEDAOBgorBgEEAYLECgECBAAwCwYJKoZIhvcNAQELA4IB
AQC9LbiNPgs0sQYOHAJcg+lMk+HCsiWRlYVnbT4I/5lnqU907vY17XYAORd432bU3Nnhsbkvjz76
kQJGXeNAF4DPANGGlz8JU+LNEVE2PWPGgEM0GXgB7mZN5Sinfy1AoOdO+3c3bfdJQuXlUxHbo+nD
pxxKpzq9gr++RbokF1+0JBkMbaA/qLYL4WdhY5NvaOyMvYpO3sBxlzn6FcP67hlotGH1wU7qhCeh
+uur7zDeAWVh7c4QtJOXHkLJQfV3Z7ZMvhkIA6jZJAX99hisABU/SSa5DtgX7AfsHwa04h69AAAW
DUzSk3HgOXbUd1FaSOPdlVFkG2N2JllFHykyO3zO
""")
ATTESTATION_CERT_WITH_TRANSPORT = b64decode(b"""
MIICIjCCAQygAwIBAgIEIHHwozALBgkqhkiG9w0BAQswDzENMAsGA1UEAxMEdGVz
dDAeFw0xNTA4MTEwOTAwMzNaFw0xNjA4MTAwOTAwMzNaMCkxJzAlBgNVBAMTHll1
YmljbyBVMkYgRUUgU2VyaWFsIDU0NDMzODA4MzBZMBMGByqGSM49AgEGCCqGSM49
AwEHA0IABPdFG1pBjBBQVhLrD39Qg1vKjuR2kRdBZnwLI/zgzztQpf4ffpkrkB/3
E0TXj5zg8gN9sgMkX48geBe+tBEpvMmjOzA5MCIGCSsGAQQBgsQKAgQVMS4zLjYu
MS40LjEuNDE0ODIuMS4yMBMGCysGAQQBguUcAgEBBAQDAgQwMAsGCSqGSIb3DQEB
CwOCAQEAb3YpnmHHduNuWEXlLqlnww9034ZeZaojhPAYSLR8d5NPk9gc0hkjQKmI
aaBM7DsaHbcHMKpXoMGTQSC++NCZTcKvZ0Lt12mp5HRnM1NNBPol8Hte5fLmvW4t
Q9EzLl4gkz7LSlORxTuwTbae1eQqNdxdeB+0ilMFCEUc+3NGCNM0RWd+sP5+gzMX
BDQAI1Sc9XaPIg8t3du5JChAl1ifpu/uERZ2WQgtxeBDO6z1Xoa5qz4svf5oURjP
ZjxS0WUKht48Z2rIjk5lZzERSaY3RrX3UtrnZEIzCmInXOrcRPeAD4ZutpiwuHe6
2ABsjuMRnKbATbOUiLdknNyPYYQz2g==
""")
YUBICO_RESOLVER = create_resolver(YUBICO)
EMPTY_RESOLVER = create_resolver([])
class AttestationTest(unittest.TestCase):
def test_resolver(self):
cert = x509.load_der_x509_certificate(ATTESTATION_CERT,
default_backend())
metadata = YUBICO_RESOLVER.resolve(cert)
assert metadata.identifier == '2fb54029-7613-4f1d-94f1-fb876c14a6fe'
def test_provider(self):
provider = MetadataProvider(YUBICO_RESOLVER)
cert = x509.load_der_x509_certificate(ATTESTATION_CERT,
default_backend())
attestation = provider.get_attestation(cert)
assert attestation.trusted
def test_versioning_newer(self):
resolver = create_resolver(YUBICO)
newer = json.loads(json.dumps(YUBICO))
newer['version'] = newer['version'] + 1
newer['trustedCertificates'] = []
resolver.add_metadata(newer)
cert = x509.load_der_x509_certificate(ATTESTATION_CERT,
default_backend())
metadata = resolver.resolve(cert)
assert metadata is None
def test_versioning_older(self):
resolver = create_resolver(YUBICO)
newer = json.loads(json.dumps(YUBICO))
newer['trustedCertificates'] = []
resolver.add_metadata(newer)
cert = x509.load_der_x509_certificate(ATTESTATION_CERT,
default_backend())
metadata = resolver.resolve(cert)
assert metadata.identifier == '2fb54029-7613-4f1d-94f1-fb876c14a6fe'
def test_transports_from_cert(self):
provider = MetadataProvider(EMPTY_RESOLVER)
cert = x509.load_der_x509_certificate(ATTESTATION_CERT_WITH_TRANSPORT,
default_backend())
attestation = provider.get_attestation(cert)
assert attestation.transports == Transport.USB | Transport.NFC
def test_transports_from_metadata(self):
provider = MetadataProvider(YUBICO_RESOLVER)
cert = x509.load_der_x509_certificate(ATTESTATION_CERT,
default_backend())
attestation = provider.get_attestation(cert)
assert attestation.transports == Transport.USB
|
py | 7df9afd68b432ea479c9dc0baa65ad30b2576c13 | from __future__ import annotations
from contextlib import contextmanager
import os
from pathlib import Path
from shutil import rmtree
import tempfile
from typing import (
IO,
Any,
)
import uuid
import numpy as np
from pandas import set_option
from pandas.io.common import get_handle
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object.
Parameters
----------
path : str
The path where the file is read from.
compression : {'gzip', 'bz2', 'zip', 'xz', 'zstd', None}
Name of the decompression to use
Returns
-------
file object
"""
with get_handle(path, "rb", compression=compression, is_text=False) as handle:
yield handle.handle
@contextmanager
def set_timezone(tz: str):
"""
Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime(2021, 1, 1)) # doctest: +SKIP
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime(2021, 1, 1))
...
'EST'
"""
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ["TZ"]
except KeyError:
pass
else:
os.environ["TZ"] = tz
time.tzset()
orig_tz = os.environ.get("TZ")
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
@contextmanager
def ensure_clean(filename=None, return_filelike: bool = False, **kwargs: Any):
"""
Gets a temporary path and agrees to remove on close.
This implementation does not use tempfile.mkstemp to avoid having a file handle.
If the code using the returned path wants to delete the file itself, windows
requires that no program has a file handle to it.
Parameters
----------
filename : str (optional)
suffix of the created file.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
**kwargs
Additional keywords are passed to open().
"""
folder = Path(tempfile.gettempdir())
if filename is None:
filename = ""
filename = str(uuid.uuid4()) + filename
path = folder / filename
path.touch()
handle_or_str: str | IO = str(path)
if return_filelike:
kwargs.setdefault("mode", "w+b")
handle_or_str = open(path, **kwargs)
try:
yield handle_or_str
finally:
if not isinstance(handle_or_str, str):
handle_or_str.close()
if path.is_file():
path.unlink()
@contextmanager
def ensure_clean_dir():
"""
    Get a temporary directory path and agree to remove it on close.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix="")
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except OSError:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
@contextmanager
def with_csv_dialect(name, **kwargs):
"""
Context manager to temporarily register a CSV dialect for parsing CSV.
Parameters
----------
name : str
The name of the dialect.
kwargs : mapping
The parameters for the dialect.
Raises
------
ValueError : the name of the dialect conflicts with a builtin one.
See Also
--------
csv : Python's CSV library.
"""
import csv
_BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}
if name in _BUILTIN_DIALECTS:
raise ValueError("Cannot override builtin dialect.")
csv.register_dialect(name, **kwargs)
try:
yield
finally:
csv.unregister_dialect(name)
@contextmanager
def use_numexpr(use, min_elements=None):
from pandas.core.computation import expressions as expr
if min_elements is None:
min_elements = expr._MIN_ELEMENTS
olduse = expr.USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
set_option("compute.use_numexpr", use)
expr._MIN_ELEMENTS = min_elements
try:
yield
finally:
expr._MIN_ELEMENTS = oldmin
set_option("compute.use_numexpr", olduse)
class RNGContext:
"""
Context manager to set the numpy random number generator speed. Returns
to the original value upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed) -> None:
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
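# Hedged demo (added for illustration, not part of pandas itself): a quick look at two of
# the helpers above. It only runs when the module is executed directly, never on import.
if __name__ == "__main__":
    with ensure_clean("demo.csv") as tmp_path:
        print("temporary file:", tmp_path)   # removed again once the block exits
    with RNGContext(42):
        print(np.random.randn())             # reproducible draw; global RNG state restored afterwards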
|
py | 7df9b018f8b5c7cc73e9fef8e25a2b8672d44d6d | # Copyright (C) 2019 National Institute of Informatics
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .api import (
MessageReader, MessageWriter, AT_MOST_ONCE, AT_LEAST_ONCE, EXACTLY_ONCE,
DEFAULT_CLIENT_ID,
)
from .error import (
SinetError, NoServiceError, NoConfigError, InvalidArgumentError,
ConnectionError, AlreadyConnectedError, UnsupportedServiceTypeError,
InvalidMessageError, AuthorizationError,
)
from .value_type import TEXT, BYTE_ARRAY
|
py | 7df9b03750ad09dbf3e4b39f8cb6b2f2637851b6 | # -*- coding: utf-8 -*-
#
# pv documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 15 08:27:09 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import shutil
pdb_structures = [ '1r6a', '1nmr', '1crn' ]
for pdb_id in pdb_structures:
shutil.copyfile('../pdbs/%s.pdb' % pdb_id, '_static/%s.pdb' % pdb_id)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('ext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [ 'pvsample' ]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
nitpicky = True
# General information about the project.
project = u'PV'
copyright = u'2013-2015, Marco Biasini'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.8'
# The full version, including alpha/beta/rc tags.
release = '1.8.0dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = [ '_static' ]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pvdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pv.tex', u'pv Documentation',
u'Marco Biasini', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pv', u'pv Documentation',
[u'Marco Biasini'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pv', u'pv Documentation',
u'Marco Biasini', 'pv', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
primary_domain = 'js'
|
py | 7df9b07798b4173d5c5358feae22da47ae8b0746 | """
NLP 100 Exercises 2015
http://www.cl.ecei.tohoku.ac.jp/nlp100/#ch1
Chapter 1: Warm-up
00. Reversed string
Obtain the string formed by arranging the characters of the string "stressed" in reverse order (from the last character to the first).
"""
strings = 'stressed'
reversed_strings = strings[::-1]
print(reversed_strings)
|
py | 7df9b2d07563d1dd456b3b0824cd0ea9cda94c82 | # Utilities
from time import sleep
import math
import random
# Helper functions.
def clamp(x, minimum, maximum): return max(minimum, min(x, maximum))
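# Quick sanity check (added for illustration): every caller below passes the value first,
# i.e. clamp(value, lower, upper), so out-of-range coordinates snap back onto the board.
assert clamp(12, 0, 9) == 9 and clamp(-3, 0, 9) == 0 and clamp(4, 0, 9) == 4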
def clear(): print("\n" * 100)
# DEFAULT GAME VARIABLES #
# Defines the order in which you should place ships with this length.
BATTLESHIP_LENS = [2, 3, 3, 4, 5]
# Creates 2 empty boards, 1 for the enemy and 1 for you.
PLAYER_BOARD = [[0] * 10 for i in range(10)]
ENEMY_BOARD = [[0] * 10 for i in range(10)]
# A cool separator.
LINE = "-" * 60
# How many moves the AI has.
MAX_MOVES = 100 # Covers the whole board.
# FUNCTIONS #
def get_coordinates():
print("Please enter values between 1 and 10 (transformed to fit arrays)")
# Returns a tuple with 2 inputs (1 being the X value, one being the Y value)
# Asks the user for an X and Y coordinate value.
x = input("Coordinate x value: ")
y = input("Coordinate y value: ")
# Tries to return the integer values of both x and y.
# If this fails, return the function so it keeps
    # asking the player. If we don't return the function,
# it will error in the future.
try:
return (int(x), int(y))
except ValueError:
print("Invalid coordinate value!")
return get_coordinates()
def get_orientation():
print("Orientation of Ship: Enter Orientation")
o = input("(either 1 for vertical or 2 for horizontal): ")
# Checks if the input == 1, if it does then it returns
# the integer 1 (which maps to vertical),
# if not it returns the integer 2 (which maps to horizontal.)
return o=="1" and 1 or 2
def print_board(board):
# Prints a board (based on input)
for x in range(len(board)):
# Special case for when it starts printing.
if x==0:
# Sets up the first string.
new_str = ""
for i in range(len(board) - 1):
# Adds spaces between the numbers.
new_str += " " + str(i+1) + " "
# Appends the roman numeral for 10 (because OCD)
new_str += " X"
# Actually prints the string.
print(new_str)
# Sets up the string.
str_row = ""
for y in range(len(board[x])):
# A stupid 1 line if statement to append to the string.
# value 0: blank, '[ ]'
# value 1: ship is present, '[S]'
# value 2: hit (ship), '[X]'
# value 3: hit (miss), '[M]'
str_row += (
board[x][y]==0 and "[ ]" or (
board[x][y]==1 and "[S]" or (
board[x][y]==2 and "[X]" or "[M]"
)
)
)
# Actually prints the nth row of the board.
print(str_row + " " + str(x + 1))
def print_board_exclusion(board, exclude_num):
# Prints a board without exclude_num
# Useful for when the game starts.
for x in range(len(board)):
# Special case for when it starts printing.
if x==0:
# Sets up the first string.
new_str = ""
for i in range(len(board) - 1):
# Adds spaces between the numbers.
new_str += " " + str(i+1) + " "
# Appends the roman numeral for 10 (because OCD)
new_str += " X"
# Actually prints the string.
print(new_str)
# Sets up the string.
str_row = ""
for y in range(len(board[x])):
# A stupid 1 line if statement to append to the string.
# value 0 (or exclude_num): blank, '[ ]'
# value 1: ship is present, '[S]'
# value 2: hit (ship), '[X]'
# value 3: hit (miss), '[M]'
str_row += (
(board[x][y]==exclude_num or board[x][y]==0) and "[ ]" or (
board[x][y]==1 and "[S]" or (
board[x][y]==2 and "[X]" or "[M]"
)
)
)
# Actually prints the nth row of the board.
print(str_row + " " + str(x + 1))
def valid_ship_position(board, size, position = (1, 1), orientation = 1):
x, y = position
# Stupid workaround because of stupid board setup
x, y = clamp(y - 1, 0, 9), clamp(x - 1, 0, 9)
# Clamped size.
board_size = len(board) - 1
# Check 1: if the positions are within the bounds of the
if y > board_size or x > board_size or y < 0 or x < 0:
return False
else:
# remember that I flipped the X and Y axis.
# If orientation is vertical
if orientation == 1:
# If ship bounds are greater than the horizontal height
if math.floor(x / size) > math.ceil(board_size / size):
return False
# If orientation is horizontal
elif orientation == 2:
# If ship bounds are greater than the horizontal height
if math.floor(y / size) > math.ceil(board_size / size):
return False
# Invalid orientations just return false.
else:
return False
# Check 2: checks the spaces in front / behind the position specified.
count = 0
for i in range(size + 1):
# Maths
add = int(math.ceil(i / 2))
# Increments count
count += 1
        # If count is odd (count % 2 != 0), check the cells before the origin
if count % 2:
# If orientation is vertical
if orientation == 1:
# More if statements and failsafes.
if x - add < 0:
return False
else:
if board[x - add][y] == 1:
return False
# If orientation is horizontal
else:
# More if statements and failsafes.
if y - add < 0:
return False
else:
if board[x][y - add] == 1:
return False
else:
# If orientation is vertical
if orientation == 1:
# More if statements and failsafes.
if x + add > board_size:
return False
else:
if board[x + add][y] == 1:
return False
# If orientation is horizontal
else:
# More if statements and failsafes.
if y + add > board_size:
return False
else:
if board[x][y + add] == 1:
return False
return True
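# Illustrative check (added): a length-3 ship placed horizontally at user coordinates (4, 2)
# fits on a throwaway empty board; the real game boards are not touched here.
assert valid_ship_position([[0] * 10 for _ in range(10)], 3, (4, 2), 2)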
def add_ship_to_board(board, size = 3, position = (1, 1), orientation = 1):
x, y = position
# Another stupid workaround because of stupid board setup
x, y = clamp(y - 1, 0, 9), clamp(x - 1, 0, 9)
# Check if the placement is valid on the board.
if valid_ship_position(board, size, position, orientation):
# Initialize variable
count = 0
# Set the board at array X [array Y]
# Goes down X, over Y (was reversed above)
board[x][y] = 1
# For loop which covers range (size + 1)
for i in range(size + 1):
# Something to help with indexing
add = int(math.ceil(i / 2))
# Adds 1 to the count to help for adding the ship pieces near board[x][y]
count += 1
            # If count is odd (count % 2 != 0), place a piece before the origin
if count % 2:
if orientation == 1:
# If vertical, adds a ship piece above the origin (board[x][y])
board[x - add][y] = 1
else:
# Else, adds a ship piece to the left of the origin (board[x][y])
board[x][y - add] = 1
else:
if orientation == 1:
# If vertical, adds a ship piece below the origin (board[x][y])
board[x + add][y] = 1
else:
# Else, adds a ship piece to the right of the origin (board[x][y])
board[x][y + add] = 1
else:
print("Not a valid ship placement!")
def generate_enemy_queue():
# Generates a moveset for the enemy.
# Return value
ret = []
# Copies the player board.
player_board_copy = list(PLAYER_BOARD)
# Last position the enemy found a ship.
LASTPOS = (1,1)
# This is for the enemy when they find a ship on your board.
# 1 = left, 2 = above, 3 = right, 4 = below
SIDE = 1
# Has the enemy found a ship?
FOUNDSHIP = False
# The length of the found ship.
# Infinite loop protection
iters = 0
while True:
# Iterates by one
iters += 1
# If the iterations are greater than the max amount of moves,
# then break.
if iters >= MAX_MOVES:
break
# Counter for the amount of player ships.
player_ships = 0
# Goes through the length of the copied board.
for y in range(len(player_board_copy)):
# Goes through the length of the (copied board)[y value]
for x in range(len(player_board_copy[y])):
# If there is a ship there, increment
# player_ships by 1.
if player_board_copy[y][x] == 1:
player_ships += 1
# If there are no more ships, then break.
if player_ships == 0:
break
else:
# If the AI hasnt found a ship yet:
if not FOUNDSHIP:
# Chose random coordinates on the board.
x, y = (random.randint(1, 10), random.randint(1, 10))
# More stupid workarounds.
x, y = clamp(y - 1, 0, 9), clamp(x - 1, 0, 9)
                # If the enemy has already chosen that spot, keep choosing more
# random coordinates and then using the stupid workaround (again.)
while (player_board_copy[x][y] == 2 or player_board_copy[x][y] == 3) or ((x, y) in ret):
x, y = (random.randint(1, 10), random.randint(1, 10))
x, y = clamp(y - 1, 0, 9), clamp(x - 1, 0, 9)
# Adds the desired coordinate to the enemy queue
ret.append((x, y))
# Sets the last position to the coordinates
LASTPOS = (x, y)
# If they hit a ship, set FOUNDSHIP to true to start
# checking the sides.
if player_board_copy[x][y] == 1:
FOUNDSHIP = True
# Else, check sides.
else:
# Gets the last position. It is already clamped
                # so no stupid workaround here.
x, y = LASTPOS
# Have I found the next piece of a ship?
found_next_ship = False
# If the side is 1 (left)
if SIDE == 1:
# Boundary checks. If y - 1 is negative,
# increment SIDE by 1.
if y - 1 < 0:
SIDE += 1
else:
# If the area to the left of the origin (x, y) has
# a ship, append to the queue and sets its last
# position to the position that the AI has found the ship.
# And, set "found_next_ship" to True, so it keeps checking for
# any more ships near the location.
# Else, just increment SIDE by 1.
if player_board_copy[x][y - 1] == 1:
ret.append((x, y - 1))
LASTPOS = (x, y - 1)
found_next_ship = True
else:
SIDE += 1
# If the side is 2 (above)
elif SIDE == 2:
# Boundary checks. If x - 1 is negative,
# increment SIDE by 1.
if x - 1 < 0:
SIDE += 1
else:
# If the area above the origin (x, y) has
# a ship, append to the queue and sets its last
# position to the position that the AI has found the ship.
# And, set "found_next_ship" to True, so it keeps checking for
# any more ships near the location.
# Else, just increment SIDE by 1.
if player_board_copy[x - 1][y] == 1:
ret.append((x - 1, y))
LASTPOS = (x - 1, y)
found_next_ship = True
else:
SIDE += 1
# If the side is 3 (right)
elif SIDE == 3:
# Boundary checks. If y + 1 is greater than
# the boundary length of the copied board, then
# increment SIDE by 1.
                    if y + 1 >= len(player_board_copy):
SIDE += 1
else:
# If the area to the right of the origin (x, y) has
# a ship, append to the queue and sets its last
# position to the position that the AI has found the ship.
# And, set "found_next_ship" to True, so it keeps checking for
# any more ships near the location.
# Else, just increment SIDE by 1.
if player_board_copy[x][y + 1] == 1:
ret.append((x, y + 1))
LASTPOS = (x, y + 1)
found_next_ship = True
else:
SIDE += 1
# Else (only option is 4 which is below)
else:
# Boundary checks. If x + 1 is greater than
# the boundary length of the copied board, then
# increment SIDE by 1.
                    if x + 1 >= len(player_board_copy):
SIDE = 1
else:
# If the area below the origin (x, y) has
# a ship, append to the queue and sets its last
# position to the position that the AI has found the ship.
# And, set "found_next_ship" to True, so it keeps checking for
# any more ships near the location.
# Else, just increment SIDE by 1.
if player_board_copy[x + 1][y] == 1:
ret.append((x + 1, y))
LASTPOS = (x + 1, y)
found_next_ship = True
else:
SIDE = 1
# Have I found the next piece of a ship? If
# not, then just set FOUNDSHIP to false.
if found_next_ship == False:
FOUNDSHIP = False
return ret
def start_game():
# Actually starts the game.
# Whos turn is it?
# True: player's
# False: enemy's
TURN = True
# Tracks what round you guys are on.
ROUND = 0
# Generates the queue for the enemy.
QUEUE = generate_enemy_queue()
# While true loop for playing the game.
while True:
# If it is your turn
if TURN:
print("It is your turn!")
# Prints a line.
print(LINE)
# Another status message.
print("Where to attack? (input coordinates)")
# Gets the coordinates and then uses the
# stupid workaround.
x, y = get_coordinates()
x, y = clamp(y - 1, 0, 9), clamp(x - 1, 0, 9)
# If the player has already shot at that location,
            # keep choosing.
while ENEMY_BOARD[x][y] == 2 or ENEMY_BOARD[x][y] == 3:
print("You have already hit that spot! Choose another.")
x, y = get_coordinates()
x, y = clamp(y - 1, 0, 9), clamp(x - 1, 0, 9)
# Prints a line.
print(LINE)
# If there is a ship there, notify the player
# and then add a "hitmarker" to (x, y)
if ENEMY_BOARD[x][y] == 1:
ENEMY_BOARD[x][y] = 2
print("That was a hit!")
# Else, just set the marker at the area to a miss.
else:
ENEMY_BOARD[x][y] = 3
print("You missed.")
# Prints the enemy board, but with only the hitmarkers.
print_board_exclusion(ENEMY_BOARD, 1)
# Makes it the enemies turn.
TURN = False
# Else
else:
print("It is the enemy's turn...")
# Sleep for one second.
sleep(1)
# Get the X and Y values from the queue (already used
# the workaround here.)
x, y = QUEUE[0]
# If there is a ship at the queue placement,
# notify the player and then add a "hitmarker" to (x, y)
if PLAYER_BOARD[x][y] == 1:
PLAYER_BOARD[x][y] = 2
print("The enemy hit one of your ships!")
# Else, just set the marker at the area to a miss.
else:
PLAYER_BOARD[x][y] = 3
print("The enemy missed.")
# Prints your board.
print_board(PLAYER_BOARD)
            # Pop the move from the queue, so it doesn't play again.
QUEUE.pop(0)
# Makes it the player's turn.
TURN = True
# Prints a line.
print(LINE)
# Increments the round counter.
ROUND += 1
# Since game_is_over returns 2 arguments,
# status is the boolean and msg is the status message.
status, msg = game_is_over()
# If the status == true (game is over), then print stats.
if status:
# Game over after N rounds!
print("Game over after " + str(ROUND) + " rounds!")
# Another line.
print(LINE)
# Prints if you had win or not (with a stupid 1 line if statement.)
print(msg == "ENEMY" and "You have lost to the enemy!" or "You have won against the enemy!")
# Prints your board and the enemies board.
print("Your board:")
print_board(PLAYER_BOARD)
print("The enemy's board:")
print_board(ENEMY_BOARD)
# Then, break the while true loop.
break
# Sleep for 1 second in between turns.
sleep(1)
def game_is_over():
# Counter for the amount of player ships.
player_ships = 0
# Goes through the length of the copied board.
for y in range(len(PLAYER_BOARD)):
# Goes through the length of the (copied board)[y value]
for x in range(len(PLAYER_BOARD[y])):
# If there is a ship there, increment
# player_ships by 1.
if PLAYER_BOARD[y][x] == 1:
player_ships += 1
# Counter for the amount of enemy ships.
enemy_ships = 0
# Goes through the length of the copied board.
for y in range(len(ENEMY_BOARD)):
# Goes through the length of the (copied board)[y value]
for x in range(len(ENEMY_BOARD[y])):
# If there is a ship there, increment
# player_ships by 1.
if ENEMY_BOARD[y][x] == 1:
enemy_ships += 1
# If there are no player ships left, then return
# a status message.
if player_ships == 0:
return True, "ENEMY" # Winner is Enemy
# Else, if there are no enemy ships left, then return
# a status message.
elif enemy_ships == 0:
return True, "PLAYER" # Winner is Player
# Else, just return false and the status message that the
# game is still in progress.
else:
return False, "PROGRESS" # Game is still in progress
def enemy_setup_board():
# Gives the enemy a board.
for i in BATTLESHIP_LENS:
        # Gets random coordinates. No workaround here because
# it is used in valid_ship_position.
coords = (random.randint(1, 10), random.randint(1, 10))
# Get a random orientation (either horizontal or vertical)
orientation = random.randint(1, 2)
        # If it isn't a valid ship position, keep randomizing.
while valid_ship_position(ENEMY_BOARD, int(i), coords, orientation) != True:
coords = (random.randint(1, 10), random.randint(1, 10))
orientation = random.randint(1, 2)
# Adds the ship to the board if everything is correct.
add_ship_to_board(ENEMY_BOARD, int(i), coords, orientation)
def player_setup_board():
# Interactive way of you setting up your board.
for i in BATTLESHIP_LENS:
# Status message.
print("BATTLESHIP SETUP (LENGTH " + str(i) + ")")
# Asks the user for coordinates and orientation.
coords = get_coordinates()
orientation = get_orientation()
# Prints a line.
print(LINE)
        # If it isn't a valid ship position, keep asking the player for
# coordinates and orientation.
while valid_ship_position(PLAYER_BOARD, int(i), coords, orientation) != True:
print("Invalid position / orientation! (doesnt fit on board / overlaps)")
print(LINE)
coords = get_coordinates()
orientation = get_orientation()
# After everything, adds the ship to the board and prints your board.
add_ship_to_board(PLAYER_BOARD, int(i), coords, orientation)
print_board(PLAYER_BOARD)
# Another line.
print(LINE)
# Sets up the enemy board.
enemy_setup_board()
# Starts the game.
start_game()
if __name__ == '__main__':
# Actual start of the game.
print("Battleships")
print("You have to find the enemies battleships and sink them before")
print("they sink yours!")
print("")
print("The key: ")
print(" nothing: water")
print(" M = missed in water")
print(" S = ship")
print(" X = hit ship")
print("")
sleep(2)
print(LINE)
# Sets up the player board.
player_setup_board()
|
py | 7df9b31b467139c9353ba4267d51eddefc0ea540 | import requests
from authentication.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import EdmodoProvider
class EdmodoOAuth2Adapter(OAuth2Adapter):
provider_id = EdmodoProvider.id
access_token_url = 'https://api.edmodo.com/oauth/token'
authorize_url = 'https://api.edmodo.com/oauth/authorize'
profile_url = 'https://api.edmodo.com/users/me'
def complete_login(self, request, app, token, **kwargs):
resp = requests.get(self.profile_url,
params={'access_token': token.token})
extra_data = resp.json()
return self.get_provider().sociallogin_from_response(request,
extra_data)
oauth2_login = OAuth2LoginView.adapter_view(EdmodoOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(EdmodoOAuth2Adapter)
|
py | 7df9b370f310a80882b43fea41346eea824fb1e8 | """
Definition of the :class:`PersonName` model.
"""
from django.db import models
from django_dicom.models.values.data_element_value import DataElementValue
from django_dicom.utils.html import Html
class PersonName(DataElementValue):
"""
A :class:`~django.db.models.Model` representing a single *PersonName*
data element value.
"""
value = models.JSONField(blank=True, null=True)
"""
Overrides
:attr:`~django_dicom.models.values.data_element_value.DataElementValue.value`
to assign a :class:`~django.db.models.JSONField`.
"""
raw = models.CharField(max_length=64, blank=True, null=True)
"""
Overrides
:attr:`~django_dicom.models.values.data_element_value.DataElementValue.raw`
    to assign a :class:`~django.db.models.CharField`.
"""
# String representation template for a PersonName instance.
_NAME_STRING_TEMPLATE = (
"{name_prefix} {given_name} {middle_name} {family_name} {name_suffix}"
)
def __str__(self) -> str:
"""
Returns the str representation of this instance.
Returns
-------
str
This instance's string representation
"""
components = {key: value for key, value in self.value.items()}
name = self._NAME_STRING_TEMPLATE.format(**components)
return " ".join(name.strip().split())
def to_html(self, **kwargs) -> str:
"""
Returns the HTML representation of this instance.
Returns
-------
str
HTML representation of this instance
"""
return Html.json(self.value)
# flake8: noqa: E501
|
py | 7df9b52a16f6824aa6a596b48811ef6449293b1e | from __future__ import absolute_import
from __future__ import print_function
import torch as T
import torch.autograd as G
def tensor(args, dtype='float'):
'''
    Note: not all dtypes are implemented yet
'''
if dtype == 'float':
return T.FloatTensor(*args)
elif dtype == 'double':
return T.DoubleTensor(*args)
elif dtype == 'int':
return T.IntTensor(*args)
    elif dtype == 'long':
        return T.LongTensor(*args)
    else:
        raise ValueError("unsupported dtype: %s" % dtype)
def variable(args, dtype='float', grad=True):
'''
returns a variable that wraps the input tensor of dtype
'''
    return G.Variable(tensor(args, dtype=dtype), requires_grad=grad)
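# Minimal usage sketch (added for illustration): both helpers are meant to be called with a
# size tuple such as (2, 3); `variable` simply wraps the resulting tensor for autograd.
# Runs only when this module is executed directly.
if __name__ == "__main__":
    t = tensor((2, 3), dtype='float')     # uninitialised 2x3 FloatTensor
    v = variable((2, 3), dtype='float')   # autograd Variable wrapping a 2x3 FloatTensor
    print(t.size(), v.requires_grad)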
|
py | 7df9b70ea86fc6540765297b6b432fa4447e22cb | from django.contrib.auth.models import User
from django.test import TestCase
from api.decorators import check_response
from django.utils.decorators import method_decorator
from api.tests.test_auth import USER_CREDENTIALS, STORE_DATA
from medicines.models import Medicine
from api.views.medicine import MedicineView
from django.http import QueryDict
NEW_Medicine = {
'name': 'Dummy for testing',
'price': 100.5,
'category': 1,
'description': 'This is intended for testing only',
'quantity': 200
}
class TestMedViews(TestCase):
# Initial data for testing
fixtures = ['api/tests/med_initial.json']
def setUp(self) -> None:
self.user = User.objects.create_user(**USER_CREDENTIALS)
def handle_permit(self, login=True):
"""
Handles the permissions in case of this tests
:param login: Specifies if, the user should be logged in
:type login: bool
"""
if login:
self.client.login(**USER_CREDENTIALS)
else:
self.client.logout()
@method_decorator(check_response(path="/api/v1/", login_required=False))
def test_get_list(self):
res = self.client.get('/api/v1/')
self.assertEqual(Medicine.objects.count(), len(res.json()))
query = QueryDict('price__lt=5')
res = MedicineView.get_queryset(query)
self.assertTrue(all(m.price <= 5 for m in res))
query = QueryDict('category=Fever')
res = MedicineView.get_queryset(query)
self.assertTrue(all(m.category.name == 'Fever' for m in res))
def test_new_add(self):
"""
Test if the POST requests to create new medicines are working
"""
# Login
self.handle_permit()
res = self.client.post('/api/v1/', NEW_Medicine)
self.assertEqual(res.status_code, 400, 'Non permitted users are allowed to use')
# Become seller | Get permissions
self.client.post('/api/v1/sell/', STORE_DATA)
res = self.client.post('/api/v1/', {**NEW_Medicine, 'category': 1})
self.assertEqual(res.status_code, 201, res.json().get('errors', res.json().get('msg', 'No message')))
self.assertTrue(Medicine.objects.filter(name=NEW_Medicine['name']), 'Medicine not created')
# Check duplicate request
res = self.client.post('/api/v1/', {**NEW_Medicine, 'category': 1})
self.assertEqual(res.status_code, 400, 'Duplicate medicines are being created')
@method_decorator(check_response("/api/v1/detail/1/", login_required=False))
def test_detail_data(self):
res = self.client.get("/api/v1/detail/new-med/")
self.assertEqual(res.json()['name'], 'New Med')
def test_update_med(self):
res = self.client.post('/api/v1/detail/1/', {**NEW_Medicine, 'name': 'Dummy changed name'})
self.assertEqual(res.json()['name'], 'Dummy changed name')
self.assertEqual(Medicine.objects.get(pk=1).name, 'Dummy changed name')
|
py | 7df9b7599496424dad7fffc5211f78bec3588a1f | from abc import abstractmethod, ABC
class Logger(ABC):
@abstractmethod
def log(self, obj):
pass
class FileLogger(Logger):
def __init__(self, file_path):
self.file_path = file_path
def log(self, obj):
with open(self.file_path, 'a') as file:
file.write(obj)
file.write('\n')
class StdoutLogger(Logger):
def log(self, obj):
print(obj)
class LoggersBuilder:
def __init__(self):
self.file_path = None
self.environment = 'dev'
def set_file_path(self, file_path):
self.file_path = file_path
self.environment = 'prod'
def set_environment(self, environment):
self.environment = environment
@property
def environment(self):
return self.__environment
@environment.setter
def environment(self, value):
self.__environment = value
@property
def file_path(self):
return self.__file_path
@file_path.setter
def file_path(self, value):
self.__file_path = value
self.environment = 'prod'
def build(self) -> Logger:
if self.environment == 'prod':
return FileLogger(self.file_path)
else:
return StdoutLogger()
loggers_builder = LoggersBuilder()
# loggers_builder.set_file_path('./logs3.txt')
loggers_builder.file_path = './logs3.txt'
loggers_builder.build().log('It works with builders and properties')
|
py | 7df9b7827912816e2cba886abde23b4a78c5df82 | import logging
import json
from aws_xray_sdk.core import xray_recorder
from app.models.metrics.metrics_model import MetricsModel
from app.config import ROOT_DIR
from typing import List, Dict, Optional, Union
from operator import itemgetter
from scipy.stats import beta
from app.models.slate_config import SlateConfigModel
from app.models.personalized_topic_list import PersonalizedTopicList
DEFAULT_ALPHA_PRIOR = 0.02
DEFAULT_BETA_PRIOR = 1.0
RankableListType = Union[List['SlateModel'], List['RecommendationModel']]
RecommendationListType = List['RecommendationModel']
def top5(items: RankableListType) -> RankableListType:
"""
Gets the first 5 recommendations from the list of recommendations.
:param items: a list of recommendations in the desired order (pre-publisher spread)
:return: first 5 recommendations from the list of recommendations
"""
return items[:5]
def top15(items: RankableListType) -> RankableListType:
"""
Gets the first 15 recommendations from the list of recommendations.
:param items: a list of recommendations in the desired order (pre-publisher spread)
:return: first 15 recommendations from the list of recommendations
"""
return items[:15]
def top30(items: RankableListType) -> RankableListType:
"""
Gets the first 30 recommendations from the list of recommendations.
:param items: a list of recommendations in the desired order (pre-publisher spread)
:return: first 30 recommendations from the list of recommendations
"""
return items[:30]
def top45(items: RankableListType) -> RankableListType:
"""
    Gets the first 45 recommendations from the list of recommendations.
    :param items: a list of recommendations in the desired order (pre-publisher spread)
    :return: first 45 recommendations from the list of recommendations
"""
return items[:45]
def blocklist(recs: RecommendationListType, blocklist: Optional[List[str]] = None) -> RecommendationListType:
"""
this filters recommendations by item_id using the blocklist available
in ./app/resources/blocklists.json
:param recs: a list of recommendations in the desired order (pre-publisher spread)
:param blocklist: a list of item_ids to be blocked
:return: filtered recommendations from the input list of recommendations
"""
if not blocklist:
with open(ROOT_DIR + "/app/resources/blocklists.json", "r") as fp:
blocklists = json.load(fp)
return [rec for rec in recs if str(rec.item.item_id) not in blocklists["items"]]
else:
return [rec for rec in recs if rec.item.item_id not in blocklist]
def thompson_sampling(
recs: RankableListType,
metrics: Dict[(int or str), 'MetricsModel']) -> RankableListType:
"""
Re-rank items using Thompson sampling which combines exploitation of known item CTR
with exploration of new items with unknown CTR modeled by a prior
Thompson Sampling uses click data to make a list of tried-and-true recommendations that typically generate a
lot of interest, mixed in with some newer ones that we want to try out so we can keep adding more interesting
items to our repertoire.
:param recs: a list of recommendations in the desired order (pre-publisher spread)
:param metrics: a dict with item_id as key and dynamodb row modeled as ClickDataModel
:return: a re-ordered version of recs satisfying the spread as best as possible
"""
    # if there are no recommendations, we're done
if not recs:
return recs
# Currently we are using the hardcoded priors below.
# TODO: We should return to having slate/lineup-specific priors. We could load slate-priors from
# MODELD-Prod-SlateMetrics, although that might require an additional database lookup. We might choose to use a
# 'default' key that aggregates engagement data in the same table, such that no additional lookups are required.
alpha_prior, beta_prior = DEFAULT_ALPHA_PRIOR, DEFAULT_BETA_PRIOR
scores = []
# create prior distribution for CTR from parameters in click data table
prior = beta(alpha_prior, beta_prior)
for rec in recs:
try:
# Recommendations are keyed on item_id. Note that the metrics model grabs the item_id
# when it parses the clickdata by splitting the primary key in dynamo
clickdata_id = rec.item.item_id
except AttributeError:
# Slates are keyed on their slate id, in this case the id field of the slate config model
# Similarly these are parsed as the prefix of the primary key in the slate metrics table
clickdata_id = rec.id
d = metrics.get(clickdata_id)
if d:
# TODO: Decide how many days we want to look back.
clicks = max(d.trailing_28_day_opens + alpha_prior, 1e-18)
# posterior combines click data with prior (also a beta distribution)
no_clicks = max(d.trailing_28_day_impressions - d.trailing_28_day_opens + beta_prior, 1e-18)
# sample from posterior for CTR given click data
score = beta.rvs(clicks, no_clicks)
scores.append((rec, score))
else: # no click data, sample from module prior
scores.append((rec, prior.rvs()))
scores.sort(key=itemgetter(1), reverse=True)
return [x[0] for x in scores]
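def _thompson_sampling_demo():
    """
    Illustrative sketch (added, never called by the service): the heart of
    ``thompson_sampling`` is a single Beta draw per item, combining (made-up)
    engagement counts with the module priors. Items with strong open rates usually
    rank first, while sparse items still get explored occasionally.
    """
    fake_engagement = {"item_a": (90, 1000), "item_b": (5, 50), "item_c": (0, 0)}  # (opens, impressions)
    sampled = {
        item: beta.rvs(opens + DEFAULT_ALPHA_PRIOR, max(impressions - opens, 0) + DEFAULT_BETA_PRIOR)
        for item, (opens, impressions) in fake_engagement.items()
    }
    return sorted(sampled, key=sampled.get, reverse=True)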
def personalize_topic_slates(input_slate_configs: List['SlateConfigModel'],
personalized_topics: PersonalizedTopicList,
topic_limit: Optional[int] = 1) -> List['SlateConfigModel']:
"""
This routine takes a list of slates as input in which must include slates with an associated curator topic
label. It uses the topic_profile that is supplied by RecIt to re-rank the slates according to affinity
with items in the user's list.
This version allows non-topic slates within the lineup. These are left in order in the output configs
list. Personalizable (topic) slates are re-ordered using their initial slots in the config lineup.
If the topic_limit parameter is included this will determine the number of topic slates that
remain in the output config list.
:param input_slate_configs: SlateConfigModel list that includes slates with curatorTopicLabels
:param personalized_topics: response from RecIt listing topics ordered by affinity to user
:param topic_limit: desired number of topics to return, if this is set the number of slates returned is truncated.
otherwise all personalized topics among the input slate configs are returned
:return: SlateLineupExperimentModel with reordered slates
"""
topic_to_score_map = {t.curator_topic_label: t.score for t in personalized_topics.curator_topics}
# filter non-topic slates
personalizable_configs = list(filter(lambda s: s.curator_topic_label in topic_to_score_map, input_slate_configs))
logging.debug(personalizable_configs)
if not personalizable_configs:
raise ValueError(f"Input lineup to personalize_topic_slates includes no topic slates")
elif topic_limit and len(personalizable_configs) < topic_limit:
raise ValueError(f"Input lineup to personalize_topic_slates includes fewer topic slates than requested")
# re-rank topic slates
personalizable_configs.sort(key=lambda s: topic_to_score_map.get(s.curator_topic_label), reverse=True)
output_configs = list()
added_topic_slates = 0
personalized_index = 0
for config in input_slate_configs:
if config in personalizable_configs:
# if slate is personalizable add highest ranked slate remaining
if added_topic_slates < topic_limit:
output_configs.append(personalizable_configs[personalized_index])
added_topic_slates += 1
personalized_index += 1
else:
logging.debug(f"adding topic slate {added_topic_slates}")
output_configs.append(config)
return output_configs
@xray_recorder.capture('rankers_algorithms_spread_publishers')
def spread_publishers(recs: RecommendationListType, spread: int = 3) -> RecommendationListType:
"""
Makes sure stories from the same publisher/domain are not listed sequentially, and have a configurable number
of stories in-between them.
:param recs: a list of recommendations in the desired order (pre-publisher spread)
:param spread: the minimum number of items before we can repeat a publisher/domain
:return: a re-ordered version of recs satisfying the spread as best as possible
"""
    # if there are no recommendations, we're done
if not len(recs):
return recs
# move first item in list to first item in re-ordered list
reordered = [recs.pop(0)]
# iterator to keep track of spread between domains
iterator = 0
# iterate over remaining items in recs
while len(recs):
# if there aren't enough items left in recs to satisfy the desired domain spread,
# or if the iterator reaches the end of recs, then we cannot spread any further.
# just add the rest of the recs as-is to the end of the re-ordered list.
# note that this is a simplistic take - we could write more logic here to decrease the spread value by
# one each time if iterator reaches or exceeds the length of recs
if (len(recs) <= spread) or (iterator >= len(recs)):
reordered.extend(recs)
break
# get a list of domains that are currently invalid in the sequence
if len(reordered) > spread:
# if we have enough items in the reordered list, the invalid domains are the last spread number
domains_to_check = [x.publisher for x in reordered[-spread:]]
else:
# if we don't have more than spread items reordered, just get all the domains in reordered
domains_to_check = [x.publisher for x in reordered]
        # we can add the rec at iterator position to the re-ordered list if the rec at iterator has a different
# domain than the invalid list retrieved above
if recs[iterator].publisher not in domains_to_check:
reordered.append(recs.pop(iterator))
iterator = 0
else:
# if we cannot add the rec at position iterator to the re-ordered list, increment the iterator and try
# the next item in recs
iterator += 1
return reordered
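# Hedged usage sketch (added, not part of the service code): spread_publishers only looks at
# the `publisher` attribute, so simple stand-ins are enough to show the reordering; real
# callers pass RecommendationModel instances. The context_missing setting is only there so
# the X-Ray capture decorator does not require an active segment when run directly.
if __name__ == "__main__":
    from types import SimpleNamespace
    xray_recorder.configure(context_missing="LOG_ERROR")
    demo_recs = [SimpleNamespace(publisher=p) for p in ["a", "a", "a", "b", "c", "d"]]
    print([r.publisher for r in spread_publishers(demo_recs, spread=2)])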
|
py | 7df9bb914a6d2420d21b4e90a8d7bbb9e3a82aec | """
WSGI config for cass-prototype project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cass-prototype.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
py | 7df9bc11c2e20b858b01abfcf348e332d0eda2ff | """empty message
Revision ID: 1232ed1b2ec5
Revises: 6a9d9de4c93a
Create Date: 2019-02-06 22:33:37.248268
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1232ed1b2ec5'
down_revision = '6a9d9de4c93a'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('ios_device_token', sa.Column('value', sa.String(length=64), nullable=True))
op.drop_column('ios_device_token', 'token')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('ios_device_token', sa.Column('token', sa.VARCHAR(length=64), autoincrement=False, nullable=True))
op.drop_column('ios_device_token', 'value')
# ### end Alembic commands ###
|
py | 7df9bc1cea6f5bf6adfc9a18c26892887a5d9013 | import cartopy.crs as ccrs
import cartopy.io.shapereader as shpreader
import matplotlib.lines as mlines
import matplotlib.pyplot as plt  # For the plots
import numpy as np  # To store data as arrays and to be able to draw the normal distribution
import pandas as pd
import seaborn as sns
from cartopy.io.img_tiles import Stamen
from matplotlib import cm
from matplotlib.patches import Ellipse
from scipy.spatial import Voronoi
from scipy.spatial import voronoi_plot_2d as SPvorPlot
from plaguepy import bereken
sns.set()
def voronoi(punten: np.ndarray, mu: float, sigma: float, tekst: bool = True, ellipse: bool = True) -> None:
"""Plot een Voronoi-diagram ter grote van de maximale x en y-coördinaten.
Bij ieder middelpunt is eventueel het %-overlap te zien en het daarbijbehordende bereik.
:param punten: Numpy array van n bij 2. Iedere row bevat een x en y coördinaat.
:param mu: gemiddelde van de normale verdelingen
:param sigma: standaard deviatie van de normale verdelingen
:param tekst: keuze of de percentages zichtbaar zijn in de plot.
Aan te raden om uit te zetten bij veel punten.
:param ellipse: keuze of de ellipsen zichtbaar zijn in de plot.
Aan te raden om uit te zetten bij veel punten
:returns: Plot met een Voronoi-diagram en stippen op de coordinaten meegegeven in 'punten'
"""
if not isinstance(punten, np.ndarray) or not isinstance(mu, (float, int)) or not isinstance(sigma, (float, int)) \
or not isinstance(tekst, bool) or not isinstance(ellipse, bool):
raise ValueError("Verkeerde waardes meegegeven als argumenten")
    plt.rcParams["figure.figsize"] = [8, 8]  # Larger plots in Jupyter Notebook
    vor = Voronoi(punten)  # Compute the Voronoi diagram
    SPvorPlot(vor)  # Plot the Voronoi diagram
ax = plt.gca()
if tekst or ellipse:
        mid_afstanden = bereken.middelpunt_afstanden(punten)  # Compute all midpoint distances
for i, punt in enumerate(vor.ridge_points):
perc_overlap = bereken.perc_overlap(mid_afstanden[i], mu, sigma)
middelpunt = vor.points[punt].mean(axis=0)
            if round(perc_overlap * 100, 2) > 0.00:  # Only draw if there is more than 0.00% overlap
if ellipse:
graden = bereken.helling(vor.points[punt][0], vor.points[punt][1])
grens = bereken.grens(0.999, mu, sigma)
ax.add_artist(
Ellipse((middelpunt[0], middelpunt[1]), sigma * perc_overlap, grens - mid_afstanden[i],
                                    angle=graden, color="red", fill=False))  # Draw the ellipse
if tekst:
                    plt.text(middelpunt[0], middelpunt[1], f"{round(perc_overlap * 100, 2)}%")  # Draw the text
plt.title(f"Aantal punten: {punten.shape[0]}")
plt.xlabel("X-coördinaten")
plt.ylabel("Y-coördinaten")
if ellipse:
legenda = (mlines.Line2D([0], [0], marker="o", color="w", label="Punt", markerfacecolor="b", markersize=15),
mlines.Line2D([0], [0], marker="o", color="w", label="Overlapping", markerfacecolor="r",
markersize=15))
else:
legenda = [mlines.Line2D([0], [0], marker="o", color="w", label="Punt", markerfacecolor="b", markersize=15)]
plt.legend(handles=legenda)
plt.show()
def verloop(punten: np.ndarray, vector: np.ndarray, perioden: int, mu: float = 0, sigma: float = 1,
legenda: bool = True, cmap_type: str = "hot") -> None:
"""Plot het verloop van een verspreiding waarbij het begin van de infectie in de vector wordt aangegeven.
Verspreiding op tijdstip t gaat volgende de formule v·M^t.
:param punten: Numpy array van n bij 2. Iedere row bevat een x en y coördinaat.
:param vector: Numpy array van n bij 1. Bevat per cell 0 of 1.
Iedere cell waar 1 staat, begint de infectie.
:param perioden: Het aantal perioden dat het verloop berekend en getoont moet worden.
:param mu: gemiddelde van de normale verdelingen
:param sigma: standaard deviatie van de normale verdelingen
:param legenda: optioneel, toont de legenda.
Aangeraden om uit te zetten als er heel veel punten dienen te worden geplot
:param cmap_type: De te gebruiken colormap voor de plot
:returns: Plot met het verloop van de infectiegraad/verspreiding van de punten
"""
if not isinstance(punten, np.ndarray) or not isinstance(vector, np.ndarray) or not isinstance(perioden, int) \
or not isinstance(mu, (float, int)) or not isinstance(sigma, (float, int)) or not isinstance(legenda, bool) \
or not isinstance(cmap_type, str):
raise ValueError("Verkeerde waardes meegegeven als argumenten")
if punten.shape[0] != vector.shape[0]:
raise ValueError("Vector en punten moeten even lang zijn.")
print("Even geduld a.u.b, dit kan even duren...")
    plt.rcParams["figure.figsize"] = [8, 8]  # Larger plots in Jupyter Notebook
matrix_verloop = bereken.matrix_vec_verloop(punten, vector, perioden, mu=mu, sigma=sigma)
perc_voll_inf = bereken.perc_volledige_infectie(matrix_verloop)
color_map = cm.get_cmap(cmap_type, 12)
for i, periode in enumerate(matrix_verloop):
plt.plot(periode, color=color_map(perc_voll_inf[i]), label=f"Punt {punten[i]}")
    # Plot the infection level of every point during every period
if legenda:
plt.legend(loc="lower right")
plt.xlabel("Periodes")
plt.ylabel("Infectiegraad")
plt.xlim(0, perioden)
plt.ylim(0, 1.1)
plt.title(f"Aantal punten: {punten.shape[0]}")
    plt.tight_layout()  # Everything fits the figure better this way
plt.show()
def kaart(file_path: str, punten: np.ndarray, vector: np.ndarray, perioden: int, terrein: bool = True, sep: str = ",",
mu: float = 0, sigma: float = 1, cmap_type: str = "Dark2") -> None:
"""Maakt een kaart gebaseerd op de coordinaten in een CSV-bestand.
:param file_path: Pad naar het te plotten CSV-bestand
:param punten: Numpy array van n bij 2. Iedere row bevat een x en y coördinaat.
:param vector: Numpy array van n bij 1. Bevat per cell 0 of 1. Iedere cell waar 1 staat, begint de infectie.
:param perioden: Het aantal perioden dat de kaart moet berekenen.
:param terrein: optioneel, zorgt voor een grafische achtergrond van de plot. Kan enkele seconden langer duren.
:param sep: optioneel, seperator van het CSV-bestand
:param mu: gemiddelde van de normale verdelingen
:param sigma: standaard deviatie van de normale verdelingen
:param cmap_type: De te gebruiken colormap voor de plot
:returns: Een kaart met de geplotte punten, al dan niet met een 'terrein-achtergrond'.
Kleur van de stippen staat voor de periode wanneer zij volledig besmet raakten.
"""
if not isinstance(file_path, str) or not isinstance(punten, np.ndarray) or not isinstance(vector, np.ndarray) \
or not isinstance(perioden, int) or not isinstance(terrein, bool) or not isinstance(sep, str) or not \
isinstance(mu, (float, int)) or not isinstance(sigma, (float, int)) or not isinstance(cmap_type, str):
raise ValueError("Verkeerde waardes meegegeven als argumenten")
if punten.shape[0] != vector.shape[0]:
raise ValueError("Vector en punten moeten even lang zijn.")
print("Kaart aan het maken. \nEven geduld a.u.b, dit kan even duren...")
    plt.rcParams["figure.figsize"] = [8, 8]  # Larger plots in Jupyter Notebook
coordinaten = pd.read_csv(file_path, sep=sep)
coordinaten = coordinaten.loc[(coordinaten["latitude"] < 53.5) & (coordinaten["latitude"] > 50.7) &
(coordinaten["longitude"] < 7.3) & (coordinaten["longitude"] > 3.3)] # Filter NL
coordinaten = coordinaten.values[:, :] # DataFrame omzetten naar Numpy-array
sf_path = "data/shapefiles/gadm36_NLD_2.shp"
sf_data = list(shpreader.Reader(sf_path).geometries())
ax = plt.axes(projection=ccrs.EuroPP())
if terrein:
ax.add_image(Stamen("terrain-background"), 12)
ax.add_geometries(sf_data, ccrs.PlateCarree(), edgecolor="black", facecolor="none", alpha=1)
else:
ax.add_geometries(sf_data, ccrs.PlateCarree(), edgecolor="black", facecolor="orange", alpha=0.2)
ax.set_extent([min(coordinaten[:, 1]), max(coordinaten[:, 1]),
min(coordinaten[:, 0]), max(coordinaten[:, 0])])  # Extent equal to min/max of the coordinates
matrix_verloop = bereken.matrix_vec_verloop(punten, vector, perioden, mu=mu, sigma=sigma)
perc_voll_inf = bereken.perc_volledige_infectie(matrix_verloop)
color_map = cm.get_cmap(cmap_type, 12)
for i, coord in enumerate(coordinaten):  # Draw the dots
ax.plot(coord[1], coord[0], marker="o", markersize=3, color=color_map(perc_voll_inf[i]),
transform=ccrs.PlateCarree())
for i, punt in enumerate(vector):
if punt:
ax.plot(punten[i][1], punten[i][0], marker="D", markersize=12, color="red", transform=ccrs.PlateCarree())
plt.show()
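# Minimal usage sketch for kaart() (hypothetical CSV path and made-up points; the
# CSV is assumed to have "latitude"/"longitude" columns, and the shapefile path
# hard-coded above must exist):
#   punten = np.array([[52.37, 4.90], [51.92, 4.48]])
#   vector = np.array([1, 0])  # infection starts at the first point
#   kaart("data/coordinaten.csv", punten, vector, perioden=10, terrein=False)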
|
py | 7df9bd2e3a28383c56cc774a9a5c536cc3e76bdb | from tqdm import tqdm
import os
import math
import logging
import torch
import torch.nn as nn
from torch.optim import Adam
from torch.optim import SGD
import numpy as np
from pytorch_bert import BertForPreTraining
from bert_config import BertConfig
from load_data import DataLoaderForBertPretraining
import time
import argparse
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
setup_seed(123)
def pretrain(args):
num_epochs = args.epochs
lr = args.lr
cuda_condition = torch.cuda.is_available()
device = torch.device("cuda:%d"%args.gpu_id if cuda_condition else "cpu")
config = BertConfig(vocab_size=args.vocab_size,
hidden_size=args.hidden_size,
num_hidden_layers=args.num_hidden_layers,
num_attention_heads=args.num_attention_heads,
intermediate_size=args.hidden_size*4,
max_position_embeddings=args.seq_length,
attention_probs_dropout_prob=args.dropout_prob,
hidden_dropout_prob=args.dropout_prob,
batch_size=args.train_batch_size,
hidden_act=args.hidden_act)
model = BertForPreTraining(config=config)
model.to(device)
# Input data file names definition
dict_seqlen2predlen = {128:20, 512:80}
pred_len = dict_seqlen2predlen[config.max_position_embeddings]
dataset = args.dataset
if dataset not in ['wikicorpus_en', 'wiki_books']:
raise NotImplementedError
file_dir = './data/hdf5_lower_case_1_seq_len_128_max_pred_20_masked_lm_prob_0.15_random_seed_12345_dupe_factor_5/%s/'%dataset
file_name_format = dataset + '_training_%d.hdf5'
train_file_num = 256
train_files = [file_dir + file_name_format%file_id for file_id in range(train_file_num)]
#init parameters
for m in model.modules():
if isinstance(m, (nn.Linear, nn.Embedding)):
nn.init.xavier_normal_(m.weight)
# # save init parameters
# params = model.state_dict()
# for key, val in params.items():
# params[key] = val.cpu().numpy()
# torch.save(params, "pytorch_params.file")
opt = Adam(model.parameters(), lr=lr, betas=(0.9,0.999), eps=1e-8, weight_decay = args.adam_weight_decay)
# opt = Adam(model.parameters(), lr=lr, betas=(0.9,0.999), eps=1e-8)
# opt = SGD(model.parameters(), lr=lr)
# # load parameters
# load_ep = 0.0
# load_i = 5
# load_path = './pretrained_params/pytorch_pretrained_params/'
# load_file = 'epoch_%d_iter_%d.params'%(load_ep,load_i)
# state_dict = torch.load(load_path+load_file, map_location='cpu' if not torch.cuda.is_available() else None)
# model.load_state_dict(state_dict)
global_step_num = 0
for ep in range(num_epochs):
step_num = 0
for train_file in train_files:
dataloader = DataLoaderForBertPretraining(train_file, config.batch_size, pred_len)
for i in range(dataloader.batch_num):
start_time = time.time()
batch_data = dataloader.get_batch(i)
input_ids = torch.LongTensor(batch_data['input_ids']).to(device)
token_type_ids = torch.LongTensor(batch_data['token_type_ids']).to(device)
attention_mask = torch.LongTensor(batch_data['attention_mask']).to(device)
masked_lm_labels = torch.LongTensor(batch_data['masked_lm_labels']).to(device)
next_sentence_label = torch.LongTensor(batch_data['next_sentence_label']).to(device)
opt.zero_grad()
_,_, masked_lm_loss_mean, next_sentence_loss_mean = model(input_ids, token_type_ids, attention_mask, masked_lm_labels, next_sentence_label)
loss = masked_lm_loss_mean + next_sentence_loss_mean
loss.backward()
opt.step()
# # save parameters
# if i%5000 == 0 and i != 0:
# save_path = './pretrained_params/pytorch_pretrained_params_adam/'
# save_file = 'epoch_%d_iter_%d.params'%(ep,i)
# if not os.path.exists(save_path):
# os.makedirs(save_path)
# torch.save(model.state_dict(), save_path+save_file)
masked_lm_loss_out = masked_lm_loss_mean.item()
next_sentence_loss_out = next_sentence_loss_mean.item()
loss_out = loss.item()
end_time = time.time()
print('[Epoch %d] (Iteration %d): Loss = %.3f, MLM_loss = %.3f, NSP_loss = %.6f, Time = %.3f'%(ep,step_num,loss_out, masked_lm_loss_out, next_sentence_loss_out, end_time-start_time))
step_num += 1
global_step_num += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--gpu_id', type=int, default=0, help='Id of GPU to run.'
)
parser.add_argument(
"--train_batch_size", type=int, default=64, help="Training batch size"
)
parser.add_argument(
"--dataset", type=str, default='wikicorpus_en', help="Dataset used to train."
)
parser.add_argument(
"--vocab_size", type=int, default=30522, help="Total number of vocab"
)
parser.add_argument(
"--hidden_size", type=int, default=768, help="Hidden size of transformer model",
)
parser.add_argument(
"--num_hidden_layers", type=int, default=12, help="Number of layers"
)
parser.add_argument(
"-a",
"--num_attention_heads",
type=int,
default=12,
help="Number of attention heads",
)
parser.add_argument(
"-s", "--seq_length", type=int, default=128, help="Maximum sequence len"
)
parser.add_argument("-e", "--epochs", type=int,
default=10, help="Number of epochs")
parser.add_argument("--lr", type=float, default=1e-5,
help="Learning rate of adam")
parser.add_argument(
"--adam_weight_decay", type=float, default=0.01, help="Weight_decay of adam"
)
parser.add_argument(
"--hidden_act", type=str, default='gelu', help="Hidden activation to use."
)
parser.add_argument(
"--dropout_prob", type=float, default=0.1, help="Dropout rate."
)
args = parser.parse_args()
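# Example invocation (hypothetical script name; all flags are defined above):
#   python pretrain.py --gpu_id 0 --train_batch_size 64 --dataset wikicorpus_en \
#       --seq_length 128 --epochs 10 --lr 1e-5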
pretrain(args) |
py | 7df9bd9806467cc37623dd7926c4ad714d678585 | """Miscellaneous utility functions."""
import gzip
import itertools
import numpy as np
import pandas as pd
from scipy import stats
import six.moves.cPickle as pickle
def df_to_struct(df):
"""Converts a DataFrame to RPy-compatible structured array."""
struct_array = df.to_records()
arr_dtype = struct_array.dtype.descr
for i, dtype in enumerate(arr_dtype):
if dtype[1] == np.dtype('object'):
arr_dtype[i] = (dtype[0], dtype[1].replace("|O", "|S"))
struct_array = np.asarray([tuple(d) for d in struct_array],
dtype=arr_dtype)
return struct_array
def df_ttest(df, by, key, paired=False, nice=True, **kwargs):
"""Perform a T-test over a DataFrame groupby."""
test_kind = "rel" if paired else "ind"
test_func = getattr(stats, "ttest_" + test_kind)
args = [d[key] for i, d in df.groupby(by)]
t, p = test_func(*args, **kwargs)
dof = (len(df) / 2) - 1 if paired else len(df) - 2
if nice:
return "t(%d) = %.3f; p = %.3g%s" % (dof, t, p, sig_stars(p))
else:
return pd.Series([t, p], ["t", "p"])
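# Usage sketch for df_ttest (hypothetical column names "group" and "score"):
#   df = pd.DataFrame({"group": ["a"] * 10 + ["b"] * 10,
#                      "score": list(range(10)) + list(range(5, 15))})
#   df_ttest(df, by="group", key="score")   # e.g. "t(18) = ...; p = ..."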
def df_oneway(df, by, key, nice=True, **kwargs):
"""Perform a oneway analysis over variance on a DataFrame groupby."""
args = [d[key] for i, d in df.groupby(by)]
f, p = stats.f_oneway(*args, **kwargs)
dof_b = len(args) - 1
dof_w = len(df) - dof_b
if nice:
return "F(%d, %d) = %.3f; p = %.3g%s" % (dof_b, dof_w, f,
p, sig_stars(p))
else:
return pd.Series([f, p], ["F", "p"])
def product_index(values, names=None):
"""Make a MultiIndex from the combinatorial product of the values."""
iterable = itertools.product(*values)
idx = pd.MultiIndex.from_tuples(list(iterable), names=names)
return idx
def make_master_schedule(evs):
"""Take a list of event specifications and make one schedule.
Parameters
----------
evs : sequence of n x 3 arrays
list of (onset, duration, amplitude) event specifications
Returns
-------
sched : n_event x 5 array
schedule of event specifications with
event and presentation ids
"""
evs = np.asarray(evs)
n_cond = len(evs)
# Make a vector of condition ids and stimulus indices
cond_ids = [np.ones(evs[i].shape[0]) * i for i in range(n_cond)]
cond_ids = np.concatenate(cond_ids)
stim_idxs = np.concatenate([np.arange(len(ev)) for ev in evs])
# Make a schedule of the whole run
sched = np.row_stack(evs)
sched = np.column_stack((sched, cond_ids, stim_idxs))
# Sort the master schedule by onset time
timesorter = np.argsort(sched[:, 0])
sched = sched[timesorter]
return sched
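# Sketch of make_master_schedule with two conditions (made-up onsets/durations):
#   ev_a = np.array([[0.0, 1.0, 1.0], [10.0, 1.0, 1.0]])
#   ev_b = np.array([[5.0, 1.0, 1.0], [15.0, 1.0, 1.0]])
#   make_master_schedule([ev_a, ev_b])
#   # rows sorted by onset, with (condition id, stimulus index) appended:
#   # [[ 0,1,1,0,0], [ 5,1,1,1,0], [10,1,1,0,1], [15,1,1,1,1]]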
def sig_stars(p):
"""Return a R-style significance string corresponding to p values."""
if p < 0.001:
return "***"
elif p < 0.01:
return "**"
elif p < 0.05:
return "*"
elif p < 0.1:
return "."
return ""
def iqr(a):
"""Calculate the IQR for an array of numbers."""
a = np.asarray(a)
q1 = stats.scoreatpercentile(a, 25)
q3 = stats.scoreatpercentile(a, 75)
return q3 - q1
class Results(object):
"""Extremely simple namespace for passing around and pickling data."""
def __init__(self, **kwargs):
for key, val in kwargs.items():
setattr(self, key, val)
def load_pkl(fname, zip=True):
"""Read pickled data from disk, possibly decompressing."""
# Choose the opener explicitly: assigning to `open` inside the function would
# shadow the builtin and raise UnboundLocalError when zip=False.
opener = gzip.open if zip else open
with opener(fname, "rb") as fid:
res = pickle.load(fid)
return res
def save_pkl(fname, res, zip=True):
"""Write pickled data to disk, possibly compressing."""
# Same pattern as load_pkl: avoid shadowing the builtin `open`.
opener = gzip.open if zip else open
with opener(fname, "wb") as fid:
pickle.dump(res, fid)
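# Round-trip sketch for Results/save_pkl/load_pkl (hypothetical file name):
#   res = Results(betas=np.zeros(3), name="run1")
#   save_pkl("run1.pkl.gz", res)
#   load_pkl("run1.pkl.gz").name   # -> "run1"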
|
py | 7df9bf771e6989490c0373eeda95b3891cb2c868 | import sys
from launch import LaunchDescription
from launch_ros.actions import Node
#launch_arguments: expected format '<name>:=<value>'; for example, robot_ip:=<ip value> or ip:=<ip value>.
#example: ros2 launch ui_for_debug_and_demo tm_gui.launch.py robot_ip:=192.168.10.2
def generate_launch_description():
args = []
length = len(sys.argv)
if (len(sys.argv) >= 5):
i = 4
while i < len(sys.argv):
args.append(sys.argv[i])
i = i + 1
# gui_demo executable
ui_for_debug_and_demo_node = Node(
package='ui_for_debug_and_demo',
executable='robot_ui',
output='screen',
#The five different verbosity levels are, in order: DEBUG, INFO, WARN, ERROR, FATAL
arguments=['--ros-args', '--log-level', 'WARN'],
)
# tm driver
tm_driver_node = Node(
package='tm_driver',
executable='tm_driver',
output='screen',
#The five different verbosity levels are, in order: DEBUG, INFO, WARN, ERROR, FATAL
arguments=args,
#arguments=[args, '--ros-args', '--log-level', 'DEBUG'],
)
return LaunchDescription([ui_for_debug_and_demo_node,tm_driver_node])
|
py | 7df9c021a6d68da2c728ec41a1205b6b4a5cd943 | # Generated by Django 3.0.8 on 2021-06-08 20:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0014_auto_20210609_0123'),
]
operations = [
migrations.AddField(
model_name='profile',
name='rollno',
field=models.IntegerField(null=True),
),
]
|
py | 7df9c081ef027beed9a93b67195a6412d7123344 | from django.db import models
from django.contrib.auth.models \
import AbstractBaseUser, BaseUserManager, PermissionsMixin
# Create your models here.
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
# Create & saves the new user
if not email:
raise ValueError("User must provide an email address")
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
# Creates & saves the new super user
user = self.create_user(email=email, password=password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
# Custom user model that supports using email instead of username
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
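# Usage sketch (assumes this app is installed and AUTH_USER_MODEL points at this
# model; the email addresses and passwords are made up):
#   user = User.objects.create_user(email="alice@example.com", password="s3cret")
#   admin = User.objects.create_superuser(email="admin@example.com", password="s3cret")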
|
py | 7df9c16114722da979b5c302dfecda29f716f6f5 | #from ScheduledOptim import *
#from pythonBottom.run import finetune
#from pythonBottom.run import pre
#wandb.init("sql")
class dotdict(dict):
def __getattr__(self, name):
return self[name]
args = dotdict({
'NlLen':50,
'CodeLen':100,
'batch_size':48,
'embedding_size':256,
'WoLen':15,
'Vocsize':100,
'Nl_Vocsize':100,
'max_step':3,
'margin':0.5,
'poolsize':50,
'Code_Vocsize':100,
'num_steps':50,
'rulenum':10
})
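# dotdict gives the config above attribute access in addition to normal dict
# lookups, e.g. args.NlLen and args["NlLen"] both return 50.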
#os.environ["CUDA_VISIBLE_DEVICES"]="5, 4"
#os.environ['CUDA_LAUNCH_BLOCKING']="1"
def save_model(model):
if not os.path.exists("checkpointSearch/"):
os.makedirs("checkpointSearch")
torch.save(model.state_dict(), 'checkpointSearch/best_model.ckpt')
def load_model(model):
assert os.path.exists('checkpointSearch/best_model.ckpt'), 'Weights for saved model not found'
model.load_state_dict(torch.load('checkpointSearch/best_model.ckpt'))
def gVar(data):
tensor = data
if isinstance(data, np.ndarray):
tensor = torch.from_numpy(data)
else:
assert isinstance(tensor, torch.Tensor)
if use_cuda:
tensor = tensor.cuda()
return tensor
def getAntiMask(size):
ans = np.zeros([size, size])
for i in range(size):
for j in range(0, i + 1):
ans[i, j] = 1.0
return ans
def getAdMask(size):
ans = np.zeros([size, size])
for i in range(size - 1):
ans[i, i + 1] = 1.0
return ans
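# Shape sketch of the two masks: getAntiMask(3) is the lower-triangular matrix
#   [[1,0,0],
#    [1,1,0],
#    [1,1,1]]
# while getAdMask(3) marks only the immediately following position:
#   [[0,1,0],
#    [0,0,1],
#    [0,0,0]]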
def train():
train_set = SumDataset(args, "train")
#print(len(train_set.data[0]))
args.Code_Vocsize = len(train_set.Code_Voc)
args.Nl_Vocsize = len(train_set.Nl_Voc)
args.Vocsize = len(train_set.Char_Voc)
args.rulenum = len(train_set.ruledict) + args.NlLen
dev_set = SumDataset(args, "test")
data_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=args.batch_size,
shuffle=True, drop_last=True, num_workers=1)
model = Decoder(args)
load_model(model)
optimizer = optim.Adam(model.parameters(), lr=1e-4)
optimizer = ScheduledOptim(optimizer, d_model=args.embedding_size, n_warmup_steps=4000)
maxAcc= 0
maxC = 0
if torch.cuda.is_available():
print('using GPU')
#os.environ["CUDA_VISIBLE_DEVICES"] = "3"
model = model.cuda()
model = nn.DataParallel(model, device_ids=[0, 1])
antimask = gVar(getAntiMask(args.CodeLen))
#model.to()
for epoch in range(100000):
j = 0
for dBatch in tqdm(data_loader):
if j % 3000 == 0:
devloader = torch.utils.data.DataLoader(dataset=dev_set, batch_size=56,
shuffle=False, drop_last=True, num_workers=1)
model = model.eval()
accs = []
tcard = []
tmp = []
antimask2 = antimask.unsqueeze(0).repeat(56, 1, 1).unsqueeze(1)
for devBatch in tqdm(devloader):
for i in range(len(devBatch)):
devBatch[i] = gVar(devBatch[i])
with torch.no_grad():
_, pre = model(devBatch[0], devBatch[1], devBatch[2], devBatch[3], devBatch[4], devBatch[6], devBatch[7], devBatch[8], antimask2, devBatch[5])
pred = pre.argmax(dim=-1)
resmask = torch.gt(devBatch[5], 0)
acc = (torch.eq(pred, devBatch[5]) * resmask).float()#.mean(dim=-1)
predres = (1 - acc) * pred.float() * resmask.float()
accsum = torch.sum(acc, dim=-1)
'''tmp = []
for i in range(len(predres)):
tmp2 = []
for j in range(len(predres[i])):
if predres[i, j] != 0:
tmp.append((predres[i, j].item(), devBatch[5][i, j].item()))
print(tmp)'''
resTruelen = torch.sum(resmask, dim=-1).float()
for x in torch.eq(accsum, resTruelen):
if x == 1:#print(torch.eq(accsum, resTruelen))
tmp.append(1)
else:
tmp.append(0)
cnum = (torch.eq(accsum, resTruelen)).sum().float()
acc = acc.sum(dim=-1) / resTruelen
accs.append(acc.mean().item())
tcard.append(cnum.item())
#print(devBatch[5])
#print(predres)
tnum = np.sum(tcard)
acc = np.mean(accs)
#wandb.log({"accuracy":acc})
print(str(acc), str(tnum))
print(tmp)
exit(0)
if maxC < tnum or maxC == tnum and maxAcc < acc:
maxC = tnum
maxAcc = acc
print("find better acc " + str(maxAcc))
save_model(model.module)
antimask2 = antimask.unsqueeze(0).repeat(args.batch_size, 1, 1).unsqueeze(1)
model = model.train()
for i in range(len(dBatch)):
dBatch[i] = gVar(dBatch[i])
loss, _ = model(dBatch[0], dBatch[1], dBatch[2], dBatch[3], dBatch[4], dBatch[6], dBatch[7], dBatch[8], antimask2, dBatch[5])
loss = torch.mean(loss)
optimizer.zero_grad()
loss.backward()
optimizer.step_and_update_lr()
j += 1
import time
class Node:
def __init__(self, name, d):
self.name = name
self.namewithouttype = name.split('🚀')[0]
self.id = d
self.father = None
self.child = []
self.sibiling = None
self.expanded = False
self.fatherlistID = 0
self.treestr = ""
self.block = ""
self.num = 0
self.fname = ""
self.position = None
self.copyable = True
self.possibility = 0#max(min(np.random.normal(0.1, 0.08, 10)[0], 1), 0)
def printTree(self, r):
#print(r.name)
s = r.name + "" + " "#print(r.name)
if len(r.child) == 0:
s += "^ "
return s
#r.child = sorted(r.child, key=lambda x:x.name)
for c in r.child:
s += self.printTree(c)
s += "^ "#print(r.name + "^")
return s
def printTreeWithoutTer(self, r):
#print(r.name)
if len(r.child) == 0:
s = r.name[:-4] + "" + " "
else:
s = r.name + "" + " "#print(r.name)
if len(r.child) == 0:
s += "^ "
return s
#r.child = sorted(r.child, key=lambda x:x.name)
for c in r.child:
s += self.printTree(c)
s += "^ "#print(r.name + "^")
return s
def printTreeWithID(self, r):
#print(r.name)
if len(r.child) == 0:
s = r.name + "_" + str(r.id) + " "
else:
s = r.name + "_" + str(r.id) + " "#print(r.name)
if len(r.child) == 0:
s += "^ "
return s
#r.child = sorted(r.child, key=lambda x:x.name)
for c in r.child:
s += self.printTree(c)
s += "^ "#print(r.name + "^")
return s
def printTreeWithoutType(self, r):
#print(r.name)
s = r.namewithouttype + "" + " "#print(r.name)
if len(r.child) == 0:
s += "^ "
return s
#r.child = sorted(r.child, key=lambda x:x.name)
for c in r.child:
s += self.printTreeWithoutType(c)
s += "^ "#print(r.name + "^")
return s
def getNum(self):
return len(self.getTreestr().strip().split())
def getTreeProb(self, r):
ans = [r.possibility]
if len(r.child) == 0:
return ans
#r.child = sorted(r.child, key=lambda x:x.name)
for c in r.child:
ans += self.getTreeProb(c)
return ans
def getTreestr(self):
if self.treestr == "":
self.treestr = self.printTree(self)
return self.treestr
else:
return self.treestr
def printTreeWithVar(self, node, var):
ans = ""
if node.name in var:
ans += var[node.name] + " "
else:
ans += node.name + " "
for x in node.child:
ans += self.printTreeWithVar(x, var)
ans += '^ '
return ans
def printTreeWithLine(self, node):
ans = ""
if node.position:
ans += node.name + "-" + str(node.position.line)
else:
ans += node.name + "-"
for x in node.child:
ans += self.printTreeWithLine(x)
ans += '^ '
return ans
def printprob(self):
ans = self.name + str(self.possibility) + ' '
for x in self.child:
ans += x.printprob()
ans += '^ '
return ans
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
if self.name.lower() != other.name.lower():
return False
if len(self.child) != len(other.child):
return False
if True:#self.name == 'arguments' and (self.father.name == 'Or' or self.father.name == "And") :
return self.getTreestr().strip() == other.getTreestr().strip() #and self.block == other.block
class NodeWithType:
def __init__(self, name, d, type='init'):
self.name = name
lst = name.split('🚀')
self.name = lst[0]
self.nameo = name
if lst[1] != 'init' and lst[1] != 'init_ter' and 'ptype' not in lst[1]:
self.namer = name
else:
self.namer = lst[0]
if '_ter' in name and name[-4:] == '_ter':
self.namer = self.name + '_ter'
self.name = self.namer
self.id = d
self.father = None
self.child = []
self.sibiling = None
self.expanded = False
self.fatherlistID = 0
self.treestr = ""
self.block = ""
self.num = 0
self.type = type
self.possibility = 0#max(min(np.random.normal(0.1, 0.08, 10)[0], 1), 0)
def printTree(self, r):
s = r.name + " "#print(r.name)
if len(r.child) == 0:
s += "^ "
return s
#r.child = sorted(r.child, key=lambda x:x.name)
for c in r.child:
s += self.printTree(c)
s += "^ "#print(r.name + "^")
return s
def getNum(self):
return len(self.getTreestr().strip().split())
def getTreeProb(self, r):
ans = [r.possibility]
if len(r.child) == 0:
return ans
#r.child = sorted(r.child, key=lambda x:x.name)
for c in r.child:
ans += self.getTreeProb(c)
return ans
def getTreestr(self):
if self.treestr == "":
self.treestr = self.printTree(self)
return self.treestr
else:
return self.treestr
def printTreeWithVar(self, node, var):
ans = ""
if node.name in var:
ans += var[node.name] + " "
else:
ans += node.namer + " "
for x in node.child:
ans += self.printTreeWithVar(x, var)
ans += '^ '
return ans
def printTreeWithID(self, r):
#print(r.name)
if len(r.child) == 0:
s = r.name + "_" + str(r.id) + " "
else:
s = r.name + "_" + str(r.id) + " "#print(r.name)
if len(r.child) == 0:
s += "^ "
return s
#r.child = sorted(r.child, key=lambda x:x.name)
for c in r.child:
s += self.printTreeWithID(c)
s += "^ "#print(r.name + "^")
return s
def printTreeWithType(self, node):
ans = ""
ans += node.name + '🚀' + node.type + " "
for x in node.child:
ans += self.printTreeWithType(x)
ans += '^ '
return ans
def printprob(self):
ans = self.name + str(self.possibility) + ' '
for x in self.child:
ans += x.printprob()
ans += '^ '
return ans
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
if self.name.lower() != other.name.lower():
return False
if len(self.child) != len(other.child):
return False
if True:#self.name == 'arguments' and (self.father.name == 'Or' or self.father.name == "And") :
return self.getTreestr().strip() == other.getTreestr().strip() #and self.block == other.block
class SearchNode:
def __init__(self, ds):
self.state = [ds.ruledict["start -> Lambda"]]
self.prob = 0
self.aprob = 0
self.bprob = 0
self.root = Node("Lambda", 2)
self.inputparent = ["start"]
self.parent = np.zeros([args.NlLen + args.CodeLen, args.NlLen + args.CodeLen])
#self.parent[args.NlLen]
self.expanded = None
self.ruledict = ds.rrdict
self.expandedname = []
self.depth = [1]
for x in ds.ruledict:
self.expandedname.append(x.strip().split()[0])
self.everTreepath = []
def selcetNode(self, root):
if not root.expanded and root.name in self.expandedname and root.name != "arguments" and self.state[root.fatherlistID] < len(self.ruledict):
return root
else:
for x in root.child:
ans = self.selcetNode(x)
if ans:
return ans
if root.name == "arguments" and root.expanded == False:
return root
return None
def selectExpandedNode(self):
self.expanded = self.selcetNode(self.root)
def getRuleEmbedding(self, ds, nl):
inputruleparent = []
inputrulechild = []
for x in self.state:
if x >= len(ds.rrdict):
inputruleparent.append(ds.Get_Em(["value"], ds.Code_Voc)[0])
inputrulechild.append(ds.pad_seq(ds.Get_Em(["copyword"], ds.Code_Voc), ds.Char_Len))
else:
rule = ds.rrdict[x].strip().lower().split()
inputruleparent.append(ds.Get_Em([rule[0]], ds.Code_Voc)[0])
inputrulechild.append(ds.pad_seq(ds.Get_Em(rule[2:], ds.Code_Voc), ds.Char_Len))
inputrule = ds.pad_seq(self.state, ds.Code_Len)
inputrulechild = ds.pad_list(inputrulechild, ds.Code_Len, ds.Char_Len)
inputruleparent = ds.pad_seq(ds.Get_Em(self.inputparent, ds.Code_Voc), ds.Code_Len)
inputdepth = ds.pad_seq(self.depth, ds.Code_Len)
return inputrule, inputrulechild, inputruleparent, inputdepth
def getTreePath(self, ds):
tmppath = [self.expanded.name.lower()]
node = self.expanded.father
while node:
tmppath.append(node.name.lower())
node = node.father
tmp = ds.pad_seq(ds.Get_Em(tmppath, ds.Code_Voc), 10)
self.everTreepath.append(tmp)
return ds.pad_list(self.everTreepath, ds.Code_Len, 10)
def applyrule(self, rule, nl):
if rule >= len(self.ruledict):
if rule - len(self.ruledict) >= len(nl):
return False
if self.expanded.depth + 1 >= 40:
nnode = Node(nl[rule - len(self.ruledict)], 39)
else:
nnode = Node(nl[rule - len(self.ruledict)], self.expanded.depth + 1)
self.expanded.child.append(nnode)
nnode.father = self.expanded
nnode.fatherlistID = len(self.state)
else:
rules = self.ruledict[rule]
#print(rules)
if rules.strip().split()[0] != self.expanded.name:
return False
#assert(rules.strip().split()[0] == self.expanded.name)
if rules == self.expanded.name + " -> End ":
self.expanded.expanded = True
else:
for x in rules.strip().split()[2:]:
if self.expanded.depth + 1 >= 40:
nnode = Node(x, 39)
else:
nnode = Node(x, self.expanded.depth + 1)
#nnode = Node(x, self.expanded.depth + 1)
self.expanded.child.append(nnode)
nnode.father = self.expanded
nnode.fatherlistID = len(self.state)
#self.parent.append(self.expanded.fatherlistID)
self.parent[args.NlLen + len(self.depth), args.NlLen + self.expanded.fatherlistID] = 1
if rule >= len(self.ruledict):
self.parent[args.NlLen + len(self.depth), rule - len(self.ruledict)] = 1
self.state.append(rule)
self.inputparent.append(self.expanded.name.lower())
self.depth.append(self.expanded.depth)
if self.expanded.name != "arguments":
self.expanded.expanded = True
return True
def printTree(self, r):
s = r.name + " "#print(r.name)
if len(r.child) == 0:
s += "^ "
return s
#r.child = sorted(r.child, key=lambda x:x.name)
for c in r.child:
s += self.printTree(c)
s += "^ "#print(r.name + "^")
return s
def getTreestr(self):
return self.printTree(self.root)
beamss = []
def BeamSearch(inputnl, vds, model, beamsize, batch_size, k):
args.batch_size = len(inputnl[0])
with torch.no_grad():
beams = {}
for i in range(batch_size):
beams[i] = [SearchNode(vds)]
index = 0
antimask = gVar(getAntiMask(args.CodeLen))
endnum = {}
continueSet = {}
while True:
print(index)
tmpbeam = {}
ansV = {}
if len(endnum) == args.batch_size:
#print(beams[0][0].state)
#print(beams[0][0].inputparent)
break
if index >= args.CodeLen:
break
for p in range(beamsize):
tmprule = []
tmprulechild = []
tmpruleparent = []
tmptreepath = []
tmpAd = []
validnum = []
tmpdepth = []
for i in range(args.batch_size):
if p >= len(beams[i]):
continue
x = beams[i][p]
#print(x.getTreestr())
x.selectExpandedNode()
if x.expanded == None or len(x.state) >= args.CodeLen:
ansV.setdefault(i, []).append(x)
else:
#print(x.expanded.name)
validnum.append(i)
a, b, c, d = x.getRuleEmbedding(vds, vds.nl[args.batch_size * k + i])
tmprule.append(a)
tmprulechild.append(b)
tmpruleparent.append(c)
tmptreepath.append(x.getTreePath(vds))
#tmp = np.eye(vds.Code_Len)[x.parent]
#tmp = np.concatenate([tmp, np.zeros([vds.Code_Len, vds.Code_Len])], axis=0)[:vds.Code_Len,:]#self.pad_list(tmp, self.Code_Len, self.Code_Len)
tmpAd.append(x.parent)
tmpdepth.append(d)
#print("--------------------------")
if len(tmprule) == 0:
continue
batch_size = len(tmprule)
antimasks = antimask.unsqueeze(0).repeat(batch_size, 1, 1).unsqueeze(1)
tmprule = np.array(tmprule)
tmprulechild = np.array(tmprulechild)
tmpruleparent = np.array(tmpruleparent)
tmptreepath = np.array(tmptreepath)
tmpAd = np.array(tmpAd)
tmpdepth = np.array(tmpdepth)
'''print(inputnl[3][:index + 1], tmprule[:index + 1])
assert(np.array_equal(inputnl[3][0][:index + 1], tmprule[0][:index + 1]))
assert(np.array_equal(inputnl[4][0][:index + 1], tmpruleparent[0][:index + 1]))
assert(np.array_equal(inputnl[5][0][:index + 1], tmprulechild[0][:index + 1]))
assert(np.array_equal(inputnl[6][0][:index + 1], tmpAd[0][:index + 1]))
assert(np.array_equal(inputnl[7][0][:index + 1], tmptreepath[0][:index + 1]))
assert(np.array_equal(inputnl[8][0][:index + 1], tmpdepth[0][:index + 1]))'''
result = model(gVar(inputnl[0][validnum]), gVar(inputnl[1][validnum]), gVar(tmprule), gVar(tmpruleparent), gVar(tmprulechild), gVar(tmpAd), gVar(tmptreepath), gVar(tmpdepth), antimasks, None, "test")
results = result.data.cpu().numpy()
#print(result, inputCode)
currIndex = 0
for j in range(args.batch_size):
if j not in validnum:
continue
x = beams[j][p]
tmpbeamsize = beamsize
result = np.negative(results[currIndex, index])
currIndex += 1
cresult = np.negative(result)
indexs = np.argsort(result)
for i in range(tmpbeamsize):
if tmpbeamsize >= 30:
break
copynode = deepcopy(x)
#if indexs[i] >= len(vds.rrdict):
#print(cresult[indexs[i]])
c = copynode.applyrule(indexs[i], vds.nl[args.batch_size * k + j])
if not c:
tmpbeamsize += 1
continue
copynode.prob = copynode.prob + np.log(cresult[indexs[i]])
tmpbeam.setdefault(j, []).append(copynode)
#print(tmpbeam[0].prob)
for i in range(args.batch_size):
if i in ansV:
if len(ansV[i]) == beamsize:
endnum[i] = 1
for j in range(args.batch_size):
if j in tmpbeam:
if j in ansV:
for x in ansV[j]:
tmpbeam[j].append(x)
beams[j] = sorted(tmpbeam[j], key=lambda x: x.prob, reverse=True)[:beamsize]
index += 1
for p in range(beamsize):
beam = []
nls = []
for i in range(len(beams)):
#print(beams[i][p].getTreestr())
if p >= len(beams):
beam.append(beams[i][len(beams[i]) - 1])
else:
beam.append(beams[i][p])
nls.append(vds.nl[args.batch_size * k + i])
finetune(beam, k, nls, args.batch_size)
for i in range(len(beams)):
beamss.append(deepcopy(beams[i]))
for i in range(len(beams)):
mans = -1000000
lst = beams[i]
tmpans = 0
for y in lst:
#print(y.getTreestr())
if y.prob > mans:
mans = y.prob
tmpans = y
beams[i] = tmpans
return beams
#return beams
def test():
pre()
dev_set = SumDataset(args, "test")
print(len(dev_set))
args.Nl_Vocsize = len(dev_set.Nl_Voc)
args.Code_Vocsize = len(dev_set.Code_Voc)
args.Vocsize = len(dev_set.Char_Voc)
args.rulenum = len(dev_set.ruledict) + args.NlLen
args.batch_size = 56
rdic = {}
for x in dev_set.Nl_Voc:
rdic[dev_set.Nl_Voc[x]] = x
#print(dev_set.Nl_Voc)
model = Decoder(args)
if torch.cuda.is_available():
print('using GPU')
#os.environ["CUDA_VISIBLE_DEVICES"] = "3"
model = model.cuda()
devloader = torch.utils.data.DataLoader(dataset=dev_set, batch_size=args.batch_size,
shuffle=False, drop_last=False, num_workers=0)
model = model.eval()
load_model(model)
f = open("outval.txt", "w")
index = 0
for x in tqdm(devloader):
ans = BeamSearch((x[0], x[1], x[5], x[2], x[3], x[4], x[6], x[7], x[8]), dev_set, model, 10, args.batch_size, index)
index += 1
for i in range(args.batch_size):
beam = ans[i]
#print(beam[0].parent, beam[0].everTreepath, beam[0].state)
f.write(beam.getTreestr())
f.write("\n")
#exit(0)
#f.write(" ".join(ans.ans[1:-1]))
#f.write("\n")
#f.flush()#print(ans)
open("beams.pkl", "wb").write(pickle.dumps(beamss))
if __name__ == "__main__":
np.random.seed(int(time.time()))
if sys.argv[1] == "train":
train()
else:
test()
#test()
|
py | 7df9c1cf526322402fa4fff173a62d0014e849b4 | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Zenacoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool limiting together/eviction with the wallet."""
from decimal import Decimal
from test_framework.test_framework import ZenacoinTestFramework
from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error, create_confirmed_utxos, create_lots_of_big_transactions, gen_return_txouts
class MempoolLimitTest(ZenacoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [[
"-acceptnonstdtxn=1",
"-maxmempool=5",
"-spendzeroconfchange=0",
]]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
txouts = gen_return_txouts()
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
self.log.info('Check that mempoolminfee is minrelaytxfee')
assert_equal(self.nodes[0].getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
assert_equal(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
txids = []
utxos = create_confirmed_utxos(relayfee, self.nodes[0], 91)
self.log.info('Create a mempool tx that will be evicted')
us0 = utxos.pop()
inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
outputs = {self.nodes[0].getnewaddress() : 0.0001}
tx = self.nodes[0].createrawtransaction(inputs, outputs)
self.nodes[0].settxfee(relayfee) # specifically fund this tx with low fee
txF = self.nodes[0].fundrawtransaction(tx)
self.nodes[0].settxfee(0) # return to automatic fee selection
txFS = self.nodes[0].signrawtransactionwithwallet(txF['hex'])
txid = self.nodes[0].sendrawtransaction(txFS['hex'])
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
base_fee = relayfee*100
for i in range (3):
txids.append([])
txids[i] = create_lots_of_big_transactions(self.nodes[0], txouts, utxos[30*i:30*i+30], 30, (i+1)*base_fee)
self.log.info('The tx should be evicted by now')
assert txid not in self.nodes[0].getrawmempool()
txdata = self.nodes[0].gettransaction(txid)
assert txdata['confirmations'] == 0 #confirmation should still be 0
self.log.info('Check that mempoolminfee is larger than minrelaytxfee')
assert_equal(self.nodes[0].getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
assert_greater_than(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
self.log.info('Create a mempool tx that will not pass mempoolminfee')
us0 = utxos.pop()
inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
outputs = {self.nodes[0].getnewaddress() : 0.0001}
tx = self.nodes[0].createrawtransaction(inputs, outputs)
# specifically fund this tx with a fee < mempoolminfee, >= than minrelaytxfee
txF = self.nodes[0].fundrawtransaction(tx, {'feeRate': relayfee})
txFS = self.nodes[0].signrawtransactionwithwallet(txF['hex'])
assert_raises_rpc_error(-26, "mempool min fee not met", self.nodes[0].sendrawtransaction, txFS['hex'])
if __name__ == '__main__':
MempoolLimitTest().main()
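# This functional test is meant to be run directly as a script, e.g. (hypothetical
# path, following the usual test/functional layout of Core-derived projects):
#   python3 test/functional/mempool_limit.py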
|
py | 7df9c2e2ebd6ecedbcbc5c9b671bd382b33e7e8c | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : Statistics information functions
Case Name : pg_stat_get_wal_senders(). Description: query walsender information on the primary side.
Description :
1. Query walsender information on the primary side, from the primary node
2. Query walsender information on the primary side, from the standby node
Expect :
1. Querying walsender information from the primary node succeeds
2. Querying walsender information from the standby node fails
History :
"""
import os
import unittest
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Logger import Logger
Primary_SH = CommonSH('PrimaryDbUser')
@unittest.skipIf('6002 ' not in Primary_SH.get_db_cluster_status('detail'),
'Not executed in a single-node environment')
class Tools(unittest.TestCase):
def setUp(self):
self.log = Logger()
self.log.info(f'-----{os.path.basename(__file__)} start-----')
self.commonsh = CommonSH()
self.commonsh1 = CommonSH('Standby1DbUser')
def test_built_in_func(self):
text = '----step1. Query walsender information from the primary node----'
self.log.info(text)
sql_cmd = self.commonsh.execut_db_sql(
f'select pg_stat_get_wal_senders();')
self.log.info(sql_cmd)
str_info = sql_cmd.split('\n')[-2]
self.log.info(str_info)
num = len(str_info.split(','))
self.log.info(f'num = {num}')
if num == 21:
self.log.info('Querying walsender information from the primary node succeeded')
else:
raise Exception(f'Function execution error, please check {text}')
text = '----step2. Query walsender information from the standby node----'
self.log.info(text)
sql_cmd = self.commonsh1.execut_db_sql(
f'select pg_stat_get_wal_senders();')
self.log.info(sql_cmd)
str_info = sql_cmd.split('\n')[-2]
self.log.info(str_info)
num = len(str_info.split(','))
self.log.info(f'num = {num}')
if num == 1:
self.log.info('Querying walsender information from the standby node failed')
def tearDown(self):
self.log.info(f'-----{os.path.basename(__file__)} start-----')
|
py | 7df9c2f76b733c3c58901b51ff923d28c1603ff4 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to distributed training."""
# pylint:disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.distribute import distribute_coordinator_context as dc_context
from tensorflow.python.distribute import reduce_util
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.mode_keys import ModeKeys
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
def set_weights(distribution_strategy, dist_model, weights):
"""Sets the weights of the replicated models.
The weights of the replicated models are set to the weights of the original
model. The weights of the replicated model are Mirrored variables and hence
we need to use the `update` call within a DistributionStrategy scope.
Args:
distribution_strategy: DistributionStrategy used to distribute training
and validation.
dist_model: The replicated models on the different devices.
weights: The weights of the original model.
"""
assign_ops = []
for layer in dist_model.layers:
num_param = len(layer.weights)
layer_weights = weights[:num_param]
for sw, w in zip(layer.weights, layer_weights):
if ops.executing_eagerly_outside_functions():
sw.assign(w)
else:
assign_ops.append(distribution_strategy.unwrap(sw.assign(w)))
weights = weights[num_param:]
if not ops.executing_eagerly_outside_functions():
K.get_session().run(assign_ops)
def unwrap_values(distribution_strategy, grouped_inputs, grouped_outputs,
grouped_updates=None, grouped_session_args=None,
with_loss_tensor=False):
"""Unwrap and return the list of values contained in the PerDevice parameters.
This function calls `flatten_perdevice_values` to parse each of the input
parameters into a list of values on the different devices. If we set
`with_loss_tensor` to be True, we also call `reduce` on the list of losses on
the different devices to give us one loss tensor.
Args:
distribution_strategy: DistributionStrategy used to distribute training and
validation.
grouped_inputs: PerDevice inputs returned from the train or test function
that we ran on each device.
grouped_outputs: PerDevice outputs returned from the train or test function
that we ran on each device.
grouped_updates: PerDevice updates returned from the train or test function
that we ran on each device.
grouped_session_args: PerDevice session args returned from the train or
test function that we ran on each device.
with_loss_tensor: Boolean that indicates if we need to add the reduced loss
tensor as one of the outputs.
Returns:
Values of each of the PerDevice parameters.
"""
# Unwrap per device values returned from each model's train function.
# This will be used to construct the main train function.
all_inputs = flatten_perdevice_values(distribution_strategy,
grouped_inputs)
if with_loss_tensor:
# reduce loss tensor before adding it to the list of fetches
loss = distribution_strategy.reduce(reduce_util.ReduceOp.SUM,
grouped_outputs[0])
all_outputs = flatten_perdevice_values(distribution_strategy,
grouped_outputs[1:])
all_outputs = [loss] + all_outputs
else:
all_outputs = flatten_perdevice_values(distribution_strategy,
grouped_outputs)
if grouped_updates:
all_updates = flatten_perdevice_values(distribution_strategy,
grouped_updates)
else:
all_updates = None
all_session_args = {}
if grouped_session_args:
grouped_feed_dict = grouped_session_args.get('feed_dict')
if grouped_feed_dict:
all_session_args['feed_dict'] = flatten_perdevice_values(
distribution_strategy, grouped_feed_dict)
grouped_fetches = grouped_session_args.get('fetches')
if grouped_fetches:
all_session_args['fetches'] = flatten_perdevice_values(
distribution_strategy, grouped_fetches)
# TODO(priyag): Return only non empty/None values
return all_inputs, all_outputs, all_updates, all_session_args
def flatten_perdevice_values(distribution_strategy, perdevice_values):
"""Unwraps and flattens a nest of PerDevice parameters.
PerDevice values have one value associated with each device. Each entry in
the PerDevice dict has a device `key` and the corresponding value on the
device as the `value`. In this function we take a PerDevice value or a list of
PerDevice values and return all the values in the PerDevice dict.
Args:
distribution_strategy: DistributionStrategy used to distribute training and
validation.
perdevice_values: List of PerDevice object or a single PerDevice object.
Returns:
List of values of all the PerDevice objects.
"""
# This function takes a PerDevice object or a list of PerDevice objects and
# returns all the values associated with it.
return [e for flattened in nest.flatten(perdevice_values)
for e in distribution_strategy.unwrap(flattened)]
def validate_callbacks(input_callbacks, optimizer):
"""Validate whether given callbacks are supported by DistributionStrategy.
Args:
input_callbacks: List of callbacks passed by the user to fit.
optimizer: Optimizer instance used to train the model.
Raises:
ValueError: If `LearningRateScheduler` or `ReduceLROnPlateau` is one of the
callbacks passed.
ValueError: If `histogram_freq` or `write_grads` is one of the parameters
passed as part of the TensorBoard callback.
"""
if input_callbacks:
for callback in input_callbacks:
if callback not in [callbacks.TensorBoard, callbacks.ReduceLROnPlateau,
callbacks.LearningRateScheduler, callbacks.CSVLogger,
callbacks.EarlyStopping, callbacks.ModelCheckpoint,
callbacks.TerminateOnNaN, callbacks.ProgbarLogger,
callbacks.History, callbacks.RemoteMonitor]:
logging.warning('Your input callback is not one of the predefined '
'Callbacks that supports DistributionStrategy. You '
'might encounter an error if you access one of the '
'model\'s attributes as part of the callback since '
'these attributes are not set. You can access each of '
'the individual distributed models using the '
'`_grouped_model` attribute of your original model.')
if isinstance(callback, (callbacks.LearningRateScheduler,
callbacks.ReduceLROnPlateau)):
if not isinstance(optimizer, optimizer_v2.OptimizerV2):
raise ValueError('You must specify a Keras Optimizer V2 when using '
'%s callback with DistributionStrategy.' % callback)
# If users want to use the TensorBoard callback they cannot use certain
# features of the callback that involve accessing model attributes and
# running ops.
if isinstance(callback, callbacks.TensorBoard):
if callback.__getattribute__('histogram_freq'):
logging.warning(
UserWarning(
'`histogram_freq` in the TensorBoard callback is not '
'supported when using DistributionStrategy. Setting '
'`histogram_freq` to `0`.'))
callback.histogram_freq = 0
if callback.__getattribute__('write_grads'):
logging.warning(
UserWarning(
'`write_grads` in the TensorBoard callback is not supported '
'when using DistributionStrategy. Setting `write_grads` '
'to `False`.'))
callback.histogram_freq = False
def validate_distributed_dataset_inputs(distribution_strategy, x, y,
sample_weights=None):
"""Validate all the components of a DistributedValue Dataset input.
Args:
distribution_strategy: The current DistributionStrategy used to call
`fit`/`evaluate`.
x: Input Dataset DistributedValue object. For example, when we use
`MirroredStrategy` this is a PerDevice object with a tensor for each
device set in the dict. x can also be a tuple or dict. The keys of the
dict should match the names of the input layers of the model.
y: Target Dataset DistributedValue object. For example, when we use
`MirroredStrategy` this is a PerDevice object with a tensor for each
device set in the dict. y can also be a tuple or dict. The keys of the
dict should match the names of the output layers of the model.
sample_weights: Sample weights Dataset DistributedValue object. For example,
when we use `MirroredStrategy` this is a PerDevice object with a tensor
for each device set in the dict.
Returns:
The unwrapped values list of the x and y DistributedValues inputs.
Raises:
ValueError: If x and y do not have support for being evaluated as tensors.
or if x and y contain elements that are not tensors or if x and y
contain elements that have a shape or dtype mismatch.
"""
# If the input and target used to call the model are not dataset tensors,
# we need to raise an error. When using a DistributionStrategy, the input
# and targets to a model should be from a `tf.data.Dataset`.
# If each element of x and y are not tensors, we cannot standardize and
# validate the input and targets.
x_values_list = validate_per_device_inputs(distribution_strategy, x)
if y is not None:
y_values_list = validate_per_device_inputs(distribution_strategy, y)
else:
y_values_list = None
if sample_weights is not None:
sample_weights_list = validate_per_device_inputs(distribution_strategy,
sample_weights)
else:
sample_weights_list = None
# Return the unwrapped values to avoid calling `unwrap` a second time.
return x_values_list, y_values_list, sample_weights_list
def validate_per_device_inputs(distribution_strategy, x):
"""Validates PerDevice dataset input list.
Args:
distribution_strategy: The current DistributionStrategy used to call
`fit`, `evaluate` and `predict`.
x: A list of PerDevice objects that represent the input or
target values.
Returns:
List containing the first element of each of the PerDevice objects in
the input list.
Raises:
ValueError: If any of the objects in the `per_device_list` is not a tensor.
"""
# Convert the inputs and targets into a list of PerDevice objects.
per_device_list = nest.flatten(x)
x_values_list = []
for x in per_device_list:
if not tensor_util.is_tensor(x):
raise ValueError('Dataset input to the model should be tensors instead '
'they are of type {}'.format(type(x)))
# At this point both x and y contain tensors in the `DistributedValues`
# structure.
x_values = distribution_strategy.unwrap(x)
# Validate that the shape and dtype of all the elements in x are the same.
validate_all_tensor_shapes(x, x_values)
validate_all_tensor_types(x, x_values)
x_values_list.append(x_values[0])
return x_values_list
def validate_all_tensor_types(x, x_values):
x_dtype = x_values[0].dtype
for i in range(1, len(x_values)):
if x_dtype != x_values[i].dtype:
raise ValueError('Input tensor dtypes do not match for distributed tensor'
' inputs {}'.format(x))
def validate_all_tensor_shapes(x, x_values):
# Validate that the shape of all the elements in x have the same shape
x_shape = x_values[0].get_shape().as_list()
for i in range(1, len(x_values)):
if x_shape != x_values[i].get_shape().as_list():
raise ValueError('Input tensor shapes do not match for distributed tensor'
' inputs {}'.format(x))
def _wait_for_variable_initialization(session):
"""Utility to wait for variables to be initialized."""
all_variables = K._get_variables(K.get_graph()) # pylint: disable=protected-access
candidate_vars = []
for v in all_variables:
if not getattr(v, '_keras_initialized', False):
candidate_vars.append(v)
if not candidate_vars:
return
while True:
is_initialized = session.run(
[variables.is_variable_initialized(v) for v in candidate_vars])
uninitialized_vars = []
for flag, v in zip(is_initialized, candidate_vars):
if not flag:
uninitialized_vars.append(v)
v._keras_initialized = True # pylint: disable=protected-access
if not uninitialized_vars:
break
def init_restore_or_wait_for_variables():
"""Initialize or restore variables or wait for variables to be initialized."""
session = K._get_session() # pylint: disable=protected-access
worker_context = dc_context.get_current_worker_context()
if not worker_context or worker_context.experimental_should_init:
# TODO(yuefengz): if checkpoints exist, restore from checkpoint.
K._initialize_variables(session) # pylint: disable=protected-access
else:
_wait_for_variable_initialization(session)
def validate_inputs(x, y, distribution_strategy, allow_partial_batch=False):
"""Validate inputs when using DistributionStrategy.
Args:
x: Model Inputs.
y: Model Targets.
distribution_strategy: The DistributionStrategy with which the model is
compiled.
allow_partial_batch: Boolean. If false, datasets must have fully
defined shapes.
Raises:
ValueError: if input is not a Dataset or a numpy array(when we use
MirroredStrategy).
"""
if (isinstance(x, iterator_ops.Iterator) or
isinstance(y, iterator_ops.Iterator)):
raise ValueError('`DistributionStrategy` does not support inputs of type '
'Iterator. You must pass a `tf.data.Dataset` object or a '
'numpy array as input.')
if is_tpu_strategy(distribution_strategy):
for i in [x, y]:
if (isinstance(i, dataset_ops.DatasetV2) and not allow_partial_batch):
if not is_dataset_shape_fully_defined(i):
raise ValueError(
'Using TPUs currently requires fully defined shapes. Either use '
'set_shape() on the input tensors or use '
'dataset.batch(..., drop_remainder=True).'
'Found unknown shape in input {}.'.format(i))
# TODO(b/118776054): Currently we support global batch size for TPUStrategy and
# core MirroredStrategy only. Remove this check when contrib MirroredStrategy is
# no longer needed.
def global_batch_size_supported(distribution_strategy):
return distribution_strategy.extended._global_batch_size # pylint: disable=protected-access
# TODO(sourabhbajaj): Remove this once we use the same API for all strategies.
def is_tpu_strategy(strategy):
"""We're executing TPU Strategy."""
return strategy is not None and strategy.__class__.__name__ == 'TPUStrategy'
def is_dataset_shape_fully_defined(dataset):
"""Returns whether a dataset contains a final partial batch."""
shapes = nest.flatten(dataset.output_shapes)
unknown_shapes = [s for s in shapes if not s.is_fully_defined()]
return not unknown_shapes
def get_input_params(distribution_strategy, first_x_value, steps, batch_size,
mode=None):
"""Calculate the number of batches and steps/steps_per_epoch.
Args:
distribution_strategy: The DistributionStrategy used to compile the model.
first_x_value: This is the first input numpy array that is passed in as the
model input.
steps: The specified number of steps.
batch_size: The specified batch_size.
mode: ModeKey representing whether input will be used for training,
evaluation, or prediction. This is used to relax the constraints on
consuming all the training samples to keep compatibility till we
support partial batches. If none, then partial batches are not allowed.
Returns:
steps: The steps or steps_per_epoch argument depending on if a user is
calling `fit`, `evaluate` or `predict`. If the is_training flag is set
we don't require the number of samples to be used completely.
batch_size: The batch size to be used in model iterations.
Raises:
ValueError: If the number of batches or steps evaluates to 0.
"""
num_samples = first_x_value.shape[0]
# TODO(b/118776054): Use global batch size for Keras/DS support.
# Currently this is only supported in TPUStrategy and CoreMirroredStrategy.
use_per_replica_batch = not global_batch_size_supported(
distribution_strategy)
# Partial batches are allowed for training as we repeat the
# dataset when converting numpy arrays into a dataset.
# For other modes uneven batch sizes are not allowed except
# for `predict()` on TPUStrategy.
allow_partial_batch = (mode == ModeKeys.TRAIN or
(mode == ModeKeys.PREDICT
and is_tpu_strategy(distribution_strategy)))
if steps is None:
if batch_size is None:
# If neither the batch size or number of steps are set. We choose the
# global batch size as the minimum of number of samples and 32. 32 is
# chosen to provide backward compatibility.
global_batch_size = min(num_samples, 32)
else:
# If the user provided the batch size we need to handle the case
# between different strategies that use the global/per-replica batch size
global_batch_size = batch_size
if use_per_replica_batch:
global_batch_size *= distribution_strategy.num_replicas_in_sync
if not allow_partial_batch and num_samples % global_batch_size:
raise ValueError('The number of samples %s is not divisible by '
'batch size %s.' % (num_samples, global_batch_size))
steps = num_samples // global_batch_size
else:
if batch_size is None:
# We calculate the batch size based on the number of steps specified
if num_samples % steps:
raise ValueError('The number of samples %s is not divisible by '
'steps %s. Please change the number of steps to a '
'value that can consume all the samples' % (
num_samples, steps))
global_batch_size = num_samples // steps
else:
# If the user provided the batch size we need to handle the case
# between different strategies that use the global/per-replica batch size
global_batch_size = batch_size
if use_per_replica_batch:
global_batch_size *= distribution_strategy.num_replicas_in_sync
min_num_samples = global_batch_size * steps
if allow_partial_batch:
min_num_samples = global_batch_size * (steps-1) + 1 if steps > 1 else 0
if num_samples < min_num_samples:
raise ValueError('Number of samples %s is less than samples required '
'for specified batch_size %s and steps %s' % (
num_samples, global_batch_size, steps))
# We need to return the per replica or global batch size based on the strategy
if use_per_replica_batch:
if global_batch_size % distribution_strategy.num_replicas_in_sync:
raise ValueError(
'The batch size (%s) could not be sharded evenly across the sync '
'replicas (%s) in the distribution strategy.' % (
global_batch_size, distribution_strategy.num_replicas_in_sync))
batch_size = global_batch_size // distribution_strategy.num_replicas_in_sync
else:
batch_size = global_batch_size
return steps, batch_size
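# Worked example of the arithmetic above (numbers are illustrative): with
# num_samples=128, steps=None, batch_size=None and a strategy that uses
# per-replica batch sizes with 2 replicas in sync, the global batch size is
# min(128, 32) = 32, steps = 128 // 32 = 4, and the returned per-replica
# batch size is 32 // 2 = 16.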
def get_batch_dimension(iterator):
shapes = nest.flatten(iterator.output_shapes)
# Take the batch size from the first element, as it should be the same for
# all.
dims = shapes[0].dims
return dims[0] if dims else None
def list_to_tuple(maybe_list):
"""Datasets treat lists specially, so switch them to tuples."""
if isinstance(maybe_list, list):
return tuple(maybe_list)
return maybe_list
def get_iterator(dataset, distribution_strategy):
with distribution_strategy.scope():
iterator = distribution_strategy.make_dataset_iterator(dataset)
initialize_iterator(iterator, distribution_strategy)
return iterator
def initialize_iterator(iterator, distribution_strategy):
with distribution_strategy.scope():
init_op = control_flow_ops.group(iterator.initialize())
if not context.executing_eagerly():
K.get_session().run(init_op)
def _get_input_from_iterator(iterator, model):
"""Get elements from the iterator and verify the input shape and type."""
next_element = iterator.get_next()
if len(nest.flatten(next_element)) == len(model.inputs):
x = next_element
y = None
sample_weights = None
elif len(nest.flatten(next_element)) == (len(model.inputs) +
len(model.outputs)):
x, y = next_element
sample_weights = None
else:
x, y, sample_weights = next_element
# Validate that all the elements in x and y are of the same type and shape.
validate_distributed_dataset_inputs(
model._distribution_strategy, x, y, sample_weights)
return x, y, sample_weights
def _prepare_feed_values(model, inputs, targets, sample_weights, mode):
"""Prepare feed values to the model execution function.
Arguments:
model: Model to prepare feed values for.
inputs: List or dict of model inputs.
targets: Optional list of model targets.
sample_weights: Optional list of sample weight arrays.
mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.
Returns:
Feed values for the model in the given mode.
"""
strategy = model._distribution_strategy
inputs, targets, sample_weights = _get_input_from_iterator(inputs, model)
inputs = flatten_perdevice_values(strategy, inputs)
targets = flatten_perdevice_values(strategy, targets)
if mode == ModeKeys.PREDICT:
sample_weights = []
targets = []
else:
sample_weights = [
None for _ in range(len(model.outputs) * strategy.num_replicas_in_sync)
]
ins = inputs + targets + sample_weights
if mode == ModeKeys.TRAIN and not isinstance(K.symbolic_learning_phase(),
int):
ins += [True]
return ins
def _custom_compile_for_predict(model):
"""Custom compile for TPU predict mode."""
if not model.built:
# Model is not compilable because it does not know its number of inputs
# and outputs, nor their shapes and names. We will compile after the first
# time the model gets called on training data.
return
model._is_compiled = True
model.total_loss = None
model._fit_function = None
model._eval_function = None
model.train_function = None
model.test_function = None
model.predict_function = None
def _build_network_on_replica(model, mode, inputs=None, targets=None):
"""Build an updated model on replicas.
We create a new Keras model while sharing the variables from the old graph.
Building a new sub-graph is required since the original keras model creates
placeholders for the input and the output that are not accessible till we
call iterator.get_next() inside the step_fn for `fit`/`evaluate`/`predict`.
The sharing of weights and layers between the old and the new model guarantees
that we're using Strategy variables and any updates on either model are
reflected correctly in callbacks and loop iterations.
We need to make sure we share the optimizers between the old and the new model
as well so that optimizer state is not lost if the user is running fit
multiple times.
Args:
model: Model to be replicated across Replicas
mode: Which of fit/eval/predict is building the distributed network
inputs: Input variables to be passed to the model
targets: Target tensor to be passed to model.compile
Returns:
A new model with shared layers with the old model.
"""
# Need to do imports here since we run into a circular dependency error.
from tensorflow.python.keras import models # pylint: disable=g-import-not-at-top
from tensorflow.python.keras.engine import sequential # pylint: disable=g-import-not-at-top
# We rely on the internal methods to avoid having share_weights weights in the
# public API.
if isinstance(model, sequential.Sequential):
updated_model = models._clone_sequential_model(model, input_tensors=inputs,
share_weights=True)
else:
updated_model = models._clone_functional_model(model, input_tensors=inputs,
share_weights=True)
# Recast all low precision outputs back to float32 since we only cast
# the inputs to bfloat16 and not the targets. This is done so that we can preserve
# precision when calculating the loss value.
def _upcast_low_precision_outputs(output):
if output.dtype == dtypes.bfloat16:
return math_ops.cast(output, dtypes.float32)
else:
return output
updated_model.outputs = [_upcast_low_precision_outputs(o)
for o in updated_model.outputs]
if isinstance(targets, tuple):
targets = nest.flatten(targets)
if mode == ModeKeys.PREDICT and inputs is not None: # TPU predict case
_custom_compile_for_predict(updated_model)
else:
updated_model.compile(
model.optimizer,
model.loss,
metrics=metrics_module.clone_metrics(model._compile_metrics),
loss_weights=model.loss_weights,
sample_weight_mode=model.sample_weight_mode,
weighted_metrics=metrics_module.clone_metrics(
model._compile_weighted_metrics),
target_tensors=targets)
return updated_model
def _build_distributed_network(model, strategy, mode, inputs=None,
targets=None):
"""Create a cloned model on each replica."""
with K.get_graph().as_default(), strategy.scope():
distributed_model = strategy.extended.call_for_each_replica(
_build_network_on_replica,
args=(model, mode, inputs, targets))
set_distributed_model(model, mode, distributed_model)
def _clone_and_build_model(model, mode, inputs=None, targets=None):
"""Clone and build the given keras_model."""
# We need to set the import here since we run into a circular dependency
# error.
from tensorflow.python.keras import models # pylint: disable=g-import-not-at-top
cloned_model = models.clone_model(model, input_tensors=inputs)
# Compile and build model.
if isinstance(model.optimizer, optimizers.TFOptimizer):
optimizer = model.optimizer
else:
optimizer_config = model.optimizer.get_config()
optimizer = model.optimizer.__class__.from_config(optimizer_config)
# Recast all low precision outputs back to float32 since we only cast
# the inputs to bfloat16 and not the targets. This is done so that we can preserve
# precision when calculating the loss value.
def _upcast_low_precision_outputs(output):
if output.dtype == dtypes.bfloat16:
return math_ops.cast(output, dtypes.float32)
else:
return output
cloned_model.outputs = [_upcast_low_precision_outputs(o)
for o in cloned_model.outputs]
if isinstance(targets, tuple):
targets = nest.flatten(targets)
if mode == ModeKeys.PREDICT and inputs is not None: # TPU predict case
_custom_compile_for_predict(cloned_model)
else:
cloned_model.compile(
optimizer,
model.loss,
metrics=metrics_module.clone_metrics(model._compile_metrics),
loss_weights=model.loss_weights,
sample_weight_mode=model.sample_weight_mode,
weighted_metrics=metrics_module.clone_metrics(
model._compile_weighted_metrics),
target_tensors=targets)
return cloned_model
def clone_model_on_replicas(model, strategy, mode, inputs=None, targets=None):
"""Create a cloned model on each replica."""
with K.get_graph().as_default(), strategy.scope():
distributed_model = strategy.extended.call_for_each_replica(
_clone_and_build_model, args=(model, mode, inputs, targets))
set_distributed_model(model, mode, distributed_model)
if mode == ModeKeys.TRAIN:
model._make_callback_model(distributed_model)
def _make_execution_function(model, mode):
"""Makes function to run one step of distributed model execution."""
if context.executing_eagerly():
return _make_eager_execution_function(model, mode)
strategy = model._distribution_strategy
if not get_distributed_model(model, mode):
if model._compile_distribution:
clone_model_on_replicas(model, strategy, mode)
else:
_build_distributed_network(model, strategy, mode)
def _per_device_function(model):
f = model._make_execution_function(mode)
return (f.inputs, f.outputs, f.updates_op, f.session_kwargs)
with strategy.scope():
# Create train ops on each of the devices when we call
# `_per_device_function`.
(grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args) = strategy.extended.call_for_each_replica(
_per_device_function, args=(get_distributed_model(model, mode),))
# Initialize the variables in the replicated model. This is necessary for
# multi-worker training because on some workers, initialization is not
# needed. This method does initialization or waiting for initialization
# according to the context object of distribute coordinator.
init_restore_or_wait_for_variables()
# Unwrap all the per device values returned from `call_for_each_replica`.
# Unwrapping per device values gives you a list of values that can be
# used to construct a new train function that is composed of update ops on
# all the devices over which the model is distributed.
(all_inputs, all_outputs, all_updates, all_session_args) = unwrap_values(
strategy,
grouped_inputs,
grouped_outputs,
grouped_updates,
grouped_session_args,
with_loss_tensor=(mode != ModeKeys.PREDICT))
return K.function(
all_inputs,
all_outputs,
updates=all_updates,
name='distributed_{}_function'.format(mode),
**all_session_args)
def _make_eager_execution_function(model, mode):
"""Makes function to run one step of distributed model eager execution."""
strategy = model._distribution_strategy
if not get_distributed_model(model, mode):
if model._compile_distribution:
clone_model_on_replicas(model, strategy, mode)
else:
_build_distributed_network(model, strategy, mode)
def _per_device_function(model):
f = model._make_execution_function(mode)
return (f.inputs, f.outputs)
# NOTE(priyag): Try creating a new FuncGraph within DS scope instead of using
# the global one.
with K.get_graph().as_default(), strategy.scope():
# Create train ops on each of the devices when we call
# `_per_device_function`.
(grouped_inputs, grouped_outputs) = strategy.extended.call_for_each_replica(
_per_device_function, args=(get_distributed_model(model, mode),))
# Unwrap all the per device values returned from `call_for_each_replica`.
# Unwrapping per device values gives you a list of values that can be
# used to construct a new train function that is composed of inputs/outputs
# on all the devices over which the model is distributed.
(all_inputs, all_outputs, _, _) = unwrap_values(
strategy,
grouped_inputs,
grouped_outputs,
with_loss_tensor=(mode != ModeKeys.PREDICT))
return K.function(
all_inputs,
all_outputs,
name='eager_distributed_{}_function'.format(mode))
def _copy_weights_to_distributed_model(original_model, mode):
"""Copies weights from original model to distributed models."""
strategy = original_model._distribution_strategy
distributed_model = get_distributed_model(original_model, mode)
if strategy:
# Copy the weights from the original model to each of the replicated
# models.
orig_model_weights = original_model.get_weights()
first_model = strategy.unwrap(distributed_model)[0]
set_weights(strategy, first_model, orig_model_weights)
def _copy_weights_to_original_model(model, mode):
"""Copies weights from first distributed model back to original model."""
if model._distribution_strategy and mode == ModeKeys.TRAIN:
distributed_model = get_distributed_model(model, mode)
updated_weights = model._distribution_strategy.unwrap(
distributed_model)[0].get_weights()
model.set_weights(updated_weights)
def _per_device_aggregate_batch(batch_outs, model, mode):
"""Aggregates the per-device batch-level outputs from a distributed step."""
if model._distribution_strategy is not None and mode == ModeKeys.PREDICT:
total_batch_outs = []
for i in range(len(model.outputs)):
num_replicas = model._distribution_strategy.num_replicas_in_sync
nested_outs = batch_outs[i * num_replicas:i * num_replicas + num_replicas]
total_batch_outs.append(np.concatenate(nest.flatten(nested_outs)))
return total_batch_outs
return batch_outs
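# Hedged worked example: with 2 model outputs and 2 replicas in sync, the
# predict step returns batch_outs ordered as
#   [out0_replica0, out0_replica1, out1_replica0, out1_replica1]
# and the loop above concatenates slices of size num_replicas so that one
# array per model output is returned.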
def _reset_metrics(model):
if model._distribution_strategy:
for mode in [ModeKeys.TRAIN, ModeKeys.TEST, ModeKeys.PREDICT]:
distributed_model = get_distributed_model(model, mode)
if distributed_model:
first_model = model._distribution_strategy.unwrap(distributed_model)[0]
first_model.reset_metrics()
def get_distributed_model(model, mode):
key = _generate_cache_key(mode)
return model._distributed_model_cache.get(key, None)
def set_distributed_model(model, mode, distributed_model):
key = _generate_cache_key(mode)
model._distributed_model_cache[key] = distributed_model
def _generate_cache_key(mode):
key = hash(mode)
return key
@tf_contextlib.contextmanager
def distributed_scope(strategy, learning_phase):
with strategy.scope(), K.learning_phase_scope(learning_phase):
yield
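# A minimal usage sketch of `distributed_scope` (hypothetical strategy object),
# assuming a tf.distribute strategy has already been constructed:
#
#   with distributed_scope(strategy, learning_phase=1):
#       ...  # ops built here see both the strategy scope and the learning phase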
|
py | 7df9c49cbe22ee1dc38222037b1ce67f8ff5242f | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Update encrypted deploy password in Travis config file."""
from __future__ import print_function
import base64
import json
import os
from getpass import getpass
import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
GITHUB_REPO = 'benthomasson/ansible_behave'
TRAVIS_CONFIG_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
def load_key(pubkey):
"""Load public RSA key.
Work around keys with incorrect header/footer format.
Read more about RSA encryption with cryptography:
https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/
"""
try:
return load_pem_public_key(pubkey.encode(), default_backend())
except ValueError:
# workaround for https://github.com/travis-ci/travis-api/issues/196
pubkey = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
return load_pem_public_key(pubkey.encode(), default_backend())
def encrypt(pubkey, password):
"""Encrypt password using given RSA public key and encode it with base64.
The encrypted password can only be decrypted by someone with the
private key (in this case, only Travis).
"""
key = load_key(pubkey)
encrypted_password = key.encrypt(password, PKCS1v15())
return base64.b64encode(encrypted_password)
def fetch_public_key(repo):
"""Download RSA public key Travis will use for this repo.
Travis API docs: http://docs.travis-ci.com/api/#repository-keys
"""
keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
data = json.loads(urlopen(keyurl).read().decode())
if 'key' not in data:
errmsg = "Could not find public key for repo: {}.\n".format(repo)
errmsg += "Have you already added your GitHub repo to Travis?"
raise ValueError(errmsg)
return data['key']
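# A minimal usage sketch (hypothetical password value), combining the two
# helpers above to produce the base64 blob used for the `secure` field:
#
#   pubkey = fetch_public_key(GITHUB_REPO)
#   secure = encrypt(pubkey, b'my-pypi-password')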
def prepend_line(filepath, line):
"""Rewrite a file adding a line to its beginning."""
with open(filepath) as f:
lines = f.readlines()
lines.insert(0, line)
with open(filepath, 'w') as f:
f.writelines(lines)
def load_yaml_config(filepath):
"""Load yaml config file at the given path."""
with open(filepath) as f:
return yaml.load(f)
def save_yaml_config(filepath, config):
"""Save yaml config file at the given path."""
with open(filepath, 'w') as f:
yaml.dump(config, f, default_flow_style=False)
def update_travis_deploy_password(encrypted_password):
"""Put `encrypted_password` into the deploy section of .travis.yml."""
config = load_yaml_config(TRAVIS_CONFIG_FILE)
config['deploy']['password'] = dict(secure=encrypted_password)
save_yaml_config(TRAVIS_CONFIG_FILE, config)
    line = ('# This file was autogenerated and will be overwritten'
' each time you run travis_pypi_setup.py\n')
prepend_line(TRAVIS_CONFIG_FILE, line)
def main(args):
"""Add a PyPI password to .travis.yml so that Travis can deploy to PyPI.
Fetch the Travis public key for the repo, and encrypt the PyPI password
with it before adding, so that only Travis can decrypt and use the PyPI
password.
"""
public_key = fetch_public_key(args.repo)
password = args.password or getpass('PyPI password: ')
update_travis_deploy_password(encrypt(public_key, password.encode()))
print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
if '__main__' == __name__:
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--repo', default=GITHUB_REPO,
help='GitHub repo (default: %s)' % GITHUB_REPO)
parser.add_argument('--password',
help='PyPI password (will prompt if not provided)')
args = parser.parse_args()
main(args)
|
py | 7df9c60fb30f57f3eb7e54349891acc0737a5273 | # coding=utf8
import os
import re
import time
import logging
import pdfkit
import requests
from bs4 import BeautifulSoup
htmlTpl = """
<!DOCTYPE html>
<html lang="zh-cn">
<head>
<meta charset="UTF-8">
</head>
<body>
{content}
</body>
</html>
"""
def url_to_html(url, name):
"""
Fetch the HTML content from a URL
:param url: URL of the HTML page
:param name: output HTML file name
"""
try:
res = requests.get(url)
soup = BeautifulSoup(res.content, 'html5lib')
body = soup.find_all(class_="post")[0]
html = str(body)
# def func(m):
# if not m.group(2).startswith("http"):
# rtn = m.group(1) + "http://www.runoob.com" + m.group(2) + m.group(3)
# return rtn
# else:
# return m.group(1) + m.group(2) + m.group(3)
# # convert relative img src paths inside body into absolute paths
#
# pattern = "(<img .*?src=\")(.*?)(\")"
# html = re.compile(pattern).sub(func, html)
# html = re.compile(pattern)
html = htmlTpl.format(content=html)
html = html.encode("utf-8")
with open(name, 'wb') as f:
f.write(html)
return name
except Exception as e:
logging.error("解析错误", exc_info=True)
def getUrlList():
"""
获取所有URL目录列表
:return:
"""
urls = []
for pageNum in range(1, 14):
response = requests.get("http://www.zhangxinxu.com/wordpress/category/js/page/" + str(pageNum) + '/')
soup = BeautifulSoup(response.content, "html.parser")
# print(soup)
aList = soup.find(class_="post").find_all("a")
#print("page" + str(pageNum))
#print(aList)
for a in aList:
url = a.get('href')
urls.append(url)
print(len(urls))
# response = requests.get("http://www.runoob.com/htmldom/htmldom-tutorial.html")
# soup = BeautifulSoup(response.content, "html.parser")
# menu_tag = soup.find_all(class_="design")[0]
# urls = []
# for a in menu_tag.find_all("a"):
# aUrl = a.get('href')
# # url = "http://www.runoob.com" + aUrl
# # urls.append(url)
# if aUrl.startswith('/htmldom'):
# url = "http://www.runoob.com" + aUrl
# urls.append(url)
return urls
def save_pdf(htmls, file_name):
"""
Save all HTML files into a single PDF file
:param htmls: list of HTML files
:param file_name: PDF file name
:return:
"""
print(htmls, file_name)
options = {
'page-size': 'Letter',
'margin-top': '0.75in',
'margin-right': '0.75in',
'margin-bottom': '0.75in',
'margin-left': '0.75in',
'encoding': "UTF-8",
'custom-header': [
('Accept-Encoding', 'gzip')
],
'cookie': [
('cookie-name1', 'cookie-value1'),
('cookie-name2', 'cookie-value2'),
],
'outline-depth': 10
}
# On Linux, configure wkhtmltopdf separately when the installed wkhtmltopdf is *not* using the patched Qt.
# Download wkhtmltopdf from the official site, unpack it to a local folder and point the configuration at it
# config=pdfkit.configuration(wkhtmltopdf='/home/lhf/programmes/wkhtmltox/bin/wkhtmltopdf')
# pdfkit.from_file(htmls, file_name, options=options, configuration=config)
# Use the call below when wkhtmltopdf built with the patched Qt is installed successfully
pdfkit.from_file(htmls, file_name, options=options)
def main():
start = time.time()
urls = getUrlList()
file_name = u"zhangxinxuBlog.pdf"
htmls = [url_to_html(url, str(index) + ".html") for index, url in enumerate(urls)]
save_pdf(htmls, file_name)
for html in htmls:
os.remove(html)
total_time = time.time() - start
print(u"总共耗时:%f 秒" % total_time)
if __name__ == '__main__':
main()
|
py | 7df9c654a461f9e56e5c31868292f752cc538fdd | import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__) |
py | 7df9c6e879f6d0224e9d1e084d6f6971a90c5d7f | #
# @(#) $Id: chain.py,v 1.6 2001/09/28 17:06:17 ivm Exp $
#
# $Log: chain.py,v $
# Revision 1.6 2001/09/28 17:06:17 ivm
# Made it work with Python 2.1
#
# Revision 1.5 2001/05/07 11:28:30 ivm
# Fixed for new FCSLIB
#
# Revision 1.4 1999/10/12 16:15:07 ivm
# Fixed bug in chain with multiple tokens
#
# Revision 1.3 1999/08/17 21:55:54 ivm
# Made UPS compliant
# Use single config file
#
# Revision 1.2 1999/08/03 19:47:23 ivm
# Make connection timeout in chain 5 seconds
# Use connect with timeout in client
#
# Revision 1.1 1999/07/12 19:35:52 ivm
# *** empty log message ***
#
# Revision 1.11 1999/06/01 19:27:44 ivm
# Pinging implemented
#
# Revision 1.10 1999/05/28 15:49:20 ivm
# Use time-out in connect()
#
# Revision 1.9 1999/05/21 18:14:06 ivm
# Fixed simultaneous SET problem
#
# Revision 1.6 1999/05/20 15:03:32 ivm
# Version with deferred execution (correct one)
#
# Revision 1.6 1999/05/20 15:03:32 ivm
# Version with deferred execution (correct one)
#
# Revision 1.4 1999/05/18 14:37:08 ivm
# Implemented versions command
#
# Revision 1.4 1999/05/18 14:37:08 ivm
# Implemented versions command
#
# Revision 1.2 1999/05/17 18:14:03 ivm
# Debug messages cleaned up
#
# Revision 1.4 1999/04/19 19:25:55 ivm
# Recovery implemented
#
# Revision 1.4 1999/04/19 19:25:55 ivm
# Recovery implemented
#
#
import string
import sys
import os
from chainpkt import *
from SockStream import SockStream
from Selector import *
from socket import *
import select
import errno
import time
Error = 'FIPC Error'
class ChainSegment:
def __init__(self, inx, map, sel):
self.UpSock = None
self.UpStr = None
self.DnSock = None
self.DnStr = None
self.Sel = sel
self.Map = map
self.UpInx = None
self.DnInx = None
self.Inx = self.initServerSock(inx, map)
if self.Inx < 0:
# some error
raise Error, 'Can not allocate Chain port'
self.Sel.register(self, rd = self.SSock.fileno())
self.Token = None
self.PusherSeq = None
self.IgnorePusher = -1
self.LastPushSeq = 0
self.connect()
self.LastPing = 0
def initServerSock(self, inx, map):
if inx >= 0:
h, port = self.Map[inx]
self.SSock = socket(AF_INET, SOCK_STREAM)
self.SSock.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
self.SSock.bind((h, port))
self.SSock.listen(2)
return inx
else: # pick first available
last_exc_type = None
last_exc_value = None
for inx in range(len(map)):
h, p = map[inx]
sock = socket(AF_INET, SOCK_STREAM)
sock.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
try: sock.bind((h,p))
except:
last_exc_type = sys.exc_type
last_exc_value = sys.exc_value
else:
sock.listen(2)
self.SSock = sock
return inx
# all attempts failed...
if last_exc_type != None:
raise last_exc_type, last_exc_value
return -1
def isCloserUp(self, i, j): # i is closer than j
if i > self.Inx: i = i - len(self.Map)
if j > self.Inx: j = j - len(self.Map)
return i > j
def isCloserDown(self, i, j): # i is closer than j
return self.isCloserUp(j, i)
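# Hedged worked example: with a 4-entry Map and self.Inx == 1,
# isCloserUp(0, 3) keeps i == 0 and wraps j == 3 around to -1, so node 0 is
# considered closer on the way up than node 3.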
def upIndex(self):
return self.UpInx
def downIndex(self):
return self.DnInx
def connectSocket(self, addr, tmo = -1):
# returns either connected socket or None on timeout
# -1 means infinite
s = socket(AF_INET, SOCK_STREAM)
if tmo < 0:
s.connect(addr)
return s
s.setblocking(0)
if s.connect_ex(addr) == 0:
s.setblocking(1)
return s
#print 'selecting...'
r,w,x = select.select([], [s], [], tmo)
if s.connect_ex(addr) == 0:
s.setblocking(1)
return s
try: s.getpeername()
except:
print sys.exc_type, sys.exc_value
s.close()
return None
s.setblocking(1)
return s
def connect(self):
inx = self.Inx
#print 'Connect: my inx = ', self.Inx
for i in range(len(self.Map)):
inx = inx - 1
if inx < 0: inx = len(self.Map) - 1
# sock = socket(AF_INET, SOCK_STREAM)
# try to connect to server #inx
addr = self.Map[inx]
sock = None
print 'connecting to #', inx, ' at ', addr
try:
sock = self.connectSocket(addr, 5)
except:
print sys.exc_type, sys.exc_value
pass
if sock == None:
print 'Connection failed'
continue
str = SockStream(sock, '\n')
print 'Sending HELLO...'
str.send('HELLO %d' % self.Inx)
print 'Up connection to %d established' % inx
self.UpSock = sock
self.Sel.register(self, rd = self.UpSock.fileno())
self.UpStr = str
self.UpInx = inx
return inx
return -1
def doRead(self, fd, sel):
#print 'doRead(fd=%d)' % fd
#os.system('netstat | grep 7001')
if fd == self.SSock.fileno():
#print 'doRead(server, fd = %d)' % fd
self.doConnectionRequest()
elif self.DnSock != None and fd == self.DnSock.fileno():
#print 'doRead(down, fd = %d)' % fd
self.doReadDn()
elif self.UpSock != None and fd == self.UpSock.fileno():
#msg = self.UpSock.recv(1000)
#print 'doRead(up, fd = %d) : <%s>' % (fd, msg)
self.doReadUp()
#print 'doRead(): Sel: %s' % sel.ReadList
def getHello(self, str):
msg = str.recv(1000)
lst = string.split(msg)
inx = -1
print 'Hello msg: <%s>' % msg
if len(lst) >= 2 and lst[0] == 'HELLO':
try: inx = string.atoi(lst[1])
except: pass
return inx
def doConnectionRequest(self):
refuse = 0
s, addr = self.SSock.accept()
#print 'Connection request from %s, sock = %s' % (addr, s)
ip, port = addr
str = SockStream(s, '\n')
inx = self.getHello(str)
if inx < 0: refuse = 1 # Unknown client. Refuse
if not refuse and self.DnSock != None and self.DnInx != self.Inx:
# check if this client is "closer" than our down connection
refuse = not self.isCloserDown(inx, self.DnInx)
if self.UpSock == None and not refuse:
# if so far we were alone, it means that this is good
# time to try to connect
#print 'trying to connect...'
refuse = (self.connect() < 0)
#print 'refuse = ', refuse
if refuse:
s.close()
else:
# this client is "closer". Close old connection and
# keep this new client
if self.DnSock != None:
#print 'New client is closer. Disconnect'
self.Sel.unregister(rd = self.DnSock.fileno())
self.DnSock.close()
self.DnSock = s
self.DnInx = inx
self.DnStr = SockStream(self.DnSock, '\n')
self.Sel.register(self, rd = s.fileno())
self.downConnectedCbk(inx)
print 'Down connection to %d established' % inx
def downConnectedCbk(self, inx): #virtual
pass
def doReadDn(self):
self.DnStr.readMore(1024)
#print 'DnStr: EOF = %s, Buf = <%s>' % (self.DnStr.EOF,
# self.DnStr.Buf)
while self.DnStr.msgReady():
msg = self.DnStr.getMsg()
print 'RCVD DN:<%s>' % msg[:100]
pkt = CPacket()
pkt.decode(msg)
pkt = pkt.Body
if pkt.Type == CToken.Type:
self.gotToken(pkt)
elif pkt.Type == CPusher.Type:
self.gotPusher(pkt)
elif pkt.Type == CMessage.Type:
self.gotMessage(pkt)
if self.DnStr.eof():
# Down link is broken. Close the socket,unregister it
self.downDisconnectedCbk()
self.closeDnLink()
def doReadUp(self): # nothing meaningfull, just pings
self.UpStr.readMore(1024)
while self.UpStr.msgReady():
self.UpStr.getMsg()
if self.UpStr.eof():
# Down link is broken. Close the socket,unregister it
self.upDisconnectedCbk()
self.closeUpLink()
self.connect()
def downDisconnectedCbk(self): #virtual
pass
def gotMessage(self, msg):
#print 'gotMessage(%s)' % msg.Body
if msg.isBroadcast() or msg.isPoll() or msg.Dst == self.Inx:
self.processMessageCbk(msg.Src, msg.Dst, msg.Body) # pure virtual
if msg.Src != self.Inx:
forward = 1
if msg.isPoll():
forward = 0
elif msg.isBroadcast():
forward = not self.isCloserUp(msg.Src, self.UpInx)
else:
forward = not self.isCloserUp(msg.Dst, self.UpInx)
if forward:
self.sendUp(msg)
def gotPusher(self, pusher):
#print 'Got pusher(src=%s, seq=%s)' % (pusher.Src, pusher.Seq)
src = pusher.Src
if src == self.Inx:
#print 'self.Token=', self.Token, ' PusherSeq = ', self.PusherSeq
if not self.haveToken() and \
self.PusherSeq != None and \
pusher.Seq > self.IgnorePusher and \
self.PusherSeq >= pusher.Seq:
self.createToken()
elif self.haveToken():
if self.isCloserUp(self.Token.Dst, src):
self.Token.Dst = src
self.forwardToken()
else:
if self.PusherSeq != None and self.Inx > src:
self.PusherSeq = None
self.sendUp(pusher)
def flushOutMsgs(self):
lst = self.getOutMsgList() # pure virtual
#print 'flushOutMsgs: lst = ', lst
for src, dst, txt in lst:
self.sendMessage(src, dst, txt)
return len(lst)
def sendMessage(self, src, dst, txt):
forward = 1
msg = CMessage(src, dst, txt)
if not msg.isBroadcast() and not msg.isPoll():
forward = dst == self.Inx or not \
self.isCloserUp(dst, self.UpInx)
if forward:
self.sendUp(msg)
def gotToken(self, token):
#print 'Got token'
self.Token = token
#
# ignore all pushers we sent so far
self.IgnorePusher = self.LastPushSeq
self.PusherSeq = None
# send our messages
self.gotTokenCbk()
if self.haveToken():
# the callback above may have forwarded the token
if token.Dst != self.Inx and self.isCloserUp(token.Dst, self.UpInx): # the guy died
token.Dst = self.Inx
if token.Dst != self.Inx:
self.forwardToken()
def gotTokenCbk(self): # virtual
#print 'Empty gotTokenCbk'
pass
def upDisconnectedCbk(self):
print 'up link broken'
def needToken(self):
#print 'Need token... ', self.Token
#if self.WaitMode != 'n':
# return
#print 'Need token... ', self.Token
if self.haveToken():
self.flushOutMsgs()
else:
self.sendPusher()
#self.SendPusher = 1
def createToken(self):
print 'creating token...'
self.PusherSeq = None
self.Token = CToken(self.Inx)
self.forwardToken()
def forwardToken(self, to = None):
if self.Token != None:
if to != None:
if self.Token.Dst != self.Inx and \
self.isCloserUp(self.Token.Dst, to):
self.Token.Dst = to
self.sendUp(self.Token)
self.Token = None
def sendPusher(self):
#print 'Sending pusher...'
seq = self.LastPushSeq + 1
self.LastPushSeq = seq
self.PusherSeq = seq
p = CPusher(self.Inx, seq)
self.sendUp(p)
def closeDnLink(self):
if self.DnSock != None:
print 'closing down link'
self.Sel.unregister(rd = self.DnSock.fileno())
self.DnSock.close()
self.DnSock = None
self.DnStr = None
def closeUpLink(self):
if self.UpSock != None:
print 'closing up link'
self.Sel.unregister(rd = self.UpSock.fileno())
self.UpSock.close()
self.UpSock = None
self.UpStr = None
def sendUp(self, msg):
if self.UpSock == None:
if self.connect() < 0:
# something is wrong: we have down link, but
# can not connect anywhere. Break down link
# assume that down peer is dead
self.closeDnLink()
if self.UpStr != None:
pkt = CPacket(msg)
txt = pkt.encode()
print 'SEND UP:<%s>' % txt[:100]
self.UpStr.send(txt)
#os.system('netstat | grep 7001')
def run(self, tmo = -1):
#print 'run(): Sel: %s' % self.Sel.ReadList
#os.system('netstat | grep 7001')
self.Sel.select(tmo)
#print self.LastPing, time.time()
if self.LastPing < time.time() - 300: # ping every 5 minutes
if self.UpSock != None:
#print 'Pinging up...'
self.UpStr.zing(1000) # disconnect after 15 minutes
#print 'UpStr: EOF = %s, Buf = <%s>, LastTxn = %s' % (
# self.UpStr.EOF, self.UpStr.Buf, self.UpStr.LastTxn)
if self.DnSock != None:
#print 'Pinging down...'
self.DnStr.zing(1000)
self.LastPing = time.time()
def haveToken(self):
#print 'haveToken: token = ', self.Token
return self.Token != None
class CallBackStub:
def __init__(self, fcn, arg):
self.Fcn = fcn
self.Arg = arg
def invoke(self):
self.Fcn(self.Arg)
class ChainLink(ChainSegment):
Version = "$Id: chain.py,v 1.6 2001/09/28 17:06:17 ivm Exp $"
def __init__(self, inx, map, sel):
ChainSegment.__init__(self, inx, map, sel)
self.OutMsgList = []
self.InMsgList = []
def sendMsg(self, dst, msg, src = None, sendNow = 0):
if src == None:
src = self.Inx
if sendNow:
self.sendMessage(src, dst, msg)
else:
# buffer it
self.OutMsgList.append((src, dst, msg))
self.needToken()
def insertMsg(self, dst, msg):
self.OutMsgList = [(self.Inx, dst, msg)] + self.OutMsgList
self.needToken()
def processMessageCbk(self, src, dst, msg):
self.InMsgList.append((src, dst, msg))
def haveMsg(self):
return len(self.InMsgList) > 0
def getOutMsgList(self):
lst = self.OutMsgList
self.OutMsgList = []
return lst
def getMsg(self):
if not self.haveMsg():
return None
m = self.InMsgList[0]
self.InMsgList = self.InMsgList[1:]
return m
def recvMsg(self):
while not self.haveMsg():
self.run()
return self.getMsg()
def upDisconnectedCbk(self):
print 'up link broken'
def waitForToken(self):
while not self.haveToken():
self.needToken()
self.run(10)
def addCallback(self, name, fcn, arg = None):
cb = CallBackStub(fcn, arg)
#print 'adding callback self.%sCbk' % name
exec('self.%sCbk = cb.invoke' % name)
|
py | 7df9c7836c402ae850343293b80f134530876ae8 | from .task_executor import TaskExecutor
from .log_handling import logger, message, error_message
from time import sleep
import sys
class ExecProfileBase:
"""
Template for an exec profile:
- method "exec" is required
- methods "monitor", "prepare", "monitor", "finalize" are optional
- method "monitor" is required when "exec"
is executing asynchonously (self.async_exec=True):
e.g. if "exec" only triggers the execution on a remote worker,
then "monitor" has to be invoked every few seconds to check
for completion / status of execution.
"""
def __init__(
self,
tool:dict,
inputs:dict,
resources:dict,
commandline:str,
workflow_metadata: dict
):
self.tool = tool
self.inputs = inputs
self.commandline = commandline
self.resources = resources
self.out = {}
self.success = None # indicates success of task execution:
# - None if not completed yet
# - False if completed with an error
# - True if completed successfully
self.seconds_between_monitor = 4
if not hasattr(self, "execute"):
logger.error(
error_message(
"Initializing Exec Profile",
"The execute method is required but has not been described in the exec profile.",
is_known=True
)
)
sys.exit(1)
self.exec_plan = [
{
"name": "prepare",
"method": self.prepare if hasattr(self, "prepare") else None
},
{
"name": "execute",
"method": self.execute
},
{
"name": "monitor",
"method": self.monitor if hasattr(self, "monitor") else None
},
{
"name": "finalize",
"method": self.finalize if hasattr(self, "finalize") else None
},
]
def deploy(self):
for m in self.exec_plan:
method_name = m["name"]
method = m["method"]
print(method_name)
try:
if method is None:
continue
logger.debug(
message(f"[Exec Profile {method_name}]", "starting")
)
if method_name == "monitor":
while self.success is None:
logger.debug(
message(f"[Exec Profile {method_name}]", "Task execution not finished yet. Waiting.")
)
sleep(self.seconds_between_monitor)
method()
status = "success" if self.success else "failed"
logger.debug(
message(f"[Exec Profile {method_name}]", "Task execution finished with status: {status}")
)
else:
method()
if method_name == "exec":
if self.success is None:
assert self.monitor is not None, \
"Exec method is done but task execution has not been finished " + \
"and no \"monitor\" methdod has been defined. " + \
"Have you forgot to set the output"
logger.debug(
message(f"[Exec Profile {method_name}]", "Execution is started and continued in the background.")
)
else:
status = "success" if self.success else "failed"
logger.debug(
message(f"[Exec Profile {method_name}]", "Task execution finished with status: {status}")
)
else:
logger.debug(
message(f"[Exec Profile {method_name}]", "completed")
)
except AssertionError as e:
logger.error(
error_message(f"Exec Profile {method_name}", e, is_known=True)
)
self.success = False
except Exception as e:
logger.error(
error_message(f"Exec Profile {method_name}", e, is_known=False)
)
self.success = False
class LocalToolExec(ExecProfileBase):
def execute(self):
self.async_exec = False
task_executor = TaskExecutor(
tool=self.tool,
inputs=self.inputs
)
task_executor.run()
self.out = task_executor.out
self.success = task_executor.success
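# A minimal usage sketch (hypothetical tool/inputs dictionaries prepared by the
# caller); deploy() drives prepare -> execute -> monitor -> finalize:
#
#   profile = LocalToolExec(tool=my_tool, inputs=my_inputs, resources={},
#                           commandline="", workflow_metadata={})
#   profile.deploy()
#   print(profile.success, profile.out)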
|
py | 7df9c7e0381c7fd5ad6094461eec9c17bc7fe372 | from dagster import (
Array,
Field,
ModeDefinition,
Noneable,
ScalarUnion,
Selector,
Shape,
pipeline,
resource,
solid,
)
from dagster.config.config_type import ConfigTypeKind
from dagster.config.field import resolve_to_config_type
from dagster.core.snap import build_config_schema_snapshot, snap_from_config_type
from dagster.serdes import (
deserialize_json_to_dagster_namedtuple,
deserialize_value,
serialize_dagster_namedtuple,
serialize_pp,
)
def snap_from_dagster_type(dagster_type):
return snap_from_config_type(resolve_to_config_type(dagster_type))
def test_basic_int_snap():
int_snap = snap_from_dagster_type(int)
assert int_snap.given_name == 'Int'
assert int_snap.key == 'Int'
assert int_snap.kind == ConfigTypeKind.SCALAR
assert int_snap.enum_values is None
assert int_snap.fields is None
def test_basic_dict():
dict_snap = snap_from_dagster_type({'foo': int})
assert dict_snap.key.startswith('Shape.')
assert dict_snap.given_name is None
child_type_keys = dict_snap.get_child_type_keys()
assert child_type_keys
assert len(child_type_keys) == 1
assert child_type_keys[0] == 'Int'
assert child_type_keys[0]
assert dict_snap.fields and len(dict_snap.fields) == 1
field = dict_snap.fields[0]
assert field.name == 'foo'
def test_field_things():
dict_snap = snap_from_dagster_type(
{
'req': int,
'opt': Field(int, is_required=False),
'opt_with_default': Field(int, is_required=False, default_value=2),
'req_with_desc': Field(int, description='A desc'),
}
)
assert dict_snap.fields and len(dict_snap.fields) == 4
field_snap_dict = {field_snap.name: field_snap for field_snap in dict_snap.fields}
assert field_snap_dict['req'].is_required is True
assert field_snap_dict['req'].description is None
assert field_snap_dict['opt'].is_required is False
assert field_snap_dict['opt'].default_provided is False
assert field_snap_dict['opt'].default_value_as_json_str is None
assert field_snap_dict['opt_with_default'].is_required is False
assert field_snap_dict['opt_with_default'].default_provided is True
assert deserialize_value(field_snap_dict['opt_with_default'].default_value_as_json_str) == 2
assert field_snap_dict['req_with_desc'].is_required is True
assert field_snap_dict['req_with_desc'].description == 'A desc'
def test_basic_list():
list_snap = snap_from_dagster_type(Array(int))
assert list_snap.key.startswith('Array')
child_type_keys = list_snap.get_child_type_keys()
assert child_type_keys
assert len(child_type_keys) == 1
assert child_type_keys[0] == 'Int'
def test_basic_optional():
optional_snap = snap_from_dagster_type(Noneable(int))
assert optional_snap.key.startswith('Noneable')
child_type_keys = optional_snap.get_child_type_keys()
assert child_type_keys
assert len(child_type_keys) == 1
assert child_type_keys[0] == 'Int'
assert optional_snap.kind == ConfigTypeKind.NONEABLE
assert optional_snap.enum_values is None
def test_basic_list_list():
list_snap = snap_from_dagster_type([[int]])
assert list_snap.key.startswith('Array')
child_type_keys = list_snap.get_child_type_keys()
assert child_type_keys
assert len(child_type_keys) == 1
assert child_type_keys[0] == 'Array.Int'
assert list_snap.enum_values is None
def test_list_of_dict():
inner_dict_dagster_type = Shape({'foo': Field(str)})
list_of_dict_snap = snap_from_dagster_type([inner_dict_dagster_type])
assert list_of_dict_snap.key.startswith('Array')
child_type_keys = list_of_dict_snap.get_child_type_keys()
assert child_type_keys
assert len(child_type_keys) == 1
assert child_type_keys[0].startswith('Shape')
def test_selector_of_things():
selector_snap = snap_from_dagster_type(Selector({'bar': Field(int)}))
assert selector_snap.key.startswith('Selector')
assert selector_snap.kind == ConfigTypeKind.SELECTOR
assert selector_snap.fields and len(selector_snap.fields) == 1
field_snap = selector_snap.fields[0]
assert field_snap.name == 'bar'
assert field_snap.type_key == 'Int'
def test_kitchen_sink():
kitchen_sink = resolve_to_config_type(
[
{
'opt_list_of_int': Field(int, is_required=False),
'nested_dict': {
'list_list': [[int]],
'nested_selector': Field(
Selector({'some_field': int, 'more_list': Noneable([bool])})
),
},
}
]
)
kitchen_sink_snap = snap_from_dagster_type(kitchen_sink)
rehydrated_snap = deserialize_json_to_dagster_namedtuple(
serialize_dagster_namedtuple(kitchen_sink_snap)
)
assert kitchen_sink_snap == rehydrated_snap
def test_simple_pipeline_smoke_test():
@solid
def solid_without_config(_):
pass
@pipeline
def single_solid_pipeline():
solid_without_config()
config_schema_snapshot = build_config_schema_snapshot(single_solid_pipeline)
assert config_schema_snapshot.all_config_snaps_by_key
serialized = serialize_dagster_namedtuple(config_schema_snapshot)
rehydrated_config_schema_snapshot = deserialize_json_to_dagster_namedtuple(serialized)
assert config_schema_snapshot == rehydrated_config_schema_snapshot
def test_check_solid_config_correct():
@solid(config={'foo': str})
def solid_with_config(_):
pass
@pipeline
def single_solid_pipeline():
solid_with_config()
solid_config_key = solid_with_config.config_field.config_type.key
config_snaps = build_config_schema_snapshot(single_solid_pipeline).all_config_snaps_by_key
assert solid_config_key in config_snaps
solid_config_snap = config_snaps[solid_config_key]
assert solid_config_snap.kind == ConfigTypeKind.STRICT_SHAPE
assert len(solid_config_snap.fields) == 1
foo_field = solid_config_snap.fields[0]
assert foo_field.name == 'foo'
assert foo_field.type_key == 'String'
def test_check_solid_list_list_config_correct():
@solid(config={'list_list_int': [[{'bar': int}]]})
def solid_with_config(_):
pass
@pipeline
def single_solid_pipeline():
solid_with_config()
solid_config_key = solid_with_config.config_field.config_type.key
config_snaps = build_config_schema_snapshot(single_solid_pipeline).all_config_snaps_by_key
assert solid_config_key in config_snaps
solid_config_snap = config_snaps[solid_config_key]
assert solid_config_snap.kind == ConfigTypeKind.STRICT_SHAPE
assert len(solid_config_snap.fields) == 1
list_list_field = solid_config_snap.fields[0]
list_list_type_key = list_list_field.type_key
assert list_list_type_key.startswith('Array.Array.')
list_list_type = config_snaps[list_list_type_key]
assert list_list_type.kind == ConfigTypeKind.ARRAY
list_snap = config_snaps[list_list_type.inner_type_key]
assert list_snap.kind == ConfigTypeKind.ARRAY
assert config_snaps[list_snap.inner_type_key].kind == ConfigTypeKind.STRICT_SHAPE
def test_kitchen_sink_break_out():
@solid(
config_schema=[
{
'opt_list_of_int': Field([int], is_required=False),
'nested_dict': {
'list_list': [[int]],
'nested_selector': Selector(
{'some_field': int, 'noneable_list': Noneable([bool])}
),
},
}
]
)
def solid_with_kitchen_sink_config(_):
pass
@pipeline
def single_solid_pipeline():
solid_with_kitchen_sink_config()
config_snaps = build_config_schema_snapshot(single_solid_pipeline).all_config_snaps_by_key
solid_config_key = solid_with_kitchen_sink_config.config_field.config_type.key
assert solid_config_key in config_snaps
solid_config_snap = config_snaps[solid_config_key]
assert solid_config_snap.kind == ConfigTypeKind.ARRAY
dict_within_list = config_snaps[solid_config_snap.inner_type_key]
assert len(dict_within_list.fields) == 2
opt_field = dict_within_list.get_field('opt_list_of_int')
assert opt_field.is_required is False
assert config_snaps[opt_field.type_key].kind == ConfigTypeKind.ARRAY
nested_dict = config_snaps[dict_within_list.get_field('nested_dict').type_key]
assert len(nested_dict.fields) == 2
nested_selector = config_snaps[nested_dict.get_field('nested_selector').type_key]
noneable_list_bool = config_snaps[nested_selector.get_field('noneable_list').type_key]
assert noneable_list_bool.kind == ConfigTypeKind.NONEABLE
list_bool = config_snaps[noneable_list_bool.inner_type_key]
assert list_bool.kind == ConfigTypeKind.ARRAY
def test_multiple_modes():
@solid
def noop_solid(_):
pass
@resource(config={'a': int})
def a_resource(_):
pass
@resource(config={'b': int})
def b_resource(_):
pass
@pipeline(
mode_defs=[
ModeDefinition(name='mode_a', resource_defs={'resource': a_resource}),
ModeDefinition(name='mode_b', resource_defs={'resource': b_resource}),
]
)
def modez():
noop_solid()
config_snaps = build_config_schema_snapshot(modez).all_config_snaps_by_key
assert a_resource.config_field.config_type.key in config_snaps
assert b_resource.config_field.config_type.key in config_snaps
assert get_config_snap(modez, a_resource.config_field.config_type.key)
assert get_config_snap(modez, b_resource.config_field.config_type.key)
def get_config_snap(pipeline_def, key):
return pipeline_def.get_pipeline_snapshot().config_schema_snapshot.get_config_snap(key)
def test_scalar_union():
# Requiring resolve calls is bad: https://github.com/dagster-io/dagster/issues/2266
@solid(config=ScalarUnion(resolve_to_config_type(str), resolve_to_config_type({'bar': str})))
def solid_with_config(_):
pass
@pipeline
def single_solid_pipeline():
solid_with_config()
config_snaps = build_config_schema_snapshot(single_solid_pipeline).all_config_snaps_by_key
scalar_union_key = solid_with_config.config_field.config_type.key
assert scalar_union_key in config_snaps
assert config_snaps[config_snaps[scalar_union_key].scalar_type_key].key == 'String'
assert (
config_snaps[config_snaps[scalar_union_key].non_scalar_type_key].kind
== ConfigTypeKind.STRICT_SHAPE
)
def test_historical_config_type_snap(snapshot):
old_snap_json = '''{"__class__": "ConfigTypeSnap", "description": "", "enum_values": [], "fields": [], "given_name": "kjdkfjdkfjdkj", "key": "ksjdkfjdkfjd", "kind": {"__enum__": "ConfigTypeKind.STRICT_SHAPE"}, "type_param_keys": []}'''
old_snap = deserialize_json_to_dagster_namedtuple(old_snap_json)
snapshot.assert_match(serialize_pp(old_snap))
|
py | 7df9c7eb27a3045a8a3008e3d63962238d9fb57f | '''
FlappyBird Version 2.9.3 on Windows
Game for python3.5 [Windows]
A similar game to FlappyBird on Windows
For kick the bird you can use 'spacebar' or 'left mouse button'
By using 'p' you pause and resume the game and by using
'F3' you activate and deactivate development mode (you can not
die and there are rectangulars arround the sprites, so you can
see when the bird touches the sticks)
For feedback please write in the comments! or from menu
\BY PAUL KOCIAN OrangoMango (C)2019-2020
'''
#Program for python 3.5
try:
from tkinter import * #needed tkinter module
from tkinter import messagebox, filedialog
import tkinter.ttk as t
except ImportError:
raise ImportError("You need to install tkinter module for python3.5")
from random import randint
import time, sys, os, threading
try:
from playsound import *
except ImportError:
raise ImportError("You need to install playsound module for python3.5")
try: #####IMPORT MODULES AND NOT CRASH PROGRAM WITH TRY-EXCEPT######
import GamingIntro
except ImportError:
raise ImportError("Missing file module for this program: \'GamingIntro.py\'") #My module-file
try:
import HighscoreManager
except ImportError:
raise ImportError("Missing file module for this program: \'HighscoreManager.py\'") #My module-file
try:
import FeedbackInterface
except ImportError:
raise ImportError("Missing file module for this program: \'FeedbackInterface.py\'") #My module-file
try:
import Preferences
except ImportError:
raise ImportError("Missing file module for this program: \'Preferences.py\'") #My module-file
try:
from ErrorCase import ErrorManager
except ImportError:
raise ImportError("Missing file module for this program: \'ErrorCase.py\'") #My module-file
global home
#print(__file__.split("\ "[0])[2])
user = os.path.abspath("").split("\ "[0])[2]#input("Whats your current username directory on this computer?: ") #__file__.split("\ "[0])[2]#
home = "C:/Users/{0}".format(user) + "/"
#Sound
#directory, openfile, openfilename, openfilenames, openfiles, saveasfile, saveasfilename, commanddialog
highscore = 0
if os.path.isdir(home+".FlappyBird") == False: #Create data directory
os.mkdir(home+".FlappyBird")
first_time = True
else:
first_time = False
if os.path.exists(home+".FlappyBird/highscore.txt") == False:
f = open(home+".FlappyBird/highscore.txt", "w")
f.write(str(highscore))
f.close()
else:
f = open(home+".FlappyBird/highscore.txt", "r")
dt = f.read()
highscore = int(dt)
f.close()
class CollisionZone(): #Collision zone class for development mode
def __init__(self, game, x, y, x1, y1, color="blue"):
self.game = game
self.color = color
self.id = self.game.canvas.create_rectangle(x, y, x1, y1, outline=self.color)
def update(self, x, y, x1, y1):
self.game.canvas.delete(self.id)
self.id = self.game.canvas.create_rectangle(x, y, x1, y1, outline=self.color)
def __del__(self):
self.game.canvas.delete(self.id)
class Game():
def __init__(self, dev=False, directory=""):
self.dev = dev #development mode
self.dir = directory #path location
p = Preferences.Preference(pathinit=home, pt=self.dir)
if p.getData() == {}:
self.preferences_data = {'Kick-Key':'<space>', 'GamingIntro-Init':'0', \
'Bindings':'p;F3;s', 'DevMode':'False', 'Scales':'180;14', "Color":"#00ff00"}
else:
self.preferences_data = p.getData()
self.current_pref = [self.preferences_data['Kick-Key']] + \
self.preferences_data['Bindings'].split(";")
self.bars_space = int(self.preferences_data['Scales'].split(";")[0]) #space between bars (px)
self.current_pref = [self.preferences_data['Kick-Key']] + self.preferences_data['Bindings'].split(";")
self.tk = Tk()
self.tk.bind("<Key>", self.keys)
#self.tk.bind("<Destroy>", self.destroy)
menu = Menu(self.tk) #Menu for user
self.tk.config(menu=menu)
def callback():
self.gameover()
def hsc():
#init highscore from my HighscoreManager module
h = HighscoreManager.Highscore(users=os.listdir("C:/Users"), pathinit=home)
hs = h.getTable()
shs = h.getSortedTable(hs) #get sorted highscores
highscore = "Highscores: \n"
for k, v in shs.items():
highscore += str(k)+" "+str(v)+"; "
messagebox.showinfo("OMGames", highscore)
def w_fe():
f = FeedbackInterface.Feedback(pathinit=home, users=os.listdir("C:/Users"))
f.start()
def s_fe():
f = FeedbackInterface.Feedback(pathinit=home, users=os.listdir("C:/Users"))
f.see_feedbacks()
def pref():
#messagebox.showinfo("Missing Option", "This option needs to be continued")
p = Preferences.Preference(pathinit=home)
p.initialization()
#print(p.getData())
self.preferences_data = p.getData()
#print("preferences_data:", self.preferences_data)
#print("\nQUIIIIIIIIIII\n")
self.load_data_pref()
p.mainloop()
def reset():
for file in os.listdir(home+".FlappyBird"):
os.remove(home+".FlappyBird/"+file)
os.rmdir(home+".FlappyBird")
messagebox.showinfo("Info", "Game has been reset")
self.tk.destroy()
sys.exit()
#pref()
filemenu = Menu(menu, tearoff=0)
feedmenu = Menu(menu, tearoff=0)
prefmenu = Menu(menu, tearoff=0)
menu.add_cascade(label="Game", menu=filemenu)
menu.add_cascade(label="Feedback", menu=feedmenu)
menu.add_cascade(label="Preferences", menu=prefmenu)
feedmenu.add_command(label="Write feedback", command=w_fe)
feedmenu.add_command(label="See feedbacks", command=s_fe)
filemenu.add_command(label="See highscores", command=hsc)
filemenu.add_separator()
filemenu.add_command(label="Quit", command = callback)
prefmenu.add_command(label="Change Settings", command=pref)
prefmenu.add_command(label="Reset game", command=reset)
imgicon = PhotoImage(file=os.path.join(self.dir,self.dir+'FlappyBird_Game/icon.gif'), master=self.tk) #Set icon of game
self.tk.tk.call('wm', 'iconphoto', self.tk._w, imgicon)
self.tk.title("Flappy Bird (OMGames) V3.0.1") #Game title
self.canvas = Canvas(self.tk, width=600, height=500)
self.canvas.pack()
self.score = 0 #Default game values (score, highscore, attemps and if the game is Running)
self.attemps = 1
self.highscore = 0
self.sound = True
self.sound2 = True
self.gameIsRunning = False
self.score_text = self.canvas.create_text(290,20, fill="red", \
font="Purisa 20 bold", \
text="Score: %s Attemps: %s " \
"Highscore: %s" % (self.score, self.attemps, \
self.highscore))
self.canvas2 = Canvas(self.tk, width=600, height=100) #A second canvas for the bottom image
self.canvas2.pack()
self.pause = False #if game is paused
def destroy(self, evt=None):
try:
self.save_highscore()
self.tk.destroy()
except:
pass
def load_data_pref(self):
self.tk.unbind(self.current_pref[0])
self.tk.bind("<Key-l>")#self.preferences_data['Kick-Key'], ball.kick)
self.current_pref = [self.preferences_data['Kick-Key']] + self.preferences_data['Bindings'].split(";")
#print("\n", self.current_pref, self.preferences_data, "\n")
self.bars_space = int(self.preferences_data['Scales'].split(";")[0]) #space between bars (px)
def keys(self, evt):
if evt.keysym == self.current_pref[2] or evt.char == self.current_pref[2]: #For activating development mode
self.dev = (not self.dev)
if self.dev:
print("Development mode activated")
else:
print("Development mode deactivated")
if evt.char == self.current_pref[1] or evt.keysym == self.current_pref[1]: #for pause game
self.pause = not (self.pause)
if self.pause:
self.gameIsRunning = False
print("Game paused")
messagebox.showinfo("OMGames", "Game paused, press p to resume")
elif self.pause == False:
print("Game resumed")
messagebox.showinfo("OMGames", "Game resumed, after clicking the ok button the game will start")
self.gameIsRunning = True
self.mainloop()
def play_sound(self, path, bs=False, ks=False): #Playing sound effects using a thread so
def pl(pt): #that the game does not stop by playing the sound
playsound(pt)
if bs:
self.sound = True
'''if ks:
self.sound2 = True'''
if not bs:# or not ks:
x = threading.Thread(target=pl, args=(path,))
x.start()
elif bs:
if self.sound:
x = threading.Thread(target=pl, args=(path,))
self.sound = False
x.start()
'''elif ks:
if self.sound2:
x = threading.Thread(target=pl, args=(path,))
self.sound2 = False
x.start()'''
def mainloop(self): #Game Mainloop
try:
while True:
if self.gameIsRunning:
ball.draw() #Draw the bird
pali[pli[0]].draw() #Draw the sticks
pali[pli[1]].draw()
pali[pli[2]].draw()
pali_r[pri[0]].draw()
pali_r[pri[1]].draw()
pali_r[pri[2]].draw()
self.tk.update()
self.canvas.tag_raise(self.score_text)
time.sleep(0.01)
else:
self.tk.update()
except:
pass
def gameover(self):
self.gameIsRunning = False #Stop the game after gameover
self.tk.update()
self.play_sound(self.dir+"FlappyBird_Game/FlappyBird_Sounds/hit.mp3")
self.play_sound(self.dir+"FlappyBird_Game/FlappyBird_Sounds/die.mp3")
messagebox.showerror("Game over", "GAME OVER - Your Score is: %s Highscore: %s" % (self.score, \
self.highscore))
self.attemps += 1
self.update_score()
a = messagebox.askyesno("Game", "Do you want to continue playing?")
if a:
load = Tk()
load.title("Attemp retry: %s" % self.attemps)
ll = Label(load, text="Attemp retry: %s" % self.attemps)
ll.pack()
pr = t.Progressbar(load, length=150, value=0)
pr.pack()
vl = 0
while vl <= 135:
pr.config(value=vl)
load.update()#_idletasks()
time.sleep(0.7)
vl += randint(0,35)
l = Label(load, text="Done")
l.pack()
time.sleep(2)
self.destroy()
load.destroy()
try:
main(self.attemps, self.highscore, self.dir) #If the user wants to play again,
#main() is started one more time
except Exception as e:
error = ErrorManager(e)
error.showgui()
error.mainloop()
else:
messagebox.showinfo("Game", "See you Player - Attemps: %s" % self.attemps)
self.save_highscore()
def save_highscore(self):
f2 = open(home+".FlappyBird/highscore.txt", "w") #Save the current highscore
f2.write(str(self.highscore))
f2.close()
try:
self.tk.destroy()
except:
pass
def update_score(self):
self.canvas.itemconfig(self.score_text, text="Score: %s Attemps: %s " \
"Highscore: %s" % (self.score, self.attemps, self.highscore))
self.tk.update()
def GetImageCoords(self, id, cc): #Get coordinates of something
xy = self.canvas.coords(id) #2 items list of coords [x1, y1] because the function is for images
xy.append(xy[0]+cc[0]) #add width to x coord
xy.append(xy[1]+cc[1]) #add height to y coord
return xy #get 4 items list
def collision(self, ball_pos, pos_palo): #Fetch collision between two objects
if ((ball_pos[0] >= pos_palo[0] and ball_pos[0] <= pos_palo[2]) and \
(ball_pos[1] >= pos_palo[1] and ball_pos[1] <= pos_palo[3])) or \
((ball_pos[2] >= pos_palo[0] and ball_pos[2] <= pos_palo[2]) and \
(ball_pos[3] >= pos_palo[1] and ball_pos[3] <= pos_palo[3])):
return True
return False
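# Hedged worked example: with ball_pos = [10, 10, 40, 40] and
# pos_palo = [30, 0, 60, 300] the ball's bottom-right corner (40, 40) lies
# inside the stick's bounding box, so collision() returns True.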
class Ball():
def __init__(self, game, x, y, image):
self.game = game
self.ih, self.iw = (image.height(), image.width()) #save img width and height
self.xc, self.yc = (x, y)
self.id = self.game.canvas.create_image(x,y, image=image, anchor="nw")
self.y = 0.5
self.game.tk.bind(g.preferences_data['Kick-Key'], self.kick)
self.x = 0
self.s = True
def draw(self):
if self.game.dev and self.s: #if development mode is activated and is the first time after deactivated
self.collisionzone = CollisionZone(self.game, self.xc, self.yc, self.xc+self.iw, \
self.yc+self.ih, color="red") #create collision zone
self.s = False #it isn't more the first time
self.game.canvas.move(self.id, self.x, int(self.y)) #vedi int
self.y += 0.3 #the bird must go down
self.game.play_sound(self.game.dir+"FlappyBird_Game/FlappyBird_Sounds/swoosh.mp3", bs=True)
pos = self.game.GetImageCoords(self.id, [self.iw, self.ih])
if self.game.dev:
self.collisionzone.update(pos[0], pos[1], pos[2], pos[3]) #update collision zone
elif not self.s:
del self.collisionzone #delete collision zone
self.s = True
if pos[3] >= 500 or pos[1] <= 0: #if touching the borders
self.game.gameover()
def kick(self, evt):
if self.game.gameIsRunning:
#self.game.play_sound(self.game.dir+"FlappyBird_Game/FlappyBird_Sounds/wing.mp3", bs=True)
self.y -= int(self.game.preferences_data['Scales'].split(";")[1]) #kick the bird 17 pixel upper
class Palo():
def __init__(self, game, x, y, ball, image, image1):
self.game = game
self.ball = ball
self.image = image #top image
self.image1 = image1 #bottom image
self.xc, self.yc = (x, y)
self.id = self.game.canvas.create_image(x,y, image=image1, anchor="nw")
self.x = -1
self.y = 0
self.ih, self.iw = (image.height(), image.width())
self.coord = [x, y, x+self.iw, y+self.ih]
self.side = "bottom" #side of the stick
if self.game.dev:
self.collisionzone = CollisionZone(self.game, self.coord[0], self.coord[1], \
self.coord[2], self.coord[3])
self.s = True
def draw(self):
if self.game.dev and self.s:
self.collisionzone = CollisionZone(self.game, self.xc, self.yc, self.xc+self.iw, \
self.yc+self.ih)
self.s = False
self.game.canvas.move(self.id, int(self.x), self.y)
pos_palo = self.game.GetImageCoords(self.id, [self.iw, self.ih])
self.coord = pos_palo
if self.game.dev:
self.collisionzone.update(self.coord[0], self.coord[1], self.coord[2], self.coord[3])
elif not self.s:
del self.collisionzone
self.s = True
ball_pos = self.game.GetImageCoords(self.ball.id, [self.ball.iw, self.ball.ih])
if self.game.collision(ball_pos, pos_palo): #if touching the ball:
if self.game.dev: #with development mode you can not die!
print("GameOver::Status")
#time.sleep(0.4)
else:
self.game.gameover()
if pos_palo[2] <= 0:
self.game.canvas.delete(self.id)
#choose if after the border the stick it will be with side bottom or side top
if bool(randint(0,1)): #random choose #top
y = randint(-60, 0)
self.id = self.game.canvas.create_image(600,y, image=self.image, anchor="nw")
self.side = "top"
return
else: #bottom
y = randint(350, 420)
self.id = self.game.canvas.create_image(600,y, image=self.image1, anchor="nw")
self.side = "bottom"
return
if pos_palo[2] == 220: #===SCORE MANIPULATION===
self.game.play_sound(self.game.dir+"FlappyBird_Game/FlappyBird_Sounds/point.mp3")
self.game.score += 1
if self.game.score > self.game.highscore: #if you beat your highscore
self.game.highscore = self.game.score
self.game.update_score()
class Palo_Riserva():
def __init__(self, game, palo, side, ball, image, image1):
self.game = game
self.palo = palo
self.ball = ball
self.image = image
self.image1 = image1
self.iw, self.ih = (image.width(), image.height())
#create the stick with the opposite side of the other corrispondent stick
if side == "bottom":
self.id = self.game.canvas.create_image(self.palo.coord[0], \
self.palo.coord[3]+self.game.bars_space, \
image=self.image1, anchor="nw")
elif side == "top":
self.id = self.game.canvas.create_image(self.palo.coord[0], \
(self.palo.coord[1]-self.game.bars_space)-self.ih, \
image=self.image, anchor="nw")
self.x = -1
self.y = 0
self.s = True
tempos = self.game.GetImageCoords(self.id, [self.iw, self.ih]) #a temporary position of the stick
self.s = True
self.xc, self.yc = (tempos[0], tempos[1])
if self.game.dev:
self.collisionzone = CollisionZone(self.game, tempos[0], tempos[1], tempos[2], tempos[3])
def draw(self):
if self.game.dev and self.s:
self.collisionzone = CollisionZone(self.game, self.xc, self.yc, self.xc+self.iw, self.yc+self.ih)
self.s = False
self.game.canvas.move(self.id, self.x, self.y)
pos_palo_r = self.game.GetImageCoords(self.id, [self.iw, self.ih])
ball_pos = self.game.GetImageCoords(self.ball.id, [self.ball.iw, self.ball.ih])
if self.game.dev:
self.collisionzone.update(pos_palo_r[0], pos_palo_r[1], pos_palo_r[2], pos_palo_r[3])
elif not self.s:
del self.collisionzone
self.s = True
if self.game.collision(ball_pos, pos_palo_r): #if touching ball:
if self.game.dev:
print("GameOver::Status")
#time.sleep(0.4)
else:
self.game.gameover()
if pos_palo_r[2] <= 0: #after touching border:
self.game.canvas.delete(self.id)
            if self.palo.side == "bottom": #top #if the corresponding stick is on the bottom, this stick goes on top
self.id = self.game.canvas.create_image(self.palo.coord[0], (self.palo.coord[1]-self.game.bars_space) \
-self.ih, image=self.image, anchor="nw")
elif self.palo.side == "top": #bottom
self.id = self.game.canvas.create_image(self.palo.coord[0], self.palo.coord[3]+self.game.bars_space, \
image=self.image1, anchor="nw")
def main(atmp, hs, path): #Main function for running game
global pali, pali_r, pri, pli, ball, g
g = Game(directory=path) #For development mode please write here 'g = Game(dev=True)'
    g.attemps = atmp #set game attempts
g.highscore = hs #set game highscore
g.update_score()
if int(g.preferences_data['GamingIntro-Init']):
i = GamingIntro.Intro(dir=path+"FlappyBird_Game/") #Normal Intro for game
i.start_prg()
g.dev = True if g.preferences_data['DevMode'] == 'True' else False
backgroundimage = PhotoImage(file=g.dir+"FlappyBird_Game/background.gif", master=g.tk) #load background image
btm = PhotoImage(file=g.dir+"FlappyBird_Game/bottom.gif", master=g.tk) #load bottom image
bg = g.canvas.create_image(0,0, image=backgroundimage, anchor="nw")
g.canvas2.create_image(0,0, image=btm, anchor="nw")
#===IMG===
palo1 = PhotoImage(file=g.dir+"FlappyBird_Game/palo1.gif", master=g.tk)
palo2 = PhotoImage(file=g.dir+"FlappyBird_Game/palo2.gif", master=g.tk)
bird = PhotoImage(file=g.dir+"FlappyBird_Game/bird.gif", master=g.tk)
#=========
ball = Ball(g, 120, 200, bird) #init the bird class
pali = {} #a dictionary containing all the primary sticks
pali_r = {} #a dictionary containing the secondary sticks
pri = ["rpalo1", "rpalo2", "rpalo3"]
pli = ["palo1", "palo2", "palo3"]
c = 0
for x in [610, 810, 1010]:
y_value = randint(250,300)
pali[pli[c]] = Palo(g, x, y_value, ball, palo1, palo2) #Update dictionaries
pali_r[pri[c]] = Palo_Riserva(g, pali[pli[c]], "top", ball, palo1, palo2)
c += 1
g.gameIsRunning = True #Start Game
messagebox.showinfo("Game", "Game will start when you click the ok button, 'Return' or 'space' key")
g.mainloop() #Start Mainloop
if first_time and (not os.path.exists(home+".FlappyBird/directory.txt")):
tk = Tk()
fcd = open(home+".FlappyBird/directory.txt", "w")
cd = filedialog.askdirectory(title="Select the FlappyBird directory", master=tk) + "/"
fcd.write(cd)
fcd.close()
tk.destroy()
#==========================|PATH AREA|===================================
fcd = open(home+".FlappyBird/directory.txt")
cd = fcd.read().rstrip('\n')
fcd.close()
#========================================================================
if first_time:
fi = GamingIntro.Intro(dir=cd+"FlappyBird_Game/", firsttime=True, button=True) #Intro for first time using the game
#==================|Introduction|======================<
intk = Tk()
intk.title("Introduction OMGames")
messagebox.showinfo("OMGames", "Current files directory: %sFlappyBird_Game/" % cd)
pl = Label(intk, text="INTRODUCTION", font=("bold")).pack()
v1 = "Warning: If there is an \'No such file or directory\' error, please " \
"change the directory file with your \'path\'"
v2 = "Warning: This program creates files: dir: \'.FlappyBird/\'"
v3 = "Warning: Program uses modules: tkinter, random, time, sqlite3, sys, os, " \
"playsound, webbrowser, ErrorCase, HighscoreManager, FeedbackInterface, Preferences and GamingIntro"
v4 = "Warning: This game is for 7+ and is not for videodipendent people (Blocked by waiting) - :-)"
v5 = "All related images are copyright by .GEARS 2013, Program copyright by " \
"OrangoMangoGames 2019-2020 (Paul Kocian)"
v6 = "Keys: Spacebar or left button to kick the bird, p to pause and to resume, F3 for " \
"turning on and off development mode, you can change them"
v7 = "Privacy Therms: Your highscore will be accessible by all the players and you can" \
" see their highscore"
v8 = "Remember that if you close this window, you will in any case accept the Privacy Terms"
labels = []
texts = [v1, v2, v3, v4, v5, v6, v7, v8]
for text in texts:
l = Label(intk, text=text)
l.pack()
labels.append(l)
def com():
messagebox.showinfo("OMGames", "Have fun! And beat your highscores!")
intk.destroy()
return
b = Button(intk, text="Accept Privacy and Continue", command=com)
b.pack()
intk.mainloop()
#======================================================<
if __name__ == '__main__':
try:
i = GamingIntro.Intro(dir=cd+"FlappyBird_Game/") #Normal Intro for game
i.start_prg()
main(1, highscore, cd)
except Exception as e: #Fetch errors
error = ErrorManager(e)
error.showgui()
error.mainloop()
|
py | 7df9c839e9c43598e7a3f3a68c5cbd6d59b5f3e4 | """
Move
====
`move.py` contains all relations regarding the nekoyume blockchain and
game moves.
"""
import datetime
import hashlib
import os
import random
import re
from bencode import bencode
from coincurve import PublicKey
from requests import get
from requests.exceptions import ConnectionError, Timeout
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm.collections import attribute_mapped_collection
from .battle.characters import Factory as CharacterFactory
from .battle.simul import Simulator
from .exc import InvalidMoveError, OutOfRandomError
from .orm import db
from .tables import Tables
from .util import deserialize_datetime, ensure_block, get_address
NUM_HACK_AND_SLASH_MONSTERS: int = 3
def get_my_public_url():
if 'PUBLIC_URL' in os.environ:
return os.environ['PUBLIC_URL']
try:
if os.environ.get('PORT', '80') != '80':
port = ':' + os.environ.get('PORT', '80')
else:
port = ''
ip = get('http://ip.42.pl/raw').text
has_public_address = get(
f'http://{ip}{port}/ping'
).text == 'pong'
except (ConnectionError, Timeout):
return None
if has_public_address:
return f'http://{ip}{port}'
else:
return None
class Move(db.Model):
"""This object contain general move information."""
__tablename__ = 'move'
#: move's hash
id = db.Column(db.String, primary_key=True)
#: move's block id. if the move isn't confirmed yet, this will be null
block_id = db.Column(db.Integer, db.ForeignKey('block.id'),
nullable=True, index=True)
#: move's block
block = db.relationship('Block', uselist=False, backref='moves')
    #: 33-byte long form (i.e., compressed form) of the user's public key.
user_public_key = db.Column(db.LargeBinary, nullable=False, index=True)
    #: user's address ("0x"-prefixed 40-char hexadecimal string; 42 chars total)
user_address = db.Column(db.String, nullable=False, index=True)
#: move's signature (71 bytes)
signature = db.Column(db.LargeBinary, nullable=False)
#: move name
name = db.Column(db.String, nullable=False, index=True)
    #: move details. it contains the parameters of the move
details = association_proxy(
'move_details', 'value',
creator=lambda k, v: MoveDetail(key=k, value=v)
)
#: move tax (not implemented yet)
tax = db.Column(db.BigInteger, default=0, nullable=False)
#: move creation datetime.
created_at = db.Column(db.DateTime, nullable=False,
default=datetime.datetime.utcnow)
__table_args__ = (
db.CheckConstraint(
db.func.lower(user_address).like('0x%') &
(db.func.length(user_address) == 42)
            # TODO: it should have a proper test that this is a 40-char hex string
),
db.CheckConstraint(db.func.length(user_public_key) == 33),
db.CheckConstraint(
(db.func.length(signature) >= 68) &
(db.func.length(signature) <= 71)
),
)
__mapper_args__ = {
'polymorphic_identity': 'move',
'polymorphic_on': name,
}
@classmethod
def deserialize(cls, serialized: dict, block_id=None) -> 'Move':
if block_id is None and serialized.get('block'):
block_id = serialized['block'].get('id')
return cls(
id=serialized['id'],
user_address=serialized['user_address'],
name=serialized['name'],
user_public_key=bytes.fromhex(serialized['user_public_key']),
signature=bytes.fromhex(serialized['signature']),
tax=serialized['tax'],
details=serialized['details'],
created_at=deserialize_datetime(serialized['created_at']),
block_id=block_id,
)
@property
def valid(self):
"""Check if this object is valid or not"""
if not self.signature:
return False
assert isinstance(self.signature, bytes)
assert 68 <= len(self.signature) <= 71
assert isinstance(self.user_public_key, bytes)
assert len(self.user_public_key) == 33
assert isinstance(self.user_address, str)
assert re.match(r'^(?:0[xX])?[0-9a-fA-F]{40}$', self.user_address)
public_key = PublicKey(self.user_public_key)
verified = public_key.verify(
self.signature,
self.serialize(include_signature=False),
)
if not verified:
return False
if get_address(public_key) != self.user_address:
return False
return self.id == self.hash
@property
def confirmed(self):
"""Check if this object is confirmed or not"""
return self.block and self.block.hash is not None
def serialize(self,
use_bencode=True,
include_signature=False,
include_id=False,
include_block=False):
"""
        This function serializes the move.
        :param use_bencode: check if you want to encode using bencode.
        :param include_signature: check if you want to include signature.
        :param include_id: check if you want to include the move's id.
:param include_block: check if you want to include block.
"""
binary = (lambda x: x) if use_bencode else bytes.hex
serialized = dict(
user_address=self.user_address,
name=self.name,
details={k: str(v) for k, v in dict(self.details).items()},
tax=self.tax,
created_at=str(self.created_at),
)
if include_signature:
serialized.update(
signature=binary(self.signature),
user_public_key=binary(self.user_public_key),
)
if include_id:
serialized['id'] = self.id
if include_block:
if self.block:
serialized['block'] = self.block.serialize(False)
else:
serialized['block'] = None
if use_bencode:
serialized = bencode(serialized)
return serialized
@property
def hash(self) -> str:
""" Get move hash """
return hashlib.sha256(
self.serialize(include_signature=True)
).hexdigest()
def get_randoms(self) -> list:
""" get random numbers by :doc:`Hash random <white_paper>` """
if not (self.block and self.block.hash and self.id):
return []
result = [ord(a) ^ ord(b) for a, b in zip(self.block.hash, self.id)]
result = result[self.block.difficulty // 4:]
return result
def make_random_generator(self) -> random.Random:
if self.block and self.block.hash and self.id:
bh = bytes.fromhex(self.block.hash)
mi = bytes.fromhex(self.id)
seed = bytes(a ^ b for a, b in zip(bh, mi))
else:
seed = 0
return random.Random(seed)
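    # Illustrative note (added; not part of the original module): the seed above
    # is the bytewise XOR of the hex-decoded block hash and move id, so every
    # confirmed move gets a deterministic but hard-to-predict RNG, e.g.
    #   bytes(a ^ b for a, b in zip(bytes.fromhex('ab'), bytes.fromhex('12')))
    # evaluates to b'\xb9' (0xab ^ 0x12 == 0xb9).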
def roll(self, randoms: list, dice: str, combine=True):
"""
Roll dices based on given randoms
>>> from nekoyume.move import Move
>>> move = Move()
>>> move.roll([1, 7, 3], '2d6')
6
        :param randoms: random numbers from
                        :func:`nekoyume.move.Move.get_randoms`
        :param dice: dice to roll (e.g. 2d6)
        :param combine: whether to return the combined result when rolling
                        multiple dice.
"""
result = []
if dice.find('+') > 0:
dice, plus = dice.split('+')
plus = int(plus)
else:
plus = 0
cnt, dice_type = (int(i) for i in dice.split('d'))
for i in range(cnt):
try:
result.append(randoms.pop() % dice_type + 1)
except IndexError:
raise OutOfRandomError
if combine:
return sum(result) + plus
else:
return result
@ensure_block
def execute(self):
raise NotImplementedError()
class MoveDetail(db.Model):
""" This object contains move's key/value information. """
#: move id
move_id = db.Column(db.String, db.ForeignKey('move.id'),
nullable=True, primary_key=True)
move = db.relationship(Move, backref=db.backref(
'move_details',
collection_class=attribute_mapped_collection("key"),
cascade="all, delete-orphan"
))
#: MoveDetail's key
key = db.Column(db.String, nullable=False, primary_key=True)
#: MoveDetail's value
value = db.Column(db.String, nullable=False, index=True)
class HackAndSlash(Move):
__mapper_args__ = {
'polymorphic_identity': 'hack_and_slash',
}
@ensure_block
def execute(self, avatar=None):
if not avatar:
from .user import Avatar
avatar = Avatar.get(self.user_address, self.block_id - 1)
if avatar.dead:
raise InvalidMoveError
# TODO Load other users avatar
rand = self.make_random_generator()
simul = Simulator(rand, avatar.zone)
factory = CharacterFactory()
my_character = factory.create_from_avatar(
avatar, self.details)
simul.characters.append(my_character)
appear_monsters = Tables.get_monster_appear_list(avatar.zone)
for i in range(NUM_HACK_AND_SLASH_MONSTERS):
simul.characters.append(
factory.create_monster(appear_monsters.select(rand)))
simul.simulate()
my_character.to_avatar(avatar)
return (avatar, dict(
type='hack_and_slash',
result=simul.result,
battle_logger=simul.logger,
))
class Sleep(Move):
__mapper_args__ = {
'polymorphic_identity': 'sleep',
}
@ensure_block
def execute(self, avatar=None):
if not avatar:
from .user import Avatar
avatar = Avatar.get(self.user_address, self.block_id - 1)
avatar.hp = avatar.hp_max
return avatar, dict(
type='sleep',
result='success',
)
class CreateNovice(Move):
__mapper_args__ = {
'polymorphic_identity': 'create_novice',
}
@ensure_block
def execute(self, avatar=None):
from .user import Avatar
gold = getattr(avatar, 'gold', 0)
name = self.details.get('name', '')[:10] + '#' + self.user_address[:6]
avatar = Avatar(
name=name,
user=self.user_address,
current_block=self.block,
gold=gold,
class_='novice',
level=1,
zone=list(Tables.zone.keys())[0],
gravatar_hash=self.details.get('gravatar_hash', 'HASH'),
)
factory = CharacterFactory()
character = factory.create_from_avatar(avatar, self.details)
character.to_avatar(avatar, hp_recover=True)
return (avatar, dict(
type='create_novice',
result='success',
))
class FirstClass(Move):
__mapper_args__ = {
'polymorphic_identity': 'first_class',
}
@ensure_block
def execute(self, avatar=None):
if not avatar:
from .user import Avatar
avatar = Avatar.get(self.user_address, self.block_id - 1)
if avatar.class_ != 'novice':
return avatar, dict(
type='first_class',
result='failed',
message="Already change class.",
)
avatar.class_ = self.details['class']
return avatar, dict(
type='first_class',
result='success',
)
class MoveZone(Move):
__mapper_args__ = {
'polymorphic_identity': 'move_zone',
}
@ensure_block
def execute(self, avatar=None):
if not avatar:
from .user import Avatar
avatar = Avatar.get(self.user_address, self.block_id - 1)
zone = self.details['zone']
if zone not in Tables.zone:
return avatar, dict(
type='move_zone',
result='failed',
message="Invalid zone.",
)
avatar.zone = zone
return avatar, dict(
type='move_zone',
result='success',
)
class LevelUp(Move):
__mapper_args__ = {
'polymorphic_identity': 'level_up',
}
@ensure_block
def execute(self, avatar=None):
if not avatar:
from .user import Avatar
avatar = Avatar.get(self.user_address, self.block_id - 1)
exp_max = Tables.get_exp_max(avatar.level)
if exp_max == 0:
return avatar, dict(
type='level_up',
result='failed',
message="Max level.",
)
if avatar.exp < exp_max:
return avatar, dict(
type='level_up',
result='failed',
message="You don't have enough exp.",
)
avatar.exp -= exp_max
avatar.level += 1
setattr(avatar, self.details['new_status'],
getattr(avatar, self.details['new_status']) + 1)
return avatar, dict(
type='level_up',
result='success',
)
class Say(Move):
__mapper_args__ = {
'polymorphic_identity': 'say',
}
@ensure_block
def execute(self, avatar=None):
if not avatar:
from .user import Avatar
avatar = Avatar.get(self.user_address, self.block_id - 1)
return avatar, dict(
type='say',
message=self.details['content'],
)
class Send(Move):
__mapper_args__ = {
'polymorphic_identity': 'send',
}
@ensure_block
def execute(self, avatar=None):
if not avatar:
from .user import Avatar
avatar = Avatar.get(self.user_address, self.block_id - 1)
if int(self.details['amount']) <= 0:
return avatar, dict(
type='send',
result='fail',
message="You can't send items with a negative or zero amount."
)
if (self.details['item_index'] not in avatar.items or
avatar.items[self.details['item_index']]
- int(self.details['amount']) < 0):
return avatar, dict(
type='send',
result='fail',
message="You don't have enough items to send."
)
avatar.items[self.details['item_index']] -= int(self.details['amount'])
return avatar, dict(
type='send',
result='success',
)
def receive(self, receiver=None):
if not receiver:
from .user import Avatar
receiver = Avatar.get(self.details['receiver'], self.block_id - 1)
for i in range(int(self.details['amount'])):
receiver.get_item(self.details['item_name'])
return receiver, dict(
type='receive',
result='success',
)
class Sell(Move):
__mapper_args__ = {
'polymorphic_identity': 'sell',
}
class Buy(Move):
__mapper_args__ = {
'polymorphic_identity': 'buy',
}
|
py | 7df9c92f97e92df2de3fd0cf6e45e8c450239988 | import random
import gym
import numpy as np
class GridWorld(gym.Env):
def __init__(self, env_flag=2, append_context=False, continuous_action=True):
super(gym.Env).__init__()
self.deterministic = True
# A, B, C, s_0, D
# ------------------------
# | A, B, C | None |
# ------------------------
# | s_0 | D |
# ------------------------
# 0 stay
# 1 up
# 2 right
# 3 left
# 4 down
self.continuous_action = continuous_action
if self.continuous_action:
self.action_space = gym.spaces.Box(low=-1, high=1, shape=(1,))
else:
self.action_space = gym.spaces.Discrete(5)
self.observation_space = None
self._grid_escape_time = 0
self._grid_max_time = 1000
self._current_position = 0
self.env_flag = env_flag
self.append_context = append_context
self.middle_state = [2, 3, 4]
        assert self.env_flag in self.middle_state, 'env_flag must be one of {}.'.format(self.middle_state)
self._ind_to_name = {
0: 's0',
1: 'D',
2: 'A',
3: 'B',
4: 'C',
5: 'None'
}
self.reward_setting = {
0: 0,
1: 1,
2: 10,
3: -10,
4: 0,
5: 0
}
for k in self.reward_setting:
self.reward_setting[k] *= 0.1
self.state_space = len(self.reward_setting)
self._raw_state_length = self.state_space
if self.append_context:
self.state_space += len(self.middle_state)
self.diy_env = True
self.observation_space = gym.spaces.Box(0, 1, (self.state_space, ))
@property
def middle_state_embedding(self):
v = [0] * len(self.middle_state)
v[self.env_flag - 2] = 1
return v
def make_one_hot(self, state):
vec = [0] * self._raw_state_length
vec[state] = 1
return vec
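    # e.g. with the six raw states defined above, make_one_hot(2) returns
    # [0, 0, 1, 0, 0, 0] (added comment for illustration)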
def get_next_position_toy(self, action):
if self._current_position == 0:
if action == 0:
# to D
next_state = 1
else:
# to unknown position
next_state = self.env_flag
# elif self._current_position == 1:
# # keep at D
# next_state = 1
elif self._current_position in self.middle_state + [1]:
# to s0
next_state = 0
else:
raise NotImplementedError('current position exceeds range!!!')
return next_state
def get_next_position(self, action):
# ------------------------
# | A, B, C | None |
# ------------------------
# | s_0 | D |
# ------------------------
# action: 0 stay
# action: 1 up
# action: 2 right
# action: 3 left
# action: 4 down
# self._ind_to_name = {
# 0: 's0',
# 1: 'D',
# 2: 'A',
# 3: 'B',
# 4: 'C',
# 5: 'None'
# }
if not self.deterministic:
if random.random() > 0.5:
action = action
else:
action = random.randint(0, 4)
if action == 0:
if self._current_position in [2, 3, 4]:
return self.env_flag
return self._current_position
left_up_map = {
4: 0,
# 2: 5
}
action_transition_mapping = \
{
0: {1: self.env_flag, 2: 1},
1: {1: 5, 3: 0},
5: {3: self.env_flag, 4:1},
2: left_up_map,
3: left_up_map,
4: left_up_map
}
action_to_state = action_transition_mapping[self._current_position]
if action in action_to_state:
return action_to_state[action]
if self._current_position in [2, 3, 4]:
return self.env_flag
return self._current_position
def step(self, action):
self._grid_escape_time += 1
info = {}
if self.continuous_action:
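            # (added comment) map the continuous action in [-1, 1] onto the five
            # discrete moves: rescale to [0, 1], multiply by 5, floor, and clamp
            # the edge case of exactly 1.0 down to move 4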
action_tmp = (action[0] + 1) / 2
action_tmp = int(action_tmp * 5)
if action_tmp >= 5:
action_tmp = 4
next_state = self.get_next_position(action_tmp)
else:
assert isinstance(action, int), 'action should be int type rather than {}'.format(type(action))
next_state = self.get_next_position(action)
done = False # next_state == 1
if self._grid_escape_time >= self._grid_max_time:
done = True
reward = self.reward_setting[next_state]
info['current_position'] = self._ind_to_name[next_state]
next_state_vector = self.make_one_hot(next_state)
self._current_position = next_state
if self.append_context:
next_state_vector += self.middle_state_embedding
return next_state_vector, reward, done, info
def reset(self):
self._grid_escape_time = 0
self._current_position = 0
state = self.make_one_hot(self._current_position)
if self.append_context:
state += self.middle_state_embedding
return state
def seed(self, seed=None):
self.action_space.seed(seed)
class RandomGridWorld(GridWorld):
def __init__(self, append_context=False):
self.possible_choice = [2, 3, 4]
self.renv_flag = random.choice(self.possible_choice)
self.fix_env = None
super(RandomGridWorld, self).__init__(self.renv_flag, append_context)
def reset(self):
if self.fix_env is None:
self.renv_flag = random.choice(self.possible_choice)
self.env_flag = self.renv_flag
else:
self.renv_flag = self.env_flag = self.fix_env
return super(RandomGridWorld, self).reset()
def set_fix_env(self, fix_env):
self.renv_flag = self.env_flag = self.fix_env = fix_env
def set_task(self, task):
self.set_fix_env(task)
def sample_tasks(self, n_tasks):
if n_tasks < len(self.possible_choice):
tasks = [random.choice(self.possible_choice) for _ in range(n_tasks)]
else:
tasks = []
for i in range(n_tasks):
tasks.append(self.possible_choice[i % len(self.possible_choice)])
return tasks
@property
def env_parameter_vector_(self):
return np.array([(self.renv_flag - np.min(self.possible_choice)) / (np.max(self.possible_choice)
- np.min(self.possible_choice))])
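    # e.g. env_flag 3 maps to (3 - 2) / (4 - 2) = 0.5 under the normalization
    # above (added comment for illustration; possible flags are 2, 3 and 4)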
@property
def env_parameter_length(self):
return 1
from gym.envs.registration import register
register(
id='GridWorldNS-v2', entry_point=RandomGridWorld
)
if __name__ == '__main__':
import gym
env = gym.make('GridWorldNS-v2')
print('observation space: ', env.observation_space)
print('action space: ', env.action_space)
print(hasattr(env, 'rmdm_env_flag')) |
py | 7df9ca3daf76852d74d8144f772a538efc1a64f0 | # --------------------------------------------#
#   This part of the code is used to inspect the network structure
# --------------------------------------------#
import torch
from torchsummary import summary
from nets.yolo import YoloBody
if __name__ == "__main__":
    # use device to specify whether the network runs on the GPU or the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
m = YoloBody([[6, 7, 8], [3, 4, 5], [0, 1, 2]], 80).to(device)
summary(m, input_size=(3, 416, 416))
|
py | 7df9ca8db5520d8f2f2ef47a64936b4ad65ccc80 | """
Poison images by adding a mask
"""
from typing import Callable, Optional, Tuple
import numpy as np
from ..pipeline.allocation_query import AllocationQuery
from ..pipeline.operation import Operation
from ..pipeline.state import State
from ..pipeline.compiler import Compiler
class Poison(Operation):
"""Poison specified images by adding a mask with given opacity.
Operates on raw arrays (not tensors).
Parameters
----------
mask : ndarray
The mask to apply to each image.
alpha: float
The opacity of the mask.
indices : Sequence[int]
The indices of images that should have the mask applied.
clamp : Tuple[int, int]
Clamps the final pixel values between these two values (default: (0, 255)).
"""
def __init__(self, mask: np.ndarray, alpha: np.ndarray,
indices, clamp = (0, 255)):
super().__init__()
self.mask = mask
self.indices = np.sort(indices)
self.clamp = clamp
self.alpha = alpha
def generate_code(self) -> Callable:
alpha = np.repeat(self.alpha[:, :, None], 3, axis=2)
mask = self.mask.astype('float') * alpha
to_poison = self.indices
clamp = self.clamp
my_range = Compiler.get_iterator()
def poison(images, temp_array, indices):
for i in my_range(images.shape[0]):
sample_ix = indices[i]
# We check if the index is in the list of indices
# to poison
position = np.searchsorted(to_poison, sample_ix)
if position < len(to_poison) and to_poison[position] == sample_ix:
temp = temp_array[i]
temp[:] = images[i]
temp *= 1 - alpha
temp += mask
np.clip(temp, clamp[0], clamp[1], out=temp)
images[i] = temp
return images
poison.is_parallel = True
poison.with_indices = True
return poison
def declare_state_and_memory(self, previous_state: State) -> Tuple[State, Optional[AllocationQuery]]:
assert previous_state.jit_mode
# We do everything in place
return (previous_state, AllocationQuery(shape=previous_state.shape, dtype=np.float32))
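# --- Usage sketch (added; not part of the original module) -------------------
# A minimal, illustrative way to build this operation; the image shape and the
# surrounding pipeline are assumptions, not something defined in this file:
#
#   import numpy as np
#   mask = np.zeros((32, 32, 3), dtype='uint8')        # e.g. an all-black patch
#   alpha = np.zeros((32, 32), dtype='float32')
#   alpha[:4, :4] = 1.0                                 # fully opaque 4x4 corner
#   poison = Poison(mask=mask, alpha=alpha, indices=[0, 5, 42])
#
# `poison` is then placed in an image pipeline between a decoder and any
# tensor-conversion operations provided by the host framework.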
|
py | 7df9cb28c89a09c3f794c620faa08ebfc9c0a295 | # -*- coding: utf-8 -*-
#
# panic documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 21 12:25:59 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'panic'
copyright = u'2014, Author'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'panicdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'panic.tex', u'panic Documentation',
u'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'panic', u'panic Documentation',
[u'Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'panic', u'panic Documentation',
u'Author', 'panic', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'panic'
epub_author = u'Author'
epub_publisher = u'Author'
epub_copyright = u'2014, Author'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
|
py | 7df9cb609de7d6ee6b6e1b136619f097a4833b57 | # -*- coding: utf-8 -*-
from __future__ import (print_function, division, unicode_literals,
absolute_import)
from builtins import range, str, bytes
import os
import nibabel as nb
import numpy as np
from ...utils.misc import package_check
from ...utils import NUMPY_MMAP
from ..base import (BaseInterface, TraitedSpec, traits, File, OutputMultiPath,
BaseInterfaceInputSpec, isdefined)
have_nipy = True
try:
package_check('nipy')
except Exception as e:
have_nipy = False
else:
import nipy.modalities.fmri.design_matrix as dm
import nipy.modalities.fmri.glm as GLM
if have_nipy:
try:
BlockParadigm = dm.BlockParadigm
except AttributeError:
from nipy.modalities.fmri.experimental_paradigm import BlockParadigm
class FitGLMInputSpec(BaseInterfaceInputSpec):
session_info = traits.List(
minlen=1,
maxlen=1,
mandatory=True,
desc=('Session specific information generated by'
' ``modelgen.SpecifyModel``, FitGLM does '
              'not support multiple runs unless they are '
'concatenated (see SpecifyModel options)'))
hrf_model = traits.Enum(
'Canonical',
'Canonical With Derivative',
'FIR',
desc=("that specifies the hemodynamic reponse "
"function it can be 'Canonical', 'Canonical "
"With Derivative' or 'FIR'"),
usedefault=True)
drift_model = traits.Enum(
"Cosine",
"Polynomial",
"Blank",
desc=("string that specifies the desired drift "
"model, to be chosen among 'Polynomial', "
"'Cosine', 'Blank'"),
usedefault=True)
TR = traits.Float(mandatory=True)
model = traits.Enum(
"ar1",
"spherical",
desc=("autoregressive mode is available only for the "
"kalman method"),
usedefault=True)
method = traits.Enum(
"kalman",
"ols",
desc=("method to fit the model, ols or kalma; kalman "
"is more time consuming but it supports "
"autoregressive model"),
usedefault=True)
mask = traits.File(
exists=True,
desc=("restrict the fitting only to the region defined "
"by this mask"))
normalize_design_matrix = traits.Bool(
False,
desc=("normalize (zscore) the "
"regressors before fitting"),
usedefault=True)
save_residuals = traits.Bool(False, usedefault=True)
plot_design_matrix = traits.Bool(False, usedefault=True)
class FitGLMOutputSpec(TraitedSpec):
beta = File(exists=True)
nvbeta = traits.Any()
s2 = File(exists=True)
dof = traits.Any()
constants = traits.Any()
axis = traits.Any()
reg_names = traits.List()
residuals = traits.File()
a = File(exists=True)
class FitGLM(BaseInterface):
'''
Fit GLM model based on the specified design. Supports only single or concatenated runs.
'''
input_spec = FitGLMInputSpec
output_spec = FitGLMOutputSpec
def _run_interface(self, runtime):
session_info = self.inputs.session_info
functional_runs = self.inputs.session_info[0]['scans']
if isinstance(functional_runs, (str, bytes)):
functional_runs = [functional_runs]
nii = nb.load(functional_runs[0])
data = nii.get_data()
if isdefined(self.inputs.mask):
mask = nb.load(self.inputs.mask).get_data() > 0
else:
mask = np.ones(nii.shape[:3]) == 1
timeseries = data.copy()[mask, :]
del data
for functional_run in functional_runs[1:]:
nii = nb.load(functional_run, mmap=NUMPY_MMAP)
data = nii.get_data()
npdata = data.copy()
del data
timeseries = np.concatenate((timeseries, npdata[mask, :]), axis=1)
del npdata
nscans = timeseries.shape[1]
if 'hpf' in list(session_info[0].keys()):
hpf = session_info[0]['hpf']
drift_model = self.inputs.drift_model
else:
hpf = 0
drift_model = "Blank"
reg_names = []
for reg in session_info[0]['regress']:
reg_names.append(reg['name'])
reg_vals = np.zeros((nscans, len(reg_names)))
for i in range(len(reg_names)):
reg_vals[:, i] = np.array(
session_info[0]['regress'][i]['val']).reshape(1, -1)
frametimes = np.linspace(0, (nscans - 1) * self.inputs.TR, nscans)
conditions = []
onsets = []
duration = []
for i, cond in enumerate(session_info[0]['cond']):
onsets += cond['onset']
conditions += [cond['name']] * len(cond['onset'])
if len(cond['duration']) == 1:
duration += cond['duration'] * len(cond['onset'])
else:
duration += cond['duration']
if conditions:
paradigm = BlockParadigm(
con_id=conditions, onset=onsets, duration=duration)
else:
paradigm = None
design_matrix, self._reg_names = dm.dmtx_light(
frametimes,
paradigm,
drift_model=drift_model,
hfcut=hpf,
hrf_model=self.inputs.hrf_model,
add_regs=reg_vals,
add_reg_names=reg_names)
if self.inputs.normalize_design_matrix:
for i in range(len(self._reg_names) - 1):
design_matrix[:, i] = ((
design_matrix[:, i] - design_matrix[:, i].mean()) /
design_matrix[:, i].std())
if self.inputs.plot_design_matrix:
import pylab
pylab.pcolor(design_matrix)
pylab.savefig("design_matrix.pdf")
pylab.close()
pylab.clf()
glm = GLM.GeneralLinearModel()
glm.fit(
timeseries.T,
design_matrix,
method=self.inputs.method,
model=self.inputs.model)
self._beta_file = os.path.abspath("beta.nii")
beta = np.zeros(mask.shape + (glm.beta.shape[0], ))
beta[mask, :] = glm.beta.T
nb.save(nb.Nifti1Image(beta, nii.affine), self._beta_file)
self._s2_file = os.path.abspath("s2.nii")
s2 = np.zeros(mask.shape)
s2[mask] = glm.s2
nb.save(nb.Nifti1Image(s2, nii.affine), self._s2_file)
if self.inputs.save_residuals:
explained = np.dot(design_matrix, glm.beta)
residuals = np.zeros(mask.shape + (nscans, ))
residuals[mask, :] = timeseries - explained.T
self._residuals_file = os.path.abspath("residuals.nii")
nb.save(
nb.Nifti1Image(residuals, nii.affine), self._residuals_file)
self._nvbeta = glm.nvbeta
self._dof = glm.dof
self._constants = glm._constants
self._axis = glm._axis
if self.inputs.model == "ar1":
self._a_file = os.path.abspath("a.nii")
a = np.zeros(mask.shape)
a[mask] = glm.a.squeeze()
nb.save(nb.Nifti1Image(a, nii.affine), self._a_file)
self._model = glm.model
self._method = glm.method
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs["beta"] = self._beta_file
outputs["nvbeta"] = self._nvbeta
outputs["s2"] = self._s2_file
outputs["dof"] = self._dof
outputs["constants"] = self._constants
outputs["axis"] = self._axis
outputs["reg_names"] = self._reg_names
if self.inputs.model == "ar1":
outputs["a"] = self._a_file
if self.inputs.save_residuals:
outputs["residuals"] = self._residuals_file
return outputs
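# Illustrative usage sketch (added; not part of the original interface). The
# session_info value is assumed to come from modelgen.SpecifyModel:
#
#   fit = FitGLM()
#   fit.inputs.session_info = session_info   # single (or concatenated) run
#   fit.inputs.TR = 2.0
#   fit.inputs.model = 'ar1'
#   fit.inputs.method = 'kalman'
#   res = fit.run()
#   res.outputs.beta                         # path to beta.nii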
class EstimateContrastInputSpec(BaseInterfaceInputSpec):
contrasts = traits.List(
traits.Either(
traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str),
traits.List(traits.Float)),
traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str),
traits.List(traits.Float), traits.List(traits.Float)),
traits.Tuple(traits.Str, traits.Enum('F'),
traits.List(
traits.Either(
traits.Tuple(traits.Str, traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float)),
traits.Tuple(traits.Str, traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float),
traits.List(traits.Float)))))),
desc="""List of contrasts with each contrast being a list of the form:
[('name', 'stat', [condition list], [weight list], [session list])]. if
session list is None or not provided, all sessions are used. For F
contrasts, the condition list should contain previously defined
T-contrasts.""",
mandatory=True)
beta = File(
exists=True,
desc="beta coefficients of the fitted model",
mandatory=True)
nvbeta = traits.Any(mandatory=True)
s2 = File(
exists=True, desc="squared variance of the residuals", mandatory=True)
dof = traits.Any(desc="degrees of freedom", mandatory=True)
constants = traits.Any(mandatory=True)
axis = traits.Any(mandatory=True)
reg_names = traits.List(mandatory=True)
mask = traits.File(exists=True)
class EstimateContrastOutputSpec(TraitedSpec):
stat_maps = OutputMultiPath(File(exists=True))
z_maps = OutputMultiPath(File(exists=True))
p_maps = OutputMultiPath(File(exists=True))
class EstimateContrast(BaseInterface):
'''
Estimate contrast of a fitted model.
'''
input_spec = EstimateContrastInputSpec
output_spec = EstimateContrastOutputSpec
def _run_interface(self, runtime):
beta_nii = nb.load(self.inputs.beta)
if isdefined(self.inputs.mask):
mask = nb.load(self.inputs.mask).get_data() > 0
else:
mask = np.ones(beta_nii.shape[:3]) == 1
glm = GLM.GeneralLinearModel()
nii = nb.load(self.inputs.beta)
glm.beta = beta_nii.get_data().copy()[mask, :].T
glm.nvbeta = self.inputs.nvbeta
glm.s2 = nb.load(self.inputs.s2).get_data().copy()[mask]
glm.dof = self.inputs.dof
glm._axis = self.inputs.axis
glm._constants = self.inputs.constants
reg_names = self.inputs.reg_names
self._stat_maps = []
self._p_maps = []
self._z_maps = []
for contrast_def in self.inputs.contrasts:
name = contrast_def[0]
contrast = np.zeros(len(reg_names))
for i, reg_name in enumerate(reg_names):
if reg_name in contrast_def[2]:
idx = contrast_def[2].index(reg_name)
contrast[i] = contrast_def[3][idx]
est_contrast = glm.contrast(contrast)
stat_map = np.zeros(mask.shape)
stat_map[mask] = est_contrast.stat().T
stat_map_file = os.path.abspath(name + "_stat_map.nii")
nb.save(nb.Nifti1Image(stat_map, nii.affine), stat_map_file)
self._stat_maps.append(stat_map_file)
p_map = np.zeros(mask.shape)
p_map[mask] = est_contrast.pvalue().T
p_map_file = os.path.abspath(name + "_p_map.nii")
nb.save(nb.Nifti1Image(p_map, nii.affine), p_map_file)
self._p_maps.append(p_map_file)
z_map = np.zeros(mask.shape)
z_map[mask] = est_contrast.zscore().T
z_map_file = os.path.abspath(name + "_z_map.nii")
nb.save(nb.Nifti1Image(z_map, nii.affine), z_map_file)
self._z_maps.append(z_map_file)
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs["stat_maps"] = self._stat_maps
outputs["p_maps"] = self._p_maps
outputs["z_maps"] = self._z_maps
return outputs
|
py | 7df9cc409af38725bebff6d152d6c32723e853c1 | __all__ = ["aws_nsconnector"]
from .aws_nsconnector import NSConnector
|
py | 7df9cce6427fe5bbd24ef13f75c991c08a17d816 | #!/usr/bin/env python
u"""
calculate_geoid_undulation.py
Written by Tyler Sutterley (10/2021)
Wrapper function for computing geoid undulations from a gravity model
INPUTS:
lon: longitudinal points to calculate geoid height
lat: latitudinal points to calculate geoid height
gravity_model_file: full path to static gravity model file
OPTIONS:
ELLIPSOID: reference ellipsoid name
CLK66 = Clarke 1866
GRS67 = Geodetic Reference System 1967
GRS80 = Geodetic Reference System 1980
WGS72 = World Geodetic System 1972
WGS84 = World Geodetic System 1984
ATS77 = Quasi-earth centred ellipsoid for ATS77
NAD27 = North American Datum 1927 (=CLK66)
NAD83 = North American Datum 1983 (=GRS80)
INTER = International
KRASS = Krassovsky (USSR)
MAIRY = Modified Airy (Ireland 1965/1975)
TOPEX = TOPEX/POSEIDON ellipsoid
EGM96 = EGM 1996 gravity model
HGH80 = Hughes 1980 Ellipsoid used in some NSIDC data
LMAX: maximum spherical harmonic degree (level of truncation)
TIDE: tide system of output geoid
http://mitgcm.org/~mlosch/geoidcookbook/node9.html
tide_free: no permanent direct and indirect tidal potentials
this is the default (leaving the model as is)
mean_tide: restores permanent tidal potentials (direct and indirect)
zero_tide: restores permanent direct tidal potential
GAUSS: Gaussian Smoothing Radius in km (default is no filtering)
EPS: level of precision for calculating geoid height
ZIP: input gravity field file is compressed in an archive file
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
PROGRAM DEPENDENCIES:
geoid_undulation.py: geoidal undulation at a given latitude and longitude
read_ICGEM_harmonics.py: reads the coefficients for a given gravity model file
calculate_tidal_offset.py: calculates the C20 offset for a tidal system
real_potential.py: real potential at a latitude and height for gravity model
norm_potential.py: normal potential of an ellipsoid at a latitude and height
norm_gravity.py: normal gravity of an ellipsoid at a latitude and height
ref_ellipsoid.py: Computes parameters for a reference ellipsoid
gauss_weights.py: Computes Gaussian weights as a function of degree
UPDATE HISTORY:
Updated 10/2021: add more keyword options to match read ICGEM options
Updated 09/2021: define int/float precision to prevent deprecation warning
Updated 11/2020: added function docstrings
    Updated 07/2019: split read and wrapper function into separate files
Written 07/2017
"""
import numpy as np
from geoid_toolkit.geoid_undulation import geoid_undulation
from geoid_toolkit.read_ICGEM_harmonics import read_ICGEM_harmonics
#-- PURPOSE: calculate geoid heights at a set of latitudes and longitudes
def calculate_geoid_undulation(lon, lat, gravity_model_file, **kwargs):
"""
Wrapper function for computing geoid undulations from a gravity model
Arguments
---------
lon: longitudinal points to calculate geoid height
lat: latitudinal points to calculate geoid height
gravity_model_file: full path to static gravity model file
Keyword arguments
-----------------
ELLIPSOID: reference ellipsoid name
LMAX: maximum spherical harmonic degree (level of truncation)
TIDE: tide system of output geoid
GAUSS: Gaussian Smoothing Radius in km (default is no filtering)
EPS: level of precision for calculating geoid height
ZIP: input gravity field file is compressed in an archive file
Returns
-------
N: geoidal undulation for a given ellipsoid in meters
"""
#-- set default keyword arguments
kwargs.setdefault('LMAX',None)
kwargs.setdefault('ELLIPSOID','WGS84')
kwargs.setdefault('TIDE','tide_free')
kwargs.setdefault('GAUSS',0)
kwargs.setdefault('EPS',1e-8)
kwargs.setdefault('ZIP',False)
#-- read gravity model Ylms and change tide if specified
Ylms = read_ICGEM_harmonics(gravity_model_file,**kwargs)
R = np.float64(Ylms['radius'])
GM = np.float64(Ylms['earth_gravity_constant'])
LMAX = np.int64(Ylms['max_degree'])
#-- calculate geoid at coordinates
N = geoid_undulation(lat, lon, kwargs['ELLIPSOID'],
Ylms['clm'], Ylms['slm'], LMAX, R, GM,
GAUSS=kwargs['GAUSS'], EPS=kwargs['EPS'])
#-- return the geoid undulation
return N
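#-- Example (added, illustrative only): the gravity model filename below is an
#-- assumption; any ICGEM-format .gfc file with its default keywords will work.
#--   import numpy as np
#--   lon = np.array([10.0, 11.0])
#--   lat = np.array([60.0, 60.5])
#--   N = calculate_geoid_undulation(lon, lat, 'EGM2008.gfc',
#--       ELLIPSOID='WGS84', TIDE='tide_free', GAUSS=0)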
|
py | 7df9cd1983070df4eed2d837ac997d0fdbdc82fb | # =============================================================================
#
# Copyright (c) 2016, Cisco Systems
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
import re
from csmpe.plugins import CSMPlugin
from install import install_add_remove
from csmpe.core_plugins.csm_get_inventory.ios.plugin import get_package, get_inventory
class Plugin(CSMPlugin):
"""This plugin adds packages from repository to the device."""
name = "Install Add Plugin"
platforms = {'ASR900'}
phases = {'Add'}
os = {'IOS'}
def run(self):
server_repository_url = self.ctx.server_repository_url
if server_repository_url is None:
self.ctx.error("No repository provided")
return
packages = self.ctx.software_packages
if packages is None:
self.ctx.error("No package list provided")
return
self.ctx.info("Add Package(s) Pending")
self.ctx.post_status("Add Package(s) Pending")
for package in packages:
output = self.ctx.send('dir flash:' + package)
m = re.search('No such file', output)
if not m:
self.ctx.info("No action: {} exists in flash:".format(package))
continue
cmd = "copy {}/{} flash:".format(server_repository_url, package)
install_add_remove(self.ctx, cmd)
self.ctx.info("Package(s) Added Successfully")
# Refresh package and inventory information
get_package(self.ctx)
get_inventory(self.ctx)
|
py | 7df9ceae8123bcf8a4fc32031b69477131186f6f | def solve():
print('let\'s solve')
|
py | 7df9cfca8393cd72a482f7a4fdab511123b9a43b | from .__version__ import __version__
from .main import usum |
py | 7df9d103f0eeff38c3ff4bcbb30f61b5dba7240b | """empty message
Revision ID: ed776090c416
Revises:
Create Date: 2021-03-03 16:35:36.752993
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ed776090c416'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=80), nullable=False),
sa.Column('email', sa.String(length=80), nullable=False),
sa.Column('password', sa.LargeBinary(length=128), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('first_name', sa.String(length=30), nullable=True),
sa.Column('last_name', sa.String(length=30), nullable=True),
sa.Column('active', sa.Boolean(), nullable=True),
sa.Column('is_admin', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('username')
)
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('roles')
op.drop_table('users')
# ### end Alembic commands ###
|
py | 7df9d3d9f317ebacd61c76edc4b21b93d7e2fc9f | import socket
import grpc
import grpc_reflection.v1alpha.reflection as grpc_reflect
import logging
import os
from base64 import b64encode
from concurrent import futures
from http import HTTPStatus
from http.client import HTTPConnection, HTTPSConnection, HTTPException
from grpc_status import rpc_status
from google.rpc import code_pb2, status_pb2
from feed_pb2 import Image, DESCRIPTOR as feed_descriptor
from feed_pb2_grpc import (
ImageFeedServiceServicer,
add_ImageFeedServiceServicer_to_server
)
_TIMEOUT = 10
_PORT_ENV_VARIABLE = 'PORT'
_DEFAULT_PORT = 8061
_SERVICE_NAME = 'ImageFeedService'
_HTTPS_VARIABLE = 'HTTPS'
_HTTPS_DEFAULT = 'False'
_URL_ENV_VARIABLE = 'CAMERA_URL'
_REQUEST_URL_ENV_VARIABLE = 'REQUEST_URL'
_USER_ENV_VARIABLE = 'USER'
_PASS_ENV_VARIABLE = 'PWD'
class Server(ImageFeedServiceServicer):
def __init__(self, connection_url, request_url, user, password, https):
connection_class = HTTPSConnection if https else HTTPConnection
self.__connection = connection_class(connection_url, timeout=_TIMEOUT)
self.__request_url = request_url
self.__headers = {
'Accept': 'image/webp,image/png,image/svg+xml,image/*;'
'q=0.8,video/*;q=0.8,*/*;q=0.5',
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip, deflate',
}
if user and password:
user_and_pass = b64encode(
f'{user}:{password}'.encode('utf-8')
).decode("ascii")
self.__headers['Authorization'] = f'Basic {user_and_pass}'
def Get(self, request, context):
self.__connection.connect()
try:
self.__connection.request(
"GET",
self.__request_url,
headers=self.__headers)
response = self.__connection.getresponse()
except (socket.timeout, HTTPException) as ex:
logging.warning(
"Exception while getting image from camera feed: %s",
ex,
exc_info=True)
context.abort_with_status(rpc_status.to_status(
status_pb2.Status(
code=code_pb2.UNAVAILABLE,
message=f"Exception while getting image from camera feed: {ex}")))
if response.status != HTTPStatus.OK:
logging.error("Received response with status %s", response.status)
context.abort_with_status(rpc_status.to_status(
status_pb2.Status(
code=code_pb2.CANCELLED,
message=f"Exception while getting image from camera feed: "
f"Received status code {response.status}")))
img_bytes = response.read()
self.__connection.close()
return Image(data=img_bytes)
def find_env_variable(variable_name, required=True):
variable = os.getenv(variable_name)
if required and not variable:
logging.critical(
f'Unable to find environment variable: {variable_name}')
exit(1)
return variable
def find_https():
https = os.getenv(_HTTPS_VARIABLE, _HTTPS_DEFAULT)
if https in ('True', 'False'):
return https == 'True'
else:
logging.critical(
'\'%s\' should be a boolean variable: expected True or False',
_HTTPS_VARIABLE
)
exit(1)
def main():
connection_url = find_env_variable(_URL_ENV_VARIABLE)
request_url = find_env_variable(_REQUEST_URL_ENV_VARIABLE)
https = find_https()
logging.info(
'Connected to \'%s\' with request url \'%s\' with %s connection',
connection_url,
request_url,
'secure' if https else 'unsecure'
)
user = find_env_variable(_USER_ENV_VARIABLE, required=False)
password = find_env_variable(_PASS_ENV_VARIABLE, required=False)
server = grpc.server(futures.ThreadPoolExecutor())
add_ImageFeedServiceServicer_to_server(
Server(connection_url, request_url, user, password, https),
server
)
service_names = (
feed_descriptor.services_by_name[_SERVICE_NAME].full_name,
grpc_reflect.SERVICE_NAME
)
grpc_reflect.enable_server_reflection(service_names, server)
port = os.getenv(_PORT_ENV_VARIABLE, _DEFAULT_PORT)
server.add_insecure_port(f'[::]:{port}')
logging.info('Starting server at [::]:%d', port)
server.start()
server.wait_for_termination()
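# Illustrative client sketch (added; not part of this module). The stub class is
# assumed to be the standard grpc-codegen ImageFeedServiceStub from feed_pb2_grpc,
# and the exact request message type is defined in feed.proto, which is not shown:
#
#   channel = grpc.insecure_channel('localhost:8061')
#   stub = ImageFeedServiceStub(channel)
#   image = stub.Get(<request message>)   # returns an Image(data=...) proto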
if __name__ == '__main__':
logging.basicConfig(
format="[ %(levelname)s ] %(asctime)s (%(module)s) %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO)
main() |
py | 7df9d5a4ae69aff9083ae56c7940dfd0738f3064 | #!/usr/bin/env python
##############################################################################
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
import shlex
import time
from platforms.platform_base import PlatformBase
from utils.custom_logger import getLogger
from utils.subprocess_with_logger import processRun
from utils.utilities import getRunStatus, setRunStatus
class IOSPlatform(PlatformBase):
def __init__(self, tempdir, idb, args):
super(IOSPlatform, self).__init__(tempdir, args.ios_dir, idb,
args.hash_platform_mapping)
self.setPlatformHash(idb.device)
self.type = "ios"
self.app = None
def preprocess(self, *args, **kwargs):
assert "programs" in kwargs, "Must have programs specified"
programs = kwargs["programs"]
# find the first zipped app file
assert "program" in programs, "program is not specified"
program = programs["program"]
assert program.endswith(".ipa"), \
"IOS program must be an ipa file"
processRun(["unzip", "-o", "-d", self.tempdir, program])
# get the app name
app_dir = os.path.join(self.tempdir, "Payload")
dirs = [f for f in os.listdir(app_dir)
if os.path.isdir(os.path.join(app_dir, f))]
assert len(dirs) == 1, "Only one app in the Payload directory"
app_name = dirs[0]
self.app = os.path.join(app_dir, app_name)
del programs["program"]
bundle_id, _ = processRun(["osascript", "-e",
"id of app \"" + self.app + "\""])
assert len(bundle_id) > 0, "bundle id cannot be found"
self.util.setBundleId(bundle_id[0].strip())
        # We know this command will fail. Avoid propagating this
# failure to the upstream
success = getRunStatus()
self.util.run(["--bundle", self.app, "--uninstall", "--justlaunch"])
setRunStatus(success, overwrite=True)
def postprocess(self, *args, **kwargs):
success = getRunStatus()
self.util.run(["--bundle", self.app, "--uninstall_only"])
setRunStatus(success, overwrite=True)
def runBenchmark(self, cmd, *args, **kwargs):
if not isinstance(cmd, list):
cmd = shlex.split(cmd)
assert self.util.bundle_id is not None, "Bundle id is not specified"
arguments = self.getPairedArguments(cmd)
argument_filename = os.path.join(self.tempdir, "benchmark.json")
arguments_json = json.dumps(arguments, indent=2, sort_keys=True)
with open(argument_filename, "w") as f:
f.write(arguments_json)
tgt_argument_filename = os.path.join(self.tgt_dir, "benchmark.json")
self.util.push(argument_filename, tgt_argument_filename)
run_cmd = ["--bundle", self.app,
"--noninteractive", "--noinstall", "--unbuffered"]
platform_args = {}
if "platform_args" in kwargs:
platform_args = kwargs["platform_args"]
if "power" in platform_args and platform_args["power"]:
platform_args["non_blocking"] = True
run_cmd += ["--justlaunch"]
if arguments:
run_cmd += ["--args",
' '.join(["--" + x + " " + arguments[x]
for x in arguments])]
# the command may fail, but the err_output is what we need
log_screen = self.util.run(run_cmd, **platform_args)
return log_screen
def rebootDevice(self):
success = self.util.reboot()
if success:
time.sleep(180)
|
py | 7df9d5ffb240ec94c6451a4f8271d600d98b0770 | import discord as api
prefix = ':'
token = '<your token>'
intents = api.Intents.default()
intents.members = True
intents.presences = True
insensitiveCase = True
ownerID = <your id>
|
py | 7df9d60684822a10c54eb7bb154a6e74f4d7a055 | import unittest
from neurolib.utils.collections import (
BACKWARD_REPLACE,
FORWARD_REPLACE,
_sanitize_keys,
flat_dict_to_nested,
flatten_nested_dict,
sanitize_dot_dict,
star_dotdict,
unwrap_star_dotdict,
)
from neurolib.models.multimodel.builder.wilson_cowan import WilsonCowanNode
from neurolib.models.multimodel import MultiModel
class TestCollections(unittest.TestCase):
NESTED_DICT = {"a": {"b": "c", "d": "e"}}
FLAT_DICT_DOT = {"a.b": "c", "a.d": "e"}
PARAM_DICT = {
"mass0": {"a": 0.4, "b": 1.2, "c": "float", "noise": {"b": 12.0}},
"mass1": {"a": 0.4, "b": 1.2, "c": "int"},
}
PARAMS_ALL_A = {"mass0.a": 0.4, "mass1.a": 0.4}
PARAMS_ALL_B = {"mass0.b": 1.2, "mass0.noise.b": 12.0, "mass1.b": 1.2}
PARAMS_ALL_B_MINUS = {"mass0.b": 1.2, "mass1.b": 1.2}
PARAMS_ALL_B_MINUS_CHANGED = {"mass0.b": 2.7, "mass1.b": 2.7}
PARAMS_ALL_A_CHANGED = {"mass0.a": 0.7, "mass1.a": 0.7}
def test_flatten_nested_dict(self):
flat = flatten_nested_dict(self.NESTED_DICT, sep=".")
self.assertDictEqual(flat, self.FLAT_DICT_DOT)
def test_flat_unflat(self):
flat = flatten_nested_dict(self.NESTED_DICT, sep=".")
unflat = flat_dict_to_nested(flat)
self.assertDictEqual(self.NESTED_DICT, unflat)
def test_star_dotdict(self):
params = star_dotdict(flatten_nested_dict(self.PARAM_DICT), sep=".")
self.assertTrue(isinstance(params, star_dotdict))
# try get params by star
self.assertDictEqual(params["*a"], self.PARAMS_ALL_A)
# change params by star
params["*a"] = 0.7
self.assertDictEqual(params["*a"], self.PARAMS_ALL_A_CHANGED)
# delete params
del params["*a"]
self.assertFalse(params["*a"])
def test_star_dotdict_minus(self):
params = star_dotdict(flatten_nested_dict(self.PARAM_DICT), sep=".")
self.assertTrue(isinstance(params, star_dotdict))
# get params by star
self.assertDictEqual(params["*b"], self.PARAMS_ALL_B)
# get params by star and minus
self.assertDictEqual(params["*b|noise"], self.PARAMS_ALL_B_MINUS)
# change params by star and minus
params["*b|noise"] = 2.7
self.assertDictEqual(params["*b|noise"], self.PARAMS_ALL_B_MINUS_CHANGED)
# delete params by star and minus
del params["*b|noise"]
self.assertFalse(params["*b|noise"])
# check whether the `b` with noise stayed
self.assertEqual(len(params["*b"]), 1)
def test_sanitize_keys(self):
k = "mass1.tau*|noise"
k_san = _sanitize_keys(k, FORWARD_REPLACE)
self.assertEqual(k_san, k.replace("*", "STAR").replace("|", "MINUS"))
k_back = _sanitize_keys(k_san, BACKWARD_REPLACE)
self.assertEqual(k, k_back)
def test_sanitize_dotdict(self):
dct = {"mass1*tau": 2.5, "mass2*tau": 4.1, "mass2.x": 12.0}
should_be = {"mass1STARtau": 2.5, "mass2STARtau": 4.1, "mass2.x": 12.0}
dct_san = sanitize_dot_dict(dct, FORWARD_REPLACE)
self.assertDictEqual(dct_san, should_be)
dct_back = sanitize_dot_dict(dct_san, BACKWARD_REPLACE)
self.assertDictEqual(dct_back, dct)
def test_unwrap_star_dotdict(self):
wc = MultiModel.init_node(WilsonCowanNode())
dct = {"*tau": 2.5}
should_be = {
"WCnode_0.WCmassEXC_0.tau": 2.5,
"WCnode_0.WCmassEXC_0.noise_0.tau": 2.5,
"WCnode_0.WCmassINH_1.tau": 2.5,
"WCnode_0.WCmassINH_1.noise_0.tau": 2.5,
}
unwrapped = unwrap_star_dotdict(dct, wc)
self.assertDictEqual(unwrapped, should_be)
dct = {"STARtau": 2.5}
should_be = {
"WCnode_0.WCmassEXC_0.tau": 2.5,
"WCnode_0.WCmassEXC_0.noise_0.tau": 2.5,
"WCnode_0.WCmassINH_1.tau": 2.5,
"WCnode_0.WCmassINH_1.noise_0.tau": 2.5,
}
unwrapped = unwrap_star_dotdict(dct, wc, replaced_dict=BACKWARD_REPLACE)
self.assertDictEqual(unwrapped, should_be)
if __name__ == "__main__":
unittest.main()
|
py | 7df9d62687366f558b997893e6962ebbc9ad4f5b | import pytest
import os
import json
import shutil
from tempfile import mkdtemp
from Tests.Marketplace.marketplace_services import GCPConfig
from Tests.Marketplace.copy_and_upload_packs import PACKS_RESULTS_FILE
# disable-secrets-detection-start
class TestGetPackNames:
"""
Given:
- A csv list of pack names (ids)
When:
- Getting the pack paths
Then:
- Verify that we got the same packs
"""
@pytest.mark.parametrize("packs_names_input, expected_result", [
("pack1,pack2,pack1", {"pack1", "pack2"}),
("pack1, pack2, pack3", {"pack1", "pack2", "pack3"})
])
def test_get_pack_names_specific(self, packs_names_input, expected_result):
from Tests.Marketplace.copy_and_upload_packs import get_pack_names
modified_packs = get_pack_names(packs_names_input)
assert modified_packs == expected_result
def test_get_pack_names_all(self):
"""
Given:
- content repo path, packs folder path, ignored files list
When:
- Trying to get the pack names of all packs in content repo
Then:
- Verify that we got all packs in content repo
"""
from Tests.Marketplace.marketplace_services import CONTENT_ROOT_PATH, PACKS_FOLDER, IGNORED_FILES
from Tests.Marketplace.copy_and_upload_packs import get_pack_names
packs_full_path = os.path.join(CONTENT_ROOT_PATH, PACKS_FOLDER) # full path to Packs folder in content repo
expected_pack_names = {p for p in os.listdir(packs_full_path) if p not in IGNORED_FILES}
assert get_pack_names('all') == expected_pack_names
class TestHelperFunctions:
def test_get_successful_and_failed_packs(self):
"""
Given:
- File that doesn't exist
- Empty JSON file
- Valid JSON file
When:
- Loading the file of all failed packs from Prepare Content step in Create Instances job
Then:
- Verify that we get an empty dictionary
- Verify that we get an empty dictionary
- Verify that we get the expected dictionary
"""
from Tests.Marketplace.copy_and_upload_packs import get_successful_and_failed_packs
tempdir = mkdtemp()
file = os.path.join(tempdir, PACKS_RESULTS_FILE)
# Case 1: assert file does not exist
successful, failed = get_successful_and_failed_packs(file)
assert successful == {}
assert failed == {}
# Case 2: assert empty file
with open(file, "w") as f:
f.write('')
successful, failed = get_successful_and_failed_packs(file)
assert successful == {}
assert failed == {}
# Case 3: assert valid file
with open(file, "w") as f:
f.write(json.dumps({
"failed_packs": {"TestPack2": {"status": "status2", "aggregated": False}},
"successful_packs": {"TestPack1": {"status": "status1", "aggregated": True}}
}))
successful, failed = get_successful_and_failed_packs(file)
assert successful == {"TestPack1": {"status": "status1", "aggregated": True}}
successful_list = [*successful]
ans = 'TestPack1' in successful_list
assert ans
assert failed == {"TestPack2": {"status": "status2", "aggregated": False}}
failed_list = [*failed]
ans = 'TestPack2' in failed_list
assert ans
try:
shutil.rmtree(tempdir)
except shutil.Error:
pass
class TestRegex:
BUILD_BASE_PATH = f"{GCPConfig.GCS_PUBLIC_URL}/{GCPConfig.CI_BUILD_BUCKET}/content/builds"
BUILD_PATTERN = "upload-packs-build-flow/169013/content/packs"
@pytest.mark.parametrize("gcs_path, latest_zip_suffix", [
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/CommonWidgets/1.0.5/CommonWidgets.zip",
"CommonWidgets/1.0.5/CommonWidgets.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/Malware/1.2.4/Malware.zip",
"Malware/1.2.4/Malware.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/HelloWorld/1.1.11/HelloWorld.zip",
"HelloWorld/1.1.11/HelloWorld.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/CommonDashboards/1.0.0/CommonDashboards.zip",
"CommonDashboards/1.0.0/CommonDashboards.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/AutoFocus/1.1.9/AutoFocus.zip",
"AutoFocus/1.1.9/AutoFocus.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/UrlScan/1.0.5/UrlScan.zip",
"UrlScan/1.0.5/UrlScan.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/AccessInvestigation/1.2.2/AccessInvestigation.zip",
"AccessInvestigation/1.2.2/AccessInvestigation.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/Phishing/1.10.7/Phishing.zip",
"Phishing/1.10.7/Phishing.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/FeedTAXII/1.0.5/FeedTAXII.zip",
"FeedTAXII/1.0.5/FeedTAXII.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/WhereIsTheEgg/1.0.0/WhereIsTheEgg.zip",
"WhereIsTheEgg/1.0.0/WhereIsTheEgg.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/TIM_Processing/1.1.6/TIM_Processing.zip",
"TIM_Processing/1.1.6/TIM_Processing.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/DemistoRESTAPI/1.1.2/DemistoRESTAPI.zip",
"DemistoRESTAPI/1.1.2/DemistoRESTAPI.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/CommonPlaybooks/1.8.5/CommonPlaybooks.zip",
"CommonPlaybooks/1.8.5/CommonPlaybooks.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/Base/1.3.24/Base.zip",
"Base/1.3.24/Base.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/rasterize/1.0.4/rasterize.zip",
"rasterize/1.0.4/rasterize.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/VirusTotal/1.0.1/VirusTotal.zip",
"VirusTotal/1.0.1/VirusTotal.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/DemistoLocking/1.0.0/DemistoLocking.zip",
"DemistoLocking/1.0.0/DemistoLocking.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/TIM_SIEM/1.0.3/TIM_SIEM.zip",
"TIM_SIEM/1.0.3/TIM_SIEM.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/ExportIndicators/1.0.0/ExportIndicators.zip",
"ExportIndicators/1.0.0/ExportIndicators.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/DefaultPlaybook/1.0.2/DefaultPlaybook.zip",
"DefaultPlaybook/1.0.2/DefaultPlaybook.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/CommonTypes/2.2.1/CommonTypes.zip",
"CommonTypes/2.2.1/CommonTypes.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/ImageOCR/1.0.1/ImageOCR.zip",
"ImageOCR/1.0.1/ImageOCR.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/CommonScripts/1.2.69/CommonScripts.zip",
"CommonScripts/1.2.69/CommonScripts.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/Active_Directory_Query/1.0.7/Active_Directory_Query.zip",
"Active_Directory_Query/1.0.7/Active_Directory_Query.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/CommonReports/1.0.1/CommonReports.zip",
"CommonReports/1.0.1/CommonReports.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/Whois/1.1.6/Whois.zip",
"Whois/1.1.6/Whois.zip"),
(f"{BUILD_BASE_PATH}/{BUILD_PATTERN}/Blockade.io/1.0.0/Blockade.io.zip",
"Blockade.io/1.0.0/Blockade.io.zip"),
(f"{GCPConfig.GCS_PUBLIC_URL}/oproxy-dev.appspot.com/wow/content/packs/TIM-wow_a/99.98.99/TIM-wow_a.zip",
"TIM-wow_a/99.98.99/TIM-wow_a.zip")
])
def test_latest_zip_regex(self, gcs_path, latest_zip_suffix):
""" Testing all of our corepacks paths to make sure we are not missing one of them, last test is for a
generic bucket.
Given:
- A path of latest version pack in a gcs bucket
When:
- Searching for the pack latest zip suffix
Then:
- Getting the expected suffix
"""
from Tests.Marketplace.copy_and_upload_packs import LATEST_ZIP_REGEX
assert LATEST_ZIP_REGEX.findall(gcs_path)[0] == latest_zip_suffix
# disable-secrets-detection-end
|
py | 7df9d6b60dd0baca5e3aea21b36a39ef18ed1b25 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
#
# Generated Tue Apr 09 11:10:09 2013 by generateDS.py version 2.9a.
#
import sys
import getopt
import re as re_
import cybox_common
import base64
from datetime import datetime, tzinfo, timedelta
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError(
"Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser(huge_tree=True)
doc = etree_.parse(*args, **kwargs)
return doc
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(tzinfo):
def __init__(self, offset, name):
self.__offset = timedelta(minutes = offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return '%f' % input_data
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return input_data
def gds_validate_datetime(self, input_data, node, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if isinstance(input_data, basestring):
return input_data
if input_data.microsecond == 0:
_svalue = input_data.strftime('%Y-%m-%dT%H:%M:%S')
else:
_svalue = input_data.strftime('%Y-%m-%dT%H:%M:%S.%f')
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_parse_datetime(self, input_data, node, input_name=''):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
return dt.replace(tzinfo = tz)
def gds_validate_date(self, input_data, node, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
if isinstance(input_data, basestring):
return input_data
_svalue = input_data.strftime('%Y-%m-%d')
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_parse_date(self, input_data, node, input_name=''):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
return datetime.strptime(input_data,
'%Y-%m-%d').replace(tzinfo = tz)
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'utf-8'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
#
# Support/utility functions.
#
def showIndent(lwrite, level, pretty_print=True):
if pretty_print:
lwrite(' ' * level)
def quote_xml(inStr):
if not inStr:
return ''
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
return unicode(s1).encode(ExternalEncoding)
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
if '"' in s1:
if "'" in s1:
            s1 = '"%s"' % s1.replace('"', "&quot;")
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return unicode(s1).encode(ExternalEncoding)
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if XMLParser_import_library == XMLParser_import_lxml:
msg = '%s (element %s/line %d)' % (
msg, node.tag, node.sourceline, )
else:
msg = '%s (element %s)' % (msg, node.tag, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, lwrite, level, name, namespace, pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
lwrite(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(lwrite, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(lwrite, level, namespace, name, pretty_print)
def exportSimple(self, lwrite, level, name):
if self.content_type == MixedContainer.TypeString:
lwrite('<%s>%s</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
lwrite('<%s>%d</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
lwrite('<%s>%f</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
lwrite('<%s>%g</%s>' %
(self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
lwrite('<%s>%s</%s>' %
(self.name, base64.b64encode(self.value), self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, lwrite, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(lwrite, level)
lwrite('model_.MixedContainer(%d, %d, "%s", "%s"),\n'
% (self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(lwrite, level)
lwrite('model_.MixedContainer(%d, %d, "%s", "%s"),\n'
% (self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(lwrite, level)
lwrite('model_.MixedContainer(%d, %d, "%s",\n' % \
(self.category, self.content_type, self.name,))
self.value.exportLiteral(lwrite, level + 1)
showIndent(lwrite, level)
lwrite(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class GUIObjectType(cybox_common.ObjectPropertiesType):
"""The GUIObjectType type is intended to characterize generic GUI
objects."""
subclass = None
superclass = cybox_common.ObjectPropertiesType
def __init__(self, object_reference=None, Custom_Properties=None, xsi_type=None, Height=None, Width=None):
super(GUIObjectType, self).__init__(object_reference, Custom_Properties, xsi_type )
self.Height = Height
self.Width = Width
def factory(*args_, **kwargs_):
if GUIObjectType.subclass:
return GUIObjectType.subclass(*args_, **kwargs_)
else:
return GUIObjectType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Height(self): return self.Height
def set_Height(self, Height): self.Height = Height
def validate_IntegerObjectPropertyType(self, value):
# Validate type cybox_common.IntegerObjectPropertyType, a restriction on None.
pass
def get_Width(self): return self.Width
def set_Width(self, Width): self.Width = Width
def hasContent_(self):
if (
self.Height is not None or
self.Width is not None or
super(GUIObjectType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, namespace_='GUIObj:', name_='GUIObjectType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='GUIObjectType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s%s>%s' % (namespace_, name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='GUIObj:', name_='GUIObjectType'):
super(GUIObjectType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='GUIObjectType')
def exportChildren(self, lwrite, level, namespace_='GUIObj:', name_='GUIObjectType', fromsubclass_=False, pretty_print=True):
super(GUIObjectType, self).exportChildren(lwrite, level, 'GUIObj:', name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Height is not None:
self.Height.export(lwrite, level, 'GUIObj:', name_='Height', pretty_print=pretty_print)
if self.Width is not None:
self.Width.export(lwrite, level, 'GUIObj:', name_='Width', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(GUIObjectType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Height':
obj_ = cybox_common.IntegerObjectPropertyType.factory()
obj_.build(child_)
self.set_Height(obj_)
elif nodeName_ == 'Width':
obj_ = cybox_common.IntegerObjectPropertyType.factory()
obj_.build(child_)
self.set_Width(obj_)
super(GUIObjectType, self).buildChildren(child_, node, nodeName_, True)
# end class GUIObjectType
GDSClassesMapping = {
'Build_Utility': cybox_common.BuildUtilityType,
'Errors': cybox_common.ErrorsType,
'Time': cybox_common.TimeType,
'Width': cybox_common.IntegerObjectPropertyType,
'Certificate_Issuer': cybox_common.StringObjectPropertyType,
'Metadata': cybox_common.MetadataType,
'Hash': cybox_common.HashType,
'Information_Source_Type': cybox_common.ControlledVocabularyStringType,
'Block_Hash_Value': cybox_common.HashValueType,
'Fuzzy_Hash_Structure': cybox_common.FuzzyHashStructureType,
'SubDatum': cybox_common.MetadataType,
'Segment_Hash': cybox_common.HashValueType,
'Digital_Signature': cybox_common.DigitalSignatureInfoType,
'Code_Snippets': cybox_common.CodeSnippetsType,
'Value': cybox_common.StringObjectPropertyType,
'Length': cybox_common.IntegerObjectPropertyType,
'Certificate_Subject': cybox_common.StringObjectPropertyType,
'Encoding': cybox_common.ControlledVocabularyStringType,
'Internationalization_Settings': cybox_common.InternationalizationSettingsType,
'Tool_Configuration': cybox_common.ToolConfigurationType,
'English_Translation': cybox_common.StringObjectPropertyType,
'Functions': cybox_common.FunctionsType,
'String_Value': cybox_common.StringObjectPropertyType,
'Build_Utility_Platform_Specification': cybox_common.PlatformSpecificationType,
'Compiler_Informal_Description': cybox_common.CompilerInformalDescriptionType,
'System': cybox_common.ObjectPropertiesType,
'Platform': cybox_common.PlatformSpecificationType,
'Usage_Context_Assumptions': cybox_common.UsageContextAssumptionsType,
'Type': cybox_common.ControlledVocabularyStringType,
'Compilers': cybox_common.CompilersType,
'Tool_Type': cybox_common.ControlledVocabularyStringType,
'String': cybox_common.ExtractedStringType,
'Tool': cybox_common.ToolInformationType,
'Build_Information': cybox_common.BuildInformationType,
'Tool_Hashes': cybox_common.HashListType,
'Compiler_Platform_Specification': cybox_common.PlatformSpecificationType,
'Error_Instances': cybox_common.ErrorInstancesType,
'Data_Segment': cybox_common.StringObjectPropertyType,
'Language': cybox_common.StringObjectPropertyType,
'Identifier': cybox_common.PlatformIdentifierType,
'Strings': cybox_common.ExtractedStringsType,
'File_System_Offset': cybox_common.IntegerObjectPropertyType,
'Reference_Description': cybox_common.StructuredTextType,
'Code_Snippet': cybox_common.ObjectPropertiesType,
'Configuration_Settings': cybox_common.ConfigurationSettingsType,
'Simple_Hash_Value': cybox_common.SimpleHashValueType,
'Byte_String_Value': cybox_common.HexBinaryObjectPropertyType,
'Instance': cybox_common.ObjectPropertiesType,
'Import': cybox_common.StringObjectPropertyType,
'Property': cybox_common.PropertyType,
'Tool_Specific_Data': cybox_common.ToolSpecificDataType,
'Execution_Environment': cybox_common.ExecutionEnvironmentType,
'Dependencies': cybox_common.DependenciesType,
'Offset': cybox_common.IntegerObjectPropertyType,
'Date': cybox_common.DateRangeType,
'Hashes': cybox_common.HashListType,
'Segments': cybox_common.HashSegmentsType,
'Segment_Count': cybox_common.IntegerObjectPropertyType,
'Usage_Context_Assumption': cybox_common.StructuredTextType,
'Block_Hash': cybox_common.FuzzyHashBlockType,
'Dependency': cybox_common.DependencyType,
'Error': cybox_common.ErrorType,
'Trigger_Point': cybox_common.HexBinaryObjectPropertyType,
'Environment_Variable': cybox_common.EnvironmentVariableType,
'Byte_Run': cybox_common.ByteRunType,
'Contributors': cybox_common.PersonnelType,
'Image_Offset': cybox_common.IntegerObjectPropertyType,
'Imports': cybox_common.ImportsType,
'Library': cybox_common.LibraryType,
'Height': cybox_common.IntegerObjectPropertyType,
'References': cybox_common.ToolReferencesType,
'Internal_Strings': cybox_common.InternalStringsType,
'Custom_Properties': cybox_common.CustomPropertiesType,
'Configuration_Setting': cybox_common.ConfigurationSettingType,
'Libraries': cybox_common.LibrariesType,
'Function': cybox_common.StringObjectPropertyType,
'Description': cybox_common.StructuredTextType,
'User_Account_Info': cybox_common.ObjectPropertiesType,
'Build_Configuration': cybox_common.BuildConfigurationType,
'Address': cybox_common.HexBinaryObjectPropertyType,
'Search_Within': cybox_common.IntegerObjectPropertyType,
'Segment': cybox_common.HashSegmentType,
'Compiler': cybox_common.CompilerType,
'Name': cybox_common.StringObjectPropertyType,
'Signature_Description': cybox_common.StringObjectPropertyType,
'Block_Size': cybox_common.IntegerObjectPropertyType,
'Search_Distance': cybox_common.IntegerObjectPropertyType,
'Fuzzy_Hash_Value': cybox_common.FuzzyHashValueType,
'Dependency_Description': cybox_common.StructuredTextType,
'Contributor': cybox_common.ContributorType,
'Tools': cybox_common.ToolsInformationType,
'Data_Size': cybox_common.DataSizeType,
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'GUI_Object'
rootClass = GUIObjectType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
# sys.stdout.write('<?xml version="1.0" ?>\n')
# rootObj.export(sys.stdout.write, 0, name_=rootTag,
# namespacedef_='',
# pretty_print=True)
return rootObj
def parseEtree(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'GUI_Object'
rootClass = GUIObjectType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
rootElement = rootObj.to_etree(None, name_=rootTag)
content = etree_.tostring(rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(content)
sys.stdout.write('\n')
return rootObj, rootElement
def parseString(inString):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'GUI_Object'
rootClass = GUIObjectType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
# sys.stdout.write('<?xml version="1.0" ?>\n')
# rootObj.export(sys.stdout.write, 0, name_="GUI_Object",
# namespacedef_='')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"GUIObjectType"
]
|
py | 7df9d75ef68d3e26450cd14eabe1779f17495665 | # Generated by Django 3.2 on 2021-04-29 20:05
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Authentications', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='student_id',
),
]
|
py | 7df9d89b2197d85e426f43526f8336958d453fb5 | import basic
while True:
text = input('SkriptPY >>> ')
result, error = basic.run('\n<stdin>', text)
if error: print(error.as_string())
else: print(result) |
py | 7df9d9aefbc53143a605f49a3788bfa668a2256a | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy as np
from collections import namedtuple
import paddle
from paddle.io import IterableDataset
from paddle.utils import try_import
from paddlenlp.utils.log import logger
from paddlenlp.transformers import tokenize_chinese_chars
__all__ = [
    'ClassifierIterator', 'MRCIterator', 'MCQIterator', 'ImdbTextPreprocessor',
    'HYPTextPreprocessor'
]
def get_related_pos(insts, seq_len, memory_len=128):
    """generate relative position ids"""
beg = seq_len + seq_len + memory_len
r_position = [list(range(beg - 1, seq_len - 1, -1)) + \
list(range(0, seq_len)) for i in range(len(insts))]
return np.array(r_position).astype('int64').reshape([len(insts), beg, 1])
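# Illustrative usage sketch (toy sizes assumed: seq_len=3, memory_len=2), kept
# as an inert helper so the module's behaviour stays unchanged on import:
def _get_related_pos_example():
    rel_pos = get_related_pos([[0, 0, 0]], seq_len=3, memory_len=2)
    # beg = 3 + 3 + 2 = 8, so rel_pos.shape == (1, 8, 1) and the single row
    # flattens to [7, 6, 5, 4, 3, 0, 1, 2]: a descending span over the memory
    # plus the previous segment, followed by 0..seq_len-1 for the current
    # segment.
    return rel_pos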
def pad_batch_data(insts,
insts_data_type="int64",
pad_idx=0,
final_cls=False,
pad_max_len=None,
return_pos=False,
return_input_mask=False,
return_max_len=False,
return_num_token=False,
return_seq_lens=False):
"""
Pad the instances to the max sequence length in batch, and generate the
corresponding position data and attention bias.
"""
return_list = []
if pad_max_len:
max_len = pad_max_len
else:
max_len = max(len(inst) for inst in insts)
# Any token included in dict can be used to pad, since the paddings' loss
# will be masked out by weights and make no effect on parameter gradients.
# Input id
if final_cls:
inst_data = np.array([
inst[:-1] + list([pad_idx] * (max_len - len(inst))) + [inst[-1]]
for inst in insts
])
else:
inst_data = np.array(
[inst + list([pad_idx] * (max_len - len(inst))) for inst in insts])
return_list += [inst_data.astype(insts_data_type).reshape([-1, max_len, 1])]
# Position id
if return_pos:
inst_pos = np.array([
list(range(0, len(inst))) + [pad_idx] * (max_len - len(inst))
for inst in insts
])
return_list += [inst_pos.astype("int64").reshape([-1, max_len, 1])]
if return_input_mask:
# This is used to avoid attention on paddings.
if final_cls:
input_mask_data = np.array([[1] * len(inst[:-1]) + [0] *
(max_len - len(inst)) + [1]
for inst in insts])
else:
input_mask_data = np.array([[1] * len(inst) + [0] *
(max_len - len(inst))
for inst in insts])
input_mask_data = np.expand_dims(input_mask_data, axis=-1)
return_list += [input_mask_data.astype("float32")]
if return_max_len:
return_list += [max_len]
if return_num_token:
num_token = 0
for inst in insts:
num_token += len(inst)
return_list += [num_token]
if return_seq_lens:
seq_lens_type = [-1]
seq_lens = np.array([len(inst) for inst in insts])
return_list += [seq_lens.astype("int64").reshape(seq_lens_type)]
return return_list if len(return_list) > 1 else return_list[0]
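# Illustrative usage sketch (toy token ids and pad id 0 assumed), kept as an
# inert helper so nothing runs on import:
def _pad_batch_data_example():
    toy_batch = [[101, 7, 8, 102], [101, 9, 102]]
    padded_ids, input_mask = pad_batch_data(
        toy_batch, pad_idx=0, return_input_mask=True)
    # padded_ids.shape == (2, 4, 1); the shorter instance is padded to
    # [101, 9, 102, 0] and its input_mask row ends in 0.0, so the pad
    # position is masked out of attention.
    return padded_ids, input_mask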
class TextPreprocessor(object):
def __call__(self, text):
raise NotImplementedError("TextPreprocessor object can't be called")
class ImdbTextPreprocessor(TextPreprocessor):
def __call__(self, text):
text = text.strip().replace('<br /><br />', ' ')
text = text.replace('\t', '')
return text
class HYPTextPreprocessor(TextPreprocessor):
def __init__(self):
self.bs4 = try_import('bs4')
def __call__(self, text):
text = self.bs4.BeautifulSoup(text, "html.parser").get_text()
text = text.strip().replace('\n', '').replace('\t', '')
return text
class ClassifierIterator(object):
def __init__(self,
dataset,
batch_size,
tokenizer,
trainer_num,
trainer_id,
max_seq_length=512,
memory_len=128,
repeat_input=False,
in_tokens=False,
mode="train",
random_seed=None,
preprocess_text_fn=None):
self.batch_size = batch_size
self.tokenizer = tokenizer
self.trainer_num = trainer_num
self.trainer_id = trainer_id
self.max_seq_length = max_seq_length
self.memory_len = memory_len
self.repeat_input = repeat_input
self.in_tokens = in_tokens
self.dataset = [data for data in dataset]
self.num_examples = None
self.mode = mode
self.shuffle = True if mode == "train" else False
if random_seed is None:
random_seed = 12345
self.random_seed = random_seed
self.preprocess_text_fn = preprocess_text_fn
def shuffle_sample(self):
if self.shuffle:
self.global_rng = np.random.RandomState(self.random_seed)
self.global_rng.shuffle(self.dataset)
def _cnt_list(self, inp):
"""Cnt_list"""
cnt = 0
for lit in inp:
if lit:
cnt += 1
return cnt
def _convert_to_features(self, example, qid):
"""
Convert example to features fed into model
"""
if "text" in example: # imdb
text = example["text"]
elif "sentence" in example: # iflytek
text = example["sentence"]
if self.preprocess_text_fn:
text = self.preprocess_text_fn(text)
label = example["label"]
doc_spans = []
_DocSpan = namedtuple("DocSpan", ["start", "length"])
start_offset = 0
max_tokens_for_doc = self.max_seq_length - 2
tokens_a = self.tokenizer.tokenize(text)
while start_offset < len(tokens_a):
length = len(tokens_a) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(tokens_a):
break
start_offset += min(length, self.memory_len)
features = []
Feature = namedtuple("Feature",
["src_ids", "label_id", "qid", "cal_loss"])
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = tokens_a[doc_span.start:doc_span.start +
doc_span.length] + ["[SEP]"] + ["[CLS]"]
token_ids = self.tokenizer.convert_tokens_to_ids(tokens)
features.append(
Feature(
src_ids=token_ids, label_id=label, qid=qid, cal_loss=1))
if self.repeat_input:
features_repeat = features
features = list(map(lambda x: x._replace(cal_loss=0), features))
features = features + features_repeat
return features
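    # Illustrative walk-through of the doc-span splitting above (toy sizes
    # assumed): with max_seq_length=512 the span budget is 510 tokens, so a
    # 600-token text with memory_len=128 yields two overlapping spans,
    # (start=0, length=510) and (start=128, length=472); each span is closed
    # with "[SEP]", "[CLS]" and emitted as its own Feature sharing the same
    # qid and label.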
def _get_samples(self, pre_batch_list, is_last=False):
if is_last:
# Pad batch
len_doc = [len(doc) for doc in pre_batch_list]
max_len_idx = len_doc.index(max(len_doc))
dirty_sample = pre_batch_list[max_len_idx][-1]._replace(cal_loss=0)
for sample_list in pre_batch_list:
sample_list.extend([dirty_sample] *
(max(len_doc) - len(sample_list)))
samples = []
min_len = min([len(doc) for doc in pre_batch_list])
for cnt in range(min_len):
for batch_idx in range(self.batch_size * self.trainer_num):
sample = pre_batch_list[batch_idx][cnt]
samples.append(sample)
for idx in range(len(pre_batch_list)):
pre_batch_list[idx] = pre_batch_list[idx][min_len:]
return samples
def _pad_batch_records(self, batch_records, gather_idx=[]):
batch_token_ids = [record.src_ids for record in batch_records]
if batch_records[0].label_id is not None:
batch_labels = [record.label_id for record in batch_records]
batch_labels = np.array(batch_labels).astype("int64").reshape(
[-1, 1])
else:
batch_labels = np.array([]).astype("int64").reshape([-1, 1])
# Qid
if batch_records[-1].qid is not None:
batch_qids = [record.qid for record in batch_records]
batch_qids = np.array(batch_qids).astype("int64").reshape([-1, 1])
else:
batch_qids = np.array([]).astype("int64").reshape([-1, 1])
if gather_idx:
batch_gather_idx = np.array(gather_idx).astype("int64").reshape(
[-1, 1])
need_cal_loss = np.array([1]).astype("int64")
else:
batch_gather_idx = np.array(list(range(len(batch_records)))).astype(
"int64").reshape([-1, 1])
need_cal_loss = np.array([0]).astype("int64")
# Padding
padded_token_ids, input_mask = pad_batch_data(
batch_token_ids, pad_idx=self.tokenizer.pad_token_id, pad_max_len=self.max_seq_length, \
final_cls=True, return_input_mask=True)
padded_task_ids = np.zeros_like(padded_token_ids, dtype="int64")
padded_position_ids = get_related_pos(padded_token_ids, \
self.max_seq_length, self.memory_len)
return_list = [
padded_token_ids, padded_position_ids, padded_task_ids, input_mask,
batch_labels, batch_qids, batch_gather_idx, need_cal_loss
]
return return_list
def _prepare_batch_data(self, examples):
batch_records, max_len, gather_idx = [], 0, []
for index, example in enumerate(examples):
max_len = max(max_len, len(example.src_ids))
if self.in_tokens:
to_append = (len(batch_records) + 1
) * max_len <= self.batch_size
else:
to_append = len(batch_records) < self.batch_size
if to_append:
batch_records.append(example)
if example.cal_loss == 1:
gather_idx.append(index % self.batch_size)
else:
yield self._pad_batch_records(batch_records, gather_idx)
batch_records, max_len = [example], len(example.src_ids)
gather_idx = [index % self.batch_size
] if example.cal_loss == 1 else []
yield self._pad_batch_records(batch_records, gather_idx)
def _create_instances(self):
examples = self.dataset
pre_batch_list = []
insert_idx = []
for qid, example in enumerate(examples):
features = self._convert_to_features(example, qid)
if self._cnt_list(
pre_batch_list) < self.batch_size * self.trainer_num:
if insert_idx:
pre_batch_list[insert_idx[0]] = features
insert_idx.pop(0)
else:
pre_batch_list.append(features)
if self._cnt_list(
pre_batch_list) == self.batch_size * self.trainer_num:
assert self._cnt_list(pre_batch_list) == len(
pre_batch_list), "the two value must be equal"
assert not insert_idx, "the insert_idx must be null"
sample_batch = self._get_samples(pre_batch_list)
for idx, lit in enumerate(pre_batch_list):
if not lit:
insert_idx.append(idx)
for batch_records in self._prepare_batch_data(sample_batch):
yield batch_records
if self.mode != "train":
if self._cnt_list(pre_batch_list):
pre_batch_list += [
[]
for _ in range(self.batch_size * self.trainer_num -
self._cnt_list(pre_batch_list))
]
sample_batch = self._get_samples(pre_batch_list, is_last=True)
for batch_records in self._prepare_batch_data(sample_batch):
yield batch_records
def __call__(self):
curr_id = 0
for batch_records in self._create_instances():
if curr_id == self.trainer_id or self.mode != "train":
yield batch_records
curr_id = (curr_id + 1) % self.trainer_num
def get_num_examples(self):
if self.num_examples is None:
self.num_examples = 0
for qid, example in enumerate(self.dataset):
self.num_examples += len(
self._convert_to_features(example, qid))
return self.num_examples
class MRCIterator(ClassifierIterator):
"""
Machine Reading Comprehension iterator. Only for answer extraction.
"""
def __init__(self,
dataset,
batch_size,
tokenizer,
trainer_num,
trainer_id,
max_seq_length=512,
memory_len=128,
repeat_input=False,
in_tokens=False,
mode="train",
random_seed=None,
doc_stride=128,
max_query_length=64):
super(MRCIterator, self).__init__(
dataset,
batch_size,
tokenizer,
trainer_num,
trainer_id,
max_seq_length,
memory_len,
repeat_input,
in_tokens,
mode,
random_seed,
preprocess_text_fn=None)
self.doc_stride = doc_stride
self.max_query_length = max_query_length
self.examples = []
self.features = []
self.features_all = []
self._preprocess_data()
def shuffle_sample(self):
if self.shuffle:
self.global_rng = np.random.RandomState(self.random_seed)
self.global_rng.shuffle(self.features_all)
def _convert_qa_to_examples(self):
Example = namedtuple('Example', [
'qas_id', 'question_text', 'doc_tokens', 'orig_answer_text',
'start_position', 'end_position'
])
examples = []
for qa in self.dataset:
qas_id = qa["id"]
question_text = qa["question"]
context = qa["context"]
start_pos = None
end_pos = None
orig_answer_text = None
if self.mode == 'train':
if len(qa["answers"]) != 1:
raise ValueError(
"For training, each question should have exactly 1 answer."
)
orig_answer_text = qa["answers"][0]
answer_offset = qa["answer_starts"][0]
answer_length = len(orig_answer_text)
doc_tokens = [
context[:answer_offset],
context[answer_offset:answer_offset + answer_length],
context[answer_offset + answer_length:]
]
start_pos = 1
end_pos = 1
actual_text = " ".join(doc_tokens[start_pos:(end_pos + 1)])
if orig_answer_text.islower():
actual_text = actual_text.lower()
if actual_text.find(orig_answer_text) == -1:
logger.info("Could not find answer: '%s' vs. '%s'" %
(actual_text, orig_answer_text))
continue
else:
doc_tokens = tokenize_chinese_chars(context)
example = Example(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_pos,
end_position=end_pos)
examples.append(example)
return examples
def _convert_example_to_feature(self, examples):
Feature = namedtuple("Feature", [
"qid", "example_index", "doc_span_index", "tokens",
"token_to_orig_map", "token_is_max_context", "src_ids",
"start_position", "end_position", "cal_loss"
])
features = []
self.features_all = []
unique_id = 1000
is_training = self.mode == "train"
print("total {} examples".format(len(examples)), flush=True)
for (example_index, example) in enumerate(examples):
query_tokens = self.tokenizer.tokenize(example.question_text)
if len(query_tokens) > self.max_query_length:
query_tokens = query_tokens[0:self.max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = self.tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position +
1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position,
tok_end_position) = self._improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position,
example.orig_answer_text)
max_tokens_for_doc = self.max_seq_length - len(query_tokens) - 3
_DocSpan = namedtuple("DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, self.doc_stride)
features_each = []
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
tokens.append("[CLS]")
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[i + 1] = tok_to_orig_index[
split_token_index]
is_max_context = self._check_is_max_context(
doc_spans, doc_span_index, split_token_index)
token_is_max_context[i + 1] = is_max_context
tokens += all_doc_tokens[doc_span.start:doc_span.start +
doc_span.length]
tokens.append("[SEP]")
for token in query_tokens:
tokens.append(token)
tokens.append("[SEP]")
token_ids = self.tokenizer.convert_tokens_to_ids(tokens)
start_position = None
end_position = None
if is_training:
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
else:
doc_offset = 1 #len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
feature = Feature(
qid=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
src_ids=token_ids,
start_position=start_position,
end_position=end_position,
cal_loss=1)
features.append(feature)
features_each.append(feature)
if example_index % 1000 == 0:
print(
"processing {} examples".format(example_index),
flush=True)
unique_id += 1
# Repeat
if self.repeat_input:
features_each_repeat = features_each
features_each = list(
                map(lambda x: x._replace(cal_loss=0), features_each))
features_each += features_each_repeat
self.features_all.append(features_each)
return features
def _preprocess_data(self):
# Construct examples
self.examples = self._convert_qa_to_examples()
# Construct features
self.features = self._convert_example_to_feature(self.examples)
def get_num_examples(self):
if not self.features_all:
self._preprocess_data()
return len(sum(self.features_all, []))
def _improve_answer_span(self, doc_tokens, input_start, input_end,
orig_answer_text):
"""Improve answer span"""
tok_answer_text = " ".join(self.tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(self, doc_spans, cur_span_index, position):
"""Check is max context"""
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
break
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context,
num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
if best_span_index > cur_span_index:
return False
return cur_span_index == best_span_index
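    # Illustrative scoring example (toy spans assumed): for doc_spans
    # [(start=0, length=4), (start=2, length=4)] and position=3, the scores
    # are min(3, 0) + 0.01 * 4 = 0.04 for the first span and
    # min(1, 2) + 0.01 * 4 = 1.04 for the second, so only the second span
    # treats token 3 as having maximum context.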
def _pad_batch_records(self, batch_records, gather_idx=[]):
"""Pad batch data"""
batch_token_ids = [record.src_ids for record in batch_records]
if self.mode == "train":
batch_start_position = [
record.start_position for record in batch_records
]
batch_end_position = [
record.end_position for record in batch_records
]
batch_start_position = np.array(batch_start_position).astype(
"int64").reshape([-1, 1])
batch_end_position = np.array(batch_end_position).astype(
"int64").reshape([-1, 1])
else:
batch_size = len(batch_token_ids)
batch_start_position = np.zeros(
shape=[batch_size, 1], dtype="int64")
batch_end_position = np.zeros(shape=[batch_size, 1], dtype="int64")
batch_qids = [record.qid for record in batch_records]
batch_qids = np.array(batch_qids).astype("int64").reshape([-1, 1])
if gather_idx:
batch_gather_idx = np.array(gather_idx).astype("int64").reshape(
[-1, 1])
need_cal_loss = np.array([1]).astype("int64")
else:
batch_gather_idx = np.array(list(range(len(batch_records)))).astype(
"int64").reshape([-1, 1])
need_cal_loss = np.array([0]).astype("int64")
# padding
padded_token_ids, input_mask = pad_batch_data(
batch_token_ids,
pad_idx=self.tokenizer.pad_token_id,
pad_max_len=self.max_seq_length,
return_input_mask=True)
padded_task_ids = np.zeros_like(padded_token_ids, dtype="int64")
padded_position_ids = get_related_pos(
padded_task_ids, self.max_seq_length, self.memory_len)
return_list = [
padded_token_ids, padded_position_ids, padded_task_ids, input_mask,
batch_start_position, batch_end_position, batch_qids,
batch_gather_idx, need_cal_loss
]
return return_list
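    # Illustrative sketch (not part of the original code, assuming
    # pad_batch_data pads every sequence to pad_max_len): for a batch of 8
    # records, batch_start_position, batch_end_position and batch_qids are
    # int64 arrays of shape (8, 1), while need_cal_loss is a single-element
    # flag telling the trainer whether this batch contributes to the loss.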
def _create_instances(self):
"""Generate batch records"""
pre_batch_list = []
insert_idx = []
for qid, features in enumerate(self.features_all):
if self._cnt_list(
pre_batch_list) < self.batch_size * self.trainer_num:
if insert_idx:
pre_batch_list[insert_idx[0]] = features
insert_idx.pop(0)
else:
pre_batch_list.append(features)
if self._cnt_list(
pre_batch_list) == self.batch_size * self.trainer_num:
assert self._cnt_list(pre_batch_list) == len(
pre_batch_list), "the two value must be equal"
assert not insert_idx, "the insert_idx must be null"
sample_batch = self._get_samples(pre_batch_list)
for idx, lit in enumerate(pre_batch_list):
if not lit:
insert_idx.append(idx)
for batch_records in self._prepare_batch_data(sample_batch):
yield batch_records
if self.mode != "train":
if self._cnt_list(pre_batch_list):
pre_batch_list += [
[]
for _ in range(self.batch_size * self.trainer_num -
self._cnt_list(pre_batch_list))
]
sample_batch = self._get_samples(pre_batch_list, is_last=True)
for batch_records in self._prepare_batch_data(sample_batch):
yield batch_records
class MCQIterator(MRCIterator):
"""
Multiple choice question iterator.
"""
def __init__(self,
dataset,
batch_size,
tokenizer,
trainer_num,
trainer_id,
max_seq_length=512,
memory_len=128,
repeat_input=False,
in_tokens=False,
mode="train",
random_seed=None,
doc_stride=128,
max_query_length=64,
choice_num=4):
self.choice_num = choice_num
super(MCQIterator, self).__init__(
dataset, batch_size, tokenizer, trainer_num, trainer_id,
max_seq_length, memory_len, repeat_input, in_tokens, mode,
random_seed)
def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
tokens_a = list(tokens_a)
tokens_b = list(tokens_b)
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
return tokens_a, tokens_b
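    # Illustrative sketch (not part of the original code): with
    # tokens_a = ['a', 'b', 'c', 'd'], tokens_b = ['x', 'y'] and
    # max_length = 4, the longer sequence is popped twice, so the method
    # returns (['a', 'b'], ['x', 'y']).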
def _convert_qa_to_examples(self):
Example = namedtuple(
'Example', ['qas_id', 'context', 'question', 'choice', 'label'])
examples = []
for qas_id, qa in enumerate(self.dataset):
context = '\n'.join(qa['context']).lower()
question = qa['question'].lower()
choice = [c.lower() for c in qa['choice']]
# pad empty choice
for k in range(len(choice), self.choice_num):
choice.append('')
label = qa['label']
example = Example(
qas_id=qas_id,
context=context,
question=question,
choice=choice,
label=label)
examples.append(example)
return examples
def _convert_example_to_feature(self, examples):
Feature = namedtuple(
'Feature', ['qid', 'src_ids', 'segment_ids', 'label', 'cal_loss'])
features = []
self.features_all = []
pad_token_id = self.tokenizer.pad_token_id
for (ex_index, example) in enumerate(examples):
context_tokens = self.tokenizer.tokenize(example.context)
question_tokens = self.tokenizer.tokenize(example.question)
choice_tokens_lst = [
self.tokenizer.tokenize(choice) for choice in example.choice
]
# nums = 4
question_choice_pairs = \
[self._truncate_seq_pair(question_tokens, choice_tokens, self.max_query_length - 2)
for choice_tokens in choice_tokens_lst]
total_qc_num = sum(
[(len(q) + len(c)) for q, c in question_choice_pairs])
max_tokens_for_doc = self.max_seq_length - total_qc_num - 4
_DocSpan = namedtuple("DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(context_tokens):
length = len(context_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(context_tokens):
break
start_offset += min(length, self.doc_stride)
features_each = []
for (doc_span_index, doc_span) in enumerate(doc_spans):
qa_features = []
for q_tokens, c_tokens in question_choice_pairs:
segment_tokens = ['[CLS]']
token_type_ids = [0]
segment_tokens += context_tokens[
doc_span.start:doc_span.start + doc_span.length]
token_type_ids += [0] * doc_span.length
segment_tokens += ['[SEP]']
token_type_ids += [0]
segment_tokens += q_tokens
token_type_ids += [1] * len(q_tokens)
segment_tokens += ['[SEP]']
token_type_ids += [1]
segment_tokens += c_tokens
token_type_ids += [1] * len(c_tokens)
segment_tokens += ['[SEP]']
token_type_ids += [1]
input_ids = self.tokenizer.convert_tokens_to_ids(
segment_tokens)
feature = Feature(
qid=example.qas_id,
label=example.label,
src_ids=input_ids,
segment_ids=token_type_ids,
cal_loss=1)
qa_features.append(feature)
features.append(qa_features)
features_each.append(qa_features)
# Repeat
if self.repeat_input:
features_each_repeat = features_each
                features_each = [
                    [f._replace(cal_loss=0) for f in qa_features]
                    for qa_features in features_each]
features_each += features_each_repeat
self.features_all.append(features_each)
return features
def _pad_batch_records(self, batch_records, gather_idx=[]):
batch_token_ids = [[record.src_ids for record in records]
for records in batch_records]
if batch_records[0][0].label is not None:
batch_labels = [[record.label for record in records]
for records in batch_records]
batch_labels = np.array(batch_labels).astype("int64").reshape(
[-1, 1])
else:
batch_labels = np.array([]).astype("int64").reshape([-1, 1])
# Qid
batch_qids = [[record.qid for record in records]
for records in batch_records]
batch_qids = np.array(batch_qids).astype("int64").reshape([-1, 1])
if gather_idx:
batch_gather_idx = np.array(gather_idx).astype("int64").reshape(
[-1, 1])
need_cal_loss = np.array([1]).astype("int64")
else:
batch_gather_idx = np.array(list(range(len(batch_records)))).astype(
"int64").reshape([-1, 1])
need_cal_loss = np.array([0]).astype("int64")
batch_task_ids = [[record.segment_ids for record in records]
for records in batch_records]
# Padding
batch_padded_token_ids = []
batch_input_mask = []
batch_padded_task_ids = []
batch_padded_position_ids = []
batch_size = len(batch_token_ids)
for i in range(batch_size):
padded_token_ids, input_mask = pad_batch_data(
batch_token_ids[i],
pad_idx=self.tokenizer.pad_token_id,
pad_max_len=self.max_seq_length,
return_input_mask=True)
padded_task_ids = pad_batch_data(
batch_task_ids[i],
pad_idx=self.tokenizer.pad_token_id,
pad_max_len=self.max_seq_length)
padded_position_ids = get_related_pos(
padded_task_ids, self.max_seq_length, self.memory_len)
batch_padded_token_ids.append(padded_token_ids)
batch_input_mask.append(input_mask)
batch_padded_task_ids.append(padded_task_ids)
batch_padded_position_ids.append(padded_position_ids)
batch_padded_token_ids = np.array(batch_padded_token_ids).astype(
"int64").reshape([batch_size * self.choice_num, -1, 1])
batch_padded_position_ids = np.array(batch_padded_position_ids).astype(
"int64").reshape([batch_size * self.choice_num, -1, 1])
batch_padded_task_ids = np.array(batch_padded_task_ids).astype(
"int64").reshape([batch_size * self.choice_num, -1, 1])
batch_input_mask = np.array(batch_input_mask).astype("float32").reshape(
[batch_size * self.choice_num, -1, 1])
return_list = [
batch_padded_token_ids, batch_padded_position_ids,
batch_padded_task_ids, batch_input_mask, batch_labels, batch_qids,
batch_gather_idx, need_cal_loss
]
return return_list
def _prepare_batch_data(self, examples_list):
batch_records, max_len, gather_idx = [], 0, []
real_batch_size = self.batch_size * self.choice_num
index = 0
for examples in examples_list:
records = []
gather_idx_candidate = []
for example in examples:
if example.cal_loss == 1:
gather_idx_candidate.append(index % real_batch_size)
max_len = max(max_len, len(example.src_ids))
records.append(example)
index += 1
if self.in_tokens:
to_append = (len(batch_records) + 1
) * self.choice_num * max_len <= self.batch_size
else:
to_append = len(batch_records) < self.batch_size
if to_append:
batch_records.append(records)
gather_idx += gather_idx_candidate
else:
yield self._pad_batch_records(batch_records, gather_idx)
batch_records, max_len = [records], max(
len(record.src_ids) for record in records)
start_index = index - len(records) + 1
gather_idx = gather_idx_candidate
if len(batch_records) > 0:
yield self._pad_batch_records(batch_records, gather_idx)
def _get_samples(self, pre_batch_list, is_last=False):
if is_last:
# Pad batch
len_doc = [[len(doc) for doc in doc_list]
for doc_list in pre_batch_list]
len_doc = list(itertools.chain(*len_doc))
max_len_idx = len_doc.index(max(len_doc))
doc_idx = max_len_idx % self.choice_num
doc_list_idx = max_len_idx // self.choice_num
dirty_sample = pre_batch_list[doc_list_idx][doc_idx][-1]._replace(
cal_loss=0)
for sample_list in pre_batch_list:
for samples in sample_list:
samples.extend([dirty_sample] *
(max(len_doc) - len(samples)))
samples = []
min_len = min([len(doc) for doc in pre_batch_list])
for cnt in range(min_len):
for batch_idx in range(self.batch_size * self.trainer_num):
sample = pre_batch_list[batch_idx][cnt]
samples.append(sample)
for idx in range(len(pre_batch_list)):
pre_batch_list[idx] = pre_batch_list[idx][min_len:]
return samples
class SemanticMatchingIterator(MRCIterator):
def _convert_qa_to_examples(self):
Example = namedtuple('Example',
['qid', 'text_a', 'text_b', 'text_c', 'label'])
examples = []
for qid, qa in enumerate(self.dataset):
text_a, text_b, text_c = list(
map(lambda x: x.replace('\n', '').strip(),
[qa["text_a"], qa["text_b"], qa["text_c"]]))
example = Example(
qid=qid,
text_a=text_a,
text_b=text_b,
text_c=text_c,
label=qa["label"])
examples += [example]
return examples
def _create_tokens_and_type_id(self, text_a_tokens, text_b_tokens, start,
length):
tokens = ['[CLS]'] + text_a_tokens[start:start + length] + [
'[SEP]'
] + text_b_tokens[start:start + length] + ['[SEP]']
token_type_ids = [0] + [0] * (length + 1) + [1] * (length + 1)
return tokens, token_type_ids
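    # Illustrative sketch (not part of the original code): with the sliced
    # text_a tokens ['a1', 'a2'] and text_b tokens ['b1', 'b2'], the method
    # returns
    #     tokens         = ['[CLS]', 'a1', 'a2', '[SEP]', 'b1', 'b2', '[SEP]']
    #     token_type_ids = [0, 0, 0, 0, 1, 1, 1]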
def _convert_example_to_feature(self, examples):
Feature = namedtuple('Feature', [
'qid', 'src_ids', 'segment_ids', 'pair_src_ids', 'pair_segment_ids',
'label', 'cal_loss'
])
features = []
self.features_all = []
pad_token_id = self.tokenizer.pad_token_id
for (ex_index, example) in enumerate(examples):
text_a_tokens = self.tokenizer.tokenize(example.text_a)
text_b_tokens = self.tokenizer.tokenize(example.text_b)
text_c_tokens = self.tokenizer.tokenize(example.text_c)
a_len, b_len, c_len = list(
map(lambda x: len(x),
[text_a_tokens, text_b_tokens, text_c_tokens]))
# Align 3 text
min_text_len = min([a_len, b_len, c_len])
text_a_tokens = text_a_tokens[:min_text_len]
text_b_tokens = text_b_tokens[:min_text_len]
text_c_tokens = text_c_tokens[:min_text_len]
_DocSpan = namedtuple("DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
max_tokens_for_doc = (self.max_seq_length - 3) // 2
while start_offset < len(text_a_tokens):
length = len(text_a_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(text_a_tokens):
break
start_offset += min(length, self.doc_stride)
features_each = []
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens1, token_type_ids1 = self._create_tokens_and_type_id(
text_a_tokens, text_b_tokens, doc_span.start,
doc_span.length)
tokens2, token_type_ids2 = self._create_tokens_and_type_id(
text_a_tokens, text_c_tokens, doc_span.start,
doc_span.length)
input_ids1 = self.tokenizer.convert_tokens_to_ids(tokens1)
input_ids2 = self.tokenizer.convert_tokens_to_ids(tokens2)
feature = Feature(
qid=example.qid,
label=example.label,
src_ids=input_ids1,
segment_ids=token_type_ids1,
pair_src_ids=input_ids2,
pair_segment_ids=token_type_ids2,
cal_loss=1)
features.append(feature)
features_each.append(feature)
# Repeat
if self.repeat_input:
features_each_repeat = features_each
features_each = list(
                    map(lambda x: x._replace(cal_loss=0), features_each))
features_each += features_each_repeat
self.features_all.append(features_each)
return features
def _create_pad_ids(self, batch_records, prefix=""):
src_ids = prefix + "src_ids"
segment_ids = prefix + "segment_ids"
batch_token_ids = [getattr(record, src_ids) for record in batch_records]
batch_task_ids = [
getattr(record, segment_ids) for record in batch_records
]
# Padding
padded_token_ids, input_mask = pad_batch_data(
batch_token_ids,
pad_idx=self.tokenizer.pad_token_id,
pad_max_len=self.max_seq_length,
return_input_mask=True)
padded_task_ids = pad_batch_data(
batch_task_ids,
pad_idx=self.tokenizer.pad_token_id,
pad_max_len=self.max_seq_length)
padded_position_ids = get_related_pos(
padded_task_ids, self.max_seq_length, self.memory_len)
return [
padded_token_ids, padded_position_ids, padded_task_ids, input_mask
]
def _pad_batch_records(self, batch_records, gather_idx=[]):
if batch_records[0].label is not None:
batch_labels = [record.label for record in batch_records]
batch_labels = np.array(batch_labels).astype("int64").reshape(
[-1, 1])
else:
batch_labels = np.array([]).astype("int64").reshape([-1, 1])
# Qid
batch_qids = [record.qid for record in batch_records]
batch_qids = np.array(batch_qids).astype("int64").reshape([-1, 1])
if gather_idx:
batch_gather_idx = np.array(gather_idx).astype("int64").reshape(
[-1, 1])
need_cal_loss = np.array([1]).astype("int64")
else:
batch_gather_idx = np.array(list(range(len(batch_records)))).astype(
"int64").reshape([-1, 1])
need_cal_loss = np.array([0]).astype("int64")
return_list = self._create_pad_ids(batch_records) \
+ self._create_pad_ids(batch_records, "pair_") \
+ [batch_labels, batch_qids, batch_gather_idx, need_cal_loss]
return return_list
class SequenceLabelingIterator(ClassifierIterator):
def __init__(self,
dataset,
batch_size,
tokenizer,
trainer_num,
trainer_id,
max_seq_length=512,
memory_len=128,
repeat_input=False,
in_tokens=False,
mode="train",
random_seed=None,
no_entity_id=-1):
super(SequenceLabelingIterator, self).__init__(
dataset,
batch_size,
tokenizer,
trainer_num,
trainer_id,
max_seq_length,
memory_len,
repeat_input,
in_tokens,
mode,
random_seed,
preprocess_text_fn=None)
self.no_entity_id = no_entity_id
def _convert_to_features(self, example, qid):
"""
Convert example to features fed into model
"""
tokens = example['tokens']
label = example["labels"]
doc_spans = []
_DocSpan = namedtuple("DocSpan", ["start", "length"])
start_offset = 0
max_tokens_for_doc = self.max_seq_length - 2
while start_offset < len(tokens):
length = len(tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(tokens):
break
start_offset += min(length, self.memory_len)
features = []
Feature = namedtuple("Feature",
["src_ids", "label_ids", "qid", "cal_loss"])
for (doc_span_index, doc_span) in enumerate(doc_spans):
curr_tokens = ["[CLS]"] + tokens[doc_span.start:doc_span.start +
doc_span.length] + ["[SEP]"]
token_ids = self.tokenizer.convert_tokens_to_ids(curr_tokens)
label = [self.no_entity_id
] + label[doc_span.start:doc_span.start +
doc_span.length] + [self.no_entity_id]
features.append(
Feature(
src_ids=token_ids, label_ids=label, qid=qid, cal_loss=1))
if self.repeat_input:
features_repeat = features
features = list(map(lambda x: x._replace(cal_loss=0), features))
features = features + features_repeat
return features
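    # Illustrative sketch (not part of the original code): with
    # max_seq_length = 6 (so max_tokens_for_doc = 4) and memory_len = 2, a
    # 7-token example is split into doc spans starting at offsets 0, 2 and 4
    # with lengths 4, 4 and 3; each span is wrapped in '[CLS]'/'[SEP]' and its
    # labels are padded with no_entity_id at both ends.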
def _pad_batch_records(self, batch_records, gather_idx=[]):
batch_token_ids = [record.src_ids for record in batch_records]
batch_length = [len(record.src_ids) for record in batch_records]
batch_length = np.array(batch_length).astype("int64").reshape([-1, 1])
if batch_records[0].label_ids is not None:
batch_labels = [record.label_ids for record in batch_records]
else:
batch_labels = np.array([]).astype("int64").reshape([-1, 1])
# Qid
if batch_records[-1].qid is not None:
batch_qids = [record.qid for record in batch_records]
batch_qids = np.array(batch_qids).astype("int64").reshape([-1, 1])
else:
batch_qids = np.array([]).astype("int64").reshape([-1, 1])
if gather_idx:
batch_gather_idx = np.array(gather_idx).astype("int64").reshape(
[-1, 1])
need_cal_loss = np.array([1]).astype("int64")
else:
batch_gather_idx = np.array(list(range(len(batch_records)))).astype(
"int64").reshape([-1, 1])
need_cal_loss = np.array([0]).astype("int64")
# Padding
padded_token_ids, input_mask = pad_batch_data(
batch_token_ids,
pad_idx=self.tokenizer.pad_token_id,
pad_max_len=self.max_seq_length,
return_input_mask=True)
        if batch_records[0].label_ids is not None:
            padded_batch_labels = pad_batch_data(
                batch_labels,
                pad_idx=self.no_entity_id,
                pad_max_len=self.max_seq_length)
        else:
            padded_batch_labels = batch_labels
padded_task_ids = np.zeros_like(padded_token_ids, dtype="int64")
padded_position_ids = get_related_pos(padded_token_ids, \
self.max_seq_length, self.memory_len)
return_list = [
padded_token_ids, padded_position_ids, padded_task_ids, input_mask,
padded_batch_labels, batch_length, batch_qids, batch_gather_idx,
need_cal_loss
]
return return_list
|
py | 7df9d9f07ca8301f11673eb43a4618adc5fd66f9 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import dxfile.dxtomo
|
py | 7df9dc54e5a35da158c7c5ed99f6ebd5c464abaf | """Support for Xiaomi Gateway alarm control panels."""
from functools import partial
import logging
from miio import DeviceException
from homeassistant.components.alarm_control_panel import (
SUPPORT_ALARM_ARM_AWAY,
AlarmControlPanelEntity,
)
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMING,
STATE_ALARM_DISARMED,
)
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
XIAOMI_STATE_ARMED_VALUE = "on"
XIAOMI_STATE_DISARMED_VALUE = "off"
XIAOMI_STATE_ARMING_VALUE = "oning"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Xiaomi Gateway Alarm from a config entry."""
entities = []
gateway = hass.data[DOMAIN][config_entry.entry_id]
entity = XiaomiGatewayAlarm(
gateway,
f"{config_entry.title} Alarm",
config_entry.data["model"],
config_entry.data["mac"],
config_entry.unique_id,
)
entities.append(entity)
async_add_entities(entities, update_before_add=True)
class XiaomiGatewayAlarm(AlarmControlPanelEntity):
"""Representation of the XiaomiGatewayAlarm."""
def __init__(
self, gateway_device, gateway_name, model, mac_address, gateway_device_id
):
"""Initialize the entity."""
self._gateway = gateway_device
self._name = gateway_name
self._gateway_device_id = gateway_device_id
self._unique_id = f"{model}-{mac_address}"
self._icon = "mdi:shield-home"
self._available = None
self._state = None
@property
def unique_id(self):
"""Return an unique ID."""
return self._unique_id
@property
def device_id(self):
"""Return the device id of the gateway."""
return self._gateway_device_id
@property
def device_info(self):
"""Return the device info of the gateway."""
return {
"identifiers": {(DOMAIN, self._gateway_device_id)},
}
@property
def name(self):
"""Return the name of this entity, if any."""
return self._name
@property
def icon(self):
"""Return the icon to use for device if any."""
return self._icon
@property
def available(self):
"""Return true when state is known."""
return self._available
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_AWAY
async def _try_command(self, mask_error, func, *args, **kwargs):
"""Call a device command handling error messages."""
try:
result = await self.hass.async_add_executor_job(
partial(func, *args, **kwargs)
)
_LOGGER.debug("Response received from miio device: %s", result)
except DeviceException as exc:
_LOGGER.error(mask_error, exc)
async def async_alarm_arm_away(self, code=None):
"""Turn on."""
await self._try_command(
"Turning the alarm on failed: %s", self._gateway.alarm.on
)
async def async_alarm_disarm(self, code=None):
"""Turn off."""
await self._try_command(
"Turning the alarm off failed: %s", self._gateway.alarm.off
)
async def async_update(self):
"""Fetch state from the device."""
try:
state = await self.hass.async_add_executor_job(self._gateway.alarm.status)
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
if state == XIAOMI_STATE_ARMED_VALUE:
self._state = STATE_ALARM_ARMED_AWAY
elif state == XIAOMI_STATE_DISARMED_VALUE:
self._state = STATE_ALARM_DISARMED
elif state == XIAOMI_STATE_ARMING_VALUE:
self._state = STATE_ALARM_ARMING
else:
_LOGGER.warning(
"New state (%s) doesn't match expected values: %s/%s/%s",
state,
XIAOMI_STATE_ARMED_VALUE,
XIAOMI_STATE_DISARMED_VALUE,
XIAOMI_STATE_ARMING_VALUE,
)
self._state = None
_LOGGER.debug("State value: %s", self._state)
|
py | 7df9dcf886f53a04847c482969a7d840262d08b9 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from magnumclient.common import base
from magnumclient.common import utils
from magnumclient import exceptions
# Derived classes may append their own custom attributes to this default list
CREATION_ATTRIBUTES = ['name', 'node_count', 'discovery_url', 'master_count']
class BaseTemplate(base.Resource):
# template_name must be overridden by any derived class.
# template_name should be an uppercase plural, e.g. "Clusters"
template_name = ''
def __repr__(self):
return "<" + self.__class__.template_name + " %s>" % self._info
class BaseTemplateManager(base.Manager):
# template_name must be overridden by any derived class.
# template_name should be a lowercase plural, e.g. "clusters"
template_name = ''
@classmethod
def _path(cls, id=None):
        if id:
            return '/v1/' + cls.template_name + '/%s' % id
        return '/v1/' + cls.template_name
def list(self, limit=None, marker=None, sort_key=None,
sort_dir=None, detail=False):
"""Retrieve a list of bays.
:param marker: Optional, the UUID of a bay, eg the last
bay from a previous result set. Return
the next result set.
:param limit: The maximum number of results to return per
request, if:
1) limit > 0, the maximum number of bays to return.
2) limit == 0, return the entire list of bays.
3) limit param is NOT specified (None), the number of items
returned respect the maximum imposed by the Magnum API
(see Magnum's api.max_limit option).
:param sort_key: Optional, field used for sorting.
:param sort_dir: Optional, direction of sorting, either 'asc' (the
default) or 'desc'.
:param detail: Optional, boolean whether to return detailed information
about bays.
:returns: A list of bays.
"""
if limit is not None:
limit = int(limit)
filters = utils.common_filters(marker, limit, sort_key, sort_dir)
path = ''
if detail:
path += 'detail'
if filters:
path += '?' + '&'.join(filters)
if limit is None:
return self._list(self._path(path), self.__class__.template_name)
else:
return self._list_pagination(self._path(path),
self.__class__.template_name,
limit=limit)
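    # Illustrative sketch (not part of the original module): for a derived
    # manager with template_name = 'clusters' (hypothetical), calling
    # list(detail=True, sort_key='name') requests a path like
    # '/v1/clusters/detail?sort_key=name', while passing a limit switches to
    # the paginated helper _list_pagination.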
def get(self, id):
try:
return self._list(self._path(id))[0]
except IndexError:
return None
def create(self, **kwargs):
new = {}
for (key, value) in kwargs.items():
if key in CREATION_ATTRIBUTES:
new[key] = value
else:
raise exceptions.InvalidAttribute(
"Key must be in %s" % ",".join(CREATION_ATTRIBUTES))
return self._create(self._path(), new)
def delete(self, id):
return self._delete(self._path(id))
def update(self, id, patch, rollback=False):
url = self._path(id)
if rollback:
url += '/?rollback=True'
return self._update(url, patch)
|
py | 7df9def8c7f3d9089be569e67a0ac5431c9b9cd5 | # -*- coding: utf8 -*-
"Convertor for scalar type"
def integer_convertor(target):
"Convert a variable to integer"
if target == '':
return 0
return int(target)
def float_convertor(target):
"Convert a variable to float"
if target == '':
return 0.0
return float(target)
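# Illustrative usage (not part of the original module):
# >>> integer_convertor('')
# 0
# >>> integer_convertor('42')
# 42
# >>> float_convertor('3.5')
# 3.5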
|
py | 7df9df84ca31f9813c9301c19ffa9762848a6d80 | from lib.flask_mailplus import send_template_message
from coder.app import create_celery_app
celery = create_celery_app()
@celery.task()
def deliver_contact_email(email, message):
"""
Send a contact e-mail.
:param email: E-mail address of the visitor
    :type email: str
:param message: E-mail message
    :type message: str
:return: None
"""
ctx = {'email': email, 'message': message}
send_template_message(subject='[Snake Eyes] Contact',
sender=email,
recipients=[celery.conf.get('MAIL_USERNAME')],
reply_to=email,
template='contact/mail/index', ctx=ctx)
return None
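# Illustrative usage (not part of the original module; the address is
# hypothetical): the task is normally queued from a Flask view, e.g.
#     deliver_contact_email.delay('visitor@example.com', 'Hello there')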
|
py | 7df9dfc673630c51ec8ef11004bedc612e948045 | # -*- coding: utf-8 -*-
from pybel.examples import sialic_acid_graph
from pybel.examples.sialic_acid_example import cd33, cd33_phosphorylated, shp2, syk, trem2
from pybel.manager.models import Edge, Namespace, Network
from pybel.manager.query_manager import graph_from_edges
from pybel.testing.cases import TemporaryCacheClsMixin
from pybel.testing.mocks import mock_bel_resources
chebi_url = 'https://arty.scai.fraunhofer.de/artifactory/bel/namespace/chebi/chebi-20170725.belns'
class TestSeeding(TemporaryCacheClsMixin):
"""This module tests the seeding functions in the query manager"""
@classmethod
def setUpClass(cls):
"""Adds the sialic acid subgraph for all query tests"""
super(TestSeeding, cls).setUpClass()
@mock_bel_resources
def insert(mock):
"""Inserts the Sialic Acid Subgraph using the mock resources"""
cls.manager.insert_graph(sialic_acid_graph, store_parts=True)
insert()
def test_namespace_existence(self):
a = 'https://arty.scai.fraunhofer.de/artifactory/bel/namespace/hgnc-human-genes/hgnc-human-genes-20170725.belns'
        n = self.manager.session.query(Namespace).filter(Namespace.url == a).one()
        self.assertIsNotNone(n)
def test_namespace_existence_b(self):
ns = self.manager.session.query(Namespace).filter(Namespace.url == chebi_url).one()
self.assertIsNotNone(ns)
def test_sialic_acid_in_node_store(self):
r = 'sialic acid'
n = self.manager.get_namespace_entry(chebi_url, r)
self.assertIsNotNone(n)
self.assertEqual(r, n.name)
def test_namespace_existence_c(self):
a = 'https://arty.scai.fraunhofer.de/artifactory/bel/namespace/go-biological-process/go-biological-process-20170725.belns'
self.manager.session.query(Namespace).filter(Namespace.url == a).one()
def test_network_existence(self):
networks = self.manager.session.query(Network).all()
self.assertEqual(1, len(networks))
def test_edge_existence(self):
edges = self.manager.session.query(Edge).all()
self.assertEqual(11, len(edges))
def test_seed_by_pmid(self):
pmids = ['26438529']
edges = self.manager.query_edges_by_pubmed_identifiers(pmids)
self.assertLess(0, len(edges))
def test_seed_by_pmid_no_result(self):
missing_pmids = ['11111']
edges = self.manager.query_edges_by_pubmed_identifiers(missing_pmids)
self.assertEqual(0, len(edges))
def test_seed_by_induction_raise(self):
"""Test that seeding by induction fails when an empty list is given."""
with self.assertRaises(ValueError):
self.manager.query_induction([])
def test_seed_by_induction_raise_length_one(self):
"""Test that seeding by induction fails when a list of length one is given."""
shp2_model = self.manager.get_node_by_dsl(shp2)
with self.assertRaises(ValueError):
self.manager.query_induction([shp2_model])
def test_seed_by_induction(self):
"""Test seeding by inducing over a list of nodes."""
shp2_model = self.manager.get_node_by_dsl(shp2)
syk_model = self.manager.get_node_by_dsl(syk)
trem2_model = self.manager.get_node_by_dsl(trem2)
edges = self.manager.query_induction([shp2_model, syk_model, trem2_model])
self.assertEqual(2, len(edges))
graph = graph_from_edges(edges)
self.assertEqual(3, graph.number_of_nodes(), msg='Nodes: {}'.format(graph.nodes()))
self.assertIn(trem2, graph)
self.assertIn(syk, graph)
self.assertIn(shp2, graph)
self.assertEqual(2, graph.number_of_edges())
def test_seed_by_neighbors(self):
"""Test seeding a graph by neighbors of a list of nodes."""
node = self.manager.get_node_by_dsl(shp2)
edges = self.manager.query_neighbors([node])
self.assertEqual(2, len(edges))
graph = graph_from_edges(edges)
self.assertEqual(4, graph.number_of_nodes(), msg='Nodes: {}'.format(graph.nodes()))
self.assertIn(cd33_phosphorylated, graph)
self.assertIn(cd33, graph)
        self.assertIn(syk, graph)
self.assertIn(shp2, graph)
self.assertEqual(3, graph.number_of_edges())
|
py | 7df9dfcc31d8d5cf7090db7c180d072ce3c9a85f | """This Python module implements the class ``ClassifierComparision``,
which can be used to compare the accuracy results of two or more
classification approaches (e.g. results from different fruits.Fruit
objects).
This file can also be used as a script invoked from the command line.
You get all available arguments with
>>> python configs_compare.py -h
The module can also be used without any dependency on fruits.
"""
import os
import argparse
from typing import List, Union, Tuple
import networkx as nx
import numpy as np
import pandas as pd
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from fruitalyser import _get_color
DEFAULT_COMPARISION_COLUMN = "FRUITS Acc"
class ClassifierComparision:
"""Implements methods for the comparision of two classification
techniques using the information of their accuracy on different
datasets.
:param acc1: A one dimensional numpy array containing accuracy
results of one technique (the one that is expected to be
better in general) for different datasets.
:type acc1: np.ndarray
:param acc2: A one dimensional numpy array containing accuracy
results of a second technique.
:type acc2: np.ndarray
:param label1: Short label that describes the first technique.
:type label1: str
:param label2: Short label that describes the second technique.
:type label2: str
"""
def __init__(self,
accuracies: np.ndarray,
labels: List[str]):
self._ndatasets = accuracies.shape[0]
self._nclassifiers = accuracies.shape[1]
if len(labels) != self._nclassifiers:
raise ValueError("Lengths of accuracies and labels differ")
self._accuracies = accuracies.copy()
maximum = self._accuracies.max()
if maximum > 1.0:
self._accuracies /= maximum
self._labels = labels
def scatterplot(self,
indices: Union[List[Tuple[int, int]], None] = None,
opacity: Union[List[float], None] = None) -> tuple:
"""Creates a 2D scatter plot for each pair of the given
accuracy results.
        :param indices: List of integer pairs that define which methods
            to compare. If ``None`` is given, all pairs of methods are
            compared., defaults to None
        :type indices: Union[List[Tuple[int, int]], None], optional
        :param opacity: List of floats with one entry per dataset. The
            points in the scatter plot are colored based on the values
            in this list., defaults to None
        :type opacity: Union[List[float], None], optional
:returns: Figure and axis that you get from ``plt.subplots``.
:rtype: tuple
"""
colors = np.zeros((self._ndatasets, 4))
colors[:, :3] = _get_color(0)
colors[:, 3] = opacity
if indices is None:
indices = [(i, j)
for i in range(self._nclassifiers)
for j in range(self._nclassifiers)]
fig, axs = plt.subplots(self._nclassifiers, self._nclassifiers)
else:
fig, axs = plt.subplots(len(indices), 1)
if len(indices) == 1:
axs = np.array([axs], dtype=object)
axs = axs.reshape((len(indices), 1))
c = 0
for i in range(axs.shape[0]):
for j in range(axs.shape[1]):
ii, jj = indices[c]
axs[i][j].axis('square')
axs[i][j].set_xlim([0, 1])
axs[i][j].set_ylim([0, 1])
if ii == jj:
weights = np.ones_like(self._accuracies[:, ii])
weights /= self._ndatasets
axs[i][j].hist(
self._accuracies[:, ii],
weights=weights,
)
else:
axs[i][j].scatter(
self._accuracies[:, jj], self._accuracies[:, ii],
c=opacity,
cmap="copper_r",
)
axs[i][j].plot([0, 1], [0, 1],
transform=axs[i][j].transAxes,
color=_get_color(1), ls="--")
axs[i][j].plot([0.05, 1], [0, 0.95],
transform=axs[i][j].transAxes,
color=_get_color(1)+(0.3,), ls="--")
axs[i][j].plot([0, 0.95], [0.05, 1],
transform=axs[i][j].transAxes,
color=_get_color(1)+(0.3,), ls="--")
meanii = self._accuracies[:, ii].mean()
meanjj = self._accuracies[:, jj].mean()
axs[i][j].axhline(meanii, xmin=0, xmax=meanii,
color=_get_color(3)+(0.5,), ls="--")
axs[i][j].axvline(meanjj, ymin=0, ymax=meanjj,
color=_get_color(3)+(0.5,), ls="--")
axs[i][j].text(0.02, 0.98, self._labels[ii],
size="large", ha="left", va="top")
axs[i][j].text(0.98, 0.02, self._labels[jj],
size="large", ha="right", va="bottom")
c += 1
return fig, axs
def test_greater(self, i: int, j: int):
"""Tests whether the null-hypothesis of technique at index ``i``
being less or equally good compared to method ``j`` can be
rejected by performing an one-sided paired Wilcoxon signed-rank
test.
:type i: int
:type j: int
:returns: Value of the test function and p-value of the test.
:rtype: tuple
"""
stat, p = sp.stats.wilcoxon(self._accuracies[:, i],
self._accuracies[:, j],
alternative="greater")
return stat, p
def critical_difference_diagram(self, alpha: float = 0.05):
"""Draws and returns a figure of a critical difference diagram
based on the accuracies given to the class object.
This type of plot was described in the paper
        'Statistical Comparisons of Classifiers over Multiple Data Sets'
by Janez Demsar, 2006.
:param alpha: Significance value used for doing pairwise
Wilcoxon signed-rank tests., defaults to 0.05
:type alpha: float, optional
:returns: Figure and axis that matches to the return types of
``plt.subplots(1, 1)``.
:rtype: tuple
"""
p = np.zeros((int(self._nclassifiers * (self._nclassifiers-1) / 2),),
dtype=np.float32)
c = 0
for i in range(self._nclassifiers - 1):
for j in range(i+1, self._nclassifiers):
p[c] = sp.stats.wilcoxon(self._accuracies[:, i],
self._accuracies[:, j],
zero_method='pratt')[1]
c += 1
p_order = np.argsort(p)
holm_bonferroni = alpha / np.arange(p.shape[0], 0, -1)
p_significant = (p[p_order] <= holm_bonferroni)[p_order.argsort()]
# calculate average ranks
avg_ranks = sp.stats.rankdata(self._accuracies, axis=1)
avg_ranks = self._nclassifiers - avg_ranks + 1
avg_ranks = avg_ranks.mean(axis=0)
avg_ranks_order = avg_ranks.argsort()[::-1]
lowest_rank = min(1, int(np.floor(avg_ranks.min())))
highest_rank = max(len(avg_ranks), int(np.ceil(avg_ranks.max())))
width = 6 + 0.3 * max(map(len, self._labels))
height = 1.0 + self._nclassifiers * 0.1
# initialize plot
fig, ax = plt.subplots(1, 1, figsize=(width, height))
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.5))
ax.spines['right'].set_color("none")
ax.spines['left'].set_color("none")
ax.yaxis.set_major_locator(ticker.NullLocator())
ax.spines['bottom'].set_color("none")
ax.spines['top'].set_linewidth(2.5)
ax.xaxis.set_ticks_position('top')
ax.tick_params(which='major', width=2.5, length=5, labelsize=12)
ax.tick_params(which='minor', width=2.0, length=3, labelsize=12)
ax.set_xlim(highest_rank, lowest_rank)
ax.set_ylim(0.0, 1.0)
fig.subplots_adjust(bottom=-0.6, top=0.7)
half = int(np.ceil(self._nclassifiers / 2))
# visual configurations
rank_xshift: float = 0.02 * (highest_rank-lowest_rank)
label_xshift: float = 0.05 * (highest_rank-lowest_rank)
label_offset: float = 0.01 * (highest_rank-lowest_rank)
first_marking: float = 0.6
markings_vspace: float = 0.35 * 1/half
markings_color: tuple = (0.15, 0.15, 0.15, 1.0)
cliques_color: tuple = _get_color(1) + (0.8,)
# draw left branching markings
for i, index in enumerate(avg_ranks_order[:half]):
ax.axvline(
x=avg_ranks[index],
ymin=first_marking + (half-i-1)*markings_vspace,
ymax=1.0,
c=markings_color,
lw=2.0,
)
ax.axhline(
y=first_marking + (half-i-1)*markings_vspace,
xmin=(half-i-1) * label_xshift / (highest_rank-lowest_rank),
xmax=((highest_rank-avg_ranks[index])
/ (highest_rank-lowest_rank)),
c=markings_color,
lw=2.0,
)
ax.text(highest_rank - rank_xshift - (half-i-1)*label_xshift,
first_marking + (half-i-1)*markings_vspace,
f"{avg_ranks[index]:.2f}",
ha="left", va="bottom", size=8)
ax.text(highest_rank - (half-i-1)*label_xshift + label_offset,
first_marking + (half-i-1)*markings_vspace,
f"{self._labels[index]}",
ha="right", va="center", size=14)
# draw right branching markings
for i, index in enumerate(avg_ranks_order[half:]):
ax.axvline(
x=avg_ranks[index],
ymin=first_marking + i*markings_vspace,
ymax=1.0,
c=markings_color,
lw=2.0,
)
ax.axhline(
y=first_marking + i*markings_vspace,
xmin=((highest_rank-avg_ranks[index])
/ (highest_rank-lowest_rank)),
xmax=1.0 - i * label_xshift / (highest_rank-lowest_rank),
c=markings_color,
lw=2.0,
)
ax.text(lowest_rank + rank_xshift + i*label_xshift,
first_marking + i*markings_vspace,
f"{avg_ranks[index]:.2f}",
ha="right", va="bottom", size=8)
ax.text(lowest_rank + i*label_xshift - label_offset,
first_marking + i*markings_vspace,
f"{self._labels[index]}",
ha="left", va="center", size=14)
# get cliques based on the calculated p-values
adjacency_matrix = np.zeros((self._nclassifiers, self._nclassifiers))
connect_at = np.where(~p_significant)
indexing = np.array(np.triu_indices(self._nclassifiers, k=1))
for index in connect_at:
i, j = indexing[:, index]
adjacency_matrix[i, j] = 1
ccliques = list(nx.find_cliques(nx.Graph(adjacency_matrix)))
cliques = []
for clique in ccliques:
if len(clique) > 1:
cliques.append(clique)
# draw the cliques
i = 1
if len(cliques) < 4:
first_clique_line = 0.9 + (len(cliques) + 4) / 100
else:
first_clique_line = 0.97
clique_line_diff = (1 - (first_marking + (half-1)*markings_vspace))
clique_line_diff -= 0.001
clique_line_diff /= len(cliques)
clique_line_y = first_clique_line
for clique in cliques:
left = min(clique)
right = max(clique)
ax.axhline(
y=clique_line_y,
xmin=((highest_rank-avg_ranks[avg_ranks_order[left]])
/ (highest_rank-lowest_rank)),
xmax=((highest_rank-avg_ranks[avg_ranks_order[right]])
/ (highest_rank-lowest_rank)),
color=cliques_color,
linewidth=4.0,
)
clique_line_y -= clique_line_diff
return fig, ax
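# Illustrative usage (a sketch with made-up accuracies, not part of the
# original module):
#
#     import numpy as np
#     accs = np.random.rand(30, 3)            # 30 datasets, 3 classifiers
#     cc = ClassifierComparision(accs, ["A", "B", "C"])
#     fig, ax = cc.critical_difference_diagram(alpha=0.05)
#     fig.savefig("cd_diagram.png", dpi=256)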
def _get_user_input():
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--csv_files", type=str,
help="CSV File names with accuracy results "
+ "seperated by ';'",
required=True)
parser.add_argument("-p", "--file_path", type=str,
help="Default path for the csv files",
default="")
parser.add_argument("-c", "--columns", type=str,
help="Names of columns in the given files with the "
+ " data that is going to be compared",
default=None)
parser.add_argument("-l", "--labels", type=str,
help="Labels for the different methods that "
+ "are compared seperated by ';'",
default=None)
parser.add_argument("-o", "--opacity_column",
help="Color in points based on this column",
default=None)
parser.add_argument("-sp", "--scatter_plot",
help="Show the scatter plots",
action="store_true")
parser.add_argument("-cd", "--critical_difference",
help="Show the critical difference diagram",
action="store_true")
parser.add_argument("-s", "--save_figure",
help="Save a shown figure. "
+ "Use this option together with '-cd' or '-sp'.",
action="store_true")
parser.add_argument("-t", "--test",
help="Do a wilcoxon test for all paired methods",
action="store_true")
parser.add_argument("-n", "--figure_name",
help="Name of the image file",
type=str, default=None)
return parser.parse_args()
def main():
args = _get_user_input()
files = args.csv_files.split(";")
labels = files
files = list(map(lambda x: x if x.endswith(".csv") else x + ".csv", files))
if args.file_path is not None:
files = list(map(lambda x: os.path.join(args.file_path, x), files))
columns = [DEFAULT_COMPARISION_COLUMN] * len(files)
if args.columns is not None:
columns = args.columns.split(";")
if args.labels is not None:
labels = args.labels.split(";")
f = pd.read_csv(files[0])
accs = np.zeros((len(f), len(files)))
for i in range(len(files)):
accs[:, i] = pd.read_csv(files[i])[columns[i]]
opacity = args.opacity_column
if opacity is not None:
opacity = f[opacity]
else:
opacity = f["TrS"] + f["TeS"]
comparision = ClassifierComparision(accs, labels)
if args.test:
print(f"\nOne-sided paired Wilcoxon signed-rank test")
print("------------------------------------------")
for i in range(len(files)):
for j in range(len(files)):
if i == j:
continue
print(f"H0: {labels[i]} <= {labels[j]} "
+ f"\t H1: {labels[i]} > {labels[j]}")
T, p = comparision.test_greater(i, j)
print(f"\n{T = }, {p = }")
print("------------------------------------------")
if args.scatter_plot:
fig1, axs = comparision.scatterplot(opacity=opacity)
if args.critical_difference:
fig2, ax = comparision.critical_difference_diagram()
if args.save_figure:
name = "comparison"
if args.figure_name is not None:
name = args.figure_name
if args.critical_difference:
plt.savefig(f"{name}.jpg", dpi=256)
elif args.scatter_plot:
plt.savefig(f"{name}.jpg", dpi=512, bbox_inches="tight")
if args.critical_difference or args.scatter_plot:
plt.show()
if __name__ == '__main__':
main()
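# Illustrative invocation (hypothetical file names; the CSV files are assumed
# to contain the accuracy columns as well as 'TrS'/'TeS' when no opacity
# column is given):
#     python configs_compare.py -f "rocket;fruits" -l "ROCKET;FRUITS" \
#         -c "Accuracy;FRUITS Acc" -cd -t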
|