hexsha | size | ext | lang | max_stars_repo_path | max_stars_repo_name | max_stars_repo_head_hexsha | max_stars_repo_licenses | max_stars_count | max_stars_repo_stars_event_min_datetime | max_stars_repo_stars_event_max_datetime | max_issues_repo_path | max_issues_repo_name | max_issues_repo_head_hexsha | max_issues_repo_licenses | max_issues_count | max_issues_repo_issues_event_min_datetime | max_issues_repo_issues_event_max_datetime | max_forks_repo_path | max_forks_repo_name | max_forks_repo_head_hexsha | max_forks_repo_licenses | max_forks_count | max_forks_repo_forks_event_min_datetime | max_forks_repo_forks_event_max_datetime | content | avg_line_length | max_line_length | alphanum_fraction | count_classes | score_classes | count_generators | score_generators | count_decorators | score_decorators | count_async_functions | score_async_functions | count_documentation | score_documentation |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
23d1f2c4f4ea5639727ded8d5757f9d66fc0cc39 | 13,959 | py | Python | TarSync.py | waynegramlich/Fab | d4a23067a0354ffda106f7032df0501c8db24499 | [
"MIT"
]
| 1 | 2022-03-20T12:25:34.000Z | 2022-03-20T12:25:34.000Z | TarSync.py | waynegramlich/Fab | d4a23067a0354ffda106f7032df0501c8db24499 | [
"MIT"
]
| null | null | null | TarSync.py | waynegramlich/Fab | d4a23067a0354ffda106f7032df0501c8db24499 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
"""TarSync.py: Synchronize .fcstd and .tar files.
Usage: TarSync.py [OPTIONS] [DIR] ...
Recursively scans directories searching for `.fcstd`/`.FCstd` files
and synchronizes them with associated `.tar` files. The current
directory is used if no explicit directory or files are listed.
Options:
* [-n] Visit all files without doing anything. Use with [-v] option.
* [-v] Verbose mode.
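Example (the directory path is only illustrative; combining [-n] and [-v] reports
what would be synchronized without modifying anything):
    TarSync.py -n -v ~/freecad_projects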
Rationale:
A FreeCAD `.fcstd` file is basically a bunch of text files compressed into a single zip archive.
For fun, the `unzip -l XYZ.fcstd` command lists the files contained in `XYZ.fcstd`.
Due to the repetitive nature of the text files contained therein, the zip algorithm
can achieve significant overall file compression.
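The same listing can be produced from Python with the standard zipfile module
(a minimal sketch, assuming a file named `XYZ.fcstd` exists in the current directory):
    from zipfile import ZipFile
    with ZipFile("XYZ.fcstd", "r") as fcstd_file:
        for member_name in fcstd_file.namelist():
            print(member_name)  # one archive member per line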
A `git` repository basically consists of a bunch of files called blobs, where the
term "blob" stands for Binary Large Object. Each blob represents some version
of a file stored in the repository. Being binary files, `.fcstd` files can be
stored inside of a git repository. However, the compressed (i.e. binary)
nature of `.fcstd` files can make the git repository storage requirements
grow at a pretty rapid rate as multiple versions of the `.fcstd` files get stored
into a git repository.
To combat the storage growth requirements, `git` uses a compression algorithm that
is applied to the repository as a whole. These compressed files are called Pack files.
Pack files are generated and updated whenever git decides to do so. Over time,
the overall git storage requirement associated with uncompressed files grows at a
slower rate than it does for zip-compressed files. In addition, each time git
repositories are synchronized, the over-the-wire protocol exchanges Pack files.
This program converts a file from the compressed zip format into a simpler
uncompressed format called a `.tar` file. (`tar` stands for Tape ARchive, from
back in the days of magnetic tapes.) Basically, what this program does is
manage two files in tandem, `XYZ.fcstd` and `XYZ.tar`. It does this by
comparing the modification times of the two files and translating the content
of the newer file on top of the older file. When done, both files will have
the same modification time. This program works recursively over an entire
directory tree.
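The rule that decides the direction of the copy is just a modification-time
comparison (a simplified sketch, using illustrative file names, of what the
synchronize() function below does; the real code also copies the content):
    from pathlib import Path
    fcstd_path, tar_path = Path("XYZ.fcstd"), Path("XYZ.tar")
    fcstd_time = fcstd_path.stat().st_mtime if fcstd_path.exists() else 0
    tar_time = tar_path.stat().st_mtime if tar_path.exists() else 0
    if fcstd_time > tar_time:
        pass  # the .fcstd is newer: rewrite the .tar from its members
    elif tar_time > fcstd_time:
        pass  # the .tar is newer: rewrite the .fcstd from its members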
To use this program with a git repository, configure your `.gitignore` to
ignore `.fcstd` files in your repository by adding `*.fcstd` to your
`.gitignore` file. Run this program before doing a `git commit`.
Whenever you update your git repository from a remote one, run this program
again to keep the `.fcstd` files in sync with any updated `.tar` files.
"""
# [Basic Git Concepts]
# (https://www.oreilly.com/library/view/version-control-with/9781449345037/ch04.html)
#
# FreeCAD forum topics:
# [https://forum.freecadweb.org/viewtopic.php?t=38353&start=30](1)
# [https://forum.freecadweb.org/viewtopic.php?f=8&t=36844a](2)
# [https://forum.freecadweb.org/viewtopic.php?t=40029&start=10](3)
# [https://forum.freecadweb.org/viewtopic.php?p=1727](4)
# [https://forum.freecadweb.org/viewtopic.php?t=8688](5)
# [https://forum.freecadweb.org/viewtopic.php?t=32521](6)
# [https://forum.freecadweb.org/viewtopic.php?t=57737](7)
# [https://blog.lambda.cx/posts/freecad-and-git/](8)
# [https://tante.cc/2010/06/23/managing-zip-based-file-formats-in-git/](9)
from argparse import ArgumentParser
from io import BytesIO
import os
from pathlib import Path
from tarfile import TarFile, TarInfo
from tempfile import TemporaryDirectory
from typing import List, IO, Optional, Tuple
import time
from zipfile import ZIP_DEFLATED, ZipFile
# main():
def main() -> None:
"""Execute the main program."""
# Create an *argument_parser*:
parser: ArgumentParser = ArgumentParser(
description="Synchronize .fcstd/.tar files."
)
parser.add_argument("directories", metavar="DIR", type=str, nargs="*",
help="Directory to recursively scan")
parser.add_argument("-n", "--dry-run", action="store_true",
help="verbose mode")
parser.add_argument("-v", "--verbose", action="store_true",
help="verbose mode")
parser.add_argument("--unit-test", action="store_true",
help="run unit tests")
# Parse arguments:
arguments = parser.parse_args()
directories: Tuple[str, ...] = tuple(arguments.directories)
if arguments.unit_test:
# Run the unit test:
unit_test()
directories = ()
synchronize_directories(directories, arguments.dry_run, arguments.verbose)
# synchronize_directories():
def synchronize_directories(directory_names: Tuple[str, ...],
dry_run: bool, verbose: bool) -> Tuple[str, ...]:
"""Synchronize some directories.
* Arguments:
* *directory_names* (Tuple[str, ...]):
A list of directories to recursively synchronize.
* dry_run (bool):
If True, the directories are scanned, but not synchronized. If False, the directories
are both scanned and synchronized.
* verbose (bool):
If True, a summary message is printed for each (possible) synchronization.
The actual synchronization only occurs if *dry_run* is False.
* Returns
* (Tuple[str, ...]) containing the summaries
"""
# Recursively find all *fcstd_paths* in *directories*:
fcstd_paths: List[Path] = []
directory_name: str
for directory_name in directory_names:
suffix: str = "fcstd"
for suffix in ("fcstd", "fcSTD"):
fcstd_paths.extend(Path(directory_name).glob(f"**/*.{suffix}"))
# Perform all of the synchronizations:
summaries: List[str] = []
for fcstd_path in fcstd_paths:
summary: str = synchronize(fcstd_path, dry_run)
summaries.append(summary)
if verbose:
print(summary) # pragma: no unit cover
return tuple(summaries)
# Synchronize():
def synchronize(fcstd_path: Path, dry_run: bool = False) -> str:
"""Synchronize an .fcstd file with associated .tar file.
* Arguments:
* fcstd_path (Path):
The `.fcstd` file to synchronize.
* dry_run (bool):
If True, no synchronization occurs and only the summary string is returned.
(Default: False)
* Returns:
* (str) a summary string.
Synchronizes an `.fcstd` file with an associated `.tar` file.
A summary is always returned even in *dry_run* mode.
"""
    # Determine timestamps for *fcstd_path* and associated *tar_path*:
tar_path: Path = fcstd_path.with_suffix(".tar")
fcstd_timestamp: int = int(fcstd_path.stat().st_mtime) if fcstd_path.exists() else 0
tar_timestamp: int = int(tar_path.stat().st_mtime) if tar_path.exists() else 0
# Using the timestamps do the synchronization (or not):
zip_file: ZipFile
tar_file: TarFile
tar_info: TarInfo
fcstd_name: str = str(fcstd_path)
tar_name: str = str(tar_path)
summary: str
if fcstd_timestamp > tar_timestamp:
        # Update *tar_path* from *fcstd_path*:
summary = f"{fcstd_name} => {tar_name}"
if not dry_run:
with ZipFile(fcstd_path, "r") as zip_file:
with TarFile(tar_path, "w") as tar_file:
from_names: Tuple[str, ...] = tuple(zip_file.namelist())
for from_name in from_names:
from_content: bytes = zip_file.read(from_name)
# print(f"Read {fcstd_path}:{from_name}:"
# f"{len(from_content)}:{is_ascii(from_content)}")
tar_info = TarInfo(from_name)
tar_info.size = len(from_content)
# print(f"tar_info={tar_info} size={tar_info.size}")
tar_file.addfile(tar_info, BytesIO(from_content))
os.utime(tar_path, (fcstd_timestamp, fcstd_timestamp)) # Force modification time.
elif tar_timestamp > fcstd_timestamp:
# Update *fcstd_path* from *tar_path*:
summary = f"{tar_name} => {fcstd_name}"
if not dry_run:
with TarFile(tar_path, "r") as tar_file:
tar_infos: Tuple[TarInfo, ...] = tuple(tar_file.getmembers())
with ZipFile(fcstd_path, "w", ZIP_DEFLATED) as zip_file:
for tar_info in tar_infos:
buffered_reader: Optional[IO[bytes]] = tar_file.extractfile(tar_info)
assert buffered_reader
buffer: bytes = buffered_reader.read()
# print(f"{tar_info.name}: {len(buffer)}")
zip_file.writestr(tar_info.name, buffer)
os.utime(fcstd_path, (tar_timestamp, tar_timestamp)) # Force modification time.
else:
summary = f"{fcstd_name} in sync with {tar_name}"
return summary
# unit_test():
def unit_test() -> None:
"""Run the unit test."""
directory_name: str
    # Create a temporary *directory_path* to run the tests in:
with TemporaryDirectory() as directory_name:
a_content: str = "a contents"
b_content: str = "b contents"
buffered_reader: Optional[IO[bytes]]
c_content: str = "c contents"
directory_path: Path = Path(directory_name)
tar_name: str
tar_file: TarFile
tar_path: Path = directory_path / "test.tar"
tar_path_name: str = str(tar_path)
zip_file: ZipFile
zip_name: str
zip_path: Path = directory_path / "test.fcstd"
zip_path_name: str = str(zip_path)
# Create *zip_file* with a suffix of `.fcstd`:
with ZipFile(zip_path, "w", ZIP_DEFLATED) as zip_file:
zip_file.writestr("a", a_content)
zip_file.writestr("b", b_content)
assert zip_path.exists(), f"{zip_path_name=} not created"
zip_timestamp: int = int(zip_path.stat().st_mtime)
assert zip_timestamp > 0, f"{zip_path=} had bad timestamp."
# Perform synchronize with a slight delay to force a different modification time:
time.sleep(1.1)
summaries = synchronize_directories((directory_name, ), False, False)
assert len(summaries) == 1, "Only 1 summary expected"
summary: str = summaries[0]
desired_summary: str = f"{zip_path_name} => {tar_path_name}"
assert summary == desired_summary, f"{summary} != {desired_summary}"
assert tar_path.exists(), f"{tar_path_name=} not created"
tar_timestamp: int = int(tar_path.stat().st_mtime)
assert tar_timestamp == zip_timestamp, f"{zip_timestamp=} != {tar_timestamp=}"
# Now read *tar_file* and verify that it has the correct content:
with TarFile(tar_path, "r") as tar_file:
tar_infos: Tuple[TarInfo, ...] = tuple(tar_file.getmembers())
for tar_info in tar_infos:
buffered_reader = tar_file.extractfile(tar_info)
assert buffered_reader, f"Unable to read {tar_file=}"
content: str = buffered_reader.read().decode("latin-1")
found: bool = False
if tar_info.name == "a":
assert content == a_content, f"'{content}' != '{a_content}'"
found = True
elif tar_info.name == "b":
assert content == b_content, f"'{content}' != '{b_content}'"
found = True
assert found, f"Unexpected tar file name {tar_info.name}"
# Now run synchronize again and verify that nothing changed:
summaries = synchronize_directories((directory_name, ), False, False)
assert len(summaries) == 1, "Only one summary expected"
summary = summaries[0]
desired_summary = f"{str(zip_path)} in sync with {str(tar_path)}"
assert summary == desired_summary, f"'{summary}' != '{desired_summary}'"
zip_timestamp = int(zip_path.stat().st_mtime)
tar_timestamp = int(tar_path.stat().st_mtime)
assert tar_timestamp == zip_timestamp, f"timestamps {zip_timestamp=} != {tar_timestamp=}"
        # Now update *tar_file* with new content (i.e. after a `git pull`):
time.sleep(1.1) # Use delay to force a different timestamp.
with TarFile(tar_path, "w") as tar_file:
tar_info = TarInfo("c")
tar_info.size = len(c_content)
tar_file.addfile(tar_info, BytesIO(bytes(c_content, "latin-1")))
tar_info = TarInfo("a")
tar_info.size = len(a_content)
tar_file.addfile(tar_info, BytesIO(bytes(a_content, "latin-1")))
# Verify that the timestamp changed and force a synchronize().
new_tar_timestamp: int = int(tar_path.stat().st_mtime)
assert new_tar_timestamp > tar_timestamp, f"{new_tar_timestamp=} <= {tar_timestamp=}"
summary = synchronize(zip_path)
desired_summary = f"{tar_path_name} => {zip_path_name}"
assert summary == desired_summary, f"'{summary}' != '{desired_summary}'"
        # Verify that the *zip_path* got updated and that the content changed:
new_zip_timestamp: int = int(zip_path.stat().st_mtime)
assert new_zip_timestamp == new_tar_timestamp, (
f"{new_zip_timestamp=} != {new_tar_timestamp=}")
with ZipFile(zip_path, "r") as zip_file:
zip_names: Tuple[str, ...] = tuple(zip_file.namelist())
for zip_name in zip_names:
zip_content: str = zip_file.read(zip_name).decode("latin-1")
assert buffered_reader
found = False
if zip_name == "a":
assert zip_content == a_content, "Content mismatch"
found = True
elif zip_name == "c":
assert zip_content == c_content, "Content mismatch"
found = True
assert found, "Unexpected file '{zip_name}'"
if __name__ == "__main__":
main()
| 45.617647 | 98 | 0.646321 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,855 | 0.491081 |
23d1f9c2f299c304c7761f6ac8842a0f28c28618 | 20,325 | py | Python | tcga_encoder/analyses/old/spearmans_input_cluster_from_hidden.py | tedmeeds/tcga_encoder | 805f9a5bcc422a43faea45baa0996c88d346e3b4 | [
"MIT"
]
| 2 | 2017-12-19T15:32:46.000Z | 2018-01-12T11:24:24.000Z | tcga_encoder/analyses/old/spearmans_input_cluster_from_hidden.py | tedmeeds/tcga_encoder | 805f9a5bcc422a43faea45baa0996c88d346e3b4 | [
"MIT"
]
| null | null | null | tcga_encoder/analyses/old/spearmans_input_cluster_from_hidden.py | tedmeeds/tcga_encoder | 805f9a5bcc422a43faea45baa0996c88d346e3b4 | [
"MIT"
]
| null | null | null | from tcga_encoder.utils.helpers import *
from tcga_encoder.data.data import *
#from tcga_encoder.data.pathway_data import Pathways
from tcga_encoder.data.hallmark_data import Pathways
from tcga_encoder.definitions.tcga import *
#from tcga_encoder.definitions.nn import *
from tcga_encoder.definitions.locations import *
#from tcga_encoder.algorithms import *
import seaborn as sns
from sklearn.manifold import TSNE, locally_linear_embedding
from scipy import stats
def join_weights( W_hidden2z, W_hidden ):
W = {}
n_z = W_hidden2z.shape[1]
columns = np.array( ["z_%d"%i for i in range(n_z)])
for input_source, source_w in W_hidden.iteritems():
#pdb.set_trace()
W[ input_source ] = pd.DataFrame( np.dot( source_w, W_hidden2z ), index = source_w.index, columns = columns )
return W
def get_hidden2z_weights( model_store ):
layer = "rec_z_space"
model_store.open()
w = model_store[ "%s"%(layer) + "/W/w%d"%(0)].values
model_store.close()
return w
def get_hidden_weights( model_store, input_sources, data_store ):
rna_genes = data_store["/RNA/FAIR"].columns
meth_genes = data_store["/METH/FAIR"].columns
mirna_hsas = data_store["/miRNA/FAIR"].columns
post_fix = "_scaled"
idx=1
n_sources = len(input_sources)
W = {}
for w_idx, input_source in zip( range(n_sources), input_sources ):
w = model_store[ "rec_hidden" + "/W/w%d"%(w_idx)].values
#pdb.set_trace()
d,k = w.shape
columns = np.array( ["h_%d"%i for i in range(k)])
if input_source == "RNA":
rows = rna_genes
print input_source, w.shape, len(rows), len(columns)
W[ input_source ] = pd.DataFrame( w, index=rows, columns = columns )
if input_source == "miRNA":
rows = mirna_hsas
print input_source, w.shape, len(rows), len(columns)
W[ input_source ] = pd.DataFrame( w, index=rows, columns = columns )
if input_source == "METH":
rows = meth_genes
#rows = np.array( [ "M-%s"%g for g in meth_genes], dtype=str )
print input_source, w.shape, len(rows), len(columns)
W[ input_source ] = pd.DataFrame( w, index=rows, columns = columns )
if input_source == "TISSUE":
rows = tissue_names
print input_source, w.shape, len(rows), len(columns)
W[ input_source ] = pd.DataFrame( w, index=rows, columns = columns )
model_store.close()
return W
def auc_standard_error( theta, nA, nN ):
# from: Hanley and McNeil (1982), The Meaning and Use of the Area under the ROC Curve
# theta: estimated AUC, can be 0.5 for a random test
# nA size of population A
# nN size of population N
Q1=theta/(2.0-theta); Q2=2*theta*theta/(1+theta)
SE = np.sqrt( (theta*(1-theta)+(nA-1)*(Q1-theta*theta) + (nN-1)*(Q2-theta*theta) )/(nA*nN) )
return SE
def auc_test( true_y, est_y ):
n = len(true_y)
n_1 = true_y.sum()
n_0 = n - n_1
if n_1 == 0 or n_1 == n:
return 0.5, 0.0, 0.0, 1.0
auc = roc_auc_score( true_y, est_y )
difference = auc - 0.5
if difference < 0:
# switch labels
se = auc_standard_error( auc, n_0, n_1 )
se_null = auc_standard_error( 0.5, n_0, n_1 )
else:
se = auc_standard_error( 1-auc, n_1, n_0 )
se_null = auc_standard_error( 0.5, n_1, n_0 )
se_combined = np.sqrt( se**2 + se_null**2 )
z_value = np.abs(difference) / se_combined
p_value = 1.0 - stats.norm.cdf( np.abs(z_value) )
return auc, se, z_value, p_value
def find_keepers_over_groups( z, groups, name, nbr2keep, stats2use ):
inners = []; p_inners=[]
mx_inner = 0.0
norm_z = np.linalg.norm(z)
for X, stat in zip( groups, stats2use ):
pearsons = np.zeros( X.shape[1] )
pvalues = np.zeros( X.shape[1] )
for x,x_idx in zip( X.values.T, range(X.shape[1])):
if stat == "pearson":
pearsons[x_idx], pvalues[x_idx] = stats.pearsonr( z, x )
elif stat == "auc":
true_y = (x>0).astype(int)
auc, se, zvalue, pvalue = auc_test( true_y, z ) #np.sqrt( ses_tissue**2 + se_r_tissue**2 )
pearsons[x_idx] = auc-0.5
pvalues[x_idx] = pvalue
#pdb.set_trace()
#norms = norm_z*np.linalg.norm( X, axis=0 )
#inner = pd.Series( np.dot( z, X )/norms, index = X.columns, name=name )
inner = pd.Series( pearsons, index = X.columns, name=name )
p_inner = pd.Series( pvalues, index = X.columns, name=name )
inners.append(inner)
p_inners.append(p_inner)
this_mx = np.max(np.abs(inner))
if this_mx > mx_inner:
mx_inner = this_mx
all_keepers = []
#all_pvalues = []
for inner,p_inner in zip(inners,p_inners):
#inner.sort_values(inplace=True)
#inner = inner / mx_inner
#abs_inner = np.abs( inner )
#ordered = np.argsort( -inner.values )
ordered = np.argsort( p_inner.values )
ordered = pd.DataFrame( np.vstack( (inner.values[ordered],p_inner.values[ordered] ) ).T, index =inner.index[ordered],columns=["r","p"] )
#pdb.set_trace()
#keepers = pd.concat( [ordered[:nbr2keep], ordered[-nbr2keep:]], axis=0 )
keepers = ordered[:nbr2keep]
#pdb.set_trace()
#keepers = keepers.sort_values()
all_keepers.append(keepers)
return all_keepers
def find_keepers(z, X, name, nbr2keep):
inner = pd.Series( np.dot( z, X ), index = X.columns, name=name )
inner.sort_values(inplace=True)
inner = inner / np.max(np.abs(inner))
#signed = np.sign( inner )
abs_inner = np.abs( inner )
ordered = np.argsort( -abs_inner.values )
ordered = pd.Series( inner.values[ordered], index =inner.index[ordered],name=name )
keepers = ordered[:nbr2keep]
keepers = keepers.sort_values()
return keepers
def main( data_location, results_location ):
pathway_info = Pathways()
data_path = os.path.join( HOME_DIR ,data_location ) #, "data.h5" )
results_path = os.path.join( HOME_DIR, results_location )
data_filename = os.path.join( data_path, "data.h5")
fill_filename = os.path.join( results_path, "full_vae_fill.h5" )
model_filename = os.path.join( results_path, "full_vae_model.h5" )
save_dir = os.path.join( results_path, "hallmark_clustering" )
check_and_mkdir(save_dir)
z_dir = os.path.join( save_dir, "z_pics" )
check_and_mkdir(z_dir)
h_dir = os.path.join( save_dir, "h_pics" )
check_and_mkdir(h_dir)
print "HOME_DIR: ", HOME_DIR
print "data_filename: ", data_filename
print "fill_filename: ", fill_filename
print "LOADING stores"
data_store = pd.HDFStore( data_filename, "r" )
fill_store = pd.HDFStore( fill_filename, "r" )
model_store = pd.HDFStore( model_filename, "r" )
Z_train = fill_store["/Z/TRAIN/Z/mu"]
Z_val = fill_store["/Z/VAL/Z/mu"]
#input_sources = ["METH","RNA","miRNA"]
input_sources = ["RNA","miRNA","METH"]
W_hidden = get_hidden_weights( model_store, input_sources, data_store )
W_hidden2z = get_hidden2z_weights( model_store )
size_per_unit = 0.25
size1 = max( min( 40, int( W_hidden["RNA"].values.shape[0]*size_per_unit ) ), 12 )
size2 = max( min( 40, int( W_hidden["miRNA"].values.shape[0]*size_per_unit )), 12 )
#pdb.set_trace()
cmap = sns.palplot(sns.light_palette((260, 75, 60), input="husl"))
htmap3 = sns.clustermap ( pd.concat( [W_hidden["RNA"],W_hidden["miRNA"]],0).T.corr(), cmap=cmap, square=True, figsize=(size1,size2) )
pp.setp(htmap3.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
pp.setp(htmap3.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
pp.setp(htmap3.ax_heatmap.yaxis.get_majorticklabels(), fontsize=12)
pp.setp(htmap3.ax_heatmap.xaxis.get_majorticklabels(), fontsize=12)
htmap3.ax_row_dendrogram.set_visible(False)
htmap3.ax_col_dendrogram.set_visible(False)
pp.savefig( save_dir + "/weights_rna__mirna_clustermap.png", fmt="png", bbox_inches = "tight")
#size2 = max( int( n_inputs*size_per_unit ), 12 )
size1 = max( min( 40, int( W_hidden["RNA"].values.shape[0]*size_per_unit )), 12 )
cmap = sns.palplot(sns.light_palette((260, 75, 60), input="husl"))
htmap3 = sns.clustermap ( W_hidden["RNA"].T.corr(), cmap=cmap, square=True, figsize=(size1,size1) )
pp.setp(htmap3.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
pp.setp(htmap3.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
pp.setp(htmap3.ax_heatmap.yaxis.get_majorticklabels(), fontsize=12)
pp.setp(htmap3.ax_heatmap.xaxis.get_majorticklabels(), fontsize=12)
htmap3.ax_row_dendrogram.set_visible(False)
htmap3.ax_col_dendrogram.set_visible(False)
pp.savefig( save_dir + "/weights_rna_clustermap.png", fmt="png", bbox_inches = "tight")
size1 = max( min( 40, int( W_hidden["miRNA"].values.shape[0]*size_per_unit )), 12 )
htmap3 = sns.clustermap ( W_hidden["miRNA"].T.corr(), cmap=cmap, square=True, figsize=(size1,size1) )
pp.setp(htmap3.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
pp.setp(htmap3.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
pp.setp(htmap3.ax_heatmap.yaxis.get_majorticklabels(), fontsize=12)
pp.setp(htmap3.ax_heatmap.xaxis.get_majorticklabels(), fontsize=12)
htmap3.ax_row_dendrogram.set_visible(False)
htmap3.ax_col_dendrogram.set_visible(False)
pp.savefig( save_dir + "/weights_mirna_clustermap.png", fmt="png", bbox_inches = "tight")
size1 = max(min( 40, int( W_hidden["METH"].values.shape[0]*size_per_unit )), 12 )
htmap3 = sns.clustermap ( W_hidden["METH"].T.corr(), cmap=cmap, square=True, figsize=(size1,size1) )
pp.setp(htmap3.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
pp.setp(htmap3.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
pp.setp(htmap3.ax_heatmap.yaxis.get_majorticklabels(), fontsize=12)
pp.setp(htmap3.ax_heatmap.xaxis.get_majorticklabels(), fontsize=12)
htmap3.ax_row_dendrogram.set_visible(False)
htmap3.ax_col_dendrogram.set_visible(False)
pp.savefig( save_dir + "/weights_meth_clustermap.png", fmt="png", bbox_inches = "tight")
#pdb.set_trace()
weighted_z = join_weights( W_hidden2z, W_hidden )
#pdb.set_trace()
Z = np.vstack( (Z_train.values, Z_val.values) )
n_z = Z.shape[1]
#pdb.set_trace()
z_names = ["z_%d"%z_idx for z_idx in range(Z.shape[1])]
Z = pd.DataFrame( Z, index = np.hstack( (Z_train.index.values, Z_val.index.values)), columns = z_names )
barcodes = np.union1d( Z_train.index.values, Z_val.index.values )
barcodes = data_store["/CLINICAL/observed"][ data_store["/CLINICAL/observed"][["RNA","miRNA","METH","DNA"]].sum(1)==4 ].index.values
Z=Z.loc[barcodes]
Z_values = Z.values
tissues = data_store["/CLINICAL/TISSUE"].loc[barcodes]
rna = np.log(1+data_store["/RNA/RSEM"].loc[ barcodes ])
mirna = np.log(1+data_store["/miRNA/RSEM"].loc[ barcodes ])
meth = np.log(0.1+data_store["/METH/METH"].loc[ barcodes ])
dna = data_store["/DNA/channel/0"].loc[ barcodes ]
tissue_names = tissues.columns
tissue_idx = np.argmax( tissues.values, 1 )
n = len(Z)
n_tissues = len(tissue_names)
n_h = W_hidden2z.shape[0]
rna_normed = rna; mirna_normed = mirna; meth_normed = meth; dna_normed=2*dna-1
for t_idx in range(n_tissues):
t_query = tissue_idx == t_idx
X = rna[t_query]
X -= X.mean(0)
X /= X.std(0)
rna_normed[t_query] = X
X = mirna[t_query]
X -= X.mean(0)
X /= X.std(0)
mirna_normed[t_query] = X
X = meth[t_query]
X -= X.mean(0)
X /= X.std(0)
meth_normed[t_query] = X
#pdb.set_trace()
nbr = 20
Z_keep_rna=[]
Z_keep_mirna=[]
Z_keep_meth=[]
Z_keep_dna = []
for z_idx in range(n_z):
z_values = Z_values[:,z_idx]
order_z = np.argsort(z_values)
rna_w = weighted_z["RNA"][ "z_%d"%(z_idx)]
mirna_w = weighted_z["miRNA"][ "z_%d"%(z_idx)]
meth_w = weighted_z["METH"][ "z_%d"%(z_idx)]
order_rna = np.argsort( -np.abs(rna_w.values) )
order_mirna = np.argsort( -np.abs(mirna_w.values) )
order_meth = np.argsort( -np.abs(meth_w.values) )
rna_w_ordered = pd.Series( rna_w.values[ order_rna ], index = rna_w.index[order_rna], name="RNA")
mirna_w_ordered = pd.Series( mirna_w.values[ order_mirna ], index = mirna_w.index[order_mirna], name="miRNA")
meth_w_ordered = pd.Series( meth_w.values[ order_meth ], index = meth_w.index[order_meth], name="METH")
f = pp.figure( figsize = (12,8))
ax1 = f.add_subplot(321);ax2 = f.add_subplot(323);ax3 = f.add_subplot(325);
ax_pie1 = f.add_subplot(133); #ax_pie3 = f.add_subplot(424); ax_pie4 = f.add_subplot(426)
max_ax = np.max( np.hstack( (rna_w_ordered[:nbr].values,meth_w_ordered[:nbr].values,mirna_w_ordered[:nbr].values) ) )
min_ax = np.min( np.hstack( (rna_w_ordered[:nbr].values,meth_w_ordered[:nbr].values,mirna_w_ordered[:nbr].values) ) )
h1=rna_w_ordered[:nbr].sort_values(ascending=False).plot(kind='barh',ax=ax1,color="red",legend=False,title=None,fontsize=8); ax1.set_xlim(min_ax,max_ax); ax1.set_title(""); h1.set_xticklabels([]); ax1.legend(["RNA"])
h2=meth_w_ordered[:nbr].sort_values(ascending=False).plot(kind='barh',ax=ax2,color="blue",legend=False,title=None,fontsize=8);ax2.set_xlim(min_ax,max_ax); ax2.set_title(""); h2.set_xticklabels([]); ax2.legend(["METH"])
h3=mirna_w_ordered[:nbr].sort_values(ascending=False).plot(kind='barh',ax=ax3,color="black",legend=False,title=None,fontsize=8); ax3.set_xlim(min_ax,max_ax); ax3.set_title("");ax3.legend(["miRNA"])
neg_rna = pp.find( rna_w_ordered.values<0) ; pos_rna = pp.find( rna_w_ordered.values>0)
neg_meth = pp.find( meth_w_ordered.values<0) ; pos_meth = pp.find( meth_w_ordered.values>0)
rna_readable = pathway_info.CancerEnrichment(rna_w_ordered[:nbr].index, 1+0*np.abs( rna_w_ordered[:nbr].values) )
meth_readable = pathway_info.CancerEnrichment(meth_w_ordered[:nbr].index, 1+0*np.abs( meth_w_ordered[:nbr].values ) )
# rna_readable_p = pathway_info.CancerEnrichment(rna_w_ordered.index[pos_rna[:20]], 1+0*rna_w_ordered.values[pos_rna[:20]] )
# meth_readable_p = pathway_info.CancerEnrichment(meth_w_ordered.index[pos_meth[:20]], 1+0*meth_w_ordered.values[pos_meth[:20]])
# #
# rna_readable_n = pathway_info.CancerEnrichment(rna_w_ordered.index[neg_rna[:20]], -1+0*rna_w_ordered.values[neg_rna[:20]] )
# meth_readable_n = pathway_info.CancerEnrichment(meth_w_ordered.index[neg_meth[:20]], -1+0*meth_w_ordered.values[neg_meth[:20]] )
rna_readable.name="rna"
meth_readable.name="meth"
# rna_readable_p.name="rna_p"
# meth_readable_p.name="meth_p"
# rna_readable_n.name="rna_n"
# meth_readable_n.name="meth_n"
#joined = pd.concat( [rna_readable[:20],\
# meth_readable[:20]], axis=1 )
joined = pd.concat( [rna_readable,\
meth_readable], axis=1 )
# joined = pd.concat( [rna_readable_p,rna_readable_n,\
# meth_readable_p,meth_readable_n], axis=1 )
#
# maxvalues = joined.index[ np.argsort( -np.abs(joined.fillna(0)).sum(1).values ) ]
#
# joined=joined.loc[maxvalues]
# joined = joined[:25]
#br = joined.plot(kind="barh",ax=ax_pie1,color=["red","red","blue","blue"],legend=False,stacked=True, sort_columns=False,fontsize=8);
br = joined.plot(kind="barh",ax=ax_pie1,color=["red","blue"],legend=False,stacked=True, sort_columns=False,fontsize=8);
max_ax = np.max( joined.values.flatten() )
min_ax = np.min( joined.values.flatten() )
max_ax = np.max( max_ax, -min_ax )
min_ax = -max_ax
#pdb.set_trace()
#ax_pie1.set_xlim(min_ax,max_ax);
#br = joined.plot(kind="barh",ax=ax_pie1,color=["red","blue"],legend=True,stacked=True, sort_columns=False);
pp.suptitle( "Z %d"%(z_idx))
pp.savefig( z_dir + "/z%d_weighted.png"%(z_idx), format="png", dpi=300 )
#pp.show()
#pdb.set_trace()
pp.close('all')
for z_idx in range(n_h):
#z_values = Z_values[:,z_idx]
#order_z = np.argsort(z_values)
rna_w = W_hidden["RNA"][ "h_%d"%(z_idx)]
mirna_w = W_hidden["miRNA"][ "h_%d"%(z_idx)]
meth_w = W_hidden["METH"][ "h_%d"%(z_idx)]
order_rna = np.argsort( -np.abs(rna_w.values) )
order_mirna = np.argsort( -np.abs(mirna_w.values) )
order_meth = np.argsort( -np.abs(meth_w.values) )
rna_w_ordered = pd.Series( rna_w.values[ order_rna ], index = rna_w.index[order_rna], name="RNA")
mirna_w_ordered = pd.Series( mirna_w.values[ order_mirna ], index = mirna_w.index[order_mirna], name="miRNA")
meth_w_ordered = pd.Series( meth_w.values[ order_meth ], index = meth_w.index[order_meth], name="METH")
f = pp.figure( figsize = (12,8))
ax1 = f.add_subplot(321);ax2 = f.add_subplot(323);ax3 = f.add_subplot(325);
ax_pie1 = f.add_subplot(133); #ax_pie3 = f.add_subplot(424); ax_pie4 = f.add_subplot(426)
max_ax = np.max( np.hstack( (rna_w_ordered[:nbr].values,meth_w_ordered[:nbr].values,mirna_w_ordered[:nbr].values) ) )
min_ax = np.min( np.hstack( (rna_w_ordered[:nbr].values,meth_w_ordered[:nbr].values,mirna_w_ordered[:nbr].values) ) )
h1=rna_w_ordered[:nbr].sort_values(ascending=False).plot(kind='barh',ax=ax1,color="red",legend=False,title=None,fontsize=8); ax1.set_xlim(min_ax,max_ax); ax1.set_title(""); h1.set_xticklabels([]); ax1.legend(["RNA"])
h2=meth_w_ordered[:nbr].sort_values(ascending=False).plot(kind='barh',ax=ax2,color="blue",legend=False,title=None,fontsize=8);ax2.set_xlim(min_ax,max_ax); ax2.set_title(""); h2.set_xticklabels([]); ax2.legend(["METH"])
h3=mirna_w_ordered[:nbr].sort_values(ascending=False).plot(kind='barh',ax=ax3,color="black",legend=False,title=None,fontsize=8); ax3.set_xlim(min_ax,max_ax); ax3.set_title("");ax3.legend(["miRNA"])
neg_rna = pp.find( rna_w_ordered.values<0) ; pos_rna = pp.find( rna_w_ordered.values>0)
neg_meth = pp.find( meth_w_ordered.values<0) ; pos_meth = pp.find( meth_w_ordered.values>0)
rna_readable = pathway_info.CancerEnrichment(rna_w_ordered[:nbr].index, 1+0*np.abs( rna_w_ordered[:nbr].values) )
meth_readable = pathway_info.CancerEnrichment(meth_w_ordered[:nbr].index, 1+0*np.abs( meth_w_ordered[:nbr].values ) )
# rna_readable_p = pathway_info.CancerEnrichment(rna_w_ordered.index[pos_rna[:20]], 1+0*rna_w_ordered.values[pos_rna[:20]] )
# meth_readable_p = pathway_info.CancerEnrichment(meth_w_ordered.index[pos_meth[:20]], 1+0*meth_w_ordered.values[pos_meth[:20]])
# #
# rna_readable_n = pathway_info.CancerEnrichment(rna_w_ordered.index[neg_rna[:20]], -1+0*rna_w_ordered.values[neg_rna[:20]] )
# meth_readable_n = pathway_info.CancerEnrichment(meth_w_ordered.index[neg_meth[:20]], -1+0*meth_w_ordered.values[neg_meth[:20]] )
rna_readable.name="rna"
meth_readable.name="meth"
# rna_readable_p.name="rna_p"
# meth_readable_p.name="meth_p"
# rna_readable_n.name="rna_n"
# meth_readable_n.name="meth_n"
#joined = pd.concat( [rna_readable[:20],\
# meth_readable[:20]], axis=1 )
joined = pd.concat( [rna_readable,\
meth_readable], axis=1 )
# joined = pd.concat( [rna_readable_p,rna_readable_n,\
# meth_readable_p,meth_readable_n], axis=1 )
#
# maxvalues = joined.index[ np.argsort( -np.abs(joined.fillna(0)).sum(1).values ) ]
#
# joined=joined.loc[maxvalues]
# joined = joined[:25]
#br = joined.plot(kind="barh",ax=ax_pie1,color=["red","red","blue","blue"],legend=False,stacked=True, sort_columns=False,fontsize=8);
br = joined.plot(kind="barh",ax=ax_pie1,color=["red","blue"],legend=False,stacked=True, sort_columns=False,fontsize=8);
max_ax = np.max( joined.values.flatten() )
min_ax = np.min( joined.values.flatten() )
max_ax = np.max( max_ax, -min_ax )
min_ax = -max_ax
#br = joined.plot(kind="barh",ax=ax_pie1,color=["red","blue"],legend=True,stacked=True, sort_columns=False);
pp.suptitle( "H %d"%(z_idx))
pp.savefig( h_dir + "/h%d_weighted.png"%(z_idx), format="png", dpi=300 )
#pp.show()
#pdb.set_trace()
pp.close('all')
if __name__ == "__main__":
data_location = sys.argv[1]
results_location = sys.argv[2]
main( data_location, results_location ) | 42.080745 | 222 | 0.667847 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,912 | 0.241673 |
23d49ee738e43aa66d515d38988b95d1c1f66917 | 102 | py | Python | src/django/tests/test_settings.py | segestic/django-builder | 802e73241fe29ea1afb2df15a3addee87f39aeaa | [
"MIT"
]
| 541 | 2015-05-27T04:34:38.000Z | 2022-03-23T18:00:16.000Z | src/django/tests/test_settings.py | segestic/django-builder | 802e73241fe29ea1afb2df15a3addee87f39aeaa | [
"MIT"
]
| 85 | 2015-05-27T14:27:27.000Z | 2022-02-27T18:51:08.000Z | src/django/tests/test_settings.py | segestic/django-builder | 802e73241fe29ea1afb2df15a3addee87f39aeaa | [
"MIT"
]
| 129 | 2015-05-27T20:55:43.000Z | 2022-03-23T14:18:07.000Z |
from XXX_PROJECT_NAME_XXX.settings import * # noqa
# Override any settings required for tests here
| 20.4 | 51 | 0.794118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.519608 |
23d6f93dd725259d766c98af0f0522d89793519e | 3,808 | py | Python | m2m/search/models.py | blampe/M2M | d8c025481ba961fe85b95f9e851a7678e08227c3 | [
"MIT"
]
| null | null | null | m2m/search/models.py | blampe/M2M | d8c025481ba961fe85b95f9e851a7678e08227c3 | [
"MIT"
]
| null | null | null | m2m/search/models.py | blampe/M2M | d8c025481ba961fe85b95f9e851a7678e08227c3 | [
"MIT"
]
| 1 | 2018-06-27T14:05:43.000Z | 2018-06-27T14:05:43.000Z | from django.db import models
#from djangosphinx import SphinxSearch, SphinxRelation, SphinxQuerySet
#import djangosphinx.apis.current as sphinxapi
from advancedsearch.models import Movie, Episode, Song
from browseNet.models import Host, Path
# Create your models here.
class File(models.Model):
'''An indexed file on the Network'''
id = models.IntegerField(primary_key=True, db_column='ID') # Field name made lowercase.
MIDs = models.ForeignKey(Movie, related_name='files', null=True, on_delete=models.SET_NULL)
SIDs = models.ForeignKey(Episode, null=True, related_name='files', on_delete=models.SET_NULL)
MuIDs = models.ForeignKey(Song, null=True,related_name='files', on_delete=models.SET_NULL)
path = models.ForeignKey(Path,db_column='PID') # Field name made lowercase.
filename = models.CharField(max_length=765, db_column='FileName') # Field name made lowercase.
filenameend = models.CharField(max_length=12, db_column='FileNameEnd') # Field name made lowercase.
dateadded = models.DateTimeField(db_column='DateAdded') # Field name made lowercase.
filesize = models.BigIntegerField(db_column='FileSize') # Field name made lowercase.
filedate = models.DateTimeField(db_column='FileDate') # Field name made lowercase.
indexed = models.NullBooleanField(null=True, db_column='Indexed', blank=True) # Field name made lowercase.
# good = 1, bad = 0, unclear = 3
goodfile = models.IntegerField(default=1)
objects = models.Manager()
videoEndings = ".avi|.mpg|.mp4|.m4v|.mov|.mpeg|.wmv|.mkv|.divx|.flv|.m2ts"
audioEndings = ".mp3|.flac|.ogg|.wma|.m4a|.aac|.wav|.aif|.au"
textEndings = ".txt|.chm|.pdf|.html|.rtf|.doc|.docx|.odt|.tex"
imageEndings = ".jpg|.jpeg|.raw|.tiff|.gif|.png|.psd|.tga|.tpic|.svg"
def remove_problems(self):
self.remove_dne_problem()
self.remove_saving_problem()
def remove_dne_problem(self):
try:
self.dneproblem
self.dneproblem.delete()
self.save()
except:
pass
def remove_saving_problem(self):
try:
self.savingproblem
self.savingproblem.delete()
self.save()
except:
pass
def remove_bad_file_problem(self):
try:
self.badfileproblem
self.badfileproblem.delete()
self.save()
except:
pass
def remove_under_problem(self):
try:
self.undefproblem
self.undefproblem.delete()
self.save()
except:
pass
def __unicode__(self):
#-*-coding:iso-8859-1-*-
return u'{}'.format(self.filename)
class Meta:
db_table = u'file'
class History(models.Model):
uid = models.IntegerField(db_column='UID') # Field name made lowercase.
position = models.IntegerField(db_column='Position') # Field name made lowercase.
searchstring = models.CharField(max_length=765, db_column='SearchString') # Field name made lowercase.
mode = models.IntegerField(db_column='Mode') # Field name made lowercase.
hosttype = models.IntegerField(db_column='HostType') # Field name made lowercase.
flags = models.IntegerField(db_column='Flags') # Field name made lowercase.
date = models.IntegerField(db_column='Date') # Field name made lowercase.
datevalue = models.IntegerField(db_column='DateValue') # Field name made lowercase.
minsize = models.IntegerField(db_column='MinSize') # Field name made lowercase.
maxsize = models.IntegerField(db_column='MaxSize') # Field name made lowercase.
hits = models.IntegerField(db_column='Hits') # Field name made lowercase.
class Meta:
db_table = u'history'
| 39.666667 | 110 | 0.665966 | 3,520 | 0.92437 | 0 | 0 | 0 | 0 | 0 | 0 | 1,181 | 0.310137 |
23d7aa18934d135f4447648b4a864fe8e8b4a99c | 1,790 | py | Python | moods.py | henry232323/Discord-Pesterchum | 70be67f3671b35aa6cbe6e4eb66a4a1c07707ce3 | [
"MIT"
]
| 27 | 2017-01-31T03:28:26.000Z | 2021-09-05T21:02:36.000Z | moods.py | henry232323/Discord-Pesterchum | 70be67f3671b35aa6cbe6e4eb66a4a1c07707ce3 | [
"MIT"
]
| 18 | 2018-02-03T16:44:18.000Z | 2021-06-26T04:12:17.000Z | moods.py | henry232323/Discord-Pesterchum | 70be67f3671b35aa6cbe6e4eb66a4a1c07707ce3 | [
"MIT"
]
| 5 | 2017-09-23T15:53:08.000Z | 2020-07-26T06:19:13.000Z | #!/usr/bin/env python3
# Copyright (c) 2016-2020, henry232323
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
class Moods(object):
moods = ["chummy", "rancorous", "offline", "pleasant", "distraught",
"pranky", "smooth", "ecstatic", "relaxed", "discontent",
"devious", "sleek", "detestful", "mirthful", "manipulative",
"vigorous", "perky", "acceptant", "protective", "mystified",
"amazed", "insolent", "bemused"]
def __init__(self):
self.usermoods = dict()
self.value = 0
@staticmethod
def getMood(name):
name = "offline" if name.lower() == "abscond" else name
return Moods.moods.index(name.lower())
@staticmethod
def getName(index):
return Moods.moods[index]
| 42.619048 | 76 | 0.701117 | 667 | 0.372626 | 0 | 0 | 218 | 0.121788 | 0 | 0 | 1,344 | 0.750838 |
23d7e7b0e05f376311c1a1430b049eda79a5c69d | 4,465 | py | Python | reclass/utils/tests/test_refvalue.py | bbinet/reclass | c08b844b328fa0fe182db49dd423cc203a016ce9 | [
"Artistic-2.0"
]
| 101 | 2015-01-09T14:59:57.000Z | 2021-11-06T23:33:50.000Z | reclass/utils/tests/test_refvalue.py | bbinet/reclass | c08b844b328fa0fe182db49dd423cc203a016ce9 | [
"Artistic-2.0"
]
| 48 | 2015-01-30T05:53:47.000Z | 2019-03-21T23:17:40.000Z | reclass/utils/tests/test_refvalue.py | bbinet/reclass | c08b844b328fa0fe182db49dd423cc203a016ce9 | [
"Artistic-2.0"
]
| 50 | 2015-01-30T08:56:07.000Z | 2020-12-25T02:34:08.000Z | #
# -*- coding: utf-8 -*-
#
# This file is part of reclass (http://github.com/madduck/reclass)
#
# Copyright © 2007–14 martin f. krafft <[email protected]>
# Released under the terms of the Artistic Licence 2.0
#
from reclass.utils.refvalue import RefValue
from reclass.defaults import PARAMETER_INTERPOLATION_SENTINELS, \
PARAMETER_INTERPOLATION_DELIMITER
from reclass.errors import UndefinedVariableError, \
IncompleteInterpolationError
import unittest
def _var(s):
return '%s%s%s' % (PARAMETER_INTERPOLATION_SENTINELS[0], s,
PARAMETER_INTERPOLATION_SENTINELS[1])
CONTEXT = {'favcolour':'yellow',
'motd':{'greeting':'Servus!',
'colour':'${favcolour}'
},
'int':1,
'list':[1,2,3],
'dict':{1:2,3:4},
'bool':True
}
def _poor_mans_template(s, var, value):
return s.replace(_var(var), value)
class TestRefValue(unittest.TestCase):
def test_simple_string(self):
s = 'my cat likes to hide in boxes'
tv = RefValue(s)
self.assertFalse(tv.has_references())
self.assertEquals(tv.render(CONTEXT), s)
def _test_solo_ref(self, key):
s = _var(key)
tv = RefValue(s)
res = tv.render(CONTEXT)
self.assertTrue(tv.has_references())
self.assertEqual(res, CONTEXT[key])
def test_solo_ref_string(self):
self._test_solo_ref('favcolour')
def test_solo_ref_int(self):
self._test_solo_ref('int')
def test_solo_ref_list(self):
self._test_solo_ref('list')
def test_solo_ref_dict(self):
self._test_solo_ref('dict')
def test_solo_ref_bool(self):
self._test_solo_ref('bool')
def test_single_subst_bothends(self):
s = 'I like ' + _var('favcolour') + ' and I like it'
tv = RefValue(s)
self.assertTrue(tv.has_references())
self.assertEqual(tv.render(CONTEXT),
_poor_mans_template(s, 'favcolour',
CONTEXT['favcolour']))
def test_single_subst_start(self):
s = _var('favcolour') + ' is my favourite colour'
tv = RefValue(s)
self.assertTrue(tv.has_references())
self.assertEqual(tv.render(CONTEXT),
_poor_mans_template(s, 'favcolour',
CONTEXT['favcolour']))
def test_single_subst_end(self):
s = 'I like ' + _var('favcolour')
tv = RefValue(s)
self.assertTrue(tv.has_references())
self.assertEqual(tv.render(CONTEXT),
_poor_mans_template(s, 'favcolour',
CONTEXT['favcolour']))
def test_deep_subst_solo(self):
var = PARAMETER_INTERPOLATION_DELIMITER.join(('motd', 'greeting'))
s = _var(var)
tv = RefValue(s)
self.assertTrue(tv.has_references())
self.assertEqual(tv.render(CONTEXT),
_poor_mans_template(s, var,
CONTEXT['motd']['greeting']))
def test_multiple_subst(self):
greet = PARAMETER_INTERPOLATION_DELIMITER.join(('motd', 'greeting'))
s = _var(greet) + ' I like ' + _var('favcolour') + '!'
tv = RefValue(s)
self.assertTrue(tv.has_references())
want = _poor_mans_template(s, greet, CONTEXT['motd']['greeting'])
want = _poor_mans_template(want, 'favcolour', CONTEXT['favcolour'])
self.assertEqual(tv.render(CONTEXT), want)
def test_multiple_subst_flush(self):
greet = PARAMETER_INTERPOLATION_DELIMITER.join(('motd', 'greeting'))
s = _var(greet) + ' I like ' + _var('favcolour')
tv = RefValue(s)
self.assertTrue(tv.has_references())
want = _poor_mans_template(s, greet, CONTEXT['motd']['greeting'])
want = _poor_mans_template(want, 'favcolour', CONTEXT['favcolour'])
self.assertEqual(tv.render(CONTEXT), want)
def test_undefined_variable(self):
s = _var('no_such_variable')
tv = RefValue(s)
with self.assertRaises(UndefinedVariableError):
tv.render(CONTEXT)
def test_incomplete_variable(self):
s = PARAMETER_INTERPOLATION_SENTINELS[0] + 'incomplete'
with self.assertRaises(IncompleteInterpolationError):
tv = RefValue(s)
if __name__ == '__main__':
unittest.main()
| 34.882813 | 76 | 0.600224 | 3,472 | 0.777081 | 0 | 0 | 0 | 0 | 0 | 0 | 755 | 0.168979 |
23d88124e0abeec9041b9f813d746d7445479956 | 1,506 | py | Python | backend/neuroflow/routes/mood.py | isamu-isozaki/neuroflow-challenge | ca29b8e48be4853317ab706acd4731ea0a8bab10 | [
"MIT"
]
| null | null | null | backend/neuroflow/routes/mood.py | isamu-isozaki/neuroflow-challenge | ca29b8e48be4853317ab706acd4731ea0a8bab10 | [
"MIT"
]
| null | null | null | backend/neuroflow/routes/mood.py | isamu-isozaki/neuroflow-challenge | ca29b8e48be4853317ab706acd4731ea0a8bab10 | [
"MIT"
]
| null | null | null | """
Author: Isamu Isozaki ([email protected])
Description: description
Created: 2021-12-01T16:32:53.089Z
Modified: !date!
Modified By: modifier
"""
from flask import Blueprint, redirect, jsonify, url_for, request
from neuroflow.repository import create_mood, get_authorized, load_moods_from_user
from functools import wraps
from flask_cors import cross_origin
blueprint = Blueprint('mood', __name__,
url_prefix='/mood')
def authorized():
def authorized_decorator(f):
@wraps(f)
def wrap(*args, **kwargs):
if not request.headers.get('Authorization', None):
return 'Unauthorized', 401
user = get_authorized(request)
if not user:
return 'Unauthorized', 401
return f(user, *args, **kwargs)
return wrap
return authorized_decorator
@blueprint.route('', methods=['POST', 'GET'])
@cross_origin()
@authorized()
def mood_processing(user):
if request.method == 'POST':
try:
request_json = request.get_json()
mood_val = float(request_json['mood'])
assert 0 <= mood_val <= 10
mood = create_mood(mood_val, user)
except Exception as e:
print(e)
return "Invalid request.", 400
del mood['_sa_instance_state']
del mood['user']
return jsonify({'mood': mood})
else:
moods = load_moods_from_user(user)
return jsonify({'moods': moods})
| 30.12 | 82 | 0.616866 | 0 | 0 | 0 | 0 | 928 | 0.616202 | 0 | 0 | 292 | 0.193891 |
23d8fd0ae625c1772c3f3bb0a2d8ee76180f8da6 | 2,684 | py | Python | capstone/upload_to_s3.py | slangenbach/udacity-de-nanodegree | ba885eb4c6fbce063e443375a89b92dbc46fa809 | [
"MIT"
]
| 2 | 2020-03-07T23:32:41.000Z | 2020-05-22T15:35:16.000Z | capstone/upload_to_s3.py | slangenbach/udacity-de-nanodegree | ba885eb4c6fbce063e443375a89b92dbc46fa809 | [
"MIT"
]
| 1 | 2020-05-25T11:17:15.000Z | 2020-05-26T06:58:37.000Z | capstone/upload_to_s3.py | slangenbach/udacity-de-nanodegree | ba885eb4c6fbce063e443375a89b92dbc46fa809 | [
"MIT"
]
| 2 | 2020-03-31T13:00:01.000Z | 2021-07-14T14:34:37.000Z | import logging
import time
from pathlib import Path
from configparser import ConfigParser
import boto3
from botocore.exceptions import ClientError
def create_bucket(bucket_name: str, region: str = 'us-west-2'):
"""
Create S3 bucket
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-example-creating-buckets.html
:param bucket_name: Name of S3 bucket
:param region: AWS region where bucket is created
:return: True if bucket is created or already exists, False if ClientError occurs
"""
try:
        s3_client = boto3.client('s3', region_name=region)
# list buckets
response = s3_client.list_buckets()
        # check if the bucket already exists (compare against the bucket names, not the dicts)
        if bucket_name not in [bucket['Name'] for bucket in response['Buckets']]:
            s3_client.create_bucket(Bucket=bucket_name)
        else:
            logging.warning(f"{bucket_name} already exists in AWS region {region}")
except ClientError as e:
logging.exception(e)
return False
return True
def upload_file(file_name: str, bucket: str, object_name: str = None, region: str = 'us-west-2'):
"""
Upload file to S3 bucket
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-uploading-files.html
:param file_name: Path to file including filename
:param bucket: Bucket where file is uploaded to
:param object_name: Name of file inside S3 bucket
:param region: AWS region where bucket is located
:return: True if upload succeeds, False if ClientError occurs
"""
if object_name is None:
object_name = file_name
try:
        s3_client = boto3.client('s3', region_name=region)
s3_client.upload_file(file_name, bucket, object_name)
except ClientError as e:
logging.exception(e)
return False
return True
if __name__ == '__main__':
# load config
config = ConfigParser()
config.read('app.cfg')
# start logging
logging.basicConfig(level=config.get("logging", "level"), format="%(asctime)s - %(levelname)s - %(message)s")
logging.info("Started")
# start timer
start_time = time.perf_counter()
# define
data_path = Path(__file__).parent.joinpath('data')
# check if bucket exists
create_bucket(bucket_name='fff-streams')
# upload files to S3
upload_file(data_path.joinpath('world_happiness_2017.csv'), bucket='fff-streams', object_name='world_happiness.csv')
upload_file(data_path.joinpath('temp_by_city_clean.csv'), bucket='fff-streams', object_name='temp_by_city.csv')
# stop timer
stop_time = time.perf_counter()
logging.info(f"Uploaded files in {(stop_time - start_time):.2f} seconds")
logging.info("Finished")
| 31.209302 | 120 | 0.688897 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,248 | 0.464978 |
23da034ad35f31e90c8e53d6592ca43cf2dabf3f | 4,734 | py | Python | Timer.py | Dark-Night-Base/MCDP | fbdba3c2b7a919d625067cbd473cdbe779af3256 | [
"MIT"
]
| null | null | null | Timer.py | Dark-Night-Base/MCDP | fbdba3c2b7a919d625067cbd473cdbe779af3256 | [
"MIT"
]
| null | null | null | Timer.py | Dark-Night-Base/MCDP | fbdba3c2b7a919d625067cbd473cdbe779af3256 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
import time
help_msg = '''------ §aMCR 时钟插件帮助信息 §f------
§b!!time help §f- §c显示帮助消息
§b!!time ct §f- §c显示当前时间
§b!!time timer [秒] §f- §c开启倒计时
§b!!time stopwatch start §f- §c开启秒表
§b!!time stopwatch stop §f- §c停止秒表
--------------------------------'''
no_input = '''------ §a温馨提示 §f------
§c未知指令 请输入 !!time help 获取帮助
--------------------------------'''
stop_T = False
def on_info(server, info):
if info.is_player == 1:
if info.content.startswith('!!time'):
args = info.content.split(' ')
if len(args) == 1:
for line in help_msg.splitlines():
server.tell(info.player, line)
elif args[1] == 'help':
for line in help_msg.splitlines():
server.tell(info.player, line)
elif args[1] == 'ct':
t = time.localtime()
current_time = time.strftime("%H:%M:%S", t)
int_current_time = int(time.strftime("%H", t))
if int_current_time in range(6, 12):
server.tell(info.player, "------ §a当前时间 §f------")
server.tell(info.player, "§b 早上好")
server.tell(info.player, "§b 现在时间是: " + current_time)
server.tell(info.player, "--------------------------------")
elif int_current_time in range(12, 19):
server.tell(info.player, "------ §a当前时间 §f------")
server.tell(info.player, "§b 下午好")
server.tell(info.player, "§b 现在时间是: " + current_time)
server.tell(info.player, "--------------------------------")
elif int_current_time in range(19, 24):
server.tell(info.player, "------ §a当前时间 §f------")
server.tell(info.player, "§b 晚上好")
server.tell(info.player, "§b 现在时间是: " + current_time)
server.tell(info.player, "--------------------------------")
elif int_current_time in range(0, 6):
server.tell(info.player, "------ §a当前时间 §f------")
server.tell(info.player, "§b 晚上好")
server.tell(info.player, "§b 现在时间是: " + current_time)
server.tell(info.player, "--------------------------------")
else:
server.tell(info.player, "------ §a当前时间 §f------")
server.tell(info.player, "§b 现在时间是: " + current_time)
server.tell(info.player, "--------------------------------")
elif args[1] == 'timer':
second = int(args[2])
count = 0
while count < second:
count_now = second - count
if count_now >= 30:
server.tell(info.player, "倒计时还剩: " + "§a" + str(count_now))
time.sleep(1)
count += 1
elif 30 > count_now > 10:
server.tell(info.player, "倒计时还剩: " + "§e" + str(count_now))
time.sleep(1)
count += 1
else:
server.tell(info.player, "倒计时还剩: " + "§c" + str(count_now))
time.sleep(1)
count += 1
server.tell(info.player, "时间到!")
server.execute(
'execute at ' + info.player + ' run playsound minecraft:block.bell.use player ' + info.player)
server.execute(
'execute at ' + info.player + ' run playsound minecraft:block.bell.use player ' + info.player)
server.execute(
'execute at ' + info.player + ' run playsound minecraft:block.bell.use player ' + info.player)
elif args[1] == 'stopwatch':
status = args[2]
if status == 'start':
start(server, info)
elif status == 'stop':
stop(server, info)
else:
for line in no_input.splitlines():
server.tell(info.player, line)
def on_load(server, old):
server.add_help_message('!!time', '时钟系统帮助')
def start(server, info):
global stop_T
stop_T = True
start_time = time.time()
server.tell(info.player, "§b秒表开启")
while stop_T:
r = round(time.time() - start_time, 0)
server.tell(info.player, "§b计时: " + str(r) + " 秒")
time.sleep(1)
def stop(server, info):
global stop_T
if stop_T:
stop_T = False
server.tell(info.player, "§b秒表已停止")
else:
server.tell(info.player, "§b秒表未开启")
| 41.526316 | 114 | 0.444022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,408 | 0.277493 |
23dbf2b9d9cefc92e0075e49e75f8a00b52cb7f9 | 4,174 | py | Python | core/loader.py | CrackerCat/ZetaSploit | 4589d467c9fb81c1a5075cd43358b2df9b896530 | [
"MIT"
]
| 3 | 2020-12-04T07:29:31.000Z | 2022-01-30T10:14:41.000Z | core/loader.py | CrackerCat/ZetaSploit | 4589d467c9fb81c1a5075cd43358b2df9b896530 | [
"MIT"
]
| null | null | null | core/loader.py | CrackerCat/ZetaSploit | 4589d467c9fb81c1a5075cd43358b2df9b896530 | [
"MIT"
]
| 1 | 2021-03-27T06:14:43.000Z | 2021-03-27T06:14:43.000Z | #!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import sys
import time
import threading
import os
from core.badges import badges
from core.helper import helper
class loader:
def __init__(self):
self.badges = badges()
self.helper = helper()
def get_module(self, mu, name, folderpath):
folderpath_list = folderpath.split(".")
for i in dir(mu):
if i == name:
pass
return getattr(mu, name)
else:
if i in folderpath_list:
i = getattr(mu, i)
return self.get_module(i, name, folderpath)
def import_plugins(self, plugin_owner, plugin_system, controller):
plugins = dict()
plugin_path = "plugins/" + plugin_owner + "/" + plugin_system
for plugin_type in os.listdir(plugin_path):
plugin_path = plugin_path + "/" + plugin_type
for plugin in os.listdir(plugin_path):
if plugin == '__init__.py' or plugin[-3:] != '.py':
continue
else:
try:
plugin_directory = plugin_path.replace("/", ".").replace("\\", ".") + "." + plugin[:-3]
plugin_file = __import__(plugin_directory)
plugin_object = self.get_module(plugin_file, plugin[:-3], plugin_directory)
plugin_object = plugin_object.ZetaSploitPlugin(controller)
plugins[plugin_object.details['Name']] = plugin_object
except Exception as e:
print(self.badges.E + "Failed to load plugin! Reason: "+str(e))
return plugins
def import_modules(self):
modules = dict()
module_path = "modules"
for module_system in os.listdir(module_path):
module_path = module_path + "/" + module_system
for module_type in os.listdir(module_path):
module_path = module_path + "/" + module_type
for module in os.listdir(module_path):
if module == '__init__.py' or module[-3:] != '.py':
continue
else:
try:
module_directory = module_path.replace("/", ".").replace("\\", ".") + "." + module[:-3]
module_file = __import__(module_directory)
module_object = self.get_module(module_file, module[:-3], module_directory)
module_object = module_object.ZetaSploitModule()
modules[module_object.details['Name']] = module_object
except Exception as e:
                            print(self.badges.E + "Failed to load module! Reason: " + str(e))
return modules
def load_plugins(self, owner, system, controller):
plugins = self.import_plugins(owner, system, controller)
return plugins
def load_modules(self):
modules = self.import_modules()
return modules | 43.030928 | 115 | 0.598946 | 2,929 | 0.701725 | 0 | 0 | 0 | 0 | 0 | 0 | 1,285 | 0.307858 |
23dc4f684d9d5300357e5bf6d8fabca6e13f5585 | 8,556 | py | Python | parameter_setting/parameters_setting_cropping_impact.py | MorganeAudrain/Calcium_new | 1af0ab4f70b91d1ca55c6053112c1744b1da1bd3 | [
"MIT"
]
| null | null | null | parameter_setting/parameters_setting_cropping_impact.py | MorganeAudrain/Calcium_new | 1af0ab4f70b91d1ca55c6053112c1744b1da1bd3 | [
"MIT"
]
| null | null | null | parameter_setting/parameters_setting_cropping_impact.py | MorganeAudrain/Calcium_new | 1af0ab4f70b91d1ca55c6053112c1744b1da1bd3 | [
"MIT"
]
| null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 5
@author: Melisa Maidana
This script runs different cropping parameters, motion-corrects the cropped images using reasonable motion
correction parameters that were previously selected with the parameters_setting_motion_correction scripts,
then runs source extraction (with multiple parameter combinations) and creates figures of the cropped image
and the cells extracted from it. The idea is to compare the resulting source-extraction neural footprints for
different cropping selections. Ideally the extracted sources should be similar; if that is the case, the
parameter setting for every step can be run on small pieces of the image, the best values selected, and then
applied to the complete image.
"""
import os
import sys
import psutil
import logging
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import pylab as pl
# This should be in another file. Let's leave it here for now
sys.path.append('/home/sebastian/Documents/Melisa/calcium_imaging_analysis/src/')
sys.path.remove('/home/sebastian/Documents/calcium_imaging_analysis')
import src.configuration
import caiman as cm
import src.data_base_manipulation as db
from src.steps.cropping import run_cropper as main_cropping
from src.steps.motion_correction import run_motion_correction as main_motion_correction
from src.steps.source_extraction import run_source_extraction as main_source_extraction
import src.analysis.metrics as metrics
from caiman.source_extraction.cnmf.cnmf import load_CNMF
#Paths
analysis_states_database_path = 'references/analysis/analysis_states_database.xlsx'
backup_path = 'references/analysis/backup/'
#parameters_path = 'references/analysis/parameters_database.xlsx'
## Open thw data base with all data
states_df = db.open_analysis_states_database()
mouse = 51565
session = 1
trial = 1
is_rest = 1
# CROPPING
# Select the rows for cropping
x1_crops = np.arange(200,0,-50)
x2_crops = np.arange(350,550,50)
y1_crops = np.arange(200,0,-50)
y2_crops = np.arange(350,550,50)
n_processes = psutil.cpu_count()
cm.cluster.stop_server()
# Start a new cluster
c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
n_processes=n_processes, # number of process to use, if you go out of memory try to reduce this one
single_thread=False)
logging.info(f'Starting cluster. n_processes = {n_processes}.')
# parameters for motion correction
parameters_motion_correction = {'motion_correct': True, 'pw_rigid': True, 'save_movie_rig': False,
'gSig_filt': (5, 5), 'max_shifts': (25, 25), 'niter_rig': 1,
'strides': (48, 48),
'overlaps': (96, 96), 'upsample_factor_grid': 2, 'num_frames_split': 80,
'max_deviation_rigid': 15,
'shifts_opencv': True, 'use_cuda': False, 'nonneg_movie': True, 'border_nan': 'copy'}
#parameters for source extraction
gSig = 5
gSiz = 4 * gSig + 1
corr_limits = np.linspace(0.4, 0.6, 5)
pnr_limits = np.linspace(3, 7, 5)
cropping_v = np.zeros(5)
motion_correction_v = np.zeros(5)
selected_rows = db.select(states_df,'cropping', mouse = mouse, session = session, trial = trial , is_rest = is_rest)
mouse_row = selected_rows.iloc[0]
for kk in range(4):
cropping_interval = [x1_crops[kk], x2_crops[kk], y1_crops[kk], y2_crops[kk]]
parameters_cropping = {'crop_spatial': True, 'cropping_points_spatial': cropping_interval,
'crop_temporal': False, 'cropping_points_temporal': []}
mouse_row = main_cropping(mouse_row, parameters_cropping)
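    # Position 5 of the analysis-state multi-index holds the cropping version
    # (position 6, used further below, holds the motion-correction version).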
cropping_v[kk] = mouse_row.name[5]
states_df = db.append_to_or_merge_with_states_df(states_df, mouse_row)
db.save_analysis_states_database(states_df, path=analysis_states_database_path, backup_path = backup_path)
states_df = db.open_analysis_states_database()
for kk in range(4):
selected_rows = db.select(states_df, 'motion_correction', 56165, cropping_v = cropping_v[kk])
mouse_row = selected_rows.iloc[0]
mouse_row_new = main_motion_correction(mouse_row, parameters_motion_correction, dview)
mouse_row_new = metrics.get_metrics_motion_correction(mouse_row_new, crispness=True)
states_df = db.append_to_or_merge_with_states_df(states_df, mouse_row_new)
db.save_analysis_states_database(states_df, path=analysis_states_database_path, backup_path = backup_path)
motion_correction_v[kk]=mouse_row_new.name[6]
states_df = db.open_analysis_states_database()
for ii in range(corr_limits.shape[0]):
for jj in range(pnr_limits.shape[0]):
parameters_source_extraction = {'session_wise': False, 'fr': 10, 'decay_time': 0.1,
'min_corr': corr_limits[ii],
'min_pnr': pnr_limits[jj], 'p': 1, 'K': None, 'gSig': (gSig, gSig),
'gSiz': (gSiz, gSiz),
'merge_thr': 0.7, 'rf': 60, 'stride': 30, 'tsub': 1, 'ssub': 2, 'p_tsub': 1,
'p_ssub': 2, 'low_rank_background': None, 'nb': 0, 'nb_patch': 0,
'ssub_B': 2,
'init_iter': 2, 'ring_size_factor': 1.4, 'method_init': 'corr_pnr',
'method_deconvolution': 'oasis', 'update_background_components': True,
'center_psf': True, 'border_pix': 0, 'normalize_init': False,
'del_duplicates': True, 'only_init': True}
for kk in range(4):
selected_rows = db.select(states_df, 'source_extraction', 56165, cropping_v = cropping_v[kk])
mouse_row = selected_rows.iloc[0]
mouse_row_new = main_source_extraction(mouse_row, parameters_source_extraction, dview)
states_df = db.append_to_or_merge_with_states_df(states_df, mouse_row_new)
db.save_analysis_states_database(states_df, path=analysis_states_database_path, backup_path=backup_path)
states_df = db.open_analysis_states_database()
for ii in range(corr_limits.shape[0]):
for jj in range(pnr_limits.shape[0]):
figure, axes = plt.subplots(4, 3, figsize=(50, 30))
version = ii * pnr_limits.shape[0] + jj +1
for kk in range(4):
selected_rows = db.select(states_df, 'component_evaluation', 56165, cropping_v=cropping_v[kk], motion_correction_v = 1, source_extraction_v= version)
mouse_row = selected_rows.iloc[0]
decoding_output = mouse_row['decoding_output']
decoded_file = eval(decoding_output)['main']
m = cm.load(decoded_file)
axes[kk,0].imshow(m[0, :, :], cmap='gray')
cropping_interval = [x1_crops[kk], x2_crops[kk], y1_crops[kk], y2_crops[kk]]
[x_, _x, y_, _y] = cropping_interval
rect = Rectangle((y_, x_), _y - y_, _x - x_, fill=False, color='r', linestyle='--', linewidth = 3)
axes[kk,0].add_patch(rect)
output_cropping = mouse_row['cropping_output']
cropped_file = eval(output_cropping)['main']
m = cm.load(cropped_file)
axes[kk,1].imshow(m[0, :, :], cmap='gray')
output_source_extraction = eval(mouse_row['source_extraction_output'])
cnm_file_path = output_source_extraction['main']
cnm = load_CNMF(db.get_file(cnm_file_path))
corr_path = output_source_extraction['meta']['corr']['main']
cn_filter = np.load(db.get_file(corr_path))
axes[kk, 2].imshow(cn_filter)
coordinates = cm.utils.visualization.get_contours(cnm.estimates.A, np.shape(cn_filter), 0.2, 'max')
for c in coordinates:
v = c['coordinates']
c['bbox'] = [np.floor(np.nanmin(v[:, 1])), np.ceil(np.nanmax(v[:, 1])),
np.floor(np.nanmin(v[:, 0])), np.ceil(np.nanmax(v[:, 0]))]
axes[kk, 2].plot(*v.T, c='w',linewidth=3)
fig_dir ='/home/sebastian/Documents/Melisa/calcium_imaging_analysis/data/interim/cropping/meta/figures/cropping_inicialization/'
fig_name = fig_dir + db.create_file_name(2,mouse_row.name) + '_corr_' + f'{round(corr_limits[ii],1)}' + '_pnr_' + f'{round(pnr_limits[jj])}' + '.png'
figure.savefig(fig_name)
| 50.329412 | 161 | 0.661524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,446 | 0.285881 |
23dd6ab36e5a83840094cc404aedad771f6f9076 | 1,676 | py | Python | src/data/energidataservice_api.py | titanbender/electricity-price-forecasting | c288a9b6d7489ac03ee800318539195bd1cd2650 | [
"MIT"
]
| 1 | 2021-04-15T13:05:03.000Z | 2021-04-15T13:05:03.000Z | src/data/energidataservice_api.py | titanbender/electricity-price-forecasting | c288a9b6d7489ac03ee800318539195bd1cd2650 | [
"MIT"
]
| 1 | 2018-12-11T13:41:45.000Z | 2018-12-11T14:15:15.000Z | src/data/energidataservice_api.py | titanbender/electricity-price-forecasting | c288a9b6d7489ac03ee800318539195bd1cd2650 | [
"MIT"
]
| 1 | 2020-01-01T21:03:02.000Z | 2020-01-01T21:03:02.000Z |
import pandas as pd
import json
import urllib.request
def download_nordpool(limit, output_file):
'''
The method downloads the nordpool available data from www.energidataservice.dk and saves it in a csv file
limit: Int, the number of maximum rows of data to download
output_file: Str, the name of the output file
'''
url = 'https://api.energidataservice.dk/datastore_search?resource_id=8bd7a37f-1098-4643-865a-01eb55c62d21&limit=' + str(limit)
print("downloading nordpool data ...")
    fileobj = urllib.request.urlopen(url)
data = json.loads(fileobj.read())
nordpool_df = pd.DataFrame.from_dict(data['result']['records']) # the data is stored inside two dictionaries
nordpool_df.to_csv(output_file)
print("nordpool data has been downloaded and saved")
def download_dayforward(limit, output_file):
'''
The method downloads the available day ahead spotprices in DK and neighboring countries data
from www.energidataservice.dk and saves it in a csv file
limit: Int, the number of maximum rows of data to download
output_file: Str, the name of the output file
'''
url = 'https://api.energidataservice.dk/datastore_search?resource_id=c86859d2-942e-4029-aec1-32d56f1a2e5d&limit=' + str(limit)
print("downloading day forward data ...")
    fileobj = urllib.request.urlopen(url)
data = json.loads(fileobj.read())
nordpool_df = pd.DataFrame.from_dict(data['result']['records']) # the data is stored inside two dictionaries
nordpool_df.to_csv(output_file)
print("day forward data has been downloaded and saved")
if __name__ == '__main__':
print("connecting with the API")
download_nordpool(10000000, 'nordpool_data.csv')
download_dayforward(10000000, 'dayforward_data.csv') | 37.244444 | 127 | 0.7679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,063 | 0.634248 |
23df352466c71a2286ba6b66bb76f8b89e0ba1ff | 1,873 | py | Python | models/cnn.py | amayuelas/NNKGReasoning | 0e3623b344fd4e3088ece897f898ddbb1f80888d | [
"MIT"
]
| 1 | 2022-03-16T22:20:12.000Z | 2022-03-16T22:20:12.000Z | models/cnn.py | amayuelas/NNKGReasoning | 0e3623b344fd4e3088ece897f898ddbb1f80888d | [
"MIT"
]
| 2 | 2022-03-22T23:34:38.000Z | 2022-03-24T17:35:53.000Z | models/cnn.py | amayuelas/NNKGReasoning | 0e3623b344fd4e3088ece897f898ddbb1f80888d | [
"MIT"
]
| null | null | null | from typing import Any
import torch
import torch.nn as nn
import torch.nn.functional as F
class CNN(nn.Module):
def __init__(self, entity_dim):
super(CNN, self).__init__()
self.dim = entity_dim
self.conv1 = nn.Conv1d(in_channels=1, out_channels=10,
kernel_size=6)
self.conv2 = nn.Conv1d(in_channels=10, out_channels=10,
kernel_size=6)
self.pool = nn.MaxPool1d(kernel_size=5)
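        # Two (conv kernel_size=6, max-pool 5) stages shrink a signal of length entity_dim
        # to about entity_dim / 25 - 2 positions with 10 channels, which is the flattened
        # size expected by fc1 below.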
self.fc1 = nn.Linear(int(self.dim / 25 - 2) * 10, self.dim)
self.fc2 = nn.Linear(self.dim, self.dim * 2)
self.fc3 = nn.Linear(self.dim * 2, self.dim)
def forward(self, x):
x = x.unsqueeze(1)
x = self.pool(self.conv1(x))
x = self.pool(self.conv2(x))
x = x.view(-1, x.shape[1] * x.shape[2])
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
class CNN2(nn.Module):
def __init__(self, entity_dim):
super(CNN2, self).__init__()
self.dim = entity_dim
self.conv1 = nn.Conv1d(in_channels=1, out_channels=10,
kernel_size=6)
self.conv2 = nn.Conv1d(in_channels=10, out_channels=10,
kernel_size=6)
self.pool = nn.MaxPool1d(kernel_size=5)
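        # Same conv stack as CNN, but forward() processes two inputs and concatenates
        # them, doubling the flattened feature size fed to fc1.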
self.fc1 = nn.Linear(2 * int(self.dim / 25 - 2) * 10, 2 * self.dim)
self.fc3 = nn.Linear(self.dim * 2, self.dim)
def forward(self, x1, x2):
x1 = x1.unsqueeze(1)
x1 = self.pool(self.conv1(x1))
x1 = self.pool(self.conv2(x1))
x2 = x2.unsqueeze(1)
x2 = self.pool(self.conv1(x2))
x2 = self.pool(self.conv2(x2))
x = torch.cat((x1, x2), dim=-1)
x = x.view(-1, x.shape[1] * x.shape[2])
x = F.relu(self.fc1(x))
x = self.fc3(x)
return x | 31.745763 | 75 | 0.538708 | 1,778 | 0.949279 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
23df5a83027200920168a92b6eedd813725d6db4 | 2,608 | py | Python | students/K33421/Novikova Veronika/practice/warriors_project/warriors_app/views.py | aglaya-pill/ITMO_ICT_WebDevelopment_2021-2022 | a63691317a72fb9b29ae537bc3d7766661458c22 | [
"MIT"
]
| null | null | null | students/K33421/Novikova Veronika/practice/warriors_project/warriors_app/views.py | aglaya-pill/ITMO_ICT_WebDevelopment_2021-2022 | a63691317a72fb9b29ae537bc3d7766661458c22 | [
"MIT"
]
| null | null | null | students/K33421/Novikova Veronika/practice/warriors_project/warriors_app/views.py | aglaya-pill/ITMO_ICT_WebDevelopment_2021-2022 | a63691317a72fb9b29ae537bc3d7766661458c22 | [
"MIT"
]
| null | null | null | from rest_framework import generics
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializers import *
class WarriorListView(APIView):
def get(self, request):
warriors = Warrior.objects.all()
serializer = WarriorSerializer(warriors, many=True)
return Response({"Warriors": serializer.data})
class ProfessionCreateView(APIView):
def post(self, request):
profession = request.data.get("profession")
serializer = ProfessionSerializer(data=profession)
if serializer.is_valid(raise_exception=True):
profession_saved = serializer.save()
        return Response({"Success": "Profession '{}' created successfully.".format(profession_saved.title)})
class SkillCreateView(APIView):
def post(self, request):
skill = request.data.get("skill")
serializer = SkillSerializer(data=skill)
if serializer.is_valid(raise_exception=True):
serializer.save()
return Response({"Success": "Created successfully."})
class WarriorCreateView(generics.CreateAPIView):
serializer_class = WarriorSerializer
queryset = Warrior.objects.all()
class ProfessionView(APIView):
def get(self, request):
prof = Profession.objects.all()
serializer = ProfessionSerializer(prof, many=True)
return Response({"Profession": serializer.data})
class SkillView(APIView):
def get(self, request):
skill = Skill.objects.all()
serializer = SkillSerializer(skill, many=True)
return Response({"Skill": serializer.data})
class WarriorSkillCreateView(generics.CreateAPIView):
serializer_class = WarriorSkillSerializer
queryset = SkillOfWarrior.objects.all()
class WarriorsSkills(APIView):
def get(self, request):
skill = Warrior.objects.all()
serializer = WarriorSkillsSerializer(skill, many=True)
return Response({"Skill": serializer.data})
class WarriorsProfessions(APIView):
def get(self, request):
prof = Warrior.objects.all()
serializer = WarriorProfSerializer(prof, many=True)
return Response({"Professions": serializer.data})
class SingleWarriorView(generics.RetrieveAPIView):
serializer_class = SingleWarriorSerializer
queryset = Warrior.objects.all()
class WarriorUpdateView(generics.UpdateAPIView):
serializer_class = WarriorSerializer
queryset = Warrior.objects.all()
lookup_field = 'pk'
class WarriorDestroyView(generics.DestroyAPIView):
queryset = Warrior.objects.all()
serializer_class = WarriorSerializer
| 29.636364 | 107 | 0.71434 | 2,423 | 0.929064 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.057899 |
23e0261a193fa6f445356c45a1780f878354e500 | 157 | py | Python | utils/platform.py | dennisding/build | e9342c2f235f64a8e125b3e6208426f1c2a12346 | [
"Apache-2.0"
]
| null | null | null | utils/platform.py | dennisding/build | e9342c2f235f64a8e125b3e6208426f1c2a12346 | [
"Apache-2.0"
]
| null | null | null | utils/platform.py | dennisding/build | e9342c2f235f64a8e125b3e6208426f1c2a12346 | [
"Apache-2.0"
]
| null | null | null | # -*- encoding:utf-8 -*-
class Platform:
def __init__(self):
pass
class Win(Platform):
pass
class Ios(Platform):
pass
class Android(Platform):
pass | 11.214286 | 24 | 0.687898 | 125 | 0.796178 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.152866 |
23e0459ade4fcfb40deaedb8969b8ab2785c8442 | 1,801 | py | Python | drone/flight/driving/motor_dummy.py | dpm76/eaglebone | 46403d03359a780f385ccb1f05b462869eddff89 | [
"ISC"
]
| null | null | null | drone/flight/driving/motor_dummy.py | dpm76/eaglebone | 46403d03359a780f385ccb1f05b462869eddff89 | [
"ISC"
]
| 18 | 2016-03-30T08:43:45.000Z | 2017-03-27T11:14:17.000Z | drone/flight/driving/motor_dummy.py | dpm76/eaglebone | 46403d03359a780f385ccb1f05b462869eddff89 | [
"ISC"
]
| 2 | 2016-03-06T20:38:06.000Z | 2019-09-10T14:46:35.000Z | '''
Created on 19 Jan 2016
@author: david
'''
import time
class MotorDummy(object):
MAX_THROTTLE = 80.0 #percentage
def __init__(self, motorId):
"""
Constructor
        @param motorId: Identifier of the motor: a number between 0 and 3 (in the case of a quadcopter)
"""
self._motorId = motorId
self._throttle = 0.0
def start(self):
self._throttle = 0.0
def setThrottle(self, throttle):
self._throttle = float(throttle)
time.sleep(0.001)
def getThrottle(self):
return self._throttle
def addThrottle(self, increment):
"""
Increases or decreases the motor's throttle
@param increment: Value added to the current throttle percentage. This can be negative to decrease.
"""
self.setThrottle(self._throttle + increment)
def setMaxThrottle(self):
"""
        Sends the max throttle signal (useful for the calibration process)
"""
self._throttle = 100.0
def setMinThrottle(self):
"""
        Sends the min throttle signal (useful for the calibration process, or for setting the motor in stand-by state)
"""
self._throttle = 0.0
def standBy(self):
"""
Set the motor in stand-by state
"""
self.setMinThrottle()
def idle(self):
"""
Set the motor in idle state
"""
self._throttle = 0.0
def stop(self):
"""
Stops the motor
"""
self._throttle = 0.0
| 20.465909 | 110 | 0.494725 | 1,732 | 0.961688 | 0 | 0 | 0 | 0 | 0 | 0 | 751 | 0.416991 |
23e397535cfd73ea5daf63a3a67cc1be6978c490 | 29,136 | py | Python | src/valr_python/ws_client.py | duncan-lumina/valr-python | 9c94b76990416b4b709d507b538bd8265ed51312 | [
"MIT"
]
| 6 | 2019-12-31T17:25:14.000Z | 2021-12-15T14:30:05.000Z | src/valr_python/ws_client.py | duncan-lumina/valr-python | 9c94b76990416b4b709d507b538bd8265ed51312 | [
"MIT"
]
| 17 | 2020-01-03T00:03:30.000Z | 2022-03-14T19:17:50.000Z | src/valr_python/ws_client.py | duncan-lumina/valr-python | 9c94b76990416b4b709d507b538bd8265ed51312 | [
"MIT"
]
| 6 | 2020-06-24T03:23:37.000Z | 2021-12-17T14:20:46.000Z | import asyncio
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
from typing import Union
try:
import simplejson as json
except ImportError:
import json
import websockets
from valr_python.enum import AccountEvent
from valr_python.enum import CurrencyPair
from valr_python.enum import MessageFeedType
from valr_python.enum import TradeEvent
from valr_python.enum import WebSocketType
from valr_python.exceptions import HookNotFoundError
from valr_python.exceptions import WebSocketAPIException
from valr_python.utils import JSONType
from valr_python.utils import _get_valr_headers
__all__ = ('WebSocketClient',)
def get_event_type(ws_type: WebSocketType) -> Type[Union[TradeEvent, AccountEvent]]:
return TradeEvent if ws_type == WebSocketType.TRADE else AccountEvent
class WebSocketClient:
"""The WebSocket API is an advanced technology that makes it possible to open a two-way interactive
communication session between a client and a server. With this API, you can send messages to a server and
receive event-driven responses without having to poll the server for a reply.
Example Usage
~~~~~~~~~~~~~
>>> import asyncio
>>> from typing import Dict
>>> from pprint import pprint
>>> from valr_python import WebSocketClient
>>> from valr_python.enum import TradeEvent
>>> from valr_python.enum import WebSocketType
>>>
>>> def pretty_hook(data: Dict):
... pprint(data)
>>>
>>> c = WebSocketClient(api_key='api_key', api_secret='api_secret', currency_pairs=['BTCZAR'],
... ws_type=WebSocketType.TRADE.name,
... trade_subscriptions=[TradeEvent.MARKET_SUMMARY_UPDATE.name],
... hooks={TradeEvent.MARKET_SUMMARY_UPDATE.name : pretty_hook})
>>> loop = asyncio.get_event_loop()
>>> loop.run_until_complete(c.run())
{'currencyPairSymbol': 'BTCZAR',
'data': {'askPrice': '151601',
'baseVolume': '314.7631144',
'bidPrice': '151600',
'changeFromPrevious': '2.14',
'created': '2020-02-06T22:47:03.129Z',
'currencyPairSymbol': 'BTCZAR',
'highPrice': '152440',
'lastTradedPrice': '151600',
'lowPrice': '146765',
'previousClosePrice': '148410',
'quoteVolume': '47167382.04552981'},
'type': 'MARKET_SUMMARY_UPDATE'}
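    The client does not reconnect by itself; handling websockets.exceptions.ConnectionClosed
    is left to the application (see run() below). A minimal reconnect wrapper, shown here only
    as an illustrative sketch and not part of this library, could look like:

    >>> import websockets.exceptions
    >>>
    >>> async def run_forever(client):
    ...     while True:
    ...         try:
    ...             await client.run()
    ...         except websockets.exceptions.ConnectionClosed:
    ...             await asyncio.sleep(5)  # back off briefly, then re-establish the stream
    >>>
    >>> loop.run_until_complete(run_forever(c))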
Connection
~~~~~~~~~~
Our WebSocket API is accessible on the following address: wss://api.valr.com.
Account WebSocket connection: In order to receive streaming updates about your VALR account, you would
open up a WebSocket connection to wss://api.valr.com/ws/account
Trade WebSocket connection: In order to receive streaming updates about Trade data, you would open up a
WebSocket connection to wss://api.valr.com/ws/trade
Authentication
~~~~~~~~~~~~~~
Our WebSocket API needs authentication. To authenticate, pass in the following headers to the first
call that establishes the WebSocket connection.
X-VALR-API-KEY: Your API Key
X-VALR-SIGNATURE: Generated signature. The signature is generated using the following parameters:
Api Secret
Timestamp of request
HTTP verb 'GET'
Path (either /ws/account or /ws/trade)
Request Body should be empty
X-VALR-TIMESTAMP: Timestamp of the request
The headers that are passed to establish the connection are the same 3 headers you pass to
any authenticated call to the REST API.
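    A minimal sketch of how these headers can be built (the client does this internally via
    _get_valr_headers; the HMAC-SHA512 payload layout below is an assumption used for
    illustration, not a guaranteed description of VALR's signing scheme):

    >>> import hashlib, hmac, time
    >>>
    >>> def sign(api_secret, timestamp, verb, path, body=""):
    ...     payload = "{}{}{}{}".format(timestamp, verb.upper(), path, body)
    ...     return hmac.new(api_secret.encode(), payload.encode(), hashlib.sha512).hexdigest()
    >>>
    >>> timestamp = int(time.time() * 1000)
    >>> headers = {
    ...     "X-VALR-API-KEY": "your_api_key",
    ...     "X-VALR-SIGNATURE": sign("your_api_secret", timestamp, "GET", "/ws/trade"),
    ...     "X-VALR-TIMESTAMP": str(timestamp),
    ... }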
Subscribing to events
~~~~~~~~~~~~~~~~~~~~~
Once you open a connection to Account, you are automatically subscribed to all messages for all events on
the Account WebSocket connection. You will start receiving message feeds pertaining to your VALR account.
For example, you will receive messages when your balance is updated or when a new trade is executed on your account.
On the other hand, when you open a connection to Trade, in order to receive message feeds about trading data, you
must subscribe to events you are interested in on the Trade WebSocket connection. For example, if you want to
receive messages when markets fluctuate, you must send a message on the connection with the following payload:
{
"type":"SUBSCRIBE",
"subscriptions":[
{
"event":"MARKET_SUMMARY_UPDATE",
"pairs":[
"BTCZAR"
]
}
]
}
Here, the event you are subscribing to is called MARKET_SUMMARY_UPDATE and the currency pair you are subscribing to
is an array. We currently only support BTCZAR and ETHZAR. XRPZAR will be added in due course.
Unsubscribing from events
~~~~~~~~~~~~~~~~~~~~~~~~~
When you are no longer interested in receiving messages for certain events on the Trade WebSocket connection,
you can send a synthetic "unsubscribe" message. For example, if you want to unsubscribe from MARKET_SUMMARY_UPDATE
event, you would send a message as follows:
{
"type":"SUBSCRIBE",
"subscriptions":[
{
"event":"MARKET_SUMMARY_UPDATE",
"pairs":[
]
}
]
}
Staying connected with Ping-Pong messages
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To ensure that you stay connected to either the Account or Trade WebSocket you can send a "PING" message on the
WebSocket you wish to monitor. VALR will respond with a PONG event. The message must be as follows:
{
"type": "PING"
}
Events (On Trade WebSocket)
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Here is a list of events you can subscribe to on the Trade WebSocket connection:
Event Description
AGGREGATED_ORDERBOOK_UPDATE When subscribed to this event for a given currency pair, the client receives the
top 20 bids and asks from the order book for that currency pair.
MARKET_SUMMARY_UPDATE When subscribed to this event for a given currency pair, the client receives a
message feed with the latest market summary for that currency pair.
NEW_TRADE_BUCKET When subscribed to this event for a given currency pair, the client receives the
Open, High, Low, Close data valid for the last 60 seconds.
NEW_TRADE When subscribed to this event for a given currency pair, the client receives
message feeds with the latest trades that are executed for that currency pair.
AGGREGATED_ORDERBOOK_UPDATE
In order to subscribe to AGGREGATED_ORDERBOOK_UPDATE for BTCZAR and ETHZAR, you must send the following message
on the Trade WebSocket connection once it is opened:
{
"type":"SUBSCRIBE",
"subscriptions":[
{
"event":"AGGREGATED_ORDERBOOK_UPDATE",
"pairs":[
"BTCZAR",
"ETHZAR"
]
}
]
}
To unsubscribe, send the following message:
{
"type":"SUBSCRIBE",
"subscriptions":[
{
"event":"AGGREGATED_ORDERBOOK_UPDATE",
"pairs":[
]
}
]
}
MARKET_SUMMARY_UPDATE
In order to subscribe to MARKET_SUMMARY_UPDATE for just BTCZAR, you must send the following message on the
Trade WebSocket connection once it is opened:
{
"type":"SUBSCRIBE",
"subscriptions":[
{
"event":"MARKET_SUMMARY_UPDATE",
"pairs":[
"BTCZAR"
]
}
]
}
To unsubscribe, send the following message:
{
"type":"SUBSCRIBE",
"subscriptions":[
{
"event":"MARKET_SUMMARY_UPDATE",
"pairs":[
]
}
]
}
NEW_TRADE_BUCKET
In order to subscribe to NEW_TRADE_BUCKET for BTCZAR as well as ETHZAR, you must send the following message on the
Trade WebSocket connection once it is opened:
{
"type":"SUBSCRIBE",
"subscriptions":[
{
"event":"NEW_TRADE_BUCKET",
"pairs":[
"BTCZAR",
"ETHZAR"
]
}
]
}
To unsubscribe, send the following message:
{
"type":"SUBSCRIBE",
"subscriptions":[
{
"event":"NEW_TRADE_BUCKET",
"pairs":[
]
}
]
}
NEW_TRADE
In order to subscribe to NEW_TRADE just for BTCZAR, you must send the following message on the Trade WebSocket
connection once it is opened:
{
"type":"SUBSCRIBE",
"subscriptions":[
{
"event":"NEW_TRADE",
"pairs":[
"BTCZAR"
]
}
]
}
To unsubscribe, send the following message:
{
"type":"SUBSCRIBE",
"subscriptions":[
{
"event":"NEW_TRADE",
"pairs":[
]
}
]
}
Message Feeds (On Trade WebSocket)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As and when events occur, the message feeds come through to the Trade WebSocket connection for the events the client
has subscribed to. You will find an example message feed for each event specified above.
AGGREGATED_ORDERBOOK_UPDATE
Sample message feed:
{
"type":"AGGREGATED_ORDERBOOK_UPDATE",
"currencyPairSymbol":"BTCZAR",
"data":{
"Asks":[
{
"side":"sell",
"quantity":"0.005",
"price":"9500",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"sell",
"quantity":"0.01",
"price":"9750",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"sell",
"quantity":"0.643689",
"price":"10000",
"currencyPair":"BTCZAR",
"orderCount":3
},
{
"side":"sell",
"quantity":"0.2",
"price":"11606",
"currencyPair":"BTCZAR",
"orderCount":2
},
{
"side":"sell",
"quantity":"0.67713484",
"price":"14000",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"sell",
"quantity":"1",
"price":"15000",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"sell",
"quantity":"1",
"price":"16000",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"sell",
"quantity":"1",
"price":"17000",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"sell",
"quantity":"1",
"price":"18000",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"sell",
"quantity":"1",
"price":"19000",
"currencyPair":"BTCZAR",
"orderCount":1
}
],
"Bids":[
{
"side":"buy",
"quantity":"0.038",
"price":"9000",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"buy",
"quantity":"0.1",
"price":"8802",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"buy",
"quantity":"0.2",
"price":"8801",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"buy",
"quantity":"0.1",
"price":"8800",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"buy",
"quantity":"0.1",
"price":"8700",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"buy",
"quantity":"0.1",
"price":"8600",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"buy",
"quantity":"0.1",
"price":"8500",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"buy",
"quantity":"0.1",
"price":"8400",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"buy",
"quantity":"0.3",
"price":"8200",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"buy",
"quantity":"0.1",
"price":"8100",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"buy",
"quantity":"0.1",
"price":"8000",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"buy",
"quantity":"1.08027437",
"price":"1",
"currencyPair":"BTCZAR",
"orderCount":3
}
]
}
}
MARKET_SUMMARY_UPDATE
Sample message feed:
{
"type":"MARKET_SUMMARY_UPDATE",
"currencyPairSymbol":"BTCZAR",
"data":{
"currencyPairSymbol":"BTCZAR",
"askPrice":"9500",
"bidPrice":"9000",
"lastTradedPrice":"9500",
"previousClosePrice":"9000",
"baseVolume":"0.0551",
"highPrice":"10000",
"lowPrice":"9000",
"created":"2016-04-25T19:41:16.237Z",
"changeFromPrevious":"5.55"
}
}
NEW_TRADE_BUCKET
Sample message feed:
{
"type":"NEW_TRADE_BUCKET",
"currencyPairSymbol":"BTCZAR",
"data":{
"currencyPairSymbol":"BTCZAR",
"bucketPeriodInSeconds":60,
"startTime":"2019-04-25T19:41:00Z",
"open":"9500",
"high":"9500",
"low":"9500",
"close":"9500",
"volume":"0"
}
}
NEW_TRADE
Sample message feed:
{
"type":"NEW_TRADE",
"currencyPairSymbol":"BTCZAR",
"data":{
"price":"9500",
"quantity":"0.001",
"currencyPair":"BTCZAR",
"tradedAt":"2019-04-25T19:51:55.393Z",
"takerSide":"buy"
}
}
Message Feeds (On Account WebSocket)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As and when events occur, the message feeds come through to the Account WebSocket connection. As mentioned
previously, the client is automatically subscribed to all events on the Account WebSocket connection as soon as
the connection is established. That means, the client need not subscribe to events on the Account WebSocket
connection. That also means that the client cannot unsubscribe from these events.
Here is a list of events that occur on the Account WebSocket and the corresponding sample message feed:
NEW_ACCOUNT_HISTORY_RECORD : NEW SUCCESSFUL TRANSACTION
Sample message feed:
{
"type":"NEW_ACCOUNT_HISTORY_RECORD",
"data":{
"transactionType":{
"type":"SIMPLE_BUY",
"description":"Simple Buy"
},
"debitCurrency":{
"symbol":"R",
"decimalPlaces":2,
"isActive":true,
"shortName":"ZAR",
"longName":"Rand",
"supportedWithdrawDecimalPlaces":2
},
"debitValue":"10",
"creditCurrency":{
"symbol":"BTC",
"decimalPlaces":8,
"isActive":true,
"shortName":"BTC",
"longName":"Bitcoin",
"supportedWithdrawDecimalPlaces":8
},
"creditValue":"0.00104473",
"feeCurrency":{
"symbol":"BTC",
"decimalPlaces":8,
"isActive":true,
"shortName":"BTC",
"longName":"Bitcoin",
"supportedWithdrawDecimalPlaces":8
},
"feeValue":"0.00000789",
"eventAt":"2019-04-25T20:36:53.426Z",
"additionalInfo":{
"costPerCoin":9500,
"costPerCoinSymbol":"R",
"currencyPairSymbol":"BTCZAR"
}
}
}
BALANCE_UPDATE : BALANCE HAS BEEN UPDATED
Sample message feed:
{
"type":"BALANCE_UPDATE",
"data":{
"currency":{
"symbol":"BTC",
"decimalPlaces":8,
"isActive":true,
"shortName":"BTC",
"longName":"Bitcoin",
"supportedWithdrawDecimalPlaces":8
},
"available":"0.88738681",
"reserved":"0.97803484",
"total":"1.86542165"
}
}
NEW_ACCOUNT_TRADE : NEW TRADE EXECUTED ON YOUR ACCOUNT
Sample message feed:
{
"type":"NEW_ACCOUNT_TRADE",
"currencyPairSymbol":"BTCZAR",
"data":{
"price":"9500",
"quantity":"0.00105263",
"currencyPair":"BTCZAR",
"tradedAt":"2019-04-25T20:36:53.426Z",
"side":"buy"
}
}
INSTANT_ORDER_COMPLETED: NEW SIMPLE BUY/SELL EXECUTED
Sample message feed:
{
"type":"INSTANT_ORDER_COMPLETED",
"data":{
"orderId":"247dc157-bb5b-49af-b476-2f613b780697",
"success":true,
"paidAmount":"10",
"paidCurrency":"R",
"receivedAmount":"0.00104473",
"receivedCurrency":"BTC",
"feeAmount":"0.00000789",
"feeCurrency":"BTC",
"orderExecutedAt":"2019-04-25T20:36:53.445"
}
}
OPEN_ORDERS_UPDATE : NEW ORDER ADDED TO OPEN ORDERS
Sample message feed (all open orders are returned) :
{
"type":"OPEN_ORDERS_UPDATE",
"data":[
{
"orderId":"38511e49-a755-4f8f-a2b1-232bae6967dc",
"side":"sell",
"remainingQuantity":"0.1",
"originalPrice":"10000",
"currencyPair":{
"id":1,
"symbol":"BTCZAR",
"baseCurrency":{
"id":2,
"symbol":"BTC",
"decimalPlaces":8,
"isActive":true,
"shortName":"BTC",
"longName":"Bitcoin",
"currencyDecimalPlaces":8,
"supportedWithdrawDecimalPlaces":8
},
"quoteCurrency":{
"id":1,
"symbol":"R",
"decimalPlaces":2,
"isActive":true,
"shortName":"ZAR",
"longName":"Rand",
"currencyDecimalPlaces":2,
"supportedWithdrawDecimalPlaces":2
},
"shortName":"BTC/ZAR",
"exchange":"VALR",
"active":true,
"minBaseAmount":0.0001,
"maxBaseAmount":2,
"minQuoteAmount":10,
"maxQuoteAmount":100000
},
"createdAt":"2019-04-17T19:51:35.776Z",
"originalQuantity":"0.1",
"filledPercentage":"0.00",
"customerOrderId":""
},
{
"orderId":"d1d9f20a-778c-4f4a-98a1-d336da960158",
"side":"sell",
"remainingQuantity":"0.1",
"originalPrice":"10000",
"currencyPair":{
"id":1,
"symbol":"BTCZAR",
"baseCurrency":{
"id":2,
"symbol":"BTC",
"decimalPlaces":8,
"isActive":true,
"shortName":"BTC",
"longName":"Bitcoin",
"currencyDecimalPlaces":8,
"supportedWithdrawDecimalPlaces":8
},
"quoteCurrency":{
"id":1,
"symbol":"R",
"decimalPlaces":2,
"isActive":true,
"shortName":"ZAR",
"longName":"Rand",
"currencyDecimalPlaces":2,
"supportedWithdrawDecimalPlaces":2
},
"shortName":"BTC/ZAR",
"exchange":"VALR",
"active":true,
"minBaseAmount":0.0001,
"maxBaseAmount":2,
"minQuoteAmount":10,
"maxQuoteAmount":100000
},
"createdAt":"2019-04-20T13:48:44.922Z",
"originalQuantity":"0.1",
"filledPercentage":"0.00",
"customerOrderId":"4"
}
]
}
ORDER_PROCESSED : ORDER PROCESSED
Sample message feed:
{
"type":"ORDER_PROCESSED",
"data":{
"orderId":"247dc157-bb5b-49af-b476-2f613b780697",
"success":true,
"failureReason":""
}
}
ORDER_STATUS_UPDATE : ORDER STATUS HAS BEEN UPDATED
Sample message feed:
{
"type":"ORDER_STATUS_UPDATE",
"data":{
"orderId":"247dc157-bb5b-49af-b476-2f613b780697",
"orderStatusType":"Filled",
"currencyPair":{
"id":1,
"symbol":"BTCZAR",
"baseCurrency":{
"id":2,
"symbol":"BTC",
"decimalPlaces":8,
"isActive":true,
"shortName":"BTC",
"longName":"Bitcoin",
"currencyDecimalPlaces":8,
"supportedWithdrawDecimalPlaces":8
},
"quoteCurrency":{
"id":1,
"symbol":"R",
"decimalPlaces":2,
"isActive":true,
"shortName":"ZAR",
"longName":"Rand",
"currencyDecimalPlaces":2,
"supportedWithdrawDecimalPlaces":2
},
"shortName":"BTC/ZAR",
"exchange":"VALR",
"active":true,
"minBaseAmount":0.0001,
"maxBaseAmount":2,
"minQuoteAmount":10,
"maxQuoteAmount":100000
},
"originalPrice":"80000",
"remainingQuantity":"0.01",
"originalQuantity":"0.01",
"orderSide":"buy",
"orderType":"limit",
"failedReason":"",
"orderUpdatedAt":"2019-05-10T14:47:24.826Z",
"orderCreatedAt":"2019-05-10T14:42:37.333Z",
"customerOrderId":"4"
}
}
orderStatusType can be one of the following values: "Placed", "Failed", "Cancelled", "Filled", "Partially Filled",
"Instant Order Balance Reserve Failed", "Instant Order Balance Reserved","Instant Order Completed".
FAILED_CANCEL_ORDER : UNABLE TO CANCEL ORDER
Sample message feed:
{
"type":"FAILED_CANCEL_ORDER",
"data":{
"orderId":"247dc157-bb5b-49af-b476-2f613b780697",
"message":"An error occurred while cancelling your order."
}
}
NEW_PENDING_RECEIVE : NEW PENDING CRYPTO DEPOSIT
Sample message feed:
{
"type":"NEW_PENDING_RECEIVE",
"data":{
"currency":{
"id":3,
"symbol":"ETH",
"decimalPlaces":8,
"isActive":true,
"shortName":"ETH",
"longName":"Ethereum",
"currencyDecimalPlaces":18,
"supportedWithdrawDecimalPlaces":8
},
"receiveAddress":"0xA7Fae2Fd50886b962d46FF4280f595A3982aeAa5",
"transactionHash":"0x804bbfa946b57fc5ffcb0c37ec02e7503435d19c35bf8eb0b0c6deb289f7009a",
"amount":0.01,
"createdAt":"2019-04-25T21:16:28Z",
"confirmations":1,
"confirmed":false
}
}
This message feed is sent through every time there is an update to the number of confirmations to this
pending deposit.
SEND_STATUS_UPDATE : CRYPTO WITHDRAWAL STATUS UPDATE
Sample message feed:
{
"type":"SEND_STATUS_UPDATE",
"data":{
"uniqueId":"beb8a612-1a1a-4d68-9bd3-96d5ea341119",
"status":"SEND_BROADCASTED",
"confirmations":0
}
}
"""
_WEBSOCKET_API_URI = 'wss://api.valr.com'
_ACCOUNT_CONNECTION = f'{_WEBSOCKET_API_URI}{WebSocketType.ACCOUNT.value}'
_TRADE_CONNECTION = f'{_WEBSOCKET_API_URI}{WebSocketType.TRADE.value}'
def __init__(self, api_key: str, api_secret: str, hooks: Dict[str, Callable],
currency_pairs: Optional[List[str]] = None, ws_type: str = 'trade',
trade_subscriptions: Optional[List[str]] = None):
self._api_key = api_key
self._api_secret = api_secret
self._ws_type = WebSocketType[ws_type.upper()]
self._hooks = {get_event_type(self._ws_type)[e.upper()]: f for e, f in hooks.items()}
if currency_pairs:
self._currency_pairs = [CurrencyPair[p.upper()] for p in currency_pairs]
else:
self._currency_pairs = [p for p in CurrencyPair]
if self._ws_type == WebSocketType.ACCOUNT:
self._uri = self._ACCOUNT_CONNECTION
else:
self._uri = self._TRADE_CONNECTION
if self._ws_type == WebSocketType.TRADE:
if trade_subscriptions:
self._trade_subscriptions = [TradeEvent[e] for e in trade_subscriptions]
else:
self._trade_subscriptions = [e for e in TradeEvent]
elif trade_subscriptions:
raise ValueError(f'trade subscriptions requires ws_type of {WebSocketType.TRADE.name} ')
else:
self._trade_subscriptions = None
async def run(self):
"""Open an async websocket connection, consume responses and executed mapped hooks. Async hooks are also
supported. The method relies on the underlying 'websockets' libraries ping-pong support. No API-level
ping-pong messages are sent to keep the connection alive (not necessary). Support for custom-handling of
websockets.exceptions.ConnectionClosed must be handled in the application.
"""
headers = _get_valr_headers(api_key=self._api_key, api_secret=self._api_secret, method='GET',
path=self._ws_type.value, data='')
async with websockets.connect(self._uri, ssl=True, extra_headers=headers) as ws:
if self._ws_type == WebSocketType.TRADE:
await ws.send(self.get_subscribe_data(self._currency_pairs, self._trade_subscriptions))
async for message in ws:
data = json.loads(message)
try:
# ignore auth and subscription response messages
if data['type'] not in (MessageFeedType.SUBSCRIBED.name, MessageFeedType.AUTHENTICATED.name):
func = self._hooks[get_event_type(self._ws_type)[data['type']]]
# apply hooks to mapped stream events
if asyncio.iscoroutinefunction(func):
await func(data)
else:
func(data)
except KeyError:
events = [e.name for e in get_event_type(self._ws_type)]
if data['type'] in events:
raise HookNotFoundError(f'no hook supplied for {data["type"]} event')
raise WebSocketAPIException(f'WebSocket API failed to handle {data["type"]} event: {data}')
@staticmethod
def get_subscribe_data(currency_pairs, events) -> JSONType:
"""Get subscription data for ws client request"""
subscriptions = [{"event": e.name, "pairs": [p.name for p in currency_pairs]} for e in events]
data = {
"type": MessageFeedType.SUBSCRIBE.name,
"subscriptions": subscriptions
}
return json.dumps(data, default=str)
| 30.864407 | 120 | 0.512699 | 28,271 | 0.970312 | 0 | 0 | 405 | 0.0139 | 1,847 | 0.063392 | 25,465 | 0.874005 |
23e4cf7747f358650ecc3229b90396e47c6f5137 | 110 | py | Python | bagua/torch_api/compression.py | fossabot/bagua | 2a8434159bfa502e61739b5eabd91dca57c9256c | [
"MIT"
]
| 1 | 2021-06-23T08:13:15.000Z | 2021-06-23T08:13:15.000Z | bagua/torch_api/compression.py | fossabot/bagua | 2a8434159bfa502e61739b5eabd91dca57c9256c | [
"MIT"
]
| null | null | null | bagua/torch_api/compression.py | fossabot/bagua | 2a8434159bfa502e61739b5eabd91dca57c9256c | [
"MIT"
]
| null | null | null | from enum import Enum
class Compressor(Enum):
NoneCompressor = None
Uint8Compressor = "MinMaxUInt8"
| 15.714286 | 35 | 0.736364 | 85 | 0.772727 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.118182 |
23e64fd0f143ca1fd055ab9e432dcd782eb331eb | 2,215 | py | Python | emailer.py | dblossom/raffle-checker | 807d33a305e836579a423986be2a7ff7c2d655e1 | [
"MIT"
]
| null | null | null | emailer.py | dblossom/raffle-checker | 807d33a305e836579a423986be2a7ff7c2d655e1 | [
"MIT"
]
| null | null | null | emailer.py | dblossom/raffle-checker | 807d33a305e836579a423986be2a7ff7c2d655e1 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
from database import Database
from rafflecollector import RaffleCollector
import os
import smtplib, ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import schedule
import time
class Emailer:
db = Database()
email_id = os.environ['RAFFLE_EMAIL']
email_pass = os.environ['RAFFLE_EMAIL_PASSWORD']
port = 465 # For SSL
context = ssl.create_default_context()
message = MIMEMultipart("alternative")
def __init__(self):
self.send_alive_email()
self.check_db_tickets()
def check_db_tickets(self):
ticket_list = self.db.get_all_tickets()
rc = RaffleCollector()
raffle_winners = rc.winning_numbers()
for key, value in raffle_winners.items():
for tup in ticket_list:
if tup[0] == int(value):
self.build_message(self.db.get_email_pid(tup[1]),
tup[0])
self.send_email()
    def build_message(self, to_email, ticket):
        # Build a fresh message each time so headers and attachments from earlier
        # emails do not accumulate on the shared MIMEMultipart object.
        self.message = MIMEMultipart("alternative")
        self.message["From"] = self.email_id
        self.message["To"] = to_email
        self.message["Subject"] = "Congratulations, You're a winner!"
        text = """\
Congratulations! Ticket# """ + str(ticket) + """ is a winner!
        """
        winner_message = MIMEText(text, "plain")
        self.message.attach(winner_message)
def send_email(self):
with smtplib.SMTP_SSL("smtp.gmail.com", self.port, context=self.context) as server:
server.login(self.email_id, self.email_pass)
server.sendmail(self.email_id, self.message["To"], self.message.as_string())
    def send_alive_email(self):
        # Reset the message so the heartbeat email does not inherit headers or
        # attachments from a previously sent message.
        self.message = MIMEMultipart("alternative")
        self.message["From"] = self.email_id
        self.message["To"] = self.db.get_email_pid(1)
        self.message["Subject"] = "Daily heartbeat email!"
        text = """\
This is your daily heartbeat email!
        """
        heartbeat = MIMEText(text, "plain")
        self.message.attach(heartbeat)
        self.send_email()
if __name__ == "__main__":
e = Emailer()
schedule.every().day.at("22:00").do(e.__init__)
while True:
schedule.run_pending()
time.sleep(1)
| 31.642857 | 91 | 0.621219 | 1,806 | 0.81535 | 0 | 0 | 0 | 0 | 0 | 0 | 358 | 0.161625 |
23e79af618c8a287421e1a5d39cd45ed069fab6f | 4,391 | py | Python | website_handling/website_check.py | Dr3xler/CookieConsentChecker | 816cdfb9d9dc741c57dbcd5e9c9ef59837196631 | [
"MIT"
]
| null | null | null | website_handling/website_check.py | Dr3xler/CookieConsentChecker | 816cdfb9d9dc741c57dbcd5e9c9ef59837196631 | [
"MIT"
]
| 3 | 2021-04-29T22:57:09.000Z | 2021-05-03T15:32:39.000Z | website_handling/website_check.py | Dr3xler/CookieConsentChecker | 816cdfb9d9dc741c57dbcd5e9c9ef59837196631 | [
"MIT"
]
| 1 | 2021-08-29T09:53:09.000Z | 2021-08-29T09:53:09.000Z | import os
import json
import shutil
import time
from pathlib import Path
from sys import platform
# TODO: (stackoverflow.com/question/17136514/how-to-get-3rd-party-cookies)
# stackoverflow.com/questions/22200134/make-selenium-grab-all-cookies, add the selenium, phantomjs part to catch ALL cookies
# TODO: Maybe save cookies to global variable to compare them in another function without saving them?
'''
loading more than one addon for firefox to use with selenium:
extensions = [
'[email protected]',
'',
''
]
for extension in extensions:
driver.install_addon(extension_dir + extension, temporary=True)
'''
def load_with_addon(driver, websites):
"""This method will load all websites with 'i don't care about cookies' preinstalled.
Afterwards it will convert the cookies to dicts and save them locally for comparison
Be aware that this method will delete all saved cookies"""
print('creating dir for cookies with addon...')
# checks if cookie dir already exists, creates an empty dir.
if len(os.listdir('data/save/with_addon/')) != 0:
shutil.rmtree('data/save/with_addon/')
os.mkdir('data/save/with_addon/')
print('saving cookies in firefox with addons ...')
# the extension directory needs to be the one of your local machine
# linux
if platform == "linux":
extension_dir = os.getenv("HOME") + "/.mozilla/firefox/7ppp44j6.default-release/extensions/"
driver.install_addon(extension_dir + '[email protected]', temporary=True)
# windows
if platform == "win32":
extension_dir = str(
Path.home()) + "/AppData/Roaming/Mozilla/Firefox/Profiles/shdzeteb.default-release/extensions/"
print(extension_dir)
driver.install_addon(extension_dir + '[email protected]', temporary=True)
for website in websites:
name = website.split('www.')[1]
driver.get(website)
driver.execute_script("return document.readyState")
cookies_addons = driver.get_cookies()
cookies_dict = {}
cookiecount = 0
for cookie in cookies_addons:
cookies_dict = cookie
print('data/save/with_addon/%s/%s_%s.json' % (name, name, cookiecount))
print(cookies_dict)
# creates the website dir
if not os.path.exists('data/save/with_addon/%s/' % name):
os.mkdir('data/save/with_addon/%s/' % name)
# saves the cookies into the website dir
with open('data/save/with_addon/%s/%s_%s.json' % (name, name, cookiecount), 'w') as file:
json.dump(cookies_dict, file, sort_keys=True)
cookiecount += 1
def load_without_addon(driver, websites):
"""This method will load all websites on a vanilla firefox version.
Afterwards it will convert the cookies to dicts and save them locally for comparison
Be aware that this method will delete all saved cookies"""
print('creating dir for cookies in vanilla...')
# checks if cookie dir already exists, creates an empty dir.
if len(os.listdir('data/save/without_addon/')) != 0:
shutil.rmtree('data/save/without_addon/')
os.mkdir('data/save/without_addon')
print('saving cookies in firefox without addons ...')
for website in websites:
name = website.split('www.')[1]
driver.get(website)
driver.execute_script("return document.readyState")
time.sleep(5)
cookies_vanilla = driver.get_cookies()
cookies_dict = {}
cookiecount = 0
for cookie in cookies_vanilla:
cookies_dict = cookie
print('data/save/without_addon/%s/%s_%s.json' % (name, name, cookiecount))
print(cookies_dict)
# creates the website dir
if not os.path.exists('data/save/without_addon/%s/' % name):
os.mkdir('data/save/without_addon/%s/' % name)
# saves the cookies into the website dir
with open('data/save/without_addon/%s/%s_%s.json' % (name, name, cookiecount), 'w') as file:
json.dump(cookies_dict, file, sort_keys=True)
cookiecount += 1
def close_driver_session(driver):
"""This method will end the driver session and close all windows. Driver needs to be initialized again afterwards"""
driver.quit()
| 35.128 | 125 | 0.662491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,312 | 0.526532 |
23e9be3b6c2cc45718ae9d2bebea994634002d02 | 925 | py | Python | src/utils/import_lock.py | ThatOneAnimeGuy/seiso | f8ad20a0ec59b86b88149723eafc8e6d9f8be451 | [
"BSD-3-Clause"
]
| 3 | 2021-11-08T05:23:08.000Z | 2021-11-08T09:46:51.000Z | src/utils/import_lock.py | ThatOneAnimeGuy/seiso | f8ad20a0ec59b86b88149723eafc8e6d9f8be451 | [
"BSD-3-Clause"
]
| null | null | null | src/utils/import_lock.py | ThatOneAnimeGuy/seiso | f8ad20a0ec59b86b88149723eafc8e6d9f8be451 | [
"BSD-3-Clause"
]
| 2 | 2021-11-08T05:23:12.000Z | 2021-11-16T01:16:35.000Z | from flask import current_app
from ..internals.database.database import get_cursor
def take_lock(service, artist_service_id, post_service_id):
query = 'INSERT INTO post_import_lock (service, artist_service_id, post_service_id) VALUES (%s, %s, %s) ON CONFLICT DO NOTHING RETURNING id'
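    # ON CONFLICT DO NOTHING turns the insert into a no-op when a lock row already exists,
    # so fetchone() returning None below means another import currently holds the lock.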
with get_cursor() as cursor:
cursor.execute(query, (service, artist_service_id, post_service_id,))
result = cursor.fetchone()
if result is None:
return None
return result['id']
def release_lock(lock_id):
try:
query = 'DELETE FROM post_import_lock WHERE id = %s'
with get_cursor() as cursor:
cursor.execute(query, (lock_id,))
except:
current_app.logger.exception(f'Could not release post import lock {lock_id}')
def clear_lock_table():
query = 'DELETE FROM post_import_lock'
with get_cursor() as cursor:
cursor.execute(query)
| 35.576923 | 144 | 0.68973 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 257 | 0.277838 |
23ecadb81a5ec6b2f9e0c728e946a750d6f1f36e | 93 | py | Python | modules/tankshapes/__init__.py | bullseyestudio/guns-game | 3104c44e43ea7f000f6b9e756d622f98110d0a21 | [
"Apache-2.0",
"BSD-3-Clause"
]
| null | null | null | modules/tankshapes/__init__.py | bullseyestudio/guns-game | 3104c44e43ea7f000f6b9e756d622f98110d0a21 | [
"Apache-2.0",
"BSD-3-Clause"
]
| 1 | 2018-11-21T04:50:57.000Z | 2018-11-21T04:50:57.000Z | modules/tankshapes/__init__.py | bullseyestudio/guns-game | 3104c44e43ea7f000f6b9e756d622f98110d0a21 | [
"Apache-2.0",
"BSD-3-Clause"
]
| null | null | null | """ Tank shapes package for Guns.
This init file marks the package as a usable module.
"""
| 15.5 | 52 | 0.709677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.989247 |
23ece7de650d89db697b4f1ccb8b587a85d078b4 | 99 | py | Python | jonathan/Aufgabe23/1.py | codingkrabbe/adventofcode | 21965a9519e8c20ab154354fd4b4ad3c807b7b95 | [
"MIT"
]
| 5 | 2021-12-01T21:44:22.000Z | 2021-12-09T19:11:21.000Z | jonathan/Aufgabe23/1.py | codingkrabbe/adventofcode | 21965a9519e8c20ab154354fd4b4ad3c807b7b95 | [
"MIT"
]
| null | null | null | jonathan/Aufgabe23/1.py | codingkrabbe/adventofcode | 21965a9519e8c20ab154354fd4b4ad3c807b7b95 | [
"MIT"
]
| 3 | 2021-12-01T21:41:20.000Z | 2021-12-03T14:17:24.000Z | def main():
lines = open('input.txt', 'r').readlines()
if __name__ == '__main__':
main()
| 14.142857 | 46 | 0.565657 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.242424 |
23ed67548a141b4172f60911a628a2325339dc44 | 4,468 | py | Python | podstreamer.py | Swall0w/pymusic | 73e08e6a5ad4c6d418a0074fc3a83be0896cf97c | [
"MIT"
]
| 1 | 2017-06-08T11:41:00.000Z | 2017-06-08T11:41:00.000Z | podstreamer.py | Swall0w/pymusic | 73e08e6a5ad4c6d418a0074fc3a83be0896cf97c | [
"MIT"
]
| null | null | null | podstreamer.py | Swall0w/pymusic | 73e08e6a5ad4c6d418a0074fc3a83be0896cf97c | [
"MIT"
]
| null | null | null | import feedparser
import vlc
import argparse
import sys
import time
import curses
import wget
def arg():
parser = argparse.ArgumentParser(
description='Simple Podcast Streamer.')
parser.add_argument('--add', '-a', type=str, default=None,
help='Pass Podcast an URL argument that you want to add.')
parser.add_argument('--list', '-l', action='store_true',
help='Podcast lists that are contained.')
parser.add_argument('--delete', '-d', type=int, default=-1,
help='delete podcast channel.')
parser.add_argument('--detail', type=int, default=-1,
help='See podcast channel detail.')
parser.add_argument('--play', '-p', action='store_true',
help='Play Podcast. Please pass channel and\
track argument with play argument.')
parser.add_argument('--download', action='store_true',
help='Download Podcast. Please pass channel and track argument')
parser.add_argument('--channel', '-c', type=int,
help='Podcast Channel that you want to listen to.')
parser.add_argument('--track', '-t', type=int,
help='Podcast track that you want to listen to.')
return parser.parse_args()
def converttime(times):
minutes, seconds = divmod(times, 60)
hours, minutes = divmod(minutes, 60)
return int(hours), int(minutes), int(seconds)
def stream(rss_url, track):
try:
rssdata = feedparser.parse(rss_url)
rssdata = rssdata.entries[track]
except:
        print('Unexpected error: {0}'.format(sys.exc_info()))
sys.exit(1)
mp3_url = rssdata.media_content[0]['url']
player = vlc.MediaPlayer(mp3_url)
player.audio_set_volume(100)
player.play()
stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
stdscr.nodelay(1)
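    # Key bindings: k/j = volume up/down, l/h = seek forward/back 10 s, space = pause/resume, q = quit.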
while True:
try:
if player.is_playing():
status = 'playing...'
else:
status = 'pause...'
key_input = stdscr.getch()
if key_input == ord('k'):
player.audio_set_volume(int(player.audio_get_volume() + 5))
elif key_input == ord('j'):
player.audio_set_volume(int(player.audio_get_volume() - 5))
elif key_input == ord('l'):
player.set_time(player.get_time() + 10000)
elif key_input == ord('h'):
player.set_time(player.get_time() - 10000)
elif key_input == ord(' '):
player.pause()
elif key_input == ord('q'):
curses.nocbreak()
curses.echo()
curses.endwin()
sys.exit(0)
else:
pass
hours, minutes, seconds = converttime(player.get_time() / 1000)
m_hours, m_minutes, m_seconds = converttime(
player.get_length() / 1000)
comment = '\r{0} time: {1:0>2}:{2:0>2}:{3:0>2} /\
{4:0>2}:{5:0>2}:{6:0>2} volume:{7} '.format(
status, hours, minutes, seconds, m_hours, m_minutes,
m_seconds, player.audio_get_volume()
)
stdscr.addstr(0, 0, rssdata.title)
stdscr.addstr(1, 0, comment)
stdscr.refresh()
time.sleep(0.1)
except KeyboardInterrupt:
curses.nocbreak()
curses.echo()
curses.endwin()
def write_list(filename,items):
with open(filename, 'w') as f:
for item in items:
f.write(item + '\n')
def detail(channel_url):
rssdata = feedparser.parse(channel_url)
for index, entry in enumerate(rssdata.entries):
print(index, entry.title)
def main():
args = arg()
# Load Channels
with open('.channels', 'r') as f:
channels = [item.strip() for item in f.readlines()]
if args.list:
for index, channel in enumerate(channels):
print(index, channel)
if args.add:
channels.append(args.add)
write_list('.channels', channels)
if args.delete>=0:
del channels[args.delete]
write_list('.channels', channels)
if args.detail >= 0:
detail(channels[args.detail])
if args.play:
stream(channels[args.channel], args.track)
if args.download:
mp3_url = feedparser.parse(channels[args.channel]).entries[
args.track].media_content[0]['url']
wget.download(mp3_url)
if __name__ == '__main__':
main()
| 30.813793 | 75 | 0.57744 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 765 | 0.171218 |
23edadd6c1315ae3bef9cd266a3d92857c911930 | 229 | py | Python | tfbs_footprinter-runner.py | thirtysix/TFBS_footprinting | f627e0a5186e00fe166dad46b21d9b2742b51760 | [
"MIT"
]
| null | null | null | tfbs_footprinter-runner.py | thirtysix/TFBS_footprinting | f627e0a5186e00fe166dad46b21d9b2742b51760 | [
"MIT"
]
| null | null | null | tfbs_footprinter-runner.py | thirtysix/TFBS_footprinting | f627e0a5186e00fe166dad46b21d9b2742b51760 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Convenience wrapper for running tfbs_footprinter directly from source tree."""
from tfbs_footprinter.tfbs_footprinter import main
if __name__ == '__main__':
main()
| 17.615385 | 81 | 0.694323 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.58952 |
23ee7f3b59a96672f837686dde3019287c34f061 | 2,573 | py | Python | metalfi/src/data/meta/importance/shap.py | CemOezcan/metalfi | d7a071eea0229ce621fa07e3474a26d43bfaac66 | [
"MIT"
]
| 2 | 2019-12-05T07:57:14.000Z | 2019-12-05T13:02:08.000Z | metalfi/src/data/meta/importance/shap.py | CemOezcan/metalfi | d7a071eea0229ce621fa07e3474a26d43bfaac66 | [
"MIT"
]
| 31 | 2019-12-05T15:14:47.000Z | 2020-12-04T14:37:46.000Z | metalfi/src/data/meta/importance/shap.py | CemOezcan/metalfi | d7a071eea0229ce621fa07e3474a26d43bfaac66 | [
"MIT"
]
| 1 | 2020-12-04T13:40:11.000Z | 2020-12-04T13:40:11.000Z | import shap
from pandas import DataFrame
from sklearn.preprocessing import StandardScaler
from metalfi.src.data.meta.importance.featureimportance import FeatureImportance
class ShapImportance(FeatureImportance):
def __init__(self, dataset):
super(ShapImportance, self).__init__(dataset)
self._name = "_SHAP"
def calculateScores(self):
sc = StandardScaler()
X = DataFrame(data=sc.fit_transform(self._data_frame.drop(self._target, axis=1)),
columns=self._data_frame.drop(self._target, axis=1).columns)
y = self._data_frame[self._target]
for model in self._linear_models:
self._feature_importances.append(self.linearShap(model, X, y))
for model in self._tree_models:
self._feature_importances.append(self.treeShap(model, X, y))
for model in self._kernel_models:
self._feature_importances.append(self.kernelShap(model, X, y))
def treeShap(self, model, X, y):
model.fit(X, y)
imp = shap.TreeExplainer(model).shap_values(X)
#shap.summary_plot(imp[1], X, plot_type="bar")
return self.createDataFrame(imp[1], X)
def linearShap(self, model, X, y):
model.fit(X, y)
imp = shap.LinearExplainer(model, X).shap_values(X)
#shap.summary_plot(imp, X, plot_type="bar")
return self.createDataFrame(imp, X)
def treeRegressionShap(self, model, X, y):
model.fit(X, y)
imp = shap.TreeExplainer(model).shap_values(X)
#shap.summary_plot(imp, X, plot_type="bar")
return self.createDataFrame(imp, X)
def kernelShap(self, model, X, y, k=10):
model.fit(X, y)
X_summary = shap.kmeans(X, k)
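        # The k-means summary is used as the background dataset because KernelExplainer
        # is too slow to evaluate against every training row.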
imp = shap.KernelExplainer(model.predict, X_summary).shap_values(X)
#shap.summary_plot(imp, X, plot_type="bar")
return self.createDataFrame(imp, X)
def createDataFrame(self, array, X):
        if isinstance(array, list):  # multi-output models return one SHAP array per output; average them
importances = list(map(lambda x: x / len(array),
map(sum,
zip(*[self.calculateImportances(c) for c in array]))))
else:
importances = self.calculateImportances(array)
return DataFrame(data=importances, index=X.columns, columns=["Importances"])
def calculateImportances(self, array):
importances = list()
for i in range(len(array[0])):
importances.append(sum([abs(x[i]) for x in array]) / len(array))
return importances
| 34.306667 | 93 | 0.629227 | 2,397 | 0.931597 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.079285 |
23ef7212ca626e96219a55f6302d2adc0e8dabbe | 5,704 | py | Python | Engine.py | MaciejKrol51/chess | 457590768d338b900253ba345e64e56afbdf1ddd | [
"Apache-2.0"
]
| null | null | null | Engine.py | MaciejKrol51/chess | 457590768d338b900253ba345e64e56afbdf1ddd | [
"Apache-2.0"
]
| null | null | null | Engine.py | MaciejKrol51/chess | 457590768d338b900253ba345e64e56afbdf1ddd | [
"Apache-2.0"
]
| null | null | null | def is_area_in_board(area):
if 0 <= area[0] <= 7 and 0 <= area[1] <= 7:
return True
return False
def cancel_castling(checker):
if abs(checker.val) == 50 or abs(checker.val) == 900:
checker.castling = False
def is_king_beaten(board, color):
for row in board:
for area in row:
            if area.checker is not None and area.checker.color != color:  # opposing piece ('is not None' per PyCharm's hint)
if area.checker.king_attack(board):
return True
return False
class Engine:
def __init__(self):
self.b_check = False
self.b_move_check = 0
self.w_check = False
self.w_move_check = 0
self.move_count = 0
self.win = 1
def what_kind_of_move(self, prev_pos, new_pos, board):
checker = board[prev_pos[0]][prev_pos[1]].checker
if abs(checker.val) == 10 and new_pos in checker.set_passe(board, self.move_count):
return 'Passe'
elif abs(checker.val) == 900 and new_pos in checker.set_castling(board):
return 'Castling'
else:
return 'Normal'
def normal_move(self, prev_pos, new_pos, board):
checker = board[prev_pos[0]][prev_pos[1]].checker
cancel_castling(checker)
checker.pos = new_pos
board[prev_pos[0]][prev_pos[1]].checker = None
board[new_pos[0]][new_pos[1]].checker = checker
def move_checker(self, prev_pos, new_pos, board):
checker = board[prev_pos[0]][prev_pos[1]].checker
if abs(checker.val) == 10:
self.passe_move(prev_pos, new_pos, board)
elif abs(checker.val) == 900:
self.castling_move(prev_pos, new_pos, board)
else:
self.normal_move(prev_pos, new_pos, board)
self.move_count += 1
def castling_move(self, prev_pos, new_pos, board):
if new_pos in board[prev_pos[0]][prev_pos[1]].checker.set_castling(board):
row = 0
if self.which_tour() == 1:
row = 7
board[row][4].checker.castling = False
self.normal_move((row, 4), new_pos, board)
if new_pos[1] == 2:
self.normal_move((row, 0), (row, 3), board)
else:
self.normal_move((row, 7), (row, 5), board)
else:
self.normal_move(prev_pos, new_pos, board)
def passe_move(self, prev_pos, new_pos, board):
if new_pos in board[prev_pos[0]][prev_pos[1]].checker.set_double_move(board):
board[prev_pos[0]][prev_pos[1]].checker.move_passe = self.move_count
self.normal_move(prev_pos, new_pos, board)
elif new_pos in board[prev_pos[0]][prev_pos[1]].checker.set_passe(board, self.move_count):
self.normal_move(prev_pos, new_pos, board)
color = board[new_pos[0]][new_pos[1]].checker.color
board[new_pos[0] + 1 * color][new_pos[1]].checker = None
else:
self.normal_move(prev_pos, new_pos, board)
board[new_pos[0]][new_pos[1]].checker.is_promotion(board)
def is_check(self, w_king_beat, b_king_beat):
if w_king_beat and b_king_beat:
if self.w_check is False and self.b_check is False:
self.w_check = True
self.w_move_check = self.move_count
self.b_check = True
self.b_move_check = self.move_count
elif w_king_beat and self.w_check is False:
self.w_check = True
self.w_move_check = self.move_count
self.b_check = False
self.b_move_check = 0
elif b_king_beat and self.b_check is False:
self.b_check = True
self.b_move_check = self.move_count
self.w_check = False
self.w_move_check = 0
def is_checkmate(self):
if self.w_check is True and self.move_count != self.w_move_check and self.move_count - self.w_move_check <= 2:
self.win = -1
return True
elif self.b_check is True and self.move_count != self.b_move_check and self.move_count - self.b_move_check <= 2:
self.win = 1
return True
else:
return False
def is_end(self, board):
w_king_beat = is_king_beaten(board, 1)
b_king_beat = is_king_beaten(board, -1)
if w_king_beat or b_king_beat:
self.is_check(w_king_beat, b_king_beat)
return self.is_checkmate()
self.w_check = False
self.w_move_check = 0
self.b_check = False
self.b_move_check = 0
return False
def which_tour(self):
if self.move_count % 2 == 0:
return 1
else:
return -1
def copy(self):
copy = Engine()
copy.b_check = self.b_check
copy.b_move_check = self.b_move_check
copy.w_check = self.w_check
copy.w_move_check = self.w_move_check
copy.move_count = self.move_count
copy.win = self.win
return copy
def value_of_table(self, board, bot):
ans = 0
for row in range(8):
for area in range(8):
if board[row][area].checker is not None:
ans += board[row][area].checker.val #+ bot.get_position_val(board[row][area].checker)
return ans
def back_move(self, prev_pos, now_pos, move_checker, beat_checker, board):
board[prev_pos[0]][prev_pos[1]].checker = move_checker
board[now_pos[0]][now_pos[1]].checker = beat_checker
self.move_count -= 1
| 37.526316 | 121 | 0.577489 | 5,162 | 0.904979 | 0 | 0 | 0 | 0 | 0 | 0 | 99 | 0.017356 |
23f06c21c858b67e6817ed29322c8b3b1f30395d | 2,281 | py | Python | jsportal_docsite/portal/markdown_extensions/__init__.py | jumpscale7/prototypes | a17f20aa203d4965708b6e0e3a34582f55baac30 | [
"Apache-2.0"
]
| null | null | null | jsportal_docsite/portal/markdown_extensions/__init__.py | jumpscale7/prototypes | a17f20aa203d4965708b6e0e3a34582f55baac30 | [
"Apache-2.0"
]
| null | null | null | jsportal_docsite/portal/markdown_extensions/__init__.py | jumpscale7/prototypes | a17f20aa203d4965708b6e0e3a34582f55baac30 | [
"Apache-2.0"
]
| null | null | null | """
Original code Copyright 2009 [Waylan Limberg](http://achinghead.com)
All changes Copyright 2008-2014 The Python Markdown Project
Changed by Mohammad Tayseer to add CSS classes to table
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from markdown import Extension
from markdown.extensions.tables import TableProcessor
from markdown.util import etree
class BootstrapTableProcessor(TableProcessor):
    # This method was copied from TableProcessor.run; the only change is the added
    # table.set('class', ...) call that applies the Bootstrap table classes.
def run(self, parent, blocks):
""" Parse a table block and build table. """
block = blocks.pop(0).split('\n')
header = block[0].strip()
seperator = block[1].strip()
rows = block[2:]
# Get format type (bordered by pipes or not)
border = False
if header.startswith('|'):
border = True
# Get alignment of columns
align = []
for c in self._split_row(seperator, border):
if c.startswith(':') and c.endswith(':'):
align.append('center')
elif c.startswith(':'):
align.append('left')
elif c.endswith(':'):
align.append('right')
else:
align.append(None)
# Build table
table = etree.SubElement(parent, 'table')
table.set('class', 'table table-striped table-bordered table-hover')
thead = etree.SubElement(table, 'thead')
self._build_row(header, thead, align, border)
tbody = etree.SubElement(table, 'tbody')
for row in rows:
self._build_row(row.strip(), tbody, align, border)
class BootstrapTableExtension(Extension):
""" Add tables to Markdown. """
def extendMarkdown(self, md, md_globals):
""" Add an instance of TableProcessor to BlockParser. """
md.parser.blockprocessors.add('bootstraptable',
BootstrapTableProcessor(md.parser),
'<hashheader')
def makeExtension(*args, **kwargs):
return BootstrapTableExtension(*args, **kwargs)
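# A minimal usage sketch (illustrative; the import path depends on where this
# package lives in your project, e.g. `portal.markdown_extensions`):
#
#     import markdown
#     from portal.markdown_extensions import BootstrapTableExtension
#
#     html = markdown.markdown(
#         "Name | Value\n"
#         "---- | -----\n"
#         "foo  | 1\n",
#         extensions=[BootstrapTableExtension()],
#     )
#     # the rendered <table> now carries the Bootstrap classes set in run()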
| 35.092308 | 88 | 0.621657 | 1,725 | 0.756247 | 0 | 0 | 0 | 0 | 0 | 0 | 767 | 0.336256 |
23f14aa8cb681028e47a2e9707262f0b7d8d18f4 | 6,320 | py | Python | NAS/single-path-one-shot/src/MNIST/test.py | naviocean/SimpleCVReproduction | 61b43e3583977f42e6f91ef176ec5e1701e98d33 | [
"Apache-2.0"
]
| 923 | 2020-01-11T06:36:53.000Z | 2022-03-31T00:26:57.000Z | NAS/single-path-one-shot/src/MNIST/test.py | Twenty3hree/SimpleCVReproduction | 9939f8340c54dbd69b0017cecad875dccf428f26 | [
"Apache-2.0"
]
| 25 | 2020-02-27T08:35:46.000Z | 2022-01-25T08:54:19.000Z | NAS/single-path-one-shot/src/MNIST/test.py | Twenty3hree/SimpleCVReproduction | 9939f8340c54dbd69b0017cecad875dccf428f26 | [
"Apache-2.0"
]
| 262 | 2020-01-02T02:19:40.000Z | 2022-03-23T04:56:16.000Z | import argparse
import json
import logging
import os
import sys
import time
import cv2
import numpy as np
import PIL
import torch
import torch.nn as nn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from PIL import Image
from angle import generate_angle
# from cifar100_dataset import get_dataset
from slimmable_resnet20 import mutableResNet20
from utils import (ArchLoader, AvgrageMeter, CrossEntropyLabelSmooth, accuracy,
get_lastest_model, get_parameters, save_checkpoint, bn_calibration_init)
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def get_args():
parser = argparse.ArgumentParser("ResNet20-Cifar100-oneshot")
parser.add_argument('--arch-batch', default=200,
type=int, help="arch batch size")
parser.add_argument(
'--path', default="Track1_final_archs.json", help="path for json arch files")
parser.add_argument('--eval', default=False, action='store_true')
parser.add_argument('--eval-resume', type=str,
default='./snet_detnas.pkl', help='path for eval model')
parser.add_argument('--batch-size', type=int,
default=10240, help='batch size')
parser.add_argument('--save', type=str, default='./weights',
help='path for saving trained weights')
parser.add_argument('--label-smooth', type=float,
default=0.1, help='label smoothing')
parser.add_argument('--auto-continue', type=bool,
default=True, help='report frequency')
parser.add_argument('--display-interval', type=int,
default=20, help='report frequency')
parser.add_argument('--val-interval', type=int,
default=10000, help='report frequency')
parser.add_argument('--save-interval', type=int,
default=10000, help='report frequency')
parser.add_argument('--train-dir', type=str,
default='data/train', help='path to training dataset')
parser.add_argument('--val-dir', type=str,
default='data/val', help='path to validation dataset')
args = parser.parse_args()
return args
def main():
args = get_args()
# archLoader
arch_loader = ArchLoader(args.path)
# Log
log_format = '[%(asctime)s] %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%d %I:%M:%S')
t = time.time()
local_time = time.localtime(t)
if not os.path.exists('./log'):
os.mkdir('./log')
fh = logging.FileHandler(os.path.join(
'log/train-{}{:02}{}'.format(local_time.tm_year % 2000, local_time.tm_mon, t)))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
use_gpu = False
if torch.cuda.is_available():
use_gpu = True
val_loader = torch.utils.data.DataLoader(
datasets.MNIST(root="./data", train=False, transform=transforms.Compose([
transforms.Resize(32),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=True)
print('load data successfully')
model = mutableResNet20(10)
criterion_smooth = CrossEntropyLabelSmooth(1000, 0.1)
if use_gpu:
model = nn.DataParallel(model)
loss_function = criterion_smooth.cuda()
device = torch.device("cuda")
else:
loss_function = criterion_smooth
device = torch.device("cpu")
model = model.to(device)
print("load model successfully")
all_iters = 0
print('load from latest checkpoint')
lastest_model, iters = get_lastest_model()
if lastest_model is not None:
all_iters = iters
checkpoint = torch.load(
lastest_model, map_location=None if use_gpu else 'cpu')
model.load_state_dict(checkpoint['state_dict'], strict=True)
    # parameter setup
args.loss_function = loss_function
args.val_dataloader = val_loader
print("start to validate model")
validate(model, device, args, all_iters=all_iters, arch_loader=arch_loader)
def validate(model, device, args, *, all_iters=None, arch_loader=None):
assert arch_loader is not None
objs = AvgrageMeter()
top1 = AvgrageMeter()
top5 = AvgrageMeter()
loss_function = args.loss_function
val_dataloader = args.val_dataloader
model.eval()
# model.apply(bn_calibration_init)
max_val_iters = 0
t1 = time.time()
result_dict = {}
arch_dict = arch_loader.get_arch_dict()
base_model = mutableResNet20(10).cuda()
with torch.no_grad():
        for key, value in arch_dict.items():  # iterate over every candidate architecture
max_val_iters += 1
# print('\r ', key, ' iter:', max_val_iters, end='')
            for data, target in val_dataloader:  # run through the whole validation set
target = target.type(torch.LongTensor)
data, target = data.to(device), target.to(device)
output = model(data, value["arch"])
prec1, prec5 = accuracy(output, target, topk=(1, 5))
print("acc1: ", prec1.item())
n = data.size(0)
top1.update(prec1.item(), n)
top5.update(prec5.item(), n)
tmp_dict = {}
tmp_dict['arch'] = value['arch']
tmp_dict['acc'] = top1.avg
result_dict[key] = tmp_dict
with open("acc_result.json","w") as f:
json.dump(result_dict, f)
# angle_result_dict = {}
# with torch.no_grad():
# for key, value in arch_dict.items():
# angle = generate_angle(base_model, model.module, value["arch"])
# tmp_dict = {}
# tmp_dict['arch'] = value['arch']
# tmp_dict['acc'] = angle.item()
# print("angle: ", angle.item())
# angle_result_dict[key] = tmp_dict
# print('\n', "="*10, "RESULTS", "="*10)
# for key, value in result_dict.items():
# print(key, "\t", value)
# print("="*10, "E N D", "="*10)
# with open("angle_result.json", "w") as f:
# json.dump(angle_result_dict, f)
if __name__ == "__main__":
main()
| 31.287129 | 91 | 0.612025 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,612 | 0.253858 |
23f14e1f84f7c3d2bff9dca3e337c8e7cd4c2c5e | 3,231 | py | Python | examples/pixel/plot_0_image.py | DeepanshS/csdmpy | ae8d20dd09f217bb462af67a3145bb6fcb025def | [
"BSD-3-Clause"
]
| 7 | 2020-01-04T20:46:08.000Z | 2021-05-26T21:09:25.000Z | examples/pixel/plot_0_image.py | deepanshs/csdmpy | bd4e138b10694491113b10177a89305697f1752c | [
"BSD-3-Clause"
]
| 16 | 2021-06-09T06:28:27.000Z | 2022-03-01T18:12:33.000Z | examples/pixel/plot_0_image.py | deepanshs/csdmpy | bd4e138b10694491113b10177a89305697f1752c | [
"BSD-3-Clause"
]
| 1 | 2020-01-03T17:04:16.000Z | 2020-01-03T17:04:16.000Z | # -*- coding: utf-8 -*-
"""
Image, 2D{3} datasets
^^^^^^^^^^^^^^^^^^^^^
"""
# %%
# The 2D{3} dataset is two dimensional, :math:`d=2`, with
# a single three-component dependent variable, :math:`p=3`.
# A common example from this subset is perhaps the RGB image dataset.
# An RGB image dataset has two spatial dimensions and one dependent
# variable with three components corresponding to the red, green, and blue color
# intensities.
#
# The following is an example of an RGB image dataset.
import csdmpy as cp
filename = "https://osu.box.com/shared/static/vdxdaitsa9dq45x8nk7l7h25qrw2baxt.csdf"
ImageData = cp.load(filename)
print(ImageData.data_structure)
# %%
# The tuple of the dimension and dependent variable instances from
# the ``ImageData`` instance are
x = ImageData.dimensions
y = ImageData.dependent_variables
# %%
# respectively. There are two dimensions, and the coordinates along each
# dimension are
print("x0 =", x[0].coordinates[:10])
# %%
print("x1 =", x[1].coordinates[:10])
# %%
# respectively, where only the first ten coordinates along each dimension are displayed.
# %%
# The dependent variable is the image data, as also seen from the
# :attr:`~csdmpy.DependentVariable.quantity_type` attribute
# of the corresponding :ref:`dv_api` instance.
print(y[0].quantity_type)
# %%
# In the value `pixel_3`, `pixel` indicates pixel data, while `3`
# indicates the number of pixel components.
# %%
# As usual, the components of the dependent variable are accessed through
# the :attr:`~csdmpy.DependentVariable.components` attribute.
# To access the individual components, use the appropriate array indexing.
# For example,
print(y[0].components[0])
# %%
# will return an array with the first component of all data values. In this case,
# the components correspond to the red color intensity, also indicated by the
# corresponding component label. The label corresponding to
# the component array is accessed through the
# :attr:`~csdmpy.DependentVariable.component_labels`
# attribute with appropriate indexing, that is
print(y[0].component_labels[0])
# %%
# To avoid displaying larger output, as an example, we print the shape of
# each component array (using Numpy array's `shape` attribute) for the three
# components along with their respective labels.
# %%
print(y[0].component_labels[0], y[0].components[0].shape)
# %%
print(y[0].component_labels[1], y[0].components[1].shape)
# %%
print(y[0].component_labels[2], y[0].components[2].shape)
# %%
# The shape (768, 1024) corresponds to the number of points along each
# dimension instance.
# %%
# .. note::
# In this example, since there is only one dependent variable, the index
# of `y` is set to zero, which is ``y[0]``. The indices for the
# :attr:`~csdmpy.DependentVariable.components` and the
# :attr:`~csdmpy.DependentVariable.component_labels`,
# on the other hand, spans through the number of components.
# %%
# Now, to visualize the dataset as an RGB image,
import matplotlib.pyplot as plt
ax = plt.subplot(projection="csdm")
ax.imshow(ImageData, origin="upper")
plt.tight_layout()
plt.show()
| 31.990099 | 85 | 0.701021 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,518 | 0.779325 |
23f1798fb64ee4b5169a0bf90b985ef75feb7390 | 76 | py | Python | xdl/blueprints/__init__.py | mcrav/xdl | c120a1cf50a9b668a79b118700930eb3d60a9298 | [
"MIT"
]
| null | null | null | xdl/blueprints/__init__.py | mcrav/xdl | c120a1cf50a9b668a79b118700930eb3d60a9298 | [
"MIT"
]
| null | null | null | xdl/blueprints/__init__.py | mcrav/xdl | c120a1cf50a9b668a79b118700930eb3d60a9298 | [
"MIT"
]
| null | null | null | from .procedure import (
CrossCouplingBlueprint,
GenericBlueprint
)
| 15.2 | 27 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
23f2b2f6f97b3acdf979b2b92b12fa1475acc97b | 141 | py | Python | ex013 - Reajuste Salarial/app.py | daphi-ny/python-exercicios | 0836fd1a134f07dc1cb29f7c31fce75fff65f963 | [
"MIT"
]
| null | null | null | ex013 - Reajuste Salarial/app.py | daphi-ny/python-exercicios | 0836fd1a134f07dc1cb29f7c31fce75fff65f963 | [
"MIT"
]
| null | null | null | ex013 - Reajuste Salarial/app.py | daphi-ny/python-exercicios | 0836fd1a134f07dc1cb29f7c31fce75fff65f963 | [
"MIT"
]
| null | null | null | s = float(input('Digite o valor do salário: R$ '))
p = s + (s * 15 / 100)
print('o salário de R$ {} com mais 15% ficará {:.2f}'.format(s, p)) | 47 | 67 | 0.58156 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 82 | 0.569444 |
23f63778d171661ca3379def8f64e54d84bf8d22 | 2,868 | py | Python | analysis/files/files.py | mg98/arbitrary-data-on-blockchains | 6450e638cf7c54f53ef247ff779770b22128a024 | [
"MIT"
]
| 1 | 2022-03-21T01:51:44.000Z | 2022-03-21T01:51:44.000Z | analysis/files/files.py | mg98/arbitrary-data-on-blockchains | 6450e638cf7c54f53ef247ff779770b22128a024 | [
"MIT"
]
| null | null | null | analysis/files/files.py | mg98/arbitrary-data-on-blockchains | 6450e638cf7c54f53ef247ff779770b22128a024 | [
"MIT"
]
| null | null | null | import codecs
import sqlite3
import json
from fnmatch import fnmatch
from abc import ABC, abstractmethod
class FilesAnalysis(ABC):
"""Abstraction for analysis of transaction input data that contain popular file types."""
def __init__(self, chain: str, limit: int = 0, content_types: list[str] = ['*']):
"""
Initialize files analysis.
:param chain Blockchain.
:param limit Limit results processed by BigQuery.
:param content_types List of considerable content types for this analysis. Asterix-sign supported.
"""
self.chain = chain
self.limit = limit
self.file_signatures = FilesAnalysis.get_file_signatures(content_types)
def __enter__(self):
self.conn = sqlite3.connect("results.db")
return self
def __exit__(self, type, val, tb):
self.conn.close()
def insert(self, hash: str, content_type: str, method: str, block_timestamp: str, type: str, data: str, to_contract: bool = False):
self.conn.execute("""
INSERT INTO files_results (
chain, hash, content_type, method, block_timestamp, type, data, to_contract
) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
""", (self.chain, hash, content_type, method, block_timestamp, type, data, to_contract))
self.conn.commit()
@staticmethod
def get_file_signatures(content_types: list[str]) -> dict[str,list[str]]:
"""Returns dict of file signatures filtered by `content_types`."""
with open('analysis/files/file-signatures.json') as f: file_signatures = json.load(f)
return {
content_type : file_signatures[content_type]
for content_type in list(
filter(lambda k: any(fnmatch(k, ct) for ct in content_types), file_signatures)
)
}
def get_content_type(self, input):
"""Returns content type detected in input (candidate with most signature digits)."""
top_candidate = (None, 0) # tuple of content type and signature length
for (content_type, sigs) in self.file_signatures.items():
for sig in sigs:
if sig in input:
if top_candidate[1] < len(sig):
top_candidate = (content_type, len(sig))
return top_candidate[0]
@staticmethod
def hex_to_base64(hex_value: str):
"""Converts hex to base64."""
return codecs.encode(codecs.decode(hex_value, 'hex'), 'base64').decode()
def run(self):
"""Runs the query on BigQuery and persists results to the database."""
# setup database
self.conn.execute("""
CREATE TABLE IF NOT EXISTS files_results (
chain TEXT,
hash TEXT,
content_type TEXT,
method TEXT,
to_contract BOOLEAN,
type TEXT,
data TEXT,
block_timestamp DATETIME,
deleted BOOLEAN DEFAULT 0
)
""")
self.conn.execute("DELETE FROM files_results WHERE chain = ?", (self.chain,))
self.conn.commit()
self.run_core()
@abstractmethod
def run_core(self):
"""Runs the query on BigQuery and persists results to the database."""
raise NotImplementedError("Must override run_core")
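# A rough usage sketch (illustrative only): `EthereumFilesAnalysis` is a
# hypothetical concrete subclass that implements run_core() with the
# chain-specific BigQuery query. "89504e47" is the PNG magic number, so
# get_content_type() should report "image/png" for transaction input data
# containing it, assuming "image/png" and that signature are listed in
# file-signatures.json.
#
#     with EthereumFilesAnalysis(chain="ethereum", content_types=["image/*"]) as fa:
#         fa.run()  # creates/cleans the files_results table and runs the query
#         fa.get_content_type("0x89504e47...")  # -> "image/png"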
| 32.224719 | 132 | 0.709902 | 2,761 | 0.962692 | 0 | 0 | 744 | 0.259414 | 0 | 0 | 1,230 | 0.42887 |
23f755b41ceb13c51fd1941958609398bf18c29d | 3,615 | py | Python | info/models/movie.py | wojciezki/movie_info | 88f089e8eaa5310cf5b03f7aae4f6c9b871282f2 | [
"MIT"
]
| null | null | null | info/models/movie.py | wojciezki/movie_info | 88f089e8eaa5310cf5b03f7aae4f6c9b871282f2 | [
"MIT"
]
| 3 | 2020-02-11T23:47:00.000Z | 2021-06-10T21:13:10.000Z | info/models/movie.py | wojciezki/movie_info | 88f089e8eaa5310cf5b03f7aae4f6c9b871282f2 | [
"MIT"
]
| null | null | null | # Create your models here.
import datetime
from django.db import models
from django.core.validators import MinValueValidator
class Movie(models.Model):
title = models.CharField(max_length=512)
year = models.IntegerField(validators=[MinValueValidator(0), ],
null=True,
blank=True)
rated = models.CharField(max_length=64,
null=True,
blank=True)
released = models.CharField(max_length=64,
null=True,
blank=True)
runtime = models.CharField(max_length=64,
null=True,
blank=True)
genre = models.CharField(max_length=64,
null=True,
blank=True)
director = models.CharField(max_length=512,
null=True,
blank=True)
writer = models.CharField(max_length=512,
null=True,
blank=True)
actors = models.TextField(null=True,
blank=True)
plot = models.TextField(null=True,
blank=True)
language = models.CharField(max_length=64,
null=True,
blank=True)
country = models.CharField(max_length=64,
null=True,
blank=True)
awards = models.CharField(max_length=512,
null=True,
blank=True)
poster = models.TextField(null=True,
blank=True)
ratings = models.TextField(null=True,
blank=True)
metascore = models.CharField(max_length=64,
null=True,
blank=True)
imdbrating = models.CharField(max_length=64,
null=True,
blank=True)
imdbvotes = models.CharField(max_length=64,
null=True,
blank=True)
imdbid = models.CharField(max_length=64,
null=True,
blank=True)
type = models.CharField(max_length=64,
null=True,
blank=True)
dvd = models.CharField(max_length=64,
null=True,
blank=True)
boxoffice = models.CharField(max_length=512,
null=True,
blank=True)
production = models.CharField(max_length=512,
null=True,
blank=True)
website = models.CharField(max_length=512,
null=True,
blank=True)
def __str__(self):
return f'{self.title}'
def total_comments(self, request):
from_date = request.query_params.get('from_date', None)
to_date = request.query_params.get('to_date', None)
if from_date and to_date:
from_date = datetime.datetime.strptime(from_date, "%Y-%m-%d").date()
to_date = datetime.datetime.strptime(to_date, "%Y-%m-%d").date()
return self.comments.filter(created__lte=to_date, created__gte=from_date).count()
else:
return self.comments.all().count()
| 41.079545 | 93 | 0.458645 | 3,487 | 0.964592 | 0 | 0 | 0 | 0 | 0 | 0 | 81 | 0.022407 |
23faddb427ccf2b4a51011515cdd3a2b5edefbe2 | 1,211 | py | Python | examples/pymt-frostnumbermodel-multidim-parameter-study.py | csdms/dakotathon | 6af575b0c21384b2a1ab51e26b6a08512313bd84 | [
"MIT"
]
| 8 | 2019-09-11T12:59:57.000Z | 2021-08-11T16:31:58.000Z | examples/pymt-frostnumbermodel-multidim-parameter-study.py | csdms/dakota | 6af575b0c21384b2a1ab51e26b6a08512313bd84 | [
"MIT"
]
| 66 | 2015-04-06T17:11:21.000Z | 2019-02-03T18:09:52.000Z | examples/pymt-frostnumbermodel-multidim-parameter-study.py | csdms/dakota | 6af575b0c21384b2a1ab51e26b6a08512313bd84 | [
"MIT"
]
| 5 | 2015-03-24T22:39:34.000Z | 2018-04-21T12:14:05.000Z | """An example of using Dakota as a component with PyMT.
This example requires a WMT executor with PyMT installed, as well as
the CSDMS Dakota interface and FrostNumberModel installed as
components.
"""
import os
from pymt.components import MultidimParameterStudy, FrostNumberModel
from dakotathon.utils import configure_parameters
c, d = FrostNumberModel(), MultidimParameterStudy()
parameters = {
"component": type(c).__name__,
"descriptors": ["T_air_min", "T_air_max"],
"partitions": [3, 3],
"lower_bounds": [-20.0, 5.0],
"upper_bounds": [-5.0, 20.0],
"response_descriptors": [
"frostnumber__air",
"frostnumber__surface",
"frostnumber__stefan",
],
"response_statistics": ["median", "median", "median"],
}
parameters, substitutes = configure_parameters(parameters)
parameters["run_directory"] = c.setup(os.getcwd(), **substitutes)
cfg_file = "frostnumber_model.cfg" # get from pymt eventually
parameters["initialize_args"] = cfg_file
dtmpl_file = cfg_file + ".dtmpl"
os.rename(cfg_file, dtmpl_file)
parameters["template_file"] = dtmpl_file
d.setup(parameters["run_directory"], **parameters)
d.initialize("dakota.yaml")
d.update()
d.finalize()
| 27.522727 | 68 | 0.721718 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 549 | 0.453344 |
23fdbc64ade39f6aaca5e42eb2790bc7ac6b2823 | 4,427 | py | Python | tensorflow/train_pretrained.py | sevakon/mobilenetv2 | e6634da41c377ae1c76662d061e6b2b804a3b09c | [
"MIT"
]
| 1 | 2020-01-17T07:54:02.000Z | 2020-01-17T07:54:02.000Z | tensorflow/train_pretrained.py | sevakon/mobilenetv2 | e6634da41c377ae1c76662d061e6b2b804a3b09c | [
"MIT"
]
| null | null | null | tensorflow/train_pretrained.py | sevakon/mobilenetv2 | e6634da41c377ae1c76662d061e6b2b804a3b09c | [
"MIT"
]
| null | null | null | from callback import ValidationHistory
from dataloader import Dataloader
from normalizer import Normalizer
import tensorflow as tf
import numpy as np
import argparse
def get_model(input_shape, n_classes):
base_model = tf.keras.applications.MobileNetV2(input_shape=input_shape,
include_top=False,
weights='imagenet')
model = tf.keras.Sequential([
base_model,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(n_classes, activation='softmax')
])
model.summary()
return model
def get_model_with_nn_head(input_shape, n_classes):
base_model = tf.keras.applications.MobileNetV2(input_shape=input_shape,
include_top=False,
weights='imagenet')
model = tf.keras.Sequential([
base_model,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dropout(.1),
tf.keras.layers.Dense(n_classes, activation='softmax')
])
model.summary()
return model
def write_metrics_to_file(loss_acc, fold_idx):
file = open("pretrained_model/metrics_fold{}.txt".format(fold_idx), "x")
file.write('Best saved model validation accuracy: {}\n'.format(loss_acc[1]))
file.write('Best saved model validation loss: {}\n'.format(loss_acc[0]))
file.close()
def train(config, fold_idx):
print(' ... TRAIN MODEL ON FOLD #{}'.format(fold_idx + 1))
loader = Dataloader(img_size=config.input_size,
n_folds=config.n_folds, seed=config.seed)
loader = loader.fit(config.folder)
classes = loader.classes
train, train_steps = loader.train(batch_size=config.batch_size,
fold_idx=fold_idx, normalize=False)
val, val_steps = loader.val(64, fold_idx)
model = get_model((config.input_size, config.input_size, 3), len(classes))
model.compile(optimizer=tf.keras.optimizers.Adam(), # Optimizer
# Loss function to minimize
loss=tf.keras.losses.CategoricalCrossentropy(),
# List of metrics to monitor
metrics=[tf.keras.metrics.CategoricalAccuracy()])
filepath="pretrained_model/mobilenet_fold{}".format(fold_idx)
checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath,
monitor='val_categorical_accuracy',
verbose=1,
save_best_only=True,
mode='max')
logdir = "logs/fold{}/".format(fold_idx)
tensorboard = tf.keras.callbacks.TensorBoard(log_dir=logdir)
val_history = ValidationHistory()
callbacks = [checkpoint, tensorboard, val_history]
model.fit(train.repeat(),
epochs=config.epochs,
steps_per_epoch = train_steps,
validation_data=val.repeat(),
validation_steps=val_steps,
callbacks=callbacks)
write_metrics_to_file(val_history.best_model_stats('acc'), fold_idx)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Required arguments
parser.add_argument(
"-f",
"--folder",
required=True,
help="Path to directory containing images")
# Optional arguments.
parser.add_argument(
"-s",
"--input_size",
type=int,
default=224,
help="Input image size.")
parser.add_argument(
"-b",
"--batch_size",
type=int,
default=2,
help="Number of images in a training batch.")
parser.add_argument(
"-e",
"--epochs",
type=int,
default=100,
help="Number of training epochs.")
parser.add_argument(
"-seed",
"--seed",
type=int,
default=42,
help="Seed for data reproducing.")
parser.add_argument(
"-n",
"--n_folds",
type=int,
default=5,
help="Number of folds for CV Training")
args = parser.parse_args()
for fold_idx in range(args.n_folds):
train(args, fold_idx)
| 33.793893 | 87 | 0.5733 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 678 | 0.153151 |
23fe13301d5fe663179594a9c1c64fdce727026b | 1,354 | py | Python | source/test.py | valrus/alfred-org-mode-workflow | 30f81772ad16519317ccb170d36782e387988633 | [
"MIT"
]
| 52 | 2016-08-04T02:15:52.000Z | 2021-12-20T20:33:07.000Z | source/test.py | valrus/alfred-org-mode-workflow | 30f81772ad16519317ccb170d36782e387988633 | [
"MIT"
]
| 3 | 2019-11-15T15:13:51.000Z | 2020-11-25T10:42:34.000Z | source/test.py | valrus/alfred-org-mode-workflow | 30f81772ad16519317ccb170d36782e387988633 | [
"MIT"
]
| 9 | 2019-03-06T04:21:29.000Z | 2021-08-16T02:28:33.000Z | # coding=utf-8
from orgmode_entry import OrgmodeEntry
entry = u'#A Etwas machen:: DL: Morgen S: Heute Ausstellung am 23.09.2014 12:00 oder am Montag bzw. am 22.10 13:00 sollte man anschauen. '
org = OrgmodeEntry()
# Use an absolute path
org.inbox_file = '/Users/Alex/Documents/Planung/Planning/Inbox.org'
org.delimiter = ':: ' # tag to separate the head from the body of the entry
org.heading_suffix = "\n* " # depth of entry
org.use_priority_tags = True # use priority tags: #b => [#B]
org.priority_tag = '#' # tag that marks a priority value
org.add_creation_date = True # add a creation date
org.replace_absolute_dates = True # convert absolute dates like 01.10 15:00 into orgmode dates => <2016-10-01 Sun 15:00>
org.replace_relative_dates = True # convert relative dates like monday or tomorrow into orgmode dates
# Convert a schedule pattern into an org scheduled date
org.convert_scheduled = True # convert sche
org.scheduled_pattern = "S: "
# Convert a deadline pattern into an org deadline
org.convert_deadlines = True
org.deadline_pattern = "DL: "
org.smart_line_break = True # convert a pattern into a linebreak
org.line_break_pattern = "\s\s" # two spaces
# Cleanup spaces (double, leading, and trailing)
org.cleanup_spaces = True
entry = 'TODO ' + entry
message = org.add_entry(entry).encode('utf-8')
print(message)
| 33.02439 | 140 | 0.739291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 784 | 0.579025 |
23fead2b5260640c347d0b505721cb2630c98560 | 407 | py | Python | 25/00/2.py | pylangstudy/201706 | f1cc6af6b18e5bd393cda27f5166067c4645d4d3 | [
"CC0-1.0"
]
| null | null | null | 25/00/2.py | pylangstudy/201706 | f1cc6af6b18e5bd393cda27f5166067c4645d4d3 | [
"CC0-1.0"
]
| 70 | 2017-06-01T11:02:51.000Z | 2017-06-30T00:35:32.000Z | 25/00/2.py | pylangstudy/201706 | f1cc6af6b18e5bd393cda27f5166067c4645d4d3 | [
"CC0-1.0"
]
| null | null | null | import gzip
import bz2
import lzma
s = b'witch which has which witches wrist watch'
with open('2.txt', 'wb') as f: f.write(s)
with gzip.open('2.txt.gz', 'wb') as f: f.write(s)
with bz2.open('2.txt.bz2', 'wb') as f: f.write(s)
with lzma.open('2.txt.xz', 'wb') as f: f.write(s)
print('txt', len(s))
print('gz ', len(gzip.compress(s)))
print('bz2', len(bz2.compress(s)))
print('xz ', len(lzma.compress(s)))
| 25.4375 | 49 | 0.641278 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.289926 |
23ff90db58dc31d3acc655b347ff8c32734fce8f | 751 | py | Python | timezones.py | rayjustinhuang/BitesofPy | 03b694c5259ff607621419d9677c5caff90a6057 | [
"MIT"
]
| null | null | null | timezones.py | rayjustinhuang/BitesofPy | 03b694c5259ff607621419d9677c5caff90a6057 | [
"MIT"
]
| null | null | null | timezones.py | rayjustinhuang/BitesofPy | 03b694c5259ff607621419d9677c5caff90a6057 | [
"MIT"
]
| null | null | null | import pytz
from datetime import datetime
MEETING_HOURS = range(6, 23) # meet from 6 - 22 max
TIMEZONES = set(pytz.all_timezones)
def within_schedule(utc, *timezones):
"""Receive a utc datetime and one or more timezones and check if
they are all within schedule (MEETING_HOURS)"""
times = []
timezone_list = list(timezones)
for zone in timezone_list:
if zone not in TIMEZONES:
raise ValueError
tz = pytz.timezone(zone)
times.append(pytz.utc.localize(utc).astimezone(tz))
boolean = []
for time in times:
if time.hour in MEETING_HOURS:
boolean.append(True)
else:
boolean.append(False)
return all(boolean)
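# Usage sketch (illustrative): 14:00 UTC on 2020-06-01 is 16:00 in Amsterdam
# and 10:00 in New York, both inside MEETING_HOURS, so this should return True.
#
#     within_schedule(datetime(2020, 6, 1, 14, 0),
#                     'Europe/Amsterdam', 'America/New_York')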
pass | 25.033333 | 68 | 0.624501 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 141 | 0.18775 |
9b000540f0f753d3e1bc63731ed866572a4a795c | 450 | py | Python | config.py | saurabhchardereal/kernel-tracker | 60d53e6ae377925f8540f148b742869929337088 | [
"MIT"
]
| null | null | null | config.py | saurabhchardereal/kernel-tracker | 60d53e6ae377925f8540f148b742869929337088 | [
"MIT"
]
| null | null | null | config.py | saurabhchardereal/kernel-tracker | 60d53e6ae377925f8540f148b742869929337088 | [
"MIT"
]
 | null | null | null | import sys
from os import environ
from tracker.__main__ import args
# Name of the file to save kernel versions json
DB_FILE_NAME = "data.json"
# By default looks up in env for api and chat id or just put your stuff in here
# directly if you prefer it that way
BOT_API = environ.get("BOT_API")
CHAT_ID = environ.get("CHAT_ID")
if args.notify:
    if not (BOT_API and CHAT_ID):  # unset or empty
print("Either BOT_API or CHAT_ID is empty!")
sys.exit(1)
| 28.125 | 79 | 0.717778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 228 | 0.506667 |
9b019d69f7dc7afa332c3b317d1c035ebf327b40 | 94 | py | Python | dive_sites/apps.py | Scuba-Chris/dive_site_api | 9c5f2a26e6c8a1e2eeaf6cd1b4174e764f83a6b6 | [
"MIT"
]
| null | null | null | dive_sites/apps.py | Scuba-Chris/dive_site_api | 9c5f2a26e6c8a1e2eeaf6cd1b4174e764f83a6b6 | [
"MIT"
]
| 7 | 2020-06-05T21:03:39.000Z | 2021-09-22T18:33:33.000Z | dive_sites/apps.py | Scuba-Chris/dive_site_api | 9c5f2a26e6c8a1e2eeaf6cd1b4174e764f83a6b6 | [
"MIT"
]
| null | null | null | from django.apps import AppConfig
class DiveSitesConfig(AppConfig):
name = 'dive_sites'
| 15.666667 | 33 | 0.765957 | 57 | 0.606383 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.12766 |
9b02acdde4f64a083c7db9498cddd0e187f2c1df | 615 | py | Python | week9/tests/test_utils.py | zzsza/kyle-school | 8cf6cffd3d86a25c29f914a9d4802cdb8e6dd478 | [
"MIT"
]
| 189 | 2019-11-15T11:33:50.000Z | 2022-03-27T08:23:35.000Z | week9/tests/test_utils.py | zzsza/kyle-school | 8cf6cffd3d86a25c29f914a9d4802cdb8e6dd478 | [
"MIT"
]
| 3 | 2020-05-29T03:26:32.000Z | 2021-07-11T15:46:07.000Z | week9/tests/test_utils.py | zzsza/kyle-school | 8cf6cffd3d86a25c29f914a9d4802cdb8e6dd478 | [
"MIT"
]
 | 39 | 2019-11-16T04:02:06.000Z | 2022-03-21T04:18:14.000Z | # Overwrite test_utils.py with the contents below (without the -a option!)
import pytest
import pandas as pd
import datetime
from utils import is_working_day, load_data
def test_is_working_day():
assert is_working_day(datetime.date(2020,7,5)) == False
assert is_working_day(datetime.date(2020,7,4)) == False
assert is_working_day(datetime.date(2020,7,6)) == True
@pytest.fixture(scope="session")
def result_fixture():
result = load_data()
return result
def test_len(result_fixture):
assert len(result_fixture) == 150
def test_object_type(result_fixture):
assert isinstance(result_fixture, pd.DataFrame)
| 21.964286 | 59 | 0.747967 | 0 | 0 | 0 | 0 | 97 | 0.150855 | 0 | 0 | 85 | 0.132193 |
9b02d42862a5d0797afc71d43094512a70c96510 | 3,302 | py | Python | Packs/dnstwist/Integrations/dnstwist/dnstwist.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
]
| 799 | 2016-08-02T06:43:14.000Z | 2022-03-31T11:10:11.000Z | Packs/dnstwist/Integrations/dnstwist/dnstwist.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
]
| 9,317 | 2016-08-07T19:00:51.000Z | 2022-03-31T21:56:04.000Z | Packs/dnstwist/Integrations/dnstwist/dnstwist.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
]
| 1,297 | 2016-08-04T13:59:00.000Z | 2022-03-31T23:43:06.000Z | import json
import subprocess
from CommonServerPython import *
TWIST_EXE = '/dnstwist/dnstwist.py'
if demisto.command() == 'dnstwist-domain-variations':
KEYS_TO_MD = ["whois_updated", "whois_created", "dns_a", "dns_mx", "dns_ns"]
DOMAIN = demisto.args()['domain']
LIMIT = int(demisto.args()['limit'])
WHOIS = demisto.args().get('whois')
def get_dnstwist_result(domain, include_whois):
args = [TWIST_EXE, '-f', 'json']
if include_whois:
args.append('-w')
args.append(domain)
res = subprocess.check_output(args)
return json.loads(res)
def get_domain_to_info_map(dns_twist_result):
results = []
for x in dns_twist_result:
temp = {} # type: dict
for k, v in x.items():
if k in KEYS_TO_MD:
if x["domain"] not in temp:
temp["domain-name"] = x["domain"]
if k == "dns_a":
temp["IP Address"] = v
else:
temp[k] = v
if temp:
results.append(temp)
return results
dnstwist_result = get_dnstwist_result(DOMAIN, WHOIS == 'yes')
new_result = get_domain_to_info_map(dnstwist_result)
md = tableToMarkdown('dnstwist for domain - ' + DOMAIN, new_result,
headers=["domain-name", "IP Address", "dns_mx", "dns_ns", "whois_updated", "whois_created"])
domain_context = new_result[0] # The requested domain for variations
domains_context_list = new_result[1:LIMIT + 1] # The variations domains
domains = []
for item in domains_context_list:
temp = {"Name": item["domain-name"]}
if "IP Address" in item:
temp["IP"] = item["IP Address"]
if "dns_mx" in item:
temp["DNS-MX"] = item["dns_mx"]
if "dns_ns" in item:
temp["DNS-NS"] = item["dns_ns"]
if "whois_updated" in item:
temp["WhoisUpdated"] = item["whois_updated"]
if "whois_created" in item:
temp["WhoisCreated"] = item["whois_created"]
domains.append(temp)
ec = {"Domains": domains}
if "domain-name" in domain_context:
ec["Name"] = domain_context["domain-name"]
if "IP Address" in domain_context:
ec["IP"] = domain_context["IP Address"]
if "dns_mx" in domain_context:
ec["DNS-MX"] = domain_context["dns_mx"]
if "dns_ns" in domain_context:
ec["DNS-NS"] = domain_context["dns_ns"]
if "whois_updated" in domain_context:
ec["WhoisUpdated"] = domain_context["whois_updated"]
if "whois_created" in domain_context:
ec["WhoisCreated"] = domain_context["whois_created"]
entry_result = {
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': dnstwist_result,
'HumanReadable': md,
'ReadableContentsFormat': formats['markdown'],
'EntryContext': {'dnstwist.Domain(val.Name == obj.Name)': ec}
}
demisto.results(entry_result)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration test button.
subprocess.check_output([TWIST_EXE, '-h'], stderr=subprocess.STDOUT)
demisto.results('ok')
sys.exit(0)
| 35.891304 | 117 | 0.58934 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 982 | 0.297396 |
9b036ad8294f9db8fecca4b31663a18176793718 | 595 | py | Python | venv/Lib/site-packages/classutils/introspection.py | avim2809/CameraSiteBlocker | bfc0434e75e8f3f95c459a4adc86b7673200816e | [
"Apache-2.0"
]
| null | null | null | venv/Lib/site-packages/classutils/introspection.py | avim2809/CameraSiteBlocker | bfc0434e75e8f3f95c459a4adc86b7673200816e | [
"Apache-2.0"
]
| null | null | null | venv/Lib/site-packages/classutils/introspection.py | avim2809/CameraSiteBlocker | bfc0434e75e8f3f95c459a4adc86b7673200816e | [
"Apache-2.0"
]
| null | null | null | # encoding: utf-8
import inspect
def caller(frame=2):
"""
Returns the object that called the object that called this function.
    e.g. A calls B, and B calls caller(); caller() then returns A.
:param frame: 0 represents this function
1 represents the caller of this function (e.g. B)
2 (default) represents the caller of B
:return: object reference
"""
stack = inspect.stack()
try:
obj = stack[frame][0].f_locals[u'self']
except KeyError:
pass # Not called from an object
else:
return obj
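# Usage sketch (illustrative): with the default frame=2, caller() returns the
# object whose method invoked the method that called caller().
#
#     class Alpha:
#         def trigger(self, beta):
#             return beta.ask()
#
#     class Beta:
#         def ask(self):
#             return caller()  # -> the Alpha instance that called trigger()
#
#     assert isinstance(Alpha().trigger(Beta()), Alpha)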
| 24.791667 | 72 | 0.616807 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 407 | 0.684034 |
9b049ff801a11852ac7c1f7e34a2e069aca68527 | 3,395 | py | Python | test/test_resourcerequirements.py | noralsydmp/icetea | b486cdc8e0d2211e118f1f8211aa4d284ca02422 | [
"Apache-2.0"
]
| 6 | 2018-08-10T17:11:10.000Z | 2020-04-29T07:05:36.000Z | test/test_resourcerequirements.py | noralsydmp/icetea | b486cdc8e0d2211e118f1f8211aa4d284ca02422 | [
"Apache-2.0"
]
| 58 | 2018-08-13T08:36:08.000Z | 2021-07-07T08:32:52.000Z | test/test_resourcerequirements.py | noralsydmp/icetea | b486cdc8e0d2211e118f1f8211aa4d284ca02422 | [
"Apache-2.0"
]
| 7 | 2018-08-10T12:53:18.000Z | 2021-11-08T05:15:42.000Z | # pylint: disable=missing-docstring,protected-access
"""
Copyright 2017 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from icetea_lib.ResourceProvider.ResourceRequirements import ResourceRequirements
class ResourceRequirementTestcase(unittest.TestCase):
def setUp(self):
self.simple_testreqs = {
"type": "process",
"allowed_platforms": [],
"expires": 2000,
"nick": None,
"tags": {"test": True}
}
self.simple_testreqs2 = {
"type": "process",
"allowed_platforms": ["DEV3"],
"nick": None,
}
self.recursion_testreqs = {
"type": "process",
"allowed_platforms": ["DEV3"],
"application": {"bin": "test_binary"},
"nick": None,
}
self.actual_descriptor1 = {"platform_name": "DEV2", "resource_type": "mbed"}
self.actual_descriptor2 = {"platform_name": "DEV1", "resource_type": "process"}
self.actual_descriptor3 = {"platform_name": "DEV3", "resource_type": "process"}
self.actual_descriptor4 = {"resource_type": "process", "bin": "test_binary"}
def test_get(self):
dutreq = ResourceRequirements(self.simple_testreqs)
self.assertEqual(dutreq.get("type"), "process")
dutreq = ResourceRequirements(self.recursion_testreqs)
self.assertEqual(dutreq.get("application.bin"), "test_binary")
self.assertIsNone(dutreq.get("application.bin.not_exist"))
def test_set(self):
dutreq = ResourceRequirements(self.simple_testreqs)
dutreq.set("test_key", "test_val")
self.assertEqual(dutreq._requirements["test_key"], "test_val")
# Test override
dutreq.set("test_key", "test_val2")
self.assertEqual(dutreq._requirements["test_key"], "test_val2")
# test tags merging. Also a test for set_tag(tags=stuff)
dutreq.set("tags", {"test": False, "test2": True})
self.assertEqual(dutreq._requirements["tags"], {"test": False, "test2": True})
dutreq.set("tags", {"test2": False})
self.assertEqual(dutreq._requirements["tags"], {"test": False, "test2": False})
def test_set_tags(self):
dutreq = ResourceRequirements(self.simple_testreqs)
dutreq._set_tag(tag="test", value=False)
dutreq._set_tag(tag="test2", value=True)
self.assertDictEqual(dutreq._requirements["tags"], {"test": False, "test2": True})
def test_empty_tags(self):
dutreq = ResourceRequirements(self.simple_testreqs)
dutreq._set_tag("test", value=None)
dutreq.remove_empty_tags()
self.assertEqual(dutreq._requirements["tags"], {})
self.assertEqual(dutreq.remove_empty_tags(tags={"test1": True, "test2": None}),
{"test1": True})
if __name__ == '__main__':
unittest.main()
| 38.146067 | 90 | 0.648895 | 2,630 | 0.774669 | 0 | 0 | 0 | 0 | 0 | 0 | 1,338 | 0.394109 |
9b04ad53449f706663e52db825a5918226304aab | 321 | py | Python | hadoop_example/reduce.py | hatbot-team/hatbot | e7fea42b5431cc3e93d9e484c5bb5232d8f2e981 | [
"MIT"
]
| 1 | 2016-05-26T08:18:36.000Z | 2016-05-26T08:18:36.000Z | hadoop_example/reduce.py | hatbot-team/hatbot | e7fea42b5431cc3e93d9e484c5bb5232d8f2e981 | [
"MIT"
]
| null | null | null | hadoop_example/reduce.py | hatbot-team/hatbot | e7fea42b5431cc3e93d9e484c5bb5232d8f2e981 | [
"MIT"
]
| null | null | null | #!/bin/python3
import sys
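# Reducer contract (illustrative): stdin carries tab-separated "key<TAB>count"
# pairs already sorted by key, as Hadoop streaming guarantees, and one summed
# total per key is printed. For example, the input lines
#     apple\t1
#     apple\t1
#     pear\t1
# produce the output
#     apple\t2
#     pear\t1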
prev = ''
cnt = 0
for x in sys.stdin.readlines():
q, w = x.split('\t')[0], int(x.split('\t')[1])
if (prev == q):
        cnt += w  # accumulate the counts emitted for this key
else:
if (cnt > 0):
print(prev + '\t' + str(cnt))
prev = q
cnt = w
if (cnt > 0):
print(prev + '\t' + str(cnt))
| 17.833333 | 50 | 0.433022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.099688 |
9b076c62dfd81be9905f0f82e953e93e7d7c02e5 | 313 | py | Python | covid19_id/pemeriksaan_vaksinasi/vaksinasi_harian.py | hexatester/covid19-id | 8d8aa3f9092a40461a308f4db054ab4f95374849 | [
"MIT"
]
| null | null | null | covid19_id/pemeriksaan_vaksinasi/vaksinasi_harian.py | hexatester/covid19-id | 8d8aa3f9092a40461a308f4db054ab4f95374849 | [
"MIT"
]
| null | null | null | covid19_id/pemeriksaan_vaksinasi/vaksinasi_harian.py | hexatester/covid19-id | 8d8aa3f9092a40461a308f4db054ab4f95374849 | [
"MIT"
]
| null | null | null | import attr
from covid19_id.utils import ValueInt
@attr.dataclass(slots=True)
class VaksinasiHarian:
key_as_string: str
key: int
doc_count: int
jumlah_vaksinasi_2: ValueInt
jumlah_vaksinasi_1: ValueInt
jumlah_jumlah_vaksinasi_1_kum: ValueInt
jumlah_jumlah_vaksinasi_2_kum: ValueInt
| 20.866667 | 43 | 0.782748 | 231 | 0.738019 | 0 | 0 | 259 | 0.827476 | 0 | 0 | 0 | 0 |
9b0792a063a2b49e22d50a2e57caac25388b1b3e | 511 | py | Python | tests/blockchain/test_hashing_and_proof.py | thecoons/blockchain | 426ede04d058b5eb0e595fcf6e9c71d16605f9a7 | [
"MIT"
]
| null | null | null | tests/blockchain/test_hashing_and_proof.py | thecoons/blockchain | 426ede04d058b5eb0e595fcf6e9c71d16605f9a7 | [
"MIT"
]
| null | null | null | tests/blockchain/test_hashing_and_proof.py | thecoons/blockchain | 426ede04d058b5eb0e595fcf6e9c71d16605f9a7 | [
"MIT"
]
| null | null | null | import json
import hashlib
from .test_case.blockchain import BlockchainTestCase
class TestHashingAndProofs(BlockchainTestCase):
def test_hash_is_correct(self):
self.create_block()
new_block = self.blockchain.last_block
new_block_json = json.dumps(
self.blockchain.last_block, sort_keys=True
).encode()
new_hash = hashlib.sha256(new_block_json).hexdigest()
assert len(new_hash) == 64
assert new_hash == self.blockchain.hash(new_block)
| 26.894737 | 61 | 0.702544 | 427 | 0.835616 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9b0816140cf40f94ed1ecf980a99d990c62d409b | 14,495 | py | Python | xgbse/_kaplan_neighbors.py | gdmarmerola/xgboost-survival-embeddings | cb672d5c2bf09c7d8cbf9edf7807a153bce4db40 | [
"Apache-2.0"
]
| null | null | null | xgbse/_kaplan_neighbors.py | gdmarmerola/xgboost-survival-embeddings | cb672d5c2bf09c7d8cbf9edf7807a153bce4db40 | [
"Apache-2.0"
]
| null | null | null | xgbse/_kaplan_neighbors.py | gdmarmerola/xgboost-survival-embeddings | cb672d5c2bf09c7d8cbf9edf7807a153bce4db40 | [
"Apache-2.0"
]
| null | null | null | import warnings
import numpy as np
import pandas as pd
import xgboost as xgb
import scipy.stats as st
from sklearn.neighbors import BallTree
from xgbse._base import XGBSEBaseEstimator
from xgbse.converters import convert_data_to_xgb_format, convert_y
from xgbse.non_parametric import (
calculate_kaplan_vectorized,
get_time_bins,
calculate_interval_failures,
)
# at which percentiles will the KM predict
KM_PERCENTILES = np.linspace(0, 1, 11)
DEFAULT_PARAMS = {
"objective": "survival:aft",
"eval_metric": "aft-nloglik",
"aft_loss_distribution": "normal",
"aft_loss_distribution_scale": 1,
"tree_method": "hist",
"learning_rate": 5e-2,
"max_depth": 8,
"booster": "dart",
"subsample": 0.5,
"min_child_weight": 50,
"colsample_bynode": 0.5,
}
DEFAULT_PARAMS_TREE = {
"objective": "survival:cox",
"eval_metric": "cox-nloglik",
"tree_method": "exact",
"max_depth": 100,
"booster": "dart",
"subsample": 1.0,
"min_child_weight": 30,
"colsample_bynode": 1.0,
}
# class to turn XGB into a kNN with a kaplan meier in the NNs
class XGBSEKaplanNeighbors(XGBSEBaseEstimator):
"""
    ## XGBSEKaplanNeighbors
    Convert xgboost into a nearest neighbor model, where we use hamming distance to define
    similar elements as the ones that co-occurred the most at the ensemble terminal nodes.
Then, at each neighbor-set compute survival estimates with the Kaplan-Meier estimator.
"""
def __init__(self, xgb_params=DEFAULT_PARAMS, n_neighbors=30, radius=None):
"""
Args:
xgb_params (Dict): parameters for XGBoost model, see
https://xgboost.readthedocs.io/en/latest/parameter.html
n_neighbors (Int): number of neighbors for computing KM estimates
radius (Float): If set, uses a radius around the point for neighbors search
"""
self.xgb_params = xgb_params
self.n_neighbors = n_neighbors
self.radius = radius
self.persist_train = False
self.index_id = None
def fit(
self,
X,
y,
num_boost_round=1000,
validation_data=None,
early_stopping_rounds=None,
verbose_eval=0,
persist_train=True,
index_id=None,
time_bins=None,
):
"""
Transform feature space by fitting a XGBoost model and outputting its leaf indices.
Build search index in the new space to allow nearest neighbor queries at scoring time.
Args:
X ([pd.DataFrame, np.array]): design matrix to fit XGBoost model
y (structured array(numpy.bool_, numpy.number)): binary event indicator as first field,
and time of event or time of censoring as second field.
num_boost_round (Int): Number of boosting iterations.
validation_data (Tuple): Validation data in the format of a list of tuples [(X, y)]
if user desires to use early stopping
early_stopping_rounds (Int): Activates early stopping.
Validation metric needs to improve at least once
in every **early_stopping_rounds** round(s) to continue training.
See xgboost.train documentation.
verbose_eval ([Bool, Int]): level of verbosity. See xgboost.train documentation.
persist_train (Bool): whether or not to persist training data to use explainability
through prototypes
index_id (pd.Index): user defined index if intended to use explainability
through prototypes
time_bins (np.array): specified time windows to use when making survival predictions
Returns:
XGBSEKaplanNeighbors: fitted instance of XGBSEKaplanNeighbors
"""
self.E_train, self.T_train = convert_y(y)
if time_bins is None:
time_bins = get_time_bins(self.T_train, self.E_train)
self.time_bins = time_bins
# converting data to xgb format
dtrain = convert_data_to_xgb_format(X, y, self.xgb_params["objective"])
# converting validation data to xgb format
evals = ()
if validation_data:
X_val, y_val = validation_data
dvalid = convert_data_to_xgb_format(
X_val, y_val, self.xgb_params["objective"]
)
evals = [(dvalid, "validation")]
# training XGB
self.bst = xgb.train(
self.xgb_params,
dtrain,
num_boost_round=num_boost_round,
early_stopping_rounds=early_stopping_rounds,
evals=evals,
verbose_eval=verbose_eval,
)
# creating nearest neighbor index
leaves = self.bst.predict(dtrain, pred_leaf=True)
self.tree = BallTree(leaves, metric="hamming", leaf_size=40)
if persist_train:
self.persist_train = True
if index_id is None:
index_id = X.index.copy()
self.index_id = index_id
return self
def predict(
self,
X,
time_bins=None,
return_ci=False,
ci_width=0.683,
return_interval_probs=False,
):
"""
        Make queries to nearest neighbor search index built on the transformed XGBoost space.
Compute a Kaplan-Meier estimator for each neighbor-set. Predict the KM estimators.
Args:
X (pd.DataFrame): data frame with samples to generate predictions
time_bins (np.array): specified time windows to use when making survival predictions
return_ci (Bool): whether to return confidence intervals via the Exponential Greenwood formula
ci_width (Float): width of confidence interval
return_interval_probs (Bool): Boolean indicating if interval probabilities are
supposed to be returned. If False the cumulative survival is returned.
Returns:
(pd.DataFrame): A dataframe of survival probabilities
for all times (columns), from a time_bins array, for all samples of X
(rows). If return_interval_probs is True, the interval probabilities are returned
instead of the cumulative survival probabilities.
upper_ci (np.array): upper confidence interval for the survival
probability values
lower_ci (np.array): lower confidence interval for the survival
probability values
"""
# converting to xgb format
d_matrix = xgb.DMatrix(X)
# getting leaves and extracting neighbors
leaves = self.bst.predict(d_matrix, pred_leaf=True)
if self.radius:
assert self.radius > 0, "Radius must be positive"
neighs, _ = self.tree.query_radius(
leaves, r=self.radius, return_distance=True
)
number_of_neighbors = np.array([len(neigh) for neigh in neighs])
if np.argwhere(number_of_neighbors == 1).shape[0] > 0:
# If there is at least one sample without neighbors apart from itself
# a warning is raised suggesting a radius increase
warnings.warn(
"Warning: Some samples don't have neighbors apart from itself. Increase the radius",
RuntimeWarning,
)
else:
_, neighs = self.tree.query(leaves, k=self.n_neighbors)
# gathering times and events/censors for neighbor sets
T_neighs = self.T_train[neighs]
E_neighs = self.E_train[neighs]
# vectorized (very fast!) implementation of Kaplan Meier curves
if time_bins is None:
time_bins = self.time_bins
# calculating z-score from width
z = st.norm.ppf(0.5 + ci_width / 2)
preds_df, upper_ci, lower_ci = calculate_kaplan_vectorized(
T_neighs, E_neighs, time_bins, z
)
if return_ci and return_interval_probs:
raise ValueError(
"Confidence intervals for interval probabilities is not supported. Choose between return_ci and return_interval_probs."
)
if return_interval_probs:
preds_df = calculate_interval_failures(preds_df)
return preds_df
if return_ci:
return preds_df, upper_ci, lower_ci
return preds_df
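# Minimal usage sketch (illustrative): `X_train`, `y_train`, `X_valid`, `y_valid`
# and `X_test` are placeholders for the user's own data, with `y` given as a
# structured array of (event indicator, time) as expected by convert_y().
#
#     model = XGBSEKaplanNeighbors(n_neighbors=50)
#     model.fit(
#         X_train, y_train,
#         validation_data=(X_valid, y_valid),
#         early_stopping_rounds=10,
#     )
#     survival_curves = model.predict(X_test)  # KM survival per time bin
#     curves, upper_ci, lower_ci = model.predict(X_test, return_ci=True)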
def _align_leaf_target(neighs, target):
# getting times and events for each leaf element
target_neighs = neighs.apply(lambda x: target[x])
# converting to vectorized kaplan format
# filling nas due to different leaf sizes with 0
target_neighs = (
pd.concat([pd.DataFrame(e) for e in target_neighs.values], axis=1)
.T.fillna(0)
.values
)
return target_neighs
# class to turn XGB into a kNN with a kaplan meier in the NNs
class XGBSEKaplanTree(XGBSEBaseEstimator):
"""
## XGBSEKaplanTree
Single tree implementation as a simplification to `XGBSEKaplanNeighbors`.
Instead of doing nearest neighbor searches, fit a single tree via `xgboost`
and calculate KM curves at each of its leaves.
"""
def __init__(
self,
xgb_params=DEFAULT_PARAMS_TREE,
):
self.xgb_params = xgb_params
self.persist_train = False
self.index_id = None
"""
Args:
xgb_params (Dict): parameters for fitting the tree, see
https://xgboost.readthedocs.io/en/latest/parameter.html
"""
def fit(
self,
X,
y,
persist_train=True,
index_id=None,
time_bins=None,
ci_width=0.683,
**xgb_kwargs,
):
"""
Fit a single decision tree using xgboost. For each leaf in the tree,
build a Kaplan-Meier estimator.
Args:
X ([pd.DataFrame, np.array]): design matrix to fit XGBoost model
y (structured array(numpy.bool_, numpy.number)): binary event indicator as first field,
and time of event or time of censoring as second field.
persist_train (Bool): whether or not to persist training data to use explainability
through prototypes
index_id (pd.Index): user defined index if intended to use explainability
through prototypes
time_bins (np.array): specified time windows to use when making survival predictions
ci_width (Float): width of confidence interval
Returns:
XGBSEKaplanTree: Trained instance of XGBSEKaplanTree
"""
E_train, T_train = convert_y(y)
if time_bins is None:
time_bins = get_time_bins(T_train, E_train)
self.time_bins = time_bins
# converting data to xgb format
dtrain = convert_data_to_xgb_format(X, y, self.xgb_params["objective"])
# training XGB
self.bst = xgb.train(self.xgb_params, dtrain, num_boost_round=1, **xgb_kwargs)
# getting leaves
leaves = self.bst.predict(dtrain, pred_leaf=True)
# organizing elements per leaf
leaf_neighs = (
pd.DataFrame({"leaf": leaves})
.groupby("leaf")
.apply(lambda x: list(x.index))
)
# getting T and E for each leaf
T_leaves = _align_leaf_target(leaf_neighs, T_train)
E_leaves = _align_leaf_target(leaf_neighs, E_train)
# calculating z-score from width
z = st.norm.ppf(0.5 + ci_width / 2)
# vectorized (very fast!) implementation of Kaplan Meier curves
(
self._train_survival,
self._train_upper_ci,
self._train_lower_ci,
) = calculate_kaplan_vectorized(T_leaves, E_leaves, time_bins, z)
# adding leaf indexes
self._train_survival = self._train_survival.set_index(leaf_neighs.index)
self._train_upper_ci = self._train_upper_ci.set_index(leaf_neighs.index)
self._train_lower_ci = self._train_lower_ci.set_index(leaf_neighs.index)
if persist_train:
self.persist_train = True
if index_id is None:
index_id = X.index.copy()
self.tree = BallTree(leaves.reshape(-1, 1), metric="hamming", leaf_size=40)
self.index_id = index_id
return self
def predict(self, X, return_ci=False, return_interval_probs=False):
"""
        Run samples through the tree until they reach terminal nodes. Predict the Kaplan-Meier
        estimator associated with the leaf node each sample falls into.
Args:
X (pd.DataFrame): data frame with samples to generate predictions
return_ci (Bool): whether to return confidence intervals via the Exponential Greenwood formula
return_interval_probs (Bool): Boolean indicating if interval probabilities are
supposed to be returned. If False the cumulative survival is returned.
Returns:
preds_df (pd.DataFrame): A dataframe of survival probabilities
for all times (columns), from a time_bins array, for all samples of X
(rows). If return_interval_probs is True, the interval probabilities are returned
instead of the cumulative survival probabilities.
upper_ci (np.array): upper confidence interval for the survival
probability values
lower_ci (np.array): lower confidence interval for the survival
probability values
"""
# converting to xgb format
d_matrix = xgb.DMatrix(X)
# getting leaves and extracting neighbors
leaves = self.bst.predict(d_matrix, pred_leaf=True)
# searching for kaplan meier curves in leaves
preds_df = self._train_survival.loc[leaves].reset_index(drop=True)
upper_ci = self._train_upper_ci.loc[leaves].reset_index(drop=True)
lower_ci = self._train_lower_ci.loc[leaves].reset_index(drop=True)
if return_ci and return_interval_probs:
raise ValueError(
"Confidence intervals for interval probabilities is not supported. Choose between return_ci and return_interval_probs."
)
if return_interval_probs:
preds_df = calculate_interval_failures(preds_df)
return preds_df
if return_ci:
return preds_df, upper_ci, lower_ci
return preds_df
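# Illustrative usage sketch (X_train/y_train/X_test are placeholder names, not part
# of the library): assuming `y` follows the structured-array format expected by
# convert_y() above,
#
#     tree = XGBSEKaplanTree()
#     tree.fit(X_train, y_train)
#     surv = tree.predict(X_test)                          # one survival curve per sample
#     surv, upper, lower = tree.predict(X_test, return_ci=True)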
| 34.186321 | 135 | 0.635598 | 12,906 | 0.890376 | 0 | 0 | 0 | 0 | 0 | 0 | 7,717 | 0.53239 |
9b086dcb5153716593628ec1966115cfb5eef668 | 3,932 | py | Python | homework_2/1.py | jelic98/raf_mu | 8b965fa41d5f89eeea371ab7b8e15bd167325b5f | [
"Apache-2.0"
]
| null | null | null | homework_2/1.py | jelic98/raf_mu | 8b965fa41d5f89eeea371ab7b8e15bd167325b5f | [
"Apache-2.0"
]
| null | null | null | homework_2/1.py | jelic98/raf_mu | 8b965fa41d5f89eeea371ab7b8e15bd167325b5f | [
"Apache-2.0"
]
| 1 | 2021-05-30T15:26:52.000Z | 2021-05-30T15:26:52.000Z | import math
import numpy as np
import pandas as pd
import tensorflow as tf
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import warnings
warnings.filterwarnings('ignore')
# Hyperparameters
epoch_max = 10
alpha_max = 0.025
alpha_min = 0.001
batch_size = 32
window_size = 14
test_ratio = 0.1
max_time = 16
lstm_size = 64
# Load the data
csv = pd.read_csv('data/sp500.csv')
dates, data = csv['Date'].values, csv['Close'].values
# Convert the dates
dates = [dt.datetime.strptime(d, '%Y-%m-%d').date() for d in dates]
dates = [dates[i + max_time] for i in range(len(dates) - max_time)]
# Group the data using a sliding window
data = [data[i : i + window_size] for i in range(len(data) - window_size)]
# Normalize the data
norm = [data[0][0]] + [data[i-1][-1] for i, _ in enumerate(data[1:])]
data = [curr / norm[i] - 1.0 for i, curr in enumerate(data)]
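# The windows are now expressed relative to a reference close price (norm), so the
# model sees relative price changes centred around 0 instead of absolute index levels.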
nb_samples = len(data) - max_time
nb_train = int(nb_samples * (1.0 - test_ratio))
nb_test = nb_samples - nb_train
nb_batches = math.ceil(nb_train / batch_size)
# Group the data for backpropagation through time
x = [data[i : i + max_time] for i in range(nb_samples)]
y = [data[i + max_time][-1] for i in range(nb_samples)]
# Training dataset
train_x = [x[i : i + batch_size] for i in range(0, nb_train, batch_size)]
train_y = [y[i : i + batch_size] for i in range(0, nb_train, batch_size)]
# Test dataset
test_x, test_y = x[-nb_test:], y[-nb_test:]
# Dataset used for denormalization
norm_y = [norm[i + max_time] for i in range(nb_samples)]
norm_test_y = norm_y[-nb_test:]
tf.reset_default_graph()
# Prices over the previous days
X = tf.placeholder(tf.float32, [None, max_time, window_size])
# Price on the current day
Y = tf.placeholder(tf.float32, [None])
# Learning rate
L = tf.placeholder(tf.float32)
# LSTM layer
rnn = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.LSTMCell(lstm_size)])
# Output of the LSTM layer
val, _ = tf.nn.dynamic_rnn(rnn, X, dtype=tf.float32)
val = tf.transpose(val, [1, 0, 2])
# Last output of the LSTM layer
last = tf.gather(val, val.get_shape()[0] - 1)
# Trainable parameters
weight = tf.Variable(tf.random_normal([lstm_size, 1]))
bias = tf.Variable(tf.constant(0.0, shape=[1]))
# Predicted price
prediction = tf.add(tf.matmul(last, weight), bias)
# MSE loss for the prediction
loss = tf.reduce_mean(tf.square(tf.subtract(prediction, Y)))
# Gradient descent using the Adam optimizer
optimizer = tf.train.AdamOptimizer(L).minimize(loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Train the model
for epoch in range(epoch_max):
# Adapt the learning rate
epoch_loss, alpha = 0, max(alpha_min, alpha_max * (1 - epoch / epoch_max))
# Mini-batch gradient descent
for b in np.random.permutation(nb_batches):
loss_val, _ = sess.run([loss, optimizer], {X: train_x[b], Y: train_y[b], L: alpha})
epoch_loss += loss_val
print('Epoch: {}/{}\tLoss: {}'.format(epoch+1, epoch_max, epoch_loss))
# Test the model
test_pred = sess.run(prediction, {X: test_x, Y: test_y, L: alpha})
# Model accuracy for predicting the direction of the price fluctuation
acc = sum(1 for i in range(nb_test) if test_pred[i] * test_y[i] > 0) / nb_test
print('Accuracy: {}'.format(acc))
# Denormalize the data
denorm_y = [(curr + 1.0) * norm_test_y[i] for i, curr in enumerate(test_y)]
denorm_pred = [(curr + 1.0) * norm_test_y[i] for i, curr in enumerate(test_pred)]
# Plot the predictions
plt.figure(figsize=(16,4))
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=7))
plt.plot(dates[-nb_test:], denorm_y, '-b', label='Actual')
plt.plot(dates[-nb_test:], denorm_pred, '--r', label='Predicted')
plt.gcf().autofmt_xdate()
plt.legend()
plt.show()
| 31.206349 | 95 | 0.694557 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 822 | 0.209054 |
9b091fad5fab76f79772a42218911d8db0cd0709 | 420 | py | Python | src/pretalx/submission/migrations/0053_reviewphase_can_tag_submissions.py | lili668668/pretalx | 5ba2185ffd7c5f95254aafe25ad3de340a86eadb | [
"Apache-2.0"
]
| 418 | 2017-10-05T05:52:49.000Z | 2022-03-24T09:50:06.000Z | src/pretalx/submission/migrations/0053_reviewphase_can_tag_submissions.py | lili668668/pretalx | 5ba2185ffd7c5f95254aafe25ad3de340a86eadb | [
"Apache-2.0"
]
| 1,049 | 2017-09-16T09:34:55.000Z | 2022-03-23T16:13:04.000Z | src/pretalx/submission/migrations/0053_reviewphase_can_tag_submissions.py | lili668668/pretalx | 5ba2185ffd7c5f95254aafe25ad3de340a86eadb | [
"Apache-2.0"
]
| 155 | 2017-10-16T18:32:01.000Z | 2022-03-15T12:48:33.000Z | # Generated by Django 3.1 on 2020-10-10 14:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("submission", "0052_auto_20201010_1307"),
]
operations = [
migrations.AddField(
model_name="reviewphase",
name="can_tag_submissions",
field=models.CharField(default="never", max_length=12),
),
]
| 22.105263 | 67 | 0.621429 | 329 | 0.783333 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.292857 |
9b09888d30cc7622a264796e061dbd4cba10dd9a | 440 | py | Python | zzzeeksphinx/theme.py | aidos/zzzeeksphinx | c0fa4be4d40752632e879ec109850caa316ec8af | [
"MIT"
]
| 3 | 2017-08-10T22:26:25.000Z | 2017-09-10T16:07:23.000Z | zzzeeksphinx/theme.py | zzzeek/zzzeeksphinx | 663f5c353e9c3ef3f9676384d429f504feaf20d3 | [
"MIT"
]
| 9 | 2020-07-18T12:31:49.000Z | 2021-10-08T15:19:43.000Z | zzzeeksphinx/theme.py | zzzeek/zzzeeksphinx | 663f5c353e9c3ef3f9676384d429f504feaf20d3 | [
"MIT"
]
| 1 | 2021-02-20T20:57:00.000Z | 2021-02-20T20:57:00.000Z | from os import path
package_dir = path.abspath(path.dirname(__file__))
def setup(app):
app.add_html_theme("zsbase", path.join(package_dir, "themes", "zsbase"))
app.add_html_theme(
"zzzeeksphinx", path.join(package_dir, "themes", "zzzeeksphinx")
)
app.add_html_theme("zsmako", path.join(package_dir, "themes", "zsmako"))
return {
"parallel_read_safe": True,
"parallel_write_safe": True,
}
| 25.882353 | 76 | 0.665909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 125 | 0.284091 |
9b0a82ae7938b94fafa2d863a1f8c7ee8913dbbc | 2,674 | py | Python | playground/toy_grads_compositional.py | TUIlmenauAMS/nca_mss | f0deb4b0acd0e317fb50340a57979c2e0a43c293 | [
"MIT"
]
| 2 | 2019-08-15T11:51:17.000Z | 2019-08-15T12:59:37.000Z | playground/toy_grads_compositional.py | TUIlmenauAMS/nca_mss | f0deb4b0acd0e317fb50340a57979c2e0a43c293 | [
"MIT"
]
| 1 | 2020-08-11T14:25:45.000Z | 2020-08-11T14:25:45.000Z | playground/toy_grads_compositional.py | TUIlmenauAMS/nca_mss | f0deb4b0acd0e317fb50340a57979c2e0a43c293 | [
"MIT"
]
| 1 | 2021-03-16T12:30:31.000Z | 2021-03-16T12:30:31.000Z | # -*- coding: utf-8 -*-
__author__ = 'S.I. Mimilakis'
__copyright__ = 'MacSeNet'
import torch
from torch.autograd import Variable
import numpy as np
dtype = torch.DoubleTensor
np.random.seed(2183)
torch.manual_seed(2183)
# D is the "batch size"; N is input dimension;
# H is hidden dimension; N_out is output dimension.
D, N, H, N_out = 1, 20, 20, 20
# Create random Tensors to hold input and outputs, and wrap them in Variables.
# Setting requires_grad=False indicates that we do not need to compute gradients
# with respect to these Variables during the backward pass.
x = Variable(torch.randn(N, D).type(dtype), requires_grad=True)
y = Variable(torch.randn(N_out, D).type(dtype), requires_grad=False)
# Create random Tensors for weights, and wrap them in Variables.
# Setting requires_grad=True indicates that we want to compute gradients with
# respect to these Variables during the backward pass.
layers = []
biases = []
w_e = Variable(torch.randn(N, H).type(dtype), requires_grad=True)
b_e = Variable(torch.randn(H,).type(dtype), requires_grad=True)
w_d = Variable(torch.randn(H, N_out).type(dtype), requires_grad=True)
b_d = Variable(torch.randn(N_out,).type(dtype), requires_grad=True)
layers.append(w_e)
layers.append(w_d)
biases.append(b_e)
biases.append(b_d)
# Matrices we need the gradients wrt
parameters = torch.nn.ParameterList()
p_e = torch.nn.Parameter(torch.randn(N, H).type(dtype), requires_grad=True)
p_d = torch.nn.Parameter(torch.randn(H, N_out).type(dtype), requires_grad=True)
parameters.append(p_e)
parameters.append(p_d)
# Non-linearity
relu = torch.nn.ReLU()
comb_matrix = torch.autograd.Variable(torch.eye(N), requires_grad=True).double()
for index in range(2):
b_sc_m = relu(parameters[index].mm((layers[index] + biases[index]).t()))
b_scaled = layers[index] * b_sc_m
comb_matrix = torch.matmul(b_scaled, comb_matrix)
y_pred = torch.matmul(comb_matrix, x)
loss = (y - y_pred).norm(1)
loss.backward()
delta_term = (torch.sign(y_pred - y)).mm(x.t())
# With relu
w_tilde_d = relu(parameters[1].mm((layers[1] + biases[1]).t())) * w_d
w_tilde_e = w_e * relu(parameters[0].mm((layers[0] + biases[0]).t()))
relu_grad_dec = p_d.mm((w_d + b_d).t()).gt(0).double()
relu_grad_enc = p_e.mm((w_e + b_e).t()).gt(0).double()
p_d_grad_hat = (delta_term.mm(w_tilde_e.t()) * w_d * relu_grad_dec).mm((w_d + b_d))
p_e_grad_hat = (w_tilde_d.t().mm(delta_term) * w_e * relu_grad_enc).mm((w_e + b_e))
print('Error between autograd computation and calculated:'+str((parameters[1].grad - p_d_grad_hat).abs().max()))
print('Error between autograd computation and calculated:'+str((parameters[0].grad - p_e_grad_hat).abs().max()))
# EOF
| 33.012346 | 112 | 0.726253 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 729 | 0.272625 |
9b0afc3b991d4aa30a7baf6f443e94f56c8d47d5 | 2,657 | py | Python | Servers/Frontend/project/main/views.py | chrisbvt/ML-MalwareDetection | e00d0a0026a7c28886c3d2ab8ca9933e60f049cc | [
"MIT"
]
| null | null | null | Servers/Frontend/project/main/views.py | chrisbvt/ML-MalwareDetection | e00d0a0026a7c28886c3d2ab8ca9933e60f049cc | [
"MIT"
]
| 2 | 2021-02-08T20:36:58.000Z | 2022-03-29T21:58:35.000Z | Servers/Frontend/project/main/views.py | chrisbvt/ML-MalwareDetection | e00d0a0026a7c28886c3d2ab8ca9933e60f049cc | [
"MIT"
]
| null | null | null | # project/main/views.py
#################
#### imports ####
#################
import os
import json
import requests
import pickle
from flask import Blueprint, Flask, jsonify, request, g, url_for, abort, redirect, flash, render_template, current_app
from flask_login import current_user
from flask_login import login_required
from .forms import UploadForm
from werkzeug.utils import secure_filename
from project.indexer_lib import get_metadata_from_file
################
#### config ####
################
main_blueprint = Blueprint('main', __name__,)
################
#### routes ####
################
@main_blueprint.route('/', methods=['GET', 'POST'])
def home():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
uploaded_file = request.files['file']
        # if the user does not select a file, the browser may
        # submit an empty part without a filename
if uploaded_file:
filename = secure_filename(uploaded_file.filename)
uploaded_file.save(os.path.join(current_app.config['UPLOAD_FOLDER'], filename))
full_path = os.path.join(current_app.config['UPLOAD_FOLDER'], filename)
sha1, md5, frame = get_metadata_from_file(full_path)
url = "https://" + current_app.config['ML_SERVER'] + ":" + current_app.config['ML_PORT'] + "/ml/classify_file"
payload = {'sha1': sha1, 'md5': md5, 'metadata': pickle.dumps(frame)}
print "Url was: %s" % url
print "Payload length was: %d" % len(pickle.dumps(frame))
req = requests.post(url, data=payload, verify=False)
print "Response was: "
print req.text
response_json = json.loads(req.text)
rating = abs(int(response_json["Malicious"]) - 1)
print rating
return render_template('main/analysis.html', sha1=sha1, md5=md5, rating=rating, file_name=filename,
current_user=current_user)
else:
return render_template('main/index.html',
message="File failed to upload",
form=UploadForm(), current_user=current_user)
return render_template('main/index.html', form=UploadForm(), current_user=current_user)
@main_blueprint.route('/about')
def about():
return render_template('main/about.html', current_user=current_user)
@main_blueprint.route('/terms')
def terms():
return render_template('main/terms.html', current_user=current_user)
| 34.960526 | 122 | 0.617237 | 0 | 0 | 0 | 0 | 2,040 | 0.767783 | 0 | 0 | 643 | 0.242002 |
9b0ea10947bac276566d22b561a64d291c54aa39 | 3,195 | py | Python | blog/forms.py | oversabiproject/ghostrr | 0bf49537ddf0436d08d705b29bffbd49b66e7c65 | [
"MIT"
]
| null | null | null | blog/forms.py | oversabiproject/ghostrr | 0bf49537ddf0436d08d705b29bffbd49b66e7c65 | [
"MIT"
]
| null | null | null | blog/forms.py | oversabiproject/ghostrr | 0bf49537ddf0436d08d705b29bffbd49b66e7c65 | [
"MIT"
]
| null | null | null | import string
from django import forms
from django.conf import settings
from django.shortcuts import get_object_or_404
from accounts.models import User, Profile
from .models import Blogs
from .utils import get_limit_for_level, write_to_limit
class EditLimitForm(forms.Form):
free_limit = forms.IntegerField(help_text='Enter the limit for the free users')
pro_limit = forms.IntegerField(help_text='Enter the limit for the pro users')
enterprise_limit = forms.IntegerField(help_text='Enter the limit for the enterprise users')
def save(self):
free_limit = self.cleaned_data.get("free_limit")
pro_limit = self.cleaned_data.get("pro_limit")
enterprise_limit = self.cleaned_data.get("enterprise_limit")
write_to_limit(free_limit, pro_limit, enterprise_limit)
return 'Saved'
class CreateBlogForm(forms.Form):
pk = forms.IntegerField()
title = forms.CharField(max_length=255, help_text='Enter a meaningful title of 5-15 words for the blog.')
sentence = forms.CharField(widget=forms.TextInput(), help_text='Enter the first two or more meaningful sentences to set the blog context, approximately 50 - 100 words expected.')
copy_text = forms.CharField(widget=forms.TextInput(), required=False)
copy_length = forms.IntegerField(help_text='Select the length of copy you want')
def clean_copy_length(self):
copy_length = int(self.data.get('copy_length'))
if copy_length not in [1,2]:
raise forms.ValidationError('Invalid length selected')
return copy_length
def clean_title(self):
title = self.data.get('title')
if len(title.split(' ')) < 5:
raise forms.ValidationError('Very few words have been entered for the title. Please enter at least 5 words')
if len(title.split(' ')) > 30:
raise forms.ValidationError('A lot of words have been entered for the title. Please enter less than 30 words only')
return title
def clean_sentence(self):
sentence = self.data.get('sentence')
sentence_split = sentence.split('.')
sentence_len = len(sentence_split)
# # Validate length
# if sentence_len < 10:
# raise forms.ValidationError('Input sentences are too few')
# Validate words length
word_len = 0
for i in sentence_split:
word_len += len(i.split(' '))
if word_len < 50:
raise forms.ValidationError('Very few words have been entered for the Blog description. Please enter at least 50 words')
if word_len > 200:
raise forms.ValidationError('A lot of words have been entered. Please enter less than 200 words')
# # Validate length extra
# word_avg = word_len / sentence_len
# if word_avg < 15:
# raise forms.ValidationError('Sentences entered are too short, Consider making the sentences more longer or meaningful.')
# # Reducing punctuation marks
# for i in string.punctuation:
# sentence = sentence.replace(i+i,i)
return sentence
def save(self, commit=True):
title = self.cleaned_data.get('title')
sentence = self.cleaned_data.get('sentence')
copy_text = self.cleaned_data.get('copy_text')
copy_length = self.cleaned_data.get('copy_length')
# Creating new blog
blog = Blogs(title=title, sentence=sentence, copy_length=copy_length, copy_text=copy_text)
return blog | 34.728261 | 179 | 0.747418 | 2,944 | 0.92144 | 0 | 0 | 0 | 0 | 0 | 0 | 1,242 | 0.388732 |
9b0f367d08c895d53158d4654de98cbeabd4b541 | 1,032 | py | Python | Class Work/Recursion & Search /app.py | Pondorasti/CS-1.2 | c86efa40f8a09c1ca1ce0b937ca63a07108bfc6c | [
"MIT"
]
| null | null | null | Class Work/Recursion & Search /app.py | Pondorasti/CS-1.2 | c86efa40f8a09c1ca1ce0b937ca63a07108bfc6c | [
"MIT"
]
| null | null | null | Class Work/Recursion & Search /app.py | Pondorasti/CS-1.2 | c86efa40f8a09c1ca1ce0b937ca63a07108bfc6c | [
"MIT"
]
| null | null | null | a = [1, 2, 3, 5, 6]
def recursive_search(array, item_to_find, current_index=0):
if current_index == len(array):
return None
elif array[current_index] == item_to_find:
return current_index
else:
return recursive_search(array, item_to_find, current_index + 1)
# print(recursive_search(a, 3))
def binary_search(array, target):
start = 0
end = len(array) - 1
while (start <= end):
mid = (start + end) // 2
if array[mid] == target:
return mid
elif target < array[mid]:
end = mid - 1
else:
start = mid + 1
return None
a = [3,4,5,6,10,12,20]
print(binary_search(a, 5))
def recursive_fibonacci(index, current_index = 1, first = 0, second = 1):
if index == 0:
return 0
elif index == current_index:
return second
else:
return recursive_fibonacci(index, current_index = current_index + 1, first = second, second = first + second)
print(recursive_fibonacci(0)) | 21.957447 | 117 | 0.593992 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.030039 |
9b0fef936f066c73b4c06e85baae1161aaa35969 | 1,134 | py | Python | src/heap/tests/test_max_binary_heap.py | codermrhasan/data-structures-and-algorithms | 98c828bad792d3d6cdd909a8c6935583a8d9f468 | [
"MIT"
]
| null | null | null | src/heap/tests/test_max_binary_heap.py | codermrhasan/data-structures-and-algorithms | 98c828bad792d3d6cdd909a8c6935583a8d9f468 | [
"MIT"
]
| null | null | null | src/heap/tests/test_max_binary_heap.py | codermrhasan/data-structures-and-algorithms | 98c828bad792d3d6cdd909a8c6935583a8d9f468 | [
"MIT"
]
| null | null | null | from heap.max_binary_heap import MaxBinaryHeap
def test_exists_and_instantiation():
assert MaxBinaryHeap
assert MaxBinaryHeap()
def test_properties():
bh = MaxBinaryHeap()
bh.heapList = [1,2,3]
assert bh.heapList == [1,2,3]
def test_percUp():
bh = MaxBinaryHeap()
bh.heapList = [3,2,1,4]
bh.percUp()
assert bh.heapList == [4,3,1,2]
def test_insert():
bh = MaxBinaryHeap()
bh.insert(1)
bh.insert(2)
bh.insert(3)
bh.insert(4)
assert bh.heapList == [4,3,2,1]
def test_maxChild():
bh = MaxBinaryHeap()
bh.heapList = [4,3,2,1]
assert bh.heapList[bh.maxChild(0)] == 3
assert bh.heapList[bh.maxChild(1)] == 1
def test_percDown():
bh = MaxBinaryHeap()
bh.heapList = [1,4,3,2]
bh.percDown()
assert bh.heapList == [4,2,3,1]
def test_delMax():
bh = MaxBinaryHeap()
bh.heapList=[4,3,2,1]
data = bh.delMax()
assert data == 4
assert bh.heapList == [3,1,2]
bh.delMax()
bh.delMax()
assert bh.heapList == [1]
def test_buildHeap():
bh = MaxBinaryHeap()
bh.buildHeap([1,2,3,4])
assert bh.heapList == [4,2,3,1] | 22.235294 | 46 | 0.611993 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9b10e4943ad1ee0b4dae85b2c1d4d6a1aefffc28 | 409 | py | Python | network_anomaly/code/del_duplicate.py | kidrabit/Data-Visualization-Lab-RND | baa19ee4e9f3422a052794e50791495632290b36 | [
"Apache-2.0"
]
| 1 | 2022-01-18T01:53:34.000Z | 2022-01-18T01:53:34.000Z | network_anomaly/code/del_duplicate.py | kidrabit/Data-Visualization-Lab-RND | baa19ee4e9f3422a052794e50791495632290b36 | [
"Apache-2.0"
]
| null | null | null | network_anomaly/code/del_duplicate.py | kidrabit/Data-Visualization-Lab-RND | baa19ee4e9f3422a052794e50791495632290b36 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
def del_duplicate(ip_combine):
ip_combine = list(set(ip_combine))
ip_combine_temp = []
del ip_combine[0]
# print(len(ip_combine))
for i in range(len(ip_combine)):
ip1, ip2 = ip_combine[i].split(":")
if ip2+":"+ip1 not in ip_combine_temp:
ip_combine_temp.append(ip_combine[i])
# print(len(ip_combine_temp))
return ip_combine_temp | 31.461538 | 49 | 0.635697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 90 | 0.220049 |
9b119221fff46228bdcf97a9b0a6cdd84ac53dfa | 6,623 | py | Python | klusta/kwik/mock.py | hrnciar/klusta | 408e898e8d5dd1788841d1f682e51d0dc003a296 | [
"BSD-3-Clause"
]
| 45 | 2016-03-19T14:39:40.000Z | 2021-12-15T06:34:57.000Z | klusta/kwik/mock.py | hrnciar/klusta | 408e898e8d5dd1788841d1f682e51d0dc003a296 | [
"BSD-3-Clause"
]
| 73 | 2016-03-19T16:15:45.000Z | 2022-02-22T16:37:16.000Z | klusta/kwik/mock.py | hrnciar/klusta | 408e898e8d5dd1788841d1f682e51d0dc003a296 | [
"BSD-3-Clause"
]
| 41 | 2016-04-08T14:04:00.000Z | 2021-09-09T20:49:41.000Z | # -*- coding: utf-8 -*-
"""Mock Kwik files."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import os.path as op
import numpy as np
import numpy.random as nr
from .mea import staggered_positions
from .h5 import open_h5
from .model import _create_clustering
#------------------------------------------------------------------------------
# Mock functions
#------------------------------------------------------------------------------
def artificial_waveforms(n_spikes=None, n_samples=None, n_channels=None):
# TODO: more realistic waveforms.
return .25 * nr.normal(size=(n_spikes, n_samples, n_channels))
def artificial_features(*args):
return .25 * nr.normal(size=args)
def artificial_masks(n_spikes=None, n_channels=None):
masks = nr.uniform(size=(n_spikes, n_channels))
masks[masks < .25] = 0
return masks
def artificial_traces(n_samples, n_channels):
# TODO: more realistic traces.
return .25 * nr.normal(size=(n_samples, n_channels))
def artificial_spike_clusters(n_spikes, n_clusters, low=0):
return nr.randint(size=n_spikes, low=low, high=max(1, n_clusters))
def artificial_spike_samples(n_spikes, max_isi=50):
return np.cumsum(nr.randint(low=0, high=max_isi, size=n_spikes))
def artificial_correlograms(n_clusters, n_samples):
return nr.uniform(size=(n_clusters, n_clusters, n_samples))
def mock_prm(dat_path):
return dict(
prb_file='1x32_buzsaki',
traces=dict(
raw_data_files=[dat_path, dat_path],
voltage_gain=10.,
sample_rate=20000,
n_channels=32,
dtype='int16',
),
spikedetekt={
'n_features_per_channel': 4,
},
klustakwik2={},
)
#------------------------------------------------------------------------------
# Mock Kwik file
#------------------------------------------------------------------------------
def create_mock_kwik(dir_path, n_clusters=None, n_spikes=None,
n_channels=None, n_features_per_channel=None,
n_samples_traces=None,
with_kwx=True,
with_kwd=True,
add_original=True,
):
"""Create a test kwik file."""
filename = op.join(dir_path, '_test.kwik')
kwx_filename = op.join(dir_path, '_test.kwx')
kwd_filename = op.join(dir_path, '_test.raw.kwd')
# Create the kwik file.
with open_h5(filename, 'w') as f:
f.write_attr('/', 'kwik_version', 2)
def _write_metadata(key, value):
f.write_attr('/application_data/spikedetekt', key, value)
_write_metadata('sample_rate', 20000.)
# Filter parameters.
_write_metadata('filter_low', 500.)
_write_metadata('filter_high_factor', 0.95 * .5)
_write_metadata('filter_butter_order', 3)
_write_metadata('extract_s_before', 15)
_write_metadata('extract_s_after', 25)
_write_metadata('n_features_per_channel', n_features_per_channel)
# Create spike times.
spike_samples = artificial_spike_samples(n_spikes).astype(np.int64)
spike_recordings = np.zeros(n_spikes, dtype=np.uint16)
# Size of the first recording.
recording_size = 2 * n_spikes // 3
if recording_size > 0:
# Find the recording offset.
recording_offset = spike_samples[recording_size]
recording_offset += spike_samples[recording_size + 1]
recording_offset //= 2
spike_recordings[recording_size:] = 1
# Make sure the spike samples of the second recording start over.
spike_samples[recording_size:] -= spike_samples[recording_size]
spike_samples[recording_size:] += 10
else:
recording_offset = 1
if spike_samples.max() >= n_samples_traces:
raise ValueError("There are too many spikes: decrease 'n_spikes'.")
f.write('/channel_groups/1/spikes/time_samples', spike_samples)
f.write('/channel_groups/1/spikes/recording', spike_recordings)
f.write_attr('/channel_groups/1', 'channel_order',
np.arange(1, n_channels - 1)[::-1])
graph = np.array([[1, 2], [2, 3]])
f.write_attr('/channel_groups/1', 'adjacency_graph', graph)
# Create channels.
positions = staggered_positions(n_channels)
for channel in range(n_channels):
group = '/channel_groups/1/channels/{0:d}'.format(channel)
f.write_attr(group, 'name', str(channel))
f.write_attr(group, 'position', positions[channel])
# Create spike clusters.
clusterings = [('main', n_clusters)]
if add_original:
clusterings += [('original', n_clusters * 2)]
for clustering, n_clusters_rec in clusterings:
spike_clusters = artificial_spike_clusters(n_spikes,
n_clusters_rec)
groups = {0: 0, 1: 1, 2: 2}
_create_clustering(f, clustering, 1, spike_clusters, groups)
# Create recordings.
f.write_attr('/recordings/0', 'name', 'recording_0')
f.write_attr('/recordings/1', 'name', 'recording_1')
f.write_attr('/recordings/0/raw', 'hdf5_path', kwd_filename)
f.write_attr('/recordings/1/raw', 'hdf5_path', kwd_filename)
# Create the kwx file.
if with_kwx:
with open_h5(kwx_filename, 'w') as f:
f.write_attr('/', 'kwik_version', 2)
features = artificial_features(n_spikes,
(n_channels - 2) *
n_features_per_channel)
masks = artificial_masks(n_spikes,
(n_channels - 2) *
n_features_per_channel)
fm = np.dstack((features, masks)).astype(np.float32)
f.write('/channel_groups/1/features_masks', fm)
# Create the raw kwd file.
if with_kwd:
with open_h5(kwd_filename, 'w') as f:
f.write_attr('/', 'kwik_version', 2)
traces = artificial_traces(n_samples_traces, n_channels)
# TODO: int16 traces
f.write('/recordings/0/data',
traces[:recording_offset, ...].astype(np.float32))
f.write('/recordings/1/data',
traces[recording_offset:, ...].astype(np.float32))
return filename
| 36.191257 | 79 | 0.562434 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,731 | 0.261362 |
9b11b55cfbda19b56fe51d5da114dd0268d96bc2 | 1,824 | py | Python | telluride_decoding/preprocess_audio.py | RULCSoft/telluride_decoding | ff2a5b421a499370b379e7f4fc3f28033c045e17 | [
"Apache-2.0"
]
| 8 | 2019-07-03T15:33:52.000Z | 2021-10-21T00:56:43.000Z | telluride_decoding/preprocess_audio.py | RULCSoft/telluride_decoding | ff2a5b421a499370b379e7f4fc3f28033c045e17 | [
"Apache-2.0"
]
| 3 | 2020-09-02T19:04:36.000Z | 2022-03-12T19:46:50.000Z | telluride_decoding/preprocess_audio.py | RULCSoft/telluride_decoding | ff2a5b421a499370b379e7f4fc3f28033c045e17 | [
"Apache-2.0"
]
| 7 | 2019-07-03T15:50:24.000Z | 2020-11-26T12:16:10.000Z | # Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code to compute the audio intensity for preprocessing.
Code that stores incoming arbitrary audio data, and then yields fixed window
sizes for processing (like computing the intensity.)
After initializing the object, add a block of data to the object, and then
pull fixed sized blocks of data with a given half_window_width, and separated
by window_step samples. Data is always X x num_features, where X can change from
add_data call to call, but num_features must not change. Do not reuse the object
because it has internal state from previous calls.
"""
import numpy as np
from telluride_decoding import result_store
class AudioIntensityStore(result_store.WindowedDataStore):
"""Process a window of data, calculating the mean-squared value.
"""
def next_window(self):
for win in super(AudioIntensityStore, self).next_window():
yield np.mean(np.square(win))
class AudioLoudnessMick(result_store.WindowedDataStore):
"""Process a window of data, using Mick's loudness approximation.
"""
def next_window(self):
for audio_data in super(AudioLoudnessMick, self).next_window():
yield np.mean(np.abs(audio_data) ** np.log10(2))
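# Illustrative usage sketch (constructor arguments are assumptions based on the module
# docstring, not the exact WindowedDataStore signature):
#
#     store = AudioIntensityStore(half_window_width=128, window_step=64)
#     store.add_data(samples)               # samples: array of shape (X, num_features)
#     for intensity in store.next_window():
#         print(intensity)                  # mean-squared value of one fixed-size window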
| 36.48 | 80 | 0.733553 | 535 | 0.293311 | 266 | 0.145833 | 0 | 0 | 0 | 0 | 1,343 | 0.736294 |
9b122420662104df8bedddda57c416404fd43cea | 3,355 | py | Python | aioouimeaux/device/__init__.py | frawau/aioouimeaux | ea473ded95e41e350793b0e289944a359049c501 | [
"BSD-3-Clause"
]
| 2 | 2019-01-26T02:44:14.000Z | 2019-08-06T00:40:56.000Z | aioouimeaux/device/__init__.py | frawau/aioouimeaux | ea473ded95e41e350793b0e289944a359049c501 | [
"BSD-3-Clause"
]
| 1 | 2019-05-23T22:35:27.000Z | 2019-05-25T20:23:50.000Z | aioouimeaux/device/__init__.py | frawau/aioouimeaux | ea473ded95e41e350793b0e289944a359049c501 | [
"BSD-3-Clause"
]
| null | null | null | import logging
from urllib.parse import urlsplit
import asyncio as aio
from functools import partial
from .api.service import Service
from .api.xsd import device as deviceParser
from ..utils import requests_get
log = logging.getLogger(__name__)
class DeviceUnreachable(Exception): pass
class UnknownService(Exception): pass
class UnknownSignal(Exception): pass
class NotACallable(Exception): pass
class Device(object):
def __init__(self, url):
self._state = None
self.host = urlsplit(url).hostname
#self.port = urlsplit(url).port
self.services = {}
self.initialized = aio.Future()
self._callback = {"statechange":None}
xx = aio.ensure_future(self._get_xml(url))
async def _get_xml(self,url):
base_url = url.rsplit('/', 1)[0]
xml = await requests_get(url)
self._config = deviceParser.parseString(xml.raw_body).device
sl = self._config.serviceList
for svc in sl.service:
svcname = svc.get_serviceType().split(':')[-2]
service = Service(svc, base_url)
await service.initialized
service.eventSubURL = base_url + svc.get_eventSubURL()
self.services[svcname] = service
setattr(self, svcname, service)
fut = self.basicevent.GetBinaryState()
await fut
self._state = fut.result()["BinaryState"]
self.initialized.set_result(True)
def register_callback(self,signal,func):
if func is not None:
if signal not in self._callback:
raise UnknownSignal
if not callable(func):
raise NotACallable
self._callback[signal]=func
def _update_state(self, value):
self._state = int(value)
if self._callback["statechange"]:
if aio.iscoroutinefunction(self._callback["statechange"]):
aio.ensure_future(self._callback["statechange"](self))
else:
self._callback["statechange"](self)
def get_state(self, force_update=False):
"""
Returns 0 if off and 1 if on.
"""
if force_update or self._state is None:
xx = self.basicevent.GetBinaryState()
return self._state
def get_service(self, name):
try:
return self.services[name]
except KeyError:
raise UnknownService(name)
def list_services(self):
return self.services.keys()
def ping(self):
try:
self.get_state()
except Exception:
raise DeviceUnreachable(self)
def explain(self,prefix=""):
for name, svc in self.services.items():
print("{}{}".format(prefix, name))
print(prefix+'-' * len(name))
for aname, action in svc.actions.items():
print("%s %s(%s)" % (prefix,aname, ', '.join(action.args)))
print()
@property
def model(self):
return self._config.modelDescription
@property
def name(self):
return self._config.friendlyName
@property
def serialnumber(self):
return self._config.serialNumber
def test():
device = Device("http://10.42.1.102:49152/setup.xml")
print(device.get_service('basicevent').SetBinaryState(BinaryState=1))
if __name__ == "__main__":
test()
| 28.432203 | 76 | 0.614903 | 2,910 | 0.867362 | 0 | 0 | 223 | 0.066468 | 702 | 0.20924 | 253 | 0.07541 |
9b1232a2760be1096b010b97407d362bad15d50f | 2,012 | py | Python | src/lib/localtime.py | RonaldHiemstra/BronartsmeiH | 1ad3838b43abfe9a1f3416334439c8056aa50dde | [
"MIT"
]
| null | null | null | src/lib/localtime.py | RonaldHiemstra/BronartsmeiH | 1ad3838b43abfe9a1f3416334439c8056aa50dde | [
"MIT"
]
| 3 | 2021-03-17T16:05:01.000Z | 2021-05-01T18:47:43.000Z | src/lib/localtime.py | RonaldHiemstra/BronartsmeiH | 1ad3838b43abfe9a1f3416334439c8056aa50dde | [
"MIT"
]
| null | null | null | """File providing localtime support."""
import time
import network
import ntptime
from machine import RTC, reset
from config import Config
system_config = Config('system_config.json')
class Localtime():
"""Synchronized realtime clock using NTP."""
def __init__(self, utc_offset=None):
self.utc_offset = utc_offset or system_config.get('utc_offset')
self.__synced = None
self._sync()
def _sync(self):
try:
ntptime.settime() # Synchronize the system time using NTP
except Exception as ex:
print('ERROR: ntp.settime() failed. err:', ex)
if network.WLAN().isconnected():
reset()
# year, month, day, day_of_week, hour, minute, second, millisecond
datetime_ymd_w_hms_m = list(RTC().datetime())
datetime_ymd_w_hms_m[4] += self.utc_offset
RTC().init(datetime_ymd_w_hms_m)
self.__synced = datetime_ymd_w_hms_m[2]
del datetime_ymd_w_hms_m
def now(self):
"""Retrieve a snapshot of the current time in milliseconds accurate."""
class Now():
"""Class representing a snapshot of the current time."""
def __init__(self):
(self.year, self.mon, self.day, self.dow,
self.hour, self.min, self.sec, self.msec) = RTC().datetime()
self._time = None
def get_time(self) -> float:
"""Convert this time snapshot to a time float value."""
if self._time is None:
self._time = time.mktime([self.year, self.mon, self.day,
self.hour, self.min, self.sec, 0, 0])
# self._time += self.msec / 1000 # float overflow when adding msec :(
return self._time
snapshot = Now()
if snapshot.day != self.__synced and snapshot.hour == 4: # sync every day @ 4am
self._sync()
snapshot = Now()
return snapshot
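# Illustrative usage sketch (MicroPython device assumed; utc_offset is in hours):
#
#     lt = Localtime(utc_offset=2)    # synchronizes the RTC via NTP on construction
#     snap = lt.now()                 # snapshot with .year/.mon/.day/.hour/... fields
#     t = snap.get_time()             # the same snapshot as a time.mktime() float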
| 39.45098 | 92 | 0.578032 | 1,825 | 0.907058 | 0 | 0 | 0 | 0 | 0 | 0 | 531 | 0.263917 |
9b126b83c2c4f4a5775d0727f5ece4feb0b27a5c | 448 | py | Python | accounts/api/urls.py | tejaswari7/JagratiWebApp | e9030f8bd6319a7bb43e036bb7bc43cca01d64a1 | [
"MIT"
]
| 59 | 2019-12-05T13:23:14.000Z | 2021-12-07T13:54:25.000Z | accounts/api/urls.py | tejaswari7/JagratiWebApp | e9030f8bd6319a7bb43e036bb7bc43cca01d64a1 | [
"MIT"
]
| 266 | 2020-09-22T16:22:56.000Z | 2021-10-17T18:13:11.000Z | accounts/api/urls.py | tejaswari7/JagratiWebApp | e9030f8bd6319a7bb43e036bb7bc43cca01d64a1 | [
"MIT"
]
| 213 | 2020-05-20T18:17:21.000Z | 2022-03-06T11:03:42.000Z | from django.urls import path
from . import views
urlpatterns = [
path('register/', views.registration_view, name='api_register'),
path('login/', views.LoginView.as_view(), name='api_login'),
path('complete_profile/', views.complete_profile_view, name='api_complete_profile'),
path('logout/', views.LogoutView.as_view(), name='api_logout'),
path('check_login_status/', views.check_login_status, name='api_check_login_status'),
] | 44.8 | 89 | 0.736607 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.337054 |
9b148edd9574c90b50e4da5fcd67e478a02f6b95 | 8,347 | py | Python | IPython/kernel/multikernelmanager.py | techtonik/ipython | aff23ecf89ba87ee49168d3cecc213bdbc3b06f9 | [
"BSD-3-Clause-Clear"
]
| 1 | 2022-03-13T23:06:43.000Z | 2022-03-13T23:06:43.000Z | IPython/kernel/multikernelmanager.py | andreasjansson/ipython | 09b4311726f46945b936c699f7a6489d74d7397f | [
"BSD-3-Clause-Clear"
]
| null | null | null | IPython/kernel/multikernelmanager.py | andreasjansson/ipython | 09b4311726f46945b936c699f7a6489d74d7397f | [
"BSD-3-Clause-Clear"
]
| 1 | 2020-05-03T10:25:12.000Z | 2020-05-03T10:25:12.000Z | """A kernel manager for multiple kernels
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import os
import uuid
import zmq
from zmq.eventloop.zmqstream import ZMQStream
from IPython.config.configurable import LoggingConfigurable
from IPython.utils.importstring import import_item
from IPython.utils.traitlets import (
Instance, Dict, Unicode, Any, DottedObjectName,
)
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class DuplicateKernelError(Exception):
pass
class MultiKernelManager(LoggingConfigurable):
"""A class for managing multiple kernels."""
kernel_manager_class = DottedObjectName(
"IPython.kernel.blockingkernelmanager.BlockingKernelManager", config=True,
help="""The kernel manager class. This is configurable to allow
subclassing of the KernelManager for customized behavior.
"""
)
def _kernel_manager_class_changed(self, name, old, new):
self.kernel_manager_factory = import_item(new)
kernel_manager_factory = Any(help="this is kernel_manager_class after import")
def _kernel_manager_factory_default(self):
return import_item(self.kernel_manager_class)
context = Instance('zmq.Context')
def _context_default(self):
return zmq.Context.instance()
connection_dir = Unicode('')
_kernels = Dict()
def list_kernel_ids(self):
"""Return a list of the kernel ids of the active kernels."""
# Create a copy so we can iterate over kernels in operations
# that delete keys.
return list(self._kernels.keys())
def __len__(self):
"""Return the number of running kernels."""
return len(self.list_kernel_ids())
def __contains__(self, kernel_id):
return kernel_id in self._kernels
def start_kernel(self, **kwargs):
"""Start a new kernel.
The caller can pick a kernel_id by passing one in as a keyword arg,
otherwise one will be picked using a uuid.
To silence the kernel's stdout/stderr, call this using::
km.start_kernel(stdout=PIPE, stderr=PIPE)
"""
kernel_id = kwargs.pop('kernel_id', unicode(uuid.uuid4()))
if kernel_id in self:
raise DuplicateKernelError('Kernel already exists: %s' % kernel_id)
# kernel_manager_factory is the constructor for the KernelManager
# subclass we are using. It can be configured as any Configurable,
# including things like its transport and ip.
km = self.kernel_manager_factory(connection_file=os.path.join(
self.connection_dir, "kernel-%s.json" % kernel_id),
config=self.config,
)
km.start_kernel(**kwargs)
# start just the shell channel, needed for graceful restart
km.start_channels(shell=True, iopub=False, stdin=False, hb=False)
self._kernels[kernel_id] = km
return kernel_id
def shutdown_kernel(self, kernel_id, now=False):
"""Shutdown a kernel by its kernel uuid.
Parameters
==========
kernel_id : uuid
The id of the kernel to shutdown.
now : bool
Should the kernel be shutdown forcibly using a signal.
"""
k = self.get_kernel(kernel_id)
k.shutdown_kernel(now=now)
k.shell_channel.stop()
del self._kernels[kernel_id]
def shutdown_all(self, now=False):
"""Shutdown all kernels."""
for kid in self.list_kernel_ids():
self.shutdown_kernel(kid, now=now)
def interrupt_kernel(self, kernel_id):
"""Interrupt (SIGINT) the kernel by its uuid.
Parameters
==========
kernel_id : uuid
The id of the kernel to interrupt.
"""
return self.get_kernel(kernel_id).interrupt_kernel()
def signal_kernel(self, kernel_id, signum):
"""Sends a signal to the kernel by its uuid.
Note that since only SIGTERM is supported on Windows, this function
is only useful on Unix systems.
Parameters
==========
kernel_id : uuid
The id of the kernel to signal.
"""
return self.get_kernel(kernel_id).signal_kernel(signum)
def restart_kernel(self, kernel_id):
"""Restart a kernel by its uuid, keeping the same ports.
Parameters
==========
kernel_id : uuid
The id of the kernel to interrupt.
"""
return self.get_kernel(kernel_id).restart_kernel()
def get_kernel(self, kernel_id):
"""Get the single KernelManager object for a kernel by its uuid.
Parameters
==========
kernel_id : uuid
The id of the kernel.
"""
km = self._kernels.get(kernel_id)
if km is not None:
return km
else:
raise KeyError("Kernel with id not found: %s" % kernel_id)
def get_connection_info(self, kernel_id):
"""Return a dictionary of connection data for a kernel.
Parameters
==========
kernel_id : uuid
The id of the kernel.
Returns
=======
connection_dict : dict
A dict of the information needed to connect to a kernel.
This includes the ip address and the integer port
numbers of the different channels (stdin_port, iopub_port,
shell_port, hb_port).
"""
km = self.get_kernel(kernel_id)
return dict(transport=km.transport,
ip=km.ip,
shell_port=km.shell_port,
iopub_port=km.iopub_port,
stdin_port=km.stdin_port,
hb_port=km.hb_port,
)
def _make_url(self, transport, ip, port):
"""Make a ZeroMQ URL for a given transport, ip and port."""
if transport == 'tcp':
return "tcp://%s:%i" % (ip, port)
else:
return "%s://%s-%s" % (transport, ip, port)
def _create_connected_stream(self, kernel_id, socket_type, channel):
"""Create a connected ZMQStream for a kernel."""
cinfo = self.get_connection_info(kernel_id)
url = self._make_url(cinfo['transport'], cinfo['ip'],
cinfo['%s_port' % channel]
)
sock = self.context.socket(socket_type)
self.log.info("Connecting to: %s" % url)
sock.connect(url)
return ZMQStream(sock)
def create_iopub_stream(self, kernel_id):
"""Return a ZMQStream object connected to the iopub channel.
Parameters
==========
kernel_id : uuid
The id of the kernel.
Returns
=======
stream : ZMQStream
"""
iopub_stream = self._create_connected_stream(kernel_id, zmq.SUB, 'iopub')
iopub_stream.socket.setsockopt(zmq.SUBSCRIBE, b'')
return iopub_stream
def create_shell_stream(self, kernel_id):
"""Return a ZMQStream object connected to the shell channel.
Parameters
==========
kernel_id : uuid
The id of the kernel.
Returns
=======
stream : ZMQStream
"""
shell_stream = self._create_connected_stream(kernel_id, zmq.DEALER, 'shell')
return shell_stream
def create_hb_stream(self, kernel_id):
"""Return a ZMQStream object connected to the hb channel.
Parameters
==========
kernel_id : uuid
The id of the kernel.
Returns
=======
stream : ZMQStream
"""
hb_stream = self._create_connected_stream(kernel_id, zmq.REQ, 'hb')
return hb_stream
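# Illustrative usage sketch (trait values such as connection_dir are placeholders):
#
#     mkm = MultiKernelManager(connection_dir='/tmp')
#     kid = mkm.start_kernel()                    # returns the new kernel's uuid
#     shell = mkm.create_shell_stream(kid)        # ZMQStream bound to the shell channel
#     mkm.interrupt_kernel(kid)
#     mkm.shutdown_kernel(kid)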
| 32.103846 | 84 | 0.576614 | 7,260 | 0.869774 | 0 | 0 | 0 | 0 | 0 | 0 | 4,255 | 0.509764 |
9b15d3d976307caf107a8e4d5a8af162262589b1 | 256 | py | Python | python-codes/100-exercises/example11.py | yjwx0017/test | 80071d6b4b83e78282a7607e6311f5c71c87bb3c | [
"MIT"
]
| null | null | null | python-codes/100-exercises/example11.py | yjwx0017/test | 80071d6b4b83e78282a7607e6311f5c71c87bb3c | [
"MIT"
]
| 1 | 2016-09-29T05:34:12.000Z | 2016-09-30T16:26:07.000Z | python-codes/100-exercises/example11.py | yjwx0017/test | 80071d6b4b83e78282a7607e6311f5c71c87bb3c | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Problem (classic): a pair of rabbits produces a new pair every month starting from
# their third month; each new pair does the same from its own third month.
# Assuming no rabbits die, what is the total number of rabbits in each month?
# 2 2 4 6 10 ...
# Print the first 20 months
f1 = 2
f2 = 2
for i in range(1, 20):
print '%d\n%d' % (f1, f2)
f1 = f1 + f2
f2 = f1 + f2
| 17.066667 | 43 | 0.582031 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 311 | 0.754854 |
9b17a28e7a678defe48fd07eac1522b08da41fac | 13,312 | py | Python | light8/Configs/Config.danceshow2002.py | drewp/light9 | ab173a40d095051546e532962f7a33ac502943a6 | [
"MIT"
]
| 2 | 2018-10-05T13:32:46.000Z | 2022-01-01T22:51:20.000Z | light8/Configs/Config.danceshow2002.py | drewp/light9 | ab173a40d095051546e532962f7a33ac502943a6 | [
"MIT"
]
| 4 | 2021-06-08T19:33:40.000Z | 2022-03-11T23:18:06.000Z | light8/Configs/Config.danceshow2002.py | drewp/light9 | ab173a40d095051546e532962f7a33ac502943a6 | [
"MIT"
]
| null | null | null | from __future__ import generators, division
from random import randrange
from time import time
from Subs import *
patch = {
'side l' : 45,
'side r' : 46,
'main 1' : 1,
'main 2' : 2,
'main 3' : 3,
'main 4' : 4,
'main 5' : 5,
'main 6' : 6,
'main 7' : 7,
'main 8' : 8,
'main 9' : 9,
'main 10' : 10,
'center sc' : 20,
'sr sky' : 43,
'blacklight' : 15,
'house':68,
('b0 1 r' ,'b01'):54, # left bank over house
('b0 2 p' ,'b02'):53,
('b0 3 o' ,'b03'):52,
('b0 4 b' ,'b04'):51,
('b0 5 r' ,'b05'):50,
('b0 6 lb','b06'):49,
('b1 1' ,'b11'):55, # mid bank
('b1 2' ,'b12'):56,
('b1 3' ,'b13'):57,
('b1 4' ,'b14'):58,
('b1 5' ,'b15'):59,
('b1 6' ,'b16'):60,
('b2 1 lb','b21'):61, # right bank
('b2 2 r' ,'b22'):62,
('b2 3 b' ,'b23'):63,
('b2 4 o' ,'b24'):64,
('b2 5 p' ,'b25'):65,
('b2 6 r' ,'b26'):66,
}
from util import maxes,scaledict
FL=100
def fulls(chans):
# pass a list or multiple args
return dict([(c,FL) for c in chans])
def levs(chans,levs):
return dict([(c,v) for c,v in zip(chans,levs)])
def blacklight(params, slideradjuster):
params.add_param('nd',CheckboxParam())
while 1:
yield {'blacklight':100*params['nd']}
def strobe(params, slideradjuster):
patterns = {
'blue' : fulls((23,27,31,35,'b0 4 b','b2 3 b')),
'cyc' : {42:FL,43:FL},
'scp all' : fulls((13,16,18,19,39)),
'1-5' : fulls(range(1, 6)),
}
params.add_param('offtime',SliderParam(range=(0.1,0.3), res=0.001,
initial=0.11, length=100))
params.add_param('ontime',SliderParam(range=(0.0,0.8), res=0.001,
length=100))
params.add_param('pattern',ListParam(patterns.keys()))
params.add_param('current',LabelParam('none'))
params.add_param('count',SliderParam(range=(0, 10), res=1, initial=0))
lastchanged = time()
state = 0
blinkcounter = 0
my_pattern = None
while 1:
if params['count'] and blinkcounter > params['count']:
blinkcounter = 0
slideradjuster.set(0)
if params['pattern'] != None:
params['current'] = params['pattern']
my_pattern = params['pattern']
if state == 0:
delay = params['offtime']
else:
delay = params['ontime']
if time() > (lastchanged + delay):
# ready for change
state = not state
lastchanged = time()
blinkcounter += 0.5
try: # protect against keyerrors (and possibly everything else)
if state:
yield patterns[my_pattern]
else:
yield scaledict(patterns[my_pattern], .1)
except:
yield {}
def chase(params, slideradjuster):
patterns = {
'all': ( fulls(('b01','b21')),
fulls(('b02','b22')),
fulls(('b03','b23')),
fulls(('b04','b24')),
fulls(('b05','b25')),
fulls(('b06','b26')),
),
'red':( fulls(('b0 1 r','b2 2 r')),
fulls(('b0 5 r','b2 6 r'))),
'randcol':([fulls((x,)) for x
in ("b21 b23 b25 b03 b06 b24 b22 "+
"b24 b03 b23 b01 b04 b05 b22 "+
"b02 b02 b26 b21 b06 b25 b26 "+
"b01 b04 b05").split()]),
'ctrpong':[fulls((x,)) for x in (
"b11 b12 b13 b14 b15 b16 b15 b14 b13 b12".split())],
'l-r': ( fulls(('b01','b11','b21')),
fulls(('b02','b12','b22')),
fulls(('b03','b13','b23')),
fulls(('b04','b14','b24')),
fulls(('b05','b15','b25')),
fulls(('b06','b16','b26'))),
'flutter':(
fulls(('main 6','b15')),
fulls(('main 1','b12')),
fulls(('main 2','b11')),
fulls(('b12', 'main 3')),
fulls(('b15', 'main 9')),
fulls(('b16', 'main 4')),
fulls(('main 4','b13')),
fulls(('main 3','b11')),
fulls(('main 8','b15')),
fulls(('main 9','b12')),
fulls(('b11', 'main 1')),
fulls(('main 5','b15')),
fulls(('b13', 'main 6')),
fulls(('b14', 'main 2')),
fulls(('main 7','b16')),
),
'randstage':([fulls((x,)) for x
in ("""
b22 27 b04 26 b26 21 28 b25 23 b02 31 b05 32 34 b03 24 b01 25
b23 29 22 35 30 b24 33 36 """).split()]),
}
params.add_param('steptime',SliderParam(range=(.1,3),
initial=.4,length=150))
params.add_param('overlap',SliderParam(range=(0,8),initial=1.5))
params.add_param('pattern',ListParam(options=patterns.keys(),
initial='all'))
params.add_param('current',LabelParam('none'))
steps=()
def fn(x):
warm=.1
# the _/\_ wave for each step. input 0..1, output 0..1
if x<0 or x>1:
return warm
if x<.5:
return warm+(1.0-warm)*(x*2)
else:
return warm+(1.0-warm)*(2-(x*2))
def stepbrightness(stepnum,numsteps,overlap,pos):
startpos = stepnum/numsteps
p=( (pos-startpos)*(1.0+overlap) )%1.0
ret=fn( p )
#print "step %(stepnum)i/%(numsteps)i pos %(pos)f ,p=%(p)f is %(ret)f" % locals()
return ret
queued=[] # list of steps, each step is starttime,stepcue
lastaddtime=time()-100
currentpattern='all'
steps=patterns[currentpattern]
stepsiter=iter(())
while 1:
params['current'] = params['pattern']
# changed pattern?
if params['pattern']!=currentpattern and params['pattern'] in patterns:
currentpattern=params['pattern']
steps=patterns[currentpattern]
stepsiter=iter(steps) # restart iterator
# time to put a new step in the queue?
if time()>lastaddtime+params['steptime']:
lastaddtime=time()
try:
nextstep = stepsiter.next()
except StopIteration:
stepsiter=iter(steps)
nextstep=stepsiter.next()
queued.append( (time(),nextstep) )
# loop over queue, putting still-active (scaled) steps in shiftedsteps
keepers=[]
shiftedsteps=[]
for started,s in queued:
steptime = time()-started
finish = started+(1.0+params['overlap'])*params['steptime']
pos = (time()-started)/(finish-started)
if time()<finish:
keepers.append((started,s))
shiftedsteps.append( scaledict(s,fn(pos)) )
if len(keepers)>30:
print "too many steps in chase - dumping some"
queued=keepers[:20]
else:
queued=keepers
# pos=(time()%params['steptime'])/params['steptime'] # 0..1 animated variable
# shiftedsteps=[]
# for i,s in zip(range(0,len(steps)),steps):
# shiftedsteps.append( scaledict(s, stepbrightness(i,len(steps),params['overlap'],pos)) )
yield maxes(shiftedsteps)
def randomdimmer(params, slideradjuster):
params.add_param('magic', CheckboxParam())
params.add_param('cheese', TextParam())
params.add_param('stuff', ListParam(('a', 'b', 'c')))
curtime = time()
dim = 1
while 4:
if time() - curtime > 1:
dim = randrange(1, 64)
curtime = time()
yield {dim : 100, 20 : params.get_param_value('magic')}
subs = {
'over pit sm' : levs(range(1, 13),(100,0,0,91,77,79,86,55,92,77,59,0)),
'over pit lg' : fulls(range(1, 13)),
('house', 'black') : { 68:100 },
('cyc', 'lightBlue'):{42:FL,43:FL},
('scp hot ctr', 'yellow'):{18:FL},
('scp more', '#AAAA00'):{18:FL,14:FL},
('scp all', '#AAAA00'):fulls((13,16,18,19,39)),
('col oran', '#EEEE99'):fulls((21,25,29,33)),
('col red', 'red'):fulls((24,28,32,36)),
('col red big', 'red'):fulls((24,28,32,36,
'b0 1 r','b0 5 r','b2 2 r','b2 6 r')),
('col blue', 'blue'):fulls((23,27,31,35,'b0 4 b','b2 3 b')),
('col gree', 'green'):fulls((22,26,30,34)),
'sidepost':fulls((45,46)),
'edges':fulls((55,60,49,54,61,66)),
'bank1ctr':fulls(('b12','b13','b14','b15')),
('blacklight', 'purple'):blacklight,
'over pit ctr' : fulls((6,)),
('strobe', 'grey'):strobe,
# 'midstage' : dict([(r, 100) for r in range(11, 21)]),
# 'backstage' : dict([(r, 100) for r in range(21, 31)]),
# 'frontchase' : mr_effect,
'chase' : chase,
'chase2' : chase,
# 'random' : randomdimmer,
}
subs["*10"] = { "14" : 46.000000,
"18" : 46.000000,
"22" : 88.000000,
"23" : 95.000000,
"24" : 19.000000,
"26" : 88.000000,
"27" : 95.000000, "28" : 19.000000,
"30" : 88.000000, "31" : 95.000000,
"32" : 19.000000, "34" : 88.000000,
"35" : 95.000000, "36" : 19.000000,
"b0 5 r" : 7.000000, "b0 4 b" : 95.000000,
"b0 1 r" : 7.000000, "b2 2 r" : 7.000000,
"b2 3 b" : 95.000000, "b2 6 r" : 7.000000, }
subs["*13"] = { "main 1" : 51.0, "main 2" : 51.0, "main 3" : 51.0,
"main 4" : 51.0, "main 5" : 51.0, "main 6" : 51.0,
"main 7" : 51.0, "main 8" : 51.0, "main 9" : 51.0,
"main 10" : 51.0, "11" : 51.0, "12" : 51.0,
"blacklight" : 0.0, "21" : 56.0, "22" : 50.0,
"24" : 51.0, "25" : 56.0, "26" : 50.0, "28" : 51.0,
"29" : 56.0, "30" : 50.0, "32" : 51.0, "33" : 56.0,
"34" : 50.0, "36" : 51.0, "b0 5 r" : 51.0,
"b0 1 r" : 51.0, "b2 2 r" : 51.0, "b2 6 r" : 51.0, }
subs["*16"] = { "main 1" : 54, "main 4" : 49, "main 5" : 41, "main 6" : 43,
"main 7" : 46, "main 8" : 29, "main 9" : 50, "main 10" : 41,
"11" : 32, "13" : 77, "16" : 77, "18" : 77, "19" : 77, "39" : 77,
"42" : 30, "sr sky" : 30,}
subs["*3"] = { "main 1" : 47, "main 2" : 47, "main 3" : 47, "main 4" : 47,
"main 5" : 47, "main 6" : 47, "main 7" : 47, "main 8" : 47, "main 9" : 47,
"main 10" : 47, "11" : 47, "12" : 47, "blacklight" : 0, "21" : 67,
"22" : 69, "23" : 69, "24" : 78, "25" : 67, "26" : 69, "27" : 69,
"28" : 78, "29" : 67, "30" : 69, "31" : 69, "32" : 78, "33" : 67,
"34" : 69, "35" : 69, "36" : 78, "b0 4 b" : 69, "b1 2" : 61,
"b1 3" : 61, "b1 4" : 61, "b1 5" : 61, "b2 3 b" : 69,}
subs["*12"] = { "main 1" : 25, "main 4" : 23, "main 5" : 19, "main 6" : 20,
"main 7" : 22, "main 8" : 14, "main 9" : 23, "main 10" : 19,
"11" : 15, "13" : 36, "16" : 36, "18" : 36, "19" : 36, "22" : 65,
"23" : 100, "24" : 23, "26" : 65, "27" : 100, "28" : 23, "30" : 65,
"31" : 100, "32" : 23, "34" : 65, "35" : 100, "36" : 23, "39" : 36,
"b0 4 b" : 100, "b1 2" : 62, "b1 3" : 62, "b1 4" : 62, "b1 5" : 62,
"b2 3 b" : 100,}
subs["*curtain"] = { "main 4" : 44, "main 5" : 37, "main 6" : 86,
"main 7" : 42, "main 8" : 32, "main 9" : 45, "42" : 41, "sr sky" : 41,
"b0 6 lb" : 27, "b0 1 r" : 27, "b1 1" : 27, "b1 2" : 100, "b1 3" : 100,
"b1 4" : 100, "b1 5" : 100, "b1 6" : 27, "b2 1 lb" : 27, "b2 6 r" : 27,
}
subs["ba outrs"] = fulls("b01 b02 b03 b04 b05 b06 b21 b22 b23 b24 b25 b26".split())
subs["ba some"] = {'b02':40,'b03':FL,'b04':FL,'b05':40,
'b22':40,'b23':FL,'b24':FL,'b25':40,}
subs['*curtain'].update(subs['ba some'])
subs["*2"] = { "main 1" : 77, "main 4" : 70, "main 5" : 59, "main 6" : 61,
"main 7" : 66, "main 8" : 42, "main 9" : 71, "main 10" : 59,
"11" : 45, "24" : 77, "28" : 77, "32" : 77, "36" : 77, "b0 5 r" : 77,
"b0 1 r" : 77, "b2 2 r" : 77, "b2 6 r" : 77,}
subs["*6"] = { "main 1" : 37, "main 4" : 33, "main 5" : 28, "main 6" : 29,
"main 7" : 32, "main 8" : 20, "main 9" : 34, "main 10" : 28,
"11" : 22, "13" : 37, "blacklight" : 0, "16" : 37, "18" : 37,
"19" : 37, "21" : 82, "22" : 82, "23" : 82, "24" : 82, "25" : 82,
"26" : 82, "27" : 82, "28" : 82, "29" : 82, "30" : 82, "31" : 82,
"32" : 82, "33" : 82, "34" : 82, "35" : 82, "36" : 82, "39" : 37,
"b0 5 r" : 82, "b0 4 b" : 82, "b0 1 r" : 82, "b2 2 r" : 82, "b2 3 b" : 82,
"b2 6 r" : 82,}
subs["*8"] = { "13" : 60, "16" : 60, "18" : 60, "19" : 60, "22" : 14,
"23" : 100, "26" : 14, "27" : 100, "30" : 14, "31" : 100, "34" : 14,
"35" : 100, "39" : 60, "b0 6 lb" : 14, "b0 4 b" : 100, "b0 1 r" : 14,
"b1 1" : 14, "b1 2" : 70, "b1 3" : 70, "b1 4" : 70, "b1 5" : 70,
"b1 6" : 14, "b2 1 lb" : 14, "b2 3 b" : 100, "b2 6 r" : 14,}
subs["*5"] = { "main 1" : 81, "main 4" : 74, "main 5" : 62, "main 6" : 64,
"main 7" : 70, "main 8" : 44, "main 9" : 75, "main 10" : 62,
"11" : 48, "21" : 29, "24" : 29, "25" : 29, "28" : 29, "29" : 29,
"32" : 29, "33" : 29, "36" : 29, "42" : 37, "sr sky" : 37, "b0 5 r" : 29,
"b0 4 b" : 72, "b0 3 o" : 72, "b0 2 p" : 29, "b2 2 r" : 29, "b2 3 b" : 72,
"b2 4 o" : 72, "b2 5 p" : 29,}
# ===== File: python/src/scipp/__init__.py | repo: g5t/scipp | license: BSD-3-Clause =====
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2021 Scipp contributors (https://github.com/scipp)
# @file
# @author Simon Heybrock
# flake8: noqa
from . import runtime_config
user_configuration_filename = runtime_config.config_filename
config = runtime_config.load()
del runtime_config
from ._scipp import _debug_
if _debug_:
import warnings
def custom_formatwarning(msg, *args, **kwargs):
return str(msg) + '\n'
warnings.formatwarning = custom_formatwarning
warnings.warn(
'You are running a "Debug" build of scipp. For optimal performance use a "Release" build.'
)
from ._scipp import __version__
# Import classes
from ._scipp.core import Variable, DataArray, Dataset, GroupByDataArray, \
GroupByDataset, Unit
# Import errors
from ._scipp.core import BinEdgeError, BinnedDataError, CoordError, \
DataArrayError, DatasetError, DimensionError, \
DTypeError, NotFoundError, SizeError, SliceError, \
UnitError, VariableError, VariancesError
# Import submodules
from ._scipp.core import units, dtype, buckets, geometry
# Import functions
from ._scipp.core import choose, divide, floor_divide, logical_and, \
logical_or, logical_xor, minus, mod, plus, times
# Import python functions
from .show import show, make_svg
from .table import table
from .plotting import plot
from .extend_units import *
from .html import to_html, make_html
from .object_list import _repr_html_
from ._utils import collapse, slices
from ._utils.typing import is_variable, is_dataset, is_data_array, \
is_dataset_or_array
from .compat.dict import to_dict, from_dict
from .sizes import _make_sizes
# Wrappers for free functions from _scipp.core
from ._bins import *
from ._counts import *
from ._comparison import *
from ._cumulative import *
from ._dataset import *
from ._groupby import *
from ._math import *
from ._operations import *
from ._unary import *
from ._reduction import *
from ._shape import *
from ._trigonometry import *
from ._variable import *
setattr(Variable, '_repr_html_', make_html)
setattr(DataArray, '_repr_html_', make_html)
setattr(Dataset, '_repr_html_', make_html)
from .io.hdf5 import to_hdf5 as _to_hdf5
setattr(Variable, 'to_hdf5', _to_hdf5)
setattr(DataArray, 'to_hdf5', _to_hdf5)
setattr(Dataset, 'to_hdf5', _to_hdf5)
setattr(Variable, 'sizes', property(_make_sizes))
setattr(DataArray, 'sizes', property(_make_sizes))
setattr(Dataset, 'sizes', property(_make_sizes))
from ._bins import _bins, _set_bins, _events
setattr(Variable, 'bins', property(_bins, _set_bins))
setattr(DataArray, 'bins', property(_bins, _set_bins))
setattr(Dataset, 'bins', property(_bins, _set_bins))
setattr(Variable, 'events', property(_events))
setattr(DataArray, 'events', property(_events))
from ._structured import _fields
setattr(
Variable, 'fields',
property(
_fields,
doc=
"""Provides access to fields of structured types such as vectors or matrices."""
))
from ._bins import _groupby_bins
setattr(GroupByDataArray, 'bins', property(_groupby_bins))
setattr(GroupByDataset, 'bins', property(_groupby_bins))
setattr(Variable, 'plot', plot)
setattr(DataArray, 'plot', plot)
setattr(Dataset, 'plot', plot)
# Prevent unwanted conversion to numpy arrays by operations. Properly defining
# __array_ufunc__ should be possible by converting non-scipp arguments to
# variables. The most difficult part is probably mapping the ufunc to scipp
# functions.
for _obj in [Variable, DataArray, Dataset]:
setattr(_obj, '__array_ufunc__', None)
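# --- Added illustrative sketch (not part of scipp): the assignment above is the
# standard numpy opt-out. With __array_ufunc__ set to None, numpy ufuncs refuse
# to coerce the object and raise TypeError instead of silently converting it to
# an ndarray.
def _demo_array_ufunc_opt_out():
    """Show that numpy refuses to apply ufuncs to an opted-out object."""
    import numpy as np
    class _OptedOut:
        __array_ufunc__ = None
    try:
        np.add(np.arange(3), _OptedOut())
    except TypeError:
        return True   # numpy deferred to the operand instead of coercing it
    return False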
# ===== File: SimpleBeep.py | repo: RalphBacon/219-Raspberry-Pi-PICO-Sound-Generation | license: MIT =====
# Import the required 'libraries' for pin definitions and PWM
from machine import Pin, PWM
# Also import a subset for sleep and millisecond sleep. If you just import
# the utime you will have to prefix each call with "utime."
from utime import sleep, sleep_ms
# Define what the buzzer object is - a PWM output on pin 15
buzzer = PWM(Pin(15))
# A list of frequencies
tones = (200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 950, 1000, 1100, 1200, 1400, 1500)
# Define the function to play a single tone then stop
def buzz(freq):
    # Set the frequency
buzzer.freq(freq)
# Set the duty cycle (affects volume)
buzzer.duty_u16(15000);
# Let the sound continue for X milliseconds
sleep_ms(30);
# Now switch the sound off
buzzer.duty_u16(0);
# And delay a small amount (gap between tones)
sleep_ms(20);
# Define a similar function with no delay between tones
def buzz2(freq):
buzzer.freq(freq)
buzzer.duty_u16(15000);
# Now sound the tones, one after the other
for tone in range(len(tones)):
buzz(tones[tone])
# Small gap in SECONDS after the ascending tones
sleep(1)
# Don't do this, it puts the device into Deep Sleep but it reboots on wakeup just
# like the ESP8266
#machine.deepsleep(1)
# Now sound the tones IN REVERSE ORDER ie descending
for tone in range(len(tones) -1, -1, -1):
buzz(tones[tone])
# Another delay
sleep(1)
# Now sound ALL the frequencies from X to Y
for tone in range(500, 2500):
buzz2(tone)
sleep_ms(5)
buzzer.duty_u16(0);
# And repeat in reverse order
for tone in range(2500, 500, -1):
buzz2(tone)
sleep_ms(4)
buzzer.duty_u16(0);
# ===== File: vendor/packages/sqlalchemy/test/orm/test_bind.py | repo: jgmize/kitsune | license: BSD-3-Clause =====
from sqlalchemy.test.testing import assert_raises, assert_raises_message
from sqlalchemy import MetaData, Integer
from sqlalchemy.test.schema import Table
from sqlalchemy.test.schema import Column
from sqlalchemy.orm import mapper, create_session
import sqlalchemy as sa
from sqlalchemy.test import testing
from test.orm import _base
class BindTest(_base.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('test_table', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', Integer))
@classmethod
def setup_classes(cls):
class Foo(_base.BasicEntity):
pass
@classmethod
@testing.resolve_artifact_names
def setup_mappers(cls):
meta = MetaData()
test_table.tometadata(meta)
assert meta.tables['test_table'].bind is None
mapper(Foo, meta.tables['test_table'])
@testing.resolve_artifact_names
def test_session_bind(self):
engine = self.metadata.bind
for bind in (engine, engine.connect()):
try:
sess = create_session(bind=bind)
assert sess.bind is bind
f = Foo()
sess.add(f)
sess.flush()
assert sess.query(Foo).get(f.id) is f
finally:
if hasattr(bind, 'close'):
bind.close()
@testing.resolve_artifact_names
def test_session_unbound(self):
sess = create_session()
sess.add(Foo())
assert_raises_message(
sa.exc.UnboundExecutionError,
('Could not locate a bind configured on Mapper|Foo|test_table '
'or this Session'),
sess.flush)
# ===== File: pdftables/pdf_document.py | repo: tessact/pdftables | license: BSD-2-Clause =====
"""
Backend abstraction for PDFDocuments
"""
import abc
import os
DEFAULT_BACKEND = "poppler"
BACKEND = os.environ.get("PDFTABLES_BACKEND", DEFAULT_BACKEND).lower()
# TODO(pwaller): Use abstract base class?
# What does it buy us? Can we enforce that only methods specified in an ABC
# are used by client code?
class PDFDocument(object):
__metaclass__ = abc.ABCMeta
@classmethod
def get_backend(cls, backend=None):
"""
Returns the PDFDocument class to use based on configuration from
        environment or pdf_document.BACKEND
"""
# If `cls` is not already a subclass of the base PDFDocument, pick one
if not issubclass(cls, PDFDocument):
return cls
if backend is None:
backend = BACKEND
# Imports have to go inline to avoid circular imports with the backends
if backend == "pdfminer":
from pdf_document_pdfminer import PDFDocument as PDFDoc
return PDFDoc
elif backend == "poppler":
from pdf_document_poppler import PDFDocument as PDFDoc
return PDFDoc
raise NotImplementedError("Unknown backend '{0}'".format(backend))
@classmethod
def from_path(cls, path):
Class = cls.get_backend()
return Class(path)
@classmethod
def from_fileobj(cls, fh):
# TODO(pwaller): For now, put fh into a temporary file and call
# .from_path. Future: when we have a working stream input function for
# poppler, use that.
raise NotImplementedError
        # Class = cls.get_backend()  # unreachable sketch; the original referenced a non-existent _get_backend()
# return Class(fh) # This is wrong since constructor now takes a path.
def __init__(self, *args, **kwargs):
raise RuntimeError(
"Don't use this constructor, use a {0}.from_* method instead!"
.format(self.__class__.__name__))
@abc.abstractmethod
def __len__(self):
"""
Return the number of pages in the PDF
"""
@abc.abstractmethod
def get_page(self, number):
"""
Return a PDFPage for page `number` (0 indexed!)
"""
@abc.abstractmethod
def get_pages(self):
"""
Return all pages in the document: TODO(pwaller) move implementation here
"""
class PDFPage(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_glyphs(self):
"""
Obtain a list of bounding boxes (Box instances) for all glyphs
on the page.
"""
@abc.abstractproperty
def size(self):
"""
(width, height) of page
"""
# ===== File: Chapter08/ppo/ppo_kb.py | repo: rwill128/TensorFlow-Reinforcement-Learning-Quick-Start-Guide | license: MIT =====
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import gym
from class_ppo import *
from gym_torcs import TorcsEnv
#----------------------------------------------------------------------------------------
EP_MAX = 2000
EP_LEN = 1000
GAMMA = 0.95
A_LR = 1e-4
C_LR = 1e-4
BATCH = 128
A_UPDATE_STEPS = 10
C_UPDATE_STEPS = 10
S_DIM, A_DIM = 29, 3
METHOD = dict(name='clip', epsilon=0.1)
# train_test = 0 for train; =1 for test
train_test = 0
# irestart = 0 for fresh restart; =1 for restart from ckpt file
irestart = 0
iter_num = 0
if (irestart == 0):
iter_num = 0
#----------------------------------------------------------------------------------------
sess = tf.Session()
ppo = PPO(sess, S_DIM, A_DIM, A_LR, C_LR, A_UPDATE_STEPS, C_UPDATE_STEPS, METHOD)
saver = tf.train.Saver()
env = TorcsEnv(vision=False, throttle=True, gear_change=False)
#----------------------------------------------------------------------------------------
if (train_test == 0 and irestart == 0):
sess.run(tf.global_variables_initializer())
else:
saver.restore(sess, "ckpt/model")
for ep in range(iter_num, EP_MAX):
print("-"*50)
print("episode: ", ep)
if np.mod(ep, 100) == 0:
ob = env.reset(relaunch=True) #relaunch TORCS every N episode because of the memory leak error
else:
ob = env.reset()
s = np.hstack((ob.angle, ob.track, ob.trackPos, ob.speedX, ob.speedY, ob.speedZ, ob.wheelSpinVel/100.0, ob.rpm))
buffer_s, buffer_a, buffer_r = [], [], []
ep_r = 0
for t in range(EP_LEN): # in one episode
a = ppo.choose_action(s)
a[0] = np.clip(a[0],-1.0,1.0)
a[1] = np.clip(a[1],0.0,1.0)
a[2] = np.clip(a[2],0.0,1.0)
#print("a: ", a)
ob, r, done, _ = env.step(a)
s_ = np.hstack((ob.angle, ob.track, ob.trackPos, ob.speedX, ob.speedY, ob.speedZ, ob.wheelSpinVel/100.0, ob.rpm))
if (train_test == 0):
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append(r)
s = s_
ep_r += r
if (train_test == 0):
# update ppo
if (t+1) % BATCH == 0 or t == EP_LEN-1 or done == True:
#if t == EP_LEN-1 or done == True:
v_s_ = ppo.get_v(s_)
discounted_r = []
for r in buffer_r[::-1]:
v_s_ = r + GAMMA * v_s_
discounted_r.append(v_s_)
discounted_r.reverse()
bs = np.array(np.vstack(buffer_s))
ba = np.array(np.vstack(buffer_a))
br = np.array(discounted_r)[:, np.newaxis]
buffer_s, buffer_a, buffer_r = [], [], []
print("ppo update")
ppo.update(bs, ba, br)
#print("screen out: ")
#ppo.screen_out(bs, ba, br)
#print("-"*50)
if (done == True):
break
print('Ep: %i' % ep,"|Ep_r: %i" % ep_r,("|Lam: %.4f" % METHOD['lam']) if METHOD['name'] == 'kl_pen' else '',)
if (train_test == 0):
with open("performance.txt", "a") as myfile:
myfile.write(str(ep) + " " + str(t) + " " + str(round(ep_r,4)) + "\n")
if (train_test == 0 and ep%25 == 0):
saver.save(sess, "ckpt/model")
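# --- Added illustrative sketch (not part of the original script): the update
# block above builds discounted returns by scanning the reward buffer backwards
# from the bootstrap value v(s_). The same computation as a standalone helper:
def discount_rewards(rewards, bootstrap_value, gamma=GAMMA):
    """Return the discounted return for every step of a reward sequence."""
    discounted = []
    running = bootstrap_value
    for reward in reversed(rewards):
        running = reward + gamma * running
        discounted.append(running)
    discounted.reverse()
    return discounted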
# ===== File: test/gui/documentationwidget.py | repo: pySUMO/pysumo | license: BSD-2-Clause =====
""" Test case for the DocumentationWidget """
from tempfile import mkdtemp
from pySUMOQt import MainWindow
import pysumo
import shutil
"""
Steps:
1. Open pySUMO
2. Open Merge.kif
3. Open DocumentationWidget
3a. Switch to the Ontology tab in the DocumentationWidget
4. Type subrelation into the search field
4a. Press Enter
5. Open TextEditor
5a. Select Merge.kif in TextEditor
6. Press one of the links listed under "Merge"
7. Switch to the WordNet tab in the DocumentationWidget
8. Search for 'Object'
9. Search for 'Table'
"""
if __name__ == "__main__":
tmpdir = mkdtemp()
pysumo.CONFIG_PATH = tmpdir
MainWindow.main()
shutil.rmtree(tmpdir, ignore_errors=True)
# ===== File: py/garage/tests/threads/test_executors.py | repo: clchiou/garage | license: MIT =====
import unittest
import threading
from garage.threads import executors
class ExecutorTest(unittest.TestCase):
def test_executor(self):
pool = executors.WorkerPool()
self.assertEqual(0, len(pool))
# No jobs, no workers are hired.
with executors.Executor(pool, 1) as executor:
self.assertEqual(0, len(pool))
self.assertEqual(0, len(pool))
with executors.Executor(pool, 1) as executor:
f1 = executor.submit(sum, (1, 2, 3))
f2 = executor.submit(sum, (4, 5, 6))
self.assertEqual(0, len(pool))
self.assertEqual(6, f1.result())
self.assertEqual(15, f2.result())
self.assertEqual(1, len(pool))
for worker in pool:
self.assertFalse(worker._get_future().done())
def test_shutdown(self):
pool = executors.WorkerPool()
self.assertEqual(0, len(pool))
with executors.Executor(pool, 1) as executor:
f1 = executor.submit(sum, (1, 2, 3))
f2 = executor.submit(sum, (4, 5, 6))
self.assertEqual(0, len(pool))
self.assertEqual(6, f1.result())
self.assertEqual(15, f2.result())
executor.shutdown(wait=False)
# shutdown(wait=False) does not return workers to the pool.
self.assertEqual(0, len(pool))
event = threading.Event()
with executors.Executor(pool, 1) as executor:
executor.submit(event.wait)
executor.shutdown(wait=False)
self.assertFalse(executor._work_queue)
if __name__ == '__main__':
unittest.main()
# ===== File: exporter/BattleRoyal.py | repo: dl-stuff/dl-datamine | license: MIT =====
import os
import json
from tqdm import tqdm
from loader.Database import DBViewIndex, DBView, check_target_path
from exporter.Shared import snakey
from exporter.Adventurers import CharaData
from exporter.Dragons import DragonData
class BattleRoyalCharaSkin(DBView):
def __init__(self, index):
super().__init__(index, "BattleRoyalCharaSkin")
def process_result(self, res, **kwargs):
self.link(res, "_BaseCharaId", "CharaData", full_query=False)
self.index["CharaData"].set_animation_reference(res["_BaseCharaId"])
self.link(res, "_SpecialSkillId", "SkillData", **kwargs)
self.index["ActionParts"].animation_reference
filtered_res = {}
filtered_res["_Id"] = res["_Id"]
for name_key in ("_Name", "_NameJP", "_NameCN"):
filtered_res[name_key] = res["_BaseCharaId"][name_key]
filtered_res["_SpecialSkillId"] = res["_SpecialSkillId"]
return filtered_res
def export_all_to_folder(self, out_dir="./out", ext=".json"):
where = "_SpecialSkillId != 0"
out_dir = os.path.join(out_dir, "_br")
all_res = self.get_all(where=where)
check_target_path(out_dir)
sorted_res = {}
for res in tqdm(all_res, desc="_br"):
res = self.process_result(res)
sorted_res[res["_Id"]] = res
out_name = snakey(f"_chara_skin.json")
output = os.path.join(out_dir, out_name)
with open(output, "w", newline="", encoding="utf-8") as fp:
json.dump(sorted_res, fp, indent=2, ensure_ascii=False, default=str)
class BattleRoyalUnit(DBView):
def __init__(self, index):
super().__init__(index, "BattleRoyalUnit")
@staticmethod
def outfile_name(res, ext=".json"):
c_res = res["_BaseCharaDataId"]
name = "UNKNOWN" if "_Name" not in c_res else c_res["_Name"] if "_SecondName" not in c_res else c_res["_SecondName"]
return f'{res["_Id"]}_{name}{ext}'
def process_result(self, res, **kwargs):
self.link(res, "_BaseCharaDataId", "CharaData", condense=False)
# self.link(res, "_DragonDataId", "DragonData", **kwargs)
self.link(res, "_SkillId", "SkillData", **kwargs)
for ab in range(1, 11):
self.link(res, f"_ItemAbility{ab:02}", "AbilityData", **kwargs)
return res
def export_all_to_folder(self, out_dir="./out", ext=".json"):
out_dir = os.path.join(out_dir, "_br")
super().export_all_to_folder(out_dir, ext)
if __name__ == "__main__":
index = DBViewIndex()
view = BattleRoyalUnit(index)
view.export_all_to_folder()
# ===== File: adonai/user/api/queries.py | repo: Egnod/adonai | license: MIT =====
from .user.queries import UserQuery # isort:skip
from .user_group.queries import UserGroupQuery # isort:skip
# ===== File: test/test_slope_heuristic.py | repo: StatisKit/Core | license: Apache-2.0 =====
import matplotlib
matplotlib.use('Agg')
from statiskit import core
from statiskit.data import core as data
import unittest
from nose.plugins.attrib import attr
@attr(linux=True,
osx=True,
win=True,
level=1)
class TestSlopeHeuristic(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Test multivariate data construction"""
cls._data = data.load('capushe')
@attr(win=False)
def test_slope_heuristic(self):
"""Test slope heuristic"""
sh = core.SlopeHeuristic([pen.value for pen in self._data.pen.events], [-contrast.value for contrast in self._data.contrast.events])
sh.plot()
@classmethod
def tearDownClass(cls):
"""Test multivariate data deletion"""
del cls._data | 25.733333 | 140 | 0.676166 | 544 | 0.704663 | 0 | 0 | 609 | 0.78886 | 0 | 0 | 118 | 0.15285 |
f1966b5ea95fad48b2c50f6ae0e84a62362e0d49 | 688 | py | Python | holteandtalley/test/matToJson.py | garrettdreyfus/HolteAndTalleyMLDPy | baab854ef955664437f04fdc7de100dcc894bbda | [
"MIT"
]
| 18 | 2019-03-07T06:25:58.000Z | 2022-03-07T04:38:36.000Z | holteandtalley/test/matToJson.py | garrettdreyfus/HolteAndTalleyMLDPy | baab854ef955664437f04fdc7de100dcc894bbda | [
"MIT"
]
| null | null | null | holteandtalley/test/matToJson.py | garrettdreyfus/HolteAndTalleyMLDPy | baab854ef955664437f04fdc7de100dcc894bbda | [
"MIT"
]
| 3 | 2020-06-21T23:22:19.000Z | 2022-03-07T05:11:14.000Z | from scipy.io import loadmat
import pickle
mldinfo =loadmat('mldinfo.mat')["mldinfo"]
out={}
print(mldinfo)
for i in mldinfo:
line={}
line["floatNumber"] = i[0]
line["cycleNumber"] = i[26]
line["tempMLTFIT"] = i[27]
line["tempMLTFITIndex"] = i[28]
line["densityMLTFIT"] = i[30]
line["salinityMLTFIT"] = i[31]
line["steepest"] = i[29]
line["tempAlgo"] = i[4]
line["salinityAlgo"] = i[8]
line["densityAlgo"] = i[9]
line["tempThreshold"] = i[13]
line["densityThreshold"] = i[17]
line["tempGradient"] = i[21]
line["densityGradient"] = i[22]
out[i[0],i[26]]=line
with open("matOutput.pickle","wb") as f:
pickle.dump(out,f)
| 25.481481 | 42 | 0.604651 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 241 | 0.350291 |
f19839bccee38959af0b437965974c79d3cf702f | 1,578 | py | Python | natlas-server/natlas-db.py | purplesecops/natlas | 74edd7ba9b5c265ec06dfdb3f7ee0b38751e5ef8 | [
"Apache-2.0"
]
| 500 | 2018-09-27T17:28:11.000Z | 2022-03-30T02:05:57.000Z | natlas-server/natlas-db.py | purplesecops/natlas | 74edd7ba9b5c265ec06dfdb3f7ee0b38751e5ef8 | [
"Apache-2.0"
]
| 888 | 2018-09-20T05:04:46.000Z | 2022-03-28T04:11:22.000Z | natlas-server/natlas-db.py | purplesecops/natlas | 74edd7ba9b5c265ec06dfdb3f7ee0b38751e5ef8 | [
"Apache-2.0"
]
| 79 | 2019-02-13T19:49:21.000Z | 2022-02-27T16:39:04.000Z | #!/usr/bin/env python
"""
This is a special app instance that allows us to perform database operations
without going through the app's migration_needed check. Running this script
is functionally equivalent to what `flask db` normally does. The reason we
can't continue to use that is that command is that it invokes the app instance from
FLASK_APP env variable (natlas-server.py) which performs the migration check and exits
during initialization.
"""
import argparse
from app import create_app
from config import Config
from migrations import migrator
parser_desc = """Perform database operations for Natlas.\
It is best practice to take a backup of your database before you upgrade or downgrade, just in case something goes wrong.\
"""
def main():
parser = argparse.ArgumentParser(description=parser_desc)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"--upgrade",
action="store_true",
help="Perform a database upgrade, if necessary",
)
group.add_argument(
"--downgrade",
action="store_true",
help="Revert the most recent database upgrade. Danger: This will destroy data as necessary to revert to the previous version.",
)
args = parser.parse_args()
config = Config()
app = create_app(config, migrating=True)
if args.upgrade:
app.config.update({"DB_AUTO_UPGRADE": True})
migrator.handle_db_upgrade(app)
elif args.downgrade:
migrator.handle_db_downgrade(app)
if __name__ == "__main__":
main()
| 33.574468 | 135 | 0.716096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 882 | 0.558935 |
f19909329b0b6001c89ab80ab88194f8528fba3b | 4,368 | py | Python | ontask/action/forms/crud.py | pinheiroo27/ontask_b | 23fee8caf4e1c5694a710a77f3004ca5d9effeac | [
"MIT"
]
| 33 | 2017-12-02T04:09:24.000Z | 2021-11-07T08:41:57.000Z | ontask/action/forms/crud.py | pinheiroo27/ontask_b | 23fee8caf4e1c5694a710a77f3004ca5d9effeac | [
"MIT"
]
| 189 | 2017-11-16T04:06:29.000Z | 2022-03-11T23:35:59.000Z | ontask/action/forms/crud.py | pinheiroo27/ontask_b | 23fee8caf4e1c5694a710a77f3004ca5d9effeac | [
"MIT"
]
| 30 | 2017-11-30T03:35:44.000Z | 2022-01-31T03:08:08.000Z | # -*- coding: utf-8 -*-
"""Forms to process action related fields.
ActionUpdateForm: Basic form to process the name/description of an action
ActionForm: Inherits from Basic to process name, description and type
ActionDescriptionForm: Inherits from basic but process only description (for
surveys)
FilterForm: Form to process filter elements
ConditionForm: Form to process condition elements
"""
from builtins import str
import json
from typing import Dict
from django import forms
from django.utils.translation import ugettext_lazy as _
from ontask import models
from ontask.core import RestrictedFileField
import ontask.settings
class ActionUpdateForm(forms.ModelForm):
"""Basic class to edit name and description."""
def __init__(self, *args, **kwargs):
"""Store user and wokflow."""
self.workflow = kwargs.pop('workflow')
super().__init__(*args, **kwargs)
def clean(self) -> Dict:
"""Verify that the name is not taken."""
form_data = super().clean()
# Check if the name already exists
name_exists = self.workflow.actions.filter(
name=self.data['name'],
).exclude(id=self.instance.id).exists()
if name_exists:
self.add_error(
'name',
_('There is already an action with this name.'),
)
return form_data
class Meta:
"""Select Action and the two fields."""
model = models.Action
fields = ('name', 'description_text')
class ActionForm(ActionUpdateForm):
"""Edit name, description and action type."""
def __init__(self, *args: str, **kargs: str):
"""Adjust widget choices depending on action type."""
super().__init__(*args, **kargs)
at_field = self.fields['action_type']
at_field.widget.choices = [
(key, value)
for key, value in models.Action.AVAILABLE_ACTION_TYPES.items()]
if len(models.Action.AVAILABLE_ACTION_TYPES) == 1:
# There is only one type of action. No need to generate the field.
# Set to value and hide
at_field.widget = forms.HiddenInput()
at_field.initial = models.Action.AVAILABLE_ACTION_TYPES.items(
)[0][0]
class Meta(ActionUpdateForm.Meta):
"""Select action and the three fields."""
model = models.Action
fields = ('name', 'description_text', 'action_type')
class ActionDescriptionForm(forms.ModelForm):
"""Form to edit the description of an action."""
class Meta:
"""Select model and the description field."""
model = models.Action
fields = ('description_text',)
class ActionImportForm(forms.Form):
"""Form to edit information to import an action."""
upload_file = RestrictedFileField(
max_upload_size=int(ontask.settings.MAX_UPLOAD_SIZE),
content_types=json.loads(str(ontask.settings.CONTENT_TYPES)),
allow_empty_file=False,
label=_('File with previously exported OnTask actions'),
help_text=_('File containing a previously exported action'),
)
class RubricCellForm(forms.ModelForm):
"""Edit the content of a RubricCellForm."""
class Meta:
"""Select Action and the two fields."""
model = models.RubricCell
fields = ('description_text', 'feedback_text')
class RubricLOAForm(forms.Form):
"""Edit the levels of attainment of a rubric."""
levels_of_attainment = forms.CharField(
strip=True,
required=True,
label=_('Comma separated list of levels of attainment'))
def __init__(self, *args, **kwargs):
"""Store the criteria."""
self.criteria = kwargs.pop('criteria')
super().__init__(*args, **kwargs)
self.fields['levels_of_attainment'].initial = ', '.join(
self.criteria[0].categories)
def clean(self) -> Dict:
"""Check that the number of LOAs didn't change."""
form_data = super().clean()
current_n_loas = [
loa
for loa in form_data['levels_of_attainment'].split(',')
if loa]
if len(current_n_loas) != len(self.criteria[0].categories):
self.add_error(
'levels_of_attainment',
_('The number of levels cannot change.'))
return form_data
| 29.315436 | 78 | 0.63576 | 3,708 | 0.848901 | 0 | 0 | 0 | 0 | 0 | 0 | 1,617 | 0.370192 |
f199c3663d40296d492582d4c84325e0a23a8f49 | 27,740 | py | Python | Latest/venv/Lib/site-packages/traitsui/value_tree.py | adamcvj/SatelliteTracker | 49a8f26804422fdad6f330a5548e9f283d84a55d | [
"Apache-2.0"
]
| 1 | 2022-01-09T20:04:31.000Z | 2022-01-09T20:04:31.000Z | Latest/venv/Lib/site-packages/traitsui/value_tree.py | adamcvj/SatelliteTracker | 49a8f26804422fdad6f330a5548e9f283d84a55d | [
"Apache-2.0"
]
| 1 | 2022-02-15T12:01:57.000Z | 2022-03-24T19:48:47.000Z | Latest/venv/Lib/site-packages/traitsui/value_tree.py | adamcvj/SatelliteTracker | 49a8f26804422fdad6f330a5548e9f283d84a55d | [
"Apache-2.0"
]
| null | null | null | #------------------------------------------------------------------------------
#
# Copyright (c) 2006, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
# Date: 01/05/2006
#
#------------------------------------------------------------------------------
""" Defines tree node classes and editors for various types of values.
"""
#-------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------
from __future__ import absolute_import
import inspect
from operator import itemgetter
from types import FunctionType, MethodType
from traits.api import Any, Bool, HasPrivateTraits, HasTraits, Instance, List, Str
from .tree_node import ObjectTreeNode, TreeNode, TreeNodeObject
from .editors.tree_editor import TreeEditor
import six
#-------------------------------------------------------------------------
# 'SingleValueTreeNodeObject' class:
#-------------------------------------------------------------------------
class SingleValueTreeNodeObject(TreeNodeObject):
""" A tree node for objects of types that have a single value.
"""
#-------------------------------------------------------------------------
# Trait definitions:
#-------------------------------------------------------------------------
# The parent of this node
parent = Instance(TreeNodeObject)
# Name of the value
name = Str
# User-specified override of the default label
label = Str
# The value itself
value = Any
# Is the value readonly?
readonly = Bool(False)
#-------------------------------------------------------------------------
# Returns whether chidren of this object are allowed or not:
#-------------------------------------------------------------------------
def tno_allows_children(self, node):
""" Returns whether this object can have children (False for this
class).
"""
return False
#-------------------------------------------------------------------------
# Returns whether or not the object has children:
#-------------------------------------------------------------------------
def tno_has_children(self, node):
""" Returns whether the object has children (False for this class).
"""
return False
#-------------------------------------------------------------------------
# Returns whether or not the object's children can be renamed:
#-------------------------------------------------------------------------
def tno_can_rename(self, node):
""" Returns whether the object's children can be renamed (False for
this class).
"""
return False
#-------------------------------------------------------------------------
# Returns whether or not the object's children can be copied:
#-------------------------------------------------------------------------
def tno_can_copy(self, node):
""" Returns whether the object's children can be copied (True for this
class).
"""
return True
#-------------------------------------------------------------------------
# Returns whether or not the object's children can be deleted:
#-------------------------------------------------------------------------
def tno_can_delete(self, node):
""" Returns whether the object's children can be deleted (False for
this class).
"""
return False
#-------------------------------------------------------------------------
# Returns whether or not the object's children can be inserted (or just
# appended):
#-------------------------------------------------------------------------
def tno_can_insert(self, node):
""" Returns whether the object's children can be inserted (False,
meaning children are appended, for this class).
"""
return False
#-------------------------------------------------------------------------
# Returns the icon for a specified object:
#-------------------------------------------------------------------------
def tno_get_icon(self, node, is_expanded):
""" Returns the icon for a specified object.
"""
return ('@icons:%s_node' % self.__class__.__name__[: -4].lower())
#-------------------------------------------------------------------------
# Sets the label for a specified node:
#-------------------------------------------------------------------------
def tno_set_label(self, node, label):
""" Sets the label for a specified object.
"""
if label == '?':
label = ''
self.label = label
#-------------------------------------------------------------------------
# Gets the label to display for a specified object:
#-------------------------------------------------------------------------
def tno_get_label(self, node):
""" Gets the label to display for a specified object.
"""
if self.label != '':
return self.label
if self.name == '':
return self.format_value(self.value)
return '%s: %s' % (self.name, self.format_value(self.value))
#-------------------------------------------------------------------------
# Returns the formatted version of the value:
#-------------------------------------------------------------------------
def format_value(self, value):
""" Returns the formatted version of the value.
"""
return repr(value)
#-------------------------------------------------------------------------
# Returns the correct node type for a specified value:
#-------------------------------------------------------------------------
def node_for(self, name, value):
""" Returns the correct node type for a specified value.
"""
for type, node in basic_types():
if isinstance(value, type):
break
else:
node = OtherNode
if inspect.isclass(value):
node = ClassNode
elif hasattr(value, '__class__'):
node = ObjectNode
return node(parent=self,
name=name,
value=value,
readonly=self.readonly)
#-------------------------------------------------------------------------
# 'MultiValueTreeNodeObject' class:
#-------------------------------------------------------------------------
class MultiValueTreeNodeObject(SingleValueTreeNodeObject):
""" A tree node for objects of types that have multiple values.
"""
#-------------------------------------------------------------------------
# Returns whether chidren of this object are allowed or not:
#-------------------------------------------------------------------------
def tno_allows_children(self, node):
""" Returns whether this object can have children (True for this class).
"""
return True
#-------------------------------------------------------------------------
# Returns whether or not the object has children:
#-------------------------------------------------------------------------
def tno_has_children(self, node):
""" Returns whether the object has children (True for this class).
"""
return True
#-------------------------------------------------------------------------
# 'StringNode' class:
#-------------------------------------------------------------------------
class StringNode(SingleValueTreeNodeObject):
""" A tree node for strings.
"""
#-------------------------------------------------------------------------
# Returns the formatted version of the value:
#-------------------------------------------------------------------------
def format_value(self, value):
""" Returns the formatted version of the value.
"""
n = len(value)
if len(value) > 80:
value = '%s...%s' % (value[:42], value[-35:])
return '%s [%d]' % (repr(value), n)
#-------------------------------------------------------------------------
# 'NoneNode' class:
#-------------------------------------------------------------------------
class NoneNode(SingleValueTreeNodeObject):
""" A tree node for None values.
"""
pass
#-------------------------------------------------------------------------
# 'BoolNode' class:
#-------------------------------------------------------------------------
class BoolNode(SingleValueTreeNodeObject):
""" A tree node for Boolean values.
"""
pass
#-------------------------------------------------------------------------
# 'IntNode' class:
#-------------------------------------------------------------------------
class IntNode(SingleValueTreeNodeObject):
""" A tree node for integer values.
"""
pass
#-------------------------------------------------------------------------
# 'FloatNode' class:
#-------------------------------------------------------------------------
class FloatNode(SingleValueTreeNodeObject):
""" A tree node for floating point values.
"""
pass
#-------------------------------------------------------------------------
# 'ComplexNode' class:
#-------------------------------------------------------------------------
class ComplexNode(SingleValueTreeNodeObject):
""" A tree node for complex number values.
"""
pass
#-------------------------------------------------------------------------
# 'OtherNode' class:
#-------------------------------------------------------------------------
class OtherNode(SingleValueTreeNodeObject):
""" A tree node for single-value types for which there is not another
node type.
"""
pass
#-------------------------------------------------------------------------
# 'TupleNode' class:
#-------------------------------------------------------------------------
class TupleNode(MultiValueTreeNodeObject):
""" A tree node for tuples.
"""
#-------------------------------------------------------------------------
# Returns the formatted version of the value:
#-------------------------------------------------------------------------
def format_value(self, value):
""" Returns the formatted version of the value.
"""
return 'Tuple(%d)' % len(value)
#-------------------------------------------------------------------------
# Returns whether or not the object has children:
#-------------------------------------------------------------------------
def tno_has_children(self, node):
""" Returns whether the object has children, based on the length of
the tuple.
"""
return (len(self.value) > 0)
#-------------------------------------------------------------------------
# Gets the object's children:
#-------------------------------------------------------------------------
def tno_get_children(self, node):
""" Gets the object's children.
"""
node_for = self.node_for
value = self.value
if len(value) > 500:
return ([node_for('[%d]' % i, x)
for i, x in enumerate(value[: 250])] +
[StringNode(value='...', readonly=True)] +
[node_for('[%d]' % i, x)
for i, x in enumerate(value[-250:])])
return [node_for('[%d]' % i, x) for i, x in enumerate(value)]
#-------------------------------------------------------------------------
# 'ListNode' class:
#-------------------------------------------------------------------------
class ListNode(TupleNode):
""" A tree node for lists.
"""
#-------------------------------------------------------------------------
# Returns the formatted version of the value:
#-------------------------------------------------------------------------
def format_value(self, value):
""" Returns the formatted version of the value.
"""
return 'List(%d)' % len(value)
#-------------------------------------------------------------------------
# Returns whether or not the object's children can be deleted:
#-------------------------------------------------------------------------
def tno_can_delete(self, node):
""" Returns whether the object's children can be deleted.
"""
return (not self.readonly)
#-------------------------------------------------------------------------
# Returns whether or not the object's children can be inserted (or just
# appended):
#-------------------------------------------------------------------------
def tno_can_insert(self, node):
""" Returns whether the object's children can be inserted (vs.
appended).
"""
return (not self.readonly)
#-------------------------------------------------------------------------
# 'SetNode' class:
#-------------------------------------------------------------------------
class SetNode(ListNode):
""" A tree node for sets.
"""
#-------------------------------------------------------------------------
# Returns the formatted version of the value:
#-------------------------------------------------------------------------
def format_value(self, value):
""" Returns the formatted version of the value.
"""
return 'Set(%d)' % len(value)
#-------------------------------------------------------------------------
# 'ArrayNode' class:
#-------------------------------------------------------------------------
class ArrayNode(TupleNode):
""" A tree node for arrays.
"""
#-------------------------------------------------------------------------
# Returns the formatted version of the value:
#-------------------------------------------------------------------------
def format_value(self, value):
""" Returns the formatted version of the value.
"""
return 'Array(%s)' % ','.join([str(n) for n in value.shape])
#-------------------------------------------------------------------------
# 'DictNode' class:
#-------------------------------------------------------------------------
class DictNode(TupleNode):
""" A tree node for dictionaries.
"""
#-------------------------------------------------------------------------
# Returns the formatted version of the value:
#-------------------------------------------------------------------------
def format_value(self, value):
""" Returns the formatted version of the value.
"""
return 'Dict(%d)' % len(value)
#-------------------------------------------------------------------------
# Gets the object's children:
#-------------------------------------------------------------------------
def tno_get_children(self, node):
""" Gets the object's children.
"""
node_for = self.node_for
items = [(repr(k), v) for k, v in self.value.items()]
items.sort(key=itemgetter(0))
if len(items) > 500:
return ([node_for('[%s]' % k, v) for k, v in items[: 250]] +
[StringNode(value='...', readonly=True)] +
[node_for('[%s]' % k, v) for k, v in items[-250:]])
return [node_for('[%s]' % k, v) for k, v in items]
#-------------------------------------------------------------------------
# Returns whether or not the object's children can be deleted:
#-------------------------------------------------------------------------
def tno_can_delete(self, node):
""" Returns whether the object's children can be deleted.
"""
return (not self.readonly)
#-------------------------------------------------------------------------
# 'FunctionNode' class:
#-------------------------------------------------------------------------
class FunctionNode(SingleValueTreeNodeObject):
""" A tree node for functions
"""
#-------------------------------------------------------------------------
# Returns the formatted version of the value:
#-------------------------------------------------------------------------
def format_value(self, value):
""" Returns the formatted version of the value.
"""
return 'Function %s()' % (value.__name__)
#---------------------------------------------------------------------------
# 'MethodNode' class:
#---------------------------------------------------------------------------
class MethodNode(MultiValueTreeNodeObject):
#-------------------------------------------------------------------------
# Returns the formatted version of the value:
#-------------------------------------------------------------------------
def format_value(self, value):
""" Returns the formatted version of the value.
"""
type = 'B'
if value.__self__ is None:
type = 'Unb'
return '%sound method %s.%s()' % (
type,
value.__self__.__class__.__name__,
value.__func__.__name__)
#-------------------------------------------------------------------------
# Returns whether or not the object has children:
#-------------------------------------------------------------------------
def tno_has_children(self, node):
""" Returns whether the object has children.
"""
return (self.value.__func__ is not None)
#-------------------------------------------------------------------------
# Gets the object's children:
#-------------------------------------------------------------------------
def tno_get_children(self, node):
""" Gets the object's children.
"""
return [self.node_for('Object', self.value.__self__)]
#-------------------------------------------------------------------------
# 'ObjectNode' class:
#-------------------------------------------------------------------------
class ObjectNode(MultiValueTreeNodeObject):
""" A tree node for objects.
"""
#-------------------------------------------------------------------------
# Returns the formatted version of the value:
#-------------------------------------------------------------------------
def format_value(self, value):
""" Returns the formatted version of the value.
"""
try:
klass = value.__class__.__name__
except:
klass = '???'
return '%s(0x%08X)' % (klass, id(value))
#-------------------------------------------------------------------------
# Returns whether or not the object has children:
#-------------------------------------------------------------------------
def tno_has_children(self, node):
""" Returns whether the object has children.
"""
try:
return (len(self.value.__dict__) > 0)
except:
return False
#-------------------------------------------------------------------------
# Gets the object's children:
#-------------------------------------------------------------------------
def tno_get_children(self, node):
""" Gets the object's children.
"""
items = [(k, v) for k, v in self.value.__dict__.items()]
items.sort(key=itemgetter(0))
return [self.node_for('.' + k, v) for k, v in items]
#-------------------------------------------------------------------------
# 'ClassNode' class:
#-------------------------------------------------------------------------
class ClassNode(ObjectNode):
""" A tree node for classes.
"""
#-------------------------------------------------------------------------
# Returns the formatted version of the value:
#-------------------------------------------------------------------------
def format_value(self, value):
""" Returns the formatted version of the value.
"""
return value.__name__
#-------------------------------------------------------------------------
# 'TraitsNode' class:
#-------------------------------------------------------------------------
class TraitsNode(ObjectNode):
""" A tree node for traits.
"""
#-------------------------------------------------------------------------
# Returns whether or not the object has children:
#-------------------------------------------------------------------------
def tno_has_children(self, node):
""" Returns whether the object has children.
"""
return (len(self._get_names()) > 0)
#-------------------------------------------------------------------------
# Gets the object's children:
#-------------------------------------------------------------------------
def tno_get_children(self, node):
""" Gets the object's children.
"""
names = sorted(self._get_names())
value = self.value
node_for = self.node_for
nodes = []
for name in names:
try:
item_value = getattr(value, name, '<unknown>')
except Exception as excp:
item_value = '<%s>' % excp
nodes.append(node_for('.' + name, item_value))
return nodes
#-------------------------------------------------------------------------
# Gets the names of all defined traits/attributes:
#-------------------------------------------------------------------------
def _get_names(self):
""" Gets the names of all defined traits or attributes.
"""
value = self.value
names = {}
for name in value.trait_names(type=lambda x: x != 'event'):
names[name] = None
for name in value.__dict__.keys():
names[name] = None
return list(names.keys())
#-------------------------------------------------------------------------
# Sets up/Tears down a listener for 'children replaced' on a specified
# object:
#-------------------------------------------------------------------------
def tno_when_children_replaced(self, node, listener, remove):
""" Sets up or removes a listener for children being replaced on a
specified object.
"""
self._listener = listener
self.value.on_trait_change(self._children_replaced, remove=remove,
dispatch='ui')
def _children_replaced(self):
self._listener(self)
#-------------------------------------------------------------------------
# Sets up/Tears down a listener for 'children changed' on a specified
# object:
#-------------------------------------------------------------------------
def tno_when_children_changed(self, node, listener, remove):
""" Sets up or removes a listener for children being changed on a
specified object.
"""
pass
#-------------------------------------------------------------------------
# 'RootNode' class:
#-------------------------------------------------------------------------
class RootNode(MultiValueTreeNodeObject):
""" A root node.
"""
#-------------------------------------------------------------------------
# Returns the formatted version of the value:
#-------------------------------------------------------------------------
def format_value(self, value):
""" Returns the formatted version of the value.
"""
return ''
#-------------------------------------------------------------------------
# Gets the object's children:
#-------------------------------------------------------------------------
def tno_get_children(self, node):
""" Gets the object's children.
"""
return [self.node_for('', self.value)]
#-------------------------------------------------------------------------
# Define the mapping of object types to nodes:
#-------------------------------------------------------------------------
_basic_types = None
def basic_types():
global _basic_types
if _basic_types is None:
# Create the mapping of object types to nodes:
_basic_types = [
(type(None), NoneNode),
(str, StringNode),
(six.text_type, StringNode),
(bool, BoolNode),
(int, IntNode),
(float, FloatNode),
(complex, ComplexNode),
(tuple, TupleNode),
(list, ListNode),
(set, SetNode),
(dict, DictNode),
(FunctionType, FunctionNode),
(MethodType, MethodNode),
(HasTraits, TraitsNode)
]
try:
from numpy import array
_basic_types.append((type(array([1])), ArrayNode))
except ImportError:
pass
return _basic_types
#-------------------------------------------------------------------------
# '_ValueTree' class:
#-------------------------------------------------------------------------
class _ValueTree(HasPrivateTraits):
#-------------------------------------------------------------------------
# Trait definitions:
#-------------------------------------------------------------------------
# List of arbitrary Python values contained in the tree:
values = List(SingleValueTreeNodeObject)
#-------------------------------------------------------------------------
# Defines the value tree editor(s):
#-------------------------------------------------------------------------
# Nodes in a value tree:
value_tree_nodes = [
ObjectTreeNode(
node_for=[NoneNode, StringNode, BoolNode, IntNode, FloatNode,
ComplexNode, OtherNode, TupleNode, ListNode, ArrayNode,
DictNode, SetNode, FunctionNode, MethodNode, ObjectNode,
TraitsNode, RootNode, ClassNode])
]
# Editor for a value tree:
value_tree_editor = TreeEditor(
auto_open=3,
hide_root=True,
editable=False,
nodes=value_tree_nodes
)
# Editor for a value tree with a root:
value_tree_editor_with_root = TreeEditor(
auto_open=3,
editable=False,
nodes=[
ObjectTreeNode(
node_for=[NoneNode, StringNode, BoolNode, IntNode, FloatNode,
ComplexNode, OtherNode, TupleNode, ListNode, ArrayNode,
DictNode, SetNode, FunctionNode, MethodNode,
ObjectNode, TraitsNode, RootNode, ClassNode]
),
TreeNode(node_for=[_ValueTree],
auto_open=True,
children='values',
move=[SingleValueTreeNodeObject],
copy=False,
label='=Values',
icon_group='traits_node',
icon_open='traits_node')
]
)
#-------------------------------------------------------------------------
# Defines a 'ValueTree' trait:
#-------------------------------------------------------------------------
# Trait for a value tree:
ValueTree = Instance(_ValueTree, (), editor=value_tree_editor_with_root)
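# --- Added illustrative sketch (not part of the original module): basic_types()
# maps plain Python values to the node classes above, and node_for() uses that
# mapping to wrap children, so a RootNode around an arbitrary value yields the
# matching node type.
def _example_wrap_value(value=None):
    """Wrap an arbitrary Python value in the matching value-tree node."""
    if value is None:
        value = {'answer': 42, 'items': [1, 2, 3]}
    root = RootNode(name='', value=value)
    return root.tno_get_children(None)   # -> [DictNode wrapping the dict]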
# ===== File: apps/events/tests/admin_tests.py | repo: Kpaubert/onlineweb4 | license: MIT =====
from django.contrib.auth.models import Group
from django.test import TestCase
from django.urls import reverse, reverse_lazy
from django_dynamic_fixture import G
from apps.authentication.models import OnlineUser
from ..constants import EventType
from ..models import Event
from .utils import (
add_event_permissions,
add_to_group,
create_committee_group,
generate_event,
)
EVENTS_ADMIN_LIST_URL = reverse_lazy("admin:events_event_changelist")
EVENTS_DASHBOARD_INDEX_URL = reverse_lazy("dashboard_events_index")
def event_admin(event: Event) -> str:
return reverse("admin:events_event_change", args=(event.id,))
def attendance_list(event: Event) -> str:
return reverse("event_attendees_pdf", args=(event.id,))
def event_dashboard(event: Event) -> str:
return reverse("dashboard_events_edit", args=(event.id,))
class EventAdminTestCase(TestCase):
def setUp(self):
self.admin_group = create_committee_group(G(Group, name="Arrkom"))
self.other_group: Group = G(Group, name="Buddy")
add_event_permissions(self.admin_group)
self.user: OnlineUser = G(OnlineUser)
self.client.force_login(self.user)
self.event = generate_event(EventType.SOSIALT, organizer=self.admin_group)
# General committee members should not be able to access event admin pages.
self.expected_resp_code_own_django = 302
self.expected_resp_code_own_dashboard = 403
def test_view_event_list_admin(self):
resp = self.client.get(EVENTS_ADMIN_LIST_URL)
self.assertEqual(self.expected_resp_code_own_django, resp.status_code)
def test_view_event_detail_admin(self):
resp = self.client.get(event_admin(self.event))
self.assertEqual(self.expected_resp_code_own_django, resp.status_code)
def test_view_event_attendance_list(self):
resp = self.client.get(attendance_list(self.event))
self.assertEqual(self.expected_resp_code_own_django, resp.status_code)
def test_view_event_list_dashboard(self):
resp = self.client.get(EVENTS_DASHBOARD_INDEX_URL)
self.assertEqual(self.expected_resp_code_own_dashboard, resp.status_code)
def test_view_event_detail_dashboard(self):
resp = self.client.get(event_dashboard(self.event))
self.assertEqual(self.expected_resp_code_own_dashboard, resp.status_code)
class EventAdminGroupTestCase(EventAdminTestCase):
def setUp(self):
super().setUp()
self.event = generate_event(EventType.SOSIALT, organizer=self.admin_group)
add_to_group(self.admin_group, self.user)
self.expected_resp_code_own_django = 200
self.expected_resp_code_own_dashboard = 200
def test_cannot_view_event_attendance_list_for_bedkom(self):
event = generate_event()
resp = self.client.get(attendance_list(event))
self.assertEqual(302, resp.status_code)
def test_cannot_view_event_detail_admin_for_bedkom(self):
event = generate_event(EventType.BEDPRES, organizer=self.other_group)
resp = self.client.get(event_admin(event))
self.assertEqual(302, resp.status_code)
def test_cannot_view_event_detail_dashboard_for_bedkom(self):
event = generate_event(EventType.BEDPRES, organizer=self.other_group)
resp = self.client.get(event_dashboard(event))
self.assertEqual(403, resp.status_code)
| 33.009709 | 83 | 0.746471 | 2,549 | 0.749706 | 0 | 0 | 0 | 0 | 0 | 0 | 216 | 0.063529 |
f19a9b4226505a42ffa94930bb3319c14ebc1a93 | 359 | py | Python | pyuvs/l1b/_files.py | kconnour/maven-iuvs | fc6ff5d6b7799c78b2ccf34e4316fc151ec87ee8 | [
"BSD-3-Clause"
]
| null | null | null | pyuvs/l1b/_files.py | kconnour/maven-iuvs | fc6ff5d6b7799c78b2ccf34e4316fc151ec87ee8 | [
"BSD-3-Clause"
]
| null | null | null | pyuvs/l1b/_files.py | kconnour/maven-iuvs | fc6ff5d6b7799c78b2ccf34e4316fc151ec87ee8 | [
"BSD-3-Clause"
]
| null | null | null | from pyuvs.files import DataFilenameCollection
class L1bDataFilenameCollection:
def __init__(self, files: DataFilenameCollection):
self.__files = files
def __raise_value_error_if_not_all_l1b(self) -> None:
if not self.__files.all_l1b():
message = 'Some files are not all level 1b.'
raise ValueError(message)
| 29.916667 | 57 | 0.70195 | 309 | 0.860724 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.094708 |
f19aa91679864846081cef43f5707f10afbe079f | 9,380 | py | Python | instagram.py | Breizhux/picture-dl | 3e2bfa590097db56d3326a4aa36d0dd37c1bacc3 | [
"Unlicense"
]
| null | null | null | instagram.py | Breizhux/picture-dl | 3e2bfa590097db56d3326a4aa36d0dd37c1bacc3 | [
"Unlicense"
]
| 2 | 2019-09-06T12:19:18.000Z | 2019-09-06T15:21:36.000Z | instagram.py | Breizhux/picture-dl | 3e2bfa590097db56d3326a4aa36d0dd37c1bacc3 | [
"Unlicense"
]
| null | null | null | # coding: utf-8
import urllib2
import message_box
from ast import literal_eval
class InfoExtractor():
""" Extractor Information class for Instagram
Instagram InfoExtractor that, given url, extract information about the
image (or images) the URL refers to. This information includes the real
image URL, the image title, author and others. The information is stored
in a list of dictionary."""
def __init__(self, url, verbose) :
self.url = url
# self.print_ = message_box.print_(verbose)
self.result_list = []
self.raw_informations = None
self.info_dictionary = {
'username' : None,
'author' : None,
'profile_url' : None,
'is_several_images' : False,
'id' : None,
'title' : None,
'format' : ".jpg", #all images from instagram are jpg
'description' : None,
'comments' : None,
'date' : None,
'localization' : None,
'real_urls_and_dimensions' : [], # list of urls and dimensions(W-H),
'like_nb' : None,} # ex : [["url1", 1080, 1080],["url2", 640, 640]]
def get_informations(self) :
self.raw_informations = self.download_webpage_informations(self.url) #type dictionary
if self.get_type_link(self.raw_informations) == "post" :
self.get_information_single_image(self.raw_informations)
return self.result_list
elif self.get_type_link(self.raw_informations) == "account" :
self.get_information_many_images(self.raw_informations)
return self.result_list
elif self.get_type_link(self.raw_informations) == "tagpage" :
self.get_informations_tagpage_images(self.raw_informations)
return self.result_list
else : return "Invalid url"
def download_webpage_informations(self, url) :
""" Return the dictionary of image(s) and account informations. """
request = urllib2.Request(url)
fh = urllib2.urlopen(request)
source_code = fh.read()
source_code = source_code[
source_code.index('<script type="text/javascript">window._sharedData = ')+52:
source_code.index(';</script>\n<script type="text/javascript">window.__initialDataLoaded(window._sharedData);</script>')]
source_code = source_code.replace("false", "False")
source_code = source_code.replace("true", "True")
source_code = source_code.replace("null", "None")
dict_of_information = literal_eval(source_code)
return dict_of_information
def get_type_link(self, webpage_info) :
""" Return type url from Instagram : many images (acount) or single image
or undeterminate. The determination find if in the dictionary of source
code of page exist the ['entry_data']['PostPage'] keys (simple post) or
['entry_data']['ProfilePage'] keys exists (account url)"""
webpage_info = webpage_info['entry_data']
if webpage_info.has_key('PostPage') :
return "post"
elif webpage_info.has_key('ProfilePage') :
self.info_dictionary['is_several_images'] = True
return "account"
elif webpage_info.has_key('TagPage') :
self.info_dictionary['is_several_images'] = True
return "tagpage"
else :
return "undeterminate"
def get_information_single_image(self, raw_informations) :
""" Complete the dictionary with information of code source webpage.
The result is locate in a list of result (result list) in the form
of dictionary."""
webpage_info = raw_informations['entry_data']['PostPage'][0]['graphql']['shortcode_media']
self.info_dictionary['username'] = webpage_info['owner']['username']
self.info_dictionary['author'] = webpage_info['owner']['full_name']
self.info_dictionary['profile_url'] = webpage_info['owner']['profile_pic_url']
self.info_dictionary['id'] = webpage_info['shortcode']
title, description = self.get_title_and_description(webpage_info)
self.info_dictionary['title'] = title
self.info_dictionary['description'] = description
self.info_dictionary['comments'] = webpage_info['edge_media_to_comment']
self.info_dictionary['localization'] = webpage_info['localization']
for i in webpage_info['display_resources'] :
self.info_dictionary['real_urls_and_dimensions'].append([
i['src'],
i['config_width'],
i['config_height']])
#self.info_dictionary["sizes"]
self.info_dictionary['like_nb'] = webpage_info['edge_media_preview_like']['count']
self.complete_result_list()
def get_information_many_images(self, raw_informations) :
""" Complete dictionary and put this in result list at the rate of
one dictionary per image. The dictionary is reset at each loop of
research information for one image. """
webpage_info = raw_informations['entry_data']['ProfilePage'][0]['graphql']['user']
for i in webpage_info['edge_owner_to_timeline_media']['edges'] :
self.info_dictionary['username'] = webpage_info['username']
self.info_dictionary['author'] = webpage_info['full_name']
self.info_dictionary['profile_url'] = webpage_info['profile_pic_url_hd']
self.info_dictionary['id'] = i['node']['shortcode']
title, description = self.get_title_and_description(i['node'])
self.info_dictionary['title'] = title
self.info_dictionary['description'] = description
self.info_dictionary['comments'] = i['node']['edge_media_to_comment']
self.info_dictionary['localization'] = i['node']['localization']
for j in i['node']['thumbnail_resources'] :
self.info_dictionary['real_urls_and_dimensions'].append([
j['src'],
j['config_width'],
j['config_height']])
self.info_dictionary['real_urls_and_dimensions'].append([
i['node']['display_url'],
i['node']['dimensions']['width'],
i['node']['dimensions']['height']])
self.info_dictionary['like_nb'] = i['node']['edge_liked_by']['count']
self.complete_result_list()
def get_informations_tagpage_images(self, raw_informations) :
""" Complete dictionary and put this in result list at the rate of
one dictionary per image. The dictionary is reset at each loop of
research information for one image. """
webpage_info = raw_informations['entry_data']['TagPage'][0]['graphql']['hashtag']
for i in webpage_info['edge_hashtag_to_media']['edges'] :
self.info_dictionary['username'] = webpage_info['name']
self.info_dictionary['author'] = webpage_info['name']
self.info_dictionary['id'] = i['node']['shortcode']
title, description = self.get_title_and_description(i['node'])
self.info_dictionary['title'] = title
self.info_dictionary['description'] = description
self.info_dictionary['comments'] = i['node']['edge_media_to_comment']
for j in i['node']['thumbnail_resources'] :
self.info_dictionary['real_urls_and_dimensions'].append([
j['src'],
j['config_width'],
j['config_height']])
self.info_dictionary['real_urls_and_dimensions'].append([
i['node']['display_url'],
i['node']['dimensions']['width'],
i['node']['dimensions']['height']])
self.info_dictionary['like_nb'] = i['node']['edge_liked_by']['count']
self.complete_result_list()
def get_title_and_description(self, webpage_info) :
""" Return a title for image with description of image.
if description don't exists, it can't found title.
The title is crop to the first caracter found : [#,.,!,?,\n]"""
if len(webpage_info['edge_media_to_caption']['edges']) == 0 :
return "No title :(", "Because no description..."
description = webpage_info['edge_media_to_caption']['edges'][0]['node']['text']
end_title = ["#", ".", "!", "?", "\n"]
i = 1
while description[i] not in end_title and i < len(description)-1 : i+=1
title = description[:i] if i < len(description) else "No title found ;("
return title.strip().replace("/","-"), description
def complete_result_list(self) :
""" Copy dictionary to result list, the list of dictionary.
There is one dictionary per image. After append dictionary
in list, clear it. """
self.result_list.append(self.info_dictionary)
self.info_dictionary = {
'username' : None,
'author' : None,
'profile_url' : None,
'is_several_images' : False,
'id' : None,
'title' : None,
'format' : ".jpg",
'description' : None,
'comments' : None,
'date' : None,
'localization' : None,
'real_urls_and_dimensions' : [],
'like_nb' : None,}
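# Illustrative usage of the class above (not part of the original module); the
# URL below is a hypothetical placeholder:
#   extractor = InfoExtractor("https://www.instagram.com/p/SHORTCODE/", verbose=True)
#   results = extractor.get_informations()  # one dict per image, or "Invalid url"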
| 50.430108 | 133 | 0.615032 | 9,298 | 0.991258 | 0 | 0 | 0 | 0 | 0 | 0 | 3,848 | 0.410235 |
f19bffe1d8db01545aa2bac87ec675c56149bef9 | 195 | py | Python | kali/comandosOs.py | NandoDev-lab/AssistenteEmPython | 3d6e7c4abef39154e710e82807d0534586294c1c | [
"MIT"
]
| 1 | 2021-06-30T18:08:42.000Z | 2021-06-30T18:08:42.000Z | kali/comandosOs.py | NandoDev-lab/AssistenteEmPython | 3d6e7c4abef39154e710e82807d0534586294c1c | [
"MIT"
]
| null | null | null | kali/comandosOs.py | NandoDev-lab/AssistenteEmPython | 3d6e7c4abef39154e710e82807d0534586294c1c | [
"MIT"
]
| null | null | null | import sys
import os
import subprocess
import pyautogui
import time
subprocess.run("C:/Windows/system32/cmd.exe")
time.sleep(3)
pyautogui.typewrite("python")
| 8.478261 | 46 | 0.651282 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.189744 |
f19c254391cc08472493c02b34a771daed15156b | 75 | py | Python | main.py | TTRSQ/pip-test | acc81731555f4a3566a76f670fe95d0384ec4ab7 | [
"MIT"
]
| null | null | null | main.py | TTRSQ/pip-test | acc81731555f4a3566a76f670fe95d0384ec4ab7 | [
"MIT"
]
| null | null | null | main.py | TTRSQ/pip-test | acc81731555f4a3566a76f670fe95d0384ec4ab7 | [
"MIT"
]
| null | null | null | # import our own function
import pip_test
if __name__ == '__main__':
pip_test.hello() | 15 | 26 | 0.733333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.431579 |
f19cbc9fa4b054f10523c99c5ea25ef1f89616fb | 26 | py | Python | port/boost/__init__.py | happyxianyu/fxpkg | 6d69f410474e71518cc8c6291892dd069c357c75 | [
"Apache-2.0"
]
| null | null | null | port/boost/__init__.py | happyxianyu/fxpkg | 6d69f410474e71518cc8c6291892dd069c357c75 | [
"Apache-2.0"
]
| null | null | null | port/boost/__init__.py | happyxianyu/fxpkg | 6d69f410474e71518cc8c6291892dd069c357c75 | [
"Apache-2.0"
]
| null | null | null | from .main import MainPort | 26 | 26 | 0.846154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f19e04b462dda85e0bd45e84d17a144a85a0f4c3 | 1,830 | py | Python | tests/test_invenio_s3.py | lnielsen/invenio-s3 | 442136d580ba99b9d1922a9afffa716e62e29ec8 | [
"MIT"
]
| null | null | null | tests/test_invenio_s3.py | lnielsen/invenio-s3 | 442136d580ba99b9d1922a9afffa716e62e29ec8 | [
"MIT"
]
| 19 | 2019-01-23T16:59:55.000Z | 2021-07-30T15:12:27.000Z | tests/test_invenio_s3.py | lnielsen/invenio-s3 | 442136d580ba99b9d1922a9afffa716e62e29ec8 | [
"MIT"
]
| 9 | 2018-10-31T10:40:56.000Z | 2020-12-09T07:44:45.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2018, 2019 Esteban J. G. Gabancho.
#
# Invenio-S3 is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Module tests."""
from __future__ import absolute_import, print_function
from invenio_s3 import InvenioS3
def test_version():
"""Test version import."""
from invenio_s3 import __version__
assert __version__
def test_init(appctx):
"""Test extension initialization."""
assert 'invenio-s3' in appctx.extensions
appctx.config['S3_ENDPOINT_URL'] = 'https://example.com:1234'
appctx.config['S3_REGION_NAME'] = 'eu-west-1'
s3_connection_info = appctx.extensions['invenio-s3'].init_s3fs_info
assert s3_connection_info['client_kwargs'][
'endpoint_url'] == 'https://example.com:1234'
assert s3_connection_info['client_kwargs'][
'region_name'] == 'eu-west-1'
def test_access_key(appctx):
"""Test correct access key works together with flawed one."""
appctx.config['S3_ACCCESS_KEY_ID'] = 'secret'
try:
# Delete the cached value in case it's there already
del appctx.extensions['invenio-s3'].__dict__['init_s3fs_info']
except KeyError:
pass
s3_connection_info = appctx.extensions['invenio-s3'].init_s3fs_info
assert s3_connection_info['key'] == 'secret'
def test_secret_key(appctx):
"""Test correct secret key works together with flawed one."""
appctx.config['S3_SECRECT_ACCESS_KEY'] = 'secret'
try:
# Delete the cached value in case it's there already
del appctx.extensions['invenio-s3'].__dict__['init_s3fs_info']
except KeyError:
pass
s3_connection_info = appctx.extensions['invenio-s3'].init_s3fs_info
assert s3_connection_info['secret'] == 'secret'
| 33.272727 | 72 | 0.701639 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 880 | 0.480874 |
f1a11c9a3c3f708c9cfe435d2e5adfed43004799 | 600 | py | Python | textattack/constraints/pre_transformation/max_word_index_modification.py | cclauss/TextAttack | 98b8d6102aa47bf3c41afedace0215d48f8ed046 | [
"MIT"
]
| 1 | 2021-06-24T19:35:18.000Z | 2021-06-24T19:35:18.000Z | textattack/constraints/pre_transformation/max_word_index_modification.py | 53X/TextAttack | e6a7969abc1e28a2a8a7e2ace709b78eb9dc94be | [
"MIT"
]
| null | null | null | textattack/constraints/pre_transformation/max_word_index_modification.py | 53X/TextAttack | e6a7969abc1e28a2a8a7e2ace709b78eb9dc94be | [
"MIT"
]
| 1 | 2021-11-12T05:26:21.000Z | 2021-11-12T05:26:21.000Z | from textattack.constraints.pre_transformation import PreTransformationConstraint
from textattack.shared.utils import default_class_repr
class MaxWordIndexModification(PreTransformationConstraint):
"""
A constraint disallowing the modification of words which are past some maximum length limit
"""
def __init__(self, max_length):
self.max_length = max_length
def _get_modifiable_indices(self, current_text):
""" Returns the word indices in current_text which are able to be deleted """
return set(range(min(self.max_length, len(current_text.words))))
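# Illustrative behaviour (comment only, not part of the original module): with
# max_length=3 and a 5-word input text, _get_modifiable_indices returns {0, 1, 2},
# i.e. only the first three words may be modified by a transformation.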
| 37.5 | 95 | 0.765 | 460 | 0.766667 | 0 | 0 | 0 | 0 | 0 | 0 | 185 | 0.308333 |
f1a149c6c08f22569c5bb980bf68d3996a092d95 | 2,012 | bzl | Python | bazel/utils/merge_kwargs.bzl | george-enf/enkit | af32fede472f04f77965b972c7ef3008f52c8caf | [
"BSD-3-Clause"
]
| null | null | null | bazel/utils/merge_kwargs.bzl | george-enf/enkit | af32fede472f04f77965b972c7ef3008f52c8caf | [
"BSD-3-Clause"
]
| 1 | 2021-10-01T05:24:29.000Z | 2021-10-01T05:24:29.000Z | bazel/utils/merge_kwargs.bzl | george-enf/enkit | af32fede472f04f77965b972c7ef3008f52c8caf | [
"BSD-3-Clause"
]
| null | null | null | # TODO(jonathan): try to simplify this.
def merge_kwargs(d1, d2, limit = 5):
"""Combine kwargs in a useful way.
merge_kwargs combines dictionaries by inserting keys from d2 into d1. If
the same key exists in both dictionaries:
* if the value is a scalar, d2[key] overrides d1[key].
* if the value is a list, the contents of d2[key] not already in d1[key]
are appended to d1[key].
* if the value is a dict, the sub-dictionaries are merged similarly
        (scalars are overridden, lists are appended).
By default, this function limits recursion to 5 levels. The "limit"
argument can be specified if deeper recursion is needed.
"""
merged = {}
to_expand = [(merged, d1, k) for k in d1] + [(merged, d2, k) for k in d2]
for _ in range(limit):
expand_next = []
for m, d, k in to_expand:
if k not in m:
if type(d[k]) == "list":
m[k] = list(d[k])
continue
if type(d[k]) == "dict":
m[k] = dict(d[k])
continue
# type must be scalar:
m[k] = d[k]
continue
if type(m[k]) == "dict":
expand_next.extend([(m[k], d[k], k2) for k2 in d[k]])
continue
if type(m[k]) == "list":
# uniquify as we combine lists:
for item in d[k]:
if item not in m[k]:
m[k].append(item)
continue
# type must be scalar:
m[k] = d[k]
to_expand = expand_next
if not to_expand:
break
# If <limit> layers of recursion were not enough, explicitly fail.
if to_expand:
fail("merge_kwargs: exceeded maximum recursion limit.")
return merged
def add_tag(k, t):
"""Returns a kwargs dict that ensures tag `t` is present in kwargs["tags"]."""
return merge_kwargs(k, {"tags": [t]})
| 32.983607 | 82 | 0.524851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 935 | 0.464712 |
f1a1a49462e4695e563f4953333c397736ce81f0 | 24,083 | py | Python | remote_sensing_core.py | HNoorazar/KC | 2c78de218ce9dc732da228051fbf4b42badc97ea | [
"MIT"
]
| null | null | null | remote_sensing_core.py | HNoorazar/KC | 2c78de218ce9dc732da228051fbf4b42badc97ea | [
"MIT"
]
| null | null | null | remote_sensing_core.py | HNoorazar/KC | 2c78de218ce9dc732da228051fbf4b42badc97ea | [
"MIT"
]
| null | null | null | # import libraries
import os, os.path
import numpy as np
import pandas as pd
# import geopandas as gpd
import sys
from IPython.display import Image
# from shapely.geometry import Point, Polygon
from math import factorial
import scipy
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from sklearn.linear_model import LinearRegression
from patsy import cr
from datetime import date
import datetime
import time
from pprint import pprint
import matplotlib.pyplot as plt
import seaborn as sb
################################################################
#####
##### Function definitions
#####
################################################################
########################################################################
def addToDF_SOS_EOS_White(pd_TS, VegIdx = "EVI", onset_thresh=0.15, offset_thresh=0.15):
"""
    In this method the NDVI_ratio = (NDVI - NDVI_min) / (NDVI_max - NDVI_min)
is computed.
SOS or onset is when NDVI_ratio exceeds onset-threshold
and EOS is when NDVI_ratio drops below off-set-threshold.
"""
pandaFrame = pd_TS.copy()
VegIdx_min = pandaFrame[VegIdx].min()
VegIdx_max = pandaFrame[VegIdx].max()
VegRange = VegIdx_max - VegIdx_min + sys.float_info.epsilon
colName = VegIdx + "_ratio"
pandaFrame[colName] = (pandaFrame[VegIdx] - VegIdx_min) / VegRange
SOS_candidates = pandaFrame[colName] - onset_thresh
EOS_candidates = offset_thresh - pandaFrame[colName]
BOS, EOS = find_signChange_locs_DifferentOnOffset(SOS_candidates, EOS_candidates)
pandaFrame['SOS'] = BOS * pandaFrame[VegIdx]
pandaFrame['EOS'] = EOS * pandaFrame[VegIdx]
return(pandaFrame)
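# Illustrative call of the function above (comment only; the input frame and
# column names are hypothetical):
#   out = addToDF_SOS_EOS_White(field_TS, VegIdx="NDVI", onset_thresh=0.15, offset_thresh=0.15)
#   # out gains an "NDVI_ratio" column plus "SOS"/"EOS" columns that are non-zero
#   # only where the scaled series crosses the onset/offset thresholds.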
########################################################################
def correct_big_jumps_1DaySeries(dataTMS_jumpie, give_col, maxjump_perDay = 0.015):
"""
in the function correct_big_jumps_preDefinedJumpDays(.) we have
to define the jump_amount and the no_days_between_points.
    For example, if we have a jump of more than 0.4 in less than 20 days, then
that is an outlier detected.
Here we modify the approach to be flexible in the following sense:
if the amount of increase in NDVI is more than #_of_Days * 0.02 then
an outlier is detected and we need interpolation.
0.015 came from the SG based paper that used 0.4 jump in NDVI for 20 days.
That translates into 0.02 = 0.4 / 20 per day.
But we did choose 0.015 as default
"""
dataTMS = dataTMS_jumpie.copy()
dataTMS = initial_clean(df = dataTMS, column_to_be_cleaned = give_col)
dataTMS.sort_values(by=['image_year', 'doy'], inplace=True)
dataTMS.reset_index(drop=True, inplace=True)
dataTMS['system_start_time'] = dataTMS['system_start_time'] / 1000
thyme_vec = dataTMS['system_start_time'].values.copy()
Veg_indks = dataTMS[give_col].values.copy()
time_diff = thyme_vec[1:] - thyme_vec[0:len(thyme_vec)-1]
time_diff_in_days = time_diff / 86400
time_diff_in_days = time_diff_in_days.astype(int)
Veg_indks_diff = Veg_indks[1:] - Veg_indks[0:len(thyme_vec)-1]
jump_indexes = np.where(Veg_indks_diff > maxjump_perDay)
jump_indexes = jump_indexes[0]
jump_indexes = jump_indexes.tolist()
# It is possible that the very first one has a big jump in it.
# we cannot interpolate this. so, lets just skip it.
if len(jump_indexes) > 0:
if jump_indexes[0] == 0:
jump_indexes.pop(0)
if len(jump_indexes) > 0:
for jp_idx in jump_indexes:
if Veg_indks_diff[jp_idx] >= (time_diff_in_days[jp_idx] * maxjump_perDay):
#
# form a line using the adjacent points of the big jump:
#
x1, y1 = thyme_vec[jp_idx-1], Veg_indks[jp_idx-1]
x2, y2 = thyme_vec[jp_idx+1], Veg_indks[jp_idx+1]
# print (x1)
# print (x2)
m = np.float(y2 - y1) / np.float(x2 - x1) # slope
b = y2 - (m*x2) # intercept
# replace the big jump with linear interpolation
Veg_indks[jp_idx] = m * thyme_vec[jp_idx] + b
dataTMS[give_col] = Veg_indks
return(dataTMS)
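# Worked example of the rule above (comment only): with maxjump_perDay = 0.015,
# a rise of 0.4 in the index between two observations 20 days apart exceeds
# 20 * 0.015 = 0.3, so that point is flagged and replaced by a linear
# interpolation between its two neighbours.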
########################################################################
def correct_big_jumps_preDefinedJumpDays(dataTS_jumpy, given_col, jump_amount = 0.4, no_days_between_points=20):
dataTS = dataTS_jumpy.copy()
dataTS = initial_clean(df = dataTS, column_to_be_cleaned = given_col)
dataTS.sort_values(by=['image_year', 'doy'], inplace=True)
dataTS.reset_index(drop=True, inplace=True)
dataTS['system_start_time'] = dataTS['system_start_time'] / 1000
thyme_vec = dataTS['system_start_time'].values.copy()
Veg_indks = dataTS[given_col].values.copy()
time_diff = thyme_vec[1:] - thyme_vec[0:len(thyme_vec)-1]
time_diff_in_days = time_diff / 86400
time_diff_in_days = time_diff_in_days.astype(int)
Veg_indks_diff = Veg_indks[1:] - Veg_indks[0:len(thyme_vec)-1]
    jump_indexes = np.where(Veg_indks_diff > jump_amount)
    jump_indexes = jump_indexes[0].tolist()
    # It is possible that the very first one has a big jump in it.
    # we cannot interpolate this. so, lets just skip it.
    if len(jump_indexes) > 0:
        if jump_indexes[0] == 0:
            jump_indexes.pop(0)
if len(jump_indexes) > 0:
for jp_idx in jump_indexes:
            if time_diff_in_days[jp_idx] >= no_days_between_points:
#
# form a line using the adjacent points of the big jump:
#
x1, y1 = thyme_vec[jp_idx-1], Veg_indks[jp_idx-1]
x2, y2 = thyme_vec[jp_idx+1], Veg_indks[jp_idx+1]
m = np.float(y2 - y1) / np.float(x2 - x1) # slope
b = y2 - (m*x2) # intercept
# replace the big jump with linear interpolation
Veg_indks[jp_idx] = m * thyme_vec[jp_idx] + b
dataTS[given_col] = Veg_indks
return(dataTS)
########################################################################
def initial_clean(df, column_to_be_cleaned):
dt_copy = df.copy()
    # remove the useless system:index column
if ("system:index" in list(dt_copy.columns)):
dt_copy = dt_copy.drop(columns=['system:index'])
    # Drop rows with NA in the column_to_be_cleaned column.
dt_copy = dt_copy[dt_copy[column_to_be_cleaned].notna()]
if (column_to_be_cleaned in ["NDVI", "EVI"]):
#
# 1.5 and -1.5 are just indicators for values that have violated the boundaries.
#
dt_copy.loc[dt_copy[column_to_be_cleaned] > 1, column_to_be_cleaned] = 1.5
dt_copy.loc[dt_copy[column_to_be_cleaned] < -1, column_to_be_cleaned] = -1.5
return (dt_copy)
########################################################################
def convert_human_system_start_time_to_systemStart_time(humantimeDF):
epoch_vec = pd.to_datetime(humantimeDF['human_system_start_time']).values.astype(np.int64) // 10 ** 6
    # add 83000000 milliseconds, since system_start_time is 1 day ahead of image_taken_time
# that is recorded in human_system_start_time column.
epoch_vec = epoch_vec + 83000000
humantimeDF['system_start_time'] = epoch_vec
"""
not returning anything does the operation in place.
so, you have to use this function like
convert_human_system_start_time_to_systemStart_time(humantimeDF)
If you do:
humantimeDF = convert_human_system_start_time_to_systemStart_time(humantimeDF)
Then humantimeDF will be nothing, since we are not returning anything.
"""
########################################################################
def add_human_start_time_by_YearDoY(a_Reg_DF):
"""
This function is written for regularized data
where we miss the Epoch time and therefore, cannot convert it to
human_start_time using add_human_start_time() function
Learn:
x = pd.to_datetime(datetime.datetime(2016, 1, 1) + datetime.timedelta(213 - 1))
x
year = 2020
DoY = 185
x = str(date.fromordinal(date(year, 1, 1).toordinal() + DoY - 1))
x
datetime.datetime(2016, 1, 1) + datetime.timedelta(213 - 1)
"""
DF_C = a_Reg_DF.copy()
# DF_C.image_year = DF_C.image_year.astype(float)
DF_C.doy = DF_C.doy.astype(int)
DF_C['human_system_start_time'] = pd.to_datetime(DF_C['image_year'].astype(int) * 1000 + DF_C['doy'], format='%Y%j')
# DF_C.reset_index(drop=True, inplace=True)
# DF_C['human_system_start_time'] = "1"
# for row_no in np.arange(0, len(DF_C)):
# year = DF_C.loc[row_no, 'image_year']
# DoY = DF_C.loc[row_no, 'doy']
# x = str(date.fromordinal(date(year, 1, 1).toordinal() + DoY - 1))
# DF_C.loc[row_no, 'human_system_start_time'] = x
return(DF_C)
########################################################################
########################################################################
########################################################################
#
# Kirti look here
#
# detect passing the threshold
def find_signChange_locs_EqualOnOffset(a_vec):
asign = np.sign(a_vec) # we can drop .values here.
sign_change = ((np.roll(asign, 1) - asign) != 0).astype(int)
"""
    np.sign considers 0 to have its own sign,
different from either positive or negative values.
So:
"""
sz = asign == 0
while sz.any():
asign[sz] = np.roll(asign, 1)[sz]
sz = asign == 0
"""
numpy.roll does a circular shift, so if the last
element has different sign than the first,
the first element in the sign_change array will be 1.
"""
sign_change[0] = 0
"""
# Another solution for sign change:
np.where(np.diff(np.sign(Vector)))[0]
np.where(np.diff(np.sign(Vector)))[0]
"""
return(sign_change)
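# Illustrative example (comment only, not part of the original module):
#   find_signChange_locs_EqualOnOffset(np.array([-1., -2., 3., 4., -5.]))
#   # -> array([0, 0, 1, 0, 1]); each 1 marks the first entry after a sign change.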
def regularize_movingWindow_windowSteps_2Yrs(one_field_df, SF_yr=2017, veg_idxs="NDVI", window_size=10):
#
# This function almost returns a data frame with data
# that are window_size away from each other. i.e. regular space in time.
# **** For **** 5 months + 12 months.
#
a_field_df = one_field_df.copy()
# initialize output dataframe
regular_cols = ['ID', 'Acres', 'county', 'CropGrp', 'CropTyp',
'DataSrc', 'ExctAcr', 'IntlSrD', 'Irrigtn', 'LstSrvD', 'Notes',
'RtCrpTy', 'Shap_Ar', 'Shp_Lng', 'TRS', 'image_year',
'SF_year', 'doy', veg_idxs]
#
# for a good measure we start at 213 (214 does not matter either)
# and the first
#
first_year_steps = list(range(213, 365, 10))
first_year_steps[-1] = 366
full_year_steps = list(range(1, 365, 10))
full_year_steps[-1] = 366
DoYs = first_year_steps + full_year_steps
#
# There are 5 months first and then a full year
# (31+30+30+30+31) + 365 = 517 days. If we do every 10 days
# then we have 51 data points
#
no_days = 517
no_steps = int(no_days/window_size)
regular_df = pd.DataFrame(data = None,
index = np.arange(no_steps),
columns = regular_cols)
regular_df['ID'] = a_field_df.ID.unique()[0]
regular_df['Acres'] = a_field_df.Acres.unique()[0]
regular_df['county'] = a_field_df.county.unique()[0]
regular_df['CropGrp'] = a_field_df.CropGrp.unique()[0]
regular_df['CropTyp'] = a_field_df.CropTyp.unique()[0]
regular_df['DataSrc'] = a_field_df.DataSrc.unique()[0]
regular_df['ExctAcr'] = a_field_df.ExctAcr.unique()[0]
regular_df['IntlSrD'] = a_field_df.IntlSrD.unique()[0]
regular_df['Irrigtn'] = a_field_df.Irrigtn.unique()[0]
regular_df['LstSrvD'] = a_field_df.LstSrvD.unique()[0]
regular_df['Notes'] = str(a_field_df.Notes.unique()[0])
regular_df['RtCrpTy'] = str(a_field_df.RtCrpTy.unique()[0])
regular_df['Shap_Ar'] = a_field_df.Shap_Ar.unique()[0]
regular_df['Shp_Lng'] = a_field_df.Shp_Lng.unique()[0]
regular_df['TRS'] = a_field_df.TRS.unique()[0]
regular_df['SF_year'] = a_field_df.SF_year.unique()[0]
# I will write this in 3 for-loops.
# perhaps we can do it in a cleaner way like using zip or sth.
#
#####################################################
#
# First year (last 5 months of previous year)
#
#
#####################################################
for row_or_count in np.arange(len(first_year_steps)-1):
curr_year = SF_yr - 1
curr_time_window = a_field_df[a_field_df.image_year == curr_year].copy()
curr_time_window = curr_time_window[curr_time_window.doy >= first_year_steps[row_or_count]]
curr_time_window = curr_time_window[curr_time_window.doy < first_year_steps[row_or_count+1]]
"""
        In each time window pick the maximum of present values
If in a window (e.g. 10 days) we have no value observed by Sentinel,
then use -1.5 as an indicator. That will be a gap to be filled. (function fill_theGap_linearLine).
"""
if len(curr_time_window)==0:
regular_df.loc[row_or_count, veg_idxs] = -1.5
else:
regular_df.loc[row_or_count, veg_idxs] = max(curr_time_window[veg_idxs])
regular_df.loc[row_or_count, 'image_year'] = curr_year
regular_df.loc[row_or_count, 'doy'] = first_year_steps[row_or_count]
#############################################
#
# Full year (main year, 12 months)
#
#############################################
row_count_start = len(first_year_steps) - 1
row_count_end = row_count_start + len(full_year_steps) - 1
for row_or_count in np.arange(row_count_start, row_count_end):
curr_year = SF_yr
curr_count = row_or_count - row_count_start
curr_time_window = a_field_df[a_field_df.image_year == curr_year].copy()
curr_time_window = curr_time_window[curr_time_window.doy >= full_year_steps[curr_count]]
curr_time_window = curr_time_window[curr_time_window.doy < full_year_steps[curr_count+1]]
"""
In each time window pick the maximum of present values
If in a window (e.g. 10 days) we have no value observed by Sentinel,
then use -1.5 as an indicator. That will be a gap to be filled (function fill_theGap_linearLine).
"""
if len(curr_time_window)==0:
regular_df.loc[row_or_count, veg_idxs] = -1.5
else:
regular_df.loc[row_or_count, veg_idxs] = max(curr_time_window[veg_idxs])
regular_df.loc[row_or_count, 'image_year'] = curr_year
regular_df.loc[row_or_count, 'doy'] = full_year_steps[curr_count]
return(regular_df)
def extract_XValues_of_2Yrs_TS(regularized_TS, SF_yr):
# old name extract_XValues_of_RegularizedTS_2Yrs().
# I do not know why I had Regularized in it.
# new name extract_XValues_of_2Yrs_TS
"""
Jul 1.
This function is being written since Kirti said
we do not need to have parts of the next year. i.e.
if we are looking at what is going on in a field in 2017,
we only need data since Aug. 2016 till the end of 2017.
We do not need anything in 2018.
"""
X_values_prev_year = regularized_TS[regularized_TS.image_year == (SF_yr - 1)]['doy'].copy().values
X_values_full_year = regularized_TS[regularized_TS.image_year == (SF_yr)]['doy'].copy().values
if check_leap_year(SF_yr - 1):
X_values_full_year = X_values_full_year + 366
else:
X_values_full_year = X_values_full_year + 365
return (np.concatenate([X_values_prev_year, X_values_full_year]))
def regularize_movingWindow_windowSteps_12Months(one_field_df, SF_yr=2017, V_idxs="NDVI", window_size=10):
#
# This function almost returns a data frame with data
# that are window_size away from each other. i.e. regular space in time.
    # copy the field input into the new variable.
a_field_df = one_field_df.copy()
# initialize output dataframe
regular_cols = ['ID', 'Acres', 'county', 'CropGrp', 'CropTyp',
'DataSrc', 'ExctAcr', 'IntlSrD', 'Irrigtn', 'LstSrvD', 'Notes',
'RtCrpTy', 'Shap_Ar', 'Shp_Lng', 'TRS', 'image_year',
'SF_year', 'doy', V_idxs]
full_year_steps = list(range(1, 365, 10)) # [1, 10, 20, 30, ..., 360]
full_year_steps[-1] = 366 # save the last extra 5 (or 6) days.
DoYs = full_year_steps
no_days = 366 # number of days in a year
no_steps = int(no_days/window_size) #
regular_df = pd.DataFrame(data = None,
index = np.arange(no_steps),
columns = regular_cols)
regular_df['ID'] = a_field_df.ID.unique()[0]
regular_df['Acres'] = a_field_df.Acres.unique()[0]
regular_df['county'] = a_field_df.county.unique()[0]
regular_df['CropGrp'] = a_field_df.CropGrp.unique()[0]
regular_df['CropTyp'] = a_field_df.CropTyp.unique()[0]
regular_df['DataSrc'] = a_field_df.DataSrc.unique()[0]
regular_df['ExctAcr'] = a_field_df.ExctAcr.unique()[0]
regular_df['IntlSrD'] = a_field_df.IntlSrD.unique()[0]
regular_df['Irrigtn'] = a_field_df.Irrigtn.unique()[0]
regular_df['LstSrvD'] = a_field_df.LstSrvD.unique()[0]
regular_df['Notes'] = str(a_field_df.Notes.unique()[0])
regular_df['RtCrpTy'] = str(a_field_df.RtCrpTy.unique()[0])
regular_df['Shap_Ar'] = a_field_df.Shap_Ar.unique()[0]
regular_df['Shp_Lng'] = a_field_df.Shp_Lng.unique()[0]
regular_df['TRS'] = a_field_df.TRS.unique()[0]
regular_df['SF_year'] = a_field_df.SF_year.unique()[0]
# I will write this in 3 for-loops.
# perhaps we can do it in a cleaner way like using zip or sth.
#
for row_or_count in np.arange(len(full_year_steps)-1):
curr_year = SF_yr
curr_time_window = a_field_df[a_field_df.image_year == curr_year].copy()
# [1, 10, 20, 30, ..., 350, 366]
curr_time_window = curr_time_window[curr_time_window.doy >= full_year_steps[row_or_count]]
curr_time_window = curr_time_window[curr_time_window.doy < full_year_steps[row_or_count+1]]
if len(curr_time_window)==0: # this means in that time window there is no NDVI value
regular_df.loc[row_or_count, V_idxs] = -1.5 # indicator for missing value
else:
regular_df.loc[row_or_count, V_idxs] = max(curr_time_window[V_idxs])
regular_df.loc[row_or_count, 'image_year'] = curr_year
regular_df.loc[row_or_count, 'doy'] = full_year_steps[row_or_count]
return (regular_df)
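# Illustrative call (comment only; the input frame is hypothetical): one row per
# 10-day window of the chosen year, keeping the maximum V_idxs value in each
# window and -1.5 as a gap marker where no observation exists:
#   reg = regularize_movingWindow_windowSteps_12Months(field_TS, SF_yr=2017, V_idxs="NDVI")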
def fill_theGap_linearLine(regular_TS, V_idx, SF_year):
# regular_TS: is output of function (regularize_movingWindow_windowSteps_12Months)
a_regularized_TS = regular_TS.copy()
if (len(a_regularized_TS.image_year.unique()) == 2):
x_axis = extract_XValues_of_2Yrs_TS(regularized_TS = a_regularized_TS, SF_yr = SF_year)
elif (len(a_regularized_TS.image_year.unique()) == 3):
x_axis = extract_XValues_of_3Yrs_TS(regularized_TS = a_regularized_TS, SF_yr = SF_year)
elif (len(a_regularized_TS.image_year.unique()) == 1):
x_axis = a_regularized_TS["doy"].values.copy()
TS_array = a_regularized_TS[V_idx].copy().values
"""
TS_array[0] = -1.5
TS_array[51] = -1.5
TS_array[52] = -1.5
TS_array[53] = -1.5
TS_array.shape
"""
"""
-1.5 is an indicator of missing values by Sentinel, i.e. a gap.
The -1.5 was used as indicator in the function regularize_movingWindow_windowSteps_2Yrs()
"""
missing_indicies = np.where(TS_array == -1.5)[0]
Notmissing_indicies = np.where(TS_array != -1.5)[0]
#
# Check if the first or last k values are missing
# if so, replace them with proper number and shorten the task
#
left_pointer = Notmissing_indicies[0]
right_pointer = Notmissing_indicies[-1]
if left_pointer > 0:
TS_array[:left_pointer] = TS_array[left_pointer]
if right_pointer < (len(TS_array) - 1):
TS_array[right_pointer:] = TS_array[right_pointer]
#
# update indexes.
#
missing_indicies = np.where(TS_array == -1.5)[0]
Notmissing_indicies = np.where(TS_array != -1.5)[0]
# left_pointer = Notmissing_indicies[0]
stop = right_pointer
right_pointer = left_pointer + 1
missing_indicies = np.where(TS_array == -1.5)[0]
while len(missing_indicies) > 0:
left_pointer = missing_indicies[0] - 1
left_value = TS_array[left_pointer]
right_pointer = missing_indicies[0]
while TS_array[right_pointer] == -1.5:
right_pointer += 1
right_value = TS_array[right_pointer]
if (right_pointer - left_pointer) == 2:
# if there is a single gap, then we have just average of the
# values
# Avoid extra computation!
#
TS_array[left_pointer + 1] = 0.5 * (TS_array[left_pointer] + TS_array[right_pointer])
else:
# form y= ax + b
slope = (right_value - left_value) / (x_axis[right_pointer] - x_axis[left_pointer]) # a
b = right_value - (slope * x_axis[right_pointer])
TS_array[left_pointer+1 : right_pointer] = slope * x_axis[left_pointer+1 : right_pointer] + b
missing_indicies = np.where(TS_array == -1.5)[0]
a_regularized_TS[V_idx] = TS_array
return (a_regularized_TS)
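# Worked example of the gap filling above (comment only): if consecutive window
# values are [0.2, -1.5, -1.5, 0.5], the two -1.5 gap markers are replaced by
# points on the straight line between 0.2 and 0.5 evaluated at their x positions.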
########################################################################
########################################################################
########################################################################
#
# These will not give what we want. It is a 10-days window
# The days are actual days. i.e. between each 2 entry of our
# time series there is already some gap.
#
def add_human_start_time(HDF):
HDF.system_start_time = HDF.system_start_time / 1000
time_array = HDF["system_start_time"].values.copy()
human_time_array = [time.strftime('%Y-%m-%d', time.localtime(x)) for x in time_array]
HDF["human_system_start_time"] = human_time_array
return(HDF)
########################################################################
def check_leap_year(year):
if (year % 4) == 0:
if (year % 100) == 0:
if (year % 400) == 0:
return (True)
else:
return (False)
else:
return (True)
else:
return (False)
########################################################################
def find_difference_date_by_systemStartTime(earlier_day_epoch, later_day_epoch):
#
# Given two epoch time, find the difference between them in number of days
#
early = datetime.datetime.fromtimestamp(earlier_day_epoch)
late = datetime.datetime.fromtimestamp(later_day_epoch)
diff = ( late - early).days
return (diff)
########################################################################
def correct_timeColumns_dataTypes(dtf):
dtf.system_start_time = dtf.system_start_time/1000
dtf = dtf.astype({'doy': 'int', 'image_year': 'int'})
return(dtf)
def keep_WSDA_columns(dt_dt):
needed_columns = ['ID', 'Acres', 'CovrCrp', 'CropGrp', 'CropTyp',
'DataSrc', 'ExctAcr', 'IntlSrD', 'Irrigtn', 'LstSrvD', 'Notes',
'RtCrpTy', 'Shap_Ar', 'Shp_Lng', 'TRS', 'county', 'year']
"""
# Using DataFrame.drop
df.drop(df.columns[[1, 2]], axis=1, inplace=True)
# drop by Name
df1 = df1.drop(['B', 'C'], axis=1)
"""
dt_dt = dt_dt[needed_columns]
return dt_dt
def convert_TS_to_a_row(a_dt):
a_dt = keep_WSDA_columns(a_dt)
a_dt = a_dt.drop_duplicates()
return(a_dt)
def save_matlab_matrix(filename, matDict):
"""
Write a MATLAB-formatted matrix file given a dictionary of
variables.
"""
try:
        from scipy import io as sio  # scipy.io is not imported at module level
        sio.savemat(filename, matDict)
except:
print("ERROR: could not write matrix file " + filename)
| 37.222566 | 120 | 0.614334 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,314 | 0.386746 |
f1a1c6bbb5f8fd9057ce629a8986541e09412fdc | 251 | py | Python | thaniya_server/src/thaniya_server/flask/FlaskFilter_tagsToStr.py | jkpubsrc/Thaniya | 4ebdf2854e3d7888af7396adffa22628b4ab2267 | [
"Apache-1.1"
]
| 1 | 2021-01-20T18:27:22.000Z | 2021-01-20T18:27:22.000Z | thaniya_server/src/thaniya_server/flask/FlaskFilter_tagsToStr.py | jkpubsrc/Thaniya | 4ebdf2854e3d7888af7396adffa22628b4ab2267 | [
"Apache-1.1"
]
| null | null | null | thaniya_server/src/thaniya_server/flask/FlaskFilter_tagsToStr.py | jkpubsrc/Thaniya | 4ebdf2854e3d7888af7396adffa22628b4ab2267 | [
"Apache-1.1"
]
| null | null | null |
from .AbstractFlaskTemplateFilter import AbstractFlaskTemplateFilter
#
# ...
#
class FlaskFilter_tagsToStr(AbstractFlaskTemplateFilter):
def __call__(self, tags:list):
if tags:
return ", ".join(tags)
else:
return ""
#
#
| 7.84375 | 68 | 0.677291 | 151 | 0.601594 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.059761 |
f1a479eb0ca5a8f8bbec21a491ef98b110500e1b | 1,584 | py | Python | python/qisrc/test/test_qisrc_foreach.py | vbarbaresi/qibuild | eab6b815fe0af49ea5c41ccddcd0dff2363410e1 | [
"BSD-3-Clause"
]
| null | null | null | python/qisrc/test/test_qisrc_foreach.py | vbarbaresi/qibuild | eab6b815fe0af49ea5c41ccddcd0dff2363410e1 | [
"BSD-3-Clause"
]
| null | null | null | python/qisrc/test/test_qisrc_foreach.py | vbarbaresi/qibuild | eab6b815fe0af49ea5c41ccddcd0dff2363410e1 | [
"BSD-3-Clause"
]
| null | null | null | # Copyright (c) 2012-2018 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYING file.
def test_qisrc_foreach(qisrc_action, record_messages):
worktree = qisrc_action.worktree
worktree.create_project("not_in_git")
git_worktree = qisrc_action.git_worktree
git_worktree.create_git_project("git_project")
qisrc_action("foreach", "ls")
assert not record_messages.find("not_in_git")
assert record_messages.find("git_project")
record_messages.reset()
qisrc_action("foreach", "ls", "--all")
assert record_messages.find("not_in_git")
assert record_messages.find("git_project")
def test_non_cloned_groups(qisrc_action, git_server, record_messages):
git_server.create_group("foo", ["a.git", "b.git"])
git_server.create_group("bar", ["b.git", "c.git"])
qisrc_action("init", git_server.manifest_url, "--group", "foo")
record_messages.reset()
qisrc_action("foreach", "--group", "bar", "ls")
warning = record_messages.find(r"\[WARN \]")
assert warning
assert "Group bar is not currently in use" in warning
def test_do_not_warn_on_subgroups(qisrc_action, git_server, record_messages):
git_server.create_group("big", ["a.git", "b.git"])
git_server.create_group("small", ["b.git"])
qisrc_action("init", git_server.manifest_url, "--group", "big")
record_messages.reset()
qisrc_action("foreach", "--group", "small", "ls")
assert not record_messages.find(r"\[WARN \]")
assert record_messages.find(r"\* \(1/1\) b")
| 40.615385 | 77 | 0.709596 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 514 | 0.324495 |
f1a543e42a5ea04e653279a8af75516ed7470802 | 144 | py | Python | onnxmltools/convert/libsvm/operator_converters/__init__.py | xhochy/onnxmltools | cb2782b155ff67dc1e586f36a27c5d032070c801 | [
"Apache-2.0"
]
| 623 | 2018-02-16T20:43:01.000Z | 2022-03-31T05:00:17.000Z | onnxmltools/convert/libsvm/operator_converters/__init__.py | xhochy/onnxmltools | cb2782b155ff67dc1e586f36a27c5d032070c801 | [
"Apache-2.0"
]
| 339 | 2018-02-26T21:27:04.000Z | 2022-03-31T03:16:50.000Z | onnxmltools/convert/libsvm/operator_converters/__init__.py | xhochy/onnxmltools | cb2782b155ff67dc1e586f36a27c5d032070c801 | [
"Apache-2.0"
]
| 152 | 2018-02-24T01:20:22.000Z | 2022-03-31T07:41:35.000Z | # SPDX-License-Identifier: Apache-2.0
# To register converter for libsvm operators, import associated modules here.
from . import SVMConverter
| 28.8 | 77 | 0.798611 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.791667 |
f1aa3fd77846f2c70da5ebcb50efbe7da8be193b | 333 | py | Python | aspen/renderers.py | galuszkak/aspen.py | a29047d6d4eefa47413e35a18068946424898364 | [
"MIT"
]
| null | null | null | aspen/renderers.py | galuszkak/aspen.py | a29047d6d4eefa47413e35a18068946424898364 | [
"MIT"
]
| null | null | null | aspen/renderers.py | galuszkak/aspen.py | a29047d6d4eefa47413e35a18068946424898364 | [
"MIT"
]
| null | null | null | # for backwards compatibility with aspen-renderer modules
from .simplates.renderers import Factory, Renderer
Factory, Renderer # make pyflakes happy
import warnings
warnings.warn('aspen.renderers is deprecated and will be removed in a future version. '
'Please use aspen.simplates.renderers instead.', FutureWarning)
| 37 | 87 | 0.780781 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 198 | 0.594595 |
f1ad55c7e2b9846cac3302cc84dc78c54a2ce31b | 3,562 | py | Python | coursework/src/highscore.py | SpeedoDevo/G51FSE | bf5e203d936965e254eff1efa0b74edc368a6cda | [
"MIT"
]
| null | null | null | coursework/src/highscore.py | SpeedoDevo/G51FSE | bf5e203d936965e254eff1efa0b74edc368a6cda | [
"MIT"
]
| null | null | null | coursework/src/highscore.py | SpeedoDevo/G51FSE | bf5e203d936965e254eff1efa0b74edc368a6cda | [
"MIT"
]
| null | null | null | import pygame
import sys
import collections # for ordered dict
import pickle # for saving and loading highscores
from constants import (SCREEN_WIDTH, SCREEN_HEIGHT, RED, GREEN, GREY, BLACK, WHITE)
# class that shows, saves and loads highscores
class ScoreTable(pygame.sprite.Sprite):
# passing in bg so that it's never reinitialized
def __init__(self, screen, clock, bg):
pygame.sprite.Sprite.__init__(self)
self.titleFont = pygame.font.Font('image/langdon.otf', 50)
self.title = self.titleFont.render("highscores", True, GREY)
self.titleRect = self.title.get_rect()
# center on top of the screen
self.titleRect.center = (SCREEN_WIDTH/2,75)
self.scoreFont = pygame.font.Font('image/muzarela.ttf', 30)
self.clock = clock
self.screen = screen
self.bg = bg
        # last stores the player's last highscore
self.last = 0
self.load()
def draw(self,screen):
#update then blit bg
self.bg.update()
screen.blit(self.bg.image,self.bg.rect)
screen.blit(self.title,self.titleRect)
for i in range(len(self.hs)):
#red color for the user's highscore
if list(self.hs.items())[i][0] == self.last:
color = RED
else:
color = WHITE
self.text = self.scoreFont.render(str(i+1) + ". " + str(list(self.hs.items())[i][1]) + ": " + str(list(self.hs.items())[i][0]), True, color)
self.textrect = self.text.get_rect()
# position text based on iteration number
self.textrect.center = (SCREEN_WIDTH/2,(150+i*35))
self.screen.blit(self.text,self.textrect)
pygame.display.update()
def update(self):
for event in pygame.event.get():
# let the game quit
if event.type == pygame.QUIT:
pygame.quit()
sys.exit(0)
# quit from hstable with click or enter
if event.type == pygame.MOUSEBUTTONDOWN or (event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN):
return True
return False
def run(self):
while 1:
# because we are out of the game loop here we need an own ticking
self.clock.tick(70)
self.draw(self.screen)
if self.update(): return
def getLowest(self):
# get the lowest score to decide whether it's high enough fot adding in the table
return min(list(self.hs.keys()))
def submitScore(self,name,score):
# delete the last
self.hs.popitem()
# add item
self.hs[score] = name
# save which was it
self.last = score
# reorder list
self.hs = collections.OrderedDict(sorted(self.hs.items(), reverse=True))
# save to file
self.save()
def noHS(self):
# remove highlighting if the score wasn't high enough
self.last = None
def save(self):
# pickle highscores into file
pickle.dump(self.hs, open("hs.dat", "wb"), 2)
def load(self):
# load highscores if it already exists
try:
self.hs = pickle.load(open("hs.dat", "rb"))
# create new file if it doesn't
except:
temp = {50000:"SpeedoDevo", 40000:"OliGee", 30000:"Jaume", 20000:"Kyle", 10000:"Steve", 9000:"Danielle", 8000:"Phil", 7000:"Mark", 6000:"Hugh", 5000:"Lisa"}
self.hs = collections.OrderedDict(sorted(temp.items(), reverse=True))
self.save() | 37.893617 | 168 | 0.588433 | 3,317 | 0.931218 | 0 | 0 | 0 | 0 | 0 | 0 | 898 | 0.252106 |
f1afaf0a95380f8c421a56c623e2af9bfd01fd81 | 27,795 | py | Python | BAT/BAT.py | baba-hashimoto/BAT.py | 8c7ad986dd0854961175079b98ce4f6507fee87a | [
"MIT"
]
| null | null | null | BAT/BAT.py | baba-hashimoto/BAT.py | 8c7ad986dd0854961175079b98ce4f6507fee87a | [
"MIT"
]
| null | null | null | BAT/BAT.py | baba-hashimoto/BAT.py | 8c7ad986dd0854961175079b98ce4f6507fee87a | [
"MIT"
]
| 1 | 2022-03-26T11:34:20.000Z | 2022-03-26T11:34:20.000Z | #!/usr/bin/env python2
import glob as glob
import os as os
import re
import shutil as shutil
import signal as signal
import subprocess as sp
import sys as sys
from lib import build
from lib import scripts
from lib import setup
from lib import analysis
ion_def = []
poses_list = []
poses_def = []
release_eq = []
translate_apr = []
attach_rest = []
lambdas = []
weights = []
components = []
aa1_poses = []
aa2_poses = []
# Read arguments that define input file and stage
if len(sys.argv) < 5:
scripts.help_message()
sys.exit(0)
for i in [1, 3]:
if '-i' == sys.argv[i].lower():
input_file = sys.argv[i + 1]
elif '-s' == sys.argv[i].lower():
stage = sys.argv[i + 1]
else:
scripts.help_message()
sys.exit(1)
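# Example invocation (illustrative): python BAT.py -i input.in -s equil
# "-i" selects the input file parsed below and "-s" selects the calculation stage.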
# Open input file
with open(input_file) as f_in:
# Remove spaces and tabs
lines = (line.strip(' \t\n\r') for line in f_in)
lines = list(line for line in lines if line) # Non-blank lines in a list
for i in range(0, len(lines)):
# split line using the equal sign, and remove text after #
if not lines[i][0] == '#':
lines[i] = lines[i].split('#')[0].split('=')
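# Example of the input-file syntax parsed above (illustrative values only):
#   temperature = 298.15
#   fe_type = dd          # rest, dd, pmf, all, pmf-rest, dd-rest or custom
# After the split on '=', each such line becomes a [keyword, value] pair.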
# Read parameters from input file
for i in range(0, len(lines)):
if not lines[i][0] == '#':
lines[i][0] = lines[i][0].strip().lower()
lines[i][1] = lines[i][1].strip()
if lines[i][0] == 'pull_ligand':
if lines[i][1].lower() == 'yes':
pull_ligand = 'yes'
elif lines[i][1].lower() == 'no':
pull_ligand = 'no'
else:
print('Wrong input! Please use yes or no to indicate whether to pull out the ligand or not.')
sys.exit(1)
elif lines[i][0] == 'temperature':
temperature = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'eq_steps1':
eq_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'eq_steps2':
eq_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'prep_steps1':
prep_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'prep_steps2':
prep_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'a_steps1':
a_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'a_steps2':
a_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'l_steps1':
l_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'l_steps2':
l_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 't_steps1':
t_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 't_steps2':
t_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'u_steps1':
u_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'u_steps2':
u_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'c_steps1':
c_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'c_steps2':
c_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'r_steps1':
r_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'r_steps2':
r_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'e_steps1':
e_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'e_steps2':
e_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'v_steps1':
v_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'v_steps2':
v_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'w_steps1':
w_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'w_steps2':
w_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'f_steps1':
f_steps1 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'f_steps2':
f_steps2 = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'pull_spacing':
pull_spacing = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'poses_list':
newline = lines[i][1].strip('\'\"-,.:;#()][').split(',')
for j in range(0, len(newline)):
poses_list.append(scripts.check_input('int', newline[j], input_file, lines[i][0]))
elif lines[i][0] == 'calc_type':
calc_type = lines[i][1].lower()
elif lines[i][0] == 'celpp_receptor':
celp_st = lines[i][1]
elif lines[i][0] == 'p1':
H1 = lines[i][1]
elif lines[i][0] == 'p2':
H2 = lines[i][1]
elif lines[i][0] == 'p3':
H3 = lines[i][1]
elif lines[i][0] == 'ligand_name':
mol = lines[i][1]
elif lines[i][0] == 'fe_type':
if lines[i][1].lower() == 'rest':
fe_type = lines[i][1].lower()
elif lines[i][1].lower() == 'dd':
fe_type = lines[i][1].lower()
elif lines[i][1].lower() == 'pmf':
fe_type = lines[i][1].lower()
elif lines[i][1].lower() == 'all':
fe_type = lines[i][1].lower()
elif lines[i][1].lower() == 'pmf-rest':
fe_type = lines[i][1].lower()
elif lines[i][1].lower() == 'dd-rest':
fe_type = lines[i][1].lower()
elif lines[i][1].lower() == 'custom':
fe_type = lines[i][1].lower()
else:
print('Free energy type not recognized, please choose all, rest (restraints), dd (double decoupling) or pmf (umbrella sampling), pmf-rest, dd-rest, or custom')
sys.exit(1)
elif lines[i][0] == 'dd_type':
if lines[i][1].lower() == 'mbar':
dd_type = lines[i][1].lower()
elif lines[i][1].lower() == 'ti':
dd_type = lines[i][1].lower()
else:
print('Double decoupling type not recognized, please choose ti or mbar')
sys.exit(1)
elif lines[i][0] == 'blocks':
blocks = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'hmr':
if lines[i][1].lower() == 'yes':
hmr = 'yes'
elif lines[i][1].lower() == 'no':
hmr = 'no'
else:
print('Wrong input! Please use yes or no to indicate whether hydrogen mass repartitioning '
'will be used.')
sys.exit(1)
elif lines[i][0] == 'water_model':
if lines[i][1].lower() == 'tip3p':
water_model = lines[i][1].upper()
elif lines[i][1].lower() == 'tip4pew':
water_model = lines[i][1].upper()
elif lines[i][1].lower() == 'spce':
water_model = lines[i][1].upper()
else:
print('Water model not supported. Please choose TIP3P, TIP4PEW or SPCE')
sys.exit(1)
elif lines[i][0] == 'num_waters':
num_waters = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'neutralize_only':
if lines[i][1].lower() == 'yes':
neut = 'yes'
elif lines[i][1].lower() == 'no':
neut = 'no'
else:
print('Wrong input! Please choose neutralization only or add extra ions')
sys.exit(1)
elif lines[i][0] == 'cation':
cation = lines[i][1]
elif lines[i][0] == 'anion':
anion = lines[i][1]
elif lines[i][0] == 'num_cations':
num_cations = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'num_cat_ligbox':
num_cat_ligbox = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'buffer_x':
buffer_x = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'buffer_y':
buffer_y = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'lig_buffer':
lig_buffer = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'rec_distance_force':
rec_distance_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'rec_angle_force':
rec_angle_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'rec_dihcf_force':
rec_dihcf_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'rec_discf_force':
rec_discf_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'lig_distance_force':
lig_distance_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'lig_angle_force':
lig_angle_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'lig_dihcf_force':
lig_dihcf_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'lig_discf_force':
lig_discf_force = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'l1_x':
l1_x = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'l1_y':
l1_y = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'l1_z':
l1_z = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'l1_zm':
l1_zm = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'l1_range':
l1_range = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'min_adis':
min_adis = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'max_adis':
max_adis = scripts.check_input('float', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'rec_bb':
if lines[i][1].lower() == 'yes':
rec_bb = 'yes'
elif lines[i][1].lower() == 'no':
rec_bb = 'no'
else:
print('Wrong input! Please use yes or no to indicate whether protein backbone restraints '
'will be used.')
sys.exit(1)
elif lines[i][0] == 'bb_start':
bb_start = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'bb_end':
bb_end = scripts.check_input('int', lines[i][1], input_file, lines[i][0])
elif lines[i][0] == 'bb_equil':
if lines[i][1].lower() == 'yes':
bb_equil = lines[i][1].lower()
else:
bb_equil = 'no'
elif lines[i][0] == 'release_eq':
strip_line = lines[i][1].strip('\'\"-,.:;#()][').split()
for j in range(0, len(strip_line)):
release_eq.append(scripts.check_input('float', strip_line[j], input_file, lines[i][0]))
elif lines[i][0] == 'translate_apr':
strip_line = lines[i][1].strip('\'\"-,.:;#()][').split()
for j in range(0, len(strip_line)):
translate_apr.append(scripts.check_input('float', strip_line[j], input_file, lines[i][0]))
elif lines[i][0] == 'attach_rest':
strip_line = lines[i][1].strip('\'\"-,.:;#()][').split()
for j in range(0, len(strip_line)):
attach_rest.append(scripts.check_input('float', strip_line[j], input_file, lines[i][0]))
elif lines[i][0] == 'lambdas':
strip_line = lines[i][1].strip('\'\"-,.:;#()][').split()
for j in range(0, len(strip_line)):
lambdas.append(scripts.check_input('float', strip_line[j], input_file, lines[i][0]))
elif lines[i][0] == 'weights':
strip_line = lines[i][1].strip('\'\"-,.:;#()][').split()
for j in range(0, len(strip_line)):
weights.append(scripts.check_input('float', strip_line[j], input_file, lines[i][0]))
elif lines[i][0] == 'components':
strip_line = lines[i][1].strip('\'\"-,.:;#()][').split()
for j in range(0, len(strip_line)):
components.append(strip_line[j])
elif lines[i][0] == 'ntpr':
ntpr = lines[i][1]
elif lines[i][0] == 'ntwr':
ntwr = lines[i][1]
elif lines[i][0] == 'ntwe':
ntwe = lines[i][1]
elif lines[i][0] == 'ntwx':
ntwx = lines[i][1]
elif lines[i][0] == 'cut':
cut = lines[i][1]
elif lines[i][0] == 'gamma_ln':
gamma_ln = lines[i][1]
elif lines[i][0] == 'barostat':
barostat = lines[i][1]
elif lines[i][0] == 'receptor_ff':
receptor_ff = lines[i][1]
elif lines[i][0] == 'ligand_ff':
if lines[i][1].lower() == 'gaff':
ligand_ff = 'gaff'
elif lines[i][1].lower() == 'gaff2':
ligand_ff = 'gaff2'
else:
print('Wrong input! Available options for ligand force-field are gaff and gaff2')
sys.exit(1)
elif lines[i][0] == 'dt':
dt = lines[i][1]
# Number of simulations, 1 equilibrium and 1 production
apr_sim = 2
# Define free energy components
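# Component letters map to the setup branches in the 'fe' stage below: 'e'/'f' are charge
# decoupling in the binding site / in bulk, 'v'/'w' are van der Waals decoupling in the
# site / in bulk, 'u' is the pulling (umbrella/PMF) leg, 'c' is ligand conformational
# release, 'r' is receptor conformational release, and the remaining letters
# ('a', 'l', 't') are handled by the generic "Attachments in the bound system" branch.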
if fe_type == 'rest':
components = ['c', 'a', 'l', 't', 'r']
elif fe_type == 'dd':
components = ['e', 'v', 'f', 'w']
elif fe_type == 'pmf':
components = ['u']
elif fe_type == 'all':
components = ['c', 'a', 'l', 't', 'r', 'u', 'v', 'w', 'e', 'f']
elif fe_type == 'pmf-rest':
components = ['c', 'a', 'l', 't', 'r', 'u']
elif fe_type == 'dd-rest':
components = ['c', 'a', 'l', 't', 'r', 'e', 'v', 'w', 'f']
# Pull ligand out or not
if pull_ligand == 'no':
translate_apr = [ 0.00 ]
pull_spacing = 1.0
prep_steps2 = 0
# Do not apply protein backbone restraints
if rec_bb == 'no':
bb_start = 1
bb_end = 0
bb_equil = 'no'
# Create poses definitions
if calc_type == 'dock':
for i in range(0, len(poses_list)):
poses_def.append('pose'+str(poses_list[i]))
elif calc_type == 'crystal':
poses_def = [celp_st]
# Total distance
apr_distance = translate_apr[-1]
rng = 0
# Create restraint definitions
rest = [rec_distance_force, rec_angle_force, rec_dihcf_force, rec_discf_force, lig_distance_force, lig_angle_force, lig_dihcf_force, lig_discf_force]
# Create ion definitions
ion_def = [cation, anion, num_cations]
ion_lig = [cation, anion, num_cat_ligbox]
# Define number of steps for all stages
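# dic_steps1/dic_steps2 map each restraint component letter to its equilibration and
# production step counts (the *_steps1/*_steps2 values are presumably read from the
# input file above).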
dic_steps1 = {}
dic_steps2 = {}
dic_steps1['a'] = a_steps1
dic_steps2['a'] = a_steps2
dic_steps1['l'] = l_steps1
dic_steps2['l'] = l_steps2
dic_steps1['t'] = t_steps1
dic_steps2['t'] = t_steps2
dic_steps1['c'] = c_steps1
dic_steps2['c'] = c_steps2
dic_steps1['r'] = r_steps1
dic_steps2['r'] = r_steps2
if stage == 'equil':
comp = 'q'
win = 0
trans_dist = 0
# Create equilibrium systems for all poses listed in the input file
for i in range(0, len(poses_def)):
rng = len(release_eq) - 1
pose = poses_def[i]
if not os.path.exists('./all-poses/'+pose+'.pdb'):
continue
print('Setting up '+str(poses_def[i]))
# Get number of simulations
num_sim = len(release_eq)
# Create aligned initial complex
anch = build.build_equil(pose, celp_st, mol, H1, H2, H3, calc_type, l1_x, l1_y, l1_z, l1_zm, l1_range, min_adis, max_adis, ligand_ff)
if anch == 'anch1':
aa1_poses.append(pose)
os.chdir('../')
continue
if anch == 'anch2':
aa2_poses.append(pose)
os.chdir('../')
continue
# Solvate system with ions
print('Creating box...')
build.create_box(hmr, pose, mol, num_waters, water_model, ion_def, neut, buffer_x, buffer_y, stage, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
# Apply restraints and prepare simulation files
print('Equil release weights:')
for i in range(0, len(release_eq)):
weight = release_eq[i]
print('%s' %str(weight))
setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
shutil.copy('./'+pose+'/disang.rest', './'+pose+'/disang%02d.rest' %int(i))
shutil.copy('./'+pose+'/disang%02d.rest' %int(0), './'+pose+'/disang.rest')
setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, eq_steps1, eq_steps2, rng)
os.chdir('../')
if len(aa1_poses) != 0:
print('\n')
print('WARNING: Could not find the ligand first anchor L1 for', aa1_poses)
print('The ligand is most likely not in the defined binding site in these systems.')
if len(aa2_poses) != 0:
print('\n')
print('WARNING: Could not find the ligand L2 or L3 anchors for', aa2_poses)
print('Try reducing the min_adis parameter in the input file.')
elif stage == 'prep':
win = 0
weight = 100.0
comp = 's'
# Prepare systems after equilibration for poses listed in the input file
for i in range(0, len(poses_def)):
pose = poses_def[i]
if not os.path.exists('./equil/'+pose):
continue
print('Setting up '+str(poses_def[i]))
# Get number of simulations
num_sim = int(apr_distance/pull_spacing)+1
rng = num_sim - 1
# Create aligned initial complex
fwin = len(release_eq) - 1
anch = build.build_prep(pose, mol, fwin, l1_x, l1_y, l1_z, l1_zm, l1_range, min_adis, max_adis)
if anch == 'anch1':
aa1_poses.append(pose)
os.chdir('../')
continue
if anch == 'anch2':
aa2_poses.append(pose)
os.chdir('../')
continue
# Solvate system with ions
print('Creating box...')
build.create_box(hmr, pose, mol, num_waters, water_model, ion_def, neut, buffer_x, buffer_y, stage, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
# Apply restraints and prepare simulation files
print('Pulling distance interval: %s' %pull_spacing)
print('Total pulling distance: %s' %apr_distance)
print('Creating pulling steps...')
for i in range(0, num_sim):
trans_dist = float(i*pull_spacing)
setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
shutil.copy('./'+pose+'/disang.rest', './'+pose+'/disang%03d.rest' %int(i))
shutil.copy('./'+pose+'/disang%03d.rest' %int(0), './'+pose+'/disang.rest')
setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, prep_steps1, prep_steps2, rng)
os.chdir('../')
if len(aa1_poses) != 0:
print('\n')
print('WARNING: Could not find the ligand first anchor L1 for', aa1_poses)
print('The ligand most likely left the binding site during equilibration.')
if len(aa2_poses) != 0:
print('\n')
print('WARNING: Could not find the ligand L2 or L3 anchors for', aa2_poses)
print('Try reducing the min_adis parameter in the input file.')
elif stage == 'fe':
# Create systems for all poses after preparation
num_sim = apr_sim
# Create and move to apr directory
if not os.path.exists('fe'):
os.makedirs('fe')
os.chdir('fe')
for i in range(0, len(poses_def)):
pose = poses_def[i]
if not os.path.exists('../prep/'+pose):
continue
print('Setting up '+str(poses_def[i]))
# Create and move to pose directory
if not os.path.exists(pose):
os.makedirs(pose)
os.chdir(pose)
# Generate folder and restraints for all components and windows
for j in range(0, len(components)):
comp = components[j]
# Translation (umbrella)
if (comp == 'u'):
if not os.path.exists('pmf'):
os.makedirs('pmf')
os.chdir('pmf')
weight = 100.0
for k in range(0, len(translate_apr)):
trans_dist = translate_apr[k]
win = k
print('window: %s%02d distance: %s' %(comp, int(win), str(trans_dist)))
build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, u_steps1, u_steps2, rng)
os.chdir('../')
# Ligand conformational release in a small box
elif (comp == 'c'):
if not os.path.exists('rest'):
os.makedirs('rest')
os.chdir('rest')
trans_dist = 0
for k in range(0, len(attach_rest)):
weight = attach_rest[k]
win = k
if int(win) == 0:
print('window: %s%02d weight: %s' %(comp, int(win), str(weight)))
build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
print('Creating box for ligand only...')
build.ligand_box(mol, lig_buffer, water_model, neut, ion_lig, comp, ligand_ff)
setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, c_steps1, c_steps2, rng)
else:
print('window: %s%02d weight: %s' %(comp, int(win), str(weight)))
build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, c_steps1, c_steps2, rng)
os.chdir('../')
# Receptor conformational release in a separate box
elif (comp == 'r'):
if not os.path.exists('rest'):
os.makedirs('rest')
os.chdir('rest')
trans_dist = translate_apr[-1]
for k in range(0, len(attach_rest)):
weight = attach_rest[k]
win = k
if int(win) == 0:
print('window: %s%02d weight: %s' %(comp, int(win), str(weight)))
build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
print('Creating box for apo protein...')
build.create_box(hmr, pose, mol, num_waters, water_model, ion_def, neut, buffer_x, buffer_y, stage, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, r_steps1, r_steps2, rng)
else:
print('window: %s%02d weight: %s' %(comp, int(win), str(weight)))
build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, r_steps1, r_steps2, rng)
os.chdir('../')
# Van der Waals decoupling
# site
elif (comp == 'v'):
if not os.path.exists('dd'):
os.makedirs('dd')
os.chdir('dd')
trans_dist = 0
if not os.path.exists('site'):
os.makedirs('site')
os.chdir('site')
for k in range(0, len(lambdas)):
weight = lambdas[k]
win = k
print('window: %s%02d lambda: %s' %(comp, int(win), str(weight)))
build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
setup.dec_files(temperature, mol, num_sim, pose, comp, win, stage, v_steps1, v_steps2, weight, lambdas)
os.chdir('../../')
# bulk
elif (comp == 'w'):
if not os.path.exists('dd'):
os.makedirs('dd')
os.chdir('dd')
trans_dist = 0
if not os.path.exists('bulk'):
os.makedirs('bulk')
os.chdir('bulk')
for k in range(0, len(lambdas)):
weight = lambdas[k]
win = k
if int(win) == 0:
print('window: %s%02d lambda: %s' %(comp, int(win), str(weight)))
build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
print('Creating box for ligand only...')
build.ligand_box(mol, lig_buffer, water_model, neut, ion_lig, comp, ligand_ff)
setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
setup.dec_files(temperature, mol, num_sim, pose, comp, win, stage, w_steps1, w_steps2, weight, lambdas)
else:
print('window: %s%02d lambda: %s' %(comp, int(win), str(weight)))
build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
setup.dec_files(temperature, mol, num_sim, pose, comp, win, stage, w_steps1, w_steps2, weight, lambdas)
os.chdir('../../')
# Charge decoupling
# site
elif (comp == 'e'):
if not os.path.exists('dd'):
os.makedirs('dd')
os.chdir('dd')
trans_dist = 0
if not os.path.exists('site'):
os.makedirs('site')
os.chdir('site')
for k in range(0, len(lambdas)):
weight = lambdas[k]
win = k
print('window: %s%02d lambda: %s' %(comp, int(win), str(weight)))
build.build_dec(hmr, mol, pose, comp, win, water_model, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
setup.dec_files(temperature, mol, num_sim, pose, comp, win, stage, e_steps1, e_steps2, weight, lambdas)
os.chdir('../../')
# bulk
elif (comp == 'f'):
if not os.path.exists('dd'):
os.makedirs('dd')
os.chdir('dd')
trans_dist = 0
if not os.path.exists('bulk'):
os.makedirs('bulk')
os.chdir('bulk')
for k in range(0, len(lambdas)):
weight = lambdas[k]
win = k
if int(win) == 0:
print('window: %s%02d lambda: %s' %(comp, int(win), str(weight)))
build.build_dec(hmr, mol, pose, comp, win, water_model, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
print('Creating box for ligand decharging in bulk...')
build.ligand_box(mol, lig_buffer, water_model, neut, ion_lig, comp, ligand_ff)
setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
setup.dec_files(temperature, mol, num_sim, pose, comp, win, stage, f_steps1, f_steps2, weight, lambdas)
else:
print('window: %s%02d lambda: %s' %(comp, int(win), str(weight)))
build.build_dec(hmr, mol, pose, comp, win, water_model, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
setup.dec_files(temperature, mol, num_sim, pose, comp, win, stage, f_steps1, f_steps2, weight, lambdas)
os.chdir('../../')
# Attachments in the bound system
else:
if not os.path.exists('rest'):
os.makedirs('rest')
os.chdir('rest')
trans_dist = 0
for k in range(0, len(attach_rest)):
weight = attach_rest[k]
win = k
print('window: %s%02d weight: %s' %(comp, int(win), str(weight)))
build.build_apr(hmr, mol, pose, comp, win, trans_dist, pull_spacing, ntpr, ntwr, ntwe, ntwx, cut, gamma_ln, barostat, receptor_ff, ligand_ff, dt)
setup.restraints(pose, rest, bb_start, bb_end, weight, stage, mol, trans_dist, comp, bb_equil)
steps1 = dic_steps1[comp]
steps2 = dic_steps2[comp]
setup.sim_files(hmr, temperature, mol, num_sim, pose, comp, win, stage, steps1, steps2, rng)
os.chdir('../')
os.chdir('../')
elif stage == 'analysis':
# Free energies MBAR/TI and analytical calculations
for i in range(0, len(poses_def)):
pose = poses_def[i]
analysis.fe_values(blocks, components, temperature, pose, attach_rest, translate_apr, lambdas, weights, dd_type, rest)
os.chdir('../../')
| 43.565831 | 188 | 0.612916 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,569 | 0.20036 |
f1b05065492f951ddbe7f464e95a73ced555ef67 | 693 | py | Python | mooringlicensing/migrations/0184_auto_20210630_1422.py | jawaidm/mooringlicensing | b22e74209da8655c8ad3af99e00f36d17c8ef73f | ["Apache-2.0"] | null | null | null | mooringlicensing/migrations/0184_auto_20210630_1422.py | jawaidm/mooringlicensing | b22e74209da8655c8ad3af99e00f36d17c8ef73f | ["Apache-2.0"] | 2 | 2021-03-05T06:48:11.000Z | 2021-03-26T08:14:17.000Z | mooringlicensing/migrations/0184_auto_20210630_1422.py | jawaidm/mooringlicensing | b22e74209da8655c8ad3af99e00f36d17c8ef73f | ["Apache-2.0"] | 2 | 2021-09-19T15:45:19.000Z | 2021-10-05T05:07:41.000Z |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-06-30 06:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mooringlicensing', '0183_auto_20210629_1156'),
]
operations = [
migrations.AlterField(
model_name='sticker',
name='status',
field=models.CharField(choices=[('ready', 'Ready'), ('printing', 'Printing'), ('current', 'Current'), ('to_be_returned', 'To be Returned'), ('returned', 'Returned'), ('lost', 'Lost'), ('expired', 'Expired'), ('cancelled', 'Cancelled')], default='ready', max_length=40),
),
]
| 33 | 281 | 0.620491 | 534 | 0.770563 | 0 | 0 | 0 | 0 | 0 | 0 | 295 | 0.425685 |
f1b0c5d59ac79b7bc53e1a8befc59467c9a655ae | 3,188 | py | Python | judge/download.py | tokusumi/judge-cli | e6883ba55dc37e8ca2f328105a4df57b0b3145ba | ["MIT"] | null | null | null | judge/download.py | tokusumi/judge-cli | e6883ba55dc37e8ca2f328105a4df57b0b3145ba | ["MIT"] | 6 | 2021-04-04T06:19:30.000Z | 2021-09-18T16:48:41.000Z | judge/download.py | tokusumi/judge-cli | e6883ba55dc37e8ca2f328105a4df57b0b3145ba | ["MIT"] | null | null | null |
from pathlib import Path
from typing import Optional, Tuple
import typer
from onlinejudge import utils
from pydantic.networks import HttpUrl
from pydantic.types import DirectoryPath
from judge.schema import JudgeConfig
from judge.tools.download import DownloadArgs, LoginForm, SaveArgs
from judge.tools.download import download as download_tool
from judge.tools.download import save as save_tool
class DownloadJudgeConfig(JudgeConfig):
URL: HttpUrl
testdir: DirectoryPath
class CLILoginForm(LoginForm):
def get_credentials(self) -> Tuple[str, str]:
username = typer.prompt("What's your username?")
password = typer.prompt("What's your password?", hide_input=True)
return username, password
def main(
workdir: Path = typer.Argument(".", help="a directory path for working directory"),
url: Optional[str] = typer.Option(None, help="a download URL"),
directory: Path = typer.Option(None, help="a directory path for test cases"),
no_store: bool = typer.Option(False, help="test cases are shown but not saved"),
format: str = typer.Option("sample-%i.%e", help="custom filename format"),
login: bool = typer.Option(False, help="login into target service"),
cookie: Path = typer.Option(utils.default_cookie_path, help="directory for cookie"),
) -> None:
"""
This is a shortcut for downloading test cases with `online-judge-tools`.
First, call `judge conf` for configuration.
Then pass the `problem` at the `contest` you want to test.
Ex) the following downloads the test cases for Problem `C` at `ABC 051`:
```download```
"""
typer.echo("Load configuration...")
if not workdir.exists():
typer.secho(f"Not exists: {str(workdir.resolve())}", fg=typer.colors.BRIGHT_RED)
raise typer.Abort()
try:
_config = JudgeConfig.from_toml(workdir)
except KeyError as e:
typer.secho(str(e), fg=typer.colors.BRIGHT_RED)
raise typer.Abort()
__config = _config.dict()
if url or directory:
# check arguments
if url:
__config["URL"] = url
if directory:
__config["testdir"] = directory.resolve()
try:
config = DownloadJudgeConfig(**__config)
except KeyError as e:
typer.secho(str(e), fg=typer.colors.BRIGHT_RED)
raise typer.Abort()
typer.echo(f"Download {config.URL}")
try:
login_form: Optional[LoginForm] = None
if login:
login_form = CLILoginForm()
testcases = download_tool(
DownloadArgs(
url=config.URL,
login_form=login_form,
cookie=cookie,
)
)
except Exception as e:
typer.secho(str(e), fg=typer.colors.BRIGHT_RED)
raise typer.Abort()
if not no_store:
try:
save_tool(
testcases,
SaveArgs(
format=format,
directory=Path(config.testdir),
),
)
except Exception as e:
typer.secho(str(e), fg=typer.colors.BRIGHT_RED)
raise typer.Abort()
if __name__ == "__main__":
typer.run(main)
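# Illustrative invocation (the URL and paths here are assumptions; option names mirror
# the typer signature of main() above):
#   python judge/download.py . --url https://atcoder.jp/contests/abc051/tasks/abc051_c --directory tests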
| 30.653846 | 88 | 0.631117 | 328 | 0.102886 | 0 | 0 | 0 | 0 | 0 | 0 | 661 | 0.20734 |
f1b1cfe08adc3b1c3d213a90411b75dbb6594980 | 682 | py | Python | labs/Bonus_Labs/custom/filter_plugins/ntc.py | ryanaa08/NPA | 45173efa60713858bb8b1d884fe12c50fe69920c | ["BSD-Source-Code"] | 1 | 2021-11-06T20:39:22.000Z | 2021-11-06T20:39:22.000Z | labs/Bonus_Labs/custom/filter_plugins/ntc.py | krishnakadiyala/NPAcourse | 74f097107839d990b44adcee69d4f949696a332c | ["BSD-Source-Code"] | null | null | null | labs/Bonus_Labs/custom/filter_plugins/ntc.py | krishnakadiyala/NPAcourse | 74f097107839d990b44adcee69d4f949696a332c | ["BSD-Source-Code"] | null | null | null |
import re
import difflib
from ansible import errors
def diff(pre_change, post_change=''):
try:
netdiff = list(
difflib.unified_diff(
pre_change.splitlines(),
post_change.splitlines()
)
)
if netdiff:
header = ''.join(netdiff[0:3])
result = '\n'.join(netdiff[4:])
final = header + result
return final
except Exception as e:
raise errors.AnsibleFilterError('diff plugin error: %s' % str(e) )
class FilterModule(object):
''' A filter to diff two strings. '''
def filters(self):
return {
'diff' : diff
}
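# Illustrative use from a playbook, assuming this file sits in a filter_plugins/ directory
# next to the playbook (variable names are placeholders):
#   - debug:
#       msg: "{{ running_config | diff(candidate_config) }}"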
| 22.733333 | 74 | 0.527859 | 145 | 0.21261 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.108504 |
f1b716086bee59aea60d9505833a19bb60e79bc5 | 161 | py | Python | smart_note_diploma/core/urls.py | yerkebulan19971212/dipploma | d274088aa477dadd7971950b80ef9ea3ea366a6b | ["MIT"] | null | null | null | smart_note_diploma/core/urls.py | yerkebulan19971212/dipploma | d274088aa477dadd7971950b80ef9ea3ea366a6b | ["MIT"] | null | null | null | smart_note_diploma/core/urls.py | yerkebulan19971212/dipploma | d274088aa477dadd7971950b80ef9ea3ea366a6b | ["MIT"] | null | null | null |
from django.urls import path
from .api.view import get_all_countries_view
app_name = "core"
urlpatterns = [
path('all-countries', get_all_countries_view)
]
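# Illustrative project-level include (assumed wiring):
#   path('core/', include('smart_note_diploma.core.urls'))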
| 20.125 | 49 | 0.770186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.130435 |