max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
tests/test_util_path.py | JFlynnXYZ/pymel | 287 | 12713366 | from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
import unittest
import pymel.util.path
from pymel.util.path import path
class TestPath(unittest.TestCase):
def test_misc(self):
thisFile = path(__file__)
self.assertTrue(thisFile.exists())
self.assertTrue(thisFile.isfile())
self.assertFalse(thisFile.isdir())
self.assertIn(thisFile.basename(), (path('test_util_path.py'),
path('test_util_path.pyc')))
self.assertEqual(thisFile.namebase, 'test_util_path')
self.assertIn(thisFile.ext, ('.py', '.pyc'))
thisDir = thisFile.dirname()
self.assertEqual(thisDir, os.path.dirname(str(thisFile)))
self.assertTrue(thisDir.exists())
self.assertFalse(thisDir.isfile())
self.assertTrue(thisDir.isdir())
self.assertEqual(thisDir.basename(), 'tests')
self.assertEqual(thisDir.namebase, 'tests')
self.assertEqual(thisDir.ext, '')
files = thisDir.files()
self.assertIn(thisFile, files)
noExist = path('slartybartfast_fasdfjlkfjl')
self.assertFalse(noExist.exists())
self.assertFalse(noExist.isfile())
self.assertFalse(noExist.isdir())
|
bin/query.py | cvangysel/SERT | 107 | 12713369 | #!/usr/bin/env python
import sys
from cvangysel import argparse_utils, logging_utils, sklearn_utils, trec_utils
from sert import inference, math_utils, models
import argparse
import collections
import io
import logging
import numpy as np
import os
import operator
import pickle
import scipy
import scipy.spatial
import sklearn.neighbors
#
# Main driver.
#
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--loglevel', type=str, default='INFO')
parser.add_argument('--meta',
type=argparse_utils.existing_file_path, required=True)
parser.add_argument('--model',
type=argparse_utils.existing_file_path, required=True)
parser.add_argument('--topics',
type=argparse_utils.existing_file_path, nargs='+')
parser.add_argument('--top',
type=argparse_utils.positive_int,
default=None)
parser.add_argument('--run_out',
type=argparse_utils.nonexisting_file_path,
required=True)
args = parser.parse_args()
try:
logging_utils.configure_logging(args)
except IOError:
return -1
with open(args.model, 'rb') as f:
# Load model arguments and learned mapping.
model_args, predict_fn = (pickle.load(f) for _ in range(2))
# Load word representations.
word_representations = pickle.load(f)
try:
entity_representations = pickle.load(f)
except EOFError:
entity_representations = None
with open(args.meta, 'rb') as f:
(data_args,
words, tokens,
entity_indices_inv, entity_assocs) = (
pickle.load(f) for _ in range(5))
# Parse topic files.
topic_f = list(map(lambda filename: open(filename, 'r'), args.topics))
topics = trec_utils.parse_topics(topic_f)
[f_.close() for f_ in topic_f]
model_name = os.path.basename(args.model)
# Entity profiling.
topics_per_entity = collections.defaultdict(list)
# Entity finding.
entities_per_topic = collections.defaultdict(list)
def ranker_callback(topic_id, top_ranked_indices, top_ranked_values):
for rank, (entity_internal_id, relevance) in enumerate(
zip(top_ranked_indices, top_ranked_values)):
entity_id = entity_indices_inv[entity_internal_id]
# Entity profiling.
topics_per_entity[entity_id].append((relevance, topic_id))
# Entity finding.
entities_per_topic[topic_id].append((relevance, entity_id))
with open('{0}_debug'.format(args.run_out), 'w') as f_debug_out:
if model_args.type == models.LanguageModel:
result_callback = LogLinearCallback(
args, model_args, tokens,
f_debug_out,
ranker_callback)
elif model_args.type == models.VectorSpaceLanguageModel:
result_callback = VectorSpaceCallback(
entity_representations,
args, model_args, tokens,
f_debug_out,
ranker_callback)
batcher = inference.create(
predict_fn, word_representations,
model_args.batch_size, data_args.window_size, len(words),
result_callback)
logging.info('Batching queries using %s.', batcher)
for q_id, (topic_id, terms) in enumerate(topics.items()):
if topic_id not in topics:
logging.error('Topic "%s" not found in topic list.', topic_id)
continue
# Do not replace numeric tokens in queries.
query_terms = trec_utils.parse_query(terms)
query_tokens = []
logging.debug('Query (%d/%d) %s: %s (%s)',
q_id + 1, len(topics),
topic_id, query_terms, terms)
for term in query_terms:
if term not in words:
logging.debug('Term "%s" is OOV.', term)
continue
term_token = words[term].id
query_tokens.append(term_token)
if not query_tokens:
logging.warning('Skipping query with terms "%s".', terms)
continue
batcher.submit(query_tokens, topic_id=topic_id)
batcher.process()
# Entity profiling.
with io.open('{0}_ep'.format(args.run_out),
'w', encoding='utf8') as out_ep_run:
trec_utils.write_run(model_name, topics_per_entity, out_ep_run)
# Entity finding.
with io.open('{0}_ef'.format(args.run_out),
'w', encoding='utf8') as out_ef_run:
trec_utils.write_run(model_name, entities_per_topic, out_ef_run)
logging.info('Saved run to %s.', args.run_out)
#
# Ranker callbacks.
#
class Callback(object):
def __init__(self, args, model_args, tokens,
f_debug_out,
rank_callback):
self.args = args
self.model_args = model_args
self.tokens = tokens
self.f_debug_out = f_debug_out
self.rank_callback = rank_callback
self.topic_projections = {}
def __call__(self, payload, result, topic_id):
assert topic_id not in self.topic_projections
self.topic_projections[topic_id] = result.ravel()
distribution = result
logging.debug('Result of shape %s for topic "%s".',
distribution.shape, topic_id)
self.process(payload, distribution, topic_id)
def process(self, payload, distribution, topic_id):
raise NotImplementedError()
def should_average_input(self):
raise NotImplementedError()
class LogLinearCallback(Callback):
def __init__(self, *args, **kwargs):
super(LogLinearCallback, self).__init__(*args, **kwargs)
def process(self, payload, distribution, topic_id):
terms = list(map(lambda id: self.tokens[id], payload))
term_entropies = compute_normalised_entropy(
distribution, base=2)
distribution = inference.aggregate_distribution(
distribution, mode='product', axis=0)
assert distribution.ndim == 1
distribution /= distribution.sum()
if not np.isclose(distribution.sum(), 1.0):
logging.error('Encountered non-normalized '
'distribution for topic "%s" '
'(mass=%.10f).',
topic_id, distribution.sum())
self.f_debug_out.write('Topic {0} {1}: {2}\n'.format(
topic_id,
math_utils.entropy(
distribution, base=2, normalize=True),
zip(terms, term_entropies)))
ranked_indices = np.argsort(distribution)
top_ranked_indices = ranked_indices[::-1]
top_ranked_values = distribution[top_ranked_indices]
self.rank_callback(topic_id, top_ranked_indices, top_ranked_values)
def should_average_input(self):
return False
class VectorSpaceCallback(Callback):
def __init__(self, entity_representations, *args, **kwargs):
super(VectorSpaceCallback, self).__init__(*args, **kwargs)
logging.info(
'Initializing k-NN for entity representations of shape %s.',
entity_representations.shape)
n_neighbors = self.args.top
if n_neighbors is None:
logging.warning(
'Parameter k not set; defaulting to all entities (k=%d).',
entity_representations.shape[0])
elif n_neighbors > entity_representations.shape[0]:
logging.warning(
'Parameter k exceeds number of entities; '
'defaulting to all entities (k=%d).',
entity_representations.shape[0])
n_neighbors = None
self.entity_representation_distance = 'cosine'
if self.entity_representation_distance == 'cosine':
self.entity_representation_distance = 'euclidean'
self.normalize_representations = True
else:
self.normalize_representations = False
if self.normalize_representations:
entity_repr_l2_norms = np.linalg.norm(
entity_representations, axis=1)[:, np.newaxis]
entity_representations /= entity_repr_l2_norms
logging.debug('Term projections will be normalized.')
self.entity_representations = entity_representations
if n_neighbors:
nn_impl = sklearn_utils.neighbors_algorithm(
self.entity_representation_distance)
logging.info('Using %s as distance metric in entity space '
'with NearestNeighbors %s implementation.',
self.entity_representation_distance, nn_impl)
self.entity_neighbors = sklearn.neighbors.NearestNeighbors(
n_neighbors=n_neighbors,
algorithm=nn_impl,
metric=self.entity_representation_distance)
self.entity_neighbors.fit(entity_representations)
self.entity_avg = entity_representations.mean(axis=1)
logging.info('Entity k-NN params: %s',
self.entity_neighbors.get_params())
else:
logging.info('Using %s as distance metric in entity space.',
self.entity_representation_distance)
self.entity_neighbors = None
def query(self, centroids):
if self.entity_neighbors is not None:
distances, indices = self.entity_neighbors.kneighbors(centroids)
return distances, indices
else:
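# No k-NN index was built (k covers all entities); fall back to brute-force pairwise distances.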
pairwise_distances = scipy.spatial.distance.cdist(
centroids, self.entity_representations,
metric=self.entity_representation_distance)
distances = np.sort(pairwise_distances, axis=1)
indices = pairwise_distances.argsort(axis=1)\
.argsort(axis=1).argsort(axis=1)
return distances, indices
def process(self, payload, result, topic_id):
terms = list(map(lambda id: self.tokens[id], payload))
term_projections = inference.aggregate_distribution(
result, mode='identity', axis=0)
if term_projections.ndim == 1:
term_projections = term_projections.reshape(1, -1)
_, entity_representation_size = term_projections.shape
assert(entity_representation_size ==
self.model_args.entity_representation_size)
if self.normalize_representations:
term_projections_l2_norm = \
np.linalg.norm(term_projections, axis=1)[:, np.newaxis]
term_projections /= term_projections_l2_norm
logging.debug('Querying kneighbors for %s.', terms)
distances, indices = self.query(term_projections)
assert indices.shape[0] == term_projections.shape[0]
candidates = collections.defaultdict(float)
assert indices.shape[0] == 1
for term in range(indices.shape[0]):
term_indices = indices[term, :]
for rank, candidate in enumerate(term_indices):
matching_score = np.sum(
self.entity_representations[candidate, :] *
term_projections[term, :])
if self.normalize_representations:
matching_score = (matching_score + 1.0) / 2.0
candidates[candidate] += matching_score
top_ranked_indices, top_ranked_values = \
map(np.array, zip(
*sorted(candidates.items(),
reverse=True,
key=operator.itemgetter(1))))
self.rank_callback(topic_id, top_ranked_indices, top_ranked_values)
def should_average_input(self):
return True
def compute_normalised_entropy(distribution, base=2):
assert distribution.ndim == 2
assert np.allclose(distribution.sum(axis=1), 1.0)
entropies = [
math_utils.entropy(distribution[i, :], base=base, normalize=True)
for i in range(distribution.shape[0])]
return entropies
if __name__ == "__main__":
sys.exit(main())
|
data_structures/trees/tests/test_trie.py | vinta/fuck-coding-interviews | 590 | 12713446 | # coding: utf-8
import unittest
from data_structures.trees.trie import Trie
class TrieNodeTest(unittest.TestCase):
def setUp(self):
self.trie = Trie()
def test__len__(self):
self.assertEqual(len(self.trie), 0)
def test_insert(self):
with self.assertRaises(ValueError):
self.trie.insert('')
def test_search(self):
self.assertEqual(self.trie.search(''), False)
def test_startsWith(self):
self.assertEqual(self.trie.startsWith(''), True)
def test_integration(self):
self.trie.insert('apple')
self.assertEqual(len(self.trie), 1)
self.assertEqual(self.trie.search('apple'), True)
self.assertEqual(self.trie.search('app'), False)
self.assertEqual(self.trie.startsWith('app'), True)
self.trie.insert('app')
self.assertEqual(len(self.trie), 2)
self.assertEqual(self.trie.search('app'), True)
self.assertEqual(self.trie.startsWith('app'), True)
self.trie.insert('hammer')
self.assertEqual(len(self.trie), 3)
self.assertEqual(self.trie.search('hammers'), False)
self.assertEqual(self.trie.startsWith('hammers'), False)
if __name__ == '__main__':
unittest.main()
|
examples/create_images.py | hengwei-chan/nn_vis_network_visualization | 631 | 12713456 | import sys
import os.path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(sys.modules[__name__].__file__), "..")))
import matplotlib.pyplot as plt
import numpy as np
from sklearn.neighbors import KernelDensity
from tensorflow.python.keras.datasets import mnist
from data.data_handler import ProcessedNNHandler
from definitions import DATA_PATH
from evaluation.create_plot import save_plot
def configure_plt():
plt.rc('font', size=14)
plt.rc('axes', titlesize=14)
plt.rc('axes', labelsize=14)
plt.rc('xtick', labelsize=14)
plt.rc('ytick', labelsize=14)
plt.rc('legend', fontsize=14)
plt.rc('figure', titlesize=14)
def plot_mnist_samples(width: int = 6, height: int = 2):
(x_train, y_train), (_, _) = mnist.load_data()
fig, axs = plt.subplots(height, width, figsize=(width, height))
for i in range(height):
for j in range(width):
first_image = x_train[j + width * i + 120]
first_image = np.array(first_image, dtype='float')
pixels = first_image.reshape((28, 28))
axs[i, j].imshow(pixels, cmap='gray')
for ax in axs.flat:
ax.label_outer()
plt.subplots_adjust(wspace=0.2, hspace=0.2)
def plot_kernels():
fig, ax = plt.subplots(figsize=(8, 4),
subplot_kw={'facecolor': '#F4F4F4',
'axisbelow': True})
ax.grid(color='white', linestyle='-', linewidth=2)
for spine in ax.spines.values():
spine.set_color('#BBBBBB')
X_src = np.zeros((1, 1))
x_grid = np.linspace(-3, 3, 1000)
for kernel in ['gaussian', 'tophat', 'exponential', 'epanechnikov', 'linear', 'cosine']:
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(x_grid[:, None])
if kernel == 'epanechnikov':
ax.plot(x_grid, np.exp(log_dens), lw=6, alpha=0.8, label=kernel)
else:
ax.plot(x_grid, np.exp(log_dens), lw=3, alpha=0.5, label=kernel)
ax.set_ylim(0, 1.05)
ax.set_xlim(-2.9, 2.9)
ax.legend()
def plot_histogram(path: str):
processed_nn: ProcessedNNHandler = ProcessedNNHandler(DATA_PATH + path)
samples: np.array = processed_nn.get_all_samples()
z_values: np.array = np.zeros(samples.shape[0])
for i, sample in enumerate(samples):
z_values[i] = sample[2]
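# KernelDensity expects a 2-D array of shape (n_samples, n_features), hence the reshape below.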
z_values = z_values.reshape(-1, 1)
slots: int = 50
x_grid = np.linspace(-1.2, 1.2, int(slots * 1.2 * 4.0))
fig, ax = plt.subplots()
for bandwidth in [0.05, 0.18, 0.5]:
pdf = KernelDensity(kernel='epanechnikov', bandwidth=bandwidth).fit(z_values).score_samples(x_grid[:, None])
ax.plot(x_grid, np.exp(pdf), linewidth=2, alpha=0.6, label='bandwidth=%.2f' % bandwidth)
ax.hist(z_values, slots, facecolor='gray', histtype='stepfilled', alpha=0.4, density=True)
ax.legend(loc='upper right')
ax.set_xlim(-1.2, 1.2)
configure_plt()
plot_mnist_samples()
save_plot('mnist')
plt.show()
|
tools/scrape/scrape.py | therealplato/vim-awesome | 1,379 | 12713487 | <gh_stars>1000+
import argparse
import logging
from raven import Client
from tools.scrape import vimorg, github
try:
import secrets
_SENTRY_DSN = getattr(secrets, 'SENTRY_DSN', None)
except ImportError:
_SENTRY_DSN = None
client = Client(_SENTRY_DSN)
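# captureException() below reports scraper failures to Sentry when a DSN is configured.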
def scrape_github_plugins(num):
print "\nScraping plugins from github.com..."
github.scrape_plugin_repos(num)
print "%s GitHub API requests remaining." % github.get_requests_left()
def scrape_github_vim_scripts(num):
print "\nScraping plugins from the github.com/vim-scripts user..."
github.scrape_vim_scripts_repos(num)
print "%s GitHub API requests remaining." % github.get_requests_left()
def scrape_github_dotfiles(num):
print "\nScraping dotfiles from github.com..."
num_scraped, scraped_counter = github.scrape_dotfiles_repos(num)
print "\nScraped %s dotfiles repos." % num_scraped
print "Found: %s" % scraped_counter
print "%s GitHub API requests remaining." % github.get_requests_left()
def scrape_vimorg(num):
print "\nScraping plugins from vim.org..."
vimorg.scrape_scripts(num)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
scrape_fns = {
"vim.org": scrape_vimorg,
"github-plugins": scrape_github_plugins,
"github-vim-scripts": scrape_github_vim_scripts,
"github-dotfiles": scrape_github_dotfiles,
}
parser.add_argument("number", nargs="?", default=6000, type=int,
help="Maximum # of objects to scrape from each source"
" (default: 6000)")
parser.add_argument("--source", "-s", choices=scrape_fns.keys(),
default="all", help="Source to scrape from (default: all)")
args = parser.parse_args()
sources = scrape_fns.keys() if args.source == "all" else [args.source]
for source in sources:
scrape_fn = scrape_fns[source]
try:
scrape_fn(args.number)
except Exception:
logging.exception("scrape.py: error in %s " % (scrape_fn))
client.captureException()
|
gffutils/test/parser_test.py | aswarren/gffutils | 171 | 12713497 | <reponame>aswarren/gffutils
import tempfile
from nose.tools import assert_raises
from gffutils import (parser, create, feature, iterators, constants, helpers,
exceptions)
from gffutils import example_filename, create_db
from . import attr_test_cases
from textwrap import dedent
TEST_FILENAMES = [example_filename(i) for i in [
'c_elegans_WS199_ann_gff.txt',
'ensembl_gtf.txt',
'hybrid1.gff3',
'ncbi_gff3.txt',
'c_elegans_WS199_dna_shortened.fa',
'F3-unique-3.v2.gff',
'jgi_gff2.txt',
'wormbase_gff2_alt.txt',
'c_elegans_WS199_shortened_gff.txt',
'glimmer_nokeyval.gff3',
'mouse_extra_comma.gff3',
'wormbase_gff2.txt']]
def test_directives():
data = dedent("""
##directive1 example
. . . . . . . .
. . . . . . . .
. . . . . . . .
. . . . . . . .
""")
it = iterators._StringIterator(data)
db = create_db(data, dbfn=':memory:', from_string=True, verbose=False)
assert it.directives == db.directives == ['directive1 example'], (it.directives, db.directives)
def test_split_attrs():
# nosetests generator for all the test cases in attr_test_cases. (note no
# docstring for this test function so that nosetests -v will print the test
# cases)
for (attr_str, attr_dict, acceptable_reconstruction) \
in attr_test_cases.attrs:
yield attrs_OK, attr_str, attr_dict, acceptable_reconstruction
def attrs_OK(attr_str, attr_dict, acceptable_reconstruction=None):
"""
Given an attribute string and a dictionary of what you expect, test the
attribute splitting and reconstruction (invariant roundtrip).
There are some corner cases for the roundtrip invariance that don't work
(see attr_test_cases.py for details); `acceptable_reconstruction` handles
those.
"""
result, dialect = parser._split_keyvals(attr_str)
assert result == attr_dict, result
reconstructed = parser._reconstruct(result, dialect, keep_order=True)
if acceptable_reconstruction:
assert reconstructed == acceptable_reconstruction, reconstructed
else:
assert reconstructed == attr_str, reconstructed
def parser_smoke_test():
"""
Just confirm we can iterate completely through the test files....
"""
# Don't show the warnings for tests
import logging
parser.logger.setLevel(logging.CRITICAL)
for filename in TEST_FILENAMES:
p = iterators._FileIterator(filename)
for i in p:
continue
def test_empty_recontruct():
"""
reconstructing attributes with incomplete information returns empty string
"""
assert parser._reconstruct(None, constants.dialect) == ""
assert_raises(exceptions.AttributeStringError, parser._reconstruct, dict(ID='asdf'), None)
assert_raises(exceptions.AttributeStringError, parser._reconstruct, None, None)
def test_empty_split_keyvals():
attrs, dialect = parser._split_keyvals(keyval_str=None)
assert attrs == feature.dict_class()
assert dialect == constants.dialect
def test_repeated_keys_conflict():
"""
if dialect says repeated keys, but len(vals) > 1, then the keys are not
actually repeated....
"""
#
# This is now only checked when infer_dialect is True -- and is disabled
# when a dialect is provided
#
#dialect = constants.dialect.copy()
#dialect['repeated keys'] = True
#assert_raises(exceptions.AttributeStringError, parser._split_keyvals, "Parent=1,2,3", dialect)
def test_parser_from_string():
"""
make sure from string and from file return identical results
"""
line = b"chr2L FlyBase exon 7529 8116 . + . Name=CG11023:1;Parent=FBtr0300689,FBtr0300690"
tmp = tempfile.NamedTemporaryFile()
tmp.write(line)
tmp.seek(0)
p1 = iterators._StringIterator(
"chr2L FlyBase exon 7529 8116 . + . Name=CG11023:1;Parent=FBtr0300689,FBtr0300690"
)
p2 = iterators._FileIterator(tmp.name)
lines = list(zip(p1, p2))
assert len(lines) == 1
assert p1.current_item_number == p2.current_item_number == 0
assert lines[0][0] == lines[0][1]
def test_valid_line_count():
p = iterators._FileIterator(example_filename('ncbi_gff3.txt'))
assert len(list(p)) == 17
p = iterators._FileIterator(example_filename('hybrid1.gff3'))
assert len(list(p)) == 6
p = iterators._FileIterator(example_filename('FBgn0031208.gff'))
assert len(list(p)) == 27
def test_inconsistent_dialect():
"""
The second feature does not have a trailing semicolon (wormbase_gff2_alt is
like this). But since the first feature does, that's what the db's dialect
is set to, which can cause errors when parsing attributes.
"""
db = create.create_db(
"""
chr1 . gene 1 100 . + . gene_id "gene1";
chr1 . mRNA 1 100 . + . transcript_id "mRNA1"
""", ':memory:', from_string=True)
items = list(db.all_features())
print(items[0])
# before, was ['"mRNA1'] -- note extra "
assert items[1].attributes['transcript_id'] == ['mRNA1'], items[1].attributes['transcript_id']
def test_attributes():
s = "chr2L FlyBase mRNA 7529 9484 . + . ID=FBtr0300690;Name=CG11023-RC;Parent=FBgn0031208;"
f = feature.feature_from_line(s)
f.keep_order = True
assert str(f) == s, str(f)
|
src/commands/flow/__init__.py | PranjalPansuriya/JavaScriptEnhancements | 690 | 12713509 | <reponame>PranjalPansuriya/JavaScriptEnhancements<filename>src/commands/flow/__init__.py
from .build_flow import JavascriptEnhancementsBuildFlowCommand
from .add_flow_definition import JavascriptEnhancementsAddFlowDefinitionCommand
__all__ = [
"JavascriptEnhancementsBuildFlowCommand",
"JavascriptEnhancementsAddFlowDefinitionCommand"
] |
utils/update_pyarrow.py | ssayyah/notebooks-contrib | 155 | 12713555 | import re
import sys
import os
print('***********************************************************************')
print('Let us check on that pyarrow version...')
print('***********************************************************************')
print()
pyarrow_version = sys.modules["pyarrow"].__version__
f = re.search("0.15.+", pyarrow_version)
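# If the already-loaded pyarrow is not a 0.15.x release, unload it and re-import below.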
if f is None:
for key in list(sys.modules.keys()):
if key.startswith("pyarrow"):
del sys.modules[key]
print(f"unloaded pyarrow {pyarrow_version}")
import pyarrow
pyarrow_version = sys.modules['pyarrow'].__version__
print(f"loaded pyarrow {pyarrow_version}")
print(f"You're now running pyarrow {pyarrow_version} and are good to go!")
del(pyarrow_version)
else:
print(f"You're running pyarrow {pyarrow_version} and are good to go!")
|
python/ql/src/Numerics/Pythagorean.py | vadi2/codeql | 4,036 | 12713559 | <reponame>vadi2/codeql
# We know that a^2 + b^2 = c^2, and wish to use this to compute c
from math import sqrt, hypot
a = 3e154 # a^2 > 1e308
b = 4e154 # b^2 > 1e308
# with these, c = 5e154 which is less that 1e308
def longSideDirect():
return sqrt(a**2 + b**2) # this will overflow
def longSideBuiltin():
return hypot(a, b) # better to use built-in function |
pypy/tool/pytest/fake_pytest/moduledef.py | nanjekyejoannah/pypy | 333 | 12713563 | from pypy.interpreter.mixedmodule import MixedModule
class Module(MixedModule):
applevel_name = 'pytest'
interpleveldefs = {
'raises': 'interp_pytest.pypyraises',
'skip': 'interp_pytest.pypyskip',
'fixture': 'interp_pytest.fake_fixture',
}
appleveldefs = {
'importorskip': 'app_pytest.importorskip',
'mark': 'app_pytest.mark',
}
|
assets/jupyter-gmaps-example.py | mpacer/jupyter.github.io | 143 | 12713571 | <filename>assets/jupyter-gmaps-example.py
import gmaps
import gmaps.datasets
gmaps.configure(api_key="AI...") # Your Google API key
locations = gmaps.datasets.load_dataset("taxi_rides")
fig = gmaps.figure()
# locations could be an array, a dataframe or just a Python iterable
fig.add_layer(gmaps.heatmap_layer(locations))
fig
|
test/test_functorch_lagging_op_db.py | mathandy/functorch | 279 | 12713574 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_utils import TestCase, run_tests
from functorch_lagging_op_db import (
functorch_lagging_op_db,
in_functorch_lagging_op_db,
)
import torch
class TestFuncTorchLaggingOpDb(TestCase):
def test_functorch_lagging_op_db_has_opinfos(self, device):
self.assertEqual(len(functorch_lagging_op_db), len(op_db))
@ops(op_db, allowed_dtypes=(torch.float,))
def test_coverage(self, device, dtype, op):
if in_functorch_lagging_op_db(op):
return
raise RuntimeError(
f"{(op.name, op.variant_test_name)} is in PyTorch's OpInfo db ",
"but is not in functorch's OpInfo db. Please regenerate ",
"test/functorch_lagging_op_db.py and add the new tests to ",
"denylists if necessary.")
instantiate_device_type_tests(
TestFuncTorchLaggingOpDb, globals(), only_for=['cpu'])
if __name__ == '__main__':
run_tests()
|
examples/pybullet/gym/pybullet_envs/minitaur/vision/imagery_client.py | felipeek/bullet3 | 9,136 | 12713583 | <reponame>felipeek/bullet3
"""The imagery client to connect to the camera job."""
from typing import Any, Dict, Sequence, Text
import gin
from pybullet_envs.minitaur.fw_bridge import worker_builder
from pybullet_envs.minitaur.vision import imagery_pb2
from pybullet_envs.minitaur.vision import imagery_utils
from google3.third_party.fluxworks.core.fluxworks.python.genericutil_py import fwassert
from google3.third_party.fluxworks.core.fluxworks.python.genericutil_py import timeutil
_RPC_TIMEOUT = 1 * timeutil.TimeUtil.SEC
_URI_START_CAPTURE = "fwuri://VisionJob/StartCapture"
_URI_STOP_CAPTURE = "fwuri://VisionJob/StopCapture"
_URI_GET_FRAME = "fwuri://VisionJob/GetFrame"
@gin.configurable
class ImageryClient(object):
"""Sends commands and receives states from cameras."""
def __init__(
self,
fw_worker=None,
rpc_timeout_sec=_RPC_TIMEOUT,
ip_address=None,
port=None,
async_mode=False,
start_capture_uri: Text = _URI_START_CAPTURE,
stop_capture_uri: Text = _URI_STOP_CAPTURE,
get_frame_uri: Text = _URI_GET_FRAME,
):
"""Initializes the client.
Args:
fw_worker: A FluxWorks worker instance.
rpc_timeout_sec: The timeout for any RPC calls from this client.
ip_address: The ip address of the camera/vision process. If the vision job
is also instantiated in the same FluxWorks worker, neither the ip address
nor the port is needed.
port: The port of the camera/vision process.
async_mode: Whether the RPC calls in this client are async or synchronous.
Async mode is only required when you have multiple workers communicating
with each other in the same Python process. If worker A calls
worker B's RPC, worker B's RPC tries to get the GIL from its thread, but
the caller (worker A) already holds the GIL, which causes a deadlock
if worker A's calls are synchronous. If worker A is calling its own RPC,
the same GIL can be used, so there is no deadlock and no need
for async mode. Async mode requires context switching and thus is a
bit slower.
start_capture_uri: The FluxWorks URI to start camera capture.
stop_capture_uri: The FluxWorks URI to stop camera capture.
get_frame_uri: The FluxWorks URI to get camera frames.
"""
self._rpc_timeout_sec = rpc_timeout_sec
if fw_worker is None:
fw_worker = worker_builder.GetDefaultWorker()
self._worker = fw_worker
# TODO(tingnan): Use a single address and split the string for FW.
if ip_address is not None:
self._worker.ConnectToWorker(ip_address, port)
self._async_mode = async_mode
self._start_capture_uri = start_capture_uri
self._stop_capture_uri = stop_capture_uri
self._get_frame_uri = get_frame_uri
def _convert_camera_frame_to_image_dict(
self, camera_frame: imagery_pb2.CameraFrame):
"""Converts the camera frame to an image dictionary."""
# Each camera frame might contain multiple image channels, such as rgb and
# depth.
images = {}
for image_name, image_proto in camera_frame.images.items():
image_array = imagery_utils.convert_image_to_array(image_proto)
images[image_name] = image_array
return images
def start_capture(self, run_id: Text = "vision"):
"""Starts the camera capture session.
Args:
run_id: The capture session id. This id will determine the name of the
image logs' sub-directory.
"""
capture_request = imagery_pb2.CaptureRequest()
capture_request.run_id = run_id
fwassert.FwAssert.CheckErrorMessage(
self._worker.CallOnewayProtoRpc(
self._start_capture_uri,
capture_request,
async_mode=self._async_mode))
def stop_capture(self):
"""Concludes the current capture session."""
capture_request = imagery_pb2.CaptureRequest()
fwassert.FwAssert.CheckErrorMessage(
self._worker.CallOnewayProtoRpc(
self._stop_capture_uri,
capture_request,
async_mode=self._async_mode))
def get_camera_images(self) -> Dict[Text, Sequence[Any]]:
"""Gets the latest camera images.
Camera images can only be obtained after self.start_capture() is called.
Returns:
A dictionary of camera frames, with the camera id as the key. Each camera
frame may contain multiple streams. For example, on a realsense camera we
may have "rgb" and "depth" streams, depending on the configuration.
"""
get_frame_request = imagery_pb2.GetFrameRequest()
frame_collection = imagery_pb2.CameraFrameCollection()
fwassert.FwAssert.CheckErrorMessage(
self._worker.CallRoundtripProtoRpc(
self._get_frame_uri,
get_frame_request,
frame_collection,
self._rpc_timeout_sec,
async_mode=self._async_mode))
images_by_camera = {}
for camera_frame in frame_collection.frames:
camera_id = camera_frame.camera_id
# In case we received multiple frames, we append them in the order
# received.
if camera_id in images_by_camera:
images_by_camera[camera_id].append(
self._convert_camera_frame_to_image_dict(camera_frame))
else:
images_by_camera[camera_id] = [
self._convert_camera_frame_to_image_dict(camera_frame)
]
return images_by_camera
|
tools/nntool/quantization/multiplicative/quantizers/rescale_constant_mixin.py | 00-01/gap_sdk | 118 | 12713654 | # Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from graph.types import ConstantInputParameters
from utils.node_id import NodeId
class RescaleConstantMixin():
@classmethod
def rescale_constant(cls, node: ConstantInputParameters, scale, qrecs, dtype=None):
qrec = qrecs[NodeId(node)]
qtype = qrec.out_qs[0]
if (qtype.scale == scale.astype(qtype.scale.dtype) and
(dtype is None or dtype == qtype.dtype)):
return
if node.qtype:
node.value = node.dqvalue
node.qtype = None
qtype.scale = scale
if dtype:
qtype.dtype = dtype
|
python/ffi_navigator/workspace.py | csullivan/ffi-navigator | 148 | 12713657 | <gh_stars>100-1000
import glob
import os
import logging
from . import pattern
from .import_resolver import PyImportResolver
from .dialect import autodetect_dialects
from .util import join_path
def _append_dict(sdict, key, value):
if key in sdict:
sdict[key].append(value)
else:
sdict[key] = [value]
class Workspace:
"""Analysis workspace"""
def __init__(self, logger=None):
# logger
self.logger = logging if logger is None else logger
# states
self.pyimport_resolver = PyImportResolver()
self.key2defs = {}
self.key2refs = {}
self.modpath2exports = {}
self._need_reload = False
# information
self._root_path = None
def initialize(self, root_path):
# By default only update root/src, root/python, root/include
# can add configs later
self.logger.info("root_path: %s", root_path)
self._providers = autodetect_dialects(root_path, self.pyimport_resolver, self.logger)
self._root_path = root_path
self._reload()
def _reload(self):
"""Reload workspace."""
self.key2defs = {}
self.key2refs = {}
self.modpath2exports = {}
scan_dirs = [
os.path.join(self._root_path, "src"),
os.path.join(self._root_path, "include"),
os.path.join(self._root_path, "python")
]
for provider in self._providers:
scan_dirs += provider.get_additional_scan_dirs(self._root_path)
for dirname in scan_dirs:
self.update_dir(dirname)
self._need_reload = False
def _sync_states(self):
"""Synchronize the workspace states."""
if self._need_reload:
self._reload()
def init_pass(self, path, source):
"""Initialization pass"""
mod_path = path[:-3] if path.endswith(".py") else path
self.pyimport_resolver.update_doc(path, source)
for provider in self._providers:
provider.init_pass(path, source)
def update_dir(self, dirname):
self.logger.info("Workspace.update_dir %s start", dirname)
# initialization pass
for path in sorted(glob.glob(join_path(dirname, "**/*.py"), recursive=True)):
self.init_pass(path, open(path).readlines())
# normal scans
for path in sorted(glob.glob(join_path(dirname, "**/*.py"), recursive=True)):
self.update_doc(path, open(path).readlines())
for path in sorted(glob.glob(join_path(dirname, "**/*.h"), recursive=True)):
self.update_doc(path, open(path).readlines())
for path in sorted(glob.glob(join_path(dirname, "**/*.cc"), recursive=True)):
self.update_doc(path, open(path).readlines())
for path in sorted(glob.glob(join_path(dirname, "**/*.cpp"), recursive=True)):
self.update_doc(path, open(path).readlines())
self.logger.info("Workspace.update_dir %s finish", dirname)
def update_doc(self, path, source):
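# Sort every pattern extracted by the dialect providers into the def/ref/export indices.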
for provider in self._providers:
for pt in provider.extract(path, source):
mod_path = path[:-3] if path.endswith(".py") else path
if isinstance(pt, pattern.Def):
_append_dict(self.key2defs, pt.key, pt)
elif isinstance(pt, pattern.Ref):
_append_dict(self.key2refs, pt.key, pt)
elif isinstance(pt, pattern.Export):
_append_dict(self.modpath2exports, mod_path, pt)
else:
self.logger.warn("Ignore pattern %s, path=%s", pt, path)
self.logger.debug("Workspace.update_doc %s", path)
def find_defs(self, mod_path, sym_name):
"""Get definition given python mod path and symbol name"""
self._sync_states()
mod_path, var_name = self.pyimport_resolver.resolve(mod_path, sym_name)
if var_name is None:
return []
export_list = self.modpath2exports.get(mod_path, [])
for item in export_list:
key = item.fvar2key(var_name)
if key in self.key2defs:
return self.key2defs[key]
return []
def _py_find_refs(self, key):
# Step 1: find python ffi module that import the related function
var_targets = set()
mod_targets = {}
for mod_path, exports in self.modpath2exports.items():
for item in exports:
if key.startswith(item.key_prefix):
var_name = item.fkey2var(key)
var_targets.add((mod_path, var_name))
mod_targets[mod_path] = var_name
# Step2: find modules that imports the ffi modules
# construct search terms
search_map = {}
for mod_path, var_list in self.pyimport_resolver._modpath2imports.items():
search_term = []
for var_name in var_list:
new_path, new_var = self.pyimport_resolver.resolve(mod_path, var_name)
if (new_path, new_var) in var_targets:
search_term.append(var_name)
if new_var is None and new_path in mod_targets:
search_term.append(var_name + "." + mod_targets[new_path])
if search_term:
search_map[mod_path] = search_term
for mod_path, var_name in mod_targets.items():
search_map[mod_path] = [var_name]
# Step 3: search the related files
results = []
for mod_path, terms in search_map.items():
path = mod_path if mod_path.endswith(".py") else mod_path + ".py"
if os.path.isfile(path):
res = pattern.search_symbol(open(path).read(), terms)
for x in res:
results.append(pattern.Ref(key=key, path=path, range=x))
return results
def find_refs(self, key):
self._sync_states()
res = self._py_find_refs(key)
res += self.key2refs.get(key, [])
return res
def extract_symbol(self, path, source, pos):
for pt in self._providers:
res = pt.extract_symbol(path, source, pos)
if res:
return res
return pattern.extract_symbol(source, pos)
|
knitpy/tests/__init__.py | srinivas32/knitpy | 213 | 12713694 | <gh_stars>100-1000
# encoding: utf-8
# Copyright (c) <NAME> <<EMAIL>>
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import, print_function, unicode_literals
import codecs
import glob
import os
import tempfile
import inspect
import unittest
import re
from knitpy.knitpy import Knitpy
from knitpy.py3compat import PY3
def _add_test_cases(cls, foldername):
""" Adds one testcase for each input file in the 'test_dir'
You have to build a TestCase class, with a _output_test(self, input_file, output_file)
method and a tests_dir property, which is simply the name of the directory where
the test cases are.
The inputs for the test cases have to have a file ending "*_input.pymd" and the outputs have
to end in "*_output.md".
The `_output_test` method has to convert input and then test for equality with the output.
The generated test methods will be called `test_something` for `something_input.pymd`.
"""
# Put them together to make a list of new test functions.
# One test function for each input file
tests_dir = os.path.join(os.path.dirname(inspect.getfile(cls)), foldername)
test_cases_glob = os.path.join(tests_dir,"*.pymd")
testcases = glob.glob(test_cases_glob)
function = cls._output_test
for input_file in testcases:
# remove ".pymd" from filename
basename = os.path.splitext(os.path.basename(input_file))[0]
output_file = os.path.join(tests_dir, basename+".md")
# the complicated syntax is needed to get the individual input files into the method...
# http://math.andrej.com/2009/04/09/pythons-lambda-is-broken/comment-page-1/
def test_function(self, input_file=input_file, output_file=output_file):
function(self, input_file, output_file)
name = "test_%s_%s" % (foldername, basename)
test_function.__name__ = str(name)
setattr(cls, name, test_function)
class AbstractOutputTestCase(unittest.TestCase):
#<ipython-input-2-fb4ced135814>
_re_ipython_id = re.compile(r"<ipython-input-[0-9]+-[a-z0-9]+>")
def setUp(self):
self.maxDiff = None
self.knitpy = Knitpy()
def _output_test(self, input_file, output_file):
with codecs.open(input_file, 'r', 'UTF-8') as f:
input = f.read()
# some exceptions are different on py2 and py3, so add a way to make both happy...
# the version which was used to develop the tests (currently py2) should stay '.md' and
# the exception should become '.md_pyX'
if PY3:
if os.path.exists(output_file+"_py3"):
output_file = output_file+"_py3"
else:
if os.path.exists(output_file+"_py2"):
output_file = output_file+"_py2"
output = self.knitpy._knit(input, tempfile.gettempdir())
if not os.path.exists(output_file):
_file = output_file+".off"
with codecs.open(_file, 'w', 'UTF-8') as f:
output = self._re_ipython_id.sub("<ipython-input>", output)
output = output.replace(os.linesep, "\n")
f.write(output)
self.fail("Output does not exist, created one as %s. Remove '.off' to enable it.")
with codecs.open(output_file, 'r', 'UTF-8') as f:
exp = f.read()
self.assert_equal_output(exp, output, filename=output_file)
def assert_equal_output(self, expected, received, filename=None):
# output written to a file does not seem to have os.linesep
# handle everything here by replacing the os linesep by a simple \n
expected = expected.replace(os.linesep, "\n").rstrip('\n')
received = received.replace(os.linesep, "\n").rstrip('\n')
# in errors, there is a unique id like <ipython-input-2-fb4ced135814>
received = self._re_ipython_id.sub("<ipython-input>", received)
# this is a hardcoded fix for py3, where there are quotes around the module:
received = received.replace("'NoneExistingModule'", "NoneExistingModule")
if filename and expected != received:
_file = filename+".received"
with codecs.open(_file, 'w', 'UTF-8') as f:
f.write(received)
self.assertEqual(expected, received)
|
release/scripts/presets/camera/Sony_A55.py | rbabari/blender | 365 | 12713702 | import bpy
bpy.context.camera.sensor_width = 23.4
bpy.context.camera.sensor_height = 15.6
bpy.context.camera.sensor_fit = 'HORIZONTAL'
|
classifier.py | pankaj5395/clickbait-classifier | 122 | 12713707 | #!/usr/bin/env python
# coding: utf-8
# Sources for machine learning information / examples:
# http://nbviewer.ipython.org/gist/rjweiss/7158866
# http://scikit-learn.org/stable/modules/feature_extraction.html
# http://www.datarobot.com/blog/classification-with-scikit-learn/
# http://scikit-learn.org/stable/auto_examples/document_classification_20newsgroups.html
# http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html
# TODO(peter):
# - Store trained state
# - Flask frontend / API
# - Incremental training tool (show a title to N people, get consensus on
# 'is-clickbait')
import glob
import json
import numpy
import sys
import nltk
from itertools import imap
from operator import itemgetter
from random import shuffle
from sklearn import metrics
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
# Make this `True` to train on parts of speech instead of words.
TRAIN_ON_PARTS_OF_SPEECH = False
if TRAIN_ON_PARTS_OF_SPEECH:
data_files = glob.glob('./data/pos/*.json')
def title_cleaner(title):
import nltk
return ' '.join(
map(itemgetter(1), # Parts of speech
nltk.pos_tag(nltk.word_tokenize(title.lower()))))
else:
data_files = glob.glob('./data/*.json')
def title_cleaner(title):
return title
# All of these complicated splits are used to ensure that there are both types
# of article titles (clickbait and news) in the training set.
training_proportion = 0.8
training_data = []
testing_data = []
for filename in data_files:
with open(filename, 'rb') as in_f:
dataset = json.load(in_f)
cutoff = int(round(len(dataset) * training_proportion))
training_data.extend(dataset[0:cutoff])
testing_data.extend(dataset[cutoff:])
print 'Loaded %d headlines from %s' % (len(dataset), filename)
def category_cleaner(category):
return 'clickbait' if category else 'news'
article_titles = map(itemgetter('article_title'), training_data)
clickbait_values = map(category_cleaner,
imap(itemgetter('clickbait'), training_data))
test_article_titles = map(itemgetter('article_title'), testing_data)
test_clickbait_values = map(category_cleaner,
imap(itemgetter('clickbait'), testing_data))
X_train = numpy.array(article_titles)
Y_train = numpy.array(clickbait_values)
X_test = numpy.array(test_article_titles)
Y_test = numpy.array(test_clickbait_values)
assert len(X_train) == len(Y_train) > 0
assert len(X_test) == len(Y_test) > 0
vectorizer = TfidfVectorizer(ngram_range=(1, 3),
lowercase=True,
stop_words='english',
strip_accents='unicode',
min_df=2,
norm='l2')
X_train = vectorizer.fit_transform(X_train) # Fit and then transform
nb_classifier = MultinomialNB()
nb_classifier.fit(X_train, Y_train)
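# Use transform (not fit_transform) so the test titles reuse the vocabulary and IDF weights learned from training.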
X_test = vectorizer.transform(X_test)
Y_predicted = nb_classifier.predict(X_test)
print 'Classification report:'
print metrics.classification_report(Y_test, Y_predicted)
print ''
def show_most_informative_features(vectorizer, clf, n=20):
feature_names = vectorizer.get_feature_names()
coefs_with_fns = sorted(zip(clf.coef_[0], feature_names))
top = zip(coefs_with_fns[:n], coefs_with_fns[:-(n + 1):-1])
for (coef_1, fn_1), (coef_2, fn_2) in top:
print "\t%.4f\t%-15s\t\t%.4f\t%-15s" % (coef_1, fn_1, coef_2, fn_2)
show_most_informative_features(vectorizer, nb_classifier, 20)
def classify(title):
predictions = nb_classifier.predict_proba(
vectorizer.transform(numpy.array([title_cleaner(title)])))[0]
probabilities = dict(zip(nb_classifier.classes_, predictions))
return probabilities
|
Connected Component Analysis-Labeling.py/DrawVideo.py | yinedeng/Kryon | 229 | 12713708 | #This code comes from: https://github.com/becomequantum/kryon
from PIL import Image,ImageDraw,ImageFont
import numpy as np
#This code is only about the demo animation.
VideoSize = (1280, 720)
DemoImageSize = (48, 36)
标题位置 = (60, 16)
注释1位置 = (1000, 76)
网格位置 = (32, 76)
比例 = 17
网格颜色 = (230, 230, 230)
网三位置 = (网格位置[0] + 比例 * DemoImageSize[0] + 比例 * 2, 网格位置[1])
网三比例 = 比例 * 2
坐标位置 = (网三位置[0], 网三位置[1] + 网三比例 * 3 + 5)
注释2位置 = (坐标位置[0], 坐标位置[1] + 比例 + 18)
副标题位置 = (注释2位置[0],注释2位置[1] + 350)
UnitTime = 0.1
ScanTime = 0.1
FinishTime = 0.1
frame_list = []
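# Rendered frames are appended here as numpy arrays by AddClip.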
def 微软雅黑(Size):
return ImageFont.truetype("msyh.ttf", Size)
def 方框(x, y, 位置, 比例):
左上 = (位置[0] + x * 比例, 位置[1] + y * 比例)
右下 = (位置[0] + x * 比例 + 比例, 位置[1] + y * 比例 + 比例)
return [左上, 右下]
def 小方框(x, y, 位置, 比例):
左上 = (位置[0] + x * 比例 + 2, 位置[1] + y * 比例 + 2)
右下 = (位置[0] + x * 比例 + 比例 - 2, 位置[1] + y * 比例 + 比例 - 2)
return [左上, 右下]
def 方块(x, y, 位置, 比例):
左上 = (位置[0] + x * 比例 + 1, 位置[1] + y * 比例 + 1)
右下 = (位置[0] + x * 比例 + 比例 - 1, 位置[1] + y * 比例 + 比例 - 1)
return [左上, 右下]
def 完成框(ShapeInfo):
左上 = (网格位置[0] + ShapeInfo[2][0] * 比例 - 1, 网格位置[1] + ShapeInfo[2][1] * 比例 - 1)
右下 = (网格位置[0] + ShapeInfo[1][0] * 比例 + 比例 + 1, 网格位置[1] + ShapeInfo[1][1] * 比例 + 比例 + 1)
return [左上, 右下]
def 反色(color):
rcolor = (255 - color[0], 255 - color[1], 255 - color[2])
return rcolor
def InitBackGround(ExampleImage, Title, subtitle, textcolor = (0, 162, 232), subtitlecolor = "orange", BgColor = (255, 255, 255), FPGA = False):
back_ground_image = Image.new("RGB", VideoSize, BgColor)
画 = ImageDraw.Draw(back_ground_image)
画.text(标题位置,Title, fill = textcolor, font = 微软雅黑(30))
画.text(副标题位置, subtitle, fill = subtitlecolor, font=微软雅黑(25))
for y in range(DemoImageSize[1]):
for x in range(DemoImageSize[0]):
画.rectangle(方框(x, y, 网格位置, 比例), outline = 网格颜色) #draw the big background grid
if not(ExampleImage[y, x ,0] == ExampleImage[y, x ,1] == ExampleImage[y, x ,0] == 255):
画.rectangle(方块(x, y, 网格位置, 比例), fill = "black") #画示例图片中的黑点
ExampleImage[y, x] = [0, 0, 0] #any pixel that is not white becomes black
if x<= 2 and y <= 2 :
画.rectangle(方框(x, y, 网三位置, 网三比例), outline = 网格颜色) #draw the small 3x3 neighborhood grid on the right
if FPGA and (y == 1 or (y == 2 and x == 0)):
画.rectangle(方框(x, y - 1, 网三位置, 网三比例), outline = "blue")
画.rectangle(方框(1, 1, 网三位置, 网三比例), outline = "red")
return back_ground_image
def AddClip(bg_image, x, y, Neighbourhood3x3, LabelColor = None, diff = False, duration = UnitTime, Shape_info = None, 注释1 =" ", 注释2 =" "):
标记 = ImageDraw.Draw(bg_image)
if LabelColor != None :
标记.rectangle(方块(x, y, 网格位置, 比例), fill = LabelColor, outline = None) #draw the label color block
if diff: #周围有两个不同标号点时
标记.rectangle(小方框(x, y , 网格位置, 比例), outline = 反色(LabelColor))
temp_image = bg_image.copy()
画 = ImageDraw.Draw(temp_image)
if Shape_info != None :
标记.rectangle(完成框(Shape_info), outline = "red")
画.rectangle(完成框(Shape_info), outline = "red")
画.rectangle(方框(x, y, 网格位置, 比例), outline = "red") #画小红框
画.text(注释1位置, 注释1, fill = "purple", font = 微软雅黑(25))
画.text(注释2位置, 注释2, fill = LabelColor if (LabelColor != None) else "purple", font = 微软雅黑(25))
画.text(坐标位置, str((x, y)), fill = "black", font = 微软雅黑(25))
for y in range(3):
for x in range(3):
画.rectangle(方块(x, y, 网三位置, 网三比例), fill = tuple(Neighbourhood3x3[y, x]))
[frame_list.append(np.array(temp_image)) for n in range (int(duration / UnitTime))]
|
tools/telemetry/telemetry/unittest/decorators_unittest.py | iplo/Chain | 231 | 12713735 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry import decorators
class Foo(object):
pass
def CreateFooUncached(_):
return Foo()
@decorators.Cache
def CreateFooCached(_):
return Foo()
class DecoratorsUnitTest(unittest.TestCase):
def testCacheDecorator(self):
self.assertNotEquals(CreateFooUncached(1), CreateFooUncached(2))
self.assertNotEquals(CreateFooCached(1), CreateFooCached(2))
self.assertNotEquals(CreateFooUncached(1), CreateFooUncached(1))
self.assertEquals(CreateFooCached(1), CreateFooCached(1))
|
security_monkey/auditors/rds/rds_snapshot.py | boladmin/security_monkey | 4,258 | 12713747 | # Copyright 2017 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.auditors.rds.rds_snapshot
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from security_monkey.auditor import Auditor, Entity
from security_monkey.watchers.rds.rds_snapshot import RDSSnapshot
class RDSSnapshotAuditor(Auditor):
index = RDSSnapshot.index
i_am_singular = RDSSnapshot.i_am_singular
i_am_plural = RDSSnapshot.i_am_plural
def __init__(self, accounts=None, debug=False):
super(RDSSnapshotAuditor, self).__init__(accounts=accounts, debug=debug)
def prep_for_audit(self):
super(RDSSnapshotAuditor, self).prep_for_audit()
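# Build identifier -> account-name lookups for accounts labeled friendly or thirdparty.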
self.FRIENDLY = { account['identifier']: account['name'] for account in self.OBJECT_STORE['ACCOUNTS']['DESCRIPTIONS'] if account['label'] == 'friendly'}
self.THIRDPARTY = { account['identifier']: account['name'] for account in self.OBJECT_STORE['ACCOUNTS']['DESCRIPTIONS'] if account['label'] == 'thirdparty'}
def check_internet_accessible(self, item):
if 'all' in item.config.get('Attributes', {}).get('restore', []):
entity = Entity(category='account', value='all')
self.record_internet_access(item, entity, actions=['restore'])
def check_friendly_cross_account(self, item):
accounts = item.config.get('Attributes', {}).get('restore', [])
for account in accounts:
if account == 'all':
continue
if account in self.FRIENDLY:
entity = Entity(
category='account',
value=account,
account_name=self.FRIENDLY[account],
account_identifier=account)
self.record_friendly_access(item, entity, actions=['restore'])
def check_thirdparty_cross_account(self, item):
accounts = item.config.get('Attributes', {}).get('restore', [])
for account in accounts:
if account == 'all':
continue
if account in self.THIRDPARTY:
entity = Entity(
category='account',
value=account,
account_name=self.THIRDPARTY[account],
account_identifier=account)
self.record_thirdparty_access(item, entity, actions=['restore'])
def check_unknown_cross_account(self, item):
accounts = item.config.get('Attributes', {}).get('restore', [])
for account in accounts:
if account == 'all':
continue
if account not in self.FRIENDLY and account not in self.THIRDPARTY:
entity = Entity(
category='account',
value=account)
self.record_unknown_access(item, entity, actions=['restore'])
|
examples/retrieve_audit_events.py | Manny27nyc/oci-python-sdk | 249 | 12713765 | <gh_stars>100-1000
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# This script retrieves all audit logs across an Oracle Cloud Infrastructure Tenancy.
# for a timespan defined by start_time and end_time.
# This sample script retrieves Audit events for last 5 days.
# This script will work at a tenancy level only.
import datetime
import oci
def get_subscription_regions(identity, tenancy_id):
'''
To retrieve the list of all available regions.
'''
list_of_regions = []
list_regions_response = identity.list_region_subscriptions(tenancy_id)
for r in list_regions_response.data:
list_of_regions.append(r.region_name)
return list_of_regions
def get_compartments(identity, tenancy_id):
'''
Retrieve the list of compartments under the tenancy.
'''
list_compartments_response = oci.pagination.list_call_get_all_results(
identity.list_compartments,
compartment_id=tenancy_id).data
compartment_ocids = [c.id for c in filter(lambda c: c.lifecycle_state == 'ACTIVE', list_compartments_response)]
return compartment_ocids
def get_audit_events(audit, compartment_ocids, start_time, end_time):
'''
Get events iteratively for each compartment defined in 'compartments_ocids'
for the region defined in 'audit'.
This method eagerly loads all audit records in the time range and it does
have performance implications of lot of audit records.
Ideally, the generator method in oci.pagination should be used to lazily
load results.
'''
list_of_audit_events = []
for c in compartment_ocids:
list_events_response = oci.pagination.list_call_get_all_results(
audit.list_events,
compartment_id=c,
start_time=start_time,
end_time=end_time).data
# Results for a compartment 'c' for a region defined
# in 'audit' object.
list_of_audit_events.extend(list_events_response)
return list_of_audit_events
# Setting configuration
# Default path for configuration file is "~/.oci/config"
config = oci.config.from_file()
tenancy_id = config["tenancy"]
# Initiate the client with the locally available config.
identity = oci.identity.IdentityClient(config)
# Timespan defined by variables start_time and end_time(today).
# ListEvents expects timestamps into RFC3339 format.
# For the purposes of sample script, logs of last 5 days.
end_time = datetime.datetime.utcnow()
start_time = end_time + datetime.timedelta(days=-5)
# This array will be used to store the list of available regions.
regions = get_subscription_regions(identity, tenancy_id)
# This array will be used to store the list of compartments in the tenancy.
compartments = get_compartments(identity, tenancy_id)
audit = oci.audit.audit_client.AuditClient(config)
# For each region get the logs for each compartment.
for r in regions:
# Initialize with a region value.
audit.base_client.set_region(r)
# To separate results by region use print here.
audit_events = get_audit_events(
audit,
compartments,
start_time,
end_time)
# Results for a region 'r' for each compartment.
if audit_events:
print(audit_events)
|
test-framework/test-suites/integration/tests/set/test_set_bootaction_args.py | knutsonchris/stacki | 123 | 12713771 | import json
from textwrap import dedent
class TestSetBootactionArgs:
def test_no_args(self, host):
result = host.run('stack set bootaction args')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "action" argument is required
{action} {args=string} [os=string] [type=string]
''')
def test_multiple_args(self, host):
result = host.run('stack set bootaction args test foo')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "action" argument must be unique
{action} {args=string} [os=string] [type=string]
''')
def test_invalid_action(self, host):
result = host.run('stack set bootaction args test type=os args=test')
assert result.rc == 255
assert result.stderr == 'error - action "test" does not exist\n'
def test_no_type(self, host):
result = host.run('stack set bootaction args memtest')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "type" parameter is required
{action} {args=string} [os=string] [type=string]
''')
def test_invalid_type(self, host):
result = host.run('stack set bootaction args memtest type=foo')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "type" parameter must be "os" or "install"
{action} {args=string} [os=string] [type=string]
''')
def test_no_args_parameter(self, host):
result = host.run('stack set bootaction args memtest type=os')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "args" parameter is required
{action} {args=string} [os=string] [type=string]
''')
def test_with_os(self, host):
# Add a test bootaction with an OS
result = host.run('stack add bootaction test type=os os=ubuntu kernel=""')
assert result.rc == 0
# Make sure the action got added
result = host.run('stack list bootaction test output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [
{
'args': None,
'bootaction': 'test',
'kernel': None,
'os': 'ubuntu',
'ramdisk': None,
'type': 'os'
}
]
# Set the bootaction args with a specified os
result = host.run(f'stack set bootaction args test type=os os=ubuntu args="test_args"')
assert result.rc == 0
# Make sure the args got set
result = host.run('stack list bootaction test output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [
{
'args': 'test_args',
'bootaction': 'test',
'kernel': None,
'os': 'ubuntu',
'ramdisk': None,
'type': 'os'
}
]
def test_os_is_null(self, host):
# Set the bootaction args with a null os
result = host.run('stack set bootaction args memtest type=os args="test_args"')
assert result.rc == 0
# Make sure the action got added
result = host.run('stack list bootaction memtest output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [
{
'args': 'test_args',
'bootaction': 'memtest',
'kernel': 'kernel memtest',
'os': None,
'ramdisk': None,
'type': 'os'
}
]
|
observations/r/softbacks.py | hajime9652/observations | 199 | 12713791 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def softbacks(path):
"""Measurements on a Selection of Paperback Books
This is a subset of the `allbacks` data frame which gives measurements
on the volume and weight of 8 paperback books.
This data frame contains the following columns:
volume
a numeric vector giving the book volumes in cubic centimeters
weight
a numeric vector giving the weights in grams
The bookshelf of <NAME>.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `softbacks.csv`.
Returns:
Tuple of np.ndarray `x_train` with 8 rows and 2 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'softbacks.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/DAAG/softbacks.csv'
maybe_download_and_extract(path, url,
save_file_name='softbacks.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
|
python_modules/dagster/dagster/core/storage/sqlite.py | dbatten5/dagster | 4,606 | 12713814 | import os
import sqlite3
from functools import update_wrapper
from dagster import check
from .sql import run_migrations_offline as run_migrations_offline_
from .sql import run_migrations_online as run_migrations_online_
def run_migrations_offline(*args, **kwargs):
try:
run_migrations_offline_(*args, **kwargs)
except sqlite3.DatabaseError as exc:
# This is to deal with concurrent execution -- if this table already exists thanks to a
# race with another process, we are fine and can continue.
if not "table alembic_version already exists" in str(exc):
raise
def run_migrations_online(*args, **kwargs):
try:
run_migrations_online_(*args, **kwargs)
except (sqlite3.DatabaseError, sqlite3.OperationalError) as exc:
# This is to deal with concurrent execution -- if this table already exists thanks to a
# race with another process, we are fine and can continue.
if not "table alembic_version already exists" in str(exc):
raise
update_wrapper(run_migrations_offline, run_migrations_offline_)
update_wrapper(run_migrations_online, run_migrations_online_)
def create_db_conn_string(base_dir, db_name):
check.str_param(base_dir, "base_dir")
check.str_param(db_name, "db_name")
path_components = os.path.abspath(base_dir).split(os.sep)
db_file = "{}.db".format(db_name)
return "sqlite:///{}".format("/".join(path_components + [db_file]))
|
tests/bugs/issue_19/issue_19_no_interpolation.py | jmabry/pyaf | 377 | 12713822 | import numpy as np
import pandas as pd
df = pd.read_csv('tests/bugs/issue_19/issue_19_data_1.csv')
import datetime
def convert_date(x):
y = np.nan
try:
y = datetime.datetime.strptime(str(x), "%Y")
except:
# bad format
pass
return y
df['date'] = df['date'].apply(convert_date)
df_train = df[['date' , 'number']].dropna().reset_index(drop=True)
print(df_train)
import pyaf.ForecastEngine as autof
lEngine = autof.cForecastEngine()
lEngine.train(iInputDS = df_train, iTime = 'date', iSignal = 'number', iHorizon = 7);
print(lEngine.getModelInfo())
# lEngine.standardPlots('outputs/tour')
df_forecast = lEngine.forecast(iInputDS = df_train, iHorizon = 7)
print(df_forecast.columns)
print(df_forecast[['date', 'number_Forecast', 'number_Forecast_Lower_Bound', 'number_Forecast_Upper_Bound']].tail(7))
|
homeassistant/components/tankerkoenig/const.py | domwillcode/home-assistant | 22,481 | 12713826 | """Constants for the tankerkoenig integration."""
DOMAIN = "tankerkoenig"
NAME = "tankerkoenig"
CONF_FUEL_TYPES = "fuel_types"
CONF_STATIONS = "stations"
FUEL_TYPES = ["e5", "e10", "diesel"]
|
d2go/data/transforms/affine.py | wenliangzhao2018/d2go | 687 | 12713858 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import random
from typing import List, Optional, Tuple
import cv2
import numpy as np
import torchvision.transforms as T
from detectron2.config import CfgNode
from detectron2.data.transforms import Transform, TransformGen, NoOpTransform
from .build import TRANSFORM_OP_REGISTRY
class AffineTransform(Transform):
def __init__(
self,
M: np.ndarray,
img_w: int,
img_h: int,
flags: Optional[int] = None,
border_mode: Optional[int] = None,
is_inversed_M: bool = False,
):
"""
Args:
will transform img according to affine transform M
"""
super().__init__()
self._set_attributes(locals())
self.warp_kwargs = {}
if flags is not None:
self.warp_kwargs["flags"] = flags
if border_mode is not None:
self.warp_kwargs["borderMode"] = border_mode
def apply_image(self, img: np.ndarray) -> np.ndarray:
M = self.M
if self.is_inversed_M:
M = M[:2]
img = cv2.warpAffine(
img,
M,
            (int(self.img_w), int(self.img_h)),
**self.warp_kwargs,
)
return img
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
# Add row of ones to enable matrix multiplication
coords = coords.T
ones = np.ones((1, coords.shape[1]))
coords = np.vstack((coords, ones))
M = self.M
if self.is_inversed_M:
M = np.linalg.inv(M)
coords = (M @ coords)[:2, :].T
return coords
class RandomPivotScaling(TransformGen):
"""
Uniformly pick a random pivot point inside image frame, scaling the image
around the pivot point using the scale factor sampled from a list of
given scales. The pivot point's location is unchanged after the transform.
Arguments:
scales: List[float]: each element can be any positive float number,
when larger than 1.0 objects become larger after transform
and vice versa.
"""
    def __init__(self, scales: List[float]):
super().__init__()
self._init(locals())
self.scales = scales
def get_transform(self, img: np.ndarray) -> Transform:
img_h, img_w, _ = img.shape
img_h = float(img_h)
img_w = float(img_w)
pivot_y = self._rand_range(0.0, img_h)
pivot_x = self._rand_range(0.0, img_w)
def _interp(p1, p2, alpha):
dx = p2[0] - p1[0]
dy = p2[1] - p1[1]
p_x = p1[0] + alpha * dx
p_y = p1[1] + alpha * dy
return (p_x, p_y)
scale = np.random.choice(self.scales)
lt = (0.0, 0.0)
rb = (img_w, img_h)
pivot = (pivot_x, pivot_y)
pts1 = np.float32([lt, pivot, rb])
pts2 = np.float32(
[_interp(pivot, lt, scale), pivot, _interp(pivot, rb, scale)],
)
M = cv2.getAffineTransform(pts1, pts2)
return AffineTransform(M, img_w, img_h)
class RandomAffine(TransformGen):
"""
    Apply random affine transform to the image given
probabilities and ranges in each dimension.
"""
def __init__(
self,
prob: float = 0.5,
angle_range: Tuple[float, float] = (-90, 90),
translation_range: Tuple[float, float] = (0, 0),
scale_range: Tuple[float, float] = (1.0, 1.0),
shear_range: Tuple[float, float] = (0, 0),
):
"""
Args:
prob (float): probability of applying transform.
angle_range (tuple of integers): min/max rotation angle in degrees
between -180 and 180.
translation_range (tuple of integers): min/max translation
(post re-centered rotation).
scale_range (tuple of floats): min/max scale (post re-centered rotation).
            shear_range (tuple of integers): min/max shear angle value in degrees
between -180 to 180.
"""
super().__init__()
# Turn all locals into member variables.
self._init(locals())
def get_transform(self, img: np.ndarray) -> Transform:
im_h, im_w = img.shape[:2]
max_size = max(im_w, im_h)
center = [im_w / 2, im_h / 2]
angle = random.uniform(self.angle_range[0], self.angle_range[1])
translation = [
random.uniform(self.translation_range[0], self.translation_range[1]),
random.uniform(self.translation_range[0], self.translation_range[1]),
]
scale = random.uniform(self.scale_range[0], self.scale_range[1])
shear = [
random.uniform(self.shear_range[0], self.shear_range[1]),
random.uniform(self.shear_range[0], self.shear_range[1]),
]
dummy_translation = [0.0, 0.0]
dummy_scale = 1.0
M_inv = T.functional._get_inverse_affine_matrix(
center, angle, dummy_translation, dummy_scale, shear
)
M_inv.extend([0.0, 0.0, 1.0])
M_inv = np.array(M_inv).reshape((3, 3))
M = np.linalg.inv(M_inv)
# Center in output patch
img_corners = np.array(
[
[0, 0, im_w, im_w],
[0, im_h, 0, im_h],
[1, 1, 1, 1],
]
)
transformed_corners = M @ img_corners
x_min = np.amin(transformed_corners[0])
x_max = np.amax(transformed_corners[0])
x_range = np.ceil(x_max - x_min)
y_min = np.amin(transformed_corners[1])
y_max = np.amax(transformed_corners[1])
y_range = np.ceil(y_max - y_min)
# Apply translation and scale after centering in output patch
translation_adjustment = [(max_size - im_w) / 2, (max_size - im_h) / 2]
translation[0] += translation_adjustment[0]
translation[1] += translation_adjustment[1]
scale_adjustment = min(max_size / x_range, max_size / y_range)
scale *= scale_adjustment
M_inv = T.functional._get_inverse_affine_matrix(
center, angle, translation, scale, shear
)
# Convert to Numpy matrix so it can be inverted
M_inv.extend([0.0, 0.0, 1.0])
M_inv = np.array(M_inv).reshape((3, 3))
M = np.linalg.inv(M_inv)
do = self._rand_range() < self.prob
if do:
return AffineTransform(
M_inv,
max_size,
max_size,
flags=cv2.WARP_INVERSE_MAP + cv2.INTER_LINEAR,
border_mode=cv2.BORDER_REPLICATE,
is_inversed_M=True,
)
else:
return NoOpTransform()
# example repr: "RandomPivotScalingOp::[1.0, 0.75, 0.5]"
@TRANSFORM_OP_REGISTRY.register()
def RandomPivotScalingOp(cfg: CfgNode, arg_str: str, is_train: bool) -> List[Transform]:
assert is_train
scales = json.loads(arg_str)
assert isinstance(scales, list)
assert all(isinstance(scale, (float, int)) for scale in scales)
return [RandomPivotScaling(scales=scales)]
@TRANSFORM_OP_REGISTRY.register()
def RandomAffineOp(cfg: CfgNode, arg_str: str, is_train: bool) -> List[Transform]:
assert is_train
kwargs = json.loads(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [RandomAffine(**kwargs)]
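# Analogous example repr for RandomAffineOp, assuming the same "Name::json-arg" convention
# shown above for RandomPivotScalingOp (the parsed kwargs are passed straight to RandomAffine):
#   'RandomAffineOp::{"prob": 1.0, "angle_range": [-30, 30]}'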
|
TrackingTools/MaterialEffects/python/MaterialPropagatorParabolicMf_cff.py | ckamtsikis/cmssw | 852 | 12713862 | import FWCore.ParameterSet.Config as cms
from TrackingTools.MaterialEffects.MaterialPropagator_cfi import MaterialPropagator
MaterialPropagatorParabolicMF = MaterialPropagator.clone(
SimpleMagneticField = 'ParabolicMf',
ComponentName = 'PropagatorWithMaterialParabolicMf'
)
from TrackingTools.MaterialEffects.OppositeMaterialPropagator_cfi import OppositeMaterialPropagator
OppositeMaterialPropagatorParabolicMF = OppositeMaterialPropagator.clone(
SimpleMagneticField = 'ParabolicMf',
ComponentName = 'PropagatorWithMaterialParabolicMfOpposite'
)
|
koku/reporting/migrations/0194_awscostentrylineitemdailysummary_savingsplan_effective_cost.py | cgoodfred/koku | 157 | 12713866 | # Generated by Django 3.1.12 on 2021-09-09 14:23
import pkgutil
from django.db import connection
from django.db import migrations
from django.db import models
def add_aws_views(apps, schema_editor):
"""Create the AWS Materialized views from files."""
version = "_20210910"
views = {
f"sql/views/{version}/reporting_aws_compute_summary": ["", "_by_account", "_by_region", "_by_service"],
f"sql/views/{version}/reporting_aws_cost_summary": ["", "_by_account", "_by_region", "_by_service"],
f"sql/views/{version}/reporting_aws_storage_summary": ["", "_by_account", "_by_region", "_by_service"],
f"sql/views/{version}/reporting_aws_database_summary": [""],
f"sql/views/{version}/reporting_aws_network_summary": [""],
}
for base_path, view_tuple in views.items():
for view in view_tuple:
view_sql = pkgutil.get_data("reporting.provider.aws", f"{base_path}{view}{version}.sql")
view_sql = view_sql.decode("utf-8")
with connection.cursor() as cursor:
cursor.execute(view_sql)
class Migration(migrations.Migration):
dependencies = [("reporting", "0193_gcptopology")]
operations = [
migrations.AddField(
model_name="awscostentrylineitemdailysummary",
name="savingsplan_effective_cost",
field=models.DecimalField(decimal_places=9, max_digits=24, null=True),
),
migrations.RunPython(add_aws_views),
]
|
ch3/greedy/loowater_UVa11292.py | lyskevin/cpbook-code | 1,441 | 12713881 | def main():
while True:
n,m = map(int, input().split())
if n==0 and m==0:
break
D = [9] * n
H = [0] * m
for d in range(n):
D[d] = int(input())
for k in range(m):
H[k] = int(input())
D.sort() # sorting is an important
H.sort() # pre-processing step
gold = 0
d = 0
k = 0 # both arrays are sorted
while d < n and k < m: # while not done yet
while k < m and D[d] > H[k]:
k += 1 # find required knight k
if k == m:
break # loowater is doomed :S
gold += H[k] # pay this amount of gold
d += 1 # next dragon
k += 1 # next knight
if d == n:
print("{}".format(gold)) # all dragons are chopped
else:
print("Loowater is doomed!")
main()
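# A small worked example of the greedy pairing above (not part of the judge input/output):
# with dragon head diameters D = [5, 4] and knight heights H = [7, 8, 4], the sorted lists
# are [4, 5] and [4, 7, 8]; head 4 is chopped by the knight of height 4 and head 5 by the
# knight of height 7, so the minimum gold paid is 4 + 7 = 11.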
|
heudiconv/queue.py | fhopp/heudiconv | 167 | 12713920 | import subprocess
import sys
import os
import logging
from .utils import which
lgr = logging.getLogger(__name__)
def queue_conversion(queue, iterarg, iterables, queue_args=None):
"""
Write out conversion arguments to file and submit to a job scheduler.
Parses `sys.argv` for heudiconv arguments.
Parameters
----------
queue: string
Batch scheduler to use
iterarg: str
Multi-argument to index (`subjects` OR `files`)
iterables: int
Number of `iterarg` arguments
queue_args: string (optional)
Additional queue arguments for job submission
"""
SUPPORTED_QUEUES = {'SLURM': 'sbatch'}
if queue not in SUPPORTED_QUEUES:
raise NotImplementedError("Queuing with %s is not supported", queue)
for i in range(iterables):
args = clean_args(sys.argv[1:], iterarg, i)
# make arguments executable
heudiconv_exec = which("heudiconv") or "heudiconv"
args.insert(0, heudiconv_exec)
convertcmd = " ".join(args)
# will overwrite across subjects
queue_file = os.path.abspath('heudiconv-%s.sh' % queue)
with open(queue_file, 'wt') as fp:
fp.write("#!/bin/bash\n")
if queue_args:
for qarg in queue_args.split():
fp.write("#SBATCH %s\n" % qarg)
fp.write(convertcmd + "\n")
cmd = [SUPPORTED_QUEUES[queue], queue_file]
proc = subprocess.call(cmd)
lgr.info("Submitted %d jobs", iterables)
def clean_args(hargs, iterarg, iteridx):
"""
Filters arguments for batch submission.
Parameters
----------
hargs: list
Command-line arguments
iterarg: str
Multi-argument to index (`subjects` OR `files`)
iteridx: int
`iterarg` index to submit
Returns
-------
cmdargs : list
Filtered arguments for batch submission
Example
--------
>>> from heudiconv.queue import clean_args
>>> cmd = ['heudiconv', '-d', '/some/{subject}/path',
... '-q', 'SLURM',
... '-s', 'sub-1', 'sub-2', 'sub-3', 'sub-4']
>>> clean_args(cmd, 'subjects', 0)
['heudiconv', '-d', '/some/{subject}/path', '-s', 'sub-1']
"""
if iterarg == "subjects":
iterarg = ['-s', '--subjects']
elif iterarg == "files":
iterarg = ['--files']
else:
raise ValueError("Cannot index %s" % iterarg)
# remove these or cause an infinite loop
queue_args = ['-q', '--queue', '--queue-args']
# control variables for multi-argument parsing
is_iterarg = False
itercount = 0
indicies = []
cmdargs = hargs[:]
for i, arg in enumerate(hargs):
if arg.startswith('-') and is_iterarg:
# moving on to another argument
is_iterarg = False
if is_iterarg:
if iteridx != itercount:
indicies.append(i)
itercount += 1
if arg in iterarg:
is_iterarg = True
if arg in queue_args:
indicies.extend([i, i+1])
for j in sorted(indicies, reverse=True):
del cmdargs[j]
return cmdargs
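# Rough sketch of what queue_conversion() submits per subject, assuming a command line like
# the clean_args() doctest above plus `--queue SLURM --queue-args "--time=01:00:00"`
# (hypothetical values):
#
#   heudiconv-SLURM.sh:
#       #!/bin/bash
#       #SBATCH --time=01:00:00
#       heudiconv -d /some/{subject}/path -s sub-1
#
#   submitted with: sbatch heudiconv-SLURM.sh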
|
tests/conftest.py | teaglebuilt/bocadillo | 434 | 12713933 | import typing
import pytest
from bocadillo import App, configure, create_client, Templates, settings
@pytest.fixture(name="raw_app")
def fixture_raw_app(request) -> App:
settings._clear()
return App()
@pytest.fixture(name="app")
def fixture_app(raw_app: App) -> App:
configure(raw_app)
return raw_app
@pytest.fixture
def client(app):
return create_client(app)
@pytest.fixture(name="templates")
def fixture_templates():
return Templates()
class TemplateWrapper(typing.NamedTuple):
name: str
context: dict
rendered: str
root: str
def create_template(
templates: Templates, tmpdir_factory, dirname: str
) -> TemplateWrapper:
templates_dir = tmpdir_factory.mktemp(dirname)
template = templates_dir.join("hello.html")
template.write("<h1>Hello, {{ name }}!</h1>")
templates.directory = str(templates_dir)
return TemplateWrapper(
name="hello.html",
context={"name": "Bocadillo"},
rendered="<h1>Hello, Bocadillo!</h1>",
root=str(templates_dir),
)
@pytest.fixture
def template_file(templates: Templates, tmpdir_factory) -> TemplateWrapper:
return create_template(templates, tmpdir_factory, dirname="templates")
|
tools/pot/tools/frame_extractor/run.py | pazamelin/openvino | 2,406 | 12713976 | # Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import sys
from argparse import ArgumentParser
import extractor
def parse_args(argv):
"""
Parse and process arguments for frames-extractor tool
"""
parser = ArgumentParser(description='Frames-extractor toolkit', allow_abbrev=False)
parser.add_argument(
'-v',
'--video',
help='Full path to video file',
required=True)
parser.add_argument(
'-o',
'--output_dir',
help='Directory to save valuable frames from video.',
required=True)
parser.add_argument(
'-f',
'--frame_step',
type=int,
help='Read frames from video with step',
default=1,
required=False
)
parser.add_argument(
'-e',
'--ext',
type=str,
help='Extension of images in resulting dataset',
choices=['jpg', 'png'],
default='png',
required=False
)
parser.add_argument(
'-s',
'--dataset_size',
type=int,
help='Number of frames to save from video as dataset. '
             'Should be less than the number of frames in the video',
default=None,
required=False)
args = parser.parse_args(args=argv)
return args.video, args.output_dir, args.dataset_size, args.frame_step
if __name__ == '__main__':
extractor.extract_frames_and_make_dataset(*parse_args(sys.argv[1:]))
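# Example invocation (hypothetical paths), based on the options defined above:
#   python run.py -v input.mp4 -o frames_dir -f 10 -e jpg -s 500
# i.e. step through input.mp4 every 10 frames and build a 500-image .jpg dataset in frames_dir.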
|
tests/test_provider_ellisdon_oss_azuredevops.py | mjuenema/python-terrascript | 507 | 12713988 | # tests/test_provider_ellisdon-oss_azuredevops.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:13:00 UTC)
def test_provider_import():
import terrascript.provider.ellisdon_oss.azuredevops
def test_resource_import():
from terrascript.resource.ellisdon_oss.azuredevops import (
azuredevops_build_definition,
)
from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_extension
from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_project
from terrascript.resource.ellisdon_oss.azuredevops import (
azuredevops_release_definition,
)
from terrascript.resource.ellisdon_oss.azuredevops import (
azuredevops_release_environment,
)
from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_release_task
from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_release_tasks
from terrascript.resource.ellisdon_oss.azuredevops import (
azuredevops_release_variables,
)
from terrascript.resource.ellisdon_oss.azuredevops import (
azuredevops_service_endpoint,
)
from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_service_hook
from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_task_group
from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_variable_group
def test_datasource_import():
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_agent_queue
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_build_definition
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_group
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_project
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_release_definition
from terrascript.data.ellisdon_oss.azuredevops import (
azuredevops_release_definition_environments,
)
from terrascript.data.ellisdon_oss.azuredevops import (
azuredevops_release_definitions,
)
from terrascript.data.ellisdon_oss.azuredevops import (
azuredevops_release_environment,
)
from terrascript.data.ellisdon_oss.azuredevops import (
azuredevops_release_stage_variables,
)
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_release_tasks
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_service_endpoint
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_source_repository
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_task_group
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_user
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_variable_group
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_variable_groups
from terrascript.data.ellisdon_oss.azuredevops import azuredevops_workflow_task
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.ellisdon_oss.azuredevops
#
# t = terrascript.provider.ellisdon_oss.azuredevops.azuredevops()
# s = str(t)
#
# assert 'https://github.com/ellisdon-oss/terraform-provider-azuredevops' in s
# assert '0.0.2' in s
|
attacks/rsa/wiener_attack_lattice.py | jvdsn/crypto-attacks | 139 | 12713998 | import os
import sys
from math import isqrt
from sage.all import ZZ
from sage.all import matrix
path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(os.path.abspath(__file__)))))
if sys.path[1] != path:
sys.path.insert(1, path)
from attacks.factorization import known_phi
from shared.lattice import shortest_vectors
def attack(N, e):
"""
Recovers the prime factors of a modulus and the private exponent if the private exponent is too small.
More information: <NAME>., "Public-Key Cryptanalysis"
:param N: the modulus
:param e: the public exponent
:return: a tuple containing the prime factors of the modulus and the private exponent, or None if the private exponent was not found
"""
s = isqrt(N)
L = matrix(ZZ, [[e, s], [N, 0]])
for v in shortest_vectors(L):
d = v[1] // s
k = abs(v[0] - e * d) // N
d = abs(d)
if pow(pow(2, e, N), d, N) != 2:
continue
phi = (e * d - 1) // k
factors = known_phi.factorize(N, phi)
if factors:
return *factors, int(d)
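# Hedged usage sketch -- N and e are placeholders for a public key whose private exponent is
# small enough for the lattice to expose it; no concrete parameters are implied here:
#   result = attack(N, e)
#   if result is not None:
#       p, q, d = result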
|
lib/airtable/__init__.py | goztrk/django-htk | 206 | 12714040 | # HTK Imports
from htk.lib.airtable.api import AirtableAPI
|
cacreader/swig-4.0.2/Examples/test-suite/python/typedef_class_runme.py | kyletanyag/LL-Smartcard | 1,031 | 12714050 | import typedef_class
a = typedef_class.RealA()
a.a = 3
b = typedef_class.B()
b.testA(a)
|
Python/FiniteStateParser/struct_fs_parser.py | Gjacquenot/training-material | 115 | 12714061 | #!/usr/bin/env python
"""Module containing a parser for a simple block enocded data format"""
import re
class ParseError(Exception):
"""Exception thrown when a parse error occurs"""
def __init__(self, msg):
Exception.__init__(self)
self.msg = msg
def __str__(self):
return repr(self.msg)
# last_match will hold the match object that is produced in the test
# functions to be used in the rest of the parser
last_match = None
# compile the regular expressions to be used for performance reasons
comment_pattern = re.compile(r"\s*#.*")
block_begin_pattern = re.compile(r"\s*begin\s+(\w+)")
block_end_pattern = re.compile(r"\s*end\s+(\w+)")
# current_block holds the name of the block that is being parsed, its
# value is None when outside a block, note that it doubles as state
# variable
current_block = None
# dictionary to hold the blocks' content
block_content = {}
def parse(file_name):
"""function that takes a file name, and returns a dictionary of blocks,
the keys are the names of the blocks, the values are lists of their
content"""
global current_block, block_content
def filter_line(line):
global comment_pattern
return comment_pattern.sub("", line.strip())
def is_blank(line):
return len(line) == 0
def is_block_begin(line):
global last_match, block_begin_pattern
last_match = block_begin_pattern.match(line)
return last_match is not None
def is_block_end(line):
global last_match, block_end_pattern
last_match = block_end_pattern.match(line)
return last_match is not None
def is_in_block():
global current_block
return current_block is not None
def set_current_block():
global current_block, block_content
current_block = last_match.group(1)
block_content[current_block] = []
def check_is_not_in_block():
global current_block, last_match
if is_in_block():
msg = "block {0} is not close when opening {1}".format(
current_block, last_match.group(1))
raise ParseError(msg)
def check_end_matches_begin():
global current_block, last_match
if last_match.group(1) != current_block:
msg = "block %s is closed with %s" % \
(last_match.group(1), current_block)
raise ParseError(msg)
def store_data(line):
global block_content, current_block
block_content[current_block].append(line)
def sort_block_data():
global block_content
for key in list(block_content.keys()):
block_content[key].sort()
# open file, specified on command line
block_file = open(file_name, 'r')
# iterate over the lines in the file and process
for line in block_file:
line = filter_line(line)
if not is_blank(line):
if is_block_begin(line):
check_is_not_in_block()
set_current_block()
elif is_block_end(line):
check_end_matches_begin()
current_block = None
elif is_in_block():
store_data(line)
# close the file
block_file.close()
sort_block_data()
return block_content
def main():
"""main program to be executed when this module is used as a script"""
import sys
# check whether at least one command line argument has been passed
if len(sys.argv) == 1:
print("### error: no file specified")
exit(1)
content = parse(sys.argv[1])
for block_name in list(content.keys()):
for value in content[block_name]:
print("%s: '%s'" % (block_name, value))
if __name__ == "__main__":
main()
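# Example of the block-encoded format this parser accepts (hypothetical input file):
#
#   # a comment, stripped by filter_line()
#   begin alpha
#   second line
#   first line
#   end alpha
#
# parse() would return {'alpha': ['first line', 'second line']}; note that each block's
# content is sorted by sort_block_data().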
|
securityheaders/models/csp/cspdirective.py | th3cyb3rc0p/securityheaders | 151 | 12714062 | from securityheaders.models import Directive
from securityheaders.models.annotations import requireddirectives, requireddirectivevalues
@requireddirectivevalues('form-action','frame-ancestors','report-uri','report-to','require-sri-for','plugin-types','worker-src','style-src','object-src','manifest-src','frame-src','default-src','connect-src','child-src')
class CSPDirective(Directive):
# Fetch directives
CHILD_SRC = 'child-src', 'childSrc'
CONNECT_SRC = 'connect-src', 'connectSrc'
DEFAULT_SRC = 'default-src', 'defaultSrc'
FONT_SRC = 'font-src', 'fontSrc'
FRAME_SRC = 'frame-src', 'frameSrc'
IMG_SRC = 'img-src', 'imgSrc'
MEDIA_SRC = 'media-src', 'mediaSrc'
OBJECT_SRC = 'object-src', 'objectSrc'
SCRIPT_SRC = 'script-src', 'scriptSrc'
STYLE_SRC = 'style-src', 'styleSrc'
MANIFEST_SRC = 'manifest-src', 'manifestSrc'
WORKER_SRC = 'worker-src', 'workerSrc'
# Document directives
BASE_URI = 'base-uri','baseUri'
PLUGIN_TYPES = 'plugin-types','pluginTypes'
SANDBOX = 'sandbox','sandBox'
DISOWN_OPENER = 'disown-opener','disownOpener'
# Navigation directives
FORM_ACTION = 'form-action','formAction'
FRAME_ANCESTORS = 'frame-ancestors','frameAncestors'
# Reporting directives
REPORT_TO = 'report-to','reportTo'
REPORT_URI = 'report-uri','reportUri'
# Other directives
BLOCK_ALL_MIXED_CONTENT = 'block-all-mixed-content','blockAllMixedContent'
UPGRADE_INSECURE_REQUESTS = 'upgrade-insecure-requests','upgradeInsecureRequests'
REFLECTED_XSS = 'reflected-xss','reflectedXss'
REFERRER = 'referrer'
REQUIRE_SRI_FOR = 'require-sri-for','requireSriFor'
@classmethod
def isDirective(cls, directive):
""" Checks whether a given string is a directive
Args:
directive (str): the string to validate
"""
if isinstance(directive, CSPDirective):
return True
return any(directive.lower() == item for item in list(cls.keys()))
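# Minimal usage sketch, assuming the Directive base class exposes the directive names
# defined above through keys():
#   CSPDirective.isDirective('script-src') # expected to be True
#   CSPDirective.isDirective('not-a-directive') # expected to be False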
|
tests/python/view_layer/test_scene_delete.py | rbabari/blender | 365 | 12714167 | # ############################################################
# Importing - Same For All Render Layer Tests
# ############################################################
import unittest
import os
import sys
from view_layer_common import *
# ############################################################
# Testing
# ############################################################
class UnitTesting(ViewLayerTesting):
def test_scene_delete(self):
"""
See if a scene can be properly deleted
"""
import bpy
scene = bpy.context.scene
bpy.data.scenes.new('New')
bpy.data.scenes.remove(scene)
# ############################################################
# Main - Same For All Render Layer Tests
# ############################################################
if __name__ == '__main__':
UnitTesting._extra_arguments = setup_extra_arguments(__file__)
unittest.main()
|
locations/spiders/tropical_smoothie_cafe.py | nbeecher/alltheplaces | 297 | 12714176 | import json
import re
import scrapy
from scrapy.selector import Selector
from locations.hours import OpeningHours
from locations.items import GeojsonPointItem
class TropicalSmoothieCafe(scrapy.Spider):
name = "tropical_smoothie_cafe"
item_attributes = {"brand": "Tropical Smoothie Cafe", "brand_wikidata": "Q7845817"}
allowed_domains = ["locations.tropicalsmoothiecafe.com"]
start_urls = ("https://locations.tropicalsmoothiecafe.com/sitemap.xml",)
def parse(self, response):
xml = Selector(response)
xml.remove_namespaces()
urls = xml.xpath("//loc/text()").extract()
urls = [url.strip() for url in urls]
for url in urls:
path = scrapy.utils.url.parse_url(url).path
if re.match(r"^/.*/.*/.*$", path):
yield scrapy.Request(url, callback=self.parse_location)
def parse_location(self, response):
hours_spec = response.css(".Cafe-hours").xpath(".//@data-days").get()
hours = self.parse_hours(json.loads(hours_spec)) if hours_spec else None
ref = (
response.css(
"""
a.Header-orderOnline[href^="https://ordernow.tropicalsmoothie.com"],
a.Header-orderOnline[href^="https://order.tropicalsmoothie.com"],
a.Header-orderOnline[href^="https://order.tropicalsmoothiecafe.com"]
"""
)
.attrib["href"]
.split("/")[-1]
)
properties = {
"name": response.xpath('//h1[@itemprop="name"]/text()').get(),
"extras": {"branch": response.css("div.Hero-city").xpath("./text()").get()},
"addr_full": response.xpath(
'//*[@itemprop="streetAddress"]/@content'
).get(),
"city": response.xpath('//*[@itemprop="addressLocality"]/@content').get(),
"state": response.xpath('//*[@itemprop="addressRegion"]/text()').get(),
"postcode": response.xpath('//*[@itemprop="postalCode"]/text()').get(),
"phone": response.xpath('//*[@itemprop="telephone"]/text()').get(),
"website": response.url,
"opening_hours": hours,
"ref": ref,
"lat": response.xpath('//*[@itemprop="latitude"]/@content').get(),
"lon": response.xpath('//*[@itemprop="longitude"]/@content').get(),
}
yield GeojsonPointItem(**properties)
def parse_hours(self, hours_json):
opening_hours = OpeningHours()
for date in hours_json:
day = date["day"][:2].capitalize()
for interval in date["intervals"]:
start_hr, start_min = divmod(interval["start"], 100)
end_hr, end_min = divmod(interval["end"], 100)
opening_hours.add_range(
day, f"{start_hr}:{start_min}", f"{end_hr}:{end_min}"
)
return opening_hours.as_opening_hours()
|
hordak/migrations/0002_check_leg_trigger_20160903_1149.py | PetrDlouhy/django-hordak | 187 | 12714178 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-03 11:49
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("hordak", "0001_initial")]
operations = [
migrations.RunSQL(
"""
CREATE OR REPLACE FUNCTION check_leg()
RETURNS trigger AS
$$
DECLARE
transaction_sum DECIMAL(13, 2);
BEGIN
IF (TG_OP = 'DELETE') THEN
SELECT SUM(amount) INTO transaction_sum FROM hordak_leg WHERE transaction_id = OLD.transaction_id;
ELSE
SELECT SUM(amount) INTO transaction_sum FROM hordak_leg WHERE transaction_id = NEW.transaction_id;
END IF;
IF transaction_sum != 0 THEN
RAISE EXCEPTION 'Sum of transaction amounts must be 0';
END IF;
RETURN NEW;
END;
$$
LANGUAGE plpgsql
""",
"DROP FUNCTION check_leg()",
),
migrations.RunSQL(
"""
CREATE CONSTRAINT TRIGGER check_leg_trigger
AFTER INSERT OR UPDATE OR DELETE ON hordak_leg
DEFERRABLE INITIALLY DEFERRED
FOR EACH ROW EXECUTE PROCEDURE check_leg();
""",
"DROP TRIGGER IF EXISTS check_leg_trigger ON hordak_leg",
),
]
|
L1TriggerConfig/L1GtConfigProducers/test/L1GtVhdlWriter_cfg.py | ckamtsikis/cmssw | 852 | 12714189 | # cfg file to write the VHDL templates
import FWCore.ParameterSet.Config as cms
# process
process = cms.Process("L1GtVhdlWriterTest")
# number of events and source
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource")
# configuration
process.load("L1TriggerConfig.L1GtConfigProducers.L1GtConfig_cff")
process.load("L1TriggerConfig.L1GtConfigProducers.Luminosity.lumi1x1032.L1Menu2007_cff")
#process.load("L1TriggerConfig.L1GtConfigProducers.Luminosity.lumi1030.L1Menu2008_2E30_cff")
#process.load("L1TriggerConfig.L1GtConfigProducers.Luminosity.lumi1031.L1Menu2008_2E31_cff")
#process.load("L1TriggerConfig.L1GtConfigProducers.Luminosity.lumi1x1032.L1MenuTestCondCorrelation_cff")
process.load("L1TriggerConfig.L1GtConfigProducers.l1GtVhdlWriter_cfi")
# path to be run
process.p = cms.Path(process.l1GtVhdlWriter)
# services
# Message Logger
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.debugModules = ['l1GtVhdlWriterTest']
process.MessageLogger.cout = cms.untracked.PSet(
INFO = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
),
threshold = cms.untracked.string('DEBUG'), ## DEBUG
DEBUG = cms.untracked.PSet( ## DEBUG, all messages
limit = cms.untracked.int32(-1)
)
)
|
docs_src/behind_a_proxy/tutorial001.py | Aryabhata-Rootspring/fastapi | 53,007 | 12714197 | from fastapi import FastAPI, Request
app = FastAPI()
@app.get("/app")
def read_main(request: Request):
return {"message": "Hello World", "root_path": request.scope.get("root_path")}
|
dnsdb_common/library/exception.py | baiyongjie/open_dnsdb | 378 | 12714214 | # -*- coding: utf-8 -*-
BADREQUEST = 400
UNAUTHORIZED = 401
FORBIDDEN = 403
GONE = 410
TOOMANYREQUESTS = 412
class DnsdbException(Exception):
def __init__(self, message, errcode=500, detail=None, msg_ch=u''):
self.message = message
self.errcode = errcode
self.detail = detail
self.msg_ch = msg_ch
super(DnsdbException, self).__init__()
def __str__(self):
return self.message
def json(self):
return dict(code=self.errcode, why=self.message)
class Unauthorized(DnsdbException):
def __init__(self, message='Unauthorized', errcode=UNAUTHORIZED, detail=None, msg_ch=u''):
super(Unauthorized, self).__init__(message, errcode, detail, msg_ch)
class Forbidden(DnsdbException):
def __init__(self, message='Forbidden', errcode=FORBIDDEN, detail=None, msg_ch=u''):
super(Forbidden, self).__init__(message, errcode, detail, msg_ch)
class OperationLogErr(DnsdbException):
def __init__(self, message, errcode=500, detail=None, msg_ch=u''):
super(OperationLogErr, self).__init__(message, errcode, detail, msg_ch)
class BadParam(DnsdbException):
def __init__(self, message='Bad params', errcode=BADREQUEST, detail=None, msg_ch=u''):
super(BadParam, self).__init__(message, errcode, detail, msg_ch)
class UpdaterErr(DnsdbException):
pass
class ConfigErr(UpdaterErr):
def __init__(self, message):
super(ConfigErr, self).__init__(message=message, errcode=501)
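# Minimal usage sketch (hypothetical handler code), showing how the json() payload is built:
#   try:
#       raise BadParam('missing domain name')
#   except DnsdbException as e:
#       body = e.json() # {'code': 400, 'why': 'missing domain name'}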
|
graphlearn/python/nn/tf/config.py | amznero/graph-learn | 1,088 | 12714216 | # Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Config(object):
def __init__(self):
"""Set and get configurations for tf models.
Configurations:
training (bool): Whether in training mode or not. Defaults to True.
emb_max_partitions (int): The `max_partitions` for embedding variables
        partitioned by `min_max_variable_partitioner`. Specifically,
        `EmbeddingVariable` uses `fixed_size_partitioner`.
        Defaults to None, which means no partitioning.
emb_min_slice_size (int): The `min_slice_size` for embedding variables
partitioned by `min_max_variable_partitioner`. Defaults to 128K.
emb_live_steps (int): Global steps to live for inactive keys in embedding
variables. Defaults to None.
"""
self.training = True
self.partitioner = 'min_max'
self.emb_max_partitions = None
self.emb_min_slice_size = 128 * 1024
self.emb_live_steps = None
conf = Config()
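# Typical usage sketch (the import path is assumed; adjust it to how the package is installed):
#   from graphlearn.python.nn.tf.config import conf
#   conf.training = False # switch to inference mode
#   conf.emb_max_partitions = 8 # partition large embedding variables into at most 8 pieces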
|
moviepy/video/io/ffmpeg_tools.py | odidev/moviepy | 8,558 | 12714233 | """Miscellaneous bindings to ffmpeg."""
import os
from moviepy.config import FFMPEG_BINARY
from moviepy.decorators import convert_parameter_to_seconds, convert_path_to_string
from moviepy.tools import subprocess_call
@convert_path_to_string(("inputfile", "outputfile"))
@convert_parameter_to_seconds(("start_time", "end_time"))
def ffmpeg_extract_subclip(
inputfile, start_time, end_time, outputfile=None, logger="bar"
):
"""Makes a new video file playing video file between two times.
Parameters
----------
inputfile : str
Path to the file from which the subclip will be extracted.
start_time : float
Moment of the input clip that marks the start of the produced subclip.
end_time : float
Moment of the input clip that marks the end of the produced subclip.
outputfile : str, optional
Path to the output file. Defaults to
``<inputfile_name>SUB<start_time>_<end_time><ext>``.
"""
if not outputfile:
name, ext = os.path.splitext(inputfile)
t1, t2 = [int(1000 * t) for t in [start_time, end_time]]
outputfile = "%sSUB%d_%d%s" % (name, t1, t2, ext)
cmd = [
FFMPEG_BINARY,
"-y",
"-ss",
"%0.2f" % start_time,
"-i",
inputfile,
"-t",
"%0.2f" % (end_time - start_time),
"-map",
"0",
"-vcodec",
"copy",
"-acodec",
"copy",
"-copyts",
outputfile,
]
subprocess_call(cmd, logger=logger)
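# Hedged usage sketch (hypothetical file names): cut seconds 10-25 out of a video.
#   ffmpeg_extract_subclip("input.mp4", 10, 25, outputfile="clip.mp4")
# With no outputfile given, the result would be written to "inputSUB10000_25000.mp4",
# since the start/end times are encoded in milliseconds in the default name.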
@convert_path_to_string(("videofile", "audiofile", "outputfile"))
def ffmpeg_merge_video_audio(
videofile,
audiofile,
outputfile,
video_codec="copy",
audio_codec="copy",
logger="bar",
):
"""Merges video file and audio file into one movie file.
Parameters
----------
videofile : str
Path to the video file used in the merge.
audiofile : str
Path to the audio file used in the merge.
outputfile : str
Path to the output file.
video_codec : str, optional
Video codec used by FFmpeg in the merge.
audio_codec : str, optional
Audio codec used by FFmpeg in the merge.
"""
cmd = [
FFMPEG_BINARY,
"-y",
"-i",
audiofile,
"-i",
videofile,
"-vcodec",
video_codec,
"-acodec",
audio_codec,
outputfile,
]
subprocess_call(cmd, logger=logger)
@convert_path_to_string(("inputfile", "outputfile"))
def ffmpeg_extract_audio(inputfile, outputfile, bitrate=3000, fps=44100, logger="bar"):
"""Extract the sound from a video file and save it in ``outputfile``.
Parameters
----------
inputfile : str
The path to the file from which the audio will be extracted.
outputfile : str
The path to the file to which the audio will be stored.
bitrate : int, optional
Bitrate for the new audio file.
fps : int, optional
Frame rate for the new audio file.
"""
cmd = [
FFMPEG_BINARY,
"-y",
"-i",
inputfile,
"-ab",
"%dk" % bitrate,
"-ar",
"%d" % fps,
outputfile,
]
subprocess_call(cmd, logger=logger)
@convert_path_to_string(("inputfile", "outputfile"))
def ffmpeg_resize(inputfile, outputfile, size, logger="bar"):
"""Resizes a file to new size and write the result in another.
Parameters
----------
inputfile : str
Path to the file to be resized.
outputfile : str
Path to the output file.
size : list or tuple
New size in format ``[width, height]`` for the output file.
"""
cmd = [
FFMPEG_BINARY,
"-i",
inputfile,
"-vf",
"scale=%d:%d" % (size[0], size[1]),
outputfile,
]
subprocess_call(cmd, logger=logger)
@convert_path_to_string(("inputfile", "outputfile", "output_dir"))
def ffmpeg_stabilize_video(
inputfile, outputfile=None, output_dir="", overwrite_file=True, logger="bar"
):
"""
    Stabilizes ``inputfile`` and writes the result to ``outputfile``.
Parameters
----------
inputfile : str
The name of the shaky video.
outputfile : str, optional
The name of new stabilized video. Defaults to appending '_stabilized' to
the input file name.
output_dir : str, optional
The directory to place the output video in. Defaults to the current
working directory.
overwrite_file : bool, optional
If ``outputfile`` already exists in ``output_dir``, then overwrite
``outputfile`` Defaults to True.
"""
if not outputfile:
without_dir = os.path.basename(inputfile)
name, ext = os.path.splitext(without_dir)
outputfile = f"{name}_stabilized{ext}"
outputfile = os.path.join(output_dir, outputfile)
cmd = [FFMPEG_BINARY, "-i", inputfile, "-vf", "deshake", outputfile]
if overwrite_file:
cmd.append("-y")
subprocess_call(cmd, logger=logger)
|
missions/middleware.py | ANSUUVIOUS/dart | 234 | 12714267 | # -*- coding: utf-8 -*-
# Copyright 2017 Lockheed Martin Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from calendar import timegm
import re
import logging
from django.conf import settings
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import redirect
from django.utils.timezone import timedelta, now
from django.contrib.auth import login
from django.contrib.auth.models import User
from django.http.response import HttpResponseServerError
logger = logging.getLogger(__name__)
class RequiredInterstitial(object):
"""
    Some organizations may require an acceptable use policy or similar to be displayed upon logon.
    The setting REQUIRED_INTERSTITIAL_DISPLAY_INTERVAL specifies how often the AUP should be
    displayed, in hours as a positive integer, or 0 to indicate it should be displayed once
    per application logon.
Omitting this setting will bypass the interstitial.
To Use:
- Add to settings.MIDDLEWARE_CLASSES: 'missions.middleware.RequiredInterstitial'
- Ensure you specify a value in settings for the key REQUIRED_INTERSTITIAL_DISPLAY_INTERVAL
"""
def process_request(self, request):
try:
display_interval = settings.REQUIRED_INTERSTITIAL_DISPLAY_INTERVAL
except AttributeError:
# Setting not defined, so assume we don't want the interstitial to display
return None
try:
if display_interval == 0 \
and request.session['last_acknowledged_interstitial']:
return None
else:
max_age = timedelta(hours=display_interval).total_seconds()
if timegm(now().timetuple()) - request.session['last_acknowledged_interstitial'] < max_age:
return None
except KeyError:
pass
path = request.get_full_path()
if re.match(str(reverse_lazy('login-interstitial')), path) or \
re.match(str(reverse_lazy('login')), path) or \
re.match(str(reverse_lazy('logout')), path) or \
re.match(settings.STATIC_URL + r'.+', path):
return None
return redirect('login-interstitial')
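# Minimal configuration sketch (hypothetical settings.py values):
#   MIDDLEWARE_CLASSES += ('missions.middleware.RequiredInterstitial',)
#   REQUIRED_INTERSTITIAL_DISPLAY_INTERVAL = 24 # re-display the AUP every 24 hours
#   # ... or 0 to show it once per application logon, as described in the docstring above.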
|
samples/alpc/advanced_alpc.py | IMULMUL/PythonForWindows | 479 | 12714269 | import sys
import multiprocessing
import windows.alpc
from windows.generated_def import LPC_CONNECTION_REQUEST, LPC_REQUEST
import windows.generated_def as gdef
import ctypes
import tempfile
PORT_NAME = r"\RPC Control\PythonForWindowsPORT_2"
PORT_CONTEXT = 0x11223344
def full_alpc_server():
print("server pid = {0}".format(windows.current_process.pid))
server = windows.alpc.AlpcServer(PORT_NAME)
print("[SERV] PORT <{0}> CREATED".format(PORT_NAME))
msg = server.recv()
print("[SERV] == Message received ==")
if msg.type & 0xfff == LPC_CONNECTION_REQUEST:
print(" * ALPC connection request: <{0}>".format(msg.data))
msg.data = "Connection message response"
server.accept_connection(msg, port_context=PORT_CONTEXT)
else:
raise ValueError("Expected connection")
while True:
msg = server.recv()
print("[SERV] == Message received ==")
# print(" * Data: {0}".format(msg.data))
# print("[SERV] RECV Message type = {0:#x}".format(msg.type))
# print("[SERV] RECV Message Valid ATTRS = {0:#x}".format(msg.attributes.ValidAttributes))
# print("[SERV] RECV Message ATTRS = {0:#x}".format(msg.attributes.AllocatedAttributes))
if msg.type & 0xfff == LPC_REQUEST:
print(" * ALPC request: <{0}>".format(msg.data))
print(" * view_is_valid <{0}>".format(msg.view_is_valid))
if msg.view_is_valid:
print(" * message view attribute:")
windows.utils.print_ctypes_struct(msg.view_attribute, " - VIEW", hexa=True)
view_data = windows.current_process.read_string(msg.view_attribute.ViewBase)
print(" * Reading view content: <{0}>".format(view_data))
# Needed in Win7 - TODO: why is there a different behavior ?
msg.attributes.ValidAttributes -= gdef.ALPC_MESSAGE_VIEW_ATTRIBUTE
print(" * security_is_valid <{0}>".format(msg.security_is_valid))
print(" * handle_is_valid <{0}>".format(msg.handle_is_valid))
if msg.handle_is_valid:
if msg.handle_attribute.Handle:
print(" * message handle attribute:")
windows.utils.print_ctypes_struct(msg.handle_attribute, " - HANDLE", hexa=True)
if msg.handle_attribute.ObjectType == 1:
f = windows.utils.create_file_from_handle(msg.handle_attribute.Handle)
print(" - File: {0}".format(f))
print(" - content: <{0}>".format(f.read()))
else:
print(" - unknow object type == {0}".format(msg.handle_attribute.ObjectType))
msg.attributes.ValidAttributes -= gdef.ALPC_MESSAGE_HANDLE_ATTRIBUTE
print(" * context_is_valid <{0}>".format(msg.context_is_valid))
if msg.context_is_valid:
print(" * message context attribute:")
windows.utils.print_ctypes_struct(msg.context_attribute, " - CTX", hexa=True)
if msg.attributes.ValidAttributes & gdef.ALPC_MESSAGE_TOKEN_ATTRIBUTE:
print(" * message token attribute:")
token_struct = msg.attributes.get_attribute(gdef.ALPC_MESSAGE_TOKEN_ATTRIBUTE)
windows.utils.print_ctypes_struct(token_struct, " - TOKEN", hexa=True)
# We can reply by to way:
# - Send the same message with modified data
# - Recreate a Message and copy the MessageId
msg.data = "REQUEST '{0}' DONE".format(msg.data)
sys.stdout.flush()
server.send(msg)
else:
print ValueError("Unexpected message type <{0}>".format(msg.type & 0xfff))
def send_message_with_handle(client):
print ""
print("[Client] == Sending a message with a handle ==")
# Craft a file with some data
f = tempfile.NamedTemporaryFile()
f.write("Tempfile data <3")
f.seek(0)
# New message with a Handle
msg = windows.alpc.AlpcMessage()
msg.attributes.ValidAttributes |= gdef.ALPC_MESSAGE_HANDLE_ATTRIBUTE
msg.handle_attribute.Flags = gdef.ALPC_HANDLEFLG_DUPLICATE_SAME_ACCESS
msg.handle_attribute.Handle = windows.utils.get_handle_from_file(f)
msg.handle_attribute.ObjectType = 0
msg.handle_attribute.DesiredAccess = 0
msg.data = "some message with a file"
client.send_receive(msg)
def send_message_with_view(client):
print ""
print("[Client] == Sending a message with a view ==")
# Create View
section = client.create_port_section(0, 0, 0x4000)
view = client.map_section(section[0], 0x4000)
# New message with a View
msg = windows.alpc.AlpcMessage(0x2000)
msg.attributes.ValidAttributes |= gdef.ALPC_MESSAGE_VIEW_ATTRIBUTE
msg.view_attribute.Flags = 0
msg.view_attribute.ViewBase = view.ViewBase
msg.view_attribute.SectionHandle = view.SectionHandle
msg.view_attribute.ViewSize = 0x4000
msg.data = "some message with a view"
windows.current_process.write_memory(view.ViewBase, "The content of the view :)\x00")
client.send_receive(msg)
def alpc_client():
print("Client pid = {0}".format(windows.current_process.pid))
client = windows.alpc.AlpcClient()
# You can create a non-connected AlpcClient and send a custom
# 'AlpcMessage' for complexe alpc port connection.
connect_message = windows.alpc.AlpcMessage()
connect_message.data = "Connection request client message"
print("[CLIENT] == Connecting to port ==")
connect_response = client.connect_to_port(PORT_NAME, connect_message)
print("[CLIENT] Connected with response: <{0}>".format(connect_response.data))
# AlpcClient send/recv/send_receive methods accept both string or
# AlpcMessage for complexe message.
print""
print("[CLIENT] == Sending a message ==")
msg = windows.alpc.AlpcMessage()
msg.data = "Complex Message 1"
print(" * Sending Message <{0}>".format(msg.data))
response = client.send_receive(msg)
print("[CLIENT] Server response: <{0}>".format(response.data))
print("[CLIENT] RESP Message Valid ATTRS = {0}".format(response.valid_attributes))
send_message_with_handle(client)
send_message_with_view(client)
sys.stdout.flush()
if __name__ == "__main__":
proc = multiprocessing.Process(target=full_alpc_server, args=())
proc.start()
import time; time.sleep(0.5)
alpc_client()
import time; time.sleep(0.5)
print("BYE")
proc.terminate() |
tests/worker/test_initializers.py | adir-intsights/sergeant | 152 | 12714274 | import unittest
import unittest.mock
import sergeant.worker
class WorkerInitializersTestCase(
unittest.TestCase,
):
def test_init_logger(
self,
):
worker = sergeant.worker.Worker()
worker.config = sergeant.config.WorkerConfig(
name='some_worker',
connector=sergeant.config.Connector(
type='',
params={},
),
)
worker.init_logger()
self.assertIsNotNone(
obj=worker.logger,
)
worker.config = sergeant.config.WorkerConfig(
name='some_worker',
connector=sergeant.config.Connector(
type='',
params={},
),
logging=sergeant.config.Logging(
handlers=[
sergeant.logging.logstash.LogstashHandler(
host='localhost',
port=9999,
),
],
),
)
worker.init_logger()
self.assertIsNotNone(
obj=worker.logger,
)
self.assertEqual(
first=len(worker.logger.handlers),
second=1,
)
self.assertIsInstance(
obj=worker.logger.handlers[0],
cls=sergeant.logging.logstash.LogstashHandler,
)
def test_init_broker(
self,
):
worker = sergeant.worker.Worker()
worker.config = sergeant.config.WorkerConfig(
name='some_worker',
connector=sergeant.config.Connector(
type='redis',
params={
'nodes': [
{
'host': 'localhost',
'port': 6379,
'password': <PASSWORD>,
'database': 0,
},
],
},
),
)
worker.init_broker()
self.assertIsInstance(
obj=worker.broker,
cls=sergeant.broker.Broker,
)
self.assertIsInstance(
obj=worker.broker.connector,
cls=sergeant.connector.redis.Connector,
)
self.assertIsNone(
obj=worker.broker.encoder.compressor,
)
self.assertIsInstance(
obj=worker.broker.encoder.serializer,
cls=sergeant.encoder.serializer.pickle.Serializer,
)
worker.config = sergeant.config.WorkerConfig(
name='some_worker',
connector=sergeant.config.Connector(
type='redis',
params={
'nodes': [
{
'host': 'localhost',
'port': 6379,
'password': <PASSWORD>,
'database': 0,
},
{
'host': 'localhost',
'port': 6380,
'password': <PASSWORD>,
'database': 0,
},
],
},
),
)
worker.init_broker()
self.assertIsInstance(
obj=worker.broker,
cls=sergeant.broker.Broker,
)
self.assertIsInstance(
obj=worker.broker.connector,
cls=sergeant.connector.redis.Connector,
)
self.assertIsNone(
obj=worker.broker.encoder.compressor,
)
self.assertIsInstance(
obj=worker.broker.encoder.serializer,
cls=sergeant.encoder.serializer.pickle.Serializer,
)
worker.config = sergeant.config.WorkerConfig(
name='some_worker',
connector=sergeant.config.Connector(
type='mongo',
params={
'nodes': [
{
'host': 'localhost',
'port': 27017,
'replica_set': 'test_replica_set',
},
],
},
),
)
worker.init_broker()
self.assertIsInstance(
obj=worker.broker,
cls=sergeant.broker.Broker,
)
self.assertIsInstance(
obj=worker.broker.connector,
cls=sergeant.connector.mongo.Connector,
)
self.assertIsNone(
obj=worker.broker.encoder.compressor,
)
self.assertIsInstance(
obj=worker.broker.encoder.serializer,
cls=sergeant.encoder.serializer.pickle.Serializer,
)
compressor_names = list(sergeant.encoder.encoder.Encoder.compressors.keys())
compressor_names.append(None)
serializer_names = sergeant.encoder.encoder.Encoder.serializers.keys()
for compressor_name in compressor_names:
for serializer_name in serializer_names:
worker.config = sergeant.config.WorkerConfig(
name='some_worker',
connector=sergeant.config.Connector(
type='redis',
params={
'nodes': [
{
'host': 'localhost',
'port': 6379,
'password': <PASSWORD>,
'database': 0,
},
],
},
),
encoder=sergeant.config.Encoder(
compressor=compressor_name,
serializer=serializer_name,
),
)
worker.init_broker()
self.assertIsInstance(
obj=worker.broker,
cls=sergeant.broker.Broker,
)
self.assertIsInstance(
obj=worker.broker.connector,
cls=sergeant.connector.redis.Connector,
)
if compressor_name:
self.assertEqual(
first=worker.broker.encoder.compressor.name,
second=compressor_name,
)
else:
self.assertIsNone(
obj=worker.broker.encoder.compressor,
)
self.assertEqual(
first=worker.broker.encoder.serializer.name,
second=serializer_name,
)
def test_init_executor(
self,
):
worker = sergeant.worker.Worker()
worker.config = sergeant.config.WorkerConfig(
name='some_worker',
connector=sergeant.config.Connector(
type='',
params={},
),
)
worker.init_executor()
self.assertIsInstance(
obj=worker.executor_obj,
cls=sergeant.executor.serial.SerialExecutor,
)
worker.config = sergeant.config.WorkerConfig(
name='some_worker',
connector=sergeant.config.Connector(
type='',
params={},
),
number_of_threads=1,
)
worker.init_executor()
self.assertIsInstance(
obj=worker.executor_obj,
cls=sergeant.executor.serial.SerialExecutor,
)
worker.config = sergeant.config.WorkerConfig(
name='some_worker',
connector=sergeant.config.Connector(
type='',
params={},
),
number_of_threads=2,
)
worker.init_executor()
self.assertIsInstance(
obj=worker.executor_obj,
cls=sergeant.executor.threaded.ThreadedExecutor,
)
|
tests/unit/merchant_account/test_business_details.py | futureironman/braintree_python | 182 | 12714302 | from tests.test_helper import *
from braintree.merchant_account.business_details import BusinessDetails
class TestBusinessDetails(unittest.TestCase):
def test_repr_has_all_fields(self):
details = BusinessDetails({
"dba_name": "<NAME>",
"legal_name": "<NAME>",
"tax_id": "123001234",
"address": {
"street_address": "123 First St",
"region": "Las Vegas",
"locality": "NV",
}
})
regex = r"<BusinessDetails {dba_name: '<NAME>', legal_name: '<NAME>', tax_id: '123001234', address_details: <AddressDetails {street_address: '123 First St', locality: 'NV', region: 'Las Vegas'} at \w+>} at \w+>"
matches = re.match(regex, repr(details))
self.assertTrue(matches)
|
backprop/models/hf_seq2seq_tg_model/__init__.py | lucky7323/backprop | 200 | 12714330 | from .model import HFSeq2SeqTGModel
tests/test__types.py | lemon24/reader | 205 | 12714335 | from types import SimpleNamespace
import pytest
from reader._types import entry_data_from_obj
from reader._types import EntryData
from reader._types import FeedData
from reader._types import fix_datetime_tzinfo
from reader._types import tag_filter_argument
TAG_DATA = [
([], [None, [], (), [[]], ((),), [[], []]]),
([[True]], [True, [True], [[True]]]),
([[False]], [False, [False], [[False]]]),
([[True], [False]], [[True, False], [[True], [False]]]),
([[True, False]], [[[True, False]]]),
([[(False, 'one')]], [['one'], [['one']], ['one', []], [[], ['one'], []]]),
([[(False, 'one')], [(True, 'two')]], [['one', '-two'], [['one'], ['-two']]]),
([[(False, 'one'), (True, 'two')]], [[['one', '-two']]]),
([[True], [(False, 'one')]], [[True, 'one'], [True, ['one']], [[True], 'one']]),
([[(False, 'one'), False]], [[['one', False]]]),
]
TAG_DATA_FLAT = [(input, expected) for expected, inputs in TAG_DATA for input in inputs]
@pytest.mark.parametrize('input, expected', TAG_DATA_FLAT)
def test_tag_filter_argument(input, expected):
assert tag_filter_argument(input) == expected
DEFINITELY_NOT_TAGS = [0, 1, 2, {}, set(), object()]
TAG_DATA_BAD = [
("argument must be", DEFINITELY_NOT_TAGS + ['', 'one', '-one']),
("must be non-empty", [[''], ['-'], [['']], [['-']]]),
(
"elements of argument must be",
[[t] for t in DEFINITELY_NOT_TAGS] + [[[t]] for t in DEFINITELY_NOT_TAGS],
),
]
TAG_DATA_BAD_FLAT = [
(input, error) for error, inputs in TAG_DATA_BAD for input in inputs
]
@pytest.mark.parametrize('input, error', TAG_DATA_BAD_FLAT)
def test_tag_filter_argument_error(input, error):
with pytest.raises(ValueError) as excinfo:
tag_filter_argument(input, 'argument')
assert error in str(excinfo.value)
@pytest.mark.parametrize('data_file', ['full', 'empty'])
def test_entry_data_from_obj(data_dir, data_file):
expected = {'url_base': '', 'rel_base': ''}
exec(data_dir.join(f'{data_file}.rss.py').read(), expected)
for i, entry in enumerate(expected['entries']):
entry_utc = fix_datetime_tzinfo(entry, 'updated', 'published')
assert entry == entry_data_from_obj(entry_utc), i
entry_dict = entry_utc._asdict()
if 'content' in entry_dict:
entry_dict['content'] = [c._asdict() for c in entry_dict['content']]
if 'enclosures' in entry_dict:
entry_dict['enclosures'] = [e._asdict() for e in entry_dict['enclosures']]
assert entry == entry_data_from_obj(entry_dict), i
@pytest.mark.parametrize(
'exc, entry',
[
(AttributeError, SimpleNamespace()),
(AttributeError, SimpleNamespace(feed_url='feed')),
(AttributeError, SimpleNamespace(id='id')),
(TypeError, SimpleNamespace(feed_url='feed', id=1)),
(TypeError, SimpleNamespace(feed_url='feed', id=None)),
(TypeError, SimpleNamespace(feed_url='feed', id='id', updated=1)),
(TypeError, SimpleNamespace(feed_url='feed', id='id', title=1)),
(TypeError, SimpleNamespace(feed_url='feed', id='id', content=1)),
(
AttributeError,
SimpleNamespace(feed_url='feed', id='id', content=[SimpleNamespace()]),
),
(
TypeError,
SimpleNamespace(
feed_url='feed', id='id', content=[SimpleNamespace(value=1)]
),
),
(
TypeError,
SimpleNamespace(
feed_url='feed',
id='id',
content=[SimpleNamespace(value='value', type=1)],
),
),
(
AttributeError,
SimpleNamespace(feed_url='feed', id='id', enclosures=[SimpleNamespace()]),
),
(
TypeError,
SimpleNamespace(
feed_url='feed', id='id', enclosures=[SimpleNamespace(href=1)]
),
),
(
TypeError,
SimpleNamespace(
feed_url='feed',
id='id',
enclosures=[SimpleNamespace(href='href', type=1)],
),
),
(
TypeError,
SimpleNamespace(
feed_url='feed',
id='id',
enclosures=[SimpleNamespace(href='href', length='1')],
),
),
],
)
def test_entry_data_from_obj_errors(exc, entry):
with pytest.raises(exc):
entry_data_from_obj(entry)
with pytest.raises(exc):
entry_dict = dict(vars(entry))
if 'content' in entry_dict:
entry_dict['content'] = [dict(vars(c)) for c in entry_dict['content']]
if 'enclosures' in entry_dict:
entry_dict['enclosures'] = [dict(vars(e)) for e in entry_dict['enclosures']]
entry_data_from_obj(entry_dict)
|
examples/mcscf/32-beh2_scan/BeH2-scan.py | robert-anderson/pyscf | 501 | 12714338 | <gh_stars>100-1000
import numpy
from pyscf import gto, scf, mcscf
'''
Scan BeH2 molecule symmetric dissociation curve
Note that the CI wave function might change symmetry during the scan. Adjust
fcisolver parameters to maintain the right symmetry.
'''
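# Geometry note (derived from run() below): for each scan value i (in bohr) the
# two H atoms sit at (x, +/-y) with y = 2.54 - 0.46 * x, and both coordinates
# are converted to Angstrom via the Bohr radius (1 bohr ~= 0.529177249 Angstrom)
# before being passed to gto.M.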
def run(i, dm0, mo0, ci0):
x = i
y = (2.54 - 0.46 * x)
x = x * 0.529177249
y = y * 0.529177249
mol = gto.M(
verbose = 0,
atom = [
['Be',( 0., 0. , 0. )],
['H', ( x, -y , 0. )],
['H', ( x, y , 0. )],],
basis = '6-311G',
symmetry = True)
mf = scf.RHF(mol)
ehf = mf.scf(dm0)
mc = mcscf.CASSCF(mf, 2, 2)
mc.fcisolver.davidson_only = True # force the CI solver to stick to the (A1)^2(B1)^0 configuration
if mo0 is not None:
mo0 = mcscf.project_init_guess(mc, mo0)
emc = mc.mc1step(mo0, ci0)[0]
print('%2.1f bohr, HF energy: %12.8f, CASSCF energy: %12.8f' % (i, ehf, emc))
return mf, mc
dm0 = mo0 = ci = None
for i in reversed(numpy.arange(1.0, 4.1, .1)):
mf, mc = run(i, dm0, mo0, ci)
dm0 = mf.make_rdm1()
mo_coeff = mc.mo_coeff
ci = mc.ci
|
api/users/tests/test_signals.py | mevinbabuc/flagsmith | 1,259 | 12714340 | import pytest
from users.signals import warn_insecure
@pytest.mark.django_db
def test_warn_insecure_emits_a_warning_when_no_user_exists(recwarn):
# When
warn_insecure(None)
# Then
assert len(recwarn) == 1
w = recwarn.pop()
assert issubclass(w.category, RuntimeWarning)
@pytest.mark.django_db
def test_warn_insecure_emits_no_warning_when_user_exists(admin_user, recwarn):
# When
warn_insecure(None)
# Then
assert len(recwarn) == 0
|
src/operation/fetchlines.py | fekblom/critic | 216 | 12714347 | <filename>src/operation/fetchlines.py
# -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2012 <NAME>, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import gitutils
import htmlutils
import diff
from operation import Operation, OperationResult
class FetchLines(Operation):
def __init__(self):
Operation.__init__(self, { "repository_id": int,
"path": str,
"sha1": str,
"ranges": [{ "offset": int,
"count": int,
"context": bool }],
"tabify": bool },
accept_anonymous_user=True)
def process(self, db, user, repository_id, path, sha1, ranges, tabify):
repository = gitutils.Repository.fromId(db, repository_id)
cursor = db.cursor()
def getContext(offset):
cursor.execute("""SELECT context
FROM codecontexts
WHERE sha1=%s
AND %s BETWEEN first_line AND last_line
ORDER BY first_line DESC
LIMIT 1""",
(sha1, offset))
row = cursor.fetchone()
if row: return row[0]
else: return None
file = diff.File(repository=repository, path=path, new_sha1=sha1)
file.loadNewLines(highlighted=True, request_highlight=True)
if tabify:
tabwidth = file.getTabWidth()
indenttabsmode = file.getIndentTabsMode()
def processRange(offset, count, context):
if context: context = getContext(offset)
else: context = None
# Offset is a 1-based line number.
start = offset - 1
# If count is -1, fetch all lines.
end = start + count if count > -1 else None
lines = file.newLines(highlighted=True)[start:end]
if tabify:
lines = [htmlutils.tabify(line, tabwidth, indenttabsmode) for line in lines]
return { "lines": lines, "context": context }
return OperationResult(ranges=[processRange(**line_range) for line_range in ranges])
|
litex_boards/platforms/trenz_te0725.py | smunaut/litex-boards | 177 | 12714416 | #
# This file is part of LiteX-Boards.
# FPGA Board Info : https://shop.trenz-electronic.de/en/TE0725-03-35-2C-FPGA-Module-with-Xilinx-Artix-7-XC7A35T-2CSG324C-2-x-50-Pin-with-2.54-mm-pitch
#
# Copyright (c) 2021 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.xilinx import XilinxPlatform
from litex.build.openocd import OpenOCD
# IOs ----------------------------------------------------------------------------------------------
_io = [
# Clk / Rst
("clk100", 0, Pins("P17"), IOStandard("LVCMOS33")),
("cpu_reset", 0, Pins("T8"), IOStandard("LVCMOS33")),
# Leds
("user_led", 0, Pins("M16"), IOStandard("LVCMOS33")),
# Serial
("serial", 0,
Subsignal("tx", Pins("L18")),
Subsignal("rx", Pins("M18")),
IOStandard("LVCMOS33")
),
# SPIFlash
("spiflash", 0,
Subsignal("cs_n", Pins("L13")),
Subsignal("clk", Pins("E9")),
Subsignal("mosi", Pins("K17")),
Subsignal("miso", Pins("K18")),
Subsignal("wp", Pins("L14")),
Subsignal("hold", Pins("M14")),
IOStandard("LVCMOS33"),
),
("spiflash4x", 0,
Subsignal("cs_n", Pins("L13")),
Subsignal("clk", Pins("E9")),
Subsignal("dq", Pins("K17 K18 L14 M14")),
IOStandard("LVCMOS33")
),
# HyperRAM
("hyperram", 0,
Subsignal("dq", Pins("E17 B17 F18 F16 G17 D18 B18 A16"), IOStandard("SSTL18_II")),
Subsignal("rwds", Pins("E18"), IOStandard("SSTL18_II")),
Subsignal("cs_n", Pins("D17"), IOStandard("SSTL18_II")),
Subsignal("rst_n", Pins("J17"), IOStandard("SSTL18_II")),
Subsignal("clk_p", Pins("A13"), IOStandard("DIFF_SSTL18_II")),
Subsignal("clk_n", Pins("A14"), IOStandard("DIFF_SSTL18_II")),
Misc("SLEW=FAST"),
),
]
# Connectors ---------------------------------------------------------------------------------------
_connectors = [
("j1", "C6 C5 B7 B6 A6 A5 D8 C7",
"E6 E5 E7 D7 C4 B4 A4 A3",
"B1 A1 B3 B2 D5 D4 E3 D3",
"F4 F3 E2 D2 H2 G2 C2 C1",
"H1 G1 F1 E1 G6 F6 J3 J2",
"K2 K1"),
("j2", "L1 M1 N2 N1 M3 M2 U1 V1",
"U4 U3 U2 V2 V5 V4 R3 T3",
"T5 T4 N5 P5 P4 P3 P2 R2",
"M4 N4 R1 T1 M6 N6 R6 R5",
"V7 V6 U9 V9 U7 U6 R7 T6",
"R8"),
]
# Platform -----------------------------------------------------------------------------------------
class Platform(XilinxPlatform):
default_clk_name = "clk100"
default_clk_period = 1e9/100e6
def __init__(self):
XilinxPlatform.__init__(self, "xc7a35tcsg324-2", _io, _connectors, toolchain="vivado")
self.toolchain.bitstream_commands = \
["set_property BITSTREAM.CONFIG.SPI_BUSWIDTH 4 [current_design]"]
self.toolchain.additional_commands = \
["write_cfgmem -force -format bin -interface spix4 -size 16"
" -loadbit \"up 0x0 {build_name}.bit\" -file {build_name}.bin"]
self.add_platform_command("set_property CFGBVS VCCO [current_design]")
self.add_platform_command("set_property CONFIG_VOLTAGE 3.3 [current_design]")
def create_programmer(self):
return OpenOCD("openocd_xc7_ft2232.cfg", "bscan_spi_xc7a35t.bit")
def do_finalize(self, fragment):
XilinxPlatform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk100", loose=True), 1e9/100e6)
# "set_property SEVERITY {{Warning}} [get_drc_checks UCIO-1]"]
|
main.py | rederxz/wide_resnets_keras | 156 | 12714427 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import os
import logging
logging.basicConfig(level=logging.DEBUG)
import sys
#sys.stdout = sys.stderr
# Prevent reaching the maximum recursion depth in `theano.tensor.grad`
#sys.setrecursionlimit(2 ** 20)
import numpy as np
np.random.seed(2 ** 10)
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, AveragePooling2D, BatchNormalization, Dropout, Input, Activation, Add, Dense, Flatten
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.regularizers import l2
from tensorflow.keras.callbacks import LearningRateScheduler, ModelCheckpoint
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import backend as K
from utils import mk_dir
# ================================================
# DATA CONFIGURATION:
logging.debug("Loading data...")
nb_classes = 10
image_size = 32
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
# convert class vectors to binary class matrices
Y_train = to_categorical(y_train, nb_classes)
Y_test = to_categorical(y_test, nb_classes)
# ================================================
# ================================================
# NETWORK/TRAINING CONFIGURATION:
logging.debug("Loading network/training configuration...")
depth = 28 # table 5 on page 8 indicates best value (4.17) CIFAR-10
k = 10 # 'widen_factor'; table 5 on page 8 indicates best value (4.17) CIFAR-10
dropout_probability = 0 # table 6 on page 10 indicates best value (4.17) CIFAR-10
weight_decay = 0.0005 # page 10: "Used in all experiments"
batch_size = 128 # page 8: "Used in all experiments"
# Regarding nb_epochs, lr_schedule and sgd, see bottom page 10:
nb_epochs = 200
lr_schedule = [60, 120, 160] # epoch_step
def schedule(epoch_idx):
if (epoch_idx + 1) < lr_schedule[0]:
return 0.1
elif (epoch_idx + 1) < lr_schedule[1]:
return 0.02 # lr_decay_ratio = 0.2
elif (epoch_idx + 1) < lr_schedule[2]:
return 0.004
return 0.0008
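# For reference, the schedule above yields (epoch_idx is 0-based):
# epochs 0-58 -> lr 0.1, epochs 59-118 -> 0.02, epochs 119-158 -> 0.004,
# epochs 159+ -> 0.0008, i.e. each drop multiplies the rate by lr_decay_ratio = 0.2.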
sgd = SGD(lr=0.1, momentum=0.9, nesterov=True)
# Other config from code; used throughout all layers:
use_bias = False # following functions 'FCinit(model)' and 'DisableBias(model)' in utils.lua
weight_init="he_normal" # follows the 'MSRinit(model)' function in utils.lua
# Keras specific
if K.image_data_format() == "channels_first":
logging.debug("image_data_format = 'channels_first'")
channel_axis = 1
input_shape = (3, image_size, image_size)
else:
logging.debug("image_dim_ordering = 'tf'")
channel_axis = -1
input_shape = (image_size, image_size, 3)
# ================================================
# ================================================
# OUTPUT CONFIGURATION:
print_model_summary = True
save_model = True
save_model_plot = False
MODEL_PATH = os.environ.get('MODEL_PATH', 'models/')
CHECKPOINT_PATH = os.environ.get('CHECKPOINT_PATH', 'checkpoints/')
# ================================================
# Wide residual network http://arxiv.org/abs/1605.07146
def _wide_basic(n_input_plane, n_output_plane, stride):
def f(net):
# format of conv_params:
# [ [nb_col="kernel width", nb_row="kernel height",
# subsample="(stride_vertical,stride_horizontal)",
# border_mode="same" or "valid"] ]
# B(3,3): original <<basic>> block
conv_params = [ [3,3,stride,"same"],
[3,3,(1,1),"same"] ]
n_bottleneck_plane = n_output_plane
# Residual block
for i, v in enumerate(conv_params):
if i == 0:
if n_input_plane != n_output_plane:
net = BatchNormalization(axis=channel_axis)(net)
net = Activation("relu")(net)
convs = net
else:
convs = BatchNormalization(axis=channel_axis)(net)
convs = Activation("relu")(convs)
convs = Conv2D(n_bottleneck_plane,
(v[0],v[1]),
strides=v[2],
padding=v[3],
kernel_initializer=weight_init,
kernel_regularizer=l2(weight_decay),
use_bias=use_bias)(convs)
else:
convs = BatchNormalization(axis=channel_axis)(convs)
convs = Activation("relu")(convs)
if dropout_probability > 0:
convs = Dropout(dropout_probability)(convs)
convs = Conv2D(n_bottleneck_plane,
(v[0],v[1]),
strides=v[2],
padding=v[3],
kernel_initializer=weight_init,
kernel_regularizer=l2(weight_decay),
use_bias=use_bias)(convs)
# Shortcut connection: identity function or 1x1 convolution
# (depends on difference between input & output shape - this
# corresponds to whether we are using the first block in each
# group; see _layer() ).
if n_input_plane != n_output_plane:
shortcut = Conv2D(n_output_plane,
(1,1),
strides=stride,
padding="same",
kernel_initializer=weight_init,
kernel_regularizer=l2(weight_decay),
use_bias=use_bias)(net)
else:
shortcut = net
return Add()([convs, shortcut])
return f
# "Stacking Residual Units on the same stage"
def _layer(block, n_input_plane, n_output_plane, count, stride):
def f(net):
net = block(n_input_plane, n_output_plane, stride)(net)
for i in range(2,int(count+1)):
net = block(n_output_plane, n_output_plane, stride=(1,1))(net)
return net
return f
def create_model():
logging.debug("Creating model...")
assert((depth - 4) % 6 == 0)
n = (depth - 4) / 6
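# With the defaults above (depth = 28, k = 10) this is n = (28 - 4) / 6 = 4
# residual blocks per stage, i.e. the WRN-28-10 configuration.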
inputs = Input(shape=input_shape)
n_stages=[16, 16*k, 32*k, 64*k]
conv1 = Conv2D(n_stages[0],
(3, 3),
strides=1,
padding="same",
kernel_initializer=weight_init,
kernel_regularizer=l2(weight_decay),
use_bias=use_bias)(inputs) # "One conv at the beginning (spatial size: 32x32)"
# Add wide residual blocks
block_fn = _wide_basic
conv2 = _layer(block_fn, n_input_plane=n_stages[0], n_output_plane=n_stages[1], count=n, stride=(1,1))(conv1)# "Stage 1 (spatial size: 32x32)"
conv3 = _layer(block_fn, n_input_plane=n_stages[1], n_output_plane=n_stages[2], count=n, stride=(2,2))(conv2)# "Stage 2 (spatial size: 16x16)"
conv4 = _layer(block_fn, n_input_plane=n_stages[2], n_output_plane=n_stages[3], count=n, stride=(2,2))(conv3)# "Stage 3 (spatial size: 8x8)"
batch_norm = BatchNormalization(axis=channel_axis)(conv4)
relu = Activation("relu")(batch_norm)
# Classifier block
pool = AveragePooling2D(pool_size=(8, 8), strides=(1, 1), padding="same")(relu)
flatten = Flatten()(pool)
predictions = Dense(units=nb_classes, kernel_initializer=weight_init, use_bias=use_bias,
kernel_regularizer=l2(weight_decay), activation="softmax")(flatten)
model = Model(inputs=inputs, outputs=predictions)
return model
if __name__ == '__main__':
model = create_model()
model.compile(optimizer=sgd, loss="categorical_crossentropy", metrics=['accuracy'])
if print_model_summary:
logging.debug("Model summary...")
model.count_params()
model.summary()
if save_model_plot:
logging.debug("Saving model plot...")
mk_dir(MODEL_PATH)
from tensorflow.keras.utils import plot_model
plot_model(model, to_file=os.path.join(MODEL_PATH, 'WRN-{0}-{1}.png'.format(depth, k)), show_shapes=True)
# Data Augmentation based on page 6 (see README for full details)
logging.debug("Creating ImageDataGenerators...")
train_datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
zca_whitening=True,
horizontal_flip=True)
train_datagen.fit(X_train, augment=True, rounds=2)
test_datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
zca_whitening=True)
test_datagen.fit(X_train)
mk_dir(CHECKPOINT_PATH)
callbacks = [ LearningRateScheduler(schedule=schedule),
ModelCheckpoint(CHECKPOINT_PATH+'/weights.{epoch:02d}-{val_loss:.2f}.hdf5',
monitor='val_loss',
verbose=1,
save_best_only=True,
mode='auto')
]
logging.debug("Running training...")
# fit the model on the batches generated by train_datagen.flow()
model.fit(train_datagen.flow(X_train, Y_train, batch_size=batch_size, shuffle=True),
steps_per_epoch=X_train.shape[0]/batch_size,
epochs=nb_epochs,
validation_data=test_datagen.flow(X_test, Y_test, batch_size=batch_size),
callbacks=callbacks)
if save_model:
logging.debug("Saving model...")
mk_dir(MODEL_PATH)
model.save(os.path.join(MODEL_PATH, 'WRN-{0}-{1}.h5'.format(depth, k)), overwrite=True)
|
tests/testapp/__init__.py | cursive-works/wagtailmedia | 176 | 12714437 | default_app_config = "tests.testapp.apps.WagtailmediaTestsAppConfig"
|
modules/lsegmentation_module_zs.py | isl-org/lang-seg | 202 | 12714439 | <gh_stars>100-1000
import types
import time
import random
import clip
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from argparse import ArgumentParser
import pytorch_lightning as pl
from encoding.models import get_segmentation_model
from encoding.nn import SegmentationLosses
from encoding.utils import batch_pix_accuracy, batch_intersection_union
# add mixed precision
import torch.cuda.amp as amp
import numpy as np
from encoding.utils.metrics import SegmentationMetric
# get fewshot dataloader
from fewshot_data.model.hsnet import HypercorrSqueezeNetwork
from fewshot_data.common.logger import Logger, AverageMeter
from fewshot_data.common.evaluation import Evaluator
from fewshot_data.common import utils
from fewshot_data.data.dataset import FSSDataset
class Fewshot_args:
datapath = 'fewshot_data/Datasets_HSN'
benchmark = 'pascal'
logpath = ''
nworker = 8
bsz = 20
fold = 0
class LSegmentationModuleZS(pl.LightningModule):
def __init__(self, data_path, dataset, batch_size, base_lr, max_epochs, **kwargs):
super().__init__()
self.batch_size = batch_size
self.base_lr = base_lr / 16 * batch_size
self.lr = self.base_lr
self.epochs = max_epochs
self.other_kwargs = kwargs
self.enabled = False # True (mixed precision) complicates things and can lead to NaN errors
self.scaler = amp.GradScaler(enabled=self.enabled)
# for whether fix the encoder or not
self.fixed_encoder = True if kwargs["use_pretrained"] in ['clip_fixed'] else False
# fewshot hyperparameters
self.cross_entropy_loss = nn.CrossEntropyLoss()
self.args = self.get_fewshot_args()
if data_path:
self.args.datapath = data_path
self.args.logpath = self.other_kwargs["logpath"]
self.args.benchmark = dataset
self.args.bsz = self.batch_size
self.args.fold = self.other_kwargs["fold"]
self.args.nshot = self.other_kwargs["nshot"]
self.args.finetune_mode = self.other_kwargs["finetune_mode"]
Logger.initialize(self.args, training=True)
Evaluator.initialize()
if kwargs["backbone"] in ["clip_resnet101"]:
FSSDataset.initialize(img_size=480, datapath=self.args.datapath, use_original_imgsize=False, imagenet_norm=True)
else:
FSSDataset.initialize(img_size=480, datapath=self.args.datapath, use_original_imgsize=False)
self.best_val_miou = float('-inf')
self.num_classes = 2
self.labels = ['others', '']
self.fewshot_trn_loss = 100
self.fewshot_trn_miou = 0
self.fewshot_trn_fb_iou = 0
def get_fewshot_args(self):
return Fewshot_args()
def forward(self, x, class_info):
return self.net(x, class_info)
def training_step(self, batch, batch_nb):
if self.args.finetune_mode:
if self.args.nshot == 5:
bshape = batch['support_imgs'].shape
img = batch['support_imgs'].view(-1, bshape[2], bshape[3], bshape[4])
target = batch['support_masks'].view(-1, bshape[3], bshape[4])
class_info = batch['class_id']
for i in range(1, 5):
class_info = torch.cat([class_info, batch['class_id']])
with amp.autocast(enabled=self.enabled):
out = self(img, class_info)
loss = self.criterion(out, target)
loss = self.scaler.scale(loss)
self.log("train_loss", loss)
# 3. Evaluate prediction
if self.args.benchmark == 'pascal' and batch['support_ignore_idxs'] is not None:
query_ignore_idx = batch['support_ignore_idxs'].view(-1, bshape[3], bshape[4])
area_inter, area_union = Evaluator.classify_prediction(out.argmax(dim=1), target, query_ignore_idx)
else:
area_inter, area_union = Evaluator.classify_prediction(out.argmax(dim=1), target)
else:
img = batch['support_imgs'].squeeze(1)
target = batch['support_masks'].squeeze(1)
class_info = batch['class_id']
with amp.autocast(enabled=self.enabled):
out = self(img, class_info)
loss = self.criterion(out, target)
loss = self.scaler.scale(loss)
self.log("train_loss", loss)
# 3. Evaluate prediction
if self.args.benchmark == 'pascal' and batch['support_ignore_idxs'] is not None:
query_ignore_idx = batch['support_ignore_idxs'].squeeze(1)
area_inter, area_union = Evaluator.classify_prediction(out.argmax(dim=1), target, query_ignore_idx)
else:
area_inter, area_union = Evaluator.classify_prediction(out.argmax(dim=1), target)
else:
img = torch.cat([batch['support_imgs'].squeeze(1), batch['query_img']], dim=0)
target = torch.cat([batch['support_masks'].squeeze(1), batch['query_mask']], dim=0)
class_info=torch.cat([batch['class_id'], batch['class_id']], dim=0)
with amp.autocast(enabled=self.enabled):
out = self(img, class_info)
loss = self.criterion(out, target)
loss = self.scaler.scale(loss)
self.log("train_loss", loss)
# 3. Evaluate prediction
if self.args.benchmark == 'pascal' and batch['query_ignore_idx'] is not None:
query_ignore_idx = torch.cat([batch['support_ignore_idxs'].squeeze(1), batch['query_ignore_idx']], dim=0)
area_inter, area_union = Evaluator.classify_prediction(out.argmax(dim=1), target, query_ignore_idx)
else:
area_inter, area_union = Evaluator.classify_prediction(out.argmax(dim=1), target)
self.train_average_meter.update(area_inter, area_union, class_info, loss.detach().clone())
if self.global_rank == 0:
return_value = self.train_average_meter.write_process(batch_nb, self.len_train_dataloader, self.current_epoch, write_batch_idx=50)
if return_value is not None:
iou, fb_iou = return_value
self.log("fewshot_train_iou", iou)
self.log("fewshot_trainl_fb_iou", fb_iou)
return loss
def training_epoch_end(self, outs):
if self.global_rank == 0:
self.train_average_meter.write_result('Training', self.current_epoch)
self.fewshot_trn_loss = utils.mean(self.train_average_meter.loss_buf)
self.fewshot_trn_miou, self.fewshot_trn_fb_iou = self.train_average_meter.compute_iou()
self.log("fewshot_trn_loss", self.fewshot_trn_loss)
self.log("fewshot_trn_miou", self.fewshot_trn_miou)
self.log("fewshot_trn_fb_iou", self.fewshot_trn_fb_iou)
def validation_step(self, batch, batch_nb):
if self.args.finetune_mode and self.args.nshot == 5:
bshape = batch['query_img'].shape
img = batch['query_img'].view(-1, bshape[2], bshape[3], bshape[4])
target = batch['query_mask'].view(-1, bshape[3], bshape[4])
class_info = batch['class_id']
for i in range(1, 5):
class_info = torch.cat([class_info, batch['class_id']])
out = self(img, class_info)
val_loss = self.criterion(out, target)
# 3. Evaluate prediction
if self.args.benchmark == 'pascal' and batch['query_ignore_idx'] is not None:
query_ignore_idx = batch['query_ignore_idx'].view(-1, bshape[3], bshape[4])
area_inter, area_union = Evaluator.classify_prediction(out.argmax(dim=1), target, query_ignore_idx)
else:
area_inter, area_union = Evaluator.classify_prediction(out.argmax(dim=1), target)
else:
img = batch['query_img'].squeeze(1)
target = batch['query_mask'].squeeze(1)
class_info = batch['class_id']
out = self(img, class_info)
val_loss = self.criterion(out, target)
# 3. Evaluate prediction
if self.args.benchmark == 'pascal' and batch['query_ignore_idx'] is not None:
query_ignore_idx = batch['query_ignore_idx'].squeeze(1)
area_inter, area_union = Evaluator.classify_prediction(out.argmax(dim=1), target, query_ignore_idx)
else:
area_inter, area_union = Evaluator.classify_prediction(out.argmax(dim=1), target)
self.val_average_meter.update(area_inter, area_union, class_info, val_loss.detach().clone())
if self.global_rank == 0:
return_value = self.val_average_meter.write_process(batch_nb, self.len_val_dataloader, self.current_epoch, write_batch_idx=50)
if return_value is not None:
iou, fb_iou = return_value
self.log("fewshot_val_iou", iou)
self.log("fewshot_val_fb_iou", fb_iou)
def validation_epoch_end(self, outs):
if self.global_rank == 0:
self.val_average_meter.write_result('Validation', self.current_epoch)
val_loss = utils.mean(self.val_average_meter.loss_buf)
val_miou, val_fb_iou = self.val_average_meter.compute_iou()
self.log("fewshot_val_loss", val_loss)
self.log("fewshot_val_miou", val_miou)
self.log("fewshot_val_fb_iou", val_fb_iou)
if self.global_rank == 0:
Logger.tbd_writer.add_scalars('fewshot_data/data/loss', {'trn_loss': self.fewshot_trn_loss, 'val_loss': val_loss}, self.current_epoch)
Logger.tbd_writer.add_scalars('fewshot_data/data/miou', {'trn_miou': self.fewshot_trn_miou, 'val_miou': val_miou}, self.current_epoch)
Logger.tbd_writer.add_scalars('fewshot_data/data/fb_iou', {'trn_fb_iou': self.fewshot_trn_fb_iou, 'val_fb_iou': val_fb_iou}, self.current_epoch)
Logger.tbd_writer.flush()
if self.current_epoch + 1 == self.epochs:
Logger.tbd_writer.close()
Logger.info('==================== Finished Training ====================')
threshold_epoch = 3
if self.args.benchmark in ['pascal', 'coco'] and self.current_epoch >= threshold_epoch:
print('End this loop!')
exit()
def configure_optimizers(self):
# if we want to fix the encoder
if self.fixed_encoder:
params_list = [
{"params": self.net.pretrained.model.parameters(), "lr": 0},
]
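# A zero learning rate keeps the pretrained (CLIP) backbone effectively frozen;
# only the act_postprocess heads appended below receive a non-zero learning rate.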
params_list.append(
{"params": self.net.pretrained.act_postprocess1.parameters(), "lr": self.base_lr}
)
params_list.append(
{"params": self.net.pretrained.act_postprocess2.parameters(), "lr": self.base_lr}
)
params_list.append(
{"params": self.net.pretrained.act_postprocess3.parameters(), "lr": self.base_lr}
)
params_list.append(
{"params": self.net.pretrained.act_postprocess4.parameters(), "lr": self.base_lr}
)
else:
params_list = [
{"params": self.net.pretrained.parameters(), "lr": self.base_lr},
]
if hasattr(self.net, "scratch"):
print("Found output scratch")
params_list.append(
{"params": self.net.scratch.parameters(), "lr": self.base_lr * 10}
)
if hasattr(self.net, "auxlayer"):
print("Found auxlayer")
params_list.append(
{"params": self.net.auxlayer.parameters(), "lr": self.base_lr * 10}
)
if hasattr(self.net, "scale_inv_conv"):
print(self.net.scale_inv_conv)
print("Found scaleinv layers")
params_list.append(
{
"params": self.net.scale_inv_conv.parameters(),
"lr": self.base_lr * 10,
}
)
params_list.append(
{"params": self.net.scale2_conv.parameters(), "lr": self.base_lr * 10}
)
params_list.append(
{"params": self.net.scale3_conv.parameters(), "lr": self.base_lr * 10}
)
params_list.append(
{"params": self.net.scale4_conv.parameters(), "lr": self.base_lr * 10}
)
if self.other_kwargs["midasproto"]:
print("Using midas optimization protocol")
opt = torch.optim.Adam(
params_list,
lr=self.base_lr,
betas=(0.9, 0.999),
weight_decay=self.other_kwargs["weight_decay"],
)
sch = torch.optim.lr_scheduler.LambdaLR(
opt, lambda x: pow(1.0 - x / self.epochs, 0.9)
)
else:
opt = torch.optim.SGD(
params_list,
lr=self.base_lr,
momentum=0.9,
weight_decay=self.other_kwargs["weight_decay"],
)
sch = torch.optim.lr_scheduler.LambdaLR(
opt, lambda x: pow(1.0 - x / self.epochs, 0.9)
)
return [opt], [sch]
def train_dataloader(self):
if self.args.finetune_mode:
dataloader = FSSDataset.build_dataloader(
self.args.benchmark,
self.args.bsz,
self.args.nworker,
self.args.fold,
'test',
self.args.nshot)
else:
dataloader = FSSDataset.build_dataloader(
self.args.benchmark,
self.args.bsz,
self.args.nworker,
self.args.fold,
'trn')
self.len_train_dataloader = len(dataloader) // torch.cuda.device_count()
self.train_average_meter = AverageMeter(dataloader.dataset)
return dataloader
def val_dataloader(self):
self.val_iou = SegmentationMetric(self.num_classes)
if self.args.finetune_mode:
dataloader = FSSDataset.build_dataloader(
self.args.benchmark,
self.args.bsz,
self.args.nworker,
self.args.fold,
'test',
self.args.nshot)
else:
dataloader = FSSDataset.build_dataloader(
self.args.benchmark,
self.args.bsz,
self.args.nworker,
self.args.fold,
'val')
self.len_val_dataloader = len(dataloader) // torch.cuda.device_count()
self.val_average_meter = AverageMeter(dataloader.dataset)
return dataloader
def criterion(self, logit_mask, gt_mask):
bsz = logit_mask.size(0)
logit_mask = logit_mask.view(bsz, 2, -1)
gt_mask = gt_mask.view(bsz, -1).long()
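# Shape sketch (assuming logits of shape (bsz, 2, H, W) and masks of (bsz, H, W)):
# the views above flatten the spatial dims so the cross-entropy below is computed
# per pixel over the two classes ('others' vs. the prompted class).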
return self.cross_entropy_loss(logit_mask, gt_mask)
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument(
"--data_path",
type=str,
default='',
help="path where dataset is stored"
)
parser.add_argument(
"--dataset",
type=str,
default='pascal',
choices=['pascal', 'coco', 'fss'],
)
parser.add_argument(
"--batch_size", type=int, default=20, help="size of the batches"
)
parser.add_argument(
"--base_lr", type=float, default=0.004, help="learning rate"
)
parser.add_argument("--momentum", type=float, default=0.9, help="SGD momentum")
parser.add_argument(
"--weight_decay", type=float, default=1e-4, help="weight_decay"
)
parser.add_argument(
"--aux", action="store_true", default=False, help="Auxilary Loss"
)
parser.add_argument(
"--aux-weight",
type=float,
default=0.2,
help="Auxilary loss weight (default: 0.2)",
)
parser.add_argument(
"--se-loss",
action="store_true",
default=False,
help="Semantic Encoding Loss SE-loss",
)
parser.add_argument(
"--se-weight", type=float, default=0.2, help="SE-loss weight (default: 0.2)"
)
parser.add_argument(
"--midasproto", action="store_true", default=False, help="midasprotocol"
)
parser.add_argument(
"--ignore_index",
type=int,
default=-1,
help="numeric value of ignore label in gt",
)
parser.add_argument(
"--augment",
action="store_true",
default=False,
help="Use extended augmentations",
)
parser.add_argument(
"--use_relabeled",
action="store_true",
default=False,
help="Use extended augmentations",
)
parser.add_argument(
"--nworker",
type=int,
default=8
)
parser.add_argument(
"--fold",
type=int,
default=0,
choices=[0, 1, 2, 3]
)
parser.add_argument(
"--logpath",
type=str,
default=''
)
parser.add_argument(
"--nshot",
type=int,
default=0 #1
)
parser.add_argument(
"--finetune_mode",
action="store_true",
default=False,
help="whether finetune or not"
)
return parser
|
exps/stage3_root2/config.py | zju3dv/SMAP | 209 | 12714451 | <gh_stars>100-1000
# encoding: utf-8
import os, getpass
import os.path as osp
import argparse
from easydict import EasyDict as edict
from dataset.data_settings import load_dataset
from cvpack.utils.pyt_utils import ensure_dir
class Config:
# -------- Directory Config -------- #
ROOT_DIR = os.environ['PROJECT_HOME']
OUTPUT_DIR = osp.join(ROOT_DIR, 'model_logs', osp.split(osp.split(osp.realpath(__file__))[0])[1])
TEST_DIR = osp.join(OUTPUT_DIR, 'log_dir')
TENSORBOARD_DIR = osp.join(OUTPUT_DIR, 'tb_dir')
# -------- Data Config -------- #
DATALOADER = edict()
DATALOADER.NUM_WORKERS = 0
DATALOADER.ASPECT_RATIO_GROUPING = False
DATALOADER.SIZE_DIVISIBILITY = 0
DATASET = edict()
DATASET.NAME = 'MIX'
dataset = load_dataset(DATASET.NAME)
DATASET.KEYPOINT = dataset.KEYPOINT
DATASET.PAF = dataset.PAF
DATASET.ROOT_IDX = dataset.ROOT_IDX # pelvis or neck
DATASET.MAX_PEOPLE = 20
INPUT = edict()
INPUT.NORMALIZE = True
INPUT.MEANS = [0.406, 0.456, 0.485] # bgr
INPUT.STDS = [0.225, 0.224, 0.229]
INPUT_SHAPE = dataset.INPUT_SHAPE
OUTPUT_SHAPE = dataset.OUTPUT_SHAPE
# -------- Model Config -------- #
MODEL = edict()
MODEL.STAGE_NUM = 3
MODEL.UPSAMPLE_CHANNEL_NUM = 256
MODEL.DEVICE = 'cuda'
MODEL.WEIGHT = None # osp.join(ROOT_DIR, 'lib/models/resnet-50_rename.pth')
# -------- Training Config -------- #
SOLVER = edict()
SOLVER.IMG_PER_GPU = 2
SOLVER.BASE_LR = 2e-4
SOLVER.CHECKPOINT_PERIOD = 4800
SOLVER.MAX_ITER = 96000*2
SOLVER.WEIGHT_DECAY = 8e-6
SOLVER.WARMUP_FACTOR = 0.1
SOLVER.WARMUP_ITERS = 2400
LOSS = edict()
LOSS.OHKM = True
LOSS.TOPK = 8
LOSS.COARSE_TO_FINE = True
WITH_MDS = True
RUN_EFFICIENT = False
# -------- Test Config -------- #
TEST = edict()
TEST.IMG_PER_GPU = 16
TEST.ROOT_PATH = '/data/MultiPersonTestSet' # '/data/datasets/mupots-3d-eval/MultiPersonTestSet'
TEST.JSON_PATH = osp.join(TEST.ROOT_PATH, 'M3E_gt.json')
config = Config()
cfg = config
def link_log_dir():
if not osp.exists('./log'):
ensure_dir(config.OUTPUT_DIR)
cmd = 'ln -s ' + config.OUTPUT_DIR + ' log'
os.system(cmd)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
'-log', '--linklog', default=False, action='store_true')
return parser
if __name__ == '__main__':
parser = make_parser()
args = parser.parse_args()
if args.linklog:
link_log_dir()
|
nri/nri/models/decoders/__init__.py | take-cheeze/models | 112 | 12714470 | <reponame>take-cheeze/models<gh_stars>100-1000
from nri.models.decoders.mlp_decoder import MLPDecoder
from nri.models.decoders.rnn_decoder import RNNDecoder |
lib/test/test_automata.py | SkittlePox/ConwayGOLGym | 138 | 12714474 | from conway import conway
import unittest
import numpy as np
class TestConway(unittest.TestCase):
def test_still(self):
"""2x2 block"""
A = np.zeros((10,10))
A[1:3,1:3] = 1
B = conway(A)
assert (A == B).all()
def test_oscillator(self):
"""blinker"""
A = np.zeros((10,10))
A[1:4,1] = 1
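# A vertical blinker at cells (1,1), (2,1), (3,1): one step should turn it into
# a horizontal line on row 2 (checked below), and a second step restores it.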
B = conway(A)
assert (B[2, 0:3] == 1).all()
B = conway(B)
assert (A == B).all()
def test_evolution(self):
"""test that something changes"""
m, n = 10, 10
A = np.random.random(m*n).reshape((m, n)).round()
B = conway(A)
assert (B != A).any()
if __name__ == '__main__':
unittest.main()
|
tests/test_end_to_end.py | lammda/mercari-solution | 249 | 12714486 | import numpy as np
import pandas as pd
import pytest
from sklearn.model_selection import train_test_split
from mercari.datasets_mx import prepare_vectorizer_1, prepare_vectorizer_2, prepare_vectorizer_3
from mercari.datasets_tf import prepare_vectorizer_1_tf, prepare_vectorizer_2_tf, prepare_vectorizer_3_tf
from mercari.mercari_io import load_train
from mercari.mx_sparse import MXRegression, MXRegressionClf
from mercari.tf_sparse import RegressionHuber, RegressionClf
from mercari.utils import rmsle
@pytest.mark.parametrize('vectorizer', [
prepare_vectorizer_1(),
prepare_vectorizer_2(),
prepare_vectorizer_3(),
prepare_vectorizer_1_tf(),
prepare_vectorizer_2_tf(),
prepare_vectorizer_3_tf(),
])
@pytest.mark.parametrize('model', [
MXRegression(n_epoch=3, loss='huber'),
MXRegression(n_epoch=3, binary_X=True, loss='huber'),
MXRegressionClf(n_epoch=3, n_hidden=(196, 64)),
MXRegressionClf(n_epoch=3, n_hidden=(196, 64), binary_X=True),
RegressionHuber(n_epoch=3),
RegressionHuber(n_epoch=3, binary_X=True),
RegressionClf(n_epoch=3, n_hidden=(196, 64)),
RegressionClf(n_epoch=3, n_hidden=(196, 64), binary_X=True)
])
def test_end_to_end(vectorizer, model):
_test(vectorizer, model, n_rows=None)
@pytest.mark.parametrize('model', [
MXRegression(n_epoch=3, loss='huber'),
MXRegressionClf(n_epoch=3, n_hidden=(196, 64)),
RegressionHuber(n_epoch=3),
RegressionClf(n_epoch=3, n_hidden=(196, 64)),
])
@pytest.mark.parametrize('vectorizer', [
prepare_vectorizer_1(),
prepare_vectorizer_1_tf(),
])
@pytest.mark.parametrize('n_rows', [
None,
'random',
1,
2,
2**10,
2**13 - 1,
2**13,
2**13 + 1,
2**13 + 2**10,
])
def test_random_number_of_rows(vectorizer, model, n_rows):
_test(vectorizer, model, n_rows)
def _test(vectorizer, model, n_rows):
tr = load_train('tests/train_10k.tsv')
tr, va = train_test_split(tr)
te = pd.read_csv('tests/test_10k_corrupted.tsv', sep="\t")
if n_rows is not None:
if n_rows == 'random':
n_rows = np.random.randint(1, te.shape[0])
te = te.sample(n=n_rows)
mat_tr = vectorizer.fit_transform(tr, tr.price)
mat_te = vectorizer.transform(te.copy())
mat_va = vectorizer.transform(va)
model.fit(mat_tr, np.log1p(tr.price))
assert rmsle(np.expm1(model.predict(mat_va)), va.price) < 0.85
te_preds = np.expm1(model.predict(mat_te))
assert te_preds.shape[0] == te.shape[0]
assert np.all(np.isfinite(te_preds))
assert te_preds.min() >= -1, "min price is {}".format(te_preds.min())
assert te_preds.max() <= 3000, "max price is {}".format(te_preds.max())
|
main.py | AlexTaehwan/kgpolicy | 111 | 12714494 | import os
import random
import torch
import numpy as np
from time import time
from tqdm import tqdm
from copy import deepcopy
from pathlib import Path
from prettytable import PrettyTable
from common.test import test_v2
from common.utils import early_stopping, print_dict
from common.config import parse_args
from common.dataset import CKGData
from common.dataset.build import build_loader
from modules.sampler import KGPolicy
from modules.recommender import MF
def train_one_epoch(
recommender,
sampler,
train_loader,
recommender_optim,
sampler_optim,
adj_matrix,
edge_matrix,
train_data,
cur_epoch,
avg_reward,
):
loss, base_loss, reg_loss = 0, 0, 0
epoch_reward = 0
"""Train one epoch"""
tbar = tqdm(train_loader, ascii=True)
num_batch = len(train_loader)
for batch_data in tbar:
tbar.set_description("Epoch {}".format(cur_epoch))
if torch.cuda.is_available():
batch_data = {k: v.cuda(non_blocking=True) for k, v in batch_data.items()}
"""Train recommender using negtive item provided by sampler"""
recommender_optim.zero_grad()
neg = batch_data["neg_i_id"]
pos = batch_data["pos_i_id"]
users = batch_data["u_id"]
selected_neg_items_list, _ = sampler(batch_data, adj_matrix, edge_matrix)
selected_neg_items = selected_neg_items_list[-1, :]
train_set = train_data[users]
in_train = torch.sum(
selected_neg_items.unsqueeze(1) == train_set.long(), dim=1
).byte()
selected_neg_items[in_train] = neg[in_train]
base_loss_batch, reg_loss_batch = recommender(users, pos, selected_neg_items)
loss_batch = base_loss_batch + reg_loss_batch
loss_batch.backward()
recommender_optim.step()
"""Train sampler network"""
sampler_optim.zero_grad()
selected_neg_items_list, selected_neg_prob_list = sampler(
batch_data, adj_matrix, edge_matrix
)
with torch.no_grad():
reward_batch = recommender.get_reward(users, pos, selected_neg_items_list)
epoch_reward += torch.sum(reward_batch)
reward_batch -= avg_reward
batch_size = reward_batch.size(1)
n = reward_batch.size(0) - 1
R = torch.zeros(batch_size, device=reward_batch.device)
reward = torch.zeros(reward_batch.size(), device=reward_batch.device)
gamma = args_config.gamma
for i, r in enumerate(reward_batch.flip(0)):
R = r + gamma * R
reward[n - i] = R
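# The flipped loop above accumulates discounted returns back-to-front: with
# discount factor gamma and per-step rewards [r0, r1, r2], `reward` becomes
# [r0 + gamma*r1 + gamma^2*r2, r1 + gamma*r2, r2] (worked example only; the
# number of steps and gamma come from the sampler and args_config).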
reinforce_loss = -1 * torch.sum(reward_batch * selected_neg_prob_list)
reinforce_loss.backward()
sampler_optim.step()
"""record loss in an epoch"""
loss += loss_batch
reg_loss += reg_loss_batch
base_loss += base_loss_batch
avg_reward = epoch_reward / num_batch
train_res = PrettyTable()
train_res.field_names = ["Epoch", "Loss", "BPR-Loss", "Regulation", "AVG-Reward"]
train_res.add_row(
[cur_epoch, loss.item(), base_loss.item(), reg_loss.item(), avg_reward.item()]
)
print(train_res)
return loss, base_loss, reg_loss, avg_reward
def save_model(file_name, model, config):
if not os.path.isdir(config.out_dir):
os.mkdir(config.out_dir)
model_file = Path(config.out_dir + file_name)
model_file.touch(exist_ok=True)
print("Saving model...")
torch.save(model.state_dict(), model_file)
def build_sampler_graph(n_nodes, edge_threshold, graph):
adj_matrix = torch.zeros(n_nodes, edge_threshold * 2)
edge_matrix = torch.zeros(n_nodes, edge_threshold)
"""sample neighbors for each node"""
for node in tqdm(graph.nodes, ascii=True, desc="Build sampler matrix"):
neighbors = list(graph.neighbors(node))
if len(neighbors) >= edge_threshold:
sampled_edge = random.sample(neighbors, edge_threshold)
edges = deepcopy(sampled_edge)
else:
neg_id = random.sample(
range(CKG.item_range[0], CKG.item_range[1] + 1),
edge_threshold - len(neighbors),
)
node_id = [node] * (edge_threshold - len(neighbors))
sampled_edge = neighbors + neg_id
edges = neighbors + node_id
"""concatenate sampled edge with random edge"""
sampled_edge += random.sample(
range(CKG.item_range[0], CKG.item_range[1] + 1), edge_threshold
)
adj_matrix[node] = torch.tensor(sampled_edge, dtype=torch.long)
edge_matrix[node] = torch.tensor(edges, dtype=torch.long)
if torch.cuda.is_available():
adj_matrix = adj_matrix.cuda().long()
edge_matrix = edge_matrix.cuda().long()
return adj_matrix, edge_matrix
def build_train_data(train_mat):
num_user = max(train_mat.keys()) + 1
num_true = max([len(i) for i in train_mat.values()])
train_data = torch.zeros(num_user, num_true)
for i in train_mat.keys():
true_list = train_mat[i]
true_list += [-1] * (num_true - len(true_list))
train_data[i] = torch.tensor(true_list, dtype=torch.long)
return train_data
def train(train_loader, test_loader, graph, data_config, args_config):
"""build padded training set"""
train_mat = graph.train_user_dict
train_data = build_train_data(train_mat)
if args_config.pretrain_r:
print(
"\nLoad model from {}".format(
args_config.data_path + args_config.model_path
)
)
paras = torch.load(args_config.data_path + args_config.model_path)
all_embed = torch.cat((paras["user_para"], paras["item_para"]))
data_config["all_embed"] = all_embed
recommender = MF(data_config=data_config, args_config=args_config)
sampler = KGPolicy(recommender, data_config, args_config)
if torch.cuda.is_available():
train_data = train_data.long().cuda()
sampler = sampler.cuda()
recommender = recommender.cuda()
print("\nSet sampler as: {}".format(str(sampler)))
print("Set recommender as: {}\n".format(str(recommender)))
recommender_optimer = torch.optim.Adam(recommender.parameters(), lr=args_config.rlr)
sampler_optimer = torch.optim.Adam(sampler.parameters(), lr=args_config.slr)
loss_loger, pre_loger, rec_loger, ndcg_loger, hit_loger = [], [], [], [], []
stopping_step, cur_best_pre_0, avg_reward = 0, 0.0, 0
t0 = time()
for epoch in range(args_config.epoch):
if epoch % args_config.adj_epoch == 0:
"""sample adjacency matrix"""
adj_matrix, edge_matrix = build_sampler_graph(
data_config["n_nodes"], args_config.edge_threshold, graph.ckg_graph
)
cur_epoch = epoch + 1
loss, base_loss, reg_loss, avg_reward = train_one_epoch(
recommender,
sampler,
train_loader,
recommender_optimer,
sampler_optimer,
adj_matrix,
edge_matrix,
train_data,
cur_epoch,
avg_reward,
)
"""Test"""
if cur_epoch % args_config.show_step == 0:
with torch.no_grad():
ret = test_v2(recommender, args_config.Ks, graph)
loss_loger.append(loss)
rec_loger.append(ret["recall"])
pre_loger.append(ret["precision"])
ndcg_loger.append(ret["ndcg"])
hit_loger.append(ret["hit_ratio"])
print_dict(ret)
cur_best_pre_0, stopping_step, should_stop = early_stopping(
ret["recall"][0],
cur_best_pre_0,
stopping_step,
expected_order="acc",
flag_step=args_config.flag_step,
)
if should_stop:
break
recs = np.array(rec_loger)
pres = np.array(pre_loger)
ndcgs = np.array(ndcg_loger)
hit = np.array(hit_loger)
best_rec_0 = max(recs[:, 0])
idx = list(recs[:, 0]).index(best_rec_0)
final_perf = (
"Best Iter=[%d]@[%.1f]\n recall=[%s] \n precision=[%s] \n hit=[%s] \n ndcg=[%s]"
% (
idx,
time() - t0,
"\t".join(["%.5f" % r for r in recs[idx]]),
"\t".join(["%.5f" % r for r in pres[idx]]),
"\t".join(["%.5f" % r for r in hit[idx]]),
"\t".join(["%.5f" % r for r in ndcgs[idx]]),
)
)
print(final_perf)
if __name__ == "__main__":
"""fix the random seed"""
seed = 2020
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
"""initialize args and dataset"""
args_config = parse_args()
CKG = CKGData(args_config)
"""set the gpu id"""
if torch.cuda.is_available():
torch.cuda.set_device(args_config.gpu_id)
data_config = {
"n_users": CKG.n_users,
"n_items": CKG.n_items,
"n_relations": CKG.n_relations + 2,
"n_entities": CKG.n_entities,
"n_nodes": CKG.entity_range[1] + 1,
"item_range": CKG.item_range,
}
print("\ncopying CKG graph for data_loader.. it might take a few minutes")
graph = deepcopy(CKG)
train_loader, test_loader = build_loader(args_config=args_config, graph=graph)
train(
train_loader=train_loader,
test_loader=test_loader,
graph=CKG,
data_config=data_config,
args_config=args_config,
)
|
terraform_compliance/steps/then/it_must_contain_something.py | Miouge1/cli | 866 | 12714499 | # -*- coding: utf-8 -*-
from terraform_compliance.common.helper import (
seek_key_in_dict, # importing this purely because the unit tests require it to exist in global scope
Null
)
from terraform_compliance.common.error_handling import Error
def it_must_contain_something(_step_obj, something, inherited_values=Null, child=False):
match = _step_obj.context.match
seek_key_in_dict, seek_regex_key_in_dict_values = match.seek_key_in_dict, match.seek_regex_key_in_dict_values
prop_list = []
_step_obj.context.stash = inherited_values if inherited_values is not Null else _step_obj.context.stash
if _step_obj.context.type in ('resource', 'data'):
for resource in _step_obj.context.stash:
if not isinstance(resource, dict) \
or 'values' not in resource \
or 'address' not in resource \
or 'type' not in resource:
resource = {'values': resource,
'address': resource,
'type': _step_obj.context.name}
# not going to use match.get here because the following line is an edge case
values = resource.get('values', resource.get('expressions', {}))
if not values:
values = seek_key_in_dict(resource, something)
found_values = []
found_key = Null # this could also become a list
resource_passed = False
# set this to True if you get anything from the resource, don't set it to False if you get empty values as there could be other values as well
if isinstance(values, dict):
found_key = match.get(values, something, Null)
if found_key is not Null:
found_key = [{something: found_key}]
else:
found_key = seek_key_in_dict(values, something)
for kv_pair in found_key:
# kv_pair must be in {something: found_key} format.
if not isinstance(kv_pair, dict):
continue # should raise exception
# ignore the values that correspond to Null
# Following line could be problematic, how to determine if something is set to be empty or not set? Behavior is provider dependent.
# For now, allow '' and don't allow [] as per user cases.
if match.get(kv_pair, something) not in ([],):
found_values.append(match.get(kv_pair, something))
resource_passed = True
elif isinstance(values, list):
for value in values:
if isinstance(value, dict):
# First search in the keys
found_key = seek_key_in_dict(value, something)
# The following is an edge case that covers things like aws asg tags (https://www.terraform.io/docs/providers/aws/r/autoscaling_group.html)
# Then search in the values with 'key'
if not found_key:
found_key = seek_regex_key_in_dict_values(value, 'key', something)
if found_key:
found_key = found_key[0]
# not going to use match.get here because the following line is an edge case
found_values.extend(value.get('value'))
resource_passed = True
continue
elif isinstance(value, list):
_, temp_found_values = it_must_contain_something(_step_obj, something, value, child=True)
prop_list.extend(temp_found_values)
resource_passed = True
elif isinstance(value, (str, bool, int, float)):
if match.equals(value, something):
found_values.append(value)
resource_passed = True
if found_key is not Null and len(found_key):
for found_key_instance in found_key:
if isinstance(found_key_instance, dict):
if match.get(found_key_instance, something, Null) not in (Null, [], '', {}):
found_values.append(match.get(found_key_instance, something))
resource_passed = True
for i, found_val in enumerate(found_values):
if isinstance(found_val, dict) and 'constant_value' in found_val:
found_values[i] = found_val['constant_value']
for found_val in found_values:
prop_list.append({'address': resource['address'],
'values': found_val,
'type': _step_obj.context.name})
# do not check prop list here because every resource should contain it.
if not resource_passed and not child: # if nothing was found in this resource, don't error if you're a child
Error(_step_obj, '{} ({}) does not have {} property.'.format(resource['address'],
resource.get('type', ''),
something))
if prop_list:
_step_obj.context.stash = prop_list
_step_obj.context.property_name = something
return something, prop_list
elif _step_obj.context.type == 'provider':
prop_list = []
for provider_data in _step_obj.context.stash:
values = seek_key_in_dict(provider_data, something)
if values:
prop_list.extend(values)
_step_obj.context.property_name = something
_step_obj.context.address = '{}.{}'.format(provider_data.get('name', _step_obj.context.addresses),
provider_data.get('alias', "\b"))
else:
Error(_step_obj, '{} {} does not have {} property.'.format(_step_obj.context.addresses,
_step_obj.context.type,
something))
if prop_list:
_step_obj.context.stash = prop_list
return True
Error(_step_obj, '{} {} does not have {} property.'.format(_step_obj.context.addresses,
_step_obj.context.type,
something))
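# For reference, each entry stashed above has the shape
# {'address': <resource address>, 'values': <matched value>, 'type': <resource type>},
# e.g. {'address': 'aws_s3_bucket.example', 'values': 'AES256', 'type': 'aws_s3_bucket'}
# (the concrete address and value in this example are hypothetical).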
def it_must_not_contain_something(_step_obj, something, inherited_values=Null):
match = _step_obj.context.match
seek_key_in_dict, seek_regex_key_in_dict_values = match.seek_key_in_dict, match.seek_regex_key_in_dict_values
prop_list = []
_step_obj.context.stash = inherited_values if inherited_values is not Null else _step_obj.context.stash
if _step_obj.context.type in ('resource', 'data'):
for resource in _step_obj.context.stash:
if not isinstance(resource, dict) \
or 'values' not in resource \
or 'address' not in resource \
or 'type' not in resource:
resource = {'values': resource,
'address': resource,
'type': _step_obj.context.name}
values = resource.get('values', resource.get('expressions', {}))
if not values:
values = seek_key_in_dict(resource, something)
found_values = []
found_key = Null
resource_passed = False
# set this to True if you get anything from the resource, don't set it to False if you get empty values as there could be other values as well
if isinstance(values, dict):
found_key = match.get(values, something, Null)
if found_key is not Null:
found_key = [{something: found_key}]
else:
found_key = seek_key_in_dict(values, something)
for kv_pair in found_key:
# kv_pair must be in {something: found_key} format.
if not isinstance(kv_pair, dict):
continue # could raise an exception
# ignore the values that correspond to Null
# Following line could be problematic, how to determine if something is set to be empty or not set? Behavior is provider dependent.
# For now, allow '' and don't allow [] as per user cases.
if match.get(kv_pair, something) not in ([],):
found_values.append(match.get(kv_pair, something))
resource_passed = True
elif isinstance(values, list):
for value in values:
if isinstance(value, dict):
# First search in the keys
found_key = seek_key_in_dict(value, something)
# Then search in the values with 'key'
if not found_key:
found_key = seek_regex_key_in_dict_values(value, 'key', something)
if found_key:
found_key = found_key[0]
found_values.extend(value.get('value'))
resource_passed = True
continue
elif isinstance(value, list):
_, temp_found_values = it_must_contain_something(_step_obj, something, value, child=True)
prop_list.extend(temp_found_values)
resource_passed = True
elif isinstance(value, (str, bool, int, float)):
if match.equals(value, something):
found_values.append(value)
resource_passed = True
if found_key is not Null and len(found_key):
for found_key_instance in found_key:
if isinstance(found_key_instance, dict):
if match.get(found_key_instance, something, Null) not in (Null, [], '', {}):
found_values.append(match.get(found_key_instance, something))
resource_passed = True
for i, found_val in enumerate(found_values):
if isinstance(found_val, dict) and 'constant_value' in found_val:
found_values[i] = found_val['constant_value']
if resource_passed:
Error(_step_obj, '{} property exists in {} ({}).'.format(something, resource['address'], resource.get('type', '')))
elif _step_obj.context.type == 'provider':
for provider_data in _step_obj.context.stash:
values = seek_key_in_dict(provider_data, something)
if values:
Error(_step_obj, '{} {} does not have {} property.'.format(_step_obj.context.addresses,
_step_obj.context.type,
something))
|
pydocx/openxml/packaging/footnotes_part.py | botzill/pydocx | 127 | 12714500 | # coding: utf-8
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
from pydocx.openxml.packaging.open_xml_part import OpenXmlPart
from pydocx.openxml.wordprocessing import Footnotes
class FootnotesPart(OpenXmlPart):
'''
Represents a Footnotes part within a Word document container.
See also: http://msdn.microsoft.com/en-us/library/documentformat.openxml.packaging.footnotespart%28v=office.14%29.aspx # noqa
'''
relationship_type = '/'.join([
'http://schemas.openxmlformats.org',
'officeDocument',
'2006',
'relationships',
'footnotes',
])
def __init__(self, *args, **kwargs):
super(FootnotesPart, self).__init__(*args, **kwargs)
self._footnotes = None
@property
def footnotes(self):
if not self._footnotes:
self._footnotes = self.load_footnotes()
return self._footnotes
def load_footnotes(self):
self._footnotes = Footnotes.load(self.root_element, container=self)
return self._footnotes
|
src/python/gem5/components/memory/dram_interfaces/wideio.py | hyu-iot/gem5 | 765 | 12714505 | <gh_stars>100-1000
# Copyright (c) 2012-2021 Arm Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2013 <NAME>
# Copyright (c) 2015 University of Kaiserslautern
# Copyright (c) 2015 The University of Bologna
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Interfaces for WideIO memory devices
These memory "interfaces" contain the timing,energy,etc parameters for each
memory type and are usually based on datasheets for the memory devices.
You can use these interfaces in the MemCtrl object as the `dram` timing
interface.
"""
from m5.objects import DRAMInterface
class WideIO_200_1x128(DRAMInterface):
"""
A single WideIO x128 interface (one command and address bus), with
default timings based on an estimated WIO-200 8 Gbit part.
"""
# No DLL for WideIO
dll = False
# size of device
device_size = "1024MiB"
# 1x128 configuration, 1 device with a 128-bit interface
device_bus_width = 128
# This is a BL4 device
burst_length = 4
# Each device has a page (row buffer) size of 4KB
# (this depends on the memory density)
device_rowbuffer_size = "4KiB"
# 1x128 configuration, so 1 device
devices_per_rank = 1
# Use one rank for a one-high die stack
ranks_per_channel = 1
# WideIO has 4 banks in all configurations
banks_per_rank = 4
# 200 MHz
tCK = "5ns"
# WIO-200
tRCD = "18ns"
tCL = "18ns"
tRP = "18ns"
tRAS = "42ns"
tWR = "15ns"
# Read to precharge is same as the burst
tRTP = "20ns"
# 4 beats across an x128 SDR interface translates to 4 clocks @ 200 MHz.
# Note this is a BL4 SDR device.
tBURST = "20ns"
# WIO 8 Gb
tRFC = "210ns"
# WIO 8 Gb, <=85C, half for >85C
tREFI = "3.9us"
# Greater of 2 CK or 15 ns, 2 CK @ 200 MHz = 10 ns
tWTR = "15ns"
# Default same rank rd-to-wr bus turnaround to 2 CK, @200 MHz = 10 ns
tRTW = "10ns"
# Default different rank bus delay to 2 CK, @200 MHz = 10 ns
tCS = "10ns"
# Activate to activate irrespective of density and speed grade
tRRD = "10.0ns"
# Two instead of four activation window
tXAW = "50ns"
activation_limit = 2
# The WideIO specification does not provide current information
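# Hedged usage sketch (not part of the original file): in a gem5 config script
# this interface is normally handed to a memory controller as its `dram` timing
# interface. The controller wiring and address range below are illustrative
# assumptions only:
#
#     from m5.objects import AddrRange, MemCtrl
#     ctrl = MemCtrl()
#     ctrl.dram = WideIO_200_1x128(range=AddrRange("1GiB"))
#     system.mem_ctrls = [ctrl]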
|
lldb/test/API/tools/lldb-vscode/console/TestVSCode_redirection_to_console.py | mkinsner/llvm | 2,338 | 12714510 | import unittest2
import vscode
import json
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
import lldbvscode_testcase
class TestVSCode_redirection_to_console(lldbvscode_testcase.VSCodeTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
@skipIfWindows
@skipIfRemote
def test(self):
"""
            Without proper stdout and stderr redirection, this test would fail with
            an exception such as:
Exception: unexpected malformed message from lldb-vscode
"""
program = self.getBuildArtifact("a.out")
self.build_and_launch(
program,
lldbVSCodeEnv={"LLDB_VSCODE_TEST_STDOUT_STDERR_REDIRECTION": ""})
source = 'main.cpp'
breakpoint1_line = line_number(source, '// breakpoint 1')
breakpoint_ids = self.set_source_breakpoints(source, [breakpoint1_line])
self.assertEqual(len(breakpoint_ids), 1,
"expect correct number of breakpoints")
self.continue_to_breakpoints(breakpoint_ids)
self.assertIn('argc', json.dumps(self.vscode.get_local_variables(frameIndex=1)))
|
src/DyldExtractor/macho/macho_context.py | arandomdev/DyldExtractor | 177 | 12714528 | import struct
from mmap import mmap
from typing import Union
from DyldExtractor.file_context import FileContext
from DyldExtractor.macho.segment_context import SegmentContext
from DyldExtractor.macho.macho_structs import (
LoadCommandMap,
LoadCommands,
load_command,
UnknownLoadCommand,
mach_header_64,
segment_command_64
)
class MachOContext(FileContext):
loadCommands: list[load_command]
segments: dict[bytes, SegmentContext]
segmentsI: list[SegmentContext]
def __init__(
self,
file: mmap,
offset: int
) -> None:
"""A wrapper around a MachO file.
Provides convenient methods and attributes for a given MachO file.
Args:
file: The macho file.
offset: The offset to the header in the file.
"""
super().__init__(file, offset=offset)
self.header = mach_header_64(file, offset)
# check to make sure the MachO file is 64 bit
magic = self.header.magic
if magic == 0xfeedface or magic == 0xcefaedfe:
raise Exception("MachOContext doesn't support 32bit files!")
self._parseLoadCommands()
pass
def getLoadCommand(
self,
cmdFilter: tuple[LoadCommands],
multiple: bool = False
) -> Union[load_command, tuple[load_command]]:
"""Retreive a load command with its command ID
Args:
filter: The command to filter by.
multiple: Optional; To get multiple results instead of the first.
Returns:
If the command is not found, None is returned. If one was found it will
return the first match. If multiple is set to True, it will return a list
of matches.
"""
matches = []
for loadCommand in self.loadCommands:
if loadCommand.cmd in cmdFilter:
if not multiple:
return loadCommand
else:
matches.append(loadCommand)
if len(matches) == 0:
return None
return matches
def containsAddr(self, address: int) -> bool:
"""Check if the address is contained in the MachO file.
Args:
address: the VM address to check.
Returns:
Whether or not the address is contained in the segments
of this MachO file.
"""
for segment in self.segmentsI:
seg = segment.seg
lowBound = seg.vmaddr
highBound = lowBound + seg.vmsize
if address >= lowBound and address < highBound:
return True
return False
def _parseLoadCommands(self) -> None:
"""Parse the load commands
Parse the load commands and set the loadCommands attribute.
"""
self.loadCommands = []
self.segments = {}
self.segmentsI = []
cmdOff = len(self.header) + self.fileOffset
for _ in range(self.header.ncmds):
self.file.seek(cmdOff)
cmd = struct.unpack("<I", self.file.read(4))[0]
command = LoadCommandMap.get(cmd, UnknownLoadCommand)
if command == UnknownLoadCommand:
raise Exception(f"Unknown LoadCommand: {cmd}")
command = command(self.file, cmdOff)
cmdOff += command.cmdsize
self.loadCommands.append(command)
# populate the segments at this point too
if isinstance(command, segment_command_64):
segCtx = SegmentContext(self.file, command)
self.segments[command.segname] = segCtx
self.segmentsI.append(segCtx)
pass
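# Hedged usage sketch (not part of the original file). The path, the byte key
# and the LC_UUID constant name are assumptions, not verified against the rest
# of DyldExtractor:
#
#     with open("/path/to/binary", "r+b") as f:
#         ctx = MachOContext(mmap(f.fileno(), 0), offset=0)
#         text_seg = ctx.segments.get(b"__TEXT")  # segname keys are raw bytes
#         uuid_cmd = ctx.getLoadCommand((LoadCommands.LC_UUID,))
#         print(ctx.containsAddr(0x100000000))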
|
src/enamlnative/widgets/image_view.py | codelv/enaml-native | 237 | 12714530 | """
Copyright (c) 2017, <NAME>.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on May 20, 2017
@author: jrm
"""
from atom.api import (
Typed, ForwardTyped, Int, Unicode, Bool, Event, observe, set_default
)
from enaml.core.declarative import d_
from .view import View, ProxyView
class ProxyImageView(ProxyView):
""" The abstract definition of a proxy relative layout object.
"""
#: A reference to the Label declaration.
declaration = ForwardTyped(lambda: ImageView)
def set_src(self, src):
raise NotImplementedError
def set_max_height(self, height):
raise NotImplementedError
def set_max_width(self, width):
raise NotImplementedError
class ImageView(View):
""" Displays image resources
"""
#: Set the offset of the widget's text baseline from the widget's
#: top boundary.
# baseline = d_(Int(-1))
#
# baseline_align_bottom = d_(Bool())
#
# crop_to_padding = d_(Bool())
#: Sets a drawable as the content of this ImageView.
src = d_(Unicode())
#: An optional argument to supply a maximum height for this view.
max_height = d_(Int())
#: An optional argument to supply a maximum width for this view.
max_width = d_(Int())
#: A reference to the ProxyImageView object.
proxy = Typed(ProxyImageView)
# -------------------------------------------------------------------------
# Observers
# -------------------------------------------------------------------------
@observe('src', 'max_height', 'max_width')
def _update_proxy(self, change):
""" An observer which sends the state change to the proxy.
"""
# The superclass implementation is sufficient.
super(ImageView, self)._update_proxy(change)
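# Hedged usage sketch (not part of the original file): in an .enaml view this
# widget is used declaratively. The enclosing Flexbox container and the
# drawable name are assumptions; only `src`, `max_width` and `max_height`
# come from the declaration above.
#
#     enamldef ContentView(Flexbox): view:
#         ImageView:
#             src = "@mipmap/ic_launcher"
#             max_width = 200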
|
scripts/eval_badja.py | isabella232/lasr | 128 | 12714550 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import sys, os
sys.path.append(os.path.dirname(os.path.dirname(sys.path[0])))
sys.path.insert(0,'third_party')
sys.path.insert(0,'./')
import torch
import torch.nn as nn
from torch.autograd import Variable
from ext_utils.badja_data import BADJAData
from ext_utils.joint_catalog import SMALJointInfo
import ext_utils.flowlib as flowlib
import matplotlib.pyplot as plt
import numpy as np
import torch
import cv2
import pdb
import soft_renderer as sr
import argparse
import trimesh
from nnutils.geom_utils import obj_to_cam, pinhole_cam, orthographic_cam, render_flow_soft_3
parser = argparse.ArgumentParser(description='BADJA')
parser.add_argument('--testdir', default='',
help='path to test dir')
parser.add_argument('--seqname', default='camel',
help='sequence to test')
parser.add_argument('--type', default='mesh',
help='load mesh data or flow or zero')
parser.add_argument('--cam_type', default='perspective',
help='camera model, orthographic or perspective')
parser.add_argument('--vis', dest='vis', action='store_true',
help='whether to draw visualization')
args = parser.parse_args()
renderer_softflf = sr.SoftRenderer(image_size=256,dist_func='hard' ,aggr_func_alpha='hard',
camera_mode='look_at',perspective=False, aggr_func_rgb='hard',
light_mode='vertex', light_intensity_ambient=1.,light_intensity_directionals=0.)
def process_flow(model, imgL_o,imgR_o, mean_L, mean_R):
testres=1
# for gray input images
if len(imgL_o.shape) == 2:
imgL_o = np.tile(imgL_o[:,:,np.newaxis],(1,1,3))
imgR_o = np.tile(imgR_o[:,:,np.newaxis],(1,1,3))
# resize
maxh = imgL_o.shape[0]*testres
maxw = imgL_o.shape[1]*testres
max_h = int(maxh // 64 * 64)
max_w = int(maxw // 64 * 64)
if max_h < maxh: max_h += 64
if max_w < maxw: max_w += 64
input_size = imgL_o.shape
imgL = cv2.resize(imgL_o,(max_w, max_h))
imgR = cv2.resize(imgR_o,(max_w, max_h))
imgL_noaug = torch.Tensor(imgL/255.)[np.newaxis].float().cuda()
# flip channel, subtract mean
imgL = imgL[:,:,::-1].copy() / 255. - np.asarray(mean_L).mean(0)[np.newaxis,np.newaxis,:]
imgR = imgR[:,:,::-1].copy() / 255. - np.asarray(mean_R).mean(0)[np.newaxis,np.newaxis,:]
imgL = np.transpose(imgL, [2,0,1])[np.newaxis]
imgR = np.transpose(imgR, [2,0,1])[np.newaxis]
# modify module according to inputs
from models.VCN_exp import WarpModule, flow_reg
for i in range(len(model.module.reg_modules)):
model.module.reg_modules[i] = flow_reg([1,max_w//(2**(6-i)), max_h//(2**(6-i))],
ent=getattr(model.module, 'flow_reg%d'%2**(6-i)).ent,\
maxdisp=getattr(model.module, 'flow_reg%d'%2**(6-i)).md,\
fac=getattr(model.module, 'flow_reg%d'%2**(6-i)).fac).cuda()
for i in range(len(model.module.warp_modules)):
model.module.warp_modules[i] = WarpModule([1,max_w//(2**(6-i)), max_h//(2**(6-i))]).cuda()
# get intrinsics
intr_list = [torch.Tensor(inxx).cuda() for inxx in [[1],[1],[1],[1],[1],[0],[0],[1],[0],[0]]]
fl_next = 1
intr_list.append(torch.Tensor([fl_next]).cuda())
disc_aux = [None,None,None,intr_list,imgL_noaug,None]
# forward
imgL = Variable(torch.FloatTensor(imgL).cuda())
imgR = Variable(torch.FloatTensor(imgR).cuda())
with torch.no_grad():
imgLR = torch.cat([imgL,imgR],0)
model.eval()
torch.cuda.synchronize()
start_time = time.time()
rts = model(imgLR, disc_aux)
torch.cuda.synchronize()
ttime = (time.time() - start_time); print('time = %.2f' % (ttime*1000) )
flow, logmid, occ, biseg, objseg = rts
# upsampling
flow = torch.squeeze(flow).data.cpu().numpy()
flow = np.concatenate( [cv2.resize(flow[0],(input_size[1],input_size[0]))[:,:,np.newaxis],
cv2.resize(flow[1],(input_size[1],input_size[0]))[:,:,np.newaxis]],-1)
flow[:,:,0] *= imgL_o.shape[1] / max_w
flow[:,:,1] *= imgL_o.shape[0] / max_h
flow = np.concatenate( (flow, np.ones([flow.shape[0],flow.shape[1],1])),-1)
torch.cuda.empty_cache()
flow = torch.Tensor(flow).cuda()[None]
return flow
def preprocess_image(img,mask,imgsize):
if len(img.shape) == 2:
img = np.repeat(np.expand_dims(img, 2), 3, axis=2)
mask = mask[:,:,:1]
# crop box
indices = np.where(mask>0); xid = indices[1]; yid = indices[0]
center = ( (xid.max()+xid.min())//2, (yid.max()+yid.min())//2)
length = ( (xid.max()-xid.min())//2, (yid.max()-yid.min())//2)
maxlength = int(1.2*max(length))
length = (maxlength,maxlength)
alp = 2*length[0]/float(imgsize)
refpp = np.asarray(center)/(imgsize/2.) - 1
return alp, refpp,center,length[0]
def draw_joints_on_image(rgb_img, joints, visibility, region_colors, marker_types,pred=None,correct=None):
joints = joints[:, ::-1] # OpenCV works in (x, y) rather than (i, j)
disp_img = rgb_img.copy()
i=0
for joint_coord, visible, color, marker_type in zip(joints, visibility, region_colors, marker_types):
if visible:
joint_coord = joint_coord.astype(int)
cv2.circle(disp_img, tuple(joint_coord), radius=3, color=[255,0,0], thickness = 10)
if pred is not None:
if correct[i]:
color=[0,255,0]
else:
color=[0,0,255]
error = np.linalg.norm(joint_coord - pred[i,::-1],2,-1)
cv2.circle(disp_img, tuple(joint_coord), radius=int(error), color=color, thickness = 3)
cv2.line(disp_img, tuple(joint_coord), tuple(pred[i,::-1]),color , thickness = 3)
i+=1
return disp_img
def main():
smal_joint_info = SMALJointInfo()
badja_data = BADJAData(args.seqname)
data_loader = badja_data.get_loader()
print(args.testdir)
# store all the data
all_anno = []
all_mesh = []
all_cam = []
all_fr = []
all_fl = []
#import pdb; pdb.set_trace()
for anno in data_loader:
all_anno.append(anno)
rgb_img, sil_img, joints, visible, name = anno
seqname = name.split('/')[-2]
fr = int(name.split('/')[-1].split('.')[-2])
all_fr.append(fr)
print('%s/%d'%(seqname, fr))
# load mesh data or flow
if args.type=='mesh':
mesh = trimesh.load('%s/pred%d.ply'%(args.testdir, fr),process=False)
all_mesh.append(mesh)
cam = np.loadtxt('%s/cam%d.txt'%(args.testdir,fr))
all_cam.append(cam)
if args.type=='flow':
from models.VCN_exp import VCN
model = VCN([1, 256, 256], md=[int(4*(256/256)),4,4,4,4], fac=1)
model = nn.DataParallel(model, device_ids=[0])
model.cuda()
pretrained_dict = torch.load('/data/gengshay/vcn_weights/robexp.pth',map_location='cpu')
mean_L=pretrained_dict['mean_L']
mean_R=pretrained_dict['mean_R']
model.load_state_dict(pretrained_dict['state_dict'],strict=False)
# store all the results
pck_all = []
for i in range(len(all_anno)):
for j in range(len(all_anno)):
if i!=j:
# evaluate every two-frame
refimg, refsil, refkp, refvis, refname = all_anno[i]
tarimg, tarsil, tarkp, tarvis, tarname = all_anno[j]
print('%s vs %s'%(refname, tarname))
if args.type=='mesh':
refmesh, tarmesh = all_mesh[i], all_mesh[j]
refcam, tarcam = all_cam[i], all_cam[j]
img_size = max(refimg.shape)
renderer_softflf.rasterizer.image_size = img_size
# render flow between mesh 1 and 2
refface = torch.Tensor(refmesh.faces[None]).cuda()
verts = torch.Tensor(np.concatenate([refmesh.vertices[None], tarmesh.vertices[None]],0)).cuda()
Rmat = torch.Tensor(np.concatenate([refcam[None,:3,:3], tarcam[None,:3,:3]], 0)).cuda()
Tmat = torch.Tensor(np.concatenate([refcam[None,:3,3], tarcam[None,:3,3]], 0)).cuda()
ppoint = torch.Tensor(np.concatenate([refcam[None,3,2:], tarcam[None,3,2:]], 0)).cuda()
scale = torch.Tensor(np.concatenate([refcam[None,3,:1], tarcam[None,3,:1]], 0)).cuda()
scale = scale/img_size*2
ppoint = ppoint/img_size * 2 -1
verts_fl = obj_to_cam(verts, Rmat, Tmat[:,None],nmesh=1,n_hypo=1,skin=None)
verts_fl = torch.cat([verts_fl,torch.ones_like(verts_fl[:, :, 0:1])], dim=-1)
verts_pos = verts_fl.clone()
verts_fl = pinhole_cam(verts_fl, ppoint, scale)
flow_fw, bgmask_fw, fgmask_flowf = render_flow_soft_3(renderer_softflf, verts_fl[:1], verts_fl[1:], refface)
flow_fw[bgmask_fw]=0.
flow_fw = torch.cat([flow_fw, torch.zeros_like(flow_fw)[:,:,:,:1]],-1)[:,:refimg.shape[0],:refimg.shape[1]]
elif args.type=='flow':
flow_fw = process_flow(model, refimg, tarimg, mean_L, mean_R)
flow_fw = (flow_fw)/(refimg.shape[0]/2.)
elif args.type=='zero':
flow_fw = torch.zeros(refimg.shape).cuda()[None]
refkpx = torch.Tensor(refkp.astype(float)).cuda()
x0,y0=np.meshgrid(range(refimg.shape[1]),range(refimg.shape[0]))
x0 = torch.Tensor(x0).cuda()
y0 = torch.Tensor(y0).cuda()
idx = ( (flow_fw[:,:,:,:2].norm(2,-1)<1e-6).float().view(1,-1)*1e6+ torch.pow(refkpx[:,0:1]-y0.view(1,-1),2) + torch.pow(refkpx[:,1:2]-x0.view(1,-1),2)).argmin(-1)
samp_flow = flow_fw.view(-1,3)[idx][:,:2]
tarkp_pred = refkpx.clone()
tarkp_pred[:,0] = tarkp_pred[:,0] +(samp_flow[:,1])*refimg.shape[0]/2
tarkp_pred[:,1] = tarkp_pred[:,1] +(samp_flow[:,0])*refimg.shape[1]/2
tarkp_pred = np.asarray(tarkp_pred.cpu())
diff = np.linalg.norm(tarkp_pred - tarkp, 2,-1)
sqarea = np.sqrt((refsil[:,:,0]>0).sum())
correct = diff < sqarea * 0.2
correct = correct[np.logical_and(tarvis, refvis)]
if args.vis:
rgb_vis = draw_joints_on_image(refimg, refkp, refvis, smal_joint_info.joint_colors, smal_joint_info.annotated_markers)
tarimg = draw_joints_on_image(tarimg, tarkp, tarvis, smal_joint_info.joint_colors, smal_joint_info.annotated_markers, pred=tarkp_pred,correct=diff < sqarea * 0.2)
cv2.addWeighted(rgb_vis, 0.5, flowlib.flow_to_image(np.asarray(flow_fw[0].clamp(-1,1).detach().cpu())), 0.5,0.0,rgb_vis)
cv2.imwrite('%s/%05d-%05d-flo.png'%(args.testdir,all_fr[i],all_fr[j]),rgb_vis[:,:,::-1])
cv2.imwrite('%s/%05d-%05d.png'%(args.testdir,all_fr[i],all_fr[j]),tarimg[:,:,::-1])
pck_all.append(correct)
print('PCK %.02f'%(100*np.concatenate(pck_all).astype(float).mean()))
if __name__ == '__main__':
main()
|
lib/core/loss.py | ahmedelmahy/HRNet-Bottom-Up-Pose-Estimation | 129 | 12714560 | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# The code is based on HigherHRNet-Human-Pose-Estimation.
# (https://github.com/HRNet/HigherHRNet-Human-Pose-Estimation)
# Modified by <NAME> (<EMAIL>).
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import torch
import torch.nn as nn
logger = logging.getLogger(__name__)
def make_input(t, requires_grad=False, need_cuda=True):
inp = torch.autograd.Variable(t, requires_grad=requires_grad)
inp = inp.sum()
if need_cuda:
inp = inp.cuda()
return inp
class HeatmapLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, pred, gt, mask):
assert pred.size() == gt.size()
loss = ((pred - gt)**2) * mask
loss = loss.mean(dim=3).mean(dim=2).mean(dim=1)
return loss
class OffsetsLoss(nn.Module):
def __init__(self):
super().__init__()
def smooth_l1_loss(self, pred, gt, beta=1. / 9):
l1_loss = torch.abs(pred - gt)
cond = l1_loss < beta
loss = torch.where(cond, 0.5*l1_loss**2/beta, l1_loss-0.5*beta)
return loss
def forward(self, pred, gt, weights):
assert pred.size() == gt.size()
num_pos = torch.nonzero(weights > 0).size()[0]
loss = self.smooth_l1_loss(pred, gt) * weights
if num_pos == 0:
num_pos = 1.
loss = loss.sum() / num_pos
        return loss
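# Hedged sanity-check sketch (not part of the original file): the smooth-L1
# offset loss above can be exercised standalone on dummy tensors; the shapes
# below are arbitrary assumptions, not the shapes used in training.
#
#     pred = torch.zeros(1, 2, 8, 8)
#     gt = torch.ones(1, 2, 8, 8)
#     weights = torch.ones(1, 2, 8, 8)
#     loss = OffsetsLoss()(pred, gt, weights)  # mean smooth-L1 over weighted positions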
class MultiLossFactory(nn.Module):
def __init__(self, cfg):
super().__init__()
self._init_check(cfg)
self.num_joints = cfg.MODEL.NUM_JOINTS
self.num_stages = cfg.LOSS.NUM_STAGES
self.heatmaps_loss = \
nn.ModuleList(
[
HeatmapLoss()
if with_heatmaps_loss else None
for with_heatmaps_loss in cfg.LOSS.WITH_HEATMAPS_LOSS
]
)
self.heatmaps_loss_factor = cfg.LOSS.HEATMAPS_LOSS_FACTOR
self.offsets_loss = \
nn.ModuleList(
[
OffsetsLoss()
if with_offsets_loss else None
for with_offsets_loss in cfg.LOSS.WITH_OFFSETS_LOSS
]
)
self.offsets_loss_factor = cfg.LOSS.OFFSETS_LOSS_FACTOR
def forward(self, outputs, poffsets, heatmaps,
masks, offsets, offset_w):
heatmaps_losses = []
offsets_losses = []
for idx in range(len(outputs)):
with_heatmaps_loss = self.heatmaps_loss[idx]
with_offsets_loss = self.offsets_loss[idx]
if with_heatmaps_loss and len(outputs[idx]) > 0:
num_outputs = len(outputs[idx])
if num_outputs > 1:
heatmaps_pred = torch.cat(outputs[idx], dim=1)
c = outputs[idx][0].shape[1]
if len(heatmaps[idx]) > 1:
heatmaps_gt = [heatmaps[idx][i][:, :c]
for i in range(num_outputs)]
heatmaps_gt = torch.cat(heatmaps_gt, dim=1)
mask = [masks[idx][i].expand_as(outputs[idx][0])
for i in range(num_outputs)]
mask = torch.cat(mask, dim=1)
else:
heatmaps_gt = torch.cat([heatmaps[idx][0][:, :c]
for i in range(num_outputs)], dim=1)
mask = [masks[idx][0].expand_as(outputs[idx][0])
for i in range(num_outputs)]
mask = torch.cat(mask, dim=1)
else:
heatmaps_pred = outputs[idx][0]
c = heatmaps_pred.shape[1]
heatmaps_gt = heatmaps[idx][0][:, :c]
mask = masks[idx][0].expand_as(heatmaps_pred)
heatmaps_loss = with_heatmaps_loss(
heatmaps_pred, heatmaps_gt, mask
)
heatmaps_loss = heatmaps_loss * self.heatmaps_loss_factor[0]
heatmaps_losses.append(heatmaps_loss)
else:
heatmaps_losses.append(None)
if with_offsets_loss and len(poffsets[idx]) > 0:
num_poffsets = len(poffsets[idx])
if num_poffsets > 1:
offset_pred = torch.cat(poffsets[idx], dim=1)
offset_gt = torch.cat([offsets[idx][0]
for i in range(num_poffsets)], dim=1)
offset_w = torch.cat([offset_w[idx][0]
for i in range(num_poffsets)], dim=1)
else:
offset_pred = poffsets[idx][0]
offset_gt = offsets[idx][0]
offset_w = offset_w[idx][0]
offsets_loss = with_offsets_loss(
offset_pred, offset_gt, offset_w
)
offsets_loss = offsets_loss * self.offsets_loss_factor[0]
offsets_losses.append(offsets_loss)
else:
offsets_losses.append(None)
return heatmaps_losses, offsets_losses
def _init_check(self, cfg):
assert isinstance(cfg.LOSS.WITH_HEATMAPS_LOSS, (list, tuple)), \
'LOSS.WITH_HEATMAPS_LOSS should be a list or tuple'
assert isinstance(cfg.LOSS.HEATMAPS_LOSS_FACTOR, (list, tuple)), \
'LOSS.HEATMAPS_LOSS_FACTOR should be a list or tuple'
assert len(cfg.LOSS.WITH_HEATMAPS_LOSS) == cfg.LOSS.NUM_STAGES, \
'LOSS.WITH_HEATMAPS_LOSS and LOSS.NUM_STAGE should have same length, got {} vs {}.'.\
format(len(cfg.LOSS.WITH_HEATMAPS_LOSS), cfg.LOSS.NUM_STAGES)
assert len(cfg.LOSS.WITH_HEATMAPS_LOSS) == len(cfg.LOSS.HEATMAPS_LOSS_FACTOR), \
'LOSS.WITH_HEATMAPS_LOSS and LOSS.HEATMAPS_LOSS_FACTOR should have same length, got {} vs {}.'.\
format(len(cfg.LOSS.WITH_HEATMAPS_LOSS),
len(cfg.LOSS.HEATMAPS_LOSS_FACTOR))
|
test/Image_by_text.py | cigar666/my_manim_projects | 159 | 12714562 | from manimlib.imports import *
import matplotlib.pyplot as plt
import csv
import codecs
# import pandas as pd
# import ctypes
# # from https://www.cnpython.com/qa/81434
# def GetTextLength(text, points=10, font='思源黑体 Bold'):
# class SIZE(ctypes.Structure):
# _fields_ = [("cx", ctypes.c_long), ("cy", ctypes.c_long)]
#
# hdc = ctypes.windll.user32.GetDC(0)
# hfont = ctypes.windll.gdi32.CreateFontA(points, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, font)
# hfont_old = ctypes.windll.gdi32.SelectObject(hdc, hfont)
#
# size = SIZE(0, 0)
# ctypes.windll.gdi32.GetTextExtentPoint32A(hdc, text, len(text), ctypes.byref(size))
#
# ctypes.windll.gdi32.SelectObject(hdc, hfont_old)
# ctypes.windll.gdi32.DeleteObject(hfont)
#
# # return (size.cx, size.cy)
# return size.cx
def get_text_length(text):
    # quick-and-dirty helper: roughly estimate the rendered length of a string
l = 0
for ch in text:
if ch in 'abcdefghijklmnopqrstuvwxyz1234567890_':
l += 0.5
else:
l += 1
return l
class Test_max_length_of_Text(Scene):
def construct(self):
t = Text('哈' * 1000, font='思源黑体 Bold', color=WHITE, size=0.05).set_width(100).move_to(ORIGIN)
print(len(t.text))
print(len(t))
self.add(t)
self.wait()
class Test(Scene):
def construct(self):
im = plt.imread(r'E:\GitHub\manim\my_manim_projects\my_projects\resource\png_files\m_set_01.bmp')
Z = im[:, :, 0]
nx, ny = len(Z[0])-1, len(Z)-1
def set_color4text(Text):
for t in Text:
loc = t.get_center()
j, i = int((loc[0]/FRAME_WIDTH + 1/2) * nx), int((-loc[1]/FRAME_HEIGHT + 1/2) * ny)
t.set_color(rgb_to_hex(im[i, j]/255))
text_str = ''
num = 0
text_all = VGroup()
str_01 = '可爱的cigar666的粉丝'
for i in range(6000):
text_str_i = '@' + str_01[0:np.random.randint(2, 14)]
num += len(text_str_i)
text_str += text_str_i
if num > 400:
t = Text(text_str, font='思源黑体 Bold', size=0.09)
# set_color4text(t)
text_all.add(t)
print(len(t))
text_str = ''
num = 0
text_all.arrange(DOWN, buff=0.005, aligned_edge=LEFT)
if text_all.get_height()/text_all.get_width() > 8/14:
text_all.set_height(7.9)
else:
text_all.set_width(13.8)
for text in text_all:
set_color4text(text)
self.add(text_all)
self.wait(1)
# data = []
#
# f = open(r"E:\GitHub\manim\my_manim_projects\my_projects\resource\data\FollowerData.csv", "r", encoding="utf8")
# reader = csv.reader(f)
# print(type(reader))
# for row in reader:
# data.append(row)
# print(row)
# fans_name = np.array(data)[:, 1]
# f.close()
# print('##################')
# print(sorted(fans_name, reverse=False, key=lambda name: len(name)))
class Show_followers(Scene):
CONFIG = {
        'image_path': r'E:\GitHub\manim\my_manim_projects\my_projects\resource\png_files\m_set_01.bmp',  # image path
        'data_file_path': r"E:\GitHub\manim\my_manim_projects\my_projects\resource\data\FollowerData.csv",  # follower data (CSV format)
        'line_length': 600,  # approximate text length per line; with a different follower count this changes the aspect ratio of the rendered text,
                             # follower IDs vary in length so an exact value is hard to give; tune at low resolution first, then render at high resolution
                             # (you can also shrink the dataset first to estimate this parameter)
}
def construct(self):
data = []
f = open(self.data_file_path, "r", encoding="utf8")
reader = csv.reader(f)
print(type(reader))
for row in reader:
data.append(row)
# print(row)
f.close()
fans_name = np.array(data)[:, 1]
        names = fans_name  # commenting this line out and using the next one changes the ordering
        # names = sorted(fans_name, reverse=False, key=lambda name: get_text_length(name))  # uncomment to sort by name length; otherwise keep the follow-time order
im = plt.imread(self.image_path)
Z = im[:, :, 0]
nx, ny = len(Z[0])-1, len(Z)-1
final_str = ''
def set_color4text(Text):
for t in Text:
loc = t.get_center()
j, i = int((loc[0]/FRAME_WIDTH + 1/2) * nx), int((-loc[1]/FRAME_HEIGHT + 1/2) * ny)
t.set_color(rgb_to_hex(im[i, j]/255))
text_str = ''
l_max = 0
line_num = 0
text_all = VGroup()
for i in range(1, len(names)):
text_str_i = '@' + names[i]
# length_i = GetTextLength(text_str_i)
length_i = get_text_length(text_str_i)
l_max += length_i
text_str += text_str_i
if l_max > self.line_length - length_i/2:
line_num += 1
text_str = str(line_num) + ' ' + text_str
t = Text(text_str, font='思源黑体 Bold', size=0.08)
# set_color4text(t)
text_all.add(t)
print(l_max)
final_str += text_str + '\n'
text_str = ''
l_max = 0
line_num += 1
text_str = str(line_num) + ' ' + text_str # + '@cigar666'
t = Text(text_str, font='思源黑体 Bold', size=0.08)
text_all.add(t)
print(l_max)
        final_str += text_str  # + '@cigar666'  # only noticed after rendering that the "+ '@cigar666'" should have gone on the line above; missed the group photo
f = codecs.open('get_loction_of_fans.txt', 'w', encoding='utf-8')
print(final_str)
f.write(final_str)
f.close()
text_all.arrange(DOWN, buff=0.005, aligned_edge=LEFT)
if text_all.get_height()/text_all.get_width() > FRAME_HEIGHT/FRAME_WIDTH:
text_all.set_height(FRAME_HEIGHT-0.1)
else:
text_all.set_width(FRAME_WIDTH-0.1)
for text in text_all:
set_color4text(text)
self.add(text_all)
self.wait(1)
class Show_followers_PythagoreanTree(Show_followers):
CONFIG = {
        'image_path': r'E:\GitHub\manim\my_manim_projects\my_projects\resource\png_files\Test_PythagoreanTree.bmp',  # image path
        'data_file_path': r"E:\GitHub\manim\my_manim_projects\my_projects\resource\data\FollowerData.csv",  # follower data (CSV format)
        'line_length': 540,  # approximate text length per line; with a different follower count this changes the aspect ratio of the rendered text,
                             # follower IDs vary in length so an exact value is hard to give; tune at low resolution first, then render at high resolution
                             # (you can also shrink the dataset first to estimate this parameter)
}
class Show_followers_Misaka(Show_followers):
CONFIG = {
        'image_path': r'E:\GitHub\manim\my_manim_projects\my_projects\resource\png_files\Misaka.bmp',  # image path
        'data_file_path': r"E:\GitHub\manim\my_manim_projects\my_projects\resource\data\FollowerData.csv",  # follower data (CSV format)
        'line_length': 540,  # approximate text length per line; with a different follower count this changes the aspect ratio of the rendered text,
                             # follower IDs vary in length so an exact value is hard to give; tune at low resolution first, then render at high resolution
                             # (you can also shrink the dataset first to estimate this parameter)
}
|
time_series_detector/algorithm/ewma.py | alexanderqiu/Metis | 1,367 | 12714569 | #!/usr/bin/env python
# -*- coding=utf-8 -*-
"""
Tencent is pleased to support the open source community by making Metis available.
Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
https://opensource.org/licenses/BSD-3-Clause
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"""
import numpy as np
class Ewma(object):
"""
In statistical quality control, the EWMA chart (or exponentially weighted moving average chart)
is a type of control chart used to monitor either variables or attributes-type data using the monitored business
or industrial process's entire history of output. While other control charts treat rational subgroups of samples
individually, the EWMA chart tracks the exponentially-weighted moving average of all prior sample means.
WIKIPEDIA: https://en.wikipedia.org/wiki/EWMA_chart
"""
def __init__(self, alpha=0.3, coefficient=3):
"""
:param alpha: Discount rate of ewma, usually in (0.2, 0.3).
:param coefficient: Coefficient is the width of the control limits, usually in (2.7, 3.0).
"""
self.alpha = alpha
self.coefficient = coefficient
def predict(self, X):
"""
Predict if a particular sample is an outlier or not.
        :param X: the time series to run detection on
        :type X: pandas.Series
:return: 1 denotes normal, 0 denotes abnormal
"""
s = [X[0]]
for i in range(1, len(X)):
temp = self.alpha * X[i] + (1 - self.alpha) * s[-1]
s.append(temp)
s_avg = np.mean(s)
sigma = np.sqrt(np.var(X))
ucl = s_avg + self.coefficient * sigma * np.sqrt(self.alpha / (2 - self.alpha))
lcl = s_avg - self.coefficient * sigma * np.sqrt(self.alpha / (2 - self.alpha))
if s[-1] > ucl or s[-1] < lcl:
return 0
return 1
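if __name__ == '__main__':
    # Hedged demo (not part of the original module): the sample series below is
    # invented purely to show the call; real callers pass a pandas.Series.
    sample = [10, 11, 10, 12, 11, 10, 11, 12, 10, 11, 30]
    print(Ewma(alpha=0.3, coefficient=3).predict(sample))  # 1 = normal, 0 = abnormal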
|
scrapyd_client/commands.py | zanachka/scrapyd-client | 681 | 12714641 | import sys
from scrapyd_client import lib
from scrapyd_client.utils import indent
INDENT_PREFIX = ' '
def deploy(args):
""" Deploys a Scrapy project to a Scrapyd instance.
For help on this command, invoke `scrapyd-deploy`. """
from scrapyd_client import deploy
sys.argv.pop(1)
deploy.main()
def projects(args):
""" Lists all projects deployed on a Scrapyd instance. """
_projects = lib.get_projects(args.target)
if _projects:
print('\n'.join(_projects))
def schedule(args):
""" Schedules the specified spider(s). """
job_args = dict((x[0], x[1]) for x in (y.split('=', 1) for y in args.arg))
_projects = lib.get_projects(args.target, args.project)
for project in _projects:
_spiders = lib.get_spiders(args.target, project, args.spider)
for spider in _spiders:
job_id = lib.schedule(args.target, project, spider, job_args)
print(f'{project} / {spider} => {job_id}')
def spiders(args):
""" Lists all spiders for the given project(s). """
_projects = lib.get_projects(args.target, args.project)
for project in _projects:
project_spiders = lib.get_spiders(args.target, project)
if not args.verbose:
print(f'{project}:')
if project_spiders:
print(indent('\n'.join(project_spiders), INDENT_PREFIX))
else:
print(INDENT_PREFIX + 'No spiders.')
elif project_spiders:
print('\n'.join(f'{project} {x}' for x in project_spiders))
|
dialogue-engine/src/programy/utils/logging/ylogger.py | cotobadesign/cotoba-agent-oss | 104 | 12714660 | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
Copyright (c) 2016-2019 <NAME> http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import logging
import traceback
import json
import datetime
class YLoggerSnapshot(object):
def __init__(self, criticals=0, fatals=0, errors=0, exceptions=0, warnings=0, infos=0, debugs=0):
self._criticals = criticals
self._fatals = fatals
self._errors = errors
self._exceptions = exceptions
self._warnings = warnings
self._infos = infos
self._debugs = debugs
def __str__(self):
return "Critical(%d) Fatal(%d) Error(%d) Exception(%d) Warning(%d) Info(%d), Debug(%d)" % (
self._criticals, self._fatals, self._errors, self._exceptions, self._warnings, self._infos, self._debugs
)
class YLogger(object):
CRITICALS = 0
FATALS = 0
ERRORS = 0
EXCEPTIONS = 0
WARNINGS = 0
INFOS = 0
DEBUGS = 0
IS_STDOUT = False
IS_STDERR = False
PREFIX = "Yadlan"
IS_TRACEBACK = True
DEFAULT_LEVEL = None
@staticmethod
def snapshot():
return YLoggerSnapshot(YLogger.CRITICALS,
YLogger.FATALS,
YLogger.ERRORS,
YLogger.EXCEPTIONS,
YLogger.WARNINGS,
YLogger.INFOS,
YLogger.DEBUGS)
@staticmethod
def reset_snapshot():
YLogger.CRITICALS = 0
YLogger.FATALS = 0
YLogger.ERRORS = 0
YLogger.EXCEPTIONS = 0
YLogger.WARNINGS = 0
YLogger.INFOS = 0
YLogger.DEBUGS = 0
@staticmethod
def format_message(caller, message):
if caller is not None:
if hasattr(caller, "ylogger_type"):
log_type = caller.ylogger_type()
if log_type == 'client':
return "[%s] - %s" % (caller.id, message)
elif log_type == 'bot':
return "[%s] [%s] - %s" % (caller.client.id if caller.client is not None else "",
caller.id, message)
elif log_type == 'brain':
clientid = ""
botid = ""
if caller.bot is not None:
if caller.bot.client is not None:
clientid = caller.bot.client.id
botid = caller.bot.id
return "[%s] [%s] [%s] - %s" % (clientid, botid, caller.id, message)
elif log_type == 'context':
return "[%s] [%s] [%s] [%s] - %s" % (caller.client.id if caller.client is not None else "",
caller.bot.id if caller.bot is not None else "",
caller.brain.id if caller.brain is not None else "",
caller.userid, message)
return message
@staticmethod
def set_default_level():
YLogger.DEFAULT_LEVEL = 'none'
level = logging.getLogger().getEffectiveLevel()
if level == logging.CRITICAL or \
level == logging.FATAL or \
level == logging.ERROR:
YLogger.DEFAULT_LEVEL = 'error'
elif level == logging.WARNING:
YLogger.DEFAULT_LEVEL = 'warning'
elif level == logging.INFO:
YLogger.DEFAULT_LEVEL = 'info'
elif level == logging.DEBUG:
YLogger.DEFAULT_LEVEL = 'debug'
logging.getLogger().setLevel(level=logging.DEBUG)
@staticmethod
def check_loglevel(caller, level):
if YLogger.DEFAULT_LEVEL is None:
return logging.getLogger().isEnabledFor(level)
out_level = YLogger.DEFAULT_LEVEL
if caller is not None:
if hasattr(caller, "get_loglevel"):
client_loglevel = caller.get_loglevel()
if client_loglevel is not None:
out_level = client_loglevel
if level == logging.CRITICAL or \
level == logging.FATAL or \
level == logging.ERROR:
if out_level in ['error', 'warning', 'info', 'debug']:
return True
elif level == logging.WARNING:
if out_level in ['warning', 'info', 'debug']:
return True
elif level == logging.INFO:
if out_level in ['info', 'debug']:
return True
elif level == logging.DEBUG:
if out_level == 'debug':
return True
return False
@staticmethod
def critical(caller, message, *args, **kwargs):
YLogger.CRITICALS += 1
if YLogger.check_loglevel(caller, logging.CRITICAL):
logging.critical(YLogger.format_message(caller, message), *args, **kwargs)
YLogger.yadlan_stderr(caller, "critical", message, *args, **kwargs)
@staticmethod
def fatal(caller, message, *args, **kwargs):
YLogger.FATALS += 1
if YLogger.check_loglevel(caller, logging.FATAL):
logging.fatal(YLogger.format_message(caller, message), *args, **kwargs)
YLogger.yadlan_stderr(caller, "fatal", message, *args, **kwargs)
@staticmethod
def error(caller, message, *args, **kwargs):
YLogger.ERRORS += 1
if YLogger.check_loglevel(caller, logging.ERROR):
logging.error(YLogger.format_message(caller, message), *args, **kwargs)
YLogger.yadlan_stderr(caller, "error", message, *args, **kwargs)
@staticmethod
def exception(caller, message, exception, *args, **kwargs):
YLogger.EXCEPTIONS += 1
if YLogger.check_loglevel(caller, logging.ERROR):
excep_msg = "%s [%s]" % (message, str(exception))
logging.error(YLogger.format_message(caller, excep_msg), *args, **kwargs)
if YLogger.IS_TRACEBACK is True and exception is not None:
tb_lines = [line.rstrip('\n') for line in
traceback.format_exception(exception.__class__, exception, exception.__traceback__)]
for line in tb_lines:
logging.error(YLogger.format_message(caller, line))
YLogger.yadlan_stderr(caller, "exception", message, *args, **kwargs)
@staticmethod
def warning(caller, message, *args, **kwargs):
YLogger.WARNINGS += 1
if YLogger.check_loglevel(caller, logging.WARNING):
logging.warning(YLogger.format_message(caller, message), *args, **kwargs)
YLogger.yadlan_stdout(caller, "warning", message, *args, **kwargs)
@staticmethod
def info(caller, message, *args, **kwargs):
YLogger.INFOS += 1
if YLogger.check_loglevel(caller, logging.INFO):
logging.info(YLogger.format_message(caller, message), *args, **kwargs)
YLogger.yadlan_stdout(caller, "info", message, *args, **kwargs)
@staticmethod
def debug(caller, message, *args, **kwargs):
YLogger.DEBUGS += 1
if YLogger.check_loglevel(caller, logging.DEBUG):
logging.debug(YLogger.format_message(caller, message), *args, **kwargs)
YLogger.yadlan_stdout(caller, "debug", message, *args, **kwargs)
@staticmethod
def set_stdout(status):
if status != "True":
YLogger.IS_STDOUT = False
else:
YLogger.IS_STDOUT = True
@staticmethod
def set_stderr(status):
if status != "True":
YLogger.IS_STDERR = False
else:
YLogger.IS_STDERR = True
@staticmethod
def set_prefix(prefix):
YLogger.PREFIX = prefix
@staticmethod
def set_traceback(setting: bool):
YLogger.IS_TRACEBACK = setting
@staticmethod
def format_yadlan_message(prefix, level, caller, message):
botid = ""
brainid = ""
userid = ""
try:
botid = caller.bot.id
except Exception:
pass
try:
brainid = caller.brain.id
except Exception:
pass
try:
userid = caller.userid
except Exception:
pass
messageDict = message
try:
messageDict = json.loads(message, encoding="utf-8")
except Exception:
pass
dt_now = datetime.datetime.now()
dict = {"time": str(dt_now), "status": level, "bot_id": prefix, "botid": botid, "brainid": brainid, "userid": userid, "message": messageDict}
return json.dumps(dict, ensure_ascii=False)
@staticmethod
def yadlan_stdout(caller, level, message, *args, **kwargs):
if YLogger.IS_STDOUT:
if len(args) == 0:
sys.stdout.write(YLogger.format_yadlan_message(YLogger.PREFIX, level, caller, message) + "\n")
else:
sys.stdout.write(YLogger.format_yadlan_message(YLogger.PREFIX, level, caller, message) % args + "\n")
sys.stdout.flush()
@staticmethod
def yadlan_stderr(caller, level, message, *args, **kwargs):
if YLogger.IS_STDERR:
sys.stderr.write(YLogger.format_yadlan_message(YLogger.PREFIX, level, caller, message) % args + "\n")
sys.stderr.flush()
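# Hedged usage sketch (not part of the original module): minimal standalone use.
# Passing caller=None is allowed because format_message() falls back to the
# bare message; the log text below is made up.
#
#     YLogger.set_default_level()
#     YLogger.set_stdout("True")
#     YLogger.info(None, "engine started")
#     YLogger.exception(None, "request failed", ValueError("bad input"))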
|
ztag/annotations/FtpBelkin.py | justinbastress/ztag | 107 | 12714744 | import re
from ztag.annotation import Annotation
from ztag.annotation import OperatingSystem
from ztag.annotation import Type
from ztag.annotation import Manufacturer
from ztag import protocols
import ztag.test
class FtpBelkin(Annotation):
protocol = protocols.FTP
subprotocol = protocols.FTP.BANNER
port = None
manufact_re = re.compile(
"^220 Belkin Network USB Hub Ver \d+\.\d+\.\d+ FTP",
re.IGNORECASE
)
tests = {
"FtpBelkin_1": {
"global_metadata": {
"device_type": Type.USB_HUB,
"manufacturer": Manufacturer.BELKIN
}
}
}
def process(self, obj, meta):
banner = obj["banner"]
tagged = False
if self.manufact_re.search(banner):
meta.global_metadata.device_type = Type.USB_HUB
meta.global_metadata.manufacturer = Manufacturer.BELKIN
tagged = True
if tagged:
return meta
else:
return None
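if __name__ == '__main__':
    # Hedged self-check (not part of the original module): the banner string is a
    # made-up example of the form manufact_re is meant to match.
    example_banner = '220 Belkin Network USB Hub Ver 1.0.2 FTP server ready'
    assert FtpBelkin.manufact_re.search(example_banner) is not None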
|
gluoncv/utils/version.py | Kh4L/gluon-cv | 5,447 | 12714750 | """Utility functions for version checking."""
import warnings
__all__ = ['check_version']
def check_version(min_version, warning_only=False):
"""Check the version of gluoncv satisfies the provided minimum version.
An exception is thrown if the check does not pass.
Parameters
----------
min_version : str
Minimum version
warning_only : bool
Printing a warning instead of throwing an exception.
"""
from .. import __version__
from distutils.version import LooseVersion
bad_version = LooseVersion(__version__) < LooseVersion(min_version)
if bad_version:
msg = 'Installed GluonCV version (%s) does not satisfy the ' \
'minimum required version (%s)'%(__version__, min_version)
if warning_only:
warnings.warn(msg)
else:
raise AssertionError(msg)
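# Hedged usage sketch (not part of the original file): a typical call at the top
# of a user script. The exact re-export path of check_version under
# gluoncv.utils is assumed here.
#
#     from gluoncv.utils import check_version
#     check_version('0.6.0')                      # raise if the installed gluoncv is older
#     check_version('0.9.0', warning_only=True)   # warn instead of raising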
|
pyspark/find_model_collaborative.py | chelseawmk/recommendation-engine | 254 | 12714768 | #!/usr/bin/env python
"""
Copyright Google Inc. 2016
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import sys
import itertools
from math import sqrt
from operator import add
from os.path import join, isfile, dirname
from pyspark import SparkContext, SparkConf, SQLContext
from pyspark.mllib.recommendation import ALS, MatrixFactorizationModel, Rating
CLOUDSQL_INSTANCE_IP = sys.argv[1]
CLOUDSQL_DB_NAME = sys.argv[2]
CLOUDSQL_USER = sys.argv[3]
CLOUDSQL_PWD = sys.argv[4]
conf = SparkConf().setAppName("app_collaborative")
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
jdbcUrl = 'jdbc:mysql://%s:3306/%s?user=%s&password=%s' % (CLOUDSQL_INSTANCE_IP, CLOUDSQL_DB_NAME, CLOUDSQL_USER, CLOUDSQL_PWD)
#[START how_far]
def howFarAreWe(model, against, sizeAgainst):
# Ignore the rating column
againstNoRatings = against.map(lambda x: (int(x[0]), int(x[1])) )
# Keep the rating to compare against
againstWiRatings = against.map(lambda x: ((int(x[0]),int(x[1])), int(x[2])) )
# Make a prediction and map it for later comparison
# The map has to be ((user,product), rating) not ((product,user), rating)
predictions = model.predictAll(againstNoRatings).map(lambda p: ( (p[0],p[1]), p[2]) )
# Returns the pairs (prediction, rating)
predictionsAndRatings = predictions.join(againstWiRatings).values()
# Returns the variance
return sqrt(predictionsAndRatings.map(lambda s: (s[0] - s[1]) ** 2).reduce(add) / float(sizeAgainst))
#[END how_far]
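# Hedged usage sketch (not part of the original file): howFarAreWe() returns the
# RMSE between a model's predictions and held-out ratings. Outside this pipeline
# it could be exercised on a toy RDD (the rating triples below are assumptions):
#
#     toy = sc.parallelize([(0, 1, 4.0), (0, 2, 3.0), (1, 1, 5.0), (1, 3, 2.0)])
#     toyModel = ALS.train(toy, 5, 10, 0.1)
#     print(howFarAreWe(toyModel, toy, toy.count()))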
# Read the data from the Cloud SQL
# Create dataframes
dfRates = sqlContext.read.jdbc(url=jdbcUrl, table='Rating')
rddUserRatings = dfRates.filter(dfRates.userId == 0).rdd
print(rddUserRatings.count())
# Split the data in 3 different sets : training, validating, testing
# 60% 20% 20%
rddRates = dfRates.rdd
rddTraining, rddValidating, rddTesting = rddRates.randomSplit([6,2,2])
# Add the user's ratings to the training set (union returns a new RDD)
rddTraining = rddTraining.union(rddUserRatings)
nbValidating = rddValidating.count()
nbTesting = rddTesting.count()
print("Training: %d, validation: %d, test: %d" % (rddTraining.count(), nbValidating, rddTesting.count()))
# The values left uncommented below are the ones that gave the best results
ranks = [5,10,15,20]
reguls = [0.1, 1,10]
iters = [5,10,20]
finalModel = None
finalRank = 0
finalRegul = float(0)
finalIter = -1
finalDist = float(100)
#[START train_model]
for cRank, cRegul, cIter in itertools.product(ranks, reguls, iters):
model = ALS.train(rddTraining, cRank, cIter, float(cRegul))
dist = howFarAreWe(model, rddValidating, nbValidating)
if dist < finalDist:
print("Best so far:%f" % dist)
finalModel = model
finalRank = cRank
finalRegul = cRegul
finalIter = cIter
finalDist = dist
#[END train_model]
print("Rank %i" % finalRank)
print("Regul %f" % finalRegul)
print("Iter %i" % finalIter)
print("Dist %f" % finalDist)
|
tests/distance/test_init.py | rhasspy/rapidfuzz | 554 | 12714785 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import pytest
from rapidfuzz.distance import Levenshtein, Editops, Opcodes
def test_editops_comparision():
"""
    test comparison with Editops
"""
ops = Levenshtein.editops("aaabaaa", "abbaaabba")
assert ops == ops
assert not (ops != ops)
assert ops == ops.copy()
assert not (ops != ops.copy())
def test_editops_get_index():
"""
test __getitem__ with index of Editops
"""
ops = Editops([('delete', 1, 1), ('replace', 2, 1),
('insert', 6, 5), ('insert', 6, 6), ('insert', 6, 7)], 7, 9)
ops_list = [('delete', 1, 1), ('replace', 2, 1),
('insert', 6, 5), ('insert', 6, 6), ('insert', 6, 7)]
assert ops[0] == ops_list[0]
assert ops[1] == ops_list[1]
assert ops[2] == ops_list[2]
assert ops[3] == ops_list[3]
assert ops[4] == ops_list[4]
assert ops[-1] == ops_list[-1]
assert ops[-2] == ops_list[-2]
assert ops[-3] == ops_list[-3]
assert ops[-4] == ops_list[-4]
assert ops[-5] == ops_list[-5]
with pytest.raises(IndexError):
ops[5]
with pytest.raises(IndexError):
ops[-6]
def test_editops_inversion():
"""
test correct inversion of Editops
"""
ops = Editops([('delete', 1, 1), ('replace', 2, 1),
('insert', 6, 5), ('insert', 6, 6), ('insert', 6, 7)], 7, 9)
assert ops.inverse().as_list() == [('insert', 1, 1), ('replace', 1, 2),
('delete', 5, 6), ('delete', 6, 6), ('delete', 7, 6)]
def test_opcodes_comparision():
"""
    test comparison with Opcodes
"""
ops = Levenshtein.opcodes("aaabaaa", "abbaaabba")
assert ops == ops
assert not (ops != ops)
assert ops == ops.copy()
assert not (ops != ops.copy())
def test_opcode_get_index():
"""
test __getitem__ with index of Opcodes
"""
ops = Opcodes([('equal', 0, 1, 0, 1), ('delete', 1, 2, 1, 1),
('replace', 2, 3, 1, 2), ('equal', 3, 6, 2, 5), ('insert', 6, 6, 5, 8), ('equal', 6, 7, 8, 9)], 7, 9)
ops_list = [('equal', 0, 1, 0, 1), ('delete', 1, 2, 1, 1),
('replace', 2, 3, 1, 2), ('equal', 3, 6, 2, 5), ('insert', 6, 6, 5, 8), ('equal', 6, 7, 8, 9)]
assert ops[0] == ops_list[0]
assert ops[1] == ops_list[1]
assert ops[2] == ops_list[2]
assert ops[3] == ops_list[3]
assert ops[4] == ops_list[4]
assert ops[5] == ops_list[5]
assert ops[-1] == ops_list[-1]
assert ops[-2] == ops_list[-2]
assert ops[-3] == ops_list[-3]
assert ops[-4] == ops_list[-4]
assert ops[-5] == ops_list[-5]
assert ops[-6] == ops_list[-6]
with pytest.raises(IndexError):
ops[6]
with pytest.raises(IndexError):
ops[-7]
def test_opcode_inversion():
"""
test correct inversion of Opcodes
"""
ops = Opcodes([('equal', 0, 1, 0, 1), ('delete', 1, 2, 1, 1),
('replace', 2, 3, 1, 2), ('equal', 3, 6, 2, 5), ('insert', 6, 6, 5, 8), ('equal', 6, 7, 8, 9)], 7, 9)
assert ops.inverse().as_list() == [('equal', 0, 1, 0, 1), ('insert', 1, 1, 1, 2),
('replace', 1, 2, 2, 3), ('equal', 2, 5, 3, 6), ('delete', 5, 8, 6, 6), ('equal', 8, 9, 6, 7)]
def test_list_initialization():
"""
test whether list initialization works correctly
"""
ops = Levenshtein.opcodes("aaabaaa", "abbaaabba")
ops2 = Opcodes(ops.as_list(), ops.src_len, ops.dest_len)
assert ops == ops2
ops = Levenshtein.editops("aaabaaa", "abbaaabba")
ops2 = Editops(ops.as_list(), ops.src_len, ops.dest_len)
assert ops == ops2
ops = Levenshtein.opcodes("aaabaaa", "abbaaabba")
ops2 = Editops(ops.as_list(), ops.src_len, ops.dest_len)
assert ops.as_editops() == ops2
ops = Levenshtein.editops("aaabaaa", "abbaaabba")
ops2 = Opcodes(ops.as_list(), ops.src_len, ops.dest_len)
assert ops.as_opcodes() == ops2
ops = Levenshtein.editops("skdsakldsakdlasda", "djkajkdfkdgkhdfjrmecsidjf")
ops2 = Opcodes(ops.as_list(), ops.src_len, ops.dest_len)
assert ops.as_opcodes() == ops2
if __name__ == '__main__':
unittest.main() |
generate/build_tools/forge/defaults.py | flamencist/browser-extensions | 102 | 12714797 | 'Project-wide default values'
import sys
from os import path
FORGE_ROOT = path.dirname(path.dirname(__file__))
CONFIG_FILE = path.join(FORGE_ROOT, 'forge_build.json')
PASSWORD = "<PASSWORD>"
SRC_DIR = 'src'
APP_CONFIG_FILE = path.join(SRC_DIR, 'config.json')
IDENTITY_FILE = path.join(SRC_DIR, 'identity.json')
LOCAL_CONFIG_FILE = 'local_config.json'
TEMPLATE_DIR = '.template'
INSTRUCTIONS_DIR = TEMPLATE_DIR
|
src/utils/clean_links.py | LaudateCorpus1/hermes-5 | 135 | 12714799 | #!/usr/bin/env python
def clean_links(text):
"""Remove brackets around a wikilink, keeping the label instead of the page
if it exists.
"[[foobar]]" will become "foobar", but "[[foobar|code words]]" will return
"code words".
Args:
text (str): Full text of a Wikipedia article as a single string.
Returns:
str: A copy of the full text with all wikilinks cleaned.
"""
good_char_list = []
next_char = None
skip = 0
for pos,char in enumerate(text):
try:
next_char = text[pos+1]
except IndexError:
next_char = None
# Skip the character
if skip:
skip -= 1
continue
# Otherwise check if we have found a link
if char == '[' and next_char == '[':
skip = 1
            # Check whether the link has a label (a '|' before the closing ']]')
pipe_pos = text.find('|', pos)
if pipe_pos == -1:
continue
end_pos = text.find(']]', pos)
if pipe_pos < end_pos:
skip = pipe_pos - pos
elif char == ']' and next_char == ']':
skip = 1
# Otherwise just append the character
else:
good_char_list.append(char)
return ''.join(good_char_list)
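if __name__ == '__main__':
    # Hedged demo (not part of the original module) reusing the examples from the
    # docstring above.
    sample = 'See [[foobar]] and [[foobar|code words]] for details.'
    print(clean_links(sample))  # -> 'See foobar and code words for details.'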
|
py_utils/emu_utils.py | tamarindmonkey/.oh-my-comma | 102 | 12714806 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import psutil
import difflib
import argparse
import subprocess
import time
if __package__ is None:
from os import path
sys.path.append(path.abspath(path.join(path.dirname(__file__), '../py_utils')))
from py_utils.colors import COLORS
else:
from py_utils.colors import COLORS
COMMUNITY_PATH = '/data/community'
COMMUNITY_BASHRC_PATH = '/data/community/.bashrc'
OH_MY_COMMA_PATH = '/data/community/.oh-my-comma'
UPDATE_PATH = '{}/update.sh'.format(OH_MY_COMMA_PATH)
UNINSTALL_PATH = '{}/uninstall.sh'.format(OH_MY_COMMA_PATH)
OPENPILOT_PATH = '/data/openpilot'
FORK_PARAM_PATH = '/data/community/forks.json'
class ArgumentParser(argparse.ArgumentParser):
def error(self, message):
raise Exception('error: {}'.format(message))
class TimeDebugger:
def __init__(self, convention='s', round_to=4, silent=False):
assert convention in ['s', 'ms'], 'Must be "s" or "ms"!'
self.convention = convention
self.round_to = round_to
self.silent = silent
self.reset(full=True)
def reset(self, full=False):
self.last_time = time.time()
if full:
self.start_time = self.last_time
def print(self, msg=None, total=False):
if self.silent:
return
if not total:
elapsed = time.time() - self.last_time
elapsed *= 1000 if self.convention == 'ms' else 1
if msg is not None:
msg = 'Time to {}'.format(msg)
else:
msg = 'Time elapsed'
print('{}: {} {}'.format(msg, round(elapsed, self.round_to), self.convention))
else:
elapsed = time.time() - self.start_time
elapsed *= 1000 if self.convention == 'ms' else 1
print('Total: {} {}'.format(round(elapsed, self.round_to), self.convention))
self.reset(total)
class BaseFunctions:
def print_commands(self, error_msg=None, ascii_art=False):
if ascii_art:
print(EMU_ART)
if error_msg is not None:
error(error_msg)
max_cmd = max([len(_c) for _c in self.commands]) + 1
for idx, cmd in enumerate(self.commands):
desc = COLORS.CYAN + self.commands[cmd].description
print_cmd = '{} {}'.format(self.name, cmd)
if self.name != 'emu':
print_cmd = 'emu {}'.format(print_cmd)
print(COLORS.OKGREEN + ('- {:<%d} {}' % max_cmd).format(print_cmd + ':', desc))
if hasattr(self, '_help'):
# leading is for better differentiating between the different commands
if self._help(cmd, show_description=False, leading=' '):
print() # only add newline when there's more information to sift through
print(COLORS.ENDC, end='')
def next_arg(self, lower=True, ingest=True):
"""
Returns next arg and deletes arg from self.args if ingest=True
:param lower: Returns arg.lower()
:param ingest: Deletes returned arg from self.arg
:return:
"""
if len(self.args):
arg = self.args[0]
if lower:
arg = arg.lower()
if ingest:
del self.args[0]
else:
arg = None
return arg
def str_sim(a, b):
return difflib.SequenceMatcher(a=a, b=b).ratio()
def input_with_options(options, default=None):
"""
Takes in a list of options and asks user to make a choice.
The most similar option list index is returned along with the similarity percentage from 0 to 1
"""
user_input = input('[{}]: '.format('/'.join(options))).lower().strip()
if not user_input:
return default, 0.0
sims = [str_sim(i.lower().strip(), user_input) for i in options]
argmax = sims.index(max(sims))
return argmax, sims[argmax]
def most_similar(find, options):
sims = [[str_sim(i.lower().strip(), find.lower().strip()), i] for i in options]
sims = sorted(sims, reverse=True)
return [[o[1], o[0]] for o in sims]
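# Hedged usage sketch (not part of the original module): str_sim/most_similar
# back the fuzzy command matching, and input_with_options prompts the user. The
# option lists below are made up:
#
#     best, score = most_similar('updaet', ['update', 'uninstall', 'panda'])[0]
#     idx, sim = input_with_options(['Y', 'n'], default=0)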
def check_output(cmd, cwd=None, shell=False):
class Output:
def __init__(self, output='', s=True):
self.output = output
self.success = s
if isinstance(cmd, str) and not shell:
cmd = cmd.split()
try:
return Output(subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT, encoding='utf8', shell=shell))
except subprocess.CalledProcessError as e:
if e.output is None:
return Output(e, s=False) # command failed to execute
return Output(e.output) # command executed but it resulted in error
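# Hedged usage sketch (not part of the original module): check_output() wraps
# subprocess and reports failure via the returned object instead of raising.
# The command below is an arbitrary example:
#
#     r = check_output('uname -a')
#     if r.success:
#         print(r.output.strip())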
def run(cmd, out_file=None): # todo: return output with same format as check_output, but also output to user (current behavior)
"""
If cmd is a string, it is split into a list, otherwise it doesn't modify cmd.
The status is returned, True being success, False for failure
"""
if isinstance(cmd, str):
cmd = cmd.split()
f = None
if isinstance(out_file, str):
f = open(out_file, 'a')
try:
r = subprocess.call(cmd, stdout=f)
return not r
except (Exception, KeyboardInterrupt) as e:
# print(e)
return False
def kill(procname):
for proc in psutil.process_iter():
# check whether the process name matches
if proc.name() == procname:
proc.kill()
return True
return None
def is_affirmative():
i = None
print(COLORS.PROMPT, end='')
while i not in ['y', 'n', 'yes', 'no', 'sure', '']:
i = input('[Y/n]: ').lower().strip()
print(COLORS.ENDC)
return i in ['y', 'yes', 'sure', '']
def error(msg, end='\n', ret=False, start=''):
"""
The following applies to error, warning, and success methods
:param msg: The message to display
:param end: The ending char, default is \n
:param ret: Whether to return the formatted string, or print it
:return: The formatted string if ret is True
"""
e = start + '❌ {}{}{}'.format(COLORS.FAIL, msg, COLORS.ENDC)
if ret:
return e
print(e, end=end)
def warning(msg, end='\n', ret=False):
w = '{}{}{}'.format(COLORS.PROMPT, msg, COLORS.ENDC)
if ret:
return w
print(w, end=end)
def success(msg, end='\n', ret=False):
s = '{}{}{}'.format(COLORS.SUCCESS, msg, COLORS.ENDC)
if ret:
return s
print(s, end=end)
def info(msg, end='\n', ret=False):
s = '{}{}{}'.format(COLORS.WARNING, msg, COLORS.ENDC)
if ret:
return s
print(s, end=end)
EMU_ART = r""" _
-=(""" + COLORS.RED + """'""" + COLORS.CWHITE + """)
;;
//
//
: '.---.__
| --_-_)__)
`.____,'
\ \ """ + COLORS.OKGREEN + """ ___ ._ _ _ _ _ """ + COLORS.CWHITE + """
___\ \ """ + COLORS.OKGREEN + """/ ._>| ' ' || | |""" + COLORS.CWHITE + """
( \ """ + COLORS.OKGREEN + """\___.|_|_|_|`___|""" + COLORS.CWHITE + """
\
/""" + '\n'
|
backend/www/test/list_client_logs_test.py | xuantan/viewfinder | 645 | 12714818 | # Copyright 2012 Viewfinder Inc. All Rights Reserved.
# -*- coding: utf-8 -*-
__author__ = '<EMAIL> (<NAME>)'
import datetime
import logging
import mock
import time
from functools import partial
from tornado import options
from viewfinder.backend.base import otp, util
from viewfinder.backend.base.testing import async_test
from viewfinder.backend.db import client_log
from viewfinder.backend.www import json_schema
from viewfinder.backend.www.test import service_base_test
from viewfinder.backend.www.test.service_base_test import ClientLogRecord
class NewClientLogUrlTestCase(service_base_test.ServiceBaseTestCase):
def setUp(self):
super(NewClientLogUrlTestCase, self).setUp()
# Write sample client logs.
self._cur_t = time.time()
self._t_minus_1d = self._cur_t - 24 * 60 * 60
self._t_minus_2d = self._cur_t - 2 * 24 * 60 * 60
self._logs = [(self._cookie, ClientLogRecord(self._t_minus_2d, 'cl1.t-2', 'log 1, t-2')),
(self._cookie, ClientLogRecord(self._t_minus_1d, 'cl1.t-1', 'log 1, t-1')),
(self._cookie, ClientLogRecord(self._cur_t, 'cl1.t.1', 'log 1, t')),
(self._cookie, ClientLogRecord(self._cur_t, 'cl1.t.2', 'log 2, t')),
(self._cookie2, ClientLogRecord(self._cur_t, 'cl2', 'user 2, log 1, t'))]
for user_cookie, log in self._logs:
self._WriteClientLog(user_cookie, log)
def testListClientLogs(self):
"""Verify listing of client logs."""
start_timestamp = self._cur_t
end_timestamp = start_timestamp
response_dict = self._tester.SendAdminRequest('list_client_logs',
{'user_id': self._users[0].user_id,
'start_timestamp': start_timestamp,
'end_timestamp': end_timestamp})
urls = self._FilterList(response_dict['log_urls'])
self.assertEqual(2, len(urls))
content = self._FetchClientLog(urls[0]['url'])
self.assertEqual('log 1, t', content)
content = self._FetchClientLog(urls[1]['url'])
self.assertEqual('log 2, t', content)
def testMultipleDates(self):
"""Verify logs can be listed for multiple dates."""
start_timestamp = self._t_minus_2d
end_timestamp = self._cur_t
response_dict = self._tester.SendAdminRequest('list_client_logs',
{'user_id': self._users[0].user_id,
'start_timestamp': start_timestamp,
'end_timestamp': end_timestamp})
urls = self._FilterList(response_dict['log_urls'])
self.assertEqual(4, len(urls))
def testListFilter(self):
"""Verify logs can be filtered via regexp."""
start_timestamp = self._cur_t
end_timestamp = self._cur_t
response_dict = self._tester.SendAdminRequest('list_client_logs',
{'user_id': self._users[0].user_id,
'start_timestamp': start_timestamp,
'end_timestamp': end_timestamp,
'filter': 'cl1.t.2'})
urls = self._FilterList(response_dict['log_urls'])
self.assertEqual(1, len(urls))
self.assertTrue(urls[0]['filename'].endswith('dev-2-cl1.t.2'))
@mock.patch.object(client_log, 'MAX_CLIENT_LOGS', 1)
def testLimit(self):
"""Verify limit is respected."""
response_dict = self._tester.SendAdminRequest('list_client_logs',
{'user_id': self._users[0].user_id,
'start_timestamp': self._cur_t,
'end_timestamp': self._cur_t,
'filter': 'dev-2'})
urls = response_dict['log_urls']
self.assertEqual(2, len(urls))
self.assertTrue(urls[0]['filename'].endswith('dev-2-cl1.t.1'))
self.assertTrue(urls[1]['filename'].endswith('dev-2-cl1.t.2'))
def _FetchClientLog(self, url):
"""Fetches the client log specified by "url" and returns the
contents to "callback".
"""
response = self._RunAsync(self._tester.http_client.fetch, url, method='GET')
self.assertEqual(200, response.code)
return response.body
def _FilterList(self, log_urls):
"""Remove op logs from response that were created by base class user
registration.
"""
return [log_url for log_url in log_urls if 'Operation' not in log_url['url']]
|
recipes/Python/425345_Generic_commandline/recipe-425345.py | tdiprima/code | 2,023 | 12714833 | <filename>recipes/Python/425345_Generic_commandline/recipe-425345.py
# -- coding: iso8859-1
"""Generic option parser class. This class can be used
to write code that will parse command line options for
an application by invoking one of the standard Python
library command argument parser modules optparse or
getopt.
The class first tries to use optparse. It it is not there
(< Python 2.3), it invokes getopt. However, this is
transparent to the application which uses the class.
The class requires a dictionary with entries of the following
form for each command line option.
'option_var' : ('short=<short option>','long=<long option>',
'help=<help string>', 'meta=<meta variable>',
'default=<default value>', 'type=<option type>')
where, 'option_var' is the key for the option in the final
dictionary of option-value pairs. The value is a tuple of
strings, where each string consists of entries of the form,
'key=value', where 'key' is borrowed from the way optparse
represents each variables for an option setting.
To parse the arguments, call the method 'parse_arguments'.
The return value is a dictionary of the option-value pairs."""
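# Illustrative sketch of an option map and the parsed result (names and values are made up):
#
#   opts = {'infile': ('short=i', 'long=in', 'help=Input file', 'meta=IN'),
#           'verbose': ('short=V', 'long=verbose', 'help=Be verbose', 'type=bool')}
#   parser = GenericOptionParser(opts)
#   values = parser.parse_arguments()
#   # e.g. "-i data.txt -V" -> {'infile': 'data.txt', 'verbose': True}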
import sys
__author__="<NAME>"
class GenericOptionParserError(Exception):
def __init__(self,value):
self.value = value
def __str__(self):
return str(self.value)
class GenericOptionParser:
""" Generic option parser using
either optparse or getopt """
def __init__(self, optmap):
self._optmap = self._parse_optmap(optmap)
self._optdict = {}
self.maxw = 24
def _parse_optmap(self, map):
""" Internal method -> Parse option
map containing tuples and convert the
tuples to a dictionary """
optmap = {}
for key,value in map.items():
d = {}
for item in value:
if not item: continue
var,val=item.split('=')
d[var]=val
optmap[key] = d
return optmap
def parse_arguments(self):
""" Parse command line arguments and
return a dictionary of option-value pairs """
try:
self.optparse = __import__('optparse')
# For invoking help, when no arguments
# are passed.
if len(sys.argv)==1:
sys.argv.append('-h')
self._parse_arguments1()
except ImportError:
try:
import getopt
self.getopt = __import__('getopt')
self._parse_arguments2()
except ImportError:
raise GenericOptionParserError,'Fatal Error: No optparse or getopt modules found'
return self._optdict
def _parse_arguments1(self):
""" Parse command-line arguments using optparse """
p = self.optparse.OptionParser()
for key,value in self._optmap.items():
# Option destination is the key itself
option = key
# Default action is 'store'
action = 'store'
# Short option string
sopt = value.get('short','')
# Long option string
lopt = value.get('long','')
# Help string
helpstr = value.get('help','')
# Meta var
meta = value.get('meta','')
# Default value
defl = value.get('default','')
# Default type is 'string'
typ = value.get('type','string')
# If bool type...
if typ == 'bool':
action = 'store_true'
defl = bool(str(defl) == 'True')
if sopt: sopt = '-' + sopt
if lopt: lopt = '--' + lopt
# Add option
p.add_option(sopt,lopt,dest=option,help=helpstr,metavar=meta,action=action,
default=defl)
(options,args) = p.parse_args()
self._optdict = options.__dict__
def _parse_arguments2(self):
""" Parse command-line arguments using getopt """
# getopt requires help string to
# be generated.
if len(sys.argv)==1:
sys.exit(self._usage())
shortopt,longopt='h',['help']
# Create short option string and long option
# list for getopt
for key, value in self._optmap.items():
sopt = value.get('short','')
lopt = value.get('long','')
typ = value.get('type','string')
defl = value.get('default','')
# If bool type...
if typ == 'bool':
defl = bool(str(defl) == 'True')
# Set default value
self._optdict[key] = defl
if typ=='bool':
if sopt: shortopt += sopt
if lopt: longopt.append(lopt)
else:
if sopt: shortopt = "".join((shortopt,sopt,':'))
if lopt: longopt.append(lopt+'=')
# Parse
(optlist,args) = self.getopt.getopt(sys.argv[1:],shortopt,longopt)
# Match options
for opt,val in optlist:
# Invoke help
if opt in ('-h','--help'):
sys.exit(self._usage())
for key,value in self._optmap.items():
sopt = '-' + value.get('short','')
lopt = '--' + value.get('long','')
typ = value.get('type','string')
if opt in (sopt,lopt):
if typ=='bool': val = True
self._optdict[key]=val
del self._optmap[key]
break
def _usage(self):
""" Generate and return a help string
for the program, similar to the one
generated by optparse """
usage = ["usage: %s [options]\n\n" % sys.argv[0]]
usage.append("options:\n")
options = [(' -h, --help', 'show this help message and exit\n')]
maxlen = 0
for value in self._optmap.values():
sopt = value.get('short','')
lopt = value.get('long','')
help = value.get('help','')
meta = value.get('meta','')
optstr = ""
if sopt: optstr="".join((' -',sopt,meta))
if lopt: optstr="".join((optstr,', --',lopt))
if meta: optstr="".join((optstr,'=',meta))
l = len(optstr)
if l>maxlen: maxlen=l
options.append((optstr,help))
for x in range(len(options)):
optstr = options[x][0]
helpstr = options[x][1]
if maxlen<self.maxw - 1:
usage.append("".join((optstr,(maxlen-len(optstr) + 2)*' ', helpstr,'\n')))
elif len(optstr)<self.maxw - 1:
usage.append("".join((optstr,(self.maxw-len(optstr))*' ', helpstr,'\n')))
else:
usage.append("".join((optstr,'\n',self.maxw*' ', helpstr,'\n')))
return "".join(usage)
if __name__=="__main__":
d={ 'infile' : ('short=i','long=in','help=Input file for the program',
'meta=IN'),
'outfile': ('short=o','long=out','help=Output file for the program',
'meta=OUT'),
'verbose': ('short=V','long=verbose','help=Be verbose in output',
'type=bool') }
g=GenericOptionParser(d)
optdict = g.parse_arguments()
for key,value in optdict.items():
# Use the option and the value in
# your program
...
|
instances/__init__.py | NGXTDN/webvirtcloud | 1,246 | 12714925 | <reponame>NGXTDN/webvirtcloud<filename>instances/__init__.py
default_app_config = 'instances.apps.InstancesConfig'
|
dataset/dota_coco/dota_generate_test_result.py | leakyH/PaddleDetection | 7,782 | 12715023 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import glob
import numpy as np
from multiprocessing import Pool
from functools import partial
from shapely.geometry import Polygon
import argparse
nms_thresh = 0.1
class_name_15 = [
'plane', 'baseball-diamond', 'bridge', 'ground-track-field',
'small-vehicle', 'large-vehicle', 'ship', 'tennis-court',
'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout',
'harbor', 'swimming-pool', 'helicopter'
]
class_name_16 = [
'plane', 'baseball-diamond', 'bridge', 'ground-track-field',
'small-vehicle', 'large-vehicle', 'ship', 'tennis-court',
'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout',
'harbor', 'swimming-pool', 'helicopter', 'container-crane'
]
def rbox_iou(g, p):
"""
iou of rbox
"""
g = np.array(g)
p = np.array(p)
g = Polygon(g[:8].reshape((4, 2)))
p = Polygon(p[:8].reshape((4, 2)))
g = g.buffer(0)
p = p.buffer(0)
if not g.is_valid or not p.is_valid:
return 0
inter = Polygon(g).intersection(Polygon(p)).area
union = g.area + p.area - inter
if union == 0:
return 0
else:
return inter / union
def py_cpu_nms_poly_fast(dets, thresh):
"""
Args:
dets: pred results
thresh: nms threshold
Returns: index of keep
"""
obbs = dets[:, 0:-1]
x1 = np.min(obbs[:, 0::2], axis=1)
y1 = np.min(obbs[:, 1::2], axis=1)
x2 = np.max(obbs[:, 0::2], axis=1)
y2 = np.max(obbs[:, 1::2], axis=1)
scores = dets[:, 8]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
polys = []
for i in range(len(dets)):
tm_polygon = [
dets[i][0], dets[i][1], dets[i][2], dets[i][3], dets[i][4],
dets[i][5], dets[i][6], dets[i][7]
]
polys.append(tm_polygon)
polys = np.array(polys)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1)
h = np.maximum(0.0, yy2 - yy1)
hbb_inter = w * h
hbb_ovr = hbb_inter / (areas[i] + areas[order[1:]] - hbb_inter)
# h_keep_inds = np.where(hbb_ovr == 0)[0]
h_inds = np.where(hbb_ovr > 0)[0]
tmp_order = order[h_inds + 1]
for j in range(tmp_order.size):
iou = rbox_iou(polys[i], polys[tmp_order[j]])
hbb_ovr[h_inds[j]] = iou
inds = np.where(hbb_ovr <= thresh)[0]
order = order[inds + 1]
return keep
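# Minimal usage sketch (coordinates are made up): each row of dets holds the 8
# polygon coordinates followed by a confidence score; the returned list contains
# the indices of the boxes to keep after rotated-box NMS.
#
#   dets = np.array([[0., 0., 10., 0., 10., 10., 0., 10., 0.9],
#                    [1., 1., 11., 1., 11., 11., 1., 11., 0.8],
#                    [50., 50., 60., 50., 60., 60., 50., 60., 0.7]])
#   keep = py_cpu_nms_poly_fast(dets, thresh=0.1)   # -> e.g. [0, 2]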
def poly2origpoly(poly, x, y, rate):
origpoly = []
for i in range(int(len(poly) / 2)):
tmp_x = float(poly[i * 2] + x) / float(rate)
tmp_y = float(poly[i * 2 + 1] + y) / float(rate)
origpoly.append(tmp_x)
origpoly.append(tmp_y)
return origpoly
def nmsbynamedict(nameboxdict, nms, thresh):
"""
Args:
nameboxdict: nameboxdict
nms: nms
thresh: nms threshold
Returns: nms result as dict
"""
nameboxnmsdict = {x: [] for x in nameboxdict}
for imgname in nameboxdict:
keep = nms(np.array(nameboxdict[imgname]), thresh)
outdets = []
for index in keep:
outdets.append(nameboxdict[imgname][index])
nameboxnmsdict[imgname] = outdets
return nameboxnmsdict
def merge_single(output_dir, nms, pred_class_lst):
"""
Args:
output_dir: output_dir
nms: nms
pred_class_lst: pred_class_lst
class_name: class_name
Returns:
"""
class_name, pred_bbox_list = pred_class_lst
nameboxdict = {}
for line in pred_bbox_list:
splitline = line.split(' ')
subname = splitline[0]
splitname = subname.split('__')
oriname = splitname[0]
pattern1 = re.compile(r'__\d+___\d+')
x_y = re.findall(pattern1, subname)
x_y_2 = re.findall(r'\d+', x_y[0])
x, y = int(x_y_2[0]), int(x_y_2[1])
pattern2 = re.compile(r'__([\d+\.]+)__\d+___')
rate = re.findall(pattern2, subname)[0]
confidence = splitline[1]
poly = list(map(float, splitline[2:]))
origpoly = poly2origpoly(poly, x, y, rate)
det = origpoly
det.append(confidence)
det = list(map(float, det))
if (oriname not in nameboxdict):
nameboxdict[oriname] = []
nameboxdict[oriname].append(det)
nameboxnmsdict = nmsbynamedict(nameboxdict, nms, nms_thresh)
# write result
dstname = os.path.join(output_dir, class_name + '.txt')
with open(dstname, 'w') as f_out:
for imgname in nameboxnmsdict:
for det in nameboxnmsdict[imgname]:
confidence = det[-1]
bbox = det[0:-1]
outline = imgname + ' ' + str(confidence) + ' ' + ' '.join(
map(str, bbox))
f_out.write(outline + '\n')
def dota_generate_test_result(pred_txt_dir,
output_dir='output',
dota_version='v1.0'):
"""
pred_txt_dir: dir of pred txt
output_dir: dir of output
dota_version: dota_version v1.0 or v1.5 or v2.0
"""
pred_txt_list = glob.glob("{}/*.txt".format(pred_txt_dir))
# step1: summary pred bbox
pred_classes = {}
class_lst = class_name_15 if dota_version == 'v1.0' else class_name_16
for class_name in class_lst:
pred_classes[class_name] = []
for current_txt in pred_txt_list:
img_id = os.path.split(current_txt)[1]
img_id = img_id.split('.txt')[0]
with open(current_txt) as f:
res = f.readlines()
for item in res:
item = item.split(' ')
pred_class = item[0]
item[0] = img_id
pred_bbox = ' '.join(item)
pred_classes[pred_class].append(pred_bbox)
pred_classes_lst = []
for class_name in pred_classes.keys():
print('class_name: {}, count: {}'.format(class_name,
len(pred_classes[class_name])))
pred_classes_lst.append((class_name, pred_classes[class_name]))
# step2: merge
pool = Pool(len(class_lst))
nms = py_cpu_nms_poly_fast
mergesingle_fn = partial(merge_single, output_dir, nms)
pool.map(mergesingle_fn, pred_classes_lst)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='dota anno to coco')
parser.add_argument('--pred_txt_dir', help='path of pred txt dir')
parser.add_argument(
'--output_dir', help='path of output dir', default='output')
parser.add_argument(
'--dota_version',
help='dota_version, v1.0 or v1.5 or v2.0',
type=str,
default='v1.0')
args = parser.parse_args()
# process
dota_generate_test_result(args.pred_txt_dir, args.output_dir,
args.dota_version)
print('done!')
|
src/super_gradients/common/factories/optimizers_type_factory.py | Deci-AI/super-gradients | 308 | 12715040 | import importlib
from typing import Union
from torch import optim
from super_gradients.common.factories.base_factory import AbstractFactory
from super_gradients.training.utils.optimizers.rmsprop_tf import RMSpropTF
from super_gradients.training.utils.optimizers.lamb import Lamb
class OptimizersTypeFactory(AbstractFactory):
"""
This is a special factory for torch.optim.Optimizer.
This factory does not instantiate an object but rather return the type, since optimizer instantiation
requires the model to be instantiated first
"""
def __init__(self):
self.type_dict = {
"SGD": optim.SGD,
"Adam": optim.Adam,
"RMSprop": optim.RMSprop,
"RMSpropTF": RMSpropTF,
"Lamb": Lamb
}
def get(self, conf: Union[str]):
"""
Get a type.
:param conf: a configuration
if string - assumed to be a type name (not the real name, but a name defined in the Factory)
a dictionary is not supported, since the actual instantiation takes place elsewhere
If provided value is not one of the three above, the value will be returned as is
"""
if isinstance(conf, str):
if conf in self.type_dict:
return self.type_dict[conf]
else:
try:
lib = '.'.join(conf.split('.')[:-1])
module = conf.split('.')[-1]
lib = importlib.import_module(lib) # Import the required packages
class_type = lib.__dict__[module]
return class_type
except RuntimeError:
raise RuntimeError(f"Unknown object type: {conf} in configuration. valid types are: {self.type_dict.keys()} or a class "
"type available in the env (or the form 'package_name.sub_package.MyClass'")
else:
return conf
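# Usage sketch (assumes torch and a model exposing .parameters() exist in the caller):
#
#   factory = OptimizersTypeFactory()
#   optimizer_cls = factory.get("SGD")                 # -> torch.optim.SGD
#   optimizer = optimizer_cls(model.parameters(), lr=0.01)
#
# A dotted path such as "torch.optim.AdamW" is resolved through importlib instead
# of the built-in type_dict.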
|
lib/interact.py | alegrey91/legion | 430 | 12715085 | <reponame>alegrey91/legion
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os, sys, signal
from threading import Thread
from termcolor import colored
from cmd import Cmd
from time import sleep
from lib.main import main_run
from lib.main import valid_protos
from lib.main import print_error
from warriors.general import General
class LegionPrompt(Cmd):
intro = """\
██╗ ███████╗ ██████╗ ██╗ ██████╗ ███╗ ██╗
██║ ██╔════╝██╔════╝ ██║██╔═══██╗████╗ ██║
██║ █████╗ ██║ ███╗██║██║ ██║██╔██╗ ██║
██║ ██╔══╝ ██║ ██║██║██║ ██║██║╚██╗██║
███████╗███████╗╚██████╔╝██║╚██████╔╝██║ ╚████║
╚══════╝╚══════╝ ╚═════╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ v2.0"""+colored("\nI wanted to destroy everything beautiful I'd never have\n", 'red', attrs=['bold'])
prompt = '('+colored("legion", 'blue', attrs=['bold'])+') > '
def __init__(self, parser, proto, host, workdir, port, intensity, username, ulist, plist, notuse, extensions, path, password,
ipv6, domain, verbose):
Cmd.__init__(self)
self.all_values = {"proto": proto, "host": host, "workdir": workdir, "port": port, "intensity": intensity,
"username": username, "ulist": ulist, "plist": plist, "notuse": notuse, "extensions": extensions,
"path": path, "password": password, "ipv6": ipv6, "domain": domain, "verbose": verbose,
"reexec": False}
self.priv_values = {"interactive": True, "protohelp": False, "executed": [], "exec": ""}
self.parser = parser
self.ws = []
self.general = ""
self.msgs = {}
def emptyline(self):
pass
def do_exit(self, inp):
'''exit the application'''
print("Bye!")
self.stop_procs()
os.kill(os.getpid(), signal.SIGTERM)
def do_quit(self, inp):
'''exit the application'''
print("Bye!")
self.stop_procs()
os.kill(os.getpid(), signal.SIGTERM)
def do_stopall(self, inp):
'''Stop ALL running processes'''
self.stop_procs()
def do_stop(self, procname):
'''Stop a process given the name'''
self.stop_p(procname)
def do_protos(self, args):
'''List the name of valid protocols'''
c = 0
for i, p in enumerate(valid_protos):
clr = "green" if p not in ["scanner"] else "blue"
myend = "\t\t" if len(p) < 8 else "\t"
if c % 3 != 0:
print(colored(p, clr, attrs=['bold']), end=myend)
else:
print("\n"+colored(p, clr, attrs=['bold']), end=myend)
c += 1
print()
print(colored("There are a total of "+str(len(valid_protos))+" supported protocols", "yellow", attrs=['bold']))
def do_set(self, args):
'''Set variable value: set proto http'''
if len(args.split(" ")) < 2:
print_error("set <variable> <value>")
else:
variable = args.split(" ")[0].lower()
value = args.split(" ")[1].strip()
if variable == "proto" and value.lower() not in valid_protos:
print_error("Not valid protocol: "+value)
elif variable in ["port", "intensity"] and not value.isdigit():
print_error("Please set a number: "+value)
elif variable in ["reexec", "verbose"] and not value.lower() in ["true", "false"]:
print_error("Please set a boolean(true or false): "+value)
elif variable.lower() in self.all_values:
value = value if not value.lower() in ["true", "false"] else (True if value.lower() == "true" else False)
self.all_values[variable] = value
print(colored(variable.capitalize(), "blue", attrs=['bold']) + ": " + colored(str(value), "yellow", attrs=['bold']))
else:
print_error(variable + " is not valid")
def do_unset(self, args):
'''Set variable to null'''
if len(args.split(" ")) < 1:
print_error("unset <variable>")
else:
variable = args.split(" ")[0].lower()
if variable.lower() in self.all_values:
self.all_values[variable] = ""
print(colored(variable.capitalize(), "blue", attrs=['bold']) + ": " + colored("Null", "magenta", attrs=['bold']))
else:
print_error(variable + " is not valid")
def do_get(self, args):
'''Get variable value: get proto'''
variable = args.split(" ")[0].lower()
if variable.lower() in self.all_values:
print(variable + ": " + str(self.all_values[variable]))
else:
print_error(variable + " is not valid")
def do_options(self, _):
'''Get all Parameters and their value'''
for key, value in sorted(self.all_values.items()):
if key == "proto":
print(colored(str(key) + ": ", 'yellow', attrs=['bold']) + str(value))
else:
print(colored(str(key)+": ", 'cyan', attrs=['bold'])+str(value))
def do_procs(self, _):
'''Get information about running and run processes'''
self.print_procs(self.ws)
if self.general != "":
print()
print(colored("Warriors sent by the General:", "blue", attrs=['bold', 'underline']))
self.print_procs(self.general.get_warriors())
print("")
def do_out(self, filename):
        '''Get the output of an executed tool: out smbclient'''
name = filename + ".out"
for root, dirs, files in os.walk(self.all_values["workdir"]+"/"+self.all_values["host"]):
if name in files:
with open(os.path.join(root, name), 'r') as f:
print(f.read())
break
def do_err(self, filename):
        '''Get the error output of an executed tool: err smbclient'''
name = filename + ".err"
for root, dirs, files in os.walk(self.all_values["workdir"]):
if name in files:
with open(os.path.join(root, name), 'r') as f:
print(f.read())
break
def do_info(self, _):
'''Get info of the selected protocol'''
self.priv_values["protohelp"] = True
self.initW()
self.priv_values["protohelp"] = False
def do_run(self, _):
        '''Execute the configured protocol attack'''
self.priv_values["protohelp"] = False
self.update_executed()
warrior = self.initW()
if warrior != -1: # If -1, then something went wrong creating the warrior
self.ws.append(warrior)
thread = Thread(target=warrior.run)
thread.start()
else:
print_error("Something went wrong, nothing is going to be executed")
def do_exec(self, args):
'''Execute the indicated cmd'''
if len(args.split(" ")) != 1:
print_error("exec <CMDname>")
else:
cmd = args.split(" ")[0].lower()
self.priv_values["exec"] = cmd
warrior = self.initW()
self.priv_values["exec"] = ""
if warrior != -1: # If -1, then something went wrong creating the warrior
self.ws.append(warrior)
thread = Thread(target=warrior.run)
thread.start()
else:
print_error("Something went wrong, nothing is going to be executed")
def do_startGeneral(self, _):
        '''Start a General that will help: automate the scan and launch scripts depending on the discovered services. Only one at a time is allowed.'''
if self.general == "":
print(colored("Starting General", "blue", attrs=['bold']))
self.general = General(self.parser, self.all_values["host"], "0", self.all_values["workdir"], "/", self.all_values["intensity"],
self.all_values["username"], self.all_values["ulist"], self.all_values["password"], self.all_values["plist"],
self.all_values["notuse"], self.all_values["extensions"], self.all_values["path"], self.all_values["reexec"],
self.all_values["ipv6"], self.all_values["domain"], self.priv_values["interactive"],
self.all_values["verbose"], self.priv_values["executed"], self.priv_values["exec"])
thread = Thread(target=self.general.run)
thread.start()
else:
print(colored("general is already running... You can stop it with stopGeneral",'blue'))
def do_stopGeneral(self, _):
'''Stop the general'''
print(colored("Stopping General...", 'blue', attrs=['bold']))
self.general.stop_general()
self.general = ""
def initW(self):
'''Initialize the current Warrior'''
return main_run(self.parser, self.all_values["proto"], self.all_values["host"],
self.all_values["workdir"], self.all_values["port"], self.all_values["intensity"],
self.all_values["username"], self.all_values["ulist"],
self.all_values["plist"], self.priv_values["protohelp"], self.all_values["notuse"],
self.all_values["extensions"], self.all_values["path"], self.all_values["password"],
self.all_values["ipv6"], self.all_values["domain"], self.priv_values["interactive"],
self.all_values["verbose"], self.all_values["reexec"], self.priv_values["executed"],
self.priv_values["exec"])
def print_procs(self, ws):
last = ""
ws = sorted(ws, key=lambda x: x.get_proto())
for w in ws:
if type(w) is int:
print_error("An INT was found as a WARRIOR, TAKE A LOOK!")
continue
if last != w.get_proto():
print()
print(colored(w.get_proto(), 'blue', attrs=['reverse', 'bold']))
last = w.get_proto()
procs, ch = w.get_procs_info()
procs = sorted(procs, key=lambda x: x["name"])
for p in procs:
if p["proc"].is_alive():
print(colored(p["name"] + ": ", 'yellow', attrs=['blink', 'bold']) + p["cmd"])
else:
print(colored(p["name"] + ": ", 'green', attrs=['bold']) + p["cmd"])
if ch:
print(colored("Command require sequencial object, waiting for execute next commnad...", 'red', attrs=['bold']))
def get_pids(self):
for w in self.ws:
self.msgs = {**self.msgs, **w.get_all_queue()}
if self.general != "":
for w in self.general.get_warriors():
self.msgs = {**self.msgs, **w.get_all_queue()}
def stop_p(self, param_name):
self.get_pids()
self.kill_pbyname(param_name)
def stop_procs(self):
self.proc_stop_procs(self.ws)
if self.general != "":
self.proc_stop_procs(self.general.get_warriors())
def proc_stop_procs(self, ws):
rep = True
while rep:
rep = False
self.get_pids()
for w in ws:
procs, ch = w.get_procs_info()
for p in procs:
if p["proc"].is_alive():
self.kill_pbyname(p["name"])
rep = True
sleep(0.5)
def kill_pbyname(self, pname):
if pname in self.msgs:
os.kill(int(self.msgs[pname]), signal.SIGKILL)
print(colored("Terminated: ", 'green', attrs=['bold']) + pname + "("+str(self.msgs[pname])+")")
else:
print(colored("Not found: ", 'red', attrs=['bold']) + pname)
def update_executed(self):
for w in self.ws:
self.priv_values["executed"].append(w.get_executed())
if self.general != "":
for w in self.general.get_warriors():
self.priv_values["executed"].append(w.get_executed())
|
samples/device_manager/device_manager.py | IMULMUL/PythonForWindows | 479 | 12715106 | <reponame>IMULMUL/PythonForWindows
import sys
import os.path
sys.path.append(os.path.abspath(__file__ + "\..\.."))
import windows
import windows.generated_def as gdef
devmgr = windows.system.device_manager
print("Device manager is {0}".format(devmgr))
print("Enumerating the first 3 device classes")
for cls in devmgr.classes[:3]:
print(" * {0}".format(cls))
print("Finding device class 'System'")
# Allow devmgr.classes["name"] ?
system_cls = [cls for cls in devmgr.classes if cls.name == b"System"][0]
print(" * {0}".format(system_cls))
print(" Enumerating some devices of 'System'")
devices = system_cls.devices.all()
for devinst in (devices[0], devices[25], devices[35]): # Some "random" devices to have interesting ones
print(" * {0}".format(devinst))
devconf = devinst.allocated_configuration
if not devconf:
continue
print(" Enumerating allocated resources:")
for resource in devconf.resources:
print(" * {0}".format(resource))
# python64 samples\device\device_manager.py
# Device manager is <windows.winobject.device_manager.DeviceManager object at 0x0000000003669908>
# Enumerating the first 3 device classes
# * <DeviceClass name="XboxComposite" guid=05F5CFE2-4733-4950-A6BB-07AAD01A3A84>
# * <DeviceClass name="DXGKrnl" guid=1264760F-A5C8-4BFE-B314-D56A7B44A362>
# * <DeviceClass name="RemotePosDevice" guid=13E42DFA-85D9-424D-8646-28A70F864F9C>
# Finding device class 'System'
# * <DeviceClass name="System" guid=4D36E97D-E325-11CE-BFC1-08002BE10318>
# Enumerating some devices of 'System'
# * <DeviceInstance "Motherboard resources" (id=1)>
# * <DeviceInstance "Microsoft ACPI-Compliant Embedded Controller" (id=26)>
# Enumerating allocated resources:
# * <IoResource : [0x00000000000062-0x00000000000062]>
# * <IoResource : [0x00000000000066-0x00000000000066]>
# * <DeviceInstance "High Definition Audio Controller" (id=36)>
# Enumerating allocated resources:
# * <MemoryResource : [0x000000f7080000-0x000000f7083fff]>
# * <DevicePrivateResource type=ResType_DevicePrivate(0x8001)>
# * <IrqResource : [0x00000000000011]>
|
nuplan/planning/metrics/evaluation_metrics/common/distance_to_baseline.py | motional/nuplan-devkit | 128 | 12715117 | <filename>nuplan/planning/metrics/evaluation_metrics/common/distance_to_baseline.py<gh_stars>100-1000
from typing import List
import numpy as np
from shapely.geometry import Point
from nuplan.common.actor_state.state_representation import Point2D
from nuplan.common.maps.abstract_map import AbstractMap
from nuplan.planning.metrics.evaluation_metrics.base.metric_base import MetricBase
from nuplan.planning.metrics.metric_result import MetricStatistics, MetricStatisticsType, TimeSeries
from nuplan.planning.metrics.utils.route_extractor import get_route
from nuplan.planning.metrics.utils.state_extractors import extract_ego_center, extract_ego_time_point
from nuplan.planning.scenario_builder.abstract_scenario import AbstractScenario
from nuplan.planning.simulation.history.simulation_history import SimulationHistory
class DistanceToBaselineStatistics(MetricBase):
"""Statistics on distance of center of ego from nearest baseline."""
def __init__(self, name: str, category: str) -> None:
"""
Initializes the DistanceToBaselineStatistics class
:param name: Metric name
:param category: Metric category.
"""
super().__init__(name=name, category=category)
@staticmethod
def compute_distance_to_route_baseline(map_api: AbstractMap, poses: List[Point2D]) -> List[float]:
"""
Returns minimum distances of each ego pose to the baseline of a lane or lane_connector that it
belongs to one, if it does not belong to any lane or lane_connector inf is returned
:param map_api: a map
:param ego_poses: list of ego poses
:return list of ditances to baseline, or inf.
"""
# Get the list of lane or lane_connectors ego belongs to.
ego_route = get_route(map_api=map_api, poses=poses)
# For each (route_obj, pose), if route_obj is not None, compute the distance of pose from its
# baseline, otherwise set distance to inf
distances = []
for route_obj, pose in zip(ego_route, poses):
if len(route_obj) == 0:
distances.append(np.inf)
continue
baseline_paths = [one_route_obj.baseline_path() for one_route_obj in route_obj]
dist_to_route = min(
baseline_path.linestring.distance(Point(pose.x, pose.y)) for baseline_path in baseline_paths
)
distances.append(dist_to_route)
return distances
def compute(self, history: SimulationHistory, scenario: AbstractScenario) -> List[MetricStatistics]:
"""
Returns the estimated metric
:param history: History from a simulation engine
:param scenario: Scenario running this metric
:return: the estimated metric.
"""
# Extract xy coordinates of center of ego from history.
ego_states = history.extract_ego_state
ego_poses = extract_ego_center(ego_states)
# Compute distance of center poses from the baseline of route objects.
distance_to_baseline = self.compute_distance_to_route_baseline(map_api=history.map_api, poses=ego_poses)
ego_timestamps = extract_ego_time_point(ego_states)
time_series = TimeSeries(unit='meters', time_stamps=list(ego_timestamps), values=list(distance_to_baseline))
statistics_type_list = [MetricStatisticsType.MAX, MetricStatisticsType.MEAN]
metric_statistics = self._compute_time_series_statistic(
time_series=time_series, statistics_type_list=statistics_type_list
)
results = self._construct_metric_results(
metric_statistics=metric_statistics, scenario=scenario, time_series=time_series
)
return results # type: ignore
|
v7.0/act_timing.py | jsstwright/osumapper | 296 | 12715119 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
#
# Timing
#
import numpy as np
import re, os
import include.id3reader_p3 as id3
from shutil import copy
from timing import *
from metadata import *
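# Usage sketch for get_timed_osu_file() defined below (file names are placeholders):
#
#   osu_file = get_timed_osu_file("song.mp3", game_mode=3, mania_key_count=4)
#   # -> detects BPM/offset, writes "timing.osu" and copies the audio to ./audio.mp3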
def get_timed_osu_file(music_path, input_filename = "assets/template.osu", output_filename = "timing.osu", game_mode = 0, mania_key_count = None):
with open(input_filename) as osu_file:
osu_text = osu_file.read()
rdr = id3.Reader(music_path)
artist = rdr.get_value("performer")
if artist is None:
artist = "unknown"
title = rdr.get_value("title")
if title is None:
title = re.sub("\.[^\.]*$", "", os.path.basename(music_path))
bpm, offset = get_timing(music_path)
osu_text = re.sub("{audio_filename}", "audio.mp3", osu_text)
osu_text = re.sub("{game_mode}", str(game_mode), osu_text)
osu_text = re.sub("{artist}", artist, osu_text)
osu_text = re.sub("{title}", title, osu_text)
osu_text = re.sub("{version}", get_difficulty_name(), osu_text)
osu_text = re.sub("{hp_drain}", "{}".format(np.random.randint(0, 101) / 10), osu_text)
if mania_key_count is None:
osu_text = re.sub("{circle_size}", "{}".format(np.random.randint(30, 51) / 10), osu_text)
else:
osu_text = re.sub("{circle_size}", "{}".format(mania_key_count), osu_text)
osu_text = re.sub("{overall_difficulty}", "{}".format(np.random.randint(50, 91) / 10), osu_text)
osu_text = re.sub("{approach_rate}", "{}".format(np.random.randint(70, 96) / 10), osu_text)
osu_text = re.sub("{slider_velocity}", "{}".format(np.random.randint(12, 26) / 10), osu_text)
osu_text = re.sub("{tickLength}", "{}".format(60000 / bpm), osu_text)
osu_text = re.sub("{offset}", "{}".format(int(offset)), osu_text)
osu_text = re.sub("{colors}", get_colors(), osu_text)
osu_text = re.sub("{hit_objects}", "", osu_text)
with open(output_filename, 'w', encoding="utf8") as osu_output:
osu_output.write(osu_text)
copy(music_path, "./audio.mp3")
return output_filename |
communicate/dbus_notify.py | lukaszbinden/process-watcher | 113 | 12715138 |
import sys
import notify2
from notify2 import Notification
notify2.init(sys.argv[0])
def send(process=None, subject_format='{executable} process {pid} ended',
timeout=notify2.EXPIRES_NEVER):
"""Display a Desktop Notification via DBUS (notify2)
:param process: information about process. (.info() inserted into body)
:param subject_format: subject format string. (uses process.__dict__)
:param timeout: how long to display notification (milliseconds) default 0 (never expires)
"""
notif = Notification(subject_format.format(**process.__dict__),
process.info())
notif.timeout = timeout
notif.show()
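# Usage sketch (illustrative only): `process` is expected to expose .info() and the
# attributes referenced in subject_format (here .executable and .pid).
#
#   class _FakeProcess:
#       executable, pid = 'bash', 1234
#       def info(self):
#           return 'bash (pid 1234) exited'
#
#   send(process=_FakeProcess(), timeout=5000)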
|
pyairctrl/__init__.py | GSzabados/py-air-control | 203 | 12715210 | """py-air-control."""
|
tests/util/test_store.py | spxtr/bumblebee-status | 1,089 | 12715215 | <reponame>spxtr/bumblebee-status<gh_stars>1000+
import pytest
import util.store
@pytest.fixture
def emptyStore():
return util.store.Store()
@pytest.fixture
def store():
return util.store.Store()
def test_get_of_unset_key(emptyStore):
assert emptyStore.get("any-key") == None
assert emptyStore.get("any-key", "default-value") == "default-value"
def test_get_of_set_key(store):
store.set("key", "value")
assert store.get("key") == "value"
def test_overwrite_set(store):
store.set("key", "value 1")
store.set("key", "value 2")
assert store.get("key") == "value 2"
def test_unused_keys(store):
store.set("key 1", "value x")
store.set("key 2", "value y")
assert sorted(store.unused_keys()) == sorted(["key 1", "key 2"])
store.get("key 2")
assert store.unused_keys() == ["key 1"]
store.get("key 1")
assert store.unused_keys() == []
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
self-paced-labs/vertex-ai/vertex-ai-qwikstart/utils/data_download.py | memeyankm/training-data-analyst | 6,140 | 12715262 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import logging
import shutil
import wget
import argparse
import pandas as pd
from google.cloud import storage
from google.cloud import bigquery
from google.cloud.exceptions import NotFound, Conflict
from dataset_schema import table_schema
from dataset_clean import dataset_clean_query
from dataset_ml import dataset_ml_query
LOCAL_PATH ="./data"
FILENAME = "online_retail"
def download_url2gcs(args):
"""
args:
"""
#set GCS client.
client = storage.Client()
# Retrieve GCS bucket.
bucket = client.get_bucket(args.GCS_BUCKET)
blob = bucket.blob("data/online_retail.csv")
#See if file already exists.
if blob.exists() == False:
try:
os.mkdir(LOCAL_PATH)
logging.info('Downloading xlsx file...')
local_xlsx = wget.download(args.URL, out=f"{LOCAL_PATH}/{FILENAME}.xlsx")
logging.info('Converting xlsx -> csv...')
df = pd.read_excel(local_xlsx)
df.to_csv(f"{LOCAL_PATH}/{FILENAME}.csv", index=False)
logging.info('Uploading local csv file to GCS...')
blob.upload_from_filename(f"{LOCAL_PATH}/{FILENAME}.csv")
logging.info('Copied local csv file to GCS.')
# Delete all contents of a directory using shutil.rmtree() and handle exceptions.
try:
shutil.rmtree(LOCAL_PATH)
logging.info('Cleaning up local tmp data directory...')
except:
logging.error('Error while deleting local tmp data directory.')
#print error if file doesn't exist.
except BaseException as error:
logging.error('An exception occurred: {}'.format(error))
#print error if file already exists in GCS.
else:
logging.warning('File already exists in GCS.')
def upload_gcs2bq(args, schema):
"""
args:
schema:
"""
# Construct a BigQuery client object.
client = bigquery.Client()
# Construct a full Dataset object to send to the API.
logging.info('Initializing BigQuery dataset.')
dataset = bigquery.Dataset(f"{args.PROJECT_ID}.{args.BQ_DATASET_NAME}")
    # Specify the geographic location where the dataset should reside
    # (must be set before the dataset is created).
    dataset.location = args.BQ_LOCATION
    try:
        # Send the dataset to the API for creation, with an explicit timeout.
        # Raises google.api_core.exceptions.Conflict if the Dataset already
        # exists within the project.
        dataset = client.create_dataset(dataset, timeout=30)  # Make an API request.
except Conflict:
logging.warning('Dataset %s already exists, not creating.', dataset.dataset_id)
else:
logging.info("Created dataset %s.%s", client.project, dataset.dataset_id)
try:
URI = f"gs://{args.GCS_BUCKET}/data/{FILENAME}.csv"
RAW_TABLE_ID = f"{args.PROJECT_ID}.{args.BQ_DATASET_NAME}.{args.BQ_RAW_TABLE_NAME}"
# Load job.
job_config = bigquery.LoadJobConfig(
schema=schema,
skip_leading_rows=1,
allow_jagged_rows=True,
write_disposition="WRITE_TRUNCATE",
source_format=bigquery.SourceFormat.CSV)
load_job = client.load_table_from_uri(source_uris=URI, destination=RAW_TABLE_ID, job_config=job_config)
logging.info('BQ raw dataset load job starting...')
load_job.result() # Waits for the job to complete.
logging.info('BQ raw dataset load job complete.')
except BaseException as error:
logging.error('An exception occurred: {}'.format(error))
destination_table = client.get_table(RAW_TABLE_ID) # Make an API request.
logging.info("Loaded %s rows into %s.",destination_table.num_rows, RAW_TABLE_ID)
def make_dataset_clean_bq(args, query: str):
"""
args:
query:
"""
client = bigquery.Client()
CLEAN_TABLE_ID = f"{args.PROJECT_ID}.{args.BQ_DATASET_NAME}.{args.BQ_CLEAN_TABLE_NAME}"
RAW_TABLE_ID = f"{args.PROJECT_ID}.{args.BQ_DATASET_NAME}.{args.BQ_RAW_TABLE_NAME}"
clean_query = query.replace("@CLEAN_TABLE_ID", CLEAN_TABLE_ID).replace("@RAW_TABLE_ID", RAW_TABLE_ID)
logging.info('BQ make clean dataset starting...')
try:
job = client.query(clean_query)
_ = job.result()
logging.info('BQ make clean dataset complete')
except BaseException as error:
logging.error('An exception occurred: {}'.format(error))
destination_table = client.get_table(CLEAN_TABLE_ID) # Make an API request.
logging.info("Loaded %s rows into %s.",destination_table.num_rows, CLEAN_TABLE_ID)
def make_dataset_ml_bq(args, query: str):
"""
args:
query:
"""
client = bigquery.Client()
ML_TABLE_ID = f"{args.PROJECT_ID}.{args.BQ_DATASET_NAME}.{args.BQ_ML_TABLE_NAME}"
CLEAN_TABLE_ID = f"{args.PROJECT_ID}.{args.BQ_DATASET_NAME}.{args.BQ_CLEAN_TABLE_NAME}"
ml_query = query.replace("@ML_TABLE_ID", ML_TABLE_ID).replace("@CLEAN_TABLE_ID", CLEAN_TABLE_ID)
logging.info('BQ make ML dataset starting...')
try:
job = client.query(ml_query)
_ = job.result()
logging.info('BQ make ML dataset complete')
except BaseException as error:
logging.error('An exception occurred: {}'.format(error))
destination_table = client.get_table(ML_TABLE_ID) # Make an API request.
logging.info("Loaded %s rows into %s.",destination_table.num_rows, ML_TABLE_ID)
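# Example invocation (project, bucket, and URL are placeholders):
#
#   python data_download.py \
#     --PROJECT_ID=my-gcp-project \
#     --GCS_BUCKET=my-bucket \
#     --URL=https://example.com/online_retail.xlsx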
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--PROJECT_ID", dest="PROJECT_ID", type=str, required=True)
parser.add_argument("--GCS_BUCKET", dest="GCS_BUCKET", type=str, required=True)
parser.add_argument("--URL", dest="URL", type=str, required=True)
parser.add_argument("--BQ_DATASET_NAME", dest="BQ_DATASET_NAME", type=str, default="online_retail")
parser.add_argument("--BQ_LOCATION", dest="BQ_LOCATION", type=str, default="US")
parser.add_argument("--BQ_RAW_TABLE_NAME", dest="BQ_RAW_TABLE_NAME", type=str, default="online_retail_clv_raw")
parser.add_argument("--BQ_CLEAN_TABLE_NAME", dest="BQ_CLEAN_TABLE_NAME", type=str, default="online_retail_clv_clean")
parser.add_argument("--BQ_ML_TABLE_NAME", dest="BQ_ML_TABLE_NAME", type=str, default="online_retail_clv_ml")
args = parser.parse_args()
logging.basicConfig(
level=logging.INFO,
format="\n %(asctime)s [%(levelname)s] %(message)s",
handlers=[logging.StreamHandler()]
)
download_url2gcs(args)
upload_gcs2bq(args, table_schema)
make_dataset_clean_bq(args, dataset_clean_query)
make_dataset_ml_bq(args, dataset_ml_query) |
cLoops/cDBSCAN.py | rikrdo89/cLoops | 105 | 12715283 | <gh_stars>100-1000
#--coding:utf-8 --
"""
"""
class cDBSCAN:
"""
    The major class of the cDBSCAN algorithm, belongs to <NAME>, <NAME>.
"""
def __init__(self, mat, eps, minPts):
"""
@param mat: the raw or normalized [pointId,X,Y] data matrix
@type mat : np.array
@param eps: The clustering distance threshold, key parameter in DBSCAN.
@type eps: float
        @param minPts: The minimum number of points in the neighborhood to define a core point, key
        parameter in DBSCAN.
@type minPts: int
"""
#: build the data in the class for global use
self.eps = eps
self.minPts = minPts
#: cell width, city block distance
self.cw = self.eps
#: build the square index for quick neighbor search
self.buildGrids(mat)
#: get the points for all neighbors
self.buildGridNeighbors()
#: remove noise grids
self.removeNoiseGrids()
#: get the points for all neighbors
self.buildGridNeighbors()
#: get the clusters
self.callClusters()
del self.Gs, self.Gs2, self.ps
def getDist(self, p, q):
"""
        Basic function 1, city block distance function.
"""
x = self.ps[p]
y = self.ps[q]
d = abs(x[0] - y[0]) + abs(x[1] - y[1])
#euclidean distance ,just in case.
#d = np.sqrt((x[0] - y[0])**2 + (x[1] - y[1])**2)
return d
def getNearbyGrids(self, cell):
"""
        Basic function 2, 9 grids as searching neighbors, grid width is eps.
"""
x, y = cell[0], cell[1]
#keys = [(x, y),
keys = [(x, y - 1), (x, y + 1), (x - 1, y), (x + 1, y), (x - 1, y - 1),
(x - 1, y + 1), (x + 1, y - 1), (x + 1, y + 1)]
#keys = [(x, y), (x, y - 1), (x, y + 1), (x - 1, y), (x - 1, y - 1),
# (x - 1, y + 1), (x + 1, y), (x + 1, y - 1), (x + 1, y + 1),
# (x, y + 2), (x, y - 2), (x + 1, y + 2), (x + 1, y - 2),
# (x - 1, y + 2), (x - 1, y - 2), (x + 2, y), (x + 2, y + 1),
# (x + 2, y - 1), (x - 2, y), (x - 2, y + 1), (x - 2, y - 1)]
ncells = []
for key in keys:
if key in self.Gs:
ncells.append(key)
return ncells
def buildGrids(self, mat):
"""
Algorithm 1: Construct the grids.
@param mat: the raw or normalized [pointId,X,Y] data matrix
"""
minX, minY = mat[0][1], mat[0][2]
for t in mat:
minX = min([minX, t[1]])
minY = min([minY, t[2]])
Gs = {}
ps = {}
for d in mat:
nx = int((d[1] - minX) / self.cw) + 1
ny = int((d[2] - minY) / self.cw) + 1
Gs.setdefault((nx, ny), [])
Gs[(nx, ny)].append(d[0])
#last elements marks the class, initially -1 as noise
ps[d[0]] = [d[1], d[2], nx, ny, -1]
self.Gs, self.ps = Gs, ps
def buildGridNeighbors(self):
"""
Algorithm 2 : Grid index with all neighbor points.
"""
Gs2 = {}
for cell in self.Gs.keys():
nps = []
nps.extend(self.Gs[cell])
for cellj in self.getNearbyGrids(cell):
nps.extend(self.Gs[cellj])
Gs2[cell] = nps
self.Gs2 = Gs2
def removeNoiseGrids(self):
"""
Algorithm 3: Remove noise grid according to KNN and get the obvious core points and core grids.
"""
#: noise cells without neighbors
tode = set()
#: noise cells with neighbors
tode2 = set()
for cell in self.Gs.keys():
if len(self.Gs2[cell]) < self.minPts:
tode2.add(cell)
#KNN to noise cells with neighbors
for cell in tode2:
cells = self.getNearbyGrids(cell)
ncells = set(cells) & tode2
#all neighbor cells are noise
if len(cells) == len(ncells):
tode.add(cell)
for cell in tode:
for p in self.Gs[cell]:
del self.ps[p]
del self.Gs[cell]
def callClusters(self):
"""
Algorithm 4: Do DBSCAN clustering by go through all points in the sets.
"""
#: clustering id, noise is -2 and unclassified point is -1.
clusterId = 0
for key in self.ps:
if self.ps[key][-1] == -1:
if self.expandCluster(key, clusterId):
clusterId += 1
#remove the noise and unclassified points
labels = {}
cs = {}
for p in self.ps.keys():
c = self.ps[p][-1]
if c == -2:
continue
labels[p] = c
if c not in cs:
cs[c] = []
cs[c].append(p)
for key in cs.keys():
if len(cs[key]) < self.minPts:
for p in cs[key]:
del labels[p]
self.labels = labels
def expandCluster(self, pointKey, clusterId):
"""
Search connection for given point to others.
@param pointKey: the key in self.dataPoints
@type pointKey:
@param clusterId: the cluster id for the current
@type clusterId: int
@return: bool
"""
seeds = self.regionQuery(pointKey)
if len(seeds) < self.minPts:
self.ps[pointKey][-1] = -2
return False
else:
for key in seeds:
self.ps[key][-1] = clusterId
while len(seeds) > 0:
currentP = seeds[0]
result = self.regionQuery(currentP)
if len(result) >= self.minPts:
for key in result:
if self.ps[key][-1] in [-1, -2]:
if self.ps[key][-1] == -1:
seeds.append(key)
self.ps[key][-1] = clusterId
del (seeds[0])
return True
def regionQuery(self, pointKey):
"""
Find the related points to the queried point, city block distance is used.
@param pointKey: the key in self.dataPoints
@type pointKey:
@return: list
"""
p = self.ps[pointKey]
x = p[2]
y = p[3]
#scan square and get nearby points.
result = [pointKey]
for q in self.Gs2[(x, y)]:
if q == pointKey:
continue
if self.getDist(pointKey, q) <= self.eps:
result.append(q)
return result
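# Usage sketch (toy coordinates): each row of mat is [pointId, X, Y]; after
# construction, .labels maps every clustered point id to its cluster id, with
# noise points dropped.
#
#   mat = [[0, 1, 1], [1, 1, 2], [2, 2, 1], [3, 50, 50]]
#   c = cDBSCAN(mat, eps=2, minPts=2)
#   print(c.labels)   # e.g. {0: 0, 1: 0, 2: 0}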
|
lightbus/__init__.py | C0DK/lightbus | 178 | 12715284 | from lightbus.utilities.logging import configure_logging
from lightbus.transports import *
from lightbus.client import BusClient
from lightbus.path import *
from lightbus.message import *
from lightbus.api import *
from lightbus.schema import *
from lightbus.creation import *
from lightbus.client.utilities import OnError
from lightbus.exceptions import *
|