Dataset schema (one row per source file): hexsha, size, ext, lang,
max_stars_repo_path, max_stars_repo_name, max_stars_repo_head_hexsha,
max_stars_repo_licenses, max_stars_count, max_stars_repo_stars_event_min_datetime,
max_stars_repo_stars_event_max_datetime, max_issues_repo_path, max_issues_repo_name,
max_issues_repo_head_hexsha, max_issues_repo_licenses, max_issues_count,
max_issues_repo_issues_event_min_datetime, max_issues_repo_issues_event_max_datetime,
max_forks_repo_path, max_forks_repo_name, max_forks_repo_head_hexsha,
max_forks_repo_licenses, max_forks_count, max_forks_repo_forks_event_min_datetime,
max_forks_repo_forks_event_max_datetime, content, avg_line_length, max_line_length,
alphanum_fraction, count_classes, score_classes, count_generators, score_generators,
count_decorators, score_decorators, count_async_functions, score_async_functions,
count_documentation, score_documentation.

File: examples/j1j2_2d_exact_4.py (repo: vigsterkr/FlowKet, license: MIT, size: 3,912 bytes)

from collections import OrderedDict
import itertools
import sys
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from flowket.callbacks import TensorBoard
from flowket.callbacks.exact import default_wave_function_callbacks_factory, ExactObservableCallback
from flowket.operators.j1j2 import J1J2
from flowket.operators import NetketOperatorWrapper
from flowket.machines import ConvNetAutoregressive2D
from flowket.optimization import ExactVariational, VariationalMonteCarlo, loss_for_energy_minimization
from flowket.samplers import FastAutoregressiveSampler
from flowket.optimizers import convert_to_accumulate_gradient_optimizer
import numpy
import netket
def total_spin_netket_operator(hilbert_state_shape):
edge_colors = []
for i in range(numpy.prod(hilbert_state_shape)):
edge_colors.append([i, i, 1])
g = netket.graph.CustomGraph(edge_colors)
hi = netket.hilbert.Spin(s=0.5, graph=g)
sigmaz = [[1, 0], [0, -1]]
sigmax = [[0, 1], [1, 0]]
sigmay = [[0, -1j], [1j, 0]]
interaction = numpy.kron(sigmaz, sigmaz) + numpy.kron(sigmax, sigmax) + numpy.kron(sigmay, sigmay)
bond_operator = [
(interaction).tolist(),
]
bond_color = [1]
return netket.operator.GraphOperator(hi, bondops=bond_operator, bondops_colors=bond_color)
params_grid_config = {
'width': [32],
'depth': [5],
'lr': [5e-3, 1e-3],
'weights_normalization': [False, True]
}
run_index = int(sys.argv[-1].strip())
ks, vs = zip(*params_grid_config.items())
params_options = list(itertools.product(*vs))
chosen_v = params_options[run_index % len(params_options)]
params = dict(zip(ks, chosen_v))
print('Chosen params: %s' % str(params))
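# Worked example of the grid selection above (the run index is hypothetical):
# itertools.product over the grid gives 1 * 1 * 2 * 2 = 4 combinations, so run_index = 6
# picks index 6 % 4 = 2, i.e. {'width': 32, 'depth': 5, 'lr': 1e-3, 'weights_normalization': False}.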
hilbert_state_shape = (4, 4)
inputs = Input(shape=hilbert_state_shape, dtype='int8')
convnet = ConvNetAutoregressive2D(inputs, depth=params['depth'], num_of_channels=params['width'],
weights_normalization=params['weights_normalization'])
predictions, conditional_log_probs = convnet.predictions, convnet.conditional_log_probs
model = Model(inputs=inputs, outputs=predictions)
conditional_log_probs_model = Model(inputs=inputs, outputs=conditional_log_probs)
batch_size = 2 ** 12
# For a fair comparison with Monte Carlo, each epoch sees 2 ** 12 * 2 ** 6 = 2 ** 18 samples
steps_per_epoch = 2 ** 6
true_ground_state_energy = -30.022227800323677
operator = J1J2(hilbert_state_shape=hilbert_state_shape, j2=0.5, pbc=False)
exact_variational = ExactVariational(model, operator, batch_size)
optimizer = Adam(lr=params['lr'], beta_1=0.9, beta_2=0.999)
convert_to_accumulate_gradient_optimizer(
optimizer,
update_params_frequency=exact_variational.num_of_batch_until_full_cycle,
accumulate_sum_or_mean=True)
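# Gradients are accumulated (summed) over num_of_batch_until_full_cycle batches, so the
# parameters are updated only once per full cycle of the exact-variational generator
# rather than after every mini-batch.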
model.compile(optimizer=optimizer, loss=loss_for_energy_minimization)
model.summary()
total_spin = NetketOperatorWrapper(total_spin_netket_operator(hilbert_state_shape), hilbert_state_shape)
run_name = 'j1j2_4_exact_weights_normalization_%s_depth_%s_width_%s_adam_lr_%s_run_%s' % \
(params['weights_normalization'], params['depth'], params['width'], params['lr'], run_index)
tensorboard = TensorBoard(log_dir='tensorboard_logs/%s' % run_name,
update_freq='epoch',
write_output=False)
callbacks = default_wave_function_callbacks_factory(exact_variational, log_in_batch_or_epoch=False,
true_ground_state_energy=true_ground_state_energy) + [
ExactObservableCallback(exact_variational, total_spin, 'total_spin', log_in_batch_or_epoch=False),
tensorboard]
model.fit_generator(exact_variational.to_generator(), steps_per_epoch=steps_per_epoch, epochs=1000, callbacks=callbacks,
max_queue_size=0, workers=0)
model.save_weights('final_%s.h5' % run_name)

File: build/package_version/archive_info.py (repo: MicrohexHQ/nacl_contracts, license: BSD-3-Clause, size: 3,833 bytes)

#!/usr/bin/python
# Copyright (c) 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A archive_info is a json file describing a single package archive."""
import collections
import hashlib
import json
import os
ArchiveInfoTuple = collections.namedtuple(
'ArchiveInfoTuple',
['name', 'hash', 'url', 'tar_src_dir', 'extract_dir'])
def GetArchiveHash(archive_file):
"""Gets the standardized hash value for a given archive.
This hash value is the expected value used to verify package archives.
Args:
archive_file: Path to archive file to hash.
Returns:
Hash value of archive file, or None if file is invalid.
"""
if os.path.isfile(archive_file):
with open(archive_file, 'rb') as f:
return hashlib.sha1(f.read()).hexdigest()
return None
class ArchiveInfo(object):
"""A archive_info file is a single json file describine an archive.
Archive Fields:
name: Name of the package archive.
hash: Hash value of the package archive, for validation purposes.
url: Web URL location where the archive can be found.
tar_src_dir: Where files are located within the tar archive.
extract_dir: Where files should be extracted to within destination dir.
"""
def __init__(self, name='', archive_hash=0, url=None, tar_src_dir='',
extract_dir='', archive_info_file=None):
"""Initialize ArchiveInfo object.
When an archive_info_file is specified, all other fields are ignored.
Otherwise, uses first fields as constructor for archive info object.
"""
self._archive_tuple = None
if archive_info_file is not None:
self.LoadArchiveInfoFile(archive_info_file)
else:
self.SetArchiveData(name, archive_hash, url, tar_src_dir, extract_dir)
def __eq__(self, other):
return (type(self) == type(other) and
self.GetArchiveData() == other.GetArchiveData())
def __repr__(self):
return "ArchiveInfo(" + str(self._archive_tuple) + ")"
def LoadArchiveInfoFile(self, archive_info_file):
"""Loads a archive info file into this object.
Args:
archive_info_file: Filename or archive info json.
"""
archive_json = None
if isinstance(archive_info_file, dict):
archive_json = archive_info_file
elif isinstance(archive_info_file, basestring):
with open(archive_info_file, 'rt') as f:
archive_json = json.load(f)
else:
raise RuntimeError('Invalid load archive file type (%s): %s',
type(archive_info_file),
archive_info_file)
self._archive_tuple = ArchiveInfoTuple(**archive_json)
def SaveArchiveInfoFile(self, archive_info_file):
"""Saves this object as a serialized JSON file if the object is valid.
Args:
archive_info_file: File path where JSON file will be saved.
"""
if self._archive_tuple and self._archive_tuple.hash:
archive_json = self.DumpArchiveJson()
with open(archive_info_file, 'wt') as f:
json.dump(archive_json, f, sort_keys=True,
indent=2, separators=(',', ': '))
def DumpArchiveJson(self):
"""Returns a dict representation of this object for JSON."""
if self._archive_tuple is None or not self._archive_tuple.hash:
return {}
return dict(self._archive_tuple._asdict())
def SetArchiveData(self, name, archive_hash, url=None, tar_src_dir='',
extract_dir=''):
"""Replaces currently set with new ArchiveInfoTuple."""
self._archive_tuple = ArchiveInfoTuple(name, archive_hash, url,
tar_src_dir, extract_dir)
def GetArchiveData(self):
"""Returns the current ArchiveInfoTuple tuple."""
return self._archive_tuple
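# Minimal usage sketch (the file names below are hypothetical):
#
#   archive_hash = GetArchiveHash('my_package.tar.gz')
#   info = ArchiveInfo(name='my_package.tar.gz', archive_hash=archive_hash,
#                      url='http://example.com/my_package.tar.gz')
#   info.SaveArchiveInfoFile('my_package.json')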

File: tests/test_lsstdoc.py (repo: lsst-sqre/dochub-adapter, license: MIT, size: 3,546 bytes)

"""Ad hoc tests of the LsstLatexDoc class. Other test modules rigorously verify
LsstLatexDoc against sample documents.
"""
from pybtex.database import BibliographyData
import pytest
from lsstprojectmeta.tex.lsstdoc import LsstLatexDoc
def test_no_short_title():
"""title without a short title."""
sample = r"\title{Title}"
lsstdoc = LsstLatexDoc(sample)
assert lsstdoc.title == "Title"
def test_title_variations():
"""Test variations on the title command's formatting."""
# Test with whitespace in title command
input_txt = r"\title [Test Plan] { \product ~Test Plan}"
lsstdoc = LsstLatexDoc(input_txt)
assert lsstdoc.title == r"\product ~Test Plan"
assert lsstdoc.short_title == "Test Plan"
def test_author_variations():
"""Test variations on the author command's formatting."""
input_txt = (r"\author {William O'Mullane, Mario Juric, "
r"Frossie Economou}"
r" % the author(s)")
lsstdoc = LsstLatexDoc(input_txt)
assert lsstdoc.authors == ["William O'Mullane",
"Mario Juric",
"Frossie Economou"]
def test_author_list_amanda():
"""Test author list parsing where one author's name is Amanda.
"""
input_txt = (
r"\author {William O'Mullane, John Swinbank, Leanne Guy, "
r"Amanda Bauer}"
)
expected = [
"William O'Mullane",
"John Swinbank",
"Leanne Guy",
"Amanda Bauer"
]
lsstdoc = LsstLatexDoc(input_txt)
assert lsstdoc.authors == expected
def test_author_list_and():
input_txt = r"\author{A.~Author, B.~Author, and C.~Author}"
expected = ['A. Author', 'B. Author', 'C. Author']
lsstdoc = LsstLatexDoc(input_txt)
assert lsstdoc.authors == expected
def test_handle_variations():
"""Test variations on the handle command's formatting."""
input_txt = r"\setDocRef {LDM-503} % the reference code "
lsstdoc = LsstLatexDoc(input_txt)
assert lsstdoc.handle == "LDM-503"
def test_abstract_variations():
"""Test variations on the abstract command's formatting."""
input_txt = (r"\setDocAbstract {" + "\n"
r"This is the Test Plan for \product. In it we define terms "
r"associated with testing and further test specifications "
r"for specific items.}")
expected_abstract = (
r"This is the Test Plan for \product. In it we define terms "
r"associated with testing and further test specifications for "
r"specific items."
)
lsstdoc = LsstLatexDoc(input_txt)
assert lsstdoc.abstract == expected_abstract
@pytest.mark.parametrize(
'sample, expected',
[(r'\documentclass[DM,lsstdraft,toc]{lsstdoc}', True),
(r'\documentclass[DM,toc]{lsstdoc}', False),
(r'\documentclass[DM, lsstdraft, toc]{lsstdoc}', True)])
def test_is_draft(sample, expected):
lsstdoc = LsstLatexDoc(sample)
assert lsstdoc.is_draft == expected
def test_html_title():
sample = r"\title{``Complex'' title \textit{like} $1+2$}"
expected = ('“Complex” title <em>like</em> '
'<span class="math inline">1\u2005+\u20052</span>\n')
lsstdoc = LsstLatexDoc(sample)
converted = lsstdoc.html_title
assert converted == expected
def test_default_load_bib_db():
"""Test that the common lsst-texmf bibliographies are always loaded.
"""
lsstdoc = LsstLatexDoc('')
assert isinstance(lsstdoc.bib_db, BibliographyData)

File: Exercicios/script030.py (repo: jacksonmoreira/Curso-em-video-mundo1-, license: MIT, size: 454 bytes)

frase = str(input('Type your full name for the analysis to be done: ')).strip()
print('-' * 50)
print('Analyzing name...')
print('Your name in uppercase is {}.'.format(frase.upper()))
print('Your name in lowercase is {}.'.format(frase.lower()))
print('Your name has {} letters in total.'.format(len(frase) - frase.count(' ')))
print('Your first name has {} letters.'.format(frase.find(' ')))
print('Name analyzed successfully!')
print('-' * 50)

File: bluesky_kafka/tests/test_kafka.py (repo: gwbischof/bluesky-kafka, license: BSD-3-Clause, size: 8,012 bytes)

from functools import partial
import logging
import msgpack
import msgpack_numpy as mpn
from confluent_kafka.cimpl import KafkaException
import numpy as np
import pickle
import pytest
from bluesky_kafka import Publisher, BlueskyConsumer
from bluesky_kafka.tests.conftest import get_all_documents_from_queue
from bluesky.plans import count
from event_model import sanitize_doc
# mpn.patch() is recommended by msgpack-numpy as a way
# to patch msgpack but it caused a utf-8 decode error
mpn.patch()
logging.getLogger("bluesky.kafka").setLevel("DEBUG")
# the Kafka test broker should be configured with
# KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true
def test_producer_config():
test_topic = "test.producer.config"
kafka_publisher = Publisher(
topic=test_topic,
bootstrap_servers="1.2.3.4:9092",
key="kafka-unit-test-key",
# work with a single broker
producer_config={
"bootstrap.servers": "5.6.7.8:9092",
"acks": 1,
"enable.idempotence": False,
"request.timeout.ms": 5000,
},
)
assert (
kafka_publisher._producer_config["bootstrap.servers"]
== "1.2.3.4:9092,5.6.7.8:9092"
)
def test_get_cluster_metadata(publisher_factory):
# the topic test.get.cluster.metadata will be created
# by the call to publisher.get_cluster_metadata
# if automatic topic creation is enabled
# otherwise this test will fail
publisher = publisher_factory(topic="test.get.cluster.metadata")
cluster_metadata = publisher.get_cluster_metadata()
assert "test.get.cluster.metadata" in cluster_metadata.topics
def test_get_cluster_metadata_failure(publisher_factory):
publisher = publisher_factory(
topic="test.get.cluster.metadata.failure",
bootstrap_servers="5.6.7.8:9092"
)
with pytest.raises(KafkaException):
publisher.get_cluster_metadata()
def test_consumer_config():
test_topic = "test.consumer.config"
bluesky_consumer = BlueskyConsumer(
topics=[test_topic],
bootstrap_servers="1.2.3.4:9092",
group_id="abc",
consumer_config={
"bootstrap.servers": "5.6.7.8:9092",
"auto.offset.reset": "latest",
},
)
assert (
bluesky_consumer._consumer_config["bootstrap.servers"]
== "1.2.3.4:9092,5.6.7.8:9092"
)
def test_bad_consumer_config():
test_topic = "test.bad.consumer.config"
with pytest.raises(ValueError) as excinfo:
BlueskyConsumer(
topics=[test_topic],
bootstrap_servers="1.2.3.4:9092",
group_id="abc",
consumer_config={
"bootstrap.servers": "5.6.7.8:9092",
"auto.offset.reset": "latest",
"group.id": "raise an exception!",
},
)
assert (
"do not specify 'group.id' in consumer_config, use only the 'group_id' argument"
        in str(excinfo.value)
)
@pytest.mark.parametrize(
"serializer, deserializer",
[(pickle.dumps, pickle.loads), (msgpack.dumps, msgpack.loads)],
)
def test_kafka_remote_dispatcher(
RE,
hw,
serializer,
deserializer,
publisher_factory,
remote_dispatcher_process_factory,
external_process_document_queue,
):
# COMPONENT 1
# a Kafka broker must be running
# in addition the topic "test.remote.dispatcher" must exist
# or the broker must be configured to create topics on demand (recommended)
# COMPONENT 2
# Run a RemoteDispatcher on a separate process. Pass the documents
# it receives over a Queue to this process so we can count them for
# our test.
test_topic = "test.remote.dispatcher"
with external_process_document_queue(
topics=[test_topic],
deserializer=deserializer,
process_factory=remote_dispatcher_process_factory,
) as document_queue:
# COMPONENT 3
# Set up a RunEngine in this process that will
# send all documents to a bluesky_kafka.Publisher
# and accumulate all documents in the local_documents list
kafka_publisher = publisher_factory(
topic=test_topic, serializer=serializer, flush_on_stop_doc=True
)
RE.subscribe(kafka_publisher)
local_documents = []
RE.subscribe(
lambda local_name, local_doc: local_documents.append(
(local_name, local_doc)
)
)
# test that numpy data is transmitted correctly
md = {
"numpy_data": {"nested": np.array([1, 2, 3])},
"numpy_scalar": np.float64(3),
"numpy_array": np.ones((3, 3)),
}
# documents will be generated by this plan
# and published by the Kafka Publisher
RE(count([hw.det]), md=md)
# retrieve the documents published by the Kafka broker
remote_documents = get_all_documents_from_queue(document_queue=document_queue)
# sanitize_doc normalizes some document data, such as numpy arrays, that are
# problematic for direct comparison of documents by "assert"
sanitized_local_documents = [sanitize_doc(doc) for doc in local_documents]
sanitized_remote_documents = [sanitize_doc(doc) for doc in remote_documents]
assert len(sanitized_remote_documents) == len(sanitized_local_documents)
assert sanitized_remote_documents == sanitized_local_documents
@pytest.mark.parametrize(
"serializer, deserializer",
[(pickle.dumps, pickle.loads), (msgpack.dumps, msgpack.loads)],
)
def test_bluesky_consumer(
RE,
hw,
serializer,
deserializer,
publisher_factory,
consumer_process_factory,
external_process_document_queue,
):
# COMPONENT 1
# a Kafka broker must be running
# in addition the broker must have topic "test.bluesky.consumer"
# or be configured to create topics on demand (recommended)
# COMPONENT 2
# Run a BlueskyConsumer polling loop in a separate process.
# Pass the documents it receives over a Queue to this process
# and compare them against the documents published directly
# by the RunEngine.
test_topic = "test.bluesky.consumer"
with external_process_document_queue(
topics=[test_topic],
deserializer=deserializer,
process_factory=partial(
consumer_process_factory, consumer_factory=BlueskyConsumer
),
) as document_queue:
# COMPONENT 3
# Set up a RunEngine in this process that will
# send all documents to a bluesky_kafka.Publisher
# and accumulate all documents in the local_documents list
kafka_publisher = publisher_factory(
topic=test_topic, serializer=serializer, flush_on_stop_doc=True
)
RE.subscribe(kafka_publisher)
local_documents = []
RE.subscribe(
lambda local_name, local_doc: local_documents.append(
(local_name, local_doc)
)
)
# test that numpy data is transmitted correctly
md = {
"numpy_data": {"nested": np.array([1, 2, 3])},
"numpy_scalar": np.float64(3),
"numpy_array": np.ones((3, 3)),
}
# documents will be generated by this plan
# and published by the Kafka Publisher
RE(count([hw.det]), md=md)
# retrieve the documents published by the Kafka broker
remote_documents = get_all_documents_from_queue(document_queue=document_queue)
# sanitize_doc normalizes some document data, such as numpy arrays, that are
# problematic for direct comparison of documents by "assert"
sanitized_local_documents = [sanitize_doc(doc) for doc in local_documents]
sanitized_remote_documents = [sanitize_doc(doc) for doc in remote_documents]
assert len(sanitized_remote_documents) == len(sanitized_local_documents)
assert sanitized_remote_documents == sanitized_local_documents

File: Chapter01/datastructures_06.py (repo: vabyte/Modern-Python-Standard-Library-Cookbook, license: MIT, size: 407 bytes)

import time
import heapq
class PriorityQueue(object):
def __init__(self):
self._q = []
def add(self, value, priority=0):
heapq.heappush(self._q, (priority, time.time(), value))
def pop(self):
return heapq.heappop(self._q)[-1]
def f1(): print('hello')
def f2(): print('world')
pq = PriorityQueue()
pq.add(f2, priority=1)
pq.add(f1, priority=0)
pq.pop()()
pq.pop()()
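# f1 was added with priority=0, so it is popped and called before f2 (priority=1):
# heapq orders the (priority, time.time(), value) tuples by priority first, with the
# insertion timestamp breaking ties. Expected output: "hello" then "world".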

File: webshots/popular-websites.py (repo: acamero/evo-web, license: MIT, size: 1,080 bytes)

import requests
import sys
from lxml import html
#csv_file_name = sys.argv[1] # output file
csv_file_name = "../webshot_data/popular-web-sites.csv"
csv_file = open(csv_file_name, "w")
categories = ["Arts", "Business", "Computers", "Games", "Health", "Home", "Kids_and_Teens", "News", "Recreation", "Reference", "Regional", "Science", "Shopping", "Society", "Sports", "World"]
# categories = ["Adult", "Arts", "Business", "Computers", "Games", "Health", "Home", "Kids_and_Teens", "News", "Recreation", "Reference", "Regional", "Science", "Shopping", "Society", "Sports", "World"]
base = "http://www.alexa.com/topsites/category/Top/"
for category in categories:
path = base + category
print path
r = requests.get(path)
tree = html.fromstring(r.content)
trs = tree.xpath('.//a/@href')
for tr in trs:
if tr.startswith( '/siteinfo/' ) :
wp = tr.replace( '/siteinfo/', '' )
if len(wp) > 1:
print wp
csv_file.write( category + ',' + wp + '\n')
# end for
# end for
csv_file.close()

File: sunpy/coordinates/frames.py (repo: s0nskar/sunpy, license: MIT, size: 13,653 bytes)

"""
Common solar physics coordinate systems.
This submodule implements various solar physics coordinate frames for use with
the `astropy.coordinates` module.
"""
from __future__ import absolute_import, division
import numpy as np
from astropy import units as u
from astropy.coordinates.representation import (CartesianRepresentation,
UnitSphericalRepresentation,
SphericalRepresentation)
from astropy.coordinates.baseframe import (BaseCoordinateFrame,
RepresentationMapping)
from astropy.coordinates import FrameAttribute
from sunpy import sun # For Carrington rotation number
from .representation import (SphericalWrap180Representation,
UnitSphericalWrap180Representation)
from .frameattributes import TimeFrameAttributeSunPy
RSUN_METERS = sun.constants.get('radius').si.to(u.m)
DSUN_METERS = sun.constants.get('mean distance').si.to(u.m)
__all__ = ['HeliographicStonyhurst', 'HeliographicCarrington',
'Heliocentric', 'Helioprojective']
class HeliographicStonyhurst(BaseCoordinateFrame):
"""
A coordinate or frame in the Stonyhurst Heliographic
system.
This frame has its origin at the solar centre and the north pole above the
solar north pole, and the zero line on longitude pointing towards the
Earth.
Parameters
----------
representation: `~astropy.coordinates.BaseRepresentation` or `None`
A representation object or None to have no data.
lon: `Angle` object.
The longitude for this object (``lat`` must also be given and
``representation`` must be None).
lat: `Angle` object.
The latitude for this object (``lon`` must also be given and
``representation`` must be None).
radius: `astropy.units.Quantity` object.
This quantity holds the radial distance. If not specified, it is, by
default, the radius of the photosphere. Optional.
Examples
--------
>>> sc = SkyCoord(1*u.deg, 1*u.deg, 2*u.km, frame="heliographic_stonyhurst",
dateobs="2010/01/01T00:00:45")
>>> sc
<SkyCoord (HelioGraphicStonyhurst): dateobs=2010-01-01 00:00:45,
lon=1.0 deg, lat=1.0 deg, rad=2.0 km>
>>> sc.frame
<HelioGraphicStonyhurst Coordinate: dateobs=2010-01-01 00:00:45,
lon=1.0 deg, lat=1.0 deg, rad=2.0 km>
>>> sc = SkyCoord(HelioGraphicStonyhurst(-10*u.deg, 2*u.deg))
>>> sc
<SkyCoord (HelioGraphicStonyhurst): dateobs=None, lon=-10.0 deg,
lat=2.0 deg, rad=695508.0 km>
Notes
-----
    This frame will always be converted to a 3D frame where the radius defaults to
rsun.
"""
name = "heliographic_stonyhurst"
default_representation = SphericalWrap180Representation
_frame_specific_representation_info = {
'spherical': [RepresentationMapping('lon', 'lon', 'recommended'),
RepresentationMapping('lat', 'lat', 'recommended'),
RepresentationMapping('distance', 'radius', 'recommended')],
'sphericalwrap180': [RepresentationMapping('lon', 'lon', 'recommended'),
RepresentationMapping('lat', 'lat', 'recommended'),
RepresentationMapping('distance', 'radius', 'recommended')]
}
dateobs = TimeFrameAttributeSunPy()
def __init__(self, *args, **kwargs):
_rep_kwarg = kwargs.get('representation', None)
super(HeliographicStonyhurst, self).__init__(*args, **kwargs)
# Make 3D if specified as 2D
# If representation was explicitly passed, do not change the rep.
if not _rep_kwarg:
# The base __init__ will make this a UnitSphericalRepresentation
# This makes it Wrap180 instead
if isinstance(self._data, UnitSphericalRepresentation):
self._data = SphericalWrap180Representation(lat=self._data.lat,
lon=self._data.lon,
distance=RSUN_METERS.to(u.km))
self.representation = SphericalWrap180Representation
# Make a Spherical Wrap180 instead
if isinstance(self._data, SphericalRepresentation):
self._data = SphericalWrap180Representation(lat=self._data.lat,
lon=self._data.lon,
distance=self._data.distance)
self.representation = SphericalWrap180Representation
class HeliographicCarrington(HeliographicStonyhurst):
"""
A coordinate or frame in the Carrington Heliographic
system.
This frame differs from the Stonyhurst version in the
definition of the longitude, which is defined using
an offset which is a time-dependent scalar value.
Parameters
----------
representation: `~astropy.coordinates.BaseRepresentation` or None.
A representation object. If specified, other parameters must
be in keyword form.
lon: `Angle` object.
The longitude for this object (``lat`` must also be given and
``representation`` must be None).
lat: `Angle` object.
The latitude for this object (``lon`` must also be given and
``representation`` must be None).
radius: `astropy.units.Quantity` object, optional, must be keyword.
This quantity holds the radial distance. If not specified, it is, by
default, the solar radius. Optional, must be keyword.
Examples
--------
>>> sc = SkyCoord(1*u.deg, 2*u.deg, 3*u.km, frame="heliographic_carrington",
dateobs="2010/01/01T00:00:30")
>>> sc
<SkyCoord (HelioGraphicCarrington): dateobs=2010-01-01 00:00:30,
lon=1.0 deg, lat=2.0 deg, rad=3.0 km>
>>> sc = SkyCoord([1,2,3]*u.deg, [4,5,6]*u.deg, [5,6,7]*u.km,
dateobs="2010/01/01T00:00:45", frame="heliographic_carrington")
>>> sc
<SkyCoord (HelioGraphicCarrington): dateobs=2010-01-01 00:00:45,
(lon, lat, rad) in (deg, deg, km)
[(1.0, 4.0, 5.0), (2.0, 5.0, 6.0), (3.0, 6.0, 7.0)]>
"""
name = "heliographic_carrington"
default_representation = SphericalWrap180Representation
_frame_specific_representation_info = {
'spherical': [RepresentationMapping('lon', 'lon', 'recommended'),
RepresentationMapping('lat', 'lat', 'recommended'),
RepresentationMapping('distance', 'radius', 'recommended')],
'sphericalwrap180': [RepresentationMapping('lon', 'lon', 'recommended'),
RepresentationMapping('lat', 'lat', 'recommended'),
RepresentationMapping('distance', 'radius', 'recommended')]
}
dateobs = TimeFrameAttributeSunPy()
class Heliocentric(BaseCoordinateFrame):
"""
A coordinate or frame in the Heliocentric system.
This frame may either be specified in Cartesian
or cylindrical representation.
Cylindrical representation replaces (x, y) with
(rho, psi) where rho is the impact parameter and
psi is the position angle in degrees.
Parameters
----------
representation: `~astropy.coordinates.BaseRepresentation` or None.
A representation object. If specified, other parameters must
be in keyword form and if x, y and z are specified, it must
be None.
x: `Quantity` object.
X-axis coordinate, optional, must be keyword.
y: `Quantity` object.
Y-axis coordinate, optional, must be keyword.
z: `Quantity` object. Shared by both representations.
Z-axis coordinate, optional, must be keyword.
D0: `Quantity` object.
Represents the distance between the observer and the Sun center.
Defaults to 1AU.
Examples
--------
>>> sc = SkyCoord(CartesianRepresentation(10*u.km, 1*u.km, 2*u.km),
dateobs="2011/01/05T00:00:50", frame="heliocentric")
>>> sc
<SkyCoord (HelioCentric): dateobs=2011-01-05 00:00:50, D0=149597870.7 km,
x=10.0 km, y=1.0 km, z=2.0 km>
>>> sc = SkyCoord([1,2]*u.km, [3,4]*u.m, [5,6]*u.cm, frame="heliocentric",
dateobs="2011/01/01T00:00:54")
>>> sc
<SkyCoord (HelioCentric): dateobs=2011-01-01 00:00:54, D0=149597870.7 km,
(x, y, z) in (km, m, cm)
[(1.0, 3.0, 5.0), (2.0, 4.0, 6.0)]>
"""
default_representation = CartesianRepresentation
_frame_specific_representation_info = {
'cylindrical': [RepresentationMapping('phi', 'psi', u.deg)]}
# d = FrameAttribute(default=(1*u.au).to(u.km))
D0 = FrameAttribute(default=(1*u.au).to(u.km))
dateobs = TimeFrameAttributeSunPy()
L0 = FrameAttribute(default=0*u.deg)
B0 = FrameAttribute(default=0*u.deg)
class Helioprojective(BaseCoordinateFrame):
"""
A coordinate or frame in the Helioprojective (Cartesian) system.
This is a projective coordinate system centered around the observer.
It is a full spherical coordinate system with position given as longitude
theta_x and latitude theta_y.
Parameters
----------
representation: `~astropy.coordinates.BaseRepresentation` or None.
A representation object. If specified, other parameters must
be in keyword form.
Tx: `Angle` object.
X-axis coordinate.
Ty: `Angle` object.
Y-axis coordinate.
distance: Z-axis coordinate.
The radial distance from the observer to the coordinate point.
D0: `Quantity` object.
Represents the distance between observer and solar center.
Defaults to 1AU.
Examples
--------
>>> sc = SkyCoord(0*u.deg, 0*u.deg, 5*u.km, dateobs="2010/01/01T00:00:00",
frame="helioprojective")
>>> sc
<SkyCoord (HelioProjective): dateobs=2010-01-01 00:00:00, D0=149597870.7 km
, Tx=0.0 arcsec, Ty=0.0 arcsec, distance=5.0 km>
>>> sc = SkyCoord(0*u.deg, 0*u.deg, dateobs="2010/01/01T00:00:00",
frame="helioprojective")
>>> sc
<SkyCoord (HelioProjective): dateobs=2010-01-01 00:00:00, D0=149597870.7 km
, Tx=0.0 arcsec, Ty=0.0 arcsec, distance=149597870.7 km>
"""
default_representation = SphericalWrap180Representation
_frame_specific_representation_info = {
'spherical': [RepresentationMapping('lon', 'Tx', u.arcsec),
RepresentationMapping('lat', 'Ty', u.arcsec),
RepresentationMapping('distance', 'distance', u.km)],
'sphericalwrap180': [RepresentationMapping('lon', 'Tx', u.arcsec),
RepresentationMapping('lat', 'Ty', u.arcsec),
RepresentationMapping('distance', 'distance', u.km)],
'unitspherical': [RepresentationMapping('lon', 'Tx', u.arcsec),
RepresentationMapping('lat', 'Ty', u.arcsec)],
'unitsphericalwrap180': [RepresentationMapping('lon', 'Tx', u.arcsec),
RepresentationMapping('lat', 'Ty', u.arcsec)]}
D0 = FrameAttribute(default=(1*u.au).to(u.km))
dateobs = TimeFrameAttributeSunPy()
L0 = FrameAttribute(default=0*u.deg)
B0 = FrameAttribute(default=0*u.deg)
rsun = FrameAttribute(default=RSUN_METERS.to(u.km))
def __init__(self, *args, **kwargs):
_rep_kwarg = kwargs.get('representation', None)
BaseCoordinateFrame.__init__(self, *args, **kwargs)
# Convert from Spherical to SphericalWrap180
# If representation was explicitly passed, do not change the rep.
if not _rep_kwarg:
# The base __init__ will make this a UnitSphericalRepresentation
# This makes it Wrap180 instead
if isinstance(self._data, UnitSphericalRepresentation):
self._data = UnitSphericalWrap180Representation(lat=self._data.lat,
lon=self._data.lon)
self.representation = UnitSphericalWrap180Representation
# Make a Spherical Wrap180 instead
elif isinstance(self._data, SphericalRepresentation):
self._data = SphericalWrap180Representation(lat=self._data.lat,
lon=self._data.lon,
distance=self._data.distance)
self.representation = SphericalWrap180Representation
def calculate_distance(self):
"""
        This method calculates the third coordinate of the Helioprojective
frame. It assumes that the coordinate point is on the disk of the Sun
at the rsun radius.
If a point in the frame is off limb then NaN will be returned.
Returns
-------
new_frame : `~sunpy.coordinates.frames.HelioProjective`
A new frame instance with all the attributes of the original but
now with a third coordinate.
"""
# Skip if we already are 3D
if isinstance(self._data, SphericalRepresentation):
return self
rep = self.represent_as(UnitSphericalWrap180Representation)
lat, lon = rep.lat, rep.lon
alpha = np.arccos(np.cos(lat) * np.cos(lon)).to(lat.unit)
c = self.D0**2 - self.rsun**2
b = -2 * self.D0.to(u.m) * np.cos(alpha)
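        # Law of cosines: a point on the sphere of radius rsun seen at angle alpha by
        # an observer at distance D0 satisfies d**2 + b*d + c = 0 in the line-of-sight
        # distance d; the root taken below is the near-side (visible) intersection.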
d = ((-1*b) - np.sqrt(b**2 - 4*c)) / 2
return self.realize_frame(SphericalWrap180Representation(lon=lon,
lat=lat,
distance=d))

File: python/ray/serve/tests/test_pipeline_dag.py (repo: quarkzou/ray, license: Apache-2.0, size: 4,640 bytes)

import pytest
import os
import sys
import numpy as np
import ray
from ray import serve
from ray.serve.api import _get_deployments_from_node
from ray.serve.handle import PipelineHandle
from ray.serve.pipeline.pipeline_input_node import PipelineInputNode
@serve.deployment
class Adder:
def __init__(self, increment: int):
self.increment = increment
def forward(self, inp: int) -> int:
print(f"Adder got {inp}")
return inp + self.increment
__call__ = forward
@serve.deployment
class Driver:
def __init__(self, dag: PipelineHandle):
self.dag = dag
def __call__(self, inp: int) -> int:
print(f"Driver got {inp}")
return ray.get(self.dag.remote(inp))
@serve.deployment
class Echo:
def __init__(self, s: str):
self._s = s
def __call__(self, *args):
return self._s
@ray.remote
def combine(*args):
return sum(args)
def test_single_node_deploy_success(serve_instance):
m1 = Adder.bind(1)
handle = serve.run(m1)
assert ray.get(handle.remote(41)) == 42
def test_single_node_driver_sucess(serve_instance):
m1 = Adder.bind(1)
m2 = Adder.bind(2)
with PipelineInputNode() as input_node:
out = m1.forward.bind(input_node)
out = m2.forward.bind(out)
driver = Driver.bind(out)
handle = serve.run(driver)
assert ray.get(handle.remote(39)) == 42
def test_options_and_names(serve_instance):
m1 = Adder.bind(1)
m1_built = _get_deployments_from_node(m1)[-1]
assert m1_built.name == "Adder"
m1 = Adder.options(name="Adder2").bind(1)
m1_built = _get_deployments_from_node(m1)[-1]
assert m1_built.name == "Adder2"
m1 = Adder.options(num_replicas=2).bind(1)
m1_built = _get_deployments_from_node(m1)[-1]
assert m1_built.num_replicas == 2
@pytest.mark.skip("TODO")
def test_mixing_task(serve_instance):
m1 = Adder.bind(1)
m2 = Adder.bind(2)
with PipelineInputNode() as input_node:
out = combine.bind(m1.forward.bind(input_node), m2.forward.bind(input_node))
driver = Driver.bind(out)
handle = serve.run(driver)
assert ray.get(handle.remote(1)) == 5
@serve.deployment
class TakeHandle:
def __init__(self, handle) -> None:
self.handle = handle
def __call__(self, inp):
return ray.get(self.handle.remote(inp))
def test_passing_handle(serve_instance):
child = Adder.bind(1)
parent = TakeHandle.bind(child)
driver = Driver.bind(parent)
handle = serve.run(driver)
assert ray.get(handle.remote(1)) == 2
def test_passing_handle_in_obj(serve_instance):
@serve.deployment
class Parent:
def __init__(self, d):
self._d = d
async def __call__(self, key):
return await self._d[key].remote()
child1 = Echo.bind("ed")
child2 = Echo.bind("simon")
parent = Parent.bind({"child1": child1, "child2": child2})
handle = serve.run(parent)
assert ray.get(handle.remote("child1")) == "ed"
assert ray.get(handle.remote("child2")) == "simon"
def test_pass_handle_to_multiple(serve_instance):
@serve.deployment
class Child:
def __call__(self, *args):
return os.getpid()
@serve.deployment
class Parent:
def __init__(self, child):
self._child = child
def __call__(self, *args):
return ray.get(self._child.remote())
@serve.deployment
class GrandParent:
def __init__(self, child, parent):
self._child = child
self._parent = parent
def __call__(self, *args):
# Check that the grandparent and parent are talking to the same child.
assert ray.get(self._child.remote()) == ray.get(self._parent.remote())
return "ok"
child = Child.bind()
parent = Parent.bind(child)
grandparent = GrandParent.bind(child, parent)
handle = serve.run(grandparent)
assert ray.get(handle.remote()) == "ok"
def test_non_json_serializable_args(serve_instance):
# Test that we can capture and bind non-json-serializable arguments.
arr1 = np.zeros(100)
arr2 = np.zeros(200)
@serve.deployment
class A:
def __init__(self, arr1):
self.arr1 = arr1
self.arr2 = arr2
def __call__(self, *args):
return self.arr1, self.arr2
handle = serve.run(A.bind(arr1))
ret1, ret2 = ray.get(handle.remote())
assert np.array_equal(ret1, arr1) and np.array_equal(ret2, arr2)
# TODO: check that serve.build raises an exception.
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))

File: code/super_minitaur/script/lpmslib/lputils.py (repo: buenos-dan/quadrupedal_robot, license: MIT, size: 118 bytes)
#helpers
def logd(tag, msg):
print "[Debug-"+tag+"]", msg
def loge(tag, msg):
print "[Error-"+tag+"]", msg
| 13.111111 | 32 | 0.550847 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.271186 |
8bf7e9d1ed3871fd0972273d253da43b826c3e35 | 598 | py | Python | test/data_producer_kafka.py | netgroup/srv6-pm-dockerized | 770976e9e2da56780ae9bb4048360235d2568627 | [
"Apache-2.0"
]
| null | null | null | test/data_producer_kafka.py | netgroup/srv6-pm-dockerized | 770976e9e2da56780ae9bb4048360235d2568627 | [
"Apache-2.0"
]
| null | null | null | test/data_producer_kafka.py | netgroup/srv6-pm-dockerized | 770976e9e2da56780ae9bb4048360235d2568627 | [
"Apache-2.0"
]
| 2 | 2020-07-28T18:12:09.000Z | 2021-02-22T06:31:19.000Z | from kafka import KafkaProducer
from kafka.errors import KafkaError
import json
# produce json messages
producer = KafkaProducer(bootstrap_servers='kafka:9092', security_protocol='PLAINTEXT',
value_serializer=lambda m: json.dumps(m).encode('ascii'))
result = producer.send('ktig', {'measure_id': 1, 'interval': 10, 'timestamp': '',
'color': 'red', 'sender_tx_counter': 50,
'sender_rx_counter': 50, 'reflector_tx_counter': 48,
'reflector_rx_counter': 48})
producer.close()

File: uasyncio.core/test_cb_args.py (repo: Carglglz/micropython-lib, license: PSF-2.0, size: 248 bytes)

try:
import uasyncio.core as asyncio
except:
import asyncio
def cb(a, b):
assert a == "test"
assert b == "test2"
loop.stop()
loop = asyncio.get_event_loop()
loop.call_soon(cb, "test", "test2")
loop.run_forever()
print("OK")

File: labdevices/_mock/ando.py (repo: jkrauth/labdevices, license: MIT, size: 1,491 bytes)

"""
Provides a mock for the plx_gpib_ethernet package used in the
Ando devices.
"""
from unittest.mock import Mock
# The commands that are used in the methods of the
# ANDO devices and typical responses.
QUERY_COMMANDS = {
# Spectrum Analyzer commands
"*IDN?": "ANDO dummy\r\n",
"SWEEP?": "0\r\n",
"SMPL?": " 501\r\n",
"ANA?": " 490.808, 94.958, 19\r\n",
"CTRWL?": "1050.00\r\n",
"SPAN?": "1300.0\r\n",
"CWPLS?": "1\r\n",
"PLMOD?": " 38\r\n",
}
class PLXDummy(Mock):
"""
Mock class for the plx_gpib_ethernet package when using the
ANDO devices as dummy.
"""
@staticmethod
def query(command: str):
"""
        Contains all the query commands used in the ANDO Spectrometer
and returns a valid string.
"""
if command in QUERY_COMMANDS:
return QUERY_COMMANDS[command]
if "LDATA" in command:
return ' 20,-210.00,-210.00,-210.00,-210.00,-75.28,-210.00,-210.00,-210.00,'\
'-210.00,-210.00,-210.00,-210.00,-210.00,-210.00,-210.00, -78.57, -70.96,'\
' -75.37,-210.00,-210.00\r\n'
if "WDATA" in command:
return ' 20, 400.000, 401.300, 402.600, 403.900, 405.200, 406.500, 407.800,'\
' 409.100, 410.400, 411.700, 413.000, 414.300, 415.600, 416.900, 418.200,'\
' 419.500, 420.800, 422.100, 423.400, 424.700\r\n'
return Mock()
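# Minimal sanity-check of the mock (the real Ando driver that would own this adapter
# is not shown here; the LDATA command string is hypothetical, any command containing
# "LDATA" returns the same canned trace):
#
#     dummy = PLXDummy()
#     assert dummy.query("*IDN?") == "ANDO dummy\r\n"
#     assert dummy.query("LDATA0001").startswith(" 20,")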

File: hearthstone/simulator/core/card_graveyard.py (repo: JDBumgardner/stone_ground_hearth_battles, license: Apache-2.0, size: 2,898 bytes)

import sys
from inspect import getmembers, isclass
from typing import Union
from hearthstone.simulator.core.cards import MonsterCard
from hearthstone.simulator.core.events import CardEvent, EVENTS, BuyPhaseContext, CombatPhaseContext
from hearthstone.simulator.core.monster_types import MONSTER_TYPES
class FloatingWatcher(MonsterCard):
tier = 4
monster_type = MONSTER_TYPES.DEMON
pool = MONSTER_TYPES.DEMON
base_attack = 4
base_health = 4
mana_cost = 5
def handle_event_powers(self, event: 'CardEvent', context: Union['BuyPhaseContext', 'CombatPhaseContext']):
if event.event is EVENTS.PLAYER_DAMAGED:
bonus = 4 if self.golden else 2
self.attack += bonus
self.health += bonus
class ElistraTheImmortal(MonsterCard):
tier = 6
monster_type = MONSTER_TYPES.NEUTRAL
base_attack = 4
base_health = 4
base_divine_shield = True
base_reborn = True
divert_taunt_attack = True
legendary = True
class BarrensBlacksmith(MonsterCard):
tier = 3
monster_type = None
base_attack = 3
base_health = 5
def frenzy(self, context: CombatPhaseContext):
bonus = 4 if self.golden else 2
for card in context.friendly_war_party.board:
if card != self:
card.attack += bonus
card.health += bonus
class Siegebreaker(MonsterCard):
tier = 4
monster_type = MONSTER_TYPES.DEMON
pool = MONSTER_TYPES.DEMON
base_attack = 5
base_health = 8
base_taunt = True
mana_cost = 7
def handle_event_powers(self, event: 'CardEvent', context: Union['BuyPhaseContext', 'CombatPhaseContext']):
bonus = 2 if self.golden else 1
if event.event is EVENTS.COMBAT_PREPHASE or (event.event is EVENTS.SUMMON_COMBAT and event.card == self):
demons = [card for card in context.friendly_war_party.board if
card != self and card.check_type(MONSTER_TYPES.DEMON)]
for demon in demons:
demon.attack += bonus
elif event.event is EVENTS.SUMMON_COMBAT and event.card in context.friendly_war_party.board \
and event.card != self and event.card.check_type(MONSTER_TYPES.DEMON):
event.card.attack += bonus
elif event.event is EVENTS.DIES and event.card == self:
demons = [card for card in context.friendly_war_party.board if
card != self and card.check_type(MONSTER_TYPES.DEMON)]
for demon in demons:
demon.attack -= bonus
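# Collect every MonsterCard subclass defined in this module via introspection, so this
# graveyard list does not have to be maintained by hand as cards are moved here.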
REMOVED_CARDS = [member[1] for member in getmembers(sys.modules[__name__],
lambda member: isclass(member) and issubclass(member,
MonsterCard) and member.__module__ == __name__)]

File: cookiecutter_mbam/scan/views.py (repo: tiburona/cookiecutter_mbam, license: BSD-3-Clause, size: 1,026 bytes)

# -*- coding: utf-8 -*-
"""Scan views."""
from flask import Blueprint, render_template, flash, redirect, url_for, session
from flask_login import current_user
from .forms import ScanForm
from .service import ScanService
from cookiecutter_mbam.utils import flash_errors
blueprint = Blueprint('scan', __name__, url_prefix='/scans', static_folder='../static')
from flask import current_app
def debug():
assert current_app.debug == False, "Don't panic! You're here by request of debug()"
@blueprint.route('/add', methods=['GET', 'POST'])
def add():
"""Add a scan."""
form = ScanForm()
if form.validate_on_submit():
f = form.scan_file.data
user_id = str(current_user.get_id())
exp_id = str(session['curr_experiment'])
ScanService(user_id, exp_id).upload(f)
flash('You successfully added a new scan.', 'success')
return redirect(url_for('experiment.experiments'))
else:
flash_errors(form)
    return render_template('scans/upload.html', scan_form=form)

File: python-route-endpoint/test_dbstore.py (repo: blues/note-samples, license: Apache-2.0, size: 3,460 bytes)

import pytest
import dbstore
inMemFile = ":memory:"
measurementTable = "measurements"
alertTable = "alerts"
def test_db_store_constructor():
s = dbstore.dbstore(file=inMemFile)
assert(s != None)
def test_dbStore_connect():
s = dbstore.dbstore(file=inMemFile)
s.connect()
assert s._connection is not None
def test_dbStore_connect_whenConnectionIsOpen():
s = dbstore.dbstore(file=inMemFile)
s.connect()
c = s._connection
s.connect()
assert s._connection == c
def test_dbStore_close_whenConnectionIsOpen():
s = dbstore.dbstore(file=inMemFile)
s.connect()
assert s._connection is not None
s.close()
assert s._connection is None
def test_dbStore_close_whenConnectionIsClosed():
s = dbstore.dbstore(file=inMemFile)
assert s._connection is None
s.close()
assert s._connection is None
def test_dbStore_createTables():
s = dbstore.dbstore(file="inMemFile")
s.connect()
s.createTables()
for n in [measurementTable, alertTable]:
s._cursor.execute(f"SELECT count(name) FROM sqlite_master WHERE type='table' AND name='{n}';")
isTable = s._cursor.fetchone()[0]==1
assert isTable
timestampTestData = "2021-04-29T23:25:44Z"
def generateConnectedInMemDb() -> dbstore.dbstore:
s = dbstore.dbstore(file=inMemFile)
s.connect()
s.createTables()
return s
def test_addMeasurement():
s = generateConnectedInMemDb()
deviceId = "dev:xxxxxxxxxxxx"
measurementType = "sensor1"
timestamp = timestampTestData
value = 3.14
units = "units1"
s.addMeasurement(deviceId, timestamp, measurementType, value, units)
c = s._cursor.execute(f'SELECT * from {measurementTable}')
row = c.fetchone()
assert row[0] == deviceId
assert row[1] == timestamp
assert row[2] == measurementType
assert row[3] == value
assert row[4] == units
def test_addAlert():
s = generateConnectedInMemDb()
deviceId = "dev:xxxxxxxxxxxx"
alertType = "overfill"
timestamp = timestampTestData
message = "message 1"
s.addAlert(deviceId, timestamp, alertType, message)
c = s._cursor.execute(f'SELECT * from {alertTable}')
row = c.fetchone()
assert row[0] == deviceId
assert row[1] == timestamp
assert row[2] == alertType
assert row[3] == message
def test_getAlerts():
s = generateConnectedInMemDb()
deviceId = "dev:xxxxxxxxxxxx"
alertType = "overfill"
timestamp = timestampTestData
message = "message 1"
s.addAlert(deviceId, timestamp, alertType, message)
s.addAlert(deviceId, timestamp, alertType, message)
a = s.getAlerts()
e = [{"deviceId":deviceId,"timestamp":timestamp,"type":alertType,"message":message},
{"deviceId":deviceId,"timestamp":timestamp,"type":alertType,"message":message},]
assert a == e
def test_getAlerts_noAlertsStored():
s = generateConnectedInMemDb()
a = s.getAlerts()
assert a == []
def test_getAlerts_withLimit():
s = generateConnectedInMemDb()
deviceId = "dev:xxxxxxxxxxxx"
alertType = "overfill"
timestamp = timestampTestData
message = "message 1"
s.addAlert(deviceId, timestamp, alertType, message)
s.addAlert(deviceId, timestamp, alertType, message)
a = s.getAlerts(limit=1)
e = [{"deviceId":deviceId,"timestamp":timestamp,"type":alertType,"message":message}]
    assert a == e

File: bin/Notifier/NotificationLoader.py (repo: juergenhoetzel/craft, license: BSD-2-Clause, size: 446 bytes)

import importlib
_NOTIFICATION_BACKENDS = None
def load(modules):
global _NOTIFICATION_BACKENDS;
if _NOTIFICATION_BACKENDS == None:
_NOTIFICATION_BACKENDS = dict()
for backend in modules:
backend = backend.strip()
backend = getattr(importlib.import_module("Notifier.Backends.%s" % backend), backend)()
_NOTIFICATION_BACKENDS[backend.name] = backend
return _NOTIFICATION_BACKENDS
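# Usage sketch (the backend name is hypothetical): load(["Console"]) imports
# Notifier.Backends.Console, instantiates its Console class, and returns the
# instances keyed by each backend's .name attribute.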

File: basics/linear.py (repo: zhijiahu/dltk, license: MIT, size: 604 bytes)
import numpy as np
import cv2
labels = ['dog', 'cat', 'panda']
np.random.seed(1)
# Simulate model already trained
W = np.random.randn(3, 3072)
b = np.random.randn(3)
orig = cv2.imread('beagle.png')
image = cv2.resize(orig, (32, 32)).flatten()
scores = W.dot(image) + b
for (label, score) in zip(labels, scores):
print('[INFO] {}: {:2}'.format(label, score))
cv2.putText(orig,
'Label: {}'.format(labels[np.argmax(scores)]),
(10,30),
cv2.FONT_HERSHEY_SIMPLEX,
0.9,
(0, 255, 0),
2)
cv2.imshow('Image', orig)
cv2.waitKey(0)
| 20.133333 | 58 | 0.574503 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.15894 |
8bfd515b8c9ab45a349fc3b66ded01bb3b315143 | 2,759 | py | Python | sevivi/synchronizer/synchronizer.py | edgarriba/sevivi | 52c8bef206e531c797221a08037306c0c5b0ca59 | [
"MIT"
]
| null | null | null | sevivi/synchronizer/synchronizer.py | edgarriba/sevivi | 52c8bef206e531c797221a08037306c0c5b0ca59 | [
"MIT"
]
| 9 | 2021-09-09T07:40:21.000Z | 2022-01-13T07:03:59.000Z | sevivi/synchronizer/synchronizer.py | edgarriba/sevivi | 52c8bef206e531c797221a08037306c0c5b0ca59 | [
"MIT"
]
| 1 | 2022-01-26T09:51:29.000Z | 2022-01-26T09:51:29.000Z | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from .signal_processing import (
resample_data,
normalize_signal,
calculate_magnitude,
calculate_offset_in_seconds_using_cross_correlation,
calculate_sampling_frequency_from_timestamps,
)
def get_synchronization_offset(
video_sync_df: pd.DataFrame,
sensor_sync_df: pd.DataFrame,
use_gradient: bool,
show_plots: bool = False,
) -> pd.Timedelta:
"""
Get the temporal offset between the two given sensor dataframes.
:param video_sync_df: the synchronization information from the video
:param sensor_sync_df: the synchronization information from the sensor
    :param use_gradient: if True, the second derivative of the video synchronization data will be used. If False,
        the raw data will be used.
:param show_plots: can enable debugging plots
:return: a pd.Timedelta object that specifies how much the sensor_sync_df needs to be moved in time to align it with
the video_sync_df
"""
video_sf = calculate_sampling_frequency_from_timestamps(video_sync_df.index)
sensor_sf = calculate_sampling_frequency_from_timestamps(sensor_sync_df.index)
if use_gradient:
video_acceleration = np.gradient(
np.gradient(video_sync_df.to_numpy(), axis=0), axis=0
)
else:
video_acceleration = video_sync_df.to_numpy()
video_acceleration = resample_data(
video_acceleration,
current_sampling_rate=video_sf,
new_sampling_rate=sensor_sf,
)
video_acceleration = normalize_signal(video_acceleration)
video_acceleration = calculate_magnitude(video_acceleration)
sensor_acceleration = normalize_signal(sensor_sync_df.to_numpy())
sensor_acceleration = calculate_magnitude(sensor_acceleration)
if show_plots:
plt.close()
plt.figure(1)
plt.plot(video_acceleration, label="Kinect")
plt.plot(sensor_acceleration, label="IMU")
plt.xlabel("Time (s)")
plt.ylabel("Acceleration Magnitude (normalized)")
plt.legend()
plt.show()
shift = calculate_offset_in_seconds_using_cross_correlation(
ref_signal=video_acceleration,
target_signal=sensor_acceleration,
sampling_frequency=sensor_sf,
)
if show_plots:
plt.close()
plt.figure(1)
plt.plot(video_acceleration, label="Kinect")
plt.plot(
np.arange(len(sensor_acceleration)) + (sensor_sf * shift),
sensor_acceleration,
label="IMU",
)
plt.xlabel("Time (s)")
plt.ylabel("Acceleration (normalized)")
plt.legend()
plt.show()
return pd.Timedelta(seconds=shift)
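# Usage sketch (hypothetical dataframes): both inputs are expected to be indexed by
# timestamps; the returned Timedelta shifts the sensor stream onto the video clock.
#
#     offset = get_synchronization_offset(video_df, imu_df, use_gradient=True)
#     imu_df.index = imu_df.index + offset  # IMU samples now line up with the video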
| 33.240964 | 120 | 0.696629 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 707 | 0.256252 |
8bfd607f605b753ac1980b586075777909511585 | 244 | py | Python | bob.py | williamstern/Intro-to-CS-MIT-Course | 0f6129fa6bd47767cb57507279d49b27501a160f | [
"MIT"
]
| null | null | null | bob.py | williamstern/Intro-to-CS-MIT-Course | 0f6129fa6bd47767cb57507279d49b27501a160f | [
"MIT"
]
| null | null | null | bob.py | williamstern/Intro-to-CS-MIT-Course | 0f6129fa6bd47767cb57507279d49b27501a160f | [
"MIT"
]
| null | null | null |
s = 'vpoboooboboobooboboo'
counter = 0
# Slide a 3-character window over s and count every (possibly overlapping) 'bob'.
for start in range(len(s) - 2):
    if s[start:start + 3] == 'bob':
        counter += 1
print('Number of times bob occurs is: ', counter)
| 10.166667 | 49 | 0.565574 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.245902 |
8bfd9f299f8a3e49d68acee30f35331e05c04631 | 5,469 | py | Python | tests/main.py | bastienleonard/pysfml-cython | c71194988ba90678cbc4c9e6fd3e03f53ac4c2e4 | [
"Zlib",
"BSD-2-Clause"
]
| 14 | 2015-09-14T18:04:27.000Z | 2021-02-19T16:51:57.000Z | tests/main.py | bastienleonard/pysfml-cython | c71194988ba90678cbc4c9e6fd3e03f53ac4c2e4 | [
"Zlib",
"BSD-2-Clause"
]
| 3 | 2015-12-14T17:07:45.000Z | 2021-10-02T05:55:11.000Z | tests/main.py | bastienleonard/pysfml-cython | c71194988ba90678cbc4c9e6fd3e03f53ac4c2e4 | [
"Zlib",
"BSD-2-Clause"
]
| 3 | 2015-04-12T16:57:02.000Z | 2021-02-20T17:15:51.000Z | #! /usr/bin/env python2
# -*- coding: utf-8 -*-
import random
import unittest
import sfml as sf
class TestColor(unittest.TestCase):
def random_color(self):
return sf.Color(random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255))
def test_eq(self):
equal = [(sf.Color(i, i, i, i), sf.Color(i, i, i, i))
for i in range(256)]
for c1, c2 in equal:
self.assertEqual(c1, c2)
def test_neq(self):
not_equal = [(sf.Color(0, 0, 0, 1), sf.Color(0, 1, 0, 0)),
(sf.Color(255, 255, 255, 255),
sf.Color(254, 255, 255, 255))]
for c1, c2 in not_equal:
self.assertNotEqual(c1, c2)
def test_copy(self):
c1 = self.random_color()
c2 = c1.copy()
self.assertEqual(c1, c2)
class TestIntRect(unittest.TestCase):
def random_rect(self):
return sf.IntRect(random.randint(0, 100),
random.randint(0, 100),
random.randint(0, 100),
random.randint(0, 100))
def test_eq(self):
def r():
return random.randint(0, 100)
equal = [(sf.IntRect(l, t, w, h), sf.IntRect(l, t, w, h))
for l, t, w, h in
[(r(), r(), r(), r()) for i in range(100)]]
for r1, r2 in equal:
self.assertEqual(r1, r2)
def test_neq(self):
not_equal = [(sf.IntRect(0, 0, 0, 0), sf.IntRect(0, 0, 0, 10)),
(sf.IntRect(0, 0, 0, 0), sf.IntRect(0, 0, 10, 0)),
(sf.IntRect(0, 0, 0, 0), sf.IntRect(0, 10, 0, 0)),
(sf.IntRect(0, 0, 0, 0), sf.IntRect(10, 0, 0, 0))]
for r1, r2 in not_equal:
self.assertNotEqual
def test_copy(self):
r1 = self.random_rect()
r2 = r1.copy()
self.assertEqual(r1, r2)
class TestFloatRect(unittest.TestCase):
def random_rect(self):
return sf.FloatRect(random.triangular(0.0, 100.0),
random.triangular(0.0, 100.0),
random.triangular(0.0, 100.0),
random.triangular(0.0, 100.0))
def test_eq(self):
def r():
return random.triangular(0.0, 100.0)
equal = [(sf.FloatRect(l, t, w, h), sf.FloatRect(l, t, w, h))
for l, t, w, h in
[(r(), r(), r(), r()) for i in range(100)]]
for r1, r2 in equal:
self.assertEqual(r1, r2)
def test_neq(self):
not_equal = [(sf.FloatRect(0, 0, 0, 0), sf.FloatRect(0, 0, 0, 10)),
(sf.FloatRect(0, 0, 0, 0), sf.FloatRect(0, 0, 10, 0)),
(sf.FloatRect(0, 0, 0, 0), sf.FloatRect(0, 10, 0, 0)),
(sf.FloatRect(0, 0, 0, 0), sf.FloatRect(10, 0, 0, 0))]
for r1, r2 in not_equal:
self.assertNotEqual
def test_copy(self):
r1 = self.random_rect()
r2 = r1.copy()
self.assertEqual(r1, r2)
class TestTime(unittest.TestCase):
def random_time(self):
return sf.Time(microseconds=random.randint(0, 1000000))
def test_eq(self):
equal = [(sf.Time(microseconds=x), sf.Time(microseconds=x))
for x in
[random.randint(0, 1000000) for n in range(10)]]
for t1, t2 in equal:
self.assertEqual(t1, t2)
def test_add(self):
t1 = self.random_time()
t2 = self.random_time()
self.assertEqual(
t1 + t2,
sf.Time(microseconds=t1.as_microseconds() + t2.as_microseconds()))
def test_sub(self):
t1 = self.random_time()
t2 = self.random_time()
self.assertEqual(
t1 - t2,
sf.Time(microseconds=t1.as_microseconds() - t2.as_microseconds()))
def test_mul(self):
t = self.random_time()
i = random.randint(1, 1000)
self.assertEqual(t * i,
sf.Time(microseconds=t.as_microseconds() * i))
f = random.triangular(0.0, 100.0)
self.assertEqual(t * f,
sf.Time(seconds=t.as_seconds() * f))
def test_div(self):
t = self.random_time()
i = random.randint(1, 1000)
self.assertEqual(t / i,
sf.Time(microseconds=t.as_microseconds() / i))
f = random.triangular(0.0, 100.0)
self.assertEqual(t / f,
sf.Time(seconds=t.as_seconds() / f))
def test_copy(self):
t1 = self.random_time()
t2 = t1.copy()
self.assertEqual(t1, t2)
class TestTransform(unittest.TestCase):
def random_transform(self):
return sf.Transform(*[random.triangular(0.0, 5.0) for i in range(9)])
def test_init(self):
self.assertEqual(sf.Transform().matrix, sf.Transform.IDENTITY.matrix)
self.assertRaises(TypeError, sf.Transform, *range(10))
def test_copy(self):
for i in range(10):
t1 = self.random_transform()
t2 = t1.copy()
self.assertEqual(t1.matrix, t2.matrix)
def test_imul(self):
t1 = self.random_transform()
t2 = self.random_transform()
t3 = t1.copy()
t3 *= t2
self.assertEqual((t1 * t2).matrix, t3.matrix)
if __name__ == '__main__':
unittest.main()
| 30.724719 | 78 | 0.513257 | 5,307 | 0.970378 | 0 | 0 | 0 | 0 | 0 | 0 | 56 | 0.01024 |
8bfef33258b56cdbd64d66536a38eaa752a6a523 | 12,840 | py | Python | textgen/augment/word_level_augment.py | shibing624/textgen | 0a9d55f1f61d5217b8e06f1f23904e49afa84370 | [
"Apache-2.0"
]
| 31 | 2021-06-29T14:31:35.000Z | 2022-03-25T00:36:44.000Z | textgen/augment/word_level_augment.py | shibing624/text-generation | 0a9d55f1f61d5217b8e06f1f23904e49afa84370 | [
"Apache-2.0"
]
| 1 | 2021-11-09T21:30:16.000Z | 2022-03-02T10:21:04.000Z | textgen/augment/word_level_augment.py | shibing624/text-generation | 0a9d55f1f61d5217b8e06f1f23904e49afa84370 | [
"Apache-2.0"
]
| 5 | 2021-06-21T03:13:39.000Z | 2022-02-07T06:53:22.000Z | # -*- coding: utf-8 -*-
"""
@author:XuMing([email protected])
@description: Word-level augmentations, including replacing words with uniform
random words or TF-IDF based word replacement.
"""
import collections
import copy
import math
import numpy as np
from textgen.utils.log import logger
min_token_num = 3
class EfficientRandomGen(object):
"""A base class that generate multiple random numbers at the same time."""
def reset_random_prob(self):
"""Generate many random numbers at the same time and cache them."""
cache_len = 100000
self.random_prob_cache = np.random.random(size=(cache_len,))
self.random_prob_ptr = cache_len - 1
def get_random_prob(self):
"""Get a random number."""
value = self.random_prob_cache[self.random_prob_ptr]
self.random_prob_ptr -= 1
if self.random_prob_ptr == -1:
self.reset_random_prob()
return value
def get_random_token(self):
"""Get a Random token."""
token = self.token_list[self.token_ptr]
self.token_ptr -= 1
if self.token_ptr == -1:
self.reset_token_list()
return token
def get_insert_token(self, word):
"""Get a replace token."""
# Insert word choose
return ''.join([word] * 2)
def get_delete_token(self):
"""Get a replace token."""
# Insert word choose
return ''
class RandomReplace(EfficientRandomGen):
"""Uniformly replace word with random words in the vocab."""
def __init__(self, token_prob, vocab):
self.token_prob = token_prob
self.vocab_size = len(vocab)
self.vocab = vocab
self.reset_token_list()
self.reset_random_prob()
def __call__(self, tokens):
return self.replace_tokens(tokens)
def replace_tokens(self, tokens):
"""
Replace tokens randomly.
:param tokens: list
:return: tokens, details
tokens, list
details, list eg: [(old_token, new_token, start_idx, end_idx), ...]
"""
details = []
idx = 0
if len(tokens) >= min_token_num:
for i in range(len(tokens)):
old_token = tokens[i]
if self.get_random_prob() < self.token_prob:
tokens[i] = self.get_random_token()
details.append((old_token, tokens[i], idx, idx + len(tokens[i])))
idx += len(tokens[i])
return tokens, details
def reset_token_list(self):
"""Generate many random tokens at the same time and cache them."""
self.token_list = list(self.vocab.keys())
self.token_ptr = len(self.token_list) - 1
np.random.shuffle(self.token_list)
class InsertReplace(EfficientRandomGen):
"""Uniformly replace word with insert repeat words in the vocab."""
def __init__(self, token_prob, vocab):
self.token_prob = token_prob
self.vocab_size = len(vocab)
self.vocab = vocab
self.reset_token_list()
self.reset_random_prob()
def __call__(self, tokens):
return self.replace_tokens(tokens)
def replace_tokens(self, tokens):
"""
Replace tokens with insert data.
:param tokens: list
:return: tokens, details
tokens, list
details, list eg: [(old_token, new_token, start_idx, end_idx), ...]
"""
details = []
idx = 0
if len(tokens) >= min_token_num:
for i in range(len(tokens)):
old_token = tokens[i]
if self.get_random_prob() < self.token_prob:
tokens[i] = self.get_insert_token(tokens[i])
details.append((old_token, tokens[i], idx, idx + len(tokens[i])))
idx += len(tokens[i])
return tokens, details
def reset_token_list(self):
"""Generate many random tokens at the same time and cache them."""
self.token_list = list(self.vocab.keys())
self.token_ptr = len(self.token_list) - 1
np.random.shuffle(self.token_list)
class DeleteReplace(EfficientRandomGen):
"""Uniformly replace word with delete words in the vocab."""
def __init__(self, token_prob, vocab):
self.token_prob = token_prob
self.vocab_size = len(vocab)
self.vocab = vocab
self.reset_token_list()
self.reset_random_prob()
def __call__(self, tokens):
return self.replace_tokens(tokens)
def replace_tokens(self, tokens):
"""
Replace tokens with insert data.
:param tokens: list
:return: tokens, details
tokens, list
details, list eg: [(old_token, new_token, start_idx, end_idx), ...]
"""
details = []
idx = 0
if len(tokens) >= min_token_num:
for i in range(len(tokens)):
old_token = tokens[i]
if self.get_random_prob() < self.token_prob:
tokens[i] = self.get_delete_token()
details.append((old_token, tokens[i], idx, idx + len(tokens[i])))
idx += len(tokens[i])
return tokens, details
def reset_token_list(self):
"""Generate many random tokens at the same time and cache them."""
self.token_list = list(self.vocab.keys())
self.token_ptr = len(self.token_list) - 1
np.random.shuffle(self.token_list)
def get_data_idf(tokenized_sentence_list):
"""Compute the IDF score for each word. Then compute the TF-IDF score."""
word_doc_freq = collections.defaultdict(int)
# Compute IDF
for cur_sent in tokenized_sentence_list:
cur_word_dict = {}
for word in cur_sent:
cur_word_dict[word] = 1
for word in cur_word_dict:
word_doc_freq[word] += 1
idf = {}
for word in word_doc_freq:
idf[word] = math.log(len(tokenized_sentence_list) * 1. / word_doc_freq[word])
# Compute TF-IDF
tf_idf = {}
for cur_sent in tokenized_sentence_list:
for word in cur_sent:
if word not in tf_idf:
tf_idf[word] = 0
tf_idf[word] += 1. / len(cur_sent) * idf[word]
return {
"idf": idf,
"tf_idf": tf_idf,
}
class MixEfficientRandomGen(EfficientRandomGen):
"""Add word2vec to Random Gen"""
def __init__(self,
w2v,
similar_prob=0.7,
random_prob=0.1,
delete_prob=0.1,
insert_prob=0.1):
super(MixEfficientRandomGen, self).__init__()
self.word2vec_model = w2v
# Insert replace prob
self.insert_prob = insert_prob
# Delete replace prob
self.delete_prob = delete_prob
# Random replace prob
self.random_prob = random_prob
# Similar replace prob
self.similar_prob = similar_prob
def get_similar_token(self, word):
"""Get a Similar replace token."""
if word in self.word2vec_model.key_to_index:
target_candidate = self.word2vec_model.similar_by_word(word, topn=3)
target_words = [w for w, p in target_candidate if w]
if len(target_words) > 1:
word = np.random.choice(target_words, size=1).tolist()[0]
return word
return word
def get_replace_token(self, word):
"""Get a replace token."""
r_prob = np.random.rand()
# Similar choose prob
if r_prob < self.similar_prob:
word = self.get_similar_token(word)
elif r_prob - self.similar_prob < self.random_prob:
word = self.get_random_token()
elif r_prob - self.similar_prob - self.random_prob < self.delete_prob:
word = self.get_delete_token()
else:
word = self.get_insert_token(word)
return word
class TfIdfWordReplace(MixEfficientRandomGen):
"""TF-IDF Based Word Replacement."""
def __init__(self,
w2v,
token_prob,
data_idf,
similar_prob=0.7,
random_prob=0.1,
delete_prob=0.1,
insert_prob=0.1):
super(TfIdfWordReplace, self).__init__(w2v,
similar_prob=similar_prob,
random_prob=random_prob,
delete_prob=delete_prob,
insert_prob=insert_prob)
self.token_prob = token_prob
self.idf = data_idf["idf"]
self.tf_idf = data_idf["tf_idf"]
if not self.idf:
logger.error('sentence_list must set in tfidf word replace.')
raise ValueError("idf is None.")
data_idf = copy.deepcopy(data_idf)
tf_idf_items = data_idf["tf_idf"].items()
tf_idf_items = sorted(tf_idf_items, key=lambda item: -item[1])
self.tf_idf_keys = []
self.tf_idf_values = []
for key, value in tf_idf_items:
self.tf_idf_keys += [key]
self.tf_idf_values += [value]
self.normalized_tf_idf = np.array(self.tf_idf_values)
self.normalized_tf_idf = max(self.normalized_tf_idf) - self.normalized_tf_idf
self.normalized_tf_idf = self.normalized_tf_idf / self.normalized_tf_idf.sum()
self.reset_token_list()
self.reset_random_prob()
def get_replace_prob(self, all_words):
"""Compute the probability of replacing tokens in a sentence."""
cur_tf_idf = collections.defaultdict(int)
for word in all_words:
cur_tf_idf[word] += 1. / len(all_words) * self.idf[word]
replace_prob = []
for word in all_words:
replace_prob += [cur_tf_idf[word]]
replace_prob = np.array(replace_prob)
replace_prob = np.max(replace_prob) - replace_prob
if replace_prob.sum() != 0.0:
replace_prob = replace_prob / replace_prob.sum() * self.token_prob * len(all_words)
return replace_prob
def __call__(self, tokens):
"""
Replace tokens with tfidf data.
:param tokens: list
:return: tokens, details
tokens, list
details, list eg: [(old_token, new_token, start_idx, end_idx), ...]
"""
new_tokens = []
details = []
if len(tokens) >= min_token_num:
replace_prob = self.get_replace_prob(tokens)
new_tokens, details = self.replace_tokens(tokens, replace_prob[:len(tokens)])
return new_tokens, details
def replace_tokens(self, tokens, replace_prob):
"""Replace tokens with tfidf similar word"""
details = []
idx = 0
for i in range(len(tokens)):
old_token = tokens[i]
if self.get_random_prob() < replace_prob[i]:
# Use Tfidf find similar token
tokens[i] = self.get_similar_token(tokens[i])
details.append((old_token, tokens[i], idx, idx + len(tokens[i])))
idx += len(tokens[i])
return tokens, details
def reset_token_list(self):
cache_len = len(self.tf_idf_keys)
token_list_idx = np.random.choice(
cache_len, (cache_len,), p=self.normalized_tf_idf)
self.token_list = []
for idx in token_list_idx:
self.token_list += [self.tf_idf_keys[idx]]
self.token_ptr = len(self.token_list) - 1
logger.debug("sampled token list: {}".format(self.token_list))
class MixWordReplace(TfIdfWordReplace):
"""Multi Method Based Word Replacement."""
def __init__(self,
w2v,
token_prob,
data_idf,
similar_prob=0.7,
random_prob=0.1,
delete_prob=0.1,
insert_prob=0.1):
super(MixWordReplace, self).__init__(w2v,
token_prob,
data_idf,
similar_prob=similar_prob,
random_prob=random_prob,
delete_prob=delete_prob,
insert_prob=insert_prob)
def replace_tokens(self, word_list, replace_prob):
"""Replace tokens with mix method."""
details = []
idx = 0
for i in range(len(word_list)):
old_token = word_list[i]
if self.get_random_prob() < replace_prob[i]:
word_list[i] = self.get_replace_token(word_list[i])
details.append((old_token, word_list[i], idx, idx + len(word_list[i])))
idx += len(word_list[i])
return word_list, details
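# Usage sketch (hypothetical corpus): RandomReplace only needs a vocabulary dict; the
# TF-IDF and mixed replacers additionally need get_data_idf(corpus) and a gensim-style
# word2vec model (KeyedVectors) for similarity lookups.
#
#     corpus = [["the", "weather", "is", "nice"], ["rain", "is", "coming", "tomorrow"]]
#     vocab = {word: 1 for sentence in corpus for word in sentence}
#     aug = RandomReplace(token_prob=0.3, vocab=vocab)
#     new_tokens, details = aug(list(corpus[0]))  # pass a copy; tokens are edited in place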
| 35.469613 | 95 | 0.576947 | 11,676 | 0.909346 | 0 | 0 | 0 | 0 | 0 | 0 | 2,378 | 0.185202 |
e300c54c781958b660c0d153f40329e21fe52fd9 | 6,539 | py | Python | three_d_resnet_builder/builder.py | thauptmann/3D-ResNet-for-Keras | ac1b8b3d0032c9af832cc945bc57a63106366e54 | [
"MIT"
]
| 4 | 2021-05-23T09:30:40.000Z | 2021-12-29T16:14:46.000Z | three_d_resnet_builder/builder.py | thauptmann/3D-ResNet-for-Keras | ac1b8b3d0032c9af832cc945bc57a63106366e54 | [
"MIT"
]
| 3 | 2021-06-24T09:26:58.000Z | 2022-01-06T11:01:59.000Z | three_d_resnet_builder/builder.py | thauptmann/3D-ResNet-for-Keras | ac1b8b3d0032c9af832cc945bc57a63106366e54 | [
"MIT"
]
| 3 | 2021-06-07T18:11:34.000Z | 2021-12-22T01:57:03.000Z | from . import three_D_resnet
from .kernel import get_kernel_to_name
def build_three_d_resnet(input_shape, output_shape, repetitions, output_activation, regularizer=None,
squeeze_and_excitation=False, use_bottleneck=False, kernel_size=3, kernel_name='3D'):
"""Return a full customizable resnet.
:param input_shape: The input shape of the network as (frames, height, width, channel)
:param output_shape: The output shape. Dependant on the task of the network.
:param repetitions: Define the repetitions of the Residual Blocks e.g. (2, 2, 2, 2) for ResNet-18
:param output_activation: Define the used output activation. Also depends on the task of the network.
:param regularizer: Define the regularizer to use. E.g. "l1" or "l2"
:param squeeze_and_excitation: Activate or deactivate SE-Paths.
:param use_bottleneck: Activate bottleneck layers. Recommended for networks with many layers.
    :param kernel_size: Set the kernel size. It rarely needs to be changed and exists only for
    customization purposes.
:param kernel_name:
:return: Return the built network.
"""
conv_kernel = get_kernel_to_name(kernel_name)
return three_D_resnet.ThreeDConvolutionResNet(input_shape, output_shape, repetitions, output_activation,
regularizer, squeeze_and_excitation, use_bottleneck, kernel_size,
kernel=conv_kernel)
def build_three_d_resnet_18(input_shape, output_shape, output_activation, regularizer=None,
squeeze_and_excitation=False, kernel_name='3D'):
"""Return a customizable resnet_18.
:param input_shape: The input shape of the network as (frames, height, width, channel)
:param output_shape: The output shape. Dependant on the task of the network.
:param output_activation: Define the used output activation. Also depends on the task of the network.
:param regularizer: Defines the regularizer to use. E.g. "l1" or "l2"
:param squeeze_and_excitation:Activate or deactivate SE-Paths.
:param kernel_name:
:return: The built ResNet-18
"""
conv_kernel = get_kernel_to_name(kernel_name)
return three_D_resnet.ThreeDConvolutionResNet(input_shape, output_shape, output_activation, (2, 2, 2, 2),
regularizer, squeeze_and_excitation, kernel=conv_kernel)
def build_three_d_resnet_34(input_shape, output_shape, output_activation, regularizer=None,
squeeze_and_excitation=False, kernel_name='3D'):
"""Return a customizable resnet_34.
:param input_shape: The input shape of the network as (frames, height, width, channel)
:param output_shape: The output shape. Dependant on the task of the network.
:param output_activation: Define the used output activation. Also depends on the task of the network.
:param regularizer: Defines the regularizer to use. E.g. "l1" or "l2"
:param squeeze_and_excitation:Activate or deactivate SE-Paths.
:param kernel_name:
:return: The built ResNet-34
"""
conv_kernel = get_kernel_to_name(kernel_name)
return three_D_resnet.ThreeDConvolutionResNet(input_shape, output_shape, output_activation, (3, 4, 6, 3),
regularizer, squeeze_and_excitation, kernel=conv_kernel)
def build_three_d_resnet_50(input_shape, output_shape, output_activation, regularizer=None,
squeeze_and_excitation=False, kernel_name='3D'):
"""Return a customizable resnet_50.
:param input_shape: The input shape of the network as (frames, height, width, channels)
:param output_shape: The output shape. Dependant on the task of the network.
:param output_activation: Define the used output activation. Also depends on the task of the network.
:param regularizer: Defines the regularizer to use. E.g. "l1" or "l2"
:param squeeze_and_excitation:Activate or deactivate SE-Paths.
:param kernel_name:
:return: The built ResNet-50
"""
conv_kernel = get_kernel_to_name(kernel_name)
return three_D_resnet.ThreeDConvolutionResNet(input_shape, output_shape, output_activation, (3, 4, 6, 3),
regularizer, squeeze_and_excitation, use_bottleneck=True,
kernel=conv_kernel)
def build_three_d_resnet_102(input_shape, output_shape, output_activation, regularizer=None,
squeeze_and_excitation=False, kernel_name='3D'):
"""Return a customizable resnet_102.
:param input_shape: The input shape of the network as (frames, height, width, channel)
:param output_shape: The output shape. Dependant on the task of the network.
:param output_activation: Define the used output activation. Also depends on the task of the network.
:param regularizer: Defines the regularizer to use. E.g. "l1" or "l2"
:param squeeze_and_excitation:Activate or deactivate SE-Paths.
:param kernel_name:
:return: The built ResNet-102
"""
conv_kernel = get_kernel_to_name(kernel_name)
return three_D_resnet.ThreeDConvolutionResNet(input_shape, output_shape, output_activation, (3, 4, 23, 3),
regularizer, squeeze_and_excitation, use_bottleneck=True,
kernel=conv_kernel)
def build_three_d_resnet_152(input_shape, output_shape, output_activation, regularizer=None,
squeeze_and_excitation=False, kernel_name='3D'):
""" Return a customizable resnet_152
:param input_shape: The input shape of the network as (frames, height, width, channel)
:param output_shape: The output shape. Dependant on the task of the network.
:param output_activation: Define the used output activation. Also depends on the task of the network.
:param regularizer: Defines the regularizer to use. E.g. "l1" or "l2"
:param squeeze_and_excitation:Activate or deactivate SE-Paths.
:param kernel_name:
:return: The built ResNet-152
"""
conv_kernel = get_kernel_to_name(kernel_name)
return three_D_resnet.ThreeDConvolutionResNet(input_shape, output_shape, output_activation, (3, 8, 36, 3),
regularizer, squeeze_and_excitation, use_bottleneck=True,
kernel=conv_kernel)
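# Usage sketch (hypothetical shapes): a 3D ResNet-18 for 16-frame 112x112 RGB clips with
# 10 output classes, assuming the returned network behaves like a tf.keras Model.
#
#     model = build_three_d_resnet_18(
#         input_shape=(16, 112, 112, 3),
#         output_shape=10,
#         output_activation='softmax',
#     )
#     model.compile(optimizer='adam', loss='categorical_crossentropy')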
| 57.867257 | 115 | 0.692002 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,496 | 0.534638 |
e301076532db001f5790d94584e7f5e4d2165387 | 1,198 | py | Python | ubuntu20/projects/libRadtran-2.0.4/examples/GUI/spectrum_GOME/spectrum_GOME_plot.py | AmberCrafter/docker-compose_libRadtran | 0182f991db6a13e0cacb3bf9f43809e6850593e4 | [
"MIT"
]
| null | null | null | ubuntu20/projects/libRadtran-2.0.4/examples/GUI/spectrum_GOME/spectrum_GOME_plot.py | AmberCrafter/docker-compose_libRadtran | 0182f991db6a13e0cacb3bf9f43809e6850593e4 | [
"MIT"
]
| null | null | null | ubuntu20/projects/libRadtran-2.0.4/examples/GUI/spectrum_GOME/spectrum_GOME_plot.py | AmberCrafter/docker-compose_libRadtran | 0182f991db6a13e0cacb3bf9f43809e6850593e4 | [
"MIT"
]
| null | null | null | from matplotlib import use
use('WXAgg')
import pylab as plt
import numpy as np
plt.figure(figsize=(8,5))
ax = plt.subplot(111)
fil = './spectrum_GOME.out'
data = np.loadtxt(fil)
y = data[:,1]
x = data[:,0]
pl_list = []
pl, = ax.plot(x,y,'r')
pl_list.append(pl)
y = 10*data[:,3]
pl, = ax.plot(x,y,'b')
pl_list.append(pl)
#plt.xlim([425,450])
#plt.ylim([0,2000])
plt.ylabel(r"Radiation (photons/(s cm$^2$ nm))", fontsize = 12)
plt.xlabel(r"Wavelength (nm)", fontsize = 12)
from matplotlib.legend import Legend
l0 = Legend(ax, pl_list[0:1], ('Solar irradiance',), loc=(0.1,0.85))
#ltext = l0.get_texts() # all the text.Text instance in the legend
#plt.setp(ltext, fontsize='small', linespacing=0) # the legend text fontsize
l0.draw_frame(False) # don't draw the legend frame
ax.add_artist(l0)
l0 = Legend(ax, pl_list[1:2], ('Earth shine (multiplied by 10)',), loc=(0.1,0.75))
#ltext = l0.get_texts() # all the text.Text instance in the legend
#plt.setp(ltext, fontsize='small', linespacing=0) # the legend text fontsize
l0.draw_frame(False) # don't draw the legend frame
ax.add_artist(l0)
#plt.show()
plt.savefig('spectrum_GOME.png')
| 26.622222 | 83 | 0.656093 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 559 | 0.466611 |
e3018352709a236201cb1c03963553b833bc04b2 | 569 | py | Python | pepdb/tasks/migrations/0026_auto_20171031_0153.py | dchaplinsky/pep.org.ua | 8633a65fb657d7f04dbdb12eb8ae705fa6be67e3 | [
"MIT"
]
| 7 | 2015-12-21T03:52:46.000Z | 2020-07-24T19:17:23.000Z | pepdb/tasks/migrations/0026_auto_20171031_0153.py | dchaplinsky/pep.org.ua | 8633a65fb657d7f04dbdb12eb8ae705fa6be67e3 | [
"MIT"
]
| 12 | 2016-03-05T18:11:05.000Z | 2021-06-17T20:20:03.000Z | pepdb/tasks/migrations/0026_auto_20171031_0153.py | dchaplinsky/pep.org.ua | 8633a65fb657d7f04dbdb12eb8ae705fa6be67e3 | [
"MIT"
]
| 4 | 2016-07-17T20:19:38.000Z | 2021-03-23T12:47:20.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-30 23:53
from __future__ import unicode_literals
from django.db import migrations
from tasks.models import BeneficiariesMatching
def save_json_fields_again(apps, schema_editor):
# Ugly but works
for bf in BeneficiariesMatching.objects.all():
bf.person_json = bf.person_json
bf.save()
class Migration(migrations.Migration):
dependencies = [
('tasks', '0025_auto_20171022_0208'),
]
operations = [
migrations.RunPython(save_json_fields_again)
]
| 22.76 | 52 | 0.70123 | 191 | 0.335677 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.209139 |
e302119a1e26db2aa7e3d9148ce46b0ec243f446 | 24,156 | py | Python | condensation-forum/application.py | BitFracture/condensation | a68a9bbae7a7d35e1542242a4f1588ce3abf9d3f | [
"BSD-2-Clause"
]
| null | null | null | condensation-forum/application.py | BitFracture/condensation | a68a9bbae7a7d35e1542242a4f1588ce3abf9d3f | [
"BSD-2-Clause"
]
| 59 | 2018-03-02T03:08:22.000Z | 2018-03-11T01:43:02.000Z | condensation-forum/application.py | BitFracture/condensation | a68a9bbae7a7d35e1542242a4f1588ce3abf9d3f | [
"BSD-2-Clause"
]
| null | null | null | """
An AWS Python3+Flask web app.
"""
from flask import Flask, redirect, url_for, request, session, flash, get_flashed_messages, render_template, escape, abort
from flask_oauthlib.client import OAuth
import boto3,botocore
import jinja2
from boto3.dynamodb.conditions import Key, Attr
import urllib.request
import json
import cgi
import time
import random
import sys
from configLoader import ConfigLoader
from googleOAuthManager import GoogleOAuthManager
from data.session import SessionManager
from data import query, schema
from forms import CreateThreadForm, CreateCommentForm
import inspect
from werkzeug.utils import secure_filename
import uuid
import os
###############################################################################
#FLASK CONFIG
###############################################################################
# This is the EB application, calling directly into Flask
application = Flask(__name__)
# Loads config from file or environment variable
config = ConfigLoader("config.local.json")
# Enable encrypted session, required for OAuth to stick
application.secret_key = config.get("sessionSecret")
#used for form validation
application.config["SECRET_KEY"]=config.get("sessionSecret")
# Set up service handles
botoSession = boto3.Session(
aws_access_key_id = config.get("accessKey"),
aws_secret_access_key = config.get("secretKey"),
aws_session_token=None,
region_name = config.get("region"),
botocore_session=None,
profile_name=None)
dynamodb = botoSession.resource('dynamodb')
s3 = botoSession.resource('s3')
authCacheTable = dynamodb.Table('person-attribute-table')
# Example: bucket = s3.Bucket('elasticbeanstalk-us-west-2-3453535353')
# OAuth setup
authManager = GoogleOAuthManager(
flaskApp = application,
clientId = config.get("oauthClientId"),
clientSecret = config.get("oauthClientSecret"))
#This is the Upload requirement section
bucket = s3.Bucket('condensation-forum')
bucket_name = 'condensation-forum'
s3client = boto3.client(
"s3",
aws_access_key_id=config.get("accessKey"),
aws_secret_access_key=config.get("secretKey")
)
#database connection
dataSessionMgr = SessionManager(
config.get("dbUser"),
config.get("dbPassword"),
config.get("dbEndpoint"))
# Load up Jinja2 templates
templateLoader = jinja2.FileSystemLoader(searchpath="./templates/")
templateEnv = jinja2.Environment(loader=templateLoader)
#pass in library functions to jinja, isn't python terrifying?
#we want to zip collections in view
templateEnv.globals.update(zip=zip)
#we also want to view our flashed messages
templateEnv.globals.update(get_flashed_messages=get_flashed_messages)
#generate urls for buttons in the view
templateEnv.globals.update(url_for=url_for)
bodyTemplate = templateEnv.get_template("body.html")
bodySimpleTemplate = templateEnv.get_template("body-simple.html")
homeTemplate = templateEnv.get_template("home.html")
threadTemplate = templateEnv.get_template("thread.html")
editThreadTemplate = templateEnv.get_template("edit-thread.html")
editCommentTemplate = templateEnv.get_template("edit-comment.html")
fileManagerTemplate = templateEnv.get_template("file-manager.html")
fileListTemplate = templateEnv.get_template("file-list.html")
sharedJavascript = templateEnv.get_template("shared.js")
###############################################################################
#END CONFIG
###############################################################################
@application.route('/', methods=['GET'])
@authManager.enableAuthentication
def indexGetHandler():
"""
Returns the template "home" wrapped by "body" served as HTML
"""
threads = None
#grab threads ordered by time, and zip them with some usernames
with dataSessionMgr.session_scope() as dbSession:
user = authManager.getUserData()
if not user:
flash("Welcome, please <a href='/login'>log in or create an account</a>.")
threads = query.getThreadsByCommentTime(dbSession)
urls = [url_for("threadGetHandler", tid=thread.id) for thread in threads]
usernames = [thread.user.name for thread in threads]
user = authManager.getUserData()
threads = query.extractOutput(threads)
homeRendered = homeTemplate.render(
threads=threads,
urls=urls,
usernames=usernames)
user = authManager.getUserData()
return bodyTemplate.render(
title="Home",
body=homeRendered,
user=user,
location=request.url)
@application.route("/new-thread", methods=["GET", "POST"])
@authManager.requireAuthentication
def newThreadHandler():
""" Renders the thread creation screen, creates thread if all data is validated """
#do not allow unauthenticated users to submit
form = CreateThreadForm()
user = authManager.getUserData()
if form.validate_on_submit():
tid = None
try:
with dataSessionMgr.session_scope() as dbSession:
# Collect a list of all file entities
fileEntries = json.loads(request.form["fileIds"])
files = []
for fileEntry in fileEntries:
files.append(query.getFileById(dbSession, fileEntry['id']))
user = query.getUser(dbSession, user["id"])
thread = schema.Thread(
user=user,
heading=escape(form.heading.data),
body=escape(form.body.data),
attachments=files)
#commits current transactions so we can grab the generated id
dbSession.flush()
tid = thread.id
flash("Your thread was created successfully.")
#redirect to the created thread view
return redirect(url_for("threadGetHandler", tid=tid))
except:
flash("An unexpected error occurred while creating a thread. Please try again later.")
return redirect(url_for("indexGetHandler"))
#error handling is done in the html forms
user = authManager.getUserData()
#File attachment list
fileList = [];
rendered = editThreadTemplate.render(form=form, fileListAsString=json.dumps(fileList))
return bodyTemplate.render(
title="Create Thread",
body=rendered,
user=user,
location=url_for('indexGetHandler', _external=True))
@application.route("/shared.js", methods=["GET"])
def getSharedJs():
return sharedJavascript.render();
@application.route("/edit-thread?tid=<int:tid>", methods=["GET", "POST"])
@authManager.requireAuthentication
def editThreadHandler(tid):
"""Renders an existing threaed to be modified """
#do not allow unauthenticated users to submit
form = CreateThreadForm()
#verify security no error handling because if this fails we have problems, we should fail too
user = authManager.getUserData()
if not user:
abort(403)
with dataSessionMgr.session_scope() as dbSession:
thread = query.getThreadById(dbSession, tid)
if user["id"] != thread.user_id:
abort(403)
if form.validate_on_submit():
try:
with dataSessionMgr.session_scope() as dbSession:
# Collect a list of all file entities
fileEntries = json.loads(request.form["fileIds"])
print (fileEntries, file=sys.stderr)
files = []
for fileEntry in fileEntries:
files.append(query.getFileById(dbSession, fileEntry['id']))
thread = query.getThreadById(dbSession, tid)
if user["id"] != thread.user_id:
abort(403)
thread.attachments = files
thread.heading = escape(form.heading.data)
thread.body = escape(form.body.data)
flash("Your thread was updated successfully.")
#redirect to the created thread view
return redirect(url_for("threadGetHandler", tid=tid))
except:
flash("An unexpected error occurred while updating a thread. Please try again later.")
return redirect(url_for("indexGetHandler"))
#populate with old data from forms
fileList = [];
try:
with dataSessionMgr.session_scope() as dbSession:
thread = query.getThreadById(dbSession, tid)
form.heading.data = thread.heading
form.body.data = thread.body
for file in thread.attachments:
fileList.append({
'id': file.id,
'name': file.name
})
except:
flash("loading failed")
#error handling is done in the html forms
rendered = editThreadTemplate.render(form=form, edit = True, fileListAsString=json.dumps(fileList))
return bodyTemplate.render(
title="Edit Thread",
body=rendered,
user=user,
location=url_for('indexGetHandler', _external=True))
@application.route("/delete-thread?tid=<int:tid>", methods=["GET"])
@authManager.requireAuthentication
def deleteThreadHandler(tid):
"""Deletes a thread."""
#verify security no error handling because if this fails we have problems, we should fail too
user = authManager.getUserData()
if not user:
abort(403)
try:
with dataSessionMgr.session_scope() as dbSession:
thread = query.getThreadById(dbSession, tid)
if not thread:
abort(404)
if user["id"] != thread.user_id:
abort(403)
dbSession.delete(thread)
flash("Your thread was deleted successfully.")
except:
flash("An unexpected error occurred while deleting a thread. Please try again later.")
return redirect(url_for("indexGetHandler"))
@application.route("/new-comment?<int:tid>", methods=["GET", "POST"])
@authManager.requireAuthentication
def newCommentHandler(tid):
"""Renders the thread creation screen, creates thread if all data is validated"""
#do not allow unauthenticated users to submit
form = CreateCommentForm()
user = authManager.getUserData()
print(user, file=sys.stderr)
if not user:
abort(403)
if form.validate_on_submit():
try:
with dataSessionMgr.session_scope() as dbSession:
# Collect a list of all file entities
fileEntries = json.loads(request.form["fileIds"])
files = []
for fileEntry in fileEntries:
files.append(query.getFileById(dbSession, fileEntry['id']))
user = query.getUser(dbSession, user["id"])
thread = query.getThreadById(dbSession, tid)
thread.replies.append(schema.Comment(user=user, body=escape(form.body.data), attachments=files))
flash("Your comment was created successfully.")
#redirect to the created thread view
return redirect(url_for("threadGetHandler", tid=tid))
except:
flash("An unexpected error occurred while creating a comment. Please try again later.")
return redirect(url_for("indexGetHandler"))
fileList = [];
rendered = editCommentTemplate.render(form=form, fileListAsString=json.dumps(fileList))
user = authManager.getUserData()
return bodyTemplate.render(
title="Reply",
body=rendered,
user=user,
location=url_for('indexGetHandler', _external=True))
@application.route("/edit-comment?cid=<int:cid>", methods=["GET", "POST"])
@authManager.requireAuthentication
def editCommentHandler(cid):
"""Renders an existing comment to be modified """
#do not allow unauthenticated users to submit
form = CreateCommentForm()
#verify security no error handling because if this fails we have problems, we should fail too
user = authManager.getUserData()
if not user:
abort(403)
with dataSessionMgr.session_scope() as dbSession:
comment = query.getCommentById(dbSession, cid)
if user["id"] != comment.user_id:
abort(403)
if form.validate_on_submit():
try:
with dataSessionMgr.session_scope() as dbSession:
# Collect a list of all file entities
fileEntries = json.loads(request.form["fileIds"])
files = []
for fileEntry in fileEntries:
files.append(query.getFileById(dbSession, fileEntry['id']))
comment = query.getCommentById(dbSession, cid)
tid = comment.thread_id
if user["id"] != comment.user_id:
abort(403)
comment.body = escape(form.body.data)
comment.attachments = files
flash("Your comment was updated successfully.")
#redirect to the created thread view
return redirect(url_for("threadGetHandler", tid=tid))
except:
flash("An unexpected error occurred while updating a comment. Please try again later.")
return redirect(url_for("indexGetHandler"))
#populate with old data from forms
fileList = [];
try:
with dataSessionMgr.session_scope() as dbSession:
comment = query.getCommentById(dbSession, cid)
form.body.data = comment.body
for file in comment.attachments:
fileList.append({
'id': file.id,
'name': file.name
})
except:
flash("Loading comment data failed, please try again.")
#error handling is done in the html forms
rendered = editCommentTemplate.render(form=form, edit=True, fileListAsString=json.dumps(fileList))
return bodyTemplate.render(
title="Edit Comment",
body=rendered,
user=user,
location=url_for('indexGetHandler', _external=True))
@application.route("/delete-comment?cid=<int:cid>", methods=["GET"])
@authManager.requireAuthentication
def deleteCommentHandler(cid):
"""Deletes a comment."""
#verify security no error handling because if this fails we have problems, we should fail too
user = authManager.getUserData()
if not user:
abort(403)
try:
with dataSessionMgr.session_scope() as dbSession:
comment = query.getCommentById(dbSession, cid)
if not comment:
abort(404)
if user["id"] != comment.user_id:
abort(403)
dbSession.delete(comment)
flash("Your comment was deleted successfully.")
except:
flash("An unexpected error occurred while deleting a comment. Please try again later.")
return redirect(url_for("indexGetHandler"))
@application.route("/thread/<int:tid>)", methods=["GET"])
@authManager.enableAuthentication
def threadGetHandler(tid):
"""Renders a thread, attachments, and all relevant comments"""
#grab the thread with attachments
thread = None
with dataSessionMgr.session_scope() as dbSession:
thread = query.getThreadById(dbSession, tid)
if thread is None:
flash("The thread you selected does not exist.")
return redirect(url_for("indexGetHandler"));
thread_attachments = query.extractOutput(thread.attachments)
user = authManager.getUserData()
uid = user["id"] if user else 0
op = query.extractOutput(thread.user)
op_permission = thread.user_id == uid
replyUrl = url_for("newCommentHandler", tid=thread.id)
post_attachments = query.extractOutput(thread.attachments)
comments = query.getCommentsByThread(dbSession, thread.id)
comment_attachments =[]
comment_users = []
edit_permissions = []
for comment in comments:
comment_attachments.append(query.extractOutput(comment.attachments))
comment_users.append(query.extractOutput(comment.user))
edit_permissions.append(uid == comment.user_id)
comments = query.extractOutput(comments)
thread = query.extractOutput(thread)
threadRendered = threadTemplate.render(
thread=thread,
thread_attachments=thread_attachments,
op=op,
op_permission=op_permission,
comments=comments,
comment_attachments=comment_attachments,
comment_users=comment_users,
edit_permissions=edit_permissions,
replyUrl=replyUrl)
user = authManager.getUserData();
return bodyTemplate.render(
title="Thread",
body=threadRendered,
user=user,
location=request.url)
@authManager.loginCallback
def loginCallback():
"""
This is invoked when a user logs in, before any other logic.
"""
user = authManager.getUserData()
if user:
try:
with dataSessionMgr.session_scope() as dbSession:
#add a new user if not in the database
if not query.getUser(dbSession, user["id"]):
dbSession.add(schema.User(
id=user["id"],
name=user["name"],
profile_picture=user["picture"]))
flash("Your Google account has been linked. Thank you!")
except:
flash("An unexpected error occurred while linking your account. Please try again later.")
#if this fails logout and redirect home
return redirect(authManager.LOGOUT_ROUTE)
@application.route("/delete-user", methods=["GET"])
@authManager.requireAuthentication
def deleteUserHandler():
"""Deletes a user and redirects them home"""
user = authManager.getUserData()
if user:
try:
with dataSessionMgr.session_scope() as dbSession:
account = query.getUser(dbSession, user["id"])
if account:
dbSession.delete(account)
flash("Your forum account has been deleted and unlinked from your Google account.")
except:
flash("An unexpected error occurred while deleting your account. Please try again later.")
return redirect(authManager.LOGOUT_ROUTE)
@authManager.logoutCallback
def logoutCallback():
"""
This is invoked when a user logs out, immediately before user context is destroyed.
"""
user = authManager.getUserData()
@application.route('/file-manager', methods=['GET'])
@authManager.enableAuthentication
def fileManagerGetHandler():
"""renders the users file manager screen"""
user = authManager.getUserData();
if not user:
return 401;
id = user['id']
fileManagerRendered = fileManagerTemplate.render()
return bodyTemplate.render(
title="File Manager",
body=fileManagerRendered,
user=user,
location=request.url)
@application.route('/file-delete', methods=['POST'])
@authManager.requireAuthentication
def fileListDeleteHander():
"""Deletes a list of files"""
user = authManager.getUserData()
fid = int(request.form['file'])
id = user['id']
# Find the file in S3
try:
with dataSessionMgr.session_scope() as dbSession:
file1 = query.getFileById(dbSession,fid)
file1 = query.extractOutput(file1)
except Exception as e:
flash("An unexpected error occurred while finding the file in our cloud storage. "\
+ "Please try again later.<br/><br/>", e);
return redirect(url_for("fileListGetHandler"))
# Delete the file from S3
key = file1['cloud_key']
try:
s3client.delete_object(Bucket=bucket_name,Key=key)
except Exception as e:
flash("An unexpected error occurred while removing the file from our cloud storage. "\
+ "Please try again later.<br/><br/>", e);
return redirect(url_for("fileListGetHandler"))
# Delete the file by fileID in RDS
try:
with dataSessionMgr.session_scope() as dbSession:
file = query.getFileById(dbSession,fid)
if file:
dbSession.delete(file)
except Exception as e:
flash("An unexpected error occurred while removing this file from our database. "\
+ "Please try again later.<br/><br/>", e);
return redirect(url_for("fileListGetHandler"))
return redirect(url_for("fileListGetHandler"))
@application.route('/file-list', methods=['GET'])
@authManager.requireAuthentication
def fileListGetHandler():
"""Gives the list of files associated with current user"""
user = authManager.getUserData()
id = user['id']
#Get the user's profile from the DB and zip it first
with dataSessionMgr.session_scope() as dbSession:
files = query.getFilesByUser(dbSession,id)
files = query.extractOutput(files)
if not files:
files = [];
fileManagerRendered = fileListTemplate.render(files=files)
return bodySimpleTemplate.render(
title="File Manager",
body=fileManagerRendered)
@application.route('/file-list', methods=['POST'])
@authManager.requireAuthentication
def fileListPostHandler():
"""Uploads a list of files to s3 and the dv"""
user = authManager.getUserData()
# Get the user session and file to upload
id = user['id']
file = request.files['file']
# If user does not select file, browser also submit a empty part without filename
if not file or file.filename.strip() == '':
flash('You must select a file in order to upload one.')
return redirect(request.url)
# Determine shortened file name (secure)
filename = secure_filename(file.filename.strip())
while (len(filename) > 50):
cutString = len(filename) % 50
filename = filename[cutString:len(filename)]
# Determine the S3 key
try:
myUuid = uuid.uuid4().hex
fn, fileExtension = os.path.splitext(filename)
key = id + "/" + myUuid + fileExtension.lower()
# If the file already exists, we need to warn and abort
try:
with dataSessionMgr.session_scope() as dbSession:
checkFile = query.getFileByName(dbSession,id,filename)
checkFile = query.extractOutput(checkFile)
except Exception as e:
flash("We had an issue connecting to our storage, please try again", e);
return e
if checkFile is not None:
flash("That file already exists. Please delete it first and then re-upload. " \
+ "This will <b>remove</b> any attachments you have made to this file.")
return redirect(request.url)
# Since the file does not exist, we will upload it now
s3client.upload_fileobj(file, bucket_name, key, ExtraArgs={"ACL": "public-read", "ContentType": file.content_type})
url = "https://s3-us-west-2.amazonaws.com/condensation-forum/" + key
try:
with dataSessionMgr.session_scope() as dbSession:
user = query.getUser(dbSession, id)
file = schema.File(url=url, cloud_key=key, name=filename)
user.uploads.append(file)
except:
flash("We had an issue connecting to storage, please try again.")
return redirect(request.url)
except Exception:
flash("An unexpected error occurred while uploading your file. Things to try: "\
+ "<br/> - Rename the file to something shorter"\
+ "<br/> - Make sure the file size is under 1 megabyte"\
+ "<br/> - Make sure there are no special characters in the file name<br/><br/>");
return redirect(request.url)
# Redirect to end the POST handling the redirect can be to the same route or somewhere else
return redirect(request.url)
# Run Flask app now
if __name__ == "__main__":
# Enable debug output, disable in prod
application.debug = True
application.run()
| 36.711246 | 123 | 0.638475 | 0 | 0 | 0 | 0 | 20,479 | 0.847781 | 0 | 0 | 7,210 | 0.298477 |
e30514bdd0f30538d4ed999ec163ad0e47c028b6 | 186 | py | Python | CA3/news_test.py | aadyajha12/Covid19-SmartAlarm | 911fe819cff6ef792f14b7dd48cbbb2c73f2405d | [
"MIT"
]
| 1 | 2021-03-11T11:57:19.000Z | 2021-03-11T11:57:19.000Z | CA3/news_test.py | aadyajha12/Covid19-SmartAlarm | 911fe819cff6ef792f14b7dd48cbbb2c73f2405d | [
"MIT"
]
| null | null | null | CA3/news_test.py | aadyajha12/Covid19-SmartAlarm | 911fe819cff6ef792f14b7dd48cbbb2c73f2405d | [
"MIT"
]
| null | null | null | import json
from newsapi import covid_news
def news_test_one():
    with open('gb-news.json') as news_file:
        news_json = json.load(news_file)
    news = covid_news(news_json)
    assert news[0]['title'] is not None
| 26.571429 | 47 | 0.698925 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.112903 |
e30656fdcf081203a75edc6af8dad04320307e06 | 390 | py | Python | 2015/02/fc_2015_02_10.py | mfwarren/FreeCoding | 58ac87f35ad2004a3514782556762ee0ed72c39a | [
"MIT"
]
| null | null | null | 2015/02/fc_2015_02_10.py | mfwarren/FreeCoding | 58ac87f35ad2004a3514782556762ee0ed72c39a | [
"MIT"
]
| 1 | 2015-04-27T01:43:45.000Z | 2015-04-27T01:43:45.000Z | 2015/02/fc_2015_02_10.py | mfwarren/FreeCoding | 58ac87f35ad2004a3514782556762ee0ed72c39a | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
# imports go here
import atexit
#
# Free Coding session for 2015-02-10
# Written by Matt Warren
#
def clean_up():
print("CLEANING UP")
@atexit.register
def done():
print("DONE")
if __name__ == '__main__':
atexit.register(clean_up)
try:
import time
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
| 14.444444 | 36 | 0.623077 | 0 | 0 | 0 | 0 | 46 | 0.117949 | 0 | 0 | 130 | 0.333333 |
e3073fdd2f59dca010998232729affa0626a74d8 | 3,133 | py | Python | core/scheduler/at.py | vsilent/smarty-bot | 963cba05433be14494ba339343c9903ccab3c37d | [
"MIT"
]
| 1 | 2016-10-08T09:01:05.000Z | 2016-10-08T09:01:05.000Z | core/scheduler/at.py | vsilent/smarty-bot | 963cba05433be14494ba339343c9903ccab3c37d | [
"MIT"
]
| 1 | 2019-09-24T09:56:52.000Z | 2019-09-24T09:56:52.000Z | core/scheduler/at.py | vsilent/smarty-bot | 963cba05433be14494ba339343c9903ccab3c37d | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" apscheduler. """
import subprocess
from apscheduler.scheduler import Scheduler
from apscheduler.jobstores.shelve_store import ShelveJobStore
from datetime import date, datetime, timedelta
import os
import shelve
import zmq
from core.config.settings import logger
def job(command):
#""" docstring for job. """
subprocess.Popen(command)
class ScheduleDaemon(object):
response = None
"""
scheduler - at daemon.
one of daemons
"""
def __init__(self, name="scheduler-at"):
"""docstring for __init__"""
self.context = zmq.Context()
self.name = name
self.sock = self.context.socket(zmq.REP)
self.sock.bind('ipc:///tmp/smarty-%s' % name)
def add_job(self, command, hour, minute, sec=0):
logger.info("2. scheduler adding job command: %s at %s:%s:%s" % (
command, hour, minute, sec
))
sched = Scheduler(standalone=True)
#make a db file
shelve.open(
os.path.join(
os.path.dirname(__file__),
'example.db'
)
)
sched.add_jobstore(ShelveJobStore('example.db'), 'shelve')
exec_time = datetime(
date.today().year,
date.today().month,
date.today().day,
int(hour),
int(minute),
int(sec)
)
#test
#exec_time = datetime.now() + timedelta(seconds=5)
sched.add_date_job(
job,
exec_time,
name='alarm',
jobstore='shelve',
args=[command]
)
sched.start()
def start(self):
""" start """
logger.info('daemon %s started successfully' % (self.name))
while True:
self.msg = self.sock.recv_json()
logger.info('daemon %s received %s' % (self.name, self.msg))
self.cmd = self.msg.get('cmd', None)
if self.cmd == 'terminate':
self.response['text'] = 'terminated'
self.sock.send_json(self.response)
self.sock.close()
self.context.term()
break
if self.cmd:
response = self.process_command(self.cmd)
logger.info('daemon responded with %s' % response)
exit()
def process_command(self, cmd):
"""docstring for process"""
if cmd == 'add_job':
err = 'uhm, I did not understand.'
response = {'text': ';-)'}
command = self.msg.pop('command', None)
hour = self.msg.pop('hour', None)
minute = self.msg.pop('minute', None)
sec = self.msg.pop('sec', None)
self.sock.send_json({'text': 'job added'})
try:
response = self.add_job(command, hour, minute, sec)
except (KeyboardInterrupt, SystemExit) as e:
logger.exception(e)
response = {'text': 'wrong params passed'}
return response
daemon = ScheduleDaemon()
daemon.start()
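# Client-side sketch (hypothetical job): the daemon above answers REQ/REP JSON messages on
# ipc:///tmp/smarty-scheduler-at, and `command` is handed straight to subprocess.Popen.
#
#     sock = zmq.Context().socket(zmq.REQ)
#     sock.connect('ipc:///tmp/smarty-scheduler-at')
#     sock.send_json({'cmd': 'add_job', 'command': ['aplay', 'alarm.wav'],
#                     'hour': 7, 'minute': 30, 'sec': 0})
#     print(sock.recv_json())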
| 25.892562 | 73 | 0.531759 | 2,689 | 0.858283 | 0 | 0 | 0 | 0 | 0 | 0 | 656 | 0.209384 |
e307995e7666610653ffb5c496c1cf1dfe8feab6 | 897 | py | Python | machin/frame/algorithms/__init__.py | ikamensh/machin | af7b423c47bc1412530cf6c96c11bd3af9b3e239 | [
"MIT"
]
| 1 | 2021-04-01T21:21:23.000Z | 2021-04-01T21:21:23.000Z | machin/frame/algorithms/__init__.py | ikamensh/machin | af7b423c47bc1412530cf6c96c11bd3af9b3e239 | [
"MIT"
]
| null | null | null | machin/frame/algorithms/__init__.py | ikamensh/machin | af7b423c47bc1412530cf6c96c11bd3af9b3e239 | [
"MIT"
]
| null | null | null | import warnings
from .base import TorchFramework
from .dqn import DQN
from .dqn_per import DQNPer
from .rainbow import RAINBOW
from .ddpg import DDPG
from .hddpg import HDDPG
from .td3 import TD3
from .ddpg_per import DDPGPer
from .a2c import A2C
from .a3c import A3C
from .ppo import PPO
from .sac import SAC
from .maddpg import MADDPG
try:
from .apex import DQNApex, DDPGApex
from .impala import IMPALA
from .ars import ARS
except ImportError as _:
warnings.warn(
"Failed to import algorithms relying on torch.distributed." " Set them to None."
)
DQNApex = None
DDPGApex = None
IMPALA = None
ARS = None
__all__ = [
"TorchFramework",
"DQN",
"DQNPer",
"RAINBOW",
"DDPG",
"HDDPG",
"TD3",
"DDPGPer",
"A2C",
"A3C",
"PPO",
"SAC",
"DQNApex",
"DDPGApex",
"IMPALA",
"ARS",
"MADDPG",
]
| 16.924528 | 88 | 0.637681 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.227425 |
e3079c30e7e32fd20e5ad106e7daf8c8a6a94f80 | 575 | py | Python | apps/paper/migrations/0008_alter_paper_course.py | godetaph/uresearch | fb23cb0fe07f8b434b9c46f80b5b43030a3d5323 | [
"MIT"
]
| null | null | null | apps/paper/migrations/0008_alter_paper_course.py | godetaph/uresearch | fb23cb0fe07f8b434b9c46f80b5b43030a3d5323 | [
"MIT"
]
| null | null | null | apps/paper/migrations/0008_alter_paper_course.py | godetaph/uresearch | fb23cb0fe07f8b434b9c46f80b5b43030a3d5323 | [
"MIT"
]
| null | null | null | # Generated by Django 3.2.7 on 2021-09-24 02:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('preferences', '0002_auto_20210923_2348'),
('paper', '0007_auto_20210923_2353'),
]
operations = [
migrations.AlterField(
model_name='paper',
name='course',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='paper_course', to='preferences.unit2'),
),
]
| 27.380952 | 157 | 0.653913 | 449 | 0.78087 | 0 | 0 | 0 | 0 | 0 | 0 | 165 | 0.286957 |
e308a4fb297dc8f9348bbe1730683c0c197aa336 | 2,925 | py | Python | plaso/cli/helpers/hashers.py | cugu-stars/plaso | a205f8e52dfe4c239aeae5558d572806b7b00e81 | [
"Apache-2.0"
]
| 1,253 | 2015-01-02T13:58:02.000Z | 2022-03-31T08:43:39.000Z | plaso/cli/helpers/hashers.py | cugu-stars/plaso | a205f8e52dfe4c239aeae5558d572806b7b00e81 | [
"Apache-2.0"
]
| 3,388 | 2015-01-02T11:17:58.000Z | 2022-03-30T10:21:45.000Z | plaso/cli/helpers/hashers.py | cugu-stars/plaso | a205f8e52dfe4c239aeae5558d572806b7b00e81 | [
"Apache-2.0"
]
| 376 | 2015-01-20T07:04:54.000Z | 2022-03-04T23:53:00.000Z | # -*- coding: utf-8 -*-
"""The hashers CLI arguments helper."""
from plaso.cli import tools
from plaso.cli.helpers import interface
from plaso.cli.helpers import manager
from plaso.lib import errors
class HashersArgumentsHelper(interface.ArgumentsHelper):
"""Hashers CLI arguments helper."""
NAME = 'hashers'
DESCRIPTION = 'Hashers command line arguments.'
_DEFAULT_HASHER_STRING = 'sha256'
@classmethod
def AddArguments(cls, argument_group):
"""Adds command line arguments to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group.
"""
argument_group.add_argument(
'--hasher_file_size_limit', '--hasher-file-size-limit',
dest='hasher_file_size_limit', type=int, action='store', default=0,
metavar='SIZE', help=(
'Define the maximum file size in bytes that hashers should '
'process. Any larger file will be skipped. A size of 0 represents '
'no limit.'))
argument_group.add_argument(
'--hashers', dest='hashers', type=str, action='store',
default=cls._DEFAULT_HASHER_STRING, metavar='HASHER_LIST', help=(
'Define a list of hashers to use by the tool. This is a comma '
'separated list where each entry is the name of a hasher, such as '
'"md5,sha256". "all" indicates that all hashers should be '
'enabled. "none" disables all hashers. Use "--hashers list" or '
'"--info" to list the available hashers.'))
@classmethod
def ParseOptions(cls, options, configuration_object):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
BadConfigOption: when a configuration parameter fails validation.
"""
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
hashers = cls._ParseStringOption(
options, 'hashers', default_value=cls._DEFAULT_HASHER_STRING)
hasher_file_size_limit = cls._ParseNumericOption(
options, 'hasher_file_size_limit', default_value=0)
# TODO: validate hasher names.
if hasher_file_size_limit < 0:
raise errors.BadConfigOption(
'Invalid hasher file size limit value cannot be negative.')
setattr(configuration_object, '_hasher_names_string', hashers)
setattr(
configuration_object, '_hasher_file_size_limit', hasher_file_size_limit)
manager.ArgumentHelperManager.RegisterHelper(HashersArgumentsHelper)
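if __name__ == '__main__':
  # Hedged usage sketch, not part of the original plaso module: it only shows
  # how AddArguments wires the two options onto a plain argparse parser; the
  # parser and variable names below are illustrative. ParseOptions is skipped
  # because it requires a CLITool instance.
  import argparse
  example_parser = argparse.ArgumentParser()
  HashersArgumentsHelper.AddArguments(example_parser)
  example_options = example_parser.parse_args(['--hashers', 'md5,sha256'])
  print(example_options.hashers)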
| 36.111111 | 80 | 0.699487 | 2,651 | 0.906325 | 0 | 0 | 2,442 | 0.834872 | 0 | 0 | 1,600 | 0.547009 |
e308f94d9774663e111da5671ce07f0ce2dd542e | 20,297 | py | Python | tutorials/create_sakila/migrations/0001_initial.py | MeGustas-5427/SQL_Tutorials | 627372c2d5d8656d72645830c9a1fae1df278fc7 | [
"Apache-2.0"
]
| 13 | 2020-11-05T04:22:51.000Z | 2022-02-27T08:44:50.000Z | tutorials/create_sakila/migrations/0001_initial.py | MeGustas-5427/SQL_Tutorials | 627372c2d5d8656d72645830c9a1fae1df278fc7 | [
"Apache-2.0"
]
| null | null | null | tutorials/create_sakila/migrations/0001_initial.py | MeGustas-5427/SQL_Tutorials | 627372c2d5d8656d72645830c9a1fae1df278fc7 | [
"Apache-2.0"
]
| 2 | 2020-11-10T10:01:20.000Z | 2021-04-07T02:33:29.000Z | # Generated by Django 3.1.5 on 2021-01-11 08:07
from django.db import migrations, models
import django.db.models.deletion
import django_mysql.models
import utils.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Actor',
fields=[
('actor_id', models.SmallAutoField(primary_key=True, serialize=False)),
('first_name', models.CharField(max_length=45)),
('last_name', models.CharField(max_length=45)),
('last_update', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': '演员表',
'verbose_name_plural': '演员表',
'db_table': 'actor',
},
bases=(models.Model, utils.models.ModelSerializationMixin),
),
migrations.CreateModel(
name='Address',
fields=[
('address_id', models.SmallAutoField(primary_key=True, serialize=False)),
('address', models.CharField(max_length=50)),
('address2', models.CharField(default=None, max_length=50, null=True)),
('district', models.CharField(max_length=20)),
('postal_code', models.CharField(max_length=10)),
('phone', models.CharField(max_length=20)),
('last_update', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': '地址表',
'verbose_name_plural': '地址表',
'db_table': 'address',
},
bases=(models.Model, utils.models.ModelSerializationMixin),
),
migrations.CreateModel(
name='Category',
fields=[
('category_id', models.SmallAutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=25)),
('last_update', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': '栏目',
'verbose_name_plural': '栏目',
'db_table': 'category',
},
bases=(models.Model, utils.models.ModelSerializationMixin),
),
migrations.CreateModel(
name='Country',
fields=[
('country_id', models.SmallAutoField(primary_key=True, serialize=False)),
('country', models.CharField(max_length=50)),
('last_update', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': '国家表',
'verbose_name_plural': '国家表',
'db_table': 'country',
},
bases=(models.Model, utils.models.ModelSerializationMixin),
),
migrations.CreateModel(
name='Customer',
fields=[
('customer_id', models.SmallAutoField(primary_key=True, serialize=False)),
('first_name', models.CharField(max_length=45)),
('last_name', models.CharField(max_length=45)),
('email', models.CharField(default=None, max_length=50, null=True)),
('active', models.BooleanField(default=True)),
('create_date', models.DateTimeField(verbose_name='创建时间')),
('last_update', models.DateTimeField(auto_now=True)),
('address', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='create_sakila.address', verbose_name='客户住址')),
],
options={
'verbose_name': '客户表',
'verbose_name_plural': '客户表',
'db_table': 'customer',
},
bases=(models.Model, utils.models.ModelSerializationMixin),
),
migrations.CreateModel(
name='Film',
fields=[
('film_id', models.SmallAutoField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=128)),
('description', models.TextField(default=None, null=True)),
('release_year', models.PositiveSmallIntegerField(choices=[(1901, 1901), (1902, 1902), (1903, 1903), (1904, 1904), (1905, 1905), (1906, 1906), (1907, 1907), (1908, 1908), (1909, 1909), (1910, 1910), (1911, 1911), (1912, 1912), (1913, 1913), (1914, 1914), (1915, 1915), (1916, 1916), (1917, 1917), (1918, 1918), (1919, 1919), (1920, 1920), (1921, 1921), (1922, 1922), (1923, 1923), (1924, 1924), (1925, 1925), (1926, 1926), (1927, 1927), (1928, 1928), (1929, 1929), (1930, 1930), (1931, 1931), (1932, 1932), (1933, 1933), (1934, 1934), (1935, 1935), (1936, 1936), (1937, 1937), (1938, 1938), (1939, 1939), (1940, 1940), (1941, 1941), (1942, 1942), (1943, 1943), (1944, 1944), (1945, 1945), (1946, 1946), (1947, 1947), (1948, 1948), (1949, 1949), (1950, 1950), (1951, 1951), (1952, 1952), (1953, 1953), (1954, 1954), (1955, 1955), (1956, 1956), (1957, 1957), (1958, 1958), (1959, 1959), (1960, 1960), (1961, 1961), (1962, 1962), (1963, 1963), (1964, 1964), (1965, 1965), (1966, 1966), (1967, 1967), (1968, 1968), (1969, 1969), (1970, 1970), (1971, 1971), (1972, 1972), (1973, 1973), (1974, 1974), (1975, 1975), (1976, 1976), (1977, 1977), (1978, 1978), (1979, 1979), (1980, 1980), (1981, 1981), (1982, 1982), (1983, 1983), (1984, 1984), (1985, 1985), (1986, 1986), (1987, 1987), (1988, 1988), (1989, 1989), (1990, 1990), (1991, 1991), (1992, 1992), (1993, 1993), (1994, 1994), (1995, 1995), (1996, 1996), (1997, 1997), (1998, 1998), (1999, 1999), (2000, 2000), (2001, 2001), (2002, 2002), (2003, 2003), (2004, 2004), (2005, 2005), (2006, 2006), (2007, 2007), (2008, 2008), (2009, 2009), (2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020), (2021, 2021), (2022, 2022), (2023, 2023), (2024, 2024), (2025, 2025), (2026, 2026), (2027, 2027), (2028, 2028), (2029, 2029), (2030, 2030), (2031, 2031), (2032, 2032), (2033, 2033), (2034, 2034), (2035, 2035), (2036, 2036), (2037, 2037), (2038, 2038), (2039, 2039), (2040, 2040), (2041, 2041), (2042, 2042), (2043, 2043), (2044, 2044), (2045, 2045), (2046, 2046), (2047, 2047), (2048, 2048), (2049, 2049), (2050, 2050), (2051, 2051), (2052, 2052), (2053, 2053), (2054, 2054), (2055, 2055), (2056, 2056), (2057, 2057), (2058, 2058), (2059, 2059), (2060, 2060), (2061, 2061), (2062, 2062), (2063, 2063), (2064, 2064), (2065, 2065), (2066, 2066), (2067, 2067), (2068, 2068), (2069, 2069), (2070, 2070), (2071, 2071), (2072, 2072), (2073, 2073), (2074, 2074), (2075, 2075), (2076, 2076), (2077, 2077), (2078, 2078), (2079, 2079), (2080, 2080), (2081, 2081), (2082, 2082), (2083, 2083), (2084, 2084), (2085, 2085), (2086, 2086), (2087, 2087), (2088, 2088), (2089, 2089), (2090, 2090), (2091, 2091), (2092, 2092), (2093, 2093), (2094, 2094), (2095, 2095), (2096, 2096), (2097, 2097), (2098, 2098), (2099, 2099), (2100, 2100), (2101, 2101), (2102, 2102), (2103, 2103), (2104, 2104), (2105, 2105), (2106, 2106), (2107, 2107), (2108, 2108), (2109, 2109), (2110, 2110), (2111, 2111), (2112, 2112), (2113, 2113), (2114, 2114), (2115, 2115), (2116, 2116), (2117, 2117), (2118, 2118), (2119, 2119), (2120, 2120), (2121, 2121), (2122, 2122), (2123, 2123), (2124, 2124), (2125, 2125), (2126, 2126), (2127, 2127), (2128, 2128), (2129, 2129), (2130, 2130), (2131, 2131), (2132, 2132), (2133, 2133), (2134, 2134), (2135, 2135), (2136, 2136), (2137, 2137), (2138, 2138), (2139, 2139), (2140, 2140), (2141, 2141), (2142, 2142), (2143, 2143), (2144, 2144), (2145, 2145), (2146, 2146), (2147, 2147), (2148, 2148), (2149, 2149), (2150, 
2150), (2151, 2151), (2152, 2152), (2153, 2153), (2154, 2154), (2155, 2155)], default=None, null=True)),
('rental_duration', models.PositiveSmallIntegerField(default=3)),
('rental_rate', models.DecimalField(decimal_places=2, default='4.99', max_digits=4)),
('length', models.PositiveSmallIntegerField(default=None, null=True)),
('replacement_cost', models.DecimalField(decimal_places=2, default='19.99', max_digits=5)),
('rating', models.CharField(choices=[('G', 'G'), ('R', 'R'), ('PG', 'PG'), ('PG-13', 'PG_13'), ('NC-17', 'NC_17')], default='G', max_length=5)),
('special_features', django_mysql.models.SetCharField(models.CharField(max_length=20), choices=[('Trailers', 'Trailers'), ('Commentaries', 'Commentaries'), ('Deleted Scenes', 'Deleted_Scenes'), ('Behind the Scenes', 'Behind_the_Scenes')], default=None, max_length=83, null=True, size=4, verbose_name='特殊功能')),
('last_update', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': '电影表',
'verbose_name_plural': '电影表',
'db_table': 'film',
},
bases=(models.Model, utils.models.ModelSerializationMixin),
),
migrations.CreateModel(
name='FilmText',
fields=[
('film_id', models.SmallAutoField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=255)),
('description', models.TextField()),
('last_update', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': '电影详情表',
'verbose_name_plural': '电影详情表',
'db_table': 'film_text',
},
bases=(models.Model, utils.models.ModelSerializationMixin),
),
migrations.CreateModel(
name='Inventory',
fields=[
('inventory_id', models.AutoField(primary_key=True, serialize=False)),
('last_update', models.DateTimeField(auto_now=True)),
('film', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='create_sakila.film')),
],
options={
'verbose_name': '库存表',
'verbose_name_plural': '库存表',
'db_table': 'inventory',
},
bases=(models.Model, utils.models.ModelSerializationMixin),
),
migrations.CreateModel(
name='Language',
fields=[
('language_id', models.SmallAutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=20)),
('last_update', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': '语言种类',
'verbose_name_plural': '语言种类',
'db_table': 'language',
},
bases=(models.Model, utils.models.ModelSerializationMixin),
),
migrations.CreateModel(
name='Staff',
fields=[
('staff_id', models.SmallAutoField(primary_key=True, serialize=False)),
('first_name', models.CharField(max_length=45)),
('last_name', models.CharField(max_length=45)),
('email', models.CharField(default=None, max_length=50, null=True)),
('username', models.CharField(max_length=16)),
('password', models.CharField(default=None, max_length=40, null=True)),
('picture', models.BinaryField(default=None, null=True, verbose_name='图片文件')),
('active', models.BooleanField(default=True)),
('last_update', models.DateTimeField(auto_now=True)),
('address', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='create_sakila.address')),
],
options={
'verbose_name': '员工表',
'verbose_name_plural': '员工表',
'db_table': 'staff',
},
bases=(models.Model, utils.models.ModelSerializationMixin),
),
migrations.CreateModel(
name='Store',
fields=[
('store_id', models.SmallAutoField(primary_key=True, serialize=False)),
('city', models.CharField(max_length=50)),
('last_update', models.DateTimeField(auto_now=True)),
('address', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='create_sakila.address')),
('manager_staff', models.OneToOneField(on_delete=django.db.models.deletion.RESTRICT, related_name='manager', to='create_sakila.staff', verbose_name='店长')),
],
options={
'verbose_name': '电影店表',
'verbose_name_plural': '电影店表',
'db_table': 'store',
},
bases=(models.Model, utils.models.ModelSerializationMixin),
),
migrations.AddField(
model_name='staff',
name='store',
field=models.ForeignKey(db_constraint=False, on_delete=django.db.models.deletion.RESTRICT, to='create_sakila.store'),
),
migrations.CreateModel(
name='Rental',
fields=[
('rental_id', models.SmallAutoField(primary_key=True, serialize=False)),
('rental_date', models.DateTimeField(verbose_name='出租时间')),
('return_date', models.DateTimeField(default=None, null=True, verbose_name='返还时间')),
('last_update', models.DateTimeField(auto_now=True)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='create_sakila.customer', verbose_name='客户')),
('inventory', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='create_sakila.inventory', verbose_name='库存')),
('staff', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='create_sakila.staff', verbose_name='员工')),
],
options={
'verbose_name': '出租表',
'verbose_name_plural': '出租表',
'db_table': 'rental',
},
bases=(models.Model, utils.models.ModelSerializationMixin),
),
migrations.CreateModel(
name='Payment',
fields=[
('payment_id', models.SmallAutoField(primary_key=True, serialize=False)),
('amount', models.DecimalField(decimal_places=2, max_digits=5)),
('payment_date', models.DateTimeField(verbose_name='支付时间')),
('last_update', models.DateTimeField(auto_now=True)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='create_sakila.customer', verbose_name='客户')),
('rental', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.RESTRICT, to='create_sakila.rental', verbose_name='出租')),
('staff', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='create_sakila.staff', verbose_name='员工')),
],
options={
'verbose_name': '付款表',
'verbose_name_plural': '付款表',
'db_table': 'payment',
},
bases=(models.Model, utils.models.ModelSerializationMixin),
),
migrations.AddField(
model_name='inventory',
name='store',
field=models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='create_sakila.store'),
),
migrations.CreateModel(
name='FilmCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('last_update', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='create_sakila.category', verbose_name='栏目')),
('film', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='create_sakila.film', verbose_name='电影')),
],
options={
'verbose_name': '电影栏目',
'verbose_name_plural': '电影栏目',
'db_table': 'film_category',
},
bases=(models.Model, utils.models.ModelSerializationMixin),
),
migrations.CreateModel(
name='FilmActor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('last_update', models.DateTimeField(auto_now=True)),
('actor', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='create_sakila.actor', verbose_name='演员')),
('film', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='create_sakila.film', verbose_name='电影')),
],
options={
'verbose_name': '电影演员表',
'verbose_name_plural': '电影演员表',
'db_table': 'film_actor',
},
bases=(models.Model, utils.models.ModelSerializationMixin),
),
migrations.AddField(
model_name='film',
name='actors',
field=models.ManyToManyField(through='create_sakila.FilmActor', to='create_sakila.Actor', verbose_name='电影演员'),
),
migrations.AddField(
model_name='film',
name='language',
field=models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='create_sakila.language'),
),
migrations.AddField(
model_name='film',
name='original_language',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.RESTRICT, related_name='original_film', to='create_sakila.language'),
),
migrations.AddField(
model_name='customer',
name='store',
field=models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='create_sakila.store', verbose_name='电影店'),
),
migrations.CreateModel(
name='City',
fields=[
('city_id', models.SmallAutoField(primary_key=True, serialize=False)),
('city', models.CharField(max_length=50)),
('last_update', models.DateTimeField(auto_now=True)),
('country', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='create_sakila.country')),
],
options={
'verbose_name': '城市表',
'verbose_name_plural': '城市表',
'db_table': 'city',
},
bases=(models.Model, utils.models.ModelSerializationMixin),
),
migrations.AddField(
model_name='category',
name='films',
field=models.ManyToManyField(through='create_sakila.FilmCategory', to='create_sakila.Film', verbose_name='电影栏目'),
),
migrations.AddField(
model_name='address',
name='city',
field=models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='create_sakila.city'),
),
migrations.AddIndex(
model_name='actor',
index=models.Index(fields=['last_name'], name='idx_actor_last_name'),
),
migrations.AddConstraint(
model_name='rental',
constraint=models.UniqueConstraint(fields=('rental_date', 'inventory', 'customer'), name='film_rental'),
),
migrations.AddIndex(
model_name='inventory',
index=models.Index(fields=['store', 'film'], name='idx_store_id_film_id'),
),
migrations.AddConstraint(
model_name='filmcategory',
constraint=models.UniqueConstraint(fields=('category', 'film'), name='idx_category_film'),
),
migrations.AddConstraint(
model_name='filmactor',
constraint=models.UniqueConstraint(fields=('actor', 'film'), name='idx_actor_film'),
),
migrations.AddIndex(
model_name='film',
index=models.Index(fields=['title'], name='idx_title'),
),
migrations.AddIndex(
model_name='customer',
index=models.Index(fields=['last_name'], name='idx_last_name'),
),
]
| 58.157593 | 3,672 | 0.564862 | 20,462 | 0.991616 | 0 | 0 | 0 | 0 | 0 | 0 | 3,809 | 0.184589 |
e30dad35391d44bf3295ac9fde3a87c8c67a561f | 2,098 | py | Python | ncrf_to_bed.py | makovalab-psu/NoiseCancellingRepeatFinder | b24732ae73a4cef431277664ad4193a0638758c1 | [
"MIT"
]
| 16 | 2019-03-30T05:15:53.000Z | 2022-01-28T15:20:06.000Z | ncrf_to_bed.py | makovalab-psu/NoiseCancellingRepeatFinder | b24732ae73a4cef431277664ad4193a0638758c1 | [
"MIT"
]
| 8 | 2019-04-04T19:46:08.000Z | 2020-11-18T15:11:53.000Z | ncrf_to_bed.py | makovalab-psu/NoiseCancellingRepeatFinder | b24732ae73a4cef431277664ad4193a0638758c1 | [
"MIT"
]
| 6 | 2019-10-05T05:16:00.000Z | 2021-01-28T10:07:49.000Z | #!/usr/bin/env python
"""
Convert the output of Noise Cancelling Repeat Finder to bed format.
"""
from sys import argv,stdin,stdout,stderr,exit
from os import path as os_path
from ncrf_parse import alignments,parse_noise_rate
def usage(s=None):
message = """
usage: ncrf_cat <output_from_NCRF> | ncrf_to_bed [options]
--minmratio=<ratio> discard alignments with a low frequency of matches;
ratio can be between 0 and 1 (e.g. "0.85"), or can be
expressed as a percentage (e.g. "85%")
--maxnoise=<ratio> (same as --minmratio but with 1-ratio)
Typical output is shown below. The 6th column ("score" in the bed spec) is
the match ratio times 1000 (e.g. 826 is 82.6%).
FAB41174_065680 1568 3021 . - 826
FAB41174_029197 3908 5077 . - 824
FAB41174_005950 2312 3334 . - 811
..."""
if (s == None): exit (message)
else: exit ("%s\n%s" % (s,message))
def main():
# parse the command line
minMRatio = None
requireEof = True
for arg in argv[1:]:
if ("=" in arg):
argVal = arg.split("=",1)[1]
if (arg.startswith("--minmratio=")):
minMRatio = parse_noise_rate(argVal)
if (not (0.0 <= minMRatio <= 1.0)):
exit("%s: mratio has to be between 0 and 1 (e.g. 0.85 or 85%%)\n%s"
% (os_path.basename(argv[0]),arg))
elif (arg.startswith("--maxnoise=")):
minMRatio = 1 - parse_noise_rate(argVal)
if (not (0.0 <= minMRatio <= 1.0)):
exit("%s: noise has to be between 0 and 1 (e.g. 0.15 or 15%%)\n%s"
% (os_path.basename(argv[0]),arg))
elif (arg in ["--noendmark","--noeof","--nomark"]): # (unadvertised)
requireEof = False
elif (arg.startswith("--")):
usage("unrecognized option: %s" % arg)
else:
usage("unrecognized option: %s" % arg)
# process the alignments
for a in alignments(stdin,requireEof):
if (minMRatio != None) and (a.mRatio < minMRatio):
continue
print "\t".join([a.seqName,str(a.start),str(a.end),
".",
"%d" % (1000*a.mRatio),
a.strand])
if __name__ == "__main__": main()
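# Hedged usage sketch, not part of the original script: a typical invocation
# following the usage text above (file names are illustrative):
#   ncrf_cat output_from_NCRF | ncrf_to_bed.py --minmratio=0.85 > repeats.bed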
| 29.549296 | 76 | 0.605815 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,012 | 0.482364 |
e30fa4b4018e2cb629164838090fb39449877a74 | 2,551 | py | Python | advertorch/tests/test_utilities.py | sleepstagingrest/rest | cf0de7ae82b6b74fe23e9d057214970cd3c9672d | [
"MIT"
]
| 18 | 2020-02-03T07:14:40.000Z | 2021-12-20T18:45:43.000Z | advertorch/tests/test_utilities.py | sleepstagingrest/rest | cf0de7ae82b6b74fe23e9d057214970cd3c9672d | [
"MIT"
]
| 11 | 2020-01-28T23:16:25.000Z | 2022-02-10T01:04:56.000Z | advertorch/tests/test_utilities.py | sleepstagingrest/REST | cf0de7ae82b6b74fe23e9d057214970cd3c9672d | [
"MIT"
]
| 2 | 2020-08-20T08:15:09.000Z | 2021-02-23T07:30:40.000Z | # Copyright (c) 2018-present, Royal Bank of Canada.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import warnings
import numpy as np
import torch
import torchvision.transforms.functional as F
from advertorch.utils import torch_allclose
from advertorch.utils import CIFAR10_MEAN
from advertorch.utils import CIFAR10_STD
from advertorch.utils import MNIST_MEAN
from advertorch.utils import MNIST_STD
from advertorch.utils import NormalizeByChannelMeanStd
from advertorch.utils import PerImageStandardize
from advertorch_examples.utils import bchw2bhwc
from advertorch_examples.utils import bhwc2bchw
def test_mnist_normalize():
# MNIST
tensor = torch.rand((16, 1, 28, 28))
normalize = NormalizeByChannelMeanStd(MNIST_MEAN, MNIST_STD)
assert torch_allclose(
torch.stack([F.normalize(t, MNIST_MEAN, MNIST_STD)
for t in tensor.clone()]),
normalize(tensor))
def test_cifar10_normalize():
# CIFAR10
tensor = torch.rand((16, 3, 32, 32))
normalize = NormalizeByChannelMeanStd(CIFAR10_MEAN, CIFAR10_STD)
assert torch_allclose(
torch.stack([F.normalize(t, CIFAR10_MEAN, CIFAR10_STD)
for t in tensor.clone()]),
normalize(tensor))
def test_grad_through_normalize():
tensor = torch.rand((2, 1, 28, 28))
tensor.requires_grad_()
mean = torch.tensor((0.,))
std = torch.tensor((1.,))
normalize = NormalizeByChannelMeanStd(mean, std)
loss = (normalize(tensor) ** 2).sum()
loss.backward()
assert torch_allclose(2 * tensor, tensor.grad)
def _run_tf_per_image_standardization(imgs):
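    # Relies on the TensorFlow 1.x graph API (tf.placeholder / tf.Session); under TF 2.x these are only available via tf.compat.v1.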
import tensorflow as tf
import tensorflow.image
imgs = bchw2bhwc(imgs)
placeholder = tf.placeholder(tf.float32, shape=imgs.shape)
var_scaled = tf.map_fn(
lambda img: tf.image.per_image_standardization(img), placeholder)
with tf.Session() as sess:
tf_scaled = sess.run(var_scaled, feed_dict={placeholder: imgs})
return bhwc2bchw(tf_scaled)
def test_per_image_standardization():
imgs = np.random.normal(
scale=1. / (3072 ** 0.5), size=(10, 3, 32, 32)).astype(np.float32)
per_image_standardize = PerImageStandardize()
pt_scaled = per_image_standardize(torch.tensor(imgs)).numpy()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
tf_scaled = _run_tf_per_image_standardization(imgs)
assert np.abs(pt_scaled - tf_scaled).max() < 0.001
| 30.73494 | 74 | 0.717758 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 217 | 0.085065 |
e30ff60533abef30a592ebe83ada7b1e9f61003f | 5,595 | py | Python | RV/portfolio/portfolio/hindex.py | rmomizo/portfolio_bot | b7854c4b5c9f32e9631389bb2238b5bb30d54c8e | [
"MIT"
]
| null | null | null | RV/portfolio/portfolio/hindex.py | rmomizo/portfolio_bot | b7854c4b5c9f32e9631389bb2238b5bb30d54c8e | [
"MIT"
]
| null | null | null | RV/portfolio/portfolio/hindex.py | rmomizo/portfolio_bot | b7854c4b5c9f32e9631389bb2238b5bb30d54c8e | [
"MIT"
]
| null | null | null | from __future__ import division
import itertools
import matplotlib.pyplot as plt
from matplotlib.pyplot import savefig
import random
from random import shuffle
from collections import Counter
def flatten_list(somelist):
if any(isinstance(el, list) for el in somelist) == False:
return somelist
flat_list = list(itertools.chain(*somelist))
return flat_list
def term_frequency(somelist):
"""Returns the term frequency of each unique token in the term list"""
somelist = flatten_list(somelist)
term_freqs = dict(Counter(somelist))
return term_freqs
def tf_ranks(somelist):
term_freqs = term_frequency(somelist)
#sort term frequencies from largest to smallest
freqs = list(set([v for (k,v) in term_freqs.items()]))
#add ranks to sorted term frequencies, creating tuple (term_freqs, rank)
i = 1
rfreqs = []
for item in sorted(freqs, reverse=True):
rfreqs.append((item, i))
i = i + 1
#create dict of keys based on terms
term_ranks ={}
for k, v in term_freqs.items():
term_ranks.setdefault(k, [])
#add (term_freq, rank) to keys
for k, v in term_freqs.items():
for item in rfreqs:
if v == item[0]:
term_ranks[k] = item
return term_ranks
def find_h_index(somelist):
tranks = tf_ranks(somelist)
#h_index = [(key, (val2, 1/(val1-val2)) for (key, (val1, val2)) in tranks.iteritems()]
#plot h_points
values = []
for key, (val1, val2) in tranks.iteritems():
if val1-val2 == 0:
h_point = key, (val1, val2)
#return 'h-point is: ' + str(h_point)
else:
values.append((val2, 1/(val1-val2)))
#[(val2, 1/(val1-val2)) for key, (val1, val2) in tranks.iteritems()]
sorted_values = sorted(values)
xvalues = [val1 for (val1, val2) in sorted_values]
yvalues = [val2 for (val1, val2) in sorted_values]
# plt.scatter(xvalues, yvalues)
# plt.title('h point')
# plt.ylabel('1/ranks - frequency')
# plt.xlabel('ranks')
# plt.show()
d = zip(xvalues, yvalues)
data = [[x,y] for (x,y) in d ]
return data
def find_abmin(somelist):
tranks = tf_ranks(somelist)
subs = []
for key, (val1, val2) in tranks.iteritems():
subs.append((val1-val2))
abmin = min(subs, key=abs)
return abmin
def find_h(somelist):
tranks = tf_ranks(somelist)
abmin = find_abmin(somelist)
for key, (val1, val2) in tranks.iteritems():
if val1-val2 == 0:
h_point = key, (val1, val2)
return h_point
elif val1-val2 ==abmin:
h_point = key, (val1, val2), val1-val2
return h_point
def fast_h(somelist):
h_point = find_h(somelist)
tranks = tf_ranks(somelist)
fast =[]
boundary = h_point[1][1]
for key, (val1, val2) in tranks.iteritems():
if val2 <= boundary:
fast.append((key, (val1, val2)))
return fast
def slow_h(somelist):
h_point = find_h(somelist)
tranks = tf_ranks(somelist)
slow =[]
boundary = h_point[1][1]
for key, (val1, val2) in tranks.iteritems():
if val2 > boundary:
slow.append((key, (val1, val2)))
return slow
def h_tag_nodes(somelist):
"""
    Tag tokens in a processed list: fast tokens (ranked at or above the h-point) are tagged {'h': 'syns'}, slow tokens (ranked below it) are tagged {'h': 'auto'}.
"""
fast = fast_h(somelist)
fasth = [(word, {'h':'syns'}) for (word, rank) in fast]
slow = slow_h(somelist)
slowh = [(word, {'h':'auto'}) for (word,rank) in slow]
h_tags = fasth + slowh
return h_tags
def extract_fast_h(list_of_cycle_length_freqs, cycles):
"""
This is specifically designed to extract lists from lists by comparing the length
of the nested list to the most frequent cycles lengths found using fast_h method
"""
fh = [key for (key, (val1, val2)) in fast_h(list_of_cycle_length_freqs)]
fast_cycles = [cycle for cycle in cycles if len(cycle) in fh]
return fast_cycles
def extract_slow_h(list_of_cycle_length_freqs, cycles):
"""
This is specifically designed to extract lists from lists by comparing the length
of the nested list to the most frequent cycles lengths found using slow_h method
"""
sh = [key for (key, (val1, val2)) in slow_h(list_of_cycle_length_freqs)]
slow_cycles = [cycle for cycle in cycles if len(cycle) in sh]
return slow_cycles
def h_cycles(cycle_length):
fast = [key for (key, (va1, val2)) in fast_h(cycle_length)]
slow = [key for (key, (val1, val2)) in slow_h(cycle_length)]
h_cycles = []
for cycle in cycle_length:
if cycle in fast:
h_cycles.append((cycle, 'autosemantic'))
elif cycle in slow:
h_cycles.append((cycle, 'synsemantic'))
return h_cycles
def find_a_param(somelist):
	# find_h returns a tuple like (key, (frequency, rank)); use the numeric
	# h value (the rank at which rank and frequency coincide), not the tuple.
	h_point = find_h(somelist)
	h = h_point[1][1]
	a = len(somelist) / h**2
	return a
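if __name__ == '__main__':
	# Hedged usage sketch, not part of the original module: a toy token list
	# whose h-point falls on 'cat' (frequency 2 at rank 2); only functions
	# defined above are used.
	tokens = ['the', 'the', 'the', 'the', 'cat', 'cat', 'sat', 'on', 'mat']
	print find_h(tokens)   # ('cat', (2, 2)) -> frequency equals rank
	print fast_h(tokens)   # tokens ranked at or above the h-point ('syns')
	print slow_h(tokens)   # tokens ranked below the h-point ('auto')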
| 32.719298 | 94 | 0.557283 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,101 | 0.196783 |
e3106531f1b9e6f9266ac05f2587a787cfc4e699 | 1,316 | py | Python | operators/device_output.py | a1exwang/fm-synth | fb14aa1dec3798b15a607ac03442decf322bebee | [
"MIT"
]
| 3 | 2018-01-18T12:25:38.000Z | 2020-03-19T13:19:31.000Z | operators/device_output.py | a1exwang/fm-synth | fb14aa1dec3798b15a607ac03442decf322bebee | [
"MIT"
]
| 4 | 2017-04-24T16:36:59.000Z | 2017-05-11T11:23:44.000Z | operators/device_output.py | a1exwang/fm-synth | fb14aa1dec3798b15a607ac03442decf322bebee | [
"MIT"
]
| null | null | null | from PyQt5.QtCore import pyqtSlot
from channels.channel import Channel
from operators.base import OutputOperator
import numpy as np
class DeviceOutput(OutputOperator):
def __init__(self, input_ops, volume=1.0, name=None):
super().__init__(input_ops, name)
self.total_count = 0
self.stream = None
self.volume = volume
self.channel = Channel.get_instance()
self.channel.add_channel(name='MasterVol', slot=self.volume_changed, get_val=lambda: self.volume)
@pyqtSlot(float, name='volume_changed')
def volume_changed(self, vol):
if vol <= 0:
vol = 0
if vol >= 1:
vol = 1
self.volume = vol
def next_buffer(self, input_buffers, n):
if len(input_buffers) == 1:
# mono
# [-1, 1) -> [0, 2**16)
arr = ((np.array(input_buffers[0], dtype='float32') + 1) / 2) * 2**16
arr = np.transpose(np.array([arr, arr]))
else:
# stereo
arr_l = ((np.array(input_buffers[0], dtype='float32') + 1) / 2) * 2 ** 16
arr_r = ((np.array(input_buffers[1], dtype='float32') + 1) / 2) * 2 ** 16
arr = np.transpose(np.array([arr_l, arr_r]))
result = np.array(arr, dtype='int16')
return [result * self.volume]
| 34.631579 | 105 | 0.575988 | 1,181 | 0.897416 | 0 | 0 | 182 | 0.138298 | 0 | 0 | 98 | 0.074468 |
e31093c826bcdc408129c3db911766a20c8f8973 | 524 | py | Python | code/0217-containsDuplicate.py | RRRoger/LeetCodeExercise | 0019a048fcfac9ac9e6f37651b17d01407c92c7d | [
"MIT"
]
| null | null | null | code/0217-containsDuplicate.py | RRRoger/LeetCodeExercise | 0019a048fcfac9ac9e6f37651b17d01407c92c7d | [
"MIT"
]
| null | null | null | code/0217-containsDuplicate.py | RRRoger/LeetCodeExercise | 0019a048fcfac9ac9e6f37651b17d01407c92c7d | [
"MIT"
]
| null | null | null | class Solution(object):
def isPowerOfTwo(self, n):
"""
:type n: int
:rtype: bool
"""
        # left shift: compare n against successive powers of two (2 << power)
if n == 1:
return True
power = 0
flag = True
while flag:
val = 2 << power
if val == n:
return True
elif val > n:
return False
power += 1
if "__main__" == __name__:
solution = Solution()
n = 1025
res = solution.isPowerOfTwo(n)
print(res) | 15.878788 | 34 | 0.412214 | 400 | 0.757576 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.142045 |
e310a6a628079388cd4034e0733f019c20a04124 | 308 | py | Python | yak/rest_social_auth/utils.py | johnchuks/YAK-server | 910af81a7b23e88585479131886c627e33163de1 | [
"MIT"
]
| 15 | 2015-10-10T07:56:23.000Z | 2021-07-26T14:39:17.000Z | yak/rest_social_auth/utils.py | johnchuks/YAK-server | 910af81a7b23e88585479131886c627e33163de1 | [
"MIT"
]
| 26 | 2015-01-06T00:43:50.000Z | 2018-10-29T03:12:09.000Z | yak/rest_social_auth/utils.py | johnchuks/YAK-server | 910af81a7b23e88585479131886c627e33163de1 | [
"MIT"
]
| 8 | 2015-09-28T14:47:52.000Z | 2018-02-09T18:53:53.000Z | from celery.task import task
from django.conf import settings
from social_core.backends.utils import get_backend
@task
def post_social_media(user_social_auth, social_obj):
backend = get_backend(settings.AUTHENTICATION_BACKENDS, user_social_auth.provider)
backend.post(user_social_auth, social_obj)
| 30.8 | 86 | 0.834416 | 0 | 0 | 0 | 0 | 192 | 0.623377 | 0 | 0 | 0 | 0 |
e312667320932a26f8caa618268190a0a7f675cc | 7,753 | py | Python | filepath/NuclearCMC_raw_data_file_list.py | hbar/alsTomographyTools | ec1edd1477367a57ee94e806134aee92e57db977 | [
"MIT"
]
| null | null | null | filepath/NuclearCMC_raw_data_file_list.py | hbar/alsTomographyTools | ec1edd1477367a57ee94e806134aee92e57db977 | [
"MIT"
]
| null | null | null | filepath/NuclearCMC_raw_data_file_list.py | hbar/alsTomographyTools | ec1edd1477367a57ee94e806134aee92e57db977 | [
"MIT"
]
| null | null | null |
#pathList = [
#"/global/project/projectdirs/als/spade/warehouse/als/bl832/phosemann/20160512_092220_tensile7_T700_240mic/raw/20160512_092220_tensile7_T700_240mic.h5",
#"/global/project/projectdirs/als/spade/warehouse/als/bl832/phosemann/20160512_085327_tensile7_T700_200mic/raw/20160512_085327_tensile7_T700_200mic.h5",
#"/global/project/projectdirs/als/spade/warehouse/als/bl832/phosemann/20160512_083018_tensile7_T700_140mic/raw/20160512_083018_tensile7_T700_140mic.h5",
#"/global/project/projectdirs/als/spade/warehouse/als/bl832/phosemann/20160512_080231_tensile7_T700_100mic/raw/20160512_080231_tensile7_T700_100mic.h5",
#"/global/project/projectdirs/als/spade/warehouse/als/bl832/phosemann/20160512_073647_tensile7_T700_060mic/raw/20160512_073647_tensile7_T700_060mic.h5",
#"/global/project/projectdirs/als/spade/warehouse/als/bl832/phosemann/20160512_071026_tensile7_T700_040mic/raw/20160512_071026_tensile7_T700_040mic.h5",
#"/global/project/projectdirs/als/spade/warehouse/als/bl832/phosemann/20160512_064307_tensile7_T700_020mic/raw/20160512_064307_tensile7_T700_020mic.h5",
#"/global/project/projectdirs/als/spade/warehouse/als/bl832/phosemann/20160512_061643_tensile7_T700_baseline1/raw/20160512_061643_tensile7_T700_baseline1.h5",
#....
fileListALL = [
"20131023_095305_TRISO_Shell",
"20131023_100150_TRISO_Shell",
"20131023_100319_TRISO_Shell",
"20131023_111954_TRISO_Shell_2",
"20131023_160337_TRISO_Shell_2",
"20131023_165529_SiC-SiC_fiber1",
"20131023_173205_SiC-SiC_fiber1_LuAG",
"20160427_134903_T3_scan1_5lb",
"20160427_141044_T3_scan2_12lb",
"20160427_143230_T3_scan2_13lb",
"20160427_145232_T3_scan4_broken",
"20160427_155707_T2_scan1_10x_RT_broken",
"20160427_181029_T5_scan1_10x_RT",
"20160427_194922_T5_scan2_10x_700C_lowload",
"20160427_200524_T5_scan3_10x_700C_50um",
"20160427_201945_T5_scan4_10x_700C_100um",
"20160427_203402_T5_scan5_10x_700C_150um",
"20160427_204733_T5_scan6_10x_700C_180um",
"20160427_210036_T5_scan7_10x_700C_205um",
"20160427_211351_T5_scan8_10x_700C_240um",
"20160427_212644_T5_scan9_10x_700C_290um",
"20160427_214157_T5_scan10_10x_700C_340um",
"20160427_215424_T5_scan11_10x_700C_380um_break",
#"20160428_094431_test",
#"20160429_105847_test",
"20160511_114747_tensile6_RT_scan0",
"20160511_120731_tensile6_RT_scan1",
"20160511_124156_tensile6_RT_scan2",
"20160511_133259_tensile6_RT_scan3",
"20160511_141112_tensile6_RT_scan4",
"20160511_144829_tensile6_RT_scan5",
"20160511_152620_tensile6_RT_scan6",
"20160511_155912_tensile6_RT_scan7",
"20160511_164956_tensile6_RT_automation",
"20160511_171448_tensile6_RT_automation",
"20160511_173948_tensile6_RT_automation",
"20160511_180448_tensile6_RT_automation",
"20160511_182950_tensile6_RT_automation",
"20160511_185451_tensile6_RT_automation",
"20160511_191955_tensile6_RT_automation",
"20160511_194454_tensile6_RT_automation",
"20160511_210757_tensile9_T1000_baseline1",
"20160511_212851_tensile9_T1000_baseline2",
"20160511_215551_tensile6_RT_automation",
"20160511_222053_tensile6_RT_automation",
"20160511_224554_tensile6_RT_automation",
"20160511_231059_tensile6_RT_automation",
"20160511_233557_tensile6_RT_automation",
"20160512_000100_tensile6_RT_automation",
"20160512_002605_tensile6_RT_automation",
"20160512_005106_tensile6_RT_automation",
"20160512_011607_tensile6_RT_automation",
"20160512_014105_tensile6_RT_automation",
"20160512_020605_tensile6_RT_automation",
"20160512_023115_tensile6_RT_automation",
"20160512_025622_tensile6_RT_automation",
"20160512_032120_tensile6_RT_automation",
"20160512_034618_tensile6_RT_automation",
"20160512_041123_tensile6_RT_automation",
"20160512_061643_tensile7_T700_baseline1",
"20160512_064307_tensile7_T700_020mic",
"20160512_071026_tensile7_T700_040mic",
"20160512_073647_tensile7_T700_060mic",
"20160512_080231_tensile7_T700_100mic",
"20160512_083018_tensile7_T700_140mic",
"20160512_085327_tensile7_T700_200mic",
"20160512_092220_tensile7_T700_240mic",
"20160915_111315_filename",
"20160915_115049_TowA_10x_testrun",
"20160915_123154_TowA_10x_testrun2",
"20160915_125446_TowA_10x_testrun2",
"20160915_132337_TowA_10x_testrun3",
"20160915_133622_TowA_5x_testrun4",
"20160915_135147_TowA_5x_testrun5",
"20160915_140821_TowA_5x_testrun6",
"20160915_143002_TowA_5x_testrun7",
"20160915_145626_TowA_5x_testrun8",
"20160915_151537_TowA_10x_baseload",
"20160915_153039_TowA_10x_10um",
"20160915_154304_TowA_10x_20um",
"20160915_155844_TowA_10x_50um",
"20160915_161315_TowA_10x_90um",
"20160915_163009_TowA_10x_120um",
"20160915_164534_TowA_10x_150um",
"20160915_170105_TowA_10x_190um",
"20160915_171946_TowA_10x_240um",
"20160915_182720_TowB_10x_baseload",
"20160915_191935_TowB_10x_baseload",
"20160915_194458_TowB_10x_20um",
"20160915_195935_TowB_10x_automation",
"20160915_201303_TowB_10x_automation",
"20160915_202619_TowB_10x_automation",
"20160915_204037_TowB_10x_automation",
"20160915_205552_TowB_10x_automation",
"20160915_211209_TowB_10x_automation",
"20160915_212622_TowB_10x_automation",
"20160915_213947_TowB_10x_automation",
"20160915_222012_TowC_5x_baseload_RT",
"20160915_230717_TowC_5x_automated",
"20160915_231816_TowC_5x_automated",
"20160915_232910_TowC_5x_automated",
"20160915_234856_TowC_5x_automated",
"20160916_000349_TowC_5x_automated",
"20160916_013821_TowD_5x_baseload_RT",
"20160916_020612_TowD_5x_automation",
"20160916_021651_TowD_5x_automation",
"20160916_022742_TowD_5x_automation",
"20160916_023832_TowD_5x_automation",
"20160916_025102_TowD_5x_automation",
"20160916_030236_TowD_5x_automation"
]
fileListShort = [
"20160512_064307_tensile7_T700_020mic",
"20160512_071026_tensile7_T700_040mic",
"20160512_073647_tensile7_T700_060mic",
"20160512_080231_tensile7_T700_100mic",
"20160512_083018_tensile7_T700_140mic",
"20160512_085327_tensile7_T700_200mic",
"20160512_092220_tensile7_T700_240mic"
]
fileListTEST = [
"20160512_061643_tensile7_T700_baseline1",
"20160512_064307_tensile7_T700_020mic",
"20160512_071026_tensile7_T700_040mic"]
fileList20160915 = [
#"20160915_111315_filename",
#"20160915_115049_TowA_10x_testrun",
#"20160915_123154_TowA_10x_testrun2",
#"20160915_125446_TowA_10x_testrun2",
#"20160915_132337_TowA_10x_testrun3",
#"20160915_133622_TowA_5x_testrun4",
#"20160915_135147_TowA_5x_testrun5",
#"20160915_140821_TowA_5x_testrun6",
#"20160915_143002_TowA_5x_testrun7",
#"20160915_145626_TowA_5x_testrun8",
#"20160915_151537_TowA_10x_baseload",
"20160915_153039_TowA_10x_10um",
"20160915_154304_TowA_10x_20um",
"20160915_155844_TowA_10x_50um",
"20160915_161315_TowA_10x_90um",
"20160915_163009_TowA_10x_120um",
"20160915_164534_TowA_10x_150um",
"20160915_170105_TowA_10x_190um",
"20160915_171946_TowA_10x_240um",
"20160915_182720_TowB_10x_baseload",
"20160915_191935_TowB_10x_baseload",
"20160915_194458_TowB_10x_20um",
"20160915_195935_TowB_10x_automation",
"20160915_201303_TowB_10x_automation",
"20160915_202619_TowB_10x_automation",
"20160915_204037_TowB_10x_automation",
"20160915_205552_TowB_10x_automation",
"20160915_211209_TowB_10x_automation",
"20160915_212622_TowB_10x_automation",
"20160915_213947_TowB_10x_automation",
"20160915_222012_TowC_5x_baseload_RT",
"20160915_230717_TowC_5x_automated",
"20160915_231816_TowC_5x_automated",
"20160915_232910_TowC_5x_automated",
"20160915_234856_TowC_5x_automated",
"20160916_000349_TowC_5x_automated",
"20160916_013821_TowD_5x_baseload_RT",
"20160916_020612_TowD_5x_automation",
"20160916_021651_TowD_5x_automation",
"20160916_022742_TowD_5x_automation",
"20160916_023832_TowD_5x_automation",
"20160916_025102_TowD_5x_automation",
"20160916_030236_TowD_5x_automation"
]
fileList = fileList20160915
| 40.591623 | 159 | 0.864569 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,151 | 0.922353 |
e312d0f86ad81db6700f196a91af6d00bac33137 | 3,870 | py | Python | app/discal/cogs/handler.py | Shirataki2/DisCalendar | cfb5ecad6c65911fbb041cbc585d86588de125f5 | [
"MIT"
]
| 6 | 2020-11-29T08:04:07.000Z | 2021-05-07T11:05:10.000Z | app/discal/cogs/handler.py | Shirataki2/DisCalendar | cfb5ecad6c65911fbb041cbc585d86588de125f5 | [
"MIT"
]
| 139 | 2020-11-24T23:37:03.000Z | 2022-03-30T00:18:09.000Z | app/discal/cogs/handler.py | Shirataki2/DisCalendar | cfb5ecad6c65911fbb041cbc585d86588de125f5 | [
"MIT"
]
| 1 | 2021-02-01T15:07:17.000Z | 2021-02-01T15:07:17.000Z | import asyncio
import json
import discord
from discord.ext import commands, tasks
from discal.bot import Bot
from datetime import datetime, timedelta
from discal.logger import get_module_logger
logger = get_module_logger(__name__)
class Handler(commands.Cog):
def __init__(self, bot):
self.bot: Bot = bot
self.postloop.start()
def cog_unload(self):
self.postloop.cancel()
async def post_subtask(self, record):
setting = await self.bot.pool.fetchrow(
(
'SELECT * FROM event_settings WHERE '
'guild_id = $1;'
),
record['guild_id']
)
if setting is None:
return
guild = self.bot.get_guild(int(setting['guild_id']))
channel = guild.get_channel(int(setting['channel_id']))
notifications = [
json.loads(notification)
for notification in record['notifications']
]
notifications.append({'key': -1, 'num': 0, 'type': '分前'})
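        # the 'type' suffixes mean: '分前' = minutes before, '時間前' = hours before, '日前' = days before, '週間前' = weeks before the event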
for notification in notifications:
minutes = int(notification['num'])
if notification['type'] == '時間前':
minutes *= 60
elif notification['type'] == '日前':
minutes *= 24 * 60
elif notification['type'] == '週間前':
minutes *= 7 * 24 * 60
start = record['start_at']
end = record['end_at']
if record['is_all_day']:
start = datetime(
start.year, start.month, start.day,
0, 0, 0, 0
)
end = datetime(
end.year, end.month, end.day,
0, 0, 0, 0
)
now_minus_1 = datetime.now() + timedelta(hours=9, minutes=minutes - 1)
now = datetime.now() + timedelta(hours=9, minutes=minutes)
if start >= now_minus_1 and start < now:
embed = discord.Embed(color=int(record['color'][1:], 16))
embed.title = record['name']
if record['description']:
embed.description = record['description']
if notification['key'] == -1:
embed.set_author(name='以下の予定が開催されます')
else:
prefix = f'{notification["num"]}{notification["type"][:-1]}後に'
embed.set_author(name=f'{prefix}以下の予定が開催されます')
if record['is_all_day']:
if start == end:
v = f'{start.strftime("%Y/%m/%d")}'
else:
v = f'{start.strftime("%Y/%m/%d")} - {end.strftime("%Y/%m/%d")}'
else:
start_date = datetime(start.year, start.month, start.day)
end_date = datetime(end.year, end.month, end.day)
if start_date == end_date:
v = f'{start.strftime("%Y/%m/%d %H:%M")} - {end.strftime("%H:%M")}'
else:
v = f'{start.strftime("%Y/%m/%d %H:%M")} - {end.strftime("%Y/%m/%d %H:%M")}'
embed.add_field(name='日時', value=v, inline=False)
logger.info(f'Send Notification: {record}')
await channel.send(embed=embed)
@tasks.loop(minutes=1)
async def postloop(self):
records = await self.bot.pool.fetch(
(
'SELECT * FROM events WHERE '
'start_at >= $1;'
),
datetime.now()
)
asyncio.gather(*[
self.post_subtask(record)
for record in records
], loop=self.bot.loop)
@postloop.before_loop
async def wait_ready(self):
logger.info('waiting...')
await self.bot.wait_until_ready()
def setup(bot):
bot.add_cog(Handler(bot))
| 36.168224 | 100 | 0.496382 | 3,662 | 0.928028 | 0 | 0 | 501 | 0.126964 | 3,418 | 0.866194 | 727 | 0.184237 |
e312d4733d2d6ab5dadd53371794d5b4269ec969 | 2,738 | py | Python | nids/enipcip/enip_cpf.py | Cyphysecurity/ICS-SDN-1 | c04d9e7bb7ad945166e969e071a2f82fb5bd18bf | [
"MIT"
]
| 4 | 2019-12-17T08:59:57.000Z | 2022-01-09T19:52:27.000Z | nids/enipcip/enip_cpf.py | Cyphysecurity/ICS-SDN-1 | c04d9e7bb7ad945166e969e071a2f82fb5bd18bf | [
"MIT"
]
| 3 | 2020-08-13T16:05:46.000Z | 2021-10-17T07:49:33.000Z | nids/enipcip/enip_cpf.py | Cyphysecurity/ICS-SDN-1 | c04d9e7bb7ad945166e969e071a2f82fb5bd18bf | [
"MIT"
]
| 4 | 2017-06-14T23:41:50.000Z | 2021-03-01T18:54:03.000Z | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (c) 2015 David I. Urbina, [email protected]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Ethernet/IP Common Packet Format Scapy dissector."""
import struct
from scapy import all as scapy_all
from . import utils
class CPF_SequencedAddressItem(scapy_all.Packet):
name = "CPF_SequencedAddressItem"
fields_desc = [
scapy_all.LEIntField("connection_id", 0),
scapy_all.LEIntField("sequence_number", 0),
]
class CPF_AddressDataItem(scapy_all.Packet):
name = "CPF_AddressDataItem"
fields_desc = [
scapy_all.LEShortEnumField('type_id', 0, {
0x0000: "Null Address",
0x00a1: "Connection-based Address",
0x00b1: "Connected Transport Packet",
0x00b2: "Unconnected Message",
0x0100: "ListServices response",
0x8002: 'Sequenced Address Item',
}),
scapy_all.LEShortField("length", None),
]
def extract_padding(self, p):
return p[:self.length], p[self.length:]
def post_build(self, p, pay):
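        # back-fill the little-endian 16-bit length field (bytes 2:4 of the item header) with the payload size when it was left as None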
if self.length is None and pay:
l = len(pay)
p = p[:2] + struct.pack("<H", l) + p[4:]
return p + pay
class ENIP_CPF(scapy_all.Packet):
name = "ENIP_CPF"
fields_desc = [
utils.LEShortLenField("count", 2, count_of="items"),
scapy_all.PacketListField("items", [CPF_AddressDataItem('', 0, 0), CPF_AddressDataItem('', 0, 0)],
CPF_AddressDataItem, count_from=lambda p: p.count),
]
def extract_padding(self, p):
return '', p
scapy_all.bind_layers(CPF_AddressDataItem, CPF_SequencedAddressItem, type_id=0x8002)
| 36.506667 | 106 | 0.685172 | 1,347 | 0.491965 | 0 | 0 | 0 | 0 | 0 | 0 | 1,475 | 0.538714 |
e314ca5cb9348b5a95152247da6288de4e244796 | 1,103 | py | Python | programming_hw_4s/tasks_with_eolymp_tags/t_7_6_(eolimp_5089).py | andriidem308/python_practice | 85a0ebd6ecbecf63eaba170c8279f0a88600237a | [
"MIT"
]
| 2 | 2020-01-27T11:58:54.000Z | 2020-03-30T10:54:08.000Z | programming_hw_4s/tasks_with_eolymp_tags/t_7_6_(eolimp_5089).py | andriidem308/python_practice | 85a0ebd6ecbecf63eaba170c8279f0a88600237a | [
"MIT"
]
| null | null | null | programming_hw_4s/tasks_with_eolymp_tags/t_7_6_(eolimp_5089).py | andriidem308/python_practice | 85a0ebd6ecbecf63eaba170c8279f0a88600237a | [
"MIT"
]
| null | null | null | def merge_sort(arr):
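    # recursive top-down merge sort; sorts arr in place by merging the two sorted halves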
if len(arr) > 1:
middle = len(arr) // 2
lefthalf = arr[:middle]
righthalf = arr[middle:]
merge_sort(lefthalf)
merge_sort(righthalf)
i = j = k = 0
while i < len(lefthalf) and j < len(righthalf):
if lefthalf[i] < righthalf[j]:
arr[k] = lefthalf[i]
i += 1
else:
arr[k] = righthalf[j]
j += 1
k += 1
while i < len(lefthalf):
arr[k] = lefthalf[i]
i += 1
k += 1
while j < len(righthalf):
arr[k] = righthalf[j]
j += 1
k += 1
def insertion_sort(arr, length):
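    # insertion sort of the first 'length' elements of arr, in place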
for i in range(1, length):
item_to_insert = arr[i]
j = i - 1
while j >= 0 and arr[j] > item_to_insert:
arr[j + 1] = arr[j]
j -= 1
arr[j + 1] = item_to_insert
n = int(input())
words = [''] * n
for i in range(n):
words[i] = input()
# merge_sort(words)
insertion_sort(words, n)
for w in words:
print(w)
| 19.350877 | 55 | 0.44243 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.019039 |
e31548410089b175367898405bf5be3d08d7b387 | 418 | py | Python | electionleaflets/apps/content/models.py | electionleaflets/electionleaflets | 4110e96a3035c32d0b6ff3c9f832c5e003728170 | [
"MIT"
]
| null | null | null | electionleaflets/apps/content/models.py | electionleaflets/electionleaflets | 4110e96a3035c32d0b6ff3c9f832c5e003728170 | [
"MIT"
]
| 23 | 2015-02-19T14:02:23.000Z | 2015-04-30T11:14:01.000Z | electionleaflets/apps/content/models.py | electionleaflets/electionleaflets | 4110e96a3035c32d0b6ff3c9f832c5e003728170 | [
"MIT"
]
| 2 | 2015-02-02T19:39:54.000Z | 2017-02-08T09:19:53.000Z | from django.db import models
class ContentBlock(models.Model):
"""
A simple block of HTML content that can be used by various sections of the
site based on the provided name, which acts as a key.
"""
name = models.CharField(max_length=64)
content = models.TextField(blank=True)
def __unicode__(self):
return self.name
class Meta:
db_table = u'contentblock'
| 24.588235 | 79 | 0.662679 | 386 | 0.923445 | 0 | 0 | 0 | 0 | 0 | 0 | 164 | 0.392344 |
e316c0dee9255d1c94a21d0fb077092ad8593724 | 162 | py | Python | Python/1017.py | lucasferreiraa/uri-judge-respostas | f5fc659d53c6b512a3624764041675e62d3fa053 | [
"MIT"
]
| null | null | null | Python/1017.py | lucasferreiraa/uri-judge-respostas | f5fc659d53c6b512a3624764041675e62d3fa053 | [
"MIT"
]
| null | null | null | Python/1017.py | lucasferreiraa/uri-judge-respostas | f5fc659d53c6b512a3624764041675e62d3fa053 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
# URI Judge - Problema 1017
tempo = int(input())
velocidade = int(input())
litros = (velocidade / 12.0) * tempo
print("%.3f" % litros)
| 16.2 | 36 | 0.604938 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 56 | 0.345679 |
e316f4ba8d78958af8ea71861f55f56a0c25786e | 765 | py | Python | Algorithms and Data Structures/sort/qks.py | ioyy900205/PyTorch_mess-around | 90d255e17158699fd7902f7746b35fa18975112e | [
"MIT"
]
| null | null | null | Algorithms and Data Structures/sort/qks.py | ioyy900205/PyTorch_mess-around | 90d255e17158699fd7902f7746b35fa18975112e | [
"MIT"
]
| null | null | null | Algorithms and Data Structures/sort/qks.py | ioyy900205/PyTorch_mess-around | 90d255e17158699fd7902f7746b35fa18975112e | [
"MIT"
]
| null | null | null | '''
Date: 2021-08-10 17:17:35
LastEditors: Liuliang
LastEditTime: 2021-08-10 18:27:56
Description:
'''
import random
import sys
sys.path.append("..")
from bacic_module.random_int_list import random_int_list
def partition(nums, left, right):
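	# two-pointer partition of nums[left..right] around the pivot nums[left]; returns the pivot's final index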
tmp = nums[left]
while left < right:
while left<right and nums[right] >= tmp:
right -= 1
nums[left] = nums[right]
while left<right and nums[left] <= tmp:
left += 1
nums[right] = nums[left]
nums[left] = tmp
return left
def qks(nums, left, right):
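	# in-place quicksort of nums[left..right] (inclusive bounds)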
if left < right:
mid = partition(nums,left,right)
qks(nums,left,mid-1)
qks(nums,mid+1,right)
c = random_int_list(0,10,10)
print(c)
qks(c, 0, len(c) - 1)  # sorts c in place; qks returns None
print(c)
| 21.25 | 56 | 0.605229 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.139869 |
e318e94372f3438841131a8e520812b4b488dc1f | 2,144 | py | Python | Core/config/CYCEnv/run_json_CYC_envs.py | geoffroygivry/CyclopsVFX-Unity | 6ab9ab122b6c3e6200e90d49a0c2bf774e53d985 | [
"MIT"
]
| 17 | 2017-06-27T04:14:42.000Z | 2022-03-07T03:37:44.000Z | Core/config/CYCEnv/run_json_CYC_envs.py | geoffroygivry/Cyclops-VFX | 6ab9ab122b6c3e6200e90d49a0c2bf774e53d985 | [
"MIT"
]
| 2 | 2017-06-14T04:17:51.000Z | 2018-08-23T20:12:44.000Z | Core/config/CYCEnv/run_json_CYC_envs.py | geoffroygivry/CyclopsVFX-Unity | 6ab9ab122b6c3e6200e90d49a0c2bf774e53d985 | [
"MIT"
]
| 2 | 2019-03-18T06:18:33.000Z | 2019-08-14T21:07:53.000Z | import os
import json
def create_json_CYC_envs(root_dir):
DI_ROOT = root_dir
CYC_ROOT = "%s/CyclopsVFX" % DI_ROOT
DATA_FILENAME = os.path.join(CYC_ROOT, "CYC_envs.json")
CYC_HYDRA_PATH = "%s/Hydra" % (CYC_ROOT)
CYC_HYDRA_CACHE = "%s/Hydra/cache" % (CYC_ROOT)
CYC_CORE_PATH = "%s/Core/config/" % (CYC_ROOT)
CYC_NUKE_ENV = "%s/Core/config/NukeEnv" % (CYC_ROOT)
CYC_MAYA_ENV = "%s/Core/config/MayaEnv" % (CYC_ROOT)
CYC_RV_ENV = "%s/Core/config/RVEnv" % (CYC_ROOT)
CYC_MARI_ENV = "%s/Core/config/MariEnv" % (CYC_ROOT)
CYC_3DE_ENV = "%s/Core/config/3DeEnv" % (CYC_ROOT)
CYC_CLARISSE_ENV = "%s/Core/config/ClarisseEnv" % (CYC_ROOT)
CYC_SHOW_ENV = "%s/Core/config/ShowEnv" % (CYC_ROOT)
CYC_POLYPHEMUS_PATH = "%s/Apps/Polyphemus" % (CYC_ROOT)
CYC_STEROPES_PATH = "%s/Apps/Steropes" % (CYC_ROOT)
CYC_ENGINE_NUKE = "%s/Apps/Engines/Nuke" % (CYC_ROOT)
CYC_ICON = "%s/icons" % (CYC_ROOT)
NUKE_PATH = CYC_NUKE_ENV
SHOW_PATH = os.path.join(DI_ROOT, "jobs")
with open(DATA_FILENAME, mode='w') as feedsjson:
CYC_envs = {
"CYC_envs": {
"DI_ROOT": DI_ROOT,
"CYC_ROOT": CYC_ROOT,
"CYC_HYDRA_PATH": CYC_HYDRA_PATH,
"CYC_HYDRA_CACHE": CYC_HYDRA_CACHE,
"CYC_CORE_PATH": CYC_CORE_PATH,
"CYC_NUKE_ENV": CYC_NUKE_ENV,
"CYC_MAYA_ENV": CYC_MAYA_ENV,
"CYC_RV_ENV": CYC_RV_ENV,
"CYC_MARI_ENV": CYC_MARI_ENV,
"CYC_3DE_ENV": CYC_3DE_ENV,
"CYC_CLARISSE_ENV": CYC_CLARISSE_ENV,
"CYC_SHOW_ENV": CYC_SHOW_ENV,
"CYC_POLYPHEMUS_PATH": CYC_POLYPHEMUS_PATH,
"CYC_STEROPES_PATH": CYC_STEROPES_PATH,
"CYC_ENGINE_NUKE": CYC_ENGINE_NUKE,
"CYC_ICON": CYC_ICON,
"NUKE_PATH": NUKE_PATH,
"SHOW_PATH": SHOW_PATH
}
}
json.dump(CYC_envs, feedsjson, indent=4, sort_keys=True)
create_json_CYC_envs("/home/geoff/Dropbox")
| 39.703704 | 65 | 0.58722 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 607 | 0.283116 |
e31a51d9bad6493d50583997c938e58165b7c257 | 956 | py | Python | tests/some_test.py | ShashkovS/drawzero | 3722b2fccb655779b6b62e97b1584683413d7fc0 | [
"MIT"
]
| 2 | 2020-08-06T09:51:43.000Z | 2020-08-06T10:03:58.000Z | tests/some_test.py | ShashkovS/drawzero | 3722b2fccb655779b6b62e97b1584683413d7fc0 | [
"MIT"
]
| null | null | null | tests/some_test.py | ShashkovS/drawzero | 3722b2fccb655779b6b62e97b1584683413d7fc0 | [
"MIT"
]
| null | null | null | import unittest
import drawzero
class ColorTest(unittest.TestCase):
def test_wrong_color(self):
self.assertRaises(TypeError, drawzero._make_color, '#abcd')
self.assertRaises(TypeError, drawzero._make_color, 'dummy')
self.assertRaises(TypeError, drawzero._make_color, '#aabbZZ')
self.assertRaises(TypeError, drawzero._make_color, '#aa bb cc')
self.assertRaises(TypeError, drawzero._make_color, '# abc')
def test_red(self):
self.assertEqual(drawzero._make_color('red'), (255, 0, 0))
self.assertEqual(drawzero._make_color('#FF0000'), (255, 0, 0))
self.assertEqual(drawzero._make_color('#f00'), (255, 0, 0))
self.assertEqual(drawzero._make_color((255, 0, 0)), (255, 0, 0))
self.assertEqual(drawzero._make_color([255, 0, 0]), (255, 0, 0))
################################################################################
if __name__ == "__main__":
unittest.main()
| 38.24 | 80 | 0.614017 | 790 | 0.82636 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.15795 |
e31af962393b8a7c27bf698791ef898144c732f5 | 4,143 | py | Python | test/unit/api/test_api_safety.py | technocreep/FEDOT | c11f19d1d231bd9c1d96d6e39d14697a028f6272 | [
"BSD-3-Clause"
]
| null | null | null | test/unit/api/test_api_safety.py | technocreep/FEDOT | c11f19d1d231bd9c1d96d6e39d14697a028f6272 | [
"BSD-3-Clause"
]
| null | null | null | test/unit/api/test_api_safety.py | technocreep/FEDOT | c11f19d1d231bd9c1d96d6e39d14697a028f6272 | [
"BSD-3-Clause"
]
| null | null | null | import numpy as np
from fedot.api.api_utils.api_data import ApiDataProcessor
from fedot.api.api_utils.api_data_analyser import DataAnalyser
from fedot.api.main import Fedot
from fedot.core.data.data import InputData
from fedot.core.repository.dataset_types import DataTypesEnum
from fedot.core.repository.tasks import TaskTypesEnum, Task
from fedot.preprocessing.preprocessing import DataPreprocessor
from test.unit.api.test_main_api import composer_params
def get_data_analyser_with_specific_params(max_size=18, max_cat_cardinality=5):
""" Create a DataAnalyser object with small max dataset size and small max cardinality for categorical features"""
safety_module = DataAnalyser(safe_mode=True)
preprocessor = ApiDataProcessor(Task(TaskTypesEnum.classification))
safety_module.max_size = max_size
safety_module.max_cat_cardinality = max_cat_cardinality
return safety_module, preprocessor
def get_small_cat_data():
""" Generate tabular data with categorical features."""
features = np.array([["a", "qq", 0.5],
["b", "pp", 1],
["c", np.nan, 3],
["d", "oo", 3],
["d", "oo", 3],
["d", "oo", 3],
["d", "oo", 3],
["d", "oo", 3]], dtype=object)
target = np.array([0, 0, 0, 0, 1, 1, 1, 1])
input_data = InputData(idx=np.arange(features.shape[0]),
features=features, target=target,
data_type=DataTypesEnum.table,
task=Task(TaskTypesEnum.classification))
input_data = DataPreprocessor().obligatory_prepare_for_fit(input_data)
return input_data
def test_safety_label_correct():
"""
    Check that cutting and label encoding are used for pseudo-large data with high-cardinality categorical features
"""
api_safety, api_preprocessor = get_data_analyser_with_specific_params()
data = get_small_cat_data()
recs = api_safety.give_recommendation(data)
api_preprocessor.accept_and_apply_recommendations(data, recs)
assert data.features.shape[0] * data.features.shape[1] <= api_safety.max_size
assert data.features.shape[1] == 3
assert data.features[0, 0] != 'a'
def test_no_safety_needed_correct():
"""
    Check that one-hot encoding is used for small data with low-cardinality categorical features
"""
api_safety, api_preprocessor = get_data_analyser_with_specific_params(max_size=100, max_cat_cardinality=100)
data = get_small_cat_data()
recs = api_safety.give_recommendation(data)
api_preprocessor.accept_and_apply_recommendations(data, recs)
assert data.features.shape[0] * data.features.shape[1] == 24
assert data.features.shape[1] == 3
assert data.features[0, 0] == 'a'
def test_api_fit_predict_with_pseudo_large_dataset_with_label_correct():
"""
    Test that safe mode in the API cuts large data and uses LabelEncoder for features with high cardinality
"""
model = Fedot(problem="classification",
composer_params=composer_params)
model.data_analyser.max_cat_cardinality = 5
model.data_analyser.max_size = 18
data = get_small_cat_data()
pipeline = model.fit(features=data, predefined_model='auto')
pipeline.predict(data)
model.predict(features=data)
    # there should be only tree-like models + data operations
assert len(model.params.api_params['available_operations']) == 6
assert 'logit' not in model.params.api_params['available_operations']
def test_api_fit_predict_with_pseudo_large_dataset_with_onehot_correct():
"""
    Test that safe mode in the API uses OneHotEncoder with small data with low cardinality
"""
model = Fedot(problem="classification",
composer_params=composer_params)
model.data_analyser.max_size = 1000
data = get_small_cat_data()
model.fit(features=data, predefined_model='auto')
model.predict(features=data)
# there should be all light models + data operations
assert 'logit' in model.params.api_params['available_operations']
| 42.71134 | 118 | 0.69901 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 906 | 0.218682 |
e31bbe934af2c97028c0e66dc59a02ae268f0c31 | 7,765 | py | Python | parallelpy/parallelpy.py | krober/parallelpy | 356fa0b75d3de2fa695b2fd64f0a53555f6bf55f | [
"MIT"
]
| null | null | null | parallelpy/parallelpy.py | krober/parallelpy | 356fa0b75d3de2fa695b2fd64f0a53555f6bf55f | [
"MIT"
]
| 1 | 2018-08-26T03:01:18.000Z | 2018-08-26T03:01:18.000Z | parallelpy/parallelpy.py | krober/parallelpy | 356fa0b75d3de2fa695b2fd64f0a53555f6bf55f | [
"MIT"
]
| null | null | null | from multiprocessing import cpu_count, Manager, Process
from time import sleep
class Parallelizer:
def __init__(
self,
*,
target: 'function',
args: list,
enable_results: bool,
auto_proc_count: bool,
max_proc_count: int):
"""
Constructor, no positional args needed, all named args required
:param target: function: target for multiple processes
:param args: list: args to be passed to each instance of target.
Target function must accept an individual arg as its first param
(though can be tuple/dict/any encapsulating data structure).
:param enable_results: bool: enables/disables a managed proxylist
to hold data from the target function. Disable if you only need
the target func to run and do not need it to modify/persist data.
If enabled, passes managed proxylist to target func, therefore
target func must accept the list as its second param.
:param auto_proc_count: bool: True=let class determine number of
processes to use - calculates based on number of cores installed
and number of operations to be performed.
False=use max_proc_count number of processes.
:param max_proc_count: int: max number of processes to be spawned
simultaneously
"""
self.target = target
self.args = args
self.enable_results = enable_results
self.__proc_count = 0
self.__cpu_count = cpu_count()
self.__iterations = len(args)
self.__processes = []
self.__incoming = 0
self.__running = 0
self.__finished = [False for _ in range(len(args))]
self.__set_proc_count(auto_proc_count, max_proc_count)
def run(self):
"""
Runs the target function, manages core/process count/activity
:return: list: results, unpackaged from manager.list proxy.
Recommended to enclose results in target function in tuples
or other data structures before appending to the proxy list
to avoid race conditions.
"""
if self.enable_results:
return self.__run_managed()
else:
self.__run_unmanaged()
def __run_managed(self):
"""
Configures process manager and runs procs
:return: List: converted from ProxyList
"""
with Manager() as manager:
results = manager.list()
self.__generate_procs(results)
self.__run_procs()
self.__finalize_procs()
results = list(results)
return results
def __run_unmanaged(self):
"""
Runs data-unmanaged procs - for when you just want to run in
parallel and don't need 'return' data
:return: nothing
"""
self.__generate_procs()
self.__run_procs()
self.__finalize_procs()
def __run_procs(self):
"""
Runs processes, prints self on exception and re-raises exception
:return: nothing
"""
try:
while self.__incoming < self.__iterations:
# sleep reduces the CPU impact of this 'manager loop'
sleep(1 / 100)
self.__mark_finished_procs()
self.__spawn_available_procs()
except Exception as e:
print(self)
raise e
def __set_proc_count_auto(self, max_procs: int):
"""
Calculates optimal proc_count to reduce ram usage, but also
reduce wait time when only a single thread may be running
:param max_procs: int: max procs to allow simultaneously
:return: None
"""
if (self.__iterations <= self.__cpu_count
and self.__iterations <= max_procs):
self.__proc_count = self.__iterations
elif max_procs <= self.__cpu_count:
self.__proc_count = max_procs
else:
self.__proc_count = self.__cpu_count
for i in range(self.__cpu_count, max_procs + 1):
if self.__iterations % i == 0:
self.__proc_count = i
break
print(f'Using {self.__proc_count} processes')
def __set_proc_count_manual(self, count: int):
"""
Manually set the proc count. Use with care when using
very large counts. Higher count = higher ram usage.
:param count: int: number of procs to run simultaneously
:return: None
"""
self.__proc_count = count
def __validate_proc_count(self, count: int):
"""
Throws ValueError if count < 1
:return: None
"""
if count < 1:
raise ValueError('Number of processes must be > 0')
elif isinstance(count, bool) or not isinstance(count, int):
raise ValueError('Number of processes must be an integer')
def __set_proc_count(self, auto_proc_count: bool, max_proc_count: int):
"""
Sets proc count based on auto_procs true/false
:param auto_proc_count: bool: use auto proc count?
:param max_proc_count: int: max num of procs to run simultaneously
:return: None
"""
self.__validate_proc_count(max_proc_count)
if auto_proc_count:
self.__set_proc_count_auto(max_proc_count)
else:
self.__set_proc_count_manual(max_proc_count)
def __generate_procs(self, managed_results=None):
"""
Generates a list of procs ready for starting
:param managed_results: proxy manager.list: to store
data from target func
:return: None
"""
if managed_results is not None:
for arg in self.args:
self.__processes.append(Process(
target=self.target,
args=(arg, managed_results)
))
else:
for arg in self.args:
self.__processes.append(Process(
target=self.target,
args=(arg,)
))
def __spawn_available_procs(self):
"""
Spawns procs if the number of currently running procs is
less than the number of max_procs defined
:return: None
"""
if self.__running < self.__proc_count:
self.__processes[self.__incoming].start()
self.__incoming += 1
self.__running += 1
def __mark_finished_procs(self):
"""
Checks currently running procs for status, marks finished
:return: None
"""
for i in range(self.__incoming):
if not self.__processes[i].is_alive():
if not self.__finished[i]:
self.__running -= 1
self.__finished[i] = True
def __finalize_procs(self):
"""
Finalizes procs/waits on remaining running procs
:return: None
"""
[process.join() for process in self.__processes]
self.__mark_finished_procs()
def __str__(self):
stats = f'\n' \
f'Target function: {self.target.__name__}\n' \
f'Number of iters: {self.__iterations}\n' \
f'Number of threads: {self.__proc_count}\n' \
f'Number of procs: {len(self.__processes)}\n' \
f'Current incoming: {self.__incoming}\n' \
f'Current running: {self.__running}\n' \
f'Current finished: {sum(self.__finished)}' \
f'\n'
return stats
def __repr__(self):
return self.__str__()
| 34.665179 | 79 | 0.582228 | 7,683 | 0.98944 | 0 | 0 | 0 | 0 | 0 | 0 | 3,618 | 0.465937 |
e31cd77f7061ef13a9e31f26ee8ba9f374dfc272 | 9,781 | py | Python | sfa/util/api.py | planetlab/sfa | d0f743e245e0bb24d7ed1016bcc6e61d1e558a95 | [
"MIT"
]
| 1 | 2015-11-19T13:34:45.000Z | 2015-11-19T13:34:45.000Z | sfa/util/api.py | planetlab/sfa | d0f743e245e0bb24d7ed1016bcc6e61d1e558a95 | [
"MIT"
]
| null | null | null | sfa/util/api.py | planetlab/sfa | d0f743e245e0bb24d7ed1016bcc6e61d1e558a95 | [
"MIT"
]
| null | null | null | #
# SFA XML-RPC and SOAP interfaces
#
import sys
import os
import traceback
import string
import xmlrpclib
import sfa.util.xmlrpcprotocol as xmlrpcprotocol
from sfa.util.sfalogging import logger
from sfa.trust.auth import Auth
from sfa.util.config import *
from sfa.util.faults import *
from sfa.util.cache import Cache
from sfa.trust.credential import *
from sfa.trust.certificate import *
# See "2.2 Characters" in the XML specification:
#
# #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
# avoiding
# [#x7F-#x84], [#x86-#x9F], [#xFDD0-#xFDDF]
invalid_xml_ascii = map(chr, range(0x0, 0x8) + [0xB, 0xC] + range(0xE, 0x1F))
xml_escape_table = string.maketrans("".join(invalid_xml_ascii), "?" * len(invalid_xml_ascii))
def xmlrpclib_escape(s, replace = string.replace):
"""
xmlrpclib does not handle invalid 7-bit control characters. This
function augments xmlrpclib.escape, which by default only replaces
'&', '<', and '>' with entities.
"""
# This is the standard xmlrpclib.escape function
s = replace(s, "&", "&")
s = replace(s, "<", "<")
s = replace(s, ">", ">",)
# Replace invalid 7-bit control characters with '?'
return s.translate(xml_escape_table)
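# Illustrative usage sketch (not part of the original module): the escape hook
# entity-encodes markup characters and replaces invalid 7-bit control
# characters with '?', e.g.
#   xmlrpclib_escape("a<b & c" + chr(0x07))  ->  'a&lt;b &amp; c?'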
def xmlrpclib_dump(self, value, write):
"""
xmlrpclib cannot marshal instances of subclasses of built-in
types. This function overrides xmlrpclib.Marshaller.__dump so that
any value that is an instance of one of its acceptable types is
marshalled as that type.
xmlrpclib also cannot handle invalid 7-bit control characters. See
above.
"""
# Use our escape function
args = [self, value, write]
if isinstance(value, (str, unicode)):
args.append(xmlrpclib_escape)
try:
# Try for an exact match first
f = self.dispatch[type(value)]
except KeyError:
raise
# Try for an isinstance() match
for Type, f in self.dispatch.iteritems():
if isinstance(value, Type):
f(*args)
return
raise TypeError, "cannot marshal %s objects" % type(value)
else:
f(*args)
# You can't hide from me!
xmlrpclib.Marshaller._Marshaller__dump = xmlrpclib_dump
# SOAP support is optional
try:
import SOAPpy
from SOAPpy.Parser import parseSOAPRPC
from SOAPpy.Types import faultType
from SOAPpy.NS import NS
from SOAPpy.SOAPBuilder import buildSOAP
except ImportError:
SOAPpy = None
def import_deep(name):
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
class ManagerWrapper:
"""
This class acts as a wrapper around an SFA interface manager module, but
    can be used with any python module. The purpose of this class is to raise a
    SfaNotImplemented exception if someone attempts to use an attribute
    (could be a callable) that is not available in the library, by checking the
    library using hasattr. This helps to communicate better error messages
    to the users and developers in the event that a specific operation
    is not implemented by a library, and will generally be more helpful than
    the standard AttributeError.
"""
def __init__(self, manager, interface):
self.manager = manager
self.interface = interface
def __getattr__(self, method):
if not hasattr(self.manager, method):
raise SfaNotImplemented(method, self.interface)
return getattr(self.manager, method)
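# Usage sketch (illustrative; the module name below is hypothetical): wrap a
# manager module so that missing operations surface as SfaNotImplemented
# instead of a bare AttributeError.
#   registry_mgr = ManagerWrapper(registry_manager_module, 'registry')
#   registry_mgr.resolve(...)       # forwarded when the module implements it
#   registry_mgr.missing_call(...)  # raises SfaNotImplemented('missing_call', 'registry')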
class BaseAPI:
protocol = None
def __init__(self, config = "/etc/sfa/sfa_config.py", encoding = "utf-8",
methods='sfa.methods', peer_cert = None, interface = None,
key_file = None, cert_file = None, cache = None):
self.encoding = encoding
# flat list of method names
self.methods_module = methods_module = __import__(methods, fromlist=[methods])
self.methods = methods_module.all
# Better just be documenting the API
if config is None:
return
# Load configuration
self.config = Config(config)
self.auth = Auth(peer_cert)
self.hrn = self.config.SFA_INTERFACE_HRN
self.interface = interface
self.key_file = key_file
self.key = Keypair(filename=self.key_file)
self.cert_file = cert_file
self.cert = Certificate(filename=self.cert_file)
self.cache = cache
if self.cache is None:
self.cache = Cache()
self.credential = None
self.source = None
self.time_format = "%Y-%m-%d %H:%M:%S"
self.logger = logger
# load registries
from sfa.server.registry import Registries
self.registries = Registries()
# load aggregates
from sfa.server.aggregate import Aggregates
self.aggregates = Aggregates()
def get_interface_manager(self, manager_base = 'sfa.managers'):
"""
Returns the appropriate manager module for this interface.
Modules are usually found in sfa/managers/
"""
if self.interface in ['registry']:
mgr_type = self.config.SFA_REGISTRY_TYPE
manager_module = manager_base + ".registry_manager_%s" % mgr_type
elif self.interface in ['aggregate']:
mgr_type = self.config.SFA_AGGREGATE_TYPE
manager_module = manager_base + ".aggregate_manager_%s" % mgr_type
elif self.interface in ['slicemgr', 'sm']:
mgr_type = self.config.SFA_SM_TYPE
manager_module = manager_base + ".slice_manager_%s" % mgr_type
elif self.interface in ['component', 'cm']:
mgr_type = self.config.SFA_CM_TYPE
manager_module = manager_base + ".component_manager_%s" % mgr_type
else:
raise SfaAPIError("No manager for interface: %s" % self.interface)
manager = __import__(manager_module, fromlist=[manager_base])
        # this isn't necessary but will help to produce better error messages
# if someone tries to access an operation this manager doesn't implement
manager = ManagerWrapper(manager, self.interface)
return manager
def callable(self, method):
"""
Return a new instance of the specified method.
"""
# Look up method
if method not in self.methods:
raise SfaInvalidAPIMethod, method
# Get new instance of method
try:
classname = method.split(".")[-1]
module = __import__(self.methods_module.__name__ + "." + method, globals(), locals(), [classname])
            # instantiate the requested method class exactly once and return it
            return getattr(module, classname)(self)
        except (ImportError, AttributeError):
raise SfaInvalidAPIMethod, method
def call(self, source, method, *args):
"""
Call the named method from the specified source with the
specified arguments.
"""
function = self.callable(method)
function.source = source
self.source = source
return function(*args)
def handle(self, source, data, method_map):
"""
Handle an XML-RPC or SOAP request from the specified source.
"""
# Parse request into method name and arguments
try:
interface = xmlrpclib
self.protocol = 'xmlrpclib'
(args, method) = xmlrpclib.loads(data)
if method_map.has_key(method):
method = method_map[method]
methodresponse = True
except Exception, e:
if SOAPpy is not None:
self.protocol = 'soap'
interface = SOAPpy
(r, header, body, attrs) = parseSOAPRPC(data, header = 1, body = 1, attrs = 1)
method = r._name
args = r._aslist()
# XXX Support named arguments
else:
raise e
try:
result = self.call(source, method, *args)
except SfaFault, fault:
result = fault
except Exception, fault:
logger.log_exc("BaseAPI.handle has caught Exception")
result = SfaAPIError(fault)
# Return result
response = self.prepare_response(result, method)
return response
def prepare_response(self, result, method=""):
"""
convert result to a valid xmlrpc or soap response
"""
if self.protocol == 'xmlrpclib':
if not isinstance(result, SfaFault):
result = (result,)
response = xmlrpclib.dumps(result, methodresponse = True, encoding = self.encoding, allow_none = 1)
elif self.protocol == 'soap':
if isinstance(result, Exception):
result = faultParameter(NS.ENV_T + ":Server", "Method Failed", method)
result._setDetail("Fault %d: %s" % (result.faultCode, result.faultString))
else:
response = buildSOAP(kw = {'%sResponse' % method: {'Result': result}}, encoding = self.encoding)
else:
if isinstance(result, Exception):
raise result
return response
def get_cached_server_version(self, server):
cache_key = server.url + "-version"
server_version = None
if self.cache:
server_version = self.cache.get(cache_key)
if not server_version:
server_version = server.GetVersion()
# cache version for 24 hours
self.cache.add(cache_key, server_version, ttl= 60*60*24)
return server_version
| 34.807829 | 112 | 0.618546 | 7,146 | 0.7306 | 0 | 0 | 0 | 0 | 0 | 0 | 2,873 | 0.293733 |
e31d9fd874884c64a5cfd7e556213a44724536fb | 9,507 | py | Python | deanslist/deanslist.py | upeducationnetwork/deanslist-python | 226eda2580055427119397bc28e7976f019d7301 | [
"MIT"
]
| null | null | null | deanslist/deanslist.py | upeducationnetwork/deanslist-python | 226eda2580055427119397bc28e7976f019d7301 | [
"MIT"
]
| 2 | 2016-05-16T19:54:26.000Z | 2016-05-20T12:02:20.000Z | deanslist/deanslist.py | upeducationnetwork/deanslist-python | 226eda2580055427119397bc28e7976f019d7301 | [
"MIT"
]
| null | null | null | __author__ = 'rknight'
import os
import csv
import logging
import datetime
from requests_futures.sessions import FuturesSession
def dl(reports, dlkeys):
# Primary call
# Send requests
allreports = dlrequest(reports=reports, dlkeys=dlkeys)
# Write results
for outreport in allreports.keys():
# Points and incidents require unique parsing
if outreport == 'points':
writepoints('points.csv', report=allreports[outreport])
elif outreport == 'coaching':
writecoaching('coaching.csv', report=allreports[outreport])
elif outreport == 'coaching_evidence':
writeevidence('coaching_evidence.csv', report=allreports[outreport])
elif outreport == 'incidents':
writeincidents(report=allreports[outreport])
else:
# Merge the schools into a single list
dat = []
for school in allreports[outreport]['data']:
dat.extend(school['data'])
writefile('{0}.csv'.format(outreport), dataset=dat, rewrite=allreports[outreport]['write'])
def dlrequest(reports, dlkeys):
'''
Primary function to get data for a range of dates
Returns a dict. Structure should be:
{'outname': {'data': [all the data for this report with one list item per school],
'write': whether to write or append},
'second outname': {'data': [all the data for this report with one list item per key],
'write': whether to write or append},
etc
}
'''
session = FuturesSession(max_workers=10)
allreports = {}
futures = []
# This is run in background once the download is completed
def bg_call(sess, resp, outname):
if resp.status_code == 200:
dat = resp.json()
allreports[outname]['data'].append(dat)
else:
logging.warning('Response code {0} for {1}'.format(resp.status_code, resp.url))
# Throw the requests at Deanslist
for ireport in reports:
outname = ireport['outname']
url = ireport['reporturl']
allreports[outname] = {'data': [], 'write': ireport.get('rewrite', 'w')}
for dlkey in dlkeys:
futures.append(session.get(url,
params={'sdt': ireport.get('pulldate', ''),
'edt': ireport.get('enddate', ''),
'apikey': dlkey},
background_callback=lambda sess, resp, outname=outname: bg_call(sess, resp, outname)))
# Parse errors in the results
for f in futures:
try:
f.result()
except:
logging.warning('{0}'.format(f.exception))
continue
return allreports
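# Usage sketch (illustrative assumption inferred from the parameters read above;
# the URL and API keys are hypothetical placeholders, not real endpoints):
#   example_reports = [
#       {'outname': 'incidents',
#        'reporturl': 'https://example.deanslistsoftware.com/api/v1/incidents',
#        'pulldate': '2016-08-01', 'enddate': '2016-08-31', 'rewrite': 'w'},
#   ]
#   example_keys = ['school-1-api-key', 'school-2-api-key']
#   dl(example_reports, example_keys)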
def dlall(outname, reporturl, startat, dlkeys, endat='', max_workers=5):
# Get all data for large datasets by sending a separate request for each week of data
one_week = datetime.timedelta(days=7)
one_day = datetime.timedelta(days=1)
try:
sdt = datetime.datetime.strptime(startat, '%Y-%m-%d').date()
except ValueError:
raise ValueError("Incorrect data format for startat, should be YYYY-MM-DD")
if endat != '':
try:
endat = datetime.datetime.strptime(endat, '%Y-%m-%d').date()
except ValueError:
raise ValueError("Incorrect data format for endat, should be YYYY-MM-DD")
else:
endat = datetime.date.today()
edt = sdt + one_week
alldat = []
session = FuturesSession(max_workers=max_workers)
while edt < endat + one_week:
# outname_date = outname + "/" + outname + "_Week_" + edt.strftime("%Y-%m-%d")
dat = dlrequest_single(reporturl=reporturl, sdt=sdt, edt=edt, dlkeys=dlkeys, session=session)
alldat.extend(dat)
sdt = edt + one_day
edt = edt + one_week
# Write to hard drive
if len(alldat) > 0:
writefile('{0}.csv'.format(outname), dataset=alldat, rewrite='w')
def dlrequest_single(reporturl, sdt, edt, dlkeys, session = FuturesSession(max_workers=5)):
"""
    Request a single report for all schools for a date range and return the combined rows
"""
alldat = []
futures = []
url = reporturl
# Throw the requests at Deanslist
for dlkey in dlkeys:
futures.append(session.get(url,
params={'sdt': sdt,
'edt': edt,
'apikey': dlkey}))
# Parse errors in the results
for f in futures:
try:
response = f.result()
        except MemoryError:
            logging.warning('Memory Error.')
            # nothing was returned for this future, so skip to the next one
            continue
if response.status_code != 200:
logging.warning('Response code {0} for {1}'.format(response.status_code, response.url))
continue
# Append results
dat = response.json()
alldat.extend(dat['data'])
return alldat
def writefile(outname, dataset, headers=None, rewrite='a'):
"""
Utility to write results to file
"""
if len(dataset) == 0:
logging.warning('No data for {0}'.format(outname))
return
# Make default headers
if not headers:
headers = sorted(list(dataset[0].keys()))
# Flag to write headers if its the first time
exists = os.path.isfile(outname)
# Write output
with open(outname, rewrite, encoding='utf-8') as file:
outfile = csv.DictWriter(file, headers, lineterminator='\n')
if not exists or rewrite == 'w':
outfile.writeheader()
for row in dataset:
outfile.writerow(row)
def writepoints(outname, report):
# Parse and write points
    # report['data'] holds one payload per school; bail out if nothing came back
    if not report['data']:
logging.warning('No points data')
return
points = []
# Flatten
for dat in report['data']:
for row in dat['Students']:
for item in row['Terms']:
item['StudentID'] = row['StudentID']
item['StudentSchoolID'] = row['StudentSchoolID']
item['SchoolID'] = dat['SchoolID']
try:
item['StartDate'] = item['StartDate']['date']
item['EndDate'] = item['EndDate']['date']
except:
pass
points.append(item)
# Write
writefile(outname, dataset=points, rewrite=report['write'])
# Parse & write the incidents module, which has a unique json structure
def writeincidents(report):
incidents = []
penalties = []
actions = []
custfields = []
# All possible ids
inc_id_list = ['IncidentID', 'SchoolID', 'StudentID', 'StudentFirst', 'StudentLast',
'StudentSchoolID', 'GradeLevelShort', 'HomeroomName', 'Infraction', 'Location', 'ReportedDetails']
for school in report['data']:
for idat in school['data']:
# grab ids in this report
inc_id = {this_id: idat[this_id] for this_id in inc_id_list}
# Flatten
for timefield in ['CreateTS', 'UpdateTS', 'IssueTS', 'ReviewTS', 'CloseTS', 'ReturnDate']:
try:
idat[timefield] = idat.pop(timefield)['date']
except:
idat[timefield] = ''
# Actions
act_list = idat.pop('Actions')
            idat['NumActions'] = len(act_list)
for iact in act_list:
iact.update(inc_id)
actions.append(iact)
# Penalties
pen_list = idat.pop('Penalties')
            idat['NumPenalties'] = len(pen_list)
for ipen in pen_list:
ipen.update(inc_id)
penalties.append(ipen)
# Custom fields (not currently used)
if 'Custom_Fields' in idat:
cust_list = idat.pop('Custom_Fields')
for field in cust_list:
if field['StringValue'] == 'Y':
custfields.append({'IncidentID': inc_id['IncidentID'], 'SpecialCase': field['FieldName']})
# Incidents
incidents.append(idat)
# Export
exportdict = {'incidents': incidents, 'incidents-penalties': penalties, 'incidents-actions': actions, 'incidents-custfields': custfields}
for key in exportdict:
writefile('{0}.csv'.format(key), dataset=exportdict[key], rewrite='w')
def writecoaching(outname, report):
# Flatten
coaching = []
for school in report['data']:
for observation in school['data']:
for timefield in ['DebriefDate', 'ReviewDate', 'LessonDate']:
try:
observation[timefield] = observation.pop(timefield)['date']
except:
observation[timefield] = ''
feedbackitems = observation.pop('FeedbackItems')
for feedbackitem in feedbackitems:
feedbackitem.update(observation)
coaching.append(feedbackitem)
writefile(outname, dataset=coaching, rewrite=report['write'])
return coaching
def writeevidence(outname, report):
# Flatten
coaching = []
for school in report['data']:
for observation in school['data']:
for timefield in ['EvidenceDate']:
try:
observation[timefield] = observation.pop(timefield)['date']
except:
observation[timefield] = ''
coaching.append(observation)
writefile(outname, dataset=coaching, rewrite=report['write'])
| 31.376238 | 141 | 0.577154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,714 | 0.285474 |
e31da554e9612910aa7b87468de6e4101ac08273 | 7,210 | py | Python | anchore_engine/services/policy_engine/api/models/image.py | roachmd/anchore-engine | 521d6796778139a95f51542670714205c2735a81 | [
"Apache-2.0"
]
| null | null | null | anchore_engine/services/policy_engine/api/models/image.py | roachmd/anchore-engine | 521d6796778139a95f51542670714205c2735a81 | [
"Apache-2.0"
]
| null | null | null | anchore_engine/services/policy_engine/api/models/image.py | roachmd/anchore-engine | 521d6796778139a95f51542670714205c2735a81 | [
"Apache-2.0"
]
| null | null | null | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from anchore_engine.services.policy_engine.api.models.base_model_ import Model
from anchore_engine.services.policy_engine.api import util
class Image(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id=None, digest=None, user_id=None, state=None, distro_namespace=None, created_at=None, last_modified=None, tags=None): # noqa: E501
"""Image - a model defined in Swagger
:param id: The id of this Image. # noqa: E501
:type id: str
:param digest: The digest of this Image. # noqa: E501
:type digest: str
:param user_id: The user_id of this Image. # noqa: E501
:type user_id: str
:param state: The state of this Image. # noqa: E501
:type state: str
:param distro_namespace: The distro_namespace of this Image. # noqa: E501
:type distro_namespace: str
:param created_at: The created_at of this Image. # noqa: E501
:type created_at: datetime
:param last_modified: The last_modified of this Image. # noqa: E501
:type last_modified: datetime
:param tags: The tags of this Image. # noqa: E501
:type tags: List[str]
"""
self.swagger_types = {
'id': str,
'digest': str,
'user_id': str,
'state': str,
'distro_namespace': str,
'created_at': datetime,
'last_modified': datetime,
'tags': List[str]
}
self.attribute_map = {
'id': 'id',
'digest': 'digest',
'user_id': 'user_id',
'state': 'state',
'distro_namespace': 'distro_namespace',
'created_at': 'created_at',
'last_modified': 'last_modified',
'tags': 'tags'
}
self._id = id
self._digest = digest
self._user_id = user_id
self._state = state
self._distro_namespace = distro_namespace
self._created_at = created_at
self._last_modified = last_modified
self._tags = tags
@classmethod
def from_dict(cls, dikt):
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Image of this Image. # noqa: E501
:rtype: Image
"""
return util.deserialize_model(dikt, cls)
@property
def id(self):
"""Gets the id of this Image.
:return: The id of this Image.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Image.
:param id: The id of this Image.
:type id: str
"""
self._id = id
@property
def digest(self):
"""Gets the digest of this Image.
:return: The digest of this Image.
:rtype: str
"""
return self._digest
@digest.setter
def digest(self, digest):
"""Sets the digest of this Image.
:param digest: The digest of this Image.
:type digest: str
"""
self._digest = digest
@property
def user_id(self):
"""Gets the user_id of this Image.
:return: The user_id of this Image.
:rtype: str
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this Image.
:param user_id: The user_id of this Image.
:type user_id: str
"""
self._user_id = user_id
@property
def state(self):
"""Gets the state of this Image.
State of the image in the policy evaluation system # noqa: E501
:return: The state of this Image.
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this Image.
State of the image in the policy evaluation system # noqa: E501
:param state: The state of this Image.
:type state: str
"""
allowed_values = ["failed", "initializing", "analyzing", "analyzed"] # noqa: E501
if state not in allowed_values:
raise ValueError(
"Invalid value for `state` ({0}), must be one of {1}"
.format(state, allowed_values)
)
self._state = state
@property
def distro_namespace(self):
"""Gets the distro_namespace of this Image.
The namespace identifier for this image for purposes of CVE matches, etc # noqa: E501
:return: The distro_namespace of this Image.
:rtype: str
"""
return self._distro_namespace
@distro_namespace.setter
def distro_namespace(self, distro_namespace):
"""Sets the distro_namespace of this Image.
The namespace identifier for this image for purposes of CVE matches, etc # noqa: E501
:param distro_namespace: The distro_namespace of this Image.
:type distro_namespace: str
"""
self._distro_namespace = distro_namespace
@property
def created_at(self):
"""Gets the created_at of this Image.
The timestamp on when this image record was created, not the image itself # noqa: E501
:return: The created_at of this Image.
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this Image.
The timestamp on when this image record was created, not the image itself # noqa: E501
:param created_at: The created_at of this Image.
:type created_at: datetime
"""
self._created_at = created_at
@property
def last_modified(self):
"""Gets the last_modified of this Image.
Time the image record in this service was last updated # noqa: E501
:return: The last_modified of this Image.
:rtype: datetime
"""
return self._last_modified
@last_modified.setter
def last_modified(self, last_modified):
"""Sets the last_modified of this Image.
Time the image record in this service was last updated # noqa: E501
:param last_modified: The last_modified of this Image.
:type last_modified: datetime
"""
self._last_modified = last_modified
@property
def tags(self):
"""Gets the tags of this Image.
List of tags currently applied to the image. Updated by new tag events. Similarly scoped by the user_id # noqa: E501
:return: The tags of this Image.
:rtype: List[str]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this Image.
List of tags currently applied to the image. Updated by new tag events. Similarly scoped by the user_id # noqa: E501
:param tags: The tags of this Image.
:type tags: List[str]
"""
self._tags = tags
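# Usage sketch (illustrative; exercises only the generated from_dict helper above,
# and the field values are hypothetical):
#   img = Image.from_dict({'id': 'sha256:abc', 'user_id': 'admin',
#                          'state': 'analyzed', 'distro_namespace': 'debian:9',
#                          'tags': ['library/nginx:latest']})
#   assert img.state == 'analyzed'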
| 27.414449 | 156 | 0.59251 | 6,917 | 0.959362 | 0 | 0 | 4,795 | 0.665049 | 0 | 0 | 4,422 | 0.613315 |
e31e1e564d0eb470b1f222fdeb2e2e5813305ea2 | 28,531 | py | Python | src/pte_decode/decoding/decoder_factory.py | richardkoehler/pte-decode | d1a466c166e5c3dd5e2c0caf1b12492f0e93bc57 | [
"MIT"
]
| null | null | null | src/pte_decode/decoding/decoder_factory.py | richardkoehler/pte-decode | d1a466c166e5c3dd5e2c0caf1b12492f0e93bc57 | [
"MIT"
]
| null | null | null | src/pte_decode/decoding/decoder_factory.py | richardkoehler/pte-decode | d1a466c166e5c3dd5e2c0caf1b12492f0e93bc57 | [
"MIT"
]
| null | null | null | """Module for machine learning models."""
from dataclasses import dataclass
from typing import Any, Optional, Union
import numpy as np
import pandas as pd
from bayes_opt import BayesianOptimization
from catboost import CatBoostClassifier
from sklearn.discriminant_analysis import (
LinearDiscriminantAnalysis,
QuadraticDiscriminantAnalysis,
)
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import balanced_accuracy_score, log_loss
from sklearn.model_selection import GroupKFold, GroupShuffleSplit
# from sklearn.svm import SVC
from xgboost import XGBClassifier
from pte_decode.decoding.decoder_base import Decoder
def get_decoder(
classifier: str = "lda",
scoring: str = "balanced_accuracy",
balancing: Optional[str] = None,
optimize: bool = False,
) -> Decoder:
"""Create and return Decoder of desired type.
Parameters
----------
classifier : str
        Allowed values for `classifier`: ["catboost", "dummy", "lda", "lr",
        "qda", "xgb"].
    scoring : str, default="balanced_accuracy"
        Score to be calculated. Possible values:
        ["balanced_accuracy", "log_loss"].
balancing : str | None, default=None
Method for balancing skewed datasets. Possible values:
["oversample", "undersample", "balance_weights"].
Returns
-------
Decoder
        Instance of Decoder given `classifier` and `balancing` method.
"""
classifiers = {
"catboost": CATB,
"dummy": Dummy,
"lda": LDA,
"lr": LR,
"qda": QDA,
# "svm_lin": SVC_Lin,
# "svm_poly": SVC_Poly,
# "svm_rbf": SVC_RBF,
"xgb": XGB,
}
scoring_methods = {
"balanced_accuracy": _get_balanced_accuracy,
"log_loss": _get_log_loss,
}
classifier = classifier.lower()
balancing = balancing.lower() if isinstance(balancing, str) else balancing
scoring = scoring.lower()
if classifier not in classifiers:
raise DecoderNotFoundError(classifier, classifiers.keys())
if scoring not in scoring_methods:
raise ScoringMethodNotFoundError(scoring, scoring_methods.keys())
return classifiers[classifier](
balancing=balancing,
optimize=optimize,
scoring=scoring_methods[scoring],
)
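# Usage sketch (illustrative; mirrors the parameters documented above, with
# user-supplied arrays for the train/test splits; get_score() is assumed to be
# provided by the Decoder base class):
#   decoder = get_decoder(classifier="xgb", scoring="log_loss",
#                         balancing="oversample", optimize=False)
#   decoder.fit(data_train, labels_train, groups_train)
#   score = decoder.get_score(data_test, labels_test)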
def _get_balanced_accuracy(model, data_test, label_test) -> Any:
"""Calculated balanced accuracy score."""
return balanced_accuracy_score(label_test, model.predict(data_test))
def _get_log_loss(model, data_test, label_test) -> Any:
"""Calculate Log Loss score."""
return log_loss(label_test, model.predict_proba(data_test))
class ScoringMethodNotFoundError(Exception):
"""Exception raised when invalid balancing method is passed.
Attributes:
input_value -- input value which caused the error
allowed -- allowed input values
message -- explanation of the error
"""
def __init__(
self,
input_value,
allowed,
message="Input scoring method is not an allowed value.",
) -> None:
self.input_value = input_value
self.allowed = allowed
self.message = message
super().__init__(self.message)
def __str__(self):
return (
f"{{self.message}} Allowed values: {self.allowed}. Got:"
f" {self.input_value}."
)
class DecoderNotFoundError(Exception):
"""Exception raised when invalid Decoder is passed.
Attributes:
input_value -- input which caused the error
allowed -- allowed input types
message -- explanation of the error
"""
def __init__(
self,
input_value,
allowed,
message="Input decoding model is not an allowed value.",
) -> None:
self.input_value = input_value
        self.allowed = allowed
self.message = message
super().__init__(self.message)
def __str__(self):
return (
f"{{self.message}} Allowed values: {self.allowed}."
" Got: {self.input_value}."
)
@dataclass
class CATB(Decoder):
"""Class for CatBoostClassifier implementation."""
def __post_init__(self):
self.model = CatBoostClassifier(
loss_function="MultiClass",
verbose=False,
use_best_model=True,
eval_metric="MultiClass",
)
def fit(
self,
data: Union[pd.DataFrame, pd.Series],
labels: np.ndarray,
groups: np.ndarray,
) -> None:
"""Fit model to given training data and training labels."""
self.data_train = data
self.labels_train = labels
self.groups_train = groups
if self.optimize:
self.model = self._bayesian_optimization()
# Train outer model
(
self.data_train,
self.labels_train,
eval_set,
) = self._get_validation_split(
self.data_train,
self.labels_train,
self.groups_train,
train_size=0.8,
)
(
self.data_train,
self.labels_train,
sample_weight,
) = self._balance_samples(
self.data_train, self.labels_train, self.balancing
)
self.model.fit(
self.data_train,
self.labels_train,
eval_set=eval_set,
early_stopping_rounds=25,
sample_weight=sample_weight,
verbose=False,
)
def _bayesian_optimization(self):
"""Estimate optimal model parameters using bayesian optimization."""
optimizer = BayesianOptimization(
self._bo_tune,
{
"max_depth": (4, 10),
"learning_rate": (0.003, 0.3),
"bagging_temperature": (0.0, 1.0),
"l2_leaf_reg": (1, 30),
"random_strength": (0.01, 1.0),
},
)
optimizer.maximize(init_points=10, n_iter=20, acq="ei")
params = optimizer.max["params"]
params["max_depth"] = round(params["max_depth"])
return CatBoostClassifier(
iterations=200,
loss_function="MultiClass",
verbose=False,
use_best_model=True,
eval_metric="MultiClass",
max_depth=params["max_depth"],
learning_rate=params["learning_rate"],
random_strength=params["random_strength"],
bagging_temperature=params["bagging_temperature"],
l2_leaf_reg=params["l2_leaf_reg"],
)
def _bo_tune(
self,
max_depth,
learning_rate,
bagging_temperature,
l2_leaf_reg,
random_strength,
):
# Cross validating with the specified parameters in 5 folds
cv_inner = GroupShuffleSplit(
n_splits=3, train_size=0.66, random_state=42
)
scores = []
for train_index, test_index in cv_inner.split(
self.data_train, self.labels_train, self.groups_train
):
data_train_, data_test_ = (
self.data_train[train_index],
self.data_train[test_index],
)
y_tr, y_te = (
self.labels_train[train_index],
self.labels_train[test_index],
)
groups_tr = self.groups_train[train_index]
(data_train_, y_tr, eval_set_inner,) = self._get_validation_split(
data=data_train_,
labels=y_tr,
groups=groups_tr,
train_size=0.8,
)
data_train_, y_tr, sample_weight = self._balance_samples(
data_train_, y_tr, self.balancing
)
inner_model = CatBoostClassifier(
iterations=100,
loss_function="MultiClass",
verbose=False,
eval_metric="MultiClass",
max_depth=round(max_depth),
learning_rate=learning_rate,
bagging_temperature=bagging_temperature,
l2_leaf_reg=l2_leaf_reg,
random_strength=random_strength,
)
inner_model.fit(
data_train_,
y_tr,
eval_set=eval_set_inner,
early_stopping_rounds=25,
sample_weight=sample_weight,
verbose=False,
)
y_probs = inner_model.predict_proba(data_test_)
score = log_loss(y_te, y_probs, labels=[0, 1])
scores.append(score)
# Return the negative MLOGLOSS
return -1.0 * np.mean(scores)
@dataclass
class LDA(Decoder):
"""Class for applying Linear Discriminant Analysis using scikit-learn."""
def __post_init__(self):
if self.balancing == "balance_weights":
raise ValueError(
"Sample weights cannot be balanced for Linear "
"Discriminant Analysis. Please set `balance_weights` to"
"either `oversample`, `undersample` or `None`."
)
if self.optimize:
raise ValueError(
"Hyperparameter optimization cannot be performed for this"
" implementation of Linear Discriminant Analysis. Please"
" set `optimize` to False."
)
def fit(
self, data: np.ndarray, labels: np.ndarray, groups: np.ndarray
) -> None:
"""Fit model to given training data and training labels."""
self.data_train, self.labels_train, _ = self._balance_samples(
data, labels, self.balancing
)
self.model = LinearDiscriminantAnalysis(
solver="lsqr", shrinkage="auto"
)
self.model.fit(self.data_train, self.labels_train)
@dataclass
class LR(Decoder):
"""Basic representation of class for finding and filtering files."""
def fit(self, data: np.ndarray, labels: np.ndarray, groups) -> None:
"""Fit model to given training data and training labels."""
self.data_train = data
self.labels_train = labels
self.groups_train = groups
if self.optimize:
self.model = self._bayesian_optimization()
else:
self.model = LogisticRegression(solver="newton-cg")
self.data_train, self.labels_train, _ = self._balance_samples(
data, labels, self.balancing
)
self.model.fit(self.data_train, self.labels_train)
def _bayesian_optimization(self):
"""Estimate optimal model parameters using bayesian optimization."""
optimizer = BayesianOptimization(
self._bo_tune,
{"C": (0.01, 1.0)}, # pylint: disable=invalid-name
)
optimizer.maximize(init_points=10, n_iter=20, acq="ei")
# Train outer model with optimized parameters
params = optimizer.max["params"]
# params['max_iter'] = int(params['max_iter'])
return LogisticRegression(
solver="newton-cg", max_iter=500, C=params["C"]
)
def _bo_tune(self, C: float): # pylint: disable=invalid-name
# Cross validating with the specified parameters in 5 folds
cv_inner = GroupShuffleSplit(
n_splits=3, train_size=0.66, random_state=42
)
scores = []
for train_index, test_index in cv_inner.split(
self.data_train, self.labels_train, self.groups_train
):
data_train_, data_test_ = (
self.data_train[train_index],
self.data_train[test_index],
)
y_tr, y_te = (
self.labels_train[train_index],
self.labels_train[test_index],
)
data_train_, y_tr, sample_weight = self._balance_samples(
data_train_, y_tr, self.balancing
)
inner_model = LogisticRegression(
solver="newton-cg", C=C, max_iter=500
)
inner_model.fit(data_train_, y_tr, sample_weight=sample_weight)
y_probs = inner_model.predict_proba(data_test_)
score = log_loss(y_te, y_probs, labels=[0, 1])
scores.append(score)
# Return the negative MLOGLOSS
return -1.0 * np.mean(scores)
@dataclass
class Dummy(Decoder):
"""Dummy classifier implementation from scikit learn"""
def fit(self, data: np.ndarray, labels: np.ndarray, groups) -> None:
"""Fit model to given training data and training labels."""
self.data_train, self.labels_train, _ = self._balance_samples(
data, labels, self.balancing
)
self.model = DummyClassifier(strategy="uniform")
self.model.fit(self.data_train, self.labels_train)
def get_score(self, data_test: np.ndarray, label_test: np.ndarray):
"""Calculate score."""
scores = [
self.scoring(self.model, data_test, label_test)
for _ in range(0, 100)
]
return np.mean(scores)
@dataclass
class QDA(Decoder):
"""Class for applying Linear Discriminant Analysis using scikit-learn."""
def __post_init__(self):
if self.balancing == "balance_weights":
raise ValueError(
"Sample weights cannot be balanced for Quadratic "
"Discriminant Analysis. Please set `balance_weights` to"
"either `oversample`, `undersample` or `None`."
)
if self.optimize:
raise ValueError(
"Hyperparameter optimization cannot be performed for this"
" implementation of Quadratic Discriminant Analysis. Please"
" set `optimize` to False."
)
def fit(self, data: np.ndarray, labels: np.ndarray, groups) -> None:
"""Fit model to given training data and training labels."""
self.data_train, self.labels_train, _ = self._balance_samples(
data, labels, self.balancing
)
self.model = QuadraticDiscriminantAnalysis()
self.model.fit(self.data_train, self.labels_train)
@dataclass
class XGB(Decoder):
"""Basic representation of class for finding and filtering files."""
def _bayesian_optimization(self):
"""Estimate optimal model parameters using bayesian optimization."""
optimizer = BayesianOptimization(
self._bo_tune,
{
"learning_rate": (0.003, 0.3),
"max_depth": (4, 10),
"gamma": (0, 1),
"colsample_bytree": (0.4, 1),
"subsample": (0.4, 1),
},
)
optimizer.maximize(init_points=10, n_iter=20, acq="ei")
# Train outer model with optimized parameters
params = optimizer.max["params"]
return XGBClassifier(
objective="binary:logistic",
use_label_encoder=False,
n_estimators=200,
eval_metric="logloss",
learning_rate=params["learning_rate"],
gamma=params["gamma"],
max_depth=int(params["max_depth"]),
subsample=params["subsample"],
colsample_bytree=params["colsample_bytree"],
)
def _bo_tune(
self, learning_rate, gamma, max_depth, subsample, colsample_bytree
):
cv_inner = GroupKFold(
n_splits=3,
)
scores = []
for train_index, test_index in cv_inner.split(
self.data_train, self.labels_train, self.groups_train
):
data_train_, data_test_ = (
self.data_train.iloc[train_index],
self.data_train.iloc[test_index],
)
y_tr, y_te = (
self.labels_train[train_index],
self.labels_train[test_index],
)
groups_tr = self.groups_train[train_index]
(data_train_, y_tr, eval_set_inner,) = self._get_validation_split(
data=data_train_,
labels=y_tr,
groups=groups_tr,
train_size=0.8,
)
(data_train_, y_tr, sample_weight,) = self._balance_samples(
data=data_train_, labels=y_tr, method=self.balancing
)
inner_model = XGBClassifier(
objective="binary:logistic",
booster="gbtree",
use_label_encoder=False,
eval_metric="logloss",
n_estimators=100,
learning_rate=learning_rate,
gamma=gamma,
max_depth=int(max_depth),
colsample_bytree=colsample_bytree,
subsample=subsample,
)
inner_model.fit(
X=data_train_,
y=y_tr,
eval_set=eval_set_inner,
early_stopping_rounds=20,
sample_weight=sample_weight,
verbose=False,
)
y_probs = inner_model.predict_proba(X=data_test_)
score = log_loss(y_te, y_probs, labels=[0, 1])
scores.append(score)
# Return the negative MLOGLOSS
return -1.0 * np.mean(scores)
def fit(
self, data: pd.DataFrame, labels: np.ndarray, groups: np.ndarray
) -> None:
"""Fit model to given training data and training labels."""
self.data_train = data
self.labels_train = labels
self.groups_train = groups
if self.optimize:
self.model = self._bayesian_optimization()
else:
self.model = XGBClassifier(
objective="binary:logistic",
booster="gbtree",
use_label_encoder=False,
n_estimators=200,
eval_metric="logloss",
)
# Train outer model
(
self.data_train,
self.labels_train,
eval_set,
) = self._get_validation_split(
self.data_train,
self.labels_train,
self.groups_train,
train_size=0.8,
)
(
self.data_train,
self.labels_train,
sample_weight,
) = self._balance_samples(
data=data, labels=labels, method=self.balancing
)
self.model.fit(
self.data_train,
self.labels_train,
eval_set=eval_set,
early_stopping_rounds=20,
sample_weight=sample_weight,
verbose=False,
)
# @dataclass
# class SVC_Lin(Decoder):
# """"""
# @dataclass
# class SVC_Poly(Decoder):
# """"""
# @dataclass
# class SVC_RBF(Decoder):
# """"""
# @dataclass
# class SVC_Sig(Decoder):
# """"""
# def classify_svm_lin(data_train, y_train, group_train, optimize,
# balance):
# """"""
# def bo_tune(C, tol):
# # Cross validating with the specified parameters in 5 folds
# cv_inner = GroupShuffleSplit(
# n_splits=3, train_size=0.66, random_state=42
# )
# scores = []
# for train_index, test_index in cv_inner.split(
# data_train, y_train, group_train
# ):
# data_train_, data_test_ = data_train[train_index],
# data_train[test_index]
# y_tr, y_te = y_train[train_index], y_train[test_index]
# inner_model = SVC(
# kernel="linear",
# C=C,
# max_iter=500,
# tol=tol,
# gamma="scale",
# shrinking=True,
# class_weight=None,
# probability=True,
# verbose=False,
# )
# inner_model.fit(data_train_, y_tr,
# sample_weight=sample_weight)
# y_probs = inner_model.predict_proba(data_test_)
# score = log_loss(y_te, y_probs, labels=[0, 1])
# scores.append(score)
# # Return the negative MLOGLOSS
# return -1.0 * np.mean(scores)
# if optimize:
# # Perform Bayesian Optimization
# bo = BayesianOptimization(
# bo_tune, {"C": (pow(10, -1), pow(10, 1)),
# "tol": (1e-4, 1e-2)}
# )
# bo.maximize(init_points=10, n_iter=20, acq="ei")
# # Train outer model with optimized parameters
# params = bo.max["params"]
# # params['max_iter'] = 500
# model = SVC(
# kernel="linear",
# C=params["C"],
# max_iter=500,
# tol=params["tol"],
# gamma="scale",
# shrinking=True,
# class_weight=None,
# verbose=False,
# )
# else:
# # Use default values
# model = SVC(
# kernel="linear",
# gamma="scale",
# shrinking=True,
# class_weight=None,
# verbose=False,
# )
# model.fit(data_train, y_train, sample_weight=sample_weight)
# return model
# def classify_svm_rbf(data_train, y_train, group_train, optimize,
# balance):
# """"""
# def bo_tune(C, tol):
# # Cross validating with the specified parameters in 5 folds
# cv_inner = GroupShuffleSplit(
# n_splits=3, train_size=0.66, random_state=42
# )
# scores = []
# for train_index, test_index in cv_inner.split(
# data_train, y_train, group_train
# ):
# data_train_, data_test_ = data_train[train_index],
# data_train[test_index]
# y_tr, y_te = y_train[train_index], y_train[test_index]
# inner_model = SVC(
# kernel="rbf",
# C=C,
# max_iter=500,
# tol=tol,
# gamma="scale",
# shrinking=True,
# class_weight=None,
# probability=True,
# verbose=False,
# )
# inner_model.fit(data_train_, y_tr,
# sample_weight=sample_weight)
# y_probs = inner_model.predict_proba(data_test_)
# score = log_loss(y_te, y_probs, labels=[0, 1])
# scores.append(score)
# # Return the negative MLOGLOSS
# return -1.0 * np.mean(scores)
# if optimize:
# # Perform Bayesian Optimization
# bo = BayesianOptimization(
# bo_tune, {"C": (pow(10, -1), pow(10, 1)),
# "tol": (1e-4, 1e-2)}
# )
# bo.maximize(init_points=10, n_iter=20, acq="ei")
# # Train outer model with optimized parameters
# params = bo.max["params"]
# model = SVC(
# kernel="rbf",
# C=params["C"],
# max_iter=500,
# tol=params["tol"],
# gamma="scale",
# shrinking=True,
# class_weight=None,
# verbose=False,
# )
# else:
# # Use default values
# model = SVC(
# kernel="rbf",
# gamma="scale",
# shrinking=True,
# class_weight=None,
# verbose=False,
# )
# model.fit(data_train, y_train, sample_weight=sample_weight)
# return model
# def classify_svm_poly(data_train, y_train, group_train):
# """"""
# def bo_tune(C, tol):
# # Cross validating with the specified parameters in 5 folds
# cv_inner = GroupShuffleSplit(
# n_splits=3, train_size=0.66, random_state=42
# )
# scores = []
# for train_index, test_index in cv_inner.split(
# data_train, y_train, group_train
# ):
# data_train_, data_test_ = data_train[train_index],
# data_train[test_index]
# y_tr, y_te = y_train[train_index], y_train[test_index]
# inner_model = SVC(
# kernel="poly",
# C=C,
# max_iter=500,
# tol=tol,
# gamma="scale",
# shrinking=True,
# class_weight=None,
# probability=True,
# verbose=False,
# )
# inner_model.fit(data_train_, y_tr,
# sample_weight=sample_weight)
# y_probs = inner_model.predict_proba(data_test_)
# score = log_loss(y_te, y_probs, labels=[0, 1])
# scores.append(score)
# # Return the negative MLOGLOSS
# return -1.0 * np.mean(scores)
# if optimize:
# # Perform Bayesian Optimization
# bo = BayesianOptimization(
# bo_tune, {"C": (pow(10, -1), pow(10, 1)),
# "tol": (1e-4, 1e-2)}
# )
# bo.maximize(init_points=10, n_iter=20, acq="ei")
# # Train outer model with optimized parameters
# params = bo.max["params"]
# model = SVC(
# kernel="poly",
# C=params["C"],
# max_iter=500,
# tol=params["tol"],
# gamma="scale",
# shrinking=True,
# class_weight=None,
# verbose=False,
# )
# else:
# # Use default values
# model = SVC(
# kernel="poly",
# gamma="scale",
# shrinking=True,
# class_weight=None,
# verbose=False,
# )
# model.fit(data_train, y_train, sample_weight=sample_weight)
# return model
# def classify_svm_sig(data_train, y_train, group_train, optimize,
# balance):
# """"""
# def bo_tune(C, tol):
# # Cross validating with the specified parameters in 5 folds
# cv_inner = GroupShuffleSplit(
# n_splits=3, train_size=0.66, random_state=42
# )
# scores = []
# for train_index, test_index in cv_inner.split(
# data_train, y_train, group_train
# ):
# data_train_, data_test_ = data_train[train_index],
# data_train[test_index]
# y_tr, y_te = y_train[train_index], y_train[test_index]
# inner_model = SVC(
# kernel="sigmoid",
# C=C,
# max_iter=500,
# tol=tol,
# gamma="auto",
# shrinking=True,
# class_weight=None,
# probability=True,
# verbose=False,
# )
# inner_model.fit(data_train_, y_tr, sample_weight=sample_weight)
# y_probs = inner_model.predict_proba(data_test_)
# score = log_loss(y_te, y_probs, labels=[0, 1])
# scores.append(score)
# # Return the negative MLOGLOSS
# return -1.0 * np.mean(scores)
# if optimize:
# # Perform Bayesian Optimization
# bo = BayesianOptimization(
# bo_tune, {"C": (pow(10, -1), pow(10, 1)), "tol": (1e-4, 1e-2)}
# )
# bo.maximize(init_points=10, n_iter=20, acq="ei")
# # Train outer model with optimized parameters
# params = bo.max["params"]
# model = SVC(
# kernel="sigmoid",
# C=params["C"],
# max_iter=500,
# tol=params["tol"],
# gamma="auto",
# shrinking=True,
# class_weight=None,
# verbose=False,
# )
# else:
# # Use default values
# model = SVC(
# kernel="sigmoid",
# gamma="scale",
# shrinking=True,
# class_weight=None,
# verbose=False,
# )
# model.fit(data_train, y_train, sample_weight=sample_weight)
# return model
| 33.68477 | 78 | 0.537836 | 25,699 | 0.90074 | 0 | 0 | 24,335 | 0.852932 | 0 | 0 | 12,766 | 0.447443 |
e3203c55f3123f00f21c9072e3c16a2c74fb421f | 7,603 | py | Python | pikoToHM.py | lucasHSA/piko | a0bca6bfbdf1ecf95fd8dcca563350c676d2edf7 | [
"MIT"
]
| null | null | null | pikoToHM.py | lucasHSA/piko | a0bca6bfbdf1ecf95fd8dcca563350c676d2edf7 | [
"MIT"
]
| 1 | 2016-07-18T08:24:50.000Z | 2016-12-17T09:19:07.000Z | pikoToHM.py | lucasHSA/piko | a0bca6bfbdf1ecf95fd8dcca563350c676d2edf7 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
# The MIT License (MIT)
#
# Copyright (c) 2015 Lucas Koegel
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from piko import Piko
from hm import HM
from pyowm import OWM
import time
import sys
import logging, logging.handlers
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
fh = logging.handlers.RotatingFileHandler('/home/pi/Desktop/piko/pikoToHM.log', maxBytes=1024*1024*512, backupCount=2)
fh.setLevel(logging.DEBUG)
logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)
format = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
ch.setFormatter(format)
fh.setFormatter(format)
logger.addHandler(ch)
logger.addHandler(fh)
PIKO_INTERVAL = 30 # seconds
OWM_INTERVAL = 1800 # seconds
HM_PV_REMAINING_POWER_ID = 12772
HM_PV_STRING_1_POWER_ID = 15241
HM_PV_STRING_2_POWER_ID = 15242
HM_WEATHER_FORECAST_CLOUDS_ID = 20144
HM_WEATHER_CURRENT_TEMPERATURE_ID = 21442
HM_WEATHER_FORECAST_TEMPERATURE_ID = 21443
OWM_API_KEY = 'insert'
OWM_CITY_ID = 2835477
logging.info('Started')
p = Piko(host='http://192.168.178.123')
hm = HM('http://192.168.178.49')
owm = OWM(OWM_API_KEY)
last_weather_update = time.time() - OWM_INTERVAL # - OWM_INTERVAL to update on first run
while(True):
try:
# -------------------------------
# Weather
now = time.time()
if (now - last_weather_update) >= OWM_INTERVAL:
try:
# Queries the OWM web API for three hours weather forecast for the specified city ID.
# A Forecaster object is returned, containing a Forecast instance covering a global streak of five days:
# this instance encapsulates Weather objects, with a time interval of three hours one from each other
logging.debug('Calling: owm.three_hours_forecast_at_id')
forecast = owm.three_hours_forecast_at_id(OWM_CITY_ID).get_forecast()
# get current weather
logging.debug('Calling: owm.weather_at_id')
weather = owm.weather_at_id(OWM_CITY_ID).get_weather()
# set the cloud coverage of the weather to homematic
# .get_clouds(): Returns the cloud coverage percentage as an int
logging.debug('Calling: set_state HM_WEATHER_FORECAST_CLOUDS_ID')
hm.set_state(HM_WEATHER_FORECAST_CLOUDS_ID, weather.get_clouds())
# set the current temperature of the weather to homematic
# .get_temperature(): Returns a dict with temperature info {'temp': 293.4, 'temp_kf': None, 'temp_max': 297.5, 'temp_min': 290.9}
hm.set_state(HM_WEATHER_CURRENT_TEMPERATURE_ID, weather.get_temperature(unit="celsius")["temp"])
# set the temperature of the weather in 12 hours to homematic
# .get(): Lookups up into the Weather items list for the item at the specified index
# .get_temperature(): Returns a dict with temperature info {'temp': 293.4, 'temp_kf': None, 'temp_max': 297.5, 'temp_min': 290.9}
hm.set_state(HM_WEATHER_FORECAST_TEMPERATURE_ID, forecast.get(3).get_temperature(unit="celsius")["temp"])
# Update last_weather_update time
last_weather_update = time.time()
except: # catch *all* exceptions
err = sys.exc_info()[0]
logging.exception('Error on updating weather: {0}'.format(err))
# -------------------------------
# Piko
# Get values for remaining power calculation
logging.debug('Calling: get_current_power')
current_solar_power = p.get_current_power()
logging.debug('Calling: get_consumption_phase_1')
consumption_phase_1 = p.get_consumption_phase_1()
        logging.debug('Calling: get_consumption_phase_2')
        consumption_phase_2 = p.get_consumption_phase_2()
        logging.debug('Calling: get_consumption_phase_3')
        consumption_phase_3 = p.get_consumption_phase_3()
# Get values for string 1 power and string 2 power
logging.debug('Calling: get_string1_current')
string1Current = p.get_string1_current()
logging.debug('Calling: get_string2_current')
string2Current = p.get_string2_current()
logging.debug('Calling: get_string1_voltage')
string1Voltage = p.get_string1_voltage()
logging.debug('Calling: get_string2_voltage')
string2Voltage = p.get_string2_voltage()
if current_solar_power < 0:
# Piko is off
logging.info('Piko is off, going to sleep 10 minutes.')
# Set state of homematic
logging.debug('Calling: set_state HM_PV_REMAINING_POWER_ID')
hm.set_state(HM_PV_REMAINING_POWER_ID, 0)
logging.debug('Calling: set_state HM_PV_STRING_1_POWER_ID')
hm.set_state(HM_PV_STRING_1_POWER_ID, 0)
logging.debug('Calling: set_state HM_PV_STRING_2_POWER_ID')
hm.set_state(HM_PV_STRING_2_POWER_ID, 0)
logging.debug('Calling: time.sleep 600')
time.sleep(600)
continue
# Calculate remaining power
logging.debug('Rounding for remaining_power')
remaining_power = round(current_solar_power - (consumption_phase_1 + consumption_phase_2 + consumption_phase_3))
if remaining_power < 0:
remaining_power = 0
# Calculate string 1 power and string 2 power
string1 = round(string1Current * string1Voltage)
string2 = round(string2Current * string2Voltage)
# Set state of homematic
logging.debug('Calling: set_state HM_PV_REMAINING_POWER_ID')
hm.set_state(HM_PV_REMAINING_POWER_ID, remaining_power)
logging.debug('Calling: set_state HM_PV_STRING_1_POWER_ID')
hm.set_state(HM_PV_STRING_1_POWER_ID, string1)
logging.debug('Calling: set_state HM_PV_STRING_2_POWER_ID')
hm.set_state(HM_PV_STRING_2_POWER_ID, string2)
# Sleep
logging.debug('Calling: time.sleep PIKO_INTERVAL')
time.sleep(PIKO_INTERVAL)
except KeyboardInterrupt:
break
except: # catch *all* exceptions
err = sys.exc_info()[0]
logging.exception('Error: {0}'.format(err))
continue
| 42.47486 | 145 | 0.663422 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,464 | 0.45549 |
e321f4353a25d31bcaa64e339213294f5626c9c9 | 480 | py | Python | src/default/ellipse/index.py | mikeludemann/python-data-visualization | e5317505d41ae79389f6eec61cefeca1690935b0 | [
"MIT"
]
| null | null | null | src/default/ellipse/index.py | mikeludemann/python-data-visualization | e5317505d41ae79389f6eec61cefeca1690935b0 | [
"MIT"
]
| null | null | null | src/default/ellipse/index.py | mikeludemann/python-data-visualization | e5317505d41ae79389f6eec61cefeca1690935b0 | [
"MIT"
]
| null | null | null | import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Ellipse
NUM = 250
ells = [Ellipse(xy=np.random.rand(2) * 10,
width=np.random.rand(), height=np.random.rand(),
angle=np.random.rand() * 360)
for i in range(NUM)]
fig, ax = plt.subplots(subplot_kw={'aspect': 'equal'})
for e in ells:
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(np.random.rand())
e.set_facecolor(np.random.rand(3))
ax.set_xlim(0, 10)
ax.set_ylim(0, 10)
plt.show()
| 20.869565 | 54 | 0.708333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.03125 |
e32283e627f56eef0ab47dab2fb3694cb482ef8d | 231 | py | Python | hdc-utility/model/Formation.py | YSRKEN/HDC_React2 | cba48a0563caef629169644254742f688a0e1ec7 | [
"MIT"
]
| null | null | null | hdc-utility/model/Formation.py | YSRKEN/HDC_React2 | cba48a0563caef629169644254742f688a0e1ec7 | [
"MIT"
]
| 13 | 2020-09-04T23:25:20.000Z | 2022-02-18T01:52:33.000Z | hdc-utility/model/Formation.py | YSRKEN/HDC_React2 | cba48a0563caef629169644254742f688a0e1ec7 | [
"MIT"
]
| null | null | null | from enum import Enum
class Formation(Enum):
    LINE_AHEAD = 0  # 単縦陣 (line ahead)
    DOUBLE_LINE = 1  # 複縦陣 (double line)
    DIAMOND = 2  # 輪形陣 (diamond / ring formation)
    ECHELON = 3  # 梯形陣 (echelon)
    LINE_ABREAST = 4  # 単横陣 (line abreast)
    FORMATION_3 = 5  # 第3陣形(第三警戒航行序列(輪形陣)) (Formation 3: third patrol cruising formation, a ring formation)
| 21 | 43 | 0.562771 | 264 | 0.913495 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.359862 |
e323376f728d32ac2cbf19f89a6bf1e46c450382 | 638 | py | Python | _/chapter5-OpenStack/IdentityService/createproject.py | paullewallencom/hybrid-cloud-978-1-7888-3087-4 | d101553fd342f420b581b87c58c7219f2b04a7c6 | [
"Apache-2.0"
]
| 3 | 2018-03-27T14:34:48.000Z | 2021-10-04T16:28:19.000Z | _/chapter5-OpenStack/IdentityService/createproject.py | paullewallencom/hybrid-cloud-978-1-7888-3087-4 | d101553fd342f420b581b87c58c7219f2b04a7c6 | [
"Apache-2.0"
]
| null | null | null | _/chapter5-OpenStack/IdentityService/createproject.py | paullewallencom/hybrid-cloud-978-1-7888-3087-4 | d101553fd342f420b581b87c58c7219f2b04a7c6 | [
"Apache-2.0"
]
| 1 | 2021-08-27T23:51:28.000Z | 2021-08-27T23:51:28.000Z | #import OpenStack connection class from the SDK
from openstack import connection
# Create a connection object by calling the constructor and pass the security information
conn = connection.Connection(auth_url="http://192.168.0.106/identity",
project_name="demo",
username="admin",
password="manoj",
user_domain_id="default",
project_domain_id="default")
def create_project(conn):
project_desc = {
"description":"This project is for packtpub readers",
"isenabled" : True,
"name":"packtpub_readers"
}
project = conn.identity.create_project(**project_desc)
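    return project  # hand the created project back to the caller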
create_project(conn)
| 30.380952 | 89 | 0.714734 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 291 | 0.456113 |
e323be496777a0e952195a0a60b4f2ae474d9dd5 | 849 | py | Python | bisection.py | Raijeku/Optimizacion | b06c302c3edbdb3a2a2b378b0c53baaf9fe69c2b | [
"Apache-2.0"
]
| null | null | null | bisection.py | Raijeku/Optimizacion | b06c302c3edbdb3a2a2b378b0c53baaf9fe69c2b | [
"Apache-2.0"
]
| null | null | null | bisection.py | Raijeku/Optimizacion | b06c302c3edbdb3a2a2b378b0c53baaf9fe69c2b | [
"Apache-2.0"
]
| null | null | null | from sympy import *
import pandas as pd
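# Bisection method: repeatedly halve the bracket [xl, xu], keeping the half in
# which f changes sign, until the bracket is narrower than the tolerance.
# Every iteration is logged into a DataFrame that is returned to the caller.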
def bisection(xl, xu, tolerance, function):
x = Symbol('x')
f = parse_expr(function)
iteration = 0
data = pd.DataFrame(columns=['iteration','xl','xu','xr','f(xl)','f(xu)','f(xr)','f(xl)f(xr)','error'])
while abs(xu-xl)>=tolerance:
xr = (xl + xu)/2
fxl = f.subs(x, xl)
fxu = f.subs(x, xu)
fxr = f.subs(x, xr)
data = data.append(pd.DataFrame({'iteration':[iteration], 'xl':[xl], 'xu':[xu], 'xr':[xr], 'f(xl)':[fxl], 'f(xu)':[fxu], 'f(xr)':[fxr], 'f(xl)f(xr)':[fxl*fxr], 'error':[abs(xu-xl)]}), ignore_index = True)
if fxl*fxr<0:
xu = xr
elif fxl*fxr>0:
xl = xr
iteration += 1
data.set_index('iteration', inplace=True)
return data
print(bisection(10, 50, 0.01, '3*x**2 - 120*x + 100')) | 30.321429 | 212 | 0.522968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 162 | 0.190813 |
e323ed5e92eb5da83c0443afabf48a5b468396f3 | 176 | py | Python | gd/utils/crypto/__init__.py | scottwedge/gd.py | 328c9833abc949b1c9ac0eabe276bd66fead4c2c | [
"MIT"
]
| null | null | null | gd/utils/crypto/__init__.py | scottwedge/gd.py | 328c9833abc949b1c9ac0eabe276bd66fead4c2c | [
"MIT"
]
| null | null | null | gd/utils/crypto/__init__.py | scottwedge/gd.py | 328c9833abc949b1c9ac0eabe276bd66fead4c2c | [
"MIT"
]
| null | null | null | """Main module for operating on crypted/encoded strings in Geometry Dash"""
from gd.utils.crypto.coders import Coder
from gd.utils.crypto.xor_cipher import XORCipher as xor
| 44 | 76 | 0.795455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.426136 |
e324c2b47225b873ec4b37a7708b700104f77b26 | 3,684 | py | Python | subt/ros/base/src/motor_controller.py | m3d/osgar_archive_2020 | 556b534e59f8aa9b6c8055e2785c8ae75a1a0a0e | [
"MIT"
]
| 12 | 2017-02-16T10:22:59.000Z | 2022-03-20T05:48:06.000Z | subt/ros/base/src/motor_controller.py | m3d/osgar_archive_2020 | 556b534e59f8aa9b6c8055e2785c8ae75a1a0a0e | [
"MIT"
]
| 618 | 2016-08-30T04:46:12.000Z | 2022-03-25T16:03:10.000Z | subt/ros/base/src/motor_controller.py | robotika/osgar | 6f4f584d5553ab62c08a1c7bb493fefdc9033173 | [
"MIT"
]
| 11 | 2016-08-27T20:02:55.000Z | 2022-03-07T08:53:53.000Z | from pid import PID
import pdb
# for anonymous objects
Object = lambda **kwargs: type("Object", (), kwargs)
class MotorController:
def __init__(self, wheelBase,numberOfMotors):
self.pidControllerFrontLeft = PID()
self.pidControllerFrontRight = PID()
self.pidControllerRearLeft = PID()
self.pidControllerRearRight = PID()
self.wheelBase = wheelBase
self.numberOfMotors = numberOfMotors
self.lastForwardSpeed = 0
def update(self,cmd_vel,actualWheelSpeed):
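        # Map the commanded twist (cmd_vel) to per-wheel target speeds using
        # differential-drive kinematics, then run one PID step per wheel.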
#desiredSpeedFrontLeft = 0
#desiredSpeedFrontRight = 0
#desiredSpeedRearLeft = 0
#desiredSpeedRearRight = 0
        desiredSpeed = Object()
if self.numberOfMotors == 1:
desiredSpeed.frontLeft = cmd_vel.linear.x
desiredSpeed.frontRight = 0
desiredSpeed.rearLeft = 0
desiredSpeed.rearRight =0
else:
desiredSpeed.frontLeft = cmd_vel.linear.x - cmd_vel.angular.z * self.wheelBase / 2
desiredSpeed.frontRight = cmd_vel.linear.x + cmd_vel.angular.z * self.wheelBase / 2
desiredSpeed.rearLeft = desiredSpeed.frontLeft
desiredSpeed.rearRight = desiredSpeed.frontRight
        newWheelSpeed = Object()
if desiredSpeed.frontLeft == 0 and\
desiredSpeed.frontRight == 0 and\
desiredSpeed.rearLeft == 0 and\
desiredSpeed.rearRight == 0:
#robot wants to stop now
newWheelSpeed.frontLeft = self.pidControllerFrontLeft.stop()
newWheelSpeed.frontRight = self.pidControllerFrontRight.stop()
newWheelSpeed.rearLeft = self.pidControllerRearLeft.stop()
newWheelSpeed.rearRight = self.pidControllerRearRight.stop()
elif (cmd_vel.linear.x > 0 and self.lastForwardSpeed < 0) or \
(cmd_vel.linear.x < 0 and self.lastForwardSpeed > 0):
#robot wants to change direction -> stop first.
newWheelSpeed.frontLeft = self.pidControllerFrontLeft.stop()
newWheelSpeed.frontRight = self.pidControllerFrontRight.stop()
newWheelSpeed.rearLeft = self.pidControllerRearLeft.stop()
newWheelSpeed.rearRight = self.pidControllerRearRight.stop()
else:
newWheelSpeed.frontLeft = self.pidControllerFrontLeft.update(desiredSpeed.frontLeft,actualWheelSpeed.frontLeft)
newWheelSpeed.frontRight = self.pidControllerFrontRight.update(desiredSpeed.frontRight,actualWheelSpeed.frontRight)
newWheelSpeed.rearLeft = self.pidControllerRearLeft.update(desiredSpeed.rearLeft,actualWheelSpeed.rearLeft)
newWheelSpeed.rearRight = self.pidControllerRearRight.update(desiredSpeed.rearRight,actualWheelSpeed.rearRight)
"""
print "FL:\tdesired=%lf;\tactual=%lf;\tnew=%lf" % (desiredSpeed.frontLeft,actualWheelSpeed.frontLeft,newWheelSpeed.frontLeft)
print "FR:\tdesired=%lf;\tactual=%lf;\tnew=%lf" % (desiredSpeed.frontRight,actualWheelSpeed.frontRight,newWheelSpeed.frontRight)
print "RL:\tdesired=%lf;\tactual=%lf;\tnew=%lf" % (desiredSpeed.rearLeft,actualWheelSpeed.rearLeft,newWheelSpeed.rearLeft)
print "RR:\tdesired=%lf;\tactual=%lf;\tnew=%lf" % (desiredSpeed.rearRight,actualWheelSpeed.rearRight,newWheelSpeed.rearRight)
"""
self.lastForwardSpeed = cmd_vel.linear.x
"""
newWheelSpeed.frontLeft = 0
newWheelSpeed.frontRight = 0
newWheelSpeed.rearLeft = 0
newWheelSpeed.rearRight = 0
"""
return newWheelSpeed
| 47.844156 | 140 | 0.659609 | 3,547 | 0.962812 | 0 | 0 | 0 | 0 | 0 | 0 | 936 | 0.254072 |
e325abcd58eea788430716963a4dc7047047719c | 4,931 | py | Python | shiftscheduler/gui/barebone.py | c-rainbow/nurse-scheduling | 8537c875e46772700499a89dec3a30a796434fe0 | [
"MIT"
]
| 2 | 2020-04-16T17:03:56.000Z | 2021-04-08T17:23:21.000Z | shiftscheduler/gui/barebone.py | c-rainbow/nurse-scheduling | 8537c875e46772700499a89dec3a30a796434fe0 | [
"MIT"
]
| null | null | null | shiftscheduler/gui/barebone.py | c-rainbow/nurse-scheduling | 8537c875e46772700499a89dec3a30a796434fe0 | [
"MIT"
]
| 1 | 2020-05-04T18:03:59.000Z | 2020-05-04T18:03:59.000Z |
import tkinter as tk
from tkinter import filedialog
from tkinter import messagebox
from tkinter import scrolledtext
from tkinter import ttk
import tkcalendar as tkc
from shiftscheduler.data_types import data_types
from shiftscheduler.excel import output as excel_output
from shiftscheduler.gui import constants
from shiftscheduler.gui import util
from shiftscheduler.i18n import gettext
_ = gettext.GetTextFn('gui/barebone')
LOCALE_CODE = gettext.GetLanguageCode()
DATE_PATTERN = _('y/m/d')
# TkInter frame for getting barebone Excel file
class BareboneExcelFrame(ttk.Frame):
def __init__(self, master, *args, **kwargs):
super().__init__(master, *args, **kwargs)
util.SetGridWeights(self, column_weights=(1, 2))
self.createLeftFrame()
self.createRightFrame()
# Create left side of the frame
def createLeftFrame(self):
left_frame = ttk.Frame(self)
util.SetGrid(left_frame, 0, 0)
util.SetGridWeights(left_frame, row_weights=(1, 9))
label = ttk.Label(left_frame, text=_('Please enter name of workers'))
util.SetGrid(label, 0, 0) #, sticky=ttk.W) # For some reason, ttk.NSEW does not work
#self.names_text_area = ttk.Text(left_frame)
self.names_text_area = scrolledtext.ScrolledText(left_frame)
util.SetGrid(self.names_text_area, 1, 0)
# Create right side of the frame
def createRightFrame(self):
right_frame = ttk.Frame(self)
util.SetGrid(right_frame, 0, 1)
util.SetGridWeights(right_frame, row_weights=(1, 1, 1, 1, 1, 5, 1))
# Start date widgets
start_date_label = ttk.Label(right_frame, text=_('Start Date'))
util.SetGrid(start_date_label, 0, 0)
self.start_cal = tkc.DateEntry(
right_frame, year=2020, month=5, day=1, date_pattern=DATE_PATTERN, locale=LOCALE_CODE)
util.SetGrid(self.start_cal, 1, 0)
# End date widgets
end_date_label = ttk.Label(right_frame, text=_('End Date'))
util.SetGrid(end_date_label, 2, 0)
self.end_cal = tkc.DateEntry(
right_frame, year=2020, month=5, day=31, date_pattern=DATE_PATTERN, locale=LOCALE_CODE)
util.SetGrid(self.end_cal, 3, 0)
# Instruction label
instruction = """
사용 방법
1.간호사 이름을 한줄씩 적어주세요
2.일정의 시작-끝 날짜를 지정합니다
3."엑셀 파일 받기"를 눌러 파일을 저장합니다
4."날짜별 설정" 시트에서 필요 인원을 입력합니다
5."간호사별 설정"에서 근무일수를 입력합니다
6."일정표"에서 기존에 정해진 일정을 입력합니다
7."새 일정" 탭에서 다음 단계를 진행해 주세요
"""
instruction_label = ttk.Label(right_frame, text=instruction, justify=tk.LEFT)
util.SetGrid(instruction_label,5, 0)
# Download button
def callback_func():
error = self.validateValues()
if error:
messagebox.showerror(message=error)
return
filepath = filedialog.asksaveasfilename(
title=_('Save the barebone Excel file'), filetypes=constants.EXCEL_FILE_TYPE)
if filepath:
self.CreateExcel(filepath)
download_button = ttk.Button(
right_frame, text=_('Download barebone Excel'), command=callback_func)
util.SetGrid(download_button, 6, 0)
# Get values from GUI
def getValues(self):
text_area_value = self.names_text_area.get('1.0', 'end').strip()
names = text_area_value.split('\n')
# Filter out all empty names
names = [name.strip() for name in names if name and not name.isspace()]
start_date = self.start_cal.get_date()
end_date = self.end_cal.get_date()
return (names, start_date, end_date)
def validateValues(self):
names, start_date, end_date = self.getValues()
# No name input
if not names:
return _('Please enter names')
if start_date > end_date:
return _('The start date is after the end date')
# Check for duplicate names
nameset = set()
duplicates = set()
for name in names:
if name not in nameset:
nameset.add(name)
else:
duplicates.add(name)
if duplicates:
return _('Duplicate names: {names}').format(','.join(sorted(duplicates)))
return '' # No error
def CreateExcel(self, filepath):
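        # Build an empty TotalSchedule that only contains the worker names and the
        # date range, then write it out as the barebone Excel template.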
names, start_date, end_date = self.getValues()
sw_config = data_types.SoftwareConfig(start_date=start_date, end_date=end_date, num_person=len(names))
person_configs = [
data_types.PersonConfig(name, None, None, None, None) for name in names]
barebone_schedule = data_types.TotalSchedule(
software_config=sw_config, person_configs=person_configs, date_configs=[],
assignment_dict=dict())
excel_output.FromTotalSchedule(barebone_schedule, filepath)
| 35.47482 | 110 | 0.636585 | 4,617 | 0.892174 | 0 | 0 | 0 | 0 | 0 | 0 | 1,127 | 0.217778 |
e3274579faa2032556dd5e38f0e928addfcdc145 | 1,093 | py | Python | orders/migrations/0001_initial.py | MahmudulHassan5809/Ecommerce-WebSite-With-Django2 | a9c76e6e925e236ba064be194a03d9d6635edac2 | [
"MIT"
]
| 1 | 2021-09-24T04:32:35.000Z | 2021-09-24T04:32:35.000Z | orders/migrations/0001_initial.py | MahmudulHassan5809/Ecommerce-WebSite-With-Django2 | a9c76e6e925e236ba064be194a03d9d6635edac2 | [
"MIT"
]
| null | null | null | orders/migrations/0001_initial.py | MahmudulHassan5809/Ecommerce-WebSite-With-Django2 | a9c76e6e925e236ba064be194a03d9d6635edac2 | [
"MIT"
]
| null | null | null | # Generated by Django 2.1.5 on 2019-01-26 19:42
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('item', models.CharField(max_length=200)),
('quantity', models.IntegerField()),
('price', models.CharField(max_length=100)),
('total', models.CharField(max_length=100)),
('name', models.CharField(max_length=200)),
('phone', models.CharField(max_length=200)),
('email', models.CharField(max_length=200)),
('address', models.TextField()),
('order_date', models.DateTimeField(blank=True, default=datetime.datetime(2019, 1, 27, 1, 42, 37, 95617))),
('user_id', models.IntegerField(blank=True)),
],
),
]
| 34.15625 | 123 | 0.563586 | 984 | 0.900274 | 0 | 0 | 0 | 0 | 0 | 0 | 142 | 0.129918 |
e3278fc449a9b7f42367d6c094639616a86c1514 | 353 | py | Python | setup.py | markus61/selfstoredict | c770fd0dd4976e66299f51f71a71ad9c1875d699 | [
"MIT"
]
| 1 | 2017-01-18T11:19:24.000Z | 2017-01-18T11:19:24.000Z | setup.py | markus61/selfstoredict | c770fd0dd4976e66299f51f71a71ad9c1875d699 | [
"MIT"
]
| null | null | null | setup.py | markus61/selfstoredict | c770fd0dd4976e66299f51f71a71ad9c1875d699 | [
"MIT"
]
| 1 | 2018-02-23T06:23:43.000Z | 2018-02-23T06:23:43.000Z | from setuptools import setup, find_packages
setup(
name='selfstoredict',
version='0.6',
packages=find_packages(),
url='https://github.com/markus61/selfstoredict',
license='MIT',
author='markus',
author_email='[email protected]',
description='a python class delivering a dict that stores itself into a JSON file or a redis db',
)
| 29.416667 | 101 | 0.696884 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 171 | 0.484419 |
e328edcf699e6d13889b75058d9c53daede11262 | 428 | py | Python | play.py | Samitha156/100-days-of-coding | b47aff0f6d432945a20a5f95e2252cddb6cc5522 | [
"MIT"
]
| null | null | null | play.py | Samitha156/100-days-of-coding | b47aff0f6d432945a20a5f95e2252cddb6cc5522 | [
"MIT"
]
| null | null | null | play.py | Samitha156/100-days-of-coding | b47aff0f6d432945a20a5f95e2252cddb6cc5522 | [
"MIT"
]
| null | null | null | def add(*args):
c = 0
for n in args:
c += n
return c
total = add(2, 5, 6, 5)
print(total)
def calculate(**kwargs):
print(kwargs)
calculate(add=3, mul=5)
class Car:
def __init__(self, **kw):
# self.make = kw["make"]
# self.model = kw["model"]
self.make = kw.get("make")
        self.model = kw.get("model")
my_car = Car(make="Nissan")
print(my_car.model) | 17.12 | 36 | 0.514019 | 186 | 0.434579 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.170561 |
e3294c6b906349f5541063a2b6f7ca5cb0e7e90b | 21,406 | py | Python | lib/simpleauth/handler.py | Bekt/tweetement | 5cdb2e7db30a1600fbf522754c4917f8c9e377a6 | [
"MIT"
]
| 2 | 2015-02-18T17:31:58.000Z | 2019-04-01T13:44:45.000Z | lib/simpleauth/handler.py | Bekt/tweetement | 5cdb2e7db30a1600fbf522754c4917f8c9e377a6 | [
"MIT"
]
| 1 | 2015-01-26T03:58:19.000Z | 2015-01-26T03:58:19.000Z | lib/simpleauth/handler.py | Bekt/tweetement | 5cdb2e7db30a1600fbf522754c4917f8c9e377a6 | [
"MIT"
]
| 1 | 2021-05-04T21:15:53.000Z | 2021-05-04T21:15:53.000Z | # -*- coding: utf-8 -*-
import os
import sys
import logging
import json
from urllib import urlencode
import urlparse
# for CSRF state tokens
import time
import base64
# Get available json parser
try:
# should be the fastest on App Engine py27.
import json
except ImportError:
try:
import simplejson as json
except ImportError:
from django.utils import simplejson as json
# at this point ImportError will be raised
# if none of the above could be imported
# it's a OAuth 1.0 spec even though the lib is called oauth2
import oauth2 as oauth1
# users module is needed for OpenID authentication.
from google.appengine.api import urlfetch, users
from webapp2_extras import security
__all__ = ['SimpleAuthHandler',
'Error',
'UnknownAuthMethodError',
'AuthProviderResponseError',
'InvalidCSRFTokenError',
'InvalidOAuthRequestToken',
'InvalidOpenIDUserError']
OAUTH1 = 'oauth1'
OAUTH2 = 'oauth2'
OPENID = 'openid'
class Error(Exception):
"""Base error class for this module"""
pass
class UnknownAuthMethodError(Error):
"""Raised when there's no method to call for a specific auth type"""
pass
class AuthProviderResponseError(Error):
"""Error coming from a provider"""
pass
class InvalidCSRFTokenError(Error):
"""Currently used only in OAuth 2.0 with CSRF protection enabled"""
pass
class InvalidOAuthRequestToken(Error):
"""OAuth1 request token -related error"""
pass
class InvalidOpenIDUserError(Error):
"""Error during OpenID auth callback"""
pass
class SimpleAuthHandler(object):
"""A mixin to be used with a real request handler,
e.g. webapp2.RequestHandler. See README for getting started and
a usage example, or look through the code. It really is simple.
See README for docs on authentication flows.
"""
PROVIDERS = {
# OAuth 2.0 providers
'google': (OAUTH2,
'https://accounts.google.com/o/oauth2/auth?{0}',
'https://accounts.google.com/o/oauth2/token'),
'googleplus': (OAUTH2,
'https://accounts.google.com/o/oauth2/auth?{0}',
'https://accounts.google.com/o/oauth2/token'),
'windows_live': (OAUTH2,
'https://login.live.com/oauth20_authorize.srf?{0}',
'https://login.live.com/oauth20_token.srf'),
'facebook': (OAUTH2,
'https://www.facebook.com/dialog/oauth?{0}',
'https://graph.facebook.com/oauth/access_token'),
'linkedin2': (OAUTH2,
'https://www.linkedin.com/uas/oauth2/authorization?{0}',
'https://www.linkedin.com/uas/oauth2/accessToken'),
'foursquare': (OAUTH2,
'https://foursquare.com/oauth2/authenticate?{0}',
'https://foursquare.com/oauth2/access_token'),
# OAuth 1.0a providers
'linkedin': (OAUTH1, {
'request': 'https://api.linkedin.com/uas/oauth/requestToken',
'auth': 'https://www.linkedin.com/uas/oauth/authenticate?{0}'
}, 'https://api.linkedin.com/uas/oauth/accessToken'),
'twitter': (OAUTH1, {
'request': 'https://api.twitter.com/oauth/request_token',
'auth': 'https://api.twitter.com/oauth/authenticate?{0}'
}, 'https://api.twitter.com/oauth/access_token'),
# OpenID
'openid': ('openid', None)
}
TOKEN_RESPONSE_PARSERS = {
'google': '_json_parser',
'googleplus': '_json_parser',
'windows_live': '_json_parser',
'foursquare': '_json_parser',
'facebook': '_query_string_parser',
'linkedin': '_query_string_parser',
'linkedin2': '_json_parser',
'twitter': '_query_string_parser'
}
# Set this to True in your handler if you want to use
  # 'state' param during authorization phase to guard against
  # cross-site request forgery (CSRF)
#
# CSRF protection assumes there's self.session method on the handler
# instance. See BaseRequestHandler in example/handlers.py for sample usage.
OAUTH2_CSRF_STATE = False
OAUTH2_CSRF_STATE_PARAM = 'csrf'
OAUTH2_CSRF_SESSION_PARAM = 'oauth2_state'
OAUTH2_CSRF_TOKEN_TIMEOUT = 3600 # 1 hour
# This will form the actual state parameter, e.g. token:timestamp
# You don't normally need to override it.
OAUTH2_CSRF_DELIMITER = ':'
# Extra params passed to OAuth2 init handler are stored in the state
# under this name.
OAUTH2_STATE_EXTRA_PARAM = 'extra'
def _simple_auth(self, provider=None):
"""Dispatcher of auth init requests, e.g.
GET /auth/PROVIDER
Calls _<authtype>_init() method, where <authtype> is
oauth2, oauth1 or openid (defined in PROVIDERS dict).
May raise one of the exceptions defined at the beginning
of the module. See README for details on error handling.
"""
extra = None
if self.request is not None and self.request.params is not None:
extra = self.request.params.items()
cfg = self.PROVIDERS.get(provider, (None,))
meth = self._auth_method(cfg[0], 'init')
# We don't respond directly in here. Specific methods are in charge
# with redirecting user to an auth endpoint
meth(provider, cfg[1], extra)
def _auth_callback(self, provider=None):
"""Dispatcher of callbacks from auth providers, e.g.
/auth/PROVIDER/callback?params=...
Calls _<authtype>_callback() method, where <authtype> is
oauth2, oauth1 or openid (defined in PROVIDERS dict).
May raise one of the exceptions defined at the beginning
of the module. See README for details on error handling.
"""
cfg = self.PROVIDERS.get(provider, (None,))
meth = self._auth_method(cfg[0], 'callback')
# Get user profile data and their access token
result = meth(provider, *cfg[-1:])
user_data, auth_info = result[0], result[1]
extra = None
if len(result) > 2:
extra = result[2]
# The rest should be implemented by the actual app
self._on_signin(user_data, auth_info, provider, extra=extra)
def _auth_method(self, auth_type, step):
"""Constructs proper method name and returns a callable.
Args:
auth_type: string, One of 'oauth2', 'oauth1' or 'openid'
step: string, Phase of the auth flow. Either 'init' or 'callback'
Raises UnknownAuthMethodError if expected method doesn't exist on the
handler instance processing the request.
"""
method = '_%s_%s' % (auth_type, step)
try:
return getattr(self, method)
except AttributeError:
raise UnknownAuthMethodError(method)
def _oauth2_init(self, provider, auth_url, extra=None):
"""Initiates OAuth 2.0 web flow"""
key, secret, scope = self._get_consumer_info_for(provider)
callback_url = self._callback_uri_for(provider)
optional_params = self._get_optional_params_for(provider)
params = {
'response_type': 'code',
'client_id': key,
'redirect_uri': callback_url
}
if isinstance(optional_params, dict):
params.update(optional_params)
if scope:
params.update(scope=scope)
state_params = {}
if self.OAUTH2_CSRF_STATE:
csrf_token = self._generate_csrf_token()
state_params[self.OAUTH2_CSRF_STATE_PARAM] = csrf_token
self.session[self.OAUTH2_CSRF_SESSION_PARAM] = csrf_token
if extra is not None:
state_params[self.OAUTH2_STATE_EXTRA_PARAM] = extra
if len(state_params):
params.update(state=json.dumps(state_params))
target_url = auth_url.format(urlencode(params))
logging.debug('Redirecting user to %s', target_url)
self.redirect(target_url)
def _oauth2_callback(self, provider, access_token_url):
"""Step 2 of OAuth 2.0, whenever the user accepts or denies access."""
error = self.request.get('error')
if error:
raise AuthProviderResponseError(error, provider)
code = self.request.get('code')
callback_url = self._callback_uri_for(provider)
client_id, client_secret, scope = self._get_consumer_info_for(provider)
json_state = self.request.get('state')
logging.debug(json_state)
state = json.loads(json_state)
if self.OAUTH2_CSRF_STATE:
_expected = self.session.pop(self.OAUTH2_CSRF_SESSION_PARAM, '')
_actual = state[self.OAUTH2_CSRF_STATE_PARAM]
# If _expected is '' it won't validate anyway.
if not self._validate_csrf_token(_expected, _actual):
raise InvalidCSRFTokenError(
'[%s] vs [%s]' % (_expected, _actual), provider)
extra = state.get(self.OAUTH2_STATE_EXTRA_PARAM, None)
payload = {
'code': code,
'client_id': client_id,
'client_secret': client_secret,
'redirect_uri': callback_url,
'grant_type': 'authorization_code'
}
resp = urlfetch.fetch(
url=access_token_url,
payload=urlencode(payload),
method=urlfetch.POST,
headers={'Content-Type': 'application/x-www-form-urlencoded'})
_parser = getattr(self, self.TOKEN_RESPONSE_PARSERS[provider])
_fetcher = getattr(self, '_get_%s_user_info' % provider)
auth_info = _parser(resp.content)
user_data = _fetcher(auth_info, key=client_id, secret=client_secret)
return user_data, auth_info, extra
def _oauth1_init(self, provider, auth_urls, extra=None):
"""Initiates OAuth 1.0 dance"""
key, secret = self._get_consumer_info_for(provider)
callback_url = self._callback_uri_for(provider)
optional_params = self._get_optional_params_for(provider)
token_request_url = auth_urls.get('request', None)
auth_url = auth_urls.get('auth', None)
_parser = getattr(self, self.TOKEN_RESPONSE_PARSERS[provider], None)
# make a request_token request
client = self._oauth1_client(consumer_key=key, consumer_secret=secret)
resp, content = client.request(auth_urls['request'], "POST",
body=urlencode(
{'oauth_callback': callback_url}))
if resp.status != 200:
raise AuthProviderResponseError(
'%s (status: %d)' % (content, resp.status), provider)
# parse token request response
request_token = _parser(content)
if not request_token.get('oauth_token', None):
raise AuthProviderResponseError(
"Couldn't get a request token from %s" % str(request_token), provider)
params = {
'oauth_token': request_token.get('oauth_token', None),
'oauth_callback': callback_url
}
if isinstance(optional_params, dict):
params.update(optional_params)
target_url = auth_urls['auth'].format(urlencode(params))
logging.debug('Redirecting user to %s', target_url)
# save request token for later, the callback
self.session['req_token'] = request_token
self.redirect(target_url)
def _oauth1_callback(self, provider, access_token_url):
"""Third step of OAuth 1.0 dance."""
request_token = self.session.pop('req_token', None)
if not request_token:
raise InvalidOAuthRequestToken(
"No request token in user session", provider)
verifier = self.request.get('oauth_verifier')
if not verifier:
raise AuthProviderResponseError(
"No OAuth verifier was provided", provider)
consumer_key, consumer_secret = self._get_consumer_info_for(provider)
token = oauth1.Token(request_token['oauth_token'],
request_token['oauth_token_secret'])
token.set_verifier(verifier)
client = self._oauth1_client(token, consumer_key, consumer_secret)
resp, content = client.request(access_token_url, "POST")
_parser = getattr(self, self.TOKEN_RESPONSE_PARSERS[provider])
_fetcher = getattr(self, '_get_%s_user_info' % provider)
auth_info = _parser(content)
user_data = _fetcher(auth_info, key=consumer_key, secret=consumer_secret)
return (user_data, auth_info)
def _openid_init(self, provider='openid', identity=None, extra=None):
"""Initiates OpenID dance using App Engine users module API."""
identity_url = identity or self.request.get('identity_url')
callback_url = self._callback_uri_for(provider)
target_url = users.create_login_url(
dest_url=callback_url, federated_identity=identity_url)
logging.debug('Redirecting user to %s', target_url)
self.redirect(target_url)
def _openid_callback(self, provider='openid', _identity=None):
"""Being called back by an OpenID provider
after the user has been authenticated.
"""
user = users.get_current_user()
if not user or not user.federated_identity():
raise InvalidOpenIDUserError(user, provider)
uinfo = {
'id': user.federated_identity(),
'nickname': user.nickname(),
'email': user.email()
}
return (uinfo, {'provider': user.federated_provider()})
#
# callbacks and consumer key/secrets
#
def _callback_uri_for(self, provider):
"""Returns a callback URL for a 2nd step of the auth process.
Override this with something like:
self.uri_for('auth_callback', provider=provider, _full=True)
"""
return None
def _get_consumer_info_for(self, provider):
"""Returns a (key, secret, desired_scopes) tuple.
Defaults to None. You should redefine this method and return real values.
For OAuth 2.0 it should be a 3 elements tuple:
(client_ID, client_secret, scopes)
OAuth 1.0 doesn't have scope so this should return just a
(consumer_key, consumer_secret) tuple.
OpenID needs neither scope nor key/secret, so this method is never called
for OpenID authentication.
See README for more info on scopes and where to get consumer/client
key/secrets.
"""
return (None, None, None)
def _get_optional_params_for(self, provider):
"""Returns optional parameters to send to provider on init
Defaults to None.
If you want to send optional parameter, redefine this method.
This should return a dictionary of parameter names and
values as defined by the provider.
"""
return None
#
# user profile/info
#
def _get_google_user_info(self, auth_info, key=None, secret=None):
"""Returns a dict of currenly logging in user.
Google API endpoint:
https://www.googleapis.com/oauth2/v3/userinfo
"""
logging.warn('Google userinfo endpoint is deprecated. '
'Use Google+ API (googleplus provider): '
'https://developers.google.com/+/api/auth-migration#timetable')
resp = self._oauth2_request(
'https://www.googleapis.com/oauth2/v3/userinfo?{0}',
auth_info['access_token'])
data = json.loads(resp)
if 'id' not in data and 'sub' in data:
data['id'] = data['sub']
return data
def _get_googleplus_user_info(self, auth_info, key=None, secret=None):
"""Returns a dict of currenly logging in user.
Google+ API endpoint:
https://www.googleapis.com/plus/v1/people/me
"""
resp = self._oauth2_request(
'https://www.googleapis.com/plus/v1/people/me?{0}',
auth_info['access_token'])
return json.loads(resp)
def _get_windows_live_user_info(self, auth_info, key=None, secret=None):
"""Windows Live API user profile endpoint.
https://apis.live.net/v5.0/me
Profile picture:
https://apis.live.net/v5.0/USER_ID/picture
"""
resp = self._oauth2_request('https://apis.live.net/v5.0/me?{0}',
auth_info['access_token'])
uinfo = json.loads(resp)
avurl = 'https://apis.live.net/v5.0/{0}/picture'.format(uinfo['id'])
uinfo.update(avatar_url=avurl)
return uinfo
def _get_facebook_user_info(self, auth_info, key=None, secret=None):
"""Facebook Graph API endpoint.
https://graph.facebook.com/me
"""
resp = self._oauth2_request('https://graph.facebook.com/me?{0}',
auth_info['access_token'])
return json.loads(resp)
def _get_foursquare_user_info(self, auth_info, key=None, secret=None):
"""Returns a dict of currenly logging in user.
foursquare API endpoint:
https://api.foursquare.com/v2/users/self
"""
resp = self._oauth2_request(
'https://api.foursquare.com/v2/users/self?{0}&v=20130204',
auth_info['access_token'],'oauth_token')
data = json.loads(resp)
if data['meta']['code'] != 200:
logging.error(data['meta']['errorDetail'])
return data['response'].get('user')
def _get_linkedin_user_info(self, auth_info, key=None, secret=None):
"""Returns a dict of currently logging in linkedin user.
LinkedIn user profile API endpoint:
http://api.linkedin.com/v1/people/~
or
http://api.linkedin.com/v1/people/~:<fields>
where <fields> is something like
(id,first-name,last-name,picture-url,public-profile-url,headline)
LinkedIn OAuth 1.0a is deprecated. Use LinkedIn with OAuth 2.0
"""
# TODO: remove LinkedIn OAuth 1.0a in the next release.
logging.warn('LinkedIn OAuth 1.0a is deprecated. '
'Use LinkedIn with OAuth 2.0: '
'https://developer.linkedin.com/documents/authentication')
token = oauth1.Token(key=auth_info['oauth_token'],
secret=auth_info['oauth_token_secret'])
client = self._oauth1_client(token, key, secret)
fields = 'id,first-name,last-name,picture-url,public-profile-url,headline'
url = 'http://api.linkedin.com/v1/people/~:(%s)' % fields
resp, content = client.request(url)
return self._parse_xml_user_info(content)
def _get_linkedin2_user_info(self, auth_info, key=None, secret=None):
"""Returns a dict of currently logging in linkedin user.
LinkedIn user profile API endpoint:
http://api.linkedin.com/v1/people/~
or
http://api.linkedin.com/v1/people/~:<fields>
where <fields> is something like
(id,first-name,last-name,picture-url,public-profile-url,headline)
"""
fields = 'id,first-name,last-name,picture-url,public-profile-url,headline'
url = 'https://api.linkedin.com/v1/people/~:(%s)?{0}' % fields
resp = self._oauth2_request(url, auth_info['access_token'],
token_param='oauth2_access_token')
return self._parse_xml_user_info(resp)
def _parse_xml_user_info(self, content):
try:
# lxml is one of the third party libs available on App Engine out of the
# box. See example/app.yaml for more info.
from lxml import etree
except ImportError:
import xml.etree.ElementTree as etree
person = etree.fromstring(content)
uinfo = {}
for e in person:
uinfo.setdefault(e.tag, e.text)
return uinfo
def _get_twitter_user_info(self, auth_info, key=None, secret=None):
"""Returns a dict of twitter user using
https://api.twitter.com/1.1/account/verify_credentials.json
"""
token = oauth1.Token(key=auth_info['oauth_token'],
secret=auth_info['oauth_token_secret'])
client = self._oauth1_client(token, key, secret)
resp, content = client.request(
'https://api.twitter.com/1.1/account/verify_credentials.json')
uinfo = json.loads(content)
uinfo.setdefault('link', 'http://twitter.com/%s' % uinfo['screen_name'])
return uinfo
#
# aux methods
#
def _oauth1_client(self, token=None, consumer_key=None,
consumer_secret=None):
"""Returns OAuth 1.0 client that is capable of signing requests."""
args = [oauth1.Consumer(key=consumer_key, secret=consumer_secret)]
if token:
args.append(token)
return oauth1.Client(*args)
def _oauth2_request(self, url, token, token_param='access_token'):
"""Makes an HTTP request with OAuth 2.0 access token using App Engine
URLfetch API.
"""
target_url = url.format(urlencode({token_param:token}))
return urlfetch.fetch(target_url).content
def _query_string_parser(self, body):
"""Parses response body of an access token request query and returns
the result in JSON format.
Facebook, LinkedIn and Twitter respond with a query string, not JSON.
"""
return dict(urlparse.parse_qsl(body))
def _json_parser(self, body):
"""Parses body string into JSON dict"""
return json.loads(body)
def _generate_csrf_token(self, _time=None):
"""Creates a new random token that can be safely used as a URL param.
Token would normally be stored in a user session and passed as 'state'
parameter during OAuth 2.0 authorization step.
"""
now = str(_time or long(time.time()))
secret = security.generate_random_string(30, pool=security.ASCII_PRINTABLE)
token = self.OAUTH2_CSRF_DELIMITER.join([secret, now])
return base64.urlsafe_b64encode(token)
def _validate_csrf_token(self, expected, actual):
"""Validates expected token against the actual.
Args:
expected: String, existing token. Normally stored in a user session.
actual: String, token provided via 'state' param.
"""
if expected != actual:
return False
try:
decoded = base64.urlsafe_b64decode(expected.encode('ascii'))
token_key, token_time = decoded.rsplit(self.OAUTH2_CSRF_DELIMITER, 1)
token_time = long(token_time)
if not token_key:
return False
except (TypeError, ValueError, UnicodeDecodeError):
return False
now = long(time.time())
timeout = now - token_time > self.OAUTH2_CSRF_TOKEN_TIMEOUT
if timeout:
logging.error("CSRF token timeout (issued at %d)", token_time)
return not timeout
| 34.525806 | 80 | 0.679996 | 20,386 | 0.952305 | 0 | 0 | 0 | 0 | 0 | 0 | 9,713 | 0.45373 |
e32db38efba021a5263a02a0f603ee6533341d64 | 766 | py | Python | test.py | litex-hub/pythondata-cpu-ibex | 9775779f0770fc635a17dfc467cb8d5afdf01d1d | [
"Apache-2.0"
]
| 2 | 2021-02-18T00:27:38.000Z | 2021-05-12T21:57:41.000Z | test.py | litex-hub/pythondata-cpu-ibex | 9775779f0770fc635a17dfc467cb8d5afdf01d1d | [
"Apache-2.0"
]
| null | null | null | test.py | litex-hub/pythondata-cpu-ibex | 9775779f0770fc635a17dfc467cb8d5afdf01d1d | [
"Apache-2.0"
]
| 1 | 2021-04-28T02:42:51.000Z | 2021-04-28T02:42:51.000Z | #!/usr/bin/env python3
from __future__ import print_function
import os
import pythondata_cpu_ibex
print("Found ibex @ version", pythondata_cpu_ibex.version_str, "(with data", pythondata_cpu_ibex.data_version_str, ")")
print()
print("Data is in", pythondata_cpu_ibex.data_location)
assert os.path.exists(pythondata_cpu_ibex.data_location)
print("Data is version", pythondata_cpu_ibex.data_version_str, pythondata_cpu_ibex.data_git_hash)
print("-"*75)
print(pythondata_cpu_ibex.data_git_msg)
print("-"*75)
print()
print("It contains:")
for root, dirs, files in os.walk(pythondata_cpu_ibex.data_location):
dirs.sort()
for f in sorted(files):
path = os.path.relpath(os.path.join(root, f), pythondata_cpu_ibex.data_location)
print(" -", path)
| 31.916667 | 119 | 0.765013 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 112 | 0.146214 |
e331235f5a65953d372c517da81e56d9c43aa850 | 2,652 | py | Python | scenegraph/pddlgym_planners/lapkt.py | taskography/3dscenegraph-dev | 2c261241230fbea1f1c687ff793478248f25c02c | [
"MIT"
]
| 1 | 2022-01-30T22:06:57.000Z | 2022-01-30T22:06:57.000Z | scenegraph/pddlgym_planners/lapkt.py | taskography/3dscenegraph-dev | 2c261241230fbea1f1c687ff793478248f25c02c | [
"MIT"
]
| null | null | null | scenegraph/pddlgym_planners/lapkt.py | taskography/3dscenegraph-dev | 2c261241230fbea1f1c687ff793478248f25c02c | [
"MIT"
]
| null | null | null | """LAPKT-BFWS
https://github.com/nirlipo/BFWS-public
"""
import re
import os
import sys
import subprocess
import tempfile
from pddlgym_planners.pddl_planner import PDDLPlanner
from pddlgym_planners.planner import PlanningFailure
import numpy as np
from utils import FilesInCommonTempDirectory
DOCKER_IMAGE = 'khodeir/bfws:latest'
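# The BFWS planner runs inside a Docker container: the domain/problem directory is
# mounted at /problem and the resulting plan is read back from /problem/bfws.plan.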
class LAPKTBFWS(PDDLPlanner):
def __init__(self):
super().__init__()
print("Instantiating LAPKT-BFWS")
self.install_delfi()
def install_delfi(self):
subprocess.check_call(f'docker pull {DOCKER_IMAGE}', shell=True, stdout=subprocess.DEVNULL)
def plan_from_pddl(self, dom_file, prob_file, horizon=np.inf, timeout=10, remove_files=False):
self.tmpdir = FilesInCommonTempDirectory(dom_file, prob_file)
(dom_file, prob_file) = self.tmpdir.new_fpaths
return super().plan_from_pddl(dom_file, prob_file, horizon=horizon, timeout=timeout, remove_files=remove_files)
def _get_cmd_str(self, dom_file, prob_file, timeout):
timeout_cmd = "gtimeout" if sys.platform == "darwin" else "timeout"
probdom_dir = os.path.dirname(dom_file)
dom_fname = os.path.basename(dom_file)
prob_fname = os.path.basename(prob_file)
assert probdom_dir == os.path.dirname(prob_file), "Files must be in the same directory"
cmd_str = f"docker run --privileged -it -v {probdom_dir}:/problem -w /problem {DOCKER_IMAGE} {timeout_cmd} {timeout} bfws --domain /problem/{dom_fname} --problem /problem/{prob_fname} --output /problem/bfws.plan --BFWS-f5 1"
return cmd_str
def _output_to_plan(self, output):
try:
self._statistics["num_node_expansions"] = int(re.search('nodes expanded during search: (\d+)', output.lower()).group(1))
self._statistics["total_time"] = self._statistics["search_time"] = float(re.search('total time: ([0-9.]+)', output.lower()).group(1))
self._statistics["plan_cost"] = float(re.search('plan found with cost: ([0-9.]+)', output.lower()).group(1))
except:
raise PlanningFailure("Failure parsing output of bfws")
try:
plan_fpath = os.path.join(self.tmpdir.dirname, 'bfws.plan')
with open(plan_fpath, 'r') as f:
plan_output = f.read()
self.tmpdir.cleanup()
plan = re.findall(r"^\(([^)]+)\)", plan_output.lower(), re.M)
assert plan
self._statistics["plan_length"] = len(plan)
return plan
except:
raise PlanningFailure("Plan not found with BFWS! Error: {}".format(output))
def _cleanup(self):
pass
| 42.774194 | 232 | 0.667044 | 2,319 | 0.874434 | 0 | 0 | 0 | 0 | 0 | 0 | 671 | 0.253017 |
e33575c4ac98eb7bd72db9483692a67e2a8b1c0f | 1,914 | py | Python | Create Network Zones.py | Tosatsu/okta-python-scripts | bca5ff89b8fc2381ccab08de971f65505ed0cda5 | [
"MIT"
]
| 1 | 2021-04-09T09:46:31.000Z | 2021-04-09T09:46:31.000Z | Create Network Zones.py | Tosatsu/okta-python-scripts | bca5ff89b8fc2381ccab08de971f65505ed0cda5 | [
"MIT"
]
| null | null | null | Create Network Zones.py | Tosatsu/okta-python-scripts | bca5ff89b8fc2381ccab08de971f65505ed0cda5 | [
"MIT"
]
| 1 | 2021-04-12T11:27:13.000Z | 2021-04-12T11:27:13.000Z | import csv
import re
import sys
import requests
import json
import Data # data container, replace with your own
orgName = Data.orgName # replace with your own
apiKey = Data.apiKey # provide your own API token
api_token = "SSWS " + apiKey
headers = {'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': api_token}
def CreateZone(data):
createZoneUrl = "https://"+orgName+".com/api/v1/zones"
response = requests.post(createZoneUrl, headers=headers, data=data)
responseJSON = json.dumps(response.json())
responseData = json.loads(responseJSON)
if "errorCode" in responseJSON:
print(responseData)
return "Error"
else:
print(responseData)
return responseData
def CreateZones():
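    # Create 99 sample ACTIVE IP zones (newNetworkZone1..99), each with placeholder
    # gateway and proxy CIDR ranges.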
for x in range(1, 100):
Dict = {
"type": "IP",
"id": "null",
"name": "newNetworkZone" + str(x),
"status": "ACTIVE",
"created": "null",
"lastUpdated": "null",
"gateways": [
{
"type": "CIDR",
"value": "1.2.3.4/24"
},
{
"type": "CIDR",
"value": "2.3.4.5/24"
}
],
"proxies": [
{
"type": "CIDR",
"value": "2.2.3.4/24"
},
{
"type": "CIDR",
"value": "3.3.4.5/24"
}
]
}
CreateZone(json.dumps(Dict))
if __name__ == "__main__":
CreateZones() | 29.90625 | 71 | 0.405434 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 462 | 0.241379 |
e33639a848594d63e324d70460cacf9ae086d33c | 959 | py | Python | simulador_de_dado.py | lucianoferreirasa/PythonProjects | c26a16bcbd61bd0563bc4f7d4dc0dd3593bd95e5 | [
"MIT"
]
| null | null | null | simulador_de_dado.py | lucianoferreirasa/PythonProjects | c26a16bcbd61bd0563bc4f7d4dc0dd3593bd95e5 | [
"MIT"
]
| null | null | null | simulador_de_dado.py | lucianoferreirasa/PythonProjects | c26a16bcbd61bd0563bc4f7d4dc0dd3593bd95e5 | [
"MIT"
]
| null | null | null | import random
import PySimpleGUI as sg
class SimuladorDeDado:
def __init__(self):
self.valor_minimo = 1
self.valor_maximo = 6
self.layout = [
[sg.Text("Jogar o dado?")],
[sg.Button("Sim"),sg.Button("Não")]
]
def Iniciar(self):
self.janela = sg.Window("Simulador de Dado",layout=self.layout)
self.eventos, self.valores = self.janela.Read()
try:
if self.eventos =="Sim" or self.eventos =="s":
self.GerarValorDoDado()
elif self.eventos == "Não" or self.eventos =="n":
print("Agradacemos a sua participação!")
else:
print("Favor digitar sim (s) ou não (n)!")
except:
print("Ocorreu um erro ao receber sua resposta!")
def GerarValorDoDado(self):
print(random.randint(self.valor_minimo,self.valor_maximo))
simulador = SimuladorDeDado()
simulador.Iniciar()
| 29.96875 | 71 | 0.577685 | 871 | 0.903527 | 0 | 0 | 0 | 0 | 0 | 0 | 175 | 0.181535 |
e3370f6e006d93026ba5320fad4727621e81fc92 | 1,712 | py | Python | src/geometry/linear_algebra.py | seahrh/coding-interview | 517d19e7e88c02acec4aa6336bc20206ce3f1897 | [
"MIT"
]
| null | null | null | src/geometry/linear_algebra.py | seahrh/coding-interview | 517d19e7e88c02acec4aa6336bc20206ce3f1897 | [
"MIT"
]
| null | null | null | src/geometry/linear_algebra.py | seahrh/coding-interview | 517d19e7e88c02acec4aa6336bc20206ce3f1897 | [
"MIT"
]
| null | null | null | import math
from typing import List, Iterable, Union
Numeric = Union[int, float]
def magnitude(p: Iterable[Numeric]) -> float:
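    """Euclidean (L2) norm of a vector."""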
res: float = 0
for component in p:
res += component ** 2
res = math.sqrt(res)
return res
def vdot(p: List[Numeric], q: List[Numeric]) -> float:
"""Vector dot product."""
if len(p) == 0:
raise ValueError("p must not be None or empty")
if len(q) == 0:
raise ValueError("q must not be None or empty")
if len(p) != len(q):
raise ValueError("vectors p and q must have the same dimension")
res: float = 0
for i in range(len(p)):
res += p[i] * q[i]
return res
def full(rows: int, columns: int, fill: Numeric = 0) -> List[List[float]]:
"""Return a new array of given shape and type, filled with fill_value."""
return [[fill] * columns for _ in range(rows)]
def transpose(mat: List[List[Numeric]]) -> List[List[float]]:
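    """Transpose of a matrix represented as a list of rows."""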
res: List[List[float]] = full(rows=len(mat[0]), columns=len(mat))
for i in range(len(mat[0])):
for j in range(len(mat)):
res[i][j] = mat[j][i]
return res
def dot(p: List[List[Numeric]], q: List[List[Numeric]]) -> List[List[float]]:
"""Matrix dot product."""
p_shape = len(p), len(p[0])
q_shape = len(q), len(q[0])
if p_shape[1] != q_shape[0]:
raise ValueError("number of columns in p must equal the number of rows in q")
res: List[List[float]] = full(rows=p_shape[0], columns=q_shape[1])
for i in range(p_shape[0]):
for j in range(q_shape[1]):
for k in range(p_shape[1]):
res[i][j] += p[i][k] * q[k][j]
return res
| 31.703704 | 86 | 0.567757 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 286 | 0.167056 |
e337c8816166ee2eea4a6327ac76523c1a2e9c32 | 1,231 | py | Python | plot/eigenvalue_statistics.py | dh4gan/tache | 51ed037769ecc4fdadc591e3b3619416c79e65b7 | [
"MIT"
]
| 5 | 2018-02-27T04:07:15.000Z | 2020-12-29T20:49:36.000Z | plot/eigenvalue_statistics.py | dh4gan/tache | 51ed037769ecc4fdadc591e3b3619416c79e65b7 | [
"MIT"
]
| null | null | null | plot/eigenvalue_statistics.py | dh4gan/tache | 51ed037769ecc4fdadc591e3b3619416c79e65b7 | [
"MIT"
]
| null | null | null | # Written 9/10/14 by dh4gan
# Code reads in output eigenvalue file from tache
# Computes statistics
import numpy as np
import matplotlib.pyplot as plt
import io_tache as io
import filefinder as ff  # assumed helper module providing find_local_input_files
# Read in inputs from command line
filename = ff.find_local_input_files('eigenvalues*')
threshold = input("What is the threshold for classification? ")
# Read in eigenvalue file
print "Reading eigenvalue file ", filename
npart,x,y,z,eigenpart,eigenvalues = io.read_eigenvalue_file(filename)
print np.amax(eigenvalues),np.amin(eigenvalues)
# Calculate the trace for each simulation element
trace = np.zeros(npart)
for i in range(npart):
for j in range(3):
trace[i] = trace[i]+ eigenvalues[i,j]
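# Normalise each element's eigenvalues by its trace; elements with a
# non-positive trace are zeroed out.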
normedeigenvalues = eigenvalues.copy()
for i in range(npart):
if(trace[i]>0.0):
normedeigenvalues[i,:] = normedeigenvalues[i,:]/trace[i]
else:
normedeigenvalues[i,:] = 0.0
# Make a histogram of the eigenvalues
alleigenvalues = eigenvalues.flatten()
fig1 = plt.figure(1)
ax = fig1.add_subplot(111)
ax.hist(alleigenvalues, bins=100, normed=True, log=True)
plt.show()
# Make a histogram of the traces
fig1 = plt.figure(1)
ax = fig1.add_subplot(111)
ax.hist(trace, bins=100, normed=True, log=True)
plt.show()
| 21.224138 | 69 | 0.723802 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 358 | 0.29082 |
e337db10027ece0f941b1295bc94ad1a0ed34904 | 4,179 | py | Python | arrow/forwarder/views.py | AkhilGKrishnan/arrow | bbd35faa5011c642cdcf218b180b48dd7ef39ef6 | [
"MIT"
]
| null | null | null | arrow/forwarder/views.py | AkhilGKrishnan/arrow | bbd35faa5011c642cdcf218b180b48dd7ef39ef6 | [
"MIT"
]
| null | null | null | arrow/forwarder/views.py | AkhilGKrishnan/arrow | bbd35faa5011c642cdcf218b180b48dd7ef39ef6 | [
"MIT"
]
| 3 | 2019-01-07T17:07:16.000Z | 2021-01-09T13:01:40.000Z | from django.views.generic.edit import CreateView, FormMixin
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django import forms
from django.urls import reverse
from reportlab.pdfgen import canvas
from django.http import HttpResponse
from forwarder.models import Application, Hierarchy
class ForwardForm(forms.Form):
pass
class ApplicationCreate(CreateView):
model = Application
template_name = 'forwarder/application_create.html'
fields = ['type', 'other']
success_url = "/listApplication"
def form_valid(self, form):
form.instance.applicant = self.request.user
return super(ApplicationCreate, self).form_valid(form)
class ListApplicationView(ListView):
template_name = 'forwarder/list.html'
def get_queryset(self):
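        # Filter by the requesting user's designation: 'st' users see only their own
        # applications, 'tu' users see level-0 applications in their branch, 'ho'
        # users see level-1, and all other users see level-2 applications.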
user = self.request.user
hierarchies = Hierarchy.objects.all()
#TODO: Generalize
if(user.designation == 'st'):
qs = Application.objects.filter(applicant=user)
elif(user.designation == 'tu'):
#for hr in hierarchies:
# qs += Application.objects.filter(applicant__branch=user.branch)
qs = Application.objects.filter(applicant__branch=user.branch, hierarchy_level=0)
elif(user.designation == 'ho'):
#for hr in hierarchies:
# qs += Application.objects.filter(applicant__branch=user.branch)
qs = Application.objects.filter(applicant__branch=user.branch, hierarchy_level=1)
else:
qs = Application.objects.filter(hierarchy_level=2)
return qs
class ApplicationDetailView(FormMixin, DetailView):
model = Application
template_name = 'forwarder/detail.html'
form_class = ForwardForm
def get_context_data(self, **kwargs):
context = super(ApplicationDetailView, self).get_context_data(**kwargs)
context['form'] = self.get_form()
return context
def get_success_url(self):
return reverse("list-application")
def post(self, request, *args, **kwargs):
#if not request.user.is_authenticated:
# return HttpResponseForbidden()
self.object = self.get_object()
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
def form_valid(self, form):
# Here, we would record the user's interest using the message
# passed in form.cleaned_data['message']
if '_forward' in self.request.POST:
self.object.hierarchy_level += 1
self.object.save()
if '_reject' in self.request.POST:
self.object.hierarchy_level -= 1
self.object.save()
pass
return super(ApplicationDetailView, self).form_valid(form)
def pdf_dl(request, pk):
# Create the HttpResponse object with the appropriate PDF headers.
application = Application.objects.get(pk=pk)
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="%s.pdf"' % (application)
# Create the PDF object, using the response object as its "file."
p = canvas.Canvas(response)
# Draw things on the PDF. Here's where the PDF generation happens.
# See the ReportLab documentation for the full list of functionality.
p.drawString(100, 800, "Name : " + application.applicant.name)
p.drawString(100, 780, "Admission no : " + str(application.applicant.admn_no))
p.drawString(100, 760, "Department : " + application.applicant.branch)
p.drawString(100, 740, "Semester : " + str(application.applicant.semester))
p.drawString(100, 720, "Parent name : " + application.applicant.parent_name)
if application.type == "OTH":
p.drawString(100, 700, "Application type : " + application.other())
else:
p.drawString(100, 700, "Application type : " + application.get_type_display())
p.drawString(100, 680, "Recommended by HOD of " + application.applicant.branch)
# Close the PDF object cleanly, and we're done.
p.showPage()
p.save()
return response
| 36.025862 | 93 | 0.675042 | 2,502 | 0.598708 | 0 | 0 | 0 | 0 | 0 | 0 | 1,061 | 0.253888 |
e3390f43d3793bc787b6b52cd5f2cc575976a36e | 4,793 | py | Python | caption_feats_generation_scripts/full_vid_data_loader.py | Alterith/Dense_Video_Captioning_Feature_Extraction_Model_Choice | 65d0f2d26698cc8f7a5ffb564936113e2bbec201 | [
"MIT"
]
| 1 | 2021-04-21T12:39:07.000Z | 2021-04-21T12:39:07.000Z | caption_feats_generation_scripts/full_vid_data_loader.py | Alterith/masters_code | 65d0f2d26698cc8f7a5ffb564936113e2bbec201 | [
"MIT"
]
| null | null | null | caption_feats_generation_scripts/full_vid_data_loader.py | Alterith/masters_code | 65d0f2d26698cc8f7a5ffb564936113e2bbec201 | [
"MIT"
]
| null | null | null | import h5py
# torch imports
import torch
from torch.utils.data import Dataset
# generic imports
import os
import sys
import numpy as np
import random
import pandas as pd
import cv2
from decord import VideoReader
from decord import cpu, gpu
from matplotlib import pyplot as plt
import gc
# create data loader
class video_dataset(Dataset):
def __init__(self, data_dir, split, temporal_depth, patch_width, patch_height, dataset_name, stride=None, stride_idx=None):
print(data_dir)
        # names of the video files in this split directory
self.vids = os.listdir(os.path.join(data_dir, split))
        # full path to each video file in the split
self.flattened_data_dir = [os.path.join(os.path.join(data_dir, split),v) for v in self.vids]
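        # Optionally keep only this worker's shard of the video list so several
        # processes can split the dataset between them.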
if stride is not None and stride_idx is not None:
try:
if stride*(stride_idx+1) <= len(self.flattened_data_dir):
self.flattened_data_dir = self.flattened_data_dir[stride*stride_idx:stride*(stride_idx+1)]
else:
self.flattened_data_dir = self.flattened_data_dir[stride*stride_idx:]
except Exception as e:
print("Dataloader out of range")
quit()
# train, test, val
self.split = split
# number of consecutive frames
self.temporal_depth = temporal_depth
# dimension of patch selected
self.patch_width = patch_width
self.patch_height = patch_height
    # data augmentation / preprocessing transforms
def transform(self, vid, split):
total_frames = int(len(vid))
print(total_frames)
if total_frames > 7200:
return torch.zeros(901, 1, 1, 1)
vid_width = vid[0].shape[1]
vid_height = vid[0].shape[0]
start_frame = random.randint(0, (total_frames - self.temporal_depth - 1))
patch_start_width = random.randint(0, 171 - self.patch_width - 1)
patch_start_height = random.randint(0, 128 - self.patch_height - 1)
clips = []
# the prob of flipping a video
flip_prob = random.random()
# frame iterator / stride index
stride = 0
stride_index = self.temporal_depth
        # obtain temporal_depth consecutive frames for each clip
inter_method_idx = 0 #random.randint(0,4)
inter_methods = [cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_NEAREST, cv2.INTER_AREA, cv2.INTER_LANCZOS4]
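        # Slide a non-overlapping window of temporal_depth frames over the video;
        # every window becomes one clip of resized, normalised RGB frames.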
while(stride*stride_index + self.temporal_depth < total_frames):
imgs = []
start_frame = stride*stride_index
for i in range(start_frame, start_frame + self.temporal_depth):
frame = vid[i]
# frame = frame.astype()
frame = frame.asnumpy()
frame = frame.astype(np.float32)
frame = np.asarray(frame)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# print(frame)
# plt.imshow(frame)
# plt.show()
# quit()
# frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.resize(frame, (112, 112), interpolation = inter_methods[inter_method_idx]) # remove this or move it up
cv2.normalize(frame, frame, 0, 1, cv2.NORM_MINMAX)
imgs.append(frame)
stride = stride + 1
clips.append(imgs)
clips = np.asarray(clips, dtype=np.float32)
clips = clips.astype(np.float32)
clips = np.moveaxis(clips, 4, 1)
clips = torch.from_numpy(clips)
return clips
def __len__(self):
return len(self.flattened_data_dir)
def __getitem__(self, idx):
if idx < 0:
return torch.zeros(1, 1, 1, 1), self.flattened_data_dir[idx]
result = False
vid = None
# idx = 3456
# deal with corrupted videos in list or videos which are just too long for us to process
while not result:
try:
vid = VideoReader(self.flattened_data_dir[idx])
if(int(len(vid))>self.temporal_depth):
result = True
else:
#idx = random.randint(0, len(self.flattened_data_dir)-1)
del vid
gc.collect()
return torch.zeros(901, 1, 1, 1), -1
except:
#idx = random.randint(0, len(self.flattened_data_dir)-1)
del vid
gc.collect()
return torch.zeros(901, 1, 1, 1), -1
frames = self.transform(vid, self.split)
# vid.close()
del vid
gc.collect()
return frames, self.flattened_data_dir[idx]
| 30.335443 | 130 | 0.580638 | 4,478 | 0.934279 | 0 | 0 | 0 | 0 | 0 | 0 | 772 | 0.161068 |
e339d61b7c0a81fbe079a184470ec5bdef08b9e1 | 1,583 | py | Python | sklearn_baseline.py | Shinkai125/KerasForTextClassfication | ed3d04c5c58d1dfb3f79b83ba704dd486616f0e4 | [
"MIT"
]
| null | null | null | sklearn_baseline.py | Shinkai125/KerasForTextClassfication | ed3d04c5c58d1dfb3f79b83ba704dd486616f0e4 | [
"MIT"
]
| null | null | null | sklearn_baseline.py | Shinkai125/KerasForTextClassfication | ed3d04c5c58d1dfb3f79b83ba704dd486616f0e4 | [
"MIT"
]
| null | null | null | """
@file: sklearn_method.py
@time: 2020-12-09 17:38:38
"""
import pandas as pd
import seaborn as sns
from tqdm import tqdm
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
myfont = fm.FontProperties(fname='SimHei.ttf')  # set the font (SimHei)
train_data = pd.read_csv('chnsenticorp/train.tsv', sep='\t')
tfidf = TfidfVectorizer(norm='l2', ngram_range=(1, 2))
features = tfidf.fit_transform(train_data.text_a)
labels = train_data.label
print(features.shape)
models = [
RandomForestClassifier(n_estimators=200, max_depth=3, random_state=0),
LinearSVC(),
MultinomialNB(),
LogisticRegression(random_state=0, solver='liblinear'),
]
CV = 10
cv_df = pd.DataFrame(index=range(CV * len(models)))
entries = []
for model in tqdm(models):
model_name = model.__class__.__name__
accuracies = cross_val_score(model, features, labels, scoring='f1', cv=CV)
for fold_idx, accuracy in enumerate(accuracies):
entries.append((model_name, fold_idx, accuracy))
results = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'f1'])
sns.boxplot(x='model_name', y='f1', data=results)
sns.stripplot(x='model_name', y='f1', data=results,
size=8, jitter=True, edgecolor="gray", linewidth=2)
plt.show()
print(results.groupby('model_name').f1.mean())
| 31.66 | 78 | 0.753001 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 208 | 0.130735 |
e33bc5cbc72c8153bc963c853fb7e883e19b21c8 | 2,087 | py | Python | handypackages/gallery/tests.py | roundium/handypackages | b8a0e4952644144b31168f9a4ac8e743933d87c7 | [
"MIT"
]
| 1 | 2019-07-31T11:40:06.000Z | 2019-07-31T11:40:06.000Z | handypackages/gallery/tests.py | roundium/handypackages | b8a0e4952644144b31168f9a4ac8e743933d87c7 | [
"MIT"
]
| 10 | 2020-02-12T01:16:25.000Z | 2021-06-10T18:42:24.000Z | handypackages/gallery/tests.py | roundium/handypackages | b8a0e4952644144b31168f9a4ac8e743933d87c7 | [
"MIT"
]
| 1 | 2019-07-31T11:40:18.000Z | 2019-07-31T11:40:18.000Z | import tempfile
from django.conf import settings
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from filer.models import Image
from handypackages.tag.models import Tag
from .models import Gallery
class TestGalleryModels(TestCase):
def setUp(self):
# make temp folder because we want to delete files after test
# and this code do that automatically
settings.MEDIA_ROOT = tempfile.mkdtemp()
self.user = User.objects.create_superuser('test_user', '', 'testing')
self.tags = [
Tag.objects.create(value='django'),
Tag.objects.create(value='python'),
Tag.objects.create(value='test'),
Tag.objects.create(value='app'),
]
with open('./handypackages/test_requirements/test_upload_image.jpg',
'rb') as image:
test_image = SimpleUploadedFile(
'test_upload_image.jpg',
image.read(),
content_type='image/jpeg'
)
image = Image(
owner=self.user,
file=test_image,
original_filename='gallery_image_file',
name='test_Gallery_image'
)
image.save()
gallery = Gallery(
title='gallery test title',
text='gallery test',
image=image,
)
gallery.save()
gallery.tags.add(*self.tags)
self.gallery = gallery
def test_gallery_model(self):
self.assertEqual(
str(self.gallery),
'gallery test title',
'__str__ in gallery model have an issue!'
)
self.assertEqual(
self.gallery.__unicode__(),
'gallery test title',
'__unicode__ in gallery model have an issue!'
)
self.assertEqual(
set(Tag.objects.all()),
set(self.gallery.gallery_tags),
'gallery gallery_tags method does not work!'
)
| 29.394366 | 77 | 0.577384 | 1,793 | 0.859128 | 0 | 0 | 0 | 0 | 0 | 0 | 487 | 0.233349 |
e33d45a696398845a0fe18a3dbb14693d8655739 | 1,926 | py | Python | src/jk_mediawiki/impl/WikiCronProcessFilter.py | jkpubsrc/python-module-jk-mediawiki | 5d76a060f0ed46c072d44e8084f6fa40d16e6069 | [
"Apache-1.1"
]
| null | null | null | src/jk_mediawiki/impl/WikiCronProcessFilter.py | jkpubsrc/python-module-jk-mediawiki | 5d76a060f0ed46c072d44e8084f6fa40d16e6069 | [
"Apache-1.1"
]
| null | null | null | src/jk_mediawiki/impl/WikiCronProcessFilter.py | jkpubsrc/python-module-jk-mediawiki | 5d76a060f0ed46c072d44e8084f6fa40d16e6069 | [
"Apache-1.1"
]
| null | null | null |
import os
import typing
import jk_typing
from .ProcessFilter import ProcessFilter
class WikiCronProcessFilter(ProcessFilter):
################################################################################################################################
## Constructor
################################################################################################################################
#
# Constructor method.
#
@jk_typing.checkFunctionSignature()
def __init__(self, userName:str, wikiInstDirPath:typing.Union[str,None], source:typing.Callable):
# {
# 'ppid': 21827,
# 'pid': 21841,
# 'tty': 'pts/7',
# 'stat': 'S',
# 'uid': 1000,
# 'gid': 1000,
# 'cmd': 'php',
# 'args': '/srv/wikis/srv/wikis/infowiki/infowiki/maintenance/runJobs.php --wait',
# 'user': 'woodoo',
# 'group': 'woodoo'
# }
super().__init__(
source = source,
userName = userName,
cmdExact="php",
#argEndsWith="runJobs.php",
argExact=os.path.join(wikiInstDirPath, "maintenance", "runJobs.php") if wikiInstDirPath else None
)
#
################################################################################################################################
## Public Properties
################################################################################################################################
################################################################################################################################
## Helper Methods
################################################################################################################################
################################################################################################################################
## Public Methods
################################################################################################################################
#
| 27.514286 | 129 | 0.297508 | 1,820 | 0.944964 | 0 | 0 | 622 | 0.322949 | 0 | 0 | 1,408 | 0.731049 |
e33da7e662f4c2fc76532c7c89e8edb38e2cccee | 96 | py | Python | venv/lib/python3.8/site-packages/filelock/_error.py | GiulianaPola/select_repeats | 17a0d053d4f874e42cf654dd142168c2ec8fbd11 | [
"MIT"
]
| 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/filelock/_error.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
]
| 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/filelock/_error.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
]
| null | null | null | /home/runner/.cache/pip/pool/ab/0b/2c/7ae80e56fd2208fbee5ef317ac009972f468b5601f62f8f799f9d9279a | 96 | 96 | 0.895833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e33e7075c79b3b47f743f64502284119cdb5e862 | 2,094 | py | Python | konbata/Formats/xml_format.py | jzeuner/konbata | 41c5ec9ce4c84e82e09daaa106ceed9de38c437b | [
"MIT"
]
| 2 | 2019-12-01T16:12:24.000Z | 2021-05-18T22:10:12.000Z | konbata/Formats/xml_format.py | jzeuner/konbata | 41c5ec9ce4c84e82e09daaa106ceed9de38c437b | [
"MIT"
]
| 10 | 2019-09-19T17:08:46.000Z | 2021-02-17T21:42:10.000Z | konbata/Formats/xml_format.py | jzeuner/konbata | 41c5ec9ce4c84e82e09daaa106ceed9de38c437b | [
"MIT"
]
| 3 | 2019-11-27T18:39:12.000Z | 2021-02-10T15:11:58.000Z | """
Loader and Parser for the xml format.
Version: 0.01-alpha
"""
from xml.dom import minidom
from konbata.Data.Data import DataNode, DataTree
from konbata.Formats.Format import Format
def xml_toTree(file, delimiter, options=None):
"""
    Function transforms an XML file into a DataTree.
Parameters
----------
file: file
open input file in at least read mode
Returns
-------
tree: DataTree
"""
# TODO: Second Parser with the import xml.etree.ElementTree as ET class
xml_reader = minidom.parse(file)
xml_reader.normalize()
tree = DataTree(tree_type='xml')
if xml_reader.hasChildNodes():
for node in xml_reader.childNodes:
childNode = help_xml_toTree(node)
tree.root.add(childNode)
return tree
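# Illustrative usage (hypothetical file name; not part of the original module):
#   with open("example.xml") as f:
#       tree = xml_toTree(f, delimiter=None)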
def help_xml_toTree(xml_node):
"""
    Helps the xml_toTree function; walks through the XML recursively.
Parameters
----------
xml_node: ElementType1
Returns
-------
node: DataNode
"""
if xml_node.hasChildNodes():
tree_node = DataNode(xml_node.localName)
for node in xml_node.childNodes:
tree_node.add(help_xml_toTree(node))
return tree_node
# TODO Add Attributes
node = None
if xml_node.nodeType == xml_node.TEXT_NODE:
# TODO: guess xml_node.nodeValue == xml_node.data
node = DataNode(xml_node.nodeValue.replace('\n ', ''))
elif xml_node.nodeType == xml_node.ELEMENT_NODE:
# TODO: guess xml_node.tagName == xml_node.localName
node = DataNode(xml_node.localName)
else:
# TODO: Implement the other nodeTypes
print('Warning: NodeType not supported yet')
node = DataNode(xml_node.localName)
return node
def xml_fromTree(tree, file, options=None):
"""
    Function transforms a DataTree into an XML file.
Parameters
----------
tree: DataTree
file: file
open output file in at least write mode
options: list, optional
"""
# TODO
pass
xml_format = Format('xml', ['/n'], xml_toTree, xml_fromTree)
| 22.516129 | 75 | 0.637536 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 944 | 0.450812 |
e33ec5c64b5732e244db6498e5c0817ede88b3d0 | 1,650 | py | Python | make_high_indel.py | wckdouglas/ngs_qc_plot | b279905f9e30d1cf547cda5f51cc77e8a134ce99 | [
"MIT"
]
| null | null | null | make_high_indel.py | wckdouglas/ngs_qc_plot | b279905f9e30d1cf547cda5f51cc77e8a134ce99 | [
"MIT"
]
| null | null | null | make_high_indel.py | wckdouglas/ngs_qc_plot | b279905f9e30d1cf547cda5f51cc77e8a134ce99 | [
"MIT"
]
| null | null | null | #!/usr/env python
import pandas as pd
import os
import sys
import numpy as np
if len(sys.argv) != 3:
    sys.exit('[usage] python %s <repeat_index table> <indel cutoff>' % sys.argv[0])
ref_table = sys.argv[1]
indel_cut_off = int(sys.argv[2])
for gdf in pd.read_csv(ref_table, sep='\t', chunksize = 10000):
for contig, contig_df in gdf.groupby('contig'):
df = contig_df\
.assign(indel_index = lambda d: d.negative_index + d.positive_index) \
.query('indel_index >= %i ' %indel_cut_off)
count = 0
for i, base in df.iterrows():
if base['negative_index'] == base['indel_index']:
start = base['start']
mononucleotide = base['fwd_base']
indel_index = base['indel_index']
taken_base = 1
elif taken_base != indel_index and base['fwd_base'] == mononucleotide:
taken_base += 1
elif taken_base == indel_index:
assert base['positive_index'] == indel_index and base['fwd_base'] == mononucleotide,'Wrong parsing'
end = base['start']
line = '{contig}\t{start}\t{end}\tIndel{id}\t{indel_index}\t+\t{mononucleotide}' \
.format(contig = base['contig'],
start = start,
end = end,
id = count,
indel_index = indel_index,
mononucleotide = mononucleotide)
                print(line, file=sys.stdout)
count += 1
else:
print(base)
| 36.666667 | 115 | 0.512121 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 302 | 0.18303 |
e340a47d3057e0a84cad2effe274acb95c936bc5 | 662 | py | Python | LeetCodeSolutions/python/474_Ones_and_Zeroes.py | ChuanleiGuo/AlgorithmsPlayground | 90b6287b742c8bfd3797540c408d679be2821a40 | [
"MIT"
]
| 1 | 2017-03-27T13:38:37.000Z | 2017-03-27T13:38:37.000Z | LeetCodeSolutions/python/474_Ones_and_Zeroes.py | ChuanleiGuo/AlgorithmsPlayground | 90b6287b742c8bfd3797540c408d679be2821a40 | [
"MIT"
]
| null | null | null | LeetCodeSolutions/python/474_Ones_and_Zeroes.py | ChuanleiGuo/AlgorithmsPlayground | 90b6287b742c8bfd3797540c408d679be2821a40 | [
"MIT"
]
| null | null | null | class Solution(object):
def findMaxForm(self, strs, m, n):
"""
:type strs: List[str]
:type m: int
:type n: int
:rtype: int
"""
dp = [[0] * (n + 1) for _ in range(m + 1)]
def counts(s):
return sum(1 for c in s if c == '0'), \
sum(1 for c in s if c == '1')
for num_zero, num_one in [counts(s) for s in strs]:
for i in range(m, -1, -1):
for j in range(n, -1, -1):
if i >= num_zero and j >= num_one:
dp[i][j] = max(dp[i][j], dp[i - num_zero][j - num_one] + 1)
return dp[m][n]
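# Illustrative usage (added; the sample input/output below is the well-known
# example from the problem statement, not part of the original file):
if __name__ == "__main__":
    assert Solution().findMaxForm(["10", "0001", "111001", "1", "0"], 5, 3) == 4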
| 30.090909 | 83 | 0.413897 | 661 | 0.998489 | 0 | 0 | 0 | 0 | 0 | 0 | 113 | 0.170695 |
e3431c6a3c1b12221c308a1da4d98113e28475f3 | 474 | py | Python | xicsrt/optics/_InteractNone.py | PrincetonUniversity/xicsrt | 15dfe5e3cd8ac6a326e8f0e502c8b739bd09d3fd | [
"MIT"
]
| 1 | 2021-07-21T17:07:31.000Z | 2021-07-21T17:07:31.000Z | xicsrt/optics/_InteractNone.py | PrincetonUniversity/xicsrt | 15dfe5e3cd8ac6a326e8f0e502c8b739bd09d3fd | [
"MIT"
]
| null | null | null | xicsrt/optics/_InteractNone.py | PrincetonUniversity/xicsrt | 15dfe5e3cd8ac6a326e8f0e502c8b739bd09d3fd | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
.. Authors:
Novimir Pablant <[email protected]>
Define the :class:`InteractNone` class.
"""
import numpy as np
from copy import deepcopy
from xicsrt.tools.xicsrt_doc import dochelper
from xicsrt.optics._InteractObject import InteractObject
@dochelper
class InteractNone(InteractObject):
"""
No interaction with surface, rays will pass through unchanged.
"""
# The behavior is identical to that of the base class.
pass | 21.545455 | 66 | 0.723629 | 187 | 0.394515 | 0 | 0 | 198 | 0.417722 | 0 | 0 | 255 | 0.537975 |
e34633ea0534cf1b5136a4ecb84b248d7c202e57 | 416 | py | Python | #103 - Ficha do Jogador.py | Lucas-HMSC/curso-python3 | b6506d508107c9a43993a7b5795ee39fc3b7c79d | [
"MIT"
]
| null | null | null | #103 - Ficha do Jogador.py | Lucas-HMSC/curso-python3 | b6506d508107c9a43993a7b5795ee39fc3b7c79d | [
"MIT"
]
| null | null | null | #103 - Ficha do Jogador.py | Lucas-HMSC/curso-python3 | b6506d508107c9a43993a7b5795ee39fc3b7c79d | [
"MIT"
]
| null | null | null | def ficha(nome, gols):
if gols.isnumeric():
gols = int(gols)
else:
gols = 0
if nome.strip() != '':
print(f'O jogador {nome} fez {gols} gol(s) no campeonato.')
else:
nome = '<desconhecido>'
print(f'O jogador {nome} fez {gols} gol(s) no campeonato.')
print('='*30)
nome = str(input('Nome do Jogador: '))
gols = str(input('Número de Gols: '))
ficha(nome, gols)
| 24.470588 | 67 | 0.560096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 163 | 0.390887 |
e347285e41902227dea4612bf91fb04df4a24692 | 3,598 | py | Python | sub_1602_display.py | leonardlinde/timeandtemp | 93e9ad16b2027fd9c261052c22a5977b86326550 | [
"Artistic-2.0"
]
| null | null | null | sub_1602_display.py | leonardlinde/timeandtemp | 93e9ad16b2027fd9c261052c22a5977b86326550 | [
"Artistic-2.0"
]
| null | null | null | sub_1602_display.py | leonardlinde/timeandtemp | 93e9ad16b2027fd9c261052c22a5977b86326550 | [
"Artistic-2.0"
]
| null | null | null | #!/usr/bin/env python
"""
ZMQ Subscriber for 1602 display
Queue: INF and CMD
"""
import wiringpi2 as wiringpi
import datetime
import time
import json
import Adafruit_DHT
import traceback
import zmq
import sys
import pprint
infoSocket = "tcp://localhost:5550"
cmdSocket = "tcp://localhost:5560"
wiringpi.wiringPiSetup()
# Initialize mcp3008 (same as 3004) ADC - first param is pin base (must be > 64)
# Second param is SPI bus number
wiringpi.mcp3004Setup(100,0)
# Initialize LCD
# 2 rows of 16 columns, driven by 4 bits
# Control pins are WiringPi 15 & 16
# Data pins are WiringPi 0,1,2,3
display = wiringpi.lcdInit (2, 16, 4, 15,16, 0,1,2,3,0,0,0,0)
# LCD Backlight
backlightPin = 26 # GPIO12 is set to ground to turn off backlight
wiringpi.pinMode(backlightPin,1) #output
wiringpi.digitalWrite(backlightPin, 0)
# Init zmq
context = zmq.Context()
# Subscribe to all the info queues
info = context.socket(zmq.SUB)
info.connect(infoSocket)
info.setsockopt(zmq.SUBSCRIBE, 'INF_SENSOR')
info.setsockopt(zmq.SUBSCRIBE, 'INF_CURRENTWX')
info.setsockopt(zmq.SUBSCRIBE, 'INF_FORECASTWX')
# Subscribe to LCD command queue
cmd = context.socket(zmq.SUB)
cmd.connect(cmdSocket)
cmd.setsockopt(zmq.SUBSCRIBE, 'CMD_LCD')
# set up a poller to read both sockets
poller = zmq.Poller()
poller.register(info, zmq.POLLIN)
poller.register(cmd, zmq.POLLIN)
# state variables
commandState = {'backlight':True}
# convert ADC reading to Lux
def rawToLux( raw ):
# Range for converting the output of the light sensor on the
# ADC to lux
rawRange = 1024 #
logRange = 5.0 # 3.3 V = 10^5 Lux
logLux = raw * logRange / rawRange
return round(pow(10, logLux))
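# Worked example (illustrative; not in the original script): raw = 512 gives
#   logLux = 512 * 5.0 / 1024 = 2.5  ->  lux = round(10 ** 2.5) = 316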
def processSensor(dataIn):
officeData = json.loads(dataIn)
wiringpi.lcdPosition(display, 0,0)
now = datetime.datetime.now().strftime("%m/%d/%y %I:%M%p")
wiringpi.lcdPuts(display, now)
if 'temperatureF' in officeData :
out = "%2dF" % (officeData['temperatureF'])
wiringpi.lcdPosition(display, 0,1)
wiringpi.lcdPuts(display, out)
else:
print "Temp missing"
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(officeData)
if 'pressure' in officeData :
out = "%4dMb" % (officeData['pressure'])
wiringpi.lcdPosition(display, 4,1)
wiringpi.lcdPuts(display, out)
else:
print "Pressure missing"
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(officeData)
if 'lux' in officeData:
wiringpi.lcdPosition(display, 11,1)
out = "%3dLx" % (officeData['lux'])
wiringpi.lcdPuts(display, out)
else:
print "lux missing"
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(officeData)
def processCommand(command):
# if backlight is on, turn it off
if command == 'toggleBacklight':
if commandState['backlight']:
wiringpi.digitalWrite(backlightPin, 1)
commandState['backlight'] = False
else:
wiringpi.digitalWrite(backlightPin, 0)
commandState['backlight'] = True
def main_sub_1602_display():
while True:
try:
socks = dict(poller.poll())
except KeyboardInterrupt:
print ("Exit by KeyboardInterrupt\n")
            break  # a bare 'exit' here was a no-op expression; break actually leaves the loop
if cmd in socks:
[queue, dataIn] = cmd.recv_multipart()
processCommand(dataIn)
if info in socks:
[queue, dataIn] = info.recv_multipart()
if queue == 'INF_SENSOR':
processSensor(dataIn)
if __name__ == '__main__':
main_sub_1602_display()
| 26.651852 | 79 | 0.658143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,017 | 0.282657 |
e347e8efaaade3a7b28a992e4961e185b12004e3 | 2,079 | py | Python | app/business_layers/presentation.py | martireg/bmat | b5ccd6dcd1edd1e90fa07cb0ef4006b909018a4c | [
"MIT"
]
| null | null | null | app/business_layers/presentation.py | martireg/bmat | b5ccd6dcd1edd1e90fa07cb0ef4006b909018a4c | [
"MIT"
]
| null | null | null | app/business_layers/presentation.py | martireg/bmat | b5ccd6dcd1edd1e90fa07cb0ef4006b909018a4c | [
"MIT"
]
| null | null | null | from typing import List, Dict
from fastapi import APIRouter, UploadFile, File, Depends, HTTPException
from pydantic import create_model
from starlette.responses import StreamingResponse
from app.business_layers.domain import Work
from app.business_layers.repository import WorkRepository
from app.business_layers.use_cases import (
bulk_upload_works_use_case,
get_work_use_case,
list_works_use_case,
)
from app.db.mongodb import get_client
from app.utils.csv_manipulation import process_csv, stream_csv_from_dicts
async def get_db():
return await get_client()
work_router = APIRouter()
# Model Fields are defined by either a tuple of the form (<type>, <default value>) or a default value
model_fields = {k: (v, ...) for k, v in Work.__annotations__.items()}
WorkModel = create_model("WorkModel", **model_fields)
@work_router.post("/upload_file", response_model=List[WorkModel])
async def upload_csv(file: UploadFile = File(...), db=Depends(get_db)) -> List[Dict]:
csv = process_csv(await file.read())
works = await bulk_upload_works_use_case(WorkRepository(db), csv)
return [work.to_dict() for work in works]
@work_router.get("/csv")
async def fetch_csv(db=Depends(get_db)) -> File:
works = await list_works(db)
if not works:
return HTTPException(status_code=500, detail="There are no works available")
response = StreamingResponse(
stream_csv_from_dicts(works, separator=",", keys=works[0].keys()),
media_type="text/csv",
)
response.headers["Content-Disposition"] = "attachment; filename=export.csv"
return response
@work_router.get("/work/{iswc}", response_model=WorkModel)
async def get_work(iswc: str, db=Depends(get_db)):
work = await get_work_use_case(WorkRepository(db), iswc)
if not work:
raise HTTPException(status_code=404, detail="Item not found")
return work.to_dict()
@work_router.get("/works", response_model=List[WorkModel])
async def list_works(db=Depends(get_db)):
return [work.to_dict() for work in await list_works_use_case(WorkRepository(db))]
| 34.65 | 101 | 0.746032 | 0 | 0 | 0 | 0 | 1,232 | 0.592593 | 1,072 | 0.515633 | 267 | 0.128427 |
e348be446d860ef514d588759be2dbd6de2b4764 | 651 | py | Python | essentials_kit_management/interactors/get_pay_through_details_interactor.py | RajeshKumar1490/iB_hubs_mini_project | f7126092400fb9a62fb4bff643dae7cda3a8d9d2 | [
"MIT"
]
| null | null | null | essentials_kit_management/interactors/get_pay_through_details_interactor.py | RajeshKumar1490/iB_hubs_mini_project | f7126092400fb9a62fb4bff643dae7cda3a8d9d2 | [
"MIT"
]
| 2 | 2021-09-07T07:06:00.000Z | 2021-09-07T07:24:26.000Z | essentials_kit_management/interactors/get_pay_through_details_interactor.py | RajeshKumar1490/iB_hubs_mini_project | f7126092400fb9a62fb4bff643dae7cda3a8d9d2 | [
"MIT"
]
| null | null | null | from essentials_kit_management.interactors.storages.storage_interface \
import StorageInterface
from essentials_kit_management.interactors.presenters.presenter_interface \
import PresenterInterface
class GetPayThroughDetailsInteractor:
def __init__(
self, storage: StorageInterface, presenter: PresenterInterface):
self.storage = storage
self.presenter = presenter
def get_pay_through_details(self):
upi_id = self.storage.get_upi_id()
pay_through_details_response = \
self.presenter.get_pay_through_details_response(upi_id=upi_id)
return pay_through_details_response
| 34.263158 | 76 | 0.761905 | 442 | 0.678955 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e3496bdad8c230d6caf15ce743cc65f029480031 | 5,287 | py | Python | RecoEgamma/Configuration/python/RecoEgamma_cff.py | sebwieland/cmssw | 431e2fdfedec052e73c16e9f06de98ade41ebc56 | [
"Apache-2.0"
]
| null | null | null | RecoEgamma/Configuration/python/RecoEgamma_cff.py | sebwieland/cmssw | 431e2fdfedec052e73c16e9f06de98ade41ebc56 | [
"Apache-2.0"
]
| null | null | null | RecoEgamma/Configuration/python/RecoEgamma_cff.py | sebwieland/cmssw | 431e2fdfedec052e73c16e9f06de98ade41ebc56 | [
"Apache-2.0"
]
| null | null | null | import FWCore.ParameterSet.Config as cms
from RecoEgamma.EgammaElectronProducers.gsfElectronSequence_cff import *
from RecoEgamma.EgammaElectronProducers.uncleanedOnlyElectronSequence_cff import *
from RecoEgamma.EgammaPhotonProducers.photonSequence_cff import *
from RecoEgamma.EgammaPhotonProducers.conversionSequence_cff import *
from RecoEgamma.EgammaPhotonProducers.conversionTrackSequence_cff import *
from RecoEgamma.EgammaPhotonProducers.allConversionSequence_cff import *
from RecoEgamma.EgammaPhotonProducers.gedPhotonSequence_cff import *
from RecoEgamma.EgammaIsolationAlgos.egammaIsolationSequence_cff import *
from RecoEgamma.EgammaIsolationAlgos.interestingEgammaIsoDetIdsSequence_cff import *
from RecoEgamma.PhotonIdentification.photonId_cff import *
from RecoEgamma.ElectronIdentification.electronIdSequence_cff import *
from RecoEgamma.EgammaHFProducers.hfEMClusteringSequence_cff import *
from TrackingTools.Configuration.TrackingTools_cff import *
from RecoEgamma.EgammaIsolationAlgos.egmIsolationDefinitions_cff import *
#importing new gedGsfElectronSequence :
#from RecoEgamma.EgammaElectronProducers.gedGsfElectronSequence_cff import *
from RecoEgamma.EgammaElectronProducers.pfBasedElectronIso_cff import *
egammaGlobalRecoTask = cms.Task(electronGsfTrackingTask,conversionTrackTask,allConversionTask)
egammaGlobalReco = cms.Sequence(egammaGlobalRecoTask)
# this might be historical: not sure why we do this
from Configuration.Eras.Modifier_fastSim_cff import fastSim
_fastSim_egammaGlobalRecoTask = egammaGlobalRecoTask.copy()
_fastSim_egammaGlobalRecoTask.replace(conversionTrackTask,conversionTrackTaskNoEcalSeeded)
fastSim.toReplaceWith(egammaGlobalRecoTask, _fastSim_egammaGlobalRecoTask)
egammarecoTask = cms.Task(gsfElectronTask,conversionTask,photonTask)
egammareco = cms.Sequence(egammarecoTask)
egammaHighLevelRecoPrePFTask = cms.Task(gsfEcalDrivenElectronTask,uncleanedOnlyElectronTask,conversionTask,photonTask)
egammaHighLevelRecoPrePF = cms.Sequence(egammaHighLevelRecoPrePFTask)
# not commisoned and not relevant in FastSim (?):
fastSim.toReplaceWith(egammarecoTask, egammarecoTask.copyAndExclude([conversionTask]))
fastSim.toReplaceWith(egammaHighLevelRecoPrePFTask,egammaHighLevelRecoPrePFTask.copyAndExclude([uncleanedOnlyElectronTask,conversionTask]))
#egammaHighLevelRecoPostPFTask = cms.Task(gsfElectronMergingTask,interestingEgammaIsoDetIdsTask,photonIDTask,eIdTask,hfEMClusteringTask)
#adding new gedGsfElectronTask and gedPhotonTask :
#egammaHighLevelRecoPostPFTask = cms.Task(gsfElectronMergingTask,gedGsfElectronTask,interestingEgammaIsoDetIdsTask,gedPhotonTask,photonIDTask,eIdTask,hfEMClusteringTask)
egammaHighLevelRecoPostPFTask = cms.Task(interestingEgammaIsoDetIdsTask,egmIsolationTask,photonIDTask,photonIDTaskGED,eIdTask,hfEMClusteringTask)
egammaHighLevelRecoPostPF = cms.Sequence(egammaHighLevelRecoPostPFTask)
egammarecoFullTask = cms.Task(egammarecoTask,interestingEgammaIsoDetIdsTask,egmIsolationTask,photonIDTask,eIdTask,hfEMClusteringTask)
egammarecoFull = cms.Sequence(egammarecoFullTask)
egammarecoWithIDTask = cms.Task(egammarecoTask,photonIDTask,eIdTask)
egammarecoWithID = cms.Sequence(egammarecoWithIDTask)
egammareco_woConvPhotonsTask = cms.Task(gsfElectronTask,photonTask)
egammareco_woConvPhotons = cms.Sequence(egammareco_woConvPhotonsTask)
egammareco_withIsolationTask = cms.Task(egammarecoTask,egammaIsolationTask)
egammareco_withIsolation = cms.Sequence(egammareco_withIsolationTask)
egammareco_withIsolation_woConvPhotonsTask = cms.Task(egammareco_woConvPhotonsTask,egammaIsolationTask)
egammareco_withIsolation_woConvPhotons = cms.Sequence(egammareco_withIsolation_woConvPhotonsTask)
egammareco_withPhotonIDTask = cms.Task(egammarecoTask,photonIDTask)
egammareco_withPhotonID = cms.Sequence(egammareco_withPhotonIDTask)
egammareco_withElectronIDTask = cms.Task(egammarecoTask,eIdTask)
egammareco_withElectronID = cms.Sequence(egammareco_withElectronIDTask)
egammarecoFull_woHFElectronsTask = cms.Task(egammarecoTask,interestingEgammaIsoDetIdsTask,photonIDTask,eIdTask)
egammarecoFull_woHFElectrons = cms.Sequence(egammarecoFull_woHFElectronsTask)
from Configuration.Eras.Modifier_pA_2016_cff import pA_2016
from Configuration.Eras.Modifier_peripheralPbPb_cff import peripheralPbPb
from Configuration.Eras.Modifier_pp_on_AA_2018_cff import pp_on_AA_2018
from Configuration.Eras.Modifier_pp_on_XeXe_2017_cff import pp_on_XeXe_2017
from Configuration.Eras.Modifier_ppRef_2017_cff import ppRef_2017
#HI-specific algorithms needed in pp scenario special configurations
from RecoHI.HiEgammaAlgos.photonIsolationHIProducer_cfi import photonIsolationHIProducerpp
from RecoHI.HiEgammaAlgos.photonIsolationHIProducer_cfi import photonIsolationHIProducerppGED
from RecoHI.HiEgammaAlgos.photonIsolationHIProducer_cfi import photonIsolationHIProducerppIsland
_egammaHighLevelRecoPostPF_HITask = egammaHighLevelRecoPostPFTask.copy()
_egammaHighLevelRecoPostPF_HITask.add(photonIsolationHIProducerpp)
_egammaHighLevelRecoPostPF_HITask.add(photonIsolationHIProducerppGED)
_egammaHighLevelRecoPostPF_HITask.add(photonIsolationHIProducerppIsland)
for e in [pA_2016, peripheralPbPb, pp_on_AA_2018, pp_on_XeXe_2017, ppRef_2017]:
e.toReplaceWith(egammaHighLevelRecoPostPFTask, _egammaHighLevelRecoPostPF_HITask)
| 66.924051 | 169 | 0.897863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 639 | 0.120862 |
e349722dbbb7eaf1a0dc75722c25f01806dbcca5 | 3,632 | py | Python | language/serene/boolq_tfds.py | Xtuden-com/language | 70c0328968d5ffa1201c6fdecde45bbc4fec19fc | [
"Apache-2.0"
]
| 1,199 | 2018-10-16T01:30:18.000Z | 2022-03-31T21:05:24.000Z | language/serene/boolq_tfds.py | Xtuden-com/language | 70c0328968d5ffa1201c6fdecde45bbc4fec19fc | [
"Apache-2.0"
]
| 116 | 2018-10-18T03:31:46.000Z | 2022-03-24T13:40:50.000Z | language/serene/boolq_tfds.py | Xtuden-com/language | 70c0328968d5ffa1201c6fdecde45bbc4fec19fc | [
"Apache-2.0"
]
| 303 | 2018-10-22T12:35:12.000Z | 2022-03-27T17:38:17.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""TF Dataset for BoolQ in same format as Fever TFDS."""
import json
from language.serene import constants
from language.serene import util
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
class BoolQClaims(tfds.core.GeneratorBasedBuilder):
"""TFDS for treating boolq as fact verification."""
VERSION = tfds.core.Version('0.1.0')
def __init__(self,
*,
boolq_train_path,
boolq_dev_path,
data_dir=None,
config=None,
version=None):
super().__init__(data_dir=data_dir, config=config, version=version)
self._boolq_train_path = boolq_train_path
self._boolq_dev_path = boolq_dev_path
def _generate_examples(self, boolq_filepath, fold):
boolq_claims = util.read_jsonlines(boolq_filepath)
for idx, claim in enumerate(boolq_claims):
example_id = f'{fold}-{idx}'
example = {
'example_id':
example_id,
'claim_text':
claim['question'],
'evidence_text':
claim['passage'],
'wikipedia_url':
claim['title'],
'sentence_id':
'0',
# This is effectively gold evidence
'evidence_label':
constants.MATCHING,
'claim_label':
constants.SUPPORTS if claim['answer'] else constants.REFUTES,
'metadata':
json.dumps({})
}
yield example_id, example
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
# tfds.features.FeatureConnectors
features=tfds.features.FeaturesDict({
'example_id':
tf.string,
'metadata':
tf.string,
'claim_text':
tfds.features.Text(),
'evidence_text':
tfds.features.Text(),
'wikipedia_url':
tfds.features.Text(),
'sentence_id':
tfds.features.Text(),
'evidence_label':
tfds.features.ClassLabel(
names=constants.EVIDENCE_MATCHING_CLASSES),
'claim_label':
tfds.features.ClassLabel(names=constants.FEVER_CLASSES)
}),
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# dl_manager is a tfds.download.DownloadManager that can be used to
# download and extract URLs
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
'boolq_filepath': self._boolq_train_path,
'fold': 'train',
},
num_shards=100,
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
'boolq_filepath': self._boolq_dev_path,
'fold': 'dev',
},
)
]
| 32.141593 | 75 | 0.593337 | 2,774 | 0.763767 | 773 | 0.21283 | 0 | 0 | 0 | 0 | 1,296 | 0.356828 |
e34a90751c66f311b5912bb8c6a8d1a8ad0deae9 | 449 | py | Python | 23/03/0.py | pylangstudy/201709 | 53d868786d7327a83bfa7f4149549c6f9855a6c6 | [
"CC0-1.0"
]
| null | null | null | 23/03/0.py | pylangstudy/201709 | 53d868786d7327a83bfa7f4149549c6f9855a6c6 | [
"CC0-1.0"
]
| 32 | 2017-09-01T00:52:17.000Z | 2017-10-01T00:30:02.000Z | 23/03/0.py | pylangstudy/201709 | 53d868786d7327a83bfa7f4149549c6f9855a6c6 | [
"CC0-1.0"
]
| null | null | null | import json
import pprint
from urllib.request import urlopen
with urlopen('http://pypi.python.org/pypi/Twisted/json') as url:
http_info = url.info()
raw_data = url.read().decode(http_info.get_content_charset())
project_info = json.loads(raw_data)
pprint.pprint(project_info)
print('------------------------------')
pprint.pprint(project_info, depth=2)
print('------------------------------')
pprint.pprint(project_info, depth=2, width=50)
| 29.933333 | 65 | 0.657016 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 106 | 0.23608 |
e34b1404822d471e120b2d87a3f5be2a57d14434 | 1,323 | py | Python | molotov_ext/__init__.py | 2gis-test-labs/molotov-ext | 2cf2cc5b74f6676ed1680511030d4dddb8be8380 | [
"Apache-2.0"
]
| null | null | null | molotov_ext/__init__.py | 2gis-test-labs/molotov-ext | 2cf2cc5b74f6676ed1680511030d4dddb8be8380 | [
"Apache-2.0"
]
| null | null | null | molotov_ext/__init__.py | 2gis-test-labs/molotov-ext | 2cf2cc5b74f6676ed1680511030d4dddb8be8380 | [
"Apache-2.0"
]
| null | null | null | from argparse import Namespace
from functools import partial
from typing import Any
import molotov
from .formatters import DefaultFormatter
from .record_table import RecordTable
from .recorder import Recorder
from .reporter import Reporter
from .scenario import Scenario
__all__ = ("Reporter", "register_reporter", "scenario", "recorder")
recorder = Recorder(RecordTable())
scenario = partial(Scenario, recorder.on_starting_scenario)
@molotov.events()
async def event_listener(event: str, **info: Any) -> None:
if event == "sending_request":
recorder.on_sending_request(info["session"], info["request"])
elif event == "response_received":
recorder.on_response_received(info["session"], info["response"], info["request"])
elif event == "scenario_success":
recorder.on_scenario_success(info["scenario"]["name"], info["wid"])
elif event == "scenario_failure":
recorder.on_scenario_failure(info["scenario"]["name"], info["wid"], info['exception'])
elif event == "current_workers":
recorder.on_current_workers(info["workers"])
def register_reporter(args: Namespace) -> Reporter:
if args.processes > 1:
        raise NotImplementedError('Working with more than one process is not supported!')
return Reporter(recorder, DefaultFormatter())
| 32.268293 | 99 | 0.73167 | 0 | 0 | 0 | 0 | 651 | 0.472424 | 633 | 0.459361 | 365 | 0.264877 |
e34cc6ddd23022672aee1685f571b987ab87c815 | 936 | py | Python | services/viewcounts/utils.py | RyanFleck/AuxilliaryWebsiteServices | bcaa6689e567fdf9f20f7f4ea84043aa2b6f1378 | [
"MIT"
]
| 1 | 2020-11-11T20:20:42.000Z | 2020-11-11T20:20:42.000Z | services/viewcounts/utils.py | RyanFleck/AuxilliaryWebsiteServices | bcaa6689e567fdf9f20f7f4ea84043aa2b6f1378 | [
"MIT"
]
| 17 | 2020-11-09T19:04:04.000Z | 2022-03-01T18:08:42.000Z | services/viewcounts/utils.py | RyanFleck/AuxilliaryWebsiteServices | bcaa6689e567fdf9f20f7f4ea84043aa2b6f1378 | [
"MIT"
]
| null | null | null | from slugify import slugify
from services.viewcounts.models import PageViewsModel
def get_page_views(url: str):
"""Returns the number of views for a given page object."""
# Pre-processing checks: Client should not pass full or partial URL.
if not url.startswith("/"):
raise Exception("Partial URL detected, only POST the page path.")
if ("http" in url) or ("localhost" in url):
raise Exception("Full URL detected, only POST the page path.")
# Boil down url to slug/path:
path = url_to_path(url)
print(f"User is at {path}")
# Creates a new object if none exists.
page, created = PageViewsModel.objects.get_or_create(path=path)
# Add a view to the model
if not created:
page.views = page.views + 1
page.save()
return page.views
def url_to_path(url: str):
"""Converts an incoming url into a path-slug."""
return slugify(url, max_length=199)
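# Illustrative usage (hypothetical path; not part of the original module):
#   url_to_path("/blog/my-post/") -> "blog-my-post"
#   get_page_views("/blog/my-post/") creates the row on the first call and
#   increments the stored view count on subsequent calls.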
| 28.363636 | 73 | 0.672009 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 399 | 0.426282 |
e34da2a39a4311b17cd41e029318a815155da9e9 | 9,875 | py | Python | bin/gaussian_process_samples.py | ltiao/videos | ba371078d107da5a4c726a957b31a29bb157664d | [
"MIT"
]
| null | null | null | bin/gaussian_process_samples.py | ltiao/videos | ba371078d107da5a4c726a957b31a29bb157664d | [
"MIT"
]
| null | null | null | bin/gaussian_process_samples.py | ltiao/videos | ba371078d107da5a4c726a957b31a29bb157664d | [
"MIT"
]
| null | null | null | import tensorflow as tf
import tensorflow_probability as tfp
from scipy.stats import expon
from videos.linalg import safe_cholesky
from manim import *
# shortcuts
tfd = tfp.distributions
kernels = tfp.math.psd_kernels
def default_float():
return "float64"
class State:
def __init__(self, kernel, x_grid, xa, xb_tracker, ci=.95):
self.kernel = kernel
self.x_grid = x_grid # shape (K, 1)
self.xa = xa # shape ()
self.xb_tracker = xb_tracker
self.ci = ci
# cholesky decomposition of gram matrix over grid points; shape (K, K)
self.scale_grid = safe_cholesky(self.kernel.matrix(x_grid, x_grid))
def index_points(self):
return np.vstack([self.xa, self.xb_tracker.get_value()]) # shape (2, 1)
def scale(self):
xs = self.index_points() # shape (2, 1)
Ks = self.kernel.matrix(xs, xs) # shape (2, 2)
Ks_grid = self.kernel.matrix(self.x_grid, xs) # shape (K, 2)
K_col = tf.concat([Ks_grid, Ks], axis=0) # shape (K+2, 2)
L = tfp.math.cholesky_concat(self.scale_grid, K_col) # shape (K+2, K+2)
return tf.linalg.LinearOperatorLowerTriangular(L)
def _ellipse_parametric(self, t):
xs = self.index_points() # shape (2, 1)
Ks = self.kernel.matrix(xs, xs) # shape (2, 2)
# compute 95% confidence interval using inverse cdf of
# chi-squared distribution with 2 degrees of freedom
s = expon(scale=2.).ppf(q=self.ci)
w, v = tf.linalg.eigh(Ks)
U = tf.sqrt(s * w) * v
z = tf.stack((tf.cos(t), tf.sin(t)), axis=-1)
a = tf.matmul(U, tf.expand_dims(z, axis=-1)).numpy()
return (*a, 0)
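    # Note (added for clarity): with 2 degrees of freedom the chi-squared
    # distribution equals an exponential with scale 2, so expon(scale=2).ppf(0.95)
    # ~= 5.99 is the usual scaling factor for a 95% confidence ellipse.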
def plot_ellipse(self, ax):
return ax.plot_parametric_curve(self._ellipse_parametric,
t_range=(0, TAU),
fill_opacity=.25) \
.set_color(TEAL)
class SampleTrajectory:
def __init__(self, state, theta_tracker, random_state):
m = len(state.x_grid)
self.u = random_state.randn(m+2)
self.v = random_state.randn(m+2)
self.state = state
self.theta_tracker = theta_tracker
def __call__(self, theta):
v_norm = np.linalg.norm(self.v, axis=None, ord=2)
v_normed = np.true_divide(self.v, v_norm)
c = np.sum(self.u * v_normed, axis=None)
t = self.u - c * v_normed
t_norm = np.linalg.norm(t, ord=2, axis=None)
t_normed = np.true_divide(t, t_norm)
eps = v_norm * (v_normed * np.cos(theta) + t_normed * np.sin(theta))
return self.state.scale().matmul(tf.expand_dims(eps, axis=-1)).numpy()
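    # Note (added for clarity): for fixed draws u and v, varying theta traces a
    # circle of radius ||v|| through v in the plane spanned by u and v, so the
    # animated sample returns to its starting curve after one full rotation; the
    # Cholesky factor from State.scale() maps this noise onto GP-prior samples.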
def make_updater(self, ax, color, make_line_graph_fn):
def updater(m):
foo = self(self.theta_tracker.get_value())
y_values = foo[:-2]
return m.become(make_line_graph_fn(ax, self.state.x_grid, y_values, color))
return updater
def dot_updater(self, ax):
def updater(m):
foo = self(self.theta_tracker.get_value())
y1, y2 = foo[-2:]
return m.move_to(ax.coords_to_point(y1, y2))
return updater
def make_xa_updater(self, ax):
def updater(m):
foo = self(self.theta_tracker.get_value())
x = self.state.xa
y = foo[-2]
return m.move_to(ax.coords_to_point(x, y))
return updater
def make_xb_updater(self, ax):
def updater(m):
foo = self(self.theta_tracker.get_value())
x = self.state.xb_tracker.get_value()
y = foo[-1]
return m.move_to(ax.coords_to_point(x, y))
return updater
def tset(self, ya, yb, ax, z_index, color):
return ax.get_lines_to_point(ax.coords_to_point(ya, yb), color=color) \
.set_z_index(z_index)
def make_lines_updater(self, ax, z_index, color):
def updater(m):
foo = self(self.theta_tracker.get_value())
ya, yb = foo[-2:]
return m.become(self.tset(ya, yb, ax, z_index, color))
return updater
class GaussianProcessSamples(Scene):
def make_line_graph(self, ax, x, y, color):
x_values = x.squeeze(axis=-1)
return ax.plot_line_graph(x_values=x_values,
y_values=y,
add_vertex_dots=False,
line_color=color,
# vertex_dot_style=dict(fill_color=color,
# fill_opacity=0.8),
stroke_opacity=0.9)
def construct(self):
# self.camera.background_color = WHITE
seed = 23
random_state = np.random.RandomState(seed)
# colors = [BLUE, TEAL, GREEN, GOLD, RED, MAROON, PURPLE]
colors = [RED, GREEN, BLUE]
n_samples = len(colors)
n_index_points = 512 # nbr of index points
n_foo = 2
y_min, y_max, y_step = -3.2, 3.2, .8
x_min, x_max, x_step = -.1, 1., .1
X_grid = np.linspace(x_min, x_max, n_index_points).reshape(-1, 1)
# X_foo = random_state.uniform(low=x_min, high=x_max, size=(n_foo, 1))
xa = 0.7
xb = xa - 0.2
# x2 = random_state.uniform(low=x_min, high=x_max)
# kernel_cls = kernels.MaternFiveHalves
kernel_cls = kernels.ExponentiatedQuadratic
amplitude = 1.
length_scale = .1
kernel = kernel_cls(
amplitude=tf.constant(amplitude, dtype=default_float()),
length_scale=tf.constant(length_scale, dtype=default_float())
)
# angle
theta = 0.
ax1 = Axes(
x_range=[x_min, x_max, x_step],
y_range=[y_min, y_max, y_step],
x_length=7.,
y_length=4.,
tips=False,
)
ax2 = Axes(
x_range=[y_min, y_max, y_step],
y_range=[y_min, y_max, y_step],
x_length=4.,
y_length=4.,
tips=False,
)
axes = VGroup(ax1, ax2).arrange(RIGHT, buff=LARGE_BUFF)
ax1_label = ax1.get_axis_labels(y_label=r"f(x)")
ax2_label = ax2.get_axis_labels(x_label=r"f(x_1)", y_label=r"f(x_2)")
labels = VGroup(ax1_label, ax2_label)
xb_tracker = ValueTracker(xb)
length_scale_tracker = ValueTracker(length_scale)
theta_tracker = ValueTracker(theta)
state = State(kernel, X_grid, xa, xb_tracker)
curve = state.plot_ellipse(ax2)
curve.add_updater(lambda m: m.become(state.plot_ellipse(ax2)))
graphs = VGroup()
lines = VGroup()
dots = VGroup()
for i, color in enumerate(colors):
traj = SampleTrajectory(state, theta_tracker, random_state)
foo = traj(theta_tracker.get_value())
*y_values, ya, yb = foo
graph = self.make_line_graph(ax1, X_grid, y_values, color) \
.set_z_index(i+1)
graph.add_updater(traj.make_updater(ax1, color, self.make_line_graph))
graphs.add(graph)
dot_xa = Dot(ax1.coords_to_point(xa, ya),
fill_color=color, fill_opacity=0.9, stroke_width=1.5) \
.set_z_index(i+1)
dot_xa.add_updater(traj.make_xa_updater(ax1))
dot_xb = Dot(ax1.coords_to_point(xb_tracker.get_value(), yb),
fill_color=color, fill_opacity=0.9, stroke_width=1.5) \
.set_z_index(i+1)
dot_xb.add_updater(traj.make_xb_updater(ax1))
dot = Dot(ax2.coords_to_point(ya, yb),
fill_color=color, stroke_width=1.5) \
.set_z_index(curve.z_index+i+1)
dot.add_updater(traj.dot_updater(ax2))
line = traj.tset(ya, yb, ax2, z_index=curve.z_index+i+1, color=color)
line.add_updater(traj.make_lines_updater(ax2, z_index=curve.z_index+i+1, color=color))
dots.add(dot, dot_xa, dot_xb)
lines.add(line)
line_a = ax1.get_vertical_line(ax1.coords_to_point(xa, .75 * y_min))
line_b = ax1.get_vertical_line(ax1.coords_to_point(xb_tracker.get_value(), .75 * y_max))
line_b.add_updater(lambda m: m.become(ax1.get_vertical_line(ax1.coords_to_point(xb_tracker.get_value(), .75 * y_max))))
lines.add(line_a, line_b)
label_a = MathTex("x_1").next_to(line_a, DOWN)
label_b = MathTex("x_2").next_to(line_b, UP)
label_b.add_updater(lambda m: m.next_to(line_b, UP))
labels.add(label_a, label_b)
logo = Text("@louistiao", font="Open Sans", font_size=20, color=BLUE_D).to_corner(DR)
self.add(logo, axes, labels, graphs, dots, curve, lines)
rotations = 1
frequency = 1
self.play(xb_tracker.animate.set_value(xa - 0.45))
self.wait()
self.animate_samples(theta_tracker, rotations, frequency)
self.wait()
self.next_section()
self.play(xb_tracker.animate.set_value(xa + 0.2))
self.wait()
self.animate_samples(theta_tracker, rotations, frequency)
self.wait()
# self.next_section()
# self.play(xb_tracker.animate.set_value(xa + .015))
# self.wait()
# self.animate_samples(theta_tracker, rotations, frequency)
# self.wait()
# self.next_section()
# self.play(xb_tracker.animate.set_value(xb))
# self.wait()
# self.animate_samples(theta_tracker, rotations, frequency)
# self.wait()
def animate_samples(self, tracker, rotations, frequency,
rate_func=rate_functions.linear):
self.play(tracker.animate.increment_value(rotations * TAU),
rate_func=rate_func, run_time=rotations / frequency)
| 35.142349 | 127 | 0.582278 | 9,602 | 0.972354 | 0 | 0 | 0 | 0 | 0 | 0 | 1,071 | 0.108456 |
e34e43e9e1aa6f169f4e3ce01d35a03a886c9108 | 932 | py | Python | app/api/v2/models/base_models.py | erick-maina/Questioner_API | 0ffad203fd525e22b52e861ce574803a844cc3b3 | [
"MIT"
]
| null | null | null | app/api/v2/models/base_models.py | erick-maina/Questioner_API | 0ffad203fd525e22b52e861ce574803a844cc3b3 | [
"MIT"
]
| 7 | 2019-01-15T12:23:59.000Z | 2019-01-20T17:32:45.000Z | app/api/v2/models/base_models.py | erick-maina/Questioner_API | 0ffad203fd525e22b52e861ce574803a844cc3b3 | [
"MIT"
]
| null | null | null | """
This module defines the base model and associated functions
"""
from flask import Flask, jsonify
from psycopg2.extras import RealDictCursor
from ....database import db_con
class BaseModels(object):
"""
This class encapsulates the functions of the base model
that will be shared across all other models
"""
def __init__(self, tablename):
"""Initializes the database"""
self.table = tablename
self.connect = db_con()
self.cur = self.connect.cursor(cursor_factory=RealDictCursor)
def check_exists(self, key, value):
"""Checks where a particular item exists within the
database given the table name, column name(key) and
the value to be checked"""
query = """SELECT * FROM {} WHERE {} = {};""".format(
self.table, key, value)
self.cur.execute(query)
result = self.cur.fetchall()
return len(result) > 0
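    # Illustrative usage (hypothetical table and column names; not in the original):
    #   BaseModels("users").check_exists("user_id", 7)  # True if a matching row exists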
| 30.064516 | 69 | 0.648069 | 752 | 0.806867 | 0 | 0 | 0 | 0 | 0 | 0 | 400 | 0.429185 |
e34fb05ebe987705aa9522c54e606db43ebf8086 | 9,288 | py | Python | cvr/core/task.py | john-james-ai/cvr | 37e12dff4d46acac64b09ad8ddb8d238d43a5513 | [
"BSD-3-Clause"
]
| null | null | null | cvr/core/task.py | john-james-ai/cvr | 37e12dff4d46acac64b09ad8ddb8d238d43a5513 | [
"BSD-3-Clause"
]
| null | null | null | cvr/core/task.py | john-james-ai/cvr | 37e12dff4d46acac64b09ad8ddb8d238d43a5513 | [
"BSD-3-Clause"
]
| null | null | null | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# ================================================================================================ #
# Project : Deep Learning for Conversion Rate Prediction (CVR) #
# Version : 0.1.0 #
# File : \task.py #
# Language : Python 3.7.12 #
# ------------------------------------------------------------------------------------------------ #
# Author : John James #
# Email : [email protected] #
# URL : https://github.com/john-james-ai/cvr #
# ------------------------------------------------------------------------------------------------ #
# Created : Wednesday, January 19th 2022, 5:34:06 pm #
# Modified : Thursday, February 10th 2022, 9:28:37 pm #
# Modifier : John James ([email protected]) #
# ------------------------------------------------------------------------------------------------ #
# License : BSD 3-clause "New" or "Revised" License #
# Copyright: (c) 2022 Bryant St. Labs #
# ================================================================================================ #
from abc import ABC, abstractmethod
import pandas as pd
import inspect
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from cvr.utils.printing import Printer
from cvr.core.asset import AssetPassport
from cvr.core.dataset import Dataset
# ---------------------------------------------------------------------------- #
# TASK RESULT #
# ---------------------------------------------------------------------------- #
@dataclass
class TaskResult(ABC):
"""Standardized result object for all tasks"""
executed: bool = field(default=False)
passed: bool = field(default=False)
complete: str = field(default=False)
completed: datetime = field(init=False)
comment: str = field(default="")
def __post_init__(self) -> None:
self._printer = Printer()
def todict(self) -> dict:
d = {
"Executed": self.executed,
"Passed": self.passed,
"Complete": self.complete,
"Completed": self.completed,
"Comment": self.comment,
}
return d
def print(self) -> None:
d = self.todict()
self._printer.print_title("Task Result")
self._printer.print_dictionary(d)
# ---------------------------------------------------------------------------- #
# TASK RESPONSE #
# ---------------------------------------------------------------------------- #
@dataclass
class TaskResponse(ABC):
"""Task specific metrics to be overridden by subclasses."""
start: datetime = field(init=False)
end: datetime = field(init=False)
duration: timedelta = field(init=False)
def begin(self) -> None:
self.start = datetime.now()
def stop(self) -> None:
self.end = datetime.now()
self.duration = self.end - self.start
def __post_init__(self) -> None:
self._printer = Printer()
def todict(self) -> dict:
d = {"Start": self.start, "End": self.end, "Duration": self.duration}
return d
def print(self) -> None:
title = "Task Response"
self._printer.print_title(title)
d = self.todict()
self._printer.print_dictionary(d)
# ---------------------------------------------------------------------------- #
# TASK SUMMARY #
# ---------------------------------------------------------------------------- #
@dataclass
class TaskSummary(ABC):
"""Summarizes a Task.
Args:
passport (AssetPassport): Identity object
start (datetime): start time for event
end (datetime): end time for event
duration (timedelta): the duration of the event in minutes
passed (bool): True if the event passed
executed (bool): True if the event was executed. An event
may be skipped if its endpoint already exists
response (dict): Event specific response
result (str): result of the event
"""
passport: AssetPassport
response: TaskResponse
result: TaskResult
def __post_init__(self) -> None:
self._printer = Printer()
def print(self) -> None:
self.passport.print()
self.response.print()
self.result.print()
# ---------------------------------------------------------------------------- #
# TASK #
# ---------------------------------------------------------------------------- #
class Task(ABC):
"""Defines interface for task classes."""
def __init__(self, passport: AssetPassport, **kwargs) -> None:
self._passport = passport
self._config = None
@property
def config(self):
return self._config
@property
@abstractmethod
def passport(self):
pass
@property
@abstractmethod
def response(self) -> TaskResponse:
pass
@property
@abstractmethod
def result(self) -> TaskResult:
pass
@config.setter
def config(self, config) -> None:
self._config = config
def setup(self, **kwargs) -> None:
# Logging facility
self._logger = self._config.logger
# Subclass specific setup
self._setup()
def _setup(self) -> None:
pass
def teardown(self, **kwargs) -> None:
# Subclass specific teardown.
self._teardown()
# Base class gets last word
self._result.executed = "No" if self._result.executed is False else "Yes"
self._result.passed = "No" if self._result.passed is False else "Yes"
self._result.complete = "No" if self._result.complete is False else "Yes"
self._summary = TaskSummary(
passport=self.passport,
response=self.response,
result=self.result,
)
def _teardown(self, **kwargs) -> None:
pass
@abstractmethod
def run(self, data: pd.DataFrame = None) -> pd.DataFrame:
"""Runs the task through delegation to a private member on the subclass
Args:
df (pd.DataFrame): Input DataFrame object.
Returns:
df (pd.DataFrame): DataFrame object
response (dict): Dictionary containing task response information.
"""
pass
@abstractmethod
def passed(self) -> bool:
"""Checks success of task. Returns True if conditions pass."""
pass
def summary(self) -> TaskSummary:
return self._summary
def summarize(self) -> None:
self._summary.print()
# ============================================================================ #
# DATASET FACTORY #
# ============================================================================ #
class DatasetFactory(Task):
"""Creates Dataset objects."""
def __init__(self, passport: AssetPassport, dataset_passport: AssetPassport) -> None:
super(DatasetFactory, self).__init__(passport=passport)
self._dataset_passport = dataset_passport
def run(self, data: pd.DataFrame) -> Dataset:
self.setup()
self._logger.debug("\tStarted {} {}".format(self.__class__.__name__, inspect.stack()[0][3]))
aid = self._config.dataset_repo.aid_gen()
self._dataset_passport.aid = aid
dataset = Dataset(
passport=self._dataset_passport,
data=data,
)
dataset = self._config.dataset_repo.set_version(dataset)
self._logger.debug(
"\tCompleted {} {}".format(self.__class__.__name__, inspect.stack()[0][3])
)
self.teardown()
return dataset
def _setup(self) -> None:
self._response = TaskResponse()
self._response.begin()
self._result = TaskResult()
def _teardown(self) -> None:
self._response.stop()
self._result.executed = True
self._result.passed = self.passed()
self._result.complete = True
self._result.completed = datetime.now()
@property
def passport(self) -> AssetPassport:
return self._passport
@property
def response(self) -> TaskResponse:
return self._response
@property
def result(self) -> TaskResult:
return self._result
def passed(self) -> bool:
return True
| 33.053381 | 100 | 0.463932 | 5,984 | 0.644272 | 0 | 0 | 3,531 | 0.380168 | 0 | 0 | 4,252 | 0.457795 |
e34fb4cf8e7da5cda9892155346f681567a85054 | 304 | py | Python | matroids/matroid.py | Aasfga/matroids-library | 468f6fdc4b0c0e93346dba7365fae0fc6993f9cf | [
"MIT"
]
| null | null | null | matroids/matroid.py | Aasfga/matroids-library | 468f6fdc4b0c0e93346dba7365fae0fc6993f9cf | [
"MIT"
]
| null | null | null | matroids/matroid.py | Aasfga/matroids-library | 468f6fdc4b0c0e93346dba7365fae0fc6993f9cf | [
"MIT"
]
| null | null | null | from typing import Set
from numpy import ndarray
class Matroid:
def is_independent(self, s: Set) -> bool:
raise NotImplementedError
@property
def rank(self) -> int:
raise NotImplementedError
@property
def matrix(self) -> ndarray:
raise NotImplementedError
| 19 | 45 | 0.671053 | 252 | 0.828947 | 0 | 0 | 146 | 0.480263 | 0 | 0 | 0 | 0 |
e350ce9086d7c563b5e1154ba5f38a8024e85d87 | 779 | py | Python | inconnu/traits/traitcommon.py | tiltowait/inconnu | 6cca5fed520899d159537701b695c94222d8dc45 | [
"MIT"
]
| 4 | 2021-09-06T20:18:13.000Z | 2022-02-05T17:08:44.000Z | inconnu/traits/traitcommon.py | tiltowait/inconnu | 6cca5fed520899d159537701b695c94222d8dc45 | [
"MIT"
]
| 7 | 2021-09-13T00:46:57.000Z | 2022-01-11T06:38:50.000Z | inconnu/traits/traitcommon.py | tiltowait/inconnu | 6cca5fed520899d159537701b695c94222d8dc45 | [
"MIT"
]
| 2 | 2021-11-27T22:24:53.000Z | 2022-03-16T21:05:00.000Z | """traits/traitcommon.py - Common functionality across trait operations."""
import re
from ..constants import UNIVERSAL_TRAITS
VALID_TRAIT_PATTERN = re.compile(r"^[A-z_]+$")
def validate_trait_names(*traits):
"""
Raises a ValueError if a trait doesn't exist and a SyntaxError
if the syntax is bad.
"""
for trait in traits:
if (trait_len := len(trait)) > 20:
raise ValueError(f"`{trait}` is too long by {trait_len - 20} characters.")
if trait.lower() in UNIVERSAL_TRAITS:
raise SyntaxError(f"`{trait}` is a reserved trait and cannot be added/updated/deleted.")
if VALID_TRAIT_PATTERN.match(trait) is None:
raise SyntaxError(f"Traits can only have letters and underscores. Received `{trait}`.")
| 33.869565 | 100 | 0.671374 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 384 | 0.49294 |
e3528306b7bd15d5fdcba5e58d4d0fd69b1294a4 | 349 | py | Python | sps/api/v1/socket/message.py | tantexian/sps-2014-12-4 | 0cdab186cb3bf148656c4c214a18215643b4969c | [
"Apache-2.0"
]
| 1 | 2018-07-27T15:16:14.000Z | 2018-07-27T15:16:14.000Z | sps/api/v1/socket/message.py | tantexian/sps-2014-12-4 | 0cdab186cb3bf148656c4c214a18215643b4969c | [
"Apache-2.0"
]
| null | null | null | sps/api/v1/socket/message.py | tantexian/sps-2014-12-4 | 0cdab186cb3bf148656c4c214a18215643b4969c | [
"Apache-2.0"
]
| null | null | null | class MessageType(object):
REG = 1
class Message(object):
def __init__(self):
self.type = None
self.body = None
def get_type(self):
return self.type
def get_body(self):
return self.body
def set_type(self, type):
self.type = type
def set_body(self, body):
self.body = body
| 17.45 | 29 | 0.581662 | 346 | 0.991404 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e355aa3c3d4c58a325cb59719ca07b7c1a10df4b | 2,293 | py | Python | docker_ml_templates/simple_batch_model/container/src/tests/test_model.py | MadMedian/ubik | d8dabf0a26db1e35c653b23facb5045f2ae7bf0d | [ "Apache-2.0" ] | null | null | null | docker_ml_templates/simple_batch_model/container/src/tests/test_model.py | MadMedian/ubik | d8dabf0a26db1e35c653b23facb5045f2ae7bf0d | [ "Apache-2.0" ] | null | null | null | docker_ml_templates/simple_batch_model/container/src/tests/test_model.py | MadMedian/ubik | d8dabf0a26db1e35c653b23facb5045f2ae7bf0d | [ "Apache-2.0" ] | 1 | 2022-03-14T19:41:57.000Z | 2022-03-14T19:41:57.000Z |
import unittest
from ..model import RandomForestWithFeatureSelection
from sklearn.model_selection import train_test_split
import os
import numpy as np
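# Builds a synthetic binary-classification dataset: n_rows samples drawn around pos_loc for the
# positive class and n_rows drawn around neg_loc for the negative class, stacked into one feature matrix.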
def create_dataset(n_rows=1000, n_feats=10, pos_loc=2.0, neg_loc=0.0,
pos_scale=3.0, neg_scale=3.0, random_state=1):
np.random.seed(random_state)
X_pos = np.random.normal(pos_loc, pos_scale, size=(n_rows, n_feats))
X_neg = np.random.normal(neg_loc, neg_scale, size=(n_rows, n_feats))
X = np.vstack([X_pos, X_neg])
y = np.concatenate([np.ones(n_rows), np.zeros(n_rows)])
return X, y
class MyTestCase(unittest.TestCase):
def setUp(self):
self.random_state = 1
self.filepath = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'model.joblib')
def test_model(self):
unit = RandomForestWithFeatureSelection(random_state=self.random_state, n_estimators=10, top_k=8)
X, y = create_dataset()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=self.random_state)
model = unit.train(X_train, y_train)
train_score = unit.score(X_train, y_train)
test_score = unit.score(X_test, y_test)
self.assertGreater(train_score['precision_score'], 0.95)
self.assertGreater(test_score['precision_score'], 0.75)
for score_train, score_test in zip(train_score.values(), test_score.values()):
self.assertGreater(score_train, score_test)
def test_save_load(self):
model = RandomForestWithFeatureSelection(random_state=self.random_state, n_estimators=5, top_k=6)
X, y = create_dataset()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=self.random_state)
model = model.train(X_train, y_train)
model.save(self.filepath)
unit = RandomForestWithFeatureSelection.load(self.filepath)
train_score = unit.score(X_train, y_train)
test_score = unit.score(X_test, y_test)
self.assertGreater(train_score['precision_score'], 0.9)
self.assertGreater(test_score['precision_score'], 0.7)
def tearDown(self):
if os.path.exists(self.filepath):
os.remove(self.filepath)
return
if __name__ == '__main__':
unittest.main()
| 39.534483 | 113 | 0.69167 | 1,663 | 0.725251 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.040122 |
e357ec80e01c5cb1d929b33f8d9bbb4379d90eae | 43,051 | py | Python | DataAnalysis.py | ben-dent/Contract-Cheating-Analysis | 28999b5ac73dbb6f4a65ef3d8f8dd4db677c42df | [ "MIT" ] | null | null | null | DataAnalysis.py | ben-dent/Contract-Cheating-Analysis | 28999b5ac73dbb6f4a65ef3d8f8dd4db677c42df | [ "MIT" ] | null | null | null | DataAnalysis.py | ben-dent/Contract-Cheating-Analysis | 28999b5ac73dbb6f4a65ef3d8f8dd4db677c42df | [ "MIT" ] | null | null | null |
import matplotlib.pyplot as plt;
plt.rcdefaults()
import csv
import sqlite3 as lite
from calendar import monthrange
from datetime import datetime, date, timedelta
from datetimerange import DateTimeRange
import numpy as np
import pycountry_convert as pc
from dateutil.relativedelta import relativedelta
from forex_python.converter import CurrencyRates, RatesNotAvailableError
import random
import pandas as pd
DATABASE_NAME = 'JobDetails.db'
con = lite.connect(DATABASE_NAME)
cur = con.cursor()
bidNames = ["Bid ID", "Job ID", "Country", "User", "Price", "Currency"]
jobNames = ["Job ID", "URL", "Title", "Description", "Tags", "Number Of Bidders", "Average Bid Cost", "Final Cost",
"Currency", "Time", "Converted Final Cost", "Country Of Poster", "Country Of Winner", "Year", "Week",
"Date Range", "Category", "Score", "Positive Matches", "Negative Matches", "Attachment", "Category Type Two", "Possible Months"]
reviewJobNames = ["Job ID", "URL", "Title", "Description", "Tags", "Number Of Bidders", "Average Bid Cost", "Final Cost",
"Currency", "Time", "Converted Final Cost", "Country Of Poster", "Country Of Winner", "Date Scraped",
"Time Ago", "Date Range", "Category", "Score", "Positive Matches", "Negative Matches", "Attachment",
"Possible Years", "Category Type Two", "Possible Months"]
profileNames = ["Profile ID", "Username", "Number Of Reviews", "Average Review", "Hourly Rate",
"Earnings Percentage",
"Country"]
qualificationNames = ["Qualification ID", "Qualification Type", "User", "Qualification Name", "Extra Information"]
reviewNames = ["Review ID", "Project URL", "Profile", "Score", "Amount Paid", "Currency", "Converted Currency",
"Date Scraped", "Date", "Country", "Notes", "Date Range", "Possible Months", "Possible Years"]
winnerNames = ["Job ID", "Job URL", "Username", "Profile URL"]
names = {"Bids": bidNames, "Jobs": jobNames, "JobsHourly": jobNames, "ReviewJobs": reviewJobNames, "Profiles": profileNames,
"Qualifications": qualificationNames, "Reviews": reviewNames, "Winners": winnerNames}
# Converts the currency to USD at the historic rate
def convertCurrency(currency, amount, date):
c = CurrencyRates()
dollarAmount = c.get_rate(currency, 'USD', date) * float(amount)
dollarAmount = '%.2f' % dollarAmount
split = dollarAmount.split('.')
if (int(split[1]) == 0):
return split[0]
return (dollarAmount)
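# Converts an amount to USD using the average exchange rate over the given ISO week of the given year.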
def convertCurrencyWithYear(currency, amount, week, year):
week = str(year) + "-W" + str(week)
startDate = datetime.strptime(week + '-1', "%Y-W%W-%w")
endDate = startDate + relativedelta(weeks=1)
return getAverage(currency, startDate, endDate, amount)
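# Yields each date from startDate (inclusive) up to endDate (exclusive), one day at a time.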
def daterange(startDate, endDate):
for n in range(int((endDate - startDate).days)):
yield startDate + timedelta(n)
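# Applies the average daily exchange rate between startDate and endDate to the amount and returns a USD string.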
def getAverage(currency, startDate, endDate, amount):
c = CurrencyRates()
total = 0
n = 0
for singleDate in daterange(startDate, endDate):
total += c.get_rate(currency, 'USD', singleDate)
n += 1
average = total / n
dollarAmount = average * float(amount)
dollarAmount = '%.2f' % dollarAmount
split = dollarAmount.split('.')
if (int(split[1]) == 0):
return split[0]
return (dollarAmount)
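# The three helpers below convert an amount to USD using the average rate over a past week, month, or calendar year.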
def calculateWeeklyAverage(currency, amount, weeksAgo):
today = date.today()
newDay = (today + relativedelta(weeks=-weeksAgo))
week = newDay.isocalendar()[1]
# startDate = datetime.strptime(str(week) + '-1', "%Y-W%W-%w")
startDate = newDay
endDate = startDate + relativedelta(weeks=1)
return getAverage(currency, startDate, endDate, amount)
def calculateMonthlyAverage(currency, amount, monthsAgo):
today = date.today()
newDay = (today + relativedelta(months=-monthsAgo))
month = newDay.month
year = newDay.year
startDate = date(year, month, 1)
endDate = date(year, month, monthrange(year, month)[1])
return getAverage(currency, startDate, endDate, amount)
def calculateYearlyAverage(currency, amount, year):
startDate = date(year, 1, 1)
endDate = date(year + 1, 1, 1)
return getAverage(currency, startDate, endDate, amount)
# Retrieves saved details to plot
def plotFromDatabase():
cur.execute('SELECT Country FROM Bids')
results = cur.fetchall()
countries = {}
for item in results:
country = item[0]
n = 1
if (countries.get(country) != None):
n = countries.get(country) + 1
countries.update({country: n})
plotBarChartsOfBidderCountries(countries)
# Generates multiple windows of bar charts to display the countries of bidders - grouped by continent
def plotBarChartsOfBidderCountries(countryValues):
# Dictionary containing continent codes and continent names
continents = {
'AN': 'Antarctica',
'NA': 'North America',
'EU': 'Europe',
'SA': 'South America',
'AS': 'Asia',
'OC': 'Oceania',
'AF': 'Africa'
}
# Dictionary that will hold the data for each country
countryData = {
'AN': [[], []],
'NA': [[], []],
'EU': [[], []],
'SA': [[], []],
'AS': [[], []],
'OC': [[], []],
'AF': [[], []]
}
continentPlotData = {
'Antarctica': 0,
'North America': 0,
'Europe': 0,
'South America': 0,
'Asia': 0,
'Oceania': 0,
'Africa': 0
}
# Gets all the countries and the number of bidders from each country
countries = list(countryValues.keys())
values = list(countryValues.values())
# Populating the countryData dictionary with the data from the countries and values lists
# Grouped by continent
for i in range(len(countries)):
country = countries[i]
if country == 'Lao Peoples Democratic Republic':
country = "Lao People's Democratic Republic"
elif country == "Cote DIvoire":
country = "Cote D'Ivoire"
try:
country_code = pc.country_name_to_country_alpha2(country, cn_name_format="default")
except KeyError:
continue
try:
continent_code = pc.country_alpha2_to_continent_code(country_code)
except KeyError:
continue
# continent_code = pc.country_alpha2_to_continent_code(country_code)
        valuesFromContinent = countryData.get(continent_code)
        if valuesFromContinent is None:
            # Skip continent codes that are not tracked in countryData
            continue
        continentCountries = valuesFromContinent[0]
        continentCountries.append(country)
        continentValues = valuesFromContinent[1]
        continentValues.append(values[i])
        countryData.update({continent_code: [continentCountries, continentValues]})
continentNames = list(countryData.keys())
# Plots a graph for each continent
for name in continentNames:
data = countryData.get(name)
if (data != [[], []]):
countries = sorted(data[0])
values = []
for country in countries:
if country == "Lao People's Democratic Republic":
country = "Lao Peoples Democratic Republic"
elif country == "Cote D'Ivoire":
country = "Cote DIvoire"
values.append(countryValues.get(country))
nameOfContinent = continents.get(name)
            try:
                continentPlotData.update({nameOfContinent: sum(values)})
            except TypeError:
                # Leave the continent total unchanged if the values cannot be summed
                pass
yPos = np.arange(len(countries))
fig, ax = plt.subplots(1, 1, sharex=True, sharey=True)
fig.canvas.set_window_title("Countries of bidders")
plt.xticks(yPos, countries, rotation='vertical')
ax.bar(yPos, values, align='center', alpha=0.5)
ax.set_ylim(bottom=0)
ax.yaxis.set_major_locator(plt.MaxNLocator(20, integer=True))
plt.ylabel('Number')
continent_name = continents.get(name)
plt.title(continent_name)
# Resizing the graphs to fit in the window
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 10
plt.rcParams["figure.figsize"] = fig_size
plt.tight_layout()
imageName = "image" + ''.join(char for char in continent_name if char.isalnum()) + ".png"
plt.savefig(imageName, bbox_inches='tight', dpi=100)
yPos = np.arange(len(continentPlotData))
vals = list(continentPlotData.values())
fig, ax = plt.subplots(1, 1, sharex=True, sharey=True)
fig.canvas.set_window_title("Continents")
ax.bar(yPos, vals, align='center', alpha=0.5)
ax.set_ylim(bottom=0)
plt.xticks(yPos, sorted(list(continentPlotData.keys())), rotation='vertical')
ax.yaxis.set_major_locator(plt.MaxNLocator(20, integer=True))
plt.ylabel('Number')
plt.title("Continents")
# Resizing the graphs to fit in the window
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 10
plt.rcParams["figure.figsize"] = fig_size
plt.tight_layout()
plt.savefig("imageContinents", bbox_inches='tight', dpi=100)
plt.show(block=False)
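# Plots a single bar chart of the counts in the given dictionary and saves it as an image named after the title.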
def plotComparison(data, title):
yPos = np.arange(len(data))
vals = list(data.values())
fig, ax = plt.subplots(1, 1, sharex=True, sharey=True)
fig.canvas.set_window_title(title)
ax.bar(yPos, vals, align='center', alpha=0.5)
ax.set_ylim(bottom=0)
if title == 'Categories':
vals = [1,2,3,4,5,'Not Categorised']
else:
vals = sorted(list(data.keys()))
plt.xticks(yPos, vals)
ax.yaxis.set_major_locator(plt.MaxNLocator(20, integer=True))
plt.ylabel('Number')
plt.title(title)
# Resizing the graphs to fit in the window
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 10
plt.rcParams["figure.figsize"] = fig_size
plt.tight_layout()
plt.savefig("image" + title, bbox_inches='tight', dpi=100)
plt.show(block=False)
def plotAllCategories(data):
labels = list(data.keys())
values = list(data.values())
yPos = np.arange(1)
fig, ax = plt.subplots(1, 1, sharex=True, sharey=True)
title = 'All Categories'
fig.canvas.set_window_title(title)
ax.bar(yPos, values, align='center', alpha=0.5)
ax.yaxis.set_major_locator(plt.MaxNLocator(20, integer=True))
ax.set_ylim(bottom=0)
plt.xticks(yPos, labels)
plt.ylabel('Number')
plt.title('All Categories')
# Resizing the graphs to fit in the window
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 10
plt.rcParams["figure.figsize"] = fig_size
plt.tight_layout()
plt.show()
def plotSingleType(data, type):
head = list(data.keys())[0]
values = data.get(head)
yPos = np.arange(1)
fig, ax = plt.subplots(1, 1, sharex=True, sharey=True)
title = ''
if type in ['Tags', 'Category', 'Range', 'Keyword']:
title = type
else:
title = 'Countries of '
if (type == 'Bids'):
title += 'Bidders'
else:
title += type
fig.canvas.set_window_title(title)
# plt.xticks(yPos, [head], rotation='vertical')
ax.bar(yPos, values, align='center', alpha=0.5)
ax.yaxis.set_major_locator(plt.MaxNLocator(20, integer=True))
ax.set_ylim(bottom=0)
ax.xaxis.set_visible(False)
plt.ylabel('Number')
plt.title(head)
# Resizing the graphs to fit in the window
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 10
plt.rcParams["figure.figsize"] = fig_size
plt.tight_layout()
plt.show()
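# Fills in any missing AverageBidCost values in the Jobs and ReviewJobs tables from the stored bids.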
def doAverages():
cur.execute('SELECT JobID, AverageBidCost FROM Jobs')
jobs = cur.fetchall()
con.commit()
for job in jobs:
jobID = job[0]
cost = job[1]
if (cost == ''):
bidAverage = calcAverage(cur, jobID)
            if (bidAverage[0] == -1):
                # No bids were recorded for this job, so there is no average to format
                bidAverage = "None"
            else:
                bidAverage = str(bidAverage[1]) + str(bidAverage[0])
update = "UPDATE Jobs SET AverageBidCost = ? WHERE JobID = ?"
cur.execute(update, [bidAverage, jobID])
con.commit()
cur.execute('SELECT JobID, AverageBidCost FROM ReviewJobs')
jobs = cur.fetchall()
con.commit()
for job in jobs:
jobID = job[0]
cost = job[1]
if (cost == ''):
bidAverage = calcAverage(cur, jobID)
            if (bidAverage[0] == -1):
                # No bids were recorded for this job, so there is no average to format
                bidAverage = "None"
            else:
                bidAverage = str(bidAverage[1]) + str(bidAverage[0])
update = "UPDATE ReviewJobs SET AverageBidCost = ? WHERE JobID = ?"
cur.execute(update, [bidAverage, jobID])
con.commit()
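# Returns [average bid price, currency symbol] for a job, or [-1, -1] if the job has no bids.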
def calcAverage(cur, jobID):
average = 0.0
n = 0
select = "SELECT Price FROM Bids WHERE JobID = ?"
cur.execute(select, [jobID])
prices = cur.fetchall()
for price in prices:
givenAmount = price[0]
price = float(''.join(c for c in givenAmount if c.isnumeric() or c == '.'))
n += 1
average += price
try:
result = average / n
except ZeroDivisionError:
return [-1, -1]
symbol = givenAmount[0]
return [float('%.2f' % result), symbol]
# Saving values from the database to CSV files
def saveAllDataToCSV():
cur.execute("SELECT name FROM sqlite_master WHERE type = 'table'")
con.commit()
tables = [each[0] for each in cur.fetchall()]
saveToCSV(tables, '*', None, None)
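# Writes the selected columns of each table to a CSV file, using the header names defined at the top of this module.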
def saveToCSV(tables, columns, filter, name):
for table in tables:
query = "SELECT " + columns + " FROM " + table
if filter is not None:
query += " WHERE " + filter
cur.execute(query)
data = []
for item in cur.fetchall():
data.append(list(item))
con.commit()
if name is None:
file = table + ".csv"
else:
if (table == "ReviewJobs"):
outputTable = "Review Jobs"
else:
outputTable = table
file = name.split('.')[0] + ' - ' + outputTable + '.csv'
columnNames = names.get(table)
if len(data) > 0:
data.insert(0, columnNames)
data.insert(1, [])
for i in range(len(data)):
line = data[i]
if (i == 0):
open(file, 'w').close()
with open(file, 'a', newline='') as fp:
a = csv.writer(fp, delimiter=',')
line = [line]
a.writerows(line)
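# Counts the jobs (regular and review) whose stored date range overlaps the given start/end dates.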
def countDateRange(start, end):
givenRange = DateTimeRange(start, end)
tables = ['Jobs', 'ReviewJobs']
for table in tables:
data = []
query = 'SELECT * FROM ' + table
cur.execute(query)
results = [list(each) for each in cur.fetchall()]
for job in results:
dateRange = job[15]
d = [each.lstrip().rstrip() for each in dateRange.split("-")]
s = d[0].split("/")
startFormat = str(int(s[2]) + 2000) + "/" + s[1] + "/" + s[0]
inRange = False
if len(d) > 1:
e = d[1].split("/")
endFormat = str(int(e[2]) + 2000) + "/" + e[1] + "/" + e[0]
tableRange = DateTimeRange(startFormat, endFormat)
for day in tableRange.range(relativedelta(days=1)):
if day in givenRange:
inRange = True
else:
inRange = startFormat in givenRange
if inRange:
data.append(job)
return len(data)
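# Saves the jobs whose date range overlaps the given start/end dates to a per-table CSV file.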
def saveDateRange(start, end):
givenRange = DateTimeRange(start, end)
tables = ['Jobs', 'ReviewJobs']
for table in tables:
data = []
query = 'SELECT * FROM ' + table
cur.execute(query)
results = [list(each) for each in cur.fetchall()]
for job in results:
dateRange = job[15]
d = [each.lstrip().rstrip() for each in dateRange.split("-")]
s = d[0].split("/")
startFormat = str(int(s[2]) + 2000) + "/" + s[1] + "/" + s[0]
inRange = False
if len(d) > 1:
e = d[1].split("/")
endFormat = str(int(e[2]) + 2000) + "/" + e[1] + "/" + e[0]
tableRange = DateTimeRange(startFormat, endFormat)
for day in tableRange.range(relativedelta(days=1)):
if day in givenRange:
inRange = True
else:
inRange = startFormat in givenRange
if inRange:
data.append(job)
columnNames = names.get(table)
file = "Date Range for " + table + " from " + start.replace("/", "-") + " to " + end.replace("/", "-") + ".csv"
if len(data) > 0:
data.insert(0, columnNames)
data.insert(1, [])
for i in range(len(data)):
line = data[i]
if (i == 0):
open(file, 'w+').close()
with open(file, 'a', newline='') as fp:
a = csv.writer(fp, delimiter=',')
line = [line]
a.writerows(line)
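# Scores each job from its keyword matches. Positive and negative keyword hits are counted in the title and
# description, and the score is max(0, round((100 * positives - ratio * negatives) / (positives + negatives))),
# where ratio = (number of positive keywords * constant) / number of negative keywords.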
def scoreProjects(constant, doPrint):
positive, negative = getKeywords()
positiveCopy = []
for word in positive:
positiveCopy.append(word)
new = ''.join(c + '.' for c in word if c.isalpha())
positiveCopy.append(new[:-1])
positive = positiveCopy
ratio = (len(positive) * constant) / len(negative)
cur.execute('SELECT JobID, Title, Description FROM Jobs')
res = cur.fetchall()
results = []
for r in res:
results.append(list(r))
for i in range(len(results)):
if doPrint:
print("Job Score " + str(i + 1) + "/" + str(len(results) + 1))
job = results[i]
jID = job[0]
title = job[1].lower()
description = job[2].lower()
posMatches = ""
negMatches = ""
numPositive = 0
numNegative = 0
for keyword in positive:
numPositive += (len(title.split(keyword)) - 1) + (len(description.split(keyword)) - 1)
if (len(title.split(keyword)) > 1) or (len(description.split(keyword)) > 1):
if (keyword not in posMatches):
posMatches += (", " + keyword)
for keyword in negative:
numNegative += (len(title.split(keyword)) - 1) + (len(description.split(keyword)) - 1)
if (len(title.split(keyword)) > 1) or (len(description.split(keyword)) > 1):
if (keyword not in negMatches):
negMatches += (", " + keyword)
try:
# numNegative *= ratio
# l = (numPositive * ratio)
# score = round((numPositive / (numPositive + numNegative)) * 100)
score = max(0, round((((numPositive * 100) - (ratio * numNegative)) / (numPositive + numNegative))))
except ZeroDivisionError:
score = -1
p = posMatches.split(",")
b = ""
for j in range(len(p)):
if (j > 0):
b += p[j]
if (j != len(p) - 1):
b += ", "
posMatches = b.lstrip()
n = negMatches.split(",")
b = ""
for j in range(len(n)):
if (j > 0):
b += n[j]
if (j != len(n) - 1):
b += ", "
negMatches = b.lstrip()
query = "UPDATE Jobs SET Score = " + str(score) + \
", PositiveMatches = '" + str(posMatches) + "', NegativeMatches = '" + str(
negMatches) + "' WHERE JobID = " + str(
jID)
cur.execute(query)
con.commit()
cur.execute('SELECT JobID, Title, Description FROM ReviewJobs')
res = cur.fetchall()
results = []
for r in res:
results.append(list(r))
for i in range(len(results)):
if doPrint:
print("Review Job Score " + str(i + 1) + "/" + str(len(results) + 1))
job = results[i]
jID = job[0]
title = job[1].lower()
description = job[2].lower()
posMatches = ""
negMatches = ""
numPositive = 0
numNegative = 0
for keyword in positive:
numPositive += (len(title.split(keyword)) - 1) + (len(description.split(keyword)) - 1)
if (len(title.split(keyword)) > 1) or (len(description.split(keyword)) > 1):
if (keyword not in posMatches):
posMatches += (", " + keyword)
for keyword in negative:
numNegative += (len(title.split(keyword)) - 1) + (len(description.split(keyword)) - 1)
if (len(title.split(keyword)) > 1) or (len(description.split(keyword)) > 1):
if (keyword not in negMatches):
negMatches += (", " + keyword)
try:
# numNegative *= ratio
# l = (numPositive * ratio)
# score = round((numPositive / (numPositive + numNegative)) * 100)
score = max(0, round((((numPositive * 100) - (ratio * numNegative)) / (numPositive + numNegative))))
except ZeroDivisionError:
score = -1
p = posMatches.split(",")
b = ""
for i in range(len(p)):
if (i > 0):
b += p[i]
if (i != len(p) - 1):
b += ", "
posMatches = b.lstrip()
n = negMatches.split(",")
b = ""
for i in range(len(n)):
if (i > 0):
b += n[i]
if (i != len(n) - 1):
b += ", "
negMatches = b.lstrip()
query = "UPDATE ReviewJobs SET Score = " + str(score) + \
", PositiveMatches = '" + str(posMatches) + "', NegativeMatches = '" + str(
negMatches) + "' WHERE JobID = " + str(
jID)
cur.execute(query)
con.commit()
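# Reads the positive and negative keyword lists from their text files and returns them lower-cased.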
def getKeywords():
positive = []
negative = []
for line in open('positiveKeywords.txt'):
if (len(line) > 1):
word = line.rstrip('\n')
positive.append(word)
for line in open('negativeKeywords.txt'):
if (len(line) > 1):
word = line.rstrip('\n')
negative.append(word)
return [keyword.lower() for keyword in positive], [keyword.lower() for keyword in negative]
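# Returns [JobID, CountryOfWinner] pairs for jobs whose date range overlaps the given start/end dates.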
def jobsInDateRange(start, end):
givenRange = DateTimeRange(start, end)
tables = ['Jobs', 'ReviewJobs']
data = []
for table in tables:
query = 'SELECT DateRange, JobID, CountryOfWinner FROM ' + table
cur.execute(query)
results = [list(each) for each in cur.fetchall()]
for job in results:
dateRange = job[0]
d = [each.lstrip().rstrip() for each in dateRange.split("-")]
s = d[0].split("/")
startFormat = str(int(s[2]) + 2000) + "/" + s[1] + "/" + s[0]
inRange = False
endFormat = ''
if len(d) > 1:
e = d[1].split("/")
endFormat = str(int(e[2]) + 2000) + "/" + e[1] + "/" + e[0]
tableRange = DateTimeRange(startFormat, endFormat)
for day in tableRange.range(relativedelta(days=1)):
if day in givenRange:
inRange = True
else:
inRange = startFormat in givenRange
if inRange:
data.append([job[1], job[2]])
return data
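# Converts review payment amounts to USD, choosing the averaging window from the 'x days/weeks/months/years ago' field.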
def conversions():
cur.execute("SELECT ReviewID, AmountPaid, Currency, Date FROM Reviews WHERE ConvertedCurrency = 'None' or ConvertedCurrency = '' AND AmountPaid != ''")
res = cur.fetchall()
results = []
for result in res:
results.append(list(result))
for i in range(len(results)):
print("Review " + str(i + 1) + "/" + str(len(results) + 1))
r = results[i]
id = r[0]
value = r[1]
amount = ""
if (value != 'SEALED'):
try:
amount = float(''.join(c for c in value if c.isnumeric() or c == '.'))
            except ValueError:
                # Treat unparseable amounts the same as sealed ones
                amount = "None"
else:
amount = "None"
currency = r[2]
dateOff = r[3]
timeSplit = dateOff.split()
timeFrame = timeSplit[1]
timeAmount = int(timeSplit[0])
convertedCurrency = "None"
if amount != "None":
if ((timeFrame == 'month') or (timeFrame == 'months')):
convertedCurrency = calculateMonthlyAverage(currency, amount, timeAmount)
elif ((timeFrame == 'week') or (timeFrame == 'weeks')):
convertedCurrency = calculateWeeklyAverage(currency, amount, timeAmount)
elif ((timeFrame == 'year') or (timeFrame == 'years')):
convertedCurrency = calculateYearlyAverage(currency, amount,
date.today().year - timeAmount)
elif ((timeFrame == 'day') or (timeFrame == 'days')):
dateToConvert = date.today() - relativedelta(days=timeAmount)
convertedCurrency = convertCurrency(currency, amount, dateToConvert)
convertedCurrency = "$" + str(convertedCurrency)
query = "UPDATE Reviews SET ConvertedCurrency = '" + str(convertedCurrency) + "' WHERE ReviewID = " + str(id)
cur.execute(query)
con.commit()
def jobConversions():
cur.execute(
"SELECT JobID, FinalCost, Currency, Year, Week FROM Jobs WHERE (ConvertedFinalCost = 'None' or ConvertedFinalCost = '') AND FinalCost != 'None'")
res = cur.fetchall()
results = []
for result in res:
results.append(list(result))
for i in range(len(results)):
print("Job " + str(i + 1) + "/" + str(len(results) + 1))
r = results[i]
id = r[0]
value = r[1]
if (value != 'None'):
amount = float(''.join(c for c in value if c.isnumeric() or c == '.'))
else:
amount = "None"
currency = r[2]
year = r[3]
week = r[4]
convertedCurrency = "None"
if amount != "None":
# convertedCurrency = convertCurrencyWithYear(currency, amount, week, year)
# success = False
# while not success:
try:
convertedCurrency = convertCurrencyWithYear(currency, amount, week, year)
# convertedCurrency = "$" + str(convertedCurrency)
except RatesNotAvailableError:
convertedCurrency = "Unavailable"
query = "UPDATE Jobs SET ConvertedFinalCost = '" + str(convertedCurrency) + "' WHERE JobID = " + str(
id)
cur.execute(query)
con.commit()
reviewJobConversions()
def jobAvConversions():
cur.execute(
"SELECT JobID, AverageBidCost, Currency, Year, Week FROM Jobs")
res = cur.fetchall()
results = []
for result in res:
results.append(list(result))
for i in range(len(results)):
print("Job " + str(i + 1) + "/" + str(len(results) + 1))
r = results[i]
id = r[0]
value = r[1]
if (value != 'None'):
amount = float(''.join(c for c in value if c.isnumeric() or c == '.'))
else:
amount = "None"
currency = r[2]
year = r[3]
week = r[4]
convertedCurrency = "None"
if amount != "None":
# convertedCurrency = convertCurrencyWithYear(currency, amount, week, year)
# success = False
# while not success:
try:
convertedCurrency = convertCurrencyWithYear(currency, amount, week, year)
# convertedCurrency = "$" + str(convertedCurrency)
except RatesNotAvailableError:
convertedCurrency = "Unavailable"
query = "UPDATE Jobs SET AverageBidCost = '" + str(convertedCurrency) + "' WHERE JobID = " + str(
id)
cur.execute(query)
con.commit()
reviewAvJobConversions()
def reviewAvJobConversions():
cur.execute(
"SELECT JobID, AverageBidCost, Currency, TimeAgo FROM ReviewJobs")
res = cur.fetchall()
results = []
for result in res:
results.append(list(result))
for i in range(len(results)):
print("Review Job " + str(i + 1) + "/" + str(len(results) + 1))
r = results[i]
timeSplit = r[3].split()
timeFrame = timeSplit[1]
timeAmount = int(timeSplit[0])
currency = r[2]
finalCost = r[1]
convertedCurrency = ""
jID = r[0]
if (finalCost != "None"):
valuePaid = float(''.join(c for c in finalCost if c.isnumeric() or c == '.'))
if ((timeFrame == 'month') or (timeFrame == 'months')):
convertedCurrency = calculateMonthlyAverage(currency, valuePaid, timeAmount)
elif ((timeFrame == 'week') or (timeFrame == 'weeks')):
convertedCurrency = calculateWeeklyAverage(currency, valuePaid, timeAmount)
elif ((timeFrame == 'year') or (timeFrame == 'years')):
convertedCurrency = calculateYearlyAverage(currency, valuePaid,
date.today().year - timeAmount)
elif ((timeFrame == 'day') or (timeFrame == 'days')):
dateToConvert = date.today() - relativedelta(days=timeAmount)
convertedCurrency = convertCurrency(currency, valuePaid, dateToConvert)
convertedCurrency = "$" + str(convertedCurrency)
query = "UPDATE ReviewJobs SET AverageBidCost = '" + str(convertedCurrency) + "' WHERE JobID = " + str(
jID)
cur.execute(query)
con.commit()
def reviewJobConversions():
cur.execute(
"SELECT JobID, FinalCost, Currency, TimeAgo FROM ReviewJobs WHERE (ConvertedFinalCost = 'None' or ConvertedFinalCost = '' or ConvertedFinalCost = '$') AND FinalCost != 'None'")
res = cur.fetchall()
results = []
for result in res:
results.append(list(result))
for i in range(len(results)):
print("Review Job " + str(i + 1) + "/" + str(len(results) + 1))
r = results[i]
timeSplit = r[3].split()
timeFrame = timeSplit[1]
timeAmount = int(timeSplit[0])
currency = r[2]
finalCost = r[1]
convertedCurrency = ""
jID = r[0]
if (finalCost != "None"):
valuePaid = float(''.join(c for c in finalCost if c.isnumeric() or c == '.'))
if ((timeFrame == 'month') or (timeFrame == 'months')):
convertedCurrency = calculateMonthlyAverage(currency, valuePaid, timeAmount)
elif ((timeFrame == 'week') or (timeFrame == 'weeks')):
convertedCurrency = calculateWeeklyAverage(currency, valuePaid, timeAmount)
elif ((timeFrame == 'year') or (timeFrame == 'years')):
convertedCurrency = calculateYearlyAverage(currency, valuePaid,
date.today().year - timeAmount)
elif ((timeFrame == 'day') or (timeFrame == 'days')):
dateToConvert = date.today() - relativedelta(days=timeAmount)
convertedCurrency = convertCurrency(currency, valuePaid, dateToConvert)
convertedCurrency = "$" + str(convertedCurrency)
query = "UPDATE ReviewJobs SET ConvertedFinalCost = '" + str(convertedCurrency) + "' WHERE JobID = " + str(
jID)
cur.execute(query)
con.commit()
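# Translates a relative time string such as '3 weeks' into the matching calendar date range string.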
def calcDateRange(time):
today = date.today()
split = time.split()
timeFrame = split[1]
timeAmount = int(split[0])
if (timeFrame == "day") or (timeFrame == "days"):
newDate = today + relativedelta(days=-timeAmount)
timeRange = newDate.strftime("%d/%m/%y")
return timeRange
if (timeFrame == "hour") or (timeFrame == "hours"):
startDate = today + relativedelta(days=-1)
endDate = today
if (timeFrame == "week") or (timeFrame == "weeks"):
newDate = today + relativedelta(weeks=-timeAmount)
while (newDate.weekday() != 0):
newDate += relativedelta(days=-1)
startDate = newDate
endDate = startDate + relativedelta(days=6)
elif (timeFrame == "month") or (timeFrame == "months"):
newDate = today + relativedelta(months=-timeAmount)
year = newDate.year
month = newDate.month
startDate = date(year, month, 1)
endDate = date(year, month, monthrange(year, month)[1])
elif (timeFrame == "year") or (timeFrame == "years"):
newDate = today + relativedelta(years=-timeAmount)
startDate = date(newDate.year, 1, 1)
endDate = date(newDate.year, 12, 31)
return (startDate.strftime("%d/%m/%y") + " - " + endDate.strftime("%d/%m/%y"))
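# Populates the DateRange column for review jobs, reviews, and jobs that do not yet have one.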
def getDateRanges():
today = date.today()
cur.execute('SELECT JobID, TimeAgo FROM ReviewJobs WHERE DateRange IS NULL')
res = cur.fetchall()
results = []
for r in res:
results.append(list(r))
for i in range(len(results)):
print("Review Job Date " + str(i + 1) + "/" + str(len(results) + 1))
r = results[i]
# timeSplit = r[1].split()
# timeFrame = timeSplit[1]
# timeAmount = int(timeSplit[0])
jID = r[0]
timeRange = calcDateRange(r[1])
query = "UPDATE ReviewJobs SET DateRange = '" + str(timeRange) + "' WHERE JobID = " + str(
jID)
cur.execute(query)
con.commit()
cur.execute('SELECT ReviewID, Date FROM Reviews WHERE DateRange IS NULL')
res = cur.fetchall()
results = []
for r in res:
results.append(list(r))
for i in range(len(results)):
print("Review Date " + str(i + 1) + "/" + str(len(results) + 1))
r = results[i]
# timeSplit = r[1].split()
# timeFrame = timeSplit[1]
# timeAmount = int(timeSplit[0])
jID = r[0]
timeRange = ""
timeRange = calcDateRange(r[1])
query = "UPDATE Reviews SET DateRange = '" + str(timeRange) + "' WHERE ReviewID = " + str(
jID)
cur.execute(query)
con.commit()
cur.execute('SELECT JobID, Year, Week FROM Jobs')
res = cur.fetchall()
results = []
for r in res:
results.append(list(r))
for i in range(len(results)):
print("Job Date " + str(i + 1) + "/" + str(len(results) + 1))
r = results[i]
year = r[1]
jobWeek = r[2]
jID = r[0]
week = str(year) + "-W" + str(jobWeek)
startDate = datetime.strptime(week + '-1', "%Y-W%W-%w")
endDate = startDate + relativedelta(weeks=1)
timeRange = startDate.strftime("%d/%m/%y") + " - " + endDate.strftime("%d/%m/%y")
query = "UPDATE Jobs SET DateRange = '" + str(timeRange) + "' WHERE JobID = " + str(
jID)
cur.execute(query)
con.commit()
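# Searches for a scoring constant that keeps each category's scores inside its expected band
# (category 1 in 0-20 up to category 5 in 80-100), nudging the constant by 0.0125 per iteration
# until the average distance from the bands falls below 5 or 10000 iterations pass.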
def optimiseConstant():
low = 9
high = 17
averageDistance = 1000
constant = random.randrange(low, high + 1)
iteration = 1
ranges = {1: [0, 20], 2: [20, 40], 3: [40, 60], 4: [60, 80], 5: [80, 100]}
while ((averageDistance >= 5) and (iteration < 10000)):
print("Iteration number: " + str(iteration) + " - Constant = " + str(constant))
tooBig = 0
tooSmall = 0
scoreProjects(constant, False)
averageDistances = []
for i in range(1, 6):
totalDistance = 0
n = 0
query = 'SELECT Score FROM ReviewJobs WHERE Category = ' + str(i)
cur.execute(query)
results = [r[0] for r in cur.fetchall()]
scoreRange = ranges.get(i)
lower = scoreRange[0]
upper = scoreRange[1]
for result in results:
n += 1
if (result != -1):
if ((result >= lower) and (result <= upper)):
distance = 0
elif (result > upper):
distance = result - upper
tooBig += 1
else:
distance = lower - result
tooSmall += 1
# distance = min(abs(result - lower), abs(result - upper))
totalDistance += distance
averageDistances.append(totalDistance / n)
averageDistance = sum(averageDistances) / 5
print("Average Distance: " + str(averageDistance) + "\n")
if (averageDistance >= 5):
if (tooBig > tooSmall):
constant += 0.0125
else:
constant -= 0.0125
iteration += 1
print(constant)
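# Plots the number of projects or distinct bidders per year, combining exact years from Jobs with the possible years recorded for review jobs.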
def plotYears(type):
cur.execute('SELECT DISTINCT(Year) FROM Jobs ORDER BY Year')
years = [each[0] for each in cur.fetchall()]
cur.execute('SELECT PossibleYears FROM ReviewJobs')
results = [each[0] for each in cur.fetchall()]
for result in results:
ys = [int(each.lstrip().rstrip()) for each in result.split(',')]
years += [each for each in ys if each not in years]
years = sorted(years)
data = {}
for year in years:
num = 0
if type == 'Projects':
query = "SELECT COUNT(JobID) FROM Jobs WHERE Year = " + str(year)
cur.execute(query)
num = cur.fetchone()[0]
query = "SELECT COUNT(JobID) FROM ReviewJobs WHERE PossibleYears LIKE '%" + str(year) + "%'"
cur.execute(query)
num += cur.fetchone()[0]
elif type == 'Bidders':
query = "SELECT COUNT(DISTINCT(User)) FROM Bids WHERE JobID IN (SELECT JobID FROM Jobs WHERE Year = " + str(year) + ")"
cur.execute(query)
num = cur.fetchone()[0]
query = "SELECT COUNT(DISTINCT(User)) FROM Bids WHERE JobID IN (SELECT JobID FROM ReviewJobs WHERE PossibleYears LIKE '%" + str(
year) + "%')"
cur.execute(query)
num += cur.fetchone()[0]
data.update({year: num})
yPos = np.arange(len(data))
vals = []
for year in sorted(list(data.keys())):
vals.append(data.get(year))
fig, ax = plt.subplots(1, 1, sharex=True, sharey=True)
fig.canvas.set_window_title(type + ' By Year')
ax.bar(yPos, vals, align='center', alpha=0.5)
ax.set_ylim(bottom=0)
plt.xticks(yPos, years)
ax.yaxis.set_major_locator(plt.MaxNLocator(20, integer=True))
plt.ylabel('Number')
plt.title(type + ' By Year')
# Resizing the graphs to fit in the window
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 10
plt.rcParams["figure.figsize"] = fig_size
plt.tight_layout()
plt.savefig("image" + type + ' By Year', bbox_inches='tight', dpi=100)
plt.show(block=False)
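# Derives the PossibleYears column for review jobs from the start and end years of their date range.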
def possibleYears():
cur.execute('SELECT JobID, DateRange FROM ReviewJobs')
results = [list(each) for each in cur.fetchall()]
for pair in results:
jID = pair[0]
dateRange = pair[1]
years = ''
split = dateRange.split()
startYear = 2000 + int(split[0].split('/')[-1])
endYear = 2000 + int(split[-1].split('/')[-1])
for year in range(startYear, endYear):
years += str(year) + ", "
years += str(endYear)
query = "UPDATE ReviewJobs SET PossibleYears = '" + years + "' WHERE JobID = " + str(jID)
cur.execute(query)
con.commit()
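# Converts saved average bid totals to USD per bid, skipping job IDs already listed in savedIDs.txt.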
def avConversions():
data = pd.read_csv('savedIDs.txt', delimiter="\t")
saved = [each[0] for each in data.values]
jobsDf = pd.read_csv('jobsAv.txt', delimiter="\t")
jobsToSave = [pair for pair in jobsDf.values if pair[0] not in saved]
a = open('savedIDs.txt', 'at')
for i in range(len(jobsToSave)):
pair = jobsToSave[i]
print("Job " + str(i + 1) + "/" + str(len(jobsToSave)))
jID = pair[0]
av = pair[1]
if av != 'None':
cur.execute("SELECT DateRange FROM Jobs WHERE JobID = " + str(jID))
dateRange = cur.fetchone()[0]
split = dateRange.split()
startSplit = split[0].split('/')
startDate = date(2000 + int(startSplit[2]), int(startSplit[1]), int(startSplit[0]))
endSplit = split[2].split('/')
endDate = date(2000 + int(endSplit[2]), int(endSplit[1]), int(endSplit[0]))
cur.execute('SELECT Currency FROM Jobs WHERE JobID = ' + str(jID))
currency = cur.fetchone()[0]
av = getAverage(currency, startDate, endDate, av)
cur.execute('SELECT NumberOfBidders FROM Jobs WHERE JobID = ' + str(jID))
numBids = cur.fetchone()[0]
if numBids != 0:
av = '%.2f' % (float(av) / int(numBids))
else:
av = 0
cur.execute("UPDATE Jobs SET AverageBidCost = " + str(av) + " WHERE JobID = " + str(jID))
else:
cur.execute("UPDATE Jobs SET AverageBidCost = 'None' WHERE JobID = " + str(jID))
a.write(str(jID) + "\n")
con.commit()
reviewJobsDf = pd.read_csv('reviewJobsAv.txt', delimiter="\t")
    reviewJobstoSave = [pair for pair in reviewJobsDf.values if pair[0] not in saved]
for i in range(len(reviewJobstoSave)):
pair = reviewJobstoSave[i]
print("Job " + str(i + 1) + "/" + str(len(reviewJobstoSave)))
jID = pair[0]
av = pair[1]
if av != 'None':
cur.execute("SELECT DateRange FROM ReviewJobs WHERE JobID = " + str(jID))
dateRange = cur.fetchone()[0]
split = dateRange.split()
startSplit = split[0].split('/')
startDate = date(2000 + int(startSplit[2]), int(startSplit[1]), int(startSplit[0]))
endSplit = split[2].split('/')
endDate = date(2000 + int(endSplit[2]), int(endSplit[1]), int(endSplit[0]))
cur.execute("SELECT Currency FROM ReviewJobs WHERE JobID = " + str(jID))
currency = cur.fetchone()[0]
av = getAverage(currency, startDate, endDate, av)
cur.execute("SELECT NumberOfBidders FROM ReviewJobs WHERE JobID = " + str(jID))
numBids = cur.fetchone()[0]
            av = '%.2f' % (float(av) / int(numBids))
cur.execute("UPDATE ReviewJobs SET AverageBidCost = " + str(av) + ' WHERE JobID = ' + str(jID))
a.write(str(jID) + "\n")
con.commit()
# def doExtras():
# # doAverages()
# # jobConversions()
# # reviewJobConversions()
# # conversions()
# # getDateRanges()
# # possibleYears()
# # plotYears('Projects')
# doExtras()
# avConversions()
| 31.016571 | 184 | 0.551207 | 0 | 0 | 126 | 0.002927 | 0 | 0 | 0 | 0 | 8,281 | 0.192353 |
e359de552a30d24e6371b5e1ad922405353576ab | 1,733 | py | Python | deploy_flask_plotly/app.py | mohamedsaadmoustafa/Arabic_Dialect_Classification | a13e92ddaa8fda5afcc40d1ce97946174f9a4674 | [ "BSD-3-Clause" ] | null | null | null | deploy_flask_plotly/app.py | mohamedsaadmoustafa/Arabic_Dialect_Classification | a13e92ddaa8fda5afcc40d1ce97946174f9a4674 | [ "BSD-3-Clause" ] | null | null | null | deploy_flask_plotly/app.py | mohamedsaadmoustafa/Arabic_Dialect_Classification | a13e92ddaa8fda5afcc40d1ce97946174f9a4674 | [ "BSD-3-Clause" ] | 1 | 2022-03-14T19:41:57.000Z | 2022-03-14T19:41:57.000Z |
from flask import Flask, render_template, request, jsonify
import numpy as np
import pickle
import sys
import json
import re
app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False
target_names = [
'AE', 'BH', 'DZ',
'EG', 'IQ', 'JO',
'KW', 'LB', 'LY',
'MA', 'OM','PL',
'QA', 'SA', 'SD',
'SY', 'TN', 'YE'
]
arabic_dialects = {
'AE': 'لهجة اماراتية', 'BH': 'لهجة بحرينية', 'DZ': 'لهجة جزائرية', 'EG': 'لهجة مصرية', 'IQ': 'لهجة عراقية',
'JO': 'لهجة أردنية', 'KW': 'لهجة كويتية', 'LB': 'لهجة لبنانية', 'LY': 'لهجة ليبية', 'MA': 'لهجة مغربية',
'OM': 'لهجة عمانية', 'PL': 'لهجة فلسطينية', 'QA': 'لهجة قطرية', 'SA': 'لهجة سعودية', 'SD': 'لهجة سودانية',
'SY': 'لهجة سورية', 'TN': 'لهجة تونسية', 'YE': 'لهجة يمنية'
}
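# Loads the pickled classifier from model.sav and returns the predicted dialect name plus per-class probabilities in percent.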
def model(text):
print(text, file=sys.stderr)
filename = 'model.sav'
loaded_model = pickle.load(open(filename, 'rb'))
pred = loaded_model.predict( [text] )
pred_p = ( loaded_model.predict_proba( [text] )[0] * 100 ).round(2) # %
return arabic_dialects[target_names[pred[0]]], pred_p
@app.route('/')
def home():
for i in range(3): print(i)
return render_template('home.html')
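# Example (hypothetical input): GET /api?text=<Arabic sentence> returns JSON with the predicted
# dialect under 'predict' and the per-class percentages under 'predict_p'.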
@app.route('/api')
def predict():
text_input = request.args.get('text')#.decode("utf-8")
text_input = re.sub(r'[0-9a-zA-Z?]', '', text_input) #remove english words and numbers
if text_input == "": return "null"
predict, predict_p = model(text_input)
return jsonify(
{
'predict': json.dumps(predict, ensure_ascii = False ),
'predict_p': predict_p.tolist(),
}
)
if __name__ == '__main__':
    app.run(debug=True)
| 27.507937 | 112 | 0.562031 | 0 | 0 | 0 | 0 | 555 | 0.289666 | 0 | 0 | 731 | 0.381524 |