hexsha stringlengths 40-40 | size int64 5-2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3-248 | max_stars_repo_name stringlengths 5-125 | max_stars_repo_head_hexsha stringlengths 40-78 | max_stars_repo_licenses listlengths 1-10 | max_stars_count int64 1-191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24-24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24-24 ⌀ | max_issues_repo_path stringlengths 3-248 | max_issues_repo_name stringlengths 5-125 | max_issues_repo_head_hexsha stringlengths 40-78 | max_issues_repo_licenses listlengths 1-10 | max_issues_count int64 1-67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24-24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24-24 ⌀ | max_forks_repo_path stringlengths 3-248 | max_forks_repo_name stringlengths 5-125 | max_forks_repo_head_hexsha stringlengths 40-78 | max_forks_repo_licenses listlengths 1-10 | max_forks_count int64 1-105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24-24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24-24 ⌀ | content stringlengths 5-2.06M | avg_line_length float64 1-1.02M | max_line_length int64 3-1.03M | alphanum_fraction float64 0-1 | count_classes int64 0-1.6M | score_classes float64 0-1 | count_generators int64 0-651k | score_generators float64 0-1 | count_decorators int64 0-990k | score_decorators float64 0-1 | count_async_functions int64 0-235k | score_async_functions float64 0-1 | count_documentation int64 0-1.04M | score_documentation float64 0-1 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d203709221f68ac2daa8de8a8dbe4b58bcd9f4f3 | 497 | py | Python | Src/StdLib/Lib/test/xmltests.py | cwensley/ironpython2 | f854444e1e08afc8850cb7c1a739a7dd2d10d32a | ["Apache-2.0"] | 2,293 | 2015-01-02T12:46:10.000Z | 2022-03-29T09:45:43.000Z | Src/StdLib/Lib/test/xmltests.py | cwensley/ironpython2 | f854444e1e08afc8850cb7c1a739a7dd2d10d32a | ["Apache-2.0"] | 1,074 | 2016-12-07T05:02:48.000Z | 2022-03-22T02:09:11.000Z | Src/StdLib/Lib/test/xmltests.py | cwensley/ironpython2 | f854444e1e08afc8850cb7c1a739a7dd2d10d32a | ["Apache-2.0"] | 1,033 | 2015-01-04T07:48:40.000Z | 2022-03-24T09:34:37.000Z |
# Convenience test module to run all of the XML-related tests in the
# standard library.
import sys
import test.test_support
test.test_support.verbose = 0
def runtest(name):
__import__(name)
module = sys.modules[name]
if hasattr(module, "test_main"):
module.test_main()
runtest("test.test_minidom")
runtest("test.test_pyexpat")
runtest("test.test_sax")
runtest("test.test_xml_etree")
runtest("test.test_xml_etree_c")
runtest("test.test_xmllib")
runtest("test.test_xmlrpc")
| 22.590909 | 68 | 0.748491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 231 | 0.464789 |
d20425193c1b51cfe42ea596643380c8747b1847 | 1,275 | py | Python | memcnn/experiment/tests/test_factory.py | classner/memcnn | 107ea40945b2b0d312d05cab5b78633e5f977a52 | ["MIT"] | 224 | 2018-03-03T02:46:54.000Z | 2022-02-12T14:33:56.000Z | memcnn/experiment/tests/test_factory.py | classner/memcnn | 107ea40945b2b0d312d05cab5b78633e5f977a52 | ["MIT"] | 62 | 2018-04-28T01:25:14.000Z | 2021-11-25T13:20:57.000Z | memcnn/experiment/tests/test_factory.py | classner/memcnn | 107ea40945b2b0d312d05cab5b78633e5f977a52 | ["MIT"] | 25 | 2018-04-20T18:08:12.000Z | 2022-02-03T22:13:44.000Z |
import pytest
import os
import memcnn.experiment.factory
from memcnn.config import Config
def test_get_attr_from_module():
a = memcnn.experiment.factory.get_attr_from_module('memcnn.experiment.factory.get_attr_from_module')
assert a is memcnn.experiment.factory.get_attr_from_module
def test_load_experiment_config():
cfg_fname = os.path.join(Config.get_dir(), 'experiments.json')
memcnn.experiment.factory.load_experiment_config(cfg_fname, ['cifar10', 'resnet110'])
@pytest.mark.skip(reason="Covered more efficiently by test_train.test_run_experiment")
def test_experiment_config_parser(tmp_path):
tmp_data_dir = tmp_path / "tmpdata"
cfg_fname = os.path.join(Config.get_dir(), 'experiments.json')
cfg = memcnn.experiment.factory.load_experiment_config(cfg_fname, ['cifar10', 'resnet110'])
memcnn.experiment.factory.experiment_config_parser(cfg, str(tmp_data_dir), workers=None)
def test_circular_dependency(tmp_path):
p = str(tmp_path / "circular.json")
content = u'{ "circ": { "base": "circ" } }'
with open(p, 'w') as fh:
fh.write(content)
with open(p, 'r') as fh:
assert fh.read() == content
with pytest.raises(RuntimeError):
memcnn.experiment.factory.load_experiment_config(p, ['circ'])
| 37.5 | 104 | 0.741176 | 0 | 0 | 0 | 0 | 427 | 0.334902 | 0 | 0 | 253 | 0.198431 |
d2045b61e5e8006918d4654b503671b6d4cfdf28 | 303 | py | Python | source/bluetooth/test_search_serial_port.py | Takahiro55555/CameraSystem | 53a77b7a7bd0c34b486d73af8ef2a49201a0bdaa | ["MIT"] | 1 | 2019-12-03T05:28:35.000Z | 2019-12-03T05:28:35.000Z | source/bluetooth/test_search_serial_port.py | Takahiro55555/CameraSystem | 53a77b7a7bd0c34b486d73af8ef2a49201a0bdaa | ["MIT"] | 88 | 2019-07-01T09:11:35.000Z | 2021-09-08T01:13:16.000Z | source/bluetooth/test_search_serial_port.py | Takahiro55555/CameraSystem | 53a77b7a7bd0c34b486d73af8ef2a49201a0bdaa | ["MIT"] | 5 | 2019-05-22T06:44:38.000Z | 2019-09-18T05:20:30.000Z |
"""
@file: test_search_serial_port.py
@author: Futa HIRAKOBA
@brief: Test program for search_serial_port.py
"""
from search_serial_port import search_com_ports, search_enabled_com_port
def test_search_com_ports():
search_com_ports()
def test_search_enabled_com_port():
search_enabled_com_port()
| 18.9375 | 72 | 0.808581 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 130 | 0.397554 |
d2054031cc7f367ae05b0c0f073e7b256fa4a564 | 238 | py | Python | Aula 01/ConversaoMedidas.py | eduardojpsena/EstruturaDeDados-Python-IESP | 97c22fc1411dfdae2d1085e9a3ca0c334ee07988 | ["MIT"] | null | null | null | Aula 01/ConversaoMedidas.py | eduardojpsena/EstruturaDeDados-Python-IESP | 97c22fc1411dfdae2d1085e9a3ca0c334ee07988 | ["MIT"] | null | null | null | Aula 01/ConversaoMedidas.py | eduardojpsena/EstruturaDeDados-Python-IESP | 97c22fc1411dfdae2d1085e9a3ca0c334ee07988 | ["MIT"] | null | null | null |
print("---CONVERSÃO DE MEDIDAS---")
valor_metros = float(input("Informe o valor em metros à ser convertido: "))
valor_centimetros = valor_metros * 100
print("{} metros equivale a {} centimetros.".format(valor_metros, valor_centimetros))
| 39.666667 | 85 | 0.747899 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.475 |
d2057f4c0253aa5e357b86320d8d2148ad029e12 | 385 | py | Python | src/easymql/__init__.py | vivek-shrikhande/easy-mql | 8cbf6a77aed8230bd92cee5585227ea4a09001b8 | ["MIT"] | null | null | null | src/easymql/__init__.py | vivek-shrikhande/easy-mql | 8cbf6a77aed8230bd92cee5585227ea4a09001b8 | ["MIT"] | null | null | null | src/easymql/__init__.py | vivek-shrikhande/easy-mql | 8cbf6a77aed8230bd92cee5585227ea4a09001b8 | ["MIT"] | null | null | null |
from pyparsing import ParseException
from easymql.exc import EasyMQLSyntaxError
from easymql.pipeline import Pipeline, encode
class EasyMQL:
def parse(self, query_string):
try:
return encode(Pipeline.parse(query_string, explode=False))
except ParseException as e:
raise EasyMQLSyntaxError(query_string, str(e), e.lineno, e.col) from None
| 29.615385 | 85 | 0.724675 | 255 | 0.662338 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d205e00637b9718f14c4962c0430f40c178683e5 | 266 | py | Python | src/guildapi.py | nsde/discord-guildapi | b1303423e74c1370498e594429f3bf4aeae4ee95 | ["MIT"] | null | null | null | src/guildapi.py | nsde/discord-guildapi | b1303423e74c1370498e594429f3bf4aeae4ee95 | ["MIT"] | null | null | null | src/guildapi.py | nsde/discord-guildapi | b1303423e74c1370498e594429f3bf4aeae4ee95 | ["MIT"] | null | null | null |
import requests
import json
def getguild(guild_id):
guild_id = str(guild_id)
http_response = requests.get(f'https://discord.com/api/guilds/{guild_id}/widget.json')
response_data = http_response.json()
data = json.dumps(response_data)
return data
| 29.555556 | 90 | 0.733083 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 56 | 0.210526 |
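A quick usage sketch for the `getguild` helper above: the guild id is a made-up placeholder, the import assumes `src/` is on the Python path, and the target guild must have its server widget enabled for `widget.json` to answer.

```python
# Minimal sketch of calling getguild (hypothetical guild id; the guild's
# server widget must be enabled or Discord returns an error payload).
from guildapi import getguild

widget_json = getguild(81384788765712384)  # placeholder snowflake id
print(widget_json)  # JSON string with the widget fields (name, channels, members, ...)
```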
d2066abfbaca62c1d5be55ef5d80f560df075d0a | 409 | py | Python | smarthome/smarthomeproj/server/migrations/0011_auto_20210122_0256.py | nunocaseiro/smarthome-server-django | 711db6ff360061d861d9985264f753e0f7846327 | ["Apache-2.0"] | null | null | null | smarthome/smarthomeproj/server/migrations/0011_auto_20210122_0256.py | nunocaseiro/smarthome-server-django | 711db6ff360061d861d9985264f753e0f7846327 | ["Apache-2.0"] | null | null | null | smarthome/smarthomeproj/server/migrations/0011_auto_20210122_0256.py | nunocaseiro/smarthome-server-django | 711db6ff360061d861d9985264f753e0f7846327 | ["Apache-2.0"] | null | null | null |
# Generated by Django 3.1.3 on 2021-01-22 02:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('server', '0010_auto_20210122_0054'),
]
operations = [
migrations.AlterField(
model_name='sensorvalue',
name='value',
field=models.DecimalField(decimal_places=2, max_digits=6),
),
]
| 21.526316 | 70 | 0.613692 | 316 | 0.772616 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.244499 |
d207522acb3ce4394972c46c3f9f025ef3ebed35 | 683 | py | Python | p2/core/tasks.py | BeryJu/p2 | 80b5c6a821f90cef73d6e8cd3c6cdb05ffa86b27 | ["MIT"] | null | null | null | p2/core/tasks.py | BeryJu/p2 | 80b5c6a821f90cef73d6e8cd3c6cdb05ffa86b27 | ["MIT"] | null | null | null | p2/core/tasks.py | BeryJu/p2 | 80b5c6a821f90cef73d6e8cd3c6cdb05ffa86b27 | ["MIT"] | null | null | null |
"""p2 core tasks"""
from p2.core.celery import CELERY_APP
from p2.lib.reflection import path_to_class
@CELERY_APP.task(bind=True)
def signal_marshall(self, signal, args=None, kwargs=None):
"""Run signal in task worker"""
if not args:
args = []
if not kwargs:
kwargs = {}
# Lookup PK to model instance
for key, value in kwargs.items():
if 'class' in value and 'pk' in value:
model_class = path_to_class(value.get('class'))
model_instance = model_class.objects.get(pk=value.get('pk'))
kwargs[key] = model_instance
signal_cls = path_to_class(signal)
signal_cls.send(sender=self, *args, **kwargs)
| 32.52381 | 72 | 0.648609 | 0 | 0 | 0 | 0 | 578 | 0.846266 | 0 | 0 | 101 | 0.147877 |
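The `signal_marshall` task above rehydrates any kwarg shaped like `{'class': ..., 'pk': ...}` into a model instance before re-sending the signal. A hypothetical dispatch is sketched below; the signal and model dotted paths are placeholders, not confirmed p2 names.

```python
# Hypothetical dispatch of signal_marshall; 'p2.core.signals.BlobChanged' and
# 'p2.core.models.Blob' are placeholder dotted paths for illustration only.
from p2.core.tasks import signal_marshall

signal_marshall.delay(
    signal='p2.core.signals.BlobChanged',
    kwargs={'blob': {'class': 'p2.core.models.Blob', 'pk': 42}},
)
```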
d207656cad5f592cc3b1825bcd0b8c7607785174 | 4,463 | py | Python | tests/keras_contrib/layers/test_convolutional.py | rgreenblatt/keras-contrib | 46fcdb9384b3bc9399c651b2b43640aa54098e64 | ["MIT"] | 7 | 2017-07-22T09:05:44.000Z | 2019-04-30T02:08:04.000Z | tests/keras_contrib/layers/test_convolutional.py | rgreenblatt/keras-contrib | 46fcdb9384b3bc9399c651b2b43640aa54098e64 | ["MIT"] | 1 | 2017-12-26T02:59:59.000Z | 2017-12-26T02:59:59.000Z | tests/keras_contrib/layers/test_convolutional.py | rgreenblatt/keras-contrib | 46fcdb9384b3bc9399c651b2b43640aa54098e64 | ["MIT"] | 11 | 2017-07-06T14:11:51.000Z | 2021-08-21T23:18:20.000Z |
import pytest
import numpy as np
import itertools
from numpy.testing import assert_allclose
from keras_contrib.utils.test_utils import layer_test, keras_test
from keras.utils.conv_utils import conv_input_length
from keras import backend as K
from keras_contrib import backend as KC
from keras_contrib.layers import convolutional, pooling
from keras.models import Sequential
# TensorFlow does not support full convolution.
if K.backend() == 'theano':
_convolution_border_modes = ['valid', 'same']
else:
_convolution_border_modes = ['valid', 'same']
@keras_test
def test_cosineconvolution_2d():
num_samples = 2
num_filter = 2
stack_size = 3
num_row = 10
num_col = 6
if K.backend() == 'theano':
data_format = 'channels_first'
elif K.backend() == 'tensorflow':
data_format = 'channels_last'
for border_mode in _convolution_border_modes:
for subsample in [(1, 1), (2, 2)]:
for use_bias_mode in [True, False]:
if border_mode == 'same' and subsample != (1, 1):
continue
layer_test(convolutional.CosineConvolution2D,
kwargs={'filters': num_filter,
'kernel_size': (3, 3),
'padding': border_mode,
'strides': subsample,
'use_bias': use_bias_mode,
'data_format': data_format},
input_shape=(num_samples, num_row, num_col, stack_size))
layer_test(convolutional.CosineConvolution2D,
kwargs={'filters': num_filter,
'kernel_size': (3, 3),
'padding': border_mode,
'strides': subsample,
'use_bias': use_bias_mode,
'data_format': data_format,
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2'},
input_shape=(num_samples, num_row, num_col, stack_size))
if data_format == 'channels_first':
X = np.random.randn(1, 3, 5, 5)
input_dim = (3, 5, 5)
W0 = X[:, :, ::-1, ::-1]
elif data_format == 'channels_last':
X = np.random.randn(1, 5, 5, 3)
input_dim = (5, 5, 3)
W0 = X[0, :, :, :, None]
model = Sequential()
model.add(convolutional.CosineConvolution2D(1, (5, 5), use_bias=True,
input_shape=input_dim,
data_format=data_format))
model.compile(loss='mse', optimizer='rmsprop')
W = model.get_weights()
W[0] = W0
W[1] = np.asarray([1.])
model.set_weights(W)
out = model.predict(X)
assert_allclose(out, np.ones((1, 1, 1, 1), dtype=K.floatx()), atol=1e-5)
model = Sequential()
model.add(convolutional.CosineConvolution2D(1, (5, 5), use_bias=False,
input_shape=input_dim,
data_format=data_format))
model.compile(loss='mse', optimizer='rmsprop')
W = model.get_weights()
W[0] = -2 * W0
model.set_weights(W)
out = model.predict(X)
assert_allclose(out, -np.ones((1, 1, 1, 1), dtype=K.floatx()), atol=1e-5)
@keras_test
def test_sub_pixel_upscaling():
num_samples = 2
num_row = 16
num_col = 16
input_dtype = K.floatx()
for scale_factor in [2, 3, 4]:
input_data = np.random.random((num_samples, 4 * (scale_factor ** 2), num_row, num_col))
input_data = input_data.astype(input_dtype)
if K.image_data_format() == 'channels_last':
input_data = input_data.transpose((0, 2, 3, 1))
input_tensor = K.variable(input_data)
expected_output = K.eval(KC.depth_to_space(input_tensor,
scale=scale_factor))
layer_test(convolutional.SubPixelUpscaling,
kwargs={'scale_factor': scale_factor},
input_data=input_data,
expected_output=expected_output,
expected_output_dtype=K.floatx())
if __name__ == '__main__':
pytest.main([__file__])
| 37.191667 | 95 | 0.538203 | 0 | 0 | 0 | 0 | 3,842 | 0.860856 | 0 | 0 | 434 | 0.097244 |
d20883f007efa4a112403e5dc5f0370600e053b9 | 8,131 | py | Python | superpyrate/task_countfiles.py | willu47/superpyrate | 60ce6f98a00cac418f62ccac9a194023a4f4b37a | ["MIT"] | null | null | null | superpyrate/task_countfiles.py | willu47/superpyrate | 60ce6f98a00cac418f62ccac9a194023a4f4b37a | ["MIT"] | null | null | null | superpyrate/task_countfiles.py | willu47/superpyrate | 60ce6f98a00cac418f62ccac9a194023a4f4b37a | ["MIT"] | null | null | null |
"""Holds the luigi tasks which count the number of rows in the files
Records the number of clean and dirty rows in the AIS data,
writing these stats to the database and finally producing a report of the
statistics
1. Count the number of rows in the raw csv files (in ``files/unzipped/<archive>``)
2. Count the number of rows int the clean csv files (in ``files/cleancsv/``)
3. Write the clean rows in the clean column of ais_sources
4. Write the dirty (raw - clean) rows into the dirty column of ais_sources
"""
import luigi
from luigi.util import requires
from luigi.contrib.external_program import ExternalProgramTask
from luigi.postgres import CopyToTable, PostgresQuery
from superpyrate.pipeline import get_environment_variable, ProcessZipArchives, \
GetZipArchive, get_working_folder, \
RunQueryOnTable, GetCsvFile
from plumbum.cmd import wc
from glob import glob
import os
import logging
LOGGER = logging.getLogger(__name__)
logging.basicConfig(filename='reporting.log',
level=logging.DEBUG,
filemode='w',
format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
@requires(ProcessZipArchives)
class GetCountsForAllFiles(luigi.Task):
"""Counts the rows in all clean (validated) and raw files
"""
def run(self):
"""
"""
working_folder = get_working_folder()
paths_to_count = [os.path.join(working_folder, 'files', 'cleancsv')]
with self.input().open('r') as list_of_archives:
for archive in list_of_archives:
filename = os.path.basename(archive)
#Remove extension
name, ext = os.path.splitext(filename)
# count lines of each file in each unzipped archive stored in
# LUIGIWORK/files/unzipped/<name>
path = os.path.join(working_folder, 'files', 'unzipped', name)
LOGGER.debug("Input path: {}".format(path))
if str(ext).strip() == '.zip':
paths_to_count.append(path)
yield [CountLines(countable_path) for countable_path in paths_to_count]
with self.output().open('w') as outfile:
for countable_path in paths_to_count:
outfile.write("{}\n".format(countable_path))
def output(self):
rootdir = get_working_folder()
output_folder = os.path.join(rootdir,'tmp', 'countraw', 'got_all_counts.txt')
return luigi.file.LocalTarget(output_folder)
@requires(GetZipArchive)
class CountLines(luigi.Task):
"""Counts the number of lines for all the csvfiles in a folder
Writes all the counts and filenames to a delimited file with the name of the
folder
Arguments
=========
folder_name : str
The absolute path of the csv file
"""
def run(self):
"""Runs the bash program `wc` to count the number of lines
"""
input_names = glob(self.input().fn + '/*')
output_name = self.output().fn
LOGGER.debug('Counting lines in {} and saving to {}'.format(input_names, output_name))
(wc['-l', input_names] > output_name)()
def output(self):
"""Outputs the files into a folder of the same name as the zip file
The files are placed in a subdirectory of ``LUIGIWORK`` called ``tmp/countraw``
"""
out_folder_name = os.path.basename(self.input().fn)
rootdir = get_working_folder()
output_folder = os.path.join(rootdir,'tmp', 'countraw', out_folder_name + ".csv")
return luigi.file.LocalTarget(output_folder)
class DoIt(luigi.Task):
"""
"""
folder_of_zips = luigi.Parameter(significant=True)
with_db = luigi.BoolParameter(significant=False)
def requires(self):
return GetCountsForAllFiles(self.folder_of_zips, self.with_db)
def run(self):
working_folder = get_working_folder()
count_folder = os.path.join(working_folder, 'tmp', 'countraw')
clean_path = os.path.join(count_folder, 'cleancsv.csv')
countfiles = os.listdir(count_folder)
LOGGER.debug("Files in tmp/countraw: {}".format(countfiles))
filtered_countfiles = [a for a in countfiles if (a != 'cleancsv.csv' and a.endswith('.csv'))]
LOGGER.debug("Files in tmp/countraw after filtering: {}".format(filtered_countfiles))
if len(filtered_countfiles) == 0:
raise RuntimeError("No counted files available")
LOGGER.error("No counted files available")
clean_results = {}
raw_results = {}
counts = []
with open(clean_path, 'r') as clean_counts_file:
for row in clean_counts_file:
LOGGER.debug(row.strip().split(" "))
count, filename = row.strip().split(" ")
just_filename = os.path.basename(filename)
clean_results[just_filename] = int(count)
for filename in filtered_countfiles:
results_file = os.path.join(count_folder, filename)
with open(results_file, 'r') as open_results_file:
for row in open_results_file:
LOGGER.debug(row.strip().split(" "))
count, filename = row.strip().split(" ")
just_filename = os.path.basename(filename)
raw_results[just_filename] = int(count)
_ = raw_results.pop('total')
_ = clean_results.pop('total')
LOGGER.debug("Keys: {}; {}".format(raw_results.keys(), clean_results.keys()))
for filename in raw_results.keys():
clean = clean_results[filename]
dirty = raw_results[filename] - clean
# filepath = os.path.join(working_folder, 'files', 'unzipped', filename)
counts.append((filename, clean, dirty))
LOGGER.debug("Counts of file {}".format(counts))
queries = [("UPDATE ais_sources "\
"SET clean = {}, dirty = {} " \
"WHERE filename = '{}';".format(clean_count,
dirty_count,
filename), filename)
for (filename, clean_count, dirty_count) in counts ]
table = 'ais_sources'
yield [RunQueryOnTable(query, table, id) for query, id in queries]
with self.output().open('w') as outfile:
outfile.write("Done")
def output(self):
working_folder = get_working_folder()
path = os.path.join(working_folder, 'tmp', 'database', 'reports.txt')
return luigi.file.LocalTarget(path)
@requires(DoIt)
class ProduceStatisticsReport(PostgresQuery):
"""Produces a report of the data statistics
"""
host = get_environment_variable('DBHOSTNAME')
database = get_environment_variable('DBNAME')
user = get_environment_variable('DBUSER')
password = get_environment_variable('DBUSERPASS')
query = "SELECT filename, clean, dirty, round(1.0*dirty/(clean+dirty), 2) " \
"AS coverage FROM ais_sources ORDER BY filename ASC;"
table = 'ais_sources'
update_id = 'stats_report'
def run(self):
"""Produce the report and write to a file
"""
connection = self.output().connect()
cursor = connection.cursor()
sql = self.query
LOGGER.info('Executing query from task: {name}'.format(name=self.__class__))
cursor.execute(sql)
working_folder = get_working_folder()
path = os.path.join(working_folder, 'files', 'data_statistics.csv')
with open(path, 'w') as report_file:
report_file.write("{} {} {} {}".format('filename', 'clean', 'dirty', 'coverage'))
for filename, clean, dirty, coverage in cursor.fetchall():
report_file.write("{} {} {} {}".format(filename, clean, dirty, coverage))
# Update marker table
self.output().touch(connection)
# commit and close connection
connection.commit()
connection.close()
| 39.663415 | 101 | 0.615422 | 6,800 | 0.836305 | 3,583 | 0.440659 | 3,857 | 0.474357 | 0 | 0 | 2,369 | 0.291354 |
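The module docstring above describes a count-then-report chain driven by luigi. One plausible way to trigger the final `ProduceStatisticsReport` task from Python is sketched below; the zip folder path is a placeholder, and the `DBHOSTNAME`/`DBNAME`/`DBUSER`/`DBUSERPASS` and `LUIGIWORK` environment variables must already be set for the Postgres and file steps to work.

```python
# Sketch of kicking off the counting/reporting chain with luigi's Python API
# (placeholder folder path; parameters are inherited from DoIt via @requires).
import luigi
from superpyrate.task_countfiles import ProduceStatisticsReport

luigi.build(
    [ProduceStatisticsReport(folder_of_zips='/data/ais_zips', with_db=True)],
    local_scheduler=True,
)
```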
d20a84d94f2ed93364b818533786034015f7b86f | 1,917 | py | Python | pyquil/api/__init__.py | stjordanis/pyquil | 36987ecb78d5dc85d299dd62395b7669a1cedd5a | ["Apache-2.0"] | 677 | 2017-01-09T23:20:22.000Z | 2018-11-26T10:57:49.000Z | pyquil/api/__init__.py | stjordanis/pyquil | 36987ecb78d5dc85d299dd62395b7669a1cedd5a | ["Apache-2.0"] | 574 | 2018-11-28T05:38:40.000Z | 2022-03-23T20:38:28.000Z | pyquil/api/__init__.py | stjordanis/pyquil | 36987ecb78d5dc85d299dd62395b7669a1cedd5a | ["Apache-2.0"] | 202 | 2018-11-30T06:36:28.000Z | 2022-03-29T15:38:18.000Z |
##############################################################################
# Copyright 2016-2017 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""
Sub-package for facilitating connections to the QVM / QPU.
"""
__all__ = [
"AbstractCompiler",
"BenchmarkConnection",
"EncryptedProgram",
"EngagementManager",
"get_qc",
"list_quantum_computers",
"local_forest_runtime",
"QAM",
"QAMExecutionResult",
"QCSClientConfiguration",
"QCSQuantumProcessor",
"QPU",
"QPUCompiler",
"QuantumComputer",
"QuantumExecutable",
"QVM",
"QVMCompiler",
"WavefunctionSimulator",
]
from qcs_api_client.client import QCSClientConfiguration
from pyquil.api._benchmark import BenchmarkConnection
from pyquil.api._compiler import QVMCompiler, QPUCompiler, QuantumExecutable, EncryptedProgram, AbstractCompiler
from pyquil.api._engagement_manager import EngagementManager
from pyquil.api._qam import QAM, QAMExecutionResult
from pyquil.api._qpu import QPU
from pyquil.api._quantum_computer import (
QuantumComputer,
list_quantum_computers,
get_qc,
local_forest_runtime,
)
from pyquil.api._qvm import QVM
from pyquil.api._wavefunction_simulator import WavefunctionSimulator
from pyquil.quantum_processor import QCSQuantumProcessor
| 33.631579 | 112 | 0.691706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,117 | 0.582681 |
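Since this `__init__.py` mostly re-exports the QVM/QPU client API, a short Bell-state run shows how the names listed in `__all__` fit together. This is a sketch under the assumption that the local `qvm` and `quilc` binaries are installed (so `local_forest_runtime` can start them); `"2q-qvm"` is just an example target.

```python
# Bell-state sketch using the re-exported API (assumes local qvm/quilc binaries).
from pyquil import Program, get_qc
from pyquil.gates import H, CNOT, MEASURE
from pyquil.api import local_forest_runtime

program = Program()
ro = program.declare("ro", "BIT", 2)      # classical readout registers
program += H(0)
program += CNOT(0, 1)
program += MEASURE(0, ro[0])
program += MEASURE(1, ro[1])
program.wrap_in_numshots_loop(10)         # run the circuit 10 times

with local_forest_runtime():
    qc = get_qc("2q-qvm")                 # example QVM target
    result = qc.run(qc.compile(program))  # a QAMExecutionResult
```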
d20aad59d161f70830e20fabfe7cc1b0d6c4b1b9 | 946 | py | Python | nodes/makeblock_ros_one.py | to4dy/makeblock-ros | 12b58195c9be3cc95c6398704a17ceb3a841813e | ["MIT"] | 7 | 2017-12-17T00:45:07.000Z | 2022-03-11T10:25:54.000Z | nodes/makeblock_ros_one.py | to4dy/makeblock-ros | 12b58195c9be3cc95c6398704a17ceb3a841813e | ["MIT"] | null | null | null | nodes/makeblock_ros_one.py | to4dy/makeblock-ros | 12b58195c9be3cc95c6398704a17ceb3a841813e | ["MIT"] | 3 | 2016-06-21T05:45:24.000Z | 2017-04-19T18:48:31.000Z |
#!/usr/bin/env python
# license removed for brevity
import rospy
from std_msgs.msg import Float32
from megapi import *
from makeblock_ros.srv import *
bot = None
def onRead(v):
print("and here!")
rospy.loginfo(v)
pub.publish(v)
def handle_makeblock_motors(req):
global bot
bot.motorRun(M1, req.s1)
bot.motorRun(M2, req.s2)
return 1
pub = rospy.Publisher('makeblock_ros_ultrasensor', Float32, queue_size=1)
s = rospy.Service('makeblock_ros_move_motors', MakeBlockMover,
handle_makeblock_motors)
def main():
global bot
bot = MegaPi()
bot.start("/dev/ttyUSB0")
rospy.init_node('makeblock_ros', anonymous=False)
rate = rospy.Rate(10) # 10hz
while not rospy.is_shutdown():
sleep(0.1)
print("been here!")
bot.ultrasonicSensorRead(3, onRead)
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
pass
| 20.12766 | 73 | 0.662791 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 172 | 0.181818 |
d20acbdc55dd2187f4e70d6f0f36211cc6ddf2d9 | 9,347 | py | Python | bets-templates.py | longnow/longview | 9345faacec64f427eab43790abc165af6a572e3d | ["BSD-2-Clause"] | 82 | 2015-01-23T04:20:31.000Z | 2022-02-18T22:33:53.000Z | bets-templates.py | longnow/longview | 9345faacec64f427eab43790abc165af6a572e3d | ["BSD-2-Clause"] | 2 | 2015-03-27T22:24:46.000Z | 2017-02-20T08:19:12.000Z | bets-templates.py | longnow/longview | 9345faacec64f427eab43790abc165af6a572e3d | ["BSD-2-Clause"] | 7 | 2015-06-04T20:37:02.000Z | 2021-03-10T02:41:08.000Z |
# Copyright (c) 2004, The Long Now Foundation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# HTML template substitutions
#
# %n - nodeId (aka item number)
# %t - title
# %d - date string
# %1 [...] - positional arguments
# The HTML template used for a popup.
popupTemplate = """
<div class="node" id="node%n" onmouseout="javascript:hideNode('%n')">
<table cellpadding="0" cellspacing="0" border="0" width="100%">
<tr>
<td class="exp">
BET<br><span class="txt">%n</span></td>
<td class="exp" align="right">
%d
</td>
</tr>
</table>
<div class="txt-sm">
%1</div>
<table cellpadding="3" cellspacing="0" border="0" width="100%">
<tr>
<td class="exp" align="right">
AGREE
</td>
<td class="txt" align="left">
%2
</td>
</tr>
<tr>
<td class="exp" align="right">
DISAGREE
</td>
<td class="txt" align="left">
%3
</td>
</tr>
<tr>
<td class="exp" align="right">
STAKES
</td>
<td class="txt" align="left">
%4
</td>
</tr>
</table>
</div>
"""
notifyTemplate = """
<div class="node" id="node%n" onmouseout="javascript:hideNode('%n')">
<table cellpadding="0" cellspacing="0" border="0" width="100%">
<tr>
<td class="exp">
BET<br><span class="txt">%1</span></td>
<td class="exp" align="right">
REMEMBER AND REMIND
</td>
</tr>
</table>
<div class="txt-sm">
%2</div>
<table cellpadding="3" cellspacing="0" border="0" width="100%">
<tr>
<td class="exp" align="center">
%3
</td>
</tr>
</table>
</div>
"""
# this string gets written out in its entirety to styles.css
stylesheets = """
/* for the whole page, unless overridden */
body {
padding: 0;
margin: 0;
background-image: url("./img-static/bg.jpg");
}
/* Long Bets specific styles */
.exp {
font-size: 11px;
font-family: Verdana, Helvetica, sans-serif;
}
.txt-lg {
font-size: 16px;
font-family: Georgia, Times, serif;
}
.txt {
font-size: 14px;
font-family: Georgia, Times, serif;
}
.txt-sm {
font-size: 11px;
font-family: Georgia, Times, serif;
}
.txt-lt {
font-size: 14px;
font-family: Georgia, Times, serif;
color: #666666;
}
.node .txt-sm {
padding: 5px 0;
font-size: 12px;
}
.key {
width: 664px;
margin: 10px 0;
border: #ccc 1px solid;
}
.key td {
padding: 1px;
font-size: 11px;
width: 50%;
font-family: Verdana, Helvetica, sans-serif;
text-align: center;
}
/* links that have not been visited */
a:link {
color: #930;
text-decoration: none;
}
/* links that have already been visited */
a:visited {
color: #930;
text-decoration: none;
}
/* applied to a link when the cursor is hovering over it */
a:hover {
color: #c63;
text-decoration: underline;
}
/* the table at the very top of the page containing the logo image */
.logotable {
width: 100%; /* percent of the browser window occupied by the table */
margin: 0px;
padding: 0px;
}
/* the table data cell which contains the logo image */
.logo {
text-align: right;
background-color: #000;
border-bottom: 1px solid #996;
}
/* the table containing the title and navbar */
.titleandnav {
width: 100%; /* percent of the browser window occupied by the table */
}
/* the title cell itself */
.titlecell {
padding: 6px 10px; /* first value: top & bottom; second: left & right */
font-family: verdana, helvetica, arial, sans-serif; /* in order of */
/* desirability */
font-size: 16px;
border-top: 1px solid #996;
border-bottom: 1px solid #996;
color: #666;
}
/* the table cell which holds the navigation bar & surrounding whitespace */
.navcell {
text-align: center;
vertical-align: middle;
padding-left: 15px;
font-family: verdana, helvetica, arial, sans-serif; /* in order of */
/* desirability */
font-size: 10px;
color: #666;
}
/* table which holds the navigation bar & horizontal whitespace, but no
* vertical whitespace */
.navtable {
margin-left: auto;
margin-right: auto;
}
/* the dates on both ends of the navigation bar */
.navlabel {
font-family: verdana, helvetica, arial, sans-serif; /* in order of */
/* desirability */
font-size: 10px;
padding: 4px;
}
/* table cell that holds the "Long View Powered" image */
.power {
padding-left: 15px;
padding-right: 5px;
text-align: right;
}
/* row of dates labeling the X-axis of the timeline, at the top */
.ytabletop {
border-bottom: 1px dotted #996;
}
/* cell containing an individual date label on the X-axis of the timeline */
.ycell {
text-align: center;
vertical-align: top;
padding: 0;
font-family: verdana, helvetica, arial, sans-serif; /* in order of */
/* desirability */
font-size: 10px;
}
/* row of dates labeling the X-axis of the timeline, at the bottom */
.ytablebottom {
border-top: 1px dotted #996;
border-bottom: 1px solid #996;
}
/* table cell containing "Past", "Now", and "Future" at the top of the */
/* timeline*/
.pastnowcell {
text-align: right;
padding: 0;
}
/* the table containing the body of the timeline */
#datatable {
border-top: 1px #ddd solid;
border-right: 1px #ddd solid;
background-image: url('./img-generated/timeline-bg.png');
}
/* the background of each timeline bar */
.data {
padding-top: 1px;
padding-bottom: 1px;
background-position: 200px;
background-repeat: repeat-x;
}
/* the block that contains all of the timeline labels on the left side of
* the screen. */
#labels {
position: absolute;
top: 26px;
z-index: 3;
}
/* cell containing a single label on the left side of the screen */
.labelscell {
font-size: 10px;
font-weight: normal;
font-family: verdana, helvetica, arial, sans-serif; /* in order of desirability */
color: #999;
padding-top: 3px;
border: 0;
}
/* the popups themselves */
.node {
position: absolute;
visibility: hidden;
color: #333;
width: 200px;
z-index: 5;
border: 1px solid #999;
background-image: url(./img-static/popup-bg.gif);
padding: 6px;
}
/* The body of the popups (eg the HTML inside the table) */
.popupcell {
font-size: 10px;
font-weight: normal;
font-family: verdana, helvetica, arial, sans-serif; /* in order of */
/* desirability */
}
/* Popup titles */
.popuptitle {
font-size: 12px;
}
"""
# override the default header top matter from the lvhtml module
headerTop = """<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%s</title>
<link rel="stylesheet" href="./styles.css" />
<script language="javascript" type="text/javascript" src="./rollover.js"></script>
</head>
<body onload="loadimgs();">
<img src="./img-static/no.gif" alt="" width="1" height="25" border="0"><br>
<div align="center">
<table cellpadding="0" cellspacing="0" border="0" width="664">
<tr>
<td colspan="3">
<img src="./img-static/timeline.gif" alt="Timeline" width="664" height="38" border="0"></td>
</tr>
<tr>
<td class="exp" nowrap>
<img src="./img-static/no.gif" alt="" width="5" height="1" border="0">
<span class="txt"><b>%s</b></span><br>
<!-- longview.py unused value hack: %s - %s -->
« On the Record: <a href="http://www.longbets.com/bets" target="_top">Bets</a> | <a href="http://www.longbets.com/predictions" target="_top">Predictions</a></td>
<td class="navcell" align="right" nowrap>
<table class="navtable" cellpadding="0" cellspacing="0" border="0">
<tr>
<td class="navlabel">
%s</td>
<td nowrap="nowrap">\n"""
# another override
headerBottom = """</td>
<td class="navlabel">%s</td>
</tr>
</table></td>
<td class="power"><img src="img-static/longview-power.gif" alt="Powered by Long View" width="89" height="22" border="0" /></td>
</td>
</tr>
</table>
<table class="key">
<tr>
<td>
Votes: YES <img src="img-generated/key1.png" alt="" width="65" height="12"> NO</td>
<td>
Discussion Intensity: LESS <img src="img-generated/key2.png" alt="" width="65" height="12"> MORE</td>
</tr>
</table>
</div>
</body>
</html>
"""
| 23.484925 | 167 | 0.642559 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,226 | 0.987055 |
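The comment block at the top of this file documents the placeholder scheme (`%n` for nodeId, `%t` for title, `%d` for the date string, `%1` onward for positional arguments). The actual renderer lives elsewhere in Long View, so the following is only a hypothetical illustration of how those substitutions behave on `popupTemplate`.

```python
# Hypothetical stand-in for the Long View renderer, showing the documented
# %n / %d / %1..%4 substitutions; fill_template is not part of the project.
def fill_template(template, node_id, date, *args):
    html = template.replace('%n', str(node_id)).replace('%d', date)
    for i, arg in enumerate(args, start=1):
        html = html.replace('%' + str(i), arg)   # %1, %2, ... positional args
    return html

html = fill_template(popupTemplate, 17, '2002-02-02',
                     'Bet text', 'Agree text', 'Disagree text', '$1,000 stake')
```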
d20af14dd3e3f451b0c30965586bb3662c6ee4a4 | 768 | py | Python | ansible/roles/kraken.config/filter_plugins/expand_config.py | yenicapotediaz/k2 | 90aeb6efd77371c388b1429fc443aa30673c7787 | ["Apache-2.0"] | 85 | 2016-10-06T23:15:14.000Z | 2017-09-15T00:52:25.000Z | ansible/roles/kraken.config/filter_plugins/expand_config.py | yenicapotediaz/k2 | 90aeb6efd77371c388b1429fc443aa30673c7787 | ["Apache-2.0"] | 739 | 2016-09-19T21:48:58.000Z | 2017-09-15T17:46:52.000Z | ansible/roles/kraken.config/filter_plugins/expand_config.py | yenicapotediaz/k2 | 90aeb6efd77371c388b1429fc443aa30673c7787 | ["Apache-2.0"] | 47 | 2016-09-22T21:32:12.000Z | 2017-09-14T21:00:53.000Z |
import copy, os
from ansible import errors
def expand_config(config_data):
try:
all_data = copy.deepcopy(expand_envs(config_data))
return all_data
except Exception, e:
raise errors.AnsibleFilterError(
'expand_config plugin error: {0}, config_data={1}'.format(
str(e),
str(config_data)))
def expand_envs(obj):
if isinstance(obj, dict):
return { key: expand_envs(val) for key, val in obj.items()}
if isinstance(obj, list):
return [ expand_envs(item) for item in obj ]
if isinstance(obj, basestring):
return os.path.expandvars(obj)
return obj
class FilterModule(object):
''' Expand Kraken configuration file '''
def filters(self):
return {
'expand_config': expand_config
}
| 26.482759 | 70 | 0.669271 | 147 | 0.191406 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.136719 |
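Because `expand_envs` walks dicts, lists, and strings recursively, a tiny example makes the behaviour concrete. The variable name and values below are invented, the import assumes the plugin file is reachable as `expand_config`, and the snippet targets Python 2 to match the plugin's `except Exception, e` / `basestring` syntax.

```python
# Illustration of the recursive env-var expansion (hypothetical variable and
# values; Python 2, to match the filter plugin above).
import os
from expand_config import expand_config

os.environ['KRAKEN_CLUSTER'] = 'demo'
cfg = {'cluster': '$KRAKEN_CLUSTER', 'nodes': ['$KRAKEN_CLUSTER-node1']}
print(expand_config(cfg))
# -> {'cluster': 'demo', 'nodes': ['demo-node1']}
```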
d20cfcc3e7e361f935e2feabc8a3b8078a59377a | 2,514 | py | Python | jaxopt/_src/loop.py | ianwilliamson/jaxopt | 0ff6be8094aacb3bf5472a41d780e3f56fc8e0f8 | ["Apache-2.0"] | 2 | 2021-10-04T15:20:55.000Z | 2021-10-05T08:52:46.000Z | jaxopt/_src/loop.py | ianwilliamson/jaxopt | 0ff6be8094aacb3bf5472a41d780e3f56fc8e0f8 | ["Apache-2.0"] | null | null | null | jaxopt/_src/loop.py | ianwilliamson/jaxopt | 0ff6be8094aacb3bf5472a41d780e3f56fc8e0f8 | ["Apache-2.0"] | null | null | null |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loop utilities."""
import jax
import jax.numpy as jnp
def _while_loop_scan(cond_fun, body_fun, init_val, max_iter):
"""Scan-based implementation (jit ok, reverse-mode autodiff ok)."""
def _iter(val):
next_val = body_fun(val)
next_cond = cond_fun(next_val)
return next_val, next_cond
def _fun(tup, it):
val, cond = tup
# When cond is met, we start doing no-ops.
return jax.lax.cond(cond, _iter, lambda x: (x, False), val), it
init = (init_val, cond_fun(init_val))
return jax.lax.scan(_fun, init, None, length=max_iter)[0][0]
def _while_loop_python(cond_fun, body_fun, init_val, maxiter):
"""Python based implementation (no jit, reverse-mode autodiff ok)."""
val = init_val
for _ in range(maxiter):
cond = cond_fun(val)
if not cond:
# When condition is met, break (not jittable).
break
val = body_fun(val)
return val
def _while_loop_lax(cond_fun, body_fun, init_val, maxiter):
"""lax.while_loop based implementation (jit by default, no reverse-mode)."""
def _cond_fun(_val):
it, val = _val
return jnp.logical_and(cond_fun(val), it <= maxiter - 1)
def _body_fun(_val):
it, val = _val
val = body_fun(val)
return it+1, val
return jax.lax.while_loop(_cond_fun, _body_fun, (0, init_val))[1]
def while_loop(cond_fun, body_fun, init_val, maxiter, unroll=False, jit=False):
"""A while loop with a bounded number of iterations."""
if unroll:
if jit:
fun = _while_loop_scan
else:
fun = _while_loop_python
else:
if jit:
fun = _while_loop_lax
else:
raise ValueError("unroll=False and jit=False cannot be used together")
if jit and fun is not _while_loop_lax:
# jit of a lax while_loop is redundant, and this jit would only
# constrain maxiter to be static where it is not required.
fun = jax.jit(fun, static_argnums=(0, 1, 3))
return fun(cond_fun, body_fun, init_val, maxiter)
| 30.289157 | 79 | 0.699682 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,111 | 0.441925 |
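A minimal usage sketch for the bounded `while_loop` above, assuming the file is importable as `jaxopt._src.loop` (per the row's path): with `unroll=True, jit=True` it takes the scan-based path, doubles the value until the condition fails, and pads the remaining iterations with no-ops.

```python
# Doubling until x >= 100, with an upper bound of 10 iterations.
import jax.numpy as jnp
from jaxopt._src.loop import while_loop

cond_fun = lambda x: x < 100.0   # keep looping while below the threshold
body_fun = lambda x: 2.0 * x     # double on every iteration

x = while_loop(cond_fun, body_fun, init_val=jnp.asarray(1.0),
               maxiter=10, unroll=True, jit=True)
print(x)  # 128.0 -- the loop effectively stops after 7 doublings
```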
d20e5a4fd52895393eb34015d45cba3558f08f7a | 8,407 | py | Python | official/recommendation/model_runner.py | decster/models | 82e783e3172f254b62dc4af08987754ebb7c348c | ["Apache-2.0"] | 3 | 2018-10-31T02:16:47.000Z | 2018-11-06T09:11:37.000Z | official/recommendation/model_runner.py | decster/models | 82e783e3172f254b62dc4af08987754ebb7c348c | ["Apache-2.0"] | null | null | null | official/recommendation/model_runner.py | decster/models | 82e783e3172f254b62dc4af08987754ebb7c348c | ["Apache-2.0"] | 1 | 2020-01-21T17:39:55.000Z | 2020-01-21T17:39:55.000Z |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains NcfModelRunner, which can train and evaluate an NCF model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import os
import time
import tensorflow as tf
from tensorflow.contrib.compiler import xla
from official.recommendation import data_preprocessing
from official.recommendation import neumf_model
class NcfModelRunner(object):
"""Creates a graph to train/evaluate an NCF model, and runs it.
This class builds both a training model and evaluation model in the graph.
The two models share variables, so that during evaluation, the trained
variables are used.
"""
# _TrainModelProperties and _EvalModelProperties store useful properties of
# the training and evaluation models, respectively.
# _SHARED_MODEL_PROPERTY_FIELDS is their shared fields.
_SHARED_MODEL_PROPERTY_FIELDS = (
# A scalar tf.string placeholder tensor, that will be fed the path to the
# directory storing the TFRecord files for the input data.
"record_files_placeholder",
# The tf.data.Iterator to iterate over the input data.
"iterator",
# A scalar float tensor representing the model loss.
"loss",
# The batch size, as a Python int.
"batch_size",
# The op to run the model. For the training model, this trains the model
# for one step. For the evaluation model, this computes the metrics and
# updates the metric variables.
"run_model_op")
_TrainModelProperties = namedtuple("_TrainModelProperties", # pylint: disable=invalid-name
_SHARED_MODEL_PROPERTY_FIELDS)
_EvalModelProperties = namedtuple( # pylint: disable=invalid-name
"_EvalModelProperties", _SHARED_MODEL_PROPERTY_FIELDS + (
# A dict from metric name to (metric, update_op) tuple.
"metrics",
# Initializes the metric variables.
"metric_initializer",))
def __init__(self, ncf_dataset, params):
with tf.Graph().as_default() as self._graph:
if params["use_xla_for_gpu"]:
# The XLA functions we use require resource variables.
tf.enable_resource_variables()
self._ncf_dataset = ncf_dataset
self._global_step = tf.train.create_global_step()
self._train_model_properties = self._build_model(params, is_training=True)
self._eval_model_properties = self._build_model(params, is_training=False)
initializer = tf.global_variables_initializer()
self._graph.finalize()
self._session = tf.Session(graph=self._graph)
self._session.run(initializer)
def _build_model(self, params, is_training):
"""Builds the NCF model.
Args:
params: A dict of hyperparameters.
is_training: If True, build the training model. If False, build the
evaluation model.
Returns:
A _TrainModelProperties if is_training is True, or an _EvalModelProperties
otherwise.
"""
record_files_placeholder = tf.placeholder(tf.string, ())
input_fn, _, _ = \
data_preprocessing.make_input_fn(
ncf_dataset=self._ncf_dataset, is_training=is_training,
record_files=record_files_placeholder)
dataset = input_fn(params)
iterator = dataset.make_initializable_iterator()
model_fn = neumf_model.neumf_model_fn
if params["use_xla_for_gpu"]:
model_fn = xla.estimator_model_fn(model_fn)
if is_training:
features, labels = iterator.get_next()
estimator_spec = model_fn(
features, labels, tf.estimator.ModeKeys.TRAIN, params)
with tf.control_dependencies([estimator_spec.train_op]):
run_model_op = self._global_step.assign_add(1)
return self._TrainModelProperties(
record_files_placeholder, iterator,
estimator_spec.loss, params["batch_size"], run_model_op)
else:
features = iterator.get_next()
estimator_spec = model_fn(
features, None, tf.estimator.ModeKeys.EVAL, params)
run_model_op = tf.group(*(update_op for _, update_op in
estimator_spec.eval_metric_ops.values()))
metric_initializer = tf.variables_initializer(
tf.get_collection(tf.GraphKeys.METRIC_VARIABLES))
return self._EvalModelProperties(
record_files_placeholder, iterator, estimator_spec.loss,
params["eval_batch_size"], run_model_op,
estimator_spec.eval_metric_ops, metric_initializer)
def _train_or_eval(self, model_properties, num_steps, is_training):
"""Either trains or evaluates, depending on whether `is_training` is True.
Args:
model_properties: _TrainModelProperties or an _EvalModelProperties
containing the properties of the training or evaluation graph.
num_steps: The number of steps to train or evaluate for.
is_training: If True, run the training model. If False, run the evaluation
model.
Returns:
record_dir: The directory of TFRecords where the training/evaluation input
data was read from.
"""
if self._ncf_dataset is not None:
epoch_metadata, record_dir, template = data_preprocessing.get_epoch_info(
is_training=is_training, ncf_dataset=self._ncf_dataset)
batch_count = epoch_metadata["batch_count"]
if batch_count != num_steps:
raise ValueError(
"Step counts do not match. ({} vs. {}) The async process is "
"producing incorrect shards.".format(batch_count, num_steps))
record_files = os.path.join(record_dir, template.format("*"))
initializer_feed_dict = {
model_properties.record_files_placeholder: record_files}
del batch_count
else:
initializer_feed_dict = None
record_dir = None
self._session.run(model_properties.iterator.initializer,
initializer_feed_dict)
fetches = (model_properties.loss, model_properties.run_model_op)
mode = "Train" if is_training else "Eval"
start = None
for i in range(num_steps):
loss, _, = self._session.run(fetches)
if i % 100 == 0:
if start is None:
# Only start the timer after 100 steps so there is a warmup.
start = time.time()
start_step = i
tf.logging.info("{} Loss = {}".format(mode, loss))
end = time.time()
if start is not None:
print("{} peformance: {} examples/sec".format(
mode, (i - start_step) * model_properties.batch_size / (end - start)))
return record_dir
def train(self, num_train_steps):
"""Trains the graph for a single cycle.
Args:
num_train_steps: The number of steps per cycle to train for.
"""
record_dir = self._train_or_eval(self._train_model_properties,
num_train_steps, is_training=True)
if record_dir:
# We delete the record_dir because each cycle, new TFRecords is generated
# by the async process.
tf.gfile.DeleteRecursively(record_dir)
def eval(self, num_eval_steps):
"""Evaluates the graph on the eval data.
Args:
num_eval_steps: The number of steps to evaluate for.
Returns:
A dict of evaluation results.
"""
self._session.run(self._eval_model_properties.metric_initializer)
self._train_or_eval(self._eval_model_properties, num_eval_steps,
is_training=False)
eval_results = {
'global_step': self._session.run(self._global_step)}
for key, (val, _) in self._eval_model_properties.metrics.items():
val_ = self._session.run(val)
tf.logging.info("{} = {}".format(key, self._session.run(val)))
eval_results[key] = val_
return eval_results
| 40.418269 | 93 | 0.692875 | 7,300 | 0.868324 | 0 | 0 | 0 | 0 | 0 | 0 | 3,473 | 0.413108 |
d20eb1e22a6672296afae7cc1ca61eef92581ba3 | 53,916 | py | Python | resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/MolKit/amberPrmTop.py | J-E-J-S/aaRS-Pipeline | 43f59f28ab06e4b16328c3bc405cdddc6e69ac44 | ["MIT"] | 8 | 2021-12-14T21:30:01.000Z | 2022-02-14T11:30:03.000Z | resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/MolKit/amberPrmTop.py | J-E-J-S/aaRS-Pipeline | 43f59f28ab06e4b16328c3bc405cdddc6e69ac44 | ["MIT"] | null | null | null | resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/MolKit/amberPrmTop.py | J-E-J-S/aaRS-Pipeline | 43f59f28ab06e4b16328c3bc405cdddc6e69ac44 | ["MIT"] | null | null | null |
## Automatically adapted for numpy.oldnumeric Jul 23, 2007 by
############################################################################
#
# Author: Ruth HUEY, Michel F. SANNER
#
# Copyright: M. Sanner TSRI 2001
#
#############################################################################
# $Header: /opt/cvs/python/packages/share1.5/MolKit/amberPrmTop.py,v 1.32 2007/07/24 17:30:40 vareille Exp $
#
# $Id: amberPrmTop.py,v 1.32 2007/07/24 17:30:40 vareille Exp $
#
#from MolKit.molecule import Atom, AtomSet, Bond
from sff.amber import AmberParm
import numpy.oldnumeric as Numeric, types
from math import pi, sqrt, ceil, fabs
from string import split, strip, join
from os.path import basename
from MolKit.data.all_amino94_dat import all_amino94_dat
from MolKit.data.all_aminont94_dat import all_aminont94_dat
from MolKit.data.all_aminoct94_dat import all_aminoct94_dat
class Parm:
"""class to hold parameters for Amber Force Field calcuations
"""
def __init__(self, allDictList = [all_amino94_dat], ntDictList = [all_aminont94_dat],
ctDictList = [all_aminoct94_dat]):
#amber parameter reference dictionaries:
if len(allDictList)==0:
allDict = all_amino94_dat
else:
allDict = allDictList[0]
if type(allDict)==types.StringType:
allDict = self.getDictObj(allDict)
if len(allDictList)>1:
for d in allDictList:
if type(d)== types.StringType:
d = self.getDictObj(d)
allDict.update(d)
#allDict.extend(d)
self.allDict = allDict
if len(ntDictList)==0:
ntDict = all_aminont94_dat
else:
ntDict = ntDictList[0]
if type(ntDict)==types.StringType:
ntDict = self.getDictObj(ntDict)
if len(ntDictList)>1:
for d in ntDictList:
if type(d)== types.StringType:
d = self.getDictObj(d)
ntDict.update(d)
#ntDict.extend(d)
self.ntDict = ntDict
if len(ctDictList)==0:
ctDict = all_aminoct94_dat
else:
ctDict = ctDictList[0]
if type(ctDict)==types.StringType:
ctDict = self.getDictObj(ctDict)
if len(ctDictList)>1:
for d in ctDictList:
if type(d)== types.StringType:
d = self.getDictObj(d)
ctDict.update(d)
#ctDict.extend(d)
self.ctDict = ctDict
#formatD is used for write method
formatD = {}
for k in ['Iac', 'Iblo', 'Cno', 'Ipres', 'ExclAt']:
formatD[k] = ('%6d', 12, 0)
for k in ['Charges', 'Masses', 'Rk', 'Req', 'Tk', 'Teq',\
'Pk', 'Pn', 'Phase', 'Solty', 'Cn1', 'Cn2']:
formatD[k] = ('%16.8E', 5, 0)
for k in ['AtomNames', 'ResNames', 'AtomSym', 'AtomTree']:
formatD[k] = ('%-4.4s', 20, 0)
for k in ['allHBnds', 'allBnds']:
formatD[k] = ('%6d', 12, 3)
for k in ['allHAngs', 'allAngs']:
formatD[k] = ('%6d', 12, 4)
for k in ['allHDihe', 'allDihe']:
formatD[k] = ('%6d', 12, 5)
self.formatD = formatD
#processAtoms results are built in this dictionary
self.prmDict = {}
def getDictObj(self, nmstr):
#mod = __import__('MolKit/data/' + nmstr)
#dict = eval('mod.'+ nmstr)
mod = __import__('MolKit')
b = getattr(mod.data, nmstr)
dict = getattr(b, nmstr)
return dict
def loadFromFile(self, filename):
"""reads a parmtop file"""
self.prmDict = self.py_read(filename)
self.createSffCdataStruct(self.prmDict)
def processAtoms(self, atoms, parmDict=None, reorder=1):
"""finds all Amber parameters for the given set of atoms
parmDict is parm94_dat """
if atoms:
self.build(atoms, parmDict, reorder)
self.createSffCdataStruct(self.prmDict)
print 'after call to createSffCdataStruct'
def checkSanity(self):
d = self.prmDict
#length checks:
Natom = d['Natom']
assert len(d['Charges']) == Natom
assert len(d['Masses']) == Natom
assert len(d['Iac']) == Natom
assert len(d['Iblo']) == Natom
assert len(d['AtomRes']) == Natom
assert len(d['N14pairs']) == Natom
assert len(d['TreeJoin']) == Natom
Nres = d['Nres']
assert len(d['Ipres']) == Nres + 1
assert len(d['AtomNames']) == Natom * 4 + 81
assert len(d['AtomSym']) == Natom * 4 + 81
assert len(d['AtomTree']) == Natom * 4 + 81
assert len(d['ResNames']) == Nres * 4 + 81
#Ntypes is number of unique amber_types w/equiv replacement
Ntypes = d['Ntypes']
assert len(d['Cno']) == Ntypes**2
assert len(d['ExclAt']) == d['Nnb']
assert len(d['Cn1']) == Ntypes*(Ntypes+1)/2.
assert len(d['Cn2']) == Ntypes*(Ntypes+1)/2.
#Numbnd is number of bnd types
Numbnd = d['Numbnd']
assert len(d['Rk']) == Numbnd
assert len(d['Req']) == Numbnd
#Numang is number of angle types
Numang = d['Numang']
assert len(d['Tk']) == Numang
assert len(d['Teq']) == Numang
#Numptra is number of dihe types
Nptra = d['Nptra']
assert len(d['Pk']) == Nptra
assert len(d['Pn']) == Nptra
assert len(d['Phase']) == Nptra
assert len(d['Solty']) == d['Natyp']
#Nbona is number of bonds w/out H
Nbona = d['Nbona']
assert len(d['BondAt1']) == Nbona
assert len(d['BondAt2']) == Nbona
assert len(d['BondNum']) == Nbona
#Nbonh is number of bonds w/ H
Nbonh = d['Nbonh']
assert len(d['BondHAt1']) == Nbonh
assert len(d['BondHAt2']) == Nbonh
assert len(d['BondHNum']) == Nbonh
#Ntheta is number of angles w/out H
Ntheta = d['Ntheta']
assert len(d['AngleAt1']) == Ntheta
assert len(d['AngleAt2']) == Ntheta
assert len(d['AngleAt3']) == Ntheta
assert len(d['AngleNum']) == Ntheta
#Ntheth is number of angles w/ H
Ntheth = d['Ntheth']
assert len(d['AngleHAt1']) == Ntheth
assert len(d['AngleHAt2']) == Ntheth
assert len(d['AngleHAt3']) == Ntheth
assert len(d['AngleHNum']) == Ntheth
#Nphia is number of dihedrals w/out H
Nphia = d['Nphia']
assert len(d['DihAt1']) == Nphia
assert len(d['DihAt2']) == Nphia
assert len(d['DihAt3']) == Nphia
assert len(d['DihAt4']) == Nphia
assert len(d['DihNum']) == Nphia
#Nphih is number of dihedrals w/ H
Nphih = d['Nphih']
assert len(d['DihHAt1']) == Nphih
assert len(d['DihHAt2']) == Nphih
assert len(d['DihHAt3']) == Nphih
assert len(d['DihHAt4']) == Nphih
assert len(d['DihHNum']) == Nphih
##WHAT ABOUT HB10, HB12, N14pairs, N14pairlist
#value based on length checks:
#all values of BondNum and BondHNum in range (1, Numbnd)
for v in d['BondNum']:
assert v >0 and v < Numbnd + 1
for v in d['BondHNum']:
assert v >0 and v < Numbnd + 1
#all values of AngleNum and AngleHNum in range (1, Numang)
for v in d['AngleNum']:
assert v >0 and v < Numang + 1
for v in d['AngleHNum']:
assert v >0 and v < Numang + 1
#all values of DihNum and DihHNum in range (1, Nptra)
for v in d['DihNum']:
assert v >0 and v < Nptra + 1
for v in d['DihHNum']:
assert v >0 and v < Nptra + 1
def createSffCdataStruct(self, dict):
"""Create a C prm data structure"""
print 'in createSffCdataStruct'
self.ambPrm = AmberParm('test1', parmdict=dict)
print 'after call to init'
def build(self, allAtoms, parmDict, reorder):
# find out amber special residue name and
# order the atoms inside a residue to follow the Amber convention
self.residues = allAtoms.parent.uniq()
self.residues.sort()
self.fixResNamesAndOrderAtoms(reorder)
# save ordered chains
self.chains = self.residues.parent.uniq()
self.chains.sort()
# save ordered atoms
self.atoms = self.residues.atoms
# renumber them
self.atoms.number = range(1, len(allAtoms)+1)
print 'after call to checkRes'
self.getTopology(self.atoms, parmDict)
print 'after call to getTopology'
if reorder:
self.checkSanity()
print 'passed sanity check'
else:
print 'skipping sanity check'
return
def reorderAtoms(self, res, atList):
ats = []
rlen = len(res.atoms)
if rlen!=len(atList):
print "atoms missing in residue", res
print "expected:", atList
print "found :", res.atoms.name
for i in range(rlen):
a = atList[i]
for j in range(rlen):
b = res.atoms[j]
# DON'T rename HN atom H, HN1->H1, etc...
# use editCommands instead
#if b.name=='HN': b.name='H'
#elif len(b.name)==3 and b.name[:2]=='HN':
#b.name ='H'+b.name[2]
if b.name==a:
ats.append(b)
break
if len(ats)==len(res.atoms):
res.children.data = ats
res.atoms.data = ats
def fixResNamesAndOrderAtoms(self, reorder):
# level list of atom names used to rename residues
# check is HIS is HIS, HID, HIP, HIE, etc...
residues = self.residues
last = len(residues)-1
for i in range(len(residues)):
residue = residues[i]
chNames = residue.atoms.name
amberResType = residue.type
if amberResType=='CYS':
returnVal = 'CYS'
#3/21:
if 'HSG' in chNames or 'HG' in chNames:
amberResType ='CYS'
elif 'HN' in chNames:
amberResType = 'CYM'
else:
amberResType = 'CYX'
elif amberResType=='LYS':
# THIS DOESN'T SUPPORT LYH assigned in all.in
returnVal = 'LYS'
if 'HZ1' in chNames or 'HZN1' in chNames:
amberResType ='LYS'
else:
amberResType ='LYN'
elif amberResType=='ASP':
returnVal = 'ASP'
#3/21
if 'HD' in chNames or 'HD2' in chNames:
amberResType ='ASH'
else:
amberResType ='ASP'
elif amberResType=='GLU':
returnVal = 'GLU'
#3/21
if 'HE' in chNames or 'HE2' in chNames:
amberResType ='GLH'
else:
amberResType ='GLU'
elif amberResType=='HIS':
returnVal = 'HIS'
hasHD1 = 'HD1' in chNames
hasHD2 = 'HD2' in chNames
hasHE1 = 'HE1' in chNames
hasHE2 = 'HE2' in chNames
if hasHD1 and hasHE1:
if hasHD2 and not hasHE2:
amberResType = 'HID'
elif hasHD2 and hasHE2:
amberResType = 'HIP'
elif (not hasHD1) and (hasHE1 and hasHD2 and hasHE2):
amberResType = 'HIE'
else:
print 'unknown HISTIDINE config'
raise ValueError
residue.amber_type = amberResType
if residue == residue.parent.residues[0]:
residue.amber_dict = self.ntDict[amberResType]
elif residue == residue.parent.residues[-1]:
residue.amber_dict = self.ctDict[amberResType]
else:
residue.amber_dict = self.allDict[amberResType]
if reorder:
self.reorderAtoms(residue, residue.amber_dict['atNameList'])
def processChain(self, residues, parmDict):
#this should be called with a list of residues representing a chain
# NOTE: self.parmDict is parm94 which was parsed by Ruth while parmDict is
# MolKit.parm94_dat.py
dict = self.prmDict
#residues = self.residues
# initialize
atNames = ''
atSym = ''
atTree = ''
resname = ''
masses = dict['Masses']
charges = dict['Charges']
uniqList = []
uniqTypes = {} # used to build list with equivalent names removed
atypTypes = {} # used to build list without equivalent names removed
allTypeList = [] # list of all types
last = len(residues)-1
dict['Nres'] = dict['Nres'] + last + 1
atres = dict['AtomRes']
ipres = dict['Ipres']
maxResLen = 0
for i in range(last+1):
res = residues[i]
atoms = res.atoms
nbat = len(atoms)
if nbat > maxResLen: maxResLen = nbat
ipres.append(ipres[-1]+nbat)
resname = resname + res.amber_type + ' '
ad = res.amber_dict
pdm = parmDict.atomTypes
for a in atoms:
# get the amber atom type
name = a.name
atres.append(i+1)
atNames = atNames+'%-4s'%name
atD = ad[name]
a.amber_type = newtype = '%-2s'%atD['type']
chg = a._charges['amber'] = atD['charge']*18.2223
charges.append(chg)
mas = a.mass = pdm[newtype][0]
masses.append(mas)
atTree = atTree+'%-4.4s'%atD['tree']
allTypeList.append(newtype)
atSym = atSym+'%-4s'%newtype
symb = newtype[0]
if symb in parmDict.AtomEquiv.keys():
if newtype in parmDict.AtomEquiv[symb]:
newsym = symb + ' '
uniqTypes[symb+' '] = 0
a.amber_symbol = symb+' '
if newsym not in uniqList:
uniqList.append(newsym)
else:
uniqTypes[newtype] = 0
a.amber_symbol = newtype
if newtype not in uniqList:
uniqList.append(newtype)
else:
uniqTypes[newtype] = 0
a.amber_symbol = newtype
if newtype not in uniqList: uniqList.append(newtype)
# to get uniq list of all types w/out equiv replacement
atypTypes[newtype] = 0
        # post processing of some variables
dict['AtomNames'] = dict['AtomNames'] + atNames
dict['AtomSym'] = dict['AtomSym'] + atSym
dict['AtomTree'] = dict['AtomTree'] + atTree
dict['ResNames'] = dict['ResNames'] + resname
# save list of unique types for later use
###1/10:
#self.uniqTypeList = uniqList
uL = self.uniqTypeList
for t in uniqList:
if t not in uL:
uL.append(t)
#self.uniqTypeList = uniqTypes.keys()
self.uniqTypeList = uL
ntypes = len(uL)
dict['Ntypes'] = ntypes
aL = self.atypList
for t in atypTypes.keys():
if t not in aL:
aL.append(t)
self.atypList = aL
dict['Natyp'] = len(aL)
dict['Ntype2d'] = ntypes*ntypes
dict['Nttyp'] = ntypes * (ntypes+1)/2
if maxResLen > dict['Nmxrs']:
dict['Nmxrs'] = maxResLen
newtypelist = []
for t in residues.atoms.amber_symbol:
# Iac is 1-based
newtypelist.append( self.uniqTypeList.index(t) + 1 )
###1/10:
#dict['Iac'] = newtypelist
dict['Iac'].extend( newtypelist)
def processBonds(self, bonds, parmDict):
        # NOTE: self.parmDict is parm94 parsed by Ruth while parmDict is
        # MolKit.parm94_dat.py
dict = self.prmDict
bat1 = dict['BondAt1']
bat2 = dict['BondAt2']
bnum = dict['BondNum']
batH1 = dict['BondHAt1']
batH2 = dict['BondHAt2']
bHnum = dict['BondHNum']
rk = dict['Rk']
req = dict['Req']
bndTypes = {} # used to build a unique list of bond types
btDict = parmDict.bondTypes #needed to check for wildcard * in type
for b in bonds:
a1 = b.atom1
#t1 = a1.amber_symbol
t1 = a1.amber_type
a2 = b.atom2
#t2 = a2.amber_symbol
t2 = a2.amber_type
if t1<t2:
newtype = '%-2.2s-%-2.2s'%(t1,t2)
else:
newtype = '%-2.2s-%-2.2s'%(t2,t1)
bndTypes[newtype] = 0
n1 = (a1.number-1)*3
n2 = (a2.number-1)*3
if n2<n1: tmp=n1; n1=n2; n2=tmp
if a1.element=='H' or a2.element=='H':
bHnum.append(newtype)
batH1.append(n1)
batH2.append(n2)
else:
bnum.append(newtype)
bat1.append(n1)
bat2.append(n2)
dict['Numbnd'] = len(bndTypes)
btlist = bndTypes.keys()
for bt in btlist:
rk.append( btDict[bt][0] )
req.append( btDict[bt][1] )
newbnum = []
for b in bnum:
newbnum.append( btlist.index(b) + 1 )
dict['BondNum'] = newbnum
newbnum = []
for b in bHnum:
newbnum.append( btlist.index(b) + 1 )
dict['BondHNum'] = newbnum
return
def processAngles(self, allAtoms, parmDict):
dict = self.prmDict
aa1 = dict['AngleAt1']
aa2 = dict['AngleAt2']
aa3 = dict['AngleAt3']
anum = dict['AngleNum']
aHa1 = dict['AngleHAt1']
aHa2 = dict['AngleHAt2']
aHa3 = dict['AngleHAt3']
aHnum = dict['AngleHNum']
tk = dict['Tk']
teq = dict['Teq']
angTypes = {}
atdict = parmDict.bondAngles
for a1 in allAtoms:
t1 = a1.amber_type
for b in a1.bonds:
a2 = b.atom1
if id(a2)==id(a1): a2=b.atom2
t2 = a2.amber_type
for b2 in a2.bonds:
a3 = b2.atom1
if id(a3)==id(a2): a3=b2.atom2
if id(a3)==id(a1): continue
if a1.number > a3.number: continue
t3 = a3.amber_type
nn1 = n1 = (a1.number-1)*3
nn2 = n2 = (a2.number-1)*3
nn3 = n3 = (a3.number-1)*3
if n3<n1:
nn3 = n1
nn1 = n3
rev = 0
if (t1==t3 and a1.name > a3.name) or t3 < t1:
rev = 1
if rev:
newtype = '%-2.2s-%-2.2s-%-2.2s'%(t3,t2,t1)
else:
newtype = '%-2.2s-%-2.2s-%-2.2s'%(t1, t2, t3)
#have to check for wildcard *
angTypes[newtype] = 0
if a1.element=='H' or a2.element=='H' or a3.element=='H':
aHa1.append( nn1 )
aHa2.append( nn2 )
aHa3.append( nn3 )
aHnum.append(newtype)
else:
aa1.append( nn1 )
aa2.append( nn2 )
aa3.append( nn3 )
anum.append(newtype)
atlist = angTypes.keys()
torad = pi / 180.0
atKeys = atdict.keys()
for t in atlist:
tk.append( atdict[t][0] )
teq.append( atdict[t][1]*torad )
anewlist = []
for a in anum:
anewlist.append( atlist.index( a ) + 1 )
dict['AngleNum'] = anewlist
anewlist = []
for a in aHnum:
anewlist.append( atlist.index( a ) + 1 )
dict['AngleHNum'] = anewlist
dict['Numang'] = len(atlist)
dict['Ntheth'] = len(aHa1)
dict['Mtheta'] = len(aa1)
dict['Ntheta'] = len(aa1)
return
def checkDiheType(self, t, t2, t3, t4, dict):
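        # look up a dihedral type key: try the explicit atom-type combination,
        # its reverse, and then the wildcard ('X') variants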
#zero X
newtype = '%-2.2s-%-2.2s-%-2.2s-%-2.2s'%(t,t2,t3,t4)
if dict.has_key(newtype): return newtype
newtype = '%-2.2s-%-2.2s-%-2.2s-%-2.2s'%(t4,t3,t2,t)
if dict.has_key(newtype): return newtype
#X
newtype = '%-2.2s-%-2.2s-%-2.2s-%-2.2s'%('X',t2,t3,t4)
if dict.has_key(newtype): return newtype
newtype = '%-2.2s-%-2.2s-%-2.2s-%-2.2s'%('X',t3,t2,t)
if dict.has_key(newtype): return newtype
#2X
newtype = '%-2.2s-%-2.2s-%-2.2s-%-2.2s'%('X',t2,t3,'X')
if dict.has_key(newtype): return newtype
newtype = '%-2.2s-%-2.2s-%-2.2s-%-2.2s'%('X',t3,t2,'X')
if dict.has_key(newtype): return newtype
newtype = '%-2.2s-%-2.2s-%-2.2s-%-2.2s'%('X','X',t3,t4)
if dict.has_key(newtype): return newtype
newtype = '%-2.2s-%-2.2s-%-2.2s-%-2.2s'%('X','X',t2,t)
if dict.has_key(newtype): return newtype
raise RuntimeError('dihedral type not in dictionary')
## it is slower to check a list if the key is in there than to ask a
## dictionary if it has this key
##
## keys = dict.keys()
## #zero X
## newtype = '%-2.2s-%-2.2s-%-2.2s-%-2.2s'%(t,t2,t3,t4)
## if newtype in keys:
## return newtype
## newtype2 = '%-2.2s-%-2.2s-%-2.2s-%-2.2s'%(t4,t3,t2,t)
## if newtype2 in keys:
## return newtype2
## #X
## newtypeX = '%-2.2s-%-2.2s-%-2.2s-%-2.2s'%('X',t2,t3,t4)
## if newtypeX in keys:
## return newtypeX
## newtype2X = '%-2.2s-%-2.2s-%-2.2s-%-2.2s'%('X',t3,t2,t)
## if newtype2X in keys:
## return newtype2X
## #2X
## newtypeX_X = '%-2.2s-%-2.2s-%-2.2s-%-2.2s'%('X',t2,t3,'X')
## if newtypeX_X in keys:
## return newtypeX_X
## newtype2X_X = '%-2.2s-%-2.2s-%-2.2s-%-2.2s'%('X',t3,t2,'X')
## if newtype2X_X in keys:
## return newtype2X_X
## newtypeXX = '%-2.2s-%-2.2s-%-2.2s-%-2.2s'%('X','X',t3,t4)
## if newtypeXX in keys:
## return newtypeXX
## newtype2XX = '%-2.2s-%-2.2s-%-2.2s-%-2.2s'%('X','X',t2,t)
## if newtype2XX in keys:
## return newtype2XX
##         raise RuntimeError('dihedral type not in dictionary')
def processTorsions(self, allAtoms, parmDict):
        # find torsions and also excluded atoms
dict = self.prmDict
foundDihedTypes = {}
ta1 = dict['DihAt1']
ta2 = dict['DihAt2']
ta3 = dict['DihAt3']
ta4 = dict['DihAt4']
tnum = dict['DihNum']
taH1 = dict['DihHAt1']
taH2 = dict['DihHAt2']
taH3 = dict['DihHAt3']
taH4 = dict['DihHAt4']
tHnum = dict['DihHNum']
nb14 = dict['N14pairs']
n14list = dict['N14pairlist']
iblo = dict['Iblo']
exclAt = dict['ExclAt']
dihedTypes = parmDict.dihedTypes
for a1 in allAtoms:
n14 = []
excl = []
t1 = a1.amber_type
restyp = a1.parent.type
if restyp in ['PRO', 'TRP', 'HID', 'HIE', 'HIP']:
ringlist = self.AA5rings[restyp]
else:
ringlist = None
for b in a1.bonds:
a2 = b.atom1
if id(a2)==id(a1): a2=b.atom2
t2 = a2.amber_type
if a2.number > a1.number: excl.append(a2.number)
for b2 in a2.bonds:
a3 = b2.atom1
if id(a3)==id(a2): a3=b2.atom2
if id(a3)==id(a1): continue
if a3.number > a1.number: excl.append(a3.number)
t3 = a3.amber_type
for b3 in a3.bonds:
a4 = b3.atom1
if id(a4)==id(a3): a4=b3.atom2
if id(a4)==id(a2): continue
if id(a4)==id(a1): continue
if a1.number > a4.number: continue
excl.append(a4.number)
t4 = a4.amber_type
newtype = '%-2.2s-%-2.2s-%-2.2s-%-2.2s'%(t1,t2,t3,t4)
dtype = self.checkDiheType(t1,t2,t3,t4,dihedTypes)
for i in range(len(dihedTypes[dtype])):
tname = dtype+'_'+str(i)
foundDihedTypes[tname] = 0
sign3 = 1
period = dihedTypes[dtype][i][3]
if period < 0.0: sign3= -1
if a4.parent==a1.parent:
if ringlist and a4.name in ringlist \
and a1.name in ringlist:
sign3= -1
if a1.element=='H' or a2.element=='H' or \
a3.element=='H' or a4.element=='H':
taH1.append( (a1.number-1)*3 )
taH2.append( (a2.number-1)*3 )
taH3.append( sign3*(a3.number-1)*3 )
taH4.append( (a4.number-1)*3 )
tHnum.append( tname )
else:
ta1.append( (a1.number-1)*3 )
ta2.append( (a2.number-1)*3 )
ta3.append( sign3*(a3.number-1)*3 )
ta4.append( (a4.number-1)*3 )
tnum.append( tname )
if sign3>0.0:
                                    # this trick works only for 6-membered rings and
                                    # prevents adding 1-4 interactions twice between
                                    # atoms in the ring; PRO, TRP, HIS and co. have
                                    # to be handled separately
num = a4.number-1
if num not in n14:
n14.append( num )
else: # make 3rd atom in torsion negative
ta3[-1] = -ta3[-1]
if len(excl):
# excl can contain duplicated values (pro tyr phe cycles)
                # we also sort the values (probably only cosmetics)
excl.sort()
last = excl[0]
uexcl = [last]
for i in range(1,len(excl)):
if excl[i]!=last:
last = excl[i]
uexcl.append(last)
iblo.append(len(uexcl))
exclAt.extend(uexcl)
else:
iblo.append( 1 )
exclAt.append( 0 )
nb14.append(len(n14))
##!##1/28: n14.sort()
n14list.extend(n14)
        # remember how many proper dihedrals
lastProper = len(tnum)
lastHProper = len(tHnum)
# loop over residues to add improper torsions
sumAts = 0
foundImproperDihedTypes = {}
for res in self.residues:
foundImproperDihedTypes = self.getImpropTors(
res, sumAts, foundImproperDihedTypes, parmDict)
sumAts = sumAts + len(res.atoms)
#typeDict = foundDihedTypes.copy()
#typeDict.update(foundImproperDihedTypes)
#print typeDict.keys()
dict['Nptra'] = len(foundDihedTypes) + len(foundImproperDihedTypes)
dict['Mphia'] = dict['Nphia'] = len(ta1)
dict['Nphih'] = len(taH1)
pn = dict['Pn']
pk = dict['Pk']
phase = dict['Phase']
dtlist = foundDihedTypes.keys()
torad = pi/180.
for t in dtlist:
index = int(t[-1])
val = dihedTypes[t[:-2]][index] # remove the '_x'
pk.append(val[1]/val[0])
phase.append(val[2]*torad)
pn.append(fabs(val[3]))
dihedTypes = parmDict.improperDihed
dtlist1 = foundImproperDihedTypes.keys()
for t in dtlist1:
val = dihedTypes[t]
pk.append(val[0])
phase.append(val[1]*torad)
pn.append(val[2])
typenum = []
dtlist = dtlist + dtlist1
for t in tnum:
typenum.append( dtlist.index(t) + 1 ) # types are 1-based
dict['DihNum'] = typenum
typenum = []
for t in tHnum:
typenum.append( dtlist.index(t) + 1 ) # types are 1-based
dict['DihHNum'] = typenum
dict['Nnb'] = len(dict['ExclAt'])
#print len(tnum), len(dict['DihNum'])
return
def getImpropTors(self, res, sumAts, foundDihedTypes, parmDict):
#eg tList:[['CA','+M','C','0'],['-M','CA','N','H']]
dict = self.prmDict
offset = sumAts * 3
nameList = res.atoms.name
typeList = res.atoms.amber_type
ta1 = dict['DihAt1']
ta2 = dict['DihAt2']
ta3 = dict['DihAt3']
ta4 = dict['DihAt4']
tnum = dict['DihNum']
taH1 = dict['DihHAt1']
taH2 = dict['DihHAt2']
taH3 = dict['DihHAt3']
taH4 = dict['DihHAt4']
tHnum = dict['DihHNum']
dihedTypes = parmDict.improperDihed
atNameList = res.amber_dict['atNameList']
resat = res.atoms
for item in res.amber_dict['impropTors']:
atomNum = []
atomType = []
newTors = []
offset = res.atoms[0].number
#use hasH to detect 'HZ2' etc
hasH = 0
for t in item:
if t[0]=='H': hasH = 1
if len(t)==2 and t[1]=='M':
if t[0]=='-':
atomType.append('C ')
atomNum.append(offset - 2)
else:
atomType.append('N ')
atomNum.append(offset + len(res.atoms) )
else:
atIndex = atNameList.index(t)
atom = resat[atIndex]
atomType.append(atom.amber_type)
atomNum.append( atom.number )
newType = self.checkDiheType(atomType[0], atomType[1],
atomType[2], atomType[3],
dihedTypes)
foundDihedTypes[newType] = 0
if hasH:
taH1.append( (atomNum[0]-1)*3 )
taH2.append( (atomNum[1]-1)*3 )
taH3.append(-(atomNum[2]-1)*3 )
taH4.append(-(atomNum[3]-1)*3 )
tHnum.append(newType)
else:
ta1.append( (atomNum[0]-1)*3 )
ta2.append( (atomNum[1]-1)*3 )
ta3.append(-(atomNum[2]-1)*3 )
ta4.append(-(atomNum[3]-1)*3 )
tnum.append(newType)
return foundDihedTypes
def getTopology(self, allAtoms, parmDict):
dict = self.prmDict
dict['ititl'] = allAtoms.top.uniq()[0].name + '.prmtop\n'
natom = dict['Natom'] = len(allAtoms)
dict['Nat3'] = natom * 3
dict['AtomNames'] = ''
dict['AtomSym'] = ''
dict['AtomTree'] = ''
dict['Ntypes'] = 0
dict['Natyp'] = 0
dict['Ntype2d'] = 0
dict['Nttyp'] = 0
dict['Masses'] = []
dict['Charges'] = []
dict['Nres'] = 0
dict['AtomRes'] = []
dict['ResNames'] = ''
dict['Ipres'] = [1]
dict['Nmxrs'] = 0
###1/10:
dict['Iac'] = []
self.uniqTypeList = []
#used for construction of Natyp
self.atypList = []
        # fill all arrays that are of len natom
# we have to call for each chain
for ch in self.chains:
self.processChain( ch.residues, parmDict)
#PAD AtomNames with 81 spaces
dict['AtomNames'] = dict['AtomNames'] + 81*' '
dict['AtomSym'] = dict['AtomSym'] + 81*' '
dict['AtomTree'] = dict['AtomTree'] + 81*' '
dict['ResNames'] = dict['ResNames'] + 81*' '
# create Iac list
#iac = []
#tl = self.uniqTypeList
#for a in allAtoms:
# iac.append( tl.index(a.amber_symbol) + 1 )
# delattr(a, 'amber_symbol')
#dict['Iac'] = iac
# to find out the number of bonds with hydrogen we simply count the
# number of hydrogen atoms
hlist = allAtoms.get(lambda x: x.element=='H')
if hlist is not None and len(hlist):
dict['Nbonh'] = numHs = len(hlist)
else:
numHs = 0
# number of bonds not involving an H atom
bonds = allAtoms.bonds[0]
dict['Mbona'] = len(bonds) - numHs
        # since no bonds are constrained, Nbona==Mbona
dict['Nbona'] = dict['Mbona']
print 'after call to processChain'
        # now process bond info
dict['BondAt1'] = []
dict['BondAt2'] = []
dict['BondNum'] = []
dict['BondHAt1'] = []
dict['BondHAt2'] = []
dict['BondHNum'] = []
dict['Rk'] = []
dict['Req'] = []
self.processBonds(bonds, parmDict)
print 'after call to processBonds'
# now process the angles
dict['AngleAt1'] = []
dict['AngleAt2'] = []
dict['AngleAt3'] = []
dict['AngleNum'] = []
dict['AngleHAt1'] = []
dict['AngleHAt2'] = []
dict['AngleHAt3'] = []
dict['AngleHNum'] = []
dict['Tk'] = []
dict['Teq'] = []
self.processAngles(allAtoms, parmDict)
print 'after call to processAngles'
# now handle the torsions
dict['Nhparm'] = 0
dict['Nparm'] = 0
dict['DihAt1'] = []
dict['DihAt2'] = []
dict['DihAt3'] = []
dict['DihAt4'] = []
dict['DihNum'] = []
dict['DihHAt1'] = []
dict['DihHAt2'] = []
dict['DihHAt3'] = []
dict['DihHAt4'] = []
dict['DihHNum'] = []
dict['Pn'] = []
dict['Pk'] = []
dict['Phase'] = []
dict['Nphih'] = dict['Mphia'] = dict['Nphia'] = dict['Nptra'] = 0
dict['N14pairs'] = []
dict['N14pairlist'] = []
dict['Nnb'] =0
dict['Iblo'] = []
dict['ExclAt'] = []
# FIXME
self.AA5rings ={
'PRO':['N', 'CA', 'CB', 'CG', 'CD'],
'TRP':['CG', 'CD1', 'CD2', 'NE1', 'CE2'],
'HID':['CG', 'ND1', 'CE1', 'NE2', 'CD2'],
'HIE':['CG', 'ND1', 'CE1', 'NE2', 'CD2'],
'HIP':['CG', 'ND1', 'CE1', 'NE2', 'CD2']
}
self.processTorsions(allAtoms, parmDict)
print 'after call to processTorsions'
# some unused values
dict['Nspm'] = 1
dict['Box'] = [0., 0., 0.]
dict['Boundary'] = [natom]
dict['TreeJoin'] = range(natom)
dict['Nphb'] = 0
dict['HB12'] = []
dict['HB10'] = []
llist = ['Ifpert', 'Nbper','Ngper','Ndper','Mbper', 'Mgper',
'Mdper','IfBox', 'IfCap', 'Cutcap', 'Xcap', 'Ycap',
'Zcap', 'Natcap','Ipatm', 'Nspsol','Iptres']
for item in llist:
dict[item] = 0
dict['Cno'] = self.getICO( dict['Ntypes'] )
dict['Solty'] = self.getSOLTY( dict['Natyp'] )
dict['Cn1'], dict['Cn2'] = self.getCNList(parmDict)
return
def getICO(self, ntypes):
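        # build the symmetric ntypes x ntypes index table (ICO) mapping each
        # pair of atom types to a 1-based index into the packed nonbonded
        # parameter lists (Cn1/Cn2); returned flattened, row by row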
ct = 1
icoArray = Numeric.zeros((ntypes, ntypes), 'i')
for i in range(1, ntypes+1):
for j in range(1, i+1):
icoArray[i-1,j-1]=ct
icoArray[j-1,i-1]=ct
ct = ct+1
return icoArray.ravel().tolist()
def getSOLTY(self, ntypes):
soltyList = []
for i in range(ntypes):
soltyList.append(0.)
return soltyList
def getCN(self, type1, type2, pow, parmDict, factor=1):
#pow is 12 or 6
#factor is 1 except when pow is 6
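        # combine the per-type well depths and radii: eps = sqrt(eps1*eps2),
        # rij = r1 + r2, value = factor * eps * rij**pow
        # (pow=12 gives the Cn1 term, pow=6 with factor=2 the Cn2 term)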
d = parmDict.potParam
if type1=='N3': type1='N '
if type2=='N3': type2='N '
r1, eps1 = d[type1][:2]
r2, eps2 = d[type2][:2]
eps = sqrt(eps1*eps2)
rij = r1 + r2
newval = factor*eps*rij**pow
return newval
def getCNList(self, parmDict):
ntypes = len(self.uniqTypeList)
ct = 1
## size = self.prmDict['Nttyp']
## cn1List = [0]*size
## cn2List = [0]*size
## iac = self.prmDict['Iac']
## cno = self.prmDict['Cno']
## for i in range(ntypes):
## indi = i*ntypes
## ival = self.uniqTypeList[i]
## for j in range(i, ntypes):
## jval = self.uniqTypeList[j]
## ind = cno[indi+j]-1
## cn1List[ind] = self.getCN(jval, ival, 12, parmDict)
## cn2List[ind] = self.getCN(jval, ival, 6, parmDict, 2)
nttyp = self.prmDict['Nttyp']
cn1List = []
cn2List = []
for j in range(ntypes):
jval = self.uniqTypeList[j]
for i in range(j+1):
ival = self.uniqTypeList[i]
cn1List.append(self.getCN(ival, jval, 12, parmDict))
cn2List.append(self.getCN(ival, jval, 6, parmDict, 2))
return cn1List, cn2List
def readSummary(self, allLines, dict):
#set summary numbers
ll = split(allLines[1])
assert len(ll)==12
#FIX THESE NAMES!
natom = dict['Natom'] = int(ll[0])
ntypes = dict['Ntypes'] = int(ll[1])
nbonh = dict['Nbonh'] = int(ll[2])
dict['Mbona'] = int(ll[3])
ntheth = dict['Ntheth'] = int(ll[4])
dict['Mtheta'] = int(ll[5])
nphih = dict['Nphih'] = int(ll[6])
dict['Mphia'] = int(ll[7])
dict['Nhparm'] = int(ll[8])
dict['Nparm'] = int(ll[9])
#called 'next' in some documentation
#NEXT-> Nnb
next = dict['Nnb'] = int(ll[10])
dict['Nres'] = int(ll[11])
ll = split(allLines[2])
assert len(ll)==12
nbona = dict['Nbona'] = int(ll[0])
ntheta = dict['Ntheta'] = int(ll[1])
nphia = dict['Nphia'] = int(ll[2])
numbnd = dict['Numbnd'] = int(ll[3])
numang = dict['Numang'] = int(ll[4])
numptra = dict['Nptra'] = int(ll[5])
natyp = dict['Natyp'] = int(ll[6])
dict['Nphb'] = int(ll[7])
dict['Ifpert'] = int(ll[8])
dict['Nbper'] = int(ll[9])
dict['Ngper'] = int(ll[10])
dict['Ndper'] = int(ll[11])
ll = split(allLines[3])
assert len(ll)==6
dict['Mbper'] = int(ll[0])
dict['Mgper'] = int(ll[1])
dict['Mdper'] = int(ll[2])
dict['IfBox'] = int(ll[3])
dict['Nmxrs'] = int(ll[4])
dict['IfCap'] = int(ll[5])
return dict
def readIGRAPH(self, allLines, numIGRAPH, ind=3):
#the names are not necessarily whitespace delimited
igraph = []
for i in range(numIGRAPH):
ind = ind + 1
l = allLines[ind]
for k in range(20):
it = l[k*4:k*4+4]
igraph.append(strip(it))
#igraph.extend(split(l))
return igraph, ind
def readCHRG(self, allLines, ind, numCHRG, natom):
chrg = []
ct = 0
for i in range(numCHRG):
ind = ind + 1
l = allLines[ind]
newl = []
# build 5 charges per line if enough are left
#otherwise, build the last line's worth
if natom - ct >=5:
rct = 5
else:
rct = natom - ct
for q in range(rct):
lindex = q*16
item = l[lindex:lindex+16]
newl.append(float(item))
ct = ct + 1
chrg.extend(newl)
return chrg, ind
def readNUMEX(self, allLines, ind, numIAC):
numex = []
NumexSUM = 0
for i in range(numIAC):
ind = ind + 1
ll = split(allLines[ind])
newl = []
for item in ll:
newent = int(item)
newl.append(newent)
NumexSUM = NumexSUM + newent
numex.extend(newl)
return numex, ind, NumexSUM
def readLABRES(self, allLines, ind):
done = 0
labres = []
while not done:
ind = ind + 1
ll = split(allLines[ind])
try:
int(ll[0])
done = 1
break
except ValueError:
labres.extend(ll)
#correct for 1 extra line read here
ind = ind - 1
return labres, ind
def readFList(self, allLines, ind, numITEMS):
v = []
for i in range(numITEMS):
ind = ind + 1
ll = split(allLines[ind])
newl = []
for item in ll:
newl.append(float(item))
v.extend(newl)
return v, ind
def readIList(self, allLines, ind, numITEMS):
v = []
for i in range(numITEMS):
ind = ind + 1
ll = split(allLines[ind])
newl = []
for item in ll:
newl.append(int(item))
v.extend(newl)
return v, ind
def readILList(self, allLines, ind, numITEMS, n):
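        # read numITEMS lines of integers and de-interleave them round-robin
        # into n parallel lists (atom indices plus type index for the
        # bond/angle/dihedral tables)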
bhlist = []
for i in range(n):
bhlist.append([])
ct = 0
for i in range(numITEMS):
ind = ind + 1
ll = split(allLines[ind])
for j in range(len(ll)):
item = ll[j]
newl = bhlist[ct%n]
newl.append(int(item))
ct = ct + 1
return bhlist, ind
def py_read(self, filename, **kw):
#??dict['Iptres'] #dict['Nspsol'] #dict['Ipatm'] #dict['Natcap']
f = open(filename, 'r')
allLines = f.readlines()
f.close()
dict = {}
#set title
dict['ititl'] = allLines[0]
#get summary numbers
dict = self.readSummary(allLines, dict)
#set up convenience fields:
natom = dict['Natom']
ntypes = dict['Ntypes']
dict['Nat3'] = natom * 3
dict['Ntype2d'] = ntypes ** 2
nttyp = dict['Nttyp'] = ntypes * (ntypes+1)/2
# read IGRAPH->AtomNames
numIGRAPH = int(ceil((natom*1.)/20.))
anames, ind = self.readIGRAPH(allLines, numIGRAPH)
dict['AtomNames'] = join(anames)
# read CHRG->Charges
numCHRG = int(ceil((natom*1.)/5.))
dict['Charges'], ind = self.readCHRG(allLines, ind, numCHRG, natom)
# read AMASS **same number of lines as charges->Masses
dict['Masses'], ind = self.readFList(allLines, ind, numCHRG)
# read IAC **NOT same number of lines as IGRAPH 12!!
numIAC = int(ceil((natom*1.)/12.))
dict['Iac'], ind = self.readIList(allLines, ind, numIAC)
# read NUMEX **same number of lines as IAC
dict['Iblo'], ind, NumexSUM = self.readNUMEX(allLines, ind, numIAC)
# read ICO *Ntype2d/12
numICO = int(ceil((ntypes**2*1.0)/12.))
dict['Cno'], ind = self.readIList(allLines, ind, numICO)
##NB this should be half of a matrix
# read LABRES....no way to know how many
dict['ResNames'], ind = self.readLABRES(allLines, ind)
labres = dict['ResNames']
# read IPRES....depends on len of LABRES
numIPRES = int(ceil((len(labres)*1.)/20.))
dict['Ipres'], ind = self.readIList(allLines, ind, numIPRES)
# read RK + REQ-> depend on numbnd
numbnd = dict['Numbnd']
numRK = int(ceil((numbnd*1.)/5.))
dict['Rk'], ind = self.readFList(allLines, ind, numRK)
dict['Req'], ind = self.readFList(allLines, ind, numRK)
# read TK + TEQ-> depend on numang
numang = dict['Numang']
numTK = int(ceil((numang*1.)/5.))
dict['Tk'], ind = self.readFList(allLines, ind, numTK)
dict['Teq'], ind = self.readFList(allLines, ind, numTK)
# read PK, PN + PHASE-> depend on numptra
nptra = dict['Nptra']
numPK = int(ceil((nptra*1.)/5.))
dict['Pk'], ind = self.readFList(allLines, ind, numPK)
dict['Pn'], ind = self.readFList(allLines, ind, numPK)
dict['Phase'], ind = self.readFList(allLines, ind, numPK)
# read SOLTY
natyp = dict['Natyp']
numSOLTY = int(ceil((natyp*1.)/5.))
dict['Solty'], ind = self.readFList(allLines, ind, numSOLTY)
# read CN1 and CN2
numCN = int(ceil((nttyp*1.)/5.))
dict['Cn1'], ind = self.readFList(allLines, ind, numCN)
dict['Cn2'], ind = self.readFList(allLines, ind, numCN)
# read IBH, JBH, ICBH 12
nbonh = dict['Nbonh']
numIBH = int(ceil((nbonh*3.0)/12.))
[dict['BondHAt1'], dict['BondHAt2'], dict['BondHNum']], ind = \
self.readILList(allLines, ind, numIBH, 3)
# read IB, JB, ICB 12
nbona = dict['Nbona']
numIB = int(ceil((nbona*3.0)/12.))
[dict['BondAt1'], dict['BondAt2'], dict['BondNum']], ind = \
self.readILList(allLines, ind, numIB, 3)
# read ITH, JTH, KTH, ICTH 12
ntheth = dict['Ntheth']
numITH = int(ceil((ntheth*4.0)/12.))
[dict['AngleHAt1'], dict['AngleHAt2'], dict['AngleHAt3'],\
dict['AngleHNum']], ind = self.readILList(allLines, ind, numITH, 4)
# read IT, JT, KT, ICT 12
ntheta = dict['Ntheta']
numIT = int(ceil((ntheta*4.0)/12.))
[dict['AngleAt1'], dict['AngleAt2'], dict['AngleAt3'],\
dict['AngleNum']], ind = self.readILList(allLines, ind, numIT, 4)
# read IPH, JPH, KPH, LPH, ICPH 12
nphih = dict['Nphih']
numIPH = int(ceil((nphih*5.0)/12.))
[dict['DihHAt1'], dict['DihHAt2'], dict['DihHAt3'], dict['DihHAt4'],\
dict['DihHNum']], ind = self.readILList(allLines, ind, numIPH, 5)
# read IP, JP, KP, LP, ICP 12
nphia = dict['Nphia']
numIP = int(ceil((nphia*5.0)/12.))
[dict['DihAt1'], dict['DihAt2'], dict['DihAt3'], dict['DihAt4'],\
dict['DihNum']], ind = self.readILList(allLines, ind, numIP, 5)
# read NATEX 12
#FIX THIS: has to be the sum of previous entries
numATEX = int(ceil((NumexSUM*1.0)/12.))
dict['ExclAt'], ind = self.readIList(allLines, ind, numATEX)
        # skip ASOL, BSOL and HBCUT (3 lines)
ind = ind + 3
# read ISYMBL 20
asym, ind = self.readIGRAPH(allLines, numIGRAPH, ind)
dict['AtomSym'] = join(asym)
# read ITREE 20
atree, ind = self.readIGRAPH(allLines, numIGRAPH, ind)
dict['AtomTree'] = join(atree)
return dict
def makeList(self, llist, num):
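        # transpose 'num' parallel lists into a list of num-element rows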
newL = []
for i in range(len(llist[0])):
ni = []
for j in range(num):
ni.append(llist[j][i])
newL.append(ni)
return newL
#functions to write self
def write(self, filename, **kw):
fptr = open(filename, 'w')
dict = self.prmDict
self.writeItitl(fptr, dict['ititl'])
self.writeSummary(fptr)
#WHAT ABOUT SOLTY???
self.writeString(fptr,dict['AtomNames'][:-81])
for k in ['Charges', 'Masses', 'Iac','Iblo','Cno']:
item = dict[k]
f = self.formatD[k]
if f[2]:
self.writeTupleList(fptr, item, f[0], f[1], f[2])
else:
self.writeList(fptr, item, f[0], f[1])
self.writeString(fptr,dict['ResNames'][:-81])
self.writeList(fptr, dict['Ipres'][:-1], '%6d', 12 )
for k in ['Rk', 'Req', 'Tk', 'Teq',
'Pk', 'Pn', 'Phase', 'Solty', 'Cn1','Cn2']:
item = dict[k]
f = self.formatD[k]
if f[2]:
self.writeTupleList(fptr, item, f[0], f[1], f[2])
else:
self.writeList(fptr, item, f[0], f[1])
#next write bnds angs and dihe
allHBnds = zip(dict['BondHAt1'], dict['BondHAt2'],
dict['BondHNum'])
self.writeTupleList(fptr, allHBnds, "%6d", 12, 3)
allBnds = zip(dict['BondAt1'], dict['BondAt2'],
dict['BondNum'])
self.writeTupleList(fptr, allBnds, "%6d", 12, 3)
allHAngs = zip(dict['AngleHAt1'], dict['AngleHAt2'],
dict['AngleHAt3'], dict['AngleHNum'])
self.writeTupleList(fptr, allHAngs, "%6d", 12,4)
allAngs = zip(dict['AngleAt1'], dict['AngleAt2'],
dict['AngleAt3'], dict['AngleNum'])
self.writeTupleList(fptr, allAngs, "%6d", 12, 4)
allHDiHe = zip(dict['DihHAt1'], dict['DihHAt2'],
dict['DihHAt3'], dict['DihHAt4'], dict['DihHNum'])
self.writeTupleList(fptr, allHDiHe, "%6d", 12,5)
allDiHe = zip(dict['DihAt1'], dict['DihAt2'],
dict['DihAt3'], dict['DihAt4'], dict['DihNum'])
self.writeTupleList(fptr, allDiHe, "%6d", 12, 5)
self.writeList(fptr, dict['ExclAt'], '%6d', 12)
fptr.write('\n')
fptr.write('\n')
fptr.write('\n')
for k in ['AtomSym', 'AtomTree']:
item = dict[k][:-81]
self.writeString(fptr, item)
zList = []
for i in range(dict['Natom']):
zList.append(0)
self.writeList(fptr, zList, "%6d", 12)
self.writeList(fptr, zList, "%6d", 12)
fptr.close()
def writeString(self, fptr, item):
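        # write a long string in 80-character chunks, one chunk per line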
n = int(ceil(len(item)/80.))
for p in range(n):
if p!=n-1:
fptr.write(item[p*80:(p+1)*80]+'\n')
else:
                #write to the end, wherever it is
fptr.write(item[p*80:]+'\n')
def writeList(self, fptr, outList, formatStr="%4.4s", lineCt=12):
ct = 0
s = ""
nlformatStr = formatStr+'\n'
lenList = len(outList)
for i in range(lenList):
            #append the formatted item to the line buffer
s = s + formatStr%outList[i]
            #ct is how many items are in s
ct = ct + 1
#if line is full, write it and reset s and ct
if ct%lineCt==0:
s = s + '\n'
fptr.write(s)
s = ""
ct = 0
#if last entry write it and exit
elif i == lenList-1:
s = s + '\n'
fptr.write(s)
break
def writeTupleList(self, fptr, outList, formatStr="%4.4s", lineCt=12, ll=2):
ct = 0
s = ""
nlformatStr = formatStr+'\n'
for i in range(len(outList)):
if i==len(outList)-1:
for k in range(ll):
s = s + formatStr%outList[i][k]
ct = ct + 1
if ct%lineCt==0:
s = s + '\n'
fptr.write(s)
s = ""
ct = 0
#after adding last entry, if anything left, print it
if ct!=0:
s = s + '\n'
fptr.write(s)
else:
for k in range(ll):
s = s + formatStr%outList[i][k]
ct = ct + 1
if ct%lineCt==0:
s = s + '\n'
fptr.write(s)
s = ""
ct = 0
def writeItitl(self, fptr, ititl):
fptr.write(ititl)
def writeSummary(self, fptr):
#SUMMARY
#fptr.write('SUMMARY\n')
##FIX THESE NAMES!!!
kL1 = ['Natom','Ntypes','Nbonh','Mbona',\
'Ntheth','Mtheta','Nphih','Mphia','Nhparm',\
'Nparm','Nnb','Nres']
kL2 = ['Nbona','Ntheta','Nphia','Numbnd',\
'Numang','Nptra','Natyp','Nphb','Ifpert',\
'Nbper','Ngper','Ndper']
kL3 = ['Mbper','Mgper','Mdper','IfBox','Nmxrs',\
'IfCap']
for l in [kL1, kL2, kL3]:
newL = []
for item in l:
newL.append(self.prmDict[item])
#print 'newL=', newL
self.writeList(fptr, newL, "%6d", 12)
if __name__ == '__main__':
# load a protein and build bonds
from MolKit import Read
p = Read('sff/testdir/p1H.pdb')
p[0].buildBondsByDistance()
# build an Amber parameter description objects
from MolKit.amberPrmTop import ParameterDict
pd = ParameterDict()
from MolKit.amberPrmTop import Parm
prm = Parm()
prm.processAtoms(p.chains.residues.atoms)
| 34.037879 | 108 | 0.476167 | 52,633 | 0.976204 | 0 | 0 | 0 | 0 | 0 | 0 | 12,838 | 0.238111 |
d20f67ca5ace0109a27cb8bee9fd7724ffdbb6df
| 2,342 |
py
|
Python
|
main_model/example.py
|
benmaier/DigCT
|
62fc3fddb7600e2a43761e08618b2e3df423569c
|
[
"MIT"
] | null | null | null |
main_model/example.py
|
benmaier/DigCT
|
62fc3fddb7600e2a43761e08618b2e3df423569c
|
[
"MIT"
] | null | null | null |
main_model/example.py
|
benmaier/DigCT
|
62fc3fddb7600e2a43761e08618b2e3df423569c
|
[
"MIT"
] | 1 |
2021-07-12T13:50:35.000Z
|
2021-07-12T13:50:35.000Z
|
import numpy as np
from simulation import simulation_code
from tqdm import tqdm
def make_length(arr,maxlen):
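    # pad arr to length maxlen by repeating its last value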
dL = maxlen - len(arr)
if dL > 0:
newa = np.concatenate((arr, np.ones(dL)*arr[-1]))
else:
newa = arr
return newa
def make_equal_length(arr_list):
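    # pad all arrays to the length of the longest one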
maxlen = max([len(a) for a in arr_list])
new_arr_list = []
for a in arr_list:
newa = make_length(a,maxlen)
new_arr_list.append(newa)
return new_arr_list
np.random.seed(981736)
N = 10_000
n_meas = 100
kwargs = dict(
N = N,
q = 0.3,
a = 0.3,
R0 = 2.5,
quarantiningS = True,
parameter = {
'chi':1/2.5,
'recovery_rate' : 1/7,
'alpha' : 1/3,
'beta' : 1/2,
'number_of_contacts' : 20,
'x':0.17,
'I_0' : N*0.01,
'omega':1/10,
"y" : 0.1,
"z": 0.64,
"R0": 2.5,
"network_model":'er_network',
},
sampling_dt = 1,
time = 1e7,
)
import matplotlib.pyplot as pl
def get_epidemic(result):
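    # total prevalence: sum over all infectious compartments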
return sum([ result[C] for C in ['I_P','I_S','I_A','I_Pa','I_Sa','I_Aa'] ])
results_tracing = []
results_no_trac = []
for meas in tqdm(range(n_meas)):
kwargs['a'] = 0.3
_, result0 = simulation_code(kwargs)
result0 = get_epidemic(result0)
kwargs['a'] = 0.0
_, result1 = simulation_code(kwargs)
result1 = get_epidemic(result1)
results_tracing.append(result0)
results_no_trac.append(result1)
results_tracing = np.array(make_equal_length(results_tracing))
results_no_trac = np.array(make_equal_length(results_no_trac))
t0 = np.arange(np.shape(results_tracing)[1])
t1 = np.arange(np.shape(results_no_trac)[1])
mn0 = np.mean(results_tracing,axis=0)
mn1 = np.mean(results_no_trac,axis=0)
err0 = np.std(results_tracing,axis=0)
err1 = np.std(results_no_trac,axis=0)
err0low, md0, err0high = np.percentile(results_tracing,[25,50,75],axis=0)
err1low, md1, err1high = np.percentile(results_no_trac,[25,50,75],axis=0)
pl.plot(t0, md0, label='with tracing (a=0.3)')
pl.plot(t1, md1, label='without tracing')
pl.fill_between(t0, err0low, err0high, alpha=0.2)
pl.fill_between(t1, err1low, err1high, alpha=0.2)
pl.xlabel('time [d]')
pl.ylabel('prevalence')
pl.legend()
pl.gcf().savefig('example.png',dpi=300)
pl.show()
| 23.897959 | 79 | 0.62041 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 218 | 0.093083 |
d2106f01efc43255c99ac9c1592bee8f1c926386
| 5,480 |
py
|
Python
|
app/migrations/0002_appointment_doctor_patient_person_receptionist.py
|
sairamBikkina/sdp1
|
e48cb01e8100259f95c16911f5fe6f843313464e
|
[
"MIT"
] | 5 |
2020-08-06T07:16:00.000Z
|
2022-01-20T22:07:58.000Z
|
app/migrations/0002_appointment_doctor_patient_person_receptionist.py
|
sairamBikkina/sdp1
|
e48cb01e8100259f95c16911f5fe6f843313464e
|
[
"MIT"
] | 2 |
2020-10-04T13:58:24.000Z
|
2020-10-04T14:00:35.000Z
|
app/migrations/0002_appointment_doctor_patient_person_receptionist.py
|
sairamBikkina/sdp1
|
e48cb01e8100259f95c16911f5fe6f843313464e
|
[
"MIT"
] | 3 |
2020-10-03T07:19:52.000Z
|
2021-10-05T07:15:30.000Z
|
# Generated by Django 3.0.5 on 2020-05-24 10:19
import datetime
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Person',
fields=[
(
'user',
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
primary_key=True,
serialize=False,
to=settings.AUTH_USER_MODEL,
),
),
('type', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Receptionist',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
('Address', models.CharField(default=None, max_length=100)),
('Email', models.CharField(default=None, max_length=100)),
('Phone', models.CharField(default=None, max_length=100)),
('gender', models.CharField(default=None, max_length=100)),
(
'person',
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE, to='app.Person'
),
),
],
),
migrations.CreateModel(
name='Patient',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
('location', models.CharField(blank=True, default='', max_length=500)),
('bio', models.CharField(blank=True, max_length=500)),
('Address', models.CharField(default=None, max_length=100)),
('Email', models.CharField(default=None, max_length=100)),
('Phone', models.CharField(default=None, max_length=100)),
('gender', models.CharField(default=None, max_length=100)),
(
'person',
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE, to='app.Person'
),
),
],
),
migrations.CreateModel(
name='Doctor',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
('Speciality', models.CharField(default=None, max_length=100)),
('Address', models.CharField(default=None, max_length=100)),
('Email', models.CharField(default=None, max_length=100)),
('Phone', models.CharField(default=None, max_length=100)),
('gender', models.CharField(default=None, max_length=100)),
(
'person',
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE, to='app.Person'
),
),
],
),
migrations.CreateModel(
name='Appointment',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'Date',
models.DateField(default=datetime.date.today, verbose_name='Date'),
),
(
'status',
models.CharField(
choices=[
('PD', 'Pending'),
('AP', 'Approved'),
('RJ', 'rejected'),
],
default='PD',
max_length=2,
),
),
(
'message',
models.CharField(default='Pending Approval', max_length=1000),
),
(
'Doctor',
models.ForeignKey(
default=None,
on_delete=django.db.models.deletion.CASCADE,
to='app.Doctor',
),
),
(
'user',
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
),
]
| 34.683544 | 87 | 0.387044 | 5,304 | 0.967883 | 0 | 0 | 0 | 0 | 0 | 0 | 462 | 0.084307 |
d210d3e4fc7f26c1bc84d6a2851b1aad30445d94
| 2,185 |
py
|
Python
|
notebook/datetime_fromisoformat.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 174 |
2018-05-30T21:14:50.000Z
|
2022-03-25T07:59:37.000Z
|
notebook/datetime_fromisoformat.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 5 |
2019-08-10T03:22:02.000Z
|
2021-07-12T20:31:17.000Z
|
notebook/datetime_fromisoformat.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 53 |
2018-04-27T05:26:35.000Z
|
2022-03-25T07:59:37.000Z
|
import datetime
s = '2018-12-31'
d = datetime.date.fromisoformat(s)
print(d)
# 2018-12-31
print(type(d))
# <class 'datetime.date'>
# print(datetime.date.fromisoformat('2018-12'))
# ValueError: Invalid isoformat string: '2018-12'
print(datetime.date.fromisoformat('2018-01-01'))
# 2018-01-01
# print(datetime.date.fromisoformat('2018-1-1'))
# ValueError: Invalid isoformat string: '2018-1-1'
s = '05:00:30.001000'
t = datetime.time.fromisoformat(s)
print(t)
# 05:00:30.001000
print(type(t))
# <class 'datetime.time'>
print(datetime.time.fromisoformat('05'))
# 05:00:00
# print(datetime.time.fromisoformat('5:00:30'))
# ValueError: Invalid isoformat string: '5:00:30'
s = '2018-12-31T05:00:30.001000'
dt = datetime.datetime.fromisoformat(s)
print(dt)
# 2018-12-31 05:00:30.001000
print(type(dt))
# <class 'datetime.datetime'>
print(datetime.datetime.fromisoformat('2018-12-31x05:00:30.001000'))
# 2018-12-31 05:00:30.001000
# print(datetime.datetime.fromisoformat('2018-12-31xx05:00:30.001000'))
# ValueError: Invalid isoformat string: '2018-12-31xx05:00:30.001000'
print(datetime.datetime.fromisoformat('2018-12-31T05'))
# 2018-12-31 05:00:00
print(datetime.datetime.fromisoformat('2018-12-31'))
# 2018-12-31 00:00:00
# print(datetime.datetime.fromisoformat('2018-12-31T5:00'))
# ValueError: Invalid isoformat string: '2018-12-31T5:00'
s = '2018-12-31T05:00:30.001000'
# print(datetime.date.fromisoformat(s))
# ValueError: Invalid isoformat string: '2018-12-31T05:00:30.001000'
# print(datetime.time.fromisoformat(s))
# ValueError: Invalid isoformat string: '2018-12-31T05:00:30.001000'
d = datetime.datetime.fromisoformat(s).date()
print(d)
# 2018-12-31
print(type(d))
# <class 'datetime.date'>
t = datetime.datetime.fromisoformat(s).time()
print(t)
# 05:00:30.001000
print(type(t))
# <class 'datetime.time'>
s = '2018-12-31T05:00:30'
s_basic = s.replace('-', '').replace(':', '')
print(s_basic)
# 20181231T050030
s = '2018-12-31T05:00:30.001000'
s_basic = s.split('.')[0].replace('-', '').replace(':', '')
print(s_basic)
# 20181231T050030
s_ex = datetime.datetime.strptime(s_basic, '%Y%m%dT%H%M%S').isoformat()
print(s_ex)
# 2018-12-31T05:00:30
| 20.809524 | 71 | 0.707551 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,365 | 0.624714 |
d211994f319cdf819a2e0d0b5d58c4101deb9cd5
| 418 |
py
|
Python
|
app/main/models/hello_db.py
|
ZenithClown/flask-docker-template
|
cf5949fb6f448dd73cc287842b5deb1d5f7bd321
|
[
"MIT"
] | null | null | null |
app/main/models/hello_db.py
|
ZenithClown/flask-docker-template
|
cf5949fb6f448dd73cc287842b5deb1d5f7bd321
|
[
"MIT"
] | 41 |
2021-09-01T17:31:47.000Z
|
2022-03-28T12:13:12.000Z
|
app/main/models/hello_db.py
|
ZenithClown/flask-docker-template
|
cf5949fb6f448dd73cc287842b5deb1d5f7bd321
|
[
"MIT"
] | 1 |
2021-12-22T07:25:08.000Z
|
2021-12-22T07:25:08.000Z
|
# -*- encoding: utf-8 -*-
from .. import db
from ._base_model import ModelSchema
class HelloDB(db.Model, ModelSchema):
"""Use the Model to Establish a Connection to DB"""
__tablename__ = "HelloDB"
id = db.Column(db.Integer, primary_key = True, autoincrement = True, nullable = False)
field = db.Column(db.String(255), nullable = False)
    def __init__(self):
        # initialize the ModelSchema side of this model
        ModelSchema.__init__(self)
| 24.588235 | 93 | 0.665072 | 334 | 0.799043 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.203349 |
d2126b69bc34d19eeaa2b4aa3508f4499874a0f2
| 3,069 |
py
|
Python
|
affineTransform.py
|
LuBru90/Facemorphing
|
ddeb8b0d368d62c66a032290cd756f0e3f3d6a81
|
[
"Apache-2.0"
] | null | null | null |
affineTransform.py
|
LuBru90/Facemorphing
|
ddeb8b0d368d62c66a032290cd756f0e3f3d6a81
|
[
"Apache-2.0"
] | null | null | null |
affineTransform.py
|
LuBru90/Facemorphing
|
ddeb8b0d368d62c66a032290cd756f0e3f3d6a81
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import cv2
import time
def getTransformMatrix(origin, destination):
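    # Solve in closed form for the affine transform that maps the three points
    # in 'origin' onto the three points in 'destination'; returns a 3x3
    # homogeneous matrix for column vectors [x, y, 1].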
x = np.zeros(origin.shape[0] + 1) # insert [0]-element for better indexing -> x[1] = first element
x[1:] = origin[:,0]
y = np.copy(x)
y[1:] = origin[:,1]
x_ = np.copy(x)
x_[1:] = destination[:,0]
y_ = np.copy(x)
y_[1:] = destination[:,1]
a11 = (y[1] * (x_[2] - x_[3]) + y[2] * (x_[3] - x_[1]) + y[3] * (x_[1] - x_[2]))
a12 = (x[1] * (x_[3] - x_[2]) + x[2] * (x_[1] - x_[3]) + x[3] * (x_[2] - x_[1]))
a21 = (y[1] * (y_[2] - y_[3]) + y[2] * (y_[3] - y_[1]) + y[3] * (y_[1] - y_[2]))
a22 = (x[1] * (y_[3] - y_[2]) + x[2] * (y_[1] - y_[3]) + x[3] * (y_[2] - y_[1]))
a13 = (x[1] * (y[3]*x_[2] - y[2]*x_[3]) + x[2] * (y[1]*x_[3] - y[3]*x_[1]) + x[3] * (y[2]*x_[1] - y[1]*x_[2]))
a23 = (x[1] * (y[3]*y_[2] - y[2]*y_[3]) + x[2] * (y[1]*y_[3] - y[3]*y_[1]) + x[3] * (y[2]*y_[1] - y[1]*y_[2]))
d = x[1]*(y[3] - y[2]) + x[2]*(y[1] - y[3]) + x[3]*(y[2] - y[1])
return 1/d * np.array([[a11, a12, a13], [a21, a22, a23], [0, 0, 1]])
def transformImage(image, M):
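    # Forward-map every pixel through M into an output image of the same shape;
    # positions that no source pixel lands on stay zero (holes).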
warpedImage = np.zeros(image.shape, dtype=np.int32)
for y, row in enumerate(image):
for x, value in enumerate(row):
newX, newY, _ = np.dot(M, np.array([x,y,1]))
cond1 = newY < warpedImage.shape[0] and newX < warpedImage.shape[1]
cond2 = newY > 0 and newX > 0
if cond1 and cond2:
warpedImage[int(newY)][int(newX)] = value
return warpedImage
def interpolateMissingPixels(image):
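    # Fill empty (all-zero) pixels with the per-channel mean of the non-zero
    # neighbours in a 3x3 window.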
#interpImage = np.zeros(image.shape, dtype=np.int32)
interpImage = np.array(image)
for y in range(1, len(image) - 1):
row = interpImage[y]
for x in range(1, len(row) - 1):
            if not row[x].any(): # empty pixel
windowPixels = interpImage[y-1:y+2, x-1:x+2] # [rgb], [rgb], [rgb]
# if windowPixels.sum() == 0:
# continue
newPixel = np.array([0,0,0])
for channel in range(3): # interpolate rgb
channelValues = windowPixels[:, :, channel]
temp = channelValues != 0
meancount = temp.sum()
newPixel[channel] = channelValues.sum() / meancount if meancount != 0 else 0
interpImage[y][x] = newPixel
return interpImage
def main():
origin = np.array([[50, 50], [50, 100], [100, 50]])
destination = np.array([[50, 100], [100, 250], [150, 50]])
m = getTransformMatrix(origin, destination)
image = plt.imread("scarlet.jpg")[100:400, 100:400]
warpedImage = transformImage(image, m)
interpImage = interpolateMissingPixels(warpedImage)
fig, ax = plt.subplots(1,3)
ax[0].imshow(image)
ax[1].imshow(warpedImage)
ax[2].imshow(interpImage)
plt.show()
if __name__ == "__main__":
main()
| 38.848101 | 121 | 0.491691 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 233 | 0.07592 |
d214f97afaf19189be80677ea4aa9be0be0607e7
| 4,219 |
py
|
Python
|
demo_count.py
|
addtt/multi-object-datasets
|
d3b03ec56a9e971fed4d3519e8bfee5ed02ed9cb
|
[
"MIT"
] | 4 |
2020-01-06T08:50:04.000Z
|
2021-12-06T08:41:13.000Z
|
demo_count.py
|
addtt/multi-object-datasets
|
d3b03ec56a9e971fed4d3519e8bfee5ed02ed9cb
|
[
"MIT"
] | 2 |
2021-06-08T20:48:25.000Z
|
2021-09-08T01:35:58.000Z
|
demo_count.py
|
addtt/multi-object-datasets
|
d3b03ec56a9e971fed4d3519e8bfee5ed02ed9cb
|
[
"MIT"
] | 2 |
2020-11-19T14:20:29.000Z
|
2021-01-12T12:00:44.000Z
|
import argparse
import os
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim.adamax import Adamax
from multiobject.pytorch import MultiObjectDataLoader, MultiObjectDataset
epochs = 100
batch_size = 64
lr = 3e-4
dataset_filename = os.path.join(
'dsprites',
'multi_dsprites_color_012.npz')
# dataset_filename = os.path.join(
# 'binary_mnist',
# 'multi_binary_mnist_012.npz')
class SimpleBlock(nn.Module):
def __init__(self, ch, kernel, stride=1, dropout=0.25):
super().__init__()
assert kernel % 2 == 1
padding = (kernel - 1) // 2
self.net = nn.Sequential(
nn.Conv2d(ch, ch, kernel, padding=padding, stride=stride),
nn.Dropout2d(dropout),
nn.LeakyReLU(),
nn.BatchNorm2d(ch),
)
def forward(self, x):
return self.net(x)
class Model(nn.Module):
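    # Counts objects in an image: convolutional features are summed over the
    # spatial dimensions and fed to a small MLP with one logit per count class.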
def __init__(self, color_channels, n_classes):
super().__init__()
self.convnet = nn.Sequential(
nn.Conv2d(color_channels, 64, 5, padding=2, stride=2),
nn.LeakyReLU(),
SimpleBlock(64, 3, stride=2),
SimpleBlock(64, 3, stride=2),
SimpleBlock(64, 3, stride=2),
nn.Conv2d(64, 64, 3, padding=1, stride=2),
)
self.fcnet = nn.Sequential(
nn.Linear(64, 64),
nn.LeakyReLU(),
nn.Linear(64, n_classes),
)
def forward(self, x):
x = self.convnet(x) # output is 2x2 for 64x64 images
x = x.sum((2, 3)) # sum over spatial dimensions
x = self.fcnet(x)
return x
def main():
args = parse_args()
path = os.path.join('generated', args.dataset_path)
# Datasets and dataloaders
print("loading dataset...")
train_set = MultiObjectDataset(path, train=True)
test_set = MultiObjectDataset(path, train=False)
train_loader = MultiObjectDataLoader(
train_set, batch_size=batch_size, shuffle=True, drop_last=True)
test_loader = MultiObjectDataLoader(test_set, batch_size=100)
# Model and optimizer
print("initializing model...")
channels = train_set.x.shape[1]
n_classes = 3 # hardcoded for dataset with 0 to 2 objects
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Model(channels, n_classes).to(device)
optimizer = Adamax(model.parameters(), lr=lr)
# Training loop
print("training starts")
step = 0
model.train()
for e in range(1, epochs + 1):
for x, labels in train_loader:
# Run model and compute loss
loss, acc = forward(model, x, labels, device)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
step += 1
if step % 100 == 0:
print("[{}] loss: {:.2g} acc: {:.2g}".format(
step, loss.item(), acc))
# Test
with torch.no_grad():
model.eval()
loss = acc = 0.
for x, labels in test_loader:
loss_, acc_ = forward(model, x, labels, device)
k = len(x) / len(test_set)
loss += loss_.item() * k
acc += acc_ * k
model.train()
print("TEST [epoch {}] loss: {:.2g} acc: {:.2g}".format(
e, loss, acc))
def forward(model, x, labels, device):
# Forward pass through model
n = labels['n_obj'].to(device)
x = x.to(device)
logits = model(x)
# Loss
loss = F.cross_entropy(logits, n)
# Accuracy
pred = logits.max(1)[1]
accuracy = (n == pred).float().mean().item()
return loss, accuracy
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
allow_abbrev=False)
parser.add_argument('--dataset',
type=str,
default=dataset_filename,
metavar='PATH',
dest='dataset_path',
help="relative path of the dataset")
return parser.parse_args()
if __name__ == '__main__':
main()
| 27.756579 | 73 | 0.569803 | 1,207 | 0.286087 | 0 | 0 | 0 | 0 | 0 | 0 | 634 | 0.150273 |
d21501d0dc912be2f83952df41a003d90a5d9684
| 2,015 |
py
|
Python
|
run_main_script.py
|
korombus/blender_battleVR_py
|
d0d0ccfabfa644fc97105e5cc99e86e37167cb55
|
[
"MIT"
] | null | null | null |
run_main_script.py
|
korombus/blender_battleVR_py
|
d0d0ccfabfa644fc97105e5cc99e86e37167cb55
|
[
"MIT"
] | null | null | null |
run_main_script.py
|
korombus/blender_battleVR_py
|
d0d0ccfabfa644fc97105e5cc99e86e37167cb55
|
[
"MIT"
] | null | null | null |
import bpy
import random
import math
## Constant settings ####################################################
# List of script file paths
FILE_ROOT_PATH = 'D:/blender_battleVR_py/'
setrendr_file_name = FILE_ROOT_PATH + "setting_render.py"
magicobj_file_name = FILE_ROOT_PATH + "magic_model.py"
fieldins_file_name = FILE_ROOT_PATH + "field_model.py"
wizardob_file_name = FILE_ROOT_PATH + "wizard_model.py"
witchcft_file_name = FILE_ROOT_PATH + "witchcraft_model.py"
camerast_file_name = FILE_ROOT_PATH + "camera_setting.py"
# List of SE (sound effect) file paths
SE_ROOT_PATH = FILE_ROOT_PATH + 'se/'
#sound_begin = (SE_ROOT_PATH + "花火・一発_begin.wav", SE_ROOT_PATH + "花火・一発_begin.wav")
#sound_bomb = (SE_ROOT_PATH + "花火・一発_bomb.wav", SE_ROOT_PATH + "nc178345_bomb.wav")
# List of magic circle image file paths
IMG_ROOT_PATH = FILE_ROOT_PATH + 'img/'
witchcraft_img_name = (
IMG_ROOT_PATH + "magic_0.png",
IMG_ROOT_PATH + "magic_1.png",
IMG_ROOT_PATH + "magic_2.png",
IMG_ROOT_PATH + "magic_3.png",
IMG_ROOT_PATH + "magic_4.png"
)
# End frame of the scene
FRAME_END = 500
##########################################################################
# Select all objects
bpy.ops.object.select_all(action='SELECT')
# Delete all objects
bpy.ops.object.delete(True)
# Create a sequence editor
if bpy.context.scene.sequence_editor:
bpy.context.scene.sequence_editor_clear()
bpy.context.scene.sequence_editor_create()
# Set the final frame
bpy.data.scenes["Scene"].frame_end = FRAME_END
# Rendering settings
exec(compile(open(setrendr_file_name).read().replace("FILE_ROOT_PATH", FILE_ROOT_PATH), setrendr_file_name, 'exec'))
# Create the camera
exec(compile(open(camerast_file_name).read(), camerast_file_name, 'exec'))
# Create the field
exec(compile(open(fieldins_file_name).read(), fieldins_file_name, 'exec'))
# Create the wizard model
exec(compile(open(wizardob_file_name).read(), wizardob_file_name, 'exec'))
# Create the magic circle
exec(compile(open(witchcft_file_name).read().replace("WITCHECRAFT_IMAGES", str(witchcraft_img_name)), witchcft_file_name, 'exec'))
# Create the objects used for the magic
exec(compile(open(magicobj_file_name).read(), magicobj_file_name, 'exec'))
| 31.984127 | 130 | 0.718114 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,073 | 0.462301 |
d215f5660d06095bfa19474e13bb492e71765463
| 2,014 |
py
|
Python
|
apps/genres/tests/__init__.py
|
GiannisClipper/payments
|
94e08144597b3f4cd0de8485edf3f5535aeb9da6
|
[
"MIT"
] | null | null | null |
apps/genres/tests/__init__.py
|
GiannisClipper/payments
|
94e08144597b3f4cd0de8485edf3f5535aeb9da6
|
[
"MIT"
] | null | null | null |
apps/genres/tests/__init__.py
|
GiannisClipper/payments
|
94e08144597b3f4cd0de8485edf3f5535aeb9da6
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
import copy
from django.contrib.auth import get_user_model
from funds.models import Fund
from genres.models import Genre
from users.tests import UserCreateMethods
from funds.tests import FundCreateMethods
from users.tests import USER_SAMPLES, ADMIN_SAMPLES
from funds.tests import FUND_SAMPLES
GENRE_SAMPLES = {
# First key digit is equal to user id
11: {'user': {'id': 1}, 'fund': {'key': 11}, 'code': '1', 'name': 'INCOME',
'is_income': True}, # noqa: E127
12: {'user': {'id': 1}, 'fund': {'key': 11}, 'code': '2', 'name': 'EXPENSES',
'is_income': False}, # noqa: E127
21: {'user': {'id': 2}, 'fund': {'key': 21}, 'code': 'ES', 'name': 'ESODA',
'is_income': True}, # noqa: E127
22: {'user': {'id': 2}, 'fund': {'key': 21}, 'code': 'EX', 'name': 'EXODA',
'is_income': False}, # noqa: E127
}
class GenreCreateMethods:
def create_genre(self, **genre):
genre['user'] = get_user_model().objects.get(pk=genre['user']['id'])
genre['fund'] = Fund.objects.get(pk=genre['fund']['id'])
return Genre.objects.create(**genre)
def create_genres(self, samples):
for sample in samples.values():
genre = self.create_genre(**sample)
sample['id'] = genre.pk
class GenresTests(TestCase, GenreCreateMethods, UserCreateMethods, FundCreateMethods):
def setUp(self):
self.samples = {
'users': copy.deepcopy(USER_SAMPLES),
'admins': copy.deepcopy(ADMIN_SAMPLES),
'funds': copy.deepcopy(FUND_SAMPLES),
'genres': copy.deepcopy(GENRE_SAMPLES),
}
self.create_users(self.samples['users'])
self.create_admins(self.samples['admins'])
self.create_funds(self.samples['funds'])
for sample in self.samples['genres'].values():
key = sample['fund']['key']
sample['fund']['id'] = self.samples['funds'][key]['id']
sample['fund'].pop('key', None)
| 31.968254 | 86 | 0.602781 | 1,120 | 0.556107 | 0 | 0 | 0 | 0 | 0 | 0 | 446 | 0.22145 |
d2160cde3b51571cda15a85e9fdd3c56dfb2cae0
| 4,881 |
py
|
Python
|
rltorch/papers/DQN/hyperparams.py
|
Jjschwartz/rltorch
|
eeb2ad955f018d768db98c4a2be5da96a75579f6
|
[
"MIT"
] | null | null | null |
rltorch/papers/DQN/hyperparams.py
|
Jjschwartz/rltorch
|
eeb2ad955f018d768db98c4a2be5da96a75579f6
|
[
"MIT"
] | null | null | null |
rltorch/papers/DQN/hyperparams.py
|
Jjschwartz/rltorch
|
eeb2ad955f018d768db98c4a2be5da96a75579f6
|
[
"MIT"
] | null | null | null |
"""Hyperparameters from paper """
import numpy as np
import torch.optim as optim
from .model import DQN, DuelingDQN
class AtariHyperparams:
ALGO = "DQN"
SEED = 2
LOG_DISPLAY_FREQ = 10
# Image sizing
WIDTH = 84
HEIGHT = 84
# Number of most recent frames given as input to Q-network
AGENT_HISTORY = 4
STATE_DIMS = (AGENT_HISTORY, WIDTH, HEIGHT)
NORMALIZE = False
DISCOUNT = 0.99
MINIBATCH_SIZE = 32
REPLAY_SIZE = int(1e6)
REPLAY_S_DTYPE = np.uint8
# Number of network updates between target network updates
# TARGET_NETWORK_UPDATE_FREQ = 10000
TARGET_NETWORK_UPDATE_FREQ = 2500 # every 10000 frames
# Number of times an action is repeated, i.e. number of frames skipped
ACTION_REPEAT = 4
# Num actions (ignoring repeats) performed before Gradient descent update
NETWORK_UPDATE_FREQUENCY = 4
# Parameters for network learning
OPTIMIZER = optim.RMSprop
LEARNING_RATE = 0.00025
GRADIENT_MOMENTUM = 0.95
SQUARED_GRADIENT_MOMENTUM = 0.95
MIN_SQUARED_GRADIENT = 0.01
OPTIMIZER_KWARGS = {
"lr": LEARNING_RATE,
"momentum": GRADIENT_MOMENTUM,
"eps": MIN_SQUARED_GRADIENT
}
GRAD_CLIP = [-1, 1]
# for reward
R_CLIP = [-1, 1]
# Exploration
EXPLORATION_SCHEDULE = "Linear"
INITIAL_EXPLORATION = 1.0
FINAL_EXPLORATION = 0.1
FINAL_EXPLORATION_FRAME = 1000000
# Number of frames to run random policy and before learning starts
REPLAY_START_SIZE = 50000
# Max number of "do nothing" actions to be performed at start of episode
NO_OP_MAX = 30
# Network architecture
INPUT_DIMS = (WIDTH, HEIGHT, AGENT_HISTORY)
LAYER_1 = {"type": "convolutional",
"filters": 32, "kernel_size": (8, 8),
"stride": 4, "activation": "relu"}
LAYER_2 = {"type": "convolutional",
"filters": 64, "kernel_size": (4, 4),
"stride": 2, "activation": "relu"}
LAYER_3 = {"type": "convolutional",
"filters": 64, "kernel_size": (3, 3),
"stride": 1, "activation": "relu"}
LAYER_4 = {"type": "fully_connected",
"size": 512, "activation": "relu"}
OUTPUT = {"type": "fully_connected"}
MODEL = DQN
# training duration (50 million)
TRAINING_FRAMES = int(5e7)
# Other hyperparams not related to paper
# Model Save Freq
MODEL_SAVE_FREQ = int(1e6)
# Evaluation
EVAL_FREQ = int(1e6)
EVAL_STEPS = 125000
EVAL_EPSILON = 0.05
@classmethod
def set_seed(cls, seed):
cls.SEED = seed
@classmethod
def set_mode(cls, mode='dqn'):
if mode == "testing":
print("WARNING: using test hyperparams")
input("Press any key to continue..")
cls.ALGO += "_test"
cls.REPLAY_SIZE = int(1e4)
cls.REPLAY_START_SIZE = 100
cls.INITIAL_EXPLORATION = 0.1
cls.TARGET_NETWORK_UPDATE_FREQ = 1000
cls.EVAL_FREQ = 2000
cls.EVAL_STEPS = 1000
cls.MODEL_SAVE_FREQ = 2500
cls.LOG_DISPLAY_FREQ = 1
cls.MINIBATCH_SIZE = 12
elif mode == "eval":
cls.ALGO += "_eval"
cls.REPLAY_SIZE = int(1e4)
elif mode == "ddqn":
print("Using DDQN hyperparams")
cls.ALGO = "DDQN"
elif mode == "ddqn-tuned":
print("Using DDQN-Tuned hyperparams")
cls.ALGO = "DDQN-Tuned"
cls.TARGET_NETWORK_UPDATE_FREQ = 30000
cls.FINAL_EXPLORATION = 0.01
cls.EVAL_EPSILON = 0.001
elif mode == "dqn":
print("Using DQN hyperparams")
pass
elif mode == "duelingdqn":
print("Using Dueling DQN hyperparams")
cls.ALGO = "DuelingDQN"
cls.MODEL = DuelingDQN
elif mode == "normalized":
print("Using normalized observations")
cls.NORMALIZE = True
cls.REPLAY_S_DTYPE = np.float16
elif mode == "pong_tuned":
print("Using pong tuned hyperparams")
cls.REPLAY_SIZE = 100000
cls.REPLAY_START_SIZE = 10000
cls.INITIAL_EXPLORATION = 1.0
cls.FINAL_EXPLORATION = 0.02
cls.FINAL_EXPLORATION_FRAME = 100000
# this corresponds to updating every 1000 frames
cls.TARGET_NETWORK_UPDATE_FREQ = 250
cls.OPTIMIZER = optim.Adam
cls.OPTIMIZER_KWARGS = {"lr": 1e-4}
else:
raise ValueError("Unsupported Hyper param mode")
@classmethod
def get_all_hyperparams(cls):
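        # collect every class-level hyperparameter, skipping dunder attributes
        # and the classmethods themselves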
all_kwargs = {}
for k, v in cls.__dict__.items():
if not any([k.startswith("__"),
isinstance(v, classmethod)]):
all_kwargs[k] = v
return all_kwargs
| 31.901961 | 77 | 0.591477 | 4,761 | 0.975415 | 0 | 0 | 2,325 | 0.476337 | 0 | 0 | 1,430 | 0.292973 |
d2162729fc2afb100ad7e2d7244982b56598a414
| 822 |
py
|
Python
|
scripts/problem0002.py
|
Joel301/Project_Euler
|
2280dc19b8e0a2c956cf0d6db6c7d24eedd5e943
|
[
"MIT"
] | null | null | null |
scripts/problem0002.py
|
Joel301/Project_Euler
|
2280dc19b8e0a2c956cf0d6db6c7d24eedd5e943
|
[
"MIT"
] | null | null | null |
scripts/problem0002.py
|
Joel301/Project_Euler
|
2280dc19b8e0a2c956cf0d6db6c7d24eedd5e943
|
[
"MIT"
] | null | null | null |
#! python3
# -*- coding: utf-8 -*-
"""
Euler description from https://projecteuler.net/
Problem 0002
Each new term in the Fibonacci sequence is generated by adding the previous two
terms. By starting with 1 and 2, the first 10 terms will be:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
By considering the terms in the Fibonacci sequence whose values do not exceed
four million[4000000], find the sum of the even-valued terms.
"""
#fibonacci list generator
def fibonacci(limit=89):
lst = [1,2]
n1, n2 = 1, 2
while(n2 < limit):
n = n1 + n2
n1 = n2
n2 = n
lst.append(n)
return lst
# main function, same approach as problem0001
def compute(v = 4000000):
ans = sum(x for x in fibonacci(v) if x % 2 == 0)
return ans
if __name__ == "__main__":
print(compute(4000000))
| 22.833333 | 79 | 0.644769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 502 | 0.610706 |
d216db91805a0649ebde91802222cf781d19168b
| 1,090 |
py
|
Python
|
pandas/main.py
|
monishshah18/python-cp-cheatsheet
|
a5514b08816959de1198156f7764c54a7a585f20
|
[
"Apache-2.0"
] | 140 |
2020-10-21T13:23:52.000Z
|
2022-03-31T15:09:45.000Z
|
pandas/main.py
|
stacykutyepov/python-cp-cheatsheet
|
a00a57e1b36433648d1cace331e15ff276cef189
|
[
"Apache-2.0"
] | 1 |
2021-07-22T14:01:25.000Z
|
2021-07-22T14:01:25.000Z
|
pandas/main.py
|
stacykutyepov/python-cp-cheatsheet
|
a00a57e1b36433648d1cace331e15ff276cef189
|
[
"Apache-2.0"
] | 33 |
2020-10-21T14:17:02.000Z
|
2022-03-25T11:25:03.000Z
|
"""
Summarize the Total Cases and Total Deaths columns.
Sum the country-by-country data and check that it matches the global totals.
"""
import csv
import pandas
pandas.set_option("display.max_rows", None, "display.max_columns", None)
col_list = ["Total Cases", "Country/ Other", "Total Deaths", "# 9/27/2020"]
df = pandas.read_csv("covidmilliondead.csv", usecols=col_list, thousands=',')
totalCases, totalDeaths = 0, 0
for idx, cases, deaths in zip(df["# 9/27/2020"], df["Total Cases"], df["Total Deaths"]):
if idx > 0:
totalCases += cases
if deaths > 0:
totalDeaths += deaths
for idx, country, cases, deaths in zip(df["# 9/27/2020"], df["Country/ Other"], df["Total Cases"], df["Total Deaths"]):
if idx > 0:
print("\n",country)
print("Cases : ", cases, "/", totalCases, " %", "{:.5%}".format(cases/totalCases))
if deaths > 0:
print("Deaths : ", int(deaths), "/", totalDeaths, " %", "{:.5%}".format(deaths/totalDeaths))
print("")
print("Total Cases")
print(totalCases)
print("Total Deaths")
print(totalDeaths)
| 34.0625 | 119 | 0.633945 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 437 | 0.400917 |
d2179a39d18a821a8ac003b90306797cd588fe76
| 908 |
py
|
Python
|
conradomateu/day08/day08.py
|
CloudCoders/AdventOfCode2017
|
5a52d1e89076eccb55686e4af5848de289309813
|
[
"MIT"
] | 8 |
2017-12-11T18:22:52.000Z
|
2017-12-13T00:50:24.000Z
|
conradomateu/day08/day08.py
|
CloudCoders/AdventOfCode2017
|
5a52d1e89076eccb55686e4af5848de289309813
|
[
"MIT"
] | 8 |
2017-12-01T14:31:29.000Z
|
2017-12-07T21:43:43.000Z
|
conradomateu/day08/day08.py
|
CloudCoders/AdventOfCode2017
|
5a52d1e89076eccb55686e4af5848de289309813
|
[
"MIT"
] | null | null | null |
import operator
def parse(op):
    # Apply an instruction [register, "inc"/"dec", amount] and track the running maximum.
    registers[op[0]] = ops[op[1]](registers[op[0]], int(op[2]))
    maxs.append(max(registers.values()))
def parser(exp):
    # Evaluate a condition [register, comparison, value] against the current registers.
    return ops[exp[1]](registers[exp[0]], int(exp[2]))
def execute(op, exp):
    # Run the instruction only when its condition holds.
    if parser(exp):
        parse(op)
def initDictVars(arr):
    # Initialise every register named at the start of a line to zero.
    names = set(x.split(' ')[0] for x in arr)
    for name in names:
        registers[name] = 0
def sol(lines):
    initDictVars(lines)
    pairs = [x.split(" if ") for x in lines]
    tokens = [val for sublist in pairs for val in sublist]
    odd_indexes = [index for index, x in enumerate(tokens) if index % 2 == 1]
    for i in odd_indexes:
        execute(tokens[i - 1].split(" "), tokens[i].split(" "))
    return max(registers.values())
ops = {
    "inc": operator.add,
    "dec": operator.sub,
    ">": operator.gt,
    "<": operator.lt,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    "<=": operator.le}
maxs = []
registers = {}
| 18.916667 | 71 | 0.544053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 47 | 0.051762 |
d21892bc6e13fbca51eb7154188132cae4f0e838
| 667 |
py
|
Python
|
app/db/events.py
|
ilya-goldin/kanban-board-app
|
3c7026aedb0e21eaccc26a2ac4a37f0b6a91a122
|
[
"MIT"
] | null | null | null |
app/db/events.py
|
ilya-goldin/kanban-board-app
|
3c7026aedb0e21eaccc26a2ac4a37f0b6a91a122
|
[
"MIT"
] | null | null | null |
app/db/events.py
|
ilya-goldin/kanban-board-app
|
3c7026aedb0e21eaccc26a2ac4a37f0b6a91a122
|
[
"MIT"
] | null | null | null |
import asyncpg
from fastapi import FastAPI
from loguru import logger
from app.core.settings.app import AppSettings
async def connect_to_db(app: FastAPI, settings: AppSettings) -> None:
logger.info('Connecting to PostgreSQL')
app.state.pool = await asyncpg.create_pool(
str(settings.database_url),
min_size=settings.min_connection_count,
max_size=settings.max_connection_count,
command_timeout=60,
)
logger.info('Connection established')
async def close_db_connection(app: FastAPI) -> None:
logger.info('Closing connection to database')
await app.state.pool.close()
logger.info('Connection closed')
| 24.703704 | 69 | 0.731634 | 0 | 0 | 0 | 0 | 0 | 0 | 545 | 0.817091 | 101 | 0.151424 |
d21994c5a36ba9f1f16825926274957f83707bde
| 912 |
py
|
Python
|
Problem009/Python/solution_1.py
|
drocha87/ProjectEuler
|
c18407448aa4f05484191a0df1380e34f2b8c5d7
|
[
"MIT"
] | 167 |
2015-08-12T19:32:03.000Z
|
2022-03-25T12:26:43.000Z
|
Problem009/Python/solution_1.py
|
drocha87/ProjectEuler
|
c18407448aa4f05484191a0df1380e34f2b8c5d7
|
[
"MIT"
] | 153 |
2016-02-16T02:05:31.000Z
|
2020-11-06T15:35:51.000Z
|
Problem009/Python/solution_1.py
|
drocha87/ProjectEuler
|
c18407448aa4f05484191a0df1380e34f2b8c5d7
|
[
"MIT"
] | 84 |
2015-08-12T20:54:04.000Z
|
2022-02-27T05:14:53.000Z
|
#!/usr/bin/env python
# coding=utf-8
# Python Script
#
# Copyleft © Manoel Vilela
#
#
from __future__ import print_function
"""
Special Pythagorean triplet
Problem 9
A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
a² + b² = c²
For example, 3² + 4² = 9 + 16 = 25 = 5².
There exists exactly one Pythagorean triplet for which a + b + c = 1000.
Find the product abc.
"""
def decompSum(n):
from itertools import combinations
m = (x for x in range(1, n // 2))
    # heuristic pruning: only keep candidates divisible by 3, 4 or 5 (sufficient for n=1000)
    div = [3, 4, 5]
    comb = combinations((x for x in m if any(d for d in div if not x % d)), 3)
for a, b, c in comb:
if a + b + c == n and a != b != c:
yield sorted((a, b, c))
def pythagorean(a, b, c):
return (a ** 2 + b ** 2) == c ** 2
def problem9(n):
for a, b, c in decompSum(n):
if pythagorean(a, b, c):
return a * b * c
print(problem9(1000))
| 19.826087 | 78 | 0.582237 | 0 | 0 | 297 | 0.323529 | 0 | 0 | 0 | 0 | 366 | 0.398693 |
d2201ef9718699e7cd1fdb19d37ed6f30c51724b
| 1,248 |
py
|
Python
|
contrib/automation_tests/orbit_load_presets.py
|
vwbaker/orbit
|
361cc416d1b3ecbc07318275c1bdbc1bb1bc9651
|
[
"BSD-2-Clause"
] | 2 |
2020-07-31T08:18:58.000Z
|
2021-12-26T06:43:07.000Z
|
contrib/automation_tests/orbit_load_presets.py
|
jayant99acharya/orbit
|
f713721e33448a6b0dc8ea4c5aa587855337e32c
|
[
"BSD-2-Clause"
] | 3 |
2022-02-15T02:46:06.000Z
|
2022-02-28T01:28:39.000Z
|
contrib/automation_tests/orbit_load_presets.py
|
jayant99acharya/orbit
|
f713721e33448a6b0dc8ea4c5aa587855337e32c
|
[
"BSD-2-Clause"
] | 1 |
2021-03-10T15:21:19.000Z
|
2021-03-10T15:21:19.000Z
|
"""
Copyright (c) 2020 The Orbit Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
"""
from absl import app
from core.orbit_e2e import E2ETestSuite
from test_cases.connection_window import FilterAndSelectFirstProcess, ConnectToStadiaInstance
from test_cases.symbols_tab import LoadAndVerifyHelloGgpPreset
"""Apply two presets in Orbit using pywinauto.
Before this script is run there needs to be a gamelet reserved and
"hello_ggp_standalone" has to be started. Two presets named
draw_frame_in_hello_ggp_1_52.opr and ggp_issue_frame_token_in_hello_ggp_1_52
(hooking the functions DrawFrame and GgpIssueFrameToken) need to exist in the
preset folder.
The script requires absl and pywinauto. Since pywinauto requires the bitness of
the python installation to match the bitness of the program under test it needs
to be run from 64-bit Python.
"""
def main(argv):
test_cases = [
ConnectToStadiaInstance(),
FilterAndSelectFirstProcess(process_filter="hello_ggp"),
LoadAndVerifyHelloGgpPreset()
]
suite = E2ETestSuite(test_name="Load Preset", test_cases=test_cases)
suite.execute()
if __name__ == '__main__':
app.run(main)
| 32 | 93 | 0.786058 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 738 | 0.591346 |
d22021e322a81ec24f4d2957e1994d21c7ec3963
| 52 |
py
|
Python
|
interrogatio/shortcuts/__init__.py
|
ffaraone/interrogatio
|
8b66e7fe73d14bfda38cc2eb3aecb3291e4afda1
|
[
"BSD-3-Clause"
] | 5 |
2019-02-19T13:10:39.000Z
|
2022-03-04T19:11:04.000Z
|
interrogatio/shortcuts/__init__.py
|
ffaraone/interrogatio
|
8b66e7fe73d14bfda38cc2eb3aecb3291e4afda1
|
[
"BSD-3-Clause"
] | 11 |
2020-03-24T16:58:41.000Z
|
2021-12-14T10:19:17.000Z
|
interrogatio/shortcuts/__init__.py
|
ffaraone/interrogatio
|
8b66e7fe73d14bfda38cc2eb3aecb3291e4afda1
|
[
"BSD-3-Clause"
] | 2 |
2019-05-31T08:36:26.000Z
|
2020-12-18T17:58:50.000Z
|
from interrogatio.shortcuts.dialogs import * # noqa
| 52 | 52 | 0.807692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.115385 |
d220977b89635aa8f8397e7f63e18931cf662876
| 609 |
py
|
Python
|
skit_pipelines/components/extract_tgz.py
|
skit-ai/skit-pipelines
|
d692582107aee81b1bb4aebcf169f7260ac956b5
|
[
"MIT"
] | null | null | null |
skit_pipelines/components/extract_tgz.py
|
skit-ai/skit-pipelines
|
d692582107aee81b1bb4aebcf169f7260ac956b5
|
[
"MIT"
] | 4 |
2022-03-22T14:17:46.000Z
|
2022-03-24T16:22:23.000Z
|
skit_pipelines/components/extract_tgz.py
|
skit-ai/skit-pipelines
|
d692582107aee81b1bb4aebcf169f7260ac956b5
|
[
"MIT"
] | null | null | null |
from typing import Union
import kfp
from kfp.components import InputPath, OutputPath
from skit_pipelines import constants as pipeline_constants
def extract_tgz_archive(
tgz_path: InputPath(str),
output_path: OutputPath(str),
):
import tarfile
from loguru import logger
logger.debug(f"Extracting .tgz archive {tgz_path}.")
tar = tarfile.open(tgz_path)
tar.extractall(path=output_path)
tar.close()
logger.debug(f"Extracted successfully.")
extract_tgz_op = kfp.components.create_component_from_func(
extract_tgz_archive, base_image=pipeline_constants.BASE_IMAGE
)
| 22.555556 | 65 | 0.766831 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 64 | 0.10509 |
d220ea28079528b416680ff1ccebd74a80b37141
| 4,438 |
py
|
Python
|
python_modules/dagster/dagster_tests/core_tests/definitions_tests/test_input_defaults.py
|
bitdotioinc/dagster
|
4fe395a37b206b1a48b956fa5dd72bf698104cca
|
[
"Apache-2.0"
] | 1 |
2021-04-27T19:49:59.000Z
|
2021-04-27T19:49:59.000Z
|
python_modules/dagster/dagster_tests/core_tests/definitions_tests/test_input_defaults.py
|
bitdotioinc/dagster
|
4fe395a37b206b1a48b956fa5dd72bf698104cca
|
[
"Apache-2.0"
] | 7 |
2022-03-16T06:55:04.000Z
|
2022-03-18T07:03:25.000Z
|
python_modules/dagster/dagster_tests/core_tests/definitions_tests/test_input_defaults.py
|
bitdotioinc/dagster
|
4fe395a37b206b1a48b956fa5dd72bf698104cca
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from dagster import (
DagsterInvalidDefinitionError,
InputDefinition,
Nothing,
Optional,
composite_solid,
execute_pipeline,
execute_solid,
lambda_solid,
pipeline,
)
def test_none():
@lambda_solid(input_defs=[InputDefinition("x", Optional[int], default_value=None)])
def none_x(x):
return x
result = execute_solid(none_x)
assert result.output_value() == None
def test_none_infer():
@lambda_solid
def none_x(x=None):
return x
result = execute_solid(none_x)
assert result.output_value() == None
def test_int():
@lambda_solid(input_defs=[InputDefinition("x", Optional[int], default_value=1337)])
def int_x(x):
return x
result = execute_solid(int_x)
assert result.output_value() == 1337
def test_int_infer():
@lambda_solid
def int_x(x=1337):
return x
result = execute_solid(int_x)
assert result.output_value() == 1337
def test_early_fail():
with pytest.raises(
DagsterInvalidDefinitionError,
match="Type check failed for the default_value of InputDefinition x of type Int",
):
@lambda_solid(input_defs=[InputDefinition("x", int, default_value="foo")])
def _int_x(x):
return x
with pytest.raises(
DagsterInvalidDefinitionError,
match="Type check failed for the default_value of InputDefinition x of type String",
):
@lambda_solid(input_defs=[InputDefinition("x", str, default_value=1337)])
def _int_x(x):
return x
# we can't catch bad default_values except for scalars until runtime since the type_check function depends on
# a context that has access to resources etc.
@lambda_solid(input_defs=[InputDefinition("x", Optional[int], default_value="number")])
def bad_default(x):
return x
def test_mismatch():
result = execute_solid(bad_default, raise_on_error=False)
assert result.success == False
assert result.input_events_during_compute[0].step_input_data.type_check_data.success == False
def test_env_precedence():
result = execute_solid(
bad_default,
run_config={"solids": {"bad_default": {"inputs": {"x": 1}}}},
raise_on_error=False,
)
assert result.success == True
assert result.output_value() == 1
def test_input_precedence():
@lambda_solid
def emit_one():
return 1
@pipeline
def pipe():
bad_default(emit_one())
result = execute_pipeline(pipe)
assert result.success
assert result.output_for_solid("bad_default") == 1
def test_nothing():
with pytest.raises(DagsterInvalidDefinitionError):
@lambda_solid(input_defs=[InputDefinition("x", Nothing, default_value=None)])
def _nothing():
pass
def test_composite_outer_default():
@lambda_solid(input_defs=[InputDefinition("x", Optional[int])])
def int_x(x):
return x
@composite_solid(input_defs=[InputDefinition("y", Optional[int], default_value=42)])
def wrap(y):
return int_x(y)
result = execute_solid(wrap)
assert result.success
assert result.output_value() == 42
def test_composite_inner_default():
@lambda_solid(input_defs=[InputDefinition("x", Optional[int], default_value=1337)])
def int_x(x):
return x
@composite_solid(input_defs=[InputDefinition("y", Optional[int])])
def wrap(y):
return int_x(y)
result = execute_solid(wrap)
assert result.success
assert result.output_value() == 1337
def test_composite_precedence_default():
@lambda_solid(input_defs=[InputDefinition("x", Optional[int], default_value=1337)])
def int_x(x):
return x
@composite_solid(input_defs=[InputDefinition("y", Optional[int], default_value=42)])
def wrap(y):
return int_x(y)
result = execute_solid(wrap)
assert result.success
assert result.output_value() == 42
def test_composite_mid_default():
@lambda_solid(input_defs=[InputDefinition("x", Optional[int])])
def int_x(x):
return x
@composite_solid(input_defs=[InputDefinition("y", Optional[int], default_value=42)])
def wrap(y):
return int_x(y)
@composite_solid(input_defs=[InputDefinition("z", Optional[int])])
def outter_wrap(z):
return wrap(z)
result = execute_solid(outter_wrap)
assert result.success
assert result.output_value() == 42
| 25.36 | 109 | 0.677332 | 0 | 0 | 0 | 0 | 1,951 | 0.439612 | 0 | 0 | 408 | 0.091933 |
d2213ea96c7a47974d92d29c00540c2195a53bed
| 69 |
py
|
Python
|
vivid/__init__.py
|
blacktanktop/vivid
|
e85837bcd86575f8a275517250dd026aac3e451f
|
[
"BSD-2-Clause-FreeBSD"
] | 39 |
2020-05-13T18:13:25.000Z
|
2022-03-02T10:46:53.000Z
|
vivid/__init__.py
|
blacktanktop/vivid
|
e85837bcd86575f8a275517250dd026aac3e451f
|
[
"BSD-2-Clause-FreeBSD"
] | 29 |
2020-05-13T18:04:09.000Z
|
2022-02-27T04:43:18.000Z
|
vivid/__init__.py
|
blacktanktop/vivid
|
e85837bcd86575f8a275517250dd026aac3e451f
|
[
"BSD-2-Clause-FreeBSD"
] | 3 |
2020-05-13T19:17:01.000Z
|
2020-10-28T21:29:42.000Z
|
from .core import BaseBlock
from .runner import Runner, create_runner
| 34.5 | 41 | 0.84058 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d2214310a3d3e2da5645867f809ad278174b1b1c
| 473 |
py
|
Python
|
rsa.py
|
overrkill/security
|
cd473013652903d6b21fa83f2c57a07f289078e6
|
[
"MIT"
] | 1 |
2020-05-08T07:32:16.000Z
|
2020-05-08T07:32:16.000Z
|
rsa.py
|
overrkill/security
|
cd473013652903d6b21fa83f2c57a07f289078e6
|
[
"MIT"
] | null | null | null |
rsa.py
|
overrkill/security
|
cd473013652903d6b21fa83f2c57a07f289078e6
|
[
"MIT"
] | null | null | null |
import math as m
# Toy RSA demo: read two primes and a message, derive a key pair, then
# encrypt and decrypt the message.
p = int(input("enter a prime integer p "))
q = int(input("enter a prime integer q "))
num = int(input("enter a number to encrypt "))
n = p * q
z = (p - 1) * (q - 1)
# choose the smallest public exponent e that is coprime with z
for e in range(2, z):
    if m.gcd(e, z) == 1:
        break
# find the private exponent d with d * e = 1 (mod z)
i = 1
while True:
    x = 1 + i * z
    if x % e == 0:
        d = x // e
        break
    i += 1
ctt = pow(num, e, n)
ptt = pow(ctt, d, n)
print("PUBLIC-KEY({},{}) PRIVATE-KEY({},{})".format(n,e,n,d))
print("cipher \n{}".format(ctt))
print("plaintext \n{}".format(ptt))
| 17.518519 | 61 | 0.610994 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 147 | 0.310782 |
d221a299320cc8e2a6ab063e29d7c98428b76ee2
| 831 |
py
|
Python
|
python_2_script/komand_python_2_script/actions/run/action.py
|
GreyNoise-Intelligence/insightconnect-plugins
|
2ba3121d42fd96e1267bb095bc76b962678c1f56
|
[
"MIT"
] | null | null | null |
python_2_script/komand_python_2_script/actions/run/action.py
|
GreyNoise-Intelligence/insightconnect-plugins
|
2ba3121d42fd96e1267bb095bc76b962678c1f56
|
[
"MIT"
] | null | null | null |
python_2_script/komand_python_2_script/actions/run/action.py
|
GreyNoise-Intelligence/insightconnect-plugins
|
2ba3121d42fd96e1267bb095bc76b962678c1f56
|
[
"MIT"
] | null | null | null |
import komand
from .schema import RunInput, RunOutput
# Custom imports below
class Run(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="run", description="Run command", input=RunInput(), output=RunOutput()
)
def construct(self, func):
func += "\n\nreturn run(p)\n"
func = "\t" + "\t".join(func.splitlines(True))
f = (
"""
def python_custom_handler(p={}):
%s
"""
% func
)
self.logger.debug("%s", f)
return f
def run(self, params={}):
"""Run action"""
exec(self.construct(params["function"])) # noqa: B102
result = python_custom_handler(params["input"])
return result or {}
def test(self, params={}):
"""Test action"""
return {}
| 22.459459 | 87 | 0.545126 | 750 | 0.902527 | 0 | 0 | 0 | 0 | 0 | 0 | 178 | 0.2142 |
d221e2f598eaeab4c5c60286a3134659beef83e8
| 636 |
py
|
Python
|
config/configSample.py
|
snipeso/sample_psychopy
|
332cd34cf2c584f9ba01302050964649dd2e5367
|
[
"Linux-OpenIB"
] | null | null | null |
config/configSample.py
|
snipeso/sample_psychopy
|
332cd34cf2c584f9ba01302050964649dd2e5367
|
[
"Linux-OpenIB"
] | 3 |
2021-06-02T00:56:48.000Z
|
2021-09-08T01:35:53.000Z
|
config/configSample.py
|
snipeso/sample_psychopy
|
332cd34cf2c584f9ba01302050964649dd2e5367
|
[
"Linux-OpenIB"
] | null | null | null |
from config.updateConfig import UpdateConfig
sampleCONF = {
"task": {
"name": "sample",
},
"instructions": {
"text": "Give instructions",
"startPrompt": "Press any key to continue. Press q to quit.",
"alarm": "horn.wav",
"questionnaireReminder": "answerQuestionnaire.wav"
},
"stimuli": {
"backgroundColor": {"versionMain": "black", "versionDemo": "blue", "versionDebug": "gray"},
},
}
sampleTriggers = {
"example": 10
}
updateConfig = UpdateConfig()
updateConfig.addContent(sampleCONF)
updateConfig.addTriggers(sampleTriggers)
CONF = updateConfig.getConfig()
| 22.714286 | 99 | 0.630503 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 276 | 0.433962 |
d2222f7d6b30cad257fa79d950b134ab33ead31c
| 2,994 |
py
|
Python
|
oneflow/python/test/onnx/util.py
|
basicv8vc/oneflow
|
2a0480b3f4ff42a59fcae945a3b3bb2d208e37a3
|
[
"Apache-2.0"
] | 1 |
2020-10-13T03:03:40.000Z
|
2020-10-13T03:03:40.000Z
|
oneflow/python/test/onnx/util.py
|
basicv8vc/oneflow
|
2a0480b3f4ff42a59fcae945a3b3bb2d208e37a3
|
[
"Apache-2.0"
] | null | null | null |
oneflow/python/test/onnx/util.py
|
basicv8vc/oneflow
|
2a0480b3f4ff42a59fcae945a3b3bb2d208e37a3
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as flow
import onnxruntime as ort
import onnx
from collections import OrderedDict
import tempfile
import os
import shutil
def convert_to_onnx_and_check(
job_func,
print_outlier=False,
explicit_init=True,
external_data=False,
ort_optimize=True,
opset=None,
):
check_point = flow.train.CheckPoint()
if explicit_init:
# it is a trick to keep check_point.save() from hanging when there is no variable
@flow.global_function(flow.FunctionConfig())
def add_var():
return flow.get_variable(
name="trick",
shape=(1,),
dtype=flow.float,
initializer=flow.random_uniform_initializer(),
)
check_point.init()
flow_weight_dir = tempfile.TemporaryDirectory()
check_point.save(flow_weight_dir.name)
# TODO(daquexian): a more elegant way?
while not os.path.exists(os.path.join(flow_weight_dir.name, "snapshot_done")):
pass
onnx_model_dir = tempfile.TemporaryDirectory()
onnx_model_path = os.path.join(onnx_model_dir.name, "model.onnx")
flow.onnx.export(
job_func,
flow_weight_dir.name,
onnx_model_path,
opset=opset,
external_data=external_data,
)
flow_weight_dir.cleanup()
ort_sess_opt = ort.SessionOptions()
ort_sess_opt.graph_optimization_level = (
ort.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
if ort_optimize
else ort.GraphOptimizationLevel.ORT_DISABLE_ALL
)
sess = ort.InferenceSession(onnx_model_path, sess_options=ort_sess_opt)
onnx_model_dir.cleanup()
assert len(sess.get_outputs()) == 1
assert len(sess.get_inputs()) <= 1
ipt_dict = OrderedDict()
for ipt in sess.get_inputs():
ipt_data = np.random.uniform(low=-10, high=10, size=ipt.shape).astype(
np.float32
)
ipt_dict[ipt.name] = ipt_data
onnx_res = sess.run([], ipt_dict)[0]
oneflow_res = job_func(*ipt_dict.values()).get().numpy()
rtol, atol = 1e-2, 1e-5
if print_outlier:
a = onnx_res.flatten()
b = oneflow_res.flatten()
for i in range(len(a)):
if np.abs(a[i] - b[i]) > atol + rtol * np.abs(b[i]):
print("a[{}]={}, b[{}]={}".format(i, a[i], i, b[i]))
assert np.allclose(onnx_res, oneflow_res, rtol=rtol, atol=atol)
| 33.640449 | 89 | 0.671343 | 0 | 0 | 0 | 0 | 274 | 0.091516 | 0 | 0 | 762 | 0.254509 |
d2233790f33ba2cc856d503da044f2647bccf7b5
| 237 |
py
|
Python
|
pymodule1/Hello1.py
|
debjava/pymodule1
|
8e5f63660f0b835709896cc50ed1147b386422a2
|
[
"MIT"
] | null | null | null |
pymodule1/Hello1.py
|
debjava/pymodule1
|
8e5f63660f0b835709896cc50ed1147b386422a2
|
[
"MIT"
] | null | null | null |
pymodule1/Hello1.py
|
debjava/pymodule1
|
8e5f63660f0b835709896cc50ed1147b386422a2
|
[
"MIT"
] | null | null | null |
'''
Created on Mar 30, 2019
@author: PIKU
'''
def justSayHello():
print("Hello ...")
def getHello():
return "Hello guys"
if __name__ == '__main__':
justSayHello()
x = getHello()
print(x)
| 11.85 | 27 | 0.523207 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 83 | 0.350211 |
d22400f5a3ef8a9ceac1f66b5070a0a5f8fc69d4
| 1,090 |
py
|
Python
|
scripts/Evaluation_Metrics/mean_average.py
|
Mr-TalhaIlyas/CED-Net-Crops-and-Weeds-Segmentation-for-Smart-Farming-Using
|
dc1b645ad1a3a00ef650b170a4ac4c26ab0d687a
|
[
"CC-BY-4.0"
] | null | null | null |
scripts/Evaluation_Metrics/mean_average.py
|
Mr-TalhaIlyas/CED-Net-Crops-and-Weeds-Segmentation-for-Smart-Farming-Using
|
dc1b645ad1a3a00ef650b170a4ac4c26ab0d687a
|
[
"CC-BY-4.0"
] | null | null | null |
scripts/Evaluation_Metrics/mean_average.py
|
Mr-TalhaIlyas/CED-Net-Crops-and-Weeds-Segmentation-for-Smart-Farming-Using
|
dc1b645ad1a3a00ef650b170a4ac4c26ab0d687a
|
[
"CC-BY-4.0"
] | 1 |
2021-03-29T01:49:49.000Z
|
2021-03-29T01:49:49.000Z
|
from Evaluation_Metrics.Average_Precision import ElevenPointInterpolatedAP
from Evaluation_Metrics.New_Metric import TP_FP
def mean_AP(GT,PRED,TH):
Rice_GT=GT[:,:,:,1]
Weed_GT=GT[:,:,:,0]
Rice_P=PRED[:,:,:,1]
Weed_P=PRED[:,:,:,0]
TP_R,FP_R=TP_FP(Rice_GT,Rice_P,TH)
Acc_TPR=[]
s=0
for i in TP_R:
s=i+s
Acc_TPR.append(s)
Acc_FPR=[]
s=0
for i in FP_R:
s=i+s
Acc_FPR.append(s)
precR=[]
recR=[]
for i, j in zip(Acc_TPR, Acc_FPR):
C=i/(i+j)
precR.append(C)
        recR.append(i / 238)  # recall denominator: total ground-truth rice instances in the evaluation set
TP_W,FP_W=TP_FP(Weed_GT,Weed_P,TH)
Acc_TPW=[]
s=0
for i in TP_W:
s=i+s
Acc_TPW.append(s)
Acc_FPW=[]
s=0
for i in FP_W:
s=i+s
Acc_FPW.append(s)
precW=[]
recW=[]
for i, j in zip(Acc_TPW, Acc_FPW):
C=i/(i+j)
precW.append(C)
        recW.append(i / 93)  # recall denominator: total ground-truth weed instances in the evaluation set
AP1= ElevenPointInterpolatedAP(recW, precW)
AP2= ElevenPointInterpolatedAP(recR, precR)
mAP=(AP1+AP2)/2
return AP1,AP2,mAP
| 22.244898 | 74 | 0.547706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d22588027964a9ce9520023258895efa1631a6bd
| 5,001 |
py
|
Python
|
src/peter_sslers/lib/errors.py
|
jvanasco/pyramid_letsencrypt_admin
|
6db37d30ef8028ff978bf6083cdf978fc88a4782
|
[
"MIT"
] | 35 |
2016-04-21T18:55:31.000Z
|
2022-03-30T08:22:43.000Z
|
src/peter_sslers/lib/errors.py
|
jvanasco/pyramid_letsencrypt_admin
|
6db37d30ef8028ff978bf6083cdf978fc88a4782
|
[
"MIT"
] | 8 |
2018-05-23T13:38:49.000Z
|
2021-03-19T21:05:44.000Z
|
src/peter_sslers/lib/errors.py
|
jvanasco/pyramid_letsencrypt_admin
|
6db37d30ef8028ff978bf6083cdf978fc88a4782
|
[
"MIT"
] | 2 |
2016-08-18T21:07:11.000Z
|
2017-01-11T09:47:40.000Z
|
def formstash_to_querystring(formStash):
err = []
for (k, v) in formStash.errors.items():
err.append(("%s--%s" % (k, v)).replace("\n", "+").replace(" ", "+"))
err = sorted(err)
err = "---".join(err)
return err
class _UrlSafeException(Exception):
@property
def as_querystring(self):
return str(self).replace("\n", "+").replace(" ", "+")
class GarfieldMinusGarfield(Exception):
"""
An exception for those odd moments
"""
pass
class InvalidTransition(Exception):
"""raised when a transition is invalid"""
pass
class ObjectExists(Exception):
"""raised when an object already exists, no need to create"""
pass
class ConflictingObject(Exception):
"""
raised when an object already exists
args[0] = tuple(conflicting_object, error_message_string)
"""
pass
class OpenSslError(Exception):
pass
class OpenSslError_CsrGeneration(OpenSslError):
pass
class OpenSslError_InvalidKey(OpenSslError):
pass
class OpenSslError_InvalidCSR(OpenSslError):
pass
class OpenSslError_InvalidCertificate(OpenSslError):
pass
class OpenSslError_VersionTooLow(OpenSslError):
pass
class QueueProcessingError(Exception):
pass
class AcmeError(_UrlSafeException):
pass
class AcmeDuplicateAccount(AcmeError):
"""
args[0] MUST be the duplicate AcmeAccount
"""
pass
class AcmeDuplicateChallenges(AcmeError):
pass
class AcmeDuplicateChallengesExisting(AcmeDuplicateChallenges):
"""the first arg should be a list of the active challenges"""
def __str__(self):
return (
"""One or more domains already have active challenges: %s."""
% ", ".join(
[
"`%s` (%s)" % (ac.domain.domain_name, ac.acme_challenge_type)
for ac in self.args[0]
]
)
)
class AcmeDuplicateChallenge(AcmeDuplicateChallenges):
"""the first arg should be a single active challenge"""
def __str__(self):
return (
"""This domain already has active challenges: `%s`."""
% self.args[0].domain.domain_name
)
class AcmeDuplicateOrderlessDomain(AcmeDuplicateChallenges):
pass
class AcmeServerError(AcmeError):
pass
class AcmeServer404(AcmeServerError):
pass
class AcmeCommunicationError(AcmeError):
pass
class AcmeAuthorizationFailure(AcmeError):
"""raised when an Authorization fails"""
pass
class AcmeOrphanedObject(AcmeError):
pass
class AcmeOrderError(AcmeError):
pass
class AcmeOrderFatal(AcmeOrderError):
"""
The AcmeOrder has a fatal error.
Authorizations should be killed.
"""
pass
class AcmeOrderCreatedError(AcmeOrderError):
"""
If an exception occurs AFTER an AcmeOrder is created, raise this.
It should have two attributes:
args[0] - AcmeOrder
args[1] - original exception
"""
def __str__(self):
return "An AcmeOrder-{0} was created but errored".format(self.args[0])
@property
def acme_order(self):
return self.args[0]
@property
def original_exception(self):
return self.args[1]
class AcmeOrderProcessing(AcmeOrderCreatedError):
"""
raise when the AcmeOrder is `processing` (RFC status)
this should generally indicate the user should retry their action
"""
def __str__(self):
return "An AcmeOrder-{0} was created. The order is still processing.".format(
self.args[0]
)
class AcmeOrderValid(AcmeOrderCreatedError):
"""
raise when the AcmeOrder is `valid` (RFC status)
this should generally indicate the user should retry their action
"""
def __str__(self):
return "An AcmeOrder-{0} was created. The order is valid and the CertificateSigned can be downloaded.".format(
self.args[0]
)
class AcmeMissingChallenges(AcmeError):
"""There are no Acme Challenges"""
pass
class AcmeChallengeFailure(AcmeError):
pass
class AcmeDomainsInvalid(AcmeError):
def __str__(self):
return "The following Domains are invalid: {0}".format(", ".join(self.args[0]))
class AcmeDomainsBlocklisted(AcmeDomainsInvalid):
def __str__(self):
return "The following Domains are blocklisted: {0}".format(
", ".join(self.args[0])
)
class AcmeDomainsRequireConfigurationAcmeDNS(AcmeDomainsInvalid):
def __str__(self):
return "The following Domains are not configured with ACME-DNS: {0}".format(
", ".join(self.args[0])
)
class DomainVerificationError(AcmeError):
pass
class DisplayableError(_UrlSafeException):
pass
class InvalidRequest(_UrlSafeException):
"""
raised when an end-user wants to do something invalid/not-allowed
"""
pass
# class TransitionError(_UrlSafeException):
# pass
# class OperationsContextError(_UrlSafeException):
# pass
| 20.084337 | 118 | 0.659868 | 4,536 | 0.907019 | 0 | 0 | 235 | 0.046991 | 0 | 0 | 1,770 | 0.353929 |
d22743bfb3140f3685546e3e673c4427883f8ae7
| 771 |
py
|
Python
|
tips-lib/tools/ordo/cc.py
|
cosmoss-jigu/tips
|
386b992894363b535876020d1e60aa95f3d05f7c
|
[
"Apache-2.0"
] | 13 |
2021-07-16T07:52:15.000Z
|
2022-02-13T10:52:46.000Z
|
tips-lib/tools/ordo/cc.py
|
cosmoss-jigu/tips
|
386b992894363b535876020d1e60aa95f3d05f7c
|
[
"Apache-2.0"
] | null | null | null |
tips-lib/tools/ordo/cc.py
|
cosmoss-jigu/tips
|
386b992894363b535876020d1e60aa95f3d05f7c
|
[
"Apache-2.0"
] | 5 |
2021-08-09T13:16:23.000Z
|
2022-03-09T08:50:19.000Z
|
#!/usr/bin/env python3
import sys
offset_table = []
def get_freq(a):
v = {}
for i in a:
if i in v:
v[i] += 1
else:
v.update({i: 1})
return v
def process_file(filename):
freq = {}
a = []
median = 0
with open(filename, "r") as f:
a = f.readlines()
for i in range(0, len(a)):
a[i] = int(a[i])
a.sort()
s = int(len(a))
    if s % 2 == 0:
        median = (a[s // 2 - 1] + a[s // 2]) / 2
    else:
        median = a[s // 2]
print("min: %d max: %d median: %d" % (a[0], a[int(s)-1], median))
def main():
if len(sys.argv) < 2:
print("%s filename" % sys.argv[0])
return -1
process_file(sys.argv[1])
if __name__ == '__main__':
main()
| 17.133333 | 69 | 0.460441 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.098573 |
d2293531f48224d20922b0077cb19bb8cfd631bb
| 18,212 |
py
|
Python
|
cognitive_services/__main__.py
|
cleveranjos/Rapid-ML-Gateway
|
10a14abfce3351791331642c47eddfbf622e76d2
|
[
"MIT"
] | 3 |
2020-07-15T19:45:31.000Z
|
2020-09-30T16:15:48.000Z
|
cognitive_services/__main__.py
|
cleveranjos/Rapid-ML-Gateway
|
10a14abfce3351791331642c47eddfbf622e76d2
|
[
"MIT"
] | 12 |
2020-07-15T17:00:24.000Z
|
2021-01-19T21:02:00.000Z
|
cognitive_services/__main__.py
|
cleveranjos/Rapid-ML-Gateway
|
10a14abfce3351791331642c47eddfbf622e76d2
|
[
"MIT"
] | 2 |
2020-07-15T18:59:02.000Z
|
2020-10-07T17:22:52.000Z
|
#! /usr/bin/env python3
import os
import sys
PARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(PARENT_DIR, 'generated'))
sys.path.append(os.path.join(PARENT_DIR, 'helper_functions'))
import argparse
import json
import logging
import logging.config
import inspect, time
from websocket import create_connection
import socket
import re
from concurrent import futures
from datetime import datetime
import requests, uuid
import configparser
import ServerSideExtension_pb2 as SSE
import grpc
import qlist
import cognitive_services as cs
from google.protobuf.json_format import MessageToDict
from ssedata import ArgType, FunctionType, ReturnType
# import helper .py files
#from scripteval import ScriptEval
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
config = configparser.ConfigParser()
class ExtensionService(SSE.ConnectorServicer):
"""
    An SSE plugin exposing Azure Cognitive Services functions (translation, language
    detection, key phrase extraction, sentiment analysis) to Qlik.
"""
def __init__(self, funcdef_file):
"""
Class initializer.
:param funcdef_file: a function definition JSON file
"""
self._function_definitions = funcdef_file
#self.ScriptEval = ScriptEval()
os.makedirs('logs', exist_ok=True)
log_file = os.path.join(os.path.dirname(
os.path.dirname(os.path.abspath(__file__))), 'logger.config')
logging.config.fileConfig(log_file)
logging.info(log_file)
logging.info(self._function_definitions)
logging.info('Logging enabled')
function_name = "none"
@property
def function_definitions(self):
"""
:return: json file with function definitions
"""
return self._function_definitions
@property
def functions(self):
"""
:return: Mapping of function id and implementation
"""
return {
0: '_rest_single',
}
@staticmethod
def _get_function_id(context):
"""
Retrieve function id from header.
:param context: context
:return: function id
"""
metadata = dict(context.invocation_metadata())
header = SSE.FunctionRequestHeader()
header.ParseFromString(metadata['qlik-functionrequestheader-bin'])
return header.functionId
@staticmethod
def _rest_single(request, context):
"""
Rest using single variable
"""
logging.info('Entering {} TimeStamp: {}' .format(function_name, datetime.now().strftime("%H:%M:%S.%f")))
        bCache = config.get(q_function_name, 'cache')
        logging.debug("Caching is set to {}" .format(bCache))
        if (bCache.lower() == "true"):
            logging.info("Caching ****Enabled**** for {}" .format(q_function_name))
else:
logging.info("Caching ****Disabled**** for {}" .format(q_function_name))
md = (('qlik-cache', 'no-store'),)
context.send_initial_metadata(md)
response_rows = []
request_counter = 1
#if(q_function_name=='translate'):
endpoint = config.get(q_function_name, 'endpoint')
logging.debug("endpoint is set to {}" .format(endpoint))
key = config.get(q_function_name, 'key')
logging.debug("key is set to {}" .format(key))
region = config.get(q_function_name, 'region')
logging.debug("region is set to {}" .format(region))
for request_rows in request:
logging.debug('Printing Request Rows - Request Counter {}' .format(request_counter))
request_counter = request_counter +1
for row in request_rows.rows:
param = [d.strData for d in row.duals]
logging.debug("The incoming parameter {}" .format(param))
result =""
if (len(param[0])==0):
param[0] = "NA"
if(q_function_name=='translate'):
language = '&to=' + param[1]
logging.debug('Showing Language to Translate to : {}'.format(language))
client = cs.translate(key, region, endpoint)
finished_url = client[1] +language
logging.debug('Showing finished url to : {}'.format(finished_url))
input_text = param[0].replace('"','\\').replace(',','\\,')
body = [{'text' : input_text}]
logging.debug('Showing message body: {}'.format(body))
                    # use a distinct name so the incoming gRPC request iterator is not shadowed
                    http_response = requests.post(finished_url, headers=client[0], json=body)
                    resp = http_response.json()
logging.debug('Show Payload Response as Text: {}'.format(resp))
if(param[-1] =='score'):
result = str(resp[0]['detectedLanguage']['score'])
logging.debug('Score: {}'.format(result))
print(result)
print(type(result))
#duals = iter([SSE.Dual(strData=result)])
if(param[-1] =='text'):
result = resp[0]['translations'][0]['text']
print(type(result))
logging.debug('Translation: {}'.format(result))
elif(q_function_name=='language_detection'):
client = cs.authenticate_client(key, endpoint)
result =cs.language_detection(client, param)
logging.debug('language detection: {}'.format(result))
elif(q_function_name=='key_phrase_extraction'):
client = cs.authenticate_client(key, endpoint)
output =cs.key_phrase_extraction(client, param)
result = output[0]
logging.debug('key_phrase_extraction: {}'.format(result))
elif(q_function_name=='sentiment_analysis'):
client = cs.authenticate_client(key, endpoint)
output =cs.sentiment_analysis(client, param)
print(output)
print(type(output))
if(param[-1] =='sentiment'):
result = output.sentiment
elif(param[-1] =='positive_score'):
result = str(output.confidence_scores.positive)
elif(param[-1] =='negative_score'):
result = str(output.confidence_scores.negative)
elif(param[-1] =='neutral_score'):
result = str(output.confidence_scores.neutral)
else:
result = output.sentiment
logging.debug('key_phrase_extraction: {}'.format(result))
else:
result=""
duals = iter([SSE.Dual(strData=result)])
logging.debug('result {}' .format(result))
response_rows.append(SSE.Row(duals=duals))
yield SSE.BundledRows(rows=response_rows)
logging.info('Exiting {} TimeStamp: {}' .format(function_name, datetime.now().strftime("%H:%M:%S.%f")))
@staticmethod
def _cache(request, context):
"""
Cache enabled. Add the datetime stamp to the end of each string value.
:param request: iterable sequence of bundled rows
:param context: not used.
:return: string
"""
# Iterate over bundled rows
for request_rows in request:
# Iterate over rows
for row in request_rows.rows:
# Retrieve string value of parameter and append to the params variable
# Length of param is 1 since one column is received, the [0] collects the first value in the list
param = [d.strData for d in row.duals][0]
# Join with current timedate stamp
result = param + ' ' + datetime.now().isoformat()
# Create an iterable of dual with the result
duals = iter([SSE.Dual(strData=result)])
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=[SSE.Row(duals=duals)])
@staticmethod
def _no_cache(request, context):
"""
Cache disabled. Add the datetime stamp to the end of each string value.
:param request:
:param context: used for disabling the cache in the header.
:return: string
"""
# Disable caching.
md = (('qlik-cache', 'no-store'),)
context.send_initial_metadata(md)
# Iterate over bundled rows
for request_rows in request:
# Iterate over rows
for row in request_rows.rows:
# Retrieve string value of parameter and append to the params variable
# Length of param is 1 since one column is received, the [0] collects the first value in the list
param = [d.strData for d in row.duals][0]
# Join with current timedate stamp
result = param + ' ' + datetime.now().isoformat()
# Create an iterable of dual with the result
duals = iter([SSE.Dual(strData=result)])
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=[SSE.Row(duals=duals)])
def _get_call_info(self, context):
"""
        Retrieve useful information for the function call.
:param context: context
:return: string containing header info
"""
# Get metadata for the call from the context
metadata = dict(context.invocation_metadata())
# Get the function ID
func_header = SSE.FunctionRequestHeader()
func_header.ParseFromString(metadata['qlik-functionrequestheader-bin'])
func_id = func_header.functionId
# Get the common request header
common_header = SSE.CommonRequestHeader()
common_header.ParseFromString(metadata['qlik-commonrequestheader-bin'])
# Get capabilities
if not hasattr(self, 'capabilities'):
self.capabilities = self.GetCapabilities(None, context)
# Get the name of the capability called in the function
capability = [function.name for function in self.capabilities.functions if function.functionId == func_id][0]
# Get the user ID using a regular expression
match = re.match(r"UserDirectory=(?P<UserDirectory>\w*)\W+UserId=(?P<UserId>\w*)", common_header.userId, re.IGNORECASE)
if match:
userId = match.group('UserDirectory') + '/' + match.group('UserId')
else:
userId = common_header.userId
# Get the app ID
appId = common_header.appId
# Get the call's origin
peer = context.peer()
return "{0} - Capability '{1}' called by user {2} from app {3}".format(peer, capability, userId, appId)
def EvaluateScript(self, request, context):
"""
This plugin supports full script functionality, that is, all function types and all data types.
:param request:
:param context:
:return:
"""
logging.debug('In EvaluateScript: Main')
# Parse header for script request
metadata = dict(context.invocation_metadata())
        logging.debug('Metadata {}'.format(metadata))
header = SSE.ScriptRequestHeader()
header.ParseFromString(metadata['qlik-scriptrequestheader-bin'])
logging.debug('Header is : {}'.format(header))
logging.debug('Request is : {}' .format(request))
logging.debug("Context is: {}" .format(context))
return self.ScriptEval.EvaluateScript(header, request, context)
@staticmethod
def _echo_table(request, context):
"""
Echo the input table.
:param request:
:param context:
:return:
"""
for request_rows in request:
response_rows = []
for row in request_rows.rows:
response_rows.append(row)
yield SSE.BundledRows(rows=response_rows)
def GetCapabilities(self, request, context):
"""
Get capabilities.
Note that either request or context is used in the implementation of this method, but still added as
parameters. The reason is that gRPC always sends both when making a function call and therefore we must include
them to avoid error messages regarding too many parameters provided from the client.
:param request: the request, not used in this method.
:param context: the context, not used in this method.
:return: the capabilities.
"""
logging.info('GetCapabilities')
# Create an instance of the Capabilities grpc message
# Enable(or disable) script evaluation
# Set values for pluginIdentifier and pluginVersion
capabilities = SSE.Capabilities(allowScript=True,
pluginIdentifier='Qlik Rapid API Gateway - Partner Engineering',
pluginVersion='v0.1.0')
# If user defined functions supported, add the definitions to the message
with open(self.function_definitions) as json_file:
# Iterate over each function definition and add data to the capabilities grpc message
for definition in json.load(json_file)['Functions']:
function = capabilities.functions.add()
function.name = definition['Name']
function.functionId = definition['Id']
function.functionType = definition['Type']
function.returnType = definition['ReturnType']
# Retrieve name and type of each parameter
for param_name, param_type in sorted(definition['Params'].items()):
function.params.add(name=param_name, dataType=param_type)
logging.info('Adding to capabilities: {}({})'.format(function.name,
[p.name for p in function.params]))
return capabilities
def ExecuteFunction(self, request_iterator, context):
"""
Execute function call.
:param request_iterator: an iterable sequence of Row.
:param context: the context.
:return: an iterable sequence of Row.
"""
func_id = self._get_function_id(context)
logging.info(self._get_call_info(context))
# Call corresponding function
logging.info('ExecuteFunctions (functionId: {})' .format(func_id))
#self.functions[func_id]))
current_function_def = (json.load(open(self.function_definitions))['Functions'])[func_id]
logging.debug(current_function_def)
global q_function_name
q_function_name = current_function_def["Name"]
logging.debug('Logical Method Called is: {}' .format(q_function_name))
current_qrap_type = current_function_def["QRAP_Type"]
qrag_function_name ='_' + current_qrap_type
logging.debug('This is the type of QRAG Method Name: {}' .format(current_qrap_type))
logging.debug('Physical Method Called is: {}' .format(qrag_function_name))
        # Convert the logical method name (QRAP_Type) to the physical handler method
qrag_id = qlist.find_key(self.functions, qrag_function_name)
logging.debug('QRAG ID: {}' .format(qrag_id))
global function_name
function_name = self.functions[qrag_id]
return getattr(self, self.functions[qrag_id])(request_iterator, context)
def Serve(self, port, pem_dir):
"""
Sets up the gRPC Server with insecure connection on port
:param port: port to listen on.
:param pem_dir: Directory including certificates
:return: None
"""
# Create gRPC server
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
SSE.add_ConnectorServicer_to_server(self, server)
if pem_dir:
# Secure connection
with open(os.path.join(pem_dir, 'sse_server_key.pem'), 'rb') as f:
private_key = f.read()
with open(os.path.join(pem_dir, 'sse_server_cert.pem'), 'rb') as f:
cert_chain = f.read()
with open(os.path.join(pem_dir, 'root_cert.pem'), 'rb') as f:
root_cert = f.read()
credentials = grpc.ssl_server_credentials([(private_key, cert_chain)], root_cert, True)
server.add_secure_port('[::]:{}'.format(port), credentials)
logging.info('*** Running server in secure mode on port: {} ***'.format(port))
else:
# Insecure connection
server.add_insecure_port('[::]:{}'.format(port))
logging.info('*** Running server in insecure mode on port: {} ***'.format(port))
# Start gRPC server
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
conf_file = os.path.join(os.path.dirname(
os.path.abspath(__file__)), 'config', 'qrag.ini')
#config.read(os.path.join(os.path.dirname(__file__), 'config', 'qrag.ini'))
logging.debug(conf_file)
logging.info('Location of qrag.ini {}' .format(conf_file))
config.read(conf_file)
port = config.get('base', 'port')
parser.add_argument('--port', nargs='?', default=port)
parser.add_argument('--pem_dir', nargs='?')
parser.add_argument('--definition_file', nargs='?', default='functions.json')
args = parser.parse_args()
# need to locate the file when script is called from outside it's location dir.
def_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), args.definition_file)
#print(def_file)
calc = ExtensionService(def_file)
logging.info('*** Server Configurations Port: {}, Pem_Dir: {}, def_file {} TimeStamp: {} ***'.format(args.port, args.pem_dir, def_file,datetime.now().isoformat()))
calc.Serve(args.port, args.pem_dir)
| 43.361905 | 167 | 0.597024 | 16,297 | 0.89485 | 7,326 | 0.402262 | 8,121 | 0.445915 | 0 | 0 | 6,150 | 0.337689 |
d2296fe0c90ef20ef9cee97c8335c9349c8e3dec
| 1,534 |
py
|
Python
|
spirecomm/spire/card.py
|
ysjin94/Slaying_the_Spire_AI
|
172b2e44b9da81f35cbdfa1ee0fd2a4ecbc66634
|
[
"MIT"
] | null | null | null |
spirecomm/spire/card.py
|
ysjin94/Slaying_the_Spire_AI
|
172b2e44b9da81f35cbdfa1ee0fd2a4ecbc66634
|
[
"MIT"
] | null | null | null |
spirecomm/spire/card.py
|
ysjin94/Slaying_the_Spire_AI
|
172b2e44b9da81f35cbdfa1ee0fd2a4ecbc66634
|
[
"MIT"
] | 2 |
2020-07-13T18:21:46.000Z
|
2020-08-04T21:18:10.000Z
|
from enum import Enum
class CardType(Enum):
ATTACK = 1
SKILL = 2
POWER = 3
STATUS = 4
CURSE = 5
class CardRarity(Enum):
BASIC = 1
COMMON = 2
UNCOMMON = 3
RARE = 4
SPECIAL = 5
CURSE = 6
class Card:
def __init__(self, card_id, name, card_type, rarity, upgrades=0, has_target=False, cost=0, uuid="", misc=0, price=0, is_playable=False, exhausts=False):
self.card_id = card_id
self.name = name
self.type = card_type
self.rarity = rarity
self.upgrades = upgrades
self.has_target = has_target
self.cost = cost
self.uuid = uuid
self.misc = misc
self.price = price
self.is_playable = is_playable
self.exhausts = exhausts
def __repr__(self):
return self.name
@classmethod
def from_json(cls, json_object):
return cls(
card_id=json_object["id"],
name=json_object["name"],
card_type=CardType[json_object["type"]],
rarity=CardRarity[json_object["rarity"]],
upgrades=json_object["upgrades"],
has_target=json_object["has_target"],
cost=json_object["cost"],
uuid=json_object["uuid"],
misc=json_object.get("misc", 0),
price=json_object.get("price", 0),
is_playable=json_object.get("is_playable", False),
exhausts=json_object.get("exhausts", False)
)
def __eq__(self, other):
return self.uuid == other.uuid
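# Illustrative input for Card.from_json (keys mirror the lookups above; values are made up):
#   {"id": "Strike_R", "name": "Strike", "type": "ATTACK", "rarity": "BASIC",
#    "upgrades": 0, "has_target": True, "cost": 1, "uuid": "some-uuid"}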
| 26.448276 | 156 | 0.582138 | 1,503 | 0.979791 | 0 | 0 | 646 | 0.421121 | 0 | 0 | 96 | 0.062581 |
d229bf33f366491dd645f2b26164b3b0a59e7d44
| 114 |
py
|
Python
|
src/typeDefs/lineFlowSumm.py
|
nagasudhirpulla/wrldc_scada_mumbai_dashboard
|
bc107ef47568781b588316f0c5c0c0d2a08adac8
|
[
"MIT"
] | null | null | null |
src/typeDefs/lineFlowSumm.py
|
nagasudhirpulla/wrldc_scada_mumbai_dashboard
|
bc107ef47568781b588316f0c5c0c0d2a08adac8
|
[
"MIT"
] | null | null | null |
src/typeDefs/lineFlowSumm.py
|
nagasudhirpulla/wrldc_scada_mumbai_dashboard
|
bc107ef47568781b588316f0c5c0c0d2a08adac8
|
[
"MIT"
] | null | null | null |
from typing import TypedDict
class ILineFlowSumm(TypedDict):
inst: dict
maxFlow: dict
maxTime: dict
| 14.25 | 31 | 0.719298 | 82 | 0.719298 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d22a005c486e400a70fdda2609e473e34cb98a87
| 1,280 |
py
|
Python
|
eval/user.py
|
hscspring/chatbot
|
9d0bc91db0d8834a1a75cba3edcd3133191e80af
|
[
"Apache-2.0"
] | null | null | null |
eval/user.py
|
hscspring/chatbot
|
9d0bc91db0d8834a1a75cba3edcd3133191e80af
|
[
"Apache-2.0"
] | null | null | null |
eval/user.py
|
hscspring/chatbot
|
9d0bc91db0d8834a1a75cba3edcd3133191e80af
|
[
"Apache-2.0"
] | null | null | null |
import os
import random
import numpy as np
import torch
from chatbot_agent.nlu import BERTNLU
from chatbot_agent.policy.rule import RulePolicy
from chatbot_agent.nlg import TemplateNLG
from chatbot_agent.agent import PipelineAgent
from chatbot_agent.analyzer import Analyzer
def set_seed(r_seed):
random.seed(r_seed)
np.random.seed(r_seed)
torch.manual_seed(r_seed)
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print("root: ", root)
user_nlu = BERTNLU(
model_dir=os.path.join(root, "model/sys_context"),
vocab_dir=os.path.join(root, "data/agent/vocab"),
)
user_dst = None
user_policy = RulePolicy(
goal_model_path=os.path.join(root, "model/goal/new_goal_model.pkl"),
db_path=os.path.join(root, "data/agent/db"),
vocab_path=os.path.join(root, "data/agent/vocab/"),
character="usr",
)
user_nlg = TemplateNLG(
is_user=True,
template_dir=os.path.join(root, "data/agent/template")
)
user_agent = PipelineAgent(user_nlu, user_dst, user_policy, user_nlg, name='user')
analyzer = Analyzer(
db_path=os.path.join(root, "data/agent/db"),
user_agent=user_agent,
dataset='multiwoz'
)
text = "How about rosa's bed and breakfast ? Their postcode is cb22ha."
nlu_res = user_nlu.predict(text)
print(nlu_res)
| 26.666667 | 82 | 0.742188 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 231 | 0.180469 |
d22b9934bc74f943c4699852c43f6be8c7246c45
| 3,027 |
py
|
Python
|
insights/parsers/tests/test_zipl_conf.py
|
lhuett/insights-core
|
1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8
|
[
"Apache-2.0"
] | 121 |
2017-05-30T20:23:25.000Z
|
2022-03-23T12:52:15.000Z
|
insights/parsers/tests/test_zipl_conf.py
|
lhuett/insights-core
|
1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8
|
[
"Apache-2.0"
] | 1,977 |
2017-05-26T14:36:03.000Z
|
2022-03-31T10:38:53.000Z
|
insights/parsers/tests/test_zipl_conf.py
|
lhuett/insights-core
|
1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8
|
[
"Apache-2.0"
] | 244 |
2017-05-30T20:22:57.000Z
|
2022-03-26T10:09:39.000Z
|
from insights.parsers.zipl_conf import ZiplConf
from insights.tests import context_wrap
from insights.parsers import ParseException
import pytest
ZIPL_CONF = """
[defaultboot]
defaultauto
prompt=1
timeout=5
default=linux
target=/boot
[linux]
image=/boot/vmlinuz-3.10.0-693.el7.s390x
ramdisk=/boot/initramfs-3.10.0-693.el7.s390x.img
parameters="root=/dev/mapper/rhel_gss5-root crashkernel=auto rd.dasd=0.0.0100 rd.dasd=0.0.0101 rd.dasd=0.0.0102 rd.lvm.lv=rhel_gss5/root rd.lvm.lv=rhel_gss5/swap net.ifnames=0 rd.znet=qeth,0.0.0600,0.0.0601,0.0.0602,layer2=0,portname=gss5,portno=0 LANG=en_US.UTF-8"
[linux-0-rescue-a27932c8d57248e390cee3798bbd3709]
image=/boot/vmlinuz-0-rescue-a27932c8d57248e390cee3798bbd3709
ramdisk=/boot/initramfs-0-rescue-a27932c8d57248e390cee3798bbd3709.img
parameters="root=/dev/mapper/rhel_gss5-root crashkernel=auto rd.dasd=0.0.0100 rd.dasd=0.0.0101 rd.dasd=0.0.0102 rd.lvm.lv=rhel_gss5/root rd.lvm.lv=rhel_gss5/swap net.ifnames=0 rd.znet=qeth,0.0.0600,0.0.0601,0.0.0602,layer2=0,portname=gss5,portno=0"
[other]
image=/boot/vmlinuz
ramdisk=/boot/initramfs.img
parameters="root=/dev/mapper/rhel_gss5-root crashkernel=auto rd.dasd=0.0.0100
# Configuration for dumping to SCSI disk
# Separate IPL and dump partitions
[dumpscsi]
target=/boot
dumptofs=/dev/sda2
parameters="dump_dir=/mydumps dump_compress=none dump_mode=auto"
# Menu containing two DASD boot configurations
:menu1
1=linux
2=linux-0-rescue-a27932c8d57248e390cee3798bbd3709
default=1
prompt=1
timeout=30
""".strip()
ZIPL_CONF_INVALID = """
prompt=1
timeout=5
default=linux
[linux]
image=/boot/vmlinuz-3.10.0-693.el7.s390x
ramdisk=/boot/initramfs-3.10.0-693.el7.s390x.img
parameters="root=/dev/mapper/rhel_gss5-root crashkernel=auto rd.dasd=0.0.0100 rd.dasd=0.0.0101 rd.dasd=0.0.0102 rd.lvm.lv=rhel_gss5/root rd.lvm.lv=rhel_gss5/swap net.ifnames=0 rd.znet=qeth,0.0.0600,0.0.0601,0.0.0602,layer2=0,portname=gss5,portno=0 LANG=en_US.UTF-8"
""".strip()
def test_zipl_conf():
res = ZiplConf(context_wrap(ZIPL_CONF))
assert res.get('linux').get('image') == "/boot/vmlinuz-3.10.0-693.el7.s390x"
assert res['linux']['image'] == "/boot/vmlinuz-3.10.0-693.el7.s390x"
assert res[':menu1']['1'] == 'linux'
assert 'defaultauto' in res['defaultboot']
assert res['defaultboot']['defaultauto'] is True
assert res['other']['parameters'] == '"root=/dev/mapper/rhel_gss5-root crashkernel=auto rd.dasd=0.0.0100'
assert res.images == {
'linux': '/boot/vmlinuz-3.10.0-693.el7.s390x',
'linux-0-rescue-a27932c8d57248e390cee3798bbd3709': '/boot/vmlinuz-0-rescue-a27932c8d57248e390cee3798bbd3709',
'other': '/boot/vmlinuz'
}
assert res.dumptofses == {'dumpscsi': '/dev/sda2'}
def test_zipl_conf_invalid():
with pytest.raises(ParseException) as pe:
ZiplConf(context_wrap(ZIPL_CONF_INVALID))
assert "Invalid zipl configuration file is found." in str(pe)
| 41.465753 | 269 | 0.720846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,280 | 0.753221 |
d22d10f837e5ad288e126f1c5e79e0d962cba280
| 6,560 |
py
|
Python
|
tests/services/http_service.py
|
the-gw/tomodachi
|
a1e2efc1abe6f4e2de4a580e58184323660b4299
|
[
"MIT"
] | null | null | null |
tests/services/http_service.py
|
the-gw/tomodachi
|
a1e2efc1abe6f4e2de4a580e58184323660b4299
|
[
"MIT"
] | null | null | null |
tests/services/http_service.py
|
the-gw/tomodachi
|
a1e2efc1abe6f4e2de4a580e58184323660b4299
|
[
"MIT"
] | null | null | null |
import asyncio
import os
import signal
import tomodachi
from typing import Any, Dict, Tuple, Callable, Union # noqa
from aiohttp import web
from tomodachi.transport.http import http, http_error, http_static, websocket, Response, RequestHandler
from tomodachi.discovery.dummy_registry import DummyRegistry
async def middleware_function(func: Callable, service: Any, request: web.Request, context: Dict, *args: Any, **kwargs: Any) -> Any:
if request.headers.get('X-Use-Middleware') == 'Set':
service.middleware_called = True
if request.headers.get('X-Use-Middleware') == 'Before':
return 'before'
return_value = await func()
if request.headers.get('X-Use-Middleware') == 'After':
return 'after'
return return_value
@tomodachi.service
class HttpService(tomodachi.Service):
name = 'test_http'
discovery = [DummyRegistry]
options = {
'http': {
'port': None,
'access_log': True,
'real_ip_from': '127.0.0.1'
}
}
uuid = None
closer = asyncio.Future() # type: Any
http_middleware = [middleware_function]
slow_request = False
middleware_called = False
function_triggered = False
websocket_connected = False
websocket_received_data = None
@http('GET', r'/test/?')
async def test(self, request: web.Request) -> str:
return_value = 'test'
return return_value
@http('GET', r'/test/(?P<id>[^/]+?)/?')
async def test_with_id(self, request: web.Request, id: str) -> str:
return 'test {}'.format(id)
@http('GET', r'/middleware-before/?')
async def middleware_before(self, request: web.Request) -> str:
self.function_triggered = True
return 'test'
@http('GET', r'/slow/?')
async def test_slow(self, request: web.Request) -> str:
await asyncio.sleep(2.0)
self.slow_request = True
return 'test'
@http(['GET'], r'/dict/?')
async def test_dict(self, request: web.Request) -> Dict:
return {
'status': 200,
'body': 'test dict',
'headers': {
'X-Dict': 'test'
}
}
@http('GET', r'/tuple/?')
async def test_tuple(self, request: web.Request) -> Tuple:
return (200, 'test tuple', {
'X-Tuple': 'test'
})
@http('GET', r'/aiohttp/?')
async def test_aiohttp(self, request: web.Request) -> web.Response:
return web.Response(body='test aiohttp', status=200, headers={
'X-Aiohttp': 'test'
})
@http('GET', r'/response/?')
async def test_response_object(self, request: web.Request) -> Response:
return Response(body='test tomodachi response', status=200, headers={
'X-Tomodachi-Response': 'test'
})
@http('GET', r'/exception/?')
async def test_exception(self, request: web.Request) -> None:
raise Exception('test')
@http('GET', r'/slow-exception/?')
async def test_slow_exception(self, request: web.Request) -> None:
await asyncio.sleep(2.0)
raise Exception('test')
@http('GET', r'/test-weird-content-type/?')
async def test_weird_content_type(self, request: web.Request) -> web.Response:
return web.Response(body='test', status=200, headers={
'Content-Type': 'text/plain; '
})
@http('GET', r'/test-charset/?')
async def test_charset(self, request: web.Request) -> web.Response:
return web.Response(body='test', status=200, headers={
'Content-Type': 'text/plain; charset=utf-8'
})
@http('GET', r'/test-charset-encoding-correct/?')
async def test_charset_encoding_correct(self, request: web.Request) -> Response:
return Response(body='test \xe5\xe4\xf6', status=200, headers={
'Content-Type': 'text/plain; charset=iso-8859-1'
})
@http('GET', r'/test-charset-encoding-error/?')
async def test_charset_encoding_error(self, request: web.Request) -> Response:
return Response(body='test 友達', status=200, headers={
'Content-Type': 'text/plain; charset=iso-8859-1'
})
@http('GET', r'/test-charset-invalid/?')
async def test_charset_invalid(self, request: web.Request) -> Response:
return Response(body='test', status=200, headers={
'Content-Type': 'text/plain; charset=utf-9'
})
@http('GET', r'/empty-data/?')
async def empty_data(self, request: web.Request) -> str:
return ''
@http('GET', r'/byte-data/?')
async def byte_data(self, request: web.Request) -> bytes:
return b'test \xc3\xa5\xc3\xa4\xc3\xb6'
@http('GET', r'/none-data/?')
async def none_data(self, request: web.Request) -> None:
return None
@http('GET', r'/forwarded-for/?')
async def forwarded_for(self, request: web.Request) -> str:
return RequestHandler.get_request_ip(request) or ''
@http('GET', r'/authorization/?')
async def authorization(self, request: web.Request) -> str:
return request._cache.get('auth').login if request._cache.get('auth') else ''
@http_static('../static_files', r'/static/')
async def static_files_filename_append(self) -> None:
pass
@http_static('../static_files', r'/download/(?P<filename>[^/]+?)/image')
async def static_files_filename_existing(self) -> None:
pass
@http_error(status_code=404)
async def test_404(self, request: web.Request) -> str:
return 'test 404'
@websocket(r'/websocket-simple')
async def websocket_simple(self, websocket: web.WebSocketResponse) -> None:
self.websocket_connected = True
@websocket(r'/websocket-data')
async def websocket_data(self, websocket: web.WebSocketResponse) -> Callable:
async def _receive(data: Union[str, bytes]) -> None:
self.websocket_received_data = data
return _receive
async def _started_service(self) -> None:
async def _async() -> None:
async def sleep_and_kill() -> None:
await asyncio.sleep(10.0)
if not self.closer.done():
self.closer.set_result(None)
task = asyncio.ensure_future(sleep_and_kill())
await self.closer
if not task.done():
task.cancel()
os.kill(os.getpid(), signal.SIGINT)
asyncio.ensure_future(_async())
def stop_service(self) -> None:
if not self.closer.done():
self.closer.set_result(None)
| 33.989637 | 131 | 0.611738 | 5,778 | 0.880256 | 0 | 0 | 5,797 | 0.883151 | 4,479 | 0.682358 | 1,274 | 0.194089 |
d22d16cc4c908be77ff9ce274ee5534ee91f29e1 | 13,624 | py | Python | mantrid/loadbalancer.py | epio/mantrid | 1c699f1a4b33888b533c19cb6d025173f2160576 | ["BSD-3-Clause"] | 30 | 2015-01-01T00:32:47.000Z | 2021-09-07T20:25:01.000Z | mantrid/loadbalancer.py | epio/mantrid | 1c699f1a4b33888b533c19cb6d025173f2160576 | ["BSD-3-Clause"] | null | null | null | mantrid/loadbalancer.py | epio/mantrid | 1c699f1a4b33888b533c19cb6d025173f2160576 | ["BSD-3-Clause"] | 9 | 2015-05-12T05:09:12.000Z | 2021-12-29T19:07:01.000Z |
import eventlet
import errno
import logging
import traceback
import mimetools
import resource
import json
import os
import sys
import argparse
from eventlet import wsgi
from eventlet.green import socket
from .actions import Unknown, Proxy, Empty, Static, Redirect, NoHosts, Spin
from .config import SimpleConfig
from .management import ManagementApp
from .stats_socket import StatsSocket
from .greenbody import GreenBody
class Balancer(object):
"""
Main loadbalancer class.
"""
nofile = 102400
save_interval = 10
action_mapping = {
"proxy": Proxy,
"empty": Empty,
"static": Static,
"redirect": Redirect,
"unknown": Unknown,
"spin": Spin,
"no_hosts": NoHosts,
}
def __init__(self, external_addresses, internal_addresses, management_addresses, state_file, uid=None, gid=65535, static_dir="/etc/mantrid/static/"):
"""
Constructor.
        Takes the sets of (address, socket family) pairs to listen on for the
        external, internal and management endpoints.
Internal endpoints do not have X-Forwarded-* stripped;
other ones do, and have X-Forwarded-For added.
"""
self.external_addresses = external_addresses
self.internal_addresses = internal_addresses
self.management_addresses = management_addresses
self.state_file = state_file
self.uid = uid
self.gid = gid
self.static_dir = static_dir
@classmethod
def main(cls):
# Parse command-line args
parser = argparse.ArgumentParser(description='The Mantrid load balancer')
parser.add_argument('--debug', dest='debug', action='store_const', const=True, help='Enable debug logging')
parser.add_argument('-c', '--config', dest='config', default=None, metavar="PATH", help='Path to the configuration file')
args = parser.parse_args()
# Set up logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG if args.debug else logging.INFO)
# Output to stderr, always
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter(
fmt = "%(asctime)s - %(levelname)8s: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
))
sh.setLevel(logging.DEBUG)
logger.addHandler(sh)
        # Try to raise the open-file limit (usually requires root)
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (cls.nofile, cls.nofile))
except (ValueError, resource.error):
logging.warning("Cannot raise resource limits (run as root/change ulimits)")
# Load settings from the config file
if args.config is None:
if os.path.exists("/etc/mantrid/mantrid.conf"):
args.config = "/etc/mantrid/mantrid.conf"
logging.info("Using configuration file %s" % args.config)
else:
args.config = "/dev/null"
logging.info("No configuration file found - using defaults.")
else:
logging.info("Using configuration file %s" % args.config)
config = SimpleConfig(args.config)
balancer = cls(
config.get_all_addresses("bind", set([(("::", 80), socket.AF_INET6)])),
config.get_all_addresses("bind_internal"),
config.get_all_addresses("bind_management", set([(("127.0.0.1", 8042), socket.AF_INET), (("::1", 8042), socket.AF_INET6)])),
config.get("state_file", "/var/lib/mantrid/state.json"),
config.get_int("uid", 4321),
config.get_int("gid", 4321),
config.get("static_dir", "/etc/mantrid/static/"),
)
balancer.run()
def load(self):
"Loads the state from the state file"
try:
if os.path.getsize(self.state_file) <= 1:
raise IOError("File is empty.")
with open(self.state_file) as fh:
state = json.load(fh)
assert isinstance(state, dict)
self.hosts = state['hosts']
self.stats = state['stats']
for key in self.stats:
self.stats[key]['open_requests'] = 0
except (IOError, OSError):
# There is no state file; start empty.
self.hosts = {}
self.stats = {}
def save(self):
"Saves the state to the state file"
with open(self.state_file, "w") as fh:
json.dump({
"hosts": self.hosts,
"stats": self.stats,
}, fh)
def run(self):
# First, initialise the process
self.load()
self.running = True
# Try to ensure the state file is readable
state_dir = os.path.dirname(self.state_file)
if not os.path.isdir(state_dir):
os.makedirs(state_dir)
if self.uid is not None:
try:
os.chown(state_dir, self.uid, -1)
except OSError:
pass
try:
os.chown(self.state_file, self.uid, -1)
except OSError:
pass
# Then, launch the socket loops
pool = GreenBody(
len(self.external_addresses) +
len(self.internal_addresses) +
len(self.management_addresses) +
1
)
pool.spawn(self.save_loop)
for address, family in self.external_addresses:
pool.spawn(self.listen_loop, address, family, internal=False)
for address, family in self.internal_addresses:
pool.spawn(self.listen_loop, address, family, internal=True)
for address, family in self.management_addresses:
pool.spawn(self.management_loop, address, family)
# Give the other threads a chance to open their listening sockets
eventlet.sleep(0.5)
# Drop to the lesser UID/GIDs, if supplied
if self.gid:
try:
os.setegid(self.gid)
os.setgid(self.gid)
except OSError:
logging.error("Cannot change to GID %i (probably not running as root)" % self.gid)
else:
logging.info("Dropped to GID %i" % self.gid)
if self.uid:
try:
os.seteuid(0)
os.setuid(self.uid)
os.seteuid(self.uid)
except OSError:
logging.error("Cannot change to UID %i (probably not running as root)" % self.uid)
else:
logging.info("Dropped to UID %i" % self.uid)
# Ensure we can save to the state file, or die hard.
try:
open(self.state_file, "a").close()
except (OSError, IOError):
logging.critical("Cannot write to state file %s" % self.state_file)
sys.exit(1)
# Wait for one to exit, or for a clean/forced shutdown
try:
pool.wait()
except (KeyboardInterrupt, StopIteration, SystemExit):
pass
except:
logging.error(traceback.format_exc())
# We're done
self.running = False
logging.info("Exiting")
### Management ###
def save_loop(self):
"""
Saves the state if it has changed.
"""
last_hash = hash(repr(self.hosts))
while self.running:
eventlet.sleep(self.save_interval)
next_hash = hash(repr(self.hosts))
if next_hash != last_hash:
self.save()
last_hash = next_hash
def management_loop(self, address, family):
"""
Accepts management requests.
"""
try:
sock = eventlet.listen(address, family)
except socket.error, e:
logging.critical("Cannot listen on (%s, %s): %s" % (address, family, e))
return
# Sleep to ensure we've dropped privileges by the time we start serving
eventlet.sleep(0.5)
# Actually serve management
logging.info("Listening for management on %s" % (address, ))
management_app = ManagementApp(self)
try:
with open("/dev/null", "w") as log_dest:
wsgi.server(
sock,
management_app.handle,
log = log_dest,
)
finally:
sock.close()
### Client handling ###
def listen_loop(self, address, family, internal=False):
"""
Accepts incoming connections.
"""
try:
sock = eventlet.listen(address, family)
except socket.error, e:
if e.errno == errno.EADDRINUSE:
logging.critical("Cannot listen on (%s, %s): already in use" % (address, family))
raise
elif e.errno == errno.EACCES and address[1] <= 1024:
logging.critical("Cannot listen on (%s, %s) (you might need to launch as root)" % (address, family))
return
logging.critical("Cannot listen on (%s, %s): %s" % (address, family, e))
return
# Sleep to ensure we've dropped privileges by the time we start serving
eventlet.sleep(0.5)
# Start serving
logging.info("Listening for requests on %s" % (address, ))
try:
eventlet.serve(
sock,
lambda sock, addr: self.handle(sock, addr, internal),
concurrency = 10000,
)
finally:
sock.close()
def resolve_host(self, host, protocol="http"):
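        # Hosts may be registered either bare ("example.com") or with a protocol
        # prefix ("https://example.com"); the prefixed form is checked first at
        # every subdomain level.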
# Special case for empty hosts dict
if not self.hosts:
return NoHosts(self, host, "unknown")
# Check for an exact or any subdomain matches
bits = host.split(".")
for i in range(len(bits)):
for prefix in ["%s://" % protocol, ""]:
subhost = prefix + (".".join(bits[i:]))
if subhost in self.hosts:
action, kwargs, allow_subs = self.hosts[subhost]
if allow_subs or i == 0:
action_class = self.action_mapping[action]
return action_class(
balancer = self,
host = host,
matched_host = subhost,
**kwargs
)
return Unknown(self, host, "unknown")
def handle(self, sock, address, internal=False):
"""
Handles an incoming HTTP connection.
"""
try:
sock = StatsSocket(sock)
rfile = sock.makefile('rb', 4096)
# Read the first line
first = rfile.readline().strip("\r\n")
words = first.split()
# Ensure it looks kind of like HTTP
if not (2 <= len(words) <= 3):
sock.sendall("HTTP/1.0 400 Bad Request\r\nConnection: close\r\nContent-length: 0\r\n\r\n")
return
path = words[1]
# Read the headers
headers = mimetools.Message(rfile, 0)
# Work out the host
try:
host = headers['Host']
except KeyError:
host = "unknown"
headers['Connection'] = "close"
if not internal:
headers['X-Forwarded-For'] = address[0]
headers['X-Forwarded-Protocol'] = ""
headers['X-Forwarded-Proto'] = ""
# Make sure they're not using odd encodings
if "Transfer-Encoding" in headers:
sock.sendall("HTTP/1.0 411 Length Required\r\nConnection: close\r\nContent-length: 0\r\n\r\n")
return
# Match the host to an action
protocol = "http"
if headers.get('X-Forwarded-Protocol', headers.get('X-Forwarded-Proto', "")).lower() in ("ssl", "https"):
protocol = "https"
action = self.resolve_host(host, protocol)
# Record us as an open connection
stats_dict = self.stats.setdefault(action.matched_host, {})
stats_dict['open_requests'] = stats_dict.get('open_requests', 0) + 1
# Run the action
try:
rfile._rbuf.seek(0)
action.handle(
sock = sock,
read_data = first + "\r\n" + str(headers) + "\r\n" + rfile._rbuf.read(),
path = path,
headers = headers,
)
finally:
stats_dict['open_requests'] -= 1
stats_dict['completed_requests'] = stats_dict.get('completed_requests', 0) + 1
stats_dict['bytes_sent'] = stats_dict.get('bytes_sent', 0) + sock.bytes_sent
stats_dict['bytes_received'] = stats_dict.get('bytes_received', 0) + sock.bytes_received
except socket.error, e:
if e.errno not in (errno.EPIPE, errno.ETIMEDOUT, errno.ECONNRESET):
logging.error(traceback.format_exc())
except:
logging.error(traceback.format_exc())
try:
sock.sendall("HTTP/1.0 500 Internal Server Error\r\n\r\nThere has been an internal error in the load balancer.")
except socket.error, e:
if e.errno != errno.EPIPE:
raise
finally:
try:
sock.close()
rfile.close()
except:
logging.error(traceback.format_exc())
if __name__ == "__main__":
Balancer.main()
| 38.485876 | 153 | 0.545435 | 13,152 | 0.965355 | 0 | 0 | 2,202 | 0.161627 | 0 | 0 | 3,478 | 0.255285 |
d22daea1e02414a246423f9065c5355093e77a88 | 18,989 | py | Python | pyhelp/managers.py | FHuchet/pyhelp | 9d658f5c6f6d8aee8e528ca9946a40eac0ff3a68 | ["MIT"] | 1 | 2020-07-20T20:32:15.000Z | 2020-07-20T20:32:15.000Z | pyhelp/managers.py | FHuchet/pyhelp | 9d658f5c6f6d8aee8e528ca9946a40eac0ff3a68 | ["MIT"] | null | null | null | pyhelp/managers.py | FHuchet/pyhelp | 9d658f5c6f6d8aee8e528ca9946a40eac0ff3a68 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright © 2018 PyHelp Project Contributors
# https://github.com/jnsebgosselin/pyhelp
#
# This file is part of PyHelp.
# Licensed under the terms of the GNU General Public License.
# ---- Standard Library Imports
import os
import os.path as osp
# ---- Third Party imports
import numpy as np
import geopandas as gpd
import netCDF4
import pandas as pd
# ---- Local Libraries Imports
from pyhelp.preprocessing import write_d10d11_allcells, format_d10d11_inputs
from pyhelp.processing import run_help_allcells
from pyhelp.utils import savedata_to_hdf5
from pyhelp.weather_reader import (
save_precip_to_HELP, save_airtemp_to_HELP, save_solrad_to_HELP,
read_cweeds_file, join_daily_cweeds_wy2_and_wy3)
FNAME_CONN_TABLES = 'connect_table.npy'
class HELPManager(object):
def __init__(self, workdir, year_range, path_togrid=None):
super(HELPManager, self).__init__()
self.year_range = year_range
self.set_workdir(workdir)
self._setup_connect_tables()
if path_togrid is not None:
self.load_grid(path_togrid)
else:
self.grid = None
@property
def cellnames(self):
if self.grid is not None:
return self.grid['cid'].tolist()
else:
return []
@property
def inputdir(self):
"""
Return the path to the folder where the HELP input files are going to
be saved in the working directory. This folder is created in case it
doesn't already exist in the file system.
"""
inputdir = osp.join(self.workdir, 'help_input_files')
if not osp.exists(inputdir):
os.makedirs(inputdir)
return inputdir
@property
def workdir(self):
"""Return the path to the current working directory."""
return os.getcwd()
def set_workdir(self, dirname):
"""Set the working directory of the manager."""
if not osp.exists(dirname):
os.makedirs(dirname)
os.chdir(dirname)
# ---- Connect tables
@property
def path_connect_tables(self):
return osp.join(self.inputdir, FNAME_CONN_TABLES)
def _setup_connect_tables(self):
"""Setup the connect tables dictionary."""
if osp.exists(self.path_connect_tables):
self.connect_tables = np.load(self.path_connect_tables).item()
else:
self.connect_tables = {}
def _save_connect_tables(self):
"""Save the connect tables dictionary to a numpy binary file."""
np.save(self.path_connect_tables, self.connect_tables)
# ---- HELP grid
def load_grid(self, path_togrid):
"""
        Load the grid that contains the information required to evaluate regional
groundwater recharge with HELP.
"""
self.grid = load_grid_from_csv(path_togrid)
return self.grid
# ---- Input files creation
def generate_d13_from_cweeds(self, d13fname, fpath_cweed2, fpath_cweed3,
cellnames=None):
"""
Generate the HELP D13 input file for solar radiation from wy2 and
wy3 CWEEDS files at a given location.
"""
d13fpath = osp.join(self.inputdir, d13fname)
if cellnames is None:
cellnames = self.cellnames
else:
# Keep only the cells that are in the grid.
cellnames = self.grid['cid'][self.grid['cid'].isin(cellnames)]
print('Reading CWEEDS files...', end=' ')
daily_wy2 = read_cweeds_file(fpath_cweed2, format_to_daily=True)
daily_wy3 = read_cweeds_file(fpath_cweed3, format_to_daily=True)
wy23_df = join_daily_cweeds_wy2_and_wy3(daily_wy2, daily_wy3)
indexes = np.where((wy23_df['Years'] >= self.year_range[0]) &
(wy23_df['Years'] <= self.year_range[1]))[0]
print('done')
print('Generating HELP D13 file for solar radiation...', end=' ')
save_solrad_to_HELP(d13fpath,
wy23_df['Years'][indexes],
wy23_df['Irradiance'][indexes],
'CAN_QC_MONTREAL-INTL-A_7025251',
wy23_df['Latitude'])
print('done')
if self.year_range[1] > np.max(wy23_df['Years']):
print("Warning: there is no solar radiation data after year %d."
% np.max(wy23_df['Years']))
if self.year_range[0] < np.min(wy23_df['Years']):
print("Warning: there is no solar radiation data before year %d."
% np.min(wy23_df['Years']))
# Update the connection table.
print("\rUpdating the connection table...", end=' ')
d13_connect_table = {cid: d13fpath for cid in cellnames}
self.connect_tables['D13'] = d13_connect_table
self._save_connect_tables()
print("done")
def generate_d10d11_input_files(self, cellnames=None, sf_edepth=1,
sf_ulai=1):
"""Prepare the D10 and D11 input datafiles for each cell."""
d10d11_inputdir = osp.join(self.inputdir, 'd10d11_input_files')
if not osp.exists(d10d11_inputdir):
os.makedirs(d10d11_inputdir)
# Only keep the cells that are going to be run in HELP because we
# don't need the D10 or D11 input files for those that aren't.
cellnames = self.get_run_cellnames(cellnames)
d10data, d11data = format_d10d11_inputs(self.grid, cellnames,
sf_edepth, sf_ulai)
# Write the D10 and D11 input files.
d10_conn_tbl, d11_conn_tbl = write_d10d11_allcells(
d10d11_inputdir, d10data, d11data)
# Update the connection table.
print("\rUpdating the connection table...", end=' ')
self.connect_tables['D10'] = d10_conn_tbl
self.connect_tables['D11'] = d11_conn_tbl
self._save_connect_tables()
print("done")
def generate_d4d7_from_MDELCC_grid(self, path_netcdf_dir, cellnames=None):
"""
Prepare the D4 and D7 input datafiles for each cell from the
interpolated grid of the MDDELCC.
"""
d4d7_inputdir = osp.join(self.inputdir, 'd4d7_input_files')
if not osp.exists(d4d7_inputdir):
os.makedirs(d4d7_inputdir)
cellnames = self.get_run_cellnames(cellnames)
N = len(cellnames)
# Get the latitudes and longitudes of the resulting cells.
lat_dd, lon_dd = self.get_latlon_for_cellnames(cellnames)
# Generate the connectivity table between the HELP grid and the
# MDDELCC interpolated daily weather grid.
print('Generating the connectivity table for each cell...', end=' ')
meteo_manager = NetCDFMeteoManager(path_netcdf_dir)
d4_conn_tbl = {}
d7_conn_tbl = {}
data = []
for i, cellname in enumerate(cellnames):
lat_idx, lon_idx = meteo_manager.get_idx_from_latlon(
lat_dd[i], lon_dd[i])
d4fname = osp.join(
d4d7_inputdir, '%03d_%03d.D4' % (lat_idx, lon_idx))
d7fname = osp.join(
d4d7_inputdir, '%03d_%03d.D7' % (lat_idx, lon_idx))
d4_conn_tbl[cellnames[i]] = d4fname
d7_conn_tbl[cellnames[i]] = d7fname
data.append([lat_idx, lon_idx, d4fname, d7fname])
print('done')
# Fetch the daily weather data from the netCDF files.
data = np.unique(data, axis=0)
lat_indx = data[:, 0].astype(int)
lon_idx = data[:, 1].astype(int)
years = range(self.year_range[0], self.year_range[1]+1)
tasavg, precip, years = meteo_manager.get_data_from_idx(
lat_indx, lon_idx, years)
# Convert and save the weather data to D4 and D7 HELP input files.
N = len(data)
for i in range(N):
print(("\rGenerating HELP D4 and D7 files for location " +
"%d of %d (%0.1f%%)...") % (i+1, N, (i+1)/N * 100), end=' ')
lat = meteo_manager.lat[lat_indx[i]]
lon = meteo_manager.lon[lon_idx[i]]
d4fname, d7fname = data[i, 2], data[i, 3]
city = 'Meteo Grid at lat/lon %0.1f ; %0.1f' % (lat, lon)
# Fill -999 with 0 in daily precip.
precip_i = precip[:, i]
precip_i[precip_i == -999] = 0
# Fill -999 with linear interpolation in daily air temp.
tasavg_i = tasavg[:, i]
time_ = np.arange(len(tasavg_i))
indx = np.where(tasavg_i != -999)[0]
tasavg_i = np.interp(time_, time_[indx], tasavg_i[indx])
if not osp.exists(d4fname):
save_precip_to_HELP(d4fname, years, precip_i, city)
if not osp.exists(d7fname):
save_airtemp_to_HELP(d7fname, years, tasavg_i, city)
print('done')
# Update the connection table.
print("\rUpdating the connection table...", end=' ')
self.connect_tables['D4'] = d4_conn_tbl
self.connect_tables['D7'] = d7_conn_tbl
self._save_connect_tables()
print('done')
def run_help_for(self, path_outfile=None, cellnames=None, tfsoil=0):
"""
Run help for the cells listed in cellnames and save the result in
an hdf5 file.
"""
        # Convert from Celsius to Fahrenheit
tfsoil = (tfsoil * 1.8) + 32
tempdir = osp.join(self.inputdir, ".temp")
if not osp.exists(tempdir):
os.makedirs(tempdir)
run_cellnames = self.get_run_cellnames(cellnames)
cellparams = {}
for cellname in run_cellnames:
fpath_d4 = self.connect_tables['D4'][cellname]
fpath_d7 = self.connect_tables['D7'][cellname]
fpath_d13 = self.connect_tables['D13'][cellname]
fpath_d10 = self.connect_tables['D10'][cellname]
fpath_d11 = self.connect_tables['D11'][cellname]
fpath_out = osp.abspath(osp.join(tempdir, str(cellname) + '.OUT'))
daily_out = 0
monthly_out = 1
yearly_out = 0
summary_out = 0
unit_system = 2 # IP if 1 else SI
simu_nyear = self.year_range[1] - self.year_range[0] + 1
cellparams[cellname] = (fpath_d4, fpath_d7, fpath_d13, fpath_d11,
fpath_d10, fpath_out, daily_out,
monthly_out, yearly_out, summary_out,
unit_system, simu_nyear, tfsoil)
output = run_help_allcells(cellparams)
if path_outfile:
savedata_to_hdf5(output, path_outfile)
return output
def calc_surf_water_cells(self, evp_surf, path_netcdf_dir,
path_outfile=None, cellnames=None):
cellnames = self.get_water_cellnames(cellnames)
lat_dd, lon_dd = self.get_latlon_for_cellnames(cellnames)
meteo_manager = NetCDFMeteoManager(path_netcdf_dir)
N = len(cellnames)
lat_indx = np.empty(N).astype(int)
lon_indx = np.empty(N).astype(int)
for i, cellname in enumerate(cellnames):
lat_indx[i], lon_indx[i] = meteo_manager.get_idx_from_latlon(
lat_dd[i], lon_dd[i])
year_range = np.arange(
self.year_range[0], self.year_range[1] + 1).astype(int)
tasavg, precip, years = meteo_manager.get_data_from_idx(
lat_indx, lon_indx, year_range)
# Fill -999 with 0 in daily precip.
precip[precip == -999] = 0
nyr = len(year_range)
output = {}
for i, cellname in enumerate(cellnames):
data = {}
data['years'] = year_range
data['rain'] = np.zeros(nyr)
data['evapo'] = np.zeros(nyr) + evp_surf
data['runoff'] = np.zeros(nyr)
for k, year in enumerate(year_range):
indx = np.where(years == year)[0]
data['rain'][k] = np.sum(precip[indx, i])
data['runoff'][k] = data['rain'][k] - evp_surf
output[cellname] = data
if path_outfile:
savedata_to_hdf5(output, path_outfile)
return output
# # For cells for which the context is 2, convert recharge and deep
# # subrunoff into superfical subrunoff.
# cellnames_con_2 = cellnames[self.grid[fcon] == 2].tolist()
# for cellname in cellnames_con_2:
# output[cellname]['subrun1'] += output[cellname]['subrun2']
# output[cellname]['subrun1'] += output[cellname]['recharge']
# output[cellname]['subrun2'][:] = 0
# output[cellname]['recharge'][:] = 0
# # For cells for which the context is 3, convert recharge into
# # deep runoff.
# cellnames_con_3 = cellnames[self.grid[fcon] == 3].tolist()
# for cellname in cellnames_con_3:
# output[cellname]['subrun2'] += output[cellname]['recharge']
# output[cellname]['recharge'][:] = 0
        # # Compute water budget for cells for which the context is 0.
# cellnames_con_2 = cellnames[self.grid[fcon] == 0].tolist()
# # meteo_manager = NetCDFMeteoManager(path_netcdf_dir)
# # for cellname in cellnames_run0:
# Save the result to an hdf5 file.
# ---- Utilities
def get_water_cellnames(self, cellnames):
"""
Take a list of cellnames and return only those that are considered
to be in a surface water area.
"""
if cellnames is None:
cellnames = self.cellnames
else:
# Keep only the cells that are in the grid.
cellnames = self.grid['cid'][self.grid['cid'].isin(cellnames)]
# Only keep the cells for which context is 0.
cellnames = self.grid['cid'][cellnames][self.grid['context'] == 0]
return cellnames.tolist()
def get_run_cellnames(self, cellnames):
"""
Take a list of cellnames and return only those that are in the grid
and for which HELP can be run.
"""
if cellnames is None:
cellnames = self.cellnames
else:
# Keep only the cells that are in the grid.
cellnames = self.grid['cid'][self.grid['cid'].isin(cellnames)]
# Only keep the cells that are going to be run in HELP because we
# don't need the D4 or D7 input files for those that aren't.
cellnames = self.grid['cid'][cellnames][self.grid['run'] == 1].tolist()
return cellnames
def get_latlon_for_cellnames(self, cells):
"""
Return a numpy array with latitudes and longitudes of the provided
cells cid. Latitude and longitude for cids that are missing from
the grid are set to nan.
"""
lat = np.array(self.grid['lat_dd'].reindex(cells).tolist())
lon = np.array(self.grid['lon_dd'].reindex(cells).tolist())
return lat, lon
class NetCDFMeteoManager(object):
def __init__(self, dirpath_netcdf):
super(NetCDFMeteoManager, self).__init__()
self.dirpath_netcdf = dirpath_netcdf
self.lat = []
self.lon = []
self.setup_ncfile_list()
self.setup_latlon_grid()
def setup_ncfile_list(self):
"""Read all the available netCDF files in dirpath_netcdf."""
self.ncfilelist = []
for file in os.listdir(self.dirpath_netcdf):
if file.endswith('.nc'):
self.ncfilelist.append(osp.join(self.dirpath_netcdf, file))
def setup_latlon_grid(self):
if self.ncfilelist:
netcdf_dset = netCDF4.Dataset(self.ncfilelist[0], 'r+')
self.lat = np.array(netcdf_dset['lat'])
self.lon = np.array(netcdf_dset['lon'])
netcdf_dset.close()
def get_idx_from_latlon(self, latitudes, longitudes, unique=False):
"""
Get the i and j indexes of the grid meshes from a list of latitude
and longitude coordinates. If unique is True, only the unique pairs of
i and j indexes will be returned.
"""
try:
lat_idx = [np.argmin(np.abs(self.lat - lat)) for lat in latitudes]
lon_idx = [np.argmin(np.abs(self.lon - lon)) for lon in longitudes]
if unique:
ijdx = np.vstack({(i, j) for i, j in zip(lat_idx, lon_idx)})
lat_idx = ijdx[:, 0].tolist()
lon_idx = ijdx[:, 1].tolist()
except TypeError:
lat_idx = np.argmin(np.abs(self.lat - latitudes))
lon_idx = np.argmin(np.abs(self.lon - longitudes))
return lat_idx, lon_idx
def get_data_from_latlon(self, latitudes, longitudes, years):
"""
        Return the daily average air temperature, the daily precipitation and the
        corresponding years for the grid meshes closest to the given coordinates.
"""
lat_idx, lon_idx = self.get_idx_from_latlon(latitudes, longitudes)
return self.get_data_from_idx(lat_idx, lon_idx, years)
def get_data_from_idx(self, lat_idx, lon_idx, years):
try:
len(lat_idx)
except TypeError:
lat_idx, lon_idx = [lat_idx], [lon_idx]
tasmax_stacks = []
tasmin_stacks = []
precip_stacks = []
years_stack = []
for year in years:
print('\rFetching daily weather data for year %d...' % year,
end=' ')
filename = osp.join(self.dirpath_netcdf, 'GCQ_v2_%d.nc' % year)
netcdf_dset = netCDF4.Dataset(filename, 'r+')
tasmax_stacks.append(
np.array(netcdf_dset['tasmax'])[:, lat_idx, lon_idx])
tasmin_stacks.append(
np.array(netcdf_dset['tasmin'])[:, lat_idx, lon_idx])
precip_stacks.append(
np.array(netcdf_dset['pr'])[:, lat_idx, lon_idx])
years_stack.append(
np.zeros(len(precip_stacks[-1][:])).astype(int) + year)
netcdf_dset.close()
print('done')
tasmax = np.vstack(tasmax_stacks)
tasmin = np.vstack(tasmin_stacks)
precip = np.vstack(precip_stacks)
years = np.hstack(years_stack)
return (tasmax + tasmin)/2, precip, years
def load_grid_from_csv(path_togrid):
"""
    Load the csv file that contains the information required to evaluate regional
groundwater recharge with HELP.
"""
print('Reading HELP grid from csv...', end=' ')
grid = pd.read_csv(path_togrid)
print('done')
fname = osp.basename(path_togrid)
req_keys = ['cid', 'lat_dd', 'lon_dd', 'run']
for key in req_keys:
if key not in grid.keys():
raise KeyError("No attribute '%s' found in %s" % (key, fname))
# Make sure that cid is a str.
grid['cid'] = np.array(grid['cid']).astype(str)
# Set 'cid' as the index of the dataframe.
grid.set_index(['cid'], drop=False, inplace=True)
return grid
| 37.015595 | 79 | 0.596609 | 17,503 | 0.921696 | 0 | 0 | 793 | 0.041759 | 0 | 0 | 5,478 | 0.288468 |
d22e790f560b51447016ed3ce2c5663688b5fd74 | 6,131 | py | Python | tests/unit/test_types.py | OvalMoney/momapper | 9bcf1909a80677cab831132444be27fa4adaa2a5 | ["MIT"] | null | null | null | tests/unit/test_types.py | OvalMoney/momapper | 9bcf1909a80677cab831132444be27fa4adaa2a5 | ["MIT"] | null | null | null | tests/unit/test_types.py | OvalMoney/momapper | 9bcf1909a80677cab831132444be27fa4adaa2a5 | ["MIT"] | null | null | null |
from decimal import Decimal
import pytest
from bson import Decimal128
from momapper import MappedClass, Field
from momapper.mongodb.collection import MappedCollection
from momapper.types import (
DecimalType,
ValidationError,
IntType,
FloatType,
StringType,
ByteType,
BoolType,
ListType,
DictType,
)
@pytest.mark.parametrize("value, exception", [(0, None), (object(), ValidationError)])
def test_int_type(mongo_client, value, exception):
class DocWithInt(MappedClass):
value = Field("value", type_=IntType)
if exception:
with pytest.raises(exception):
DocWithInt(value=value)
else:
doc = DocWithInt(value=value)
assert doc.value == value
assert doc._document["value"] == value
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithInt
)
collection.insert_one(doc)
@pytest.mark.parametrize("value, exception", [(0.0, None), (object(), ValidationError)])
def test_float_type(mongo_client, value, exception):
class DocWithFloat(MappedClass):
value = Field("value", type_=FloatType)
if exception:
with pytest.raises(exception):
DocWithFloat(value=value)
else:
doc = DocWithFloat(value=value)
assert doc.value == value
assert doc._document["value"] == value
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithFloat
)
collection.insert_one(doc)
@pytest.mark.parametrize("amount", [0, 0.0, Decimal("10")])
def test_decimal_type(mongo_client, amount):
class DocWithDecimal(MappedClass):
amount = Field("amount", type_=DecimalType)
doc = DocWithDecimal(amount=amount)
assert isinstance(doc.amount, Decimal)
assert isinstance(doc._document["amount"], Decimal128)
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithDecimal
)
doc_id = collection.insert_one(doc).inserted_id
fetched_doc = collection.find_one({"_id": doc_id})
assert isinstance(fetched_doc.amount, Decimal)
assert isinstance(fetched_doc._document["amount"], Decimal128)
assert doc.amount == fetched_doc.amount
def test_decimal_type_if_missing(mongo_client):
class DocWithDecimalRequired(MappedClass):
amount = Field(
"amount", type_=DecimalType, required=True, if_missing=Decimal(5)
)
doc = DocWithDecimalRequired()
assert isinstance(doc.amount, Decimal)
assert isinstance(doc._document["amount"], Decimal128)
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithDecimalRequired
)
doc_id = collection.insert_one(doc).inserted_id
fetched_doc = collection.find_one({"_id": doc_id})
assert isinstance(fetched_doc.amount, Decimal)
assert isinstance(fetched_doc._document["amount"], Decimal128)
assert doc.amount == fetched_doc.amount
@pytest.mark.parametrize(
"value, exception", [("value", None), (object(), ValidationError)]
)
def test_string_type(mongo_client, value, exception):
class DocWithString(MappedClass):
value = Field("value", type_=StringType)
if exception:
with pytest.raises(exception):
DocWithString(value=value)
else:
doc = DocWithString(value=value)
assert doc.value == value
assert doc._document["value"] == value
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithString
)
collection.insert_one(doc)
@pytest.mark.parametrize(
"value, exception", [(b"value", None), (object(), ValidationError)]
)
def test_bytes_type(mongo_client, value, exception):
class DocWithBytes(MappedClass):
value = Field("value", type_=ByteType)
if exception:
with pytest.raises(exception):
DocWithBytes(value=value)
else:
doc = DocWithBytes(value=value)
assert doc.value == value
assert doc._document["value"] == value
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithBytes
)
collection.insert_one(doc)
@pytest.mark.parametrize(
"value, exception", [(False, None), (True, None), (object(), ValidationError)]
)
def test_bool_type(mongo_client, value, exception):
class DocWithBool(MappedClass):
value = Field("value", type_=BoolType)
if exception:
with pytest.raises(exception):
DocWithBool(value=value)
else:
doc = DocWithBool(value=value)
assert doc.value == value
assert doc._document["value"] == value
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithBool
)
collection.insert_one(doc)
@pytest.mark.parametrize(
"value, exception", [(["value"], None), (object(), ValidationError)]
)
def test_list_type(mongo_client, value, exception):
class DocWithList(MappedClass):
value = Field("value", type_=ListType)
if exception:
with pytest.raises(exception):
DocWithList(value=value)
else:
doc = DocWithList(value=value)
assert doc.value == value
assert doc._document["value"] == value
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithList
)
collection.insert_one(doc)
@pytest.mark.parametrize(
"value, exception", [({"value": "value"}, None), (object(), ValidationError)]
)
def test_dict_type(mongo_client, value, exception):
class DocWithDict(MappedClass):
value = Field("value", type_=DictType)
if exception:
with pytest.raises(exception):
DocWithDict(value=value)
else:
doc = DocWithDict(value=value)
assert doc.value == value
assert doc._document["value"] == value
collection = MappedCollection(
mongo_client.db, mongo_client.collection, impl=DocWithDict
)
collection.insert_one(doc)
| 31.280612 | 88 | 0.669222 | 791 | 0.129016 | 0 | 0 | 5,032 | 0.820747 | 0 | 0 | 330 | 0.053825 |
d230b8b07301d92ab203c4ea79e6dcb73031cdf8 | 36 | py | Python | deepleaps/workspace/src/ipc/CustomCommand.py | Longseabear/deep-leaps-pytorch | abcb87f3079c0612bde4a4f94c75d7c05d5aee3a | ["MIT"] | 1 | 2021-02-27T18:00:39.000Z | 2021-02-27T18:00:39.000Z | deepleaps/workspace/src/ipc/CustomCommand.py | Longseabear/deep-leaps-pytorch | abcb87f3079c0612bde4a4f94c75d7c05d5aee3a | ["MIT"] | null | null | null | deepleaps/workspace/src/ipc/CustomCommand.py | Longseabear/deep-leaps-pytorch | abcb87f3079c0612bde4a4f94c75d7c05d5aee3a | ["MIT"] | null | null | null |
import deepleaps.ipc.RunningCommand
| 18 | 35 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d230ba96d95fc33b542202e8343f1394390c32cd | 26,878 | py | Python | sharpy/solvers/dynamiccoupled.py | ostodieck/sharpy | b85aa1c001a0ec851af4eb259cce7c01dfa68b9e | ["BSD-3-Clause"] | 1 | 2020-07-27T05:15:35.000Z | 2020-07-27T05:15:35.000Z | sharpy/solvers/dynamiccoupled.py | briandesilva/sharpy | aed86428ff88fd14d36cabd91cf7e04b5fc9a39a | ["BSD-3-Clause"] | null | null | null | sharpy/solvers/dynamiccoupled.py | briandesilva/sharpy | aed86428ff88fd14d36cabd91cf7e04b5fc9a39a | ["BSD-3-Clause"] | null | null | null |
import ctypes as ct
import time
import copy
import numpy as np
import sharpy.aero.utils.mapping as mapping
import sharpy.utils.cout_utils as cout
import sharpy.utils.solver_interface as solver_interface
import sharpy.utils.controller_interface as controller_interface
from sharpy.utils.solver_interface import solver, BaseSolver
import sharpy.utils.settings as settings
import sharpy.utils.algebra as algebra
import sharpy.structure.utils.xbeamlib as xbeam
import sharpy.utils.exceptions as exc
@solver
class DynamicCoupled(BaseSolver):
"""
    The ``DynamicCoupled`` solver couples the aerodynamic and structural solvers of choice to march the
    aeroelastic system's solution forward in time.
Using the ``DynamicCoupled`` solver requires that an instance of the ``StaticCoupled`` solver is called in the
SHARPy solution ``flow`` when defining the problem case.
"""
solver_id = 'DynamicCoupled'
solver_classification = 'Coupled'
settings_types = dict()
settings_default = dict()
settings_description = dict()
settings_types['print_info'] = 'bool'
settings_default['print_info'] = True
settings_description['print_info'] = 'Write status to screen'
settings_types['structural_solver'] = 'str'
settings_default['structural_solver'] = None
settings_description['structural_solver'] = 'Structural solver to use in the coupled simulation'
settings_types['structural_solver_settings'] = 'dict'
settings_default['structural_solver_settings'] = None
settings_description['structural_solver_settings'] = 'Dictionary of settings for the structural solver'
settings_types['aero_solver'] = 'str'
settings_default['aero_solver'] = None
settings_description['aero_solver'] = 'Aerodynamic solver to use in the coupled simulation'
settings_types['aero_solver_settings'] = 'dict'
settings_default['aero_solver_settings'] = None
settings_description['aero_solver_settings'] = 'Dictionary of settings for the aerodynamic solver'
settings_types['n_time_steps'] = 'int'
settings_default['n_time_steps'] = None
settings_description['n_time_steps'] = 'Number of time steps for the simulation'
settings_types['dt'] = 'float'
settings_default['dt'] = None
settings_description['dt'] = 'Time step'
settings_types['fsi_substeps'] = 'int'
settings_default['fsi_substeps'] = 70
settings_description['fsi_substeps'] = 'Max iterations in the FSI loop'
settings_types['fsi_tolerance'] = 'float'
settings_default['fsi_tolerance'] = 1e-5
settings_description['fsi_tolerance'] = 'Convergence threshold for the FSI loop'
settings_types['structural_substeps'] = 'int'
settings_default['structural_substeps'] = 0 # 0 is normal coupled sim.
settings_description['structural_substeps'] = 'Number of extra structural time steps per aero time step. 0 is a fully coupled simulation.'
settings_types['relaxation_factor'] = 'float'
settings_default['relaxation_factor'] = 0.2
settings_description['relaxation_factor'] = 'Relaxation parameter in the FSI iteration. 0 is no relaxation and -> 1 is very relaxed'
settings_types['final_relaxation_factor'] = 'float'
settings_default['final_relaxation_factor'] = 0.0
settings_description['final_relaxation_factor'] = 'Relaxation factor reached in ``relaxation_steps`` with ``dynamic_relaxation`` on'
settings_types['minimum_steps'] = 'int'
settings_default['minimum_steps'] = 3
settings_description['minimum_steps'] = 'Number of minimum FSI iterations before convergence'
settings_types['relaxation_steps'] = 'int'
settings_default['relaxation_steps'] = 100
settings_description['relaxation_steps'] = 'Length of the relaxation factor ramp between ``relaxation_factor`` and ``final_relaxation_factor`` with ``dynamic_relaxation`` on'
settings_types['dynamic_relaxation'] = 'bool'
settings_default['dynamic_relaxation'] = False
settings_description['dynamic_relaxation'] = 'Controls if relaxation factor is modified during the FSI iteration process'
settings_types['postprocessors'] = 'list(str)'
settings_default['postprocessors'] = list()
settings_description['postprocessors'] = 'List of the postprocessors to run at the end of every time step'
settings_types['postprocessors_settings'] = 'dict'
settings_default['postprocessors_settings'] = dict()
    settings_description['postprocessors_settings'] = 'Dictionary with the applicable settings for every ``postprocessor``. Every ``postprocessor`` needs its entry, even if empty'
settings_types['controller_id'] = 'dict'
settings_default['controller_id'] = dict()
settings_description['controller_id'] = 'Dictionary of id of every controller (key) and its type (value)'
settings_types['controller_settings'] = 'dict'
settings_default['controller_settings'] = dict()
settings_description['controller_settings'] = 'Dictionary with settings (value) of every controller id (key)'
settings_types['cleanup_previous_solution'] = 'bool'
settings_default['cleanup_previous_solution'] = False
settings_description['cleanup_previous_solution'] = 'Controls if previous ``timestep_info`` arrays are reset before running the solver'
settings_types['include_unsteady_force_contribution'] = 'bool'
settings_default['include_unsteady_force_contribution'] = False
settings_description['include_unsteady_force_contribution'] = 'If on, added mass contribution is added to the forces. This depends on the time derivative of the bound circulation. Check ``filter_gamma_dot`` in the aero solver'
settings_types['steps_without_unsteady_force'] = 'int'
settings_default['steps_without_unsteady_force'] = 0
    settings_description['steps_without_unsteady_force'] = 'Number of initial timesteps that don\'t include unsteady force contributions. This avoids oscillations due to initial conditions that are not perfectly trimmed'
settings_types['pseudosteps_ramp_unsteady_force'] = 'int'
settings_default['pseudosteps_ramp_unsteady_force'] = 0
settings_description['pseudosteps_ramp_unsteady_force'] = 'Length of the ramp with which unsteady force contribution is introduced every time step during the FSI iteration process'
settings_table = settings.SettingsTable()
__doc__ += settings_table.generate(settings_types, settings_default, settings_description)
def __init__(self):
self.data = None
self.settings = None
self.structural_solver = None
self.aero_solver = None
self.print_info = False
self.res = 0.0
self.res_dqdt = 0.0
self.res_dqddt = 0.0
self.previous_force = None
self.dt = 0.
self.substep_dt = 0.
self.initial_n_substeps = None
self.predictor = False
self.residual_table = None
self.postprocessors = dict()
self.with_postprocessors = False
self.controllers = None
self.time_aero = 0.
self.time_struc = 0.
def get_g(self):
"""
Getter for ``g``, the gravity value
"""
return self.structural_solver.settings['gravity'].value
def set_g(self, new_g):
"""
Setter for ``g``, the gravity value
"""
self.structural_solver.settings['gravity'] = ct.c_double(new_g)
def get_rho(self):
"""
Getter for ``rho``, the density value
"""
return self.aero_solver.settings['rho'].value
def set_rho(self, new_rho):
"""
Setter for ``rho``, the density value
"""
self.aero_solver.settings['rho'] = ct.c_double(new_rho)
def initialise(self, data, custom_settings=None):
"""
Controls the initialisation process of the solver, including processing
the settings and initialising the aero and structural solvers, postprocessors
and controllers.
"""
self.data = data
if custom_settings is None:
self.settings = data.settings[self.solver_id]
else:
self.settings = custom_settings
settings.to_custom_types(self.settings,
self.settings_types,
self.settings_default)
self.original_settings = copy.deepcopy(self.settings)
self.dt = self.settings['dt']
self.substep_dt = (
self.dt.value/(self.settings['structural_substeps'].value + 1))
self.initial_n_substeps = self.settings['structural_substeps'].value
self.print_info = self.settings['print_info']
if self.settings['cleanup_previous_solution']:
# if there's data in timestep_info[>0], copy the last one to
# timestep_info[0] and remove the rest
self.cleanup_timestep_info()
self.structural_solver = solver_interface.initialise_solver(
self.settings['structural_solver'])
self.structural_solver.initialise(
self.data, self.settings['structural_solver_settings'])
self.aero_solver = solver_interface.initialise_solver(
self.settings['aero_solver'])
self.aero_solver.initialise(self.structural_solver.data,
self.settings['aero_solver_settings'])
self.data = self.aero_solver.data
# initialise postprocessors
self.postprocessors = dict()
if self.settings['postprocessors']:
self.with_postprocessors = True
for postproc in self.settings['postprocessors']:
self.postprocessors[postproc] = solver_interface.initialise_solver(
postproc)
self.postprocessors[postproc].initialise(
self.data, self.settings['postprocessors_settings'][postproc])
# initialise controllers
self.controllers = dict()
self.with_controllers = False
if self.settings['controller_id']:
self.with_controllers = True
for controller_id, controller_type in self.settings['controller_id'].items():
self.controllers[controller_id] = (
controller_interface.initialise_controller(controller_type))
self.controllers[controller_id].initialise(
self.settings['controller_settings'][controller_id],
controller_id)
# print information header
if self.print_info:
self.residual_table = cout.TablePrinter(8, 12, ['g', 'f', 'g', 'f', 'f', 'f', 'e', 'e'])
self.residual_table.field_length[0] = 5
self.residual_table.field_length[1] = 6
self.residual_table.field_length[2] = 4
self.residual_table.print_header(['ts', 't', 'iter', 'struc ratio', 'iter time', 'residual vel',
'FoR_vel(x)', 'FoR_vel(z)'])
def cleanup_timestep_info(self):
if max(len(self.data.aero.timestep_info), len(self.data.structure.timestep_info)) > 1:
# copy last info to first
self.data.aero.timestep_info[0] = self.data.aero.timestep_info[-1]
self.data.structure.timestep_info[0] = self.data.structure.timestep_info[-1]
# delete all the rest
while len(self.data.aero.timestep_info) - 1:
del self.data.aero.timestep_info[-1]
while len(self.data.structure.timestep_info) - 1:
del self.data.structure.timestep_info[-1]
self.data.ts = 0
def process_controller_output(self, controlled_state):
"""
        This function modifies the solver properties and parameters as
        requested by the controller.
        This keeps the main loop much cleaner while allowing for flexibility.
        Please, if you add options in here, always code for the possibility of
        that specific option not being there, without the code complaining to
        the user.
        If possible, use the same key for the new setting as for the
        setting in the solver. For example, if you want to modify the
        `structural_substeps` variable in settings, use that key in the
        `info` dictionary.
        As a convention: a value of None restores the initial value
        specified in settings, while a key that is not present in the dict
        is ignored, so if any change was made before, it will stay there.
"""
try:
info = controlled_state['info']
except KeyError:
return controlled_state['structural'], controlled_state['aero']
# general copy-if-exists, restore if == None
for info_k, info_v in info.items():
if info_k in self.settings:
if info_v is not None:
self.settings[info_k] = info_v
else:
self.settings[info_k] = self.original_settings[info_k]
# specifics of every option
for info_k, info_v in info.items():
if info_k in self.settings:
if info_k == 'structural_substeps':
if info_v is not None:
self.substep_dt = (
self.settings['dt'].value/(
self.settings['structural_substeps'].value + 1))
if info_k == 'structural_solver':
if info_v is not None:
self.structural_solver = solver_interface.initialise_solver(
info['structural_solver'])
self.structural_solver.initialise(
self.data, self.settings['structural_solver_settings'])
return controlled_state['structural'], controlled_state['aero']
def run(self):
"""
Run the time stepping procedure with controllers and postprocessors
included.
"""
# dynamic simulations start at tstep == 1, 0 is reserved for the initial state
for self.data.ts in range(
len(self.data.structure.timestep_info),
self.settings['n_time_steps'].value + len(self.data.structure.timestep_info)):
initial_time = time.perf_counter()
structural_kstep = self.data.structure.timestep_info[-1].copy()
aero_kstep = self.data.aero.timestep_info[-1].copy()
# Add the controller here
if self.with_controllers:
state = {'structural': structural_kstep,
'aero': aero_kstep}
for k, v in self.controllers.items():
state = v.control(self.data, state)
# this takes care of the changes in options for the solver
structural_kstep, aero_kstep = self.process_controller_output(
state)
self.time_aero = 0.0
self.time_struc = 0.0
# Copy the controlled states so that the interpolation does not
# destroy the previous information
controlled_structural_kstep = structural_kstep.copy()
controlled_aero_kstep = aero_kstep.copy()
k = 0
for k in range(self.settings['fsi_substeps'].value + 1):
if (k == self.settings['fsi_substeps'].value and
self.settings['fsi_substeps']):
cout.cout_wrap('The FSI solver did not converge!!!')
break
# generate new grid (already rotated)
aero_kstep = controlled_aero_kstep.copy()
self.aero_solver.update_custom_grid(
structural_kstep,
aero_kstep)
# compute unsteady contribution
force_coeff = 0.0
unsteady_contribution = False
if self.settings['include_unsteady_force_contribution'].value:
if self.data.ts > self.settings['steps_without_unsteady_force'].value:
unsteady_contribution = True
if k < self.settings['pseudosteps_ramp_unsteady_force'].value:
force_coeff = k/self.settings['pseudosteps_ramp_unsteady_force'].value
else:
force_coeff = 1.
# run the solver
ini_time_aero = time.perf_counter()
self.data = self.aero_solver.run(aero_kstep,
structural_kstep,
convect_wake=True,
unsteady_contribution=unsteady_contribution)
self.time_aero += time.perf_counter() - ini_time_aero
previous_kstep = structural_kstep.copy()
structural_kstep = controlled_structural_kstep.copy()
                # move the aerodynamic surface according to the structural one
self.aero_solver.update_custom_grid(structural_kstep,
aero_kstep)
self.map_forces(aero_kstep,
structural_kstep,
force_coeff)
# relaxation
relax_factor = self.relaxation_factor(k)
relax(self.data.structure,
structural_kstep,
previous_kstep,
relax_factor)
# check if nan anywhere.
# if yes, raise exception
if np.isnan(structural_kstep.steady_applied_forces).any():
raise exc.NotConvergedSolver('NaN found in steady_applied_forces!')
if np.isnan(structural_kstep.unsteady_applied_forces).any():
raise exc.NotConvergedSolver('NaN found in unsteady_applied_forces!')
copy_structural_kstep = structural_kstep.copy()
ini_time_struc = time.perf_counter()
for i_substep in range(
self.settings['structural_substeps'].value + 1):
# run structural solver
coeff = ((i_substep + 1)/
(self.settings['structural_substeps'].value + 1))
structural_kstep = self.interpolate_timesteps(
step0=self.data.structure.timestep_info[-1],
step1=copy_structural_kstep,
out_step=structural_kstep,
coeff=coeff)
self.data = self.structural_solver.run(
structural_step=structural_kstep,
dt=self.substep_dt)
self.time_struc += time.perf_counter() - ini_time_struc
# check convergence
if self.convergence(k,
structural_kstep,
previous_kstep):
# move the aerodynamic surface according to the structural one
self.aero_solver.update_custom_grid(
structural_kstep,
aero_kstep)
break
            # move the aerodynamic surface according to the structural one
self.aero_solver.update_custom_grid(structural_kstep, aero_kstep)
self.aero_solver.add_step()
self.data.aero.timestep_info[-1] = aero_kstep.copy()
self.structural_solver.add_step()
self.data.structure.timestep_info[-1] = structural_kstep.copy()
final_time = time.perf_counter()
if self.print_info:
self.residual_table.print_line([self.data.ts,
self.data.ts*self.dt.value,
k,
self.time_struc/(self.time_aero + self.time_struc),
final_time - initial_time,
np.log10(self.res_dqdt),
structural_kstep.for_vel[0],
structural_kstep.for_vel[2],
np.sum(structural_kstep.steady_applied_forces[:, 0]),
np.sum(structural_kstep.steady_applied_forces[:, 2])])
self.structural_solver.extract_resultants()
# run postprocessors
if self.with_postprocessors:
for postproc in self.postprocessors:
self.data = self.postprocessors[postproc].run(online=True)
if self.print_info:
cout.cout_wrap('...Finished', 1)
return self.data
def convergence(self, k, tstep, previous_tstep):
r"""
Check convergence in the FSI loop.
Convergence is determined as:
.. math:: \epsilon_q^k = \frac{|| q^k - q^{k - 1} ||}{q^0}
.. math:: \epsilon_\dot{q}^k = \frac{|| \dot{q}^k - \dot{q}^{k - 1} ||}{\dot{q}^0}
FSI converged if :math:`\epsilon_q^k < \mathrm{FSI\ tolerance}` and :math:`\epsilon_\dot{q}^k < \mathrm{FSI\ tolerance}`
"""
# check for non-convergence
        if not all(np.isfinite(tstep.q)):
            raise Exception(
                '***Not converged! There is a NaN value in the forces!')
if not k:
# save the value of the vectors for normalising later
self.base_q = np.linalg.norm(tstep.q.copy())
self.base_dqdt = np.linalg.norm(tstep.dqdt.copy())
if self.base_dqdt == 0:
self.base_dqdt = 1.
return False
# relative residuals
self.res = (np.linalg.norm(tstep.q-
previous_tstep.q)/
self.base_q)
self.res_dqdt = (np.linalg.norm(tstep.dqdt-
previous_tstep.dqdt)/
self.base_dqdt)
# we don't want this to converge before introducing the gamma_dot forces!
if self.settings['include_unsteady_force_contribution'].value:
if k < self.settings['pseudosteps_ramp_unsteady_force'].value:
return False
# convergence
if k > self.settings['minimum_steps'].value - 1:
if self.res < self.settings['fsi_tolerance'].value:
if self.res_dqdt < self.settings['fsi_tolerance'].value:
return True
return False
def map_forces(self, aero_kstep, structural_kstep, unsteady_forces_coeff=1.0):
# set all forces to 0
structural_kstep.steady_applied_forces.fill(0.0)
structural_kstep.unsteady_applied_forces.fill(0.0)
# aero forces to structural forces
struct_forces = mapping.aero2struct_force_mapping(
aero_kstep.forces,
self.data.aero.struct2aero_mapping,
aero_kstep.zeta,
structural_kstep.pos,
structural_kstep.psi,
self.data.structure.node_master_elem,
self.data.structure.connectivities,
structural_kstep.cag(),
self.data.aero.aero_dict)
dynamic_struct_forces = unsteady_forces_coeff*mapping.aero2struct_force_mapping(
aero_kstep.dynamic_forces,
self.data.aero.struct2aero_mapping,
aero_kstep.zeta,
structural_kstep.pos,
structural_kstep.psi,
self.data.structure.node_master_elem,
self.data.structure.connectivities,
structural_kstep.cag(),
self.data.aero.aero_dict)
# prescribed forces + aero forces
try:
structural_kstep.steady_applied_forces = (
(struct_forces + self.data.structure.ini_info.steady_applied_forces).
astype(dtype=ct.c_double, order='F', copy=True))
structural_kstep.unsteady_applied_forces = (
(dynamic_struct_forces + self.data.structure.dynamic_input[max(self.data.ts - 1, 0)]['dynamic_forces']).
astype(dtype=ct.c_double, order='F', copy=True))
except KeyError:
structural_kstep.steady_applied_forces = (
(struct_forces + self.data.structure.ini_info.steady_applied_forces).
astype(dtype=ct.c_double, order='F', copy=True))
structural_kstep.unsteady_applied_forces = dynamic_struct_forces
def relaxation_factor(self, k):
initial = self.settings['relaxation_factor'].value
if not self.settings['dynamic_relaxation'].value:
return initial
final = self.settings['final_relaxation_factor'].value
if k >= self.settings['relaxation_steps'].value:
return final
value = initial + (final - initial)/self.settings['relaxation_steps'].value*k
return value
@staticmethod
def interpolate_timesteps(step0, step1, out_step, coeff):
"""
Performs a linear interpolation between step0 and step1 based on coeff
in [0, 1]. 0 means info in out_step == step0 and 1 out_step == step1.
Quantities interpolated:
* `steady_applied_forces`
* `unsteady_applied_forces`
* `velocity` input in Lagrange constraints
"""
if not 0.0 <= coeff <= 1.0:
return out_step
# forces
out_step.steady_applied_forces[:] = (
(1.0 - coeff)*step0.steady_applied_forces +
(coeff)*(step1.steady_applied_forces))
out_step.unsteady_applied_forces[:] = (
(1.0 - coeff)*step0.unsteady_applied_forces +
(coeff)*(step1.unsteady_applied_forces))
# multibody if necessary
if out_step.mb_dict is not None:
for key in step1.mb_dict.keys():
if 'constraint_' in key:
try:
out_step.mb_dict[key]['velocity'][:] = (
(1.0 - coeff)*step0.mb_dict[key]['velocity'] +
(coeff)*step1.mb_dict[key]['velocity'])
except KeyError:
pass
return out_step
def relax(beam, timestep, previous_timestep, coeff):
timestep.steady_applied_forces[:] = ((1.0 - coeff)*timestep.steady_applied_forces +
coeff*previous_timestep.steady_applied_forces)
timestep.unsteady_applied_forces[:] = ((1.0 - coeff)*timestep.unsteady_applied_forces +
coeff*previous_timestep.unsteady_applied_forces)
def normalise_quaternion(tstep):
tstep.dqdt[-4:] = algebra.unit_vector(tstep.dqdt[-4:])
tstep.quat = tstep.dqdt[-4:].astype(dtype=ct.c_double, order='F', copy=True)
| 43.775244 | 230 | 0.613625 | 25,839 | 0.961344 | 0 | 0 | 25,847 | 0.961641 | 0 | 0 | 8,267 | 0.307575 |
d232def19f888f5ef15eb9c21425eef07dc01fdd | 4,734 | py | Python | pony/orm/tests/test_generator_db_session.py | ProgHaj/pony | 52720af1728ab2931364be8615e18ad8714a7c9e | ["Apache-2.0"] | 2,628 | 2015-01-02T17:55:28.000Z | 2022-03-31T10:36:42.000Z | pony/orm/tests/test_generator_db_session.py | ProgHaj/pony | 52720af1728ab2931364be8615e18ad8714a7c9e | ["Apache-2.0"] | 525 | 2015-01-03T20:30:08.000Z | 2022-03-23T12:30:01.000Z | pony/orm/tests/test_generator_db_session.py | ProgHaj/pony | 52720af1728ab2931364be8615e18ad8714a7c9e | ["Apache-2.0"] | 256 | 2015-01-02T17:55:31.000Z | 2022-03-20T17:01:37.000Z |
from __future__ import absolute_import, print_function, division
import unittest
from pony.orm.core import *
from pony.orm.core import local
from pony.orm.tests.testutils import *
from pony.orm.tests import setup_database, teardown_database
class TestGeneratorDbSession(unittest.TestCase):
def setUp(self):
db = Database()
class Account(db.Entity):
id = PrimaryKey(int)
amount = Required(int)
setup_database(db)
self.db = db
self.Account = Account
with db_session:
a1 = Account(id=1, amount=1000)
a2 = Account(id=2, amount=2000)
a3 = Account(id=3, amount=3000)
def tearDown(self):
teardown_database(self.db)
assert local.db_session is None
self.db = self.Account = None
@raises_exception(TypeError, 'db_session with `retry` option cannot be applied to generator function')
def test1(self):
@db_session(retry=3)
def f(): yield
@raises_exception(TypeError, 'db_session with `ddl` option cannot be applied to generator function')
def test2(self):
@db_session(ddl=True)
def f(): yield
@raises_exception(TypeError, 'db_session with `serializable` option cannot be applied to generator function')
def test3(self):
@db_session(serializable=True)
def f(): yield
def test4(self):
@db_session(immediate=True)
def f(): yield
@raises_exception(TransactionError, '@db_session-wrapped generator cannot be used inside another db_session')
def test5(self):
@db_session
def f(): yield
with db_session:
next(f())
def test6(self):
@db_session
def f():
x = local.db_session
self.assertTrue(x is not None)
yield self.db._get_cache()
self.assertEqual(local.db_session, x)
a1 = self.Account[1]
yield a1.amount
self.assertEqual(local.db_session, x)
a2 = self.Account[2]
yield a2.amount
gen = f()
cache = next(gen)
self.assertTrue(cache.is_alive)
self.assertEqual(local.db_session, None)
amount = next(gen)
self.assertEqual(amount, 1000)
self.assertEqual(local.db_session, None)
amount = next(gen)
self.assertEqual(amount, 2000)
self.assertEqual(local.db_session, None)
try: next(gen)
except StopIteration:
self.assertFalse(cache.is_alive)
else:
self.fail()
def test7(self):
@db_session
def f(id1):
a1 = self.Account[id1]
id2 = yield a1.amount
a2 = self.Account[id2]
amount = yield a2.amount
a1.amount -= amount
a2.amount += amount
commit()
gen = f(1)
amount1 = next(gen)
self.assertEqual(amount1, 1000)
amount2 = gen.send(2)
self.assertEqual(amount2, 2000)
try:
gen.send(100)
except StopIteration:
pass
else:
self.fail()
with db_session:
a1 = self.Account[1]
self.assertEqual(a1.amount, 900)
a2 = self.Account[2]
self.assertEqual(a2.amount, 2100)
@raises_exception(TransactionError, 'You need to manually commit() changes before suspending the generator')
def test8(self):
@db_session
def f(id1):
a1 = self.Account[id1]
a1.amount += 100
yield a1.amount
for amount in f(1):
pass
def test9(self):
@db_session
def f(id1):
a1 = self.Account[id1]
a1.amount += 100
commit()
yield a1.amount
for amount in f(1):
pass
def test10(self):
@db_session
def f(id1):
a1 = self.Account[id1]
yield a1.amount
a1.amount += 100
with db_session:
a = self.Account[1].amount
for amount in f(1):
pass
with db_session:
b = self.Account[1].amount
self.assertEqual(b, a + 100)
def test12(self):
@db_session
def f(id1):
a1 = self.Account[id1]
yield a1.amount
gen = f(1)
next(gen)
gen.close()
@raises_exception(TypeError, 'error message')
def test13(self):
@db_session
def f(id1):
a1 = self.Account[id1]
yield a1.amount
gen = f(1)
next(gen)
gen.throw(TypeError('error message'))
if __name__ == '__main__':
unittest.main()
| 25.451613 | 113 | 0.555978 | 4,440 | 0.937896 | 3,194 | 0.674694 | 2,358 | 0.498099 | 0 | 0 | 404 | 0.08534 |
d234e5a37645a98c004023879e482d81ecedb1c6
| 725 |
py
|
Python
|
private_sharing/migrations/0008_featuredproject.py
|
danamlewis/open-humans
|
9b08310cf151f49032b66ddd005bbd47d466cc4e
|
[
"MIT"
] | 57 |
2016-09-01T21:55:52.000Z
|
2022-03-27T22:15:32.000Z
|
private_sharing/migrations/0008_featuredproject.py
|
danamlewis/open-humans
|
9b08310cf151f49032b66ddd005bbd47d466cc4e
|
[
"MIT"
] | 464 |
2015-03-23T18:08:28.000Z
|
2016-08-25T04:57:36.000Z
|
private_sharing/migrations/0008_featuredproject.py
|
danamlewis/open-humans
|
9b08310cf151f49032b66ddd005bbd47d466cc4e
|
[
"MIT"
] | 25 |
2017-01-24T16:23:27.000Z
|
2021-11-07T01:51:42.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2018-01-05 01:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('private_sharing', '0007_auto_20171220_2038'),
]
operations = [
migrations.CreateModel(
name='FeaturedProject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True)),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='private_sharing.DataRequestProject')),
],
),
]
| 30.208333 | 133 | 0.623448 | 575 | 0.793103 | 0 | 0 | 0 | 0 | 0 | 0 | 195 | 0.268966 |
d235a418647a421cc3cde687c03b74bacf4239b5
| 5,759 |
py
|
Python
|
Tests/Validation/Optimization/test_zdt3.py
|
magnetron/pyleecan
|
2a3338f4ab080ad6488b5ab8746c3fea1f36f177
|
[
"Apache-2.0"
] | 1 |
2021-02-26T12:28:45.000Z
|
2021-02-26T12:28:45.000Z
|
Tests/Validation/Optimization/test_zdt3.py
|
magnetron/pyleecan
|
2a3338f4ab080ad6488b5ab8746c3fea1f36f177
|
[
"Apache-2.0"
] | null | null | null |
Tests/Validation/Optimization/test_zdt3.py
|
magnetron/pyleecan
|
2a3338f4ab080ad6488b5ab8746c3fea1f36f177
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Test Pyleecan optimization module using the Zitzler–Deb–Thiele function No. 3 (ZDT3)
"""
import pytest
from ....definitions import PACKAGE_NAME
from ....Tests.Validation.Machine.SCIM_001 import SCIM_001
from ....Classes.InputCurrent import InputCurrent
from ....Classes.MagFEMM import MagFEMM
from ....Classes.Simu1 import Simu1
from ....Classes.Output import Output
from ....Classes.OptiDesignVar import OptiDesignVar
from ....Classes.OptiObjFunc import OptiObjFunc
from ....Classes.OptiConstraint import OptiConstraint
from ....Classes.OptiProblem import OptiProblem
from ....Classes.ImportMatrixVal import ImportMatrixVal
from ....Classes.ImportGenVectLin import ImportGenVectLin
from ....Classes.OptiGenAlgNsga2Deap import OptiGenAlgNsga2Deap
import matplotlib.pyplot as plt
import matplotlib.image as img
import numpy as np
import random
@pytest.mark.validation
@pytest.mark.optimization
def test_zdt3():
# ### Defining reference Output
# Definition of the enforced output of the electrical module
Nt = 2
Nr = ImportMatrixVal(value=np.ones(Nt) * 3000)
Is = ImportMatrixVal(
value=np.array(
[
[6.97244193e-06, 2.25353053e02, -2.25353060e02],
[-2.60215295e02, 1.30107654e02, 1.30107642e02],
# [-6.97244208e-06, -2.25353053e02, 2.25353060e02],
# [2.60215295e02, -1.30107654e02, -1.30107642e02],
]
)
)
Ir = ImportMatrixVal(value=np.zeros(30))
time = ImportGenVectLin(start=0, stop=0.015, num=Nt, endpoint=True)
angle = ImportGenVectLin(
start=0, stop=2 * np.pi, num=64, endpoint=False
) # num=1024
# Definition of the simulation
simu = Simu1(name="Test_machine", machine=SCIM_001)
simu.input = InputCurrent(
Is=Is,
Ir=Ir, # zero current for the rotor
Nr=Nr,
angle_rotor=None, # Will be computed
time=time,
angle=angle,
angle_rotor_initial=0.5216 + np.pi,
)
# Definition of the magnetic simulation
simu.mag = MagFEMM(
is_stator_linear_BH=2,
is_rotor_linear_BH=2,
is_symmetry_a=True,
is_antiper_a=False,
)
simu.mag.Kmesh_fineness = 0.01
# simu.mag.Kgeo_fineness=0.02
simu.mag.sym_a = 4
simu.struct = None
output = Output(simu=simu)
# ### Design variable
my_vars = {}
for i in range(30):
my_vars["var_" + str(i)] = OptiDesignVar(
name="output.simu.input.Ir.value[" + str(i) + "]",
type_var="interval",
space=[0, 1],
function=lambda space: np.random.uniform(*space),
)
# ### Objectives
objs = {
"obj1": OptiObjFunc(
description="Maximization of the torque average",
func=lambda output: output.mag.Tem_av,
),
"obj2": OptiObjFunc(
description="Minimization of the torque ripple",
func=lambda output: output.mag.Tem_rip,
),
}
# ### Evaluation
def evaluate(output):
x = output.simu.input.Ir.value
f1 = lambda x: x[0]
g = lambda x: 1 + (9 / 29) * np.sum(x[1:])
h = lambda f1, g: 1 - np.sqrt(f1 / g) - (f1 / g) * np.sin(10 * np.pi * f1)
output.mag.Tem_av = f1(x)
output.mag.Tem_rip = g(x) * h(f1(x), g(x))
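# Note: the analytical ZDT3 objectives f1(x) and f2(x) = g(x)*h(f1, g) are stored in
# Tem_av and Tem_rip so the torque-based objective functions defined above can be reused.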
# ### Defining the problem
my_prob = OptiProblem(
output=output, design_var=my_vars, obj_func=objs, eval_func=evaluate
)
solver = OptiGenAlgNsga2Deap(problem=my_prob, size_pop=40, nb_gen=100, p_mutate=0.5)
res = solver.solve()
def plot_pareto(self):
"""Plot every fitness values with the pareto front for 2 fitness
Parameters
----------
self : OutputMultiOpti
"""
# TODO: Add a feature to return the design_variables of each individual from the Pareto front
# Get fitness and ngen
is_valid = np.array(self.is_valid)
fitness = np.array(self.fitness)
ngen = np.array(self.ngen)
# Keep only valid values
indx = np.where(is_valid)[0]
fitness = fitness[indx]
ngen = ngen[indx]
# Get pareto front
pareto = list(np.unique(fitness, axis=0))
# Get dominated values
to_remove = []
N = len(pareto)
for i in range(N):
for j in range(N):
if all(pareto[j] <= pareto[i]) and any(pareto[j] < pareto[i]):
to_remove.append(pareto[i])
break
# Remove dominated values
for i in to_remove:
for l in range(len(pareto)):
if all(i == pareto[l]):
pareto.pop(l)
break
pareto = np.array(pareto)
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
# Plot Pareto front
axs[0].scatter(
pareto[:, 0],
pareto[:, 1],
facecolors="b",
edgecolors="b",
s=0.8,
label="Pareto Front",
)
axs[0].autoscale()
axs[0].legend()
axs[0].set_title("Pyleecan results")
axs[0].set_xlabel(r"$f_1(x)$")
axs[0].set_ylabel(r"$f_2(x)$")
try:
img_to_find = img.imread(
"pyleecan\\Tests\\Validation\\Optimization\\zdt3.jpg", format="jpg"
)
axs[1].imshow(img_to_find, aspect="auto")
axs[1].axis("off")
axs[1].set_title("Pareto front of the problem")
except (TypeError, ValueError):
print("Pillow is needed to import jpg files")
return fig
fig = plot_pareto(res)
fig.savefig(PACKAGE_NAME + "/Tests/Results/Validation/test_zdt3.png")
| 30.310526 | 94 | 0.581177 | 0 | 0 | 0 | 0 | 4,901 | 0.850425 | 0 | 0 | 1,302 | 0.225924 |
d2366db96566571009998f46fd017359e1980f42
| 325 |
py
|
Python
|
comm_lib/import_lib.py
|
GUTLY/machine_learning_in_action
|
7820c948014c615ed10f693f03ea116a0f7d6b96
|
[
"Apache-2.0"
] | null | null | null |
comm_lib/import_lib.py
|
GUTLY/machine_learning_in_action
|
7820c948014c615ed10f693f03ea116a0f7d6b96
|
[
"Apache-2.0"
] | null | null | null |
comm_lib/import_lib.py
|
GUTLY/machine_learning_in_action
|
7820c948014c615ed10f693f03ea116a0f7d6b96
|
[
"Apache-2.0"
] | null | null | null |
"""
@Time : 12/4/2020 13:57
@Author : Young lee
@File : import_lib
@Project : machine_learning_in_action
"""
import collections
import math
import os
import random
import sys
import tarfile
import time
import zipfile
import operator
from IPython import display
from matplotlib import pyplot as plt
import numpy as np
| 16.25 | 37 | 0.775385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 0.353846 |
d236f9020f43723fb7080a085f23e82a9664de09
| 590 |
py
|
Python
|
example/example.py
|
fmilthaler/HTMLParser
|
ebe343796e32a25726b6659742196ceaab30bb3d
|
[
"MIT"
] | null | null | null |
example/example.py
|
fmilthaler/HTMLParser
|
ebe343796e32a25726b6659742196ceaab30bb3d
|
[
"MIT"
] | null | null | null |
example/example.py
|
fmilthaler/HTMLParser
|
ebe343796e32a25726b6659742196ceaab30bb3d
|
[
"MIT"
] | null | null | null |
from htmlparser import HTMLParser
import pandas
# Here we scrape a page from Wikipedia, parse it for tables, and convert the first table found into a `pandas.DataFrame`.
url = "https://en.wikipedia.org/wiki/List_of_S%26P_500_companies"
hp = HTMLParser(url)
# scraping the webpage
page = hp.scrap_url()
# extracting only tables from the webpage
element = 'table'
params = {'class': 'wikitable sortable'}
elements = hp.get_page_elements(page, element=element, params=params)
# get a pandas.DataFrame from the (first) html table
df = hp.parse_html_table(elements[0])
print(df.columns.values)
| 36.875 | 120 | 0.772881 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 329 | 0.557627 |
d2374979329fc2d21717d5eca2294d35f3c0c1d9
| 2,099 |
py
|
Python
|
project_name/common/models.py
|
brevetech/breve_drf_template
|
125e476810641f919296cb878980f91f4c091cf2
|
[
"MIT"
] | null | null | null |
project_name/common/models.py
|
brevetech/breve_drf_template
|
125e476810641f919296cb878980f91f4c091cf2
|
[
"MIT"
] | 17 |
2021-04-05T00:22:13.000Z
|
2022-01-11T04:53:47.000Z
|
project_name/common/models.py
|
brevetech/breve_drf_template
|
125e476810641f919296cb878980f91f4c091cf2
|
[
"MIT"
] | 1 |
2022-01-07T05:48:19.000Z
|
2022-01-07T05:48:19.000Z
|
from django.db import models
# https://stackoverflow.com/questions/1737017/django-auto-now-and-auto-now-add/1737078#1737078
from {{project_name}}.common.enums import PersonSexEnum
class TimeStampedModel(models.Model):
"""
Defines a timestamped model with create_date (auto_now_add) and update_date (auto_now)
"""
create_date = models.DateField(
auto_now_add=True, editable=False, verbose_name="Fecha de creación"
)
update_date = models.DateField(
auto_now=True, editable=False, verbose_name="Última modificación"
)
class Meta:
abstract = True
class PersonModel(TimeStampedModel):
"""Defines a generic representation of a person data model"""
first_name = models.CharField(
max_length=50, null=False, blank=False, verbose_name="Primer Nombre"
)
second_name = models.CharField(
max_length=50, null=True, blank=True, verbose_name="Segundo Nombre"
)
first_surname = models.CharField(
max_length=50, null=False, blank=False, verbose_name="Primer Apellido"
)
second_surname = models.CharField(
max_length=50, null=True, blank=True, verbose_name="Segundo Apellido"
)
address = models.TextField(null=False, blank=False, verbose_name="Dirección")
id_number = models.CharField(
max_length=16,
verbose_name="Cédula",
unique=True,
null=False,
blank=False,
)
birthdate = models.DateField(null=False, blank=False, verbose_name="Fecha de Nacimiento")
phone = models.CharField(max_length=25, verbose_name="Teléfono", null=True, blank=True)
email = models.EmailField(
max_length=50, null=True, blank=True, verbose_name="Correo Electrónico"
)
sex = models.CharField(
max_length=1,
null=False,
blank=False,
verbose_name="Sexo",
choices=PersonSexEnum.choices,
default=PersonSexEnum.FEMALE,
)
class Meta:
abstract = True
def __str__(self):
return f"{self.first_name} {self.second_name} {self.first_surname} {self.second_surname}"
| 32.292308 | 97 | 0.682706 | 1,919 | 0.911206 | 0 | 0 | 0 | 0 | 0 | 0 | 528 | 0.250712 |
d2387686143e714809862b9c318c59cf934f177d
| 4,881 |
py
|
Python
|
PikaBus/tools/PikaTools.py
|
alexbodn/PikaBus
|
5faf2e48f4d4deecb4428707f94bcf72a81cc3ee
|
[
"MIT"
] | 7 |
2020-03-21T12:22:18.000Z
|
2022-02-10T11:43:51.000Z
|
PikaBus/tools/PikaTools.py
|
alexbodn/PikaBus
|
5faf2e48f4d4deecb4428707f94bcf72a81cc3ee
|
[
"MIT"
] | null | null | null |
PikaBus/tools/PikaTools.py
|
alexbodn/PikaBus
|
5faf2e48f4d4deecb4428707f94bcf72a81cc3ee
|
[
"MIT"
] | 1 |
2021-06-21T10:56:56.000Z
|
2021-06-21T10:56:56.000Z
|
from typing import Union, List
import pika
import pika.exceptions
import time
import logging
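# Thin convenience wrappers around pika's BlockingConnection/BlockingChannel:
# queue/exchange declaration, (un)binding, publishing and safe-close helpers.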
def CreateDurableQueue(channel: pika.adapters.blocking_connection.BlockingChannel, queue: str,
settings: dict = None):
if settings is None:
settings = {}
channel.queue_declare(queue,
passive=settings.get('passive', False),
durable=settings.get('durable', True),
exclusive=settings.get('exclusive', False),
auto_delete=settings.get('auto_delete', False),
arguments=settings.get('arguments', None))
def CreateExchange(channel: pika.adapters.blocking_connection.BlockingChannel, exchange: str,
settings: dict = None):
if settings is None:
settings = {}
channel.exchange_declare(exchange,
exchange_type=settings.get('exchange_type', 'direct'),
passive=settings.get('passive', False),
durable=settings.get('durable', True),
auto_delete=settings.get('auto_delete', False),
internal=settings.get('internal', False),
arguments=settings.get('arguments', None))
def BindQueue(channel: pika.adapters.blocking_connection.BlockingChannel, queue: str, exchange: str, topic: str,
arguments: dict = None):
channel.queue_bind(queue, exchange, routing_key=topic, arguments=arguments)
def UnbindQueue(channel: pika.adapters.blocking_connection.BlockingChannel, queue: str, exchange: str, topic: str,
arguments: dict = None):
channel.queue_unbind(queue, exchange, routing_key=topic, arguments=arguments)
def AssertDurableQueueExists(connection: pika.BlockingConnection, queue: str, retries: int = 0, logger=logging.getLogger(__name__)):
count = 0
while count <= retries:
channel: pika.adapters.blocking_connection.BlockingChannel = connection.channel()
try:
channel.queue_declare(queue, durable=True, passive=True)
channel.close()
return
except Exception as e:
count += 1
if count <= retries:
time.sleep(1)
msg = f"Queue {queue} does not exist!"
logger.error(msg)
raise Exception(msg)
def SafeCloseChannel(channel: pika.BlockingConnection.channel, acceptAllFailures: bool = True):
if channel.is_closed:
return
try:
channel.close()
except pika.exceptions.ChannelWrongStateError:
# channel already closed
pass
except:
if not acceptAllFailures:
raise
def SafeCloseConnection(connection: pika.BlockingConnection, acceptAllFailures: bool = True):
if connection.is_closed:
return
try:
connection.close()
except pika.exceptions.ConnectionWrongStateError:
# connection already closed
pass
except:
if not acceptAllFailures:
raise
def BasicSend(channel: pika.adapters.blocking_connection.BlockingChannel,
exchange: str, destination: str, body: bytes,
properties: pika.spec.BasicProperties = None,
mandatory: bool = True):
BindQueue(channel, queue=destination, exchange=exchange, topic=destination)
channel.basic_publish(exchange, destination, body, properties=properties, mandatory=mandatory)
def BasicPublish(channel: pika.adapters.blocking_connection.BlockingChannel,
exchange: str, topic: str, body: bytes,
properties: pika.spec.BasicProperties = None,
mandatory: bool = True):
channel.basic_publish(exchange, topic, body, properties=properties, mandatory=mandatory)
def BasicSubscribe(channel: pika.adapters.blocking_connection.BlockingChannel,
exchange: str, topic: Union[List[str], str], queue: str,
arguments: dict = None):
if isinstance(topic, list):
topics = topic
else:
topics = [topic]
for topic in topics:
if isinstance(topic, dict):
arguments = topic.get('arguments', None)
topic = topic.get('topic', None)
BindQueue(channel, queue, exchange, topic, arguments=arguments)
def BasicUnsubscribe(channel: pika.adapters.blocking_connection.BlockingChannel,
exchange: str, topic: Union[List[str], str], queue: str,
arguments: dict = None):
if isinstance(topic, list):
topics = topic
else:
topics = [topic]
for topic in topics:
if isinstance(topic, dict):
arguments = topic.get('arguments', None)
topic = topic.get('topic', None)
UnbindQueue(channel, queue, exchange, topic, arguments=arguments)
| 38.132813 | 132 | 0.629379 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 247 | 0.050604 |
d23a8dd5865bbf7ea08abcad56ee55962f12112f
| 16,087 |
py
|
Python
|
roundup/backends/blobfiles.py
|
Noschvie/roundup
|
996377ed0d12c69a01c7565dc5f47d6fb0ccaf19
|
[
"MIT"
] | 1 |
2015-12-17T08:09:28.000Z
|
2015-12-17T08:09:28.000Z
|
roundup/backends/blobfiles.py
|
Noschvie/roundup
|
996377ed0d12c69a01c7565dc5f47d6fb0ccaf19
|
[
"MIT"
] | null | null | null |
roundup/backends/blobfiles.py
|
Noschvie/roundup
|
996377ed0d12c69a01c7565dc5f47d6fb0ccaf19
|
[
"MIT"
] | 1 |
2015-07-10T08:16:24.000Z
|
2015-07-10T08:16:24.000Z
|
#
# Copyright (c) 2001 Bizar Software Pty Ltd (http://www.bizarsoftware.com.au/)
# This module is free software, and you may redistribute it and/or modify
# under the same terms as Python, so long as this copyright message and
# disclaimer are retained in their original form.
#
# IN NO EVENT SHALL BIZAR SOFTWARE PTY LTD BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING
# OUT OF THE USE OF THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# BIZAR SOFTWARE PTY LTD SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS"
# BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
"""This module exports file storage for roundup backends.
Files are stored into a directory hierarchy.
"""
__docformat__ = 'restructuredtext'
import os
def files_in_dir(dir):
if not os.path.exists(dir):
return 0
num_files = 0
for dir_entry in os.listdir(dir):
full_filename = os.path.join(dir,dir_entry)
if os.path.isfile(full_filename):
num_files = num_files + 1
elif os.path.isdir(full_filename):
num_files = num_files + files_in_dir(full_filename)
return num_files
class FileStorage(object):
"""Store files in some directory structure
Some databases do not permit the storage of arbitrary data (i.e.,
file content). And, some database schema explicitly store file
content in the fielsystem. In particular, if a class defines a
'filename' property, it is assumed that the data is stored in the
indicated file, outside of whatever database Roundup is otherwise
using.
In these situations, it is difficult to maintain the transactional
abstractions used elsewhere in Roundup. In particular, if a
file's content is edited, but then the containing transaction is
not committed, we do not want to commit the edit. Similarly, we
would like to guarantee that if a transaction is committed to the
database, then the edit has in fact taken place.
This class provides an approximation of these transactional
requirements.
For classes that do not have a 'filename' property, the file name
used to store the file's content is a deterministic function of
the classname and nodeid for the file. The 'filename' function
computes this name. The name will contain directories and
subdirectories, but, suppose, for the purposes of what follows,
that the filename is 'file'.
Edit Protocol
-------------
When a file is created or edited, the following protocol is used:
1. The new content of the file is placed in 'file.tmp'.
2. A transaction is recorded in 'self.transactions' referencing the
'doStoreFile' method of this class.
3. At some subsequent point, the database 'commit' function is
called. This function first performs a traditional database
commit (for example, by issuing a SQL command to commit the
current transaction), and, then, runs the transactions recorded
in 'self.transactions'.
4. The 'doStoreFile' method renames the 'file.tmp' to 'file'.
If Step 3 never occurs, but, instead, the database 'rollback'
method is called, then that method, after rolling back the
database transaction, calls 'rollbackStoreFile', which removes
'file.tmp'.
Race Condition
--------------
If two Roundup instances (say, the mail gateway and a web client,
or two web clients running with a multi-process server) attempt
edits at the same time, both will write to 'file.tmp', and the
results will be indeterminate.
Crash Analysis
--------------
There are several situations that may occur if a crash (whether
because the machine crashes, because an unhandled Python exception
is raised, or because the Python process is killed) occurs.
Complexity ensues because backing up an RDBMS is generally more
complex than simply copying a file. Instead, some command is run
which stores a snapshot of the database in a file. So, if you
back up the database to a file, and then back up the filesystem,
it is likely that further database transactions have occurred
between the point of database backup and the point of filesystem
backup.
For the purposes, of this analysis, we assume that the filesystem
backup occurred after the database backup. Furthermore, we assume
that filesystem backups are atomic; i.e., the at the filesystem is
not being modified during the backup.
1. Neither the 'commit' nor 'rollback' methods on the database are
ever called.
In this case, the '.tmp' file should be ignored as the
transaction was not committed.
2. The 'commit' method is called. Subsequently, the machine
crashes, and is restored from backups.
The most recent filesystem backup and the most recent database
backup are not in general from the same instant in time.
This problem means that we can never be sure after a crash if
the contents of a file are what we intend. It is always
possible that an edit was made to the file that is not
reflected in the filesystem.
3. A crash occurs between the point of the database commit and the
call to 'doStoreFile'.
If only one of 'file' and 'file.tmp' exists, then that
version should be used. However, if both 'file' and 'file.tmp'
exist, there is no way to know which version to use.
Reading the File
----------------
When determining the content of the file, we use the following
algorithm:
1. If 'self.transactions' reflects an edit of the file, then use
'file.tmp'.
We know that an edit to the file is in process so 'file.tmp' is
the right choice. If 'file.tmp' does not exist, raise an
exception; something has removed the content of the file while
we are in the process of editing it.
2. Otherwise, if 'file.tmp' exists, and 'file' does not, use
'file.tmp'.
We know that the file is supposed to exist because there is a
reference to it in the database. Since 'file' does not exist,
we assume that Crash 3 occurred during the initial creation of
the file.
3. Otherwise, use 'file'.
If 'file.tmp' is not present, this is obviously the best we can
do. This is always the right answer unless Crash 2 occurred,
in which case the contents of 'file' may be newer than they
were at the point of database backup.
If 'file.tmp' is present, we know that we are not actively
editing the file. The possibilities are:
a. Crash 1 has occurred. In this case, using 'file' is the
right answer, so we will have chosen correctly.
b. Crash 3 has occurred. In this case, 'file.tmp' is the right
answer, so we will have chosen incorrectly. However, 'file'
was at least a previously committed value.
Future Improvements
-------------------
One approach would be to take advantage of databases which do
allow the storage of arbitrary data. For example, MySQL provides
the LONGBLOB datatype for storing up to 4GB of data.
Another approach would be to store a version ('v') in the actual
database and name files 'file.v'. Then, the editing protocol
would become:
1. Generate a new version 'v', guaranteed to be different from all
other versions ever used by the database. (The version need
not be in any particular sequence; a UUID would be fine.)
2. Store the content in 'file.v'.
3. Update the database to indicate that the version of the node is
'v'.
Now, if the transaction is committed, the database will refer to
'file.v', where the content exists. If the transaction is rolled
back, or not committed, 'file.v' will never be referenced. In the
event of a crash, under the assumptions above, there may be
'file.v' files that are not referenced by the database, but the
database will be consistent, so long as unreferenced 'file.v'
files are never removed until after the database has been backed
up.
"""
tempext = '.tmp'
"""The suffix added to files indicating that they are uncommitted."""
def __init__(self, umask):
self.umask = umask
def subdirFilename(self, classname, nodeid, property=None):
"""Determine what the filename and subdir for nodeid + classname is."""
if property:
name = '%s%s.%s'%(classname, nodeid, property)
else:
# roundupdb.FileClass never specified the property name, so don't
# include it
name = '%s%s'%(classname, nodeid)
# have a separate subdir for every thousand messages
subdir = str(int(nodeid) // 1000)
return os.path.join(subdir, name)
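# e.g. subdirFilename('msg', '2001', 'content') returns '2/msg2001.content' (POSIX separators)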
def _tempfile(self, filename):
"""Return a temporary filename.
'filename' -- The name of the eventual destination file."""
return filename + self.tempext
def _editInProgress(self, classname, nodeid, property):
"""Return true if the file indicated is being edited.
returns -- True if the current transaction includes an edit to
the file indicated."""
for method, args in self.transactions:
if (method == self.doStoreFile and
args == (classname, nodeid, property)):
return True
return False
def filename(self, classname, nodeid, property=None, create=0):
"""Determine what the filename for the given node and optionally
property is.
Try a variety of different filenames - the file could be in the
usual place, or it could be in a temp file pre-commit *or* it
could be in an old-style, backwards-compatible flat directory.
"""
filename = os.path.join(self.dir, 'files', classname,
self.subdirFilename(classname, nodeid, property))
# If the caller is going to create the file, return the
# post-commit filename. It is the callers responsibility to
# add self.tempext when actually creating the file.
if create:
return filename
tempfile = self._tempfile(filename)
# If an edit to this file is in progress, then return the name
# of the temporary file containing the edited content.
if self._editInProgress(classname, nodeid, property):
if not os.path.exists(tempfile):
raise IOError('content file for %s not found'%tempfile)
return tempfile
if os.path.exists(filename):
return filename
# Otherwise, if the temporary file exists, then the probable
# explanation is that a crash occurred between the point that
# the database entry recording the creation of the file
# occurred and the point at which the file was renamed from the
# temporary name to the final name.
if os.path.exists(tempfile):
try:
# Clean up, by performing the commit now.
os.rename(tempfile, filename)
except OSError:
pass
# If two Roundup clients both try to rename the file
# at the same time, only one of them will succeed.
# So, tolerate such an error -- but no other.
if not os.path.exists(filename):
raise IOError('content file for %s not found'%filename)
return filename
# ok, try flat (very old-style)
if property:
filename = os.path.join(self.dir, 'files', '%s%s.%s'%(classname,
nodeid, property))
else:
filename = os.path.join(self.dir, 'files', '%s%s'%(classname,
nodeid))
if os.path.exists(filename):
return filename
# file just ain't there
raise IOError('content file for %s not found'%filename)
def filesize(self, classname, nodeid, property=None, create=0):
filename = self.filename(classname, nodeid, property, create)
return os.path.getsize(filename)
def storefile(self, classname, nodeid, property, content):
"""Store the content of the file in the database. The property may be
None, in which case the filename does not indicate which property
is being saved.
"""
# determine the name of the file to write to
name = self.filename(classname, nodeid, property, create=1)
# make sure the file storage dir exists
if not os.path.exists(os.path.dirname(name)):
os.makedirs(os.path.dirname(name))
# save to a temp file
name = self._tempfile(name)
# make sure we don't register the rename action more than once
if not self._editInProgress(classname, nodeid, property):
# save off the rename action
self.transactions.append((self.doStoreFile, (classname, nodeid,
property)))
# always set umask before writing to make sure we have the proper one
# in multi-tracker (i.e. multi-umask) or modpython scenarios
# the umask may have changed since last we set it.
os.umask(self.umask)
open(name, 'wb').write(content)
def getfile(self, classname, nodeid, property):
"""Get the content of the file in the database.
"""
filename = self.filename(classname, nodeid, property)
f = open(filename, 'rb')
try:
# snarf the contents and make sure we close the file
return f.read()
finally:
f.close()
def numfiles(self):
"""Get number of files in storage, even across subdirectories.
"""
files_dir = os.path.join(self.dir, 'files')
return files_in_dir(files_dir)
def doStoreFile(self, classname, nodeid, property, **databases):
"""Store the file as part of a transaction commit.
"""
# determine the name of the file to write to
name = self.filename(classname, nodeid, property, 1)
# the file is currently ".tmp" - move it to its real name to commit
if name.endswith(self.tempext):
# creation
dstname = os.path.splitext(name)[0]
else:
# edit operation
dstname = name
name = self._tempfile(name)
# content is being updated (and some platforms, eg. win32, won't
# let us rename over the top of the old file)
if os.path.exists(dstname):
os.remove(dstname)
os.rename(name, dstname)
# return the classname, nodeid so we reindex this content
return (classname, nodeid)
def rollbackStoreFile(self, classname, nodeid, property, **databases):
"""Remove the temp file as a part of a rollback
"""
# determine the name of the file to delete
name = self.filename(classname, nodeid, property)
if not name.endswith(self.tempext):
name += self.tempext
os.remove(name)
def isStoreFile(self, classname, nodeid):
"""See if there is actually any FileStorage for this node.
Is there a better way than using self.filename?
"""
try:
fname = self.filename(classname, nodeid)
return True
except IOError:
return False
def destroy(self, classname, nodeid):
"""If there is actually FileStorage for this node
remove it from the filesystem
"""
if self.isStoreFile(classname, nodeid):
os.remove(self.filename(classname, nodeid))
# vim: set filetype=python ts=4 sw=4 et si
| 39.525799 | 82 | 0.654752 | 14,620 | 0.908808 | 0 | 0 | 0 | 0 | 0 | 0 | 11,292 | 0.701933 |
d23c5a7f0d13366045cfa8ea9d83ec4de2417ed0
| 1,467 |
py
|
Python
|
LeetCode/E2 - Add Two Numbers/solution.py
|
ltdangkhoa/Computer-Science-Fundamental
|
b70ba714e1dd13fcb377125e047c5fc08d3a82b3
|
[
"MIT"
] | null | null | null |
LeetCode/E2 - Add Two Numbers/solution.py
|
ltdangkhoa/Computer-Science-Fundamental
|
b70ba714e1dd13fcb377125e047c5fc08d3a82b3
|
[
"MIT"
] | null | null | null |
LeetCode/E2 - Add Two Numbers/solution.py
|
ltdangkhoa/Computer-Science-Fundamental
|
b70ba714e1dd13fcb377125e047c5fc08d3a82b3
|
[
"MIT"
] | null | null | null |
"""solution.py"""
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
"""
T: O(max(m, n)), where m and n are the lengths of l1 and l2
S: O(max(m, n)) for the result list
"""
l3 = ListNode(0)
temp_l3 = l3
carry = 0
while l1 or l2:
l1v = 0 if l1 is None else l1.val
l2v = 0 if l2 is None else l2.val
sum_l1v_l2v_carry = l1v + l2v + carry
if sum_l1v_l2v_carry > 9:
carry = 1
l3v = sum_l1v_l2v_carry%10
else:
carry = 0
l3v = sum_l1v_l2v_carry
temp_l3.next = ListNode(l3v)
temp_l3 = temp_l3.next
if l1 is not None:
l1 = l1.next
if l2 is not None:
l2 = l2.next
if carry != 0:
temp_l3.next = ListNode(carry)
return l3.next
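# NOTE: the code below this return statement is unreachable; it is a more compact
# alternative implementation kept for reference.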
"""
T: O(max(m, n))
S: O(max(m, n)) for the result list
"""
l3 = temp_l3 = ListNode(0)
all_carry = 0
while l1 or l2 or all_carry:
if l1:
all_carry += l1.val
l1 = l1.next
if l2:
all_carry += l2.val
l2 = l2.next
temp_l3.next = ListNode(all_carry%10)
temp_l3 = temp_l3.next
all_carry = all_carry // 10
return l3.next
| 23.66129 | 68 | 0.445808 | 1,408 | 0.959782 | 0 | 0 | 0 | 0 | 0 | 0 | 147 | 0.100204 |
d23c85c65422eeb7798338451574df0f59e40725
| 1,984 |
py
|
Python
|
networking_mlnx/dhcp/mlnx_dhcp.py
|
stackhpc/networking-mlnx
|
6a297fd040ff09e26e477b90f2fb229dc6a691b2
|
[
"Apache-2.0"
] | null | null | null |
networking_mlnx/dhcp/mlnx_dhcp.py
|
stackhpc/networking-mlnx
|
6a297fd040ff09e26e477b90f2fb229dc6a691b2
|
[
"Apache-2.0"
] | null | null | null |
networking_mlnx/dhcp/mlnx_dhcp.py
|
stackhpc/networking-mlnx
|
6a297fd040ff09e26e477b90f2fb229dc6a691b2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext
from neutron.agent.linux import dhcp
class DhcpOpt(object):
def __init__(self, **kwargs):
self.__dict__.update(ip_version=4)
self.__dict__.update(kwargs)
def __str__(self):
return str(self.__dict__)
class MlnxDnsmasq(dhcp.Dnsmasq):
_PREFIX = 'ff:00:00:00:00:00:02:00:00:02:c9:00:'
_MIDDLE = ':00:00:'
def _gen_client_id(self, port):
mac_address = port.mac_address
mac_first = mac_address[:8]
mac_last = mac_address[9:]
client_id = ''.join([self._PREFIX, mac_first, self._MIDDLE, mac_last])
return client_id
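# Example: for mac_address 'fa:16:3e:aa:bb:cc' this returns
# 'ff:00:00:00:00:00:02:00:00:02:c9:00:fa:16:3e:00:00:aa:bb:cc'.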
def _gen_client_id_opt(self, client_id):
return DhcpOpt(opt_name=edo_ext.DHCP_OPT_CLIENT_ID,
opt_value=client_id)
def _get_port_extra_dhcp_opts(self, port):
client_id = self._gen_client_id(port)
if hasattr(port, edo_ext.EXTRADHCPOPTS):
for opt in port.extra_dhcp_opts:
if opt.opt_name == edo_ext.DHCP_OPT_CLIENT_ID:
opt.opt_value = client_id
return port.extra_dhcp_opts
port.extra_dhcp_opts.append(self._gen_client_id_opt(client_id))
else:
setattr(port, edo_ext.EXTRADHCPOPTS,
[self._gen_client_id_opt(client_id)])
return port.extra_dhcp_opts
| 34.807018 | 78 | 0.681452 | 1,282 | 0.646169 | 0 | 0 | 0 | 0 | 0 | 0 | 626 | 0.315524 |
d23df24d42dc33a797b2ad6f76f674f1c588ed01
| 679 |
py
|
Python
|
solution/practice/algorithms/warmup/plus-minus/solution.py
|
benevolentPreta/HackerRank_Py3
|
03c4bd9e2db2d91645b72b62b060d73f5ec7e437
|
[
"BSD-2-Clause"
] | null | null | null |
solution/practice/algorithms/warmup/plus-minus/solution.py
|
benevolentPreta/HackerRank_Py3
|
03c4bd9e2db2d91645b72b62b060d73f5ec7e437
|
[
"BSD-2-Clause"
] | 1 |
2020-06-06T19:56:54.000Z
|
2020-06-06T19:56:54.000Z
|
solution/practice/algorithms/warmup/plus-minus/solution.py
|
benevolentPreta/HackerRank_Py3
|
03c4bd9e2db2d91645b72b62b060d73f5ec7e437
|
[
"BSD-2-Clause"
] | null | null | null |
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the plusMinus function below.
def plusMinus(arr):
'''
Count the positive, negative and zero entries of `arr` and print
the fraction of each. This is the straightforward single-pass
O(n) solution; there may be more elegant ones, but it passes.
'''
pos, neg, zero = 0, 0, 0
size = len(arr)
for i in range(size):
if arr[i] > 0:
pos+=1
elif arr[i] < 0:
neg+=1
else:
zero+=1
print(float((pos/size)))
print(float((neg/size)))
print(float((zero/size)))
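# Example: plusMinus([-4, 3, -9, 0, 4, 1]) prints 0.5, 0.3333333333333333 and
# 0.16666666666666666 (the ratios of positive, negative and zero values).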
if __name__ == '__main__':
n = int(input())
arr = list(map(int, input().rstrip().split()))
plusMinus(arr)
| 17.868421 | 51 | 0.564065 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 196 | 0.28866 |
d23e3eac1aa7a46a82d21a527d06862f245b4e29
| 4,273 |
py
|
Python
|
youtube_dl/extractor/gorillavid.py
|
builder07/ytdl
|
2c0a5d50af7ecc7302c813d649ee72dcd457a50a
|
[
"Unlicense"
] | null | null | null |
youtube_dl/extractor/gorillavid.py
|
builder07/ytdl
|
2c0a5d50af7ecc7302c813d649ee72dcd457a50a
|
[
"Unlicense"
] | null | null | null |
youtube_dl/extractor/gorillavid.py
|
builder07/ytdl
|
2c0a5d50af7ecc7302c813d649ee72dcd457a50a
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
encode_dict,
int_or_none,
)
class GorillaVidIE(InfoExtractor):
IE_DESC = 'GorillaVid.in, daclips.in, movpod.in, fastvideo.in, realvid.net and filehoot.com'
_VALID_URL = r'''(?x)
https?://(?P<host>(?:www\.)?
(?:daclips\.in|gorillavid\.in|movpod\.in|fastvideo\.in|realvid\.net|filehoot\.com))/
(?:embed-)?(?P<id>[0-9a-zA-Z]+)(?:-[0-9]+x[0-9]+\.html)?
'''
_FILE_NOT_FOUND_REGEX = r'>(?:404 - )?File Not Found<'
_TESTS = [{
'url': 'http://gorillavid.in/06y9juieqpmi',
'md5': '5ae4a3580620380619678ee4875893ba',
'info_dict': {
'id': '06y9juieqpmi',
'ext': 'flv',
'title': 'Rebecca Black My Moment Official Music Video Reaction-6GK87Rc8bzQ',
'thumbnail': 're:http://.*\.jpg',
},
}, {
'url': 'http://gorillavid.in/embed-z08zf8le23c6-960x480.html',
'only_matching': True,
}, {
'url': 'http://daclips.in/3rso4kdn6f9m',
'md5': '1ad8fd39bb976eeb66004d3a4895f106',
'info_dict': {
'id': '3rso4kdn6f9m',
'ext': 'mp4',
'title': 'Micro Pig piglets ready on 16th July 2009-bG0PdrCdxUc',
'thumbnail': 're:http://.*\.jpg',
}
}, {
# video with countdown timeout
'url': 'http://fastvideo.in/1qmdn1lmsmbw',
'md5': '8b87ec3f6564a3108a0e8e66594842ba',
'info_dict': {
'id': '1qmdn1lmsmbw',
'ext': 'mp4',
'title': 'Man of Steel - Trailer',
'thumbnail': 're:http://.*\.jpg',
},
}, {
'url': 'http://realvid.net/ctn2y6p2eviw',
'md5': 'b2166d2cf192efd6b6d764c18fd3710e',
'info_dict': {
'id': 'ctn2y6p2eviw',
'ext': 'flv',
'title': 'rdx 1955',
'thumbnail': 're:http://.*\.jpg',
},
}, {
'url': 'http://movpod.in/0wguyyxi1yca',
'only_matching': True,
}, {
'url': 'http://filehoot.com/3ivfabn7573c.html',
'info_dict': {
'id': '3ivfabn7573c',
'ext': 'mp4',
'title': 'youtube-dl test video \'äBaW_jenozKc.mp4.mp4',
'thumbnail': 're:http://.*\.jpg',
}
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
url = 'http://%s/%s' % (mobj.group('host'), video_id)
webpage = self._download_webpage(url, video_id)
if re.search(self._FILE_NOT_FOUND_REGEX, webpage) is not None:
raise ExtractorError('Video %s does not exist' % video_id, expected=True)
fields = self._hidden_inputs(webpage)
if fields['op'] == 'download1':
countdown = int_or_none(self._search_regex(
r'<span id="countdown_str">(?:[Ww]ait)?\s*<span id="cxc">(\d+)</span>\s*(?:seconds?)?</span>',
webpage, 'countdown', default=None))
if countdown:
self._sleep(countdown, video_id)
post = compat_urllib_parse.urlencode(encode_dict(fields))
req = compat_urllib_request.Request(url, post)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
webpage = self._download_webpage(req, video_id, 'Downloading video page')
title = self._search_regex(
[r'style="z-index: [0-9]+;">([^<]+)</span>', r'<td nowrap>([^<]+)</td>', r'>Watch (.+) '],
webpage, 'title', default=None) or self._og_search_title(webpage)
video_url = self._search_regex(
r'file\s*:\s*["\'](http[^"\']+)["\'],', webpage, 'file url')
thumbnail = self._search_regex(
r'image\s*:\s*["\'](http[^"\']+)["\'],', webpage, 'thumbnail', fatal=False)
formats = [{
'format_id': 'sd',
'url': video_url,
'quality': 1,
}]
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'formats': formats,
}
| 33.645669 | 110 | 0.528902 | 4,006 | 0.937295 | 0 | 0 | 0 | 0 | 0 | 0 | 1,931 | 0.451802 |
d23f4d942f6df091ea30d280bbf61284f173aee1
| 7,552 |
py
|
Python
|
Tests/test_GenBank_unittest.py
|
cbrueffer/biopython
|
1ffb1d92d4735166089e28ac07ee614d5ec80070
|
[
"PostgreSQL"
] | null | null | null |
Tests/test_GenBank_unittest.py
|
cbrueffer/biopython
|
1ffb1d92d4735166089e28ac07ee614d5ec80070
|
[
"PostgreSQL"
] | null | null | null |
Tests/test_GenBank_unittest.py
|
cbrueffer/biopython
|
1ffb1d92d4735166089e28ac07ee614d5ec80070
|
[
"PostgreSQL"
] | null | null | null |
# Copyright 2013 by Kai Blin.
# Revisions copyright 2015 by Peter Cock.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
import unittest
import warnings
from os import path
from Bio import BiopythonParserWarning
from Bio import GenBank
from Bio import SeqIO
class GenBankTests(unittest.TestCase):
def test_invalid_product_line_raises_value_error(self):
"""Test GenBank parsing invalid product line raises ValueError"""
def parse_invalid_product_line():
rec = SeqIO.read(path.join('GenBank', 'invalid_product.gb'),
'genbank')
self.assertRaises(ValueError, parse_invalid_product_line)
def test_genbank_read(self):
with open(path.join("GenBank", "NC_000932.gb")) as handle:
record = GenBank.read(handle)
self.assertEqual(['NC_000932'], record.accession)
def test_genbank_read_multirecord(self):
with open(path.join("GenBank", "cor6_6.gb")) as handle:
self.assertRaises(ValueError, GenBank.read, handle)
def test_genbank_read_invalid(self):
with open(path.join("GenBank", "NC_000932.faa")) as handle:
self.assertRaises(ValueError, GenBank.read, handle)
def test_genbank_read_no_origin_no_end(self):
with open(path.join("GenBank", "no_origin_no_end.gb")) as handle:
self.assertRaises(ValueError, GenBank.read, handle)
# Evil hack with 000 to manipulate sort order to ensure this is tested
# first (otherwise something silences the warning)
def test_000_genbank_bad_loc_wrap_warning(self):
with warnings.catch_warnings():
warnings.simplefilter("error", BiopythonParserWarning)
with open(path.join("GenBank", "bad_loc_wrap.gb")) as handle:
# self.assertRaises(BiopythonParserWarning, GenBank.read, handle)
try:
record = GenBank.read(handle)
except BiopythonParserWarning as e:
self.assertEqual(str(e), "Non-standard feature line wrapping (didn't break on comma)?")
else:
self.assertTrue(False, "Expected specified BiopythonParserWarning here.")
# Similar hack as we also want to catch that warning here
def test_001_negative_location_warning(self):
with warnings.catch_warnings():
warnings.simplefilter("error", BiopythonParserWarning)
try:
SeqIO.read(path.join("GenBank", "negative_location.gb"), "genbank")
except BiopythonParserWarning as e:
self.assertEqual(str(e), "Couldn't parse feature location: '-2..492'")
else:
self.assertTrue(False, "Expected specified BiopythonParserWarning here.")
def test_genbank_bad_loc_wrap_parsing(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", BiopythonParserWarning)
with open(path.join("GenBank", "bad_loc_wrap.gb")) as handle:
record = GenBank.read(handle)
self.assertEqual(1, len(record.features))
loc = record.features[0].location
self.assertEqual(loc, "join(3462..3615,3698..3978,4077..4307,4408..4797,4876..5028,5141..5332)")
def test_negative_location(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", BiopythonParserWarning)
rec = SeqIO.read(path.join("GenBank", "negative_location.gb"), "genbank")
self.assertEqual(None, rec.features[-1].location)
def test_dot_lineage(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", BiopythonParserWarning)
rec = SeqIO.read("GenBank/bad_loc_wrap.gb", "genbank")
self.assertEqual(rec.annotations["organism"], ".")
self.assertEqual(rec.annotations["taxonomy"], [])
def test_dblink(self):
"""GenBank record with old DBLINK project entry."""
record = SeqIO.read("GenBank/NC_005816.gb", "gb")
self.assertEqual(record.dbxrefs, ["Project:58037"])
embl = record.format("embl")
self.assertTrue("XX\nPR Project:58037;\nXX\n" in embl, embl)
def test_dblink_two(self):
"""GenBank record with old and new DBLINK project entries."""
record = SeqIO.read("GenBank/NP_416719.gbwithparts", "gb")
self.assertEqual(record.dbxrefs,
["Project:57779", "BioProject:PRJNA57779"])
embl = record.format("embl")
self.assertTrue("XX\nPR Project:PRJNA57779;\nXX\n" in embl, embl)
def test_dbline_gb_embl(self):
"""GenBank / EMBL paired records with PR project entry: GenBank"""
record = SeqIO.read("GenBank/DS830848.gb", "gb")
self.assertTrue("BioProject:PRJNA16232" in record.dbxrefs, record.dbxrefs)
gb = record.format("gb")
self.assertTrue("\nDBLINK BioProject:PRJNA16232\n" in gb, gb)
# Also check EMBL output
embl = record.format("embl")
self.assertTrue("XX\nPR Project:PRJNA16232;\nXX\n" in embl, embl)
def test_dbline_embl_gb(self):
"""GenBank / EMBL paired records with PR project entry: EMBL"""
record = SeqIO.read("EMBL/DS830848.embl", "embl")
# TODO: Should we map this to BioProject:PRJNA16232
self.assertTrue("Project:PRJNA16232" in record.dbxrefs, record.dbxrefs)
gb = record.format("gb")
self.assertTrue("\nDBLINK Project:PRJNA16232\n" in gb, gb)
embl = record.format("embl")
self.assertTrue("XX\nPR Project:PRJNA16232;\nXX\n" in embl, embl)
def test_structured_comment_parsing(self):
# GISAID_EpiFlu(TM)Data, HM138502.gbk has both 'comment' and 'structured_comment'
record = SeqIO.read(path.join('GenBank', 'HM138502.gbk'), 'genbank')
self.assertEqual(record.annotations['comment'],
"Swine influenza A (H1N1) virus isolated during human swine flu\noutbreak of 2009.")
self.assertEqual(record.annotations['structured_comment']['GISAID_EpiFlu(TM)Data']['Lineage'], 'swl')
self.assertEqual(len(record.annotations['structured_comment']['GISAID_EpiFlu(TM)Data']), 3)
# FluData structured comment
record = SeqIO.read(path.join('GenBank', 'EU851978.gbk'), 'genbank')
self.assertEqual(record.annotations['structured_comment']['FluData']['LabID'], '2008704957')
self.assertEqual(len(record.annotations['structured_comment']['FluData']), 5)
# Assembly-Data structured comment
record = SeqIO.read(path.join('GenBank', 'KF527485.gbk'), 'genbank')
self.assertEqual(record.annotations['structured_comment']['Assembly-Data']['Assembly Method'], 'Lasergene v. 10')
self.assertEqual(len(record.annotations['structured_comment']['Assembly-Data']), 2)
# No structured comment in NC_000932.gb, just a regular comment
record = SeqIO.read(path.join('GenBank', 'NC_000932.gb'), 'genbank')
self.assertFalse("structured_comment" in record.annotations)
self.assertEqual(record.annotations['comment'],
'REVIEWED REFSEQ: This record has been curated by NCBI staff. The\n'
'reference sequence was derived from AP000423.\n'
'COMPLETENESS: full length.')
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| 50.346667 | 121 | 0.659428 | 7,054 | 0.934057 | 0 | 0 | 0 | 0 | 0 | 0 | 2,852 | 0.377648 |
d24018cb7c01fc32bd606207dd5f57d954a62e7b
| 6,618 |
py
|
Python
|
segtrain/trainer/trainer.py
|
parthi-bharathi/semantic-image-segmentation
|
5dd34db4d74b0fe3d6cc9033a0e55ddf6e73420c
|
[
"Apache-2.0"
] | 2 |
2020-08-26T00:13:37.000Z
|
2022-01-07T07:59:59.000Z
|
segtrain/trainer/trainer.py
|
parthi-bharathi/semantic-image-segmentation
|
5dd34db4d74b0fe3d6cc9033a0e55ddf6e73420c
|
[
"Apache-2.0"
] | 1 |
2020-10-20T13:37:29.000Z
|
2020-10-27T09:59:32.000Z
|
segtrain/trainer/trainer.py
|
parthi-bharathi/semantic-image-segmentation
|
5dd34db4d74b0fe3d6cc9033a0e55ddf6e73420c
|
[
"Apache-2.0"
] | 1 |
2022-03-02T10:57:37.000Z
|
2022-03-02T10:57:37.000Z
|
import os
import tensorflow.keras.backend as K
from dataflow import (
BatchData, RepeatedData, MultiProcessRunnerZMQ)
from tensorflow.keras.callbacks import Callback, ReduceLROnPlateau, ModelCheckpoint, TensorBoard
from tensorflow.keras.callbacks import LearningRateScheduler
from .modelcheckpoint import CustomModelCheckpointCallback
import tensorflow as tf
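# Returns a Keras LearningRateScheduler that multiplies the model's current learning
# rate by `factor` every `epoch_interval` epochs.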
def get_interval_lrscheduler_callback(model, epoch_interval, factor):
def scheduler(epoch):
if epoch % epoch_interval == 0 and epoch != 0:
lr = K.get_value(model.optimizer.lr)
K.set_value(model.optimizer.lr, lr * factor)
print("lr changed to {}".format(lr * factor))
return K.get_value(model.optimizer.lr)
lr_decay = LearningRateScheduler(scheduler)
return lr_decay
def model_saver_callback(trainer, epoch_interval):
class ModelSaver(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
if (epoch % epoch_interval == 0 and epoch != 0):
trainer.save_model_hd5('model_snapshot' + str(epoch))
return ModelSaver()
class LearningRatePrinter(tf.keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs={}):
_lr = K.eval(self.model.optimizer.lr)
print('lr:', _lr)
class KerasTrainer:
def __init__(self, train_ds, model, prefix, model_save_dir, val_ds=None):
self.train_ds = train_ds
self.model = model
self.prefix = prefix
self.model_save_dir = model_save_dir
self.val_ds = val_ds
self.train_losses = []
self.val_losses = []
self.val_eval_epochs = []
def train(self, batch_size, num_epochs, steps_per_epoch=None, lr_decay_type='plateau', init_learn_rate=None,
verbose=0, data_grouper=None, additional_callbacks=[], val_batch_size=64, hook_tensorbord=True,
chkpt_monitor=('val_loss', 'auto'), prefetch_data = False):
"""
:param batch_size: training batch size
:param num_epochs: number of training epochs
:param steps_per_epoch: iterations per epoch; defaults to the size of the batched training dataflow
:param lr_decay_type: 'interval' or 'plateau'
:return:
"""
assert lr_decay_type == 'plateau' or lr_decay_type == 'interval', 'invalid option for lr_decay_type'
ds_train_ = BatchData(self.train_ds, batch_size, remainder=False)
if (steps_per_epoch is None):
steps_per_epoch = ds_train_.size()
if (data_grouper is not None):
ds_train_ = data_grouper(ds_train_)
# for parallel loading
if prefetch_data: ds_train_ = MultiProcessRunnerZMQ(ds_train_, num_proc=15)
#ds_train_ = BatchData(ds_train_, 256)
ds_train_ = RepeatedData(ds_train_, -1)
ds_train_.reset_state()
batcher_train = ds_train_.get_data()
ds_val_ = BatchData(self.val_ds, val_batch_size, remainder=True)
if (data_grouper is not None):
ds_val_ = data_grouper(ds_val_)
# ds_val_ = FixedSizeData(ds_val_ , ds_val_.size()/1) #only evaluate on the first 50% of the data
val_steps = ds_val_.size()
ds_val_ = RepeatedData(ds_val_, -1)
ds_val_.reset_state()
batcher_val = ds_val_.get_data()
# val_steps = 20#ds_val_.size()/2 # only evaluate on 50% of data
if (init_learn_rate is not None):
K.set_value(self.model.model_train.optimizer.lr, init_learn_rate)
print("Training with: ")
print(' nepochs', num_epochs)
print(' number of iterations/epoch', steps_per_epoch)
print('lr before for loop', K.get_value(self.model.model_train.optimizer.lr))
if (lr_decay_type == 'plateau'):
reduce_lr = ReduceLROnPlateau(monitor='val_loss', mode='auto', factor=0.25, patience=5, min_lr=1e-6)
else:
reduce_lr = get_interval_lrscheduler_callback(self.model.model_train, epoch_interval=18, factor=0.1)
if (not os.path.exists(self.model_save_dir)):
os.mkdir(self.model_save_dir)
model_filepath = os.path.join(self.model_save_dir, self.prefix + '.hd5')
monitor, mode = chkpt_monitor
if (self.model.multigpu_train):
model_checkpoint = CustomModelCheckpointCallback(model_filepath, self.model.model_main, monitor=monitor,
verbose=1, save_best_only=True,
save_weights_only=True, mode=mode, period=1)
else:
model_checkpoint = ModelCheckpoint(model_filepath, monitor=monitor, verbose=1, save_best_only=True,
save_weights_only=True, mode=mode, period=1)
lr_printer = LearningRatePrinter()
tensor_board = TensorBoard(log_dir=self.model_save_dir)
callbacks = [reduce_lr, lr_printer, model_checkpoint]
callbacks.extend(additional_callbacks)
if (hook_tensorbord):
callbacks.append(tensor_board)
self.save_model_json()
def new_batcher(b):
for d in b:
yield tuple(d)#(d[0], d[1])
tfv = tf.__version__.split('.')[0]
if tfv == '1':
self.model.model_train.fit_generator(new_batcher(batcher_train), steps_per_epoch=steps_per_epoch, epochs=num_epochs,
verbose=verbose,
callbacks=callbacks, validation_data=batcher_val,
validation_steps=val_steps)
else:
# for TensorFlow 2.0.2 and greater
# (passing a generator to fit() does not work for versions >= 1.0.x and < 2.0.2)
self.model.model_train.fit(new_batcher(batcher_train), steps_per_epoch=steps_per_epoch, epochs=num_epochs, verbose=verbose,
callbacks=callbacks, validation_data=new_batcher(batcher_val),
validation_steps=val_steps)
def save_model_json(self):
print("Saving model structure as json")
if (not os.path.exists(self.model_save_dir)):
os.mkdir(self.model_save_dir)
with open(os.path.join(self.model_save_dir, self.prefix + ".json"), "w") as text_file:
text_file.write(self.model.model_main.to_json())
print("Done")
def save_model_hd5(self, filename_prefix):
print("Saving model structure as json")
if (not os.path.exists(self.model_save_dir)):
os.mkdir(self.model_save_dir)
self.model.model_main.save(os.path.join(self.model_save_dir, filename_prefix + '.hd5'))
| 39.159763 | 135 | 0.630553 | 5,732 | 0.866123 | 4,326 | 0.653672 | 0 | 0 | 0 | 0 | 813 | 0.122847 |
d24182845a6b7e4d2904f9bc95447b5c4c1ca7fd
| 1,570 |
py
|
Python
|
turtle/pyramid.py
|
luscra0/Turtle-Experiments
|
df9693c871dd176673667c231f7f81250a479348
|
[
"MIT"
] | null | null | null |
turtle/pyramid.py
|
luscra0/Turtle-Experiments
|
df9693c871dd176673667c231f7f81250a479348
|
[
"MIT"
] | 6 |
2021-08-30T01:08:10.000Z
|
2021-08-30T23:04:55.000Z
|
turtle/pyramid.py
|
luscra0/Turtle-Shape-Thingy
|
df9693c871dd176673667c231f7f81250a479348
|
[
"MIT"
] | null | null | null |
import turtle
import math
from time import sleep
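# Draws a wireframe pyramid and spins it by recomputing its projected corner points each
# frame; calculate_points() samples an ellipse of radii r1, r2 centred at `pos` at the
# given angles (in degrees).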
def calculate_points(pos, r1, r2, angles):
points = []
for a in angles:
x = pos[0] + (r1 * math.cos(math.radians(a)))
y = pos[1] + (r2 * math.sin(math.radians(a)))
points.append((x, y))
return points
def draw_pyramid(tur, draw=False):
y_points = calculate_points(pyramid_pos, 0, pyramid_height//2, pyramid_y_angles)
x_points = calculate_points(y_points[1], pyramid_width, pyramid_width*math.cos(math.radians(pyramid_y_angles[1])), pyramid_x_angles)
if draw:
screen.tracer(1)
jump(tur, x_points[0])
for p in x_points:
tur.goto(p)
tur.goto(y_points[0])
jump(t1, p)
tur.goto(x_points[0])
screen.tracer(0)
def jump(tur, pos):
tur.pu()
tur.goto(pos)
tur.pd()
screen = turtle.Screen()
t1 = turtle.Turtle()
t1.hideturtle()
pyramid_base_sides = 4
pyramid_height = 200
pyramid_width = 100
spin_x = True
spin_y = True
pyramid_pos = [0, 0]
pyramid_x_angles = [x for x in range(15, 375, 360//pyramid_base_sides)]
pyramid_y_angles = [80, 260]
draw_pyramid(t1, True)
while True:
draw_pyramid(t1)
if spin_x:
for i in range(len(pyramid_x_angles)):
pyramid_x_angles[i] += 1
if pyramid_x_angles[i] >= 360:
pyramid_x_angles[i] -= 360
if spin_y:
for i in range(len(pyramid_y_angles)):
pyramid_y_angles[i] += 1
if pyramid_y_angles[i] >= 360:
pyramid_y_angles[i] -= 360
screen.update()
sleep(.01)
t1.clear()
| 26.166667 | 136 | 0.625478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d2423e50a292004365a346d8a0b8d79733015061
| 5,791 |
py
|
Python
|
docker_leash/config.py
|
docker-leash/docker-leash
|
d98c0a98ddecac2c9775e839d1e64382b811a3cf
|
[
"MIT"
] | 1 |
2018-01-15T12:29:20.000Z
|
2018-01-15T12:29:20.000Z
|
docker_leash/config.py
|
docker-leash/docker-leash
|
d98c0a98ddecac2c9775e839d1e64382b811a3cf
|
[
"MIT"
] | 92 |
2018-01-12T21:04:42.000Z
|
2018-04-08T17:25:26.000Z
|
docker_leash/config.py
|
docker-leash/docker-leash
|
d98c0a98ddecac2c9775e839d1e64382b811a3cf
|
[
"MIT"
] | 2 |
2018-01-13T16:52:54.000Z
|
2020-04-24T22:45:46.000Z
|
# vim:set ts=4 sw=4 et:
'''
Config
======
'''
import re
from .action_mapper import Action
from .checks_list import Checks
from .exceptions import ConfigurationException
class Config(object):
"""The :class:`Config` class is responsible for storing application groups
and policies read from the datastore.
It has some handy functions to extract values from the configuration.
It can respond to questions such as:
* "Which are the groups for a user?"
* "Which policies user belong to?"
* "Which tests are enabled for a user?"
:var groups: The groups.
:vartype groups: dict or None
:var policies: The policies.
:vartype policies: dict or None
:param groups: The groups.
:type groups: dict or None
:param policies: The policies.
:type policies: dict or None
"""
#: The loaded policies
policies = None
#: The loaded groups
groups = None
def __init__(self, groups=None, policies=None):
self.update(groups, policies)
def update(self, groups=None, policies=None):
"""Update the stored configuration with the provided values.
:param groups: The groups.
:type groups: dict or None
:param policies: The policies.
:type policies: dict or None
"""
if groups:
if self.groups:
self.groups.update(groups)
else:
self.groups = groups
if policies:
self.policies = policies
def get_rules(self, payload):
"""Return the rules for a payload.
:param str payload: The current payload.
:return: The rules concerned by the payload.
:rtype: list
"""
username = payload.user
action = Action(method=payload.method, query=payload.uri)
hostname = payload.get_host()
for rule in self.policies:
if not self._match_host(hostname, rule["hosts"]):
continue
if "policies" not in rule:
return self._default_rule(rule)
policies = self._get_policy_by_member(username, rule["policies"])
if policies is None:
return self._default_rule(rule)
rules = self._match_rules(action, policies)
if not rules:
return self._default_rule(rule)
return rules
@staticmethod
def _default_rule(rule):
"""Construct a default rule
:param dict rule: The current parsed rule
        :return: A :class:`~docker_leash.checks_list.Checks` containing only the default rule
        :rtype: :class:`~docker_leash.checks_list.Checks`
"""
checks = Checks()
checks.add(rule["default"])
return checks
@staticmethod
def _match_host(hostname, host_rules):
"""Validate if a hostname match hosts regex list
:param str hostname: The hostname
:param list host_rules: List of hosts regex
:return: True if hostname match host rules
:rtype: bool
:raises ConfigurationException: if the host rules are invalid.
"""
match = False
for hosts_reg in host_rules:
mode = hosts_reg[0]
regex = hosts_reg[1:]
if mode == '+':
if re.match(regex, hostname):
match = True
continue
elif mode == '-':
if re.match(regex, hostname):
match = False
continue
else:
raise ConfigurationException(
"'hosts' regex (%s) is missing '+' or '-'" % hosts_reg
)
return match
def _get_policy_by_member(self, username, policies):
"""Extract the policies for a user name.
Return the concerned policies:
* If the user match in a group
* If the user is None, and "members" contains "Anonymous"
* Else return None
:param str username: The username
:param dict policies: The policies to filter
:return: The policies for username
:rtype: None or dict
"""
for policy in policies:
for group in policy["members"]:
if group in self.groups:
if username in self.groups[group] \
or "*" in self.groups[group] \
or (username is None and "Anonymous" in self.groups[group]):
return policy["rules"]
return None
@staticmethod
def _match_rules(action, actions):
"""Extract the checks for an action.
        First match for exact comparison, then for the "parents" action name,
        and finally for the "any" keyword.
:param docker_leash.action_mapper.Action action: The current action
:param dict actions: The actions from the policies
:return: The filtered actions list
:rtype: `~docker_leash.checks_list.Checks`
"""
assert isinstance(action, Action), 'expected Action, got {!r}'.format(action)
checks = Checks()
action_name = action.name
parent_action = action.namespace_name
# Look for "normal" Actions
if action_name in actions:
for check, args in actions[action_name].iteritems():
checks.add({check: args})
# Look for "parents" Actions
elif parent_action in actions:
for check, args in actions[parent_action].iteritems():
checks.add({check: args})
# Look for "any" Actions
elif "any" in actions:
for check, args in actions["any"].iteritems():
checks.add({check: args})
return checks
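# Hedged usage sketch (not part of the upstream package): illustrates the
# '+' / '-' host regex semantics documented in _match_host above. Run it as
# ``python -m docker_leash.config`` because the module uses relative imports.
if __name__ == "__main__":
    example_rules = ["+docker-host-.*", "-docker-host-excluded"]
    print(Config._match_host("docker-host-1", example_rules))         # True
    print(Config._match_host("docker-host-excluded", example_rules))  # False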
| 30.967914 | 105 | 0.583319 | 5,617 | 0.969953 | 0 | 0 | 2,546 | 0.439648 | 0 | 0 | 2,692 | 0.464859 |
d242ed9d3520b1a1062f3207cee3beda75ae982b
| 1,039 |
py
|
Python
|
printapp/migrations/0002_auto_20180217_1917.py
|
sumanlearning/potpapa2018
|
1557dd5aca645cb55a08e5b92623804e51fa8dfe
|
[
"Unlicense"
] | null | null | null |
printapp/migrations/0002_auto_20180217_1917.py
|
sumanlearning/potpapa2018
|
1557dd5aca645cb55a08e5b92623804e51fa8dfe
|
[
"Unlicense"
] | null | null | null |
printapp/migrations/0002_auto_20180217_1917.py
|
sumanlearning/potpapa2018
|
1557dd5aca645cb55a08e5b92623804e51fa8dfe
|
[
"Unlicense"
] | null | null | null |
# Generated by Django 2.0.2 on 2018-02-17 12:17
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('printapp', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='historybayar',
name='bayar_now',
field=models.DecimalField(decimal_places=2, max_digits=14, verbose_name='pembayaran sekarang'),
),
migrations.AlterField(
model_name='kontrak',
name='nhari_har',
field=models.IntegerField(default=0, verbose_name='masa pemeliharaan (hari)'),
),
migrations.AlterField(
model_name='kontrak',
name='tgl_due',
field=models.DateField(default=datetime.date.today, verbose_name='tgl jatuh tempo'),
),
migrations.AlterField(
model_name='termin',
name='nth_termin',
field=models.IntegerField(default=1, verbose_name='termin ke-'),
),
]
| 29.685714 | 107 | 0.599615 | 930 | 0.895091 | 0 | 0 | 0 | 0 | 0 | 0 | 230 | 0.221367 |
d243084a9d78e560bb874101db60f382836bb734
| 7,569 |
py
|
Python
|
waller.py
|
fredrikwahlberg/harvesters
|
205dadeb3b6e25203843e71b95cb99aaf840c712
|
[
"MIT"
] | 1 |
2018-02-20T16:34:26.000Z
|
2018-02-20T16:34:26.000Z
|
waller.py
|
fredrikwahlberg/harvesters
|
205dadeb3b6e25203843e71b95cb99aaf840c712
|
[
"MIT"
] | null | null | null |
waller.py
|
fredrikwahlberg/harvesters
|
205dadeb3b6e25203843e71b95cb99aaf840c712
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@author: Fredrik Wahlberg <[email protected]>
"""
import requests
import json
import os.path
import re
class Waller():
def __init__(self, datapath, verbose=False):
super(Waller, self).__init__()
self.reprname = "Waller"
assert os.path.exists(datapath), "Path not found"
self.datapath = datapath
self.verbose = verbose
filename = os.path.join(self.datapath, "metadata.json.gz")
if os.path.exists(filename):
import gzip
f = gzip.open(filename, 'r')
self.data = json.loads(f.read().decode('utf-8'))
f.close()
else:
self.data = {}
def save(self):
filename = os.path.join(self.datapath, "metadata.json.gz")
import gzip
f = gzip.open(filename, 'w')
f.write(json.dumps(self.data, sort_keys=True, indent=2, separators=(',', ': ')).encode('utf-8'))
f.close()
def getNumbers(self):
return [int(key) for key in self.data.keys() if self.data[key]['response_status_code'] == 200]
def keys(self):
return self.getNumbers()
def dataIterator(self, numbers):
import copy
for number in numbers:
data = copy.deepcopy(self[number])
data['id'] = number
yield data
def populate(self):
"""Populate the data base."""
downloadCounter = 0
# id range is 12062 to 49502
for number in range(12050, 49550):
if str(number) in self.data.keys():
respose_code = self.data[str(number)]['response_status_code']
if self.verbose:
print("%i, already checked, response was %i" % (number, respose_code))
else:
respose_code = 0
if not (respose_code == 200 or respose_code == 404):
self._download(number)
downloadCounter += 1
if downloadCounter > 1000:
if self.verbose:
print("Writing database to file")
self.save()
downloadCounter = 0
self.save()
def __getitem__(self, number):
import copy
return copy.deepcopy(self.data[str(number)])
def _get_waller_url(self, number):
return "http://waller.ub.uu.se/" + str(number) + ".html"
def _getMetaDataFromTemplate(self, response, markerTemplate):
idx = response.content.find(markerTemplate)
if idx >= 0:
text = response.content[idx+len(markerTemplate):]
text = text[:text.find(markerTemplate)]
removedSomething = True
while removedSomething:
removelist = ['\r', '\n', ' ']
removedSomething = False
for t in removelist:
if text[0] == t:
removedSomething = True
text = text[1:]
if text[0] == '<':
removedSomething = True
while text[0] != '>':
text = text[1:]
text = text[1:]
return text[:text.find('<')]
else:
return None
def _trimUntil(self, text, template):
idx = text.find(template)
if idx >= 0:
return text[idx+len(template):]
else:
return text
def _trimFrom(self, text, template):
idx = text.find(template)
if idx >= 0:
return text[:idx]
else:
return text
def _ltrim(self, text):
while len(text)>0 and text[0] == ' ':
text = text[1:]
return text
def _rtrim(self, text):
while len(text)>0 and text[-1] == ' ':
text = text[:-1]
return text
def _tagRemover(self, text):
s = ""
l = []
w = True
for c in text:
if c == '<':
w = False
if w and c != '\n':
s += c
if c == '>':
w = True
s = self._rtrim(self._ltrim(s))
if len(s) > 0:
l.append(s)
s = ""
return l
def _download(self, number):
if self.verbose:
print ("%i, checking..." % number, end="")
response = requests.get(self._get_waller_url(number))
if self.verbose:
print("response was %i" % response.status_code)
newentry = {'response_status_code': response.status_code}
if response.status_code == 200:
response_content = response.content.decode('utf-8')
text = self._trimUntil(response_content, "PHYSICAL DESCRIPTION")
text = self._trimUntil(text, "</tr>")
text = self._trimFrom(text, "DESCRIPTION OF CONTENTS")
entries1 = self._tagRemover(text)
tags = [("Shelfmark:", "shelfmark"), ("Type of object:", "type_of_object"),
("Dimensions:", "dimensions"), ("Extent:", "extent"), ("Material:", "material")]
def parseSingleTags(entries, tags):
physical = {}
i = 0
while i < len(entries):
for texttag, dictkey in tags:
if entries[i].find(texttag) >= 0:
i += 1
physical[dictkey] = entries[i]
i += 1
return physical
newentry['physical_description'] = parseSingleTags(entries1, tags)
text = self._trimUntil(response_content, "DESCRIPTION OF CONTENTS")
text = self._trimUntil(text, "</tr>")
text = self._trimFrom(text, "IMAGES")
entries2 = self._tagRemover(text)
tags = [("Type of element:", "type_of_element"), ("Extent:", "extent"),
("Language:", "language"), ("Place:", "place"), ("Date:", "date"),
("Short summary:", "short_summary")]
content = parseSingleTags(entries2, tags)
tags = [("Person:", "person")]
i = 0
while i < len(entries2):
for texttag, dictkey in tags:
if entries2[i].find(texttag) >= 0:
i += 1
if dictkey in content.keys():
content[dictkey].append(entries2[i])
else:
content[dictkey] = [entries2[i]]
i += 1
newentry['description_of_contents'] = content
tags = [("Comments:", "comments")]
comment = parseSingleTags(entries2, tags)
if 'comments' in comment.keys():
newentry['comments'] = comment['comments']
text = self._trimUntil(response_content, "IMAGES")
images = []
match = re.findall(r'href=[\'"]?([^\'" >]+)', text)
for m in match:
u = "http://waller.ub.uu.se/images/"
if m[:len(u)] == u:
images.append(m)
newentry['image_urls'] = images
self.data[str(number)] = newentry
def __repr__(self):
return "Waller collection, containing %i entries" % len(self.getNumbers())
if __name__=='__main__':
datapath = os.path.expanduser("~/tmp/Waller")
db = Waller(datapath=datapath, verbose=1)
db.populate()
db.save()
print(db)
| 35.369159 | 104 | 0.492139 | 7,257 | 0.958779 | 185 | 0.024442 | 0 | 0 | 0 | 0 | 1,080 | 0.142687 |
d244090a382037591d1f8d9a0c4ab8297cd9b302
| 701 |
py
|
Python
|
helper_functions_class.py
|
lucaschatham/lambdata
|
125087c521847e4f7659a4c8e34008994f3fb01b
|
[
"MIT"
] | null | null | null |
helper_functions_class.py
|
lucaschatham/lambdata
|
125087c521847e4f7659a4c8e34008994f3fb01b
|
[
"MIT"
] | null | null | null |
helper_functions_class.py
|
lucaschatham/lambdata
|
125087c521847e4f7659a4c8e34008994f3fb01b
|
[
"MIT"
] | null | null | null |
"""
Here are two different functions used for common data cleaning tasks.
Both operate on data that has already been loaded into a pandas DataFrame.
"""
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
class CleanData:
def __init__(self):
"""
        Instantiate a CleanData helper; no configuration or state is stored.
"""
return
    # This function shuffles the rows of a DataFrame using a fixed random seed
def randomize(self, df, seed):
random_this = shuffle(df,random_state=seed)
return random_this
    # This function counts the total number of missing values in the specified DataFrame
def null_count(self, df):
num_nulls = df.isnull().sum().sum()
return num_nulls
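# Hedged usage sketch (not part of the original module): the DataFrame below is
# a made-up example showing how the two helpers are meant to be called.
if __name__ == "__main__":
    demo = pd.DataFrame({"a": [1, 2, None], "b": [4, None, 6]})
    cleaner = CleanData()
    print(cleaner.randomize(demo, seed=42))  # rows shuffled reproducibly
    print(cleaner.null_count(demo))          # 2 missing values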
| 22.612903 | 84 | 0.673324 | 476 | 0.67903 | 0 | 0 | 0 | 0 | 0 | 0 | 327 | 0.466476 |
d245456046b81bffbc996ce46fc7291edbaf4e36
| 870 |
py
|
Python
|
services/web/apps/crm/supplierprofile/views.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 84 |
2017-10-22T11:01:39.000Z
|
2022-02-27T03:43:48.000Z
|
services/web/apps/crm/supplierprofile/views.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 22 |
2017-12-11T07:21:56.000Z
|
2021-09-23T02:53:50.000Z
|
services/web/apps/crm/supplierprofile/views.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 23 |
2017-12-06T06:59:52.000Z
|
2022-02-24T00:02:25.000Z
|
# ---------------------------------------------------------------------
# crm.supplierprofile application
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.lib.app.extdocapplication import ExtDocApplication
from noc.crm.models.supplierprofile import SupplierProfile
from noc.core.translation import ugettext as _
class SupplierProfileApplication(ExtDocApplication):
"""
SupplierProfile application
"""
title = _("Supplier Profile")
menu = [_("Setup"), _("Supplier Profiles")]
model = SupplierProfile
query_fields = ["name__icontains", "description__icontains"]
def field_row_class(self, o):
return o.style.css_class_name if o.style else ""
| 33.461538 | 71 | 0.558621 | 368 | 0.422989 | 0 | 0 | 0 | 0 | 0 | 0 | 455 | 0.522989 |
d24561aa431196a52ec81712ae5c3dded61222c7
| 2,849 |
py
|
Python
|
all-python-codes/bagels/main.py
|
abdussalam02/py-projects
|
653ba4e6923ee1f55a64aef23174515c1db68758
|
[
"MIT"
] | null | null | null |
all-python-codes/bagels/main.py
|
abdussalam02/py-projects
|
653ba4e6923ee1f55a64aef23174515c1db68758
|
[
"MIT"
] | null | null | null |
all-python-codes/bagels/main.py
|
abdussalam02/py-projects
|
653ba4e6923ee1f55a64aef23174515c1db68758
|
[
"MIT"
] | null | null | null |
from random import shuffle
NUM_DIGIT = 3
MAX_GUESSES = 10
def main(): # main game
print(
f"""
      Bagels, a deductive logic game.
      By Ibrahim raimi
      I am thinking of a {NUM_DIGIT}-digit number with no repeated digits.
      Try to guess what it is. Here are some clues:
      When I say:    That means:
      Pico           One digit is correct but in the wrong position.
      Fermi          One digit is correct and in the right position.
      Bagels         No digit is correct.
      For example, if the secret number was 248 and your guess was 843, the clues would be Fermi Pico.
"""
)
while True: # main game loop
secret_num = get_secret_num()
print("I have though of a number")
print(f"You have {MAX_GUESSES} guesses to get it.")
num_guesses = 1
while num_guesses <= MAX_GUESSES:
guess = ""
# keep looping until they enter a valid guess:
while len(guess) != NUM_DIGIT or not guess.isdecimal():
print(f"Guess {num_guesses}")
guess = input("> ")
clues = get_clues(guess, secret_num)
print(clues)
num_guesses += 1
if guess == secret_num:
break
if num_guesses > MAX_GUESSES:
print("You ran out of guesses.")
print(f"The nswer was {secret_num}")
break
print("Do you want to play again? (yes or no)")
if not input("> ").lower().startswith("y"):
break
print("Thanks for playing")
def get_secret_num():
""" returns a string made up of {NUM_DIGITS} uniqe random digits """
numbers = list("0123456789") # create a list of digits 0 - 9
shuffle(numbers) # shuffle them into random order
""" get the first {NUM_DIGITS} digits in the list for the secret number """
secret_num = ""
for i in range(NUM_DIGIT):
secret_num += str(numbers[i])
return secret_num
def get_clues(guess, secret_num):
""" returns a string with the pico, fermi, bagels clues for a guess and secret number pair """
if guess == secret_num:
return "You got it!"
clues = []
for i in range(len(guess)):
if guess[i] == secret_num[i]:
# a correct digit is in the correct place
clues.append("Fermi")
elif guess[i] in secret_num:
# a correct digit is in the incorrect place
clues.append("Pico")
if len(clues) == 0:
return "Bagels" # there are no correct digit at all
else:
# sort the clues into alphabetical order so their original order does not give information away
clues.sort()
return " ".join(clues)
if __name__ == "__main__":
main()
| 30.634409 | 107 | 0.570727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,414 | 0.496314 |
d24580d757e7e7fcbb4b8b0a5b6d34e117acf284
| 2,652 |
py
|
Python
|
NetEmbs/DataProcessing/unique_signatures.py
|
AlexWorldD/NetEmbs
|
ea3dc5769e2feb728dac8f21ec677a9807def3df
|
[
"Apache-2.0"
] | 1 |
2021-09-02T16:47:27.000Z
|
2021-09-02T16:47:27.000Z
|
NetEmbs/DataProcessing/unique_signatures.py
|
AlexWorldD/NetEmbs
|
ea3dc5769e2feb728dac8f21ec677a9807def3df
|
[
"Apache-2.0"
] | null | null | null |
NetEmbs/DataProcessing/unique_signatures.py
|
AlexWorldD/NetEmbs
|
ea3dc5769e2feb728dac8f21ec677a9807def3df
|
[
"Apache-2.0"
] | 1 |
2019-12-25T08:38:55.000Z
|
2019-12-25T08:38:55.000Z
|
# encoding: utf-8
__author__ = 'Aleksei Maliutin'
"""
unique_signatures.py
Created by lex at 2019-03-28.
"""
import pandas as pd
from NetEmbs.CONFIG import N_DIGITS
def get_signature(df: pd.DataFrame) -> pd.Series:
"""
Aggregation function over GroupBy object: to extract unique signature for the given business process.
    If the business process includes only a 1-1 flow (e.g. from Cash to Tax), the amount value is used.
    If the business process includes more than 2 transactions, the Credit/Debit values are used respectively.
Parameters
----------
df : DataFrame
Unique business process as GroupBy DataFrame
Returns
-------
Pandas Series with ID and Signature
"""
signature_l = list()
signature_r = list()
if df.shape[0] == 2:
signature_l = list(
zip(df["FA_Name"][df["Credit"] > 0.0].values, df["amount"][df["Credit"] > 0.0].values.round(N_DIGITS)))
signature_r = list(
zip(df["FA_Name"][df["Debit"] > 0.0].values, df["amount"][df["Debit"] > 0.0].values.round(N_DIGITS)))
elif df.shape[0] > 2:
        # Business process includes more than 2 transactions, hence the relative Credit/Debit amounts can be used to create the signature
signature_l = sorted(
list(
zip(df["FA_Name"][df["Credit"] > 0.0].values, df["Credit"][df["Credit"] > 0.0].values.round(N_DIGITS))),
key=lambda x: x[0])
signature_r = sorted(
list(zip(df["FA_Name"][df["Debit"] > 0.0].values, df["Debit"][df["Debit"] > 0.0].values.round(N_DIGITS))),
key=lambda x: x[0])
return pd.Series({"ID": df["ID"].values[0], "Signature": str((signature_l, signature_r))})
def get_signature_df(df: pd.DataFrame) -> pd.DataFrame:
"""
    Create a DataFrame with ID and Signature, where the signature combines the
    coefficients from the left and right parts of each business process.
Parameters
----------
df : DataFrame to be processed
Returns
-------
DataFrame with Signature column
"""
"""
Helper function for extraction a signature of BP (as a combination of coefficients from left and right part)
:param original_df:
:return: DataFrame with BP ID and extracted signature
"""
res = df.groupby("ID", as_index=False).apply(get_signature)
return res.drop_duplicates(["Signature"])
def leave_unique_business_processes(df: pd.DataFrame) -> pd.DataFrame:
"""
Filtering original DF with respect to unique BP's signatures
Parameters
----------
df : DataFrame to be processed
Returns
-------
DataFrame with remove duplicated w.r.t. extracted signatures
"""
signatures = get_signature_df(df)
return signatures.merge(df, on="ID", how="left")
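# Hedged usage sketch (not part of the original module, and it assumes the
# NetEmbs package is importable): the tiny journal below contains two business
# processes with identical signatures, so only the first one survives.
if __name__ == "__main__":
    example = pd.DataFrame({"ID": [1, 1, 2, 2],
                            "FA_Name": ["Cash", "Tax", "Cash", "Tax"],
                            "Credit": [10.0, 0.0, 10.0, 0.0],
                            "Debit": [0.0, 10.0, 0.0, 10.0],
                            "amount": [10.0, 10.0, 10.0, 10.0]})
    print(leave_unique_business_processes(example))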
| 34 | 120 | 0.633107 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,474 | 0.555807 |
d248471875d205a42c77cea45df52d51bb8e0b18
| 6,008 |
py
|
Python
|
books/api/RecurringInvoicesApi.py
|
harshal-choudhari/books-python-wrappers
|
43616ee451a78ef2f02facc1cfb1d7f1121a1464
|
[
"MIT"
] | 1 |
2021-04-21T06:40:48.000Z
|
2021-04-21T06:40:48.000Z
|
books/api/RecurringInvoicesApi.py
|
harshal-choudhari/books-python-wrappers
|
43616ee451a78ef2f02facc1cfb1d7f1121a1464
|
[
"MIT"
] | null | null | null |
books/api/RecurringInvoicesApi.py
|
harshal-choudhari/books-python-wrappers
|
43616ee451a78ef2f02facc1cfb1d7f1121a1464
|
[
"MIT"
] | 1 |
2021-04-21T07:31:47.000Z
|
2021-04-21T07:31:47.000Z
|
#$Id$#
from books.util.ZohoHttpClient import ZohoHttpClient
from books.parser.RecurringInvoiceParser import RecurringInvoiceParser
from .Api import Api
from json import dumps
base_url = Api().base_url + 'recurringinvoices/'
parser = RecurringInvoiceParser()
zoho_http_client = ZohoHttpClient()
class RecurringInvoicesApi:
"""Recurring invoice api class is used:
    1. To list all the recurring invoices with pagination.
    2. To get details of a recurring invoice.
    3. To create a recurring invoice.
    4. To update an existing recurring invoice.
    5. To delete an existing recurring invoice.
    6. To stop an active recurring invoice.
    7. To resume a stopped recurring invoice.
    8. To update the pdf template associated with the recurring invoice.
    9. To get the complete history and comments of a recurring invoice.
"""
def __init__(self, authtoken, organization_id):
"""Initialize Contacts Api using user's authtoken and organization id.
Args:
authtoken(str): User's authtoken.
organization_id(str): User's organization id.
"""
self.headers = {
'Authorization': 'Zoho-oauthtoken ' + authtoken,
}
self.details = {
'organization_id': organization_id
}
def get_recurring_invoices(self, parameter=None):
"""List of recurring invoices with pagination.
Args:
parameter(dict, optional): Filter with which the list has to be
displayed. Defaults to None.
Returns:
instance: Recurring invoice list object.
"""
response = zoho_http_client.get(base_url, self.details, self.headers, parameter)
return parser.recurring_invoices(response)
def get_recurring_invoice(self, recurring_invoice_id):
"""Get recurring invoice details.
Args:
recurring_invoice_id(str): Recurring invoice id.
Returns:
instance: Recurring invoice object.
"""
url = base_url + recurring_invoice_id
response = zoho_http_client.get(url, self.details, self.headers)
return parser.recurring_invoice(response)
def create(self, recurring_invoice):
"""Create recurring invoice.
Args:
recurring_invoice(instance): Recurring invoice object.
Returns:
instance: Recurring invoice object.
"""
json_object = dumps(recurring_invoice.to_json())
data = {
'JSONString': json_object
}
response = zoho_http_client.post(base_url, self.details, self.headers, data)
return parser.recurring_invoice(response)
def update(self, recurring_invoice_id, recurring_invoice):
"""Update an existing recurring invoice.
Args:
recurring_invoice_id(str): Recurring invoice id.
recurring_invoice(instance): Recurring invoice object.
Returns:
instance: Recurring invoice object.
"""
url = base_url + recurring_invoice_id
json_object = dumps(recurring_invoice.to_json())
data = {
'JSONString': json_object
}
response = zoho_http_client.put(url, self.details, self.headers, data)
return parser.recurring_invoice(response)
def delete(self, recurring_invoice_id):
"""Delete an existing recurring invoice.
Args:
recurring_invoice_id(str): Recurring invoice id.
Returns:
str: Success message('The recurring invoice has been deleted.').
"""
url = base_url + recurring_invoice_id
response = zoho_http_client.delete(url, self.details, self.headers)
return parser.get_message(response)
def stop_recurring_invoice(self, recurring_invoice_id):
"""Stop an active recurring invoice.
Args:
recurring_invoice_id(str): Recurring invoice id.
Returns:
str: Success message ('The recurring invoice has been stopped.').
"""
url = base_url + recurring_invoice_id + '/status/stop'
response = zoho_http_client.post(url, self.details, self.headers, '')
return parser.get_message(response)
def resume_recurring_invoice(self, recurring_invoice_id):
"""Resume an active recurring invoice.
Args:
recurring_invoice_id(str): Recurring invoice id.
Returns:
str: Success message ('The recurring invoice has been activated.').
"""
url = base_url + recurring_invoice_id + '/status/resume'
response = zoho_http_client.post(url, self.details, self.headers, '')
return parser.get_message(response)
def update_recurring_invoice_template(self,
recurring_invoice_id, template_id):
"""Update the pdf template associated with the recurring invoice.
Args:
recurring_invoice_id(str): Recurring invoice id.
template_id(str): Template id.
Returns:
str: Success message ('Recurring invoice information has been
updated.').
"""
url = base_url + recurring_invoice_id + '/templates/' + template_id
response = zoho_http_client.put(url, self.details, self.headers, '')
return parser.get_message(response)
def list_recurring_invoice_history(self, recurring_invoice_id):
"""List the complete history and comments of a recurring invoice.
Args:
recurring_invoice_id(str): Recurring invoice id.
Returns:
instance: Recurring invoice history and comments list object.
"""
url = base_url + recurring_invoice_id + '/comments'
response = zoho_http_client.get(url, self.details, self.headers)
return parser.recurring_invoice_history_list(response)
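# Hedged usage sketch (not part of the wrapper): the token and organization id
# below are placeholders, and the call performs a real HTTP request.
if __name__ == "__main__":
    api = RecurringInvoicesApi("your-oauth-token", "your-organization-id")
    recurring_invoices = api.get_recurring_invoices()
    print(recurring_invoices)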
| 33.19337 | 89 | 0.636152 | 5,693 | 0.94757 | 0 | 0 | 0 | 0 | 0 | 0 | 3,081 | 0.512816 |
d249639feb0e944a523bdb5fe34255236bfa3990
| 661 |
py
|
Python
|
api/settings/local.py
|
hartliddell/api
|
73d44d2271c01fe7540fedeee9174c4032cbbbc0
|
[
"MIT"
] | null | null | null |
api/settings/local.py
|
hartliddell/api
|
73d44d2271c01fe7540fedeee9174c4032cbbbc0
|
[
"MIT"
] | null | null | null |
api/settings/local.py
|
hartliddell/api
|
73d44d2271c01fe7540fedeee9174c4032cbbbc0
|
[
"MIT"
] | null | null | null |
"""Define the django settings for a local setup."""
from .base import * # noqa
# SECURITY WARNING: don't run with debug turned on in production!
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'CHANGE THIS!!!'
# Allow all host headers
# SECURITY WARNING: don't run with this setting in production!
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# CORS settings.
# https://github.com/ottoyiu/django-cors-headers#cors_origin_allow_all
CORS_ORIGIN_ALLOW_ALL = True
| 33.05 | 70 | 0.747352 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 526 | 0.795764 |
d24974e9a9f24d16218c96318a69ab049db6dc83
| 1,457 |
py
|
Python
|
scripts/010_smultixcan/utils/ukb_gtex_variants_intersection/compute_intersection_ukb_gtex_variants.py
|
miltondp/phenomexcan
|
38390ac21987f1e72835c42919c53abd1a35cb7e
|
[
"MIT"
] | 3 |
2020-12-07T15:06:41.000Z
|
2021-05-25T06:03:38.000Z
|
scripts/010_smultixcan/utils/ukb_gtex_variants_intersection/compute_intersection_ukb_gtex_variants.py
|
miltondp/phenomexcan
|
38390ac21987f1e72835c42919c53abd1a35cb7e
|
[
"MIT"
] | 1 |
2020-07-01T14:45:38.000Z
|
2020-07-01T15:15:55.000Z
|
scripts/010_smultixcan/utils/ukb_gtex_variants_intersection/compute_intersection_ukb_gtex_variants.py
|
miltondp/phenomexcan
|
38390ac21987f1e72835c42919c53abd1a35cb7e
|
[
"MIT"
] | 1 |
2020-08-20T13:23:40.000Z
|
2020-08-20T13:23:40.000Z
|
#!/usr/bin/env python
import os
import argparse
import sqlite3
from glob import glob
import pandas as pd
parser = argparse.ArgumentParser()
parser.add_argument('--gtex-models-dir', type=str, required=True)
parser.add_argument('--variants-file-with-gtex-id', type=str, required=True)
parser.add_argument('--output-file', type=str, required=True)
args = parser.parse_args()
all_models = glob(os.path.join(args.gtex_models_dir, '*.db'))
assert len(all_models) == 49, len(all_models)
all_variants_ids = set()
for m in all_models:
print(f'Processing {m}')
with sqlite3.connect(m) as conn:
df = pd.read_sql('select varID from weights', conn)['varID']
all_variants_ids.update(set(df.values))
print(f'Read {len(all_variants_ids)} unique variants in GTEx models')
print(f'Reading {args.variants_file_with_gtex_id}')
variants_gtexid = pd.read_csv(args.variants_file_with_gtex_id, sep='\t', usecols=['panel_variant_id'], squeeze=True).dropna()
variants_gtexid = set(variants_gtexid.values)
print(f' Read {len(variants_gtexid)} variants')
print('Merging GTEx and other variants')
merged_variants = variants_gtexid.intersection(all_variants_ids)
print(f'Final number of merged variants: {len(merged_variants)}')
print(f'Coverage of GTEx variants: {(len(merged_variants) / len(all_variants_ids)) * 100:.2f}%')
print(f'Writing to {args.output_file}')
pd.DataFrame({'rsid': list(merged_variants)}).to_csv(args.output_file, index=False)
| 33.883721 | 125 | 0.753603 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 529 | 0.363075 |
d24abb7e1be3b51950c14587cbae8b44aa330b06
| 5,676 |
py
|
Python
|
h/security/predicates.py
|
hypothesis/h
|
92c1a326c305a3d94fe48f87402135fd7beb6a20
|
[
"BSD-2-Clause"
] | 2,103 |
2015-01-07T12:47:49.000Z
|
2022-03-29T02:38:25.000Z
|
h/security/predicates.py
|
hypothesis/h
|
92c1a326c305a3d94fe48f87402135fd7beb6a20
|
[
"BSD-2-Clause"
] | 4,322 |
2015-01-04T17:18:01.000Z
|
2022-03-31T17:06:02.000Z
|
h/security/predicates.py
|
hypothesis/h
|
92c1a326c305a3d94fe48f87402135fd7beb6a20
|
[
"BSD-2-Clause"
] | 389 |
2015-01-24T04:10:02.000Z
|
2022-03-28T08:00:16.000Z
|
"""
Define authorization predicates.
These are functions which accept an `Identity` object and a context object and
return a truthy value. These represent building blocks of our permission map
which define when people do, or don't have permissions.
For example a predicate might define "group_created_by_user" which is only
true when a user is present, a group is present and the user created that
group.
"""
from itertools import chain
from h.models.group import JoinableBy, ReadableBy, WriteableBy
def requires(*parent_predicates):
"""
Decorate a predicate to say it requires other predicates to be True first.
:param parent_predicates: A list of predicates that have to be true for
this predicate to be true as well.
"""
def decorator(function):
function.requires = parent_predicates
return function
return decorator
# Identity things
def authenticated(identity, _context):
return identity
# The `@requires` here means that this predicate needs `authenticate` to be
# True before it's True. It also avoids attribute errors if identity is None
@requires(authenticated)
def authenticated_user(identity, _context):
return identity.user
@requires(authenticated_user)
def user_is_staff(identity, _context):
return identity.user.staff
@requires(authenticated_user)
def user_is_admin(identity, _context):
return identity.user.admin
@requires(authenticated)
def authenticated_client(identity, _context):
return identity.auth_client
@requires(authenticated_client)
def authenticated_client_is_lms(identity, _context):
authority = identity.auth_client.authority
return authority.startswith("lms.") and authority.endswith(".hypothes.is")
# Users
def user_found(_identity, context):
return hasattr(context, "user") and context.user
@requires(authenticated_client, user_found)
def user_authority_matches_authenticated_client(identity, context):
return context.user.authority == identity.auth_client.authority
# Annotations
def annotation_found(_identity, context):
return hasattr(context, "annotation") and context.annotation
@requires(annotation_found)
def annotation_shared(_identity, context):
return context.annotation.shared
@requires(annotation_found)
def annotation_not_shared(_identity, context):
return not context.annotation.shared
@requires(annotation_found)
def annotation_live(_identity, context):
return not context.annotation.deleted
@requires(authenticated_user, annotation_found)
def annotation_created_by_user(identity, context):
return identity.user.userid == context.annotation.userid
# Groups
def group_found(_identity, context):
return hasattr(context, "group") and context.group
def group_not_found(_identity, context):
return not hasattr(context, "group") or not context.group
@requires(group_found)
def group_writable_by_members(_identity, context):
return context.group.writeable_by == WriteableBy.members
@requires(group_found)
def group_writable_by_authority(_identity, context):
return context.group.writeable_by == WriteableBy.authority
@requires(group_found)
def group_readable_by_world(_identity, context):
return context.group.readable_by == ReadableBy.world
@requires(group_found)
def group_readable_by_members(_identity, context):
return context.group.readable_by == ReadableBy.members
@requires(group_found)
def group_joinable_by_authority(_identity, context):
return context.group.joinable_by == JoinableBy.authority
@requires(authenticated_user, group_found)
def group_created_by_user(identity, context):
return context.group.creator and context.group.creator.id == identity.user.id
@requires(authenticated_user, group_found)
def group_has_user_as_member(identity, context):
# With detached groups like we have with the websocket, this doesn't work
# as SQLAlchemy does not consider them equal:
# return context.group in identity.user.groups
return any(user_group.id == context.group.id for user_group in identity.user.groups)
@requires(authenticated_user, group_found)
def group_matches_user_authority(identity, context):
return context.group.authority == identity.user.authority
@requires(authenticated_client, group_found)
def group_matches_authenticated_client_authority(identity, context):
return context.group.authority == identity.auth_client.authority
def resolve_predicates(mapping):
"""
Expand predicates with requirements into concrete lists of predicates.
This takes a permission map which contains predicates which reference
other ones (using `@requires`), and converts each clause to include the
parents in parent first order. This means any parent which is referred to
by a predicate is executed before it, and no predicate appears more than once.
"""
return {
key: [_expand_clause(clause) for clause in clauses]
for key, clauses in mapping.items()
}
def _expand_clause(clause):
"""Generate all of the predicates + parents in a clause without dupes."""
seen_before = set()
# The chain.from_iterable here flattens nested iterables
return list(
chain.from_iterable(
_expand_predicate(predicate, seen_before) for predicate in clause
)
)
def _expand_predicate(predicate, seen_before):
"""Generate all of the parents and the predicate in parents first order."""
if hasattr(predicate, "requires"):
for parent in predicate.requires:
yield from _expand_predicate(parent, seen_before)
if predicate not in seen_before:
seen_before.add(predicate)
yield predicate
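# Hedged illustration (not part of h itself): two toy predicates show how
# `requires` chains parents and how `resolve_predicates` expands a permission
# map so parents run first and nothing is duplicated.
if __name__ == "__main__":
    def logged_in(identity, _context):
        return identity is not None
    @requires(logged_in)
    def is_admin(identity, _context):
        return identity.get("admin", False)
    expanded = resolve_predicates({"admin_panel": [[is_admin]]})
    # Each clause now reads [logged_in, is_admin]: parents first, no duplicates.
    print(expanded)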
| 27.687805 | 88 | 0.767442 | 0 | 0 | 367 | 0.064658 | 2,811 | 0.495243 | 0 | 0 | 1,640 | 0.288936 |
d24c807fe0e09931fae3e0caaf649694c890f3db
| 3,325 |
py
|
Python
|
gdm/planing_tool/models/empresas.py
|
Deonstudios/GDM
|
ad6c8182d3e70a6c4d1490f452b2c16e12dc85d8
|
[
"Apache-2.0"
] | null | null | null |
gdm/planing_tool/models/empresas.py
|
Deonstudios/GDM
|
ad6c8182d3e70a6c4d1490f452b2c16e12dc85d8
|
[
"Apache-2.0"
] | null | null | null |
gdm/planing_tool/models/empresas.py
|
Deonstudios/GDM
|
ad6c8182d3e70a6c4d1490f452b2c16e12dc85d8
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
from planing_tool.models.plazas import State, Country, City
from django.contrib.gis.db.models import PointField
from datetime import datetime, timedelta
from libs.currency_utils import CurrencyUtils
from django.contrib.gis.db.models.manager import GeoManager
from django.db import models
from simple_history.models import HistoricalRecords
from django.utils.translation import ugettext as _
from autoslug import AutoSlugField
ACTIVITY_CHOICES = (
(0, _(u'Empresa')),
(1, _(u'Comerciante Individual')),
(2, _(u'Profesional')),
(3, _(u'Productor')),
)
class Empresa(models.Model):
objects = GeoManager()
slug = AutoSlugField(
max_length=100, populate_from='name',
always_update=True, unique=True
)
name = models.CharField(max_length=200, db_index=True,
verbose_name=_(u"Nombre de la compañía"))
description = models.TextField(
verbose_name=_(u'Descripción'), null=True,
help_text='Descripción de la empresa. 999 caracteres maximo'
)
address_street = models.CharField(max_length=200, verbose_name=_(u"Calle"))
address_number = models.CharField(max_length=30, verbose_name=_(u'Número'))
postal_code = models.CharField(
max_length=50, verbose_name=_(u"Código Postal")
)
cuit = models.CharField(
max_length=20, null=True, blank=True,
help_text='por ej.: 20-12345698-7'
)
email = models.EmailField(
null=True, blank=True, help_text='Por ej.: usuario@nombre_empresa.com'
)
activity = models.IntegerField(
choices=ACTIVITY_CHOICES, verbose_name=_(u'Actividad'), default=0
)
city = models.ForeignKey(City)
geo_coords = PointField(
null=True, blank=True, verbose_name=_(u'Coordenadas'),
help_text='Ingresar los valores de lat,long')
phone_area_code = models.CharField(
max_length=200, verbose_name=_(u'Código de area'),
help_text='Por ej.: 0351 para Córdoba o 011 para Buenos Aires.')
phone = models.CharField(
max_length=200, verbose_name=_(u"Teléfono"),
help_text='Por ej.: 1234567 o 1512345678 para teléfonos móviles')
def default_hasta():
    # Callable default so the date is computed at save time, not once at import time
    return datetime.now() + timedelta(days=30)
class Inversiones(models.Model):
"""Representa un Presupuesto"""
slug = AutoSlugField(
max_length=250, populate_from='nombre', always_update=True, unique=True
)
description = models.TextField(
verbose_name=_(u'Descripción'), null=True,
help_text='Descripción de la Inversion que una empresa recibe'
)
desde = models.DateTimeField(
auto_now_add=True,
null=False,
blank=False)
hasta = models.DateTimeField(
        default=default_hasta,
null=False,
blank=False)
monto = models.DecimalField(
max_digits=20, decimal_places=2, blank=True,
null=True, db_index=True, verbose_name=_(u"Precio")
)
currency = models.IntegerField(
default=1, choices=CurrencyUtils.CURRENCY_CHOICES,
db_index=True, verbose_name=_(u"Moneda")
)
empresa = models.ForeignKey(
Empresa,
default=None,
null=False,
blank=False,
verbose_name=_(u"Inversion a Realizar")
)
history = HistoricalRecords()
def __unicode__(self):
return self.nombre
| 29.166667 | 79 | 0.67218 | 2,749 | 0.823547 | 0 | 0 | 0 | 0 | 0 | 0 | 617 | 0.184841 |
d24ca4e55e2ea29a960fa8ecd6a05a6ef87a0584
| 8,346 |
py
|
Python
|
network.py
|
tonyhu20116543/Playing-20-Question-Game-with-Policy-Based-Reinforcement-Learning
|
fb9b20181dd3e3273fcbc28144d60f01185ceffd
|
[
"MIT"
] | 12 |
2020-07-24T13:21:35.000Z
|
2021-11-08T10:13:24.000Z
|
network.py
|
tonyhu20116543/Playing-20-Question-Game-with-Policy-Based-Reinforcement-Learning
|
fb9b20181dd3e3273fcbc28144d60f01185ceffd
|
[
"MIT"
] | null | null | null |
network.py
|
tonyhu20116543/Playing-20-Question-Game-with-Policy-Based-Reinforcement-Learning
|
fb9b20181dd3e3273fcbc28144d60f01185ceffd
|
[
"MIT"
] | 7 |
2020-07-24T13:28:44.000Z
|
2021-11-08T10:13:25.000Z
|
import os
import tensorflow as tf
from util import masked_softmax
class PolicyNetwork(object):
""" Policy Function approximator. """
def __init__(self, input_size, output_size, learning_rate=0.001, summaries_dir=None, scope="policy_estimator"):
with tf.variable_scope(scope):
# Writes Tensorboard summaries to disk
self.summary_writer = None
if summaries_dir:
summary_dir = os.path.join(summaries_dir, "summaries_{}".format(scope))
if not os.path.exists(summary_dir):
os.makedirs(summary_dir)
self.summary_writer = tf.summary.FileWriter(summary_dir)
self.state = tf.placeholder(dtype=tf.float64, shape=[1, input_size], name="state")
self.action = tf.placeholder(dtype=tf.int32, name="action")
self.target = tf.placeholder(dtype=tf.float64, name="target")
self.mask = tf.placeholder(dtype=tf.float64, shape=[1, output_size], name="mask")
# This is just table lookup estimator
# self.fc_layer1 = tf.contrib.layers.fully_connected(
# inputs=self.state,
# num_outputs=len(env.state),
# activation_fn=tf.nn.relu)
self.output_layer = tf.contrib.layers.fully_connected(
inputs=self.state,
num_outputs=output_size,
activation_fn=None)
# self.action_probs = tf.squeeze(tf.nn.softmax(self.output_layer))
self.action_probs = tf.squeeze(masked_softmax(self.output_layer, self.mask))
self.picked_action_prob = tf.gather(self.action_probs, self.action)
# Loss and train op
self.loss = -tf.log(self.picked_action_prob) * self.target
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_op = self.optimizer.minimize(
self.loss, global_step=tf.train.get_global_step())
def predict(self, state, mask, sess=None):
sess = sess or tf.get_default_session()
return sess.run(self.action_probs, {self.state: state.reshape(1, -1),
self.mask: mask.reshape(1, -1)})
def update(self, state, target, action, mask, sess=None):
sess = sess or tf.get_default_session()
feed_dict = {self.state: state.reshape(1, -1), self.target: target,
self.action: action, self.mask: mask.reshape(1, -1)}
_, loss = sess.run([self.train_op, self.loss], feed_dict)
return loss
def restore(self, sess, checkpoint_file):
sess = sess or tf.get_default_session()
self.saver = tf.train.Saver(tf.global_variables())
self.saver.restore(sess=sess, save_path=checkpoint_file)
class ValueNetwork(object):
""" Value Function approximator. """
def __init__(self, input_size, output_size=1, learning_rate=0.01, scope="value_estimator"):
with tf.variable_scope(scope):
self.state = tf.placeholder(dtype=tf.float64, shape=[1, input_size], name="state")
self.target = tf.placeholder(dtype=tf.float64, name="target")
# This is just table lookup estimator
# self.fc_layer1 = tf.contrib.layers.fully_connected(
# inputs=self.state,
# num_outputs=input_size,
# activation_fn=tf.nn.relu)
self.output_layer = tf.contrib.layers.fully_connected(
inputs=self.state,
num_outputs=output_size,
activation_fn=None)
self.value_estimate = tf.squeeze(self.output_layer)
self.loss = tf.squared_difference(self.value_estimate, self.target)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_op = self.optimizer.minimize(
self.loss, global_step=tf.train.get_global_step())
def predict(self, state, sess=None):
sess = sess or tf.get_default_session()
return sess.run(self.value_estimate, {self.state: state.reshape(1, -1)})
def update(self, state, target, sess=None):
sess = sess or tf.get_default_session()
feed_dict = {self.state: state.reshape(1, -1), self.target: target}
_, loss = sess.run([self.train_op, self.loss], feed_dict)
return loss
class ObjectAwareRewardNetwork(object):
""" Object-aware Reward Function approximator. """
def __init__(self, input_size, output_size, action_num, learning_rate=0.01, scope="reward_estimator"):
with tf.variable_scope(scope):
self.state = tf.placeholder(shape=[1, input_size], dtype=tf.float64, name="state")
self.action = tf.placeholder(shape=[], dtype=tf.int32, name="question_idx")
self.object = tf.placeholder(shape=[], dtype=tf.int32, name="person_idx")
self.target = tf.placeholder(dtype=tf.float64, name="target")
object_vec = tf.one_hot(self.object, input_size, dtype=tf.float64)
action_vec = tf.one_hot(self.action, action_num, dtype=tf.float64)
concat_vec = tf.concat([object_vec, action_vec], 0)
self.output_layer = tf.contrib.layers.fully_connected(
inputs=tf.concat([self.state, tf.expand_dims(concat_vec, 0)], 1),
num_outputs=output_size,
activation_fn=tf.nn.sigmoid)
self.value_estimate = tf.squeeze(self.output_layer)
self.loss = tf.squared_difference(self.value_estimate, self.target)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_op = self.optimizer.minimize(
self.loss, global_step=tf.train.get_global_step())
def predict(self, state, action, object, sess=None):
sess = sess or tf.get_default_session()
return sess.run(self.value_estimate, {self.state: state.reshape(1, -1), self.action: action, self.object: object})
def update(self, state, action, object, target, sess=None):
sess = sess or tf.get_default_session()
feed_dict = {self.state: state.reshape(1, -1), self.action: action, self.object: object, self.target: target}
_, loss = sess.run([self.train_op, self.loss], feed_dict)
def restore(self, sess, checkpoint_file):
sess = sess or tf.get_default_session()
self.saver = tf.train.Saver(tf.global_variables())
self.saver.restore(sess=sess, save_path=checkpoint_file)
class RewardNetwork(object):
""" Reward Function approximator. """
def __init__(self, input_size, output_size, action_num, learning_rate=0.01, scope="reward_estimator"):
with tf.variable_scope(scope):
self.state = tf.placeholder(shape=[1, input_size], dtype=tf.float64, name="state")
self.action = tf.placeholder(shape=[], dtype=tf.int32, name="question_idx")
self.target = tf.placeholder(dtype=tf.float64, name="target")
action_vec = tf.one_hot(self.action, action_num, dtype=tf.float64)
self.output_layer = tf.contrib.layers.fully_connected(
inputs=tf.concat([self.state, tf.expand_dims(action_vec, 0)], 1),
num_outputs=output_size,
activation_fn=tf.nn.sigmoid)
self.value_estimate = tf.squeeze(self.output_layer)
self.loss = tf.squared_difference(self.value_estimate, self.target)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_op = self.optimizer.minimize(
self.loss, global_step=tf.train.get_global_step())
def predict(self, state, action, sess=None):
sess = sess or tf.get_default_session()
return sess.run(self.value_estimate, {self.state: state.reshape(1, -1), self.action: action})
def update(self, state, action, target, sess=None):
sess = sess or tf.get_default_session()
feed_dict = {self.state: state.reshape(1, -1), self.action: action, self.target: target}
_, loss = sess.run([self.train_op, self.loss], feed_dict)
def restore(self, sess, checkpoint_file):
sess = sess or tf.get_default_session()
self.saver = tf.train.Saver(tf.global_variables())
self.saver.restore(sess=sess, save_path=checkpoint_file)
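# Hedged usage sketch (assumes the TensorFlow 1.x API used above and the local
# `util.masked_softmax` helper): builds a small policy network and queries the
# action probabilities for a random state with every action allowed.
if __name__ == "__main__":
    import numpy as np
    policy = PolicyNetwork(input_size=10, output_size=4)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        state = np.random.rand(10)
        mask = np.ones(4)
        print(policy.predict(state, mask, sess))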
| 46.88764 | 122 | 0.642104 | 8,267 | 0.990534 | 0 | 0 | 0 | 0 | 0 | 0 | 837 | 0.100288 |
d24d2defb1725aab6afee3638c1358468609f75a
| 32,111 |
py
|
Python
|
tests/test_reusable_executor.py
|
hoodmane/loky
|
00fbd9d5e8ebc8f9427096a0f64d7d7ad51b9f9b
|
[
"BSD-3-Clause"
] | 153 |
2020-01-29T07:26:58.000Z
|
2022-03-31T23:30:55.000Z
|
tests/test_reusable_executor.py
|
hoodmane/loky
|
00fbd9d5e8ebc8f9427096a0f64d7d7ad51b9f9b
|
[
"BSD-3-Clause"
] | 98 |
2020-01-17T09:14:16.000Z
|
2022-03-10T15:32:14.000Z
|
tests/test_reusable_executor.py
|
hoodmane/loky
|
00fbd9d5e8ebc8f9427096a0f64d7d7ad51b9f9b
|
[
"BSD-3-Clause"
] | 22 |
2020-01-17T09:26:38.000Z
|
2022-02-02T09:27:59.000Z
|
import os
import sys
import gc
import ctypes
import psutil
import pytest
import warnings
import threading
from time import sleep
from multiprocessing import util, current_process
from pickle import PicklingError, UnpicklingError
from distutils.version import LooseVersion
import loky
from loky import cpu_count
from loky import get_reusable_executor
from loky.process_executor import _RemoteTraceback, TerminatedWorkerError
from loky.process_executor import BrokenProcessPool, ShutdownExecutorError
from loky.reusable_executor import _ReusablePoolExecutor
import cloudpickle
from ._executor_mixin import ReusableExecutorMixin
from .utils import TimingWrapper, id_sleep, check_python_subprocess_call
from .utils import filter_match
cloudpickle_version = LooseVersion(cloudpickle.__version__)
# Compat windows
if sys.platform == "win32":
from signal import SIGTERM as SIGKILL
libc = ctypes.cdll.msvcrt
else:
from signal import SIGKILL
from ctypes.util import find_library
libc = ctypes.CDLL(find_library("c"))
try:
import numpy as np
except ImportError:
np = None
# Backward compat for python2 cPickle module
PICKLING_ERRORS = (PicklingError,)
try:
import cPickle
PICKLING_ERRORS += (cPickle.PicklingError,)
except ImportError:
pass
def clean_warning_registry():
"""Safe way to reset warnings."""
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if hasattr(mod, reg):
getattr(mod, reg).clear()
def wait_dead(worker, n_tries=1000, delay=0.001):
"""Wait for process pid to die"""
for i in range(n_tries):
if worker.exitcode is not None:
return
sleep(delay)
raise RuntimeError("Process %d failed to die for at least %0.3fs" %
(worker.pid, delay * n_tries))
def crash():
"""Induces a segfault"""
import faulthandler
faulthandler._sigsegv()
def exit():
"""Induces a sys exit with exitcode 0"""
sys.exit(0)
def c_exit(exitcode=0):
"""Induces a libc exit with exitcode 0"""
libc.exit(exitcode)
def sleep_then_check_pids_exist(arg):
"""Sleep for some time and the check if all the passed pids exist"""
time, pids = arg
sleep(time)
res = True
for p in pids:
res &= psutil.pid_exists(p)
return res
def kill_friend(pid, delay=0):
"""Function that send SIGKILL at process pid"""
sleep(delay)
try:
os.kill(pid, SIGKILL)
except (PermissionError, ProcessLookupError) as e:
if psutil.pid_exists(pid):
util.debug("Fail to kill an alive process?!?")
raise e
util.debug("process {} was already dead".format(pid))
def raise_error(etype=UnpicklingError, message=None):
"""Function that raises an Exception in process"""
raise etype(message)
def return_instance(cls):
"""Function that returns a instance of cls"""
return cls()
class SayWhenError(ValueError):
pass
def exception_throwing_generator(total, when):
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
def do_nothing(arg):
"""Function that return True, test passing argument"""
return True
class CrashAtPickle(object):
"""Bad object that triggers a segfault at pickling time."""
def __reduce__(self):
crash()
class CrashAtUnpickle(object):
"""Bad object that triggers a segfault at unpickling time."""
def __reduce__(self):
return crash, ()
class ExitAtPickle(object):
"""Bad object that triggers a segfault at pickling time."""
def __reduce__(self):
exit()
class ExitAtUnpickle(object):
"""Bad object that triggers a process exit at unpickling time."""
def __reduce__(self):
return exit, ()
class CExitAtPickle(object):
"""Bad object that triggers a segfault at pickling time."""
def __reduce__(self):
c_exit()
class CExitAtUnpickle(object):
"""Bad object that triggers a process exit at unpickling time."""
def __reduce__(self):
return c_exit, ()
class ErrorAtPickle(object):
"""Bad object that raises an error at pickling time."""
def __init__(self, fail=True):
self.fail = fail
def __reduce__(self):
if self.fail:
raise PicklingError("Error in pickle")
else:
return id, (42, )
class ErrorAtUnpickle(object):
"""Bad object that triggers a process exit at unpickling time."""
def __init__(self, etype=UnpicklingError, message='the error message'):
self.etype = etype
self.message = message
def __reduce__(self):
return raise_error, (self.etype, self.message)
class CrashAtGCInWorker(object):
"""Bad object that triggers a segfault at call item GC time"""
def __del__(self):
if current_process().name != "MainProcess":
crash()
class CExitAtGCInWorker(object):
"""Exit worker at call item GC time"""
def __del__(self):
if current_process().name != "MainProcess":
c_exit()
class TestExecutorDeadLock(ReusableExecutorMixin):
crash_cases = [
        # Check problem occurring while pickling a task in
(id, (ExitAtPickle(),), PicklingError, None),
(id, (ErrorAtPickle(),), PicklingError, None),
        # Check problem occurring while unpickling a task on workers
(id, (ExitAtUnpickle(),), BrokenProcessPool, r"SystemExit"),
(id, (CExitAtUnpickle(),), TerminatedWorkerError, r"EXIT\(0\)"),
(id, (ErrorAtUnpickle(),), BrokenProcessPool, r"UnpicklingError"),
(id, (CrashAtUnpickle(),), TerminatedWorkerError, r"SIGSEGV"),
        # Check problem occurring during function execution on workers
(crash, (), TerminatedWorkerError, r"SIGSEGV"),
(exit, (), SystemExit, None),
(c_exit, (), TerminatedWorkerError, r"EXIT\(0\)"),
(raise_error, (RuntimeError,), RuntimeError, None),
        # Check problem occurring while pickling a task result
# on workers
(return_instance, (CrashAtPickle,), TerminatedWorkerError, r"SIGSEGV"),
(return_instance, (ExitAtPickle,), SystemExit, None),
(return_instance, (CExitAtPickle,), TerminatedWorkerError,
r"EXIT\(0\)"),
(return_instance, (ErrorAtPickle,), PicklingError, None),
        # Check problem occurring while unpickling a task in
# the result_handler thread
(return_instance, (ExitAtUnpickle,), BrokenProcessPool, r"SystemExit"),
(return_instance, (ErrorAtUnpickle,), BrokenProcessPool,
r"UnpicklingError"),
]
@pytest.mark.parametrize("func, args, expected_err, match", crash_cases)
def test_crashes(self, func, args, expected_err, match):
"""Test various reusable_executor crash handling"""
executor = get_reusable_executor(max_workers=2)
res = executor.submit(func, *args)
match_err = None
if expected_err is TerminatedWorkerError:
match_err = filter_match(match)
match = None
with pytest.raises(expected_err, match=match_err) as exc_info:
res.result()
# For remote traceback, ensure that the cause contains the original
# error
if match is not None:
with pytest.raises(_RemoteTraceback, match=match):
raise exc_info.value.__cause__
@pytest.mark.parametrize("func, args, expected_err, match", crash_cases)
def test_in_callback_submit_with_crash(self, func, args, expected_err,
match):
"""Test the recovery from callback crash"""
executor = get_reusable_executor(max_workers=2, timeout=12)
def in_callback_submit(future):
future2 = get_reusable_executor(
max_workers=2, timeout=12).submit(func, *args)
# Store the future of the job submitted in the callback to make it
# easy to introspect.
future.callback_future = future2
future.callback_done.set()
# Make sure the first submitted job last a bit to make sure that
# the callback will be called in the queue manager thread and not
# immediately in the main thread.
delay = 0.1
f = executor.submit(id_sleep, 42, delay)
f.callback_done = threading.Event()
f.add_done_callback(in_callback_submit)
assert f.result() == 42
if not f.callback_done.wait(timeout=3):
raise AssertionError('callback not done before timeout')
match_err = None
if expected_err is TerminatedWorkerError:
match_err = filter_match(match)
match = None
with pytest.raises(expected_err, match=match_err) as exc_info:
f.callback_future.result()
# For remote traceback, ensure that the cause contains the original
# error
if match is not None:
with pytest.raises(_RemoteTraceback, match=match):
raise exc_info.value.__cause__
def test_callback_crash_on_submit(self):
"""Errors in the callback execution directly in queue manager thread.
This case can break the process executor and we want to make sure
that we can detect the issue and recover by calling
get_reusable_executor.
"""
executor = get_reusable_executor(max_workers=2)
# Make sure the first submitted job last a bit to make sure that
# the callback will be called in the queue manager thread and not
# immediately in the main thread.
delay = 0.1
f = executor.submit(id_sleep, 42, delay)
f.add_done_callback(lambda _: exit())
assert f.result() == 42
assert executor.submit(id_sleep, 42, 0.1).result() == 42
executor = get_reusable_executor(max_workers=2)
f = executor.submit(id_sleep, 42, delay)
f.add_done_callback(lambda _: raise_error())
assert f.result() == 42
assert executor.submit(id_sleep, 42, 0.).result() == 42
def test_deadlock_kill(self):
"""Test deadlock recovery for reusable_executor"""
executor = get_reusable_executor(max_workers=1, timeout=None)
# trigger the spawning of the worker process
executor.submit(sleep, 0.1)
worker = next(iter(executor._processes.values()))
with pytest.warns(UserWarning) as recorded_warnings:
executor = get_reusable_executor(max_workers=2, timeout=None)
assert len(recorded_warnings) == 1
expected_msg = ("Trying to resize an executor with running jobs:"
" waiting for jobs completion before resizing.")
assert recorded_warnings[0].message.args[0] == expected_msg
os.kill(worker.pid, SIGKILL)
wait_dead(worker)
# wait for the executor to be able to detect the issue and set itself
# in broken state:
sleep(.5)
with pytest.raises(TerminatedWorkerError,
match=filter_match(r"SIGKILL")):
executor.submit(id_sleep, 42, 0.1).result()
# the get_reusable_executor factory should be able to create a new
# working instance
executor = get_reusable_executor(max_workers=2, timeout=None)
assert executor.submit(id_sleep, 42, 0.).result() == 42
@pytest.mark.parametrize("n_proc", [1, 2, 5, 13])
def test_crash_races(self, n_proc):
"""Test the race conditions in reusable_executor crash handling"""
if (sys.platform == 'win32' and sys.version_info >= (3, 8)
and n_proc > 5):
pytest.skip(
"On win32, the paging size can be too small to import numpy "
"multiple times in the sub-processes (imported when loading "
"this file). Skipping while no better solution is found. See "
"https://github.com/joblib/loky/issues/279 for more details."
)
        # Test for an external crash signal coming from a neighbor worker,
        # with various race setups
executor = get_reusable_executor(max_workers=n_proc, timeout=None)
executor.map(id, range(n_proc)) # trigger the creation of the workers
pids = list(executor._processes.keys())
assert len(pids) == n_proc
assert None not in pids
res = executor.map(sleep_then_check_pids_exist,
[(.0001 * (j // 2), pids)
for j in range(2 * n_proc)])
assert all(list(res))
with pytest.raises(TerminatedWorkerError,
match=filter_match(r"SIGKILL")):
res = executor.map(kill_friend, pids[::-1])
list(res)
def test_imap_handle_iterable_exception(self):
        # Whether errors raised during imap generation are caught depends on
        # the Python build/version being used
executor = get_reusable_executor(max_workers=2)
with pytest.raises(SayWhenError):
executor.map(id_sleep, exception_throwing_generator(10, 3),
chunksize=1)
# SayWhenError seen at start of problematic chunk's results
executor = get_reusable_executor(max_workers=2)
with pytest.raises(SayWhenError):
executor.map(id_sleep, exception_throwing_generator(20, 7),
chunksize=2)
executor = get_reusable_executor(max_workers=2)
with pytest.raises(SayWhenError):
executor.map(id_sleep, exception_throwing_generator(20, 7),
chunksize=4)
def test_queue_full_deadlock(self):
executor = get_reusable_executor(max_workers=1)
fs_fail = [executor.submit(do_nothing, ErrorAtPickle(True))
for i in range(100)]
fs = [executor.submit(do_nothing, ErrorAtPickle(False))
for i in range(100)]
with pytest.raises(PicklingError):
fs_fail[99].result()
assert fs[99].result()
def test_informative_error_when_fail_at_unpickle(self):
executor = get_reusable_executor(max_workers=2)
obj = ErrorAtUnpickle(RuntimeError, 'message raised in child')
f = executor.submit(id, obj)
with pytest.raises(BrokenProcessPool) as exc_info:
f.result()
assert 'RuntimeError' in str(exc_info.value.__cause__)
assert 'message raised in child' in str(exc_info.value.__cause__)
@pytest.mark.skipif(np is None, reason="requires numpy")
def test_numpy_dot_parent_and_child_no_freeze(self):
"""Test that no freeze happens in child process when numpy's thread
pool is started in the parent.
"""
a = np.random.randn(1000, 1000)
np.dot(a, a) # trigger the thread pool init in the parent process
executor = get_reusable_executor(max_workers=2)
executor.submit(np.dot, a, a).result()
executor.shutdown(wait=True)
class TestTerminateExecutor(ReusableExecutorMixin):
def test_shutdown_kill(self):
"""Test reusable_executor termination handling"""
from itertools import repeat
executor = get_reusable_executor(max_workers=5)
res1 = executor.map(id_sleep, range(100), repeat(.001))
res2 = executor.map(id_sleep, range(100), repeat(1))
assert list(res1) == list(range(100))
shutdown = TimingWrapper(executor.shutdown)
shutdown(wait=True, kill_workers=True)
assert shutdown.elapsed < 5
        # We should get an error as the executor shut down before we fetched
        # all the results from the long running operation.
with pytest.raises(ShutdownExecutorError):
list(res2)
def test_shutdown_deadlock(self):
"""Test recovery if killed after resize call"""
# Test the executor.shutdown call do not cause deadlock
executor = get_reusable_executor(max_workers=2, timeout=None)
executor.map(id, range(2)) # start the worker processes
executor.submit(kill_friend, (next(iter(executor._processes.keys())),
.0))
sleep(.01)
executor.shutdown(wait=True)
def test_kill_workers_on_new_options(self):
# submit a long running job with no timeout
executor = get_reusable_executor(max_workers=2, timeout=None)
f = executor.submit(sleep, 10000)
# change the constructor parameter while requesting not to wait
# for the long running task to complete (the workers will get
# shutdown forcibly)
executor = get_reusable_executor(max_workers=2, timeout=5,
kill_workers=True)
with pytest.raises(ShutdownExecutorError):
f.result()
f2 = executor.submit(id_sleep, 42, 0)
assert f2.result() == 42
@pytest.mark.parametrize("bad_object, match", [
(CrashAtGCInWorker, r"SIGSEGV"), (CExitAtGCInWorker, r"EXIT\(0\)")])
def test_call_item_gc_crash_or_exit(self, bad_object, match):
executor = get_reusable_executor(max_workers=1)
bad_object = bad_object()
f = executor.submit(id, bad_object)
# The worker will successfully send back its result to the master
# process before crashing so this future can always be collected:
assert f.result() is not None
# The executor should automatically detect that the worker has crashed
# when processing subsequently dispatched tasks:
with pytest.raises(TerminatedWorkerError, match=filter_match(match)):
executor.submit(gc.collect).result()
for r in executor.map(sleep, [.1] * 100):
pass
class TestResizeExecutor(ReusableExecutorMixin):
def test_reusable_executor_resize(self):
"""Test reusable_executor resizing"""
executor = get_reusable_executor(max_workers=2, timeout=None)
executor.map(id, range(2))
        # Shrinking the executor should drop a single process and keep one of
        # the old ones, as it is still in good shape. The resize should not
        # occur while there are ongoing jobs.
pids = list(executor._processes.keys())
res1 = executor.submit(sleep_then_check_pids_exist, (.3, pids))
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
executor = get_reusable_executor(max_workers=1, timeout=None)
assert len(w) == 1
expected_msg = "Trying to resize an executor with running jobs"
assert expected_msg in str(w[0].message)
assert res1.result(), ("Resize should wait for current processes "
" to finish")
assert len(executor._processes) == 1
assert next(iter(executor._processes.keys())) in pids
        # Requesting the same number of processes should not impact the
        # executor nor kill the existing processes
old_pid = next(iter((executor._processes.keys())))
unchanged_executor = get_reusable_executor(max_workers=1, timeout=None)
assert len(unchanged_executor._processes) == 1
assert unchanged_executor is executor
assert next(iter(unchanged_executor._processes.keys())) == old_pid
        # Growing the executor again should add a single process and keep the
        # old one, as it is still in good shape
executor = get_reusable_executor(max_workers=2, timeout=None)
assert len(executor._processes) == 2
assert old_pid in list(executor._processes.keys())
@pytest.mark.parametrize("reuse", [True, False])
@pytest.mark.parametrize("kill_workers", [True, False])
def test_reusable_executor_resize_many_times(self, kill_workers, reuse):
# Tentative non-regression test for a deadlock when shutting down
# the workers of an executor prior to resizing it.
kwargs = {
'timeout': None,
'kill_workers': kill_workers,
'reuse': reuse,
}
with warnings.catch_warnings(record=True):
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
for size in [12, 2, 1, 12, 6, 1, 8, 5]:
executor = get_reusable_executor(max_workers=size, **kwargs)
executor.map(sleep, [0.01] * 6)
# Do not wait for the tasks to complete.
executor.shutdown()
def test_kill_after_resize_call(self):
"""Test recovery if killed after resize call"""
# Test the executor resizing called before a kill arrive
executor = get_reusable_executor(max_workers=2, timeout=None)
executor.map(id, range(2)) # trigger the creation of worker processes
pid = next(iter(executor._processes.keys()))
executor.submit(kill_friend, (pid, .1))
with pytest.warns(UserWarning) as recorded_warnings:
warnings.simplefilter("always")
executor = get_reusable_executor(max_workers=1, timeout=None)
assert len(recorded_warnings) == 1
expected_msg = ("Trying to resize an executor with running jobs:"
" waiting for jobs completion before resizing.")
assert recorded_warnings[0].message.args[0] == expected_msg
assert executor.submit(id_sleep, 42, 0.).result() == 42
executor.shutdown()
def test_resize_after_timeout(self):
with warnings.catch_warnings(record=True) as recorded_warnings:
warnings.simplefilter("always")
executor = get_reusable_executor(max_workers=2, timeout=.001)
assert executor.submit(id_sleep, 42, 0.).result() == 42
sleep(.1)
executor = get_reusable_executor(max_workers=8, timeout=.001)
assert executor.submit(id_sleep, 42, 0.).result() == 42
sleep(.1)
executor = get_reusable_executor(max_workers=2, timeout=.001)
assert executor.submit(id_sleep, 42, 0.).result() == 42
if len(recorded_warnings) > 1:
expected_msg = 'A worker stopped'
assert expected_msg in recorded_warnings[0].message.args[0]
class TestGetReusableExecutor(ReusableExecutorMixin):
def test_invalid_process_number(self):
"""Raise error on invalid process number"""
with pytest.raises(ValueError):
get_reusable_executor(max_workers=0)
with pytest.raises(ValueError):
get_reusable_executor(max_workers=-1)
executor = get_reusable_executor()
with pytest.raises(ValueError):
executor._resize(max_workers=None)
@pytest.mark.skipif(sys.platform == "win32", reason="No fork on windows")
@pytest.mark.skipif(sys.version_info <= (3, 4),
reason="No context before 3.4")
def test_invalid_context(self):
"""Raise error on invalid context"""
with pytest.warns(UserWarning):
with pytest.raises(ValueError):
get_reusable_executor(max_workers=2, context="fork")
def test_pass_start_method_name_as_context(self):
executor = get_reusable_executor(max_workers=2, context='loky')
assert executor.submit(id, 42).result() >= 0
with pytest.raises(ValueError):
get_reusable_executor(max_workers=2, context='bad_start_method')
def test_interactively_defined_executor_no_main(self):
# check that the init_main_module parameter works properly
# when using -c option, we don't need the safeguard if __name__ ..
# and thus test LokyProcess without the extra argument. For running
# a script, it is necessary to use init_main_module=False.
code = """if True:
from loky import get_reusable_executor
e = get_reusable_executor()
e.submit(id, 42).result()
print("ok")
"""
check_python_subprocess_call(code, stdout_regex=r"ok")
def test_reused_flag(self):
executor, _ = _ReusablePoolExecutor.get_reusable_executor(
max_workers=2
)
executor, reused = _ReusablePoolExecutor.get_reusable_executor(
max_workers=2
)
assert reused
executor.shutdown(kill_workers=True)
executor, reused = _ReusablePoolExecutor.get_reusable_executor(
max_workers=2
)
assert not reused
@pytest.mark.xfail(cloudpickle_version >= LooseVersion("0.5.4") and
cloudpickle_version <= LooseVersion("0.7.0"),
reason="Known issue in cloudpickle")
# https://github.com/cloudpipe/cloudpickle/pull/240
def test_interactively_defined_nested_functions(self):
# Check that it's possible to call nested interactively defined
# functions and furthermore that changing the code interactively
# is taken into account by the single worker process.
code = """if True:
from loky import get_reusable_executor
e = get_reusable_executor(max_workers=1)
# Force a start of the children process:
e.submit(id, 42).result()
# Test that it's possible to call interactively defined, nested
# functions:
def inner_func(x):
return -x
def outer_func(x):
return inner_func(x)
assert e.submit(outer_func, 1).result() == outer_func(1) == -1
# Test that changes to the definition of the inner function are
# taken into account in subsequent calls to the outer function.
def inner_func(x):
return x
assert e.submit(outer_func, 1).result() == outer_func(1) == 1
print("ok")
"""
check_python_subprocess_call(code, stdout_regex=r"ok")
def test_interactively_defined_recursive_functions(self):
# Check that it's possible to call a recursive function defined
# in a closure.
        # Also check that calling several functions that stem from the same
# factory with different closure states results in the expected result:
# the function definitions should not collapse in the single worker
# process.
code = """if True:
from loky import get_reusable_executor
e = get_reusable_executor(max_workers=1)
# Force a start of the children process:
e.submit(id, 42).result()
def make_func(seed):
def func(x):
if x <= 0:
return seed
return func(x - 1) + 1
return func
func = make_func(0)
assert e.submit(func, 5).result() == func(5) == 5
func = make_func(1)
assert e.submit(func, 5).result() == func(5) == 6
print("ok")
"""
check_python_subprocess_call(code, stdout_regex=r"ok")
def test_compat_with_concurrent_futures_exception(self):
        # It should be possible to use a loky process pool executor as a drop-in
# replacement for a ProcessPoolExecutor, including when catching
# exceptions:
concurrent = pytest.importorskip('concurrent')
from concurrent.futures.process import BrokenProcessPool as BPPExc
with pytest.raises(BPPExc):
get_reusable_executor(max_workers=2).submit(crash).result()
e = get_reusable_executor(max_workers=2)
f = e.submit(id, 42)
# Ensure that loky.Future are compatible with concurrent.futures
# (see #155)
assert isinstance(f, concurrent.futures.Future)
(done, running) = concurrent.futures.wait([f], timeout=15)
assert len(running) == 0
thread_configurations = [
('constant', 'clean_start'),
('constant', 'broken_start'),
('varying', 'clean_start'),
('varying', 'broken_start'),
]
@pytest.mark.parametrize("workers, executor_state", thread_configurations)
def test_reusable_executor_thread_safety(self, workers, executor_state):
if executor_state == 'clean_start':
            # Create a new shared executor and ensure that its workers are
            # ready:
get_reusable_executor(reuse=False).submit(id, 42).result()
else:
# Break the shared executor before launching the threads:
with pytest.raises(TerminatedWorkerError,
match=filter_match(r"SIGSEGV")):
executor = get_reusable_executor(reuse=False)
executor.submit(return_instance, CrashAtPickle).result()
def helper_func(output_collector, max_workers=2, n_outer_steps=5,
n_inner_steps=10):
with warnings.catch_warnings(): # ignore resize warnings
warnings.simplefilter("always")
executor = get_reusable_executor(max_workers=max_workers)
for i in range(n_outer_steps):
results = executor.map(
lambda x: x ** 2, range(n_inner_steps))
expected_result = [x ** 2 for x in range(n_inner_steps)]
assert list(results) == expected_result
output_collector.append('ok')
if workers == 'constant':
max_workers = [2] * 10
else:
max_workers = [(i % 4) + 1 for i in range(10)]
# Use the same executor with the same number of workers concurrently
# in different threads:
output_collector = []
threads = [threading.Thread(
target=helper_func, args=(output_collector, w),
name='test_thread_%02d_max_workers_%d' % (i, w))
for i, w in enumerate(max_workers)]
with warnings.catch_warnings(record=True):
for t in threads:
t.start()
for t in threads:
t.join()
assert output_collector == ['ok'] * len(threads)
def test_reusable_executor_reuse_true(self):
executor = get_reusable_executor(max_workers=3, timeout=42)
executor.submit(id, 42).result()
assert len(executor._processes) == 3
assert executor._timeout == 42
executor2 = get_reusable_executor(reuse=True)
executor2.submit(id, 42).result()
assert len(executor2._processes) == 3
assert executor2._timeout == 42
assert executor2 is executor
executor3 = get_reusable_executor()
executor3.submit(id, 42).result()
assert len(executor3._processes) == cpu_count()
assert executor3._timeout == 10
assert executor3 is not executor
executor4 = get_reusable_executor()
assert executor4 is executor3
class TestExecutorInitializer(ReusableExecutorMixin):
def _initializer(self, x):
loky._initialized_state = x
def _test_initializer(self, delay=0):
sleep(delay)
return getattr(loky, "_initialized_state", "uninitialized")
def test_reusable_initializer(self):
executor = get_reusable_executor(
max_workers=2, initializer=self._initializer, initargs=('done',))
assert executor.submit(self._test_initializer).result() == 'done'
        # when the initializer changes, the executor is re-spawned
executor = get_reusable_executor(
max_workers=2, initializer=self._initializer, initargs=(42,))
assert executor.submit(self._test_initializer).result() == 42
        # With reuse=True, the executor uses the same initializer
executor = get_reusable_executor(max_workers=4, reuse=True)
for x in executor.map(self._test_initializer, delay=.1):
assert x == 42
# With reuse='auto', the initializer is not used anymore
executor = get_reusable_executor(max_workers=4)
for x in executor.map(self._test_initializer, delay=.1):
assert x == 'uninitialized'
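# --- Added illustrative example (not part of the original test suite): a
# minimal, hedged sketch of the reusable executor API exercised above. It
# only relies on get_reusable_executor and the id_sleep helper already used
# throughout this module.
def test_reusable_executor_minimal_roundtrip_example():
    executor = get_reusable_executor(max_workers=2, timeout=10)
    # submit() returns a Future; id_sleep echoes its first argument back.
    assert executor.submit(id_sleep, 21, 0).result() == 21
    # map() preserves the input ordering, like concurrent.futures executors.
    assert list(executor.map(id_sleep, range(3), [0] * 3)) == [0, 1, 2]
    # Requesting the same configuration again reuses the same executor object.
    assert get_reusable_executor(max_workers=2, timeout=10) is executor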
| 38.687952 | 79 | 0.64112 | 28,839 | 0.898103 | 164 | 0.005107 | 9,899 | 0.308274 | 0 | 0 | 9,385 | 0.292267 |
d24e25a2e5e83961161f51930a9dbcf5a8859141
| 3,781 |
py
|
Python
|
modules/common/parsers/timetable_parser.py
|
hgyoseo/hdmeal
|
f6f96c9190701b38eb6f08e2238f4f5214b95d3b
|
[
"MIT"
] | 2 |
2020-03-01T13:15:57.000Z
|
2020-03-25T18:53:21.000Z
|
modules/common/parsers/timetable_parser.py
|
hgyoseo/hdmeal
|
f6f96c9190701b38eb6f08e2238f4f5214b95d3b
|
[
"MIT"
] | null | null | null |
modules/common/parsers/timetable_parser.py
|
hgyoseo/hdmeal
|
f6f96c9190701b38eb6f08e2238f4f5214b95d3b
|
[
"MIT"
] | null | null | null |
# ██╗ ██╗██████╗ ███╗ ███╗███████╗ █████╗ ██╗
# ██║ ██║██╔══██╗████╗ ████║██╔════╝██╔══██╗██║
# ███████║██║ ██║██╔████╔██║█████╗ ███████║██║
# ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ ██╔══██║██║
# ██║ ██║██████╔╝██║ ╚═╝ ██║███████╗██║ ██║███████╗
# ╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚══════╝
# Copyright 2019-2020, Hyungyo Seo
# timetable_parser.py - A script that connects to the Comcigan server and parses timetable information.
import datetime
import json
import os
import urllib.error
import urllib.request
from itertools import groupby
from modules.common import conf, log
# Load configuration
NEIS_OPENAPI_TOKEN = conf.configs['Tokens']['NEIS']  # NEIS Open API authentication token
ATPT_OFCDC_SC_CODE = conf.configs['School']['NEIS']['ATPT_OFCDC_SC_CODE']  # Provincial office of education code
SD_SCHUL_CODE = conf.configs['School']['NEIS']['SD_SCHUL_CODE']  # Standard school code
timetable = {}
def parse(tt_grade, tt_class, year, month, date, req_id, debugging):
global timetable
timetable_raw_data = []
tt_date = datetime.date(year, month, date)
tt_grade = str(tt_grade)
tt_class = str(tt_class)
date_string = tt_date.strftime("%Y-%m-%d")
filename = 'data/cache/TT-%s.json' % date_string
log.info(
"[#%s] parse@timetable_parser.py: Started Parsing Timetable(%s-%s, %s)" % (req_id, tt_grade, tt_class, tt_date))
if tt_date.weekday() > 4:
return None
    # Fetch data from the NEIS API
def fetch():
global timetable
req = urllib.request.urlopen("https://open.neis.go.kr/hub/hisTimetable?KEY=%s&Type=json&pSize=1000"
"&ATPT_OFCDC_SC_CODE=%s&SD_SCHUL_CODE=%s&ALL_TI_YMD=%s" %
(NEIS_OPENAPI_TOKEN, ATPT_OFCDC_SC_CODE, SD_SCHUL_CODE, tt_date.strftime("%Y%m%d")),
timeout=2)
data = json.loads(req.read())
print(data)
try:
for i in data["hisTimetable"][1]["row"]:
timetable_raw_data.append([i["GRADE"], i["CLASS_NM"], i["ITRT_CNTNT"]])
except (urllib.error.HTTPError, urllib.error.URLError) as e:
log.err("[#%s] fetch.parse@timetable_parser.py: Failed to Parse Timetable(%s-%s, %s) because %s" % (
req_id, tt_grade, tt_class, tt_date, e))
raise ConnectionError
print(timetable_raw_data)
for grade, x in groupby(timetable_raw_data, lambda i: i[0]):
timetable[grade] = {}
for class_, y in groupby(x, lambda i: i[1]):
timetable[grade][class_] = [i[2] for i in y if i[2] != "토요휴업일"]
if timetable:
with open(filename, 'w',
encoding="utf-8") as make_file:
json.dump(timetable, make_file, ensure_ascii=False)
print("File Created")
    if os.path.isfile(filename):  # if a cache file exists
try:
log.info("[#%s] parse@timetable_parser.py: Read Data in Cache" % req_id)
            with open(filename, encoding="utf-8") as data_file:  # read the cache
timetable = json.load(data_file)
        except Exception:  # if the cache cannot be read
try:
                # Delete the cache file
                os.remove(filename)
except Exception as error:
log.err("[#%s] parse@timetable_parser.py: Failed to Delete Cache" % req_id)
return error
            fetch()  # re-parse from the API
    else:  # if there is no cache
log.info("[#%s] parse@timetable_parser.py: No Cache" % req_id)
        fetch()  # parse from the API
log.info("[#%s] parse@timetable_parser.py: Succeeded(%s-%s, %s)" % (
req_id, tt_grade, tt_class, tt_date))
return timetable[tt_grade][tt_class]
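# --- Illustrative note (added; not part of the original script): parse()
# above assumes the NEIS hisTimetable endpoint returns JSON shaped roughly
# like the hypothetical sample below, i.e. data["hisTimetable"][1]["row"] is
# a list of per-period records carrying GRADE, CLASS_NM and ITRT_CNTNT fields.
_EXAMPLE_NEIS_RESPONSE = {
    "hisTimetable": [
        {"head": [{"list_total_count": 2}]},  # metadata block (assumed layout)
        {"row": [
            {"GRADE": "3", "CLASS_NM": "11", "ITRT_CNTNT": "Korean"},
            {"GRADE": "3", "CLASS_NM": "11", "ITRT_CNTNT": "Math"},
        ]},
    ]
}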
# Debug
if __name__ == "__main__":
print(parse(3, 11, 2019, 10, 25, "****DEBUG****", True))
| 37.81 | 122 | 0.524729 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,960 | 0.437598 |
d24e88624ecd17dbeb714acc8fe1596a1a4493c1
| 34,597 |
py
|
Python
|
gittle/gittle.py
|
justecorruptio/gittle
|
e046fe4731ebe4168884e51ac5baa26c79f0567d
|
[
"Apache-2.0"
] | 1 |
2016-09-10T15:21:30.000Z
|
2016-09-10T15:21:30.000Z
|
gittle/gittle.py
|
justecorruptio/gittle
|
e046fe4731ebe4168884e51ac5baa26c79f0567d
|
[
"Apache-2.0"
] | null | null | null |
gittle/gittle.py
|
justecorruptio/gittle
|
e046fe4731ebe4168884e51ac5baa26c79f0567d
|
[
"Apache-2.0"
] | null | null | null |
# From the future
from __future__ import absolute_import
# Python imports
import os
import copy
import logging
from hashlib import sha1
from shutil import rmtree
from functools import partial, wraps
# Dulwich imports
from dulwich.repo import Repo as DulwichRepo
from dulwich.client import get_transport_and_path
from dulwich.index import build_index_from_tree, changes_from_tree
from dulwich.objects import Tree, Blob
from dulwich.server import update_server_info
# Funky imports
import funky
# Local imports
from gittle.auth import GittleAuth
from gittle.exceptions import InvalidRemoteUrl
from gittle import utils
# Exports
__all__ = ('Gittle',)
# Guarantee that a directory exists
def mkdir_safe(path):
if path and not(os.path.exists(path)):
os.makedirs(path)
return path
# Useful decorators
# A better way to do this in the future would maybe to use Mixins
def working_only(method):
@wraps(method)
def f(self, *args, **kwargs):
assert self.is_working, "%s can not be called on a bare repository" % method.func_name
return method(self, *args, **kwargs)
return f
def bare_only(method):
@wraps(method)
def f(self, *args, **kwargs):
assert self.is_bare, "%s can not be called on a working repository" % method.func_name
return method(self, *args, **kwargs)
return f
class Gittle(object):
"""All paths used in Gittle external methods must be paths relative to the git repository
"""
DEFAULT_COMMIT = 'HEAD'
DEFAULT_BRANCH = 'master'
DEFAULT_REMOTE = 'origin'
DEFAULT_MESSAGE = '**No Message**'
DEFAULT_USER_INFO = {
'name': None,
'email': None,
}
DIFF_FUNCTIONS = {
'classic': utils.git.classic_tree_diff,
'dict': utils.git.dict_tree_diff,
'changes': utils.git.dict_tree_diff
}
DEFAULT_DIFF_TYPE = 'dict'
HIDDEN_REGEXES = [
# Hide git directory
r'.*\/\.git\/.*',
]
# References
REFS_BRANCHES = 'refs/heads/'
REFS_REMOTES = 'refs/remotes/'
REFS_TAGS = 'refs/tags/'
# Name pattern truths
# Used for detecting if files are :
# - deleted
# - added
# - changed
PATTERN_ADDED = (False, True)
PATTERN_REMOVED = (True, False)
PATTERN_MODIFIED = (True, True)
# Permissions
MODE_DIRECTORY = 040000 # Used to tell if a tree entry is a directory
# Tree depth
MAX_TREE_DEPTH = 1000
# Acceptable Root paths
ROOT_PATHS = (os.path.curdir, os.path.sep)
def __init__(self, repo_or_path, origin_uri=None, auth=None, report_activity=None, *args, **kwargs):
if isinstance(repo_or_path, DulwichRepo):
self.repo = repo_or_path
elif isinstance(repo_or_path, Gittle):
self.repo = DulwichRepo(repo_or_path.path)
elif isinstance(repo_or_path, basestring):
path = os.path.abspath(repo_or_path)
self.repo = DulwichRepo(path)
else:
logging.warning('Repo is of type %s' % type(repo_or_path))
raise Exception('Gittle must be initialized with either a dulwich repository or a string to the path')
# Set path
self.path = self.repo.path
# The remote url
self.origin_uri = origin_uri
        # Report client activity
self._report_activity = report_activity
# Build ignore filter
self.hidden_regexes = copy.copy(self.HIDDEN_REGEXES)
self.hidden_regexes.extend(self._get_ignore_regexes())
self.ignore_filter = utils.paths.path_filter_regex(self.hidden_regexes)
self.filters = [
self.ignore_filter,
]
# Get authenticator
if auth:
self.authenticator = auth
else:
self.auth(*args, **kwargs)
def report_activity(self, *args, **kwargs):
if not self._report_activity:
return
return self._report_activity(*args, **kwargs)
def _format_author(self, name, email):
return "%s <%s>" % (name, email)
def _format_userinfo(self, userinfo):
name = userinfo.get('name')
email = userinfo.get('email')
if name and email:
return self._format_author(name, email)
return None
def _format_ref(self, base, extra):
return ''.join([base, extra])
def _format_ref_branch(self, branch_name):
return self._format_ref(self.REFS_BRANCHES, branch_name)
def _format_ref_remote(self, remote_name):
return self._format_ref(self.REFS_REMOTES, remote_name)
def _format_ref_tag(self, tag_name):
return self._format_ref(self.REFS_TAGS, tag_name)
@property
def head(self):
"""Return SHA of the current HEAD
"""
return self.repo.head()
@property
def is_bare(self):
"""Bare repositories have no working directories or indexes
"""
return self.repo.bare
@property
def is_working(self):
return not(self.is_bare)
def has_index(self):
"""Opposite of is_bare
"""
return self.repo.has_index()
@property
def has_commits(self):
"""
If the repository has no HEAD we consider that is has no commits
"""
try:
self.repo.head()
except KeyError:
return False
return True
def ref_walker(self, ref=None):
"""
Very simple, basic walker
"""
ref = ref or 'HEAD'
sha = self._commit_sha(ref)
return self.repo.revision_history(sha)
def branch_walker(self, branch):
branch = branch or self.DEFAULT_BRANCH
ref = self._format_ref_branch(branch)
return self.ref_walker(ref)
def commit_info(self, start=0, end=None, branch=None):
"""Return a generator of commits with all their attached information
"""
if not self.has_commits:
return []
commits = [utils.git.commit_info(entry) for entry in self.branch_walker(branch)]
if not end:
return commits
return commits[start:end]
@funky.uniquify
def recent_contributors(self, n=None, branch=None):
n = n or 10
return funky.pluck(self.commit_info(end=n, branch=branch), 'author')
@property
def commit_count(self):
try:
return len(self.ref_walker())
except KeyError:
return 0
def commits(self):
"""Return a list of SHAs for all the concerned commits
"""
return [commit['sha'] for commit in self.commit_info()]
@property
def git_dir(self):
return self.repo.controldir()
def auth(self, *args, **kwargs):
self.authenticator = GittleAuth(*args, **kwargs)
return self.authenticator
# Generate a branch selector (used for pushing)
def _wants_branch(self, branch_name=None):
branch_name = branch_name or self.DEFAULT_BRANCH
refs_key = self._format_ref_branch(branch_name)
sha = self.branches[branch_name]
def wants_func(old):
refs_key = self._format_ref_branch(branch_name)
return {
refs_key: sha
}
return wants_func
def _get_ignore_regexes(self):
gitignore_filename = os.path.join(self.path, '.gitignore')
if not os.path.exists(gitignore_filename):
return []
lines = open(gitignore_filename).readlines()
globers = map(lambda line: line.rstrip(), lines)
return utils.paths.globers_to_regex(globers)
# Get the absolute path for a file in the git repo
def abspath(self, repo_file):
return os.path.abspath(
os.path.join(self.path, repo_file)
)
# Get the relative path from the absolute path
def relpath(self, abspath):
return os.path.relpath(abspath, self.path)
@property
def last_commit(self):
return self[self.repo.head()]
@property
def index(self):
return self.repo.open_index()
@classmethod
def init(cls, path, bare=None, *args, **kwargs):
"""Initialize a repository"""
mkdir_safe(path)
# Constructor to use
if bare:
constructor = DulwichRepo.init_bare
else:
constructor = DulwichRepo.init
# Create dulwich repo
repo = constructor(path)
# Create Gittle repo
return cls(repo, *args, **kwargs)
@classmethod
def init_bare(cls, *args, **kwargs):
kwargs.setdefault('bare', True)
return cls.init(*args, **kwargs)
def get_client(self, origin_uri=None, **kwargs):
# Get the remote URL
origin_uri = origin_uri or self.origin_uri
        # Fail if nonexistent
if not origin_uri:
raise InvalidRemoteUrl()
client_kwargs = {}
auth_kwargs = self.authenticator.kwargs()
client_kwargs.update(auth_kwargs)
client_kwargs.update(kwargs)
client_kwargs.update({
'report_activity': self.report_activity
})
client, remote_path = get_transport_and_path(origin_uri, **client_kwargs)
return client, remote_path
def push_to(self, origin_uri, branch_name=None, progress=None, progress_stderr=None):
selector = self._wants_branch(branch_name=branch_name)
client, remote_path = self.get_client(origin_uri, progress_stderr=progress_stderr)
return client.send_pack(
remote_path,
selector,
self.repo.object_store.generate_pack_contents,
progress=progress
)
# Like: git push
def push(self, origin_uri=None, branch_name=None, progress=None, progress_stderr=None):
return self.push_to(origin_uri, branch_name, progress, progress_stderr)
# Not recommended at ALL ... !!!
def dirty_pull_from(self, origin_uri, branch_name=None):
# Remove all previously existing data
rmtree(self.path)
mkdir_safe(self.path)
self.repo = DulwichRepo.init(self.path)
# Fetch brand new copy from remote
return self.pull_from(origin_uri, branch_name)
def pull_from(self, origin_uri, branch_name=None):
return self.fetch(origin_uri)
# Like: git pull
def pull(self, origin_uri=None, branch_name=None):
return self.pull_from(origin_uri, branch_name)
def fetch_remote(self, origin_uri=None):
# Get client
client, remote_path = self.get_client(origin_uri=origin_uri)
# Fetch data from remote repository
remote_refs = client.fetch(remote_path, self.repo)
return remote_refs
def _setup_fetched_refs(self, refs, origin, bare):
remote_tags = utils.git.subrefs(refs, 'refs/tags')
remote_heads = utils.git.subrefs(refs, 'refs/heads')
# Filter refs
clean_remote_tags = utils.git.clean_refs(remote_tags)
clean_remote_heads = utils.git.clean_refs(remote_heads)
# Base of new refs
heads_base = 'refs/remotes/' + origin
if bare:
heads_base = 'refs/heads'
# Import branches
self.import_refs(
heads_base,
clean_remote_heads
)
# Import tags
self.import_refs(
'refs/tags',
clean_remote_tags
)
# Update HEAD
for k, v in refs.items():
self[k] = v
def fetch(self, origin_uri=None, bare=None, origin=None):
bare = bare or False
origin = origin or self.DEFAULT_REMOTE
# Remote refs
remote_refs = self.fetch_remote(origin_uri)
# Update head
# Hit repo because head doesn't yet exist so
# print("REFS = %s" % remote_refs)
# Update refs (branches, tags, HEAD)
self._setup_fetched_refs(remote_refs, origin, bare)
# Checkout working directories
if not bare and self.has_commits:
self.checkout_all()
else:
self.update_server_info()
@classmethod
def clone(cls, origin_uri, local_path, auth=None, mkdir=True, bare=False, *args, **kwargs):
"""Clone a remote repository"""
mkdir_safe(local_path)
# Initialize the local repository
if bare:
local_repo = cls.init_bare(local_path)
else:
local_repo = cls.init(local_path)
repo = cls(local_repo, origin_uri=origin_uri, auth=auth, *args, **kwargs)
repo.fetch(bare=bare)
# Add origin
# TODO
return repo
@classmethod
def clone_bare(cls, *args, **kwargs):
"""Same as .clone except clones to a bare repository by default
"""
kwargs.setdefault('bare', True)
return cls.clone(*args, **kwargs)
def _commit(self, committer=None, author=None, message=None, files=None, tree=None, *args, **kwargs):
if not tree:
# If no tree then stage files
modified_files = files or self.modified_files
logging.warning("STAGING : %s" % modified_files)
self.add(modified_files)
# Messages
message = message or self.DEFAULT_MESSAGE
author_msg = self._format_userinfo(author)
committer_msg = self._format_userinfo(committer)
return self.repo.do_commit(
message=message,
author=author_msg,
committer=committer_msg,
encoding='UTF-8',
tree=tree,
*args, **kwargs
)
def _tree_from_structure(self, structure):
# TODO : Support directories
tree = Tree()
for file_info in structure:
# str only
try:
data = file_info['data'].encode('ascii')
name = file_info['name'].encode('ascii')
mode = file_info['mode']
except:
# Skip file on encoding errors
continue
blob = Blob()
blob.data = data
# Store file's contents
self.repo.object_store.add_object(blob)
# Add blob entry
tree.add(
name,
mode,
blob.id
)
# Store tree
self.repo.object_store.add_object(tree)
return tree.id
    # Like: git commit -a
def commit(self, name=None, email=None, message=None, files=None, *args, **kwargs):
user_info = {
'name': name,
'email': email,
}
return self._commit(
committer=user_info,
author=user_info,
message=message,
files=files,
*args,
**kwargs
)
def commit_structure(self, name=None, email=None, message=None, structure=None, *args, **kwargs):
"""Main use is to do commits directly to bare repositories
For example doing a first Initial Commit so the repo can be cloned and worked on right away
"""
if not structure:
return
tree = self._tree_from_structure(structure)
user_info = {
'name': name,
'email': email,
}
return self._commit(
committer=user_info,
author=user_info,
message=message,
tree=tree,
*args,
**kwargs
)
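    # Illustrative sketch (added; inferred from _tree_from_structure above,
    # values are placeholders): `structure` is a flat list of file
    # descriptions, e.g.
    #
    #     structure = [
    #         {'name': 'README.md', 'data': 'Hello world\n', 'mode': 0100644},
    #     ]
    #     bare_repo.commit_structure(name='Example', email='user@example.com',
    #                                message='Initial commit',
    #                                structure=structure)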
# Push all local commits
# and pull all remote commits
def sync(self, origin_uri=None):
self.push(origin_uri)
return self.pull(origin_uri)
def lookup_entry(self, relpath, trackable_files=set()):
if not relpath in trackable_files:
raise KeyError
abspath = self.abspath(relpath)
with open(abspath, 'rb') as git_file:
data = git_file.read()
s = sha1()
s.update("blob %u\0" % len(data))
s.update(data)
return (s.hexdigest(), os.stat(abspath).st_mode)
@property
@funky.transform(set)
def tracked_files(self):
return list(self.index)
@property
@funky.transform(set)
def raw_files(self):
return utils.paths.subpaths(self.path)
@property
@funky.transform(set)
def ignored_files(self):
return utils.paths.subpaths(self.path, filters=self.filters)
@property
@funky.transform(set)
def trackable_files(self):
return self.raw_files - self.ignored_files
@property
@funky.transform(set)
def untracked_files(self):
return self.trackable_files - self.tracked_files
"""
@property
@funky.transform(set)
def modified_staged_files(self):
"Checks if the file has changed since last commit"
timestamp = self.last_commit.commit_time
index = self.index
return [
f
for f in self.tracked_files
if index[f][1][0] > timestamp
]
"""
# Return a list of tuples
# representing the changed elements in the git tree
def _changed_entries(self, ref=None):
ref = ref or self.DEFAULT_COMMIT
if not self.has_commits:
return []
obj_sto = self.repo.object_store
tree_id = self[ref].tree
names = self.trackable_files
lookup_func = partial(self.lookup_entry, trackable_files=names)
# Format = [((old_name, new_name), (old_mode, new_mode), (old_sha, new_sha)), ...]
tree_diff = changes_from_tree(names, lookup_func, obj_sto, tree_id, want_unchanged=False)
return list(tree_diff)
@funky.transform(set)
def _changed_entries_by_pattern(self, pattern):
changed_entries = self._changed_entries()
filtered_paths = [
funky.first_true(names)
for names, modes, sha in changed_entries
if tuple(map(bool, names)) == pattern and funky.first_true(names)
]
return filtered_paths
@property
@funky.transform(set)
def removed_files(self):
return self._changed_entries_by_pattern(self.PATTERN_REMOVED) - self.ignored_files
@property
@funky.transform(set)
def added_files(self):
return self._changed_entries_by_pattern(self.PATTERN_ADDED) - self.ignored_files
@property
@funky.transform(set)
def modified_files(self):
modified_files = self._changed_entries_by_pattern(self.PATTERN_MODIFIED) - self.ignored_files
return modified_files
@property
@funky.transform(set)
def modified_unstaged_files(self):
timestamp = self.last_commit.commit_time
return [
f
for f in self.tracked_files
if os.stat(self.abspath(f)).st_mtime > timestamp
]
@property
def pending_files(self):
"""
Returns a list of all files that could be possibly staged
"""
# Union of both
return self.modified_files | self.added_files | self.removed_files
@property
def pending_files_by_state(self):
files = {
'modified': self.modified_files,
'added': self.added_files,
'removed': self.removed_files
}
# "Flip" the dictionary
return {
path: state
for state, paths in files.items()
for path in paths
}
"""
@property
@funky.transform(set)
def modified_files(self):
return self.modified_staged_files | self.modified_unstaged_files
"""
# Like: git add
@funky.arglist_method
def stage(self, files):
return self.repo.stage(files)
def add(self, *args, **kwargs):
return self.stage(*args, **kwargs)
# Like: git rm
@funky.arglist_method
def rm(self, files, force=False):
index = self.index
index_files = filter(lambda f: f in index, files)
for f in index_files:
del self.index[f]
return index.write()
def mv_fs(self, file_pair):
old_name, new_name = file_pair
os.rename(old_name, new_name)
# Like: git mv
@funky.arglist_method
def mv(self, files_pair):
index = self.index
files_in_index = filter(lambda f: f[0] in index, files_pair)
map(self.mv_fs, files_in_index)
old_files = map(funky.first, files_in_index)
new_files = map(funky.last, files_in_index)
self.add(new_files)
self.rm(old_files)
self.add(old_files)
return
@working_only
def _checkout_tree(self, tree):
return build_index_from_tree(
self.repo.path,
self.repo.index_path(),
self.repo.object_store,
tree
)
def checkout_all(self, commit_sha=None):
commit_sha = commit_sha or self.head
commit_tree = self._commit_tree(commit_sha)
# Rebuild index from the current tree
return self._checkout_tree(commit_tree)
def checkout(self, commit_sha=None, files=None):
"""Checkout only a select amount of files
"""
commit_sha = commit_sha or self.head
files = files or []
return self
@funky.arglist_method
def reset(self, files, commit='HEAD'):
pass
def rm_all(self):
self.index.clear()
return self.index.write()
def _to_commit(self, commit_obj):
"""Allows methods to accept both SHA's or dulwich Commit objects as arguments
"""
if isinstance(commit_obj, basestring):
return self.repo[commit_obj]
return commit_obj
def _commit_sha(self, commit_obj):
"""Extracts a Dulwich commits SHA
"""
if utils.git.is_sha(commit_obj):
return commit_obj
elif isinstance(commit_obj, basestring):
# Can't use self[commit_obj] to avoid infinite recursion
commit_obj = self.repo[commit_obj]
return commit_obj.id
def _blob_data(self, sha):
"""Return a blobs content for a given SHA
"""
return self[sha].data
# Get the nth parent back for a given commit
def get_parent_commit(self, commit, n=None):
""" Recursively gets the nth parent for a given commit
Warning: Remember that parents aren't the previous commits
"""
if n is None:
n = 1
commit = self._to_commit(commit)
parents = commit.parents
if n <= 0 or not parents:
# Return a SHA
return self._commit_sha(commit)
parent_sha = parents[0]
parent = self[parent_sha]
# Recur
return self.get_parent_commit(parent, n - 1)
def get_previous_commit(self, commit_ref, n=None):
commit_sha = self._parse_reference(commit_ref)
n = n or 1
commits = self.commits()
return funky.next(commits, commit_sha, n=n, default=commit_sha)
def _parse_reference(self, ref_string):
# COMMIT_REF~x
if '~' in ref_string:
ref, count = ref_string.split('~')
count = int(count)
commit_sha = self._commit_sha(ref)
return self.get_previous_commit(commit_sha, count)
return self._commit_sha(ref_string)
def _commit_tree(self, commit_sha):
"""Return the tree object for a given commit
"""
return self[commit_sha].tree
def diff(self, commit_sha, compare_to=None, diff_type=None, filter_binary=True):
diff_type = diff_type or self.DEFAULT_DIFF_TYPE
diff_func = self.DIFF_FUNCTIONS[diff_type]
if not compare_to:
compare_to = self.get_previous_commit(commit_sha)
return self._diff_between(compare_to, commit_sha, diff_function=diff_func)
def diff_working(self, ref=None, filter_binary=True):
"""Diff between the current working directory and the HEAD
"""
return utils.git.diff_changes_paths(
self.repo.object_store,
self.path,
self._changed_entries(ref=ref),
filter_binary=filter_binary
)
def get_commit_files(self, commit_sha, parent_path=None, is_tree=None, paths=None):
"""Returns a dict of the following Format :
{
"directory/filename.txt": {
'name': 'filename.txt',
'path': "directory/filename.txt",
"sha": "xxxxxxxxxxxxxxxxxxxx",
"data": "blablabla",
"mode": 0xxxxx",
},
...
}
"""
# Default values
context = {}
is_tree = is_tree or False
parent_path = parent_path or ''
if is_tree:
tree = self[commit_sha]
else:
tree = self[self._commit_tree(commit_sha)]
for mode, path, sha in tree.entries():
# Check if entry is a directory
if mode == self.MODE_DIRECTORY:
context.update(
self.get_commit_files(sha, parent_path=os.path.join(parent_path, path), is_tree=True, paths=paths)
)
continue
subpath = os.path.join(parent_path, path)
# Only add the files we want
if not(paths is None or subpath in paths):
continue
# Add file entry
context[subpath] = {
'name': path,
'path': subpath,
'mode': mode,
'sha': sha,
'data': self._blob_data(sha),
}
return context
def file_versions(self, path):
"""Returns all commits where given file was modified
"""
versions = []
commits_info = self.commit_info()
seen_shas = set()
for commit in commits_info:
try:
files = self.get_commit_files(commit['sha'], paths=[path])
file_path, file_data = files.items()[0]
except IndexError:
continue
file_sha = file_data['sha']
if file_sha in seen_shas:
continue
else:
seen_shas.add(file_sha)
# Add file info
commit['file'] = file_data
versions.append(file_data)
return versions
def _diff_between(self, old_commit_sha, new_commit_sha, diff_function=None, filter_binary=True):
"""Internal method for getting a diff between two commits
        Please use the .diff method unless you have very specific needs
"""
# If commit is first commit (new_commit_sha == old_commit_sha)
# then compare to an empty tree
if new_commit_sha == old_commit_sha:
old_tree = Tree()
else:
old_tree = self._commit_tree(old_commit_sha)
new_tree = self._commit_tree(new_commit_sha)
return diff_function(self.repo.object_store, old_tree, new_tree, filter_binary=filter_binary)
def changes(self, *args, **kwargs):
""" List of changes between two SHAs
Returns a list of lists of tuples :
[
[
(oldpath, newpath), (oldmode, newmode), (oldsha, newsha)
],
...
]
"""
kwargs['diff_type'] = 'changes'
return self.diff(*args, **kwargs)
def changes_count(self, *args, **kwargs):
return len(self.changes(*args, **kwargs))
def _refs_by_pattern(self, pattern):
refs = self.refs
def item_filter(key_value):
"""Filter only concered refs"""
key, value = key_value
return key.startswith(pattern)
def item_map(key_value):
"""Rewrite keys"""
key, value = key_value
new_key = key[len(pattern):]
return (new_key, value)
return dict(
map(item_map,
filter(
item_filter,
refs.items()
)
)
)
@property
def refs(self):
return self.repo.get_refs()
    def set_refs(self, refs_dict):
for k, v in refs_dict.items():
self.repo[k] = v
def import_refs(self, base, other):
return self.repo.refs.import_refs(base, other)
@property
def branches(self):
return self._refs_by_pattern(self.REFS_BRANCHES)
def _active_branch(self, refs=None, head=None):
head = head or self.head
refs = refs or self.branches
try:
return {
branch: branch_head
for branch, branch_head in refs.items()
if branch_head == head
}.items()[0]
except IndexError:
pass
return (None, None)
@property
def active_branch(self):
return self._active_branch()[0]
@property
def active_sha(self):
return self._active_branch()[1]
@property
def remote_branches(self):
return self._refs_by_pattern(self.REFS_REMOTES)
@property
def tags(self):
return self._refs_by_pattern(self.REFS_TAGS)
@property
def remotes(self):
""" Dict of remotes
{
'origin': 'http://friendco.de/some_user/repo.git',
...
}
"""
config = self.repo.get_config()
return {
keys[1]: values['url']
for keys, values in config.items()
if keys[0] == 'remote'
}
def add_ref(self, new_ref, old_ref):
self.repo.refs[new_ref] = self.repo.refs[old_ref]
self.update_server_info()
def remove_ref(self, ref_name):
# Returns False if ref doesn't exist
if not ref_name in self.repo.refs:
return False
del self.repo.refs[ref_name]
self.update_server_info()
return True
def create_branch(self, base_branch, new_branch, tracking=None):
"""Try creating a new branch which tracks the given remote
if such a branch does not exist then branch off a local branch
"""
# The remote to track
tracking = self.DEFAULT_REMOTE
# Already exists
if new_branch in self.branches:
raise Exception("branch %s already exists" % new_branch)
# Get information about remote_branch
remote_branch = os.path.sep.join([tracking, base_branch])
# Fork Local
if base_branch in self.branches:
base_ref = self._format_ref_branch(base_branch)
# Fork remote
elif remote_branch in self.remote_branches:
base_ref = self._format_ref_remote(remote_branch)
# TODO : track
else:
raise Exception("Can not find the branch named '%s' to fork either locally or in '%s'" % (base_branch, tracking))
# Reference of new branch
new_ref = self._format_ref_branch(new_branch)
# Copy reference to create branch
self.add_ref(new_ref, base_ref)
return new_ref
def remove_branch(self, branch_name):
ref = self._format_ref_branch(branch_name)
return self.remove_ref(ref)
def switch_branch(self, branch_name, tracking=None, create=None):
"""Changes the current branch
"""
if create is None:
create = True
# Check if branch exists
if not branch_name in self.branches:
self.create_branch(branch_name, branch_name, tracking=tracking)
# Get branch reference
branch_ref = self._format_ref_branch(branch_name)
# Change main branch
self.repo.refs.set_symbolic_ref('HEAD', branch_ref)
if self.is_working:
# Remove all files
self.clean_working()
# Add files for the current branch
self.checkout_all()
def clean(self, force=None, directories=None):
untracked_files = self.untracked_files
map(os.remove, untracked_files)
return untracked_files
def clean_working(self):
"""Purges all the working (removes everything except .git)
used by checkout_all to get clean branch switching
"""
return self.clean()
def _get_fs_structure(self, tree_sha, depth=None, parent_sha=None):
tree = self[tree_sha]
structure = {}
if depth is None:
depth = self.MAX_TREE_DEPTH
elif depth == 0:
return structure
for mode, path, sha in tree.entries():
# tree
if mode == self.MODE_DIRECTORY:
# Recur
structure[path] = self._get_fs_structure(sha, depth=depth - 1, parent_sha=tree_sha)
# commit
else:
structure[path] = sha
structure['.'] = tree_sha
structure['..'] = parent_sha or tree_sha
return structure
def _get_fs_structure_by_path(self, tree_sha, path):
parts = path.split(os.path.sep)
depth = len(parts) + 1
structure = self._get_fs_structure(tree_sha, depth=depth)
return funky.subkey(structure, parts)
def commit_ls(self, ref, subpath=None):
"""List a "directory" for a given commit
using the tree of that commit
"""
tree_sha = self._commit_tree(ref)
# Root path
if subpath in self.ROOT_PATHS or not subpath:
return self._get_fs_structure(tree_sha, depth=1)
# Any other path
return self._get_fs_structure_by_path(tree_sha, subpath)
def commit_file(self, ref, path):
"""Return info on a given file for a given commit
"""
name, info = self.get_commit_files(ref, paths=[path]).items()[0]
return info
def commit_tree(self, ref, *args, **kwargs):
tree_sha = self._commit_tree(ref)
return self._get_fs_structure(tree_sha, *args, **kwargs)
def update_server_info(self):
if not self.is_bare:
return
update_server_info(self.repo)
def _is_fast_forward(self):
pass
def _merge_fast_forward(self):
pass
def __hash__(self):
"""This is required otherwise the memoize function will just mess it up
"""
return hash(self.path)
def __getitem__(self, key):
sha = self._parse_reference(key)
return self.repo[sha]
def __setitem__(self, key, value):
self.repo[key] = value
def __contains__(self, key):
return key in self.repo
# Alias to clone_bare
fork = clone_bare
log = commit_info
diff_count = changes_count
contributors = recent_contributors
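# --- Illustrative usage sketch (added for documentation; the default path is
# a placeholder and this helper is not part of the original API). It only
# calls public methods defined above.
def _example_workflow(repo_path='/tmp/gittle-example'):
    repo = Gittle.init(repo_path)                       # like `git init`
    with open(os.path.join(repo.path, 'hello.txt'), 'w') as handle:
        handle.write('hello\n')
    repo.stage(['hello.txt'])                           # like `git add`
    repo.commit(name='Example User', email='user@example.com',
                message='Initial commit')               # like `git commit -a`
    return repo.commit_count, repo.branches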
| 29.394223 | 125 | 0.598144 | 33,244 | 0.960893 | 0 | 0 | 6,888 | 0.199092 | 0 | 0 | 6,780 | 0.195971 |
d24ee59db0447d71e371a28fd126b436b147eeac
| 992 |
py
|
Python
|
testg.py
|
dcn01/AndroidDropFrameAnalysis
|
630d75dc999a8d1e4eec71edc0a1220334166d0a
|
[
"MIT"
] | 2 |
2018-12-10T03:49:03.000Z
|
2018-12-10T13:43:26.000Z
|
testg.py
|
dcn01/AndroidDropFrameAnalysis
|
630d75dc999a8d1e4eec71edc0a1220334166d0a
|
[
"MIT"
] | null | null | null |
testg.py
|
dcn01/AndroidDropFrameAnalysis
|
630d75dc999a8d1e4eec71edc0a1220334166d0a
|
[
"MIT"
] | null | null | null |
# fpsAllFrameRead = open("profileAllFrame.txt", "r")
# profileDataReadList =[]
# t = []
# for line in fpsAllFrameRead.readlines():
# profileDataReadList.append(line)
#
# for line in profileDataReadList:
# splitByComma = line.split(",")
# l = len(splitByComma)
# print str(l)
a = 34.4/(1000/60)
print str(a)
# fin = ""
# c = 0
# e = len(willBeInsertIntoSqlList)
# for tmplist in willBeInsertIntoSqlList:
# splitByT = tmplist.split("\t")
# if c==0:
# fin = fin +"{"
#
# if c==e -1:
# fin = fin+str(c)+":{\"Draw\":"+splitByT[1]+",\"Prepare\":"+splitByT[2]+",\"Process\":"+splitByT[3]+",\"Execute\":"+splitByT[4].strip()+"}}"
# else:
# fin = fin+str(c)+":{\"Draw\":"+splitByT[1]+",\"Prepare\":"+splitByT[2]+",\"Process\":"+splitByT[3]+",\"Execute\":"+splitByT[4].strip()+"},"
#
# c = c+1
# fin = "var person_data = "+fin+";\nvar svg_width = 88350;"
# dataWrite = open("./output/js/data.js", "w")
# dataWrite.write(fin)
| 31 | 149 | 0.5625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 931 | 0.938508 |
d24f47bb348b9648ed9893766e4cb276bd461df6
| 452 |
py
|
Python
|
app/core/urls.py
|
vatsamail/django-profiles
|
d9738fcb129e4f50ecde28126f5ffcccdf1999e0
|
[
"MIT"
] | 1 |
2019-05-24T14:22:04.000Z
|
2019-05-24T14:22:04.000Z
|
app/core/urls.py
|
vatsamail/django-profiles
|
d9738fcb129e4f50ecde28126f5ffcccdf1999e0
|
[
"MIT"
] | 9 |
2020-06-05T18:17:48.000Z
|
2022-03-11T23:21:33.000Z
|
app/core/urls.py
|
vatsamail/django-profiles
|
d9738fcb129e4f50ecde28126f5ffcccdf1999e0
|
[
"MIT"
] | 1 |
2018-06-22T05:54:58.000Z
|
2018-06-22T05:54:58.000Z
|
from django.urls import include, path, re_path
from . import views
from django.contrib.auth.views import (
login,
logout,
password_reset,
password_reset_done,
password_reset_confirm,
password_reset_complete,
)
app_name = 'core'
urlpatterns = [
path('', views.HomeView.as_view(), name='home'),
re_path(r'friending/(?P<operation>.+)/(?P<pk>\d+)/$', views.friending, name='friend_unfriend'),
]
| 26.588235 | 99 | 0.64823 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.165929 |
d250a6fd3bfdb7ab11ae4c2f8ffe9bfe5c487a4e
| 745 |
py
|
Python
|
Python/lab2/temp_convert_FtoC.py
|
varuneagle555/BSA-STEM-Merit-Badge-Week
|
04da40973c99eb64184bb98b58d8bf87b337456c
|
[
"MIT"
] | 3 |
2016-03-22T07:05:35.000Z
|
2021-01-08T21:46:32.000Z
|
Python/lab2/temp_convert_FtoC.py
|
varuneagle555/BSA-STEM-Merit-Badge-Week
|
04da40973c99eb64184bb98b58d8bf87b337456c
|
[
"MIT"
] | null | null | null |
Python/lab2/temp_convert_FtoC.py
|
varuneagle555/BSA-STEM-Merit-Badge-Week
|
04da40973c99eb64184bb98b58d8bf87b337456c
|
[
"MIT"
] | 4 |
2017-02-10T22:21:18.000Z
|
2022-02-20T01:06:25.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""temp_convert.py: Convert temperature F to C."""
# initialize looping variable, assume yes as first answer
continueYN = "Y"
while continueYN.upper() == "Y":
# get temperature input from the user, and prompt them for what we expect
degF = int(raw_input("Enter temperature in degrees Fahrenheit (°F) to convert: "))
    degC = (degF - 32) * 5.0 / 9  # use a float factor so integer division does not truncate
print "Temperature in degrees C is: {temp}".format(temp=degC)
# check for temperature below freezing...
if degC < 0:
print "Pack long underwear!"
# check for it being a very hot day...
if degF > 100:
print "Remember to hydrate!"
continueYN = raw_input("Would you like to enter another (Y/N)? ")
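# Worked check of the formula above (added for illustration):
#   212 F -> (212 - 32) * 5/9 = 100 C  (boiling point of water)
#    32 F -> ( 32 - 32) * 5/9 =   0 C  (freezing point of water)
#    50 F -> ( 50 - 32) * 5/9 =  10 C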
| 25.689655 | 86 | 0.64698 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 491 | 0.658177 |
d25277187f27f31c782ae6f4bfb336436c74c318
| 2,197 |
py
|
Python
|
test/connector/exchange/wazirx/test_wazirx_user_stream_tracker.py
|
BGTCapital/hummingbot
|
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
|
[
"Apache-2.0"
] | 3,027 |
2019-04-04T18:52:17.000Z
|
2022-03-30T09:38:34.000Z
|
test/connector/exchange/wazirx/test_wazirx_user_stream_tracker.py
|
BGTCapital/hummingbot
|
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
|
[
"Apache-2.0"
] | 4,080 |
2019-04-04T19:51:11.000Z
|
2022-03-31T23:45:21.000Z
|
test/connector/exchange/wazirx/test_wazirx_user_stream_tracker.py
|
BGTCapital/hummingbot
|
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
|
[
"Apache-2.0"
] | 1,342 |
2019-04-04T20:50:53.000Z
|
2022-03-31T15:22:36.000Z
|
#!/usr/bin/env python
from os.path import join, realpath
import sys; sys.path.insert(0, realpath(join(__file__, "../../../../../")))
import conf
from hummingbot.connector.exchange.wazirx.wazirx_api_order_book_data_source import WazirxAPIOrderBookDataSource
from hummingbot.connector.exchange.wazirx.wazirx_user_stream_tracker import WazirxUserStreamTracker
from hummingbot.connector.exchange.wazirx.wazirx_auth import WazirxAuth
import asyncio
from hummingbot.core.utils.async_utils import safe_ensure_future
import logging
import unittest
trading_pairs = ["BTC-INR", "ZRX-INR"]
class WazirxUserStreamTrackerUnitTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
api_key = conf.wazirx_api_key
secret_key = conf.wazirx_secret_key
cls.wazirx_auth = WazirxAuth(api_key, secret_key)
cls.wazirx_orderbook_data_source = WazirxAPIOrderBookDataSource(trading_pairs=trading_pairs)
cls.user_stream_tracker: WazirxUserStreamTracker = WazirxUserStreamTracker(cls.wazirx_auth, trading_pairs)
def run_async(self, task):
return self.ev_loop.run_until_complete(task)
async def _iter_user_event_queue(self):
while True:
try:
yield await self.user_stream_tracker.user_stream.get()
except asyncio.CancelledError:
raise
except Exception:
raise
async def _user_stream_event_listener(self):
""" Wait for 5 events to be seen """
count = 0
async for event_message in self._iter_user_event_queue():
logging.info(event_message)
if count > 5:
return
count += 1
def test_user_stream(self):
safe_ensure_future(self.user_stream_tracker.start())
        # Wait for some messages to be processed.
self.ev_loop.run_until_complete(self._user_stream_event_listener())
logging.info(self.user_stream_tracker.user_stream)
def main():
logging.basicConfig(level=logging.INFO)
unittest.main()
if __name__ == "__main__":
main()
| 35.435484 | 115 | 0.690942 | 1,469 | 0.668639 | 271 | 0.12335 | 470 | 0.213928 | 563 | 0.256259 | 129 | 0.058716 |
d252d60d44fc7e878fae2a2e799df7cff950fbd9
| 597 |
py
|
Python
|
setup.py
|
jaspershen/getDB
|
6f767279775e201f9505bb1e98dd141ffe0335f7
|
[
"MIT"
] | null | null | null |
setup.py
|
jaspershen/getDB
|
6f767279775e201f9505bb1e98dd141ffe0335f7
|
[
"MIT"
] | null | null | null |
setup.py
|
jaspershen/getDB
|
6f767279775e201f9505bb1e98dd141ffe0335f7
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(name='getDB',
version='0.0.4',
description="This module can be used to download HMDB and KEGG database.",
license='MIT',
author='Xiaotao Shen',
author_email='[email protected]',
url='https://github.com/jaspershen/getDB',
long_description_content_type="text/markdown",
packages=find_packages(),
install_requires=['requests', 'pandas', 'bs4', 'numpy'],
classifiers=[
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
]
)
| 35.117647 | 80 | 0.624791 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 274 | 0.458961 |
d2536eb6f6ea1a24212cca3d6076bd9bd30877a3
| 7,706 |
py
|
Python
|
lib/pts2angmap.py
|
samsafadi/PointRCNN
|
761d4cadb3e634dc0994f2e95318240c37fbb485
|
[
"MIT"
] | 1 |
2020-11-16T20:11:26.000Z
|
2020-11-16T20:11:26.000Z
|
lib/pts2angmap.py
|
samsafadi/PointRCNN
|
761d4cadb3e634dc0994f2e95318240c37fbb485
|
[
"MIT"
] | null | null | null |
lib/pts2angmap.py
|
samsafadi/PointRCNN
|
761d4cadb3e634dc0994f2e95318240c37fbb485
|
[
"MIT"
] | null | null | null |
"""
Modified from the sparsify.py file. This script generates an angle map of shape [H=64, W=1024, 4] from Velodyne LiDAR .bin files.
To run this:
python3 pts2angmap.py --calib_path '/root/gdrive/My Drive/PointRCNN/data/KITTI/object/training/calib/'\
--image_path '/root/gdrive/My Drive/PointRCNN/data/KITTI/object/training/image_2/' --ptc_path '/root/gdrive/My Drive/PointRCNN/data/KITTI/object/training/velodyne/'\
--split_file '/root/gdrive/My Drive/PointRCNN/data/KITTI/ImageSets/train.txt' --output_path '/root/gdrive/My Drive/PointRCNN/data/KITTI/object/training/angle_map/' --W 1024 --slice 1 --H 64
"""
import argparse
import os.path as osp
import time
import numpy as np
import torch
from tqdm.auto import tqdm
from data_utils.kitti_object import *
from data_utils.kitti_util import rotz, Calibration, load_image, load_velo_scan
from multiprocessing import Process, Queue, Pool
def pto_ang_map(data_idx, velo_points, H=64, W=512, slice=1, line_spec=None,
get_lines=False, fill_in_line=None, fill_in_spec=None,
fill_in_slice=None):
"""
:param H: the row num of depth map, could be 64(default), 32, 16
:param W: the col num of depth map
:param slice: output every slice lines
"""
dtheta = np.radians(0.4 * 64.0 / H)
dphi = np.radians(90.0 / W)
x, y, z, i = velo_points[:, 0], velo_points[:,
1], velo_points[:, 2], velo_points[:, 3]
# print('velo_points', velo_points[:4])
d = np.sqrt(x ** 2 + y ** 2 + z ** 2)
r = np.sqrt(x ** 2 + y ** 2)
d[d == 0] = 0.000001
r[r == 0] = 0.000001
phi = np.radians(45.) - np.arcsin(y / r)
phi_ = (phi / dphi).astype(int)
phi_[phi_ < 0] = 0
phi_[phi_ >= W] = W - 1
theta = np.radians(2.) - np.arcsin(z / d)
theta_ = (theta / dtheta).astype(int)
# print('theta_', theta_.shape)
# print('theta_', theta_[:100])
theta_[theta_ < 0] = 0
theta_[theta_ >= H] = H - 1
depth_map = - np.ones((H, W, 4))
depth_map[theta_, phi_, 0] = x
depth_map[theta_, phi_, 1] = y
depth_map[theta_, phi_, 2] = z
depth_map[theta_, phi_, 3] = i
if fill_in_line is not None:
if fill_in_spec is not None:
depth_map[fill_in_spec] = fill_in_line
else:
depth_map[::fill_in_slice, :, :] = fill_in_line
if line_spec is not None:
depth_map = depth_map[line_spec, :, :]
else:
depth_map = depth_map[::slice, :, :]
    if get_lines:
        depth_map_lines = depth_map.copy()
        # Return both maps so the two-value unpacking in gen_sparse_points()
        # (store_line_map_dir branch) works.
        return depth_map_lines, depth_map
    # print('depth_map', depth_map.shape)
    # # imageio.imwrite(depth_dir + '/' + data_idx+'.png', depth_map)
    # np.save(args.output_path + str(data_idx)+ '.npy', depth_map)
    # print(args.output_path + '/' + str(data_idx)+ '.npy')
    # print('Finish Depth Map {}'.format(data_idx))
    return depth_map
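# --- Added sketch (editor's illustration, not part of the original file) ---
# Shows how pto_ang_map() turns an (N, 4) Velodyne array into an (H, W, 4)
# angle map; the random points below are placeholders, not real KITTI data.
def _example_pto_ang_map():
    dummy_points = np.random.uniform(low=1.0, high=50.0, size=(1000, 4))
    angle_map = pto_ang_map(data_idx=0, velo_points=dummy_points, H=64, W=1024, slice=1)
    return angle_map.shape  # expected: (64, 1024, 4)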
def gen_sparse_points(data_idx, args):
calib = Calibration(
osp.join(args.calib_path, "{:06d}.txt".format(data_idx)))
pc_velo = load_velo_scan(
osp.join(args.ptc_path, "{:06d}.bin".format(data_idx)))
# print('pc_velo', pc_velo.shape)
img = load_image(osp.join(args.image_path, "{:06d}.png".format(data_idx)))
img_height, img_width, img_channel = img.shape
# print('img', img.shape)
_, _, valid_inds_fov = get_lidar_in_image_fov(
pc_velo[:, :3], calib, 0, 0, img_width, img_height, True)
pc_velo = pc_velo[valid_inds_fov]
valid_inds = (pc_velo[:, 0] < 120) & \
(pc_velo[:, 0] >= 0) & \
(pc_velo[:, 1] < 50) & \
(pc_velo[:, 1] >= -50) & \
(pc_velo[:, 2] < 1.5) & \
(pc_velo[:, 2] >= -2.5)
pc_velo = pc_velo[valid_inds]
# print('pc_velo', pc_velo.shape)
if args.fill_in_map_dir is not None and (args.fill_in_spec is not None or args.fill_in_slice is not None):
fill_in_line = np.load(os.path.join(
args.fill_in_map_dir, "{:06d}.npy".format(data_idx)))
else:
fill_in_line = None
if args.store_line_map_dir is not None:
depth_map_lines, ptc = pto_ang_map(data_idx, pc_velo, H=args.H, W=args.W, slice=args.slice,
line_spec=args.line_spec, get_lines=True,
fill_in_line=fill_in_line, fill_in_spec=args.fill_in_spec,
fill_in_slice=args.fill_in_slice)
np.save(osp.join(args.store_line_map_dir,
"{:06d}".format(data_idx)), depth_map_lines)
return ptc
else:
depth_map = pto_ang_map(data_idx, pc_velo, H=args.H, W=args.W, slice=args.slice,
line_spec=args.line_spec, get_lines=False,
fill_in_line=fill_in_line, fill_in_spec=args.fill_in_spec,
fill_in_slice=args.fill_in_slice)
# should save a 3D array in size (H, M, 4)
np.save(osp.join(args.output_path,
"{:06d}".format(data_idx)), depth_map)
return depth_map
def sparse_and_save(args, data_idx):
sparse_points = gen_sparse_points(data_idx, args)
# print('sparse_points', sparse_points.shape)
# sparse_points = sparse_points.astype(np.float32)
# sparse_points.tofile(args.output_path + '/' + '%06d.bin' % (data_idx))
print('Finish angle Map {:06d}'.format(data_idx))
# print('error')
def gen_sparse_points_all(args):
with open(args.split_file) as f:
data_idx_list = [int(x.strip())
for x in f.readlines() if len(x.strip()) > 0]
if not osp.exists(args.output_path):
os.makedirs(args.output_path)
if args.store_line_map_dir is not None and not osp.exists(args.store_line_map_dir):
os.makedirs(args.store_line_map_dir)
pool = Pool(args.threads)
res = []
pbar = tqdm(total=len(data_idx_list))
def update(*a):
pbar.update()
i = 0
for data_idx in data_idx_list:
# res.append((data_idx, pool.apply_async(
# sparse_and_save, args=(args, data_idx),
# callback=update)))
sparse_and_save(args, data_idx)
# break
pool.close()
pool.join()
pbar.clear(nolock=False)
pbar.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser("Generate sparse pseudo-LiDAR points")
parser.add_argument('--calib_path', type=str,
help='path to calibration files')
parser.add_argument('--image_path', type=str,
help='path to image files')
parser.add_argument('--ptc_path', type=str,
help='path to point cloud files')
parser.add_argument('--output_path', type=str,
help='path to sparsed point cloud files')
parser.add_argument('--slice', default=1, type=int)
parser.add_argument('--H', default=64, type=int)
parser.add_argument('--W', default=1024, type=int)
parser.add_argument('--D', default=700, type=int)
parser.add_argument('--store_line_map_dir', type=str, default=None)
parser.add_argument('--line_spec', type=int, nargs='+', default=None)
parser.add_argument('--fill_in_map_dir', type=str, default=None)
parser.add_argument('--fill_in_spec', type=int,
nargs='+', default=None)
parser.add_argument('--fill_in_slice', type=int, default=None)
parser.add_argument('--split_file', type=str)
parser.add_argument('--threads', type=int, default=4)
args = parser.parse_args()
gen_sparse_points_all(args)
| 34.401786 | 194 | 0.611601 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,111 | 0.273942 |
d2537e3317890ddaef34e1cff80e0e43d3fa3866
| 13,481 |
py
|
Python
|
testsuite/conversion.py
|
buganini/bsdconv
|
7830f4ebef9b04f9877a21f24a7705a48a4812c4
|
[
"BSD-2-Clause"
] | 33 |
2015-01-25T12:04:04.000Z
|
2021-12-12T23:16:55.000Z
|
testsuite/conversion.py
|
buganini/bsdconv
|
7830f4ebef9b04f9877a21f24a7705a48a4812c4
|
[
"BSD-2-Clause"
] | 14 |
2015-11-19T20:52:39.000Z
|
2021-06-15T03:18:31.000Z
|
testsuite/conversion.py
|
buganini/bsdconv
|
7830f4ebef9b04f9877a21f24a7705a48a4812c4
|
[
"BSD-2-Clause"
] | 5 |
2016-07-27T15:30:39.000Z
|
2020-07-06T11:52:15.000Z
|
# -*- coding: utf-8 -*-
import sys
import urllib
from bsdconv import Bsdconv
def bsdconv01(dt):
dt=dt.lstrip("0").upper()
if len(dt) & 1:
return "010"+dt
else:
return "01"+dt
def bnf(s):
return ",".join([bsdconv01(x) for x in s.strip().split(" ")])
iotest=[
["big5:utf-8","\xa5\x5c\x5c\xaf\xe0","功\能"],
["big5-5c,big5:utf-8","\xa5\x5c\x5c\xaf\xe0","功能"],
["utf-8:big5-5c,big5","功能","\xa5\x5c\x5c\xaf\xe0"],
["_cp950:utf-8","\xa5\x5c\xaf\xe0","功能"],
["utf-8:_cp950,ascii","喆",""],
["utf-8:_uao250,ascii","喆","\x95\xed"],
["utf-8:big5,cp950-trans","测试","\xb4\xfa\xb8\xd5"],
["ascii,3f:ascii","test測試test","test??????test"],
["ascii,any#0137:ascii","test測試test","test777777test"],
["utf-8:ascii,3f","test測試test","test??test"],
["utf-8:ascii,any#38","test測試test","test88test"],
["utf-8:uao250|_cp950,ascii,3f:utf-8","陶喆測試","陶?穘?試"],
["utf-8:uao250|_cp950,ascii,sub:utf-8","陶喆測試","陶�穘�試"],
["cns11643:utf-8","1234\x00\x01\x60\x41\x00\x01\x66\x5cabcd","1234測試abcd"],
["utf-8:cns11643","1234測試abcd","1234\x00\x01\x60\x41\x00\x01\x66\x5cabcd"],
["ansi-control,utf-8:split:bsdconv-keyword,bsdconv","a\033[1mb","0161,1B5B316D,0162,"],
["ascii-named-html-entity:utf-8","ü","ü"],
["ascii-numeric-html-entity:utf-8","測試","測試"],
["utf-8:ascii-hex-numeric-html-entity","測\n","測
"],
["utf-8:ascii-dec-numeric-html-entity","測\n","測 "],
["utf-8:ascii-named-html-entity","Ç","Ç"],
["bsdconv:utf-8","016e2c","測"],
["bsdconv:utf-8","016e2c,018a66","測試"],
["utf-8:bsdconv","測\n","016E2C010A"],
["utf-8:pass","測\n","\x01\x6e\x2c\x01\x0a"],
["utf-8:raw","測試\n","\x6e\x2c\x8a\x66\x0a"],
["bsdconv-keyword,utf-8:bsdconv-keyword,bsdconv|bsdconv-keyword,bsdconv:bsdconv-keyword,utf-8","測,試\t测,试\n","測,試\t测,试\n"],
["byte:byte","\xaa\xbb\xcc\xdd","\xaa\xbb\xcc\xdd"],
["escape:utf-8","%u6e2c","測"],
["escape:split:bsdconv-keyword,bsdconv","%u6e2c%e8%a9%a6","016E2C,03E8,03A9,03A6,"],
["escape:pass#mark&for=unicode,byte|pass#unmark,utf-8:utf-8","%u6e2c%e8%a9%a6","測試"],
["escape,utf-8:pass#mark&for=unicode,byte|pass#unmark,big5:utf-8","%u6e2c%b8%d5功能","測試功能"],
["escape,ascii-numeric-html-entity,utf-8:pass#mark&for=unicode,byte|pass#unmark,big5:utf-8","%u6e2c%b8%d5功能","測試功能"],
["escape:pass#mark&for=unicode,byte|pass#unmark,utf-8:utf-8","\\346\\270\\254\\350\\251\\246","測試"],
["utf-8:ascii,ascii-escaped-unicode","test測試","test\\u6E2C\\u8A66"],
["utf-8:ascii-html-cns11643-img","測","<img class=\"cns11643_img\" src=\"http://www.cns11643.gov.tw/AIDB/png.do?page=1&code=6041\" />"],
["utf-8:ascii-html-info","測\n","<a href=\"http://www.cns11643.gov.tw/AIDB/query_general_view.do?page=1&code=6041\"><img src=\"http://www.cns11643.gov.tw/AIDB/png.do?page=1&code=6041\" /></a><a href=\"http://www.fileformat.info/info/unicode/char/0A/index.htm\"><img class=\"unicode_img\" src=\"http://www.unicode.org/cgi-bin/refglyph?24-A\" /></a>"],
["utf-8:ascii-html-unicode-img","測","<img class=\"unicode_img\" src=\"http://www.unicode.org/cgi-bin/refglyph?24-6E2C\" />"],
["utf-8:null","blah",""],
["utf-8:ambiguous-pad:utf-8","БИ 2","Б И 2"],
["utf-8:ambiguous-unpad:utf-8","Б И 2","БИ 2"],
["ansi-control,byte:big5-defrag:byte,ansi-control|skip,big5:split:bsdconv-keyword,bsdconv","\xaf\033[1m\xe0","0180FD,1B5B316D,"],
["utf-8:chewing:utf-8","abc測試xyz","abcㄘㄜˋㄕˋxyz"],
["utf-8:chewing:han-pinyin:utf-8","測試","ce4shi4"],
["utf-8:kana-phonetic:utf-8","ドラえもん","doraemon"],
["ascii:alias-from:ascii","BIG5","UAO250"],
["ascii:alias-from:ascii","UAO250","ASCII,_UAO250"],
["ascii:alias-from:ascii","LOCALE","UTF-8"],
["ascii:alias-from:ascii","UTF-8","ASCII,_UTF-8"],
["ascii:alias-to:ascii","BIG5","CP950"],
["ascii:alias-to:ascii","CP950","_CP950,ASCII"],
["utf-8:cns11643:split:bsdconv-keyword,bsdconv","測試","02016041,0201665C,"],
["bsdconv:unicode:split:bsdconv-keyword,bsdconv","02016041,0201665C","016E2C,018A66,"],
["utf-8:upper:utf-8","testTEST","TESTTEST"],
["utf-8:lower:utf-8","testTEST","testtest"],
["utf-8:full:utf-8","testTEST1234","testTEST1234"],
["utf-8:half:utf-8","testTEST1234","testTEST1234"],
["utf-8:upsidedown:utf-8","FUNNY","Ⅎ∩ᴎᴎ⅄"],
["utf-8:unix:utf-8","a\r\nb","a\nb"],
["utf-8:mac:utf-8","a\r\nb","a\rb"],
["utf-8:win:utf-8","a\nb","a\r\nb"],
["utf-8:nl2br:utf-8","a\nb","a<br />b"],
["utf-8:trim-width#22&ambi-as-wide:utf-8","ˋˊ這是個很長的字串啊啊啊","ˋˊ這是個很長的字串啊"],
["utf-8:trim-width#22:utf-8","ˋˊ這是個很長的字串啊啊啊","ˋˊ這是個很長的字串啊啊"],
["utf-8:trim-width#10&ambiguous-as-wide:utf-8","三長兩短ˊˋ3長2短","三長兩短ˊ"],
["utf-8:zh-strings:utf-8","abd測試efg功能,hij","測試\n功能\n"],
["utf-8:zhcn:utf-8","測試","测试"],
["utf-8:zhtw:utf-8","测试之后","測試之后"],
["utf-8:zhtw:zhtw-words:utf-8","测试之后","測試之後"],
["utf-8:whitespace-derail:zhtw:zhtw-words:whitespace-rerail:utf-8","之 后","之 後"],
["utf-8:zh-decomp:zh-comp:utf-8","功夫不好不要艹我","巭孬嫑莪"],
["utf-8:ibm-37","EBCDIC test","\xc5\xc2\xc3\xc4\xc9\xc3\x40\xa3\x85\xa2\xa3"],
["utf-8:ibm-37|ibm-37:utf-8","EBCDIC test","EBCDIC test"],
["utf-8:ibm-930|ibm-930:utf-8","ドラえもん","ドラえもん"],
["utf-8:ibm-933|ibm-933:utf-8","십진법","십진법"],
["utf-8:ibm-935|ibm-935:utf-8","标准码","标准码"],
["utf-8:ibm-937|ibm-937:utf-8","編碼表","編碼表"],
["utf-8:ibm-939|ibm-939:utf-8","ドラえもん","ドラえもん"],
["utf-8:gb18030|gb18030:utf-8","标准码編碼表ドラえもん","标准码編碼表ドラえもん"],
["utf-8:ascii,escape#for=unicode&mode=16&prefix=2575","測a試b好","%u6E2Ca%u8A66b%u597D"],
["utf-8:big5|ascii,byte:ascii,escape#for=byte&mode=hex&prefix=5c78","測a試b好","\\xB4\\xFAa\\xB8\\xD5b\\xA6n"],
["utf-8:big5|ascii,byte:ascii,escape#for=byte&mode=oct&prefix=5c","測a試b好","\\264\\372a\\270\\325b\\246n"],
["utf-8:big5,pass#for=unicode&mark|pass#unmark,ascii,byte:ascii,url","測test喆試","%B4%FAtest%u5586%B8%D5"],
["utf-8:ascii,escape#for=unicode&prefix=2623&mode=10&suffix=3b","測test喆試","測test喆試"],
["utf-8:upper:utf-8","aăDžбᾥⅷⓐ","AĂDŽБᾭⅧⒶ"],
["utf-8:lower:utf-8","AĂDŽБᾭⅧⒶ","aădžбᾥⅷⓐ"],
["utf-8:nfd:utf-8","ăDžⓐ","ăDžⓐ"],
["utf-8:nfc:utf-8","ăDžⓐ","ăDžⓐ"],
["utf-8:nfkd:utf-8","ăDžⓐ","ăDža"],
["utf-8:nfkc:utf-8","ăDžⓐ","ăDža"],
["ascii,any#019644.012F:utf-8","A測B","A附/附/附/B"],
["utf-8:pass,zh-decomp:insert#after=002c:bsdconv-keyword,bsdconv","不大不要","014E0D,015927,014E0D,018981,"],
["utf-8:pass#limit=2,zh-decomp:insert#after=002c:bsdconv-keyword,bsdconv","不大不要","014E0D,015927,048D,040107,0476,"],
["bsdconv:nfd:_nf-order:insert#after=002c:bsdconv-keyword,bsdconv","011e9b,010323","01017F,010323,010307,"],
["utf-8:_nf-hangul-decomposition:utf-8","가","가"],
["utf-8:casefold:utf-8","AbяЯßẞ","abяяssss"],
["utf-8:replace#0142.0143=0132.0133:utf-8","ABCD","A23D"],
["utf-8:strings#min-len=2:utf-8","aㄎabㄎabcㄉabcd","ab\nabc\nabcd\n"],
["utf-8:strings#min-len=2&before=0128&after=0129.010a:utf-8","aㄎabㄎabcㄉabcd","(ab)\n(abc)\n(abcd)\n"],
["utf-8:whitespace-derail:zhtw:zhtw-words:whitespace-rerail:utf-8","之 后","之 後"],
["fallback-unicode:insert#after=002c:bsdconv-keyword,bsdconv", "\xe8","01E8,"],
["cp950-uda:insert#after=002c:bsdconv-keyword,bsdconv", "\xfa\x40\xfe\xfe\x8e\x40\xa0\xfe\x81\x40\x8d\xfe\xc6\xa1\xc8\xfe", "01E000,01E310,01E311,01EEB7,01EEB8,01F6B0,01F6B1,01F848,"],
["_utf-8:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xA0\x81\xED\xB0\x80", ""],
["_utf-8#cesu:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xA0\x81\xED\xB0\x80", "01010400,"],
["_utf-8#loose:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xA0\x81\xED\xB0\x80", "01D801,01DC00,"],
["_utf-8#cesu,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xA0\x81", "013F,013F,013F,"],
["_utf-8#cesu,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xB0\x80", "013F,013F,013F,"],
["_utf-8#cesu,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xA0\x81\xe9\x99\x84", "013F,013F,013F,019644,"],
["_utf-8#cesu,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xB0\x80\xe9\x99\x84", "013F,013F,013F,019644,"],
["_utf-8#loose,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xA0\x81\xe9\x99\x84", "01D801,019644,"],
["_utf-8#loose,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xB0\x80\xe9\x99\x84", "01DC00,019644,"],
["_utf-8#cesu&loose,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xA0\x81\xe9\x99\x84", "01D801,019644,"],
["_utf-8#cesu&loose,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xB0\x80\xe9\x99\x84", "01DC00,019644,"],
["_utf-8#cesu&loose,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xA0\x81\xED\xA0\x81", "01D801,01D801,"],
["_utf-8#cesu&loose,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xB0\x80\xED\xB0\x80", "01DC00,01DC00,"],
["_utf-8#loose,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xA0\x81\xED\xA0\x81", "01D801,01D801,"],
["_utf-8#loose,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xED\xB0\x80\xED\xB0\x80", "01DC00,01DC00,"],
["_utf-8:insert#after=002c:bsdconv-keyword,bsdconv", "\xf0\x80\x80\xaf", ""],
["_utf-8#overlong:insert#after=002c:bsdconv-keyword,bsdconv", "\xf0\x80\x80\xaf", "012F,"],
["_utf-8#super:insert#after=002c:bsdconv-keyword,bsdconv", "\xf8\x80\x80\x80\xaf", ""],
["_utf-8#super&overlong:insert#after=002c:bsdconv-keyword,bsdconv", "\xf8\x80\x80\x80\xaf", "012F,"],
["_utf-8#super,ascii,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xc1\xbf,\xe0\x9f\xbf,\xf0\x8f\xbf\xbf,\xf8\x87\xbf\xbf\xbf,\xfc\x83\xbf\xbf\xbf\xbf", "013F,013F,012C,013F,013F,013F,012C,013F,013F,013F,013F,012C,013F,013F,013F,013F,013F,012C,013F,013F,013F,013F,013F,013F,"],
["_utf-8#super&overlong,ascii,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xc1\xbf,\xe0\x9f\xbf,\xf0\x8f\xbf\xbf,\xf8\x87\xbf\xbf\xbf,\xfc\x83\xbf\xbf\xbf\xbf", "017F,012C,0107FF,012C,01FFFF,012C,011FFFFF,012C,0103FFFFFF,"],
["_utf-8#overlong,ascii,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xc1\xbf,\xe0\x9f\xbf,\xf0\x8f\xbf\xbf,\xf8\x87\xbf\xbf\xbf,\xfc\x83\xbf\xbf\xbf\xbf", "017F,012C,0107FF,012C,01FFFF,012C,013F,013F,013F,013F,013F,012C,013F,013F,013F,013F,013F,013F,"],
["_utf-8,ascii,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xc0\x80,\xe0\x80\x80,\xf0\x80\x80\x80,\xf8\x80\x80\x80\x80,\xfc\x80\x80\x80\x80\x80", "013F,013F,012C,013F,013F,013F,012C,013F,013F,013F,013F,012C,013F,013F,013F,013F,013F,012C,013F,013F,013F,013F,013F,013F,"],
["_utf-8#nul&overlong&super,ascii,3f:insert#after=002c:bsdconv-keyword,bsdconv", "\xc0\x80,\xe0\x80\x80,\xf0\x80\x80\x80,\xf8\x80\x80\x80\x80,\xfc\x80\x80\x80\x80\x80", "0100,012C,0100,012C,0100,012C,0100,012C,0100,"],
]
countertest=[
["utf-8:width:null","123Б測試",{"FULL":2,"AMBI":1,"HALF":3}],
["utf-8:count:null","123Б測試",{"COUNT":6}],
["utf-8:count#blah:null","123Б測試",{"BLAH":6}],
["utf-8:count#for=lala&for=cjk:null","123Б測a試bc",{"COUNT":2}],
]
passed=True
for c, i, o in iotest:
p=Bsdconv(c)
if not p:
print(Bsdconv.error())
print("Test failed at %s" % repr([c, i, o]))
del p
passed=False
continue
r=p.conv(i)
if o != r:
print("Test failed at %s" % repr([c, i, o]))
print("expected(%d): %s" % (len(o), repr(o)))
print("result(%d): %s" % (len(r), repr(r)))
passed=False
del p
for c, d, i in countertest:
p=Bsdconv(c)
if not p:
print(Bsdconv.error())
print("Test failed at %s" % repr([c, i, o]))
passed=False
continue
p.conv(d)
r=p.counter()
for k in i:
if i[k] != r[k]:
print("Test failed at %s" % repr([c, d, i]))
print("expected: %s" % repr(i))
print("result: %s" % repr(r))
passed=False
del p
url=""
f_map=open("tmp/map.txt")
for l in f_map:
l=l.strip().split("\t")
if l[0]=="NormalizationTest.txt":
url=l[1]
break
nt=open("tmp/NormalizationTest.txt")
toSRC=Bsdconv("bsdconv:insert#after=002c:bsdconv-keyword,bsdconv")
toNFC=Bsdconv("bsdconv:nfc:insert#after=002c:bsdconv-keyword,bsdconv")
toNFD=Bsdconv("bsdconv:nfd:insert#after=002c:bsdconv-keyword,bsdconv")
toNFKC=Bsdconv("bsdconv:nfkc:insert#after=002c:bsdconv-keyword,bsdconv")
toNFKD=Bsdconv("bsdconv:nfkd:insert#after=002c:bsdconv-keyword,bsdconv")
print("Normalization Tests: #"+url)
ln = 0
for l in nt:
ln += 1
if not l:
continue
if l[0]=="#":
continue
if l[0]=="@":
print("\t"+l.strip())
continue
c1,c2,c3,c4,c5,comment=l.strip().split(";",5)
c1=bnf(c1)
c2=bnf(c2)
c3=bnf(c3)
c4=bnf(c4)
c5=bnf(c5)
nftest=[
#NFC
[toSRC.conv(c2), toNFC.conv(c1), "c2 == toNFC(c1)"],
[toNFC.conv(c1), toNFC.conv(c2), "toNFC(c1) == toNFC(c2)"],
[toNFC.conv(c2), toNFC.conv(c3), "toNFC(c2) == toNFC(c3)"],
[toSRC.conv(c4), toNFC.conv(c4), "c4 == toNFC(c4)"],
[toNFC.conv(c4), toNFC.conv(c5), "toNFC(c4) == toNFC(c5)"],
#NFD
[toSRC.conv(c3), toNFD.conv(c1), "c3 == toNFD(c1)"],
[toNFD.conv(c1), toNFD.conv(c2), "toNFD(c1) == toNFD(c2)"],
[toNFD.conv(c2), toNFD.conv(c3), "toNFD(c2) == toNFD(c3)"],
[toSRC.conv(c5), toNFD.conv(c4), "c5 == toNFD(c4)"],
[toNFD.conv(c4), toNFD.conv(c5), "toNFD(c4) == toNFD(c5)"],
#NFKC
[toSRC .conv(c4), toNFKC.conv(c1), "c4 == toNFKC(c1)"],
[toNFKC.conv(c1), toNFKC.conv(c2), "toNFKC(c1) == toNFKC(c2)"],
[toNFKC.conv(c2), toNFKC.conv(c3), "toNFKC(c2) == toNFKC(c3)"],
[toNFKC.conv(c3), toNFKC.conv(c4), "toNFKC(c3) == toNFKC(c4)"],
[toNFKC.conv(c4), toNFKC.conv(c5), "toNFKC(c4) == toNFKC(c5)"],
#NFKD
[toSRC .conv(c5), toNFKD.conv(c1)," c5 == toNFKD(c1)"],
[toNFKD.conv(c1), toNFKD.conv(c2), "toNFKD(c1) == toNFKD(c2)"],
[toNFKD.conv(c2), toNFKD.conv(c3), "toNFKD(c2) == toNFKD(c3)"],
[toNFKD.conv(c3), toNFKD.conv(c4), "toNFKD(c3) == toNFKD(c4)"],
[toNFKD.conv(c4), toNFKD.conv(c5), "toNFKD(c4) == toNFKD(c5)"],
]
for a,b,desc in nftest:
if a!=b:
print ln, "Failed: ", desc, a, "!=", b, comment
print("Conversion tests finished.")
| 53.284585 | 350 | 0.656925 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11,138 | 0.782163 |
d2554278f5d4ba5a87659a474ac65fdd8acaa5a1
| 2,488 |
py
|
Python
|
apps/cloud/odc/apps/cloud/thredds_to_tar.py
|
robbibt/odc-tools
|
e2df2c9ef65dbd5652d97cd88617989b4b724814
|
[
"Apache-2.0"
] | null | null | null |
apps/cloud/odc/apps/cloud/thredds_to_tar.py
|
robbibt/odc-tools
|
e2df2c9ef65dbd5652d97cd88617989b4b724814
|
[
"Apache-2.0"
] | null | null | null |
apps/cloud/odc/apps/cloud/thredds_to_tar.py
|
robbibt/odc-tools
|
e2df2c9ef65dbd5652d97cd88617989b4b724814
|
[
"Apache-2.0"
] | null | null | null |
import tarfile
import click
import requests
from odc.io.tar import tar_mode, add_txt_file
from multiprocessing.dummy import Pool as ThreadPool
from functools import partial
from urllib.parse import urlparse
from thredds_crawler.crawl import Crawl
def download(url):
parsed_uri = urlparse(url)
target_filename = url[len(parsed_uri.scheme + '://'):]
return requests.get(url).content, target_filename
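# --- Added sketch (editor's illustration, not part of the original file) ---
# The scheme-stripping in download() means each fetched URL is stored in the
# tar under a host-prefixed path; the URL below is made up.
def _example_target_name():
    url = "http://example.com/thredds/fileServer/a/b.yaml"
    parsed = urlparse(url)
    return url[len(parsed.scheme + '://'):]  # "example.com/thredds/fileServer/a/b.yaml"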
@click.command('thredds-to-tar')
@click.option('--thredds_catalogue', '-c', type=str, required=True, help="The THREDDS catalogue endpoint")
@click.option('--skips', '-s', type=str, multiple=True,
help="Pattern to ignore when THREDDS crawling")
@click.option('--select', '-t', type=str, required=True,
help="Target file pattern to match for yaml")
@click.option('--workers', '-w', type=int, default=4, help="Number of thredds crawler workers to use")
@click.option('--outfile', type=str, default="metadata.tar.gz", help="Sets the output file name")
def cli(thredds_catalogue,
skips,
select,
workers,
outfile):
""" Download Metadata from THREDDS server to tarball
Example:
\b
Download files in directory that match `*yaml` and store them as a tar
> thredds-to-tar -c "http://dapds00.nci.org.au/thredds/catalog/if87/2018-11-29/"
-t ".*ARD-METADATA.yaml" -s '.*NBAR.*' -s '.*SUPPLEMENTARY.*'
-s '.*NBART.*' -s '.*/QA/.*' -w 8 --outfile 2018-11-29.tar.gz
"""
user_skips = Crawl.SKIPS
for skip in skips:
user_skips = user_skips+[skip]
print("Searching {thredds_catalogue} for matching files".format(thredds_catalogue=thredds_catalogue))
results = Crawl(thredds_catalogue + '/catalog.xml', select=[select], skip=user_skips, workers=workers).datasets
print("Found {0} metadata files".format(str(len(results))))
urls = [service['url'] for dataset in results
for service in dataset.services
if service['service'].lower() == 'httpserver']
# use a threadpool to download from thredds
pool = ThreadPool(workers)
yamls = pool.map(partial(download), urls)
pool.close()
pool.join()
# jam it all in a tar
tar_opts = dict(name=outfile, mode='w' + tar_mode(gzip=True, xz=True, is_pipe=False))
with tarfile.open(**tar_opts) as tar:
for yaml in yamls:
add_txt_file(tar=tar, content=yaml[0], fname=yaml[1])
print("Done!")
if __name__ == '__main__':
cli()
| 33.621622 | 115 | 0.663987 | 0 | 0 | 0 | 0 | 2,032 | 0.81672 | 0 | 0 | 891 | 0.358119 |
d25543f2eb84e1a829ecf2a781633ed4850daa4c
| 599 |
py
|
Python
|
examples/ec2/tests/config.py
|
dabble-of-devops-biodeploy/terraform-aws-batch
|
9d075163821f81f33d6be767820d1db20b45eb8e
|
[
"Apache-2.0"
] | 3 |
2021-12-07T18:10:16.000Z
|
2022-02-04T09:15:31.000Z
|
examples/ec2/tests/config.py
|
dabble-of-devops-biodeploy/terraform-aws-batch
|
9d075163821f81f33d6be767820d1db20b45eb8e
|
[
"Apache-2.0"
] | null | null | null |
examples/ec2/tests/config.py
|
dabble-of-devops-biodeploy/terraform-aws-batch
|
9d075163821f81f33d6be767820d1db20b45eb8e
|
[
"Apache-2.0"
] | 1 |
2022-02-22T01:48:38.000Z
|
2022-02-22T01:48:38.000Z
|
DATA_S3 = "bioanalyze-ec2-test-nf-rnaseq-06o3qdtm7v"
JOB_S3 = DATA_S3
# These come from the terraform code in auto-deployment/terraform
ECR = "dabbleofdevops/nextflow-rnaseq-tutorial"
COMPUTE_ENVIRONMENT = "bioanalyze-ec2-test-nf-rnaseq"
JOB_DEF_NAME = "bioanalyze-ec2-test-nf-rnaseq"
JOB_QUEUE_NAME = "bioanalyze-ec2-test-nf-rnaseq-default-job-queue"
JOB_ROLE = "arn:aws:iam::018835827632:role/bioanalyze-ec2-test-nf-rnaseq-batch_execution_role"
SECRET_NAME = "bioanalyze-ec2-test-nf-rnaseq"
SECRET_ARN = "arn:aws:secretsmanager:us-east-1:018835827632:secret:bioanalyze-ec2-test-nf-rnaseq-Zg7kMY"
| 49.916667 | 104 | 0.806344 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 464 | 0.774624 |
d255a8c98ce6037d15065ccd226fd922085a64a0
| 4,067 |
py
|
Python
|
adios-1.9.0/wrappers/numpy/example/utils/ncdf2bp.py
|
swatisgupta/Adaptive-compression
|
b97a1d3d3e0e968f59c7023c7367a7efa9f672d0
|
[
"BSD-2-Clause"
] | null | null | null |
adios-1.9.0/wrappers/numpy/example/utils/ncdf2bp.py
|
swatisgupta/Adaptive-compression
|
b97a1d3d3e0e968f59c7023c7367a7efa9f672d0
|
[
"BSD-2-Clause"
] | null | null | null |
adios-1.9.0/wrappers/numpy/example/utils/ncdf2bp.py
|
swatisgupta/Adaptive-compression
|
b97a1d3d3e0e968f59c7023c7367a7efa9f672d0
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
"""
Example:
$ python ./ncdf2bp.py netcdf_file
"""
from adios import *
from scipy.io import netcdf
import numpy as np
import sys
import os
import operator
def usage():
print os.path.basename(sys.argv[0]), "netcdf_file"
if len(sys.argv) < 2:
usage()
sys.exit(0)
##fname = "MERRA100.prod.assim.tavg3_3d_mst_Cp.19791010.SUB.nc"
fname = sys.argv[1]
fout = '.'.join(fname.split('.')[:-1]) + ".bp"
tname = "time"
if len(sys.argv) > 2:
tname = sys.argv[2]
## Open NetCDF file
f = netcdf.netcdf_file(fname, 'r')
## Check dimension
assert (all(map(lambda x: x is not None,
[ val for k, val in f.dimensions.items()
if k != tname])))
## Two types of variables : time-dependent or time-independent
dimvar = {n:v for n,v in f.variables.items() if n in f.dimensions.keys()}
var = {n:v for n,v in f.variables.items() if n not in f.dimensions.keys()}
tdepvar = {n:v for n,v in var.items() if tname in v.dimensions}
tindvar = {n:v for n,v in var.items() if tname not in v.dimensions}
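## --- Added illustration (editor's note, not part of the original script) ---
## A variable is "time-dependent" iff its dimensions include the time name,
## e.g. ("time", "lat", "lon") -> tdepvar, while ("lat", "lon") -> tindvar.
def _is_time_dependent(dims, time_name=tname):
    return time_name in dims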
## Time dimension
if len(tdepvar) > 0:
assert (len(set([v.dimensions.index(tname) for v in tdepvar.values()]))==1)
tdx = tdepvar.values()[0].dimensions.index(tname)
assert (all([v.data.shape[tdx] for v in tdepvar.values()]))
tdim = tdepvar.values()[0].shape[tdx]
else:
tdim = 1
## Init ADIOS without xml
init_noxml()
allocate_buffer(BUFFER_ALLOC_WHEN.NOW, 100)
gid = declare_group ("group", tname, FLAG.YES)
select_method (gid, "POSIX1", "verbose=3", "")
d1size = 0
for name, val in f.dimensions.items():
if name == tname:
continue
print "Dimension : %s (%d)" % (name, val)
define_var (gid, name, "", DATATYPE.integer, "", "", "")
d1size += 4
"""
d2size = 0
for name, var in dimvar.items():
if name == tname:
continue
if name in f.dimensions.keys():
name = "v_" + name
print "Dim variable : %s (%s)" % (name, ','.join(var.dimensions))
define_var (gid, name, "", np2adiostype(var.data.dtype.type),
','.join(var.dimensions),
"",
"")
d2size += var.data.size * var.data.dtype.itemsize
"""
v1size = 0
for name, var in tindvar.items():
print "Variable : %s (%s)" % (name, ','.join(var.dimensions))
define_var (gid, name, "", np2adiostype(var.data.dtype.type),
','.join(var.dimensions),
"",
"")
v1size += var.data.size * var.data.dtype.itemsize
v2size = 0
for name, var in tdepvar.items():
print "Variable : %s (%s)" % (name, ','.join(var.dimensions))
define_var (gid, name, "", np2adiostype(var.data.dtype.type),
','.join(var.dimensions),
','.join([dname for dname in var.dimensions
if dname != tname]),
"0,0,0")
v2size += var.data.size * var.data.dtype.itemsize / tdim
## Clean old file
if os.access(fout, os.F_OK):
os.remove(fout)
for it in range(tdim):
print
print "Time step : %d" % (it)
fd = open("group", fout, "a")
groupsize = d1size + v1size + v2size
set_group_size(fd, groupsize)
for name, val in f.dimensions.items():
if name == tname:
continue
print "Dimension writing : %s (%d)" % (name, val)
write_int(fd, name, val)
for name, var in tindvar.items():
try:
arr = np.array(var.data,
dtype=var.data.dtype.type)
print "Time independent variable writing : %s %s" % (name, arr.shape)
write(fd, name, arr)
except ValueError:
print "Skip:", name
for name, var in tdepvar.items():
try:
arr = np.array(var.data.take([it], axis=tdx),
dtype=var.data.dtype)
print "Time dependent variable writing : %s %s" % (name, arr.shape)
write(fd, name, arr)
except ValueError:
print "Skip:", name
close(fd)
f.close()
finalize()
print
print "Done. Saved:", fout
| 27.666667 | 81 | 0.572904 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,045 | 0.256946 |
d256dc1971a485e302633a36903b74f4a74ac3ab
| 2,322 |
py
|
Python
|
airflow/operators/hive_operator.py
|
nirmeshk/airflow
|
4556450b88ef7682a006e9125131a5bb3a91df00
|
[
"Apache-2.0"
] | 1 |
2021-03-02T20:08:53.000Z
|
2021-03-02T20:08:53.000Z
|
airflow/operators/hive_operator.py
|
nirmeshk/airflow
|
4556450b88ef7682a006e9125131a5bb3a91df00
|
[
"Apache-2.0"
] | null | null | null |
airflow/operators/hive_operator.py
|
nirmeshk/airflow
|
4556450b88ef7682a006e9125131a5bb3a91df00
|
[
"Apache-2.0"
] | null | null | null |
import logging
import re
from airflow.hooks import HiveCliHook
from airflow.models import BaseOperator
from airflow.utils import apply_defaults
class HiveOperator(BaseOperator):
"""
Executes hql code in a specific Hive database.
:param hql: the hql to be executed
:type hql: string
:param hive_cli_conn_id: reference to the Hive database
:type hive_cli_conn_id: string
:param hiveconf_jinja_translate: when True, hiveconf-type templating
        ${var} gets translated into jinja-type templating {{ var }}. Note that
        you may want to use this along with the
``DAG(user_defined_macros=myargs)`` parameter. View the DAG
object documentation for more details.
:type hiveconf_jinja_translate: boolean
:param script_begin_tag: If defined, the operator will get rid of the
part of the script before the first occurrence of `script_begin_tag`
:type script_begin_tag: str
"""
template_fields = ('hql', 'schema')
template_ext = ('.hql', '.sql',)
ui_color = '#f0e4ec'
@apply_defaults
def __init__(
self, hql,
hive_cli_conn_id='hive_cli_default',
schema='default',
hiveconf_jinja_translate=False,
script_begin_tag=None,
*args, **kwargs):
super(HiveOperator, self).__init__(*args, **kwargs)
self.hiveconf_jinja_translate = hiveconf_jinja_translate
self.hql = hql
self.schema = schema
self.hive_cli_conn_id = hive_cli_conn_id
self.script_begin_tag = script_begin_tag
def get_hook(self):
return HiveCliHook(hive_cli_conn_id=self.hive_cli_conn_id)
def prepare_template(self):
if self.hiveconf_jinja_translate:
self.hql = re.sub(
"(\$\{([ a-zA-Z0-9_]*)\})", "{{ \g<2> }}", self.hql)
if self.script_begin_tag and self.script_begin_tag in self.hql:
self.hql = "\n".join(self.hql.split(self.script_begin_tag)[1:])
def execute(self, context):
logging.info('Executing: ' + self.hql)
self.hook = self.get_hook()
self.hook.run_cli(hql=self.hql, schema=self.schema)
def dry_run(self):
self.hook = self.get_hook()
self.hook.test_hql(hql=self.hql)
def on_kill(self):
self.hook.kill()
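# --- Added sketch (editor's illustration, not part of the original module) ---
# Mirrors the hiveconf_jinja_translate substitution from prepare_template();
# '${ds}' is just an example hiveconf-style variable.
def _example_hiveconf_translate():
    return re.sub(r"(\$\{([ a-zA-Z0-9_]*)\})", r"{{ \g<2> }}", "SELECT '${ds}'")
    # -> "SELECT '{{ ds }}'"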
| 33.652174 | 77 | 0.656331 | 2,174 | 0.936262 | 0 | 0 | 520 | 0.223945 | 0 | 0 | 883 | 0.380276 |
d2571cfece71be4e3c7267fd9fb5b654ad0b459f
| 1,042 |
py
|
Python
|
classification/prepare_model.py
|
JSC-NIIAS/TwGoA4aij2021
|
9f011f506748435190f8e4e635820c8208144b94
|
[
"MIT"
] | null | null | null |
classification/prepare_model.py
|
JSC-NIIAS/TwGoA4aij2021
|
9f011f506748435190f8e4e635820c8208144b94
|
[
"MIT"
] | null | null | null |
classification/prepare_model.py
|
JSC-NIIAS/TwGoA4aij2021
|
9f011f506748435190f8e4e635820c8208144b94
|
[
"MIT"
] | null | null | null |
import os
import yaml
import segmentation_models_pytorch as smp
import torch
import argparse
import torch.nn as nn
import timm
from model_wrapper import Classification_model
def prepare_model(opt):
    with open(opt.hyp) as f:
        experiment_dict = yaml.load(f, Loader=yaml.FullLoader)
    # Build the timm backbone and wrap it with the project-specific classification head.
    model_pretrained = timm.create_model(experiment_dict['model']['name'], pretrained=experiment_dict['model']['pretrained'], num_classes=experiment_dict['model']['num_classes'])
    model = Classification_model(model_pretrained, experiment_dict['model']['model_type'], experiment_dict['model']['num_classes_mt'])
    # The checkpoint was saved from a DataParallel-wrapped model, so wrap before loading.
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(experiment_dict["savepath"])["model_state_dict"])
    # Unwrap DataParallel and keep only the inner model for export.
    model = model.module.model
    torch.save(model, experiment_dict["final_model_path"])
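# --- Added illustration (editor's note, not part of the original script) ---
# Rough shape of the YAML this script expects; keys are inferred from the code
# above, values are placeholders only:
#
# model:
#   name: resnet50
#   pretrained: true
#   num_classes: 2
#   num_classes_mt: 2
#   model_type: single
# savepath: runs/exp1/best.pth
# final_model_path: runs/exp1/final.pt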
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--hyp', type=str, default='configs/baseline_signal.yaml', help='hyperparameters path')
opt = parser.parse_args()
prepare_model(opt)
| 41.68 | 177 | 0.764875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 209 | 0.200576 |
d257693b9fe3b1d9ae0d3ac5245b8412f3de31ea
| 8,259 |
py
|
Python
|
KarpuzTwitterApp/logic.py
|
bounswe/bounswe2018group5
|
d547820bfb3070da3e3935a64429e4c45aef6098
|
[
"MIT"
] | 10 |
2018-03-18T20:33:39.000Z
|
2021-03-03T07:37:33.000Z
|
KarpuzTwitterApp/logic.py
|
bounswe/bounswe2018group5
|
d547820bfb3070da3e3935a64429e4c45aef6098
|
[
"MIT"
] | 254 |
2018-02-07T15:52:26.000Z
|
2019-01-08T04:11:47.000Z
|
KarpuzTwitterApp/logic.py
|
bounswe/bounswe2018group5
|
d547820bfb3070da3e3935a64429e4c45aef6098
|
[
"MIT"
] | 5 |
2018-03-01T13:28:45.000Z
|
2021-05-24T11:07:06.000Z
|
from requests import get
from utils.TwitterService import TwitterService
import tweepy
from decouple import config
def get_tweets_with_location_and_query(search_params):
""" Searches all tweets that are in the given location and contains a query string. """
if 'geocode' not in search_params:
return {'response': False, 'errors': 'Parameter must contain geocode'}
if 'result_type' not in search_params:
return {'response': False, 'errors': 'Parameter must contain result_type'}
if 'q' not in search_params:
        return {'response': False, 'errors': 'Parameter must contain query(q)'}
    if 'count' not in search_params:
        return {'response': False, 'errors': 'Parameter must contain count'}
geocode = search_params['geocode']
result_type = search_params['result_type']
count = search_params['count']
if geocode == '' or len(geocode.split(',')) != 3:
return {
'response': False,
'errors': "GeoCode must include three values lat/long/distance. Distance must include km."
}
lat = geocode.split(',')[0]
long = geocode.split(',')[1]
perimeter = geocode.split(',')[2]
if 'km' != perimeter[-2:]:
return {
'response': False,
'errors': "Distance must include km."
}
try:
perimeter_float = float(perimeter[:-2])
if perimeter_float <= 0:
raise ValueError
except ValueError:
return {
'response': False,
'errors': "Distance must be positive float."
}
try:
float(lat)
float(long)
except ValueError:
return {
'response': False,
'errors': "Lat and Long must be float."
}
if result_type not in ['popular', 'recent', 'mixed']:
return {'response': False, 'errors': "Result type must be in ['popular', 'recent', 'mixed']."}
if type(count) is not int and not count.isdigit():
return {'response': False, 'errors': "Count must be integer."}
else:
count = int(count)
if count not in [25, 50, 100]:
return {'response': False, 'errors': "Count type must be in [25, 50, 100]."}
search_url = '{}1.1/search/tweets.json'.format(TwitterService().get_base_url())
search_response = get(search_url, headers=TwitterService().get_request_headers(), params=search_params)
# If response code different than 200 (means success), then return the error.
if search_response.status_code != 200:
return {'response': False, 'errors': search_response.json()['errors']}
    # Extract the tweets from the Twitter response
tweet_data = search_response.json()
tweets = tweet_data['statuses']
return {'response': True, 'tweets': tweets}
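# --- Added usage sketch (editor's illustration, not part of the original module) ---
# A well-formed parameter dict for get_tweets_with_location_and_query(); the
# coordinates are illustrative only. Kept commented out because the call would
# hit the live Twitter API.
# get_tweets_with_location_and_query({
#     'q': 'karpuz',
#     'geocode': '41.0082,28.9784,10km',
#     'result_type': 'recent',
#     'count': 25,
# })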
def get_followers_of_user(search_params):
search_url = '{}1.1/followers/ids.json'.format(TwitterService().get_base_url())
search_response = get(search_url, headers=TwitterService().get_request_headers(), params=search_params)
if search_response.status_code != 200:
print(search_response.json())
return {'response': False, 'errors': search_response.json()}
data = search_response.json()
followers = set(data['ids'])
return {'response': True, 'followers': followers}
def get_common_followers_of_two_users(search_params):
if 'user_one' not in search_params:
return {'response': False, 'errors': 'Params must contain user_one'}
if 'user_two' not in search_params:
return {'response': False, 'errors': 'Params must contain user_two'}
params = dict()
params['screen_name'] = search_params['user_one']
response_user_one = get_followers_of_user(params)
if not response_user_one['response']:
return {'response': False, 'errors': response_user_one['errors']}
followers_user_one = response_user_one['followers']
params['screen_name'] = search_params['user_two']
response_user_two = get_followers_of_user(params)
if not response_user_two['response']:
return {'response': False, 'errors': response_user_two['errors']}
followers_user_two = response_user_two['followers']
common_followers = followers_user_one.intersection(followers_user_two)
common_follower_details = get_user_details({'user_id': common_followers})
data = {
'users': common_follower_details['data'],
'response': common_follower_details['response'],
'errors': ""
}
return data
def get_user_timeline(screen_name):
user_timeline_url = '{}1.1/statuses/user_timeline.json'.format(TwitterService().get_base_url())
    # Take the screen_name from the caller; the other parameters are set to default values
user_timeline_params = {
'screen_name' : screen_name,
'count': 25,
'exclude_replies': True,
'include_rts': False
}
user_timeline_response = get(user_timeline_url, headers=TwitterService().get_request_headers(), params=user_timeline_params)
    # If the request was not successful
if user_timeline_response.status_code != 200:
return {'response': False, 'errors': user_timeline_response.json()['errors']}
# API directly returns tweets
tweets = user_timeline_response.json()
return {'response': True, 'tweets': tweets}
def get_followings_of_user(search_params):
search_url = '{}1.1/friends/ids.json'.format(TwitterService().get_base_url())
followings_response = get(search_url, headers=TwitterService().get_request_headers(), params=search_params)
# If response code different than 200 (means success), then return the error.
if followings_response.status_code != 200:
return {'response': False, 'errors': followings_response.json()}
data = followings_response.json()
followings = set(data['ids'])
return {'response': True, 'followings': followings}
def get_user_details(search_params):
search_url = '{}1.1/users/lookup.json'.format(TwitterService().get_base_url())
user_details_response = get(search_url, headers=TwitterService().get_request_headers(), params=search_params)
# If response code different than 200 (means success), then return the error.
if user_details_response.status_code != 200:
return {'response': False, 'errors': user_details_response.json()['errors']}
return {'response': True, 'data': user_details_response.json()}
def get_common_followings_of_two_user(search_params):
params={}
params['screen_name'] = search_params['user_one']
response_user_one = get_followings_of_user(params)
if not response_user_one['response']:
return {'response': False, 'errors': response_user_one['errors']}
followings_user_one = response_user_one['followings']
params['screen_name'] = search_params['user_two']
response_user_two = get_followings_of_user(params)
if not response_user_two['response']:
return {'response': False, 'errors': response_user_two['errors']}
followings_user_two = response_user_two['followings']
common_followings = followings_user_one.intersection(followings_user_two)
return get_user_details({'user_id': common_followings})
def search_users(query):
auth = tweepy.OAuthHandler(config("CUSTOMER_KEY"), config("CUSTOMER_SECRET_KEY"))
auth.set_access_token(config("ACCESS_TOKEN"), config("ACCESS_TOKEN_SECRET"))
api = tweepy.API(auth)
search_response = list(api.search_users(query, 18))
print(search_response)
return search_response
def search_tweets(query):
""" Searches all tweets that are in the given location and contains a query string. """
search_url = '{}1.1/search/tweets.json'.format(TwitterService().get_base_url())
search_params = {
'q' : query,
'count' : 20
}
search_response = get(search_url, headers=TwitterService().get_request_headers(), params=search_params)
# If response code different than 200 (means success), then return the error.
if search_response.status_code != 200:
return {'response': False, 'errors': search_response.json()['errors']}
    # Extract the tweets from the Twitter response
tweet_data = search_response.json()
tweets = tweet_data['statuses']
return {'response': True, 'tweets': tweets}
| 36.544248 | 128 | 0.683134 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,455 | 0.297251 |
d258b7f764b2791ef696f1cad34e04a51316c183
| 4,511 |
py
|
Python
|
StepperComms.py
|
MicaelJarniac/StepperComms
|
53336a3733c1b5bb30b3d001b7fe3414f9c3fab9
|
[
"MIT"
] | null | null | null |
StepperComms.py
|
MicaelJarniac/StepperComms
|
53336a3733c1b5bb30b3d001b7fe3414f9c3fab9
|
[
"MIT"
] | null | null | null |
StepperComms.py
|
MicaelJarniac/StepperComms
|
53336a3733c1b5bb30b3d001b7fe3414f9c3fab9
|
[
"MIT"
] | null | null | null |
# Required imports
import os
import sys
import serial
import time
sys.path.append(os.path.dirname(os.path.expanduser('~/projects/Python-Playground/Debug'))) # Update path accordingly
from Debug.Debug import Debug
# Declare debug
debug = Debug(True, 3).prt # Simplifies debugging messages
# Message building blocks
RW_CMD = 0x80 # Validation check
TRANSFER_SIZE_MASK = 0x3f # Masks bits used for transfer size
BYTE_MASK = 0xff # Masks 1 byte
RW_MASK = 0x40 # Bit used for defining if 'read' or 'write' command type
READ = 1 # Command of type 'read'
WRITE = 0 # 'write'
ID_AMOUNT = 38 # Amount of remote variables
# Message size
CMD_ADDR_SIZE = 1
CMD_INFO_SIZE = 1 + CMD_ADDR_SIZE # 1 byte (basic info & transfer size) + 1 byte (address)
CMD_DATA_SIZE = 61 # 61 bytes (data)
CMD_BUFF_SIZE = CMD_INFO_SIZE + CMD_DATA_SIZE # Command info + command data
# Message buffer and related
OutCmdBuffer = [None] * CMD_BUFF_SIZE # Initializes the buffer with given size
# TODO Remove not used var
OutCmdBufferId = 0 # Holds the current buffer position
# Message parameters
CmdType = WRITE # Command type ('read' or 'write')
CmdSize = 0 # size
CmdAddr = 0 # address
CmdData = [None] * CMD_DATA_SIZE # data
# Serial configuration parameters
SerPort = "/dev/serial0" # Device
SerBaud = 9600 # Baud rate
SerTout = 1 # Timeout
SerDelay = 0.05 # Delay between quick writes
# Declare serial
ser = serial.Serial(
port = SerPort, # Serial port configurable above
baudrate = SerBaud, # Baudrate configurable above
bytesize = serial.EIGHTBITS, # Byte size hardcoded 8 bits
parity = serial.PARITY_NONE, # Parity hardcoded no parity
stopbits = serial.STOPBITS_TWO, # Stop bits hardcoded 2 stopbits
timeout = SerTout, # Timeout configurable above
xonxoff = False, # ? hardcoded false
rtscts = False, # ? hardcoded false
dsrdtr = False, # ? hardcoded false
write_timeout = SerTout, # Write timeout configurable above
inter_byte_timeout = None) # ? hardcoded none
# Remote variables
RemoteVars = [None] * ID_AMOUNT # Stores received variables
def BuildMessage():
# Iterates through entire message length
for i in range(CMD_INFO_SIZE + CmdSize):
data = 0
# Builds first byte
if i == 0:
data |= RW_CMD & BYTE_MASK # Validation check bit
data |= RW_MASK & (BYTE_MASK * CmdType) # Command type bit
data |= CmdSize & TRANSFER_SIZE_MASK # Transfer size bits
# Builds second byte
elif i == 1:
data |= CmdAddr & BYTE_MASK # Address byte
# Builds remaining bytes
else:
data |= CmdData[i - CMD_INFO_SIZE] & BYTE_MASK
# Assigns built byte to its position on the message buffer
OutCmdBuffer[i] = data & BYTE_MASK
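# --- Added illustration (editor's note, not part of the original script) ---
# First byte of a 1-byte WRITE command: validation bit 0x80, R/W bit clear,
# transfer size 1 -> 0x81. A READ of the same size would be 0xC1.
assert (RW_CMD | (RW_MASK * WRITE) | (1 & TRANSFER_SIZE_MASK)) & BYTE_MASK == 0x81
assert (RW_CMD | (RW_MASK * READ) | (1 & TRANSFER_SIZE_MASK)) & BYTE_MASK == 0xC1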
def SendMessage():
# Iterates through info bytes + command bytes
for i in range(CMD_INFO_SIZE + CmdSize):
ser.write(serial.to_bytes([OutCmdBuffer[i] & BYTE_MASK])) # Writes current message buffer position to the serial device
debug("{1:02d} - {0:08b}".format(OutCmdBuffer[i], i))
time.sleep(SerDelay)
def ReadMessage():
    # TODO Read message
    # Placeholder body so the module parses until the read path is implemented.
    return None
def GetRemoteVars():
    # The command parameters are module-level globals consumed by BuildMessage().
    global CmdType, CmdSize, CmdAddr
    CmdType = READ
    CmdSize = 0 # TODO Requires actual data size
    for i in range(ID_AMOUNT):
        CmdAddr = i
        BuildMessage()
        SendMessage()
        RemoteVars[i] = ReadMessage()
# Main loop
while True:
# Clear serial in and out buffers
ser.reset_input_buffer()
ser.reset_output_buffer()
# Placeholders
CmdType = WRITE
CmdSize = 1
CmdAddr = 31
CmdData[0] = 0x1
BuildMessage()
SendMessage()
debug("\n")
| 38.228814 | 144 | 0.552871 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,710 | 0.379073 |