Dataset schema (39 columns; ⌀ marks columns that also contain null values). Each record below lists these fields in this order, pipe-separated, with the multi-line `content` field shown as the file's source code.

| column | dtype | values |
| --- | --- | --- |
| hexsha | stringlengths | 40-40 |
| size | int64 | 5-2.06M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3-248 |
| max_stars_repo_name | stringlengths | 5-125 |
| max_stars_repo_head_hexsha | stringlengths | 40-78 |
| max_stars_repo_licenses | listlengths | 1-10 |
| max_stars_count | int64 | 1-191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24-24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24-24 ⌀ |
| max_issues_repo_path | stringlengths | 3-248 |
| max_issues_repo_name | stringlengths | 5-125 |
| max_issues_repo_head_hexsha | stringlengths | 40-78 |
| max_issues_repo_licenses | listlengths | 1-10 |
| max_issues_count | int64 | 1-67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24-24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24-24 ⌀ |
| max_forks_repo_path | stringlengths | 3-248 |
| max_forks_repo_name | stringlengths | 5-125 |
| max_forks_repo_head_hexsha | stringlengths | 40-78 |
| max_forks_repo_licenses | listlengths | 1-10 |
| max_forks_count | int64 | 1-105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24-24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24-24 ⌀ |
| content | stringlengths | 5-2.06M |
| avg_line_length | float64 | 1-1.02M |
| max_line_length | int64 | 3-1.03M |
| alphanum_fraction | float64 | 0-1 |
| count_classes | int64 | 0-1.6M |
| score_classes | float64 | 0-1 |
| count_generators | int64 | 0-651k |
| score_generators | float64 | 0-1 |
| count_decorators | int64 | 0-990k |
| score_decorators | float64 | 0-1 |
| count_async_functions | int64 | 0-235k |
| score_async_functions | float64 | 0-1 |
| count_documentation | int64 | 0-1.04M |
| score_documentation | float64 | 0-1 |
c3591dd7e4fa04185bef35a749e2e0b73d499945 | 837 | py | Python | pocs/tests/test_state_machine.py | zacharyt20/POCS | 8f785eaf27178be7d72106cb82e5400a8b852ba8 | ["MIT"] | 1 | 2019-07-19T10:37:08.000Z | 2019-07-19T10:37:08.000Z | pocs/tests/test_state_machine.py | zacharyt20/POCS | 8f785eaf27178be7d72106cb82e5400a8b852ba8 | ["MIT"] | null | null | null | pocs/tests/test_state_machine.py | zacharyt20/POCS | 8f785eaf27178be7d72106cb82e5400a8b852ba8 | ["MIT"] | null | null | null |
import os
import pytest
import yaml
from pocs.core import POCS
from pocs.observatory import Observatory
from pocs.utils import error
@pytest.fixture
def observatory():
observatory = Observatory(simulator=['all'])
yield observatory
def test_bad_state_machine_file():
with pytest.raises(error.InvalidConfig):
POCS.load_state_table(state_table_name='foo')
def test_load_bad_state(observatory):
pocs = POCS(observatory)
with pytest.raises(error.InvalidConfig):
pocs._load_state('foo')
def test_state_machine_absolute(temp_file):
state_table = POCS.load_state_table()
assert isinstance(state_table, dict)
with open(temp_file, 'w') as f:
f.write(yaml.dump(state_table))
file_path = os.path.abspath(temp_file)
assert POCS.load_state_table(state_table_name=file_path)
| 22.026316 | 60 | 0.746714 | 0 | 0 | 90 | 0.107527 | 106 | 0.126643 | 0 | 0 | 18 | 0.021505 |
c3592d71715ada6f67b45406f9503a1122617882 | 7,033 | py | Python | code/BacDup/scripts/gff_parser.py | JFsanchezherrero/TFM_UOC_AMoya | 74d860d90240d96d800031ff449e21e09bad826c | ["Unlicense"] | 2 | 2021-03-05T10:20:10.000Z | 2021-12-21T10:50:21.000Z | code/BacDup/scripts/gff_parser.py | JFsanchezherrero/TFM_UOC_AMoya | 74d860d90240d96d800031ff449e21e09bad826c | ["Unlicense"] | 7 | 2021-03-03T14:27:50.000Z | 2021-07-21T09:38:27.000Z | code/BacDup/scripts/gff_parser.py | JFsanchezherrero/TFM_UOC_AMoya | 74d860d90240d96d800031ff449e21e09bad826c | ["Unlicense"] | null | null | null |
#!/usr/bin/env python3
##############################################################
## Jose F. Sanchez & Alba Moya ##
## Copyright (C) 2020-2021 ##
##############################################################
'''
Created on 28 oct. 2020
@author: alba
Modified in March 2021
@author: Jose F. Sanchez-Herrero
'''
## useful imports
import sys
import os
import pandas as pd
import numpy as np
import HCGB
from Bio import SeqIO, Seq
from Bio.SeqRecord import SeqRecord
from BCBio import GFF
from BacDup.scripts.functions import columns_annot_table
##################################################
def gff_parser_caller(gff_file, ref_file, output_path, debug):
'''This function calls the actual gff parser
It serves as the entry point either from a module or system call
'''
## set output paths
prot_file = os.path.abspath( os.path.join(output_path, 'proteins.fa'))
csv_file = os.path.abspath( os.path.join(output_path, 'annot_df.csv'))
csv_length = os.path.abspath( os.path.join(output_path, 'length_df.csv'))
list_out_files = [prot_file, csv_file, csv_length]
try:
with open (ref_file) as in_handle:
ref_recs = SeqIO.to_dict(SeqIO.parse(in_handle, "fasta"))
## debug messages
if (debug):
debug_message('GenBank record', 'yellow')
print (ref_recs)
## parse
with open(prot_file, "w") as out_handle:
SeqIO.write(protein_recs(gff_file, ref_recs,
list_out_files, debug=debug), out_handle, "fasta")
## return information
return (list_out_files)
except:
return (False)
############################################################
def protein_recs(gff_file, ref_recs, list_out_files, debug=False):
'''GFF parser to retrieve proteins and annotation
'''
#create an empty dataframe.
columns = columns_annot_table() ## get common column names
annot_df = pd.DataFrame(data=None, columns=columns)
genome_length = pd.DataFrame(data=None, columns=["length"])
with open(gff_file) as in_handle:
##parse the output. Generate SeqRecord and SeqFeatures for predictions
##sort by CDS type. Duplicate genes analysis just needs coding regions to proteins.
limit_info = dict(gff_type=["CDS"])
for rec in GFF.parse(in_handle, limit_info = limit_info, base_dict=ref_recs):
#get genome length for BioCircos plotting
ID = rec.id
genome_length.loc[ID,["length"]]=[len(rec.seq)]
## debug messages
if (debug):
debug_message('GFF record', 'yellow')
print(rec)
for feature in rec.features:
## Debug messages
if (debug):
debug_message('feature: ', 'yellow')
print(feature)
## strand
if feature.strand == -1:
strand = "neg"
else:
strand = "pos"
#we create an ID for each entry
protID = feature.type + "_" + rec.id + "_" + str(feature.location.nofuzzy_start) + "_" + str(feature.location.nofuzzy_end) + "_" + strand
annot_df.loc[protID, ["rec_id", "type", "start", "end", "strand"]] = [ID, feature.type, feature.location.nofuzzy_start, feature.location.nofuzzy_end, strand]
qualif = feature.qualifiers
## Debug messages
if (debug):
debug_message('protID: ' + protID, 'yellow')
debug_message('qualif: ', 'yellow')
print (qualif)
## loop
for keys, values in qualif.items():
#fill the dataframe info
if keys == "Note":
continue
annot_df.loc[protID,[keys]] = [values[0]]
## get gene sequence
gene_seq = Seq.Seq(str(rec.seq[feature.location.nofuzzy_start:feature.location.nofuzzy_end]))
## Debug messages
if (debug):
debug_message('gene_seq: ' + protID, 'yellow')
print (gene_seq)
if feature.type == "CDS":
if feature.strand == -1:
gene_seq = gene_seq.reverse_complement()
# translate genome sequence
table_code = feature.qualifiers["transl_table"][0]
protein_seq = gene_seq.translate(table=table_code, to_stop=False)
# delete STOP symbols
# we set gene_seq.translate to include all stop codons to include
# stop codons in pseudogenes. then, we removed last symbol * for
# each sequence
if protein_seq.endswith("*"):
protein_seq = protein_seq[:-1]
yield(SeqRecord(protein_seq, protID, "", ""))
## print additional information
annot_df.to_csv(list_out_files[1], header=True)
genome_length.to_csv(list_out_files[2], header=True)
#get genome length for BioCircos plotting
#genome_length = pd.DataFrame(data=None, columns=["length"])
#ID = rec.id
#length = len(rec.seq)
#genome_length.loc[ID,["length"]]=[length]
#csv_length = "%s/%s_length.csv" % (output_path, rec.id)
#genome_length.to_csv(csv_length, header=True)
## debug messages
if (debug):
debug_message('annot_df: ', 'yellow')
print(annot_df)
## empty return
return()
#################################################################
def main (gff_file, ref_file, output_folder, debug=False):
#get name
base, ext = os.path.splitext(gff_file)
gff_file = os.path.abspath(gff_file)
#create folder
output_path = HCGB.functions.file_functions.create_folder(output_folder)
if (debug):
print ("## DEBUG:")
print ("base:" , base)
print ("ext:" , ext)
print ()
gff_parser_caller(gff_file, ref_file, output_path, debug)
################################################################################
if __name__ == "__main__":
if len(sys.argv) != 4:
print (__doc__)
print ("## Usage gff_parser")
print ("python %s gff_file ref_fasta_file output_folder\n" %sys.argv[0])
sys.exit()
main(*sys.argv[1:], debug=True)
#main(*sys.argv[1:])
# the debug variable is not mandatory; it has a defined default
# the "=" is used to indicate the default value
| 35.882653 | 173 | 0.515854 | 0 | 0 | 4,147 | 0.589649 | 0 | 0 | 0 | 0 | 2,486 | 0.353476 |
c359a6fbb849b989ceb5b8e12f21bfb4e4e866fd | 1,729 | py | Python | PAL/Cross/client/sources-linux/build_library_zip.py | infosecsecurity/OSPTF | df3f63dc882db6d7e0b7bd80476e9bbc8471ac1f | ["MIT"] | 2 | 2017-11-23T01:07:37.000Z | 2021-06-25T05:03:49.000Z | PAL/Cross/client/sources-linux/build_library_zip.py | infosecsecurity/OSPTF | df3f63dc882db6d7e0b7bd80476e9bbc8471ac1f | ["MIT"] | null | null | null | PAL/Cross/client/sources-linux/build_library_zip.py | infosecsecurity/OSPTF | df3f63dc882db6d7e0b7bd80476e9bbc8471ac1f | ["MIT"] | 1 | 2018-05-22T02:28:43.000Z | 2018-05-22T02:28:43.000Z |
import sys
from distutils.core import setup
import os
from glob import glob
import zipfile
import shutil
sys.path.insert(0, os.path.join('resources','library_patches'))
sys.path.insert(0, os.path.join('..','..','pupy'))
import pp
import additional_imports
import Crypto
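# collect the top-level package name of every module imported so far, skipping built-ins and the __main__ module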
all_dependencies=set([x.split('.')[0] for x,m in sys.modules.iteritems() if not '(built-in)' in str(m) and x != '__main__'])
print "ALLDEPS: ", all_dependencies
zf = zipfile.ZipFile(os.path.join('resources','library.zip'), mode='w', compression=zipfile.ZIP_DEFLATED)
try:
for dep in all_dependencies:
mdep = __import__(dep)
print "DEPENDENCY: ", dep, mdep
if hasattr(mdep, '__path__'):
print('adding package %s'%dep)
path, root = os.path.split(mdep.__path__[0])
for root, dirs, files in os.walk(mdep.__path__[0]):
for f in list(set([x.rsplit('.',1)[0] for x in files])):
found=False
for ext in ('.pyc', '.so', '.pyo', '.py'):
if ext == '.py' and found:
continue
if os.path.exists(os.path.join(root,f+ext)):
zipname = os.path.join(root[len(path)+1:], f.split('.', 1)[0] + ext)
print('adding file : {}'.format(zipname))
zf.write(os.path.join(root, f+ext), zipname)
found=True
else:
if '<memimport>' in mdep.__file__:
continue
_, ext = os.path.splitext(mdep.__file__)
print('adding %s -> %s'%(mdep.__file__, dep+ext))
zf.write(mdep.__file__, dep+ext)
finally:
zf.close()
| 36.020833 | 124 | 0.54251 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.132447 |
c35a27ffefb517296b644e56550ee85f278c4beb | 4,742 | py | Python | conans/test/functional/old/short_paths_test.py | Manu343726/conan | fe322a672307d29f99d2e7bc1c02c45c835028d7 | ["MIT"] | null | null | null | conans/test/functional/old/short_paths_test.py | Manu343726/conan | fe322a672307d29f99d2e7bc1c02c45c835028d7 | ["MIT"] | 1 | 2020-04-18T10:13:37.000Z | 2020-04-18T10:16:37.000Z | conans/test/functional/old/short_paths_test.py | alacasta/conan | 643a9c84fe6dc0cb2f9fcbfc9dc5bd2e789c690e | ["MIT"] | 1 | 2018-09-03T05:04:23.000Z | 2018-09-03T05:04:23.000Z |
import os
import platform
import unittest
from conans.model.ref import ConanFileReference
from conans.test.utils.tools import NO_SETTINGS_PACKAGE_ID, TestClient
class ShortPathsTest(unittest.TestCase):
@unittest.skipUnless(platform.system() == "Windows", "Requires Windows")
def inconsistent_cache_test(self):
conanfile = """
import os
from conans import ConanFile, tools
class TestConan(ConanFile):
name = "test"
version = "1.0"
short_paths = {0}
exports_sources = "source_file.cpp"
def source(self):
for item in os.listdir(self.source_folder):
self.output.info("SOURCE: " + str(item))
def build(self):
tools.save(os.path.join(self.build_folder, "artifact"), "")
for item in os.listdir(self.build_folder):
self.output.info("BUILD: " + str(item))
def package(self):
self.copy("source_file.cpp")
self.copy("artifact")
for item in os.listdir(self.build_folder):
self.output.info("PACKAGE: " + str(item))
"""
client = TestClient()
client.save({"conanfile.py": conanfile.format("False"),
"source_file.cpp": ""})
client.run("create . danimtb/testing")
conan_ref = ConanFileReference("test", "1.0", "danimtb", "testing")
source_folder = os.path.join(client.client_cache.conan(conan_ref), "source")
build_folder = os.path.join(client.client_cache.conan(conan_ref), "build",
NO_SETTINGS_PACKAGE_ID)
package_folder = os.path.join(client.client_cache.conan(conan_ref), "package",
NO_SETTINGS_PACKAGE_ID)
self.assertIn("SOURCE: source_file.cpp", client.out)
self.assertEqual(["source_file.cpp"], os.listdir(source_folder))
self.assertIn("BUILD: source_file.cpp", client.out)
self.assertIn("BUILD: artifact", client.out)
self.assertEqual(
sorted(["artifact", "conanbuildinfo.txt", "conaninfo.txt", "source_file.cpp"]),
sorted(os.listdir(build_folder)))
self.assertIn("PACKAGE: source_file.cpp", client.out)
self.assertIn("PACKAGE: artifact", client.out)
self.assertEqual(
sorted(["artifact", "conaninfo.txt", "conanmanifest.txt", "source_file.cpp"]),
sorted(os.listdir(package_folder)))
client.save({"conanfile.py": conanfile.format("True")})
client.run("create . danimtb/testing")
self.assertIn("SOURCE: source_file.cpp", client.out)
self.assertEqual([".conan_link"], os.listdir(source_folder))
self.assertIn("BUILD: source_file.cpp", client.out)
self.assertIn("BUILD: artifact", client.out)
self.assertEqual([".conan_link"], os.listdir(build_folder))
self.assertIn("PACKAGE: source_file.cpp", client.out)
self.assertIn("PACKAGE: artifact", client.out)
self.assertEqual([".conan_link"], os.listdir(package_folder))
@unittest.skipUnless(platform.system() == "Windows", "Requires Windows")
def package_output_test(self):
conanfile = """
import os
from conans import ConanFile, tools
class TestConan(ConanFile):
name = "test"
version = "1.0"
short_paths = True
"""
client = TestClient()
client.save({"conanfile.py": conanfile,
"source_file.cpp": ""})
client.run("create . danimtb/testing")
self.assertNotIn("test/1.0@danimtb/testing: Package '1' created", client.out)
self.assertIn(
"test/1.0@danimtb/testing: Package '%s' created" % NO_SETTINGS_PACKAGE_ID,
client.out)
# try local flow still works, but no pkg id available
client.run("install .")
client.run("package .")
self.assertIn("conanfile.py (test/1.0@None/None): Package 'package' created", client.out)
# try export-pkg with package folder
client.run("remove test/1.0@danimtb/testing --force")
client.run("export-pkg . test/1.0@danimtb/testing --package-folder package")
self.assertIn(
"test/1.0@danimtb/testing: Package '%s' created" % NO_SETTINGS_PACKAGE_ID,
client.out)
# try export-pkg without package folder
client.run("remove test/1.0@danimtb/testing --force")
client.run("export-pkg . test/1.0@danimtb/testing --install-folder .")
self.assertIn(
"test/1.0@danimtb/testing: Package '%s' created" % NO_SETTINGS_PACKAGE_ID,
client.out)
# try conan get
client.run("get test/1.0@danimtb/testing . -p %s" % NO_SETTINGS_PACKAGE_ID)
self.assertIn("conaninfo.txt", client.out)
self.assertIn("conanmanifest.txt", client.out)
| 41.234783 | 97 | 0.633066 | 4,577 | 0.965205 | 0 | 0 | 4,525 | 0.954239 | 0 | 0 | 2,215 | 0.467102 |
c35a9f8a6f746b1900b91c33a9b1be7d36fdde7f | 4,086 | py | Python | data_collection/json2mongodb.py | kwond2/hedgehogs | 58dbed549a1e78e401fc90c7a7041d9979cfc2e4 | ["MIT"] | 9 | 2018-02-06T19:08:16.000Z | 2022-03-15T13:31:57.000Z | data_collection/json2mongodb.py | kwond2/hedgehogs | 58dbed549a1e78e401fc90c7a7041d9979cfc2e4 | ["MIT"] | 37 | 2018-02-09T21:22:58.000Z | 2021-12-13T19:51:24.000Z | data_collection/json2mongodb.py | kwond2/hedgehogs | 58dbed549a1e78e401fc90c7a7041d9979cfc2e4 | ["MIT"] | 10 | 2018-02-27T20:26:55.000Z | 2021-02-06T02:26:30.000Z |
#-*- coding: utf-8 -*-
# import os
# from optparse import OptionParser
# from pymongo import MongoClient, bulk
# import json
# import collections
# import sys
from import_hedgehogs import *
HOST = '45.55.48.43'
PORT = 27017
DB = 'SEC_EDGAR'
class OrderedDictWithKeyEscaping(collections.OrderedDict):
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
# MongoDB complains when keys contain dots, so we call json.load with
# a modified OrderedDict class which escapes dots in keys on the fly
key = key.replace('.', '<DOT>')
super(OrderedDictWithKeyEscaping, self).__setitem__(key, value)#, dict_setitem=dict.__setitem__)
#super(OrderedDictWithKeyEscaping, self).__setitem__
#super()
def save_to_mongodb(input_file_name, collectionID, usernameID, passwordID):
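# load the JSON filing (with dots in keys escaped) and insert every record into the given MongoDB collection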
with open(input_file_name) as fp:
data = fp.read()
json_ = json.loads(data, encoding='utf-8', object_pairs_hook=OrderedDictWithKeyEscaping)
client = MongoClient(HOST, PORT, username=usernameID, password=passwordID, authMechanism ='SCRAM-SHA-1')
# client.admin.authenticate('jgeorge','123',source= 'SEC_EDGAR')
# print("arguments to function:", input_file_name, usernameID, collectionID)
db = client[DB]
collection = db[collectionID]
# print(type(input_file_name))
# file = open(input_file_name, "r")
# data = json.load(file)
# print(type(data))
# print(type(file))
# data = json_util.loads(file.read())
# print(json_)
for item in json_:
collection.insert_one(item)
# file.close()
def get_collection_name(input_file_name):
data_list = json.load(open(input_file_name))
data = dict(data_list[0])
ticker = "TICKER"
quarter = "QUARTER"
try:
# year = data.get("Document And Entity Information [Abstract]")
# print(year)
year = data.get("Document And Entity Information [Abstract]").get("Document Fiscal Year Focus").get("value")
quarter = data.get("Document And Entity Information [Abstract]").get("Document Fiscal Period Focus").get("value")
ticker = data.get("Document And Entity Information [Abstract]").get("Entity Trading Symbol").get("value")
except AttributeError:
print("[EXCEPT] Issues with ", input_file_namex)
# except AttributeError:
# year = data.get("Document And Entity Information").get("Document Fiscal Year Focus").get("value")
# quarter = data.get("Document And Entity Information").get("Document Fiscal Period Focus").get("value")
# try:
# ticker = data.get("Document And Entity Information [Abstract]").get("Entity Trading Symbol").get("value")
# except:
# ticker = data.get("Document And Entity Information [Abstract]").get("Trading Symbol").get("value")
# try:
# ticker = data.get("Document And Entity Information [Abstract]").get("Entity Trading Symbol").get("value")
# except:
# ticker = data.get("Document And Entity Information [Abstract]").get("Trading Symbol").get("value")
# quarter = data.get("Document And Entity Information [Abstract]").get("Document Fiscal Period Focus").get("value")
return str(ticker) + "_" + str(year) + "_" + str(quarter)
def main():
cli_parser = OptionParser(
usage='usage: %prog <input.json> <username> <password>'
)
(options, args) = cli_parser.parse_args()
# Input file checks
if len(args) < 2:
cli_parser.error("You have to supply 2 arguments, USAGE: .json username")
input_file_name = args[0]
if not os.path.exists(input_file_name):
cli_parser.error("The input file %s you supplied does not exist" % input_file_name)
# JAROD's FUNCTION
collection = get_collection_name(input_file_name)
#collection = (sys.argv[1]).strip('.')
username = sys.argv[2]
password = sys.argv[3]
print("Adding to MongoDB...")
#save_to_mongodb(input_file_name, collection, username)
if __name__ == "__main__":
print("[WARNING] STILL UNDER DEVELOPMENT")
main()
| 41.272727 | 121 | 0.670338 | 506 | 0.123837 | 0 | 0 | 0 | 0 | 0 | 0 | 2,298 | 0.562408 |
c35c0c54bc5945e22f05841e1485001ae7177f54 | 2,984 | py | Python | scripts/convert_to_singlehead.py | Lollipop321/mini-decoder-network | cfdaba579b45cba1d181585e5430178c1dc60049 | ["BSD-3-Clause"] | 1 | 2021-09-18T05:07:38.000Z | 2021-09-18T05:07:38.000Z | scripts/convert_to_singlehead.py | Lollipop321/mini-decoder-network | cfdaba579b45cba1d181585e5430178c1dc60049 | ["BSD-3-Clause"] | null | null | null | scripts/convert_to_singlehead.py | Lollipop321/mini-decoder-network | cfdaba579b45cba1d181585e5430178c1dc60049 | ["BSD-3-Clause"] | 1 | 2021-12-24T13:05:02.000Z | 2021-12-24T13:05:02.000Z |
import torch
import math
import time
import struct
import argparse
import numpy as np
from collections import OrderedDict
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-model', required=True, help="trained model prefix, also include dir, e.g. ../data/model-100")
args = parser.parse_args()
model_path = args.model
checkpoint = torch.load(model_path, map_location='cpu')
assert 'args' in checkpoint
assert 'model' in checkpoint
args = checkpoint['args']
model = checkpoint['model']
checkpoint_new = {}
model_new = {}
e = [0, 0, 0, 0, 0, 0]
d = [0, 0, 0, 0, 0, 0]
for name, w in model.items():
if "decoder" in name:
if "self_attn.in_proj" in name:
layer = eval(name.split(".")[2])
wq, wk, wv = w.chunk(3, dim=0)
assert args.encoder_embed_dim == args.decoder_embed_dim
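# keep a single head's slice (encoder_embed_dim // 8 rows) of the Q, K and V projections; e[layer] selects which head is kept for this decoder layer's self-attention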
model_new[name] = torch.cat([wq[(args.encoder_embed_dim // 8 * e[layer]): (args.encoder_embed_dim // 8 * (e[layer] + 1))],
wk[(args.encoder_embed_dim // 8 * e[layer]): (args.encoder_embed_dim // 8 * (e[layer] + 1))],
wv[(args.encoder_embed_dim // 8 * e[layer]): (args.encoder_embed_dim // 8 * (e[layer] + 1))]], dim=0)
elif "encoder_attn.in_proj" in name:
layer = eval(name.split(".")[2])
wq, wk, wv = w.chunk(3, dim=0)
assert args.encoder_embed_dim == args.decoder_embed_dim
model_new[name] = torch.cat([wq[(args.encoder_embed_dim // 8 * d[layer]): (args.encoder_embed_dim // 8 * (d[layer] + 1))],
wk[(args.encoder_embed_dim // 8 * d[layer]): (args.encoder_embed_dim // 8 * (d[layer] + 1))],
wv[(args.encoder_embed_dim // 8 * d[layer]): (args.encoder_embed_dim // 8 * (d[layer] + 1))]], dim=0)
elif "self_attn.out_proj.weight" in name:
layer = eval(name.split(".")[2])
assert args.encoder_embed_dim == args.decoder_embed_dim
model_new[name] = w[:, (args.encoder_embed_dim // 8 * e[layer]): (args.encoder_embed_dim // 8 * (e[layer] + 1))]
elif "encoder_attn.out_proj.weight" in name:
layer = eval(name.split(".")[2])
assert args.encoder_embed_dim == args.decoder_embed_dim
model_new[name] = w[:, (args.encoder_embed_dim // 8 * d[layer]): (args.encoder_embed_dim // 8 * (d[layer] + 1))]
else:
model_new[name] = w
else:
model_new[name] = w
checkpoint_new['args'] = args
checkpoint_new['args'].arch = "transformer_singlehead_t2t_wmt_en_de"
checkpoint_new['model'] = model_new
# print(checkpoint_new['args'].arch)
torch.save(checkpoint_new, 'checkpoint_singlehead.pt')
print("finished!") | 45.907692 | 146 | 0.560657 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 362 | 0.121314 |
c35c6a6a052a8839d6a0e36986573f0ad73f479f | 3,719 | py | Python | tests/integration/frameworks/test_detectron2_impl.py | francoisserra/BentoML | 213e9e9b39e055286f2649c733907df88e6d2503 | ["Apache-2.0"] | 1 | 2021-06-12T17:04:07.000Z | 2021-06-12T17:04:07.000Z | tests/integration/frameworks/test_detectron2_impl.py | francoisserra/BentoML | 213e9e9b39e055286f2649c733907df88e6d2503 | ["Apache-2.0"] | 4 | 2021-05-16T08:06:25.000Z | 2021-11-13T08:46:36.000Z | tests/integration/frameworks/test_detectron2_impl.py | francoisserra/BentoML | 213e9e9b39e055286f2649c733907df88e6d2503 | ["Apache-2.0"] | null | null | null |
import typing as t
from typing import TYPE_CHECKING
import numpy as np
import torch
import pytest
import imageio
from detectron2 import model_zoo
from detectron2.data import transforms as T
from detectron2.config import get_cfg
from detectron2.modeling import build_model
import bentoml
if TYPE_CHECKING:
from detectron2.config import CfgNode
from bentoml._internal.types import Tag
from bentoml._internal.models import ModelStore
IMAGE_URL: str = "./tests/utils/_static/detectron2_sample.jpg"
def extract_result(raw_result: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]:
pred_instances = raw_result["instances"]
boxes = pred_instances.pred_boxes.to("cpu").tensor.detach().numpy()
scores = pred_instances.scores.to("cpu").detach().numpy()
pred_classes = pred_instances.pred_classes.to("cpu").detach().numpy()
result = {
"boxes": boxes,
"scores": scores,
"classes": pred_classes,
}
return result
def prepare_image(
original_image: "np.ndarray[t.Any, np.dtype[t.Any]]",
) -> "np.ndarray[t.Any, np.dtype[t.Any]]":
"""Mainly to test on COCO dataset"""
_aug = T.ResizeShortestEdge([800, 800], 1333)
image = _aug.get_transform(original_image).apply_image(original_image)
return image.transpose(2, 0, 1)
def detectron_model_and_config() -> t.Tuple[torch.nn.Module, "CfgNode"]:
model_url: str = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
cfg: "CfgNode" = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(model_url))
# set threshold for this model
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_url)
cloned = cfg.clone()
cloned.MODEL.DEVICE = "cpu" # running on CI
model: torch.nn.Module = build_model(cloned)
model.eval()
return model, cfg
@pytest.fixture(scope="module", name="image_array")
def fixture_image_array() -> "np.ndarray[t.Any, np.dtype[t.Any]]":
return np.asarray(imageio.imread(IMAGE_URL))
def save_procedure(metadata: t.Dict[str, t.Any], _modelstore: "ModelStore") -> "Tag":
model, config = detectron_model_and_config()
tag_info = bentoml.detectron.save(
"test_detectron2_model",
model,
model_config=config,
metadata=metadata,
model_store=_modelstore,
)
return tag_info
@pytest.mark.parametrize("metadata", [{"acc": 0.876}])
def test_detectron2_save_load(
metadata: t.Dict[str, t.Any],
image_array: "np.ndarray[t.Any, np.dtype[t.Any]]",
modelstore: "ModelStore",
) -> None:
tag = save_procedure(metadata, _modelstore=modelstore)
_model = bentoml.models.get(tag, _model_store=modelstore)
assert _model.info.metadata is not None
detectron_loaded = bentoml.detectron.load(
_model.tag,
device="cpu",
model_store=modelstore,
)
assert next(detectron_loaded.parameters()).device.type == "cpu"
image = prepare_image(image_array)
image = torch.as_tensor(image)
input_data = [{"image": image}]
raw_result = detectron_loaded(input_data)
result = extract_result(raw_result[0])
assert result["scores"][0] > 0.9
def test_detectron2_setup_run_batch(
image_array: "np.ndarray[t.Any, np.dtype[t.Any]]", modelstore: "ModelStore"
) -> None:
tag = save_procedure({}, _modelstore=modelstore)
runner = bentoml.detectron.load_runner(tag, model_store=modelstore)
assert tag in runner.required_models
assert runner.num_concurrency_per_replica == 1
assert runner.num_replica == 1
image = torch.as_tensor(prepare_image(image_array))
res = runner.run_batch(image)
result = extract_result(res[0])
assert result["boxes"] is not None
| 30.483607 | 85 | 0.705835 | 0 | 0 | 0 | 0 | 982 | 0.264049 | 0 | 0 | 565 | 0.151923 |
c35c97b552a6619198e65898ccb72250776063d5 | 1,867 | py | Python | molecule/default/tests/test_default.py | escalate/ansible-influxdb-docker | bbb2c259bd1de3c4c40322103a05894494af7104 | ["MIT"] | null | null | null | molecule/default/tests/test_default.py | escalate/ansible-influxdb-docker | bbb2c259bd1de3c4c40322103a05894494af7104 | ["MIT"] | null | null | null | molecule/default/tests/test_default.py | escalate/ansible-influxdb-docker | bbb2c259bd1de3c4c40322103a05894494af7104 | ["MIT"] | null | null | null |
"""Role testing files using testinfra"""
def test_config_directory(host):
"""Check config directory"""
f = host.file("/etc/influxdb")
assert f.is_directory
assert f.user == "influxdb"
assert f.group == "root"
assert f.mode == 0o775
def test_data_directory(host):
"""Check data directory"""
d = host.file("/var/lib/influxdb")
assert d.is_directory
assert d.user == "influxdb"
assert d.group == "root"
assert d.mode == 0o700
def test_backup_directory(host):
"""Check backup directory"""
b = host.file("/var/backups/influxdb")
assert b.is_directory
assert b.user == "influxdb"
assert b.group == "root"
assert b.mode == 0o775
def test_influxdb_service(host):
"""Check InfluxDB service"""
s = host.service("influxdb")
assert s.is_running
assert s.is_enabled
def test_influxdb_docker_container(host):
"""Check InfluxDB docker container"""
d = host.docker("influxdb.service").inspect()
assert d["HostConfig"]["Memory"] == 1073741824
assert d["Config"]["Image"] == "influxdb:latest"
assert d["Config"]["Labels"]["maintainer"] == "[email protected]"
assert "INFLUXD_REPORTING_DISABLED=true" in d["Config"]["Env"]
assert "internal" in d["NetworkSettings"]["Networks"]
assert \
"influxdb" in d["NetworkSettings"]["Networks"]["internal"]["Aliases"]
def test_backup(host):
"""Check if the backup runs successfully"""
cmd = host.run("/usr/local/bin/backup-influxdb.sh")
assert cmd.succeeded
def test_backup_cron_job(host):
"""Check backup cron job"""
f = host.file("/var/spool/cron/crontabs/root")
assert "/usr/local/bin/backup-influxdb.sh" in f.content_string
def test_restore(host):
"""Check if the restore runs successfully"""
cmd = host.run("/usr/local/bin/restore-influxdb.sh")
assert cmd.succeeded
| 28.287879 | 77 | 0.664167 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 806 | 0.431709 |
c35eb72d85ca1063b3957ca321301a14a1c4baba | 3,847 | py | Python | ZIP-v0.01/Serial_to_MQTT.py | JittoThomas/IOT | 994fa25087d14e33c2d82b9c9d526f65823b6fa8 | ["MIT"] | null | null | null | ZIP-v0.01/Serial_to_MQTT.py | JittoThomas/IOT | 994fa25087d14e33c2d82b9c9d526f65823b6fa8 | ["MIT"] | null | null | null | ZIP-v0.01/Serial_to_MQTT.py | JittoThomas/IOT | 994fa25087d14e33c2d82b9c9d526f65823b6fa8 | ["MIT"] | null | null | null |
#!/usr/bin/env python
import cayenne.client, datetime, time, serial
# import random
#Delay Start
#print "Time now = ", datetime.datetime.now().strftime("%H-%M-%S")
#time.sleep(60)
#print "Starting now = ", datetime.datetime.now().strftime("%H-%M-%S")
# Cayenne authentication info. This should be obtained from the Cayenne Dashboard.
MQTT_USERNAME = "6375a470-cff9-11e7-86d0-83752e057225"
MQTT_PASSWORD = "26e1dc13f900da7b30b24cad4b320f9bc6dd0d78"
MQTT_CLIENT_ID = "157d1d10-69dd-11e8-84d1-4d9372e87a68"
# Other settings that seem to be embedded in Cayenne's libraries
# MQTT_URL = "mqtt.mydevices.com"
# MQTT_PORT = "1883"
# Default location of serial port on Pi models 1 and 2
#SERIAL_PORT = "/dev/ttyAMA0"
# Default location of serial port on Pi models 3 and Zero
SERIAL_PORT = "/dev/ttyS0"
# How often shall we write values to Cayenne? (Seconds + 1)
interval = 5
#This sets up the serial port specified above. baud rate is the bits per second timeout seconds
#port = serial.Serial(SERIAL_PORT, baudrate=2400, timeout=5)
#This sets up the serial port specified above. baud rate. This WAITS for any cr/lf (new blob of data from picaxe)
port = serial.Serial(SERIAL_PORT, baudrate=2400)
# The callback for when a message is received from Cayenne.
def on_message(message):
print("def on_message reply back, message received: " + str(message))
# If there is an error processing the message return an error string, otherwise returns nothing.
client = cayenne.client.CayenneMQTTClient()
client.on_message = on_message
client.begin(MQTT_USERNAME, MQTT_PASSWORD, MQTT_CLIENT_ID)
#Predefine Data Packet objects for python prior to trying to look for them :)
node = ":01"
channel = "A"
data = 123
cs = 0
while True:
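# read one comma-separated "node,channel,data,cs" record from the serial port and forward good readings to Cayenne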
try:
rcv = port.readline() #read buffer until cr/lf
#print("Serial Readline Data = " + rcv)
rcv = rcv.rstrip("\r\n")
node,channel,data,cs = rcv.split(",")
#Test Point print("rcv.split Data = : " + node + channel + data + cs)
if cs == '0':
#if cs = Check Sum is good = 0 then do the following
if channel == 'A':
data = float(data)/1
client.virtualWrite(1, data, "analog_sensor", "null")
client.loop()
if channel == 'B':
data = float(data)/1
client.virtualWrite(2, data, "analog_sensor", "null")
client.loop()
if channel == 'C':
data = float(data)/1
client.virtualWrite(3, data, "analog_sensor", "null")
client.loop()
if channel == 'D':
data = float(data)/1
client.virtualWrite(4, data, "analog_sensor", "null")
client.loop()
if channel == 'E':
data = float(data)/1
client.virtualWrite(5, data, "analog_sensor", "null")
client.loop()
if channel == 'F':
data = float(data)/1
client.virtualWrite(6, data, "analog_sensor", "null")
client.loop()
if channel == 'G':
data = float(data)/1
client.virtualWrite(7, data, "analog_sensor", "null")
client.loop()
if channel == 'H':
data = float(data)/1
client.virtualWrite(8, data, "analog_sensor", "null")
client.loop()
if channel == 'I':
data = float(data)/1
client.virtualWrite(9, data, "analog_sensor", "null")
client.loop()
if channel == 'J':
data = float(data)/1
client.virtualWrite(10, data, "analog_sensor", "null")
client.loop()
if channel == 'K':
data = float(data)/1
client.virtualWrite(11, data, "analog_sensor", "null")
client.loop()
if channel == 'L':
data = float(data)/1
client.virtualWrite(12, data, "analog_sensor", "null")
client.loop()
except ValueError:
#if Data Packet corrupt or malformed then...
print("Data Packet corrupt or malformed")
| 31.276423 | 114 | 0.641539 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,851 | 0.481154 |
c35efbe149c76dcc538b4f5467731ccd578e9db2 | 1,841 | py | Python | test/test_slimta_queue_proxy.py | nanojob/python-slimta | 70b9c633756a56afaf1fdd53c5ead6d0001036e7 | ["MIT"] | 141 | 2015-01-24T23:59:18.000Z | 2022-01-30T16:36:37.000Z | test/test_slimta_queue_proxy.py | nanojob/python-slimta | 70b9c633756a56afaf1fdd53c5ead6d0001036e7 | ["MIT"] | 106 | 2015-01-13T22:49:07.000Z | 2021-02-17T15:14:11.000Z | test/test_slimta_queue_proxy.py | nanojob/python-slimta | 70b9c633756a56afaf1fdd53c5ead6d0001036e7 | ["MIT"] | 43 | 2015-07-29T14:55:09.000Z | 2021-09-24T22:30:38.000Z |
import unittest
from mox3.mox import MoxTestBase, IsA
from slimta.queue.proxy import ProxyQueue
from slimta.smtp.reply import Reply
from slimta.relay import Relay, TransientRelayError, PermanentRelayError
from slimta.envelope import Envelope
class TestProxyQueue(MoxTestBase, unittest.TestCase):
def setUp(self):
super(TestProxyQueue, self).setUp()
self.relay = self.mox.CreateMock(Relay)
self.env = Envelope('[email protected]', ['[email protected]'])
def test_enqueue(self):
self.relay._attempt(self.env, 0)
self.mox.ReplayAll()
q = ProxyQueue(self.relay)
ret = q.enqueue(self.env)
self.assertEqual(1, len(ret))
self.assertEqual(2, len(ret[0]))
self.assertEqual(self.env, ret[0][0])
self.assertRegexpMatches(ret[0][1], r'[0-9a-fA-F]{32}')
def test_enqueue_relayerror(self):
err = PermanentRelayError('msg failure', Reply('550', 'Not Ok'))
self.relay._attempt(self.env, 0).AndRaise(err)
self.mox.ReplayAll()
q = ProxyQueue(self.relay)
ret = q.enqueue(self.env)
self.assertEqual(1, len(ret))
self.assertEqual(2, len(ret[0]))
self.assertEqual(self.env, ret[0][0])
self.assertEqual(err, ret[0][1])
def test_start_noop(self):
self.mox.ReplayAll()
q = ProxyQueue(self.relay)
q.start()
def test_kill_noop(self):
self.mox.ReplayAll()
q = ProxyQueue(self.relay)
q.kill()
def test_flush_noop(self):
self.mox.ReplayAll()
q = ProxyQueue(self.relay)
q.flush()
def test_add_policy_error(self):
self.mox.ReplayAll()
q = ProxyQueue(self.relay)
with self.assertRaises(NotImplementedError):
q.add_policy('test')
# vim:et:fdm=marker:sts=4:sw=4:ts=4
| 29.693548 | 72 | 0.634438 | 1,556 | 0.845193 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.066812 |
c3601f9d19e300648c3ba875a58c68aa35eadc52 | 1,912 | py | Python | tests/potential/EamPotential/Al__born_exp_fs/test____init__.py | eragasa/pypospack | 21cdecaf3b05c87acc532d992be2c04d85bfbc22 | ["MIT"] | 4 | 2018-01-18T19:59:56.000Z | 2020-08-25T11:56:52.000Z | tests/potential/EamPotential/Al__born_exp_fs/test____init__.py | eragasa/pypospack | 21cdecaf3b05c87acc532d992be2c04d85bfbc22 | ["MIT"] | 1 | 2018-04-22T23:02:13.000Z | 2018-04-22T23:02:13.000Z | tests/potential/EamPotential/Al__born_exp_fs/test____init__.py | eragasa/pypospack | 21cdecaf3b05c87acc532d992be2c04d85bfbc22 | ["MIT"] | 1 | 2019-09-14T07:04:42.000Z | 2019-09-14T07:04:42.000Z |
import pytest
from pypospack.potential import EamPotential
symbols = ['Al']
func_pair_name = "bornmayer"
func_density_name = "eam_dens_exp"
func_embedding_name = "fs"
expected_parameter_names_pair_potential = []
expected_parameter_names_density_function = []
expected_parameter_names_embedding_function = []
expected_parameter_names = [
'p_AlAl_phi0', 'p_AlAl_gamma', 'p_AlAl_r0',
'd_Al_rho0', 'd_Al_beta', 'd_Al_r0',
'e_Al_F0', 'e_Al_p', 'e_Al_q', 'e_Al_F1', 'e_Al_rho0']
print(80*'-')
print("func_pair_name={}".format(func_pair_name))
print("func_density_name={}".format(func_density_name))
print("func_embedding_name={}".format(func_density_name))
print(80*'-')
def test____init__():
obj_pot = EamPotential(
symbols=symbols,
func_pair=func_pair_name,
func_density=func_density_name,
func_embedding=func_embedding_name)
assert type(obj_pot) is EamPotential
assert obj_pot.potential_type == 'eam'
assert type(obj_pot.symbols) is list
assert len(obj_pot.symbols) == len(symbols)
for i,v in enumerate(symbols):
obj_pot.symbols[i] = v
assert obj_pot.is_charge is False
assert type(obj_pot.parameter_names) is list
assert len(obj_pot.parameter_names) == len(expected_parameter_names)
for i,v in enumerate(expected_parameter_names):
obj_pot.parameter_names = v
if __name__ == "__main__":
# CONSTRUCTOR TEST
pot = EamPotential(symbols=symbols,
func_pair=func_pair_name,
func_density=func_density_name,
func_embedding=func_embedding_name)
print('pot.potential_type == {}'.format(\
pot.potential_type))
print('pot.symbols == {}'.format(\
pot.symbols))
print('pot.parameter_names == {}'.format(\
pot.parameter_names))
print('pot.is_charge == {}'.format(\
pot.is_charge))
| 30.83871 | 72 | 0.684623 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 344 | 0.179916 |
c36035176be4720b8166b5477e11e4a52ab157d4 | 417 | py | Python | backend/bin/main/enrichers/enricher.py | anjo-ba/PCAP-Analyzer | ccb13caba9c0c05a7643e63c57575b56ab1233cb | ["MIT"] | 4 | 2019-03-29T08:45:36.000Z | 2021-11-11T00:49:36.000Z | backend/bin/main/enrichers/enricher.py | anjo-ba/PCAP-Analyzer | ccb13caba9c0c05a7643e63c57575b56ab1233cb | ["MIT"] | 9 | 2019-04-03T18:10:19.000Z | 2020-08-16T12:13:34.000Z | backend/bin/main/enrichers/enricher.py | anjo-ba/PCAP-Analyzer | ccb13caba9c0c05a7643e63c57575b56ab1233cb | ["MIT"] | 4 | 2019-05-09T15:33:23.000Z | 2022-02-06T08:01:23.000Z |
from typing import Dict
from main.helpers.print_helper import PrintHelper
class Enricher(object):
def __init__(self, enricher_type: str, header: str) -> None:
self.enricher_type = enricher_type
self.header = header
def get_information(self, packet: Dict[str, str], information_dict) -> None:
pass
def print(self) -> None:
PrintHelper.print_nothing(self.enricher_type)
| 26.0625 | 80 | 0.70024 | 339 | 0.81295 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c360b0127afead19c24d728369419544803b4819 | 2,191 | py | Python | Modulo_5/proyecto/presentacion/form_ubicacion/formAUbicacion_designer.py | AutodidactaMx/cocid_python | 11628f465ff362807a692c79ede26bf30dd8e26a | ["MIT"] | null | null | null | Modulo_5/proyecto/presentacion/form_ubicacion/formAUbicacion_designer.py | AutodidactaMx/cocid_python | 11628f465ff362807a692c79ede26bf30dd8e26a | ["MIT"] | null | null | null | Modulo_5/proyecto/presentacion/form_ubicacion/formAUbicacion_designer.py | AutodidactaMx/cocid_python | 11628f465ff362807a692c79ede26bf30dd8e26a | ["MIT"] | 1 | 2022-03-04T00:57:18.000Z | 2022-03-04T00:57:18.000Z |
import tkinter as tk
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from pandastable import Table
import util.generic as utl
class FormUbicacionDesigner(tk.Toplevel):
def __init__(self):
super().__init__()
def initialize_component(self):
self.config_window()
self.framePrincipal()
self.framePrincipalPanel1()
self.framePrincipalPanel2()
self.tablaEstadisticosUbicacion()
self.graficaUbicacion()
def config_window(self):
self.title('Analisis de variable de ubicación')
w, h = 1400,500
self.geometry("%dx%d+0+0" % (w, h))
self.config(bg='black')
utl.centrar_ventana(self,w,h)
def framePrincipal(self):
self.frame_zona_principal = tk.Frame(self, bd=0, relief=tk.SOLID, bg='white', width=100, height=100)
self.frame_zona_principal.pack(side="top",fill=tk.BOTH )
def framePrincipalPanel1(self):
self.frame_zona_principal_panel1 = tk.Frame(self.frame_zona_principal, bd=1, relief=tk.SOLID, bg='white', width=100, height=100)
self.frame_zona_principal_panel1.pack(side="left",fill=tk.BOTH, expand="yes")
def framePrincipalPanel2(self):
self.frame_zona_principal_panel2 = tk.Frame(self.frame_zona_principal, bd=1, relief=tk.SOLID, bg='white', width=100, height=100)
self.frame_zona_principal_panel2.pack(side="left",fill=tk.BOTH, expand="yes")
def tablaEstadisticosUbicacion(self):
self.tablaDatosUbicacion = Table(self.frame_zona_principal_panel1, showtoolbar=False, showstatusbar=False, rows=8,width=500)
self.tablaDatosUbicacion.show()
def graficaUbicacion(self):
self.figure_ubicacion = plt.Figure(figsize=(50,10))
self.canvas_figure_ubicacion = FigureCanvasTkAgg(self.figure_ubicacion, self.frame_zona_principal_panel2)
self.canvas_figure_ubicacion.get_tk_widget().pack(side=tk.LEFT, fill=tk.X, pady=20)
| 41.339623 | 137 | 0.645824 | 1,968 | 0.89781 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.046533 |
c362e5ae43a55d318ef4b490ee0fc9d950ff6b12 | 138 | py | Python | injector/__init__.py | vnepomuceno/kafka-events-injector | 66fb490b1a3d5f06737689005c639b1785a6bb37 | ["MIT"] | null | null | null | injector/__init__.py | vnepomuceno/kafka-events-injector | 66fb490b1a3d5f06737689005c639b1785a6bb37 | ["MIT"] | null | null | null | injector/__init__.py | vnepomuceno/kafka-events-injector | 66fb490b1a3d5f06737689005c639b1785a6bb37 | ["MIT"] | null | null | null |
import coloredlogs
import logging

coloredlogs.install()
custom_logger = logging.getLogger(__name__)
coloredlogs.install(level="INFO", logger=custom_logger)
| 23 | 55 | 0.826087 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.043478 |
c3636918f6e548937ced74b698a4a4c3213be188 | 4,008 | py | Python | setup.py | Lcvette/qtpyvcp | 4143a4a4e1f557f7d0c8998c886b4a254f0be60b | ["BSD-3-Clause-LBNL", "MIT"] | 71 | 2018-12-13T20:31:18.000Z | 2022-03-26T08:44:22.000Z | setup.py | Lcvette/qtpyvcp | 4143a4a4e1f557f7d0c8998c886b4a254f0be60b | ["BSD-3-Clause-LBNL", "MIT"] | 78 | 2019-01-10T18:16:33.000Z | 2022-03-18T19:30:49.000Z | setup.py | Lcvette/qtpyvcp | 4143a4a4e1f557f7d0c8998c886b4a254f0be60b | ["BSD-3-Clause-LBNL", "MIT"] | 38 | 2018-10-10T19:02:26.000Z | 2022-01-30T04:38:14.000Z |
import os
import versioneer
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
if os.getenv('DEB_BUILD') == 'true' or os.getenv('USER') == 'root':
"/usr/share/doc/linuxcnc/examples/sample-configs/sim"
# list of (destination, source_file) tuples
DATA_FILES = [
('/usr/lib/x86_64-linux-gnu/qt5/plugins/designer/', [
'pyqt5designer/Qt5.7.1-64bit/libpyqt5_py2.so',
'pyqt5designer/Qt5.7.1-64bit/libpyqt5_py3.so']),
]
# list of (destination, source_dir) tuples
DATA_DIRS = [
('/usr/share/doc/linuxcnc/examples/sample-configs/sim', 'linuxcnc/configs'),
]
if os.getenv('USER') == 'root':
try:
os.rename('/usr/lib/x86_64-linux-gnu/qt5/plugins/designer/libpyqt5.so',
'/usr/lib/x86_64-linux-gnu/qt5/plugins/designer/libpyqt5.so.old')
except:
pass
else:
# list of (destination, source_file) tuples
DATA_FILES = [
('~/', ['scripts/.xsessionrc',]),
]
# list of (destination, source_dir) tuples
DATA_DIRS = [
('~/linuxcnc/configs/sim.qtpyvcp', 'linuxcnc/configs/sim.qtpyvcp'),
('~/linuxcnc/nc_files/qtpyvcp', 'linuxcnc/nc_files/qtpyvcp'),
# ('~/linuxcnc/vcps', 'examples'),
]
def data_files_from_dirs(data_dirs):
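# walk each source directory and build (destination, [files]) tuples that mirror its relative layout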
data_files = []
for dest_dir, source_dir in data_dirs:
dest_dir = os.path.expanduser(dest_dir)
for root, dirs, files in os.walk(source_dir):
root_files = [os.path.join(root, i) for i in files]
dest = os.path.join(dest_dir, os.path.relpath(root, source_dir))
data_files.append((dest, root_files))
return data_files
data_files = [(os.path.expanduser(dest), src_list) for dest, src_list in DATA_FILES]
data_files.extend(data_files_from_dirs(DATA_DIRS))
setup(
name="qtpyvcp",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
author="Kurt Jacobson",
author_email="[email protected]",
description="Qt and Python based Virtual Control Panel framework for LinuxCNC.",
long_description=long_description,
long_description_content_type="text/markdown",
license="GNU General Public License v2 (GPLv2)",
url="https://github.com/kcjengr/qtpyvcp",
download_url="https://github.com/kcjengr/qtpyvcp/archive/master.zip",
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Manufacturing',
'Intended Audience :: End Users/Desktop',
'Topic :: Software Development :: Widget Sets',
'Topic :: Software Development :: User Interfaces',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Programming Language :: Python :: 2.7',
],
packages=find_packages(),
data_files=data_files,
include_package_data=True,
install_requires=[
'docopt',
'qtpy',
'pyudev',
'psutil',
'HiYaPyCo',
'pyopengl',
'vtk',
'pyqtgraph',
'oyaml',
'simpleeval',
],
entry_points={
'console_scripts': [
'qtpyvcp=qtpyvcp.app:main',
'qcompile=qtpyvcp.tools.qcompile:main',
'editvcp=qtpyvcp.tools.editvcp:main',
# example VCPs
'mini=examples.mini:main',
'brender=examples.brender:main',
# test VCPs
'vtk_test=video_tests.vtk_test:main',
'opengl_test=video_tests.opengl_test:main',
'qtpyvcp_test=video_tests.qtpyvcp_test:main',
],
'qtpyvcp.example_vcp': [
'mini=examples.mini',
'brender=examples.brender',
'actions=examples.actions',
],
'qtpyvcp.test_vcp': [
'vtk_test=video_tests.vtk_test',
'opengl_test=video_tests.opengl_test',
'qtpyvcp_test=video_tests.qtpyvcp_test',
],
},
)
| 31.809524 | 87 | 0.611776 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,010 | 0.501497 |
c363cccc0f9ae4f989abcc27c186813cc42c4212 | 4,366 | py | Python | hidparser/UsagePage.py | NZSmartie/PyHIDParser | a2758929c82a4316a665a779b9a391740103b318 | ["MIT"] | 22 | 2016-04-28T10:29:11.000Z | 2022-02-02T17:30:08.000Z | hidparser/UsagePage.py | NZSmartie/PyHIDParser | a2758929c82a4316a665a779b9a391740103b318 | ["MIT"] | 12 | 2016-04-24T03:29:00.000Z | 2018-11-26T22:34:37.000Z | hidparser/UsagePage.py | NZSmartie/PyHIDParser | a2758929c82a4316a665a779b9a391740103b318 | ["MIT"] | 5 | 2017-02-21T13:01:25.000Z | 2021-10-04T07:13:53.000Z |
from enum import Enum as _Enum
class UsageType(_Enum):
CONTROL_LINEAR = ()
CONTROL_ON_OFF = ()
CONTROL_MOMENTARY = ()
CONTROL_ONE_SHOT = ()
CONTROL_RE_TRIGGER = ()
DATA_SELECTOR = ()
DATA_STATIC_VALUE = ()
DATA_STATIC_FLAG = ()
DATA_DYNAMIC_VALUE = ()
DATA_DYNAMIC_FLAG = ()
COLLECTION_NAMED_ARRAY = ()
COLLECTION_APPLICATION = ()
COLLECTION_LOGICAL = ()
COLLECTION_PHYSICAL = ()
COLLECTION_USAGE_SWITCH = ()
COLLECTION_USAGE_MODIFIER = ()
def __new__(cls):
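# auto-number members in declaration order instead of relying on explicit values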
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
@classmethod
def control_usage_types(cls):
return (
UsageType.CONTROL_LINEAR,
UsageType.CONTROL_ON_OFF,
UsageType.CONTROL_MOMENTARY,
UsageType.CONTROL_ONE_SHOT,
UsageType.CONTROL_RE_TRIGGER,
)
@classmethod
def data_usage_types(cls):
return (
UsageType.DATA_SELECTOR,
UsageType.DATA_STATIC_VALUE,
UsageType.DATA_STATIC_FLAG,
UsageType.DATA_DYNAMIC_VALUE,
UsageType.DATA_DYNAMIC_FLAG,
)
@classmethod
def collection_usage_types(cls):
return (
UsageType.COLLECTION_NAMED_ARRAY,
# UsageType.collection_application, # Commented out as it is used for top level collections only
UsageType.COLLECTION_LOGICAL,
UsageType.COLLECTION_PHYSICAL,
UsageType.COLLECTION_USAGE_SWITCH,
UsageType.COLLECTION_USAGE_MODIFIER
)
class Usage:
def __init__(self, value, usage_types):
if not isinstance(usage_types, list):
usage_types = [usage_types,]
for usage_type in usage_types:
if not isinstance(usage_type, UsageType):
raise ValueError("usage_type {} is not instance of {}".format(
usage_type.__class__.__name__,
UsageType.__name__)
)
self.value = value
self.usage_types = usage_types
class UsagePage(_Enum):
def __init__(self, item):
if not isinstance(item, Usage):
raise ValueError("{} is not a valid {}".format(item.__name__, self.__class__.__name__))
self.index = item.value & 0xFFFF
self.usage = item
self.usage_types = item.usage_types
@classmethod
def get_usage(cls, value):
for key, member in cls.__members__.items():
if not isinstance(member.value, Usage):
continue
if member.index == value:
return member
raise ValueError("{} is not a valid {}".format(value, cls.__name__))
@classmethod
def _get_usage_page_index(cls):
raise NotImplementedError()
@classmethod
def find_usage_page(cls, value):
if not hasattr(cls, "usage_page_map"):
cls.usage_page_map = {usage_page._get_usage_page_index(): usage_page for usage_page in cls.__subclasses__()}
if value in cls.usage_page_map.keys():
return cls.usage_page_map[value]
if value not in range(0xFF00,0xFFFF):
raise ValueError("Reserved or missing usage page 0x{:04X}".format(value))
raise NotImplementedError("Yet to support Vendor defined usage pages")
class UsageRange:
def __init__(self, usage_page: UsagePage.__class__ = None, minimum = None, maximum = None):
self.usage_page = usage_page
self.minimum = minimum
self.maximum = maximum
def get_range(self):
if self.minimum is None or self.maximum is None:
raise ValueError("Usage Minimum and Usage Maximum must be set")
if isinstance(self.minimum, UsagePage):
if not isinstance(self.maximum, UsagePage):
raise ValueError("UsageRange type mismatch in minimum and maximum usages")
self.usage_page = self.minimum.__class__
return [self.usage_page.get_usage(value) for value in range(self.minimum.index & 0xFFFF, (self.maximum.index & 0xFFFF) + 1)]
if self.minimum & ~0xFFFF:
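# extended usage values carry the usage page in the upper 16 bits; resolve that page before expanding the range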
self.usage_page = UsagePage.find_usage_page((self.minimum & ~0xFFFF) >> 16)
return [self.usage_page.get_usage(value) for value in range(self.minimum & 0xFFFF, (self.maximum & 0xFFFF) + 1)]
| 35.209677 | 136 | 0.633074 | 4,324 | 0.99038 | 0 | 0 | 1,875 | 0.429455 | 0 | 0 | 378 | 0.086578 |
c3645b451a58c6438e6127bf646d7ebd0d06fa74 | 1,505 | py | Python | sandbox/lib/jumpscale/JumpscaleLibs/tools/legal_contracts/LegalDoc.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | ["Apache-2.0"] | 2 | 2019-05-09T07:21:25.000Z | 2019-08-05T06:37:53.000Z | sandbox/lib/jumpscale/JumpscaleLibs/tools/legal_contracts/LegalDoc.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | ["Apache-2.0"] | 664 | 2018-12-19T12:43:44.000Z | 2019-08-23T04:24:42.000Z | sandbox/lib/jumpscale/JumpscaleLibs/tools/legal_contracts/LegalDoc.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | ["Apache-2.0"] | 7 | 2019-05-03T07:14:37.000Z | 2019-08-05T12:36:52.000Z |
from reportlab.lib.pagesizes import A4
from reportlab.lib.units import cm
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus import BaseDocTemplate, Frame, PageTemplate, Paragraph
class LegalDoc:
def __init__(self, path):
self.path = path
styles = getSampleStyleSheet()
self._styleN = styles["Normal"]
self._styleH1 = styles["Heading1"]
self._styleH2 = styles["Heading2"]
self.page = 0
doc = BaseDocTemplate(self.path, pagesize=A4)
frame = Frame(doc.leftMargin, doc.bottomMargin, doc.width, doc.height - 2 * cm, id="normal")
template = PageTemplate(id="legal_doc", frames=frame, onPage=self.header_footer)
doc.addPageTemplates([template])
text = []
for i in range(111):
text.append(Paragraph("This is line %d." % i, self._styleN))
doc.build(text)
def header_footer(self, canvas, doc):
self.page += 1
canvas.saveState()
P = Paragraph("This is a multi-line header. It goes on every page. " * 2, self._styleN)
w, h = P.wrap(doc.width, doc.topMargin)
P.drawOn(canvas, doc.leftMargin, doc.height + doc.topMargin - h)
canvas.restoreState()
canvas.saveState()
P = Paragraph("This is a multi-line footer:%s. It goes on every page. " % self.page, self._styleN)
w, h = P.wrap(doc.width, doc.bottomMargin)
P.drawOn(canvas, doc.leftMargin, h)
canvas.restoreState()
| 35 | 108 | 0.639203 | 1,296 | 0.86113 | 0 | 0 | 0 | 0 | 0 | 0 | 180 | 0.119601 |
c3681201a4fff8ff597af63f6abe3f4d4fb7b0ce | 5,627 | py | Python | tests/outcome/test_outcome_models.py | ConnorBarnhill/kf-api-dataservice | 547df467a307788882469a25c947a14965a26336 | ["Apache-2.0"] | 6 | 2018-01-25T13:49:24.000Z | 2020-03-07T16:25:09.000Z | tests/outcome/test_outcome_models.py | ConnorBarnhill/kf-api-dataservice | 547df467a307788882469a25c947a14965a26336 | ["Apache-2.0"] | 369 | 2018-01-17T15:22:18.000Z | 2022-03-10T19:14:56.000Z | tests/outcome/test_outcome_models.py | ConnorBarnhill/kf-api-dataservice | 547df467a307788882469a25c947a14965a26336 | ["Apache-2.0"] | 3 | 2018-04-11T14:18:37.000Z | 2018-10-31T19:09:48.000Z |
from datetime import datetime
import uuid
from sqlalchemy.exc import IntegrityError
from dataservice.api.study.models import Study
from dataservice.api.participant.models import Participant
from dataservice.api.outcome.models import Outcome
from dataservice.extensions import db
from tests.utils import FlaskTestCase
class ModelTest(FlaskTestCase):
"""
Test Outcome database model
"""
def test_create(self):
"""
Test create outcome
"""
# Create study
study = Study(external_id='phs001')
# Create and save participant
participant_id = 'Test subject 0'
p = Participant(external_id=participant_id, is_proband=True,
study=study)
db.session.add(p)
db.session.commit()
# Create outcomes
data = {
'external_id': 'test_0',
'vital_status': 'Alive',
'disease_related': False,
'age_at_event_days': 120,
'participant_id': p.kf_id
}
dt = datetime.now()
o1 = Outcome(**data)
db.session.add(o1)
data['vital_status'] = 'Deceased'
data['disease_related'] = 'True'
o2 = Outcome(**data)
db.session.add(o2)
db.session.commit()
self.assertEqual(Outcome.query.count(), 2)
new_outcome = Outcome.query.all()[1]
self.assertGreater(new_outcome.created_at, dt)
self.assertGreater(new_outcome.modified_at, dt)
self.assertIs(type(uuid.UUID(new_outcome.uuid)), uuid.UUID)
self.assertEqual(new_outcome.vital_status, data['vital_status'])
self.assertEqual(new_outcome.disease_related,
data['disease_related'])
def test_create_via_participant(self):
"""
Test create outcomes via creation of participant
"""
outcomes, p = self._create_outcomes()
oc = ['Deceased', 'Alive']
# Check outcomes were created
self.assertEqual(Outcome.query.count(), 2)
# Check Particpant has the outcomes
for o in Participant.query.first().outcomes:
self.assertIn(o.vital_status, oc)
# Outcomes have the participant
p = Participant.query.first()
for o in Outcome.query.all():
self.assertEqual(o.participant_id, p.kf_id)
def test_find_outcome(self):
"""
Test find one outcome
"""
outcomes, p = self._create_outcomes()
# Find outcome
oc = ['Deceased', 'Alive']
o = Outcome.query.filter_by(vital_status=oc[0]).one_or_none()
self.assertEqual(o.vital_status, oc[0])
def test_update_outcome(self):
"""
Test update outcome
"""
outcomes, p = self._create_outcomes()
# Update and save
oc = ['Deceased', 'Alive']
o = Outcome.query.filter_by(vital_status=oc[0]).one_or_none()
o.outcome = oc[1]
db.session.commit()
# Check updated values
o = Outcome.query.filter_by(vital_status=oc[1]).one_or_none()
self.assertIsNot(o, None)
def test_delete_outcome(self):
"""
Test delete outcome
"""
outcomes, p = self._create_outcomes()
# Choose one and delete it
oc = ['Deceased', 'Alive']
o = Outcome.query.filter_by(vital_status=oc[0]).one_or_none()
db.session.delete(o)
db.session.commit()
o = Outcome.query.filter_by(vital_status=oc[0]).one_or_none()
self.assertIs(o, None)
outcomes = [_o for _o in p.outcomes]
self.assertNotIn(o, outcomes)
def test_delete_outcome_via_participant(self):
"""
Test delete related outcomes via deletion of participant
"""
outcomes, p = self._create_outcomes()
# Delete participant
db.session.delete(p)
db.session.commit()
# Check that outcomes have been deleted
oc = ['Deceased', 'Alive']
o1 = Outcome.query.filter_by(vital_status=oc[0]).one_or_none()
o2 = Outcome.query.filter_by(vital_status=oc[1]).one_or_none()
self.assertIs(o1, None)
self.assertIs(o2, None)
def test_not_null_constraint(self):
"""
Test that a outcome cannot be created without required
parameters such as participant_id
"""
# Create outcome
data = {
'vital_status': 'Alive',
# non-existent required param: participant_id
}
o = Outcome(**data)
# Add to db
self.assertRaises(IntegrityError, db.session.add(o))
def test_foreign_key_constraint(self):
"""
Test that a outcome cannot be created without an existing
reference Participant. This checks foreign key constraint
"""
# Create outcome
data = {
'vital_status': 'Alive',
'participant_id': '' # empty blank foreign key
}
o = Outcome(**data)
        # Add to db; the constraint violation is raised when the session flushes
        db.session.add(o)
        self.assertRaises(IntegrityError, db.session.commit)
def _create_outcomes(self):
"""
Create outcome and required entities
"""
# Create study
study = Study(external_id='phs001')
# Create two outcomes
oc = ['Deceased', 'Alive']
o1 = Outcome(vital_status=oc[0])
o2 = Outcome(vital_status=oc[1])
p = Participant(external_id='p1', is_proband=True, study=study)
# Add to participant and save
p.outcomes.extend([o1, o2])
db.session.add(p)
db.session.commit()
return [o1, o2], p
| 30.252688 | 72 | 0.594633 | 5,305 | 0.942776 | 0 | 0 | 0 | 0 | 0 | 0 | 1,547 | 0.274924 |
c368cab3b6e074a25c4387726e3ddcf458b2da2f | 384 | py | Python | sapextractor/utils/fields_corresp/extract_dd03t.py | aarkue/sap-meta-explorer | 613bf657bbaa72a3781a84664e5de7626516532f | [
"Apache-2.0"
]
| 2 | 2021-02-10T08:09:35.000Z | 2021-05-21T06:25:34.000Z | sapextractor/utils/fields_corresp/extract_dd03t.py | aarkue/sap-meta-explorer | 613bf657bbaa72a3781a84664e5de7626516532f | [
"Apache-2.0"
]
| null | null | null | sapextractor/utils/fields_corresp/extract_dd03t.py | aarkue/sap-meta-explorer | 613bf657bbaa72a3781a84664e5de7626516532f | [
"Apache-2.0"
]
| 3 | 2021-11-22T13:27:00.000Z | 2022-03-16T22:08:51.000Z | def apply(con, target_language="E"):
dict_field_desc = {}
try:
df = con.prepare_and_execute_query("DD03T", ["DDLANGUAGE", "FIELDNAME", "DDTEXT"], " WHERE DDLANGUAGE = '"+target_language+"'")
stream = df.to_dict("records")
for el in stream:
dict_field_desc[el["FIELDNAME"]] = el["DDTEXT"]
    except Exception:
pass
return dict_field_desc
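# Editorial usage sketch (not part of the original module): apply() only needs a
# connection object whose prepare_and_execute_query(table, columns, where_clause)
# returns a pandas DataFrame, so a hypothetical stub is enough to show the shape
# of the result, a {FIELDNAME: DDTEXT} dict.
if __name__ == "__main__":
    import pandas as pd
    class _StubConnection:
        def prepare_and_execute_query(self, table, columns, where_clause):
            # Minimal stand-in for the SAP DD03T text table.
            return pd.DataFrame([{"DDLANGUAGE": "E", "FIELDNAME": "MANDT", "DDTEXT": "Client"}])
    print(apply(_StubConnection()))  # prints {'MANDT': 'Client'}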
| 34.909091 | 135 | 0.611979 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 95 | 0.247396 |
c3693f12a03bbf78b7f7bcf22ea6cd2fd4184fd8 | 1,043 | py | Python | app/forms/fields/month_year_date_field.py | ons-eq-team/eq-questionnaire-runner | 8d029097faa2b9d53d9621064243620db60c62c7 | [
"MIT"
]
| null | null | null | app/forms/fields/month_year_date_field.py | ons-eq-team/eq-questionnaire-runner | 8d029097faa2b9d53d9621064243620db60c62c7 | [
"MIT"
]
| null | null | null | app/forms/fields/month_year_date_field.py | ons-eq-team/eq-questionnaire-runner | 8d029097faa2b9d53d9621064243620db60c62c7 | [
"MIT"
]
| null | null | null | import logging
from werkzeug.utils import cached_property
from wtforms import FormField, Form, StringField
logger = logging.getLogger(__name__)
def get_form_class(validators):
class YearMonthDateForm(Form):
year = StringField(validators=validators)
month = StringField()
@cached_property
def data(self):
data = super().data
try:
return "{year:04d}-{month:02d}".format(
year=int(data["year"]), month=int(data["month"])
)
except (TypeError, ValueError):
return None
return YearMonthDateForm
class MonthYearDateField(FormField):
def __init__(self, validators, **kwargs):
form_class = get_form_class(validators)
super().__init__(form_class, **kwargs)
def process(self, formdata, data=None):
if data is not None:
substrings = data.split("-")
data = {"year": substrings[0], "month": substrings[1]}
super().process(formdata, data)
| 26.74359 | 68 | 0.607862 | 825 | 0.790988 | 0 | 0 | 305 | 0.292426 | 0 | 0 | 53 | 0.050815 |
c36a18741da6b1e9a7e803a47b014cff09f34cfc | 310 | py | Python | inf_classif_analysis/descriptive_analysis.py | Marco-Ametrano/myocardal_infarction_class | d2fb9d4d6643d0b836ffdb94a32911eb4d68c390 | [
"MIT"
]
| null | null | null | inf_classif_analysis/descriptive_analysis.py | Marco-Ametrano/myocardal_infarction_class | d2fb9d4d6643d0b836ffdb94a32911eb4d68c390 | [
"MIT"
]
| null | null | null | inf_classif_analysis/descriptive_analysis.py | Marco-Ametrano/myocardal_infarction_class | d2fb9d4d6643d0b836ffdb94a32911eb4d68c390 | [
"MIT"
]
| null | null | null | #AFTER PREPROCESSING AND TARGETS DEFINITION
newdataset.describe()
LET_IS.value_counts()
LET_IS.value_counts().plot(kind='bar', color='c')
Y_unica.value_counts()
Y_unica.value_counts().plot(kind='bar', color='c')
ZSN.value_counts().plot(kind='bar', color='c')
Survive.value_counts().plot(kind='bar', color='c')
| 34.444444 | 50 | 0.748387 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.241935 |
c36a8c07a26ce0690e0700b966816f0b550bb368 | 1,568 | py | Python | src/service/eda_service.py | LiuYuWei/service-data-eda-analysis | 7dcbf205a0a3715cf3d199356bd1814b8d47b52d | [
"Apache-2.0"
]
| null | null | null | src/service/eda_service.py | LiuYuWei/service-data-eda-analysis | 7dcbf205a0a3715cf3d199356bd1814b8d47b52d | [
"Apache-2.0"
]
| null | null | null | src/service/eda_service.py | LiuYuWei/service-data-eda-analysis | 7dcbf205a0a3715cf3d199356bd1814b8d47b52d | [
"Apache-2.0"
]
 | null | null | null | """Exploratory data analysis (EDA) service."""
# coding=utf-8
# import relation package.
from pandas_profiling import ProfileReport
import pandas as pd
import datetime
import json
# import project package.
from config.config_setting import ConfigSetting
class EdaService:
"""Confusion matrix calculation service."""
def __init__(self):
"""Initial variable and module"""
config_setting = ConfigSetting()
self.log = config_setting.set_logger(
"[eda_service]")
self.config = config_setting.yaml_parser()
self.eda_html = None
def transform_json_to_pandas(self, data, column_name):
df = pd.DataFrame(data, columns=column_name)
payload = {}
payload["length_df"] = len(df)
payload["length_column_df"] = len(df.columns)
df.to_csv("data/csv/dataframe.csv")
return payload
def pandas_profiling_eda_transfer(self, title_name):
df = pd.read_csv("data/csv/dataframe.csv")
        profile = df.profile_report(title=title_name)
payload = {}
now_time = datetime.datetime.now()
payload["timestamp"] = now_time.isoformat()
payload["eda_report"] = "eda_{}.html".format(now_time.strftime("%Y%m%d_%H%M%S"))
self.eda_html = payload["eda_report"]
profile.to_file("src/templates/{}".format(payload["eda_report"]))
return payload
def show_eda_result_in_html(self):
result = None
if self.eda_html is not None:
result = self.eda_html
return result
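# Editorial usage sketch (assumes the data/csv and src/templates folders and the
# YAML config read by ConfigSetting exist; names below are illustrative only):
#   service = EdaService()
#   service.transform_json_to_pandas(records, column_names)  # writes data/csv/dataframe.csv
#   service.pandas_profiling_eda_transfer("my data")         # writes src/templates/eda_<timestamp>.html
#   service.show_eda_result_in_html()                        # returns the generated report file name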
| 32.666667 | 88 | 0.655612 | 1,313 | 0.837372 | 0 | 0 | 0 | 0 | 0 | 0 | 394 | 0.251276 |
c36b30969b08b61066b6a7a3898735992cd717ad | 1,385 | py | Python | google/cloud/bigquery_v2/types/__init__.py | KoffieLabs/python-bigquery | 33b317abdc6d69f33722cb0504bb0b78c1c80e30 | [
"Apache-2.0"
]
| null | null | null | google/cloud/bigquery_v2/types/__init__.py | KoffieLabs/python-bigquery | 33b317abdc6d69f33722cb0504bb0b78c1c80e30 | [
"Apache-2.0"
]
| null | null | null | google/cloud/bigquery_v2/types/__init__.py | KoffieLabs/python-bigquery | 33b317abdc6d69f33722cb0504bb0b78c1c80e30 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .encryption_config import (
EncryptionConfiguration,
)
from .model import (
DeleteModelRequest,
GetModelRequest,
ListModelsRequest,
ListModelsResponse,
Model,
PatchModelRequest,
)
from .model_reference import (
ModelReference,
)
from .standard_sql import (
StandardSqlDataType,
StandardSqlField,
StandardSqlStructType,
StandardSqlTableType,
)
from .table_reference import (
TableReference,
)
__all__ = (
"EncryptionConfiguration",
"DeleteModelRequest",
"GetModelRequest",
"ListModelsRequest",
"ListModelsResponse",
"Model",
"PatchModelRequest",
"ModelReference",
"StandardSqlDataType",
"StandardSqlField",
"StandardSqlStructType",
"StandardSqlTableType",
"TableReference",
)
| 25.181818 | 74 | 0.724188 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 828 | 0.597834 |
c36b323dde6e6584446ed2e96c3983eea6ffe2a3 | 4,365 | py | Python | blurr/core/store.py | ddrightnow/blurr | a8745101d4a8a85ccf1efc608dba8486d3cebb49 | [
"Apache-2.0"
]
| null | null | null | blurr/core/store.py | ddrightnow/blurr | a8745101d4a8a85ccf1efc608dba8486d3cebb49 | [
"Apache-2.0"
]
| 7 | 2019-12-16T20:58:29.000Z | 2022-02-09T23:57:32.000Z | blurr/core/store.py | ddrightnow/blurr | a8745101d4a8a85ccf1efc608dba8486d3cebb49 | [
"Apache-2.0"
]
| null | null | null | from abc import abstractmethod, ABC
from datetime import datetime, timezone
from typing import Any, List, Tuple, Dict
from blurr.core.base import BaseSchema
from blurr.core.store_key import Key, KeyType
class StoreSchema(BaseSchema):
pass
class Store(ABC):
""" Base Store that allows for data to be persisted during / after transformation """
@abstractmethod
def get_all(self, identity: str) -> Dict[Key, Any]:
"""
Gets all the items for an identity
"""
raise NotImplementedError()
@abstractmethod
def get(self, key: Key) -> Any:
"""
Gets an item by key
"""
raise NotImplementedError()
def get_range(self,
base_key: Key,
start_time: datetime,
end_time: datetime = None,
count: int = 0) -> List[Tuple[Key, Any]]:
"""
Returns the list of items from the store based on the given time range or count.
:param base_key: Items which don't start with the base_key are filtered out.
        :param start_time: Start time for the range query
:param end_time: End time of the range query. If None count is used.
:param count: The number of items to be returned. Used if end_time is not specified.
"""
if end_time and count:
raise ValueError('Only one of `end` or `count` can be set')
if count:
end_time = datetime.min.replace(
tzinfo=timezone.utc) if count < 0 else datetime.max.replace(tzinfo=timezone.utc)
if end_time < start_time:
start_time, end_time = end_time, start_time
if base_key.key_type == KeyType.TIMESTAMP:
start_key = Key(KeyType.TIMESTAMP, base_key.identity, base_key.group, [], start_time)
end_key = Key(KeyType.TIMESTAMP, base_key.identity, base_key.group, [], end_time)
return self._get_range_timestamp_key(start_key, end_key, count)
else:
return self._get_range_dimension_key(base_key, start_time, end_time, count)
@abstractmethod
def _get_range_timestamp_key(self, start: Key, end: Key,
count: int = 0) -> List[Tuple[Key, Any]]:
"""
Returns the list of items from the store based on the given time range or count.
This is used when the key being used is a TIMESTAMP key.
"""
raise NotImplementedError()
def get_time_range(self, identity, group, start_time, end_time) -> List[Tuple[Key, Any]]:
raise NotImplementedError()
def get_count_range(self, identity, group, time, count):
raise NotImplementedError()
@abstractmethod
def _get_range_dimension_key(self,
base_key: Key,
start_time: datetime,
end_time: datetime,
count: int = 0) -> List[Tuple[Key, Any]]:
"""
Returns the list of items from the store based on the given time range or count.
This is used when the key being used is a DIMENSION key.
"""
raise NotImplementedError()
@staticmethod
def _restrict_items_to_count(items: List[Tuple[Key, Any]], count: int) -> List[Tuple[Key, Any]]:
"""
Restricts items to count number if len(items) is larger than abs(count). This function
assumes that items is sorted by time.
:param items: The items to restrict.
:param count: The number of items returned.
"""
if abs(count) > len(items):
count = Store._sign(count) * len(items)
if count < 0:
return items[count:]
else:
return items[:count]
@abstractmethod
def save(self, key: Key, item: Any) -> None:
"""
Saves an item to store
"""
raise NotImplementedError()
@abstractmethod
def delete(self, key: Key) -> None:
"""
Deletes an item from the store by key
"""
raise NotImplementedError()
@abstractmethod
def finalize(self) -> None:
"""
Finalizes the store by flushing all remaining data to persistence
"""
raise NotImplementedError()
@staticmethod
def _sign(x: int) -> int:
return (1, -1)[x < 0]
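# Editorial sketch (not part of the original module): _restrict_items_to_count
# trims a time-ordered item list to `count` entries - a positive count keeps the
# head of the list and a negative count keeps the tail. Plain tuples stand in
# for (Key, value) pairs purely for illustration.
if __name__ == '__main__':
    _items = [('k1', 1), ('k2', 2), ('k3', 3)]
    assert Store._restrict_items_to_count(_items, 2) == [('k1', 1), ('k2', 2)]
    assert Store._restrict_items_to_count(_items, -2) == [('k2', 2), ('k3', 3)]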
| 33.837209 | 100 | 0.595647 | 4,155 | 0.95189 | 0 | 0 | 2,317 | 0.530813 | 0 | 0 | 1,437 | 0.32921 |
c36b874a06452316ba72dfbbdea4c8d952355b51 | 1,411 | py | Python | seamless/core/cache/tempref.py | sjdv1982/seamless | 1b814341e74a56333c163f10e6f6ceab508b7df9 | [
"MIT"
]
| 15 | 2017-06-07T12:49:12.000Z | 2020-07-25T18:06:04.000Z | seamless/core/cache/tempref.py | sjdv1982/seamless | 1b814341e74a56333c163f10e6f6ceab508b7df9 | [
"MIT"
]
| 110 | 2016-06-21T23:20:44.000Z | 2022-02-24T16:15:22.000Z | seamless/core/cache/tempref.py | sjdv1982/seamless | 1b814341e74a56333c163f10e6f6ceab508b7df9 | [
"MIT"
]
| 6 | 2016-06-21T11:19:22.000Z | 2019-01-21T13:45:39.000Z | import time, copy
import asyncio
class TempRefManager:
def __init__(self):
self.refs = []
self.running = False
def add_ref(self, ref, lifetime, on_shutdown):
expiry_time = time.time() + lifetime
self.refs.append((ref, expiry_time, on_shutdown))
def purge_all(self):
"""Purges all refs, regardless of expiry time
Only call this when Seamless is shutting down"""
while len(self.refs):
ref, _, on_shutdown = self.refs.pop(0)
if not on_shutdown:
continue
try:
ref()
except:
pass
def purge(self):
"""Purges expired refs"""
t = time.time()
for item in copy.copy(self.refs):
ref, expiry_time, _ = item
if expiry_time < t:
self.refs.remove(item)
ref()
async def loop(self):
if self.running:
return
self.running = True
while 1:
try:
self.purge()
except Exception:
import traceback
traceback.print_exc()
await asyncio.sleep(0.05)
self.running = False
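# Editorial usage sketch: callers register a cleanup callback together with a
# lifetime in seconds; purge() (driven by the asyncio task created below) invokes
# it once the lifetime has elapsed, while purge_all() only runs refs registered
# with on_shutdown=True. The callback below is hypothetical:
#   temprefmanager.add_ref(lambda: print("temporary value released"), 2.0, False)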
temprefmanager = TempRefManager()
coro = temprefmanager.loop()
task = asyncio.ensure_future(coro)
import atexit
atexit.register(lambda *args, **kwargs: task.cancel()) | 26.12963 | 57 | 0.546421 | 1,191 | 0.844082 | 0 | 0 | 0 | 0 | 324 | 0.229624 | 127 | 0.090007 |
c36b9227e1e39aa4000c6b92c3dbf8f27a5ea7f5 | 8,775 | py | Python | lib/python27/Lib/site-packages/wx-2.8-msw-ansi/wx/tools/Editra/src/eclib/choicedlg.py | bo3b/iZ3D | ced8b3a4b0a152d0177f2e94008918efc76935d5 | [
"MIT"
]
| 27 | 2020-11-12T19:24:54.000Z | 2022-03-27T23:10:45.000Z | lib/python27/Lib/site-packages/wx-2.8-msw-ansi/wx/tools/Editra/src/eclib/choicedlg.py | bo3b/iZ3D | ced8b3a4b0a152d0177f2e94008918efc76935d5 | [
"MIT"
]
| 2 | 2020-11-02T06:30:39.000Z | 2022-02-23T18:39:55.000Z | lib/python27/Lib/site-packages/wx-2.8-msw-ansi/wx/tools/Editra/src/eclib/choicedlg.py | bo3b/iZ3D | ced8b3a4b0a152d0177f2e94008918efc76935d5 | [
"MIT"
]
| 3 | 2021-08-16T00:21:08.000Z | 2022-02-23T19:19:36.000Z | ###############################################################################
# Name: choicedlg.py #
# Purpose: Generic Choice Dialog #
# Author: Cody Precord <[email protected]> #
# Copyright: (c) 2008 Cody Precord <[email protected]> #
# License: wxWindows License #
###############################################################################
"""
Editra Control Library: Choice Dialog
A generic choice dialog that uses a wx.Choice control to display its choices.
@summary: Generic Choice Dialog
"""
__author__ = "Cody Precord <[email protected]>"
__svnid__ = "$Id: choicedlg.py 63820 2010-04-01 21:46:22Z CJP $"
__revision__ = "$Revision: 63820 $"
__all__ = ['ChoiceDialog',]
#--------------------------------------------------------------------------#
# Imports
import wx
#--------------------------------------------------------------------------#
# Globals
ChoiceDialogNameStr = u"ChoiceDialog"
#--------------------------------------------------------------------------#
class ChoiceDialog(wx.Dialog):
"""Dialog with a wx.Choice control for showing a list of choices"""
def __init__(self, parent, id=wx.ID_ANY,
msg=u'', title=u'',
choices=None, default=u'',
pos=wx.DefaultPosition,
size=wx.DefaultSize,
style=0,
name=ChoiceDialogNameStr):
"""Create the choice dialog
@keyword msg: Dialog Message
@keyword title: Dialog Title
@keyword choices: list of strings
@keyword default: Default selection
"""
wx.Dialog.__init__(self, parent, id, title,
style=wx.CAPTION, pos=pos, size=size, name=name)
# Attributes
self._panel = ChoicePanel(self, msg=msg,
choices=choices,
default=default,
style=style)
# Layout
self.__DoLayout()
def __DoLayout(self):
"""Layout the dialogs controls"""
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(self._panel, 1, wx.EXPAND)
self.SetSizer(sizer)
self.SetAutoLayout(True)
self.SetInitialSize()
def SetChoices(self, choices):
"""Set the dialogs choices
@param choices: list of strings
"""
self._panel.SetChoices(choices)
def GetSelection(self):
"""Get the selected choice
@return: string
"""
return self._panel.GetSelection()
def GetStringSelection(self):
"""Get the chosen string
@return: string
"""
return self._panel.GetStringSelection()
def SetBitmap(self, bmp):
"""Set the bitmap used in the dialog
@param bmp: wx.Bitmap
"""
self._panel.SetBitmap(bmp)
def SetStringSelection(self, sel):
"""Set the selected choice
@param sel: string
"""
self._panel.SetStringSelection(sel)
def SetSelection(self, sel):
"""Set the selected choice
@param sel: string
"""
self._panel.SetSelection(sel)
#--------------------------------------------------------------------------#
class ChoicePanel(wx.Panel):
"""Generic Choice dialog panel"""
def __init__(self, parent, msg=u'', choices=list(),
default=u'', style=wx.OK|wx.CANCEL):
"""Create the panel
@keyword msg: Display message
@keyword choices: list of strings
@keyword default: default selection
@keyword style: dialog style
"""
wx.Panel.__init__(self, parent)
# Attributes
self._msg = msg
self._choices = wx.Choice(self, wx.ID_ANY)
self._selection = default
self._selidx = 0
self._bmp = None
self._buttons = list()
# Setup
self._choices.SetItems(choices)
if default in choices:
self._choices.SetStringSelection(default)
self._selidx = self._choices.GetSelection()
else:
self._choices.SetSelection(0)
self._selidx = 0
self._selection = self._choices.GetStringSelection()
# Setup Buttons
for btn, id_ in ((wx.OK, wx.ID_OK), (wx.CANCEL, wx.ID_CANCEL),
(wx.YES, wx.ID_YES), (wx.NO, wx.ID_NO)):
if btn & style:
button = wx.Button(self, id_)
self._buttons.append(button)
if not len(self._buttons):
self._buttons.append(wx.Button(self, wx.ID_OK))
self._buttons.append(wx.Button(self, wx.ID_CANCEL))
# Layout
self.__DoLayout(style)
# Event Handlers
self.Bind(wx.EVT_CHOICE, self.OnChoice, self._choices)
self.Bind(wx.EVT_BUTTON, self.OnButton)
def __DoLayout(self, style):
"""Layout the panel"""
hsizer = wx.BoxSizer(wx.HORIZONTAL)
vsizer = wx.BoxSizer(wx.VERTICAL)
caption = wx.StaticText(self, label=self._msg)
# Layout the buttons
bsizer = wx.StdDialogButtonSizer()
for button in self._buttons:
bsizer.AddButton(button)
bid = button.GetId()
if bid in (wx.ID_NO, wx.ID_YES):
if wx.NO_DEFAULT & style:
if bid == wx.ID_NO:
button.SetDefault()
else:
if bid == wx.ID_YES:
button.SetDefault()
elif bid == wx.ID_OK:
button.SetDefault()
bsizer.Realize()
vsizer.AddMany([((10, 10), 0), (caption, 0), ((20, 20), 0),
(self._choices, 1, wx.EXPAND), ((10, 10), 0),
(bsizer, 1, wx.EXPAND),
((10, 10), 0)])
icon_id = wx.ART_INFORMATION
for i_id, a_id in ((wx.ICON_ERROR, wx.ART_ERROR),
(wx.ICON_WARNING, wx.ART_WARNING)):
if i_id & style:
icon_id = a_id
break
icon = wx.ArtProvider.GetBitmap(icon_id, wx.ART_MESSAGE_BOX, (64, 64))
self._bmp = wx.StaticBitmap(self, bitmap=icon)
bmpsz = wx.BoxSizer(wx.VERTICAL)
bmpsz.AddMany([((10, 10), 0), (self._bmp, 0, wx.ALIGN_CENTER_VERTICAL),
((10, 30), 0, wx.EXPAND)])
hsizer.AddMany([((10, 10), 0), (bmpsz, 0, wx.ALIGN_TOP),
((10, 10), 0), (vsizer, 1), ((10, 10), 0)])
self.SetSizer(hsizer)
self.SetInitialSize()
self.SetAutoLayout(True)
def GetChoiceControl(self):
"""Get the dialogs choice control
@return: wx.Choice
"""
return self._choices
def GetSelection(self):
"""Get the chosen index
@return: int
"""
return self._selidx
def GetStringSelection(self):
"""Get the chosen string
@return: string
"""
return self._selection
def OnButton(self, evt):
"""Handle button events
@param evt: wx.EVT_BUTTON
@type evt: wx.CommandEvent
"""
self.GetParent().EndModal(evt.GetId())
def OnChoice(self, evt):
"""Update the selection
@param evt: wx.EVT_CHOICE
@type evt: wx.CommandEvent
"""
if evt.GetEventObject() == self._choices:
self._selection = self._choices.GetStringSelection()
self._selidx = self._choices.GetSelection()
else:
evt.Skip()
def SetBitmap(self, bmp):
"""Set the dialogs bitmap
@param bmp: wx.Bitmap
"""
self._bmp.SetBitmap(bmp)
self.Layout()
def SetChoices(self, choices):
"""Set the dialogs choices
@param choices: list of strings
"""
self._choices.SetItems(choices)
self._choices.SetSelection(0)
self._selection = self._choices.GetStringSelection()
def SetSelection(self, sel):
"""Set the selected choice
@param sel: int
"""
self._choices.SetSelection(sel)
self._selection = self._choices.GetStringSelection()
self._selidx = self._choices.GetSelection()
def SetStringSelection(self, sel):
"""Set the selected choice
@param sel: string
"""
self._choices.SetStringSelection(sel)
self._selection = self._choices.GetStringSelection()
self._selidx = self._choices.GetSelection()
#--------------------------------------------------------------------------#
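# Editorial usage sketch (assumes a running wx.App; the strings are illustrative):
#   dlg = ChoiceDialog(parent, msg=u"Choose a syntax", title=u"Syntax",
#                      choices=[u"Python", u"C++", u"Ruby"], default=u"Python",
#                      style=wx.OK|wx.CANCEL)
#   if dlg.ShowModal() == wx.ID_OK:
#       selected = dlg.GetStringSelection()
#   dlg.Destroy()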
| 30.574913 | 79 | 0.509972 | 7,412 | 0.844672 | 0 | 0 | 0 | 0 | 0 | 0 | 2,998 | 0.341652 |
c36ce52f1b69aad8e3b2676523c1755292c1c03c | 29,068 | py | Python | src/flower/proto/transport_pb2.py | xinchiqiu/flower | ef12441fdebaa32f34e12dd02dfa376fa2988eaf | [
"Apache-2.0"
]
| null | null | null | src/flower/proto/transport_pb2.py | xinchiqiu/flower | ef12441fdebaa32f34e12dd02dfa376fa2988eaf | [
"Apache-2.0"
]
| null | null | null | src/flower/proto/transport_pb2.py | xinchiqiu/flower | ef12441fdebaa32f34e12dd02dfa376fa2988eaf | [
"Apache-2.0"
]
| 1 | 2020-06-01T11:06:18.000Z | 2020-06-01T11:06:18.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: flower/proto/transport.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='flower/proto/transport.proto',
package='flower.transport',
syntax='proto3',
serialized_options=None,
serialized_pb=b'\n\x1c\x66lower/proto/transport.proto\x12\x10\x66lower.transport\"2\n\nParameters\x12\x0f\n\x07tensors\x18\x01 \x03(\x0c\x12\x13\n\x0btensor_type\x18\x02 \x01(\t\"\xb8\x05\n\rServerMessage\x12>\n\treconnect\x18\x01 \x01(\x0b\x32).flower.transport.ServerMessage.ReconnectH\x00\x12G\n\x0eget_parameters\x18\x02 \x01(\x0b\x32-.flower.transport.ServerMessage.GetParametersH\x00\x12\x39\n\x07\x66it_ins\x18\x03 \x01(\x0b\x32&.flower.transport.ServerMessage.FitInsH\x00\x12\x43\n\x0c\x65valuate_ins\x18\x04 \x01(\x0b\x32+.flower.transport.ServerMessage.EvaluateInsH\x00\x1a\x1c\n\tReconnect\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x1a\x0f\n\rGetParameters\x1a\xad\x01\n\x06\x46itIns\x12\x30\n\nparameters\x18\x01 \x01(\x0b\x32\x1c.flower.transport.Parameters\x12\x42\n\x06\x63onfig\x18\x02 \x03(\x0b\x32\x32.flower.transport.ServerMessage.FitIns.ConfigEntry\x1a-\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\xb7\x01\n\x0b\x45valuateIns\x12\x30\n\nparameters\x18\x01 \x01(\x0b\x32\x1c.flower.transport.Parameters\x12G\n\x06\x63onfig\x18\x02 \x03(\x0b\x32\x37.flower.transport.ServerMessage.EvaluateIns.ConfigEntry\x1a-\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x05\n\x03msg\"\xbc\x04\n\rClientMessage\x12@\n\ndisconnect\x18\x01 \x01(\x0b\x32*.flower.transport.ClientMessage.DisconnectH\x00\x12G\n\x0eparameters_res\x18\x02 \x01(\x0b\x32-.flower.transport.ClientMessage.ParametersResH\x00\x12\x39\n\x07\x66it_res\x18\x03 \x01(\x0b\x32&.flower.transport.ClientMessage.FitResH\x00\x12\x43\n\x0c\x65valuate_res\x18\x04 \x01(\x0b\x32+.flower.transport.ClientMessage.EvaluateResH\x00\x1a\x36\n\nDisconnect\x12(\n\x06reason\x18\x01 \x01(\x0e\x32\x18.flower.transport.Reason\x1a\x41\n\rParametersRes\x12\x30\n\nparameters\x18\x01 \x01(\x0b\x32\x1c.flower.transport.Parameters\x1ak\n\x06\x46itRes\x12\x30\n\nparameters\x18\x01 \x01(\x0b\x32\x1c.flower.transport.Parameters\x12\x14\n\x0cnum_examples\x18\x02 \x01(\x03\x12\x19\n\x11num_examples_ceil\x18\x03 \x01(\x03\x1a\x31\n\x0b\x45valuateRes\x12\x14\n\x0cnum_examples\x18\x01 \x01(\x03\x12\x0c\n\x04loss\x18\x02 \x01(\x02\x42\x05\n\x03msg*R\n\x06Reason\x12\x0b\n\x07UNKNOWN\x10\x00\x12\r\n\tRECONNECT\x10\x01\x12\x16\n\x12POWER_DISCONNECTED\x10\x02\x12\x14\n\x10WIFI_UNAVAILABLE\x10\x03\x32_\n\rFlowerService\x12N\n\x04Join\x12\x1f.flower.transport.ClientMessage\x1a\x1f.flower.transport.ServerMessage\"\x00(\x01\x30\x01\x62\x06proto3'
)
_REASON = _descriptor.EnumDescriptor(
name='Reason',
full_name='flower.transport.Reason',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RECONNECT', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='POWER_DISCONNECTED', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WIFI_UNAVAILABLE', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1376,
serialized_end=1458,
)
_sym_db.RegisterEnumDescriptor(_REASON)
Reason = enum_type_wrapper.EnumTypeWrapper(_REASON)
UNKNOWN = 0
RECONNECT = 1
POWER_DISCONNECTED = 2
WIFI_UNAVAILABLE = 3
_PARAMETERS = _descriptor.Descriptor(
name='Parameters',
full_name='flower.transport.Parameters',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tensors', full_name='flower.transport.Parameters.tensors', index=0,
number=1, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tensor_type', full_name='flower.transport.Parameters.tensor_type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=50,
serialized_end=100,
)
_SERVERMESSAGE_RECONNECT = _descriptor.Descriptor(
name='Reconnect',
full_name='flower.transport.ServerMessage.Reconnect',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='seconds', full_name='flower.transport.ServerMessage.Reconnect.seconds', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=385,
serialized_end=413,
)
_SERVERMESSAGE_GETPARAMETERS = _descriptor.Descriptor(
name='GetParameters',
full_name='flower.transport.ServerMessage.GetParameters',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=415,
serialized_end=430,
)
_SERVERMESSAGE_FITINS_CONFIGENTRY = _descriptor.Descriptor(
name='ConfigEntry',
full_name='flower.transport.ServerMessage.FitIns.ConfigEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='flower.transport.ServerMessage.FitIns.ConfigEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='flower.transport.ServerMessage.FitIns.ConfigEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=561,
serialized_end=606,
)
_SERVERMESSAGE_FITINS = _descriptor.Descriptor(
name='FitIns',
full_name='flower.transport.ServerMessage.FitIns',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parameters', full_name='flower.transport.ServerMessage.FitIns.parameters', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='config', full_name='flower.transport.ServerMessage.FitIns.config', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SERVERMESSAGE_FITINS_CONFIGENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=433,
serialized_end=606,
)
_SERVERMESSAGE_EVALUATEINS_CONFIGENTRY = _descriptor.Descriptor(
name='ConfigEntry',
full_name='flower.transport.ServerMessage.EvaluateIns.ConfigEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='flower.transport.ServerMessage.EvaluateIns.ConfigEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='flower.transport.ServerMessage.EvaluateIns.ConfigEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=561,
serialized_end=606,
)
_SERVERMESSAGE_EVALUATEINS = _descriptor.Descriptor(
name='EvaluateIns',
full_name='flower.transport.ServerMessage.EvaluateIns',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parameters', full_name='flower.transport.ServerMessage.EvaluateIns.parameters', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='config', full_name='flower.transport.ServerMessage.EvaluateIns.config', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SERVERMESSAGE_EVALUATEINS_CONFIGENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=609,
serialized_end=792,
)
_SERVERMESSAGE = _descriptor.Descriptor(
name='ServerMessage',
full_name='flower.transport.ServerMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='reconnect', full_name='flower.transport.ServerMessage.reconnect', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='get_parameters', full_name='flower.transport.ServerMessage.get_parameters', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fit_ins', full_name='flower.transport.ServerMessage.fit_ins', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='evaluate_ins', full_name='flower.transport.ServerMessage.evaluate_ins', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SERVERMESSAGE_RECONNECT, _SERVERMESSAGE_GETPARAMETERS, _SERVERMESSAGE_FITINS, _SERVERMESSAGE_EVALUATEINS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='msg', full_name='flower.transport.ServerMessage.msg',
index=0, containing_type=None, fields=[]),
],
serialized_start=103,
serialized_end=799,
)
_CLIENTMESSAGE_DISCONNECT = _descriptor.Descriptor(
name='Disconnect',
full_name='flower.transport.ClientMessage.Disconnect',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='reason', full_name='flower.transport.ClientMessage.Disconnect.reason', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1086,
serialized_end=1140,
)
_CLIENTMESSAGE_PARAMETERSRES = _descriptor.Descriptor(
name='ParametersRes',
full_name='flower.transport.ClientMessage.ParametersRes',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parameters', full_name='flower.transport.ClientMessage.ParametersRes.parameters', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1142,
serialized_end=1207,
)
_CLIENTMESSAGE_FITRES = _descriptor.Descriptor(
name='FitRes',
full_name='flower.transport.ClientMessage.FitRes',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parameters', full_name='flower.transport.ClientMessage.FitRes.parameters', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_examples', full_name='flower.transport.ClientMessage.FitRes.num_examples', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_examples_ceil', full_name='flower.transport.ClientMessage.FitRes.num_examples_ceil', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1209,
serialized_end=1316,
)
_CLIENTMESSAGE_EVALUATERES = _descriptor.Descriptor(
name='EvaluateRes',
full_name='flower.transport.ClientMessage.EvaluateRes',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='num_examples', full_name='flower.transport.ClientMessage.EvaluateRes.num_examples', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='loss', full_name='flower.transport.ClientMessage.EvaluateRes.loss', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1318,
serialized_end=1367,
)
_CLIENTMESSAGE = _descriptor.Descriptor(
name='ClientMessage',
full_name='flower.transport.ClientMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='disconnect', full_name='flower.transport.ClientMessage.disconnect', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parameters_res', full_name='flower.transport.ClientMessage.parameters_res', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fit_res', full_name='flower.transport.ClientMessage.fit_res', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='evaluate_res', full_name='flower.transport.ClientMessage.evaluate_res', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CLIENTMESSAGE_DISCONNECT, _CLIENTMESSAGE_PARAMETERSRES, _CLIENTMESSAGE_FITRES, _CLIENTMESSAGE_EVALUATERES, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='msg', full_name='flower.transport.ClientMessage.msg',
index=0, containing_type=None, fields=[]),
],
serialized_start=802,
serialized_end=1374,
)
_SERVERMESSAGE_RECONNECT.containing_type = _SERVERMESSAGE
_SERVERMESSAGE_GETPARAMETERS.containing_type = _SERVERMESSAGE
_SERVERMESSAGE_FITINS_CONFIGENTRY.containing_type = _SERVERMESSAGE_FITINS
_SERVERMESSAGE_FITINS.fields_by_name['parameters'].message_type = _PARAMETERS
_SERVERMESSAGE_FITINS.fields_by_name['config'].message_type = _SERVERMESSAGE_FITINS_CONFIGENTRY
_SERVERMESSAGE_FITINS.containing_type = _SERVERMESSAGE
_SERVERMESSAGE_EVALUATEINS_CONFIGENTRY.containing_type = _SERVERMESSAGE_EVALUATEINS
_SERVERMESSAGE_EVALUATEINS.fields_by_name['parameters'].message_type = _PARAMETERS
_SERVERMESSAGE_EVALUATEINS.fields_by_name['config'].message_type = _SERVERMESSAGE_EVALUATEINS_CONFIGENTRY
_SERVERMESSAGE_EVALUATEINS.containing_type = _SERVERMESSAGE
_SERVERMESSAGE.fields_by_name['reconnect'].message_type = _SERVERMESSAGE_RECONNECT
_SERVERMESSAGE.fields_by_name['get_parameters'].message_type = _SERVERMESSAGE_GETPARAMETERS
_SERVERMESSAGE.fields_by_name['fit_ins'].message_type = _SERVERMESSAGE_FITINS
_SERVERMESSAGE.fields_by_name['evaluate_ins'].message_type = _SERVERMESSAGE_EVALUATEINS
_SERVERMESSAGE.oneofs_by_name['msg'].fields.append(
_SERVERMESSAGE.fields_by_name['reconnect'])
_SERVERMESSAGE.fields_by_name['reconnect'].containing_oneof = _SERVERMESSAGE.oneofs_by_name['msg']
_SERVERMESSAGE.oneofs_by_name['msg'].fields.append(
_SERVERMESSAGE.fields_by_name['get_parameters'])
_SERVERMESSAGE.fields_by_name['get_parameters'].containing_oneof = _SERVERMESSAGE.oneofs_by_name['msg']
_SERVERMESSAGE.oneofs_by_name['msg'].fields.append(
_SERVERMESSAGE.fields_by_name['fit_ins'])
_SERVERMESSAGE.fields_by_name['fit_ins'].containing_oneof = _SERVERMESSAGE.oneofs_by_name['msg']
_SERVERMESSAGE.oneofs_by_name['msg'].fields.append(
_SERVERMESSAGE.fields_by_name['evaluate_ins'])
_SERVERMESSAGE.fields_by_name['evaluate_ins'].containing_oneof = _SERVERMESSAGE.oneofs_by_name['msg']
_CLIENTMESSAGE_DISCONNECT.fields_by_name['reason'].enum_type = _REASON
_CLIENTMESSAGE_DISCONNECT.containing_type = _CLIENTMESSAGE
_CLIENTMESSAGE_PARAMETERSRES.fields_by_name['parameters'].message_type = _PARAMETERS
_CLIENTMESSAGE_PARAMETERSRES.containing_type = _CLIENTMESSAGE
_CLIENTMESSAGE_FITRES.fields_by_name['parameters'].message_type = _PARAMETERS
_CLIENTMESSAGE_FITRES.containing_type = _CLIENTMESSAGE
_CLIENTMESSAGE_EVALUATERES.containing_type = _CLIENTMESSAGE
_CLIENTMESSAGE.fields_by_name['disconnect'].message_type = _CLIENTMESSAGE_DISCONNECT
_CLIENTMESSAGE.fields_by_name['parameters_res'].message_type = _CLIENTMESSAGE_PARAMETERSRES
_CLIENTMESSAGE.fields_by_name['fit_res'].message_type = _CLIENTMESSAGE_FITRES
_CLIENTMESSAGE.fields_by_name['evaluate_res'].message_type = _CLIENTMESSAGE_EVALUATERES
_CLIENTMESSAGE.oneofs_by_name['msg'].fields.append(
_CLIENTMESSAGE.fields_by_name['disconnect'])
_CLIENTMESSAGE.fields_by_name['disconnect'].containing_oneof = _CLIENTMESSAGE.oneofs_by_name['msg']
_CLIENTMESSAGE.oneofs_by_name['msg'].fields.append(
_CLIENTMESSAGE.fields_by_name['parameters_res'])
_CLIENTMESSAGE.fields_by_name['parameters_res'].containing_oneof = _CLIENTMESSAGE.oneofs_by_name['msg']
_CLIENTMESSAGE.oneofs_by_name['msg'].fields.append(
_CLIENTMESSAGE.fields_by_name['fit_res'])
_CLIENTMESSAGE.fields_by_name['fit_res'].containing_oneof = _CLIENTMESSAGE.oneofs_by_name['msg']
_CLIENTMESSAGE.oneofs_by_name['msg'].fields.append(
_CLIENTMESSAGE.fields_by_name['evaluate_res'])
_CLIENTMESSAGE.fields_by_name['evaluate_res'].containing_oneof = _CLIENTMESSAGE.oneofs_by_name['msg']
DESCRIPTOR.message_types_by_name['Parameters'] = _PARAMETERS
DESCRIPTOR.message_types_by_name['ServerMessage'] = _SERVERMESSAGE
DESCRIPTOR.message_types_by_name['ClientMessage'] = _CLIENTMESSAGE
DESCRIPTOR.enum_types_by_name['Reason'] = _REASON
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Parameters = _reflection.GeneratedProtocolMessageType('Parameters', (_message.Message,), {
'DESCRIPTOR' : _PARAMETERS,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.Parameters)
})
_sym_db.RegisterMessage(Parameters)
ServerMessage = _reflection.GeneratedProtocolMessageType('ServerMessage', (_message.Message,), {
'Reconnect' : _reflection.GeneratedProtocolMessageType('Reconnect', (_message.Message,), {
'DESCRIPTOR' : _SERVERMESSAGE_RECONNECT,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ServerMessage.Reconnect)
})
,
'GetParameters' : _reflection.GeneratedProtocolMessageType('GetParameters', (_message.Message,), {
'DESCRIPTOR' : _SERVERMESSAGE_GETPARAMETERS,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ServerMessage.GetParameters)
})
,
'FitIns' : _reflection.GeneratedProtocolMessageType('FitIns', (_message.Message,), {
'ConfigEntry' : _reflection.GeneratedProtocolMessageType('ConfigEntry', (_message.Message,), {
'DESCRIPTOR' : _SERVERMESSAGE_FITINS_CONFIGENTRY,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ServerMessage.FitIns.ConfigEntry)
})
,
'DESCRIPTOR' : _SERVERMESSAGE_FITINS,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ServerMessage.FitIns)
})
,
'EvaluateIns' : _reflection.GeneratedProtocolMessageType('EvaluateIns', (_message.Message,), {
'ConfigEntry' : _reflection.GeneratedProtocolMessageType('ConfigEntry', (_message.Message,), {
'DESCRIPTOR' : _SERVERMESSAGE_EVALUATEINS_CONFIGENTRY,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ServerMessage.EvaluateIns.ConfigEntry)
})
,
'DESCRIPTOR' : _SERVERMESSAGE_EVALUATEINS,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ServerMessage.EvaluateIns)
})
,
'DESCRIPTOR' : _SERVERMESSAGE,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ServerMessage)
})
_sym_db.RegisterMessage(ServerMessage)
_sym_db.RegisterMessage(ServerMessage.Reconnect)
_sym_db.RegisterMessage(ServerMessage.GetParameters)
_sym_db.RegisterMessage(ServerMessage.FitIns)
_sym_db.RegisterMessage(ServerMessage.FitIns.ConfigEntry)
_sym_db.RegisterMessage(ServerMessage.EvaluateIns)
_sym_db.RegisterMessage(ServerMessage.EvaluateIns.ConfigEntry)
ClientMessage = _reflection.GeneratedProtocolMessageType('ClientMessage', (_message.Message,), {
'Disconnect' : _reflection.GeneratedProtocolMessageType('Disconnect', (_message.Message,), {
'DESCRIPTOR' : _CLIENTMESSAGE_DISCONNECT,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ClientMessage.Disconnect)
})
,
'ParametersRes' : _reflection.GeneratedProtocolMessageType('ParametersRes', (_message.Message,), {
'DESCRIPTOR' : _CLIENTMESSAGE_PARAMETERSRES,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ClientMessage.ParametersRes)
})
,
'FitRes' : _reflection.GeneratedProtocolMessageType('FitRes', (_message.Message,), {
'DESCRIPTOR' : _CLIENTMESSAGE_FITRES,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ClientMessage.FitRes)
})
,
'EvaluateRes' : _reflection.GeneratedProtocolMessageType('EvaluateRes', (_message.Message,), {
'DESCRIPTOR' : _CLIENTMESSAGE_EVALUATERES,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ClientMessage.EvaluateRes)
})
,
'DESCRIPTOR' : _CLIENTMESSAGE,
'__module__' : 'flower.proto.transport_pb2'
# @@protoc_insertion_point(class_scope:flower.transport.ClientMessage)
})
_sym_db.RegisterMessage(ClientMessage)
_sym_db.RegisterMessage(ClientMessage.Disconnect)
_sym_db.RegisterMessage(ClientMessage.ParametersRes)
_sym_db.RegisterMessage(ClientMessage.FitRes)
_sym_db.RegisterMessage(ClientMessage.EvaluateRes)
_SERVERMESSAGE_FITINS_CONFIGENTRY._options = None
_SERVERMESSAGE_EVALUATEINS_CONFIGENTRY._options = None
_FLOWERSERVICE = _descriptor.ServiceDescriptor(
name='FlowerService',
full_name='flower.transport.FlowerService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=1460,
serialized_end=1555,
methods=[
_descriptor.MethodDescriptor(
name='Join',
full_name='flower.transport.FlowerService.Join',
index=0,
containing_service=None,
input_type=_CLIENTMESSAGE,
output_type=_SERVERMESSAGE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_FLOWERSERVICE)
DESCRIPTOR.services_by_name['FlowerService'] = _FLOWERSERVICE
# @@protoc_insertion_point(module_scope)
| 39.494565 | 2,507 | 0.762901 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,963 | 0.273944 |
c36ddd7acdde8453a1b9743b8e731fb3b4051614 | 80 | py | Python | shopyo/__init__.py | rehmanis/shopyo2 | 3e26602149f09aa4c13a1b4b6fba296bd82af99f | [
"MIT"
]
| 2 | 2021-04-17T09:59:48.000Z | 2021-04-17T10:12:02.000Z | shopyo/__init__.py | rehmanis/shopyo2 | 3e26602149f09aa4c13a1b4b6fba296bd82af99f | [
"MIT"
]
| 15 | 2021-04-01T19:54:46.000Z | 2021-04-07T22:25:40.000Z | shopyo/__init__.py | rehmanis/shopyo2 | 3e26602149f09aa4c13a1b4b6fba296bd82af99f | [
"MIT"
]
| null | null | null | version_info = (4, 0, 1)
__version__ = ".".join([str(v) for v in version_info])
| 26.666667 | 54 | 0.65 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.0375 |
c36e4faa6f3051be3ca85cd0b16d04294152aa32 | 3,748 | py | Python | check_digit_calc.py | zhoffm/Check-Digit-Calculator | 5f86304901279678c74858811a452866785bd8f4 | [
"MIT"
]
| 1 | 2019-08-29T13:07:08.000Z | 2019-08-29T13:07:08.000Z | check_digit_calc.py | zhoffm/Check-Digit-Calculator | 5f86304901279678c74858811a452866785bd8f4 | [
"MIT"
]
| null | null | null | check_digit_calc.py | zhoffm/Check-Digit-Calculator | 5f86304901279678c74858811a452866785bd8f4 | [
"MIT"
]
| null | null | null | from random import randint
import pandas as pd
def random_11_digit_upc():
upc_string = ''.join(["%s" % randint(0, 9) for num in range(0, 11)])
print(upc_string)
return upc_string
# Class to calculate the check digit for 11-digit UPCs
class CheckDigitCalculations:
def __init__(self):
self.input_string = None
self.input_integer = None
self.odd_sum = None
self.odd_sum_times_3 = None
self.even_sum = None
self.new_sum = None
self.m = None
self.check_digit = None
def len_check(self):
if len(self.input_string) == 11:
return True
else:
return False
def check_integer(self):
try:
self.input_integer = int(self.input_string)
except ValueError:
print('The entered string is not exclusively numeric.')
# 1. Sum the digits at odd-numbered positions (first, third, fifth,..., eleventh).
def step_1(self):
self.odd_sum = sum(int(self.input_string[i]) for i, j in enumerate(self.input_string) if i % 2 == 0)
# 2. Multiply the result by 3.
def step_2(self):
self.odd_sum_times_3 = 3 * self.odd_sum
# 3. Add the digit sum at even-numbered positions (second, fourth, sixth,..., tenth) to the result.
def step_3(self):
self.even_sum = sum(int(self.input_string[i]) for i, j in enumerate(self.input_string) if i % 2 != 0)
self.new_sum = self.even_sum + self.odd_sum_times_3
# 4. Find the result modulo 10 (i.e. the remainder, when divided by 10) and call it M.
def step_4(self):
self.m = self.new_sum % 10
# 5. If M is zero, then the check digit is 0; otherwise the check digit is 10 − M.
def step_5(self):
if self.m == 0:
self.check_digit = 0
else:
self.check_digit = 10 - self.m
# Do all the steps! This runs all the previous steps.
def compute_check_digit(self, input_upc):
self.input_string = input_upc
if self.len_check():
self.step_1()
self.step_2()
self.step_3()
self.step_4()
self.step_5()
return self.check_digit
else:
return ''
def get_full_upc(self, input_upc):
self.input_string = input_upc
return self.input_string + str(self.compute_check_digit(input_upc))
class RawCSVProcessing(CheckDigitCalculations):
def __init__(self):
super().__init__()
self.input_file_path = None
self.input_file = None
self.output_file_path = None
self.output_file = None
self.upc_col = 'REFCODE'
self.upc_df = pd.DataFrame()
self.upc_list = None
self.updated_upcs = None
def read_file_into_df(self, input_file_path, input_file):
self.input_file_path = input_file_path
self.input_file = input_file
self.upc_df = pd.read_csv(
self.input_file_path + self.input_file,
dtype={self.upc_col: str}, na_filter=False,
usecols=['DESCRIPT', 'REFCODE']
)
def add_updated_upc_to_df(self):
self.upc_list = list(self.upc_df[self.upc_col])
self.updated_upcs = [(x + str(self.compute_check_digit(x))) for x in self.upc_list]
self.upc_df[self.upc_col] = self.updated_upcs
def write_upcs_to_csv(self, output_file_path, output_file):
self.output_file_path = output_file_path
self.output_file = output_file
self.upc_df.to_csv(self.output_file_path + self.output_file, index=False)
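# Editorial note on the intended call order of the CSV helper above (the file
# names are hypothetical):
#   proc = RawCSVProcessing()
#   proc.read_file_into_df('data/', 'items.csv')      # loads DESCRIPT/REFCODE columns
#   proc.add_updated_upc_to_df()                       # appends a check digit to each REFCODE
#   proc.write_upcs_to_csv('out/', 'items_upc12.csv')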
if __name__ == '__main__':
test_upc = random_11_digit_upc()
obj = CheckDigitCalculations()
print(obj.get_full_upc(test_upc))
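    # Editorial known-answer check: for the standard UPC-A example "03600029145"
    # the odd-position digits sum to 14 (x3 = 42), the even-position digits to 16,
    # 42 + 16 = 58, 58 % 10 = 8, so the check digit is 10 - 8 = 2.
    assert obj.get_full_upc("03600029145") == "036000291452"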
| 32.591304 | 109 | 0.627535 | 3,355 | 0.894667 | 0 | 0 | 0 | 0 | 0 | 0 | 583 | 0.155467 |
c36ea7dbd20120b593de7ef575a4b4b1a54e3de9 | 4,976 | py | Python | test/test_load.py | ramsdalesteve/forest | 12cac1b3dd93c4475a8a4f696c522576b44f16eb | [
"BSD-3-Clause"
]
| null | null | null | test/test_load.py | ramsdalesteve/forest | 12cac1b3dd93c4475a8a4f696c522576b44f16eb | [
"BSD-3-Clause"
]
| null | null | null | test/test_load.py | ramsdalesteve/forest | 12cac1b3dd93c4475a8a4f696c522576b44f16eb | [
"BSD-3-Clause"
]
| null | null | null | import yaml
import forest
from forest import main
def test_earth_networks_loader_given_pattern():
loader = forest.Loader.from_pattern("Label", "EarthNetworks*.txt", "earth_networks")
assert isinstance(loader, forest.earth_networks.Loader)
def test_build_loader_given_files():
"""replicate main.py as close as possible"""
files = ["file_20190101T0000Z.nc"]
args = main.parse_args.parse_args(files)
config = forest.config.from_files(args.files, args.file_type)
group = config.file_groups[0]
loader = forest.Loader.group_args(group, args)
assert isinstance(loader, forest.data.DBLoader)
assert loader.locator.paths == files
def test_build_loader_given_database(tmpdir):
"""replicate main.py as close as possible"""
database_file = str(tmpdir / "database.db")
config_file = str(tmpdir / "config.yml")
settings = {
"files": [
{
"label": "UM",
"pattern": "*.nc",
"locator": "database"
}
]
}
with open(config_file, "w") as stream:
yaml.dump(settings, stream)
args = main.parse_args.parse_args([
"--database", database_file,
"--config-file", config_file])
config = forest.config.load_config(args.config_file)
group = config.file_groups[0]
database = forest.db.Database.connect(database_file)
loader = forest.Loader.group_args(group, args, database=database)
database.close()
assert hasattr(loader.locator, "connection")
assert loader.locator.directory is None
def test_build_loader_given_database_and_directory(tmpdir):
database_file = str(tmpdir / "database.db")
config_file = str(tmpdir / "config.yml")
args = main.parse_args.parse_args([
"--database", database_file,
"--config-file", config_file])
label = "UM"
pattern = "*.nc"
directory = "/some/dir"
group = forest.config.FileGroup(
label,
pattern,
directory=directory,
locator="database")
database = forest.db.Database.connect(database_file)
loader = forest.Loader.group_args(group, args, database=database)
database.close()
assert hasattr(loader.locator, "connection")
assert loader.locator.directory == directory
def test_build_loader_given_config_file_pattern(tmpdir):
config_file = str(tmpdir / "config.yml")
path = str(tmpdir / "file_20190101T0000Z.nc")
with open(path, "w"):
pass
args = main.parse_args.parse_args([
"--config-file", config_file])
label = "UM"
pattern = "*.nc"
directory = str(tmpdir)
group = forest.config.FileGroup(
label,
pattern,
directory=directory,
locator="file_system")
loader = forest.Loader.group_args(group, args)
assert loader.locator.paths == [path]
def test_build_loader_given_eida50_file_type():
label = "EIDA50"
pattern = "eida50*.nc"
file_type = "eida50"
loader = forest.Loader.from_pattern(label, pattern, file_type)
assert isinstance(loader, forest.satellite.EIDA50)
assert isinstance(loader.locator, forest.satellite.Locator)
def test_build_loader_given_rdt_file_type():
loader = forest.Loader.from_pattern(
"Label", "*.json", "rdt")
assert isinstance(loader, forest.rdt.Loader)
assert isinstance(loader.locator, forest.rdt.Locator)
def test_replace_dir_given_args_dir_only():
check_replace_dir("args/dir", None, "args/dir")
def test_replace_dir_given_group_dir_only():
check_replace_dir(None, "group/dir", "group/dir")
def test_replace_dir_given_relative_group_dir_appends_to_args_dir():
check_replace_dir("args/dir", "leaf", "args/dir/leaf")
def test_replace_dir_given_absolute_group_dir_overrides_rel_args_dir():
check_replace_dir("args/relative", "/group/absolute", "/group/absolute")
def test_replace_dir_given_absolute_group_dir_overrides_abs_args_dir():
check_replace_dir("/args/absolute", "/group/absolute", "/group/absolute")
def check_replace_dir(args_dir, group_dir, expected):
actual = forest.Loader.replace_dir(args_dir, group_dir)
assert actual == expected
def test_full_pattern_given_name_only():
check_full_pattern("file.nc", None, None, "file.nc")
def test_full_pattern_given_relative_prefix_dir():
check_full_pattern("file.nc", None, "prefix", "prefix/file.nc")
def test_full_pattern_given_relative_leaf_and_prefix_dir():
check_full_pattern("file.nc", "leaf", "prefix", "prefix/leaf/file.nc")
def test_full_pattern_given_absolute_leaf_ignores_relative_prefix():
check_full_pattern("file.nc", "/leaf", "prefix", "/leaf/file.nc")
def test_full_pattern_given_absolute_leaf_ignores_absolute_prefix():
check_full_pattern("file.nc", "/leaf", "/prefix", "/leaf/file.nc")
def check_full_pattern(name, leaf, prefix, expected):
actual = forest.Loader.full_pattern(name, leaf, prefix)
assert actual == expected
| 32.103226 | 88 | 0.696744 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 840 | 0.16881 |
c3700515c9a1fa4d06df6da6a4d88cc145398124 | 481 | py | Python | tournaments/binarySearch/binarySearch.py | gurfinkel/codeSignal | 114817947ac6311bd53a48f0f0e17c0614bf7911 | [
"MIT"
]
| 5 | 2020-02-06T09:51:22.000Z | 2021-03-19T00:18:44.000Z | tournaments/binarySearch/binarySearch.py | gurfinkel/codeSignal | 114817947ac6311bd53a48f0f0e17c0614bf7911 | [
"MIT"
]
| null | null | null | tournaments/binarySearch/binarySearch.py | gurfinkel/codeSignal | 114817947ac6311bd53a48f0f0e17c0614bf7911 | [
"MIT"
]
| 3 | 2019-09-27T13:06:21.000Z | 2021-04-20T23:13:17.000Z | def binarySearch(inputArray, searchElement):
minIndex = -1
maxIndex = len(inputArray)
while minIndex < maxIndex - 1:
currentIndex = (minIndex + maxIndex) // 2
currentElement = inputArray[currentIndex]
if currentElement < searchElement:
minIndex = currentIndex
else:
maxIndex = currentIndex
if maxIndex == len(inputArray) or inputArray[maxIndex] != searchElement:
return -1
return maxIndex
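# Usage sketch (not part of the original solution): binarySearch expects a sorted
# inputArray and returns the index of searchElement, or -1 if it is absent.
if __name__ == "__main__":
    print(binarySearch([1, 3, 5, 7, 9], 7))  # 3  (found at index 3)
    print(binarySearch([1, 3, 5, 7, 9], 4))  # -1 (not present)
    print(binarySearch([], 4))               # -1 (empty array)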
| 26.722222 | 76 | 0.634096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c3704e5ac8ab23d0d2914d6aa73d29d45471acf6 | 4,309 | py | Python | swagger_server/models/rule.py | Capping-WAR/API | 981823732f2b4f8bc007da657d5195579eb7dad3 | [
"MIT"
]
| null | null | null | swagger_server/models/rule.py | Capping-WAR/API | 981823732f2b4f8bc007da657d5195579eb7dad3 | [
"MIT"
]
| 2 | 2019-09-24T23:45:34.000Z | 2019-10-11T20:06:54.000Z | swagger_server/models/rule.py | Capping-WAR/API | 981823732f2b4f8bc007da657d5195579eb7dad3 | [
"MIT"
]
| null | null | null | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class Rule(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, rule_id: int=None, rule_name: str=None, description: str=None, priority: int=None): # noqa: E501
"""Rule - a model defined in Swagger
:param rule_id: The rule_id of this Rule. # noqa: E501
:type rule_id: int
:param rule_name: The rule_name of this Rule. # noqa: E501
:type rule_name: str
:param description: The description of this Rule. # noqa: E501
:type description: str
:param priority: The priority of this Rule. # noqa: E501
:type priority: int
"""
self.swagger_types = {
'rule_id': int,
'rule_name': str,
'description': str,
'priority': int
}
self.attribute_map = {
'rule_id': 'ruleID',
'rule_name': 'ruleName',
'description': 'description',
'priority': 'priority'
}
self._rule_id = rule_id
self._rule_name = rule_name
self._description = description
self._priority = priority
@classmethod
def from_dict(cls, dikt) -> 'Rule':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The rule of this Rule. # noqa: E501
:rtype: Rule
"""
return util.deserialize_model(dikt, cls)
@property
def rule_id(self) -> int:
"""Gets the rule_id of this Rule.
Unique ID of the rule # noqa: E501
:return: The rule_id of this Rule.
:rtype: int
"""
return self._rule_id
@rule_id.setter
def rule_id(self, rule_id: int):
"""Sets the rule_id of this Rule.
Unique ID of the rule # noqa: E501
:param rule_id: The rule_id of this Rule.
:type rule_id: int
"""
self._rule_id = rule_id
@property
def rule_name(self) -> str:
"""Gets the rule_name of this Rule.
name of rule # noqa: E501
:return: The rule_name of this Rule.
:rtype: str
"""
return self._rule_name
@rule_name.setter
def rule_name(self, rule_name: str):
"""Sets the rule_name of this Rule.
name of rule # noqa: E501
:param rule_name: The rule_name of this Rule.
:type rule_name: str
"""
if rule_name is None:
raise ValueError("Invalid value for `rule_name`, must not be `None`") # noqa: E501
self._rule_name = rule_name
@property
def description(self) -> str:
"""Gets the description of this Rule.
description of rule # noqa: E501
:return: The description of this Rule.
:rtype: str
"""
return self._description
@description.setter
def description(self, description: str):
"""Sets the description of this Rule.
description of rule # noqa: E501
:param description: The description of this Rule.
:type description: str
"""
if description is None:
raise ValueError("Invalid value for `description`, must not be `None`") # noqa: E501
self._description = description
@property
def priority(self) -> int:
"""Gets the priority of this Rule.
        the current value it has towards the dataset, used to get more of a certain rule  # noqa: E501
:return: The priority of this Rule.
:rtype: int
"""
return self._priority
@priority.setter
def priority(self, priority: int):
"""Sets the priority of this Rule.
        the current value it has towards the dataset, used to get more of a certain rule  # noqa: E501
:param priority: The priority of this Rule.
:type priority: int
"""
if priority is None:
raise ValueError("Invalid value for `priority`, must not be `None`") # noqa: E501
self._priority = priority
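# Usage sketch (not part of the generated module): a Rule can be constructed
# directly with keyword arguments matching __init__ above; attribute_map maps the
# Python attribute names onto the JSON field names (e.g. rule_id <-> ruleID).
# The values below are invented for illustration.
if __name__ == "__main__":
    example_rule = Rule(rule_id=1, rule_name="clarity",
                        description="Reward clear sentences", priority=5)
    print(example_rule.rule_name, example_rule.priority)  # clarity 5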
| 27.44586 | 120 | 0.589464 | 4,070 | 0.944535 | 0 | 0 | 2,816 | 0.653516 | 0 | 0 | 2,522 | 0.585287 |
c371765c42e0c448d7d486fc65c3f350acc4e5ed | 864 | py | Python | Project1/mazes/gen_sparses.py | VFerrari/MC906 | b04d3df58ef56203882fc59c03874f92c0d223fe | [
"MIT"
]
| null | null | null | Project1/mazes/gen_sparses.py | VFerrari/MC906 | b04d3df58ef56203882fc59c03874f92c0d223fe | [
"MIT"
]
| null | null | null | Project1/mazes/gen_sparses.py | VFerrari/MC906 | b04d3df58ef56203882fc59c03874f92c0d223fe | [
"MIT"
]
| null | null | null | import os
import re
import numpy as np
# WARNING: this function overwrites the mazes in the sparse directory; don't run it
# as the idea is that everyone tests the same mazes
def gen_sparses(dir_path):
''' Randomly remove points from dense instances '''
pattern = re.compile('^([0-9]+[a-zA-Z]+)')
denses_fn = [x for x in os.listdir(dir_path + '/dense') if pattern.match(x)]
print(denses_fn)
for dense_fn in denses_fn:
sparse = np.genfromtxt(dir_path + '/dense/' + dense_fn, dtype='str', delimiter=1)
for r in range(0, len(sparse)):
for c in range(0, len(sparse[0])):
if sparse[r][c] == '.':
sparse[r][c] = ' ' if bool(np.random.choice(np.arange(0,2), p=[0.25,0.75])) else '.'
np.savetxt(dir_path + '/sparse/' + dense_fn, sparse, fmt='%s', delimiter='')
gen_sparses('.') | 34.56 | 102 | 0.605324 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 249 | 0.288194 |
c3718e6eac42b785991cffcfe402fff63a2a5da0 | 1,592 | py | Python | cryomem/cmtools/lib/jjivarray2.py | bebaek/cryomem | 088fba2568d10451adda51a068c15c8c2a73d9ce | [
"MIT"
]
| 1 | 2018-09-16T12:29:04.000Z | 2018-09-16T12:29:04.000Z | cryomem/cmtools/lib/jjivarray2.py | bebaek/cryomem | 088fba2568d10451adda51a068c15c8c2a73d9ce | [
"MIT"
]
| null | null | null | cryomem/cmtools/lib/jjivarray2.py | bebaek/cryomem | 088fba2568d10451adda51a068c15c8c2a73d9ce | [
"MIT"
]
| null | null | null | """
Analyze JJ IV curve array (core) v.2
BB, 2016
"""
import numpy as np
from . import jjiv2 as jjiv
import sys
def fit2rsj_arr(iarr, varr, **kwargs):
"""Fit IV array to 2 Ic RSJ model and return arrays of fit params, error.
Keyword arguments:
guess: array of (Ic+, Ic-, Rn, Vo)
io: fixed Io.
updateguess: guess update ratio 0 to 1
"""
if 'guess' in kwargs:
kwargs['guess'] = np.array(kwargs['guess']) # array type
update = kwargs.get('updateguess', 0.95)
n = len(iarr)
npopt = 4
popt_arr, pcov_arr = np.zeros((n, npopt)), np.zeros((n, npopt, npopt))
for k in range(n):
try:
done = False; l = 0
while not done:
# fit
popt, pcov = jjiv.fit2rsj(iarr[k], varr[k], **kwargs)
# update guess
if k == 0:
kwargs['guess'] = popt
else:
kwargs['guess'] = (1-update)*kwargs['guess'] + update*popt
# check if fit is good
l += 1
if np.shape(pcov)==(4,4):
perr = np.sqrt(np.diag(pcov))
else:
perr = (np.inf, np.inf, np.inf, np.inf)
if (np.amax(perr) < .05) or (l > 5):
done = True
popt_arr[k], pcov_arr[k] = popt, pcov
else:
print('Fit not good. Index: {}, Trial: {}'.format(k,l))
except RuntimeError:
print('Can\'t fit. Index: {}!'.format(k))
return popt_arr, pcov_arr
| 28.945455 | 78 | 0.478643 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 427 | 0.268216 |
c372b444a020f4105b4dff97edb032deea88f217 | 567 | py | Python | python/0122.py | garywei944/LeetCode | 77453b6e3329f3309ad61fe578cb7b608317ba1b | [
"MIT"
]
| null | null | null | python/0122.py | garywei944/LeetCode | 77453b6e3329f3309ad61fe578cb7b608317ba1b | [
"MIT"
]
| null | null | null | python/0122.py | garywei944/LeetCode | 77453b6e3329f3309ad61fe578cb7b608317ba1b | [
"MIT"
]
| null | null | null | from leetcode_tester import Tester
from typing import Optional, List
class Solution:
def maxProfit(self, prices: List[int]) -> int:
r = 0
for i in range(1, len(prices)):
if prices[i] > prices[i - 1]:
r += prices[i] - prices[i - 1]
return r
if __name__ == '__main__':
solution = Solution()
test = Tester(solution.maxProfit)
test.addTest(
[7, 1, 5, 3, 6, 4], 7
)
test.addTest(
[1, 2, 3, 4, 5], 4
)
test.addTest(
[7, 6, 4, 3, 1], 0
)
test.doTest()
| 19.551724 | 50 | 0.511464 | 226 | 0.398589 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.017637 |
c37355b23d392a1bb9299b5a5621376e2bdb4e8e | 1,406 | py | Python | dataset.py | songrotek/wechat_jump_end_to_end_train | 119e8a172bf31b70da1004c88567c41d3183711a | [
"MIT"
]
| 26 | 2018-01-10T12:23:54.000Z | 2018-02-24T06:31:34.000Z | dataset.py | floodsung/wechat_jump_end_to_end_train | 119e8a172bf31b70da1004c88567c41d3183711a | [
"MIT"
]
| 3 | 2018-06-20T17:28:31.000Z | 2018-07-03T13:35:36.000Z | dataset.py | songrotek/wechat_jump_end_to_end_train | 119e8a172bf31b70da1004c88567c41d3183711a | [
"MIT"
]
| 10 | 2018-01-11T12:42:42.000Z | 2018-03-12T04:51:35.000Z | import torch
import json
import os
from torch.utils.data import DataLoader,Dataset
import torchvision.transforms as transforms
from PIL import Image
import numpy as np
data_folder = "./dataset/images"
press_times = json.load(open("./dataset/dataset.json"))
image_roots = [os.path.join(data_folder,image_file) \
for image_file in os.listdir(data_folder)]
class JumpDataset(Dataset):
def __init__(self,transform = None):
self.image_roots = image_roots
self.press_times = press_times
self.transform = transform
def __len__(self):
return len(self.image_roots)
def __getitem__(self,idx):
image_root = self.image_roots[idx]
image_name = image_root.split("/")[-1]
image = Image.open(image_root)
image = image.convert('RGB')
image = image.resize((224,224), resample=Image.LANCZOS)
#image = np.array(image, dtype=np.float32)
if self.transform is not None:
image = self.transform(image)
press_time = self.press_times[image_name]
return image,press_time
def jump_data_loader():
normalize = transforms.Normalize(mean=[0.92206, 0.92206, 0.92206], std=[0.08426, 0.08426, 0.08426])
transform = transforms.Compose([transforms.ToTensor(),normalize])
dataset = JumpDataset(transform=transform)
return DataLoader(dataset,batch_size = 32,shuffle = True)
| 30.565217 | 103 | 0.687767 | 720 | 0.512091 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.065434 |
c373e158e091fc846ebe00cd19f68260787532ea | 921 | py | Python | grafana_backup/create_snapshot.py | Keimille/grafana-backup-tool | ea824c908c0b98ff934cfe3efdf90121b6edd49d | [
"MIT"
]
| 515 | 2016-06-16T20:01:30.000Z | 2022-03-29T03:03:24.000Z | grafana_backup/create_snapshot.py | Keimille/grafana-backup-tool | ea824c908c0b98ff934cfe3efdf90121b6edd49d | [
"MIT"
]
| 159 | 2016-12-06T03:06:58.000Z | 2022-03-17T16:10:40.000Z | grafana_backup/create_snapshot.py | Keimille/grafana-backup-tool | ea824c908c0b98ff934cfe3efdf90121b6edd49d | [
"MIT"
]
| 195 | 2016-07-19T06:00:13.000Z | 2022-03-09T05:58:32.000Z | import json
from grafana_backup.dashboardApi import create_snapshot
def main(args, settings, file_path):
grafana_url = settings.get('GRAFANA_URL')
http_post_headers = settings.get('HTTP_POST_HEADERS')
verify_ssl = settings.get('VERIFY_SSL')
client_cert = settings.get('CLIENT_CERT')
debug = settings.get('DEBUG')
with open(file_path, 'r') as f:
data = f.read()
snapshot = json.loads(data)
try:
snapshot['name'] = snapshot['dashboard']['title']
except KeyError:
snapshot['name'] = "Untitled Snapshot"
(status, content) = create_snapshot(json.dumps(snapshot), grafana_url, http_post_headers, verify_ssl, client_cert, debug)
if status == 200:
print("create snapshot: {0}, status: {1}, msg: {2}".format(snapshot['name'], status, content))
else:
print("creating snapshot {0} failed with status {1}".format(snapshot['name'], status))
| 35.423077 | 125 | 0.674267 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 219 | 0.237785 |
c37733d1ef97d9bfcb5fc78d09053dd294d1f132 | 1,928 | py | Python | examples/keras_ssd_example.py | jiayunhan/perceptron-benchmark | 39958a15e9f8bfa82938a3f81d4f216457744b22 | [
"Apache-2.0"
]
| 38 | 2019-06-10T04:19:42.000Z | 2022-02-15T05:21:23.000Z | examples/keras_ssd_example.py | jiayunhan/perceptron-benchmark | 39958a15e9f8bfa82938a3f81d4f216457744b22 | [
"Apache-2.0"
]
| 4 | 2019-07-30T19:00:23.000Z | 2019-09-26T01:35:05.000Z | examples/keras_ssd_example.py | jiayunhan/perceptron-benchmark | 39958a15e9f8bfa82938a3f81d4f216457744b22 | [
"Apache-2.0"
]
| 10 | 2019-06-10T05:45:33.000Z | 2021-04-22T08:33:28.000Z | """ Test case for Keras """
from perceptron.zoo.ssd_300.keras_ssd300 import SSD300
from perceptron.models.detection.keras_ssd300 import KerasSSD300Model
from perceptron.utils.image import load_image
from perceptron.benchmarks.brightness import BrightnessMetric
from perceptron.utils.criteria.detection import TargetClassMiss
from perceptron.utils.tools import bcolors
from perceptron.utils.tools import plot_image_objectdetection
# instantiate the model from keras applications
ssd300 = SSD300()
# initialize the KerasResNet50RetinaNetModel
kmodel = KerasSSD300Model(ssd300, bounds=(0, 255))
# get source image and label
# the model expects values in [0, 1], and channles_last
image = load_image(shape=(300, 300), bounds=(0, 255), fname='car.png')
metric = BrightnessMetric(kmodel, criterion=TargetClassMiss(7))
print(bcolors.BOLD + 'Process start' + bcolors.ENDC)
adversary = metric(image, unpack=False)
print(bcolors.BOLD + 'Process finished' + bcolors.ENDC)
if adversary.image is None:
print(bcolors.WARNING + 'Warning: Cannot find an adversary!' + bcolors.ENDC)
exit(-1)
################### print summary info #####################################
keywords = ['Keras', 'SSD300', 'TargetClassMiss', 'BrightnessMetric']
print(bcolors.HEADER + bcolors.UNDERLINE + 'Summary:' + bcolors.ENDC)
print('Configuration:' + bcolors.CYAN + ' --framework %s '
'--model %s --criterion %s '
'--metric %s' % tuple(keywords) + bcolors.ENDC)
print('Minimum perturbation required: %s' % bcolors.BLUE
+ str(adversary.distance) + bcolors.ENDC)
print('\n')
# print the original image and the adversary
plot_image_objectdetection(adversary, kmodel, bounds=(0, 255), title=", ".join(keywords), figname='examples/images/%s.png' % '_'.join(keywords))
| 41.021277 | 145 | 0.669087 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 612 | 0.317427 |
c377c853596a16e597f271a0e7e5269f859cd807 | 224 | py | Python | math/470.ImplementRand10UsingRand7.py | bzd111/leetcode | fd8db9ef52000cd4373f00a8f60d131caa5fe25d | [
"Apache-2.0"
]
| null | null | null | math/470.ImplementRand10UsingRand7.py | bzd111/leetcode | fd8db9ef52000cd4373f00a8f60d131caa5fe25d | [
"Apache-2.0"
]
| null | null | null | math/470.ImplementRand10UsingRand7.py | bzd111/leetcode | fd8db9ef52000cd4373f00a8f60d131caa5fe25d | [
"Apache-2.0"
]
| null | null | null | import sys
def rand7() -> int:
...
class Solution:
def rand10(self) -> int:
index = sys.maxsize
while index > 40:
index = 7 * (rand7() - 1) + rand7() - 1
return index % 10 + 1
| 16 | 51 | 0.482143 | 180 | 0.803571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c37854af006991db33cfa5319fe951302a09dbf2 | 164 | py | Python | segmentation/data/transforms/__init__.py | RajasekharChowdary9/panoptic-deeplab | 7645bc1cf51e3ebc85153666f26f8630a407b52b | [
"Apache-2.0"
]
| 506 | 2020-06-12T01:07:56.000Z | 2022-03-26T00:56:52.000Z | segmentation/data/transforms/__init__.py | RajasekharChowdary9/panoptic-deeplab | 7645bc1cf51e3ebc85153666f26f8630a407b52b | [
"Apache-2.0"
]
| 85 | 2020-06-12T04:51:31.000Z | 2022-03-23T16:19:44.000Z | segmentation/data/transforms/__init__.py | RajasekharChowdary9/panoptic-deeplab | 7645bc1cf51e3ebc85153666f26f8630a407b52b | [
"Apache-2.0"
]
| 102 | 2020-06-12T06:45:44.000Z | 2022-03-22T14:03:04.000Z | from .build import build_transforms
from .pre_augmentation_transforms import Resize
from .target_transforms import PanopticTargetGenerator, SemanticTargetGenerator
| 41 | 79 | 0.896341 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c379116efb10da15e4d433c54d3c5da28ac9b233 | 46,937 | py | Python | plasmapy/diagnostics/proton_radiography.py | MarikinPaulina/PlasmaPy | 9a9e4200981618fdfba4bd9347180b6cbe3040d7 | [
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
]
| null | null | null | plasmapy/diagnostics/proton_radiography.py | MarikinPaulina/PlasmaPy | 9a9e4200981618fdfba4bd9347180b6cbe3040d7 | [
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
]
| null | null | null | plasmapy/diagnostics/proton_radiography.py | MarikinPaulina/PlasmaPy | 9a9e4200981618fdfba4bd9347180b6cbe3040d7 | [
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
]
| null | null | null | """
Routines for the analysis of proton radiographs. These routines can be broadly
classified as either creating synthetic radiographs from prescribed fields or
methods of 'inverting' experimentally created radiographs to reconstruct the
original fields (under some set of assumptions).
"""
__all__ = [
"SyntheticProtonRadiograph",
]
import astropy.constants as const
import astropy.units as u
import numpy as np
import sys
import warnings
from tqdm import tqdm
from plasmapy import particles
from plasmapy.formulary.mathematics import rot_a_to_b
from plasmapy.particles import Particle
from plasmapy.plasma.grids import AbstractGrid
from plasmapy.simulation.particle_integrators import boris_push
def _coerce_to_cartesian_si(pos):
"""
Takes a tuple of `astropy.unit.Quantity` values representing a position
in space in either Cartesian, cylindrical, or spherical coordinates, and
returns a numpy array representing the same point in Cartesian
coordinates and units of meters.
"""
# Auto-detect geometry based on units
geo_units = [x.unit for x in pos]
if geo_units[2].is_equivalent(u.rad):
geometry = "spherical"
elif geo_units[1].is_equivalent(u.rad):
geometry = "cylindrical"
else:
geometry = "cartesian"
# Convert geometrical inputs between coordinates systems
pos_out = np.zeros(3)
if geometry == "cartesian":
x, y, z = pos
pos_out[0] = x.to(u.m).value
pos_out[1] = y.to(u.m).value
pos_out[2] = z.to(u.m).value
elif geometry == "cylindrical":
r, t, z = pos
r = r.to(u.m)
t = t.to(u.rad).value
z = z.to(u.m)
pos_out[0] = (r * np.cos(t)).to(u.m).value
pos_out[1] = (r * np.sin(t)).to(u.m).value
pos_out[2] = z.to(u.m).value
elif geometry == "spherical":
r, t, p = pos
r = r.to(u.m)
t = t.to(u.rad).value
p = p.to(u.rad).value
pos_out[0] = (r * np.sin(t) * np.cos(p)).to(u.m).value
pos_out[1] = (r * np.sin(t) * np.sin(p)).to(u.m).value
pos_out[2] = (r * np.cos(t)).to(u.m).value
return pos_out
class SyntheticProtonRadiograph:
r"""
Represents a charged particle radiography experiment with simulated or
calculated E and B fields given at positions defined by a grid of spatial
coordinates. The particle source and detector plane are defined by vectors
from the origin of the grid.
Parameters
----------
grid : `~plasmapy.plasma.grids.AbstractGrid` or subclass thereof
A Grid object containing the required quantities [E_x, E_y, E_z, B_x, B_y, B_z].
If any of these quantities are missing, a warning will be given and that
quantity will be assumed to be zero everywhere.
source : `~astropy.units.Quantity`, shape (3)
A vector pointing from the origin of the grid to the location
of the particle source. This vector will be interpreted as
being in either cartesian, cylindrical, or spherical coordinates
based on its units. Valid geometries are:
* Cartesian (x,y,z) : (meters, meters, meters)
* cylindrical (r, theta, z) : (meters, radians, meters)
* spherical (r, theta, phi) : (meters, radians, radians)
In spherical coordinates theta is the polar angle.
detector : `~astropy.units.Quantity`, shape (3)
A vector pointing from the origin of the grid to the center
of the detector plane. The vector from the source point to this
point defines the normal vector of the detector plane. This vector
can also be specified in cartesian, cylindrical, or spherical
coordinates (see the `source` keyword).
detector_hdir : `numpy.ndarray`, shape (3), optional
A unit vector (in Cartesian coordinates) defining the horizontal
direction on the detector plane. By default, the horizontal axis in the
detector plane is defined to be perpendicular to both the
source-to-detector vector and the z-axis (unless the source-to-detector axis
is parallel to the z axis, in which case the horizontal axis is the x-axis).
The detector vertical axis is then defined
to be orthogonal to both the source-to-detector vector and the
detector horizontal axis.
verbose : bool, optional
If true, updates on the status of the program will be printed
into the standard output while running.
"""
def __init__(
self,
grid: AbstractGrid,
source: u.m,
detector: u.m,
detector_hdir=None,
verbose=True,
):
# self.grid is the grid object
self.grid = grid
# self.grid_arr is the grid positions in si units. This is created here
# so that it isn't continously called later
self.grid_arr = grid.grid.to(u.m).value
self.verbose = verbose
# A list of wire meshes added to the grid with add_wire_mesh
# Particles that would hit these meshes will be removed at runtime
# by _apply_wire_mesh
self.mesh_list = []
# ************************************************************************
# Setup the source and detector geometries
# ************************************************************************
self.source = _coerce_to_cartesian_si(source)
self.detector = _coerce_to_cartesian_si(detector)
self._log(f"Source: {self.source} m")
self._log(f"Detector: {self.detector} m")
# Calculate normal vectors (facing towards the grid origin) for both
# the source and detector planes
self.src_n = -self.source / np.linalg.norm(self.source)
self.det_n = -self.detector / np.linalg.norm(self.detector)
# Vector directly from source to detector
self.src_det = self.detector - self.source
# Magnification
self.mag = 1 + np.linalg.norm(self.detector) / np.linalg.norm(self.source)
self._log(f"Magnification: {self.mag}")
# Check that source-detector vector actually passes through the grid
if not self.grid.vector_intersects(self.source * u.m, self.detector * u.m):
raise ValueError(
"The vector between the source and the detector "
"does not intersect the grid provided!"
)
# Determine the angle above which particles will not hit the grid
# these particles can be ignored until the end of the simulation,
# then immediately advanced to the detector grid with their original
# velocities
self.max_theta_hit_grid = self._max_theta_hit_grid()
# ************************************************************************
# Define the detector plane
# ************************************************************************
# Load or calculate the detector hdir
if detector_hdir is not None:
self.det_hdir = detector_hdir / np.linalg.norm(detector_hdir)
else:
self.det_hdir = self._default_detector_hdir()
# Calculate the detector vdir
ny = np.cross(self.det_hdir, self.det_n)
self.det_vdir = -ny / np.linalg.norm(ny)
# ************************************************************************
# Validate the E and B fields
# ************************************************************************
req_quantities = ["E_x", "E_y", "E_z", "B_x", "B_y", "B_z"]
self.grid.require_quantities(req_quantities, replace_with_zeros=True)
for rq in req_quantities:
# Check that there are no infinite values
if not np.isfinite(self.grid[rq].value).all():
raise ValueError(
f"Input arrays must be finite: {rq} contains "
"either NaN or infinite values."
)
# Check that the max values on the edges of the arrays are
# small relative to the maximum values on that grid
#
# Array must be dimensionless to re-assemble it into an array
# of max values like this
arr = np.abs(self.grid[rq]).value
edge_max = np.max(
np.array(
[
np.max(arr[0, :, :]),
np.max(arr[-1, :, :]),
np.max(arr[:, 0, :]),
np.max(arr[:, -1, :]),
np.max(arr[:, :, 0]),
np.max(arr[:, :, -1]),
]
)
)
if edge_max > 1e-3 * np.max(arr):
unit = grid.recognized_quantities[rq].unit
warnings.warn(
"Fields should go to zero at edges of grid to avoid "
f"non-physical effects, but a value of {edge_max:.2E} {unit} was "
f"found on the edge of the {rq} array. Consider applying a "
"envelope function to force the fields at the edge to go to "
"zero.",
RuntimeWarning,
)
def _default_detector_hdir(self):
"""
Calculates the default horizontal unit vector for the detector plane
(see __init__ description for details)
"""
# Create unit vectors that define the detector plane
# Define plane horizontal axis
if np.allclose(np.abs(self.det_n), np.array([0, 0, 1])):
nx = np.array([1, 0, 0])
else:
nx = np.cross(np.array([0, 0, 1]), self.det_n)
nx = nx / np.linalg.norm(nx)
return nx
def _max_theta_hit_grid(self):
r"""
Using the grid and the source position, compute the maximum particle
theta that will impact the grid. This value can be used to determine
which particles are worth tracking.
"""
ind = 0
theta = np.zeros([8])
for x in [0, -1]:
for y in [0, -1]:
for z in [0, -1]:
# Source to grid corner vector
vec = self.grid_arr[x, y, z, :] - self.source
# Calculate angle between vec and the source-to-detector
# axis, which is the central axis of the particle beam
theta[ind] = np.arccos(
np.dot(vec, self.src_det)
/ np.linalg.norm(vec)
/ np.linalg.norm(self.src_det)
)
ind += 1
return np.max(theta)
def _log(self, msg):
if self.verbose:
print(msg)
# Define some constants so they don't get constantly re-evaluated
_c = const.c.si.value
# *************************************************************************
# Create mesh
# *************************************************************************
def add_wire_mesh(
self, location, extent, nwires, wire_diameter, mesh_hdir=None, mesh_vdir=None
):
"""
Add a wire mesh grid between the particle source and the object grid
that blocks particles whose paths intersect the wires.
Parameters
----------
location : `~astropy.units.Quantity`, shape (3)
A vector pointing from the origin of the grid to the center of the
mesh grid. This location must be between the source and the
object grid.
This vector will be interpreted as
being in either cartesian, cylindrical, or spherical coordinates
based on its units. Valid geometries are:
* Cartesian (x,y,z) : (meters, meters, meters)
* cylindrical (r, theta, z) : (meters, radians, meters)
* spherical (r, theta, phi) : (meters, radians, radians)
In spherical coordinates theta is the polar angle.
extent : Tuple of 1 or 2 `~astropy.units.Quantity`
The size of the mesh grid (in the mesh plane). If one value
is provided, the mesh is circular and the value provided is
interpreted as the diameter. If two values are provided, the
            mesh is rectangular and the values are interpreted as the
width and height respectively.
nwires : Tuple of 1 or 2 ints, or a single int
The number of wires in the horizontal and vertical directions. If
only one value is provided, the number in the two directions is
assumed to be equal. Note that a wire will cross the center of the
mesh only when nwires is odd.
wire_diameter : `~astropy.units.Quantity`
The diameter of the wires.
mesh_hdir : `numpy.ndarray`, shape (3), optional
A unit vector (in Cartesian coordinates) defining the horizontal
direction on the mesh plane. Modifying this vector can rotate the
mesh in the plane or tilt the mesh plane relative to the
source-detector axis. By default, `mesh_hdir` is set equal to
`detector_hdir` (see `detector_hdir` keyword in `__init__`).
mesh_vdir : `numpy.ndarray`, shape (3), optional
A unit vector (in Cartesian coordinates) defining the vertical
direction on the mesh plane. Modifying this vector can tilt the
mesh relative to the source-detector axis. By default, `mesh_vdir`
is defined to be perpendicular to `mesh_hdir` and the detector
plane normal (such that the mesh is parallel to the detector plane).
Raises
------
ValueError
Raises a ValueError if the provided mesh location is not
between the source and the object grid.
"""
location = _coerce_to_cartesian_si(location)
wire_radius = wire_diameter.si.value / 2
if not isinstance(extent, tuple):
extent = (extent,)
if len(extent) == 1:
radius = 0.5 * extent[0].si.value
width = extent[0].si.value
height = extent[0].si.value
elif len(extent) == 2:
radius = None
width = extent[0].si.value
height = extent[1].si.value
else:
raise ValueError(
"extent must be a tuple of 1 or 2 elements, but "
f"{len(extent)} elements were provided."
)
if not isinstance(nwires, tuple):
nwires = (nwires,)
if len(nwires) != 2:
nwires = (nwires[0], nwires[0])
# If no hdir/vdir is specified, calculate a default value
# If one is specified, make sure it is normalized
if mesh_hdir is None:
# Re-calculate the default here, in case the user
# specified a different det_hdir
mesh_hdir = self._default_detector_hdir()
else:
mesh_hdir = mesh_hdir / np.linalg.norm(mesh_hdir)
if mesh_vdir is None:
mesh_vdir = np.cross(mesh_hdir, self.det_n)
mesh_vdir = -mesh_vdir / np.linalg.norm(mesh_vdir)
else:
mesh_vdir = mesh_vdir / np.linalg.norm(mesh_vdir)
# Raise exception if mesh is AFTER the field grid
if np.linalg.norm(location - self.source) > np.linalg.norm(self.source):
raise ValueError(
f"The specified mesh location, {location},"
"is not between the source and the origin."
)
mesh_entry = {
"location": location,
"wire_radius": wire_radius,
"radius": radius,
"width": width,
"height": height,
"nwires": nwires,
"mesh_hdir": mesh_hdir,
"mesh_vdir": mesh_vdir,
}
self.mesh_list.append(mesh_entry)
def _apply_wire_mesh(
self,
location=None,
wire_radius=None,
radius=None,
width=None,
height=None,
nwires=None,
mesh_hdir=None,
mesh_vdir=None,
):
"""
Apply wire meshes that were added to self.mesh_list
"""
x = self._coast_to_plane(location, mesh_hdir, mesh_vdir)
# Particle positions in 2D on the mesh plane
xloc = np.dot(x - location, mesh_hdir)
yloc = np.dot(x - location, mesh_vdir)
# Create an array in which True indicates that a particle has hit a wire
# and False indicates that it has not
hit = np.zeros(self.nparticles, dtype=bool)
# Mark particles that overlap vertical or horizontal position with a wire
h_centers = np.linspace(-width / 2, width / 2, num=nwires[0])
for c in h_centers:
hit |= np.isclose(xloc, c, atol=wire_radius)
v_centers = np.linspace(-height / 2, height / 2, num=nwires[1])
for c in v_centers:
hit |= np.isclose(yloc, c, atol=wire_radius)
# Put back any particles that are outside the mesh boundaries
# First handle the case where the mesh is rectangular
if radius is None:
# Replace particles outside the x-boundary
hit[
np.logical_or(
xloc > np.max(h_centers) + wire_radius,
xloc < np.min(h_centers) - wire_radius,
)
] = False
# Replace particles outside the y-boundary
hit[
np.logical_or(
yloc > np.max(v_centers) + wire_radius,
yloc < np.min(v_centers) - wire_radius,
)
] = False
# Handle the case where the mesh is circular
else:
loc_rad = np.sqrt(xloc ** 2 + yloc ** 2)
hit[loc_rad > radius] = False
# In the case of a circular mesh, also create a round wire along the
# outside edge
hit[np.isclose(loc_rad, radius, atol=wire_radius)] = True
# Identify the particles that have hit something, then remove them from
# all of the arrays
keep_these_particles = ~hit
number_kept_particles = keep_these_particles.sum()
nremoved = self.nparticles - number_kept_particles
if self.nparticles - nremoved <= 0:
raise ValueError(
"The specified mesh is blocking all of the particles. "
f"The wire diameter ({2*wire_radius}) may be too large."
)
self.x = self.x[keep_these_particles, :]
self.v = self.v[keep_these_particles, :]
self.theta = self.theta[
keep_these_particles
] # Important to apply here to get correct grid_ind
self.nparticles = number_kept_particles
# *************************************************************************
# Particle creation methods
# *************************************************************************
def _angles_monte_carlo(self):
"""
Generates angles for each particle randomly such that the flux
per solid angle is uniform.
"""
# Create a probability vector for the theta distribution
# Theta must follow a sine distribution in order for the particle
# flux per solid angle to be uniform.
arg = np.linspace(0, self.max_theta, num=int(1e5))
prob = np.sin(arg)
prob *= 1 / np.sum(prob)
# Randomly choose theta's weighted with the sine probabilities
theta = np.random.choice(arg, size=self.nparticles, replace=True, p=prob)
# Also generate a uniform phi distribution
phi = np.random.uniform(high=2 * np.pi, size=self.nparticles)
return theta, phi
def _angles_uniform(self):
"""
Generates angles for each particle such that their velocities are
uniformly distributed on a grid in theta and phi. This method
requires that `nparticles` be a perfect square. If it is not,
`nparticles` will be set as the largest perfect square smaller
than the provided `nparticles`.
"""
# Calculate the approximate square root
n_per = np.floor(np.sqrt(self.nparticles)).astype(np.int32)
# Set new nparticles to be a perfect square
self.nparticles = n_per ** 2
# Create an imaginary grid positioned 1 unit from the source
# and spanning max_theta at the corners
extent = np.sin(self.max_theta) / np.sqrt(2)
arr = np.linspace(-extent, extent, num=n_per)
harr, varr = np.meshgrid(arr, arr, indexing="ij")
# calculate the angles from the source for each point in
# the grid.
theta = np.arctan(np.sqrt(harr ** 2 + varr ** 2))
phi = np.arctan2(varr, harr)
return theta.flatten(), phi.flatten()
@particles.particle_input
def create_particles(
self,
nparticles,
particle_energy,
max_theta=None,
particle: Particle = Particle("p+"),
distribution="monte-carlo",
):
r"""
Generates the angular distributions about the Z-axis, then
rotates those distributions to align with the source-to-detector axis.
By default, particles are generated over almost the entire pi/2. However,
if the detector is far from the source, many of these particles will
never be observed. The max_theta keyword allows these extraneous
particles to be neglected to focus computational resources on the
particles who will actually hit the detector.
nparticles : integer
The number of particles to include in the simulation. The default
is 1e5.
particle_energy : `~astropy.units.Quantity`
The energy of the particle, in units convertible to eV.
All particles are given the same energy.
max_theta : `~astropy.units.Quantity`, optional
The largest velocity vector angle (measured from the
source-to-detector axis) for which particles should be generated.
Decreasing this angle can eliminate particles that would never
reach the detector region of interest. If no value is given, a
guess will be made based on the size of the grid.
Units must be convertible to radians.
particle : ~plasmapy.particles.Particle or string representation of same, optional
Representation of the particle species as either a `Particle` object
or a string representation. The default particle is protons.
distribution: str
A keyword which determines how particles will be distributed
in velocity space. Options are:
- 'monte-carlo': velocities will be chosen randomly,
such that the flux per solid angle is uniform.
            - 'uniform': velocities will be distributed such that,
                left unperturbed, they will form a uniform pattern
on the detection plane. This method
requires that `nparticles` be a perfect square. If it is not,
`nparticles` will be set as the largest perfect square smaller
than the provided `nparticles`.
Simulations run in the `uniform` mode will imprint a grid pattern
on the image, but will well-sample the field grid with a
smaller number of particles. The default is `monte-carlo`
"""
self._log("Creating Particles")
# Load inputs
self.nparticles = int(nparticles)
self.particle_energy = particle_energy.to(u.eV).value
self.q = particle.charge.to(u.C).value
self.m = particle.mass.to(u.kg).value
# If max_theta is not specified, make a guess based on the grid size
if max_theta is None:
self.max_theta = np.clip(
1.5 * self.max_theta_hit_grid, 0.01, 0.99 * np.pi / 2
)
else:
self.max_theta = max_theta.to(u.rad).value
# Calculate the velocity corresponding to the particle energy
ER = self.particle_energy * 1.6e-19 / (self.m * self._c ** 2)
v0 = self._c * np.sqrt(1 - 1 / (ER + 1) ** 2)
if distribution == "monte-carlo":
theta, phi = self._angles_monte_carlo()
elif distribution == "uniform":
theta, phi = self._angles_uniform()
# Temporarily save theta to later determine which particles
# should be tracked
self.theta = theta
# Construct the velocity distribution around the z-axis
self.v = np.zeros([self.nparticles, 3])
self.v[:, 0] = v0 * np.sin(theta) * np.cos(phi)
self.v[:, 1] = v0 * np.sin(theta) * np.sin(phi)
self.v[:, 2] = v0 * np.cos(theta)
# Calculate the rotation matrix that rotates the z-axis
# onto the source-detector axis
a = np.array([0, 0, 1])
b = self.detector - self.source
rot = rot_a_to_b(a, b)
# Apply rotation matrix to calculated velocity distribution
self.v = np.matmul(self.v, rot)
# Place particles at the source
self.x = np.tile(self.source, (self.nparticles, 1))
@particles.particle_input
def load_particles(
self, x, v, particle: Particle = Particle("p+"),
):
r"""
Load arrays of particle positions and velocities
x : `~astropy.units.Quantity`, shape (N,3)
Positions for N particles
v: `~astropy.units.Quantity`, shape (N,3)
Velocities for N particles
particle : ~plasmapy.particles.Particle or string representation of same, optional
Representation of the particle species as either a `Particle` object
or a string representation. The default particle is protons.
distribution: str
A keyword which determines how particles will be distributed
in velocity space. Options are:
- 'monte-carlo': velocities will be chosen randomly,
such that the flux per solid angle is uniform.
            - 'uniform': velocities will be distributed such that,
                left unperturbed, they will form a uniform pattern
on the detection plane.
Simulations run in the `uniform` mode will imprint a grid pattern
on the image, but will well-sample the field grid with a
smaller number of particles. The default is `monte-carlo`
"""
self.q = particle.charge.to(u.C).value
self.m = particle.mass.to(u.kg).value
if x.shape[0] != v.shape[0]:
raise ValueError(
"Provided x and v arrays have inconsistent numbers "
" of particles "
f"({x.shape[0]} and {v.shape[0]} respectively)."
)
else:
self.nparticles = x.shape[0]
self.x = x.to(u.m).value
self.v = v.to(u.m / u.s).value
self.theta = np.arccos(
np.inner(self.v, self.src_n) / np.linalg.norm(self.v, axis=-1)
)
n_wrong_way = np.sum(np.where(self.theta > np.pi / 2, 1, 0))
if n_wrong_way > 1:
warnings.warn(
f"{100*n_wrong_way/self.nparticles:.2f}% of particles "
"initialized are heading away from the grid. Check the orientation "
" of the provided velocity vectors.",
RuntimeWarning,
)
# *************************************************************************
# Run/push loop methods
# *************************************************************************
def _adaptive_dt(self, Ex, Ey, Ez, Bx, By, Bz):
r"""
Calculate the appropriate dt based on a number of considerations
including the local grid resolution (ds) and the gyroperiod of the
particles in the current fields.
"""
# If dt was explicitly set, skip the rest of this function
if self.dt.size == 1:
return self.dt
# Compute the timestep indicated by the grid resolution
ds = self.grid.grid_resolution.to(u.m).value
gridstep = 0.5 * (np.min(ds) / self.vmax)
# If not, compute a number of possible timesteps
# Compute the cyclotron gyroperiod
Bmag = np.max(np.sqrt(Bx ** 2 + By ** 2 + Bz ** 2)).to(u.T).value
# Compute the gyroperiod
if Bmag == 0:
gyroperiod = np.inf
else:
gyroperiod = 2 * np.pi * self.m / (self.q * np.max(Bmag))
# TODO: introduce a minimum timestep based on electric fields too!
# Create an array of all the possible time steps we computed
candidates = np.array([gyroperiod / 12, gridstep])
# Enforce limits on dt
candidates = np.clip(candidates, self.dt[0], self.dt[1])
# dt is the min of the remaining candidates
return np.min(candidates)
def _coast_to_grid(self):
r"""
Coasts all particles to the timestep when the first particle should
be entering the grid. Doing in this in one step (rather than pushing
the particles through zero fields) saves computation time.
"""
# Distance from the source to the nearest gridpoint
dist = np.min(np.linalg.norm(self.grid_arr - self.source, axis=3))
# Find the particle with the highest speed towards the grid
vmax = np.max(np.dot(self.v, self.src_n))
# Time for fastest possible particle to reach the grid.
t = dist / vmax
# Coast the particles to the advanced position
self.x = self.x + self.v * t
def _coast_to_plane(self, center, hdir, vdir, x=None):
"""
Calculates the positions where the current trajectories of each
particle impact a plane, described by the plane's center and
horizontal and vertical unit vectors.
Returns an [nparticles, 3] array of the particle positions in the plane
By default this function does not alter self.x. The optional keyword
x can be used to pass in an output array that will used to hold
the positions in the plane. This can be used to directly update self.x
as follows:
self._coast_to_plane(self.detector, self.det_hdir, self.det_vdir, x = self.x)
"""
normal = np.cross(hdir, vdir)
# Calculate the time required to evolve each particle into the
# plane
t = np.inner(center[np.newaxis, :] - self.x, normal) / np.inner(self.v, normal)
# Calculate particle positions in the plane
if x is None:
# If no output array is provided, preallocate
x = np.empty_like(self.x)
x[...] = self.x + self.v * t[:, np.newaxis]
# Check that all points are now in the plane
# (Eq. of a plane is nhat*x + d = 0)
plane_eq = np.dot(x - center, normal)
assert np.allclose(plane_eq, 0, atol=1e-6)
return x
def _remove_deflected_particles(self):
r"""
Removes any particles that have been deflected away from the detector
plane (eg. those that will never hit the grid)
"""
dist_remaining = np.dot(self.x, self.det_n) + np.linalg.norm(self.detector)
v_towards_det = np.dot(self.v, -self.det_n)
# If particles have not yet reached the detector plane and are moving
# away from it, they will never reach the detector.
# So, we can remove them from the arrays
# Find the indices of all particles that we should keep:
# i.e. those still moving towards the detector.
ind = np.logical_not((v_towards_det < 0) & (dist_remaining > 0)).nonzero()[0]
# Drop the other particles
self.x = self.x[ind, :]
self.v = self.v[ind, :]
self.v_init = self.v_init[ind, :]
self.nparticles_grid = self.x.shape[0]
# Store the number of particles deflected
self.fract_deflected = (self.nparticles - ind.size) / self.nparticles
# Warn the user if a large number of particles are being deflected
if self.fract_deflected > 0.05:
warnings.warn(
f"{100*self.fract_deflected:.1f}% particles have been "
"deflected away from the detector plane. The fields "
"provided may be too high to successfully radiograph "
"with this particle energy.",
RuntimeWarning,
)
def _push(self):
r"""
Advance particles using an implementation of the time-centered
Boris algorithm
"""
# Get a list of positions (input for interpolator)
pos = self.x[self.grid_ind, :] * u.m
# Update the list of particles on and off the grid
self.on_grid = self.grid.on_grid(pos)
# entered_grid is zero at the end if a particle has never
# entered the grid
self.entered_grid += self.on_grid
# Estimate the E and B fields for each particle
# Note that this interpolation step is BY FAR the slowest part of the push
# loop. Any speed improvements will have to come from here.
if self.field_weighting == "volume averaged":
Ex, Ey, Ez, Bx, By, Bz = self.grid.volume_averaged_interpolator(
pos, "E_x", "E_y", "E_z", "B_x", "B_y", "B_z", persistent=True,
)
elif self.field_weighting == "nearest neighbor":
Ex, Ey, Ez, Bx, By, Bz = self.grid.nearest_neighbor_interpolator(
pos, "E_x", "E_y", "E_z", "B_x", "B_y", "B_z", persistent=True,
)
# Create arrays of E and B as required by push algorithm
E = np.array(
[Ex.to(u.V / u.m).value, Ey.to(u.V / u.m).value, Ez.to(u.V / u.m).value]
)
E = np.moveaxis(E, 0, -1)
B = np.array([Bx.to(u.T).value, By.to(u.T).value, Bz.to(u.T).value])
B = np.moveaxis(B, 0, -1)
# Calculate the adaptive timestep from the fields currently experienced
# by the particles
        # If user sets dt explicitly, that's handled in _adaptive_dt
dt = self._adaptive_dt(Ex, Ey, Ez, Bx, By, Bz)
# TODO: Test v/c and implement relativistic Boris push when required
# vc = np.max(v)/_c
x = self.x[self.grid_ind, :]
v = self.v[self.grid_ind, :]
boris_push(x, v, B, E, self.q, self.m, dt)
self.x[self.grid_ind, :] = x
self.v[self.grid_ind, :] = v
def _stop_condition(self):
r"""
The stop condition is that most of the particles have entered the grid
and almost all have now left it.
"""
# Count the number of particles who have entered, which is the
# number of non-zero entries in entered_grid
self.num_entered = np.nonzero(self.entered_grid)[0].size
# How many of the particles have entered the grid
self.fract_entered = np.sum(self.num_entered) / self.nparticles_grid
# Of the particles that have entered the grid, how many are currently
# on the grid?
# if/else avoids dividing by zero
if np.sum(self.num_entered) > 0:
still_on = np.sum(self.on_grid) / np.sum(self.num_entered)
else:
still_on = 0.0
if self.fract_entered > 0.1 and still_on < 0.001:
# Warn user if < 10% of the particles ended up on the grid
if self.num_entered < 0.1 * self.nparticles:
warnings.warn(
f"Only {100*self.num_entered/self.nparticles:.2f}% of "
"particles entered the field grid: consider "
"decreasing the max_theta to increase this "
"number.",
RuntimeWarning,
)
return True
else:
return False
def run(
self, dt=None, field_weighting="volume averaged",
):
r"""
Runs a particle-tracing simulation.
Timesteps are adaptively calculated based on the
local grid resolution of the particles and the electric and magnetic
fields they are experiencing. After all particles
have left the grid, they are advanced to the
detector plane where they can be used to construct a synthetic
diagnostic image.
Parameters
----------
dt : `~astropy.units.Quantity`, optional
            An explicitly set timestep in units convertible to seconds.
Setting this optional keyword overrules the adaptive time step
capability and forces the use of this timestep throughout. If a tuple
            of timesteps is provided, the adaptive timestep will be clamped
between the first and second values.
field_weighting : str
String that selects the field weighting algorithm used to determine
what fields are felt by the particles. Options are:
* 'nearest neighbor': Particles are assigned the fields on
the grid vertex closest to them.
* 'volume averaged' : The fields experienced by a particle are a
volume-average of the eight grid points surrounding them.
The default is 'volume averaged'.
Returns
-------
None.
"""
# Load and validate inputs
field_weightings = ["volume averaged", "nearest neighbor"]
if field_weighting in field_weightings:
self.field_weighting = field_weighting
else:
raise ValueError(
f"{field_weighting} is not a valid option for ",
"field_weighting. Valid choices are",
f"{field_weightings}",
)
if dt is None:
# Set dt as an infinite range by default (auto dt with no restrictions)
self.dt = np.array([0.0, np.inf]) * u.s
else:
self.dt = dt
self.dt = (self.dt).to(u.s).value
# Check to make sure particles have already been generated
if not hasattr(self, "x"):
raise ValueError(
"Either the create_particles or load_particles method must be "
"called before running the particle tracing algorithm."
)
# If meshes have been added, apply them now
for mesh in self.mesh_list:
self._apply_wire_mesh(**mesh)
# Store a copy of the initial velocity distribution in memory
# This will be used later to calculate the maximum deflection
self.v_init = np.copy(self.v)
# Calculate the maximum velocity
# Used for determining the grid crossing maximum timestep
self.vmax = np.max(np.linalg.norm(self.v, axis=-1))
# Determine which particles should be tracked
# This array holds the indices of all particles that WILL hit the grid
# Only these particles will actually be pushed through the fields
self.grid_ind = np.where(self.theta < self.max_theta_hit_grid)[0]
self.nparticles_grid = len(self.grid_ind)
self.fract_tracked = self.nparticles_grid / self.nparticles
        # Create flags for tracking particles during the simulation
        # on_grid -> zero if the particle is off the grid, one if it is on the grid
self.on_grid = np.zeros([self.nparticles_grid])
# Entered grid -> non-zero if particle EVER entered the grid
self.entered_grid = np.zeros([self.nparticles_grid])
# Generate a null distribution of points (the result in the absence of
# any fields) for statistical comparison
self.x0 = self._coast_to_plane(self.detector, self.det_hdir, self.det_vdir)
        # Advance the particles to near the start of the grid
self._coast_to_grid()
# Initialize a "progress bar" (really more of a meter)
# Setting sys.stdout lets this play nicely with regular print()
pbar = tqdm(
initial=0,
total=self.nparticles_grid + 1,
disable=not self.verbose,
desc="Particles on grid",
unit="particles",
bar_format="{l_bar}{bar}{n:.1e}/{total:.1e} {unit}",
file=sys.stdout,
)
# Push the particles until the stop condition is satisfied
# (no more particles on the simulation grid)
while not self._stop_condition():
n_on_grid = np.sum(self.on_grid)
pbar.n = n_on_grid
pbar.last_print_n = n_on_grid
pbar.update()
self._push()
pbar.close()
# Remove particles that will never reach the detector
self._remove_deflected_particles()
# Advance the particles to the image plane
self._coast_to_plane(self.detector, self.det_hdir, self.det_vdir, x=self.x)
# Log a summary of the run
self._log("Run completed")
self._log("Fraction of particles tracked: " f"{self.fract_tracked*100:.1f}%")
self._log(
"Fraction of tracked particles that entered the grid: "
f"{self.fract_entered*100:.1f}%"
)
self._log(
"Fraction of tracked particles deflected away from the "
"detector plane: "
f"{self.fract_deflected*100}%"
)
@property
def max_deflection(self):
"""
The maximum deflection experienced by one of the particles, determined
        by comparing their initial and final velocity vectors.
This value can be used to determine the charged particle radiography regime
using the dimensionless number defined by Kugland et al. 2012
Returns
-------
max_deflection : float
The maximum deflection in radians
"""
# Normalize the initial and final velocities
v_norm = self.v / np.linalg.norm(self.v, axis=1, keepdims=True)
v_init_norm = self.v_init / np.linalg.norm(self.v_init, axis=1, keepdims=True)
# Compute the dot product
proj = np.sum(v_norm * v_init_norm, axis=1)
# In case of numerical errors, make sure the output is within the domain of
# arccos
proj = np.where(proj > 1, 1, proj)
max_deflection = np.max(np.arccos(proj))
return max_deflection * u.rad
# *************************************************************************
# Synthetic diagnostic methods (creating output)
# *************************************************************************
def synthetic_radiograph(
self, size=None, bins=[200, 200], ignore_grid=False, optical_density=False
):
r"""
Calculate a "synthetic radiograph" (particle count histogram in the
image plane).
Parameters
----------
size : `~astropy.units.Quantity`, shape (2,2)
The size of the detector array, specified as the minimum
and maximum values included in both the horizontal and vertical
directions in the detector plane coordinates. Shape is
            [[hmin,hmax], [vmin, vmax]]. Units must be convertible to meters.
bins : array of integers, shape (2)
The number of bins in each direction in the format [hbins, vbins].
The default is [200,200].
ignore_grid: bool
If True, returns the intensity in the image plane in the absence
of simulated fields.
optical_density: bool
If True, return the optical density rather than the intensity
.. math::
OD = -log_{10}(Intensity/I_0)
            where I_0 is the intensity on the detector plane in the absence of
simulated fields. Default is False.
Returns
-------
hax : `~astropy.units.Quantity` array shape (hbins,)
The horizontal axis of the synthetic radiograph in meters.
vax : `~astropy.units.Quantity` array shape (vbins, )
The vertical axis of the synthetic radiograph in meters.
intensity : ndarray, shape (hbins, vbins)
The number of particles counted in each bin of the histogram.
"""
# Note that, at the end of the simulation, all particles were moved
# into the image plane.
# If ignore_grid is True, use the predicted positions in the absence of
# simulated fields
if ignore_grid:
x = self.x0
else:
x = self.x
# Determine locations of points in the detector plane using unit
# vectors
xloc = np.dot(x - self.detector, self.det_hdir)
yloc = np.dot(x - self.detector, self.det_vdir)
if size is None:
# If a detector size is not given, choose lengths based on the
# dimensions of the grid
w = self.mag * np.max(
[
np.max(np.abs(self.grid.pts0.to(u.m).value)),
np.max(np.abs(self.grid.pts1.to(u.m).value)),
np.max(np.abs(self.grid.pts2.to(u.m).value)),
]
)
# The factor of 5 here is somewhat arbitrary: we just want a
# region a few times bigger than the image of the grid on the
# detector, since particles could be deflected out
size = 5 * np.array([[-w, w], [-w, w]]) * u.m
# Generate the histogram
intensity, h, v = np.histogram2d(
xloc, yloc, range=size.to(u.m).value, bins=bins
)
# h, v are the bin edges: compute the centers to produce arrays
# of the right length (then trim off the extra point)
h = ((h + np.roll(h, -1)) / 2)[0:-1]
v = ((v + np.roll(v, -1)) / 2)[0:-1]
# Throw a warning if < 50% of the particles are included on the
# histogram
percentage = np.sum(intensity) / self.nparticles
if percentage < 0.5:
warnings.warn(
f"Only {percentage:.2%} of the particles are shown "
"on this synthetic radiograph. Consider increasing "
"the size to include more.",
RuntimeWarning,
)
if optical_density:
# Generate the null radiograph
x, y, I0 = self.synthetic_radiograph(size=size, bins=bins, ignore_grid=True)
# Calculate I0 as the mean of the non-zero values in the null
# histogram. Zeros are just outside of the illuminate area.
I0 = np.mean(I0[I0 != 0])
# Overwrite any zeros in intensity to avoid log10(0)
intensity[intensity == 0] = 1
# Calculate the optical_density
intensity = -np.log10(intensity / I0)
return h * u.m, v * u.m, intensity
| 38.919569 | 90 | 0.582994 | 44,786 | 0.954173 | 0 | 0 | 7,697 | 0.163986 | 0 | 0 | 26,555 | 0.565758 |
c37a40407f09301be18f33044c4803950764471c | 924 | py | Python | polyengine/switch_start.py | AkanshDivker/polyengine | f81e1ef68d92470b51888db1d0c693b6d8c6b45f | [
"MIT"
]
| 5 | 2020-04-11T23:56:13.000Z | 2021-05-22T09:09:36.000Z | polyengine/switch_start.py | AkanshDivker/polyengine | f81e1ef68d92470b51888db1d0c693b6d8c6b45f | [
"MIT"
]
| 4 | 2019-10-29T07:17:36.000Z | 2019-11-27T05:36:01.000Z | polyengine/switch_start.py | AkanshDivker/polyengine | f81e1ef68d92470b51888db1d0c693b6d8c6b45f | [
"MIT"
]
| 2 | 2020-10-29T14:03:09.000Z | 2021-01-01T07:53:16.000Z | # switch_start.py
# Adding another switch statement
# Authors : Seoyeon Hwang
import string
import random
class Switch_Start:
def __init__(self, str):
self.string = str
def insert_switch(self, str):
#generate random variable
_LENGTH = 11
string_pool = string.ascii_letters + string.digits
num_pool = string.digits
var1 = random.choice(string.ascii_letters)
for i in range(_LENGTH):
var1 += random.choice(string_pool)
#writing another switch statement
first = "{int "
case0 = "switch (0) { case 0:"
case1 = "; case 1:"
case2 = "; case 2:"
case3 = "; case 3:"
last = "; }}"
result = str + first + var1 + "="+random.choice(num_pool)+";" + case0 + var1 + "++" + case1 + var1 + "--" + case2 + var1 + "++" + case3 + var1 + "--" + last
return result | 30.8 | 165 | 0.548701 | 809 | 0.875541 | 0 | 0 | 0 | 0 | 0 | 0 | 228 | 0.246753 |
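# Usage sketch (not part of the original class): insert_switch appends a dummy
# C-style switch block (built around a random 12-character variable) to the
# string it is given, e.g.
#   obfuscated = Switch_Start("").insert_switch("void f() ")
#   # -> 'void f() {int <var>=<digit>;switch (0) { case 0:<var>++; case 1:<var>--; ...; }}'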
c37d3cf95d24a23185d3d7d87e99934db95b537d | 5,494 | py | Python | focal_mech/demo/test6.py | blasscoc/FocalMechClassifier | 8d54d5a19cea447c030ada596369e47e7f39d483 | [
"MIT"
]
| 12 | 2016-05-31T04:18:13.000Z | 2021-10-09T06:45:43.000Z | focal_mech/demo/test6.py | blasscoc/FocalMechClassifier | 8d54d5a19cea447c030ada596369e47e7f39d483 | [
"MIT"
]
| 2 | 2019-08-09T20:30:26.000Z | 2021-02-09T02:14:04.000Z | focal_mech/demo/test6.py | blasscoc/FocalMechClassifier | 8d54d5a19cea447c030ada596369e47e7f39d483 | [
"MIT"
]
| 7 | 2016-08-06T03:13:24.000Z | 2021-09-26T14:39:41.000Z | from numpy import array, rad2deg, pi, mgrid, argmin
from matplotlib.pylab import contour
import matplotlib.pyplot as plt
import mplstereonet
from obspy.imaging.beachball import aux_plane
from focal_mech.lib.classify_mechanism import classify, translate_to_sphharm
from focal_mech.io.read_hash import read_demo, read_hash_solutions
from focal_mech.util.hash_routines import hash_to_classifier
from focal_mech.lib.sph_harm import get_sph_harm
from focal_mech.lib.correlate import corr_shear
hash_solns = read_hash_solutions("example1.out")
# we want solutions that are symmetric
polarity_data = read_demo("north1.phase", "scsn.reverse", reverse=True)
inputs = hash_to_classifier(polarity_data, parity=1)
event = 3146815
result = classify(*inputs[event], kernel_degree=2)
Alm = translate_to_sphharm(*result, kernel_degree=2)
coeffs = array([Alm[0,0],
Alm[1,-1], Alm[1,0], Alm[1,1],
Alm[2,-2], Alm[2,-1], Alm[2,0], Alm[2,1], Alm[2,2]])
svm_soln, f = corr_shear(Alm)
resolution = (200,400)
longi, lati, Z = get_sph_harm(resolution=resolution)
mech = coeffs.dot(Z).real
longi.shape = resolution
lati.shape = resolution
mech.shape = resolution
c = contour(longi, lati, mech, [0])
pth1 = c.collections[0].get_paths()[0].vertices
pth1 = rad2deg(pth1)
pth2 = c.collections[0].get_paths()[1].vertices
pth2 = rad2deg(pth2)
hash_focal = rad2deg(hash_solns[event])
event2 = 3158361
result = classify(*inputs[event2], kernel_degree=2)
Alm = translate_to_sphharm(*result, kernel_degree=2)
coeffs = array([Alm[0,0],
Alm[1,-1], Alm[1,0], Alm[1,1],
Alm[2,-2], Alm[2,-1], Alm[2,0], Alm[2,1], Alm[2,2]])
svm_soln2, f = corr_shear(Alm)
resolution = (200,400)
longi, lati, Z = get_sph_harm(resolution=resolution)
mech = coeffs.dot(Z).real
longi.shape = resolution
lati.shape = resolution
mech.shape = resolution
c = contour(longi, lati, mech, [0])
pth3 = c.collections[0].get_paths()[0].vertices
pth3 = rad2deg(pth3)
pth4 = c.collections[0].get_paths()[1].vertices
pth4 = rad2deg(pth4)
hash_focal2 = rad2deg(hash_solns[event2])
event3 = 3153955
result = classify(*inputs[event3], kernel_degree=2)
Alm = translate_to_sphharm(*result, kernel_degree=2)
coeffs = array([Alm[0,0],
Alm[1,-1], Alm[1,0], Alm[1,1],
Alm[2,-2], Alm[2,-1], Alm[2,0], Alm[2,1], Alm[2,2]])
svm_soln3, f = corr_shear(Alm)
resolution = (200,400)
longi, lati, Z = get_sph_harm(resolution=resolution)
mech = coeffs.dot(Z).real
longi.shape = resolution
lati.shape = resolution
mech.shape = resolution
c = contour(longi, lati, mech, [0])
pth5 = c.collections[0].get_paths()[0].vertices
pth5 = rad2deg(pth5)
pth6 = c.collections[0].get_paths()[1].vertices
pth6 = rad2deg(pth6)
hash_focal3 = rad2deg(hash_solns[event3])
fig = plt.figure(facecolor="white", figsize=(10,20))
ax = fig.add_subplot(221, projection='stereonet')
ax.rake(pth1[:,0], pth1[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
ax.rake(pth2[:,0], pth2[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
strike, dip, rake = svm_soln
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = aux_plane(*svm_soln)
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = hash_focal
ax.plane(strike-90, dip, 'g-', linewidth=2)
strike, dip, rake = aux_plane(*hash_focal)
ax.plane(strike-90, dip,'g-', linewidth=2)
azi = rad2deg(polarity_data[event][:,0])
toa = rad2deg(polarity_data[event][:,1])
polarity = polarity_data[event][:,2]
for a, t, p in zip(azi, toa, polarity):
if p > 0:
ax.pole(a, t,'o', markeredgecolor='red', markerfacecolor='red')
else:
ax.pole(a, t,'o', markeredgecolor='blue', markerfacecolor='white')
ax.grid()
ax = fig.add_subplot(222, projection='stereonet')
ax.rake(pth3[:,0], pth3[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
ax.rake(pth4[:,0], pth4[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
strike, dip, rake = svm_soln2
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = aux_plane(*svm_soln2)
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = hash_focal2
ax.plane(strike-90, dip, 'g-', linewidth=2)
strike, dip, rake = aux_plane(*hash_focal2)
ax.plane(strike-90, dip,'g-', linewidth=2)
azi = rad2deg(polarity_data[event2][:,0])
toa = rad2deg(polarity_data[event2][:,1])
polarity = polarity_data[event2][:,2]
for a, t, p in zip(azi, toa, polarity):
if p > 0:
ax.pole(a, t,'o', markeredgecolor='red', markerfacecolor='red')
else:
ax.pole(a, t,'o', markeredgecolor='blue', markerfacecolor='white')
ax.grid()
ax = fig.add_subplot(224, projection='stereonet')
ax.rake(pth5[:,0], pth5[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
ax.rake(pth6[:,0], pth6[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
strike, dip, rake = svm_soln3
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = aux_plane(*svm_soln3)
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = hash_focal3
ax.plane(strike-90, dip, 'g-', linewidth=2)
strike, dip, rake = aux_plane(*hash_focal3)
ax.plane(strike-90, dip,'g-', linewidth=2)
azi = rad2deg(polarity_data[event3][:,0])
toa = rad2deg(polarity_data[event3][:,1])
polarity = polarity_data[event3][:,2]
for a, t, p in zip(azi, toa, polarity):
if p > 0:
ax.pole(a, t,'o', markeredgecolor='red', markerfacecolor='red')
else:
ax.pole(a, t,'o', markeredgecolor='blue', markerfacecolor='white')
ax.grid()
plt.tight_layout(pad=4.0, h_pad=20.0)
plt.show()
| 26.669903 | 76 | 0.67874 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 302 | 0.054969 |
c37d8d6e64bf2027aa73ad7627b83cab9c6c0c89 | 3,102 | py | Python | gigantumcli/changelog.py | fossabot/gigantum-cli | d8054a8741484592ef1da750dd23affadc99fb5f | [
"MIT"
]
| null | null | null | gigantumcli/changelog.py | fossabot/gigantum-cli | d8054a8741484592ef1da750dd23affadc99fb5f | [
"MIT"
]
| null | null | null | gigantumcli/changelog.py | fossabot/gigantum-cli | d8054a8741484592ef1da750dd23affadc99fb5f | [
"MIT"
]
| null | null | null | # Copyright (c) 2017 FlashX, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import requests
import json
class ChangeLog(object):
"""Class to provide an interface to the posted ChangeLog information"""
def __init__(self):
"""Constructor"""
# Load data
self._change_log_url = "https://s3.amazonaws.com/io.gigantum.changelog/changelog.json"
self.data = self._load_data()
def _load_data(self):
"""Load the changelog data file from remote source
Returns:
dict
"""
data = None
try:
response = requests.get(self._change_log_url)
data = response.json()
        finally:
            # returning from the finally block swallows any request or JSON error,
            # so a failed fetch simply leaves the changelog data as None
            return data
def is_update_available(self, tag):
"""Method to check if an update is available using the changelog as a history
Args:
            tag(str): The 8-char short hash tag for the CURRENT image in use
Returns:
bool
"""
latest_hash = self.data['latest']['id']
return latest_hash != tag
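        # Example with hypothetical tag values: if the posted latest id is "a1b2c3d4",
        # then is_update_available("deadbeef") returns True, while passing
        # "a1b2c3d4" itself returns False.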
    def get_changelog(self, tag="latest"):
        """Method to return the changelog data as a formatted string
Args:
tag(str): Version of the changelog to grab
Returns:
str
"""
if not self.data:
# No changelog data was available...probably no internet connection
return None
if tag not in self.data:
raise ValueError("Tag {} not available".format(tag))
data = self.data[tag]
msg = "Version: {}\n".format(data['id'])
msg = "{}Release Date: {}\n".format(msg, data['date'])
msg = "{}Note: \n".format(msg)
# Show notices
if 'messages' in data:
for note in data['messages']:
msg = "{} - {}\n".format(msg, note)
# Show changes
for change_key in data['changes']:
msg = "{}\n{}: \n".format(msg, change_key)
for change_str in data['changes'][change_key]:
msg = "{} - {}\n".format(msg, change_str)
return msg
| 33.717391 | 94 | 0.627982 | 1,980 | 0.638298 | 0 | 0 | 0 | 0 | 0 | 0 | 1,970 | 0.635074 |
c37ed9ece51e833849523b39409da272c30bdafb | 7,311 | py | Python | main_old/understanding_smoothing_microsoft.py | benjaminleroy/smooth_rf | de166a7e777e8a203656b194d772def9d3c8f06d | [
"MIT"
]
| 3 | 2019-04-04T04:57:36.000Z | 2022-01-14T09:42:05.000Z | main_old/understanding_smoothing_microsoft.py | benjaminleroy/smooth_rf | de166a7e777e8a203656b194d772def9d3c8f06d | [
"MIT"
]
| 1 | 2019-04-04T04:57:24.000Z | 2019-05-29T18:03:31.000Z | main_old/understanding_smoothing_microsoft.py | benjaminleroy/smooth_rf | de166a7e777e8a203656b194d772def9d3c8f06d | [
"MIT"
]
| null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn.ensemble
import sklearn.metrics
import sklearn
import progressbar
import sklearn.model_selection
from plotnine import *
import pdb
import sys
sys.path.append("smooth_rf/")
import smooth_base
import smooth_level
# function
def average_depth(random_forest, data):
"""
calculate the average depth of each point (average across trees)
Arguments:
----------
random_forest : sklearn random forest model (fit)
data : array (n, p)
data frame that can be predicted from random_forest
Returns:
--------
average_depth : array (n,)
vector of average depth in forest of each data point
"""
# test:
#rf_fit
#smooth_rf_opt
#d1 = average_depth(rf_fit, data)
#d2 = average_depth(smooth_rf_opt, data)
#np.all(d1 == d2)
n_trees = len(random_forest.estimators_)
n_obs = data.shape[0]
depth = np.zeros(n_obs)
for t in random_forest.estimators_:
d_path = t.decision_path(data)
depth = depth + np.array(d_path.sum(axis = 1)).ravel()
return depth / n_trees
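# Illustrative usage (mirrors the analysis below): for a fitted forest such as
# `rf_fit` and its training matrix `data`, average_depth(rf_fit, data) returns one
# averaged decision-path node count per observation, e.g. array([5.3, 7.1, ...]).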
# start of analysis
data, y = smooth_base.generate_data(large_n = 650)
data_vis = pd.DataFrame(data = {"x1":data[:,0],
"x2":data[:,1],
"y":y},
columns = ["x1","x2","y"])
ggout = ggplot(data_vis) +\
geom_point(aes(x = "x1",y ="x2", color = "factor(y)")) +\
theme_minimal() +\
labs(x= "X1", y = "X2", color = "value (minus 100)")
rf = sklearn.ensemble.RandomForestRegressor(n_estimators = 300)
rf_fit = rf.fit(data,y)
smooth_rf_opt, smooth_rf_last ,_, _ = smooth_base.smooth(
rf_fit,
X_trained = data,
y_trained = y.ravel(),
X_tune = None,
y_tune = None,
resample_tune= False, # oob
no_constraint = False,
subgrad_max_num = 10000,
subgrad_t_fix = 1,
parents_all=True,
verbose = True,
all_trees = False,
initial_lamb_seed = None)
# test data
data_test, y_test = smooth_base.generate_data(large_n = 10000)
reorder = np.random.choice(data_test.shape[0],
size = data_test.shape[0], replace= False)
data_test = data_test[reorder,:]
y_test = y_test[reorder]
yhat_base = rf_fit.predict(data_test)
yhat_smooth = smooth_rf_opt.predict(data_test)
base_mse = sklearn.metrics.mean_squared_error(y_true = y_test, y_pred = yhat_base)
smooth_mse = sklearn.metrics.mean_squared_error(y_true = y_test, y_pred = yhat_smooth)
error_base = np.abs(yhat_base - y_test)
error_smooth = np.abs(yhat_smooth - y_test)
extreme_binary = np.max([np.max(np.abs(error_base)),
np.max(np.abs(error_smooth))])
col_vis = error_base - error_smooth
extreme = np.max(np.abs(col_vis))
mean_depth_test = average_depth(rf_fit,data_test)
data_vis = pd.DataFrame(data = {"X1":data_test[:,0],
"X2":data_test[:,1],
"y": y_test.ravel(),
"error_base":error_base.copy(),
"error_smooth":error_smooth.copy(),
"error":col_vis.copy(),
"mean_depth":mean_depth_test.copy()},
columns = ["X1","X2","y","error",
"error_base","error_smooth",
"mean_depth"])
a = ggplot(data_vis) +\
geom_point(aes(x = "X1", y="X2", color = "error"),
size = .5) +\
scale_color_continuous(name = "bwr",
limits= [-extreme, extreme]) +\
theme_bw() +\
labs(color = "Difference in Error",
title = r'Difference in Error ($Error_{base} - Error_{smooth}$)')
b = ggplot(data_vis) +\
geom_point(aes(x = "X1", y="X2", color = "error_base"),
size = .5) +\
scale_color_continuous(name = "binary",
limits= [0, extreme_binary]) +\
theme_bw() +\
labs(color = "Error",
title = "Error from Base Random Forest")
c = ggplot(data_vis) +\
geom_point(aes(x = "X1", y="X2", color = "error_smooth"),
size = .5) +\
scale_color_continuous(name = "binary",
limits= [0, extreme_binary]) +\
theme_bw() +\
labs(color = "Error",
title = "Error from Smoothed Random Forest")
d = ggplot(data_vis) +\
geom_point(aes(x = "X1", y="X2", color = "factor(y)"),
size = .5) +\
theme_bw() +\
labs(color = "True Value (discrete)",
title = "Test Set True Values")
e = ggplot(data_vis,aes(x = "mean_depth", y = "error")) +\
geom_point(alpha = .1) +\
theme_bw() +\
labs(x = "Mean depth in Forest",
y = "Difference in Error",
title = "Lack of relationship between diff in errors and depth")
f = ggplot(data_vis, aes(x = "X1", y = "X2", color = "mean_depth")) +\
geom_point() +\
scale_color_continuous(name = "Blues") +\
theme_bw() +\
labs(color = "Mean depth in Forest",
title = "Mean depth in Forest (Depth averaged across trees)")
g = ggplot(data_vis) +\
geom_point(aes(x = "error_base", y = "error_smooth"),
alpha = .05) +\
geom_abline(intercept = 0, slope = 1) +\
theme_bw() +\
labs(x = "Error from Random Forest",
y = "Error from Smooth Random Forest",
title = "Comparing Errors Between Models",
subtitle = r"(total error: rf: %f vs srf: %f)" %\
(base_mse, smooth_mse))
save_as_pdf_pages([a + theme(figure_size = (8,6))],
filename = "images/diff_error"+"_understanding_smoothing.pdf")
save_as_pdf_pages([b + theme(figure_size = (8,6))],
filename = "images/error_base"+"_understanding_smoothing.pdf")
save_as_pdf_pages([c + theme(figure_size = (8,6))],
filename = "images/error_smooth"+"_understanding_smoothing.pdf")
save_as_pdf_pages([d + theme(figure_size = (8,6))],
filename = "images/truth"+"_understanding_smoothing.pdf")
save_as_pdf_pages([e + theme(figure_size = (8,6))],
filename = "images/mean_depth_diff_error"+"_understanding_smoothing.pdf")
save_as_pdf_pages([f + theme(figure_size = (8,6))],
filename = "images/mean_depth"+"_understanding_smoothing.pdf")
save_as_pdf_pages([g + theme(figure_size = (8,6))],
filename = "images/error_vs_error"+"_understanding_smoothing.pdf")
save_as_pdf_pages([a + theme(figure_size = (8,6)),
b + theme(figure_size = (8,6)),
c + theme(figure_size = (8,6)),
d + theme(figure_size = (8,6)),
e + theme(figure_size = (8,6)),
f + theme(figure_size = (8,6)),
g + theme(figure_size = (8,6))],
filename = "images/understanding_smoothing.pdf")
# some of these observations might be due to the decision on the values of the classes
# we'll see
| 34.004651 | 91 | 0.56285 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,881 | 0.257284 |
c37ef55b28f73e2f2453409e73faf8e176864615 | 1,147 | py | Python | AER/Experiments/Metrics.py | LeBenchmark/Interspeech2021 | 2a3b424389631b317b39973291b7252bbf44a73b | [
"MIT"
]
| 48 | 2021-03-25T14:00:04.000Z | 2022-03-27T17:00:00.000Z | AER/Experiments/Metrics.py | LeBenchmark/Interspeech2021 | 2a3b424389631b317b39973291b7252bbf44a73b | [
"MIT"
]
| 2 | 2021-04-16T13:21:44.000Z | 2021-06-16T15:23:09.000Z | AER/Experiments/Metrics.py | LeBenchmark/Interspeech2021 | 2a3b424389631b317b39973291b7252bbf44a73b | [
"MIT"
]
| 2 | 2021-07-05T13:42:23.000Z | 2021-09-01T10:24:00.000Z | import numpy as np
def CCC(y_true, y_pred):
"""
Calculate the CCC for two numpy arrays.
"""
x = y_true
y = y_pred
xMean = x.mean()
yMean = y.mean()
xyCov = (x * y).mean() - (xMean * yMean)
# xyCov = ((x-xMean) * (y-yMean)).mean()
xVar = x.var()
yVar = y.var()
return 2 * xyCov / (xVar + yVar + (xMean - yMean) ** 2)
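# Illustrative check (not part of the original module): this is Lin's concordance
# correlation coefficient, so identical inputs score 1.0 (up to floating point),
# e.g. CCC(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 3.0])) -> 1.0, while a
# constant offset between the two arrays pulls the value below 1.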
def MSE(y_true, y_pred):
"""
Calculate the Mean Square Error for two numpy arrays.
"""
mse = (np.square(y_true - y_pred)).mean(axis=0)
return mse
def RMSE(y_true, y_pred):
"""
    Calculate the Root Mean Square Error for two numpy arrays.
"""
return np.sqrt(MSE(y_true, y_pred))
def perfMeasure(y_actual, y_pred):
"""
    Calculate confusion-matrix counts (TP, FP, TN, FN) for two numpy arrays of +1/-1 labels.
"""
TP = 0
FP = 0
TN = 0
FN = 0
for i in range(len(y_pred)):
if y_actual[i]==y_pred[i]==1:
TP += 1
if y_pred[i]==1 and y_actual[i]!=y_pred[i]:
FP += 1
if y_actual[i]==y_pred[i]==-1:
TN += 1
if y_pred[i]==-1 and y_actual[i]!=y_pred[i]:
FN += 1
return (TP, FP, TN, FN)
| 23.408163 | 59 | 0.529207 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 301 | 0.262424 |
c37f533b46624d83873bcd5b9e4314c8ccb4405c | 11,734 | py | Python | myo/device_listener.py | ehliang/myo-unlock | 059e130a90e44df3869dd892e216c020d6d97a7e | [
"MIT"
]
| 1 | 2021-06-25T02:27:31.000Z | 2021-06-25T02:27:31.000Z | myo/device_listener.py | ehliang/myo-unlock | 059e130a90e44df3869dd892e216c020d6d97a7e | [
"MIT"
]
| null | null | null | myo/device_listener.py | ehliang/myo-unlock | 059e130a90e44df3869dd892e216c020d6d97a7e | [
"MIT"
]
| null | null | null | # Copyright (c) 2015 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import abc
import six
import time
import threading
import warnings
from .lowlevel.enums import EventType, Pose, Arm, XDirection
from .utils.threading import TimeoutClock
from .vector import Vector
from .quaternion import Quaternion
class DeviceListener(six.with_metaclass(abc.ABCMeta)):
"""
Interface for listening to data sent from a Myo device.
Return False from one of its callback methods to instruct
the Hub to stop processing.
The *DeviceListener* operates between the high and low level
of the myo Python bindings. The ``myo`` object that is passed
to callback methods is a :class:`myo.lowlevel.ctyping.Myo`
object.
"""
def on_event(self, kind, event):
"""
Called before any of the event callbacks.
"""
def on_event_finished(self, kind, event):
"""
Called after the respective event callbacks have been
invoked. This method is *always* triggered, even if one of
the callbacks requested the stop of the Hub.
"""
def on_pair(self, myo, timestamp):
pass
def on_unpair(self, myo, timestamp):
pass
def on_connect(self, myo, timestamp):
pass
def on_disconnect(self, myo, timestamp):
pass
def on_pose(self, myo, timestamp, pose):
pass
def on_orientation_data(self, myo, timestamp, orientation):
pass
def on_accelerometor_data(self, myo, timestamp, acceleration):
pass
def on_gyroscope_data(self, myo, timestamp, gyroscope):
pass
def on_rssi(self, myo, timestamp, rssi):
pass
def on_emg(self, myo, timestamp, emg):
pass
def on_unsync(self, myo, timestamp):
pass
def on_sync(self, myo, timestamp, arm, x_direction):
pass
def on_unlock(self, myo, timestamp):
pass
def on_lock(self, myo, timestamp):
pass
class Feed(DeviceListener):
"""
This class implements the :class:`DeviceListener` interface
to collect all data and make it available to another thread
on-demand.
.. code-block:: python
import myo as libmyo
feed = libmyo.device_listener.Feed()
hub = libmyo.Hub()
hub.run(1000, feed)
try:
while True:
myos = feed.get_connected_devices()
if myos:
print myos[0], myos[0].orientation
time.sleep(0.5)
finally:
hub.stop(True)
hub.shutdown()
"""
class MyoProxy(object):
__slots__ = ('synchronized,_pair_time,_unpair_time,_connect_time,'
'_disconnect_time,_myo,_emg,_orientation,_acceleration,'
'_gyroscope,_pose,_arm,_xdir,_rssi,_firmware_version').split(',')
def __init__(self, low_myo, timestamp, firmware_version):
super(Feed.MyoProxy, self).__init__()
self.synchronized = threading.Condition()
self._pair_time = timestamp
self._unpair_time = None
self._connect_time = None
self._disconnect_time = None
self._myo = low_myo
self._emg = None
self._orientation = Quaternion.identity()
self._acceleration = Vector(0, 0, 0)
self._gyroscope = Vector(0, 0, 0)
self._pose = Pose.rest
self._arm = None
self._xdir = None
self._rssi = None
self._firmware_version = firmware_version
def __repr__(self):
result = '<MyoProxy ('
with self.synchronized:
if self.connected:
result += 'connected) at 0x{0:x}>'.format(self._myo.value)
else:
result += 'disconnected)>'
return result
def __assert_connected(self):
if not self.connected:
raise RuntimeError('Myo was disconnected')
@property
def connected(self):
with self.synchronized:
return (self._connect_time is not None and
self._disconnect_time is None)
@property
def paired(self):
with self.synchronized:
        return (self._myo is not None and self._unpair_time is None)
@property
def pair_time(self):
return self._pair_time
@property
def unpair_time(self):
with self.synchronized:
return self._unpair_time
@property
def connect_time(self):
return self._connect_time
@property
def disconnect_time(self):
with self.synchronized:
return self._disconnect_time
@property
def firmware_version(self):
return self._firmware_version
@property
def orientation(self):
with self.synchronized:
return self._orientation.copy()
@property
def acceleration(self):
with self.synchronized:
return self._acceleration.copy()
@property
def gyroscope(self):
with self.synchronized:
return self._gyroscope.copy()
@property
def pose(self):
with self.synchronized:
return self._pose
@property
def arm(self):
with self.synchronized:
return self._arm
@property
def x_direction(self):
with self.synchronized:
return self._xdir
@property
def rssi(self):
with self.synchronized:
return self._rssi
def set_locking_policy(self, locking_policy):
with self.synchronized:
self.__assert_connected()
self._myo.set_locking_policy(locking_policy)
def set_stream_emg(self, emg):
with self.synchronized:
self.__assert_connected()
self._myo.set_stream_emg(emg)
def vibrate(self, vibration_type):
with self.synchronized:
self.__assert_connected()
self._myo.vibrate(vibration_type)
def request_rssi(self):
"""
Requests the RSSI of the Myo armband. Until the RSSI is
retrieved, :attr:`rssi` returns None.
"""
with self.synchronized:
self.__assert_connected()
self._rssi = None
self._myo.request_rssi()
def __init__(self):
super(Feed, self).__init__()
self.synchronized = threading.Condition()
self._myos = {}
def get_devices(self):
"""
get_devices() -> list of Feed.MyoProxy
Returns a list of paired and connected Myo's.
"""
with self.synchronized:
return list(self._myos.values())
def get_connected_devices(self):
"""
get_connected_devices(self) -> list of Feed.MyoProxy
Returns a list of connected Myo's.
"""
with self.synchronized:
return [myo for myo in self._myos.values() if myo.connected]
def wait_for_single_device(self, timeout=None, interval=0.5):
"""
wait_for_single_device(timeout) -> Feed.MyoProxy or None
Waits until a Myo is was paired **and** connected with the Hub
and returns it. If the *timeout* is exceeded, returns None.
This function will not return a Myo that is only paired but
not connected.
:param timeout: The maximum time to wait for a device.
:param interval: The interval at which the function should
exit sleeping. We can not sleep endlessly, otherwise
the main thread can not be exit, eg. through a
KeyboardInterrupt.
"""
timer = TimeoutClock(timeout)
start = time.time()
with self.synchronized:
# As long as there are no Myo's connected, wait until we
# get notified about a change.
while not timer.exceeded:
# Check if we found a Myo that is connected.
for myo in six.itervalues(self._myos):
if myo.connected:
return myo
remaining = timer.remaining
if interval is not None and remaining > interval:
remaining = interval
self.synchronized.wait(remaining)
return None
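  # Example (sketch, mirroring the class docstring above): after hub.run(1000, feed),
  # a caller can block with myo = feed.wait_for_single_device(timeout=10.0) and
  # receives None if no armband is paired and connected within that window.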
# DeviceListener
def on_event(self, kind, event):
myo = event.myo
timestamp = event.timestamp
with self.synchronized:
if kind == EventType.paired:
fmw_version = event.firmware_version
self._myos[myo.value] = self.MyoProxy(myo, timestamp, fmw_version)
self.synchronized.notify_all()
return True
elif kind == EventType.unpaired:
try: proxy = self._myos.pop(myo.value)
except KeyError:
message = "Myo 0x{0:x} was not in the known Myo's list"
warnings.warn(message.format(myo.value), RuntimeWarning)
else:
# Remove the reference handle from the Myo proxy.
with proxy.synchronized:
proxy._unpair_time = timestamp
proxy._myo = None
finally:
self.synchronized.notify_all()
return True
else:
try: proxy = self._myos[myo.value]
except KeyError:
message = "Myo 0x{0:x} was not in the known Myo's list"
warnings.warn(message.format(myo.value), RuntimeWarning)
return True
with proxy.synchronized:
if kind == EventType.connected:
proxy._connect_time = timestamp
elif kind == EventType.disconnected:
proxy._disconnect_time = timestamp
elif kind == EventType.emg:
proxy._emg = event.emg
elif kind == EventType.arm_synced:
proxy._arm = event.arm
proxy._xdir = event.x_direction
elif kind == EventType.rssi:
proxy._rssi = event.rssi
elif kind == EventType.pose:
proxy._pose = event.pose
elif kind == EventType.orientation:
proxy._orientation = event.orientation
proxy._gyroscope = event.gyroscope
proxy._acceleration = event.acceleration
| 32.325069 | 82 | 0.587183 | 10,397 | 0.886058 | 0 | 0 | 1,599 | 0.136271 | 0 | 0 | 3,793 | 0.323249 |
c3806b9e128d8474be2a0c8c16ed645a6cd61414 | 333 | py | Python | utilities/poisson.py | lukepinkel/pylmm | b9e896222f077b000f9a752be77cfc9e60b49f19 | [
"MIT"
]
| null | null | null | utilities/poisson.py | lukepinkel/pylmm | b9e896222f077b000f9a752be77cfc9e60b49f19 | [
"MIT"
]
| null | null | null | utilities/poisson.py | lukepinkel/pylmm | b9e896222f077b000f9a752be77cfc9e60b49f19 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 12 13:34:49 2020
@author: lukepinkel
"""
import numpy as np
import scipy as sp
import scipy.special
def poisson_logp(x, mu, logp=True):
p = sp.special.xlogy(x, mu) - sp.special.gammaln(x + 1) - mu
if logp==False:
p = np.exp(p)
return p
| 19.588235 | 65 | 0.618619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.327327 |
c382207d4a3aa645831dc8af78380466763f0458 | 581 | py | Python | iotest.py | AaltoRSE/ImageNetTools | 1ed8b8c38bd14eb47fc6167bf194f327a2696bf1 | [
"BSD-3-Clause"
]
| 1 | 2021-11-15T11:21:55.000Z | 2021-11-15T11:21:55.000Z | iotest.py | AaltoRSE/ImageNetTools | 1ed8b8c38bd14eb47fc6167bf194f327a2696bf1 | [
"BSD-3-Clause"
]
| null | null | null | iotest.py | AaltoRSE/ImageNetTools | 1ed8b8c38bd14eb47fc6167bf194f327a2696bf1 | [
"BSD-3-Clause"
]
| null | null | null | '''
Created on Sep 29, 2021
@author: thomas
'''
import ImageNetTools
import sys
import getopt
def main(argv):
try:
opts, args = getopt.getopt(argv,"hd:",["dataset="])
except getopt.GetoptError:
printHelp()
sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            printHelp()
            sys.exit()
        elif opt in ("-d", "--dataset"):
            ImageNetTools.benchmarkIOSpeeds(arg)
            sys.exit()
def printHelp():
print('Run IO Speed testing with a given Dataset')
print('python iotest.py -d /path/to/dataset' )
main(sys.argv[1:]) | 22.346154 | 59 | 0.555938 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 159 | 0.273666 |
c3825a98b9b5079c534d11d77f64da2d82f8a541 | 1,775 | py | Python | sagas/tests/sinkers/test_results_render.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
]
| 3 | 2020-01-11T13:55:38.000Z | 2020-08-25T22:34:15.000Z | sagas/tests/sinkers/test_results_render.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
]
| null | null | null | sagas/tests/sinkers/test_results_render.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
]
| 1 | 2021-01-01T05:21:44.000Z | 2021-01-01T05:21:44.000Z | """
$ pytest -s -v test_results_render.py
"""
import logging
import pytest
from sagas.nlu.results_render import ResultsRender
def test_descriptor():
import sagas.nlu.results_render
sagas.nlu.results_render.logger.setLevel(logging.DEBUG)
# $ str 'Rezervasyonumu onaylamak istiyorum.'
results = [{'delivery': 'sentence',
'inspector': 'specs_of',
'part': '_',
'pattern': 'behave_reservation',
'provider': 'default',
'value': {'category': 'request',
'pos': 'v',
'subs': [{'candidates': 'request',
'substitute': 'request',
'word': 'iste'}],
'words': ['istiyorum/iste']}},
{'delivery': 'slot',
'inspector': 'pipes',
'part': 'verb:obj/obj',
'pattern': 'behave_reservation',
'provider': 'cat/cat_proc',
'value': [{'cat': 'reservation',
'path': '/obj/obj',
'pos': 'noun',
'trans': 'reservation',
'value': 'reservation',
'word': 'rezervasyon'}]},
{'delivery': 'sentence',
'inspector': 'kind_of',
'part': 'obj',
'pattern': 'behave_reservation',
'provider': 'default',
'value': {'category': 'approve', 'pos': '*', 'word': 'onaylamak/onayla'}}]
dsp=ResultsRender()
patt = 'behave {obj:_} for {obj:/obj}, modal {_:_}'
assert dsp.render(patt, results)=='behave approve for reservation, modal request'
| 36.979167 | 90 | 0.450704 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 771 | 0.434366 |
c382afee49a8dcf277085e3abd2845bbc944eef7 | 5,838 | py | Python | equatation.py | asteinig4018/mhacks19 | 64b5ba211f38f3b30927bf926073b777fb23a46f | [
"MIT"
]
| 1 | 2019-10-12T21:30:15.000Z | 2019-10-12T21:30:15.000Z | equatation.py | asteinig4018/mhacks19 | 64b5ba211f38f3b30927bf926073b777fb23a46f | [
"MIT"
]
| null | null | null | equatation.py | asteinig4018/mhacks19 | 64b5ba211f38f3b30927bf926073b777fb23a46f | [
"MIT"
]
| null | null | null | import json
import math
from HistoricalTweetDataFetcher import getHistoricalData
joelsarray = getHistoricalData(0)
arrs = []
arrm = []
arrp = []
arrsTotal = 0
arrmTotal = 0
ncount = 0
ccount = 0
lcount = 0
time = joelsarray[0]["h"]
for dictionary in joelsarray:
arrs.append(dictionary["s"])
arrm.append(dictionary["m"])
arrp.append(dictionary["p"])
for x in range(len(arrs)):
arrsTotal += arrs[x]
arrmTotal += arrm[x]
if arrp[x]=='l':
lcount += 1
elif arrp[x]=='c':
ccount += 1
elif arrp[x]=='n':
ncount += 1
arrsAvg = arrsTotal/len(arrs)#sentiment value
arrmAvg = arrmTotal/len(arrm)#magnitude value
#print(arrsTotal)
#print(len(arrs))
#print(arrsAvg)
#print(arrmAvg)
#print(lcount)
#print(ccount)
###################################################################
filename2 = "weather_us.json"
if filename2:
with open(filename2, 'r') as f:
weatherstore = json.load(f)
for x in range(50):
statearray = list(weatherstore.keys())
statesAverage = 0
for state in statearray:
for x in range(50):
temptemp = float(weatherstore[state]["temperature"])
temphigh = float(weatherstore[state]["average_monthly_high"])
templow = float(weatherstore[state]["average_monthly_low"])
statesAverage+=((temptemp-temphigh)*(templow-temptemp))/(math.pow(((temphigh+templow)/2),2))
statesAverage = statesAverage/50 # this is the average temperature multiplier
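# Worked example with made-up numbers: a temperature of 70 against a monthly high
# of 80 and low of 60 gives ((70-80)*(60-70))/((80+60)/2)**2 = 100/4900 ~= 0.0204,
# so a reading inside the usual band adds a small positive amount, while a reading
# outside the band would contribute a negative one.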
print(statesAverage)
#####################################################################################
filename3 = "sp500_price.json"
if filename3:
with open(filename3, 'r') as f:
stockdata = json.load(f)
stockpricecurrent = stockdata["current_price"]
stockpricechange = stockdata["percent_change"]#percent change of S&P500
if stockpricechange <= 0.73 and stockpricechange >=-0.73:
stockmultiply = 0;
else:
stockmultiply = stockpricechange*0.5*0.73
print(stockpricechange)
#########################################################################################
filename4 = "trump_approval_rating.json"
if filename4:
with open(filename4, 'r') as f:
approvalratingdata = json.load(f)
approveAvg = approvalratingdata["approve_avg"]#approval average data
currentApproval = approvalratingdata["approve"]#current approval percentage
########################################################################################
def equation(sentiment, stockmultiply, pollcurrent, pollaverage, avgtemp, lvalue, cvalue, ltweets, ctweet, time, const1 = 70, const2 = 60, const3 = 50, const4 = 45, const5 = 25, slideInput = True):
point = const1*(sentiment) + const2*(stockmultiply)+const3*((pollcurrent-pollaverage)/(pollaverage))+const4*avgtemp + const5/2*lvalue*ltweets+ const5/2*cvalue+ctweet+const5
filename5 = "data.json"
if(slideInput==True):
if filename5:
with open(filename5, 'r') as f:
outputdata = json.load(f)
print(outputdata)
outputdata["chartData"]["labels"][0]=outputdata["chartData"]["labels"][1]
outputdata["chartData"]["labels"][1]=outputdata["chartData"]["labels"][2]
outputdata["chartData"]["labels"][2]=outputdata["chartData"]["labels"][3]
outputdata["chartData"]["labels"][3]=outputdata["chartData"]["labels"][4]
outputdata["chartData"]["labels"][4]=outputdata["chartData"]["labels"][5]
outputdata["chartData"]["labels"][5]=outputdata["chartData"]["labels"][6]
outputdata["chartData"]["labels"][6] = str(time)+":00"
outputdata["chartData"]["thisWeek"][0]=outputdata["chartData"]["thisWeek"][1]
outputdata["chartData"]["thisWeek"][1]=outputdata["chartData"]["thisWeek"][2]
outputdata["chartData"]["thisWeek"][2]=outputdata["chartData"]["thisWeek"][3]
outputdata["chartData"]["thisWeek"][3]=outputdata["chartData"]["thisWeek"][4]
outputdata["chartData"]["thisWeek"][4]=outputdata["chartData"]["thisWeek"][5]
outputdata["chartData"]["thisWeek"][5]=outputdata["chartData"]["thisWeek"][6]
outputdata["chartData"]["thisWeek"][6] = point
with open(filename5, 'w') as f:
json.dump(outputdata, f)
else:
if filename5:
with open(filename5, 'r') as f:
outputdata = json.load(f)
print(outputdata)
outputdata["chartData"]["labels"][0]=outputdata["chartData"]["labels"][1]
outputdata["chartData"]["labels"][1]=outputdata["chartData"]["labels"][2]
outputdata["chartData"]["labels"][2]=outputdata["chartData"]["labels"][3]
outputdata["chartData"]["labels"][3]=outputdata["chartData"]["labels"][4]
outputdata["chartData"]["labels"][4]=outputdata["chartData"]["labels"][5]
outputdata["chartData"]["labels"][5]=outputdata["chartData"]["labels"][6]
outputdata["chartData"]["labels"][6] = str(time) + ":00"
outputdata["chartData"]["thisWeek"][0]=outputdata["chartData"]["thisWeek"][1]
outputdata["chartData"]["thisWeek"][1]=outputdata["chartData"]["thisWeek"][2]
outputdata["chartData"]["thisWeek"][2]=outputdata["chartData"]["thisWeek"][3]
outputdata["chartData"]["thisWeek"][3]=outputdata["chartData"]["thisWeek"][4]
outputdata["chartData"]["thisWeek"][4]=outputdata["chartData"]["thisWeek"][5]
outputdata["chartData"]["thisWeek"][5]=outputdata["chartData"]["thisWeek"][6]
outputdata["chartData"]["thisWeek"][6] = point
with open(filename5, 'w') as f:
json.dump(outputdata, f)
return point
my_list = equation(arrsAvg, stockmultiply, currentApproval, approveAvg, statesAverage, 0, 0, lcount, ccount, 17, 70, 60, 50, 45, 25)
| 39.714286 | 198 | 0.604488 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,860 | 0.318602 |
5ed9ef5b5cccf956209757de81563a4bc4e12b59 | 43,492 | py | Python | oscar/apps/offer/models.py | endgame/django-oscar | e5d78436e20b55902537a6cc82edf4e22568f9d6 | [
"BSD-3-Clause"
]
| null | null | null | oscar/apps/offer/models.py | endgame/django-oscar | e5d78436e20b55902537a6cc82edf4e22568f9d6 | [
"BSD-3-Clause"
]
| null | null | null | oscar/apps/offer/models.py | endgame/django-oscar | e5d78436e20b55902537a6cc82edf4e22568f9d6 | [
"BSD-3-Clause"
]
| 1 | 2019-07-10T06:32:14.000Z | 2019-07-10T06:32:14.000Z | from decimal import Decimal as D, ROUND_DOWN, ROUND_UP
import math
import datetime
from django.core import exceptions
from django.template.defaultfilters import slugify
from django.db import models
from django.utils.translation import ungettext, ugettext as _
from django.utils.importlib import import_module
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.conf import settings
from oscar.apps.offer.managers import ActiveOfferManager
from oscar.templatetags.currency_filters import currency
from oscar.models.fields import PositiveDecimalField, ExtendedURLField
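# Example for the helper below (hypothetical dotted path):
# load_proxy('myproject.offers.StudentRange') imports myproject.offers and returns
# its StudentRange attribute, raising ImproperlyConfigured if either the module or
# the attribute is missing.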
def load_proxy(proxy_class):
module, classname = proxy_class.rsplit('.', 1)
try:
mod = import_module(module)
except ImportError, e:
raise exceptions.ImproperlyConfigured(
"Error importing module %s: %s" % (module, e))
try:
return getattr(mod, classname)
except AttributeError:
raise exceptions.ImproperlyConfigured(
"Module %s does not define a %s" % (module, classname))
class ConditionalOffer(models.Model):
"""
A conditional offer (eg buy 1, get 10% off)
"""
name = models.CharField(
_("Name"), max_length=128, unique=True,
help_text=_("This is displayed within the customer's basket"))
slug = models.SlugField(_("Slug"), max_length=128, unique=True, null=True)
description = models.TextField(_("Description"), blank=True, null=True)
# Offers come in a few different types:
# (a) Offers that are available to all customers on the site. Eg a
# 3-for-2 offer.
# (b) Offers that are linked to a voucher, and only become available once
# that voucher has been applied to the basket
# (c) Offers that are linked to a user. Eg, all students get 10% off. The
# code to apply this offer needs to be coded
# (d) Session offers - these are temporarily available to a user after some
# trigger event. Eg, users coming from some affiliate site get 10% off.
SITE, VOUCHER, USER, SESSION = ("Site", "Voucher", "User", "Session")
TYPE_CHOICES = (
(SITE, _("Site offer - available to all users")),
(VOUCHER, _("Voucher offer - only available after entering the appropriate voucher code")),
(USER, _("User offer - available to certain types of user")),
(SESSION, _("Session offer - temporary offer, available for a user for the duration of their session")),
)
offer_type = models.CharField(_("Type"), choices=TYPE_CHOICES, default=SITE, max_length=128)
condition = models.ForeignKey('offer.Condition', verbose_name=_("Condition"))
benefit = models.ForeignKey('offer.Benefit', verbose_name=_("Benefit"))
# Some complicated situations require offers to be applied in a set order.
priority = models.IntegerField(_("Priority"), default=0,
help_text=_("The highest priority offers are applied first"))
# AVAILABILITY
# Range of availability. Note that if this is a voucher offer, then these
# dates are ignored and only the dates from the voucher are used to
# determine availability.
start_date = models.DateField(_("Start Date"), blank=True, null=True)
end_date = models.DateField(
_("End Date"), blank=True, null=True,
help_text=_("Offers are not active on their end date, only "
"the days preceding"))
# Use this field to limit the number of times this offer can be applied in
# total. Note that a single order can apply an offer multiple times so
# this is not the same as the number of orders that can use it.
max_global_applications = models.PositiveIntegerField(
_("Max global applications"),
help_text=_("The number of times this offer can be used before it "
"is unavailable"), blank=True, null=True)
# Use this field to limit the number of times this offer can be used by a
# single user. This only works for signed-in users - it doesn't really
# make sense for sites that allow anonymous checkout.
max_user_applications = models.PositiveIntegerField(
_("Max user applications"),
help_text=_("The number of times a single user can use this offer"),
blank=True, null=True)
# Use this field to limit the number of times this offer can be applied to
# a basket (and hence a single order).
max_basket_applications = models.PositiveIntegerField(
blank=True, null=True,
help_text=_("The number of times this offer can be applied to a "
"basket (and order)"))
# Use this field to limit the amount of discount an offer can lead to.
# This can be helpful with budgeting.
max_discount = models.DecimalField(
_("Max discount"), decimal_places=2, max_digits=12, null=True,
blank=True,
help_text=_("When an offer has given more discount to orders "
"than this threshold, then the offer becomes "
"unavailable"))
# TRACKING
total_discount = models.DecimalField(
_("Total Discount"), decimal_places=2, max_digits=12,
default=D('0.00'))
num_applications = models.PositiveIntegerField(
_("Number of applications"), default=0)
num_orders = models.PositiveIntegerField(
_("Number of Orders"), default=0)
redirect_url = ExtendedURLField(_("URL redirect (optional)"), blank=True)
date_created = models.DateTimeField(_("Date Created"), auto_now_add=True)
objects = models.Manager()
active = ActiveOfferManager()
# We need to track the voucher that this offer came from (if it is a
# voucher offer)
_voucher = None
class Meta:
ordering = ['-priority']
verbose_name = _("Conditional Offer")
verbose_name_plural = _("Conditional Offers")
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name)
return super(ConditionalOffer, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('offer:detail', kwargs={'slug': self.slug})
def __unicode__(self):
return self.name
def clean(self):
if self.start_date and self.end_date and self.start_date > self.end_date:
raise exceptions.ValidationError(_('End date should be later than start date'))
def is_active(self, test_date=None):
"""
Test whether this offer is active and can be used by customers
"""
if test_date is None:
test_date = datetime.date.today()
predicates = [self.get_max_applications() > 0]
if self.start_date:
predicates.append(self.start_date <= test_date)
if self.end_date:
predicates.append(test_date < self.end_date)
if self.max_discount:
predicates.append(self.total_discount < self.max_discount)
return all(predicates)
def is_condition_satisfied(self, basket):
return self._proxy_condition().is_satisfied(basket)
def is_condition_partially_satisfied(self, basket):
return self._proxy_condition().is_partially_satisfied(basket)
def get_upsell_message(self, basket):
return self._proxy_condition().get_upsell_message(basket)
def apply_benefit(self, basket):
"""
Applies the benefit to the given basket and returns the discount.
"""
if not self.is_condition_satisfied(basket):
return D('0.00')
return self._proxy_benefit().apply(basket, self._proxy_condition(),
self)
def set_voucher(self, voucher):
self._voucher = voucher
def get_voucher(self):
return self._voucher
def get_max_applications(self, user=None):
"""
Return the number of times this offer can be applied to a basket
"""
limits = [10000]
if self.max_user_applications and user:
limits.append(max(0, self.max_user_applications -
self.get_num_user_applications(user)))
if self.max_basket_applications:
limits.append(self.max_basket_applications)
if self.max_global_applications:
limits.append(
max(0, self.max_global_applications - self.num_applications))
return min(limits)
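        # Example with illustrative numbers: max_user_applications=3 with one prior
        # use by this user, max_basket_applications=2 and no global cap builds
        # limits == [10000, 2, 2], so the offer can be applied at most twice more.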
def get_num_user_applications(self, user):
OrderDiscount = models.get_model('order', 'OrderDiscount')
aggregates = OrderDiscount.objects.filter(
offer_id=self.id, order__user=user).aggregate(
total=models.Sum('frequency'))
return aggregates['total'] if aggregates['total'] is not None else 0
def shipping_discount(self, charge):
return self._proxy_benefit().shipping_discount(charge)
def _proxy_condition(self):
"""
Returns the appropriate proxy model for the condition
"""
field_dict = dict(self.condition.__dict__)
for field in field_dict.keys():
if field.startswith('_'):
del field_dict[field]
if self.condition.proxy_class:
klass = load_proxy(self.condition.proxy_class)
return klass(**field_dict)
klassmap = {
self.condition.COUNT: CountCondition,
self.condition.VALUE: ValueCondition,
self.condition.COVERAGE: CoverageCondition}
if self.condition.type in klassmap:
return klassmap[self.condition.type](**field_dict)
return self.condition
def _proxy_benefit(self):
"""
Returns the appropriate proxy model for the benefit
"""
field_dict = dict(self.benefit.__dict__)
for field in field_dict.keys():
if field.startswith('_'):
del field_dict[field]
klassmap = {
self.benefit.PERCENTAGE: PercentageDiscountBenefit,
self.benefit.FIXED: AbsoluteDiscountBenefit,
self.benefit.MULTIBUY: MultibuyDiscountBenefit,
self.benefit.FIXED_PRICE: FixedPriceBenefit,
self.benefit.SHIPPING_ABSOLUTE: ShippingAbsoluteDiscountBenefit,
self.benefit.SHIPPING_FIXED_PRICE: ShippingFixedPriceBenefit,
self.benefit.SHIPPING_PERCENTAGE: ShippingPercentageDiscountBenefit}
if self.benefit.type in klassmap:
return klassmap[self.benefit.type](**field_dict)
return self.benefit
def record_usage(self, discount):
self.num_applications += discount['freq']
self.total_discount += discount['discount']
self.num_orders += 1
self.save()
record_usage.alters_data = True
def availability_description(self):
"""
Return a description of when this offer is available
"""
sentences = []
if self.max_global_applications:
desc = _(
"Can be used %(total)d times "
"(%(remainder)d remaining)") % {
'total': self.max_global_applications,
'remainder': self.max_global_applications - self.num_applications}
sentences.append(desc)
if self.max_user_applications:
if self.max_user_applications == 1:
desc = _("Can be used once per user")
else:
desc = _(
"Can be used %(total)d times per user") % {
'total': self.max_user_applications}
sentences.append(desc)
if self.max_basket_applications:
if self.max_user_applications == 1:
desc = _("Can be used once per basket")
else:
desc = _(
"Can be used %(total)d times per basket") % {
'total': self.max_basket_applications}
sentences.append(desc)
if self.start_date and self.end_date:
desc = _("Available between %(start)s and %(end)s") % {
'start': self.start_date,
'end': self.end_date}
sentences.append(desc)
elif self.start_date:
sentences.append(_("Available until %(start)s") % {
'start': self.start_date})
elif self.end_date:
sentences.append(_("Available until %(end)s") % {
'end': self.end_date})
if self.max_discount:
sentences.append(_("Available until a discount of %(max)s "
"has been awarded") % {
'max': currency(self.max_discount)})
return "<br/>".join(sentences)
class Condition(models.Model):
COUNT, VALUE, COVERAGE = ("Count", "Value", "Coverage")
TYPE_CHOICES = (
(COUNT, _("Depends on number of items in basket that are in "
"condition range")),
(VALUE, _("Depends on value of items in basket that are in "
"condition range")),
(COVERAGE, _("Needs to contain a set number of DISTINCT items "
"from the condition range")))
range = models.ForeignKey(
'offer.Range', verbose_name=_("Range"), null=True, blank=True)
type = models.CharField(_('Type'), max_length=128, choices=TYPE_CHOICES,
null=True, blank=True)
value = PositiveDecimalField(_('Value'), decimal_places=2, max_digits=12,
null=True, blank=True)
proxy_class = models.CharField(_("Custom class"), null=True, blank=True,
max_length=255, unique=True, default=None)
class Meta:
verbose_name = _("Condition")
verbose_name_plural = _("Conditions")
def __unicode__(self):
if self.proxy_class:
return load_proxy(self.proxy_class).name
if self.type == self.COUNT:
return _("Basket includes %(count)d item(s) from %(range)s") % {
'count': self.value, 'range': unicode(self.range).lower()}
elif self.type == self.COVERAGE:
return _("Basket includes %(count)d distinct products from %(range)s") % {
'count': self.value, 'range': unicode(self.range).lower()}
return _("Basket includes %(amount)s from %(range)s") % {
'amount': currency(self.value),
'range': unicode(self.range).lower()}
description = __unicode__
def consume_items(self, basket, affected_lines):
pass
def is_satisfied(self, basket):
"""
Determines whether a given basket meets this condition. This is
stubbed in this top-class object. The subclassing proxies are
responsible for implementing it correctly.
"""
return False
def is_partially_satisfied(self, basket):
"""
Determine if the basket partially meets the condition. This is useful
for up-selling messages to entice customers to buy something more in
order to qualify for an offer.
"""
return False
def get_upsell_message(self, basket):
return None
def can_apply_condition(self, product):
"""
Determines whether the condition can be applied to a given product
"""
return (self.range.contains_product(product)
and product.is_discountable and product.has_stockrecord)
def get_applicable_lines(self, basket, most_expensive_first=True):
"""
Return line data for the lines that can be consumed by this condition
"""
line_tuples = []
for line in basket.all_lines():
product = line.product
if not self.can_apply_condition(product):
continue
price = line.unit_price_incl_tax
if not price:
continue
line_tuples.append((price, line))
if most_expensive_first:
return sorted(line_tuples, reverse=True)
return sorted(line_tuples)
class Benefit(models.Model):
range = models.ForeignKey(
'offer.Range', null=True, blank=True, verbose_name=_("Range"))
# Benefit types
PERCENTAGE, FIXED, MULTIBUY, FIXED_PRICE = (
"Percentage", "Absolute", "Multibuy", "Fixed price")
SHIPPING_PERCENTAGE, SHIPPING_ABSOLUTE, SHIPPING_FIXED_PRICE = (
'Shipping percentage', 'Shipping absolute', 'Shipping fixed price')
TYPE_CHOICES = (
(PERCENTAGE, _("Discount is a % of the product's value")),
(FIXED, _("Discount is a fixed amount off the product's value")),
(MULTIBUY, _("Discount is to give the cheapest product for free")),
(FIXED_PRICE, _("Get the products that meet the condition for a fixed price")),
(SHIPPING_ABSOLUTE, _("Discount is a fixed amount off the shipping cost")),
(SHIPPING_FIXED_PRICE, _("Get shipping for a fixed price")),
(SHIPPING_PERCENTAGE, _("Discount is a % off the shipping cost")),
)
type = models.CharField(_("Type"), max_length=128, choices=TYPE_CHOICES)
value = PositiveDecimalField(_("Value"), decimal_places=2, max_digits=12,
null=True, blank=True)
# If this is not set, then there is no upper limit on how many products
# can be discounted by this benefit.
max_affected_items = models.PositiveIntegerField(
_("Max Affected Items"), blank=True, null=True,
help_text=_("Set this to prevent the discount consuming all items "
"within the range that are in the basket."))
class Meta:
verbose_name = _("Benefit")
verbose_name_plural = _("Benefits")
def __unicode__(self):
if self.type == self.PERCENTAGE:
desc = _("%(value)s%% discount on %(range)s") % {
'value': self.value,
'range': unicode(self.range).lower()}
elif self.type == self.MULTIBUY:
desc = _("Cheapest product is free from %s") % (
unicode(self.range).lower(),)
elif self.type == self.FIXED_PRICE:
desc = _("The products that meet the condition are "
"sold for %(amount)s") % {
'amount': currency(self.value)}
elif self.type == self.SHIPPING_PERCENTAGE:
desc = _("%(value)s%% off shipping cost") % {
'value': self.value}
elif self.type == self.SHIPPING_ABSOLUTE:
desc = _("%(amount)s off shipping cost") % {
'amount': currency(self.value)}
elif self.type == self.SHIPPING_FIXED_PRICE:
desc = _("Get shipping for %(amount)s") % {
'amount': currency(self.value)}
else:
desc = _("%(amount)s discount on %(range)s") % {
'amount': currency(self.value),
'range': unicode(self.range).lower()}
if self.max_affected_items:
desc += ungettext(" (max %d item)", " (max %d items)", self.max_affected_items) % self.max_affected_items
return desc
description = __unicode__
def apply(self, basket, condition, offer=None):
return D('0.00')
def clean(self):
if not self.type:
raise ValidationError(_("Benefit requires a value"))
method_name = 'clean_%s' % self.type.lower().replace(' ', '_')
if hasattr(self, method_name):
getattr(self, method_name)()
def clean_multibuy(self):
if not self.range:
raise ValidationError(
_("Multibuy benefits require a product range"))
if self.value:
raise ValidationError(
_("Multibuy benefits don't require a value"))
if self.max_affected_items:
raise ValidationError(
_("Multibuy benefits don't require a 'max affected items' "
"attribute"))
def clean_percentage(self):
if not self.range:
raise ValidationError(
_("Percentage benefits require a product range"))
if self.value > 100:
raise ValidationError(
_("Percentage discount cannot be greater than 100"))
def clean_shipping_absolute(self):
if not self.value:
raise ValidationError(
_("A discount value is required"))
if self.range:
raise ValidationError(
_("No range should be selected as this benefit does not "
"apply to products"))
if self.max_affected_items:
raise ValidationError(
_("Shipping discounts don't require a 'max affected items' "
"attribute"))
def clean_shipping_percentage(self):
if self.value > 100:
raise ValidationError(
_("Percentage discount cannot be greater than 100"))
if self.range:
raise ValidationError(
_("No range should be selected as this benefit does not "
"apply to products"))
if self.max_affected_items:
raise ValidationError(
_("Shipping discounts don't require a 'max affected items' "
"attribute"))
def clean_shipping_fixed_price(self):
if self.range:
raise ValidationError(
_("No range should be selected as this benefit does not "
"apply to products"))
if self.max_affected_items:
raise ValidationError(
_("Shipping discounts don't require a 'max affected items' "
"attribute"))
def clean_fixed_price(self):
if self.range:
raise ValidationError(
_("No range should be selected as the condition range will "
"be used instead."))
def clean_absolute(self):
if not self.range:
raise ValidationError(
_("Percentage benefits require a product range"))
def round(self, amount):
"""
Apply rounding to discount amount
"""
if hasattr(settings, 'OSCAR_OFFER_ROUNDING_FUNCTION'):
return settings.OSCAR_OFFER_ROUNDING_FUNCTION(amount)
return amount.quantize(D('.01'), ROUND_DOWN)
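        # Example: with no OSCAR_OFFER_ROUNDING_FUNCTION configured, a raw discount
        # of D('3.14159') is truncated down to D('3.14') rather than rounded up.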
def _effective_max_affected_items(self):
"""
Return the maximum number of items that can have a discount applied
during the application of this benefit
"""
return self.max_affected_items if self.max_affected_items else 10000
def can_apply_benefit(self, product):
"""
Determines whether the benefit can be applied to a given product
"""
return product.has_stockrecord and product.is_discountable
def get_applicable_lines(self, basket, range=None):
"""
Return the basket lines that are available to be discounted
:basket: The basket
:range: The range of products to use for filtering. The fixed-price
benefit ignores its range and uses the condition range
"""
if range is None:
range = self.range
line_tuples = []
for line in basket.all_lines():
product = line.product
if (not range.contains(product) or
not self.can_apply_benefit(product)):
continue
price = line.unit_price_incl_tax
if not price:
# Avoid zero price products
continue
if line.quantity_without_discount == 0:
continue
line_tuples.append((price, line))
# We sort lines to be cheapest first to ensure consistent applications
return sorted(line_tuples)
def shipping_discount(self, charge):
return D('0.00')
class Range(models.Model):
"""
Represents a range of products that can be used within an offer
"""
name = models.CharField(_("Name"), max_length=128, unique=True)
includes_all_products = models.BooleanField(_('Includes All Products'), default=False)
included_products = models.ManyToManyField('catalogue.Product', related_name='includes', blank=True,
verbose_name=_("Included Products"))
excluded_products = models.ManyToManyField('catalogue.Product', related_name='excludes', blank=True,
verbose_name=_("Excluded Products"))
classes = models.ManyToManyField('catalogue.ProductClass', related_name='classes', blank=True,
verbose_name=_("Product Classes"))
included_categories = models.ManyToManyField('catalogue.Category', related_name='includes', blank=True,
verbose_name=_("Included Categories"))
# Allow a custom range instance to be specified
proxy_class = models.CharField(_("Custom class"), null=True, blank=True,
max_length=255, default=None, unique=True)
date_created = models.DateTimeField(_("Date Created"), auto_now_add=True)
__included_product_ids = None
__excluded_product_ids = None
__class_ids = None
class Meta:
verbose_name = _("Range")
verbose_name_plural = _("Ranges")
def __unicode__(self):
return self.name
def contains_product(self, product):
"""
Check whether the passed product is part of this range
"""
# We look for shortcircuit checks first before
# the tests that require more database queries.
if settings.OSCAR_OFFER_BLACKLIST_PRODUCT and \
settings.OSCAR_OFFER_BLACKLIST_PRODUCT(product):
return False
# Delegate to a proxy class if one is provided
if self.proxy_class:
return load_proxy(self.proxy_class)().contains_product(product)
excluded_product_ids = self._excluded_product_ids()
if product.id in excluded_product_ids:
return False
if self.includes_all_products:
return True
if product.product_class_id in self._class_ids():
return True
included_product_ids = self._included_product_ids()
if product.id in included_product_ids:
return True
test_categories = self.included_categories.all()
if test_categories:
for category in product.categories.all():
for test_category in test_categories:
if category == test_category or category.is_descendant_of(test_category):
return True
return False
# Shorter alias
contains = contains_product
def _included_product_ids(self):
        if self.__included_product_ids is None:
self.__included_product_ids = [row['id'] for row in self.included_products.values('id')]
return self.__included_product_ids
def _excluded_product_ids(self):
if not self.id:
return []
        if self.__excluded_product_ids is None:
self.__excluded_product_ids = [row['id'] for row in self.excluded_products.values('id')]
return self.__excluded_product_ids
def _class_ids(self):
        if self.__class_ids is None:
self.__class_ids = [row['id'] for row in self.classes.values('id')]
return self.__class_ids
def num_products(self):
if self.includes_all_products:
return None
return self.included_products.all().count()
@property
def is_editable(self):
"""
Test whether this product can be edited in the dashboard
"""
return self.proxy_class is None
# ==========
# Conditions
# ==========
class CountCondition(Condition):
"""
An offer condition dependent on the NUMBER of matching items from the basket.
"""
class Meta:
proxy = True
verbose_name = _("Count Condition")
verbose_name_plural = _("Count Conditions")
def is_satisfied(self, basket):
"""
Determines whether a given basket meets this condition
"""
num_matches = 0
for line in basket.all_lines():
if (self.can_apply_condition(line.product)
and line.quantity_without_discount > 0):
num_matches += line.quantity_without_discount
if num_matches >= self.value:
return True
return False
def _get_num_matches(self, basket):
if hasattr(self, '_num_matches'):
return getattr(self, '_num_matches')
num_matches = 0
for line in basket.all_lines():
if (self.can_apply_condition(line.product)
and line.quantity_without_discount > 0):
num_matches += line.quantity_without_discount
self._num_matches = num_matches
return num_matches
def is_partially_satisfied(self, basket):
num_matches = self._get_num_matches(basket)
return 0 < num_matches < self.value
def get_upsell_message(self, basket):
num_matches = self._get_num_matches(basket)
delta = self.value - num_matches
return ungettext('Buy %(delta)d more product from %(range)s',
'Buy %(delta)d more products from %(range)s', delta) % {
'delta': delta, 'range': self.range}
def consume_items(self, basket, affected_lines):
"""
Marks items within the basket lines as consumed so they
can't be reused in other offers.
:basket: The basket
:affected_lines: The lines that have been affected by the discount.
This should be list of tuples (line, discount, qty)
"""
# We need to count how many items have already been consumed as part of
# applying the benefit, so we don't consume too many items.
num_consumed = 0
for line, __, quantity in affected_lines:
num_consumed += quantity
to_consume = max(0, self.value - num_consumed)
if to_consume == 0:
return
for __, line in self.get_applicable_lines(basket,
most_expensive_first=True):
quantity_to_consume = min(line.quantity_without_discount,
to_consume)
line.consume(quantity_to_consume)
to_consume -= quantity_to_consume
if to_consume == 0:
break
class CoverageCondition(Condition):
"""
An offer condition dependent on the number of DISTINCT matching items from the basket.
"""
class Meta:
proxy = True
verbose_name = _("Coverage Condition")
verbose_name_plural = _("Coverage Conditions")
def is_satisfied(self, basket):
"""
Determines whether a given basket meets this condition
"""
covered_ids = []
for line in basket.all_lines():
if not line.is_available_for_discount:
continue
product = line.product
if (self.can_apply_condition(product) and product.id not in covered_ids):
covered_ids.append(product.id)
if len(covered_ids) >= self.value:
return True
return False
def _get_num_covered_products(self, basket):
covered_ids = []
for line in basket.all_lines():
if not line.is_available_for_discount:
continue
product = line.product
if (self.can_apply_condition(product) and product.id not in covered_ids):
covered_ids.append(product.id)
return len(covered_ids)
def get_upsell_message(self, basket):
delta = self.value - self._get_num_covered_products(basket)
return ungettext('Buy %(delta)d more product from %(range)s',
'Buy %(delta)d more products from %(range)s', delta) % {
'delta': delta, 'range': self.range}
def is_partially_satisfied(self, basket):
return 0 < self._get_num_covered_products(basket) < self.value
def consume_items(self, basket, affected_lines):
"""
Marks items within the basket lines as consumed so they
can't be reused in other offers.
"""
# Determine products that have already been consumed by applying the
# benefit
consumed_products = []
for line, __, quantity in affected_lines:
consumed_products.append(line.product)
to_consume = max(0, self.value - len(consumed_products))
if to_consume == 0:
return
for line in basket.all_lines():
product = line.product
if not self.can_apply_condition(product):
continue
if product in consumed_products:
continue
if not line.is_available_for_discount:
continue
# Only consume a quantity of 1 from each line
line.consume(1)
consumed_products.append(product)
to_consume -= 1
if to_consume == 0:
break
def get_value_of_satisfying_items(self, basket):
covered_ids = []
value = D('0.00')
for line in basket.all_lines():
if (self.can_apply_condition(line.product) and line.product.id not in covered_ids):
covered_ids.append(line.product.id)
value += line.unit_price_incl_tax
if len(covered_ids) >= self.value:
return value
return value
class ValueCondition(Condition):
"""
An offer condition dependent on the VALUE of matching items from the
basket.
"""
class Meta:
proxy = True
verbose_name = _("Value Condition")
verbose_name_plural = _("Value Conditions")
def is_satisfied(self, basket):
"""
Determine whether a given basket meets this condition
"""
value_of_matches = D('0.00')
for line in basket.all_lines():
product = line.product
if (self.can_apply_condition(product) and product.has_stockrecord
and line.quantity_without_discount > 0):
price = line.unit_price_incl_tax
value_of_matches += price * int(line.quantity_without_discount)
if value_of_matches >= self.value:
return True
return False
def _get_value_of_matches(self, basket):
if hasattr(self, '_value_of_matches'):
return getattr(self, '_value_of_matches')
value_of_matches = D('0.00')
for line in basket.all_lines():
product = line.product
if (self.can_apply_condition(product) and product.has_stockrecord
and line.quantity_without_discount > 0):
price = line.unit_price_incl_tax
value_of_matches += price * int(line.quantity_without_discount)
self._value_of_matches = value_of_matches
return value_of_matches
def is_partially_satisfied(self, basket):
value_of_matches = self._get_value_of_matches(basket)
return D('0.00') < value_of_matches < self.value
def get_upsell_message(self, basket):
value_of_matches = self._get_value_of_matches(basket)
return _('Spend %(value)s more from %(range)s') % {
'value': currency(self.value - value_of_matches),
'range': self.range}
def consume_items(self, basket, affected_lines):
"""
Marks items within the basket lines as consumed so they
can't be reused in other offers.
We allow lines to be passed in as sometimes we want them sorted
in a specific order.
"""
# Determine value of items already consumed as part of discount
value_consumed = D('0.00')
for line, __, qty in affected_lines:
price = line.unit_price_incl_tax
value_consumed += price * qty
to_consume = max(0, self.value - value_consumed)
if to_consume == 0:
return
for price, line in self.get_applicable_lines(basket,
most_expensive_first=True):
quantity_to_consume = min(
line.quantity_without_discount,
(to_consume / price).quantize(D(1), ROUND_UP))
line.consume(quantity_to_consume)
to_consume -= price * quantity_to_consume
if to_consume == 0:
break
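# --- Worked example (comments only; values are illustrative) -----------------
# The quantize(..., ROUND_UP) above answers "how many units at `price` are
# needed to cover the remaining condition value?".  With 2.50 left to satisfy
# and a unit price of 0.99:
#     (D('2.50') / D('0.99')).quantize(D(1), ROUND_UP)  ->  Decimal('3')
# so three units are consumed even though two would fall just short.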
# ========
# Benefits
# ========
class PercentageDiscountBenefit(Benefit):
"""
An offer benefit that gives a percentage discount
"""
class Meta:
proxy = True
verbose_name = _("Percentage discount benefit")
verbose_name_plural = _("Percentage discount benefits")
def apply(self, basket, condition, offer=None):
line_tuples = self.get_applicable_lines(basket)
discount = D('0.00')
affected_items = 0
max_affected_items = self._effective_max_affected_items()
affected_lines = []
for price, line in line_tuples:
if affected_items >= max_affected_items:
break
quantity_affected = min(line.quantity_without_discount,
max_affected_items - affected_items)
line_discount = self.round(self.value / D('100.0') * price
* int(quantity_affected))
line.discount(line_discount, quantity_affected)
affected_lines.append((line, line_discount, quantity_affected))
affected_items += quantity_affected
discount += line_discount
if discount > 0:
condition.consume_items(basket, affected_lines)
return discount
class AbsoluteDiscountBenefit(Benefit):
"""
An offer benefit that gives an absolute discount
"""
class Meta:
proxy = True
verbose_name = _("Absolute discount benefit")
verbose_name_plural = _("Absolute discount benefits")
def apply(self, basket, condition, offer=None):
line_tuples = self.get_applicable_lines(basket)
if not line_tuples:
return self.round(D('0.00'))
discount = D('0.00')
affected_items = 0
max_affected_items = self._effective_max_affected_items()
affected_lines = []
for price, line in line_tuples:
if affected_items >= max_affected_items:
break
remaining_discount = self.value - discount
quantity_affected = min(
line.quantity_without_discount,
max_affected_items - affected_items,
int(math.ceil(remaining_discount / price)))
line_discount = self.round(min(remaining_discount,
quantity_affected * price))
line.discount(line_discount, quantity_affected)
affected_lines.append((line, line_discount, quantity_affected))
affected_items += quantity_affected
discount += line_discount
if discount > 0:
condition.consume_items(basket, affected_lines)
return discount
class FixedPriceBenefit(Benefit):
"""
An offer benefit that gives the items in the condition for a
fixed price. This is useful for "bundle" offers.
Note that we ignore the benefit range here and only give a fixed price
for the products in the condition range. The condition cannot be a value
condition.
We also ignore the max_affected_items setting.
"""
class Meta:
proxy = True
verbose_name = _("Fixed price benefit")
verbose_name_plural = _("Fixed price benefits")
def apply(self, basket, condition, offer=None):
if isinstance(condition, ValueCondition):
return self.round(D('0.00'))
line_tuples = self.get_applicable_lines(basket, range=condition.range)
if not line_tuples:
return self.round(D('0.00'))
# Determine the lines to consume
num_permitted = int(condition.value)
num_affected = 0
value_affected = D('0.00')
covered_lines = []
for price, line in line_tuples:
if isinstance(condition, CoverageCondition):
quantity_affected = 1
else:
quantity_affected = min(
line.quantity_without_discount,
num_permitted - num_affected)
num_affected += quantity_affected
value_affected += quantity_affected * price
covered_lines.append((price, line, quantity_affected))
if num_affected >= num_permitted:
break
discount = max(value_affected - self.value, D('0.00'))
if not discount:
return self.round(discount)
# Apply discount to the affected lines
discount_applied = D('0.00')
        last_line = covered_lines[-1][1]
for price, line, quantity in covered_lines:
if line == last_line:
# If last line, we just take the difference to ensure that
# rounding doesn't lead to an off-by-one error
line_discount = discount - discount_applied
else:
line_discount = self.round(
discount * (price * quantity) / value_affected)
line.discount(line_discount, quantity)
discount_applied += line_discount
return discount
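# --- Illustrative sketch (not part of Oscar's API) ---------------------------
# The proportional split performed in FixedPriceBenefit.apply above can be
# easier to follow outside the ORM.  The helper below repeats the same
# arithmetic with plain (price, quantity) tuples standing in for basket lines;
# the function name and tuple format are assumptions made for illustration.
def _fixed_price_allocation_sketch(covered, bundle_price):
    from decimal import Decimal as _D, ROUND_DOWN as _ROUND_DOWN
    value_affected = sum((price * qty for price, qty in covered), _D('0.00'))
    discount = max(value_affected - bundle_price, _D('0.00'))
    allocations, applied = [], _D('0.00')
    for i, (price, qty) in enumerate(covered):
        if i == len(covered) - 1:
            # The last item absorbs any rounding remainder
            share = discount - applied
        else:
            share = (discount * (price * qty) / value_affected).quantize(
                _D('.01'), _ROUND_DOWN)
        allocations.append(share)
        applied += share
    return allocations
# Example: lines at 10.00 and 5.00 sold as a 12.00 bundle give a 3.00 discount
# split 2.00 / 1.00:
#   _fixed_price_allocation_sketch([(D('10.00'), 1), (D('5.00'), 1)], D('12.00'))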
class MultibuyDiscountBenefit(Benefit):
class Meta:
proxy = True
verbose_name = _("Multibuy discount benefit")
verbose_name_plural = _("Multibuy discount benefits")
def apply(self, basket, condition, offer=None):
line_tuples = self.get_applicable_lines(basket)
if not line_tuples:
return self.round(D('0.00'))
# Cheapest line gives free product
discount, line = line_tuples[0]
line.discount(discount, 1)
affected_lines = [(line, discount, 1)]
condition.consume_items(basket, affected_lines)
return discount
# =================
# Shipping benefits
# =================
class ShippingBenefit(Benefit):
def apply(self, basket, condition, offer=None):
# Attach offer to basket to indicate that it qualifies for a shipping
# discount. At this point, we only allow one shipping offer per
# basket.
basket.shipping_offer = offer
condition.consume_items(basket, affected_lines=())
return D('0.00')
class ShippingAbsoluteDiscountBenefit(ShippingBenefit):
class Meta:
proxy = True
verbose_name = _("Shipping absolute discount benefit")
verbose_name_plural = _("Shipping absolute discount benefits")
def shipping_discount(self, charge):
return min(charge, self.value)
class ShippingFixedPriceBenefit(ShippingBenefit):
class Meta:
proxy = True
verbose_name = _("Fixed price shipping benefit")
verbose_name_plural = _("Fixed price shipping benefits")
def shipping_discount(self, charge):
if charge < self.value:
return D('0.00')
return charge - self.value
class ShippingPercentageDiscountBenefit(ShippingBenefit):
class Meta:
proxy = True
verbose_name = _("Shipping percentage discount benefit")
verbose_name_plural = _("Shipping percentage discount benefits")
def shipping_discount(self, charge):
return charge * self.value / D('100.0')
| 38.150877 | 117 | 0.616849 | 42,237 | 0.971144 | 0 | 0 | 165 | 0.003794 | 0 | 0 | 11,622 | 0.267222 |
5eda690b685cc647a25421c21d7eb3efc87731a9 | 3,288 | py | Python | htk-lite/commandlist/help.py | otherbeast/hackers-tool-kit | 12991889db1f6843dde82e7da4b4cdfb50740da5 | [
"Apache-2.0"
]
| 393 | 2019-01-21T05:52:54.000Z | 2022-03-29T06:07:04.000Z | htk-lite/commandlist/help.py | urantialife/hackers-tool-kit | 34dbabf3e94825684fd1a684f522d3dc3565eb2d | [
"Apache-2.0"
]
| 19 | 2019-02-22T00:49:28.000Z | 2021-12-30T20:28:59.000Z | htk-lite/commandlist/help.py | urantialife/hackers-tool-kit | 34dbabf3e94825684fd1a684f522d3dc3565eb2d | [
"Apache-2.0"
]
| 138 | 2019-03-15T23:22:19.000Z | 2022-03-20T17:19:09.000Z | #!/usr/local/bin/python
# coding: latin-1
#if you use this code give me credit @tuf_unkn0wn
#i do not give you permission to show / edit this script without my credit
#to ask questions or report a problem message me on instagram @tuf_unkn0wn
"""
██░ ██ ▄▄▄ ▄████▄ ██ ▄█▀▓█████ ▓█████▄
▓██░ ██▒▒████▄ ▒██▀ ▀█ ██▄█▒ ▓█ ▀ ▒██▀ ██▌
▒██▀▀██░▒██ ▀█▄ ▒▓█ ▄ ▓███▄░ ▒███ ░██ █▌
░▓█ ░██ ░██▄▄▄▄██ ▒▓▓▄ ▄██▒▓██ █▄ ▒▓█ ▄ ░▓█▄ ▌
░▓█▒░██▓ ▓█ ▓██▒▒ ▓███▀ ░▒██▒ █▄░▒████▒░▒████▓
▒ ▒░▒ ▒▒ ▓▒█ ░▒ ▒ ░▒ ▒▒ ▓▒ ▒░ ░ ▒▒▓ ▒
▒ ░▒░ ░ ▒ ▒▒ ░ ░ ▒ ░ ░▒ ▒░ ░ ░ ░ ░ ▒ ▒
░ ░ ░ ▒ ░ ░ ░ ░ ░ ░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░ ░
░ ░
"""
import os
import sys
import random
lred = '\033[91m'
lblue = '\033[94m'
lgreen = '\033[92m'
yellow = '\033[93m'
cyan = '\033[1;36m'
purple = '\033[95m'
red = '\033[31m'
green = '\033[32m'
blue = '\033[34m'
orange = '\033[33m'
colorlist = [red, blue, green, yellow, lblue, purple, cyan, lred, lgreen, orange]
randomcolor = random.choice(colorlist)
banner3list = [red, blue, green, purple]
def helpbanner():
a = os.popen("ls commandlist -1 | wc -l").read()
b = a.replace('\n', '')
print """
╔══════════════════════════════════════════════════════════╗
║ ║
║ \033[92m ██░ ██ ▓█████ ██▓ ██▓███ \033[0m ║
║ \033[90m ▓██░ ██▒▓█ ▀ ▓██▒ ▓██░ ██▒ \033[0m ║
║ \033[92m ▒██▀▀██░▒███ ▒██░ ▓██░ ██▓▒ \033[0m ║
║ \033[90m ░▓█ ░██ ▒▓█ ▄ ▒██░ ▒██▄█▓▒ ▒ \033[0m ║
║ \033[92m ░▓█▒░██▓░▒████▒░██████▒▒██▒ ░ ░ \033[0m ║
║ \033[94m ▒ ░░▒░▒░░ ▒░ ░░ ▒░▓ ░▒▓▒░ ░ ░ \033[0m ║
║ \033[90m ▒ ░▒░ ░ ░ ░ ░░ ░ ▒ ░░▒ ░ \033[0m ║
║ \033[94m ░ ░░ ░ ░ ░ ░ ░░ \033[0m ║
║ \033[90m ░ ░ ░ ░ ░ ░ ░ \033[0m ║
║ ║
║══════════════════════════════════════════════════════════║
║ Commands: [\033[32m{0}\033[0m] Banners: [\033[31m6\033[0m] ║
║══════════════════════════════════════════════════════════════════════════════════════╗
║ ? | this menu ║
║ exit | exit htkl ║
║ clear | clears screen ║
║ banner | shows a banner ║
║ infoscan | gather information on a host [for a more specific scan type infoscan -o] ║
║ dos | run Denial-Of-Service attacks ║
║ ║
║ ║
║ \033[5m@tuf_unkn0wn\033[0m ║
╚══════════════════════════════════════════════════════════════════════════════════════╝
\033[0m\n""".format(b)
helpbanner()
| 46.971429 | 102 | 0.253041 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,412 | 0.919933 |
5edd1d618589e67fdc13ac60dffe9edc5736896c | 2,980 | py | Python | scripts/core/soldier.py | whackashoe/entwinement | 4acff2147b86e08e267fc50c327917a338c7bf36 | [
"Unlicense"
]
| 1 | 2020-03-10T10:52:13.000Z | 2020-03-10T10:52:13.000Z | scripts/core/soldier.py | whackashoe/entwinement | 4acff2147b86e08e267fc50c327917a338c7bf36 | [
"Unlicense"
]
| null | null | null | scripts/core/soldier.py | whackashoe/entwinement | 4acff2147b86e08e267fc50c327917a338c7bf36 | [
"Unlicense"
]
| null | null | null | d_soldiers = []
class Soldier:
def __init__(self, id, name, team):
self.id = id
self.name = name
self.team = team
self.x = 0
self.y = 0
self.xVelo = 0
self.yVelo = 0
self.kills = 0
self.deaths = 0
self.alive = 'true'
self.driving = 'false'
self.gun = 0
self.ammo = 0
self.reloading = 'false'
def setPosition(self, x, y, xv, yv):
self.x = x
self.y = y
self.xVelo = xv
self.yVelo = yv
def setName(self, name):
self.name = name
def setTeam(self, team):
self.team = team
def setGun(self, gun):
self.gun = gun
def setGunInfo(self, gun, ammo, reloading):
self.gun = gun
self.ammo = ammo
self.reloading = reloading
def die(self):
self.alive = 'false'
self.driving = 'false'
self.deaths += 1
def respawn(self):
self.alive = 'true'
def teleport(self, x, y):
global com
self.x = x
self.y = y
com += 'f_t s '+str(self.id)+' '+str(self.x)+' '+str(self.y)+';'
def applyForce(self, xf, yf):
global com
com += 'f_af s '+str(self.id)+' '+str(xf)+' '+str(yf)+';'
def setVelocity(self, xf, yf):
global com
self.xVelo = xf
self.yVelo = yf
com += 'f_v s '+str(self.id)+' '+str(self.xVelo)+' '+str(self.yVelo)+';'
def changeTeam(self, team):
global com
self.team = team
com += 's_ct '+str(self.id)+' '+str(self.team)+';'
def changeGun(self, gun):
global com
self.gun = gun
com += 's_cg '+str(self.id)+' '+str(self.gun)+';'
def changeAttachment(self, type, amount):
global com
com += 's_ca '+str(self.id)+' '+str(type)+' '+str(amount)+';'
def killSoldier(self):
global com
		self.alive = 'false'
		com += 's_ks '+str(self.id)+';'
def respawnSoldier(self, spawn):
global com
com += 's_rs '+str(self.id)+' '+str(spawn)+';'
def enterVehicle(self, vehicleId):
global com
com += 's_en '+str(self.id)+' '+str(vehicleId)+';'
def exitVehicle(self):
global com
com += 's_ex '+str(self.id)+';'
def addKill(self):
global com
self.kills += 1
com += 's_ak '+str(self.id)+';'
def addDeath(self):
global com
self.deaths += 1
com += 's_ad '+str(self.id)+';'
def dropGun(self):
global com
com += 's_dg '+str(self.id)+';'
def addSoldier(team):
global com
com += 'a s '+str(team)+';'
def getSoldier(n):
global d_soldiers
return d_soldiers[n]
def getSoldierById(id):
global d_soldiers
for n in xrange(len(d_soldiers)):
s = d_soldiers[n]
if s.id == id:
return s
def getSoldiers():
global d_soldiers
return d_soldiers
def getSoldierCount():
global d_soldiers
return len(d_soldiers)
def getTeamKills(team):
amount = 0
for n in xrange(len(d_soldiers)):
s = d_soldiers[n]
if s.team == team:
amount += s.kills
return amount
def getTeamDeaths(team):
amount = 0
for n in xrange(len(d_soldiers)):
s = d_soldiers[n]
if s.team == team:
amount += s.deaths
return amount
def getTeamSize(team):
amount = 0
for n in xrange(len(d_soldiers)):
s = d_soldiers[n]
if s.team == team:
amount += 1
return amount
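# --- Illustrative note (assumes the game engine defines and flushes `com`) ---
# Every mutator appends a short text command to the global `com` buffer rather
# than acting immediately; the engine later parses and executes that buffer.
# For example, teleporting soldier 3 to (10, 20) appends "f_t s 3 10 20;":
#
#     com = ''
#     d_soldiers.append(Soldier(3, 'demo', 1))
#     getSoldierById(3).teleport(10, 20)
#     print(com)   # -> "f_t s 3 10 20;"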
| 18.742138 | 74 | 0.617785 | 2,133 | 0.715772 | 0 | 0 | 0 | 0 | 0 | 0 | 219 | 0.07349 |
5eddcc0044e85262897bbd20777730764bb0e4ac | 785 | py | Python | dzTraficoBackend/dzTrafico/BusinessLayer/Statistics/DataVisualizationController.py | DZAymen/dz-Trafico | 74ff9caf9e3845d8af977c46b04a2d3421a0661b | [
"MIT"
]
| null | null | null | dzTraficoBackend/dzTrafico/BusinessLayer/Statistics/DataVisualizationController.py | DZAymen/dz-Trafico | 74ff9caf9e3845d8af977c46b04a2d3421a0661b | [
"MIT"
]
| null | null | null | dzTraficoBackend/dzTrafico/BusinessLayer/Statistics/DataVisualizationController.py | DZAymen/dz-Trafico | 74ff9caf9e3845d8af977c46b04a2d3421a0661b | [
"MIT"
]
| null | null | null | from dzTrafico.BusinessEntities.Simulation import Simulation
import lxml.etree as etree
class DataVisualizationController(object):
def __init__(self, simulation):
# Initialize necessary file paths
self.simulation = simulation
def get_emissions_results(self):
pass
def get_travel_time_results(self):
        travel_time_results = []
        return travel_time_results
def get_waiting_time_results(self):
pass
def get_root_node_file(self, filename):
tree = etree.parse(Simulation.project_directory + filename)
return tree.getroot()
class DataVisualization(object):
def __init__(self, type, data):
self.type = type
self.data = data
def add_data(self, data):
for value in data:
self.data.append(value)
| 23.787879 | 67 | 0.680255 | 693 | 0.882803 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.042038 |
5edde40f3283ddaa109a18bcb421a16c3e99b304 | 7,343 | py | Python | bunkai/algorithm/lbd/custom_tokenizers.py | megagonlabs/bunkai | 28ea1c891f6ee8f96269f41a0642cd6194dd04e9 | [
"Apache-2.0"
]
| 149 | 2021-04-21T06:25:21.000Z | 2022-03-29T08:57:49.000Z | bunkai/algorithm/lbd/custom_tokenizers.py | megagonlabs/bunkai | 28ea1c891f6ee8f96269f41a0642cd6194dd04e9 | [
"Apache-2.0"
]
| 41 | 2021-05-11T00:46:16.000Z | 2022-03-22T05:17:47.000Z | bunkai/algorithm/lbd/custom_tokenizers.py | megagonlabs/bunkai | 28ea1c891f6ee8f96269f41a0642cd6194dd04e9 | [
"Apache-2.0"
]
| 5 | 2021-04-21T10:54:46.000Z | 2022-02-25T17:41:21.000Z | #!/usr/bin/env python3
import collections
import logging
import os
import typing
import unicodedata
from janome.tokenizer import Tokenizer
from transformers.file_utils import cached_path
from transformers.models.bert.tokenization_bert import BertTokenizer, WordpieceTokenizer, load_vocab
import bunkai.constant
"""
The original source code is from cl-tohoku/bert-japanese.
https://github.com/cl-tohoku/bert-japanese/blob/master/tokenization.py
The original source code is under Apache-2.0 License.
"""
logger = logging.getLogger(__name__)
KNOWN_PRETRAINED_VOCABS = {
"cl-tohoku/bert-base-japanese",
"cl-tohoku/bert-base-japanese-whole-word-masking",
"cl-tohoku/bert-base-japanese-char",
"cl-tohoku/bert-base-japanese-char-whole-word-masking",
}
class JanomeTokenizer(object):
"""Runs basic tokenization with Janome morphological parser."""
def __init__(self, *, do_lower_case=False, never_split=None, normalize_text=True):
"""
Construct a JanomeTokenizer.
:arg do_lower_case: (`optional`) boolean (default True)
Whether to lower case the input.
:arg never_split: (`optional`) list of str
Kept for backward compatibility purposes.
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
List of token not to split.
:arg normalize_text: (`optional`) boolean (default True)
Whether to apply unicode normalization to text before tokenization.
"""
self.do_lower_case = do_lower_case
self.never_split = never_split if never_split is not None else []
self.normalize_text = normalize_text
self.janome_tokenizer = Tokenizer()
def tokenize(self, text: str, *, never_split=None, **kwargs):
"""Tokenizes a piece of text."""
if self.normalize_text:
text = unicodedata.normalize("NFKC", text)
never_split = self.never_split + (never_split if never_split is not None else [])
tokens = self.janome_tokenizer.tokenize(text)
__tokens = []
last_index = 0
for t in tokens:
token = t.surface
token_start = text.index(token, last_index)
if last_index != token_start:
__tokens.append(text[last_index:token_start])
if self.do_lower_case and token not in never_split:
token = token.lower()
                __tokens.append(token)
else:
__tokens.append(token)
last_index = token_start + len(token)
if len(text) != last_index:
__tokens.append(text[last_index:])
assert text == "".join(__tokens), f"[{text}] != [{''.join(__tokens)}]"
return __tokens
class CharacterTokenizer(object):
"""Runs Character tokenziation."""
def __init__(self, vocab, unk_token, normalize_text=True):
self.vocab = vocab
self.unk_token = unk_token
self.normalize_text = normalize_text
def tokenize(self, text):
"""
Tokenize a piece of text into characters.
For example:
input = "apple"
output = ["a", "p", "p", "l", "e"]
:arg text: A single token or whitespace separated tokens.
This should have already been passed through `BasicTokenizer`.
:return: A list of characters.
"""
if self.normalize_text:
text = unicodedata.normalize("NFKC", text)
output_tokens = []
for char in text:
if char not in self.vocab:
output_tokens.append(self.unk_token)
continue
output_tokens.append(char)
return output_tokens
class JanomeSubwordsTokenizer(BertTokenizer):
def __init__(
self,
vocab_file,
*,
subword_tokenizer_type="wordpiece",
do_subword_tokenize: bool = True,
never_split=None,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
**kwargs,
):
"""
Construct a JanomeSubwordsTokenizer.
:arg vocab_file: Path to a one-wordpiece-per-line vocabulary file.
:arg do_lower_case: (`optional`) boolean (default True)
Whether to lower case the input.
Only has an effect when do_basic_tokenize=True.
:arg do_word_tokenize: (`optional`) boolean (default True) Whether to do word tokenization.
:arg do_subword_tokenize: (`optional`) boolean (default True) Whether to do subword tokenization.
:arg word_tokenizer_type: (`optional`) string (default "basic")
Type of word tokenizer. basic / janome / pre_tokenize
:arg subword_tokenizer_type: (`optional`) string (default "wordpiece") Type of subword tokenizer.
:arg cls_token: No description.
"""
super(BertTokenizer, self).__init__(
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
**kwargs,
)
if os.path.isfile(vocab_file):
self.vocab = load_vocab(vocab_file)
elif vocab_file in KNOWN_PRETRAINED_VOCABS:
url: str = f"https://s3.amazonaws.com/models.huggingface.co/bert/{vocab_file}/vocab.txt"
self.vocab = load_vocab(cached_path(url))
else:
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)
)
# add new vocab
self.add_tokens([" ", bunkai.constant.METACHAR_LINE_BREAK])
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
self.do_word_tokenize = False
self.do_subword_tokenize = True
if do_subword_tokenize:
if subword_tokenizer_type == "wordpiece":
self.subword_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
elif subword_tokenizer_type == "character":
self.subword_tokenizer = CharacterTokenizer(vocab=self.vocab, unk_token=self.unk_token)
else:
raise ValueError("Invalid subword_tokenizer_type '{}' is specified.".format(subword_tokenizer_type))
self.janome_tokenizer = JanomeTokenizer()
def tokenize(self, text: typing.Union[str, typing.List[str]]) -> typing.List[str]:
if isinstance(text, str):
morphemes = self.janome_tokenizer.tokenize(text)
elif isinstance(text, list) and all([isinstance(t, str) for t in text]):
morphemes = text
else:
raise Exception(f"Invalid input-type {text}")
if self.do_subword_tokenize:
split_tokens = []
for token in morphemes:
sts = [sub_token for sub_token in self.subword_tokenizer.tokenize(token)]
if len(sts) == 0:
split_tokens.append(token)
else:
split_tokens += sts
else:
split_tokens = morphemes
return split_tokens
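# --- Example usage (sketch; downloads the Tohoku BERT vocabulary on first use)
# if __name__ == "__main__":
#     tokenizer = JanomeSubwordsTokenizer(
#         "cl-tohoku/bert-base-japanese-whole-word-masking")
#     print(tokenizer.tokenize("今日はいい天気です。"))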
| 36.715 | 116 | 0.626719 | 6,567 | 0.894321 | 0 | 0 | 0 | 0 | 0 | 0 | 2,678 | 0.364701 |
5edecbbe347219a2740ccd3534f648ace677fd24 | 10,232 | py | Python | tests/exchanges_tests.py | tomwalton78/Crypto-Exchange-API-Aggregator | c5b1756eac46274cdbe5c4e49db62450a35b70a6 | [
"MIT"
]
| null | null | null | tests/exchanges_tests.py | tomwalton78/Crypto-Exchange-API-Aggregator | c5b1756eac46274cdbe5c4e49db62450a35b70a6 | [
"MIT"
]
| null | null | null | tests/exchanges_tests.py | tomwalton78/Crypto-Exchange-API-Aggregator | c5b1756eac46274cdbe5c4e49db62450a35b70a6 | [
"MIT"
]
| 1 | 2019-11-16T07:31:00.000Z | 2019-11-16T07:31:00.000Z | import unittest
from datetime import datetime
import os
import sys
from api.exchanges.exchange import ExchangeAPICallFailedException
from api.exchanges.gdax_exchange import GdaxExchange
from api.exchanges.kraken_exchange import KrakenExchange
from api.exchanges.bitstamp_exchange import BitstampExchange
from api.exchanges.bitfinex_exchange import BitfinexExchange
class HiddenPrints:
"""Class to disable printing for functions run under its scope.
Example:
with HiddenPrints()
print('hello world')
Nothing will print, since anything under the scope of HiddenPrints has its
printing output suppressed.
"""
def __enter__(self):
"""Disable printing on entering 'with HiddenPrints()' scope
"""
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
def __exit__(self, exc_type, exc_val, exc_tb):
"""Re-enable printing on exiting 'with HiddenPrints()' scope
"""
sys.stdout.close()
sys.stdout = self._original_stdout
class GdaxExchangeTests(unittest.TestCase):
"""
Tests that functions within GdaxExchange class perform as intended.
"""
def test_initialisation_with_valid_market(self):
try:
g = GdaxExchange('BTC-EUR')
pass
except KeyError:
self.fail(
'Initialising GdaxExchange with BTC-EUR raised KeyError.'
)
def test_initialisation_with_invalid_market(self):
with self.assertRaises(KeyError):
g = GdaxExchange('REDDDDDDDDDD-BLUEEEEEEEEEE')
def test_fetch_l1_quote_on_supported_market(self):
try:
g = GdaxExchange('BTC-EUR')
g.fetch_l1_quote()
pass
except Exception as e:
self.fail(
'Fetch l1 quote on supported market failed: {}'.format(
str(e)
)
)
def test_fetch_l1_quote_on_unsupported_market(self):
with self.assertRaises(ExchangeAPICallFailedException):
g = GdaxExchange('LTC-GBP')
g.fetch_l1_quote()
def test_latest_l1_quote_to_csv(self):
g = GdaxExchange('BTC-EUR')
g.latest_l1_quote = {
"best ask size": 0.65333759,
"best bid price": 5780.1,
"best ask price": 5781.24,
"timestamp": datetime.utcnow(),
"best bid size": 0.001006
}
g.latest_l1_quote_to_csv(
path_to_folder=os.path.dirname(os.path.realpath(__file__)) +
'/'
)
# Test that csv file exists
path = (
os.path.dirname(os.path.realpath(__file__)) + '/gdax_BTC-EUR.csv'
)
self.assertTrue(os.path.exists(path))
os.remove(path)
def test_fetch_l1_quote_and_write_to_csv(self):
g = GdaxExchange('BTC-EUR')
with HiddenPrints():
g.fetch_l1_quote_and_write_to_csv(
path_to_folder=os.path.dirname(os.path.realpath(__file__)) +
'/'
)
# Test that csv file exists
path = (
os.path.dirname(os.path.realpath(__file__)) + '/gdax_BTC-EUR.csv'
)
self.assertTrue(os.path.exists(path))
os.remove(path)
class KrakenExchangeTests(unittest.TestCase):
"""
Tests that functions within KrakenExchange class perform as intended.
"""
def test_initialisation_with_valid_market(self):
try:
k = KrakenExchange('BTC-EUR')
pass
except KeyError:
self.fail(
'Initialising KrakenExchange with BTC-EUR raised KeyError.'
)
def test_initialisation_with_invalid_market(self):
with self.assertRaises(KeyError):
k = KrakenExchange('REDDDDDDDDDD-BLUEEEEEEEEEE')
def test_fetch_l1_quote_on_supported_market(self):
try:
k = KrakenExchange('BTC-EUR')
k.fetch_l1_quote()
pass
except Exception as e:
self.fail(
'Fetch l1 quote on supported market failed: {}'.format(
str(e)
)
)
def test_fetch_l1_quote_on_unsupported_market(self):
with self.assertRaises(ExchangeAPICallFailedException):
k = KrakenExchange('LTC-GBP')
k.fetch_l1_quote()
def test_latest_l1_quote_to_csv(self):
k = KrakenExchange('BTC-EUR')
k.latest_l1_quote = {
"best ask size": 0.65333759,
"best bid price": 5780.1,
"best ask price": 5781.24,
"timestamp": datetime.utcnow(),
"best bid size": 0.001006
}
k.latest_l1_quote_to_csv(
path_to_folder=os.path.dirname(os.path.realpath(__file__)) +
'/'
)
# Test that csv file exists
path = (
os.path.dirname(os.path.realpath(__file__)) + '/kraken_BTC-EUR.csv'
)
self.assertTrue(os.path.exists(path))
os.remove(path)
def test_fetch_l1_quote_and_write_to_csv(self):
k = KrakenExchange('BTC-EUR')
with HiddenPrints():
k.fetch_l1_quote_and_write_to_csv(
path_to_folder=os.path.dirname(os.path.realpath(__file__)) +
'/'
)
# Test that csv file exists
path = (
os.path.dirname(os.path.realpath(__file__)) + '/kraken_BTC-EUR.csv'
)
self.assertTrue(os.path.exists(path))
os.remove(path)
class BitstampExchangeTests(unittest.TestCase):
"""
Tests that functions within BitstampExchange class perform as intended.
"""
def test_initialisation_with_valid_market(self):
try:
k = BitstampExchange('BTC-EUR')
pass
except KeyError:
self.fail(
'Initialising BitstampExchange with BTC-EUR raised KeyError.'
)
def test_initialisation_with_invalid_market(self):
with self.assertRaises(KeyError):
k = BitstampExchange('REDDDDDDDDDD-BLUEEEEEEEEEE')
def test_fetch_l1_quote_on_supported_market(self):
try:
k = BitstampExchange('BTC-EUR')
k.fetch_l1_quote()
pass
except Exception as e:
self.fail(
'Fetch l1 quote on supported market failed: {}'.format(
str(e)
)
)
def test_fetch_l1_quote_on_unsupported_market(self):
with self.assertRaises(ExchangeAPICallFailedException):
k = BitstampExchange('LTC-GBP')
k.fetch_l1_quote()
def test_latest_l1_quote_to_csv(self):
k = BitstampExchange('BTC-EUR')
k.latest_l1_quote = {
"best ask size": 0.65333759,
"best bid price": 5780.1,
"best ask price": 5781.24,
"timestamp": datetime.utcnow(),
"best bid size": 0.001006
}
k.latest_l1_quote_to_csv(
path_to_folder=os.path.dirname(os.path.realpath(__file__)) +
'/'
)
# Test that csv file exists
path = (
os.path.dirname(os.path.realpath(__file__)) +
'/bitstamp_BTC-EUR.csv'
)
self.assertTrue(os.path.exists(path))
os.remove(path)
def test_fetch_l1_quote_and_write_to_csv(self):
k = BitstampExchange('BTC-EUR')
with HiddenPrints():
k.fetch_l1_quote_and_write_to_csv(
path_to_folder=os.path.dirname(os.path.realpath(__file__)) +
'/'
)
# Test that csv file exists
path = (
os.path.dirname(os.path.realpath(__file__)) +
'/bitstamp_BTC-EUR.csv'
)
self.assertTrue(os.path.exists(path))
os.remove(path)
class BitfinexExchangeTests(unittest.TestCase):
"""
Tests that functions within BitfinexExchange class perform as intended.
"""
def test_initialisation_with_valid_market(self):
try:
k = BitfinexExchange('BTC-EUR')
pass
except KeyError:
self.fail(
'Initialising BitfinexExchange with BTC-EUR raised KeyError.'
)
def test_initialisation_with_invalid_market(self):
with self.assertRaises(KeyError):
k = BitfinexExchange('REDDDDDDDDDD-BLUEEEEEEEEEE')
def test_fetch_l1_quote_on_supported_market(self):
try:
k = BitfinexExchange('BTC-EUR')
k.fetch_l1_quote()
pass
except Exception as e:
self.fail(
'Fetch l1 quote on supported market failed: {}'.format(
str(e)
)
)
def test_fetch_l1_quote_on_unsupported_market(self):
with self.assertRaises(ExchangeAPICallFailedException):
k = BitfinexExchange('LTC-GBP')
k.fetch_l1_quote()
def test_latest_l1_quote_to_csv(self):
k = BitfinexExchange('BTC-EUR')
k.latest_l1_quote = {
"best ask size": 0.65333759,
"best bid price": 5780.1,
"best ask price": 5781.24,
"timestamp": datetime.utcnow(),
"best bid size": 0.001006
}
k.latest_l1_quote_to_csv(
path_to_folder=os.path.dirname(os.path.realpath(__file__)) +
'/'
)
# Test that csv file exists
path = (
os.path.dirname(os.path.realpath(__file__)) +
'/bitfinex_BTC-EUR.csv'
)
self.assertTrue(os.path.exists(path))
os.remove(path)
def test_fetch_l1_quote_and_write_to_csv(self):
k = BitfinexExchange('BTC-EUR')
with HiddenPrints():
k.fetch_l1_quote_and_write_to_csv(
path_to_folder=os.path.dirname(os.path.realpath(__file__)) +
'/'
)
# Test that csv file exists
path = (
os.path.dirname(os.path.realpath(__file__)) +
'/bitfinex_BTC-EUR.csv'
)
self.assertTrue(os.path.exists(path))
os.remove(path)
if __name__ == '__main__':
unittest.main(exit=False)
| 30.094118 | 79 | 0.583268 | 9,792 | 0.956998 | 0 | 0 | 0 | 0 | 0 | 0 | 2,171 | 0.212177 |
5edf354d82c1df0367e44041106c0bf97648bea1 | 1,342 | py | Python | stats/clustering.py | KNSI-Golem/assets-generation | e366b96e0f2bba16c90816e2690b3b89fd50e514 | [
"MIT"
]
| null | null | null | stats/clustering.py | KNSI-Golem/assets-generation | e366b96e0f2bba16c90816e2690b3b89fd50e514 | [
"MIT"
]
| 33 | 2019-12-02T18:56:18.000Z | 2022-02-10T01:18:01.000Z | stats/clustering.py | KNSI-Golem/assets-generation | e366b96e0f2bba16c90816e2690b3b89fd50e514 | [
"MIT"
]
| 2 | 2020-09-11T13:11:59.000Z | 2021-02-16T17:08:33.000Z | from sklearn.cluster import KMeans
import image_processing
import numpy as np
import some_analysis
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from autoencoder import ConvAutoencoder
input_path = './bin'
output_shape = (32, 48)
processing_output = './processed/results_processing'
data = image_processing.get_data_from_images(processing_output)
data = data[:, :, :, :-1]
encoder, _, _ = ConvAutoencoder.build(32, 48, 3,
filters=(32, 64),
latentDim=512)
encoder.load_weights('encoder.h5')
data_encoded = encoder.predict(data)
#data_reshaped = data.reshape((data.shape[0], -1))
n_clusters = 200
# Runs in parallel 4 CPUs
kmeans = KMeans(n_clusters=n_clusters, n_init=15, n_jobs=8)
# Train K-Means.
y_pred_kmeans = kmeans.fit_predict(data_encoded)
data += 1.0
data *= 127.5
array = np.empty((n_clusters), dtype=object)
for i in range(n_clusters):
array[i] = []
for cluster, idx in zip(y_pred_kmeans, range(data.shape[0])):
array[cluster].append(idx)
i = 1
for l in array:
cluster = data[l]
some_analysis.make_preview(cluster, f'./previews/cluster_v3_{i}.png', n_cols=5)
i += 1
'''
data_embedded = TSNE(learning_rate=200).fit_transform(data_reshaped)
plt.scatter(data_embedded[:, 0], data_embedded[:, 1])
''' | 28.553191 | 83 | 0.69076 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 304 | 0.226528 |
5edf63e904c948abd2995cb1fd09ff2f09a7f87a | 572 | py | Python | CursoEmVideo/Aula22/ex109/ex109.py | lucashsouza/Desafios-Python | abb5b11ebdfd4c232b4f0427ef41fd96013f2802 | [
"MIT"
]
| null | null | null | CursoEmVideo/Aula22/ex109/ex109.py | lucashsouza/Desafios-Python | abb5b11ebdfd4c232b4f0427ef41fd96013f2802 | [
"MIT"
]
| null | null | null | CursoEmVideo/Aula22/ex109/ex109.py | lucashsouza/Desafios-Python | abb5b11ebdfd4c232b4f0427ef41fd96013f2802 | [
"MIT"
]
| null | null | null | """
Modify the functions created in challenge 107 so that
they accept one extra parameter, indicating whether the value
they return will or will not be formatted by the moeda()
function, developed in challenge 108.
"""
from Aula22.ex109 import moeda
from Aula22.ex109.titulo import titulo
preco = float(input("Preço: R$"))
titulo('Informações Calculadas: ')
print(f"Metade: {moeda.metade(preco, True)}")
print(f"Dobro: {moeda.dobro(preco, True)}")
print(f"10% Acréscimo: {moeda.aumentar(preco, 10, True)}")
print(f"10% Desconto: {moeda.diminuir(preco, 10, True)}")
| 28.6 | 59 | 0.737762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 442 | 0.760757 |
5ee0230190b385a1bf8afa9cd7f0b235b7db13a2 | 4,787 | py | Python | mocu/graphical_model/mocu/scripts/visualizetoysystem.py | exalearn/oded | e706c48d60360b041b9f1cfc64fa208d01fbb65a | [
"MIT"
]
| null | null | null | mocu/graphical_model/mocu/scripts/visualizetoysystem.py | exalearn/oded | e706c48d60360b041b9f1cfc64fa208d01fbb65a | [
"MIT"
]
| null | null | null | mocu/graphical_model/mocu/scripts/visualizetoysystem.py | exalearn/oded | e706c48d60360b041b9f1cfc64fa208d01fbb65a | [
"MIT"
]
| null | null | null | from mocu.utils.toysystems import *
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def make_rhs_full_system(a,b,k,c,lam,psi,theta):
def rhs_full_system(y,t):
C = c(a,b,k,y[0],psi,theta)
y1_dot = lam[0] * (y[0] - 1)
y2_dot = lam[1] * (y[1] - C) * (y[1] - a) * (y[1] - b)
return [y1_dot , y2_dot]
return rhs_full_system
def plot_points_in_full_space(xy,c,colors):
a = 0; b=1; k = 5
y1_lin = np.linspace( 0, 1, 100 )
plt.scatter(xy[0] , xy[1] , c=colors , cmap=plt.cm.coolwarm)
plt.plot( y1_lin , c(a,b,k,y1_lin) , 'k')
def plot_different_thetas():
a = 0; b=1;
theta = [0.45 , 0.5 , 0.55]
nsamp = 100
y0 = np.linspace(0.5 - (b-a)/20 , 0.5 + (b-a)/20 , nsamp)
tf = 30
dt = 0.05
t = np.arange(0,tf,dt)
colors = cm.coolwarm( np.linspace(0,1,len(y0)) )
yfinal = []
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(16, 4))
for (i,th) in enumerate(theta):
f = make_f( a , b , th )
g = make_noise_function(0.03)
for (y0_i,c_i) in zip(y0,colors):
Y = np.squeeze( sdeint.itoint(f,g,y0_i,t) )
yfinal.append(Y[-1])
axes[i].plot(t , [y for y in Y] , c=c_i)
axes[i].set_title(r'Boundary = ' + str(th) , fontsize=20)
axes[0].set_xlabel(r'$t$' , fontsize=16)
axes[0].set_ylabel(r'$c(t,\theta)$' , fontsize=16)
plt.tight_layout()
plt.show()
def f_input_output_sde( psi,theta,x0 ):
tf = 30
dt = 0.05
t = np.arange(0,tf,dt)
a = 0; b=1;
c = 0.04 * np.abs( psi-theta ) + 0.48
f = make_f( a , b , c )
g = make_noise_function( 0.03 )
y = np.squeeze( sdeint.itoint(f,g,x0,t) )
return y
def f_input_output_ode( psi,theta,x0 ):
dt = 0.05
tf = 30
t = np.arange(0,tf,dt)
lam = [-0.01,-1]
k = 5
a = 0; b=1;
c = lambda a,b,k,y1,psi,theta : (0.48 + 0.04*np.abs(psi-theta) ) + 0.04*np.abs(b-a)*np.sin(2*np.pi*k*y1)
f_real = make_rhs_full_system(a,b,k,c,lam,psi,theta)
y = np.squeeze( odeint( f_real , x0 , t ) )
#y = [ yi[1] for yi in y ]
return y
def estimate_transition_probabilities( f_input_output , psi , theta , y0 ):
colors = cm.coolwarm( np.linspace(0,1,len(y0)) )
yfinal = []
#plt.figure()
#fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(16, 4))
for (y0_i,c_i) in zip(y0,colors):
Y = f_input_output( psi , theta , y0_i )
if (np.size(Y[0]) > 1):
Y = [ yi[1] for yi in Y ]
yfinal.append(Y[-1])
#axes[2].plot(t , [y for y in Y] , c=c_i)
# Only use y-coordinate of "real" system
if (np.size(y0[0]) > 1):
y0 = np.array( [ yi[1] for yi in y0 ] )
# Estimate IC->final phase probabilities
idx0_0 = np.where( y0 < 0.5 )[0]
idx0_1 = np.where( y0 >= 0.5 )[0]
yfinal = np.array( yfinal )
n_00 = np.sum( yfinal[idx0_0] < 0.5 )
n_01 = np.sum( yfinal[idx0_0] >= 0.5 )
n_10 = np.sum( yfinal[idx0_1] < 0.5 )
n_11 = np.sum( yfinal[idx0_1] >= 0.5 )
n_0 = np.sum( yfinal < 0.5 )
n_1 = np.sum( yfinal >= 0.5 )
rho_ic_to_final = np.array([ [n_00/(n_00+n_01) , n_01/(n_00+n_01) ] ,
[n_10/(n_10+n_11) , n_11/(n_10+n_11)] ])
print( 'rho( final phase | ic ): ' )
print( rho_ic_to_final )
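# --- Interpretation note (comments only) --------------------------------------
# rho_ic_to_final is a 2x2 row-stochastic matrix: row 0 holds trajectories that
# started below 0.5, row 1 those that started at or above 0.5, and the columns
# give the fraction ending below / at-or-above 0.5.  Each row therefore sums to
# one, e.g. [[0.9, 0.1], [0.2, 0.8]] means 10% of "low" starts flip phase.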
def plot_real_system( psi , theta , y0_2 ):
dt = 0.05
tf = 30
t = np.arange(0,tf,dt)
#fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8, 4))
colors = cm.coolwarm( np.linspace(0,1,len(y0_2)) )
for (y0_i,c_i) in zip(y0_2,colors):
y = f_input_output_ode( psi,theta,y0_i )
plt.figure(2)
plt.plot( t , [yi[1] for yi in y] , c=c_i )
plt.xlabel(r'$t$',fontsize=20)
plt.ylabel(r'$c_2$',fontsize=20)
plt.figure(3)
plt.plot( [yi[0] for yi in y] , [yi[1] for yi in y] , c=c_i )
plt.xlabel(r'$c_1$',fontsize=20)
plt.ylabel(r'$c_2$',fontsize=20)
c1 = np.linspace( 0 , 1 , 100 )
C = (0.48 + 0.04 * np.abs(psi-theta) ) + 0.04 * np.sin(2*np.pi*5*c1)
plt.plot( c1 , C , 'k--' , lw=3 )
plt.tight_layout()
plt.show()
def main():
nsamp = 1000
y0 = np.random.uniform( 0 , 1 , nsamp )
y1 = np.linspace(0.45 , 0.55 , nsamp)
y = tuple( zip( y0 , y1 ) )
psi = 0.0; theta = 0.5
estimate_transition_probabilities( f_input_output_sde , psi , theta , y1 )
estimate_transition_probabilities( f_input_output_ode , psi , theta , y )
#plot_different_thetas()
#plot_real_system( psi , theta , y )
if __name__ == '__main__':
main()
| 29.012121 | 113 | 0.524546 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 455 | 0.095049 |
5ee24f1707c0b95700a49b8f88fefee14ccd1a6c | 9,053 | py | Python | sbm/stochastic_block_model.py | pmacg/pysbm | e2f6ceeb4fff903b53a4d3c05694411026a084c3 | [
"MIT"
]
| 1 | 2021-09-17T12:37:34.000Z | 2021-09-17T12:37:34.000Z | sbm/stochastic_block_model.py | pmacg/pysbm | e2f6ceeb4fff903b53a4d3c05694411026a084c3 | [
"MIT"
]
| null | null | null | sbm/stochastic_block_model.py | pmacg/pysbm | e2f6ceeb4fff903b53a4d3c05694411026a084c3 | [
"MIT"
]
| null | null | null | """
Several methods for generating graphs from the stochastic block model.
"""
import itertools
import math
import random
import scipy.sparse
import numpy as np
def _get_num_pos_edges(c1_size, c2_size, same_cluster, self_loops, directed):
"""
Compute the number of possible edges between two clusters.
:param c1_size: The size of the first cluster
:param c2_size: The size of the second cluster
:param same_cluster: Whether these are the same cluster
:param self_loops: Whether we will generate self loops
:param directed: Whether we are generating a directed graph
:return: the number of possible edges between these clusters
"""
if not same_cluster:
# The number is simply the product of the number of vertices
return c1_size * c2_size
else:
# The base number is n choose 2
possible_edges_between_clusters = int((c1_size * (c1_size - 1)) / 2)
# If we are allowed self-loops, then add them on
if self_loops:
possible_edges_between_clusters += c1_size
# The number is normally the same for undirected and directed graphs, unless the clusters are the same, in which
# case the number for the directed graph is double since we need to consider both directions of each edge.
if directed:
possible_edges_between_clusters *= 2
# But if we are allowed self-loops, then we shouldn't double them since there is only one 'direction'.
if directed and self_loops:
possible_edges_between_clusters -= c1_size
return possible_edges_between_clusters
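# --- Worked example (comments only) -------------------------------------------
# Within a single cluster of size 4 (same_cluster=True):
#   undirected, no self-loops : 4 * 3 / 2         = 6 possible edges
#   undirected, self-loops    : 6 + 4             = 10
#   directed,   self-loops    : (6 + 4) * 2 - 4   = 16
# so each unordered pair is counted in both directions but a self-loop only once.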
def _get_number_of_edges(c1_size, c2_size, prob, same_cluster, self_loops, directed):
"""
Compute the number of edges there will be between two clusters.
:param c1_size: The size of the first cluster
:param c2_size: The size of the second cluster
:param prob: The probability of an edge between the clusters
:param same_cluster: Whether these are the same cluster
:param self_loops: Whether we will generate self loops
:param directed: Whether we are generating a directed graph
:return: the number of edges to generate between these clusters
"""
# We need to compute the number of possible edges
possible_edges_between_clusters = _get_num_pos_edges(c1_size, c2_size, same_cluster, self_loops, directed)
# Sample the number of edges from the binomial distribution
return np.random.binomial(possible_edges_between_clusters, prob)
def _generate_sbm_edges(cluster_sizes, prob_mat_q, directed=False):
"""
Given a list of cluster sizes, and a square matrix Q, generates edges for a graph in the following way.
For two vertices u and v where u is in cluster i and v is in cluster j, there is an edge between u and v with
probability Q_{i, j}.
For the undirected case, we assume that the matrix Q is symmetric (and in practice look only at the upper triangle).
For the directed case, we generate edges (u, v) and (v, u) with probabilities Q_{i, j} and Q_{j, i} respectively.
May return self-loops. The calling code can decide what to do with them.
Returns edges as pairs (u, v) where u and v are integers giving the index of the respective vertices.
:param cluster_sizes: a list giving the number of vertices in each cluster
:param prob_mat_q: A square matrix where Q_{i, j} is the probability of each edge between clusters i and j. Should
be symmetric in the undirected case.
:param directed: Whether to generate a directed graph (default is false).
:return: Edges (u, v).
"""
# We will iterate over the clusters. This variable keeps track of the index of the first vertex in the current
# cluster_1.
c1_base_index = 0
for cluster_1 in range(len(cluster_sizes)):
# Keep track of the index of the first vertex in the current cluster_2
c2_base_index = c1_base_index
# If we are constructing a directed graph, we need to consider all values of cluster_2.
# Otherwise, we will consider only the clusters with an index >= cluster_1.
if directed:
second_clusters = range(len(cluster_sizes))
c2_base_index = 0
else:
second_clusters = range(cluster_1, len(cluster_sizes))
for cluster_2 in second_clusters:
# Compute the number of edges between these two clusters
num_edges = _get_number_of_edges(cluster_sizes[cluster_1],
cluster_sizes[cluster_2],
prob_mat_q[cluster_1][cluster_2],
cluster_1 == cluster_2,
True,
directed)
# Sample this number of edges. TODO: correct for possible double-sampling of edges
num_possible_edges = (cluster_sizes[cluster_1] * cluster_sizes[cluster_2]) - 1
for i in range(num_edges):
edge_idx = random.randint(0, num_possible_edges)
                # Map the flat edge index to one vertex in each cluster
                u = c1_base_index + int(edge_idx / cluster_sizes[cluster_2])
                v = c2_base_index + (edge_idx % cluster_sizes[cluster_2])
yield u, v
# Update the base index for the second cluster
c2_base_index += cluster_sizes[cluster_2]
# Update the base index of this cluster
c1_base_index += cluster_sizes[cluster_1]
def sbm_adjmat(cluster_sizes, prob_mat_q, directed=False, self_loops=False):
"""
Generate a graph from the stochastic block model.
The list cluster_sizes gives the number of vertices inside each cluster and the matrix Q gives the probability of
each edge between pairs of clusters.
For two vertices u and v where u is in cluster i and v is in cluster j, there is an edge between u and v with
probability Q_{i, j}.
For the undirected case, we assume that the matrix Q is symmetric (and in practice look only at the upper triangle).
For the directed case, we generate edges (u, v) and (v, u) with probabilities Q_{i, j} and Q_{j, i} respectively.
Returns the adjacency matrix of the graph as a sparse scipy matrix in the CSR format.
:param cluster_sizes: The number of vertices in each cluster.
:param prob_mat_q: A square matrix where Q_{i, j} is the probability of each edge between clusters i and j. Should
be symmetric in the undirected case.
:param directed: Whether to generate a directed graph (default is false).
:param self_loops: Whether to generate self-loops (default is false).
:return: The sparse adjacency matrix of the graph.
"""
# Initialize the adjacency matrix
adj_mat = scipy.sparse.lil_matrix((sum(cluster_sizes), sum(cluster_sizes)))
# Generate the edges in the graph
for (u, v) in _generate_sbm_edges(cluster_sizes, prob_mat_q, directed=directed):
if u != v or self_loops:
# Add this edge to the adjacency matrix.
adj_mat[u, v] = 1
if not directed:
adj_mat[v, u] = 1
# Reformat the output matrix to the CSR format
return adj_mat.tocsr()
def sbm_adjmat_equal_clusters(n, k, prob_mat_q, directed=False):
"""
Generate a graph from the general stochastic block model.
Generates a graph with n vertices and k clusters. Every cluster will have floor(n/k) vertices. The probability of
each edge inside a cluster is given by the probability matrix Q.
:param n: The number of vertices in the graph.
:param k: The number of clusters.
:param prob_mat_q: q[i][j] gives the probability of an edge between clusters i and j
:param directed: Whether to generate a directed graph.
:return: The sparse adjacency matrix of the graph.
"""
return sbm_adjmat([int(n/k)] * k, prob_mat_q, directed=directed)
def ssbm_adjmat(n, k, p, q, directed=False):
"""
Generate a graph from the symmetric stochastic block model.
Generates a graph with n vertices and k clusters. Every cluster will have floor(n/k) vertices. The probability of
each edge inside a cluster is given by p. The probability of an edge between two different clusters is q.
:param n: The number of vertices in the graph.
:param k: The number of clusters.
:param p: The probability of an edge inside a cluster.
:param q: The probability of an edge between clusters.
:param directed: Whether to generate a directed graph.
:return: The sparse adjacency matrix of the graph.
"""
# Every cluster has the same size.
cluster_sizes = [int(n/k)] * k
# Construct the k*k probability matrix Q. The off-diagonal entries are all q and the diagonal entries are all p.
prob_mat_q = []
for row_num in range(k):
new_row = [q] * k
new_row[row_num] = p
prob_mat_q.append(new_row)
# Call the general sbm method.
return sbm_adjmat(cluster_sizes, prob_mat_q, directed=directed)
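# --- Example usage (sketch; parameter values below are illustrative only) -----
if __name__ == '__main__':
    # 300-vertex symmetric SBM with 3 equal clusters: dense inside clusters,
    # sparse between them.
    adj = ssbm_adjmat(300, 3, 0.5, 0.05)
    print(adj.shape, adj.nnz)
    # The general interface takes explicit cluster sizes and a full k*k matrix Q.
    adj2 = sbm_adjmat([100, 50], [[0.6, 0.01], [0.01, 0.4]], directed=False)
    print(adj2.shape, adj2.nnz)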
| 44.377451 | 120 | 0.68353 | 0 | 0 | 3,000 | 0.331382 | 0 | 0 | 0 | 0 | 5,850 | 0.646195 |
5ee2508b1563859bc37a102d678ee13eb3c4fb40 | 3,496 | py | Python | py_include/__init__.py | mauro-balades/py-include | d2d3b90fc840a550a49d76798bc8a97b076480d8 | [
"MIT"
]
| 2 | 2022-01-30T13:23:22.000Z | 2022-01-31T10:23:46.000Z | py_include/__init__.py | mauro-balades/py-include | d2d3b90fc840a550a49d76798bc8a97b076480d8 | [
"MIT"
]
| null | null | null | py_include/__init__.py | mauro-balades/py-include | d2d3b90fc840a550a49d76798bc8a97b076480d8 | [
"MIT"
]
| null | null | null | #!/usr/bin/python3
"""
| --------------------- Py include <Mauro Baladés> ---------------------
| ___ _ _ _ __ _ _ ___ ____
| | |_) \ \_/ | | | |\ | / /` | | | | | | | \ | |_
| |_| |_| |_| |_| \| \_\_, |_|__ \_\_/ |_|_/ |_|__
| ----------------------------------------------------------------------
| MIT License
|
| Copyright (c) 2022 Mauro Baladés
|
| Permission is hereby granted, free of charge, to any person obtaining a copy
| of this software and associated documentation files (the "Software"), to deal
| in the Software without restriction, including without limitation the rights
| to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
| copies of the Software, and to permit persons to whom the Software is
| furnished to do so, subject to the following conditions:
|
| The above copyright notice and this permission notice shall be included in all
| copies or substantial portions of the Software.
|
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
| IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
| FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
| AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
| LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
| OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
| SOFTWARE.
|
"""
from pathlib import Path
import sys
def _exec_modules(*args, **kwargs):
# Get locals from kwargs
local = kwargs.get("local", None)
# Check if local is None,
# because user did not define it.
if local is None:
raise Exception("Need to pass the local variable")
# Iterate every path that user gives as
# arguments (stored in *args).
for arg in args:
        # Store the path as a
        # platform-specific path
        path = Path(arg)
        # Open the file and get its
        # content
with open(path, "r") as f:
data = f.read()
# Execute the file content.
exec(data, globals(), local)
def _ret_modules(*args, **kwargs):
pass
def include(*args, **kwargs):
"""Here is where all the magic ocour. This function takes an
infinite amount of paths and they are being executend to
feel like user imported it.
Note:
It can also be used to store it into a variable if user
needs it. This can be done by adding the argument `ret`
to True (more detail in #Args).
Note:
Please note how (for the import statement) you will need a
`__init__.py` and paths separated by dots. With py-include,
you don't need. Py-include will make your path supported
by the current platform and it will open it's content and
execute it, so you don't need a path divided by `.` or
a `__init__.py`
Args:
files [list(str)]: A list of paths to include.
ret [bool]: If it is set to True, return the module (defaults to False).
Note:
If `ret` is set to `True`, the function will return all modules
as user will need to unpack them.
"""
    # Get the value indicating whether the user wants to execute
    # the module or to return it (defaults to False).
    ret = kwargs.get("ret", False)
    # Check if the user set `ret` to True. If not,
    # we will open the file and execute its content.
    # If it is True, we will return the module they
    # wanted to import.
if not ret:
_exec_modules(*args, **kwargs)
return _ret_modules(*args, **kwargs)
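# --- Example usage (sketch; the path below is hypothetical) -------------------
# Executes the contents of "scripts/helpers.py" as if it were written inline,
# so any names it defines land in the caller's local namespace:
#
#     from py_include import include
#     include("scripts/helpers.py", local=locals())
#
# With ret=True the execution step is skipped and _ret_modules() is returned,
# which is currently a stub that yields None.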
| 33.295238 | 80 | 0.65246 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,952 | 0.843911 |
5ee3400a48d58dbe03ad61379d1f85e22cd4df99 | 7,201 | py | Python | src/scripts/load_data.py | murphycj/agfusionweb-react | 9305aa3caa653fa74608d1ae3dd59c03a3df6294 | [
"MIT"
]
| 1 | 2019-12-11T22:22:02.000Z | 2019-12-11T22:22:02.000Z | src/scripts/load_data.py | murphycj/agfusionweb-react | 9305aa3caa653fa74608d1ae3dd59c03a3df6294 | [
"MIT"
]
| 15 | 2020-03-25T02:21:18.000Z | 2022-03-27T20:05:01.000Z | src/scripts/load_data.py | murphycj/agfusionweb-react | 9305aa3caa653fa74608d1ae3dd59c03a3df6294 | [
"MIT"
]
| null | null | null | import pyensembl
import sys
import sqlite3
import boto3
import pickle
dynamodb = boto3.resource('dynamodb')
table_agfusion_gene_synonyms = dynamodb.Table('agfusion_gene_synonyms')
table_agfusion_genes = dynamodb.Table('agfusion_genes')
table_agfusion_sequences = dynamodb.Table('agfusion_sequences')
def add_synonym(data, id, ensg):
if id != '':
if id not in data:
data[id] = [ensg]
else:
data[id].append(ensg)
return data
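def _example_add_synonym():
    # Hedged usage sketch (illustrative addition): map an Entrez id and a gene
    # symbol to the same Ensembl gene id; TP53 is used purely as an example.
    data = {}
    data = add_synonym(data, '7157', 'ENSG00000141510')
    data = add_synonym(data, 'TP53', 'ENSG00000141510')
    assert data == {'7157': ['ENSG00000141510'], 'TP53': ['ENSG00000141510']}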
def process_gene_synonym(species, release, pyens_db, c):
data = {}
# get gene synonymes
query = c.execute('select * from ' + species + '_' + str(release) + ';').fetchall()
for row in query:
ensg = row[1]
entrez = row[2]
symbol = row[3]
if ensg!='':
data = add_synonym(data, entrez, ensg)
data = add_synonym(data, symbol, ensg)
else:
continue
with table_agfusion_gene_synonyms.batch_writer() as batch:
for gene_id, ensg in data.items():
batch.put_item(
Item={
'gene_id': gene_id,
'species_release': species + '_' + str(release),
'ensembl_gene_id': ';'.join(ensg)
}
)
def write(db, species, release):
with table_agfusion_sequences.batch_writer() as batch:
for gene_id, seq in db.items():
batch.put_item(
Item={
'id': gene_id,
'species_release': species + '_' + str(release),
'sequence': seq
}
)
def upload_fasta(species, genome, release):
# cdna
db = pickle.load(open(
'/Users/charliemurphy/Library/Caches/pyensembl/{}/ensembl{}/{}.{}.{}cdna.all.fa.gz.pickle'.format(
genome,
release,
species.capitalize(),
genome,
str(release) + '.' if release <= 75 else ''
)))
write(db, species, release)
# import pdb; pdb.set_trace()
db = pickle.load(open(
'/Users/charliemurphy/Library/Caches/pyensembl/{}/ensembl{}/{}.{}.{}ncrna.fa.gz.pickle'.format(
genome,
release,
species.capitalize(),
genome,
str(release) + '.' if release <= 75 else ''
)))
write(db, species, release)
# pep
db = pickle.load(open(
'/Users/charliemurphy/Library/Caches/pyensembl/{}/ensembl{}/{}.{}.{}pep.all.fa.gz.pickle'.format(
genome,
release,
species.capitalize(),
genome,
str(release) + '.' if release <= 75 else ''
)))
write(db, species, release)
def process_gene_data(species, release, pyens_db, c):
protein_db = [
'pfam', 'smart', 'superfamily', 'tigrfam', 'pfscan', 'tmhmm', 'seg', 'ncoils', 'prints',
'pirsf', 'signalp']
domains = {}
for pdb in protein_db:
query = c.execute('select * from {}_{}_{}'.format(species, release, pdb)).fetchall()
for q in query:
ensp = q[1]
if ensp not in domains:
domains[ensp] = {j:[] for j in protein_db}
domains[ensp][pdb].append(list(q[2:]))
genes = pyens_db.genes()
canonical = c.execute(
'select g.stable_id, t.transcript_stable_id from {}_{} g left join {}_{}_transcript t on g.canonical_transcript_id = t.transcript_id;'.format(
species,
release,
species,
release
)).fetchall()
canonical = dict(canonical)
with table_agfusion_genes.batch_writer() as batch:
for gene in genes:
data = {
'id': gene.id,
'species_release': species + '_' + str(release),
'name': gene.name,
'start': gene.start,
'end': gene.end,
'strand': gene.strand,
'contig': gene.contig,
'biotype': gene.biotype,
'is_protein_coding': gene.is_protein_coding,
'transcripts': {}
}
for transcript in gene.transcripts:
five_prime_utr_len = 0
three_prime_utr_len = 0
if transcript.contains_start_codon:
five_prime_utr_len = len(transcript.five_prime_utr_sequence)
if transcript.contains_stop_codon:
three_prime_utr_len = len(transcript.three_prime_utr_sequence)
data['transcripts'][transcript.id] = {
'name': transcript.name,
'start': transcript.start,
'end': transcript.end,
'biotype': transcript.biotype,
'complete': transcript.complete,
'exons': [[i[0], i[1]] for i in transcript.exon_intervals],
'has_start_codon': transcript.contains_start_codon,
'has_stop_codon': transcript.contains_stop_codon,
'five_prime_utr_len': five_prime_utr_len,
'three_prime_utr_len': three_prime_utr_len,
'is_protein_coding': transcript.is_protein_coding,
'protein_id': transcript.protein_id,
'domains': {j: [] for j in protein_db},
'canonical': True if transcript.id == canonical.get(gene.id, '') else False
}
if transcript.is_protein_coding:
data['transcripts'][transcript.id]['coding'] = \
[[i[0], i[1]] for i in transcript.coding_sequence_position_ranges]
if transcript.protein_id in domains:
data['transcripts'][transcript.id]['domains'] = domains[transcript.protein_id]
# make sure nothing is an empty string, convert to none
for pdb in data['transcripts'][transcript.id]['domains'].keys():
for i in range(len(data['transcripts'][transcript.id]['domains'][pdb])):
domain = data['transcripts'][transcript.id]['domains'][pdb][i]
domain = [j if j else None for j in domain]
data['transcripts'][transcript.id]['domains'][pdb][i] = domain
try:
# table_agfusion_genes.put_item(Item=data)
batch.put_item(Item=data)
except:
import pdb; pdb.set_trace()
def process_data(species, release, genome, agfusion):
pyens_db = pyensembl.EnsemblRelease(release, species)
db = sqlite3.Connection(agfusion)
c = db.cursor()
# process_gene_synonym(species, release, pyens_db, c)
# process_gene_data(species, release, pyens_db, c)
upload_fasta(species, genome, release)
def put_to_dynamodb():
pass
# process_data('homo_sapiens', 94, '/Users/charliemurphy/Downloads/agfusion.homo_sapiens.94.db')
# process_data('homo_sapiens', 75, 'GRCh37', '/Users/charliemurphy/Downloads/agfusion.homo_sapiens.75.db')
# process_data('mus_musculus', 92, 'GRCm38', '/Users/charliemurphy/Downloads/agfusion.mus_musculus.92.db') | 34.956311 | 150 | 0.550618 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,687 | 0.234273 |
5ee4affc10253568c59d31e6e7ecff29108ae1b0 | 10,118 | py | Python | google-cloud-sdk/lib/third_party/cloud_ml_engine_sdk/dataflow/io/multifiles_source.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
]
| null | null | null | google-cloud-sdk/lib/third_party/cloud_ml_engine_sdk/dataflow/io/multifiles_source.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
]
| null | null | null | google-cloud-sdk/lib/third_party/cloud_ml_engine_sdk/dataflow/io/multifiles_source.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
]
| 3 | 2017-07-27T18:44:13.000Z | 2020-07-25T17:48:53.000Z | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Mutiple files/file patterns source.
Multiple File source, which reads the union of multiple files and/or file
patterns.
"""
from apache_beam import coders
from apache_beam.io import iobase
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.iobase import Read
from apache_beam.io.range_trackers import OffsetRangeTracker
from apache_beam.io.textio import _TextSource as TextSource
from apache_beam.io.tfrecordio import _TFRecordSource as TFRecordSource
from apache_beam.transforms import PTransform
from apache_beam.transforms.display import DisplayDataItem
# pylint: disable=g-import-not-at-top
try:
from apache_beam.options.value_provider import ValueProvider
from apache_beam.options.value_provider import StaticValueProvider
except ImportError:
from apache_beam.utils.value_provider import ValueProvider
from apache_beam.utils.value_provider import StaticValueProvider
# pylint: enable=g-import-not-at-top
FILE_LIST_SEPARATOR = ','
class MultiFilesSource(iobase.BoundedSource):
"""Base class for multiple files source.
  Supports reading multiple files or file patterns separated by commas. Subclasses
  should implement create_source() to actually create the sources to use.
"""
def __init__(self, file_patterns, **kwargs):
# Handle the templated values.
if not isinstance(file_patterns, (basestring, ValueProvider)):
raise TypeError('%s: file_pattern must be of type string'
' or ValueProvider; got %r instead'
% (self.__class__.__name__, file_patterns))
if isinstance(file_patterns, basestring):
file_patterns = StaticValueProvider(str, file_patterns)
self._file_patterns = file_patterns
self._sources = []
self._kwargs = kwargs
def _populate_sources_lazily(self):
# We need to do it lazily because self._file_patterns can be a templated
# value and must be evaluated at runtime.
if not self._sources:
# dedup repeated files or file patterns.
for file_pattern in list(set(self._file_patterns.get().split(
FILE_LIST_SEPARATOR))):
self._sources.append(self.create_source(file_pattern.strip(),
**self._kwargs))
def estimate_size(self):
self._populate_sources_lazily()
return sum(s.estimate_size() for s in self._sources)
def get_range_tracker(self, start_position, stop_position):
self._populate_sources_lazily()
if start_position is None:
start_position = 0
if stop_position is None:
stop_position = len(self._sources)
return OffsetRangeTracker(start_position, stop_position)
def create_source(self, file_pattern, **kwargs):
raise NotImplementedError('MultiFilesSource cannot be used directly.')
def read(self, range_tracker):
self._populate_sources_lazily()
start_source = range_tracker.start_position()
stop_source = range_tracker.stop_position()
for source_ix in range(start_source, stop_source):
if not range_tracker.try_claim(source_ix):
break
sub_range_tracker = self._sources[source_ix].get_range_tracker(None, None)
for record in self._sources[source_ix].read(sub_range_tracker):
yield record
def split(self, desired_bundle_size, start_position=None,
stop_position=None):
self._populate_sources_lazily()
if start_position or stop_position:
raise ValueError(
'Multi-files initial splitting is not supported. Expected start and '
'stop positions to be None. Received %r and %r respectively.' %
(start_position, stop_position))
for source in self._sources:
for bundle in source.split(desired_bundle_size):
yield bundle
def display_data(self):
return {'file_patterns': DisplayDataItem(str(self._file_patterns),
label='File Patterns')}
class _MultiTextSource(MultiFilesSource):
"""Multiple files source for Text source."""
# TODO(user): Currently liquid sharding is performed on source boundaries.
# For text files, a more complicated RangeTracker can be implemented to
# support liquid sharding within sub-sources if needed. See ConcatRangeTracker
# in concat_source.py for reference.
def create_source(self, file_pattern, min_bundle_size=0,
compression_type=CompressionTypes.AUTO,
strip_trailing_newlines=True,
coder=coders.StrUtf8Coder(),
validate=True,
skip_header_lines=0):
return TextSource(file_pattern=file_pattern,
min_bundle_size=min_bundle_size,
compression_type=compression_type,
strip_trailing_newlines=strip_trailing_newlines,
coder=coders.StrUtf8Coder(),
validate=validate,
skip_header_lines=skip_header_lines)
# TODO(user): currently compression_type is not a ValueProvider value in
# filebased_source, therefore we have to make separate classes for
# the non-compressed and compressed versions of TFRecord sources. Consider
# making compression_type a ValueProvider in filebased_source.
class _MultiTFRecordSource(MultiFilesSource):
"""Multiple files source for TFRecord source."""
def create_source(self, file_pattern):
return TFRecordSource(
file_pattern=file_pattern,
coder=coders.BytesCoder(),
compression_type=CompressionTypes.AUTO,
validate=True)
class _MultiTFRecordGZipSource(MultiFilesSource):
"""Multiple files source for TFRecord source gzipped."""
def create_source(self, file_pattern):
return TFRecordSource(
file_pattern=file_pattern,
coder=coders.BytesCoder(),
compression_type=CompressionTypes.GZIP,
validate=True)
class ReadFromMultiFilesText(PTransform):
"""A PTransform for reading text files or files patterns.
It is a wrapper of ReadFromText but supports multiple files or
files patterns.
"""
def __init__(
self,
file_patterns,
min_bundle_size=0,
compression_type=CompressionTypes.AUTO,
strip_trailing_newlines=True,
coder=coders.StrUtf8Coder(),
validate=True,
skip_header_lines=0,
**kwargs):
"""Initialize the ReadFromText transform.
Args:
file_patterns: The file paths/patterns to read from as local file paths
        or GCS files. Paths/patterns separated by commas.
min_bundle_size: Minimum size of bundles that should be generated when
splitting this source into bundles. See ``FileBasedSource`` for more
details.
compression_type: Used to handle compressed input files. Typical value
is CompressionTypes.AUTO, in which case the underlying file_path's
extension will be used to detect the compression.
strip_trailing_newlines: Indicates whether this source should remove
the newline char in each line it reads before decoding that line.
coder: Coder used to decode each line.
validate: flag to verify that the files exist during the pipeline
creation time.
skip_header_lines: Number of header lines to skip. Same number is skipped
from each source file. Must be 0 or higher. Large number of skipped
lines might impact performance.
**kwargs: optional args dictionary.
"""
super(ReadFromMultiFilesText, self).__init__(**kwargs)
self._source = _MultiTextSource(
file_patterns,
min_bundle_size=min_bundle_size,
compression_type=compression_type,
strip_trailing_newlines=strip_trailing_newlines,
coder=coder,
validate=validate,
skip_header_lines=skip_header_lines)
def expand(self, pvalue):
return pvalue.pipeline | Read(self._source)
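# Hedged usage sketch (illustrative addition, not part of the original module);
# the bucket paths below are hypothetical placeholders:
#
#   import apache_beam as beam
#
#   with beam.Pipeline() as p:
#       lines = (p
#                | 'ReadMulti' >> ReadFromMultiFilesText(
#                    'gs://bucket/part1*.csv,gs://bucket/part2*.csv',
#                    skip_header_lines=1))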
class ReadFromMultiFilesTFRecord(PTransform):
"""Transform for reading multiple TFRecord sources.
It is a wrapper of ReadFromTFRecord but supports multiple files or
  file patterns.
"""
def __init__(self,
file_patterns,
**kwargs):
"""Initialize a ReadFromMultiFilesTFRecord transform.
Args:
file_patterns: file glob patterns to read TFRecords from.
**kwargs: optional args dictionary.
Returns:
A ReadFromTFRecord transform object.
"""
super(ReadFromMultiFilesTFRecord, self).__init__(**kwargs)
self._source = _MultiTFRecordSource(file_patterns)
def expand(self, pvalue):
return pvalue.pipeline | Read(self._source)
class ReadFromMultiFilesTFRecordGZip(PTransform):
"""Transform for reading multiple TFRecord Gzipped sources.
It is a wrapper of ReadFromTFRecord gzipped but supports multiple files or
files patterns.
"""
def __init__(self,
file_patterns,
**kwargs):
"""Initialize a ReadFromMultiFilesTFRecordGzip transform.
Args:
file_patterns: file glob patterns to read TFRecords from.
**kwargs: optional args dictionary.
Returns:
A ReadFromTFRecord transform object.
"""
super(ReadFromMultiFilesTFRecordGZip, self).__init__(**kwargs)
self._source = _MultiTFRecordGZipSource(file_patterns)
def expand(self, pvalue):
return pvalue.pipeline | Read(self._source)
| 37.062271 | 80 | 0.720202 | 8,012 | 0.791856 | 952 | 0.09409 | 0 | 0 | 0 | 0 | 4,392 | 0.434078 |
5ee51c2ffdafe95ae165b98a996207a8a39f4653 | 10,830 | py | Python | eureka/S5_lightcurve_fitting/s5_fit.py | evamariaa/Eureka | a3e739a528fbe85ec588bca996188765649b7778 | [
"MIT"
]
| 15 | 2020-08-07T12:12:17.000Z | 2022-03-29T10:20:38.000Z | eureka/S5_lightcurve_fitting/s5_fit.py | evamariaa/Eureka | a3e739a528fbe85ec588bca996188765649b7778 | [
"MIT"
]
| 159 | 2020-08-05T14:34:59.000Z | 2022-03-31T21:02:10.000Z | eureka/S5_lightcurve_fitting/s5_fit.py | evamariaa/Eureka | a3e739a528fbe85ec588bca996188765649b7778 | [
"MIT"
]
| 17 | 2021-06-16T09:40:41.000Z | 2022-03-22T18:28:07.000Z | import numpy as np
import matplotlib.pyplot as plt
import glob, os, time
from ..lib import manageevent as me
from ..lib import readECF as rd
from ..lib import sort_nicely as sn
from ..lib import util, logedit
from . import parameters as p
from . import lightcurve as lc
from . import models as m
from .utils import get_target_data
#FINDME: Keep reload statements for easy testing
from importlib import reload
reload(p)
reload(m)
reload(lc)
class MetaClass:
'''A class to hold Eureka! metadata.
'''
def __init__(self):
return
def fitJWST(eventlabel, s4_meta=None):
'''Fits 1D spectra with various models and fitters.
Parameters
----------
eventlabel: str
The unique identifier for these data.
s4_meta: MetaClass
The metadata object from Eureka!'s S4 step (if running S4 and S5 sequentially).
Returns
-------
meta: MetaClass
The metadata object with attributes added by S5.
Notes
-------
History:
- November 12-December 15, 2021 Megan Mansfield
Original version
- December 17-20, 2021 Megan Mansfield
Connecting S5 to S4 outputs
- December 17-20, 2021 Taylor Bell
Increasing connectedness of S5 and S4
'''
print("\nStarting Stage 5: Light Curve Fitting\n")
# Initialize a new metadata object
meta = MetaClass()
meta.eventlabel = eventlabel
# Load Eureka! control file and store values in Event object
ecffile = 'S5_' + eventlabel + '.ecf'
ecf = rd.read_ecf(ecffile)
rd.store_ecf(meta, ecf)
# load savefile
if s4_meta == None:
        # Search for the S4 output metadata in the inputdir provided in the ecf
# First just check the specific inputdir folder
rootdir = os.path.join(meta.topdir, *meta.inputdir.split(os.sep))
if rootdir[-1]!='/':
rootdir += '/'
files = glob.glob(rootdir+'S4_'+meta.eventlabel+'*_Meta_Save.dat')
if len(files)==0:
            # There were no metadata files in that folder, so let's see if there are any in child folders
files = glob.glob(rootdir+'**/S4_'+meta.eventlabel+'*_Meta_Save.dat', recursive=True)
files = sn.sort_nicely(files)
if len(files)==0:
# There may be no metafiles in the inputdir - raise an error and give a helpful message
raise AssertionError('Unable to find an output metadata file from Eureka!\'s S4 step '
+'in the inputdir: \n"{}"!'.format(rootdir))
elif len(files)>1:
# There may be multiple runs - use the most recent but warn the user
print('WARNING: There are multiple metadata save files in your inputdir: \n"{}"\n'.format(rootdir)
+'Using the metadata file: \n{}\n'.format(files[-1])
+'and will consider aperture ranges listed there. If this metadata file is not a part\n'
+'of the run you intended, please provide a more precise folder for the metadata file.')
fname = files[-1] # Pick the last file name (should be the most recent or only file)
fname = fname[:-4] # Strip off the .dat ending
s4_meta = me.loadevent(fname)
# Need to remove the topdir from the outputdir
s4_outputdir = s4_meta.outputdir[len(s4_meta.topdir):]
if s4_outputdir[0]=='/':
s4_outputdir = s4_outputdir[1:]
s4_allapers = s4_meta.allapers
# Overwrite the temporary meta object made above to be able to find s4_meta
meta = s4_meta
# Load Eureka! control file and store values in the S4 metadata object
ecffile = 'S5_' + eventlabel + '.ecf'
ecf = rd.read_ecf(ecffile)
rd.store_ecf(meta, ecf)
# Overwrite the inputdir with the exact output directory from S4
meta.inputdir = s4_outputdir
    meta.old_datetime = s4_meta.datetime # Capture the date of the existing S4 run
meta.datetime = None # Reset the datetime in case we're running this on a different day
meta.inputdir_raw = meta.inputdir
meta.outputdir_raw = meta.outputdir
if (not s4_allapers) or (not meta.allapers):
# The user indicated in the ecf that they only want to consider one aperture
# in which case the code will consider only the one which made s4_meta.
        # Alternatively, S4 was run without allapers, so S5's allapers will only consider that one
meta.spec_hw_range = [meta.spec_hw,]
meta.bg_hw_range = [meta.bg_hw,]
run_i = 0
for spec_hw_val in meta.spec_hw_range:
for bg_hw_val in meta.bg_hw_range:
t0 = time.time()
meta.spec_hw = spec_hw_val
meta.bg_hw = bg_hw_val
# Do some folder swapping to be able to reuse this function to find S4 outputs
tempfolder = meta.outputdir_raw
meta.outputdir_raw = meta.inputdir_raw
meta.inputdir = util.pathdirectory(meta, 'S4', meta.runs[run_i], old_datetime=meta.old_datetime, ap=spec_hw_val, bg=bg_hw_val)
meta.outputdir_raw = tempfolder
run_i += 1
if meta.testing_S5:
# Only fit a single channel while testing
chanrng = [0]
else:
chanrng = range(meta.nspecchan)
for channel in chanrng:
# Create directories for Stage 5 processing outputs
run = util.makedirectory(meta, 'S5', ap=spec_hw_val, bg=bg_hw_val, ch=channel)
meta.outputdir = util.pathdirectory(meta, 'S5', run, ap=spec_hw_val, bg=bg_hw_val, ch=channel)
# Copy existing S4 log file and resume log
meta.s5_logname = meta.outputdir + 'S5_' + meta.eventlabel + ".log"
log = logedit.Logedit(meta.s5_logname, read=meta.s4_logname)
log.writelog("\nStarting Channel {} of {}\n".format(channel+1, meta.nspecchan))
log.writelog(f"Input directory: {meta.inputdir}")
log.writelog(f"Output directory: {meta.outputdir}")
# Copy ecf (and update outputdir in case S5 is being called sequentially with S4)
log.writelog('Copying S5 control file')
# shutil.copy(ecffile, meta.outputdir)
new_ecfname = meta.outputdir + ecffile.split('/')[-1]
with open(new_ecfname, 'w') as new_file:
with open(ecffile, 'r') as file:
for line in file.readlines():
if len(line.strip())==0 or line.strip()[0]=='#':
new_file.write(line)
else:
line_segs = line.strip().split()
if line_segs[0]=='inputdir':
new_file.write(line_segs[0]+'\t\t/'+meta.inputdir+'\t'+' '.join(line_segs[2:])+'\n')
else:
new_file.write(line)
                # Set the initial fitting parameters
params = p.Parameters(param_file=meta.fit_par)
# Subtract off the zeroth time value to avoid floating point precision problems when fitting for t0
t_offset = int(np.floor(meta.bjdtdb[0]))
t_mjdtdb = meta.bjdtdb - t_offset
params.t0.value -= t_offset
# Get the flux and error measurements for the current channel
flux = meta.lcdata[channel,:]
flux_err = meta.lcerr[channel,:]
# Normalize flux and uncertainties to avoid large flux values
flux_err /= flux.mean()
flux /= flux.mean()
if meta.testing_S5:
# FINDME: Use this area to add systematics into the data
# when testing new systematics models. In this case, I'm
# introducing an exponential ramp to test m.ExpRampModel().
log.writelog('****Adding exponential ramp systematic to light curve****')
fakeramp = m.ExpRampModel(parameters=params, name='ramp', fmt='r--')
fakeramp.coeffs = np.array([-1,40,-3, 0, 0, 0])
flux *= fakeramp.eval(time=t_mjdtdb)
# Load the relevant values into the LightCurve model object
lc_model = lc.LightCurve(t_mjdtdb, flux, channel, meta.nspecchan, unc=flux_err, name=eventlabel, time_units=f'MJD_TDB = BJD_TDB - {t_offset}')
# Make the astrophysical and detector models
modellist=[]
if 'transit' in meta.run_myfuncs:
t_model = m.TransitModel(parameters=params, name='transit', fmt='r--')
modellist.append(t_model)
if 'polynomial' in meta.run_myfuncs:
t_polynom = m.PolynomialModel(parameters=params, name='polynom', fmt='r--')
modellist.append(t_polynom)
if 'expramp' in meta.run_myfuncs:
t_ramp = m.ExpRampModel(parameters=params, name='ramp', fmt='r--')
modellist.append(t_ramp)
model = m.CompositeModel(modellist)
# Fit the models using one or more fitters
log.writelog("=========================")
if 'lsq' in meta.fit_method:
log.writelog("Starting lsq fit.")
model.fitter = 'lsq'
lc_model.fit(model, meta, fitter='lsq')
log.writelog("Completed lsq fit.")
log.writelog("-------------------------")
if 'emcee' in meta.fit_method:
log.writelog("Starting emcee fit.")
model.fitter = 'emcee'
lc_model.fit(model, meta, fitter='emcee')
log.writelog("Completed emcee fit.")
log.writelog("-------------------------")
if 'dynesty' in meta.fit_method:
log.writelog("Starting dynesty fit.")
model.fitter = 'dynesty'
lc_model.fit(model, meta, fitter='dynesty')
log.writelog("Completed dynesty fit.")
log.writelog("-------------------------")
if 'lmfit' in meta.fit_method:
log.writelog("Starting lmfit fit.")
model.fitter = 'lmfit'
lc_model.fit(model, meta, fitter='lmfit')
log.writelog("Completed lmfit fit.")
log.writelog("-------------------------")
log.writelog("=========================")
# Plot the results from the fit(s)
if meta.isplots_S5 >= 1:
lc_model.plot(meta)
return meta, lc_model
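# Hedged usage sketch (illustrative addition): running Stage 5 on its own after
# a Stage 4 run has produced an S4_*_Meta_Save.dat file. The event label is a
# hypothetical placeholder; with s4_meta=None the function searches the
# inputdir given in the S5 ecf for the S4 metadata save file.
#
#   meta, lc_model = fitJWST('wasp43b')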
| 44.204082 | 158 | 0.568421 | 105 | 0.009695 | 0 | 0 | 0 | 0 | 0 | 0 | 4,099 | 0.378486 |
5ee68ea9e8a99cf09e7e6d0ca6ce334ef983ef7f | 4,724 | py | Python | model_compression_toolkit/common/graph/graph_matchers.py | eladc-git/model_optimization | 46d1c893ca23e61d8ef7597184ad2ba6e2ae6e7a | [
"Apache-2.0"
]
| null | null | null | model_compression_toolkit/common/graph/graph_matchers.py | eladc-git/model_optimization | 46d1c893ca23e61d8ef7597184ad2ba6e2ae6e7a | [
"Apache-2.0"
]
| null | null | null | model_compression_toolkit/common/graph/graph_matchers.py | eladc-git/model_optimization | 46d1c893ca23e61d8ef7597184ad2ba6e2ae6e7a | [
"Apache-2.0"
]
| null | null | null | # Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Any, List
from model_compression_toolkit.common.graph.base_node import BaseNode
from model_compression_toolkit.common.matchers import node_matcher, walk_matcher, edge_matcher
class NodeOperationMatcher(node_matcher.BaseNodeMatcher):
"""
Class NodeOperationMatcher to check if the layer class of a node matches a specific layer.
"""
def __init__(self, operation: Any):
"""
        Init for class NodeOperationMatcher.
Args:
operation: Which layer to check if matches.
"""
self.operation = operation
def apply(self, input_node_object: Any) -> bool:
"""
Check if input_node_object matches the matcher condition.
Args:
input_node_object: Node object to check the matcher on.
Returns:
True if input_node_object is the layer the NodeOperationMatcher holds. Otherwise,
return nothing.
"""
if input_node_object.type == self.operation:
return True
class NodeFrameworkAttrMatcher(node_matcher.BaseNodeMatcher):
"""
Class NodeFrameworkAttrMatcher to check if a node's attribute has a specific value.
"""
def __init__(self, attr_name: str, attr_value: Any):
"""
Init a NodeFrameworkAttrMatcher object.
Args:
attr_name: Name of node's attribute to check.
attr_value: Value to check if the attribute is equal to.
"""
self.attr_name = attr_name
self.attr_value = attr_value
def apply(self, input_node_object: Any) -> bool:
"""
Check if input_node_object has an attribute with the value the NodeFrameworkAttrMatcher
contains.
Args:
input_node_object: Node object to check for its attribute and value.
Returns:
True if the node has an attribute with the attribute name and the value that
were passed during the initialization of NodeFrameworkAttrMatcher.
"""
if self.attr_name in input_node_object.framework_attr:
if input_node_object.framework_attr[self.attr_name] == self.attr_value:
return True
class EdgeMatcher(edge_matcher.BaseEdgeMatcher):
"""
class EdgeMatcher to check if an edge matches an edge that EdgeMatcher contains.
"""
def __init__(self, source_matcher: BaseNode, target_matcher: BaseNode):
"""
Init an EdgeMatcher object.
Args:
source_matcher: Source node to match.
target_matcher: Destination node to match.
"""
super().__init__(source_matcher, target_matcher)
def apply(self, input_object: Any) -> bool:
"""
Check if input_object is a tuple of two nodes and the same nodes that were
passed during the EdgeMatcher initialization.
Args:
input_object: Object to check if equals to the edge EdgeMatcher holds.
Returns:
Whether input_object is equal to the edge EdgeMatcher holds or not.
"""
if isinstance(input_object, tuple) and len(input_object) >= 2:
return self.source_matcher.apply(input_object[0]) and self.target_matcher.apply(input_object[1])
else:
return False
class WalkMatcher(walk_matcher.WalkMatcherList):
"""
Class WalkMatcher to check if a list of nodes matches another list of nodes.
"""
def __init__(self, matcher_list: List[BaseNode]):
"""
Init a WalkMatcher object.
Args:
            matcher_list: List of nodes to hold for checking.
"""
super().__init__(matcher_list)
def apply(self, input_object: Any) -> bool: # not in use
"""
Check if a list of nodes matches the list of nodes the WalkMatcher holds.
Args:
input_object: Object to check.
Returns:
True if input_object matches the list of nodes the WalkMatcher holds.
"""
pass # pragma: no cover
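# Hedged usage sketch (illustrative addition, not part of the original module);
# the layer type (Conv2D) and the 'padding' attribute are hypothetical examples:
#
#   conv_matcher = NodeOperationMatcher(Conv2D)
#   same_padding = NodeFrameworkAttrMatcher('padding', 'same')
#   if conv_matcher.apply(node) and same_padding.apply(node):
#       pass  # node is a Conv2D layer configured with 'same' padding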
| 31.704698 | 108 | 0.650296 | 3,816 | 0.80779 | 0 | 0 | 0 | 0 | 0 | 0 | 3,059 | 0.647544 |
5ee6b363eabe25c724e148a500f83b42a84aa031 | 3,022 | py | Python | initialize_app_db.py | daniel-julio-iglesias/microblog | 360198198336f0dda7d20aafeb337f59cb4a2329 | [
"MIT"
]
| null | null | null | initialize_app_db.py | daniel-julio-iglesias/microblog | 360198198336f0dda7d20aafeb337f59cb4a2329 | [
"MIT"
]
| null | null | null | initialize_app_db.py | daniel-julio-iglesias/microblog | 360198198336f0dda7d20aafeb337f59cb4a2329 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Use the next steps, just in case, to recreate the already existing DB
Backup and Delete the folder "migrations"
Backup and Delete the file "app.db"
Execute the next console commands
Linux
(venv) $ export FLASK_APP=microblog.py
MS Windows
(venv) $ set FLASK_APP=microblog.py
(venv) $ flask db init
(venv) $ flask db migrate -m "initialization"
(venv) $ python initialize_app_db.py
### (venv) $ flask shell
(venv) $ flask run
http://localhost:5000/
http://localhost:5000/index
Use the function "initialize_data_into_db()"
for data recreation.
Use the function "remove_data_from_db()"
for data deletion. Then you can simply
use again the function "initialize_data_into_db()"
for data recreation.
"""
from datetime import datetime, timedelta
from app import create_app, db
from app.models import User, Post
from config import Config
def initialize_data_into_db():
app = create_app(Config)
app_context = app.app_context()
app_context.push()
db.create_all()
u1 = User(username='john', email='[email protected]')
u2 = User(username='susan', email='[email protected]')
u3 = User(username='mary', email='[email protected]')
u4 = User(username='david', email='[email protected]')
u5 = User(username='daniel', email='[email protected]')
u5.set_password('dog')
db.session.add_all([u1, u2, u3, u4, u5])
now = datetime.utcnow()
p1 = Post(body="post from john", author=u1,
timestamp=now + timedelta(seconds=1))
p2 = Post(body="post from susan", author=u2,
timestamp=now + timedelta(seconds=4))
p3 = Post(body="post from mary", author=u3,
timestamp=now + timedelta(seconds=3))
p4 = Post(body="post from david", author=u4,
timestamp=now + timedelta(seconds=2))
p5 = Post(body="My post number one.", author=u5,
timestamp=now + timedelta(seconds=5))
p6 = Post(body="My post number two.", author=u5,
timestamp=now + timedelta(seconds=6))
p7 = Post(body="My post number three.", author=u5,
timestamp=now + timedelta(seconds=7))
p8 = Post(body="My post number four.", author=u5,
timestamp=now + timedelta(seconds=8))
p9 = Post(body="My post number five.", author=u5,
timestamp=now + timedelta(seconds=9))
db.session.add_all([p1, p2, p3, p4, p5, p6, p7, p8, p9])
db.session.commit()
u1.follow(u2)
u1.follow(u4)
u2.follow(u3)
u3.follow(u4)
db.session.commit()
users = User.query.all()
print(users)
"""
[<User john>, <User susan>]
"""
for u in users:
print(u.id, u.username)
def remove_data_from_db():
"""
In case of removing data...
"""
app = create_app(Config)
app_context = app.app_context()
app_context.push()
db.create_all()
db.session.remove()
db.drop_all()
app_context.pop()
if __name__ == '__main__':
initialize_data_into_db()
# remove_data_from_db()
| 26.982143 | 67 | 0.648246 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,175 | 0.388815 |
5ee6e99348be1e75186fd4d95f9769f455fc8a1a | 4,328 | py | Python | gpytorch/kernels/rbf_kernel.py | techshot25/gpytorch | b4aee6f81a3428172d4914e7e0fef0e71cd1f519 | [
"MIT"
]
| 1 | 2019-11-08T11:25:56.000Z | 2019-11-08T11:25:56.000Z | gpytorch/kernels/rbf_kernel.py | VonRosenchild/gpytorch | 092d523027a844939ba85d7ea8c8c7b7511843d5 | [
"MIT"
]
| null | null | null | gpytorch/kernels/rbf_kernel.py | VonRosenchild/gpytorch | 092d523027a844939ba85d7ea8c8c7b7511843d5 | [
"MIT"
]
| 1 | 2021-07-02T19:40:07.000Z | 2021-07-02T19:40:07.000Z | #!/usr/bin/env python3
from .kernel import Kernel
from ..functions import RBFCovariance
def postprocess_rbf(dist_mat):
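    """Map a scaled squared distance onto the RBF kernel value.

    Note: in ``RBFKernel.forward`` below, ``covar_dist(..., square_dist=True)``
    produces d = ||(x1 - x2) / lengthscale||^2, so this returns exp(-d / 2),
    i.e. the covariance given in the RBFKernel docstring.
    """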
return dist_mat.div_(-2).exp_()
class RBFKernel(Kernel):
r"""
Computes a covariance matrix based on the RBF (squared exponential) kernel
between inputs :math:`\mathbf{x_1}` and :math:`\mathbf{x_2}`:
.. math::
\begin{equation*}
k_{\text{RBF}}(\mathbf{x_1}, \mathbf{x_2}) = \exp \left( -\frac{1}{2}
(\mathbf{x_1} - \mathbf{x_2})^\top \Theta^{-2} (\mathbf{x_1} - \mathbf{x_2}) \right)
\end{equation*}
where :math:`\Theta` is a :attr:`lengthscale` parameter.
See :class:`gpytorch.kernels.Kernel` for descriptions of the lengthscale options.
.. note::
This kernel does not have an `outputscale` parameter. To add a scaling parameter,
decorate this kernel with a :class:`gpytorch.kernels.ScaleKernel`.
Args:
:attr:`ard_num_dims` (int, optional):
Set this if you want a separate lengthscale for each
input dimension. It should be `d` if :attr:`x1` is a `n x d` matrix. Default: `None`
:attr:`batch_shape` (torch.Size, optional):
Set this if you want a separate lengthscale for each
batch of input data. It should be `b` if :attr:`x1` is a `b x n x d` tensor. Default: `torch.Size([])`.
:attr:`active_dims` (tuple of ints, optional):
Set this if you want to compute the covariance of only a few input dimensions. The ints
corresponds to the indices of the dimensions. Default: `None`.
:attr:`lengthscale_prior` (Prior, optional):
Set this if you want to apply a prior to the lengthscale parameter. Default: `None`.
:attr:`lengthscale_constraint` (Constraint, optional):
Set this if you want to apply a constraint to the lengthscale parameter. Default: `Positive`.
:attr:`eps` (float):
The minimum value that the lengthscale can take (prevents divide by zero errors). Default: `1e-6`.
Attributes:
:attr:`lengthscale` (Tensor):
The lengthscale parameter. Size/shape of parameter depends on the
:attr:`ard_num_dims` and :attr:`batch_shape` arguments.
Example:
>>> x = torch.randn(10, 5)
>>> # Non-batch: Simple option
>>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
>>> # Non-batch: ARD (different lengthscale for each input dimension)
>>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel(ard_num_dims=5))
>>> covar = covar_module(x) # Output: LazyTensor of size (10 x 10)
>>>
>>> batch_x = torch.randn(2, 10, 5)
>>> # Batch: Simple option
>>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
>>> # Batch: different lengthscale for each batch
>>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel(batch_shape=torch.Size([2])))
>>> covar = covar_module(x) # Output: LazyTensor of size (2 x 10 x 10)
"""
def __init__(self, **kwargs):
super(RBFKernel, self).__init__(has_lengthscale=True, **kwargs)
def forward(self, x1, x2, diag=False, **params):
if (
x1.requires_grad
or x2.requires_grad
or (self.ard_num_dims is not None and self.ard_num_dims > 1)
or diag
):
x1_ = x1.div(self.lengthscale)
x2_ = x2.div(self.lengthscale)
return self.covar_dist(x1_, x2_, square_dist=True, diag=diag,
dist_postprocess_func=postprocess_rbf,
postprocess=True, **params)
return RBFCovariance().apply(x1, x2, self.lengthscale,
lambda x1, x2: self.covar_dist(x1, x2,
square_dist=True,
diag=False,
dist_postprocess_func=postprocess_rbf,
postprocess=False,
**params))
| 47.56044 | 115 | 0.575092 | 4,167 | 0.9628 | 0 | 0 | 0 | 0 | 0 | 0 | 2,946 | 0.680684 |
5ee75d983cd35cd4e28ec87b90865e27b89bfd3b | 5,132 | py | Python | src/dependenpy/finder.py | gitter-badger/dependenpy | db411b7bbd466b79064cbb419049f17cd3bff4c1 | [
"ISC"
]
| 10 | 2020-01-08T10:42:32.000Z | 2021-07-08T01:58:08.000Z | src/dependenpy/finder.py | gitter-badger/dependenpy | db411b7bbd466b79064cbb419049f17cd3bff4c1 | [
"ISC"
]
| 2 | 2020-10-07T09:48:54.000Z | 2020-11-03T23:37:13.000Z | src/dependenpy/finder.py | gitter-badger/dependenpy | db411b7bbd466b79064cbb419049f17cd3bff4c1 | [
"ISC"
]
| 1 | 2019-12-10T18:32:05.000Z | 2019-12-10T18:32:05.000Z | # -*- coding: utf-8 -*-
"""dependenpy finder module."""
from importlib.util import find_spec
from os.path import basename, exists, isdir, isfile, join, splitext
class PackageSpec(object):
"""Holder for a package specification (given as argument to DSM)."""
def __init__(self, name, path, limit_to=None):
"""
Initialization method.
Args:
name (str): name of the package.
path (str): path to the package.
limit_to (list of str): limitations.
"""
self.name = name
self.path = path
self.limit_to = limit_to or []
def __hash__(self):
return hash((self.name, self.path))
@property
def ismodule(self):
"""Property to tell if the package is in fact a module (a file)."""
return self.path.endswith(".py")
def add(self, spec):
"""
Add limitations of given spec to self's.
Args:
spec (PackageSpec): another spec.
"""
for limit in spec.limit_to:
if limit not in self.limit_to:
self.limit_to.append(limit)
@staticmethod
def combine(specs):
"""
Combine package specifications' limitations.
Args:
specs (list of PackageSpec): the package specifications.
Returns:
list of PackageSpec: the new, merged list of PackageSpec.
"""
new_specs = {}
for spec in specs:
if new_specs.get(spec, None) is None:
new_specs[spec] = spec
else:
new_specs[spec].add(spec)
return list(new_specs.values())
class PackageFinder(object):
"""Abstract package finder class."""
def find(self, package, **kwargs):
"""
Find method.
Args:
package (str): package to find.
**kwargs (): additional keyword arguments.
Returns:
PackageSpec: the PackageSpec corresponding to the package, or None.
"""
raise NotImplementedError
class LocalPackageFinder(PackageFinder):
"""Finder to find local packages (directories on the disk)."""
def find(self, package, **kwargs):
"""
Find method.
Args:
package (str): package to find.
**kwargs (): additional keyword arguments.
Returns:
PackageSpec: the PackageSpec corresponding to the package, or None.
"""
if not exists(package):
return None
name, path = None, None
enforce_init = kwargs.pop("enforce_init", True)
if isdir(package):
if isfile(join(package, "__init__.py")) or not enforce_init:
name, path = basename(package), package
elif isfile(package) and package.endswith(".py"):
name, path = splitext(basename(package))[0], package
if name and path:
return PackageSpec(name, path)
return None
class InstalledPackageFinder(PackageFinder):
"""Finder to find installed Python packages using importlib."""
def find(self, package, **kwargs):
"""
Find method.
Args:
package (str): package to find.
**kwargs (): additional keyword arguments.
Returns:
PackageSpec: the PackageSpec corresponding to the package, or None.
"""
spec = find_spec(package)
if spec is None:
return None
limit = []
if "." in package:
package, limit = package.split(".", 1)
limit = [limit]
spec = find_spec(package)
if spec is not None:
if spec.submodule_search_locations:
path = spec.submodule_search_locations[0]
elif spec.origin and spec.origin != "built-in":
path = spec.origin
else:
return None
return PackageSpec(spec.name, path, limit)
return None
class Finder(object):
"""
Main package finder class.
Initialize it with a list of package finder classes (not instances).
"""
def __init__(self, finders=None):
"""
Initialization method.
Args:
finders (list of classes):
list of package finder classes (not instances) in a specific
order. Default: [LocalPackageFinder, InstalledPackageFinder].
"""
if finders is None:
self.finders = [LocalPackageFinder(), InstalledPackageFinder()]
else:
self.finders = [f() for f in finders]
def find(self, package, **kwargs):
"""
Find a package using package finders.
Return the first package found.
Args:
package (str): package to find.
**kwargs (): additional keyword arguments used by finders.
Returns:
PackageSpec: if package found, else None
"""
for finder in self.finders:
package_spec = finder.find(package, **kwargs)
if package_spec:
return package_spec
return None
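# Hedged usage sketch (illustrative addition, not part of the original module);
# the package name and local path below are example inputs:
#
#   finder = Finder()
#   installed = finder.find('dependenpy')                      # installed package
#   local = finder.find('src/dependenpy', enforce_init=True)   # local directory
#   if installed is not None:
#       print(installed.name, installed.path, installed.limit_to)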
| 28.353591 | 79 | 0.563718 | 4,954 | 0.965316 | 0 | 0 | 675 | 0.131528 | 0 | 0 | 2,386 | 0.464926 |
5ee8d965db0dc6afc3a0712b8a012c62228c1b2d | 1,738 | py | Python | pearsonr/beta.py | rkhullar/pearsonr-pure-python | 955fbca6af0a234cf5132d5f83d36a2c411fec7a | [
"MIT"
]
| null | null | null | pearsonr/beta.py | rkhullar/pearsonr-pure-python | 955fbca6af0a234cf5132d5f83d36a2c411fec7a | [
"MIT"
]
| null | null | null | pearsonr/beta.py | rkhullar/pearsonr-pure-python | 955fbca6af0a234cf5132d5f83d36a2c411fec7a | [
"MIT"
]
| null | null | null | import math
def contfractbeta(a: float, b: float, x: float, itmax: int = 200) -> float:
# https://malishoaib.wordpress.com/2014/04/15/the-beautiful-beta-functions-in-raw-python/
# evaluates the continued fraction form of the incomplete Beta function; incompbeta()
# code translated from: Numerical Recipes in C
eps = 3.0e-7
bm = az = am = 1.0
qab = a + b
qap = a + 1.0
qam = a - 1.0
bz = 1.0 - qab * x / qap
for i in range(itmax + 1):
em = float(i + 1)
tem = em + em
d = em * (b - em) * x / ((qam + tem) * (a + tem))
ap = az + d * am
bp = bz + d * bm
d = -(a + em) * (qab + em) * x / ((qap + tem) * (a + tem))
app = ap + d * az
bpp = bp + d * bz
aold = az
am = ap / bpp
bm = bp / bpp
az = app / bpp
bz = 1.0
if abs(az - aold) < (eps * abs(az)):
return az
message = 'a or b too large or given itmax too small for computing incomplete beta function.'
raise ValueError(message)
def incompbeta(a: float, b: float, x: float) -> float:
# https://malishoaib.wordpress.com/2014/04/15/the-beautiful-beta-functions-in-raw-python/
# evaluates incomplete beta function, here a, b > 0 and 0 <= x <= 1
# this function requires contfractbeta(a,b,x, itmax = 200)
# code translated from: Numerical Recipes in C
if x == 0 or x == 1:
return x
else:
lbeta = math.lgamma(a + b) - math.lgamma(a) - math.lgamma(b) + a * math.log(x) + b * math.log(1 - x)
if x < (a + 1) / (a + b + 2):
return math.exp(lbeta) * contfractbeta(a, b, x) / a
else:
return 1 - math.exp(lbeta) * contfractbeta(b, a, 1 - x) / b
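def _example_incompbeta():
    # Hedged sanity checks (illustrative addition) against known values of the
    # regularized incomplete beta function: I_x(1, 1) = x and I_0.5(2, 2) = 0.5.
    assert abs(incompbeta(1.0, 1.0, 0.3) - 0.3) < 1e-6
    assert abs(incompbeta(2.0, 2.0, 0.5) - 0.5) < 1e-6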
| 34.76 | 108 | 0.533947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 563 | 0.323936 |
5eeb434867ca1d9eaca8effbf5839d14aaa33835 | 33,018 | py | Python | hisim/components/generic_pv_system.py | FZJ-IEK3-VSA/HiSim | e9b3a69c6db331523b9ed5ac7aa6f57f9b4798b2 | [
"MIT"
]
| 12 | 2021-10-05T11:38:24.000Z | 2022-03-25T09:56:08.000Z | hisim/components/generic_pv_system.py | FZJ-IEK3-VSA/HiSim | e9b3a69c6db331523b9ed5ac7aa6f57f9b4798b2 | [
"MIT"
]
| 6 | 2021-10-06T13:27:55.000Z | 2022-03-10T12:55:15.000Z | hisim/components/generic_pv_system.py | FZJ-IEK3-VSA/HiSim | e9b3a69c6db331523b9ed5ac7aa6f57f9b4798b2 | [
"MIT"
]
| 4 | 2022-02-21T19:00:50.000Z | 2022-03-22T11:01:38.000Z | # Generic/Built-in
import datetime
import math
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pvlib
from dataclasses_json import dataclass_json
from typing import Optional
from dataclasses import dataclass
from functools import lru_cache
from hisim.simulationparameters import SimulationParameters
# Owned
from hisim import component as cp
from hisim import loadtypes as lt
from hisim import utils
from hisim import log
from hisim.components.weather import Weather
__authors__ = "Vitor Hugo Bellotto Zago"
__copyright__ = "Copyright 2021, the House Infrastructure Project"
__credits__ = ["Noah Pflugradt"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Vitor Hugo Bellotto Zago"
__email__ = "[email protected]"
__status__ = "development"
"""
The functions cited in this module are to some degree based on the tsib project:
[tsib-kotzur]:
Kotzur, Leander, Detlef Stolten, and Hermann-Josef Wagner. Future grid load of the residential building sector. No. RWTH-2018-231872. Lehrstuhl für Brennstoffzellen (FZ Jülich), 2019.
ID: http://hdl.handle.net/2128/21115
http://nbn-resolving.org/resolver?verb=redirect&identifier=urn:nbn:de:0001-2019020614
The implementation of the tsib project can be found under the following repository:
https://github.com/FZJ-IEK3-VSA/tsib
"""
temp_model = pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS["sapm"]["open_rack_glass_glass"]
@lru_cache(maxsize=16)
def simPhotovoltaicFast(
dni_extra=None,
DNI=None,
DHI=None,
GHI=None,
azimuth=None,
apparent_zenith=None,
temperature=None,
wind_speed=None,
surface_azimuth : float = 180,
surface_tilt : float = 30 ):
"""
Simulates a defined PV array with the Sandia PV Array Performance Model.
The implementation is done in accordance with following tutorial:
https://github.com/pvlib/pvlib-python/blob/master/docs/tutorials/tmy_to_power.ipynb
Parameters
----------
surface_tilt: int or float, optional (default:30)
        Tilt angle of the array in degree.
surface_azimuth: int or float, optional (default:180)
        Azimuth angle of the array in degree. 180 degree means south,
90 degree east and 270 west.
losses: float, optional (default: 0.1)
Losses due to soiling, mismatch, diode connections, dc wiring etc.
Returns
--------
"""
poa_irrad = pvlib.irradiance.get_total_irradiance( surface_tilt,
surface_azimuth,
apparent_zenith,
azimuth,
DNI,
GHI,
DHI,
dni_extra )
pvtemps = pvlib.temperature.sapm_cell(poa_irrad["poa_global"], temperature, wind_speed, **temp_model)
pv_dc = pvlib.pvsystem.pvwatts_dc( poa_irrad[ "poa_global" ],
temp_cell = pvtemps,
pdc0 = 1,
gamma_pdc = -0.002,
temp_ref = 25.0 )
if math.isnan(pv_dc):
pv_dc = 0
return pv_dc
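def _example_simPhotovoltaicFast():
    # Hedged usage sketch (illustrative addition) with made-up scalar weather
    # inputs (irradiances in W/m2, angles in degrees, temperature in deg C,
    # wind speed in m/s), assuming a pvlib version that accepts scalars here.
    # Returns the DC power normalized to a 1 W reference module (pdc0=1).
    return simPhotovoltaicFast(
        dni_extra=1360.0,
        DNI=700.0,
        DHI=100.0,
        GHI=700.0,
        azimuth=180.0,
        apparent_zenith=30.0,
        temperature=20.0,
        wind_speed=2.0)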
def simPhotovoltaicSimple(
dni_extra=None,
DNI=None,
DHI=None,
GHI=None,
azimuth=None,
apparent_zenith=None,
temperature=None,
wind_speed=None,
surface_tilt=30,
surface_azimuth=180,
albedo=0.2):
"""
Simulates a defined PV array with the Sandia PV Array Performance Model.
The implementation is done in accordance with following tutorial:
https://github.com/pvlib/pvlib-python/blob/master/docs/tutorials/tmy_to_power.ipynb
Based on the tsib project @[tsib-kotzur] (Check header)
Parameters
----------
tmy_data: pandas.DataFrame(), required
Weatherfile in the format of a tmy file.
surface_tilt: int or float, optional (default:30)
        Tilt angle of the array in degree.
surface_azimuth: int or float, optional (default:180)
        Azimuth angle of the array in degree. 180 degree means south,
90 degree east and 270 west.
albedo: float, optional (default: 0.2)
Reflection coefficient of the surrounding area.
losses: float, optional (default: 0.1)
Losses due to soiling, mismatch, diode connections, dc wiring etc.
load_module_data: Boolean, optional (default: False)
If True the module data base is loaded from the Sandia Website.
Otherwise it is loaded from this relative path
'\\profiles\\PV-Modules\\sandia_modules.csv'.
module_name: str, optional (default:'Hanwha_HSL60P6_PA_4_250T__2013_')
        Module name. The string must exist in the Sandia Module database.
integrateInverter: bool, optional (default: True)
If an inverter shall be added to the simulation, providing the photovoltaic output after the inverter.
inverter_name: str, optional (default: 'ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_')
Type of inverter.
Returns
--------
"""
# automatic pd time series in future pvlib version
# calculate airmass
airmass = pvlib.atmosphere.get_relative_airmass(apparent_zenith)
# use perez model to calculate the plane of array diffuse sky radiation
poa_sky_diffuse = pvlib.irradiance.perez(
surface_tilt,
surface_azimuth,
DHI,
np.float64(DNI),
dni_extra,
apparent_zenith,
azimuth,
airmass,
)
# calculate ground diffuse with specified albedo
poa_ground_diffuse = pvlib.irradiance.get_ground_diffuse(
surface_tilt, GHI, albedo=albedo
)
# calculate angle of incidence
aoi = pvlib.irradiance.aoi(surface_tilt, surface_azimuth, apparent_zenith, azimuth)
# calculate plane of array irradiance
poa_irrad = pvlib.irradiance.poa_components(aoi, np.float64(DNI), poa_sky_diffuse, poa_ground_diffuse)
# calculate pv cell and module temperature
temp_model = pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS["sapm"]["open_rack_glass_glass"]
pvtemps = pvlib.temperature.sapm_cell(poa_irrad["poa_global"], temperature, wind_speed, **temp_model)
pv_dc = pvlib.pvsystem.pvwatts_dc(poa_irrad["poa_global"], temp_cell=pvtemps, pdc0=1, gamma_pdc=-0.002,
temp_ref=25.0)
if math.isnan(pv_dc):
pv_dc = 0
return pv_dc
@dataclass_json
@dataclass
class PVSystemConfig:
parameter_string: str
time: int
location: str
module_name:str
integrate_inverter: bool
inverter_name:str
power: float
def __init__(self,
my_simulation_parameters: SimulationParameters,
time:int,
location:str,
power:float,
module_name:str,
integrate_inverter:bool,
inverter_name:str ):
self.parameter_string = my_simulation_parameters.get_unique_key()
self.time = time
self.location = location
self.module_name = module_name
self.integrate_inverter = integrate_inverter
self.inverter_name = inverter_name
self.power = power
class PVSystem(cp.Component):
"""
Parameters:
-----------------------------------------------------
time:
simulation timeline
location: Location
object Location with temperature and solar data
power: float
Power in kWp to be provided by the PV System
Returns:
-----------------------------------------------------
pass
"""
# Inputs
TemperatureOutside = "TemperatureOutside"
DirectNormalIrradiance = "DirectNormalIrradiance"
DirectNormalIrradianceExtra = "DirectNormalIrradianceExtra"
DiffuseHorizontalIrradiance = "DiffuseHorizontalIrradiance"
GlobalHorizontalIrradiance = "GlobalHorizontalIrradiance"
Azimuth = "Azimuth"
ApparentZenith = "ApparentZenith"
WindSpeed = "WindSpeed"
# Outputs
ElectricityOutput = "ElectricityOutput"
#Forecasts
PV_Forecast_24h = "PV_Forecast_24h"
# Similar components to connect to:
# 1. Weather
@utils.measure_execution_time
def __init__(self,
my_simulation_parameters: SimulationParameters,
my_simulation_repository : Optional[ cp.SimRepository ] = None,
time : int = 2019,
location : str = "Aachen",
power : float = 10E3,
load_module_data : bool = False,
module_name : str = "Hanwha_HSL60P6_PA_4_250T__2013_",
integrateInverter : bool = True,
inverter_name : str = "ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_" ):
super().__init__( "PVSystem", my_simulation_parameters = my_simulation_parameters )
self.pvconfig = PVSystemConfig(my_simulation_parameters=my_simulation_parameters,
location=location, power = power, module_name=module_name,
integrate_inverter=integrateInverter, inverter_name=inverter_name,
time=time)
self.build( load_module_data, my_simulation_repository )
self.t_outC : cp.ComponentInput = self.add_input(self.ComponentName,
self.TemperatureOutside,
lt.LoadTypes.Temperature,
lt.Units.Celsius,
True)
self.DNIC : cp.ComponentInput = self.add_input(self.ComponentName,
self.DirectNormalIrradiance,
lt.LoadTypes.Irradiance,
lt.Units.Wm2,
True)
self.DNIextraC : cp.ComponentInput = self.add_input(self.ComponentName,
self.DirectNormalIrradianceExtra,
lt.LoadTypes.Irradiance,
lt.Units.Wm2,
True)
self.DHIC: cp.ComponentInput = self.add_input(self.ComponentName,
self.DiffuseHorizontalIrradiance,
lt.LoadTypes.Irradiance,
lt.Units.Wm2,
True)
self.GHIC: cp.ComponentInput = self.add_input(self.ComponentName,
self.GlobalHorizontalIrradiance,
lt.LoadTypes.Irradiance,
lt.Units.Wm2,
True)
self.azimuthC : cp.ComponentInput = self.add_input(self.ComponentName,
self.Azimuth,
lt.LoadTypes.Any,
lt.Units.Degrees,
True)
self.apparent_zenithC : cp.ComponentInput = self.add_input(self.ComponentName,
self.ApparentZenith,
lt.LoadTypes.Any,
lt.Units.Degrees,
True)
self.wind_speedC: cp.ComponentInput = self.add_input(self.ComponentName,
self.WindSpeed,
lt.LoadTypes.Speed,
lt.Units.MeterPerSecond,
True)
self.electricity_outputC : cp.ComponentOutput = self.add_output(self.ComponentName,
PVSystem.ElectricityOutput,
lt.LoadTypes.Electricity,
lt.Units.Watt,
False)
self.add_default_connections(Weather, self.get_weather_default_connections())
def get_weather_default_connections(self):
log.information("setting weather default connections")
connections = []
weather_classname = Weather.get_classname()
connections.append(cp.ComponentConnection(PVSystem.TemperatureOutside,weather_classname, Weather.TemperatureOutside))
connections.append(cp.ComponentConnection(PVSystem.DirectNormalIrradiance,weather_classname, Weather.DirectNormalIrradiance))
connections.append(cp.ComponentConnection(PVSystem.DirectNormalIrradianceExtra,weather_classname, Weather.DirectNormalIrradianceExtra))
connections.append(cp.ComponentConnection(PVSystem.DiffuseHorizontalIrradiance,weather_classname, Weather.DiffuseHorizontalIrradiance))
connections.append(cp.ComponentConnection(PVSystem.GlobalHorizontalIrradiance,weather_classname, Weather.GlobalHorizontalIrradiance))
connections.append(cp.ComponentConnection(PVSystem.Azimuth,weather_classname, Weather.Azimuth))
connections.append(cp.ComponentConnection(PVSystem.ApparentZenith,weather_classname, Weather.ApparentZenith))
connections.append(cp.ComponentConnection(PVSystem.WindSpeed,weather_classname, Weather.WindSpeed))
return connections
def i_restore_state(self):
pass
def write_to_report(self):
lines = []
lines.append("Name: {}".format(self.ComponentName))
lines.append("Power: {:3.0f} kWp".format(self.pvconfig.power*1E-3))
lines.append("Module: {}".format(self.pvconfig.module_name))
lines.append("Inverter: {}".format(self.pvconfig.inverter_name))
return lines
def i_simulate(self, timestep: int, stsv: cp.SingleTimeStepValues, force_convergence: bool):
if hasattr(self, "output"):
#if(len(self.output) < timestep)
# raise Exception("Somehow the precalculated list of values for the PV system seems to be incorrect. Please delete the cache.")
stsv.set_output_value(self.electricity_outputC, self.output[timestep] * self.pvconfig.power)
else:
DNI = stsv.get_input_value(self.DNIC)
dni_extra = stsv.get_input_value(self.DNIextraC)
DHI = stsv.get_input_value(self.DHIC)
GHI = stsv.get_input_value(self.GHIC)
azimuth = stsv.get_input_value(self.azimuthC)
temperature = stsv.get_input_value(self.t_outC)
wind_speed = stsv.get_input_value(self.wind_speedC)
apparent_zenith = stsv.get_input_value(self.apparent_zenithC)
#ac_power = self.simPhotovoltaic2(dni_extra=dni_extra,
# DNI=DNI,
# DHI=DHI,
# GHI=GHI,
# azimuth=azimuth,
# apparent_zenith=apparent_zenith,
# temperature=temperature,
# wind_speed=wind_speed)
#ac_power = simPhotovoltaicSimple(
# dni_extra=dni_extra,
# DNI=DNI,
# DHI=DHI,
# GHI=GHI,
# azimuth=azimuth,
# apparent_zenith=apparent_zenith,
# temperature=temperature,
# wind_speed=wind_speed)
ac_power = simPhotovoltaicFast(
dni_extra=dni_extra,
DNI=DNI,
DHI=DHI,
GHI=GHI,
azimuth=azimuth,
apparent_zenith=apparent_zenith,
temperature=temperature,
wind_speed=wind_speed)
resultingvalue = ac_power * self.pvconfig.power
# if you wanted to access the temperature forecast from the weather component:
# val = self.simulation_repository.get_entry(Weather.Weather_Temperature_Forecast_24h)
stsv.set_output_value(self.electricity_outputC, resultingvalue)
self.data[timestep] = ac_power
if timestep + 1 == self.data_length:
database = pd.DataFrame(self.data, columns=["output"])
database.to_csv(self.cache_filepath, sep=",", decimal=".", index=False)
if self.my_simulation_parameters.system_config.predictive == True:
last_forecast_timestep = int( timestep + 24 * 3600 / self.my_simulation_parameters.seconds_per_timestep )
if ( last_forecast_timestep > len( self.output ) ):
last_forecast_timestep = len( self.output )
pvforecast = [ self.output[ t ] * self.pvconfig.power for t in range( timestep, last_forecast_timestep ) ]
self.simulation_repository.set_entry( self.PV_Forecast_24h, pvforecast )
def get_coordinates(self, location="Aachen", year=2019):
"""
Reads a test reference year file and gets the GHI, DHI and DNI from it.
Based on the tsib project @[tsib-kotzur] (Check header)
Parameters
-------
try_num: int (default: 4)
The region number of the test reference year.
year: int (default: 2010)
The year. Only data for 2010 and 2030 available
"""
# get the correct file path
filepath = os.path.join(utils.HISIMPATH["weather"][location])
# get the geoposition
with open(filepath + ".dat", encoding="utf-8") as fp:
lines = fp.readlines()
location_name = lines[0].split(maxsplit=2)[2].replace('\n', '')
lat = float(lines[1][20:37])
lon = float(lines[2][15:30])
self.location = {"name": location_name, "latitude": lat, "longitude": lon}
self.index = pd.date_range(
"{}-01-01 00:00:00".format(year), periods=60*24*365, freq="T", tz="Europe/Berlin"
)
def i_save_state(self):
pass
def i_doublecheck(self, timestep: int, stsv: cp.SingleTimeStepValues):
pass
def build( self, load_module_data : bool, my_simulation_repository : Optional[ cp.SimRepository ] ):
log.information(self.pvconfig.to_json()) # type: ignore
file_exists, self.cache_filepath = utils.get_cache_file("PVSystem", self.pvconfig)
if file_exists:
self.output = pd.read_csv(self.cache_filepath, sep=',', decimal='.')['output'].tolist()
if len(self.output) != self.my_simulation_parameters.timesteps:
raise Exception("Reading the cached PV values seems to have failed. Expected "
+ str(self.my_simulation_parameters.timesteps) + " values, but got " + str(len(self.output )))
else:
self.get_coordinates(location = self.pvconfig.location, year = self.pvconfig.time)
# Factor to guarantee peak power, based on modules of 250 Wp each
self.ac_power_factor = math.ceil( ( self.pvconfig.power * 1e3 ) / 250 )
# when predictive control is activated, the PV simulation is run beforehand to make forecasting easier
if self.my_simulation_parameters.system_config.predictive and my_simulation_repository is not None:
#get yearly weather data from dictionary
dni_extra = my_simulation_repository.get_entry( Weather.Weather_DirectNormalIrradianceExtra_yearly_forecast )
DNI = my_simulation_repository.get_entry( Weather.Weather_DirectNormalIrradiance_yearly_forecast )
DHI = my_simulation_repository.get_entry( Weather.Weather_DiffuseHorizontalIrradiance_yearly_forecast )
GHI = my_simulation_repository.get_entry( Weather.Weather_GlobalHorizontalIrradiance_yearly_forecast )
azimuth = my_simulation_repository.get_entry( Weather.Weather_Azimuth_yearly_forecast )
apparent_zenith = my_simulation_repository.get_entry( Weather.Weather_ApparentZenith_yearly_forecast )
temperature = my_simulation_repository.get_entry( Weather.Weather_TemperatureOutside_yearly_forecast )
wind_speed = my_simulation_repository.get_entry( Weather.Weather_WindSpeed_yearly_forecast )
x= [ ]
for i in range( len( dni_extra ) ):
x.append( simPhotovoltaicFast( dni_extra[ i ], DNI[ i ], DHI[ i ], GHI[ i ], azimuth[ i ], apparent_zenith[ i ], temperature[ i ], wind_speed[ i ] ) )
self.output = x
database = pd.DataFrame( self.output, columns = [ "output" ] )
database.to_csv( self.cache_filepath, sep=",", decimal=".", index=False )
else:
self.data = [0] * self.my_simulation_parameters.timesteps
self.data_length = self.my_simulation_parameters.timesteps
if self.my_simulation_parameters.system_config.predictive and my_simulation_repository is not None:
my_simulation_repository.delete_entry( Weather.Weather_DirectNormalIrradianceExtra_yearly_forecast )
my_simulation_repository.delete_entry( Weather.Weather_DirectNormalIrradiance_yearly_forecast )
my_simulation_repository.delete_entry( Weather.Weather_DiffuseHorizontalIrradiance_yearly_forecast )
my_simulation_repository.delete_entry( Weather.Weather_GlobalHorizontalIrradiance_yearly_forecast )
my_simulation_repository.delete_entry( Weather.Weather_Azimuth_yearly_forecast )
my_simulation_repository.delete_entry( Weather.Weather_ApparentZenith_yearly_forecast )
my_simulation_repository.delete_entry( Weather.Weather_TemperatureOutside_yearly_forecast )
my_simulation_repository.delete_entry( Weather.Weather_WindSpeed_yearly_forecast )
self.modules = pd.read_csv(
os.path.join(utils.HISIMPATH["photovoltaic"]["modules"]),
index_col=0,
)
self.inverters = pd.read_csv(
os.path.join(utils.HISIMPATH["photovoltaic"]["inverters"]),
index_col=0,
)
self.temp_model = pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS["sapm"]["open_rack_glass_glass"]
# load the sandia data
if load_module_data:
# load module data online
modules = pvlib.pvsystem.retrieve_sam(name="SandiaMod")
self.module = modules[self.pvconfig.module_name]
# get inverter data
inverters = pvlib.pvsystem.retrieve_sam("cecinverter")
self.inverter = inverters[self.pvconfig.inverter_name]
else:
# load module and inverter data from csv
module = self.modules[self.pvconfig.module_name]
self.module = pd.to_numeric(module, errors="coerce")
inverter = self.inverters[self.pvconfig.inverter_name]
self.inverter = pd.to_numeric(inverter, errors="coerce")
#self.power = self.power
#self.module_name = module_name
#self.inverter_name = inverter_name
#self.integrateInverter = integrateInverter
#self.simPhotovoltaicSimpleJit = simPhotovoltaicSimple
def plot(self):
# Plots ac_power. One day is represented by 1440 steps.
#self.ac_power.iloc[0:7200].plot()
plt.plot(self.data)
plt.ylabel("Power [W]")
plt.xlabel("Time")
plt.show()
def interpolate(self,pd_database,year):
firstday = pd.Series([0.0], index=[
pd.to_datetime(datetime.datetime(year-1, 12, 31, 23, 0), utc=True).tz_convert("Europe/Berlin")])
lastday = pd.Series(pd_database[-1], index=[
pd.to_datetime(datetime.datetime(year, 12, 31, 22, 59), utc=True).tz_convert("Europe/Berlin")])
#pd_database = pd_database.append(firstday)
pd_database = pd_database.append(lastday)
pd_database = pd_database.sort_index()
return pd_database.resample('1T').asfreq().interpolate(method='linear').tolist()
def simPhotovoltaic2(
self,
dni_extra=None,
DNI=None,
DHI=None,
GHI=None,
azimuth=None,
apparent_zenith=None,
temperature=None,
wind_speed=None,
surface_tilt=30,
surface_azimuth=180,
albedo=0.2):
"""
Simulates a defined PV array with the Sandia PV Array Performance Model.
The implementation is done in accordance with following tutorial:
https://github.com/pvlib/pvlib-python/blob/master/docs/tutorials/tmy_to_power.ipynb
Based on the tsib project @[tsib-kotzur] (Check header)
Parameters
----------
tmy_data: pandas.DataFrame(), required
Weatherfile in the format of a tmy file.
surface_tilt: int or float, optional (default:30)
Tilt angle of the array in degrees.
surface_azimuth: int or float, optional (default:180)
Azimuth angle of the array in degrees. 180 degrees means south,
90 degrees east and 270 west.
albedo: float, optional (default: 0.2)
Reflection coefficient of the surrounding area.
losses: float, optional (default: 0.1)
Losses due to soiling, mismatch, diode connections, dc wiring etc.
load_module_data: Boolean, optional (default: False)
If True the module data base is loaded from the Sandia Website.
Otherwise it is loaded from this relative path
'\\profiles\\PV-Modules\\sandia_modules.csv'.
module_name: str, optional (default:'Hanwha_HSL60P6_PA_4_250T__2013_')
Module name. The string must be existens in Sandia Module database.
integrateInverter: bool, optional (default: True)
If an inverter shall be added to the simulation, providing the photovoltaic output after the inverter.
inverter_name: str, optional (default: 'ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_')
Type of inverter.
Returns
--------
"""
# automatic pd time series in future pvlib version
# calculate airmass
airmass = pvlib.atmosphere.get_relative_airmass(apparent_zenith)
# use perez model to calculate the plane of array diffuse sky radiation
poa_sky_diffuse = pvlib.irradiance.perez(
surface_tilt,
surface_azimuth,
DHI,
np.float64(DNI),
dni_extra,
apparent_zenith,
azimuth,
airmass,
)
# calculate ground diffuse with specified albedo
poa_ground_diffuse = pvlib.irradiance.get_ground_diffuse(
surface_tilt, GHI, albedo=albedo
)
# calculate angle of incidence
aoi = pvlib.irradiance.aoi(surface_tilt, surface_azimuth, apparent_zenith, azimuth)
# calculate plane of array irradiance
poa_irrad = pvlib.irradiance.poa_components(aoi, np.float64(DNI), poa_sky_diffuse, poa_ground_diffuse)
# calculate pv cell and module temperature
#temp_model = pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS["sapm"]["open_rack_glass_glass"]
pvtemps = pvlib.temperature.sapm_cell(poa_irrad["poa_global"], temperature, wind_speed, **self.temp_model)
# calculate effective irradiance on pv module
sapm_irr = pvlib.pvsystem.sapm_effective_irradiance(
module=self.module,
poa_direct=poa_irrad["poa_direct"],
poa_diffuse=poa_irrad["poa_diffuse"],
airmass_absolute=airmass,
aoi=aoi,
)
# calculate pv performance
sapm_out = pvlib.pvsystem.sapm(
sapm_irr,
module=self.module,
temp_cell=pvtemps,
)
# calculate peak load of single module [W]
peak_load = self.module.loc["Impo"] * self.module.loc["Vmpo"]
ac_power = pd.DataFrame()
if self.pvconfig.integrate_inverter:
# calculate load after inverter
iv_load = pvlib.inverter.sandia(inverter=self.inverter, v_dc=sapm_out["v_mp"], p_dc=sapm_out["p_mp"])
ac_power = iv_load / peak_load
else:
# load in [kW/kWp]
ac_power = sapm_out["p_mp"] / peak_load
if math.isnan(ac_power):
ac_power = 0.0
#ac_power = ac_power * self.time_correction_factor
#ac_power = ac_power
#data = [DHI,
# DNI,
# GHI,
# dni_extra,
# aoi,
# apparent_zenith,
# azimuth,
# airmass,
# wind_speed]
#if timestep % 60 == 0 and timestep < 1442:
# log.information(data)
# log.information("Timestep:{} , AcPower: {}".format(timestep, ac_power))
return ac_power
def readTRY(location="Aachen", year=2010):
"""
Reads a test reference year file and gets the GHI, DHI and DNI from it.
Based on the tsib project @[tsib-kotzur] (Check header)
Parameters
-------
try_num: int (default: 4)
The region number of the test reference year.
year: int (default: 2010)
The year. Only data for 2010 and 2030 available
"""
# get the correct file path
filepath = os.path.join(utils.HISIMPATH["weather"][location])
# get the geoposition
with open(filepath + ".dat", encoding="utf-8") as fp:
lines = fp.readlines()
location_name = lines[0].split(maxsplit=2)[2].replace('\n', '')
lat = float(lines[1][20:37])
lon = float(lines[2][15:30])
location = {"name": location_name, "latitude": lat, "longitude": lon}
# check if time series data already exists as .csv with DNI
if os.path.isfile(filepath + ".csv"):
data = pd.read_csv(filepath + ".csv", index_col=0, parse_dates=True,sep=";",decimal=",")
data.index = pd.to_datetime(data.index, utc=True).tz_convert("Europe/Berlin")
# else read from .dat and calculate DNI etc.
else:
# get data
data = pd.read_csv(
filepath + ".dat", sep=r"\s+", skiprows=([i for i in range(0, 31)])
)
data.index = pd.date_range(
"{}-01-01 00:00:00".format(year), periods=8760, freq="H", tz="Europe/Berlin"
)
data["GHI"] = data["D"] + data["B"]
data = data.rename(columns={"D": "DHI", "t": "T", "WG": "WS"})
# calculate direct normal
data["DNI"] = calculateDNI(data["B"], lon, lat)
# data["DNI"] = data["B"]
# save as .csv
#data.to_csv(filepath + ".csv",sep=";",decimal=",")
return data, location
def calculateDNI(directHI, lon, lat, zenith_tol=87.0):
"""
Calculates the direct NORMAL irradiance from the direct horizontal irradiance with the help of the PV lib.
Based on the tsib project @[tsib-kotzur] (Check header)
Parameters
----------
directHI: pd.Series with time index
Direct horizontal irradiance
lon: float
Longitude of the location
lat: float
Latitude of the location
zenith_tol: float, optional
Avoid cosines of values above a certain zenith angle in order to avoid division by zero.
Returns
-------
DNI: pd.Series
"""
solarPos = pvlib.solarposition.get_solarposition(directHI.index, lat, lon)
solarPos["apparent_zenith"][solarPos.apparent_zenith > zenith_tol] = zenith_tol
DNI = directHI.div(solarPos["apparent_zenith"].apply(math.radians).apply(math.cos))
DNI = DNI.fillna(0)
if DNI.isnull().values.any():
raise ValueError("Something went wrong...")
return DNI
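# Illustration (added for clarity, not part of the original module): calculateDNI
# divides the direct *horizontal* irradiance by the cosine of the apparent solar
# zenith angle (capped at zenith_tol) to recover the direct *normal* irradiance.
# Hand-checked example, assuming a direct horizontal irradiance of 400 W/m^2 and
# an apparent zenith of 60 degrees:
#   DNI = 400 / cos(60 deg) = 400 / 0.5 = 800 W/m^2
# i.e. the beam carries twice the power density on a plane facing the sun than
# it does on the horizontal plane.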
| 45.168263 | 183 | 0.594009 | 23,586 | 0.714294 | 0 | 0 | 7,306 | 0.22126 | 0 | 0 | 11,054 | 0.334767 |
5eeb79ff59fe8c898948a4d629f95025dddf840d | 1,843 | py | Python | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/__init__.py | t-triobox/gQuant | 6ee3ba104ce4c6f17a5755e7782298902d125563 | [
"Apache-2.0"
]
| null | null | null | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/__init__.py | t-triobox/gQuant | 6ee3ba104ce4c6f17a5755e7782298902d125563 | [
"Apache-2.0"
]
| null | null | null | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/__init__.py | t-triobox/gQuant | 6ee3ba104ce4c6f17a5755e7782298902d125563 | [
"Apache-2.0"
]
| null | null | null | """
////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) NVIDIA Corporation. All rights reserved.
//
// NVIDIA Sample Code
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
"""
from .loadCsvNode import LoadCsvNode
from .bootstrapNode import BootstrapNode
from .logReturnNode import LogReturnNode
from .distanceNode import DistanceNode
from .hierarchicalClusteringNode import HierarchicalClusteringNode
from .hrpWeight import HRPWeightNode
from .portfolioNode import PortfolioNode
from .performanceMetricNode import PerformanceMetricNode
from .nrpWeightNode import NRPWeightNode
from .maxDrawdownNode import MaxDrawdownNode
from .featureNode import FeatureNode
from .aggregateTimeFeature import AggregateTimeFeatureNode
from .mergeNode import MergeNode
from .diffNode import DiffNode
from .rSquaredNode import RSquaredNode
from .shapSummaryPlotNode import ShapSummaryPlotPlotNode
from .leverageNode import LeverageNode
from .rawDataNode import RawDataNode
from .transactionCostNode import TransactionCostNode
__all__ = ["LoadCsvNode", "BootstrapNode", "LogReturnNode",
"DistanceNode", "HierarchicalClusteringNode", "HRPWeightNode",
"PortfolioNode", "PerformanceMetricNode", "NRPWeightNode",
"MaxDrawdownNode", "FeatureNode", "AggregateTimeFeatureNode",
"MergeNode", "DiffNode", "RSquaredNode", "ShapSummaryPlotPlotNode",
"LeverageNode", "RawDataNode", "TransactionCostNode"]
| 42.860465 | 78 | 0.72382 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 905 | 0.491047 |
5eecada079f1111eeed67c73ca6a1720da167194 | 1,541 | py | Python | pythran/tests/rosetta/greatest_subsequential_sum.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
]
| 1,647 | 2015-01-13T01:45:38.000Z | 2022-03-28T01:23:41.000Z | pythran/tests/rosetta/greatest_subsequential_sum.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
]
| 1,116 | 2015-01-01T09:52:05.000Z | 2022-03-18T21:06:40.000Z | pythran/tests/rosetta/greatest_subsequential_sum.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
]
| 180 | 2015-02-12T02:47:28.000Z | 2022-03-14T10:28:18.000Z | #from http://rosettacode.org/wiki/Greatest_subsequential_sum#Python
#pythran export maxsum(int list)
#pythran export maxsumseq(int list)
#pythran export maxsumit(int list)
#runas maxsum([0, 1, 0])
#runas maxsumseq([-1, 2, -1, 3, -1])
#runas maxsumit([-1, 1, 2, -5, -6])
def maxsum(sequence):
"""Return maximum sum."""
maxsofar, maxendinghere = 0, 0
for x in sequence:
# invariant: ``maxendinghere`` and ``maxsofar`` are accurate for ``x[0..i-1]``
maxendinghere = max(maxendinghere + x, 0)
maxsofar = max(maxsofar, maxendinghere)
return maxsofar
def maxsumseq(sequence):
start, end, sum_start = -1, -1, -1
maxsum_, sum_ = 0, 0
for i, x in enumerate(sequence):
sum_ += x
if maxsum_ < sum_: # found maximal subsequence so far
maxsum_ = sum_
start, end = sum_start, i
elif sum_ < 0: # start new sequence
sum_ = 0
sum_start = i
assert maxsum_ == maxsum(sequence)
assert maxsum_ == sum(sequence[start + 1:end + 1])
return sequence[start + 1:end + 1]
def maxsumit(iterable):
maxseq = seq = []
start, end, sum_start = -1, -1, -1
maxsum_, sum_ = 0, 0
for i, x in enumerate(iterable):
seq.append(x); sum_ += x
if maxsum_ < sum_:
maxseq = seq; maxsum_ = sum_
start, end = sum_start, i
elif sum_ < 0:
seq = []; sum_ = 0
sum_start = i
assert maxsum_ == sum(maxseq[:end - start])
return maxseq[:end - start]
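# Added self-check (a minimal demo mirroring the #runas cases above; the expected
# values are hand-verified against the implementations, not taken from the original file):
if __name__ == "__main__":
    print(maxsum([0, 1, 0]))              # expected: 1
    print(maxsumseq([-1, 2, -1, 3, -1]))  # expected: [2, -1, 3]
    print(maxsumit([-1, 1, 2, -5, -6]))   # expected: [1, 2]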
| 31.44898 | 96 | 0.580792 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 430 | 0.27904 |
5eed202c73e618fc929047ee896a35003f968654 | 28,280 | py | Python | src/binwalk/__init__.py | dotysan/binwalk | d3b5d73538557f2a290996dcea84352fcfb6d1a1 | [
"MIT"
]
| 1 | 2020-03-04T15:14:40.000Z | 2020-03-04T15:14:40.000Z | src/binwalk/__init__.py | dotysan/binwalk | d3b5d73538557f2a290996dcea84352fcfb6d1a1 | [
"MIT"
]
| null | null | null | src/binwalk/__init__.py | dotysan/binwalk | d3b5d73538557f2a290996dcea84352fcfb6d1a1 | [
"MIT"
]
| null | null | null | __all__ = ["Binwalk"]
import os
import re
import time
import magic
from binwalk.compat import *
from binwalk.config import *
from binwalk.update import *
from binwalk.filter import *
from binwalk.parser import *
from binwalk.plugins import *
from binwalk.plotter import *
from binwalk.hexdiff import *
from binwalk.entropy import *
from binwalk.extractor import *
from binwalk.prettyprint import *
from binwalk.smartstrings import *
from binwalk.smartsignature import *
from binwalk.common import file_size, unique_file_name, BlockFile
class Binwalk(object):
'''
Primary Binwalk class.
Useful class objects:
self.filter - An instance of the MagicFilter class.
self.extractor - An instance of the Extractor class.
self.parser - An instance of the MagicParser class.
self.display - An instance of the PrettyPrint class.
self.magic_files - A list of magic file path strings to use whenever the scan() method is invoked.
self.scan_length - The total number of bytes to be scanned.
self.total_scanned - The number of bytes that have already been scanned.
self.scan_type - The type of scan being performed, one of: BINWALK, BINCAST, BINARCH, STRINGS, ENTROPY.
Performing a simple binwalk scan:
from binwalk import Binwalk
scan = Binwalk().scan(['firmware1.bin', 'firmware2.bin'])
for (filename, file_results) in scan.iteritems():
print "Results for %s:" % filename
for (offset, results) in file_results:
for result in results:
print offset, result['description']
'''
# Default libmagic flags. Basically disable anything we don't need in the name of speed.
DEFAULT_FLAGS = magic.MAGIC_NO_CHECK_TEXT | magic.MAGIC_NO_CHECK_ENCODING | magic.MAGIC_NO_CHECK_APPTYPE | magic.MAGIC_NO_CHECK_TOKENS
# Maximum magic bytes length
MAX_SIGNATURE_SIZE = 128
# Minimum verbosity level at which to enable extractor verbosity.
VERY_VERBOSE = 2
# Scan every byte by default.
DEFAULT_BYTE_ALIGNMENT = 1
# Valid scan_type values.
# ENTROPY must be the largest value to ensure it is performed last if multiple scans are performed.
# REHASH must also be larger than any scans that would generate extracted files.
BINWALK = 0x01
BINARCH = 0x02
BINCAST = 0x03
STRINGS = 0x04
COMPRESSION = 0x05
HEXDIFF = 0x06
CUSTOM = 0x07
REHASH = 0x08
BINVIS = 0x09
ENTROPY = 0x0A
def __init__(self, magic_files=[], flags=magic.MAGIC_NONE, log=None, quiet=False, verbose=0, ignore_smart_keywords=False, ignore_time_skews=False, load_extractor=False, load_plugins=True, exec_commands=True, max_extract_size=None):
'''
Class constructor.
@magic_files - A list of magic files to use.
@flags - Flags to pass to magic_open. [TODO: Might this be more appropriate as an argument to load_signatures?]
@log - Output PrettyPrint data to log file as well as to stdout.
@quiet - If set to True, suppress PrettyPrint output to stdout.
@verbose - Verbosity level.
@ignore_smart_keywords - Set to True to ignore smart signature keywords.
@ignore_time_skews - Set to True to ignore file results with timestamps in the future.
@load_extractor - Set to True to load the default extraction rules automatically.
@load_plugins - Set to False to disable plugin support.
@exec_commands - Set to False to disable the execution of external utilities when extracting data from files.
@max_extract_size - Limit the size of extracted files.
Returns None.
'''
self.flags = self.DEFAULT_FLAGS | flags
self.last_extra_data_section = ''
self.load_plugins = load_plugins
self.magic_files = magic_files
self.verbose = verbose
self.total_scanned = 0
self.scan_length = 0
self.total_read = 0
self.matryoshka = 1
self.epoch = 0
self.year = 0
self.plugins = None
self.magic = None
self.mfile = None
self.entropy = None
self.strings = None
self.scan_type = self.BINWALK
if not ignore_time_skews:
# Consider timestamps up to 1 year in the future valid,
# to account for any minor time skew on the local system.
self.year = time.localtime().tm_year + 1
self.epoch = int(time.time()) + (60 * 60 * 24 * 365)
# Instantiate the config class so we can access file/directory paths
self.config = Config()
# Use the system default magic file if no other was specified
if not self.magic_files or self.magic_files is None:
# Append the user's magic file first so that those signatures take precedence
self.magic_files = [
self.config.paths['user'][self.config.BINWALK_MAGIC_FILE],
self.config.paths['system'][self.config.BINWALK_MAGIC_FILE],
]
# Only set the extractor verbosity if told to be very verbose
if self.verbose >= self.VERY_VERBOSE:
extractor_verbose = True
else:
extractor_verbose = False
# Create an instance of the PrettyPrint class, which can be used to print results to screen/file.
self.display = PrettyPrint(self, log=log, quiet=quiet, verbose=verbose)
# Create MagicFilter and Extractor class instances. These can be used to:
#
# o Create include/exclude filters
# o Specify file extraction rules to be applied during a scan
#
self.filter = MagicFilter()
self.extractor = Extractor(verbose=extractor_verbose, exec_commands=exec_commands, max_size=max_extract_size)
if load_extractor:
self.extractor.load_defaults()
# Create SmartSignature and MagicParser class instances. These are mostly for internal use.
self.smart = SmartSignature(self.filter, ignore_smart_signatures=ignore_smart_keywords)
self.parser = MagicParser(self.filter, self.smart)
def __del__(self):
self.cleanup()
def __enter__(self):
return self
def __exit__(self, t, v, traceback):
self.cleanup()
def cleanup(self):
'''
Close magic and cleanup any temporary files generated by the internal instance of MagicParser.
Returns None.
'''
try:
self.magic.close()
except:
pass
try:
self.parser.cleanup()
except:
pass
def load_signatures(self, magic_files=[]):
'''
Load signatures from magic file(s).
Called automatically by Binwalk.scan() with all defaults, if not already called manually.
@magic_files - A list of magic files to use (default: self.magic_files).
Returns None.
'''
# The magic files specified here override any already set
if magic_files and magic_files is not None:
self.magic_files = magic_files
# Parse the magic file(s) and initialize libmagic
self.mfile = self.parser.parse(self.magic_files)
self.magic = magic.open(self.flags)
self.magic.load(str2bytes(self.mfile))
# Once the temporary magic file is loaded into libmagic, we don't need it anymore; delete the temp file
self.parser.rm_magic_file()
def hexdiff(self, file_names, length=0x100, offset=0, block=16, first=False):
if not length and len(file_names) > 0:
length = file_size(file_names[0])
if not block:
block = 16
HexDiff(self).display(file_names, offset=offset, size=length, block=block, show_first_only=first)
def analyze_strings(self, file_names, length=0, offset=0, n=0, block=0, load_plugins=True, whitelist=[], blacklist=[]):
'''
Performs a strings analysis on the specified file(s).
@file_names - A list of files to analyze.
@length - The number of bytes in the file to analyze.
@offset - The starting offset into the file to begin analysis.
@n - The minimum valid string length.
@block - The block size to use when performing entropy analysis.
@load_plugins - Set to False to disable plugin callbacks.
@whitelist - A list of whitelisted plugins.
@blacklist - A list of blacklisted plugins.
Returns a dictionary compatible with other classes and methods (Entropy, Binwalk, analyze_entropy, etc):
{
'file_name' : (offset, [{
'description' : 'Strings',
'string' : 'found_string'
}]
)
}
'''
data = {}
self.strings = Strings(file_names,
self,
length=length,
offset=offset,
n=n,
block=block,
algorithm='gzip', # Use gzip here as it is faster and we don't need the detail provided by shannon
load_plugins=load_plugins,
whitelist=whitelist,
blacklist=blacklist)
data = self.strings.strings()
del self.strings
self.strings = None
return data
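# Example of consuming the structure documented above (added sketch; the file
# name and fields are illustrative and follow the docstring, not new behavior):
#
#   results = Binwalk().analyze_strings(['firmware.bin'])
#   for file_name, (offset, string_results) in results.items():
#       for result in string_results:
#           print(file_name, offset, result['string'])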
def analyze_entropy(self, files, offset=0, length=0, block=0, plot=True, legend=True, save=False, algorithm=None, load_plugins=True, whitelist=[], blacklist=[], compcheck=False):
'''
Performs an entropy analysis on the specified file(s).
@files - A dictionary containing file names and results data, as returned by Binwalk.scan.
@offset - The offset into the data to begin analysis.
@length - The number of bytes to analyze.
@block - The size of the data blocks to analyze.
@plot - Set to False to disable plotting.
@legend - Set to False to exclude the legend and custom offset markers from the plot.
@save - Set to True to save plots to disk instead of displaying them.
@algorithm - Set to 'gzip' to use the gzip entropy "algorithm".
@load_plugins - Set to False to disable plugin callbacks.
@whitelist - A list of whitelisted plugins.
@blacklist - A list of blacklisted plugins.
@compcheck - Set to True to perform heuristic compression detection.
Returns a dictionary of:
{
'file_name' : ([list, of, offsets], [list, of, entropy], average_entropy)
}
'''
data = {}
self.entropy = Entropy(files,
self,
offset,
length,
block,
plot,
legend,
save,
algorithm=algorithm,
load_plugins=load_plugins,
whitelist=whitelist,
blacklist=blacklist,
compcheck=compcheck)
data = self.entropy.analyze()
del self.entropy
self.entropy = None
return data
def plot3d(self, target_files, offset=0, length=0, max_points=None, show_grids=False, verbose=False):
'''
Generates a 3D data plot of the specified target files.
@target_files - File or list of files to scan.
@offset - Starting offset at which to start the scan.
@length - Number of bytes to scan. Specify 0 to scan the entire file(s).
@max_points - Set the maximum number of data points to plot.
@show_grids - Set to True to show axis grids in the 3D plot.
@verbose - Set to True to enable verbose output.
Returns None.
'''
if not isinstance(target_files, type([])):
target_files = [target_files]
Plotter3D(target_files, offset=offset, length=length, max_points=max_points, show_grids=show_grids, verbose=verbose).plot()
def plot2d(self, target_files, offset=0, length=0, max_points=None, show_grids=False, verbose=False):
'''
Generates a 2D data plot of the specified target files.
@target_files - File or list of files to scan.
@offset - Starting offset at which to start the scan.
@length - Number of bytes to scan. Specify 0 to scan the entire file(s).
@max_points - Set the maximum number of data points to plot.
@show_grids - Set to True to show axis grids in the 2D plot.
@verbose - Set to True to enable verbose output.
Returns None.
'''
if not isinstance(target_files, type([])):
target_files = [target_files]
Plotter2D(target_files, offset=offset, length=length, max_points=max_points, show_grids=show_grids, verbose=verbose).plot()
def scan(self, target_files, offset=0, length=0, show_invalid_results=False, callback=None, start_callback=None, end_callback=None, base_dir=None, matryoshka=1, plugins_whitelist=[], plugins_blacklist=[]):
'''
Performs a binwalk scan on a file or list of files.
@target_files - File or list of files to scan.
@offset - Starting offset at which to start the scan.
@length - Number of bytes to scan. Specify -1 for streams.
@show_invalid_results - Set to True to display invalid results.
@callback - Callback function to be invoked when matches are found.
@start_callback - Callback function to be invoked prior to scanning each file.
@end_callback - Callback function to be invoked after scanning each file.
@base_dir - Base directory for output files.
@matryoshka - Number of levels to traverse into the rabbit hole.
@plugins_whitelist - A list of plugin names to load. If not empty, only these plugins will be loaded.
@plugins_blacklist - A list of plugin names to not load.
Returns a dictionary of :
{
'target file name' : [
(0, [{description : "LZMA compressed data..."}]),
(112, [{description : "gzip compressed data..."}])
]
}
'''
# Prefix all directory names with an underscore. This prevents accidental deletion of the original file(s)
# when the user is typing too fast and is trying to delete the extraction directory.
prefix = '_'
dir_extension = 'extracted'
i = 0
total_results = {}
self.matryoshka = matryoshka
# For backwards compatibility
if not isinstance(target_files, type([])):
target_files = [target_files]
if base_dir is None:
base_dir = ''
# Instantiate the Plugins class and load all plugins, if not disabled
self.plugins = Plugins(self, whitelist=plugins_whitelist, blacklist=plugins_blacklist)
if self.load_plugins:
self.plugins._load_plugins()
# Load the magic signatures. This must be done for every scan, as some signature scans
# may use a different list of magic signatures.
self.load_signatures()
while i < self.matryoshka:
new_target_files = []
# Scan each target file
for target_file in target_files:
ignore_files = []
# On the first scan, add the base_dir value to dir_prefix. Subsequent target_file values will have this value prepended already.
if i == 0:
dir_prefix = os.path.join(base_dir, prefix + os.path.basename(target_file))
else:
dir_prefix = os.path.join(os.path.dirname(target_file), prefix + os.path.basename(target_file))
output_dir = unique_file_name(dir_prefix, dir_extension)
# Set the output directory for extracted files to go to
self.extractor.output_directory(output_dir)
if start_callback is not None:
start_callback(target_file)
results = self.single_scan(target_file,
offset=offset,
length=length,
show_invalid_results=show_invalid_results,
callback=callback)
if end_callback is not None:
end_callback(target_file)
# Get a list of extracted file names; don't scan them again.
for (index, results_list) in results:
for result in results_list:
if result['extract']:
ignore_files.append(result['extract'])
# Find all newly created files and add them to new_target_files / new_target_directories
for (dir_path, sub_dirs, files) in os.walk(output_dir):
for fname in files:
fname = os.path.join(dir_path, fname)
if fname not in ignore_files:
new_target_files.append(fname)
# Don't worry about sub-directories
break
total_results[target_file] = results
target_files = new_target_files
i += 1
# Be sure to delete the Plugins instance so that there isn't a lingering reference to
# this Binwalk class instance (lingering handles to this Binwalk instance cause the
# __del__ destructor to not be called).
if self.plugins is not None:
del self.plugins
self.plugins = None
return total_results
def single_scan(self, target_file='', fd=None, offset=0, length=0, show_invalid_results=False, callback=None, plugins_whitelist=[], plugins_blacklist=[]):
'''
Performs a binwalk scan on one target file or file descriptor.
@target_file - File to scan.
@fd - A common.BlockFile object.
@offset - Starting offset at which to start the scan.
@length - Number of bytes to scan. Specify -1 for streams.
@show_invalid_results - Set to True to display invalid results.
@callback - Callback function to be invoked when matches are found.
@plugins_whitelist - A list of plugin names to load. If not empty, only these plugins will be loaded.
@plugins_blacklist - A list of plugin names to not load.
The callback function is passed two arguments: a list of result dictionaries containing the scan results
(one result per dict), and the offset at which those results were identified. Example callback function:
def my_callback(offset, results):
print "Found %d results at offset %d:" % (len(results), offset)
for result in results:
print "\t%s" % result['description']
binwalk.Binwalk().single_scan("firmware.bin", callback=my_callback)
Upon completion, the scan method returns a sorted list of tuples containing a list of results dictionaries
and the offsets at which those results were identified:
scan_results = [
(0, [{description : "LZMA compressed data..."}]),
(112, [{description : "gzip compressed data..."}])
]
See SmartSignature.parse for a more detailed description of the results dictionary structure.
'''
scan_results = {}
fsize = 0
jump_offset = 0
i_opened_fd = False
i_loaded_plugins = False
plugret = PLUGIN_CONTINUE
plugret_start = PLUGIN_CONTINUE
self.total_read = 0
self.total_scanned = 0
self.scan_length = length
self.filter.show_invalid_results = show_invalid_results
self.start_offset = offset
# Check to make sure either a target file or a file descriptor was supplied
if not target_file and fd is None:
raise Exception("Must supply Binwalk.single_scan with a valid file path or BlockFile object")
# Need the total size of the target file, even if we aren't scanning the whole thing
if target_file:
fsize = file_size(target_file)
# If no length was specified, make the length the size of the target file minus the starting offset
if self.scan_length == 0:
self.scan_length = fsize - offset
# Open the target file and seek to the specified start offset
if fd is None:
fd = BlockFile(target_file, length=self.scan_length, offset=offset)
i_opened_fd = True
# If offset is negative (bytes from EOF), BlockFile class will automatically calculate the right offset
offset = fd.offset
# Seek to the starting offset.
#fd.seek(offset)
# If the Plugins class has not already been instantiated, do that now.
if self.plugins is None:
self.plugins = Plugins(self, blacklist=plugins_blacklist, whitelist=plugins_whitelist)
i_loaded_plugins = True
if self.load_plugins:
self.plugins._load_plugins()
# Invoke any pre-scan plugins
plugret_start = self.plugins._pre_scan_callbacks(fd)
# Load the magic signatures if they weren't already loaded.
if not self.magic:
self.load_signatures()
# Main loop, scan through all the data
while not ((plugret | plugret_start) & PLUGIN_TERMINATE):
i = 0
# Read in the next block of data from the target file and make sure it's valid
(data, dlen) = fd.read_block()
if not data or dlen == 0:
break
# The total number of bytes scanned could be bigger than the total number
# of bytes read from the file if the previous signature result specified a
# jump offset that was beyond the end of the then current data block.
#
# If this is the case, we need to index into this data block appropriately in order to
# resume the scan from the appropriate offset.
#
# Don't update dlen though, as it is the literal offset into the data block that we
# are to scan up to in this loop iteration. It is also appended to self.total_scanned,
# which is what we want (even if we have been told to skip part of the block, the skipped
# part is still considered part of the total bytes scanned).
if jump_offset > 0:
total_check = self.total_scanned + dlen
# Is the jump offset beyond the total amount of data that we've currently read in (i.e., in a future data block)?
if jump_offset >= total_check:
i = -1
# Try to seek to the jump offset; this won't work if fd == sys.stdin
try:
fd.seek(jump_offset)
self.total_read = jump_offset
self.total_scanned = jump_offset - dlen
except:
pass
# Is the jump offset inside this block of data?
elif jump_offset > self.total_scanned and jump_offset < total_check:
# Index into this block appropriately; jump_offset is the file offset that
# we need to jump to, and self.total_scanned is the file offset that starts
# the beginning of the current block
i = jump_offset - self.total_scanned
# We're done with jump_offset, zero it out for the next round
jump_offset = 0
# Scan through each block of data looking for signatures
if i >= 0 and i < dlen:
# Scan this data block for a list of offsets which are candidates for possible valid signatures.
# Signatures could be split across the block boundary; since data contains 1KB more than dlen,
# pass up to dlen+MAX_SIGNATURE_SIZE to find_signature_candidates, but don't accept signatures that
# start after the end of dlen.
for candidate in self.parser.find_signature_candidates(data[i:dlen+self.MAX_SIGNATURE_SIZE], (dlen-i)):
# If a previous signature specified a jump offset beyond this candidate signature offset, ignore it
if (i + candidate + self.total_scanned) < jump_offset:
continue
# Reset these values on each loop
smart = {}
results = []
results_offset = -1
# In python3 we need a bytes object to pass to magic.buffer
candidate_data = str2bytes(data[i+candidate:i+candidate+fd.MAX_TRAILING_SIZE])
# Pass the data to libmagic, and split out multiple results into a list
for magic_result in self.parser.split(self.magic.buffer(candidate_data)):
i_set_results_offset = False
# Some signatures need to take into account the length of a given string
# when specifying additional offsets. Parse the string-len keyword to adjust
# for this prior to calling self.smart.parse.
magic_result = self.smart._parse_string_len(magic_result)
# Some file names are not NULL byte terminated, but rather their length is
# specified in a size field. To ensure these are not marked as invalid due to
# non-printable characters existing in the file name, parse the filename(s) and
# trim them to the specified filename length, if one was specified.
magic_result = self.smart._parse_raw_strings(magic_result)
# Invoke any pre-parser callback plugin functions
if not (plugret_start & PLUGIN_STOP_PLUGINS):
raw_result = {'description' : magic_result}
plugret = self.plugins._scan_pre_parser_callbacks(raw_result)
magic_result = raw_result['description']
if (plugret & PLUGIN_TERMINATE):
break
# Make sure this is a valid result before further processing
if not self.filter.invalid(magic_result):
# The smart filter parser returns a dictionary of keyword values and the signature description.
smart = self.smart.parse(magic_result)
# Validate the jump value and check if the response description should be displayed
if self._is_valid(smart, candidate+i, fsize):
# If multiple results are returned and one of them has smart['jump'] set to a non-zero value,
# the calculated results offset will be wrong since i will have been incremented. Only set the
# results_offset value when the first match is encountered.
if results_offset < 0:
results_offset = offset + i + candidate + smart['adjust'] + self.total_scanned
i_set_results_offset = True
# Double check to make sure the smart['adjust'] value is sane.
# If it makes results_offset negative, then it is not sane.
if results_offset >= 0:
smart['offset'] = results_offset
# Invoke any scan plugins
if not (plugret_start & PLUGIN_STOP_PLUGINS):
plugret = self.plugins._scan_callbacks(smart)
results_offset = smart['offset']
if (plugret & PLUGIN_TERMINATE):
break
# Extract the result, if it matches one of the extract rules and is not a delayed extract.
if self.extractor.enabled and not (self.extractor.delayed and smart['delay']) and not ((plugret | plugret_start) & PLUGIN_NO_EXTRACT):
# If the signature did not specify a size, extract to the end of the file.
if not smart['size']:
smart['size'] = fsize-results_offset
smart['extract'] = self.extractor.extract( results_offset,
smart['description'],
target_file,
smart['size'],
name=smart['name'])
if not ((plugret | plugret_start) & PLUGIN_NO_DISPLAY):
# This appears to be a valid result, so append it to the results list.
results.append(smart)
elif i_set_results_offset:
results_offset = -1
# Did we find any valid results?
if results_offset >= 0:
scan_results[results_offset] = results
if callback is not None:
callback(results_offset, results)
# If a relative jump offset was specified, update the absolute jump_offset variable
if has_key(smart, 'jump') and smart['jump'] > 0:
jump_offset = results_offset + smart['jump']
# Track the total number of bytes scanned
self.total_scanned += dlen
# The starting offset only affects the reported offset for results
# in the first block of data. Zero it out after the first block has
# been processed.
offset = 0
# Sort the results before returning them
scan_items = list(scan_results.items())
scan_items.sort()
# Do delayed extraction, if specified.
if self.extractor.enabled and self.extractor.delayed:
scan_items = self.extractor.delayed_extract(scan_items, target_file, fsize)
# Invoke any post-scan plugins
#if not (plugret_start & PLUGIN_STOP_PLUGINS):
self.plugins._post_scan_callbacks(fd)
# Be sure to delete the Plugins instance so that there isn't a lingering reference to
# this Binwalk class instance (lingering handles to this Binwalk instance cause the
# __del__ destructor to not be called).
if i_loaded_plugins:
del self.plugins
self.plugins = None
if i_opened_fd:
fd.close()
return scan_items
def concatenate_results(self, results, new):
'''
Concatenate multiple Binwalk.scan results into one dictionary.
@results - Binwalk results to append new results to.
@new - New data to append to results.
Returns None.
'''
for (new_file_name, new_data) in iterator(new):
if not has_key(results, new_file_name):
results[new_file_name] = new_data
else:
for i in range(0, len(new_data)):
found_offset = False
(new_offset, new_results_list) = new_data[i]
for j in range(0, len(results[new_file_name])):
(offset, results_list) = results[new_file_name][j]
if offset == new_offset:
results_list += new_results_list
results[new_file_name][j] = (offset, results_list)
found_offset = True
break
if not found_offset:
results[new_file_name] += new_data
def _is_valid(self, result, location, file_size):
'''
Determines if a result string is valid and should be displayed to the user or not.
@result - Result dictionary, as returned by self.smart.parse.
@location - The file offset of the result.
@file_size - The total size of the file.
Returns True if the string should be displayed.
Returns False if the string should not be displayed.
'''
if self.filter.show_invalid_results:
return True
if result['invalid'] or result['jump'] < 0 or result['size'] < 0:
return False
if ((location + result['size']) > file_size) or (self.year and result['year'] > self.year) or (self.epoch and result['epoch'] > self.epoch):
return False
desc = result['description']
return (desc and desc is not None and not self.filter.invalid(desc) and self.filter.filter(desc) != self.filter.FILTER_EXCLUDE)
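# Minimal usage sketch (added; the file name is illustrative and this is not part
# of the original module). Binwalk supports the context-manager protocol defined
# above, and scan() returns {target: [(offset, [result dicts]), ...]}:
#
#   with Binwalk(load_extractor=True) as bw:
#       for target, matches in bw.scan(['firmware.bin'], matryoshka=2).items():
#           for offset, results in matches:
#               for result in results:
#                   print(target, offset, result['description'])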
| 37.858099 | 232 | 0.701627 | 27,740 | 0.980905 | 0 | 0 | 0 | 0 | 0 | 0 | 15,582 | 0.55099 |
5eed38c8799a8f20aa9075adc117edac9f20f714 | 149 | py | Python | dodo.py | Ublimjo/nwt | 08b04c376a3792fd4663de2f229617ddabc3d032 | [
"MIT"
]
| 1 | 2018-08-24T09:01:09.000Z | 2018-08-24T09:01:09.000Z | dodo.py | Ublimjo/nwt | 08b04c376a3792fd4663de2f229617ddabc3d032 | [
"MIT"
]
| null | null | null | dodo.py | Ublimjo/nwt | 08b04c376a3792fd4663de2f229617ddabc3d032 | [
"MIT"
]
| null | null | null | def task_clean_junk():
"""Remove junk file"""
return {
'actions': ['rm -rdf $(find . | grep pycache)'],
'clean': True,
}
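# Usage note (added, assuming the pydoit tool is installed): running `doit clean_junk`
# executes the action above, i.e. `rm -rdf $(find . | grep pycache)`.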
| 21.285714 | 56 | 0.496644 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.483221 |
5eeebe655d0529cd4e57b3684dd0b12853503ba1 | 442 | py | Python | greedy_algorithms/6_maximum_salary/largest_number.py | Desaiakshata/Algorithms-problems | 90f4e40ba05e4bdfc783614bb70b9156b05eec0b | [
"MIT"
]
| null | null | null | greedy_algorithms/6_maximum_salary/largest_number.py | Desaiakshata/Algorithms-problems | 90f4e40ba05e4bdfc783614bb70b9156b05eec0b | [
"MIT"
]
| null | null | null | greedy_algorithms/6_maximum_salary/largest_number.py | Desaiakshata/Algorithms-problems | 90f4e40ba05e4bdfc783614bb70b9156b05eec0b | [
"MIT"
]
| null | null | null | #Uses python3
import sys
def largest_number(a):
#write your code here
res = ""
while len(a)!=0:
maxa = a[0]
for x in a:
if int(str(x)+str(maxa))>int(str(maxa)+str(x)):
maxa = x
res += str(maxa)
a.remove(str(maxa))
return res
if __name__ == '__main__':
#input = sys.stdin.read()
data = input().split(' ')
a = data[1:]
print(largest_number(a))
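# Worked example (added): the first input token (presumably the element count) is
# dropped by `data[1:]`, so the line "3 2 21 10" yields a = ['2', '21', '10'].
# The greedy comparison int(x + maxa) > int(maxa + x) then orders them as "22110",
# the largest number that can be formed by concatenating 2, 21 and 10.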
| 19.217391 | 59 | 0.506787 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.167421 |
5eefaff8065c5ecea5f5a36834a9168d04d5bd4e | 2,403 | py | Python | screenblankmgr.py | nsw42/pijuui | fee9b96ceaab6b4f5db7ea41cb86da9a1a3b9eac | [
"BSD-2-Clause"
]
| 1 | 2021-12-20T15:02:54.000Z | 2021-12-20T15:02:54.000Z | screenblankmgr.py | nsw42/pijuui | fee9b96ceaab6b4f5db7ea41cb86da9a1a3b9eac | [
"BSD-2-Clause"
]
| null | null | null | screenblankmgr.py | nsw42/pijuui | fee9b96ceaab6b4f5db7ea41cb86da9a1a3b9eac | [
"BSD-2-Clause"
]
| null | null | null | import logging
import subprocess
class PlayingState:
Inactive = 0
Active = 1
class ProfileBase:
def __init__(self):
raise NotImplementedError()
def on_start_playing(self):
raise NotImplementedError()
def on_stop_playing(self):
raise NotImplementedError()
def on_playing_tick(self):
raise NotImplementedError()
def _set_timeout(self, timeout):
self._run_xset(str(timeout))
def _run_xset(self, s_arg):
cmd = ['xset', 's', s_arg]
logging.debug(cmd)
subprocess.run(cmd)
class ScreenBlankProfileNone(ProfileBase):
def __init__(self):
pass
def on_start_playing(self):
pass
def on_stop_playing(self):
pass
def on_playing_tick(self):
pass
class ScreenBlankProfileBalanced(ProfileBase):
def __init__(self):
pass
def on_start_playing(self):
self._set_timeout(300)
def on_stop_playing(self):
self._set_timeout(30)
def on_playing_tick(self):
pass
class ScreenBlankProfileOnWhenPlaying(ProfileBase):
def __init__(self):
pass
def on_start_playing(self):
self._set_timeout(60 * 60)
def on_stop_playing(self):
self._run_xset('on')
self._set_timeout(10)
def on_playing_tick(self):
self._run_xset('off')
self._run_xset('reset')
class ScreenBlankMgr:
def __init__(self, profile: ProfileBase):
self.state = None
self.profile = profile
self.tick_countdown = 5
def set_state(self, new_state: str):
"""
new_state in ('playing', 'paused', 'stopped')
"""
new_state = PlayingState.Active if (new_state == 'playing') else PlayingState.Inactive
if self.state == new_state:
if self.state == PlayingState.Active:
self.tick_countdown -= 1
if self.tick_countdown <= 0:
self.profile.on_playing_tick()
self.tick_countdown = 5
else:
self.state = new_state
if self.state == PlayingState.Active:
self.profile.on_start_playing()
else:
self.profile.on_stop_playing()
profiles = {
'none': ScreenBlankProfileNone(),
'balanced': ScreenBlankProfileBalanced(),
'onoff': ScreenBlankProfileOnWhenPlaying()
}
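# Usage sketch (added; the calling player loop is hypothetical):
#
#   mgr = ScreenBlankMgr(profiles['onoff'])
#   mgr.set_state('playing')   # long blanking timeout + periodic reset while playing
#   mgr.set_state('stopped')   # screen back on, short blanking timeout restored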
| 22.669811 | 94 | 0.615481 | 2,204 | 0.917187 | 0 | 0 | 0 | 0 | 0 | 0 | 126 | 0.052434 |
5ef0d5fcfc264e4c868fb459e7c8ec1ae720744a | 6,136 | py | Python | warmmail/subscribe/tasks_send.py | sahilsakhuja/warmmail | 8a1f80d26c7a24c9aa054d869266cebd4540d7f2 | [
"MIT"
]
| null | null | null | warmmail/subscribe/tasks_send.py | sahilsakhuja/warmmail | 8a1f80d26c7a24c9aa054d869266cebd4540d7f2 | [
"MIT"
]
| null | null | null | warmmail/subscribe/tasks_send.py | sahilsakhuja/warmmail | 8a1f80d26c7a24c9aa054d869266cebd4540d7f2 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
import os
import urllib.parse
from datetime import date, datetime
from functools import partial
from urllib.parse import quote_plus
import pandas as pd
import plotly.express as px
import pytz
from csci_utils.luigi.requires import Requirement, Requires
from csci_utils.luigi.target import TargetOutput
from django.template.loader import render_to_string
from luigi import (
DateParameter,
ExternalTask,
ListParameter,
LocalTarget,
Parameter,
Target,
Task,
)
from plotly.io import to_image
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
from .models import Subscription
from .tasks_fetch import ConvertAQIFileToParquet
class UrlParameter(Parameter):
"""Descriptor to ensure that a file name is url safe i.e. quoted"""
def normalize(self, x):
return quote_plus(x)
class RowFilterTarget(Target):
"""A target class for filters on rows
Checks to see if any rows exist that satisfy the given filter
If no results found, return True (i.e. task is complete), else False
False - causes Luigi to think that task is pending and runs it + check requirements
"""
def __init__(self, model, **kwargs):
self.model = model
self.kwargs = kwargs
def exists(self):
vals = self.model.objects.filter(**self.kwargs)
if not vals:
return True
return False
class RowFilterOutput:
"""Descriptor for the output method
Returns a "RowFilterTarget" for the Luigi task
Additional feature: in case there are values returned from the filter,
the descriptor can accept names of fields and parameters on the parent class
and update the parent class parameters -
this ensures that downstream tasks do not need to call the database again
"""
def __init__(self, model, entries_param=None, field=None, **kwargs):
self.model = model
entries_param = (
entries_param if isinstance(entries_param, list) else [entries_param]
)
field = field if isinstance(field, list) else [field]
self.parent_updates = dict(zip(entries_param, field))
self.kwargs = kwargs
def __get__(self, task, cls):
if not task:
return self
return partial(self.__call__, task)
def __call__(self, task):
vals = self.model.objects.filter(**self.kwargs)
if vals and self.parent_updates:
for entry, field in self.parent_updates.items():
setattr(task, entry, tuple(set(getattr(v, field) for v in vals)))
return RowFilterTarget(self.model, **self.kwargs)
class GenerateEmails(ExternalTask):
"""
Task to generate the html content to be sent via email.
Uses Django's render to string functionality.
:param city: name of the city for which report has to be generated
:param pol: name of the dominant pollutant for that city
:param date: the date for which report has to be generated
"""
city = UrlParameter(default=None)
pol = Parameter(default="pm25")
date = DateParameter(default=date.today())
requires = Requires()
historical = Requirement(ConvertAQIFileToParquet)
output = TargetOutput(
factory=LocalTarget,
file_pattern="emails/{task.city}-{task.date}",
ext=".html",
)
def run(self):
city = urllib.parse.unquote(self.city)
df = pd.read_parquet(self.historical.output().path)
df = df[df["City"] == city].sort_index(ascending=False)
df = df[df["Specie"].isin(["pm10", "pm25"])]
df = df.pivot(index=None, columns="Specie", values="median")
df.fillna(0, inplace=True)
df.sort_index(inplace=True, ascending=False)
last_7_days = df.iloc[:6]
data = {"aqi": df.iloc[0][self.pol]}
df["month"] = df.index.strftime("%Y-%m")
df_month = df.groupby("month").agg("mean")
last_7_days_bar = px.bar(last_7_days, title="Last 7 Days", barmode="group")
month_bar = px.bar(df_month, title="Monthly", barmode="group")
from base64 import b64encode
data["image_last_7_days"] = b64encode(
to_image(last_7_days_bar, format="png", engine="kaleido")
).decode()
data["image_months"] = b64encode(
to_image(month_bar, format="png", engine="kaleido")
).decode()
html = render_to_string(
"subscribe/newsletter_email_template.html", {"data": data}
)
with open(self.output().path, "w") as f:
f.write(html)
class CheckForPendingEmails(Task):
"""
Task to check for pending emails. This uses a "RowFilterOutput" which checks for rows in the database
which have the "next_email_date" in the past.
For each such row found (city + dominant pollutant fetched from the DB), the task requires a GenerateEmails task.
"""
cities = ListParameter(default=None)
pols = ListParameter(default=None)
date = DateParameter(default=date.today())
def requires(self):
return {
k: self.clone(GenerateEmails, city=k, pol=self.pols[i])
for i, k in enumerate(self.cities)
}
output = RowFilterOutput(
model=Subscription,
entries_param=["cities", "pols"],
field=["city", "dominentpol"],
next_email_date__lte=datetime.now(tz=pytz.utc),
)
def run(self):
for city in self.cities:
vals = Subscription.objects.filter(
next_email_date__lte=datetime.now(tz=pytz.utc), city__exact=city
)
emails = list(map(lambda x: x.email, vals))
html = open(self.input()[city].path).read()
message = Mail(
from_email="[email protected]",
to_emails=emails[0],
subject=f"Daily AQI Update for {city} from WarmMail",
html_content=html,
)
try:
sg = SendGridAPIClient(os.environ.get("SENDGRID_API_KEY"))
sg.send(message)
except Exception as e:
print(e)
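# Scheduling sketch (added; not part of the original module). The pending-email
# check could be kicked off through luigi, e.g.:
#
#   import luigi
#   luigi.build([CheckForPendingEmails()], local_scheduler=True)
#
# CheckForPendingEmails fills its `cities`/`pols` parameters from the database via
# RowFilterOutput before requiring one GenerateEmails task per city.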
| 32.638298 | 116 | 0.64309 | 5,418 | 0.882986 | 0 | 0 | 0 | 0 | 0 | 0 | 1,719 | 0.28015 |
5ef260b5bf84eb695b2bd8138b23ebab7ec1405b | 4,779 | py | Python | cno/chrutils.py | CherokeeLanguage/cherokee-audio-data | a10b7b38c0c1b56338561c917cef18a078ca573c | [
"CC0-1.0",
"MIT"
]
| 2 | 2021-09-15T19:41:01.000Z | 2022-01-12T17:57:08.000Z | cno/chrutils.py | CherokeeLanguage/cherokee-audio-data | a10b7b38c0c1b56338561c917cef18a078ca573c | [
"CC0-1.0",
"MIT"
]
| 1 | 2021-10-08T18:06:29.000Z | 2021-10-08T18:48:44.000Z | cno/chrutils.py | CherokeeLanguage/cherokee-audio-data | a10b7b38c0c1b56338561c917cef18a078ca573c | [
"CC0-1.0",
"MIT"
]
| null | null | null | #!/usr/bin/env python3
def test():
cedTest = ["U²sgal²sdi ạ²dv¹ne²³li⁴sgi.", "Ụ²wo²³dị³ge⁴ɂi gi²hli a¹ke²³he³²ga na ạ²chu⁴ja.",
"Ạ²ni²³tạɂ³li ạ²ni²sgạ²ya a¹ni²no²hạ²li²³do³²he, ạ²hwi du¹ni²hyọ²he.",
"Sa¹gwu⁴hno ạ²sgạ²ya gạ²lo¹gwe³ ga²ne²he sọ³ɂị³hnv³ hla².",
"Na³hnv³ gạ²lo¹gwe³ ga²ne⁴hi u²dlv²³kwsạ²ti ge¹se³, ạ²le go²hu⁴sdi yu²³dv³²ne⁴la a¹dlv²³kwsge³.",
"A¹na³ɂi²sv⁴hnv go²hu⁴sdi wu²³ni³go²he do²jụ²wạ³ɂị²hlv,",
"na³hnv³ gạ²lo¹gwe³ ga²ne⁴hi kị²lạ²gwu ị²yv⁴da wị²du²³sdạ³yo²hle³ o²³sdạ²gwu nu²³ksẹ²stạ²nv⁴na ị²yu³sdi da¹sdạ²yo²hị²hv⁴.",
"U²do²hị²yu⁴hnv³ wu²³yo³hle³ ạ²le u¹ni²go²he³ gạ²nv³gv⁴.",
"Na³hnv³ gạ²lo¹gwe³ nị²ga²³ne³hv⁴na \"ạ²hwi e¹ni²yo³ɂa!\" u¹dv²hne.",
"\"Ji²yo³ɂe³²ga\" u¹dv²hne na³ gạ²lo¹gwe³ ga²ne⁴hi, a¹dlv²³kwsgv³.",
"U¹na³ne²lu²³gi³²se do²jụ²wạ³ɂị²hlv³ di³dla, nạ²ɂv²³hnị³ge⁴hnv wu²³ni³luh²ja u¹ni²go²he³ so²³gwị³li gạɂ³nv⁴.",
"\"So²³gwị³lị³le³² i¹nạ²da²hị³si\" u¹dv²hne³ na³ u²yo²hlv⁴.", "\"Hạ²da²hị³se³²ga³\" a¹go¹se²³le³."]
for a in cedTest:
print("_______________");
print();
print(a);
print(ced2mco(a));
asciiCedText = ["ga.2da.2de3ga", "ha.2da.2du1ga", "u2da.2di23nv32di", "u1da.2di23nv32sv23?i", "a1da.2de3go3?i"]
for a in asciiCedText:
print("_______________");
print();
print(a);
print(ascii_ced2mco(a));
return
# Converts MCO annotation into pseudo English phonetics for use by the aeneas alignment package
# lines prefixed with '#' are returned with the '#' removed, but otherwise unchanged.
def mco2espeak(text: str):
import unicodedata as ud
import re
if (len(text.strip()) == 0):
return ""
# Handle specially flagged text
if (text[0].strip() == "#"):
if text[1] != "!":
return text.strip()[1:]
else:
text = text[2:]
newText = ud.normalize('NFD', text.strip()).lower()
if (newText[0] == ""):
newText = newText[1:]
# remove all tone indicators
newText = re.sub("[\u030C\u0302\u0300\u0301\u030b]", "", newText)
newText = "[[" + newText.strip() + "]]"
newText = newText.replace(" ", "]] [[")
newText = newText.replace("'", "]]'[[")
newText = newText.replace(".]]", "]].")
newText = newText.replace(",]]", "]],")
newText = newText.replace("!]]", "]]!")
newText = newText.replace("?]]", "]]?")
newText = newText.replace(":]]", "]]:")
newText = newText.replace(";]]", "]];")
newText = newText.replace("\"]]", "]]\"")
newText = newText.replace("']]", "]]'")
newText = newText.replace(" ]]", "]] ")
newText = newText.replace("[[ ", " [[")
newText = re.sub("(?i)([aeiouv]):", "\\1", newText)
# convert all vowels into approximate espeak x-sampa escaped forms
newText = newText.replace("A", "0")
newText = newText.replace("a", "0")
newText = newText.replace("v", "V")
newText = newText.replace("tl", "tl#")
newText = newText.replace("hl", "l#")
newText = newText.replace("J", "dZ")
newText = newText.replace("j", "dZ")
newText = newText.replace("Y", "j")
newText = newText.replace("y", "j")
newText = newText.replace("Ch", "tS")
newText = newText.replace("ch", "tS")
newText = newText.replace("ɂ", "?")
return newText
def ced2mco(text: str):
import unicodedata as ud
import re
tones2mco = [("²³", "\u030C"), ("³²", "\u0302"), ("¹", "\u0300"), ("²", ""), ("³", "\u0301"), ("⁴", "\u030b")]
text = ud.normalize('NFD', text)
text = re.sub("(?i)([aeiouv])([^¹²³⁴\u0323]+)", "\\1\u0323\\2", text)
text = re.sub("(?i)([aeiouv])([¹²³⁴]+)$", "\\1\u0323\\2", text)
text = re.sub("(?i)([aeiouv])([¹²³⁴]+)([^¹²³⁴a-zɂ])", "\\1\u0323\\2\\3", text)
text = re.sub("(?i)([^aeiouv\u0323¹²³⁴]+)([¹²³⁴]+)", "\\2\\1", text)
text = re.sub("(?i)([aeiouv])([¹²³⁴]+)", "\\1\\2:", text)
text = text.replace("\u0323", "")
text = re.sub("(?i)([aeiouv])²$", "\\1\u0304", text)
text = re.sub("(?i)([aeiouv])²([^a-zɂ¹²³⁴:])", "\\1\u0304\\2", text)
for ced2mcotone in tones2mco:
text = text.replace(ced2mcotone[0], ced2mcotone[1])
#
return ud.normalize('NFC', text)
def ascii_ced2mco(text: str):
import unicodedata as ud
text = ud.normalize('NFD', text)
return ced2mco(ascii_ced2ced(text))
def ascii_ced2ced(text: str):
import unicodedata as ud
text = ud.normalize('NFD', text)
text = text.replace(".", "\u0323")
text = text.replace("1", "¹")
text = text.replace("2", "²")
text = text.replace("3", "³")
text = text.replace("4", "⁴")
text = text.replace("?", "ɂ")
return text
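# Added example of the mapping above (ASCII digits become superscript tone marks,
# '.' becomes a combining dot below, '?' becomes the glottal stop letter):
#   ascii_ced2ced("ga.2da.2de3ga") -> "gạ²dạ²de³ga"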
if __name__ == "__main__":
test()
| 38.232 | 138 | 0.586943 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,476 | 0.475422 |
5ef27b5395234b7acc5798e9c4c4dad901d9aba3 | 2,585 | py | Python | molo/usermetadata/tests/test_tags.py | praekelt/molo.usermetadata | 90cc0dffe55db8ece208d13d37d76956daadfa5a | [
"BSD-2-Clause"
]
| null | null | null | molo/usermetadata/tests/test_tags.py | praekelt/molo.usermetadata | 90cc0dffe55db8ece208d13d37d76956daadfa5a | [
"BSD-2-Clause"
]
| 14 | 2016-04-21T17:19:08.000Z | 2018-06-18T12:49:58.000Z | molo/usermetadata/tests/test_tags.py | praekeltfoundation/molo.usermetadata | 90cc0dffe55db8ece208d13d37d76956daadfa5a | [
"BSD-2-Clause"
]
| null | null | null | import pytest
from django.test import TestCase, Client
from django.core.urlresolvers import reverse
from molo.core.tests.base import MoloTestCaseMixin
from molo.core.models import Main, SiteLanguageRelation, Languages
from molo.usermetadata.models import PersonaIndexPage, PersonaPage
from wagtail.wagtailcore.models import Site
from wagtail.contrib.settings.context_processors import SettingsProxy
@pytest.mark.django_db
class TestPages(TestCase, MoloTestCaseMixin):
def setUp(self):
self.mk_main()
self.main = Main.objects.all().first()
self.english = SiteLanguageRelation.objects.create(
language_setting=Languages.for_site(self.main.get_site()),
locale='en', is_active=True
)
self.index = PersonaIndexPage(title='Personae', slug="personae")
self.main.add_child(instance=self.index)
self.index.save_revision().publish()
self.page = PersonaPage(title="child", slug="child")
self.index.add_child(instance=self.page)
self.page.save_revision().publish()
self.client = Client()
# Login
self.user = self.login()
site = Site.objects.get(is_default_site=True)
setting = SettingsProxy(site)
self.persona_settings = setting['usermetadata']['PersonaeSettings']
self.persona_settings.persona_required = True
self.persona_settings.save()
self.site_settings = setting['core']['SiteSettings']
self.site_settings.ga_tag_manager = 'GTM-xxxx'
self.site_settings.save()
def test_persona_selected_tag(self):
response = self.client.get('/')
self.assertRedirects(
response, reverse('molo.usermetadata:persona') + '?next=/')
response = self.client.get('%s?next=%s' % ((
reverse(
'molo.usermetadata:set_persona',
kwargs={'persona_slug': self.page.slug})),
'/'))
self.assertTrue(self.client.session['MOLO_PERSONA_SELECTED'])
response = self.client.get('/')
self.assertContains(response, 'persona=child')
def test_skip_persona_selected_tag(self):
response = self.client.get('/')
self.assertRedirects(
response, reverse('molo.usermetadata:persona') + '?next=/')
response = self.client.get('%s?next=%s' % ((
reverse('molo.usermetadata:skip_persona')), '/'))
self.assertTrue(self.client.session['MOLO_PERSONA_SELECTED'])
response = self.client.get('/')
self.assertContains(response, 'persona=skip')
| 34.013158 | 75 | 0.659574 | 2,158 | 0.834816 | 0 | 0 | 2,181 | 0.843714 | 0 | 0 | 373 | 0.144294 |
5ef2f309d751c48873dcfc34c92ab93f2ef03256 | 1,793 | py | Python | app/db_con.py | bmugenya/Zup | 1677c1e4e263409f9f5fcaac7411dd403e32650e | [
"MIT"
]
| null | null | null | app/db_con.py | bmugenya/Zup | 1677c1e4e263409f9f5fcaac7411dd403e32650e | [
"MIT"
]
| 1 | 2020-03-06T17:32:15.000Z | 2020-03-06T17:32:15.000Z | app/db_con.py | bmugenya/Zup | 1677c1e4e263409f9f5fcaac7411dd403e32650e | [
"MIT"
]
| null | null | null | import psycopg2
url = "dbname='da43n1slakcjkc' user='msqgxzgmcskvst' host='ec2-54-80-184-43.compute-1.amazonaws.com' port=5432 password='9281f925b1e2298e8d62812d9d4e430c1054db62e918c282d7039fa85b1759fa'"
class database_setup(object):
def __init__(self):
self.conn = psycopg2.connect(url)
self.cursor = self.conn.cursor()
def destroy_tables(self):
self.cursor.execute("""DROP TABLE IF EXISTS user CASCADE;""")
self.conn.commit()
def create_tables(self):
self.cursor.execute("""CREATE TABLE IF NOT EXISTS Users (
user_id SERIAL NOT NULL,
fname VARCHAR(25) NOT NULL,
lname VARCHAR(25) NOT NULL,
post_date DATE NOT NULL DEFAULT CURRENT_DATE,
email VARCHAR(50) UNIQUE NOT NULL,
password VARCHAR(256) NOT NULL,
photo VARCHAR(255) NOT NULL,
PRIMARY KEY (email)
);""")
self.cursor.execute("""CREATE TABLE IF NOT EXISTS Report (
report_id SERIAL NOT NULL,
num_tweet INT NOT NULL,
tweet VARCHAR(255) NOT NULL,
plot_bar VARCHAR(255) NOT NULL,
plot_pie VARCHAR(255) NOT NULL,
post_date DATE NOT NULL DEFAULT CURRENT_DATE,
email VARCHAR(50) REFERENCES Users(email) NOT NULL,
PRIMARY KEY (report_id)
);""")
self.cursor.execute("""CREATE TABLE IF NOT EXISTS Config (
config_id SERIAL NOT NULL,
consumerKey TEXT NOT NULL,
consumerSecret TEXT NOT NULL,
accessToken TEXT NOT NULL,
accessSecret TEXT NOT NULL,
email VARCHAR(50) REFERENCES Users(email) NOT NULL,
PRIMARY KEY (config_id)
);""")
self.conn.commit()
| 34.480769 | 187 | 0.605131 | 1,586 | 0.884551 | 0 | 0 | 0 | 0 | 0 | 0 | 1,389 | 0.774679 |
5ef2f8f0dbedcc720d930427f98c729897cff0e0 | 780 | py | Python | server/dao/messageDao.py | ZibingZhang/Level-Up | e936eef7fc4f17e8bb392f98c7dff37dfad9d47b | [
"MIT"
]
| null | null | null | server/dao/messageDao.py | ZibingZhang/Level-Up | e936eef7fc4f17e8bb392f98c7dff37dfad9d47b | [
"MIT"
]
| 1 | 2020-01-23T19:22:06.000Z | 2020-01-23T19:23:47.000Z | server/dao/messageDao.py | ZibingZhang/Level-Up | e936eef7fc4f17e8bb392f98c7dff37dfad9d47b | [
"MIT"
]
| null | null | null | from constants import cursor
def add_message(player_name, message):
cursor.execute(
"INSERT INTO levelup.messages ("
"SENDER, MESSAGE"
") VALUES ("
"%s, %s"
")", (player_name, message)
)
def reset():
cursor.execute(
"DELETE FROM levelup.messages"
)
cursor.execute(
"ALTER TABLE levelup.messages AUTO_INCREMENT=1"
)
def get_largest_id():
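    # Returns the largest message ID, or 0 when the messages table is empty.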
cursor.execute(
"SELECT MAX(ID) FROM levelup.messages"
)
id = cursor.fetchall()[0][0]
return int(id) if id is not None else 0
def get_next_messages(message_id):
cursor.execute(
"SELECT SENDER, MESSAGE FROM levelup.messages WHERE id>%s ORDER BY ID ASC",
(message_id, )
)
return cursor.fetchall()
| 21.081081 | 83 | 0.603846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 261 | 0.334615 |
5ef3a63fa138240896cecf671d1c8882815b58b3 | 3,248 | py | Python | skeletrack/bbox.py | mpeven/skeletal-tracker | ddb6e7d59899c0f3f0470805006e5c5c4bcabe33 | [
"MIT"
]
| null | null | null | skeletrack/bbox.py | mpeven/skeletal-tracker | ddb6e7d59899c0f3f0470805006e5c5c4bcabe33 | [
"MIT"
]
| null | null | null | skeletrack/bbox.py | mpeven/skeletal-tracker | ddb6e7d59899c0f3f0470805006e5c5c4bcabe33 | [
"MIT"
]
| null | null | null | import numpy as np
import shapely.geometry as geom
class Bbox:
def __init__(self, name, part_id, depth_image, xyz, box_size, projection):
if not isinstance(xyz, np.ndarray):
raise ValueError("xyz must be an np.ndarray")
self.name = name
self.id = part_id
self.center = np.array([xyz[0], xyz[1]])
self.z = xyz[2]
self.im_d = depth_image
self.im_d[self.im_d == 0] = 255
x_delta_scaled = box_size[0]/2
self.weight = 1.0
y_delta_scaled = box_size[1]/2
self.xmin, self.xmax = xyz[0]-x_delta_scaled, xyz[0]+x_delta_scaled
self.ymin, self.ymax = xyz[1]-y_delta_scaled, xyz[1]+y_delta_scaled
self.poly = geom.box(self.xmin, self.ymin, self.xmax, self.ymax)
self.color_min = (int(projection['fx']*self.xmin/xyz[2] + projection['cx']),
int(projection['fy']*self.ymin/xyz[2] + projection['cy']))
self.color_max = (int(projection['fx']*self.xmax/xyz[2] + projection['cx']),
int(projection['fy']*self.ymax/xyz[2] + projection['cy']))
self.depth_min = (int(projection['fx_d']*self.xmin/xyz[2] + projection['cx_d']),
int(projection['fy_d']*self.ymin/xyz[2] + projection['cy_d']))
self.depth_max = (int(projection['fx_d']*self.xmax/xyz[2] + projection['cx_d']),
int(projection['fy_d']*self.ymax/xyz[2] + projection['cy_d']))
def __str__(self):
return "{{{: 1.4f},{: 1.4f}}}, {{{: 1.4f},{: 1.4f}}}".format(self.xmin, self.ymin, self.xmax, self.ymax)
def __repr__(self):
return "(bbox: {{{: 1.4f},{: 1.4f}}}, {{{: 1.4f},{: 1.4f}}})".format(self.xmin, self.ymin, self.xmax, self.ymax)
def size(self):
return (self.xmax - self.xmin) * (self.ymax - self.ymin)
def get_bb_depth_matrix(self):
""" Get the portion of the depth image inside the bounding box """
min_x, max_x = sorted((self.depth_min[0], self.depth_max[0]))
min_y, max_y = sorted((self.depth_min[1], self.depth_max[1]))
bounded_im = self.im_d[min_y: max_y+1, min_x: max_x+1]
return bounded_im
def overlap(self, bb2):
dx = min(self.xmax, bb2.xmax) - max(self.xmin, bb2.xmin)
dy = min(self.ymax, bb2.ymax) - max(self.ymin, bb2.ymin)
if (dx>=0) and (dy>=0):
return dx*dy
return 0
def p_over(self, bb2):
return self.overlap(bb2)/(min(self.size(), bb2.size()))
def p_depth(self, bb2):
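        # Depth affinity of the two boxes: the mean of two Gaussian kernels
        # exp(-(mean1 - mean2)**2 / (2 * std_i**2)) computed over the depth pixels
        # inside each box, so boxes with similar depth statistics score close to 1.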
bounded_im1 = self.get_bb_depth_matrix()
bounded_im2 = bb2.get_bb_depth_matrix()
        # Debug check: numpy arrays have no `.empty` attribute, so test `.size` instead.
        print(bounded_im1.size == 0 or bounded_im2.size == 0)
mean1 = np.mean(bounded_im1)
mean2 = np.mean(bounded_im2)
stdev1 = np.std(bounded_im1)
stdev2 = np.std(bounded_im2)
        half_negative_square_of_mean_difference = -0.5 * (mean1 - mean2) ** 2
term1_power = half_negative_square_of_mean_difference / (stdev1 ** 2)
term2_power = half_negative_square_of_mean_difference / (stdev2 ** 2)
out = (np.exp(term1_power) + np.exp(term2_power))/2
return out
def prob(self, bb2, alpha):
return alpha * self.p_over(bb2) + (1-alpha) * self.p_depth(bb2)
| 44.493151 | 120 | 0.594828 | 3,195 | 0.983682 | 0 | 0 | 0 | 0 | 0 | 0 | 273 | 0.084052 |
5ef50480947622fa6c85f38cc28d083417268f20 | 351 | py | Python | apps/snippet/admin.py | AniPython/ani | 2536ac9ddae2b8396b634f982fb1083339b4a389 | [
"MIT"
]
| null | null | null | apps/snippet/admin.py | AniPython/ani | 2536ac9ddae2b8396b634f982fb1083339b4a389 | [
"MIT"
]
| null | null | null | apps/snippet/admin.py | AniPython/ani | 2536ac9ddae2b8396b634f982fb1083339b4a389 | [
"MIT"
]
| null | null | null | from django.contrib import admin
from .models import Tag, Article
@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
list_display = ('name', 'order')
list_editable = ('order',)
@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
list_display = ['title', 'author']
readonly_fields = ['create_time', 'update_time']
| 19.5 | 52 | 0.709402 | 230 | 0.655271 | 0 | 0 | 276 | 0.786325 | 0 | 0 | 61 | 0.173789 |
5ef67226c4fddb4ea740eed126e252d451b1063d | 1,326 | py | Python | test/functional/test_framework/script_util.py | TopoX84/newlux | 555b9f7f9e4be4ef879f20083d8cf80ed8f7777e | [
"MIT"
]
| 1,389 | 2017-06-28T02:35:01.000Z | 2022-03-25T20:09:01.000Z | test/functional/test_framework/script_util.py | TopoX84/newlux | 555b9f7f9e4be4ef879f20083d8cf80ed8f7777e | [
"MIT"
]
| 1,039 | 2015-03-25T23:58:32.000Z | 2022-03-30T00:41:16.000Z | test/functional/test_framework/script_util.py | TopoX84/newlux | 555b9f7f9e4be4ef879f20083d8cf80ed8f7777e | [
"MIT"
]
| 564 | 2017-06-28T03:55:03.000Z | 2022-03-30T14:57:40.000Z | #!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Useful Script constants and utils."""
from test_framework.script import CScript
# To prevent a "tx-size-small" policy rule error, a transaction has to have a
# non-witness size of at least 82 bytes (MIN_STANDARD_TX_NONWITNESS_SIZE in
# src/policy/policy.h). Considering a Tx with the smallest possible single
# input (blank, empty scriptSig), and with an output omitting the scriptPubKey,
# we get to a minimum size of 60 bytes:
#
# Tx Skeleton: 4 [Version] + 1 [InCount] + 1 [OutCount] + 4 [LockTime] = 10 bytes
# Blank Input: 32 [PrevTxHash] + 4 [Index] + 1 [scriptSigLen] + 4 [SeqNo] = 41 bytes
# Output: 8 [Amount] + 1 [scriptPubKeyLen] = 9 bytes
#
# Hence, the scriptPubKey of the single output has to have a size of at
# least 22 bytes, which corresponds to the size of a P2WPKH scriptPubKey.
# The following script constant consists of a single push of 21 bytes of 'a':
# <PUSH_21> <21-bytes of 'a'>
# resulting in a 22-byte size. It should be used whenever (small) fake
# scriptPubKeys are needed, to guarantee that the minimum transaction size is
# met.
DUMMY_P2WPKH_SCRIPT = CScript([b'a' * 21])
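# Illustrative sanity check (minimal sketch, assuming CScript serializes the
# 21-byte push as 1 length byte + 21 data bytes = 22 bytes):
if __name__ == "__main__":
    assert len(DUMMY_P2WPKH_SCRIPT) == 22
    assert 10 + 41 + 9 + len(DUMMY_P2WPKH_SCRIPT) == 82  # skeleton + blank input + output + scriptPubKey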
| 51 | 84 | 0.737557 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,222 | 0.921569 |
5efb1967191c3b432f3eb4d402361c056b7541a9 | 4,085 | py | Python | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/Protocol/Torrent/TorrentIPC.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | [
"BSD-3-Clause"
]
| 1 | 2017-03-28T06:41:51.000Z | 2017-03-28T06:41:51.000Z | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/Protocol/Torrent/TorrentIPC.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | [
"BSD-3-Clause"
]
| null | null | null | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/Protocol/Torrent/TorrentIPC.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | [
"BSD-3-Clause"
]
| 1 | 2016-12-13T21:08:58.000Z | 2016-12-13T21:08:58.000Z | #!/usr/bin/env python
#
# Copyright (C) 2006 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: [email protected]
# to discuss alternative licensing.
# -------------------------------------------------------------------------
# Licensed to the BBC under a Contributor Agreement: RJL
"""(Bit)Torrent IPC messages"""
from Kamaelia.BaseIPC import IPC
# ====================== Messages to send to TorrentMaker =======================
class TIPCMakeTorrent(IPC):
"Create a .torrent file"
Parameters = [ "trackerurl", "log2piecesizebytes", "title", "comment", "srcfile" ]
#Parameters:
# trackerurl - the URL of the BitTorrent tracker that will be used
# log2piecesizebytes - log base 2 of the hash-piece-size, sensible value: 18
# title - name of the torrent
# comment - a field that can be read by users when they download the torrent
# srcfile - the file that the .torrent file will have metainfo about
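# Example (sketch; assumes the IPC base class accepts the Parameters above as
# keyword arguments, and the URL/file names below are placeholders):
#   msg = TIPCMakeTorrent(trackerurl="http://tracker.example/announce",
#                         log2piecesizebytes=18, title="example torrent",
#                         comment="created for testing", srcfile="payload.bin")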
# ========= Messages for TorrentPatron to send to TorrentService ================
# a message for TorrentClient (i.e. to be passed on by TorrentService)
class TIPCServicePassOn(IPC):
"Add a client to TorrentService"
Parameters = [ "replyService", "message" ]
#Parameters: replyService, message
# request to add a TorrentPatron to a TorrentService's list of clients
class TIPCServiceAdd(IPC):
"Add a client to TorrentService"
Parameters = [ "replyService" ]
#Parameters: replyService
# request to remove a TorrentPatron from a TorrentService's list of clients
class TIPCServiceRemove(IPC):
"Remove a client from TorrentService"
Parameters = [ "replyService" ]
#Parameters: replyService
# ==================== Messages for TorrentClient to produce ====================
# a new torrent has been added with id torrentid
class TIPCNewTorrentCreated(IPC):
"New torrent %(torrentid)d created in %(savefolder)s"
Parameters = [ "torrentid", "savefolder" ]
#Parameters: torrentid, savefolder
# the torrent you requested me to download is already being downloaded as torrentid
class TIPCTorrentAlreadyDownloading(IPC):
"That torrent is already downloading!"
Parameters = [ "torrentid" ]
#Parameters: torrentid
# for some reason the torrent could not be started
class TIPCTorrentStartFail(object):
"Torrent failed to start!"
Parameters = []
#Parameters: (none)
# message containing the current status of a particular torrent
class TIPCTorrentStatusUpdate(IPC):
"Current status of a single torrent"
def __init__(self, torrentid, statsdictionary):
super(TIPCTorrentStatusUpdate, self).__init__()
self.torrentid = torrentid
self.statsdictionary = statsdictionary
def __str__(self):
return "Torrent %d status : %s" % (self.torrentid, str(int(self.statsdictionary.get("fractionDone",0) * 100)) + "%")
# ====================== Messages to send to TorrentClient ======================
# create a new torrent (a new download session) from a .torrent file's binary contents
class TIPCCreateNewTorrent(IPC):
"Create a new torrent"
Parameters = [ "rawmetainfo" ]
#Parameters: rawmetainfo - the contents of a .torrent file
# close a running torrent
class TIPCCloseTorrent(IPC):
"Close torrent %(torrentid)d"
Parameters = [ "torrentid" ]
#Parameters: torrentid
| 40.04902 | 124 | 0.682742 | 2,064 | 0.505263 | 0 | 0 | 0 | 0 | 0 | 0 | 3,027 | 0.741004 |
5efb27ff2e3645c70f7c8e38f1cd5d5485dc77ac | 12,418 | py | Python | srcf/database/schema.py | danielchriscarter/srcf-python | a7143afd5340338094131a51f560efcd874457d2 | [
"MIT"
]
| null | null | null | srcf/database/schema.py | danielchriscarter/srcf-python | a7143afd5340338094131a51f560efcd874457d2 | [
"MIT"
]
| 2 | 2020-08-23T17:23:28.000Z | 2021-04-01T18:32:11.000Z | srcf/database/schema.py | danielchriscarter/srcf-python | a7143afd5340338094131a51f560efcd874457d2 | [
"MIT"
]
| 3 | 2021-01-12T00:06:39.000Z | 2021-09-26T23:31:15.000Z | from __future__ import print_function, unicode_literals
from binascii import unhexlify
from enum import Enum
import os
import pwd
import six
from sqlalchemy import Column, Integer, String, Boolean, DateTime, Text, Enum as SQLAEnum, Numeric
from sqlalchemy import event
from sqlalchemy.dialects.postgresql import HSTORE
from sqlalchemy.schema import Table, FetchedValue, CheckConstraint, ForeignKey, DDL
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.ext.mutable import MutableDict
from .compat import MemberCompat, SocietyCompat, AdminsSetCompat
__all__ = ["Member", "Society", "PendingAdmin",
"POSTGRES_USER", "RESTRICTED"]
# Should we make the notes & danger flags, and pending-admins
# tables available?
# These postgres roles have special permissions / are mentioned
# in the schema. Everyone else should connect as 'nobody'
schema_users = ("root", "srcf-admin", "hades")
# When connecting over a unix socket, postgres uses `getpeereid`
# for authentication; this is the number that matters:
euid_name = pwd.getpwuid(os.geteuid()).pw_name
if euid_name in schema_users or euid_name.endswith("-adm"):
POSTGRES_USER = euid_name
else:
POSTGRES_USER = "nobody"
is_root = POSTGRES_USER == "root" or POSTGRES_USER.endswith("-adm")
is_webapp = POSTGRES_USER == "srcf-admin"
is_hades = POSTGRES_USER == "hades"
RESTRICTED = not is_root
def _hexdump(raw):
rendered = "".join(chr(x) if len(repr(chr(x))) == 3 else "." for x in range(256))
safe = []
for pos in range(0, len(raw), 16):
line = raw[pos:pos + 16]
hex_ = " ".join("{:02x}".format(c) for c in line)
if len(line) > 8:
hex_ = "{} {}".format(hex_[:24], hex_[24:])
chars = "".join(rendered[c] if c < len(rendered) else "." for c in line)
safe.append("{:08x} {:48s} |{}|".format(pos, hex_, chars))
return "\n".join(safe)
CRSID_TYPE = String(7)
SOCIETY_TYPE = String(16)
Base = declarative_base()
class MailHandler(Enum):
"""
Choices for handling of email sent to `@srcf.net` addresses.
"""
forward = 1
"""
Forward emails to the user's registered contact address.
"""
pip = 2
"""
Process emails using Exim.
"""
hades = 3
"""
Deliver emails to the user's Hades mailbox.
"""
class Member(Base, MemberCompat):
__tablename__ = 'members'
crsid = Column(CRSID_TYPE, CheckConstraint('crsid = lower(crsid)'),
primary_key=True)
surname = Column(String(100))
preferred_name = Column(String(100))
member = Column(Boolean, nullable=False)
user = Column(Boolean, nullable=False)
disk_quota_gb = Column(Integer, FetchedValue())
disk_usage_gb = Column(Numeric, FetchedValue())
disk_usage_updated = Column(DateTime(timezone=True), FetchedValue())
if is_root or is_webapp:
uid = Column(Integer, FetchedValue())
gid = Column(Integer, FetchedValue())
email = Column(String(100), CheckConstraint("email ~ E'@'"), unique=True)
# FetchedValue: these columns are set by triggers (see below)
joined = Column(DateTime(timezone=True), FetchedValue())
modified = Column(DateTime(timezone=True), FetchedValue())
danger = Column(Boolean, nullable=False, server_default='f')
notes = Column(Text, nullable=False, server_default='')
domains = relationship("Domain", primaryjoin="foreign(Domain.owner) == Member.crsid")
if is_root or is_webapp or is_hades:
mail_handler = Column(SQLAEnum(*(handler.name for handler in MailHandler)),
nullable=False, server_default='pip')
__table_args__ = (
CheckConstraint("""
(NOT member OR (surname IS NOT NULL AND
preferred_name IS NOT NULL AND
email IS NOT NULL AND
joined IS NOT NULL))
""", name="members_must_have_details"),
CheckConstraint('member OR NOT "user"', name="users_must_be_members"),
)
def __str__(self):
return self.crsid
def __repr__(self):
if is_root or is_webapp:
m = ' member' if self.member else ' ex-member'
u = ' user' if self.user else ''
flags = m + u
r = '<Member {0} {1} {2}{3}>'.format(self.crsid, self.name, self.email, flags)
else:
r = '<Member {0} {1}>'.format(self.crsid, self.name)
if not six.PY3:
r = r.encode("utf8")
return r
def __eq__(self, other):
if not isinstance(other, Member):
return False
else:
return self.crsid == other.crsid
def __hash__(self):
return hash(self.crsid)
@hybrid_property
def name(self):
"""Joins :attr:`preferred_name` and :attr:`surname`"""
if self.preferred_name and self.surname:
return self.preferred_name + " " + self.surname
else:
return self.preferred_name or self.surname or None
society_admins = Table(
'society_admins', Base.metadata,
Column('crsid', CRSID_TYPE,
ForeignKey('members.crsid'), primary_key=True),
Column('society', SOCIETY_TYPE,
ForeignKey('societies.society'), primary_key=True),
)
class Society(Base, SocietyCompat):
__tablename__ = "societies"
society = Column(SOCIETY_TYPE, CheckConstraint('society = lower(society)'),
primary_key=True)
description = Column(String(100), nullable=False)
disk_quota_gb = Column(Integer, FetchedValue())
disk_usage_gb = Column(Numeric, FetchedValue())
disk_usage_updated = Column(DateTime(timezone=True), FetchedValue())
if is_root or is_webapp:
uid = Column(Integer, FetchedValue())
gid = Column(Integer, FetchedValue())
joined = Column(DateTime(timezone=True), FetchedValue())
modified = Column(DateTime(timezone=True), FetchedValue())
role_email = Column(String(100), CheckConstraint("email ~ E'@'"))
danger = Column(Boolean, nullable=False, server_default='f')
notes = Column(Text, nullable=False, server_default='')
admins = relationship("Member",
secondary=society_admins, collection_class=AdminsSetCompat,
backref=backref("societies", collection_class=set))
if is_root or is_webapp:
pending_admins = relationship("PendingAdmin", backref=backref("society"))
domains = relationship("Domain", primaryjoin="foreign(Domain.owner) == Society.society")
def __str__(self):
return self.society
def __repr__(self):
orphaned = '' if self.admins else ' orphaned'
return '<Society {0}{1}>'.format(self.society, orphaned)
def __eq__(self, other):
if not isinstance(other, Society):
return False
else:
return self.society == other.society
def __hash__(self):
return hash(self.society)
def __contains__(self, other):
if isinstance(other, Member):
return other in self.admins
elif isinstance(other, six.string_types):
return other in self.admin_crsids
else:
return False
@property
def admin_crsids(self):
""":attr:`admins`, as a set of strings (crsids)"""
return frozenset(m.crsid for m in self.admins)
@hybrid_property
def email(self):
"""[email protected] address"""
return self.society + "[email protected]"
if is_root or is_webapp:
class PendingAdmin(Base):
__tablename__ = "pending_society_admins"
# There is no ForeignKey constraint here because this table exists to
# reference users that don't exist yet.
crsid = Column(CRSID_TYPE, CheckConstraint('crsid = lower(crsid)'),
primary_key=True)
society_society = Column(SOCIETY_TYPE,
ForeignKey('societies.society'),
name="society",
primary_key=True)
def __str__(self):
return "{0} {1}".format(self.crsid, self.society.society)
def __repr__(self):
return '<PendingAdmin {0} {1}>'.format(self.crsid, self.society.society)
class Domain(Base):
__tablename__ = "domains"
id = Column(Integer, primary_key=True)
class_ = Column("class", String(7), nullable=False)
owner = Column(String(16), nullable=False)
domain = Column(String(256), nullable=False)
root = Column(String(256))
wild = Column(Boolean, nullable=False, server_default='f')
danger = Column(Boolean, nullable=False, server_default='f')
last_good = Column(DateTime(timezone=True))
def __str__(self):
return self.domain
def __repr__(self):
return "<{}: {} ({} {}){}{}>".format(self.__class__.__name__,
self.domain,
self.class_,
self.owner,
" @ {}".format(repr(self.root)) if self.root else "",
" wild" if self.wild else "")
class HTTPSCert(Base):
__tablename__ = "https_certs"
id = Column(Integer, primary_key=True)
domain = Column(String(256), nullable=False)
name = Column(String(32))
def __str__(self):
return self.domain
def __repr__(self):
return "<{}: {} ({})>".format(self.__class__.__name__, self.domain, self.name)
JobState = SQLAEnum('unapproved', 'queued', 'running', 'done', 'failed', 'withdrawn',
name='job_state')
LogType = SQLAEnum('created', 'started', 'progress', 'output', 'done', 'failed', 'note',
name='log_type')
LogLevel = SQLAEnum('debug', 'info', 'warning', 'error', 'critical',
name='log_level')
event.listen(
Base.metadata,
"before_create",
DDL("CREATE EXTENSION hstore")
)
class Job(Base):
__tablename__ = 'jobs'
job_id = Column(Integer, primary_key=True)
owner_crsid = Column(CRSID_TYPE, ForeignKey("members.crsid"))
owner = relationship("Member")
state = Column(JobState, nullable=False, server_default='unapproved')
state_message = Column(Text)
created_at = Column(DateTime)
type = Column(String(100), nullable=False)
args = Column(MutableDict.as_mutable(HSTORE), nullable=False)
environment = Column(Text)
class JobLog(Base):
__tablename__ = 'job_log'
log_id = Column(Integer, primary_key=True)
job_id = Column(Integer, ForeignKey("jobs.job_id"))
time = Column(DateTime)
type = Column(LogType)
level = Column(LogLevel)
message = Column(Text)
raw = Column(Text)
@property
def raw_safe(self):
if not self.raw.startswith("\\x"):
return self.raw
raw = unhexlify(self.raw[2:])
try:
return raw.decode("utf-8")
except UnicodeDecodeError:
return "[Could not decode output as UTF-8]\n{}".format(_hexdump(raw))
else:
PendingAdmin = None
LogLevel = None
Domain = None
HTTPSCert = None
JobState = None
Job = None
JobLog = None
def dump_schema():
from sqlalchemy import create_engine
import os.path
directory = os.path.dirname(__file__)
with open(os.path.join(directory, "triggers.sql")) as f:
triggers = f.read()
with open(os.path.join(directory, "grants.sql")) as f:
grants = f.read()
event.listen(
Base.metadata,
"after_create",
DDL(triggers)
)
event.listen(
Base.metadata,
"after_create",
DDL(grants)
)
def dump(sql, *multiparams, **params):
print(sql.compile(dialect=engine.dialect), ";")
engine = create_engine('postgresql://', strategy='mock', executor=dump)
Base.metadata.create_all(engine, checkfirst=False)
if __name__ == "__main__":
dump_schema()
| 33.836512 | 102 | 0.607988 | 8,622 | 0.694315 | 0 | 0 | 911 | 0.073361 | 0 | 0 | 2,424 | 0.195201 |
5efcf7db618c88e80670f2e44849d8f110aeefaf | 15,226 | py | Python | tests/test_grid.py | ascillitoe/pyvista | b0eb948042f208a03b9feb5784854ebb8507dae8 | [
"MIT"
]
| null | null | null | tests/test_grid.py | ascillitoe/pyvista | b0eb948042f208a03b9feb5784854ebb8507dae8 | [
"MIT"
]
| null | null | null | tests/test_grid.py | ascillitoe/pyvista | b0eb948042f208a03b9feb5784854ebb8507dae8 | [
"MIT"
]
| 1 | 2020-03-23T15:46:56.000Z | 2020-03-23T15:46:56.000Z | import os
import numpy as np
import pytest
import vtk
import pyvista
from pyvista import examples
from pyvista.plotting import system_supports_plotting
beam = pyvista.UnstructuredGrid(examples.hexbeamfile)
# create structured grid
x = np.arange(-10, 10, 2)
y = np.arange(-10, 10, 2)
z = np.arange(-10, 10, 2)
x, y, z = np.meshgrid(x, y, z)
sgrid = pyvista.StructuredGrid(x, y, z)
try:
test_path = os.path.dirname(os.path.abspath(__file__))
test_data_path = os.path.join(test_path, 'test_data')
except:
test_path = '/home/alex/afrl/python/source/pyvista/tests'
def test_volume():
assert beam.volume > 0.0
@pytest.mark.skipif(not system_supports_plotting(), reason="Requires system to support plotting")
def test_struct_example():
# create and plot structured grid
grid = examples.load_structured()
cpos = grid.plot(off_screen=True) # basic plot
assert isinstance(cpos, pyvista.CameraPosition)
# Plot mean curvature
cpos_curv = grid.plot_curvature(off_screen=True)
assert isinstance(cpos_curv, pyvista.CameraPosition)
def test_init_from_structured():
unstruct_grid = pyvista.UnstructuredGrid(sgrid)
assert unstruct_grid.points.shape[0] == x.size
assert np.all(unstruct_grid.celltypes == 12)
def test_init_from_unstructured():
grid = pyvista.UnstructuredGrid(beam, deep=True)
grid.points += 1
assert not np.any(grid.points == beam.points)
def test_init_bad_input():
with pytest.raises(Exception):
unstruct_grid = pyvista.UnstructuredGrid(np.array(1))
with pytest.raises(Exception):
unstruct_grid = pyvista.UnstructuredGrid(np.array(1),
np.array(1),
np.array(1),
'woa')
def test_init_from_arrays():
offset = np.array([0, 9], np.int8)
cells = np.array([8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 15])
cell_type = np.array([vtk.VTK_HEXAHEDRON, vtk.VTK_HEXAHEDRON], np.int32)
cell1 = np.array([[0, 0, 0],
[1, 0, 0],
[1, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 1],
[1, 1, 1],
[0, 1, 1]])
cell2 = np.array([[0, 0, 2],
[1, 0, 2],
[1, 1, 2],
[0, 1, 2],
[0, 0, 3],
[1, 0, 3],
[1, 1, 3],
[0, 1, 3]])
points = np.vstack((cell1, cell2)).astype(np.int32)
grid = pyvista.UnstructuredGrid(offset, cells, cell_type, points)
assert grid.n_cells == 2
assert np.allclose(grid.offset, offset)
def test_surface_indices():
surf = beam.extract_surface()
surf_ind = surf.point_arrays['vtkOriginalPointIds']
assert np.allclose(surf_ind, beam.surface_indices())
def test_extract_feature_edges():
edges = beam.extract_feature_edges(90)
assert edges.n_points
edges = beam.extract_feature_edges(180)
assert not edges.n_points
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', ['vtu', 'vtk'])
def test_save(extension, binary, tmpdir):
filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension))
beam.save(filename, binary)
grid = pyvista.UnstructuredGrid(filename)
assert grid.cells.shape == beam.cells.shape
assert grid.points.shape == beam.points.shape
grid = pyvista.read(filename)
assert grid.cells.shape == beam.cells.shape
assert grid.points.shape == beam.points.shape
assert isinstance(grid, pyvista.UnstructuredGrid)
def test_init_bad_filename():
filename = os.path.join(test_path, 'test_grid.py')
with pytest.raises(Exception):
grid = pyvista.UnstructuredGrid(filename)
with pytest.raises(Exception):
grid = pyvista.UnstructuredGrid('not a file')
def test_save_bad_extension():
with pytest.raises(Exception):
grid = pyvista.UnstructuredGrid('file.abc')
def test_linear_copy():
# need a grid with quadratic cells
lgrid = beam.linear_copy()
assert np.all(lgrid.celltypes < 20)
def test_extract_cells():
ind = [1, 2, 3]
part_beam = beam.extract_cells(ind)
assert part_beam.n_cells == len(ind)
assert part_beam.n_points < beam.n_points
mask = np.zeros(beam.n_cells, np.bool)
mask[:3] = True
part_beam = beam.extract_cells(mask)
assert part_beam.n_cells == len(ind)
assert part_beam.n_points < beam.n_points
def test_merge():
grid = beam.copy()
grid.points[:, 0] += 1
unmerged = grid.merge(beam, inplace=False, merge_points=False)
grid.merge(beam, inplace=True, merge_points=True)
assert grid.n_points > beam.n_points
assert grid.n_points < unmerged.n_points
def test_merge_not_main():
grid = beam.copy()
grid.points[:, 0] += 1
unmerged = grid.merge(beam, inplace=False, merge_points=False,
main_has_priority=False)
grid.merge(beam, inplace=True, merge_points=True)
assert grid.n_points > beam.n_points
assert grid.n_points < unmerged.n_points
def test_merge_list():
grid_a = beam.copy()
grid_a.points[:, 0] += 1
grid_b = beam.copy()
grid_b.points[:, 1] += 1
grid_a.merge([beam, grid_b], inplace=True, merge_points=True)
assert grid_a.n_points > beam.n_points
def test_init_structured():
xrng = np.arange(-10, 10, 2)
yrng = np.arange(-10, 10, 2)
zrng = np.arange(-10, 10, 2)
x, y, z = np.meshgrid(xrng, yrng, zrng)
grid = pyvista.StructuredGrid(x, y, z)
assert np.allclose(sgrid.x, x)
assert np.allclose(sgrid.y, y)
assert np.allclose(sgrid.z, z)
grid_a = pyvista.StructuredGrid(grid)
assert np.allclose(grid_a.points, grid.points)
def test_invalid_init_structured():
xrng = np.arange(-10, 10, 2)
yrng = np.arange(-10, 10, 2)
zrng = np.arange(-10, 10, 2)
x, y, z = np.meshgrid(xrng, yrng, zrng)
z = z[:, :, :2]
with pytest.raises(Exception):
grid = pyvista.StructuredGrid(x, y, z)
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', ['vts', 'vtk'])
def test_save_structured(extension, binary, tmpdir):
filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension))
sgrid.save(filename, binary)
grid = pyvista.StructuredGrid(filename)
assert grid.x.shape == sgrid.y.shape
assert grid.n_cells
assert grid.points.shape == sgrid.points.shape
grid = pyvista.read(filename)
assert grid.x.shape == sgrid.y.shape
assert grid.n_cells
assert grid.points.shape == sgrid.points.shape
assert isinstance(grid, pyvista.StructuredGrid)
def test_load_structured_bad_filename():
with pytest.raises(Exception):
pyvista.StructuredGrid('not a file')
filename = os.path.join(test_path, 'test_grid.py')
with pytest.raises(Exception):
grid = pyvista.StructuredGrid(filename)
def test_create_rectilinear_grid_from_specs():
# 3D example
xrng = np.arange(-10, 10, 2)
yrng = np.arange(-10, 10, 5)
zrng = np.arange(-10, 10, 1)
grid = pyvista.RectilinearGrid(xrng)
assert grid.n_cells == 9
assert grid.n_points == 10
grid = pyvista.RectilinearGrid(xrng, yrng)
assert grid.n_cells == 9*3
assert grid.n_points == 10*4
grid = pyvista.RectilinearGrid(xrng, yrng, zrng)
assert grid.n_cells == 9*3*19
assert grid.n_points == 10*4*20
assert grid.bounds == [-10.0,8.0, -10.0,5.0, -10.0,9.0]
# 2D example
cell_spacings = np.array([1., 1., 2., 2., 5., 10.])
x_coordinates = np.cumsum(cell_spacings)
y_coordinates = np.cumsum(cell_spacings)
grid = pyvista.RectilinearGrid(x_coordinates, y_coordinates)
assert grid.n_cells == 5*5
assert grid.n_points == 6*6
assert grid.bounds == [1.,21., 1.,21., 0.,0.]
def test_create_rectilinear_after_init():
x = np.array([0,1,2])
y = np.array([0,5,8])
z = np.array([3,2,1])
grid = pyvista.RectilinearGrid()
grid.x = x
assert grid.dimensions == [3, 1, 1]
grid.y = y
assert grid.dimensions == [3, 3, 1]
grid.z = z
assert grid.dimensions == [3, 3, 3]
assert np.allclose(grid.x, x)
assert np.allclose(grid.y, y)
assert np.allclose(grid.z, z)
def test_create_rectilinear_grid_from_file():
grid = examples.load_rectilinear()
assert grid.n_cells == 16146
assert grid.n_points == 18144
assert grid.bounds == [-350.0,1350.0, -400.0,1350.0, -850.0,0.0]
assert grid.n_arrays == 1
def test_read_rectilinear_grid_from_file():
grid = pyvista.read(examples.rectfile)
assert grid.n_cells == 16146
assert grid.n_points == 18144
assert grid.bounds == [-350.0,1350.0, -400.0,1350.0, -850.0,0.0]
assert grid.n_arrays == 1
def test_cast_rectilinear_grid():
grid = pyvista.read(examples.rectfile)
structured = grid.cast_to_structured_grid()
assert isinstance(structured, pyvista.StructuredGrid)
assert structured.n_points == grid.n_points
assert structured.n_cells == grid.n_cells
assert np.allclose(structured.points, grid.points)
for k, v in grid.point_arrays.items():
assert np.allclose(structured.point_arrays[k], v)
for k, v in grid.cell_arrays.items():
assert np.allclose(structured.cell_arrays[k], v)
def test_create_uniform_grid_from_specs():
# create UniformGrid
dims = [10, 10, 10]
grid = pyvista.UniformGrid(dims) # Using default spacing and origin
assert grid.dimensions == [10, 10, 10]
assert grid.extent == [0, 9, 0, 9, 0, 9]
assert grid.origin == [0.0, 0.0, 0.0]
assert grid.spacing == [1.0, 1.0, 1.0]
spacing = [2, 1, 5]
grid = pyvista.UniformGrid(dims, spacing) # Using default origin
assert grid.dimensions == [10, 10, 10]
assert grid.origin == [0.0, 0.0, 0.0]
assert grid.spacing == [2.0, 1.0, 5.0]
origin = [10, 35, 50]
grid = pyvista.UniformGrid(dims, spacing, origin) # Everything is specified
assert grid.dimensions == [10, 10, 10]
assert grid.origin == [10.0, 35.0, 50.0]
assert grid.spacing == [2.0, 1.0, 5.0]
assert grid.dimensions == [10, 10, 10]
def test_uniform_setters():
grid = pyvista.UniformGrid()
grid.dimensions = [10, 10, 10]
assert grid.GetDimensions() == (10, 10, 10)
assert grid.dimensions == [10, 10, 10]
grid.spacing = [5, 2, 1]
assert grid.GetSpacing() == (5, 2, 1)
assert grid.spacing == [5, 2, 1]
grid.origin = [6, 27.7, 19.8]
assert grid.GetOrigin() == (6, 27.7, 19.8)
assert grid.origin == [6, 27.7, 19.8]
def test_create_uniform_grid_from_file():
grid = examples.load_uniform()
assert grid.n_cells == 729
assert grid.n_points == 1000
assert grid.bounds == [0.0,9.0, 0.0,9.0, 0.0,9.0]
assert grid.n_arrays == 2
assert grid.dimensions == [10, 10, 10]
def test_read_uniform_grid_from_file():
grid = pyvista.read(examples.uniformfile)
assert grid.n_cells == 729
assert grid.n_points == 1000
assert grid.bounds == [0.0,9.0, 0.0,9.0, 0.0,9.0]
assert grid.n_arrays == 2
assert grid.dimensions == [10, 10, 10]
def test_cast_uniform_to_structured():
grid = examples.load_uniform()
structured = grid.cast_to_structured_grid()
assert structured.n_points == grid.n_points
assert structured.n_arrays == grid.n_arrays
assert structured.bounds == grid.bounds
def test_cast_uniform_to_rectilinear():
grid = examples.load_uniform()
rectilinear = grid.cast_to_rectilinear_grid()
assert rectilinear.n_points == grid.n_points
assert rectilinear.n_arrays == grid.n_arrays
assert rectilinear.bounds == grid.bounds
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', ['vtr', 'vtk'])
def test_save_rectilinear(extension, binary, tmpdir):
filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension))
ogrid = examples.load_rectilinear()
ogrid.save(filename, binary)
grid = pyvista.RectilinearGrid(filename)
assert grid.n_cells == ogrid.n_cells
assert np.allclose(grid.x, ogrid.x)
assert np.allclose(grid.y, ogrid.y)
assert np.allclose(grid.z, ogrid.z)
assert grid.dimensions == ogrid.dimensions
grid = pyvista.read(filename)
assert isinstance(grid, pyvista.RectilinearGrid)
assert grid.n_cells == ogrid.n_cells
assert np.allclose(grid.x, ogrid.x)
assert np.allclose(grid.y, ogrid.y)
assert np.allclose(grid.z, ogrid.z)
assert grid.dimensions == ogrid.dimensions
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', ['vti', 'vtk'])
def test_save_uniform(extension, binary, tmpdir):
filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension))
ogrid = examples.load_uniform()
ogrid.save(filename, binary)
grid = pyvista.UniformGrid(filename)
assert grid.n_cells == ogrid.n_cells
assert grid.origin == ogrid.origin
assert grid.spacing == ogrid.spacing
assert grid.dimensions == ogrid.dimensions
grid = pyvista.read(filename)
assert isinstance(grid, pyvista.UniformGrid)
assert grid.n_cells == ogrid.n_cells
assert grid.origin == ogrid.origin
assert grid.spacing == ogrid.spacing
assert grid.dimensions == ogrid.dimensions
def test_grid_points():
"""Test the points methods on UniformGrid and RectilinearGrid"""
points = np.array([[0, 0, 0],
[1, 0, 0],
[1, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 1],
[1, 1, 1],
[0, 1, 1]])
grid = pyvista.UniformGrid()
grid.points = points
assert grid.dimensions == [2, 2, 2]
assert grid.spacing == [1, 1, 1]
assert grid.origin == [0., 0., 0.]
assert np.allclose(np.unique(grid.points, axis=0), np.unique(points, axis=0))
opts = np.c_[grid.x, grid.y, grid.z]
assert np.allclose(np.unique(opts, axis=0), np.unique(points, axis=0))
# Now test rectilinear grid
del grid
grid = pyvista.RectilinearGrid()
grid.points = points
assert grid.dimensions == [2, 2, 2]
assert np.allclose(np.unique(grid.points, axis=0), np.unique(points, axis=0))
def test_grid_extract_selection_points():
grid = pyvista.UnstructuredGrid(sgrid)
sub_grid = grid.extract_selection_points([0])
assert sub_grid.n_cells == 1
sub_grid = grid.extract_selection_points(range(100))
assert sub_grid.n_cells > 1
def test_gaussian_smooth():
uniform = examples.load_uniform()
active = uniform.active_scalars_name
values = uniform.active_scalars
uniform = uniform.gaussian_smooth(scalars=active)
assert uniform.active_scalars_name == active
assert uniform.active_scalars.shape == values.shape
assert not np.all(uniform.active_scalars == values)
values = uniform.active_scalars
uniform = uniform.gaussian_smooth(radius_factor=5, std_dev=1.3)
assert uniform.active_scalars_name == active
assert uniform.active_scalars.shape == values.shape
assert not np.all(uniform.active_scalars == values)
| 32.67382 | 97 | 0.64843 | 0 | 0 | 0 | 0 | 3,243 | 0.212991 | 0 | 0 | 701 | 0.04604 |
5efda15abd13bae316a30c8f74303450a7d645eb | 5,767 | py | Python | Server/src/quadradiusr_server/server.py | kjarosh/QuadradiusR | 2e55188bf9c9cd980ec6d11fce51830d0b4749d7 | [
"MIT"
]
| null | null | null | Server/src/quadradiusr_server/server.py | kjarosh/QuadradiusR | 2e55188bf9c9cd980ec6d11fce51830d0b4749d7 | [
"MIT"
]
| null | null | null | Server/src/quadradiusr_server/server.py | kjarosh/QuadradiusR | 2e55188bf9c9cd980ec6d11fce51830d0b4749d7 | [
"MIT"
]
| null | null | null | import asyncio
import logging
from collections import defaultdict
from typing import Optional, List, Dict
from aiohttp import web
from aiohttp.web_runner import AppRunner, TCPSite
from quadradiusr_server.auth import Auth
from quadradiusr_server.config import ServerConfig
from quadradiusr_server.cron import Cron, SetupService
from quadradiusr_server.db.base import Game, Lobby
from quadradiusr_server.db.database_engine import DatabaseEngine
from quadradiusr_server.db.repository import Repository
from quadradiusr_server.game import GameInProgress
from quadradiusr_server.lobby import LiveLobby
from quadradiusr_server.notification import NotificationService
from quadradiusr_server.utils import import_submodules
routes = web.RouteTableDef()
class ServerNotStartedException(Exception):
pass
class QuadradiusRServer:
def __init__(self, config: ServerConfig) -> None:
self.config: ServerConfig = config
self.notification_service = NotificationService()
self.database = DatabaseEngine(config.database)
self.repository = Repository(self.database)
self.auth = Auth(config.auth, self.repository)
self.cron = Cron(config.cron, self.repository, self.notification_service)
self.setup_service = SetupService(self.repository)
self.app = web.Application()
self.app['server'] = self
self.app['auth'] = self.auth
self.app['database'] = self.database
self.app['repository'] = self.repository
self.app['notification'] = self.notification_service
self.app.add_routes(routes)
if config.static.redirect_root:
async def root_handler(request):
raise web.HTTPFound(config.static.redirect_root)
self.app.router.add_route('GET', '', root_handler)
if config.static.serve_path:
self.app.router.add_static('/', config.static.serve_path)
self.runner: Optional[AppRunner] = None
self.site: Optional[TCPSite] = None
self.lobbies: Dict[str, LiveLobby] = dict()
self.games: Dict[str, GameInProgress] = dict()
self.gateway_connections: Dict[str, List[object]] = \
defaultdict(lambda: [])
def _ensure_started(self):
if not self.site:
raise ServerNotStartedException()
@property
def is_secure(self) -> bool:
self._ensure_started()
return True if self.site._ssl_context else False
@property
def address(self) -> (str, int):
self._ensure_started()
return self.site._server.sockets[0].getsockname()
def _get_scheme(self, protocol):
if protocol == 'http':
scheme = 'https' if self.is_secure else 'http'
elif protocol == 'ws':
scheme = 'wss' if self.is_secure else 'ws'
else:
raise ValueError(f'Unknown protocol {protocol}')
return scheme
def get_url(self, protocol: str = 'http') -> str:
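        # e.g. a server bound to 127.0.0.1:8080 without TLS yields
        # "http://127.0.0.1:8080", or "ws://127.0.0.1:8080" for protocol='ws'.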
# TCPSite.name is not implemented properly
self._ensure_started()
addr = self.address
scheme = self._get_scheme(protocol)
return f'{scheme}://{addr[0]}:{addr[1]}'
def get_href(self, protocol: str = 'http') -> str:
if self.config.href:
return f'{self._get_scheme(protocol)}://{self.config.href}'
else:
return self.get_url(protocol)
async def start(self):
await self.database.initialize()
self.runner = AppRunner(self.app)
await self.runner.setup()
cfg = self.config
logging.info('Starting server')
self.site = TCPSite(
runner=self.runner,
host=cfg.host,
port=cfg.port,
shutdown_timeout=cfg.shutdown_timeout,
backlog=cfg.backlog,
reuse_address=cfg.reuse_address,
reuse_port=cfg.reuse_port,
# TODO ssl_context=ssl_context,
)
await self.setup_service.run_setup_jobs()
await self.cron.register()
await self.site.start()
logging.info(f'Server started at {cfg.host}:{cfg.port}')
async def shutdown(self):
logging.info('Server shutdown initiated')
if self.runner:
await self.runner.cleanup()
if self.database:
await self.database.dispose()
logging.info('Server shutdown finished')
async def _run_async(self):
await self.start()
while True:
await asyncio.sleep(3600)
def run(self) -> int:
loop = asyncio.new_event_loop()
try:
loop.run_until_complete(self._run_async())
return 0
except KeyboardInterrupt:
logging.info('Interrupted')
loop.run_until_complete(self.shutdown())
return -1
finally:
loop.close()
def register_gateway(self, gateway):
user_id = gateway.user_id
self.gateway_connections[user_id].append(gateway)
def unregister_gateway(self, gateway):
user_id = gateway.user_id
self.gateway_connections[user_id].remove(gateway)
def start_lobby(self, lobby: Lobby) -> LiveLobby:
if lobby.id_ not in self.lobbies.keys():
self.lobbies[lobby.id_] = LiveLobby(
lobby.id_, self.repository,
self.notification_service)
return self.lobbies[lobby.id_]
def start_game(self, game: Game) -> GameInProgress:
if game.id_ not in self.games.keys():
self.games[game.id_] = GameInProgress(
game, self.repository, self.config.game)
return self.games[game.id_]
# importing submodules automatically registers endpoints
import quadradiusr_server.rest
import_submodules(quadradiusr_server.rest)
| 32.767045 | 81 | 0.650945 | 4,879 | 0.84602 | 0 | 0 | 267 | 0.046298 | 1,192 | 0.206693 | 473 | 0.082018 |
5eff513cdc7ff514a20abc942fb429679a31b4d7 | 95 | py | Python | 12_find the output/03_In Python/01_GeeksForGeeks/05_Set Five/problem_4.py | Magdyedwar1996/python-level-one-codes | 066086672f43488bc8b32c620b5e2f94cedfe3da | [
"MIT"
]
| 1 | 2021-11-16T14:14:38.000Z | 2021-11-16T14:14:38.000Z | 12_find the output/03_In Python/01_GeeksForGeeks/05_Set Five/problem_4.py | Magdyedwar1996/python-level-one-codes | 066086672f43488bc8b32c620b5e2f94cedfe3da | [
"MIT"
]
| null | null | null | 12_find the output/03_In Python/01_GeeksForGeeks/05_Set Five/problem_4.py | Magdyedwar1996/python-level-one-codes | 066086672f43488bc8b32c620b5e2f94cedfe3da | [
"MIT"
]
| null | null | null | def gfg(x,l = []):
for i in range(x):
l.append(i*i)
print(l)
gfg(2)
gfg(3,[3,2,1])
gfg(3)
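# Expected output: the default list `l` is created once and shared between the
# calls that rely on it, so the third call reuses the list mutated by the first.
#   gfg(2)          -> [0, 1]
#   gfg(3,[3,2,1])  -> [3, 2, 1, 0, 1, 4]
#   gfg(3)          -> [0, 1, 0, 1, 4]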
| 10.555556 | 19 | 0.526316 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
5effb0c993d722db84398b9fa87c2c824fbd66c6 | 2,638 | py | Python | duck/utils/cal_ints.py | galaxycomputationalchemistry/duck | a57337afd523c99ebe4babf74c1868578c6cf1e0 | [
"Apache-2.0"
]
| 1 | 2020-06-20T23:27:46.000Z | 2020-06-20T23:27:46.000Z | duck/utils/cal_ints.py | galaxycomputationalchemistry/duck | a57337afd523c99ebe4babf74c1868578c6cf1e0 | [
"Apache-2.0"
]
| 4 | 2018-07-17T12:48:59.000Z | 2020-04-01T11:00:42.000Z | duck/utils/cal_ints.py | xchem/duck | b98bb78284e9c92837ac1e69fc2f06306ab1e28c | [
"Apache-2.0"
]
| 3 | 2019-06-15T16:04:47.000Z | 2020-04-01T07:54:53.000Z | import json, pickle, sys, os
from parmed.geometry import distance2
from parmed.topologyobjects import Atom
import operator
import parmed
import math
def check_same(atom, chain, res_name, res_number, atom_name):
if atom.residue.name == res_name:
if atom.residue.number == res_number:
if atom.name == atom_name:
if atom.residue.chain == chain:
return True
return False
def is_lig(atom):
# Non-hydrogen
if atom.residue.name == "UNL" and atom.atomic_number > 1:
return True
def find_atom(res_atom=None, prot_file=None, combined_pmd=None):
# Parse the input data like this -> "A_LYS_311_N"
chain = res_atom.split("_")[0]
res_name = res_atom.split("_")[1]
res_number = int(res_atom.split("_")[2])
atom_name = res_atom.split("_")[3]
# Read the original PDB File and find the atom coords
protein = parmed.load_file(prot_file)
for atom in protein.atoms:
if check_same(atom, chain, res_name, res_number, atom_name):
prot_atom = atom
break
distance_atom_1 = [(x.idx, distance2(x, prot_atom)) for x in combined_pmd.atoms]
distance_atom_1.sort(key=operator.itemgetter(1))
return distance_atom_1, prot_atom
def find_result(res_atom=None, prot_file=None, combined_pmd=None):
    # Find the protein atom named by res_atom and the distances from it
distance_atom_1, prot_atom = find_atom(res_atom, prot_file, combined_pmd)
# Now find the one nearest
distance_atom_2 = [
(x.idx, distance2(x, prot_atom)) for x in combined_pmd.atoms if is_lig(x)
]
distance_atom_2.sort(key=operator.itemgetter(1))
# These are the interactions to find
index_one = distance_atom_1[0][0]
# The ligand one
index_two = distance_atom_2[0][0]
out_res = [index_one, index_two, math.sqrt(distance_atom_2[0][1])]
return index_one, index_two, out_res, distance_atom_2[0][1]
def find_interaction(res_atom=None, prot_file=None):
output_file = "indice.text"
if not res_atom or prot_file:
if os.path.isfile(output_file):
return json.load(open(output_file))
# Read files
print("loading pickle")
pickle_in = open("complex_system.pickle", "rb")
combined_pmd = pickle.load(pickle_in)[0]
pickle_in.close()
index_one, index_two, out_res, dist = find_result(res_atom, prot_file, combined_pmd)
out_f = open(output_file, "w")
out_f.write(json.dumps(out_res))
out_f.close()
return [index_one, index_two, math.sqrt(dist)]
if __name__ == "__main__":
# Define the input
res_atom = sys.argv[1]
prot_file = sys.argv[2]
find_interaction(res_atom, prot_file)
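    # Example invocation (the PDB file name is a placeholder):
    #   python cal_ints.py A_LYS_311_N protein.pdb
    # res_atom uses the "chain_resname_resnumber_atomname" format parsed in find_atom().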
| 33.392405 | 88 | 0.681956 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 320 | 0.121304 |
6f011e9d1e6d5fe45f9c159871d9be7ae9ea35b9 | 1,111 | py | Python | snakes/help_info.py | japinol7/snakes | bb501736027897bacab498ad7bbbe622cf4b9755 | [
"MIT"
]
| 12 | 2019-04-15T07:20:31.000Z | 2019-05-18T22:03:35.000Z | snakes/help_info.py | japinol7/snakes | bb501736027897bacab498ad7bbbe622cf4b9755 | [
"MIT"
]
| null | null | null | snakes/help_info.py | japinol7/snakes | bb501736027897bacab498ad7bbbe622cf4b9755 | [
"MIT"
]
| null | null | null | """Module help_info."""
__author__ = 'Joan A. Pinol (japinol)'
class HelpInfo:
"""Manages information used for help purposes."""
def print_help_keys(self):
        print(' F1: \t show a help screen while playing the game\n'
' t: \t stats on/off\n'
' L_Ctrl + R_Alt + g: grid\n'
' p: \t pause\n'
' ESC: exit game\n'
' ^m: \t pause/resume music\n'
' ^s: \t sound effects on/off\n'
' Alt + Enter: change full screen / normal screen mode\n'
' ^h: \t shows this help\n'
' \t left, a: move snake to the left\n'
' \t right, d: move snake to the right\n'
' \t up, w: move snake up\n'
' \t down, s: move snake down\n'
' \t u 4: fire a light shot\n'
' \t i 5: fire a medium shot\n'
' \t j 1: fire a strong shot\n'
' \t k 2: fire a heavy shot\n'
)
| 41.148148 | 73 | 0.417642 | 1,039 | 0.935194 | 0 | 0 | 0 | 0 | 0 | 0 | 744 | 0.669667 |
6f0325adcc4e209cb06df2012d7cf8d2933313bf | 3,983 | py | Python | run_minprop_PD.py | kztakemoto/network_propagation | 7e66aca7f179cfe982b388b20b240745b4927bf9 | [
"MIT"
]
| 3 | 2021-04-24T10:58:33.000Z | 2022-03-22T10:02:33.000Z | run_minprop_PD.py | kztakemoto/network_propagation | 7e66aca7f179cfe982b388b20b240745b4927bf9 | [
"MIT"
]
| null | null | null | run_minprop_PD.py | kztakemoto/network_propagation | 7e66aca7f179cfe982b388b20b240745b4927bf9 | [
"MIT"
]
| 1 | 2019-11-25T06:32:13.000Z | 2019-11-25T06:32:13.000Z | import warnings
warnings.simplefilter('ignore')
import argparse
import pickle
import numpy as np
import pandas as pd
import networkx as nx
import scipy.sparse as sp
from network_propagation_methods import minprop_2
from sklearn.metrics import roc_auc_score, auc
import matplotlib.pyplot as plt
#### Parameters #############
parser = argparse.ArgumentParser(description='Runs MINProp')
parser.add_argument('--alphaP', type=float, default=0.25, help='diffusion parameter for the protein-protein interaction network')
parser.add_argument('--alphaD', type=float, default=0.25, help='diffusion parameter for the disease similarity network')
parser.add_argument('--max_iter', type=int, default=1000, help='maximum number of iterations')
parser.add_argument('--eps', type=float, default=1.0e-6, help='convergence threshold')
parser.add_argument('--dir_data', type=str, default='./data/', help='directory of pickled network data')
args = parser.parse_args()
#### load data ############
### protein-protein interaction network
with open(args.dir_data + 'norm_adj_networkP.pickle', mode='rb') as f:
norm_adj_networkP = pickle.load(f)
nb_proteins = norm_adj_networkP.shape[0]
### disease similarity network
with open(args.dir_data + 'adj_networkD.pickle', mode='rb') as f:
adj_networkD = pickle.load(f)
nb_diseases = adj_networkD.shape[0]
# normalized adjacency matrix
deg_networkD = np.sum(adj_networkD, axis=0)
norm_adj_networkD = sp.csr_matrix(adj_networkD / np.sqrt(np.dot(deg_networkD.T, deg_networkD)), dtype=np.float64)
del(adj_networkD)
del(deg_networkD)
### protein-disease network (data used in PRINCE study)
with open(args.dir_data + 'biadj_networkPD.pickle', mode='rb') as f:
biadj_networkPD = pickle.load(f)
# get the list of protein-disease pairs
PD_pairs = biadj_networkPD.nonzero()
# number of protein-disease pairs
nb_PD_pairs = len(PD_pairs[0])
#### Network propagation MINProp ###########################
roc_value_set = np.array([], dtype=np.float64)
rankings = np.array([], dtype=np.int64)
for i in range(nb_PD_pairs):
# leave-one-out validation
# remove a protein-disease association
idx_P = PD_pairs[0][i]
idx_D = PD_pairs[1][i]
biadj_networkPD[idx_P, idx_D] = 0.0
biadj_networkPD.eliminate_zeros()
# normalized biadjacency matrix (ToDo: faster implementation)
degP = np.sum(biadj_networkPD, axis=1)
degD = np.sum(biadj_networkPD, axis=0)
norm_biadj_networkPD = sp.csr_matrix(biadj_networkPD / np.sqrt(np.dot(degP, degD)), dtype=np.float64)
norm_biadj_networkPD.data[np.isnan(norm_biadj_networkPD.data)] = 0.0
norm_biadj_networkPD.eliminate_zeros()
# set initial label
yP = np.zeros(nb_proteins, dtype=np.float64)
yD = np.zeros(nb_diseases, dtype=np.float64)
yD[idx_D] = 1.0
# propagation
fP, fD, convergent = minprop_2(norm_adj_networkP, norm_adj_networkD, norm_biadj_networkPD, yP, yD, args.alphaP, args.alphaD, args.eps, args.max_iter)
# ranking
labels_real = np.zeros(nb_proteins)
labels_real[idx_P] = 1
rank = int(np.where(labels_real[np.argsort(-fP)]==1)[0]) + 1
rankings = np.append(rankings, rank)
# get AUC value
roc_value = roc_auc_score(labels_real, fP)
print(i, "AUC:", roc_value, convergent)
roc_value_set = np.append(roc_value_set, roc_value)
# reassign the protein-disease association
biadj_networkPD[idx_P, idx_D] = 1.0
print("Average AUC", np.mean(roc_value_set))
# compute sensitivity and top rate (ROC-like curve)
# ToDo: faster implementation
sen_set = np.array([], dtype=np.float64)
top_rate_set = np.array([], dtype=np.float64)
for k in range(nb_proteins):
    # sensitivity
sen = (rankings <= (k+1)).sum() / nb_PD_pairs
# top rate
top_rate = (k + 1) / nb_proteins
sen_set = np.append(sen_set, sen)
top_rate_set = np.append(top_rate_set, top_rate)
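# Vectorized sketch for the "ToDo: faster implementation" note above (same
# quantities, computed with numpy instead of the Python loop):
#   ks = np.arange(1, nb_proteins + 1)
#   sen_set = np.searchsorted(np.sort(rankings), ks, side='right') / nb_PD_pairs
#   top_rate_set = ks / nb_proteins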
# get AUC value
print("Summarized AUC", auc(top_rate_set, sen_set))
# plot ROC-like curve
plt.scatter(top_rate_set, sen_set)
plt.show()
| 38.298077 | 153 | 0.726839 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,112 | 0.279187 |
6f03742065f7d2c3fc2369fb406d4426cdddbeab | 459 | py | Python | Exercicios em Python/ex080.py | Raphael-Azevedo/Exercicios_Python | dece138f38edd02b0731aed78e44acccb021b3cb | [
"MIT"
]
| null | null | null | Exercicios em Python/ex080.py | Raphael-Azevedo/Exercicios_Python | dece138f38edd02b0731aed78e44acccb021b3cb | [
"MIT"
]
| null | null | null | Exercicios em Python/ex080.py | Raphael-Azevedo/Exercicios_Python | dece138f38edd02b0731aed78e44acccb021b3cb | [
"MIT"
]
| null | null | null | n = []
i = 0
for c in range(0, 5):
    n1 = int(input('Enter a value: '))
if c == 0 or n1 > n[-1]:
n.append(n1)
        print(f'Added at position {c} of the list...')
else:
pos = 0
while pos < len(n):
if n1 <= n[pos]:
n.insert(pos, n1)
                print(f'Added at position {pos} of the list...')
break
pos += 1
print(f'The values entered, in order, were {n}')
| 25.5 | 65 | 0.461874 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 147 | 0.317495 |
6f03aa2ab2aaee70b468bb66183fe442925a1018 | 13,132 | py | Python | rawal_stuff/src/demo.py | rawalkhirodkar/traffic_light_detection | 0e1e99962477bcf271b22d5205b1e7afab8635ba | [
"MIT"
]
| null | null | null | rawal_stuff/src/demo.py | rawalkhirodkar/traffic_light_detection | 0e1e99962477bcf271b22d5205b1e7afab8635ba | [
"MIT"
]
| null | null | null | rawal_stuff/src/demo.py | rawalkhirodkar/traffic_light_detection | 0e1e99962477bcf271b22d5205b1e7afab8635ba | [
"MIT"
]
| null | null | null | import cv2
import numpy as np
import random
import copy
import dlib
from keras.models import Sequential
from keras.optimizers import SGD
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.models import load_model
from convnetskeras.convnets import preprocess_image_batch, convnet
from convnetskeras.imagenet_tool import synset_to_dfs_ids
np.set_printoptions(threshold=np.inf)
#----------------------------Globals------------------------------------------------------------
MIN_AREA = 20
MAX_AREA = 500
MIN_RED_DENSITY = 0.4
MIN_BLACk_DENSITY_BELOW = 0
MIN_POLYAPPROX = 3
WIDTH_HEIGHT_RATIO = [0.333, 1.5] #range
#------------------------------------------------------------------------------------------------
tracker_list = []
TRACK_FRAME = 10
VOTE_FRAME = 3
frame0_detections = []
frame1_detections = []
frame2_detections = []
frame_detections = []
RADIAL_DIST = 10
#------------------------------------------------------------------------------------------------
def dist(x1,y1,x2,y2):
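    # Euclidean distance between points (x1, y1) and (x2, y2)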
a = np.array((x1 ,y1))
b = np.array((x2, y2))
return np.linalg.norm(a-b)
#------------------------------------------------------------------------------------------------
BOUNDING_BOX = [0,0,0,0] #x1, y1, x2, y2
#------------------------------------------------------------------------------------------------
def prune_detection(detections):
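    # De-duplicate boxes: keep a detection only if no later detection starts within RADIAL_DIST pixels of it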
ans = []
size = len(detections)
for i in range(0,size):
(x,y,w,h) = detections[i]
found = -1
for j in range(i+1,size):
(x1,y1,w1,h1) = detections[j]
if(dist(x,y,x1,y1) < RADIAL_DIST):
found = 1
break
if found == -1:
ans.append(detections[i])
return ans
#------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------
def inside(p):
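    # True if point p lies strictly inside the violation BOUNDING_BOX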
(x,y) = p
if(x < BOUNDING_BOX[2] and x > BOUNDING_BOX[0] and y < BOUNDING_BOX[3] and y > BOUNDING_BOX[1]):
return True
return False
#------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------
def is_violation(frame_detections):
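    # True when any detection box is neither fully inside nor fully outside BOUNDING_BOX (it straddles the border)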
for (x,y,w,h) in frame_detections:
p1 = (x,y)
p2 = (x+w,y)
p3 = (x,y+h)
p4 = (x+w,y+h)
if(inside(p1) and inside(p2) and inside(p3) and inside(p4)):
continue
elif(not(inside(p1)) and not(inside(p2)) and not(inside(p3)) and not(inside(p4))):
continue
else:
return True
return False
#------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------
def create_model():
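    # Small Keras CNN over 128x128 RGB crops with two output classes (used downstream as traffic light vs. not)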
nb_classes = 2
# Create the model
model = Sequential()
model.add(Convolution2D(32, 3, 3, input_shape=(3, 128, 128), border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,3)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
return model
#------------------------------------------------------------------------------------------------
print "Loading model"
model = create_model()
model.load_weights("../model/traffic_light_weights.h5")
#------------------------------------------------------------------------------------------------
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model_heatmap = convnet('vgg_19',weights_path="../model/weights/vgg19_weights.h5", heatmap=True)
model_heatmap.compile(optimizer=sgd, loss='mse')
traffic_light_synset = "n06874185"
ids = synset_to_dfs_ids(traffic_light_synset)
#------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------
clipnum = raw_input("Enter Clip number:\n")
f=open('../../dayTrain/dayClip'+str(clipnum)+'/frameAnnotationsBULB.csv','r')
inputs=f.read()
f.close();
inputs=inputs.split()
inputs=[i.split(";") for i in inputs]
for i in range(21):
inputs.pop(0)
# fourcc = cv2.VideoWriter_fourcc(*'XVID')
fourcc = cv2.cv.CV_FOURCC(*'XVID')
out = cv2.VideoWriter('output'+str(clipnum)+'.avi',fourcc, 20.0, (1280,960))
#------------------------------------------------------------------------------------------------
frame_num = -1
VIOLATION = -1
for i in inputs:
if i[1]=="stop":
filename="../../dayTrain/dayClip"+str(clipnum)+"/frames/"+i[0][12:len(i[0])]
original_img=cv2.imread(filename)
img=copy.copy(original_img)
height, width, channels = img.shape
if(frame_num == -1):
center_x = width/2
center_y = height/2
BB_width = width/4
BB_height = height/4
BOUNDING_BOX = [center_x-BB_width,center_y-BB_height,center_x + BB_width, center_y + BB_height ]
frame_num += 1
#------------------detection begins--------------------------------------------------------
if(frame_num % TRACK_FRAME < VOTE_FRAME): #VOTE_FRAME = 3, then 0,1,2 allowed
#------------------reset------------------------
if(frame_num % TRACK_FRAME == 0):
tracker_list = []
frame0_detections = []
frame1_detections = []
frame2_detections = []
#------------------reset------------------------
#-----------preprocess------------------------------------
img = cv2.medianBlur(img,3) # Median Blur to Remove Noise
img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
b,g,r = cv2.split(img)
            clahe = cv2.createCLAHE(clipLimit=7.0, tileGridSize=(8,8)) # Adaptive histogram equalization
clahe = clahe.apply(r)
img = cv2.merge((b,g,clahe))
#----------------------------------------------------------
#----------red threshold the HSV image--------------------
img1 = cv2.inRange(img, np.array([0, 100, 100]), np.array([10,255,255])) #lower red hue
img2 = cv2.inRange(img, np.array([160, 100, 100]), np.array([179,255,255])) #upper red hue
img3 = cv2.inRange(img, np.array([160, 40, 60]), np.array([180,70,80]))
img4 = cv2.inRange(img, np.array([0, 150, 40]), np.array([20,190,75]))
img5 = cv2.inRange(img, np.array([145, 35, 65]), np.array([170,65,90]))
img = cv2.bitwise_or(img1,img3)
img = cv2.bitwise_or(img,img2)
img = cv2.bitwise_or(img,img4)
img = cv2.bitwise_or(img,img5)
cv2.medianBlur(img,7)
ret,thresh = cv2.threshold(img,127,255,0)
#----------------------------------------------------------
#--------------------Heatmap------------------------------------
im_heatmap = preprocess_image_batch([filename], color_mode="bgr")
out_heatmap = model_heatmap.predict(im_heatmap)
heatmap = out_heatmap[0,ids].sum(axis=0)
my_range = np.max(heatmap) - np.min(heatmap)
heatmap = heatmap / my_range
heatmap = heatmap * 255
heatmap = cv2.resize(heatmap,(width,height))
cv2.imwrite("heatmap.png",heatmap)
cv2.imwrite("image.png",original_img)
heatmap[heatmap < 128] = 0 # Black
heatmap[heatmap >= 128] = 255 # White
heatmap = np.asarray(heatmap,dtype=np.uint8)
#----------------------------------------------------------
thresh = cv2.bitwise_and(thresh,heatmap)
#----------------------------------------------------------
contours, hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
for cnt in contours:
area = cv2.contourArea(cnt)
x,y,w,h = cv2.boundingRect(cnt)
red_density = (area*1.0)/(w*h)
width_height_ratio = (w*1.0)/h
perimeter = cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, 0.04 * perimeter, True)
temp=cv2.cvtColor(original_img[y+h:y+2*h,x:x+w], cv2.COLOR_RGB2GRAY)
(thresh, temp) = cv2.threshold(temp, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
black_density_below = ((w*h - cv2.countNonZero(temp))*1.0)/(w*h)
                if (area > MIN_AREA and area < MAX_AREA
                        and len(approx) > MIN_POLYAPPROX
                        and red_density > MIN_RED_DENSITY
                        and width_height_ratio < WIDTH_HEIGHT_RATIO[1]
                        and width_height_ratio > WIDTH_HEIGHT_RATIO[0]
                        and black_density_below > MIN_BLACk_DENSITY_BELOW):
try:
r_x1=x-50
r_y1=y-50
r_x2=x+w+50
r_y2=y+h+50
temp=original_img[r_y1:r_y2,r_x1:r_x2]
xx=cv2.resize(temp,(128,128))
xx=np.asarray(xx)
xx=np.transpose(xx,(2,0,1))
xx=np.reshape(xx,(1,3,128,128))
if model.predict_classes(xx,verbose=0)==[1]:
cv2.rectangle(original_img, (x,y), (x+w,y+h),(0,255,0), 2)
#append detections
if frame_num % TRACK_FRAME == 0:
frame0_detections.append((x,y,w,h))
elif frame_num%TRACK_FRAME == 1:
frame1_detections.append((x,y,w,h))
elif frame_num%TRACK_FRAME == 2:
frame2_detections.append((x,y,w,h))
else:
cv2.rectangle(original_img, (x,y), (x+w,y+h),(255,0,0), 1)
except Exception as e:
cv2.rectangle(original_img, (x,y), (x+w,y+h),(0,255,0), 2) #edges are allowed
print e
pass
#--------------------Violation in Detect Phase------------------------------
frame_detections = []
if(frame_num % TRACK_FRAME == 0):
frame_detections = frame0_detections
if(frame_num % TRACK_FRAME == 1):
frame_detections = frame1_detections
if(frame_num % TRACK_FRAME == 2):
frame_detections = frame2_detections
#--------------------Violation in Detect Phase------------------------------
#compute and start tracking
if frame_num % TRACK_FRAME == 2:
all_detections = frame0_detections + frame1_detections + frame2_detections
final_detections = prune_detection(all_detections)
for (x,y,w,h) in final_detections:
tracker = dlib.correlation_tracker()
tracker.start_track(original_img, dlib.rectangle(x,y,(x+w),(y+h)))
tracker_list.append(tracker)
#------------------detection end----------------------------------------------------
#------------------tracking begins----------------------------------------------------
else:
frame_detections = []
for tracker in tracker_list:
tracker.update(original_img)
rect = tracker.get_position()
pt1 = (int(rect.left()), int(rect.top()))
pt2 = (int(rect.right()), int(rect.bottom()))
cv2.rectangle(original_img, pt1, pt2, (255, 255, 255), 2)
frame_detections.append((pt1[0], pt1[1], pt2[0]-pt1[0], pt2[1]-pt1[1]))
#------------------ tracking end----------------------------------------------------
if(is_violation(frame_detections) == True):
cv2.rectangle(original_img, (BOUNDING_BOX[0],BOUNDING_BOX[1]), (BOUNDING_BOX[2],BOUNDING_BOX[3]),(0, 0, 255), 2)
else:
cv2.rectangle(original_img, (BOUNDING_BOX[0],BOUNDING_BOX[1]), (BOUNDING_BOX[2],BOUNDING_BOX[3]),(60, 255, 255), 2)
cv2.imshow("Annotated",original_img)
out.write(original_img)
ch = 0xFF & cv2.waitKey(1)
if ch == 27:
break
cv2.destroyAllWindows()
#------------------------------------------------------------------------------------------------
| 43.919732 | 262 | 0.456823 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,307 | 0.251828 |
6f043f48e4529a5b4d4237cf80295c09f14302ee | 3,720 | py | Python | kaivy/geometry/line2d.py | team-kaivy/kaivy | e27b53e8e9eedc48abc99151f3adbb76f0a9b331 | [
"MIT"
]
| null | null | null | kaivy/geometry/line2d.py | team-kaivy/kaivy | e27b53e8e9eedc48abc99151f3adbb76f0a9b331 | [
"MIT"
]
| null | null | null | kaivy/geometry/line2d.py | team-kaivy/kaivy | e27b53e8e9eedc48abc99151f3adbb76f0a9b331 | [
"MIT"
]
| null | null | null | ########################################################################################################################
# #
# This file is part of kAIvy #
# #
# Copyright (c) 2019-2021 by the kAIvy team and contributors #
# #
########################################################################################################################
import numpy as np
from kaivy.geometry.geometry2d import Geometry2D
from kaivy.geometry.transformation2d import Transformation2D
from kivy.graphics import Line, SmoothLine, Color
class Line2D(Geometry2D):
"""
    A simple line defined by two points
"""
def __init__(self, points, width=1.0, color=(1.0, 1.0, 1.0, 1.0)):
"""
Initializer
:param points: The line's points
"""
super().__init__()
self.geometry_class_name = 'Line2D'
self.set_nodes(np.array(points))
self.smooth = True
self.color = color
self.width = width
def render_to_kivy(self, target, transformation: Transformation2D, parameters={}, geometry_out=None):
color = parameters.get('color', self.color)
target.add(Color(*color))
nodes = transformation.transform(self.nodes)
if geometry_out is not None:
if self.GO_TAG_LINE_LIST not in geometry_out: # add line array if still missing
geometry_out[self.GO_TAG_LINE_LIST] = []
geometry_out[self.GO_TAG_LINE_LIST].append({self.GO_TAG_OWNER: self, self.GO_TAG_LINE_LIST_LINES: nodes})
nodes = nodes.flatten().tolist()
if self.smooth:
target.add(SmoothLine(points=nodes, width=self.width))
else:
target.add(Line(points=nodes, width=self.width))
def distance_to_point(self, point, ray=False):
"""
        Returns the distance between this line and a given point
        :param point: A 2D coordinate
        :param ray: If True, treat the line as unbounded instead of clamping to the segment
        :return: The distance to the point and the nearest point on the line
"""
return self.line_distance_to_point(self.nodes, point, ray=ray)
@staticmethod
def line_distance_to_point(point_list, point, ray=False):
"""
        Returns the distance from the line defined by point_list to a given point
        :param point_list: The line's two points as a numpy array
        :param point: A 2D coordinate
        :param ray: If True, treat the line as unbounded instead of clamping to the segment
        :return: The distance to the point and the nearest point on the line; (None, None) if the two points coincide
"""
# two points define the line
n = (point_list[1] - point_list[0])
        if not np.any(n):  # degenerate line: both points coincide
return None, None
line_length = np.linalg.norm(n)
n = n / line_length
ap = point - point_list[0]
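        # project the point onto the unit direction to get its parameter t along the line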
t = ap.dot(n)
if not ray:
t = min(max(t, 0), line_length)
x = point_list[0] + t * n
# d = (np.cross(ap, n) ** 2).sum()**0.5
return ((point - x) ** 2).sum() ** 0.5, x
def to_dict(self, options): # Overrides Geometry2D to_dict
result = super().to_dict(options)
if options.get(self.OPTION_VISUAL_DETAILS, True):
result['width'] = self.width
result['smooth'] = self.smooth
return result
| 42.758621 | 136 | 0.491129 | 2,674 | 0.718817 | 0 | 0 | 885 | 0.237903 | 0 | 0 | 1,671 | 0.449194 |
6f050e8b2c15f5d5adcf74276ee71e811d247441 | 5,813 | py | Python | data_loader/MSVD_dataset.py | dendisuhubdy/collaborative-experts | e6db63837537c054723ce00b73264101acc29d39 | [
"MIT"
]
| null | null | null | data_loader/MSVD_dataset.py | dendisuhubdy/collaborative-experts | e6db63837537c054723ce00b73264101acc29d39 | [
"MIT"
]
| null | null | null | data_loader/MSVD_dataset.py | dendisuhubdy/collaborative-experts | e6db63837537c054723ce00b73264101acc29d39 | [
"MIT"
]
| null | null | null | import copy
from pathlib import Path
from typing import Dict, Union, List
from collections import defaultdict
import numpy as np
from typeguard import typechecked
from zsvision.zs_utils import memcache, concat_features
from utils.util import memory_summary
from base.base_dataset import BaseDataset
class MSVD(BaseDataset):
@staticmethod
@typechecked
def dataset_paths() -> Dict[str, Union[str, List[str], Path, Dict]]:
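        # Relative paths (under the dataset root) for subset lists, visual/text features and raw captions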
subset_paths = {}
test_splits = {
"dev": "val_list.txt",
"official": "test_list.txt",
"public_server_val": "public_server_val.txt",
"public_server_test": "public_server_test.txt",
}
for split_name, fname in test_splits.items():
subset_paths[split_name] = {"train": "train_list.txt", "val": fname}
feature_names = [
"imagenet.senet154.0",
"scene.densenet161.0",
"i3d.i3d.0",
"s3dg.s3dg.0",
"imagenet.resnext101_32x48d.0",
"trn.moments-trn.0",
"r2p1d.r2p1d-ig65m.0",
"r2p1d.r2p1d-ig65m-kinetics.0",
"moments_3d.moments-resnet3d50.0",
"moments-static.moments-resnet50.0",
"detection",
"detection-sem"
]
custom_paths = {
"face": ["aggregated_face_feats/face-avg.pickle"],
"ocr": ["aggregated_ocr_feats/ocr-w2v.pickle"],
}
text_feat_paths = {}
challenge_text_feat_paths = {}
for text_feat in ("openai", "w2v"):
text_feat_names = {key: f"{text_feat}-caption-{key}"
for key in {"train", "val", "test"}}
text_feat_paths[text_feat] = {key: f"aggregated_text_feats/{val}.pkl"
for key, val in text_feat_names.items()}
challenge_text_feat_paths[text_feat] = \
f"aggregated_text_feats/{text_feat}.pkl"
feature_info = {
"subset_list_paths": subset_paths,
"feature_names": feature_names,
"custom_paths": custom_paths,
"text_feat_paths": text_feat_paths,
"challenge_text_feat_paths": challenge_text_feat_paths,
"raw_captions_path": "raw-captions.pkl",
"dict_youtube_mapping_path": "dict_youtube_mapping.pkl"
}
return feature_info
def load_features(self):
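        # Load per-expert visual features (memcached), then text features and raw captions for this split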
root_feat = Path(self.root_feat)
feat_names = {key: self.visual_feat_paths(key) for key in
self.paths["feature_names"]}
feat_names.update(self.paths["custom_paths"])
features = {}
for expert, rel_names in feat_names.items():
if expert not in self.ordered_experts:
continue
feat_paths = tuple([root_feat / rel_name for rel_name in rel_names])
if len(feat_paths) == 1:
features[expert] = memcache(feat_paths[0])
else:
# support multiple forms of feature (e.g. max and avg pooling). For
# now, we only support direct concatenation
msg = f"{expert}: Only direct concat of muliple feats is possible"
print(f"Concatenating aggregates for {expert}....")
assert self.feat_aggregation[expert]["aggregate"] == "concat", msg
axis = self.feat_aggregation[expert]["aggregate-axis"]
x = concat_features.cache_info() # pylint: disable=no-value-for-parameter
print(f"concat cache info: {x}")
features_ = concat_features(feat_paths, axis=axis)
memory_summary()
if expert == "speech":
features_defaults = defaultdict(lambda: np.zeros((1, 300)))
features_defaults.update(features_)
features_ = features_defaults
# Make separate feature copies for each split to allow in-place filtering
features[expert] = copy.deepcopy(features_)
self.features = features
if self.challenge_mode:
self.load_challenge_text_features()
else:
text_feat_paths = self.paths["text_feat_paths"][self.text_feat]
text_features = memcache(root_feat / text_feat_paths["train"])
split_names = {"dev": "val", "official": "test"}
text_features.update(memcache(
root_feat / text_feat_paths[split_names[self.split_name]]))
key_map = memcache(root_feat / self.paths["dict_youtube_mapping_path"])
inverse_map = {val: key for key, val in key_map.items()}
self.text_features = {inverse_map[key]: val for key, val in
text_features.items()}
self.raw_captions = memcache(root_feat / self.paths["raw_captions_path"])
if "detection" in self.ordered_experts:
# Example processing
processed = {}
for key, subdict in self.features["detection"].items():
box, conf = subdict["detection_boxes"], subdict["detection_scores"]
raw = subdict["raw_feats_avg"]
processed[key] = np.concatenate((box, conf.reshape(-1, 1), raw), axis=1)
self.features["detection"] = processed
if "openpose" in self.ordered_experts:
# Example processing
processed = {}
for key, subdict in self.features["openpose"].items():
raw = np.concatenate(subdict["matrix"], axis=1)
processed[key] = raw.transpose(1, 0, 2).reshape(-1, 3 * 18)
self.features["openpose"] = processed
def sanity_checks(self):
assert self.num_test_captions == 81, "Expected to have 81 test caps for MSVD"
| 43.059259 | 90 | 0.584724 | 5,509 | 0.947703 | 0 | 0 | 2,068 | 0.355754 | 0 | 0 | 1,541 | 0.265095 |
6f067497faf1ec468f96a34eb789dd94adfffc2e | 2,381 | py | Python | wagtail/wagtailsearch/forms.py | balkantechnologies/BalkanCMS_core | 68625199028fc96abb175e410a4a7a92c02cb261 | [
"BSD-3-Clause"
]
| 1 | 2021-09-21T00:06:52.000Z | 2021-09-21T00:06:52.000Z | wagtail/wagtailsearch/forms.py | balkantechnologies/BalkanCMS_core | 68625199028fc96abb175e410a4a7a92c02cb261 | [
"BSD-3-Clause"
]
| 1 | 2021-02-24T08:25:30.000Z | 2021-02-24T08:25:30.000Z | wagtail/wagtailsearch/forms.py | balkantechnologies/BalkanCMS_core | 68625199028fc96abb175e410a4a7a92c02cb261 | [
"BSD-3-Clause"
]
| 1 | 2020-11-24T10:21:24.000Z | 2020-11-24T10:21:24.000Z | from django import forms
from django.forms.models import inlineformset_factory
from django.utils.translation import ugettext_lazy as _
from wagtail.wagtailadmin.widgets import AdminPageChooser
from wagtail.wagtailsearch import models
class QueryForm(forms.Form):
query_string = forms.CharField(label=_("Search term(s)/phrase"),
help_text=_("Enter the full search string to match. An "
"exact match is required for your Editors Picks to be "
"displayed, wildcards are NOT allowed."),
required=True)
class EditorsPickForm(forms.ModelForm):
sort_order = forms.IntegerField(required=False)
def __init__(self, *args, **kwargs):
super(EditorsPickForm, self).__init__(*args, **kwargs)
self.fields['page'].widget = AdminPageChooser()
class Meta:
model = models.EditorsPick
fields = ('query', 'page', 'description')
widgets = {
'description': forms.Textarea(attrs=dict(rows=3)),
}
EditorsPickFormSetBase = inlineformset_factory(models.Query, models.EditorsPick, form=EditorsPickForm, can_order=True, can_delete=True, extra=0)
class EditorsPickFormSet(EditorsPickFormSetBase):
minimum_forms = 1
minimum_forms_message = _("Please specify at least one recommendation for this search term.")
def add_fields(self, form, *args, **kwargs):
super(EditorsPickFormSet, self).add_fields(form, *args, **kwargs)
# Hide delete and order fields
form.fields['DELETE'].widget = forms.HiddenInput()
form.fields['ORDER'].widget = forms.HiddenInput()
# Remove query field
del form.fields['query']
def clean(self):
# Editors pick must have at least one recommended page to be valid
# Check there is at least one non-deleted form.
non_deleted_forms = self.total_form_count()
non_empty_forms = 0
for i in range(0, self.total_form_count()):
form = self.forms[i]
if self.can_delete and self._should_delete_form(form):
non_deleted_forms -= 1
if not (form.instance.id is None and not form.has_changed()):
non_empty_forms += 1
if (
non_deleted_forms < self.minimum_forms
or non_empty_forms < self.minimum_forms
):
raise forms.ValidationError(self.minimum_forms_message)
| 36.075758 | 144 | 0.673667 | 1,990 | 0.835783 | 0 | 0 | 0 | 0 | 0 | 0 | 457 | 0.191936 |
6f069669d5a2624249034f4c529c35293422204b | 6,994 | py | Python | app/utils/docs_utils.py | BoostryJP/ibet-Prime | 924e7f8da4f8feea0a572e8b5532e09bcdf2dc99 | [
"Apache-2.0"
]
| 2 | 2021-08-19T12:35:25.000Z | 2022-02-16T04:13:38.000Z | app/utils/docs_utils.py | BoostryJP/ibet-Prime | 924e7f8da4f8feea0a572e8b5532e09bcdf2dc99 | [
"Apache-2.0"
]
| 46 | 2021-09-02T03:22:05.000Z | 2022-03-31T09:20:00.000Z | app/utils/docs_utils.py | BoostryJP/ibet-Prime | 924e7f8da4f8feea0a572e8b5532e09bcdf2dc99 | [
"Apache-2.0"
]
| 1 | 2021-11-17T23:18:27.000Z | 2021-11-17T23:18:27.000Z | """
Copyright BOOSTRY Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
from typing import (
List,
Dict,
Any
)
from pydantic import BaseModel
from fastapi.openapi.utils import get_openapi
from fastapi.exceptions import RequestValidationError
from app.exceptions import (
InvalidParameterError,
SendTransactionError,
AuthorizationError,
ServiceUnavailableError
)
class MetaModel(BaseModel):
code: int
title: str
class Error400MetaModel(MetaModel):
class Config:
@staticmethod
def schema_extra(schema: Dict[str, Any], _) -> None:
properties = schema["properties"]
properties["code"]["example"] = 1
properties["title"]["example"] = "InvalidParameterError"
class Error400Model(BaseModel):
meta: Error400MetaModel
detail: str
class Error401MetaModel(MetaModel):
class Config:
@staticmethod
def schema_extra(schema: Dict[str, Any], _) -> None:
properties = schema["properties"]
properties["code"]["example"] = 1
properties["title"]["example"] = "AuthorizationError"
class Error401Model(BaseModel):
meta: Error401MetaModel
detail: str
class Error404MetaModel(MetaModel):
class Config:
@staticmethod
def schema_extra(schema: Dict[str, Any], _) -> None:
properties = schema["properties"]
properties["code"]["example"] = 1
properties["title"]["example"] = "NotFound"
class Error404Model(BaseModel):
meta: Error404MetaModel
detail: str
class Error405MetaModel(MetaModel):
class Config:
@staticmethod
def schema_extra(schema: Dict[str, Any], _) -> None:
properties = schema["properties"]
properties["code"]["example"] = 1
properties["title"]["example"] = "MethodNotAllowed"
class Error405Model(BaseModel):
meta: Error405MetaModel
detail: str
class Error422MetaModel(MetaModel):
class Config:
@staticmethod
def schema_extra(schema: Dict[str, Any], _) -> None:
properties = schema["properties"]
properties["code"]["example"] = 1
properties["title"]["example"] = "RequestValidationError"
class Error422DetailModel(BaseModel):
loc: List[str]
msg: str
type: str
class Config:
@staticmethod
def schema_extra(schema: Dict[str, Any], _) -> None:
properties = schema["properties"]
properties["loc"]["example"] = ["header", "issuer-address"]
properties["msg"]["example"] = "field required"
properties["type"]["example"] = "value_error.missing"
class Error422Model(BaseModel):
meta: Error422MetaModel
detail: List[Error422DetailModel]
class Error503MetaModel(MetaModel):
class Config:
@staticmethod
def schema_extra(schema: Dict[str, Any], _) -> None:
properties = schema["properties"]
properties["code"]["example"] = 1
properties["title"]["example"] = "ServiceUnavailableError"
class Error503Model(BaseModel):
meta: Error503MetaModel
detail: str
DEFAULT_RESPONSE = {
400: {
"description": "Invalid Parameter Error / Send Transaction Error",
"model": Error400Model
},
401: {
"description": "Authorization Error",
"model": Error401Model
},
404: {
"description": "Not Found Error",
"model": Error404Model
},
405: {
"description": "Method Not Allowed",
"model": Error405Model
},
422: {
"description": "Validation Error",
"model": Error422Model
},
503: {
"description": "Service Unavailable Error",
"model": Error503Model
}
}
def get_routers_responses(*args):
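    # Build the OpenAPI "responses" dict for a router from status codes and/or known exception classes,
    # e.g. get_routers_responses(404, InvalidParameterError) maps to the 404 and 400 entries of DEFAULT_RESPONSE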
responses = {}
for arg in args:
if isinstance(arg, int):
responses[arg] = DEFAULT_RESPONSE.get(arg, {})
elif arg == InvalidParameterError:
responses[400] = DEFAULT_RESPONSE[400]
elif arg == SendTransactionError:
responses[400] = DEFAULT_RESPONSE[400]
elif arg == AuthorizationError:
responses[401] = DEFAULT_RESPONSE[401]
elif arg == RequestValidationError:
responses[422] = DEFAULT_RESPONSE[422]
elif arg == ServiceUnavailableError:
responses[503] = DEFAULT_RESPONSE[503]
return responses
def custom_openapi(app):
def openapi():
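        # Generate the schema if the app has not cached one, then prune auto-added 422 validation
        # responses and empty response contents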
openapi_schema = app.openapi_schema
if openapi_schema is None:
openapi_schema = get_openapi(
title=app.title,
version=app.version,
openapi_version=app.openapi_version,
description=app.description,
routes=app.routes,
tags=app.openapi_tags,
servers=app.servers,
)
def _get(src: dict, *keys):
tmp_src = src
for key in keys:
tmp_src = tmp_src.get(key)
if tmp_src is None:
return None
return tmp_src
paths = _get(openapi_schema, "paths")
if paths is not None:
for path_info in paths.values():
for router in path_info.values():
# Remove Default Validation Error Response Structure
# NOTE:
                    # HTTPValidationError is automatically added to the docs of APIs that have path, header,
                    # query, or body parameters.
                    # But HTTPValidationError does not have 'meta',
                    # and some APIs never generate a validation error (e.g. APIs whose only parameters are
                    # non-required strings).
resp_422 = _get(router, "responses", "422")
if resp_422 is not None:
ref = _get(resp_422, "content", "application/json", "schema", "$ref")
if ref == "#/components/schemas/HTTPValidationError":
router["responses"].pop("422")
# Remove empty response's contents
responses = _get(router, "responses")
for resp in responses.values():
schema = _get(resp, "content", "application/json", "schema")
if schema == {}:
resp.pop("content")
return openapi_schema
return openapi
| 29.635593 | 120 | 0.601373 | 2,721 | 0.389048 | 0 | 0 | 1,710 | 0.244495 | 0 | 0 | 1,888 | 0.269946 |
6f06e78625c74321a938329732209995e4f8e1f0 | 2,282 | py | Python | scripts/models/arcii.py | mogumogu2333/MatchZoo | 1182b076bf571eba4af89141b93a51598afc252c | [
"Apache-2.0"
]
| null | null | null | scripts/models/arcii.py | mogumogu2333/MatchZoo | 1182b076bf571eba4af89141b93a51598afc252c | [
"Apache-2.0"
]
| null | null | null | scripts/models/arcii.py | mogumogu2333/MatchZoo | 1182b076bf571eba4af89141b93a51598afc252c | [
"Apache-2.0"
]
| null | null | null | import os
import sys
sys.path.insert(0, "../../")
import matchzoo as mz
import typing
import pandas as pd
import matchzoo
from matchzoo.preprocessors.units.tokenize import Tokenize, WordPieceTokenize
from matchzoo.engine.base_preprocessor import load_preprocessor
import pickle
import utils
os.environ["CUDA_VISIBLE_DEVICES"] = "6"
input_dir = "../../data/"
model_dir = "../../models/arcii"
num_epochs = 10
utils.ensure_dir(model_dir)
with open(os.path.join(input_dir, "train.pkl"), 'rb') as f:
train_pack_processed = pickle.load(f)
print(train_pack_processed.frame().head())
with open(os.path.join(input_dir, "test.pkl"), 'rb') as f:
test_pack_processed = pickle.load(f)
print(test_pack_processed.frame().head())
preprocessor = load_preprocessor(dirpath=os.path.join(input_dir))
print(preprocessor._context)
glove_embedding = mz.datasets.embeddings.load_glove_embedding(dimension=100)
ranking_task = mz.tasks.Classification()
ranking_task.metrics = ['accuracy']
print("`ranking_task` initialized with metrics", ranking_task.metrics)
model = mz.models.ArcII()
model.params.update(preprocessor.context)
model.params['task'] = ranking_task
model.params['embedding_output_dim'] = 100
model.params['embedding_trainable'] = True
model.params['num_blocks'] = 2
model.params['kernel_1d_count'] = 32
model.params['kernel_1d_size'] = 3
model.params['kernel_2d_count'] = [64, 64]
model.params['kernel_2d_size'] = [3, 3]
model.params['pool_2d_size'] = [[3, 3], [3, 3]]
model.params['optimizer'] = 'adam'
model.build()
model.compile()
model.backend.summary()
embedding_matrix = glove_embedding.build_matrix(preprocessor.context['vocab_unit'].state['term_index'])
model.load_embedding_matrix(embedding_matrix)
test_x, test_y = test_pack_processed.unpack()
evaluate = mz.callbacks.EvaluateAllMetrics(model, x=test_x, y=test_y, batch_size=128)
dump_prediction = mz.callbacks.DumpPrediction(model, x=test_x, y=test_y, batch_size=128,
model_save_path=model_dir)
train_generator = mz.DataGenerator(
train_pack_processed,
num_dup=2,
num_neg=1,
batch_size=128,
)
print('num batches:', len(train_generator))
history = model.fit_generator(train_generator, epochs=num_epochs,
callbacks=[evaluate, dump_prediction],
workers=4, use_multiprocessing=True)
| 30.837838 | 103 | 0.765995 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 342 | 0.149869 |