Dataset schema (one row per source file; ⌀ marks nullable columns):

| Column | Type | Range / Values |
|---|---|---|
| hexsha | stringlengths | 40 to 40 |
| size | int64 | 5 to 2.06M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3 to 248 |
| max_stars_repo_name | stringlengths | 5 to 125 |
| max_stars_repo_head_hexsha | stringlengths | 40 to 78 |
| max_stars_repo_licenses | listlengths | 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 to 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 to 24, nullable (⌀) |
| max_issues_repo_path | stringlengths | 3 to 248 |
| max_issues_repo_name | stringlengths | 5 to 125 |
| max_issues_repo_head_hexsha | stringlengths | 40 to 78 |
| max_issues_repo_licenses | listlengths | 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 to 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 to 24, nullable (⌀) |
| max_forks_repo_path | stringlengths | 3 to 248 |
| max_forks_repo_name | stringlengths | 5 to 125 |
| max_forks_repo_head_hexsha | stringlengths | 40 to 78 |
| max_forks_repo_licenses | listlengths | 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 to 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 to 24, nullable (⌀) |
| content | stringlengths | 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |

hexsha: c896cf21816f76cd01ad1bacb6b82f675af14297 | size: 12,510 | ext: py | lang: Python
path: services/core-api/tests/now_submissions/resources/test_application_resource.py
repo: parc-jason/mds @ 8f181a429442208a061ed72065b71e6c2bd0f76f | licenses: [Apache-2.0]
stars: 25 (2018-07-09T19:04:37.000Z to 2022-03-15T17:27:10.000Z)
issues: 983 (2018-04-25T20:08:07.000Z to 2022-03-31T21:45:20.000Z)
forks: 58 (2018-05-15T22:35:50.000Z to 2021-11-29T19:40:52.000Z)

import json
from tests.factories import (NOWSubmissionFactory, MineFactory, NOWClientFactory,
NOWApplicationIdentityFactory)
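# Each test below creates a NOW submission through factories, issues a GET
# against the endpoint with full auth headers, and asserts that the
# serialized field under test matches the factory-generated data.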
class TestGetApplicationResource:
"""GET /now-submissions/applications/{application_guid}"""
def test_get_now_submission_by_guid_success(self, test_client, db_session, auth_headers):
"""Should return the correct records with a 200 response code"""
now_submission = NOWSubmissionFactory()
identity = NOWApplicationIdentityFactory(now_submission=now_submission)
get_resp = test_client.get(
f'/now-submissions/applications/{identity.now_application_guid}',
headers=auth_headers['full_auth_header'])
assert get_resp.status_code == 200, get_resp.response
get_data = json.loads(get_resp.data.decode())
assert get_data['now_application_guid'] is not None
assert get_data['now_application_guid'] == str(identity.now_application_guid)
def test_get_now_submission_by_guid_mine_name(self, test_client, db_session, auth_headers):
"""Should include the correct mine name"""
mine = MineFactory()
now_submission = NOWSubmissionFactory(mine=mine)
identity = NOWApplicationIdentityFactory(now_submission=now_submission)
get_resp = test_client.get(
f'/now-submissions/applications/{identity.now_application_guid}',
headers=auth_headers['full_auth_header'])
assert get_resp.status_code == 200, get_resp.response
get_data = json.loads(get_resp.data.decode())
assert get_data['mine_name'] is not None
assert get_data['mine_name'] == mine.mine_name
def test_get_now_submission_by_guid_applicant(self, test_client, db_session, auth_headers):
"""Should include the correct applicant"""
applicant = NOWClientFactory()
now_submission = NOWSubmissionFactory(applicant=applicant)
identity = NOWApplicationIdentityFactory(now_submission=now_submission)
get_resp = test_client.get(
f'/now-submissions/applications/{identity.now_application_guid}',
headers=auth_headers['full_auth_header'])
assert get_resp.status_code == 200, get_resp.response
get_data = json.loads(get_resp.data.decode())
assert get_data['applicant']['type'] is not None
assert get_data['applicant']['type'] == applicant.type
def test_get_now_submission_by_guid_submitter(self, test_client, db_session, auth_headers):
"""Should include the correct submitter"""
submitter = NOWClientFactory()
now_submission = NOWSubmissionFactory(submitter=submitter)
identity = NOWApplicationIdentityFactory(now_submission=now_submission)
get_resp = test_client.get(
f'/now-submissions/applications/{identity.now_application_guid}',
headers=auth_headers['full_auth_header'])
assert get_resp.status_code == 200, get_resp.response
get_data = json.loads(get_resp.data.decode())
assert get_data['submitter']['type'] is not None
assert get_data['submitter']['type'] == submitter.type
def test_get_now_submission_by_guid_documents(self, test_client, db_session, auth_headers):
"""Should include the correct documents"""
now_submission = NOWSubmissionFactory()
identity = NOWApplicationIdentityFactory(now_submission=now_submission)
get_resp = test_client.get(
f'/now-submissions/applications/{identity.now_application_guid}',
headers=auth_headers['full_auth_header'])
assert get_resp.status_code == 200, get_resp.response
get_data = json.loads(get_resp.data.decode())
assert get_data['documents'][0]['filename'] is not None
assert get_data['documents'][0]['filename'] in list(
map(lambda x: x.filename, now_submission.documents))
def test_get_now_submission_by_guid_contacts(self, test_client, db_session, auth_headers):
"""Should include the correct contacts"""
now_submission = NOWSubmissionFactory()
identity = NOWApplicationIdentityFactory(now_submission=now_submission)
get_resp = test_client.get(
f'/now-submissions/applications/{identity.now_application_guid}',
headers=auth_headers['full_auth_header'])
assert get_resp.status_code == 200, get_resp.response
get_data = json.loads(get_resp.data.decode())
assert get_data['contacts'][0]['type'] is not None
assert get_data['contacts'][0]['type'] in list(
map(lambda x: x.type, now_submission.contacts))
def test_get_now_submission_by_guid_existing_placer_activity(self, test_client, db_session,
auth_headers):
"""Should include the correct existing_placer_activity"""
now_submission = NOWSubmissionFactory()
identity = NOWApplicationIdentityFactory(now_submission=now_submission)
get_resp = test_client.get(
f'/now-submissions/applications/{identity.now_application_guid}',
headers=auth_headers['full_auth_header'])
assert get_resp.status_code == 200, get_resp.response
get_data = json.loads(get_resp.data.decode())
assert get_data['existing_placer_activity'][0]['type'] is not None
assert get_data['existing_placer_activity'][0]['type'] in list(
map(lambda x: x.type, now_submission.existing_placer_activity))
def test_get_now_submission_by_guid_proposed_placer_activity(self, test_client, db_session,
auth_headers):
"""Should include the correct proposed_placer_activity"""
now_submission = NOWSubmissionFactory()
identity = NOWApplicationIdentityFactory(now_submission=now_submission)
get_resp = test_client.get(
f'/now-submissions/applications/{identity.now_application_guid}',
headers=auth_headers['full_auth_header'])
assert get_resp.status_code == 200, get_resp.response
get_data = json.loads(get_resp.data.decode())
assert get_data['proposed_placer_activity'][0]['type'] is not None
assert get_data['proposed_placer_activity'][0]['type'] in list(
map(lambda x: x.type, now_submission.proposed_placer_activity))
def test_get_now_submission_by_guid_existing_settling_pond(self, test_client, db_session,
auth_headers):
"""Should include the correct existing_settling_pond"""
now_submission = NOWSubmissionFactory()
identity = NOWApplicationIdentityFactory(now_submission=now_submission)
get_resp = test_client.get(
f'/now-submissions/applications/{identity.now_application_guid}',
headers=auth_headers['full_auth_header'])
assert get_resp.status_code == 200, get_resp.response
get_data = json.loads(get_resp.data.decode())
assert get_data['existing_settling_pond'][0]['pondid'] is not None
assert get_data['existing_settling_pond'][0]['pondid'] in list(
map(lambda x: x.pondid, now_submission.existing_settling_pond))
def test_get_now_submission_by_guid_proposed_settling_pond(self, test_client, db_session,
auth_headers):
"""Should include the correct proposed_settling_pond"""
now_submission = NOWSubmissionFactory()
identity = NOWApplicationIdentityFactory(now_submission=now_submission)
get_resp = test_client.get(
f'/now-submissions/applications/{identity.now_application_guid}',
headers=auth_headers['full_auth_header'])
assert get_resp.status_code == 200, get_resp.response
get_data = json.loads(get_resp.data.decode())
assert get_data['proposed_settling_pond'][0]['pondid'] is not None
assert get_data['proposed_settling_pond'][0]['pondid'] in list(
map(lambda x: x.pondid, now_submission.proposed_settling_pond))
def test_get_now_submission_by_guid_sand_grv_qry_activity(self, test_client, db_session,
auth_headers):
"""Should include the correct sand_grv_qry_activity"""
now_submission = NOWSubmissionFactory()
identity = NOWApplicationIdentityFactory(now_submission=now_submission)
get_resp = test_client.get(
f'/now-submissions/applications/{identity.now_application_guid}',
headers=auth_headers['full_auth_header'])
assert get_resp.status_code == 200, get_resp.response
get_data = json.loads(get_resp.data.decode())
assert get_data['sand_grv_qry_activity'][0]['type'] is not None
assert get_data['sand_grv_qry_activity'][0]['type'] in list(
map(lambda x: x.type, now_submission.sand_grv_qry_activity))
def test_get_now_submission_by_guid_under_exp_new_activity(self, test_client, db_session,
auth_headers):
"""Should include the correct under_exp_new_activity"""
now_submission = NOWSubmissionFactory()
identity = NOWApplicationIdentityFactory(now_submission=now_submission)
get_resp = test_client.get(
f'/now-submissions/applications/{identity.now_application_guid}',
headers=auth_headers['full_auth_header'])
assert get_resp.status_code == 200, get_resp.response
get_data = json.loads(get_resp.data.decode())
assert get_data['under_exp_new_activity'][0]['type'] is not None
assert get_data['under_exp_new_activity'][0]['type'] in list(
map(lambda x: x.type, now_submission.under_exp_new_activity))
def test_get_now_submission_by_guid_under_exp_rehab_activity(self, test_client, db_session,
auth_headers):
"""Should include the correct under_exp_rehab_activity"""
now_submission = NOWSubmissionFactory()
identity = NOWApplicationIdentityFactory(now_submission=now_submission)
get_resp = test_client.get(
f'/now-submissions/applications/{identity.now_application_guid}',
headers=auth_headers['full_auth_header'])
assert get_resp.status_code == 200, get_resp.response
get_data = json.loads(get_resp.data.decode())
assert get_data['under_exp_rehab_activity'][0]['type'] is not None
assert get_data['under_exp_rehab_activity'][0]['type'] in list(
map(lambda x: x.type, now_submission.under_exp_rehab_activity))
def test_get_now_submission_by_guid_under_exp_surface_activity(self, test_client, db_session,
auth_headers):
"""Should include the correct under_exp_surface_activity"""
now_submission = NOWSubmissionFactory()
identity = NOWApplicationIdentityFactory(now_submission=now_submission)
get_resp = test_client.get(
f'/now-submissions/applications/{identity.now_application_guid}',
headers=auth_headers['full_auth_header'])
assert get_resp.status_code == 200, get_resp.response
get_data = json.loads(get_resp.data.decode())
assert get_data['under_exp_surface_activity'][0]['type'] is not None
assert get_data['under_exp_surface_activity'][0]['type'] in list(
map(lambda x: x.type, now_submission.under_exp_surface_activity))
def test_get_now_submission_by_guid_water_source_activity(self, test_client, db_session,
auth_headers):
"""Should include the correct water_source_activity"""
now_submission = NOWSubmissionFactory()
identity = NOWApplicationIdentityFactory(now_submission=now_submission)
get_resp = test_client.get(
f'/now-submissions/applications/{identity.now_application_guid}',
headers=auth_headers['full_auth_header'])
assert get_resp.status_code == 200, get_resp.response
get_data = json.loads(get_resp.data.decode())
assert get_data['water_source_activity'][0]['type'] is not None
assert get_data['water_source_activity'][0]['type'] in list(
map(lambda x: x.type, now_submission.water_source_activity))
stats: avg_line_length=55.110132 | max_line_length=97 | alphanum_fraction=0.681455 | count_classes=12,352 | score_classes=0.98737 | count_generators=0 | score_generators=0 | count_decorators=0 | score_decorators=0 | count_async_functions=0 | score_async_functions=0 | count_documentation=2,836 | score_documentation=0.226699

hexsha: c89c4416cb922696e6077b691fa44b4a364a4846 | size: 447 | ext: py | lang: Python
path: output/models/nist_data/list_pkg/non_positive_integer/schema_instance/nistschema_sv_iv_list_non_positive_integer_enumeration_2_xsd/__init__.py
repo: tefra/xsdata-w3c-tests @ b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | licenses: [MIT]
stars: 1 (2021-08-14T17:59:21.000Z to 2021-08-14T17:59:21.000Z)
issues: 4 (2020-02-12T21:30:44.000Z to 2020-04-15T20:06:46.000Z)
forks: null

from output.models.nist_data.list_pkg.non_positive_integer.schema_instance.nistschema_sv_iv_list_non_positive_integer_enumeration_2_xsd.nistschema_sv_iv_list_non_positive_integer_enumeration_2 import (
NistschemaSvIvListNonPositiveIntegerEnumeration2,
NistschemaSvIvListNonPositiveIntegerEnumeration2Type,
)
__all__ = [
"NistschemaSvIvListNonPositiveIntegerEnumeration2",
"NistschemaSvIvListNonPositiveIntegerEnumeration2Type",
]
stats: avg_line_length=44.7 | max_line_length=201 | alphanum_fraction=0.888143 | count_classes=0 | score_classes=0 | count_generators=0 | score_generators=0 | count_decorators=0 | score_decorators=0 | count_async_functions=0 | score_async_functions=0 | count_documentation=104 | score_documentation=0.232662

hexsha: c89d84cb20f102af7452f0c152beca85a101d946 | size: 386 | ext: py | lang: Python
path: cache-basic.py
repo: kurapikats/python-basics @ 7b81e5e8de44186b573b74f05c78b56894df0ed7 | licenses: [MIT]
stars: null | issues: null | forks: null

import time
def compute(a, b):
time.sleep(2)
return a + b
cache = {}
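# cache_compute memoizes compute(): a repeated (a, b) pair is answered from
# the dict instantly instead of paying the 2-second sleep again.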
def cache_compute(a, b):
if (a, b) in cache.keys():
return cache[a, b]
else:
new = compute(a, b)
cache[a, b] = new
return new
print(cache_compute(1, 2))
print(cache_compute(3, 5))
print(cache_compute(3, 5))
print(cache_compute(6, 7))
print(cache_compute(1, 2))
stats: avg_line_length=14.846154 | max_line_length=30 | alphanum_fraction=0.585492 | count_classes=0 | score_classes=0 | count_generators=0 | score_generators=0 | count_decorators=0 | score_decorators=0 | count_async_functions=0 | score_async_functions=0 | count_documentation=0 | score_documentation=0

hexsha: c8a19d3ee1214101499b5145f53a93867a82f056 | size: 675 | ext: py | lang: Python
path: dl/src/CookieManager.py
repo: PatrykCholewa/PI_Stored @ 4ff4d72fe56281b76ddf7b759c19aabbce3c9899 | licenses: [MIT]
stars: null | issues: null | forks: null

from datetime import datetime
import jwt
from src import ConfigManager
secret = ConfigManager.get_config("DL_COOKIE_SECRET_KEY")
secure = ConfigManager.get_config("APP_SECURE")
def validate_user_jwt(token, username):
    token = jwt.decode(token, secret, algorithms=["HS256"])  # PyJWT expects a list of allowed algorithms
expire = token['exp']
if username != token['user']:
return False
return datetime.now() < datetime.fromtimestamp(expire)
def validate_file_by_jwt(token, file_id):
    token = jwt.decode(token, secret, algorithms=["HS256"])  # PyJWT expects a list of allowed algorithms
expire = token['exp']
file_ids = token['file_list']
if file_id not in file_ids:
return False
return datetime.now() < datetime.fromtimestamp(expire)
stats: avg_line_length=23.275862 | max_line_length=58 | alphanum_fraction=0.708148 | count_classes=0 | score_classes=0 | count_generators=0 | score_generators=0 | count_decorators=0 | score_decorators=0 | count_async_functions=0 | score_async_functions=0 | count_documentation=75 | score_documentation=0.111111

hexsha: c8a2956bd7fb979e05d6c1af9814b3f364a7b696 | size: 2,403 | ext: py | lang: Python
path: printing/Spooler.py
repo: mrlinqu/intsa_term_client @ 596335da6dbdf7eb543b1dcf2c33bcc222aa3321 | licenses: [MIT]
stars: null
issues: 1 (2020-11-07T12:44:56.000Z to 2020-11-07T12:46:52.000Z)
forks: null

# Copyright 2020 by Roman Khuramshin <[email protected]>.
# All rights reserved.
# This file is part of the Intsa Term Client - X2Go terminal client for Windows,
# and is released under the "MIT License Agreement". Please see the LICENSE
# file that should have been included as part of this package.
import logging
import threading
import os
import time
import win32print
from .Handler import Handler
class Spooler(threading.Thread):
isAlive = False
spool_dir = None
def __init__(self, spool_dir, printer=None):
super(Spooler, self).__init__()
self.spool_dir = spool_dir
self.printer = printer if printer else win32print.GetDefaultPrinter()
self.jobs = dict()
pass
@staticmethod
def readJobfile(jobfile):
_job_file_handle = open(jobfile, 'r')
content = _job_file_handle.read()
try:
(pdf_file, job_title) = content.split('\n')[0:2]
except ValueError:
pdf_file = content
job_title = 'X2Go Print Job'
_job_file_handle.close()
return (pdf_file, job_title)
pass
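    # The run loop polls spool_dir every 3 seconds; each '*.ready' job file
    # (see readJobfile above) is handed to a Handler thread exactly once,
    # and onHandled removes the job's files when printing finishes.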
def run(self):
logging.debug('starting print queue thread: %s on dir: %s' % (repr(self), self.spool_dir))
self.isAlive = True
while self.isAlive:
l = os.listdir(self.spool_dir)
job_files = [ jf for jf in l if jf.endswith('.ready') ]
#jobs = []
for jobfile in job_files:
if jobfile in self.jobs:
continue
_jobfile = os.path.join(self.spool_dir, jobfile)
(pdf_file, job_title) = Spooler.readJobfile(_jobfile)
handler = Handler(job_file=jobfile, pdf_file=os.path.join(self.spool_dir, pdf_file),job_title=job_title, onHandled=self.onHandled, printer=self.printer)
handler.start()
self.jobs[jobfile] = handler
time.sleep(3)
logging.debug('print queue thread stoped')
pass
def onHandled(self, jobfile):
_jobfile = os.path.join(self.spool_dir, jobfile)
(pdf_file, job_title) = Spooler.readJobfile(_jobfile)
_pdf_file = os.path.join(self.spool_dir, pdf_file)
os.remove(_pdf_file)
os.remove(_jobfile)
del self.jobs[jobfile]
pass
def stop(self):
self.isAlive = False
pass
pass | 28.607143 | 168 | 0.61881 | 1,997 | 0.831045 | 0 | 0 | 388 | 0.161465 | 0 | 0 | 409 | 0.170204 |

hexsha: c8a3493cfeb4dfbb80acc4a2be0aae2d1cb8c74f | size: 1,264 | ext: py | lang: Python
path: mytests/test_SimpleCalc.py
repo: KishoreParihar/DemoPythonTest @ f9dadbf6cfcd4e6877e31ca65851882f73234307 | licenses: [MIT]
stars: null | issues: null | forks: null

import unittest
import sys
sys.path.append(".")
sys.path.insert(0, '..\\')
from calculator.simplecalculator import Calculator
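# setUpClass/tearDownClass run once for the whole class; setUp/tearDown reset
# the shared Calculator's operands around every individual test.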
class TestSimpleCalc(unittest.TestCase):
@classmethod
def setUpClass(cls):
print ("In setupclass() method")
cls.cal = Calculator(4, 3)
@classmethod
def tearDownClass(cls):
print ("In tearDownClass() method")
del cls.cal
def setUp(self):
print ("In setUp() method")
self.cal.a = 8
self.cal.b = 5
def tearDown(self):
print("In tearDown() method")
self.cal.a = 0
self.cal.b = 0
def test_simpleadd(self):
self.assertAlmostEqual(10, self.cal.add(),delta=3)
def test_simplesub(self):
self.assertGreater(4, self.cal.sub())
def test_simplesubFail(self):
self.assertNotEqual(6, self.cal.sub())
def test_assertIs_multiply(self):
self.cal.a = 4
self.cal.b = 1.2
self.assertIs(type(1.2), type(self.cal.mul()))
def test_divison(self):
self.cal.a = 4
self.cal.b = 0
self.assertRaises(ZeroDivisionError, self.cal.div)
with self.assertRaises(TypeError):
self.cal1 = Calculator()
if __name__ == '__main__':
unittest.main()
stats: avg_line_length=23.849057 | max_line_length=58 | alphanum_fraction=0.606013 | count_classes=1,088 | score_classes=0.860759 | count_generators=0 | score_generators=0 | count_decorators=217 | score_decorators=0.171677 | count_async_functions=0 | score_async_functions=0 | count_documentation=111 | score_documentation=0.087816

hexsha: c8a47ee8db41845109ebaa2bf272e65a01b66623 | size: 2,683 | ext: py | lang: Python
path: argos/countdown.9s.py
repo: solettitiger/countdown @ c5df89c7d67984171de08508ef4433ea9d6fbbd1 | licenses: [MIT]
stars: null | issues: null | forks: null

#!/usr/bin/python3
# -*- coding: utf-8 -*-
import datetime
import sys
import subprocess
import os
from playsound import playsound
# ******************************************************************
# Definitions
# ******************************************************************
filename = 'countdown.txt'
audiofile = 'ringing.mp3'
settimer = 'add.py'
stoptimer = 'stop.py'
overlay = 'overlay.py'
title = "⏰"
zeit = ""
command = ""
path = ""
diff = 0
# ******************************************************************
# Functions
# ******************************************************************
def readdata():
global title, zeit, command, path
full_path = os.path.realpath(__file__)
path, thisfile = os.path.split(full_path)
ff = open(path+"/countdown/"+filename,"r")
ll = ff.readlines()
if(len(ll) == 3):
title = ll[0].strip()
zeit = ll[1].strip()
command = ll[2].strip()
ff.close()
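# countdown.txt carries exactly three lines: a title, the end time as HH:MM,
# and a command to run when the countdown fires (may be empty).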
def gettimediff():
global zeit
now = datetime.datetime.now()
day = datetime.datetime(now.year, now.month, now.day)
endtime = datetime.datetime.strptime(now.strftime("%Y-%m-%d ") + zeit, "%Y-%m-%d %H:%M")
diff = int((endtime-now).seconds/60)
if(diff < 0):
diff = diff + 1440
if(diff < 1 and diff >= -1):
runDone()
else:
zeit = convertTime(diff)
def runDone():
global zeit
    # Run the configured command, if any
if(command != ""):
cmdlist = command.split()
subprocess.Popen(cmdlist, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    # Show the overlay
subprocess.Popen([path+"/countdown/"+overlay, beautifyTimestring(zeit), title], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
zeit = ""
    # Play the sound
playsound(path+"/countdown/"+audiofile)
    # Stop the countdown - this takes as long as the Argos refresh interval
stopCountdown()
def stopCountdown():
ff = open(path+"/countdown/"+filename,"w")
ff.close()
def convertTime(minutes):
hours = int(minutes/60)
minutes = minutes - hours*60
str_hours = "0" + str(hours)
str_minutes = "0" + str(minutes)
return (str_hours[-2:] + ":" + str_minutes[-2:])
def beautifyTimestring(timestring):
times = timestring.split(":")
str_hours = "0" + times[0]
str_minutes = "0" + times[1]
return (str_hours[-2:] + ":" + str_minutes[-2:])
# ******************************************************************
# Main
# ******************************************************************
def main():
readdata()
if(zeit != ""):
gettimediff()
print (title + " " + zeit)
print ("---")
print ("Set Timer | bash='"+ path+"/countdown/"+settimer +"' terminal=false")
print ("Stopp Timer | bash='"+ path+"/countdown/"+stoptimer +"' terminal=false")
if __name__ == "__main__":
main()
stats: avg_line_length=26.83 | max_line_length=151 | alphanum_fraction=0.561685 | count_classes=0 | score_classes=0 | count_generators=0 | score_generators=0 | count_decorators=0 | score_decorators=0 | count_async_functions=0 | score_async_functions=0 | count_documentation=885 | score_documentation=0.329486

hexsha: c8a59080304794abe4b7a5451fd69be502c0aee2 | size: 1,392 | ext: py | lang: Python
path: restapi/v1/serializers.py
repo: asntech/jaspar @ ae86731e8f197d6830e6d778835f218d4eb1b9e8 | licenses: [BSD-3-Clause]
stars: 3 (2017-11-20T23:03:20.000Z to 2020-02-15T19:32:23.000Z)
issues: 3 (2019-12-12T09:26:55.000Z to 2021-06-10T19:24:19.000Z)
forks: null

# -*- coding: utf-8 -*-
## Author: Aziz Khan
## License: GPL v3
## Copyright © 2017 Aziz Khan <azez.khan__AT__gmail.com>
from rest_framework import serializers
from portal.models import Matrix, MatrixAnnotation
from django.http import HttpRequest
class MatrixAnnotationSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = MatrixAnnotation
fields = ('id', 'tag','val')
class MatrixSerializer(serializers.HyperlinkedModelSerializer):
#matrixannotations = MatrixAnnotationSerializer(many=True, read_only=True)
matrix_id = serializers.SerializerMethodField()
url = serializers.SerializerMethodField()
sequence_logo = serializers.SerializerMethodField()
#url = serializers.HyperlinkedIdentityField(view_name='matrix-detail', lookup_field='id')
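    # Each SerializerMethodField above resolves to the matching get_<field>()
    # method below, e.g. matrix_id is composed as '<base_id>.<version>'.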
class Meta:
model = Matrix
#fields = ('__all__')
fields = ('matrix_id', 'name','collection', 'base_id', 'version','sequence_logo','url')
def get_matrix_id(self, obj):
return obj.base_id+'.'+str(obj.version)
def get_sequence_logo(self, obj):
host_name = self.context['request'].build_absolute_uri(location='/')
return str(host_name)+'static/logos/svg/'+obj.base_id+'.'+str(obj.version)+'.svg'
def get_url(self, obj):
host_name = self.context['request'].build_absolute_uri(location='/')
return str(host_name)+'api/v1/matrix/'+obj.base_id+'.'+str(obj.version)+'/'
stats: avg_line_length=29 | max_line_length=90 | alphanum_fraction=0.733477 | count_classes=1,137 | score_classes=0.816224 | count_generators=0 | score_generators=0 | count_decorators=0 | score_decorators=0 | count_async_functions=0 | score_async_functions=0 | count_documentation=460 | score_documentation=0.330223

hexsha: c8a8f855a2d0fbd314903aae2f023f9e8c19884d | size: 5,043 | ext: py | lang: Python
path: multimodal_models/StackGAN_V2_PyTorch/models.py
repo: kumayu0108/model-zoo @ 4285779f6ff51fa1efb0625d67b428e90c343c0c | licenses: [MIT]
stars: 43 (2020-05-16T21:05:34.000Z to 2022-02-08T11:33:29.000Z)
issues: 52 (2020-05-14T16:18:08.000Z to 2021-11-02T19:13:47.000Z)
forks: 69 (2020-05-14T13:39:23.000Z to 2021-07-30T00:51:27.000Z)

import torch
import torch.nn as nn
from generator_model import G1, G2
from helper_functions.Blocks import downBlock, Block3x3_leakRelu
from helper_functions.ret_image import Interpolate, condAugmentation
from helper_functions.initial_weights import weights_init
from helper_functions.losses import KLloss, custom_loss
from helper_functions.Blocks import upScale, normalBlock, Residual
import helper_functions.config as cfg
class GET_IMAGE_G(nn.Module):
def __init__(self, ngf):
super(GET_IMAGE_G, self).__init__()
self.gf_dim = ngf
self.img = nn.Sequential(
nn.Conv2d(ngf, 3, kernel_size=3, stride=1, padding=1, bias=False),
nn.Tanh())
def forward(self, h_code):
out_img = self.img(h_code)
return out_img
class G_NET(nn.Module):
def __init__(self, StageNum, zDim = 100):
super(G_NET, self).__init__()
self.zDim = zDim
self.StageNum = StageNum
self.gf_dim = cfg.generatorDim
self.define_module()
def define_module(self):
self.ca_net = condAugmentation()
if self.StageNum == 1:
self.h_net1 = G1(self.gf_dim * 16, self.zDim)
self.img_net1 = GET_IMAGE_G(self.gf_dim)
elif self.StageNum == 2:
self.h_net1 = G1(self.gf_dim * 16, self.zDim)
self.img_net1 = GET_IMAGE_G(self.gf_dim)
self.h_net2 = G2(self.gf_dim)
self.img_net2 = GET_IMAGE_G(self.gf_dim // 2)
elif self.StageNum == 3:
self.h_net1 = G1(self.gf_dim * 16, self.zDim)
self.img_net1 = GET_IMAGE_G(self.gf_dim)
self.h_net2 = G2(self.gf_dim)
self.img_net2 = GET_IMAGE_G(self.gf_dim // 2)
self.h_net3 = G2(self.gf_dim // 2)
self.img_net3 = GET_IMAGE_G(self.gf_dim // 4)
elif self.StageNum == 4:
self.h_net1 = G1(self.gf_dim * 16, self.zDim)
self.img_net1 = GET_IMAGE_G(self.gf_dim)
self.h_net2 = G2(self.gf_dim)
self.img_net2 = GET_IMAGE_G(self.gf_dim // 2)
self.h_net3 = G2(self.gf_dim // 2)
self.img_net3 = GET_IMAGE_G(self.gf_dim // 4)
self.h_net4 = G2(self.gf_dim // 4, num_residual=1)
self.img_net4 = GET_IMAGE_G(self.gf_dim // 8)
def forward(self, z_code, text_embedding=None):
c_code, mu, logvar = self.ca_net(text_embedding)
fake_imgs = []
if self.StageNum == 1:
h_code1 = self.h_net1(z_code, c_code)
fake_img1 = self.img_net1(h_code1)
fake_imgs.append(fake_img1)
elif self.StageNum == 2:
h_code1 = self.h_net1(z_code, c_code)
fake_img1 = self.img_net1(h_code1)
fake_imgs.append(fake_img1)
h_code2 = self.h_net2(h_code1, c_code)
fake_img2 = self.img_net2(h_code2)
fake_imgs.append(fake_img2)
elif self.StageNum == 3:
h_code1 = self.h_net1(z_code, c_code)
fake_img1 = self.img_net1(h_code1)
fake_imgs.append(fake_img1)
h_code2 = self.h_net2(h_code1, c_code)
fake_img2 = self.img_net2(h_code2)
fake_imgs.append(fake_img2)
h_code3 = self.h_net3(h_code2, c_code)
fake_img3 = self.img_net3(h_code3)
fake_imgs.append(fake_img3)
elif self.StageNum == 4:
h_code1 = self.h_net1(z_code, c_code)
fake_img1 = self.img_net1(h_code1)
fake_imgs.append(fake_img1)
h_code2 = self.h_net2(h_code1, c_code)
fake_img2 = self.img_net2(h_code2)
fake_imgs.append(fake_img2)
h_code3 = self.h_net3(h_code2, c_code)
fake_img3 = self.img_net3(h_code3)
fake_imgs.append(fake_img3)
h_code4 = self.h_net4(h_code3, c_code)
fake_img4 = self.img_net4(h_code4)
fake_imgs.append(fake_img4)
return fake_imgs, mu, logvar
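# NOTE: encode_image_by_16times() is used by eval256 below but is neither
# defined nor imported in this file. A minimal sketch is included here,
# assuming the standard StackGAN-v2 discriminator stem (four stride-2
# convolutions that downsample the input by 16x); the original repo may keep
# this helper elsewhere, e.g. in helper_functions.Blocks.
def encode_image_by_16times(ndf):
    # 3 x H x W --> ndf x H/2 x W/2
    return nn.Sequential(
        nn.Conv2d(3, ndf, 4, 2, 1, bias=False),
        nn.LeakyReLU(0.2, inplace=True),
        # ndf x H/2 x W/2 --> 2ndf x H/4 x W/4
        nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ndf * 2),
        nn.LeakyReLU(0.2, inplace=True),
        # 2ndf x H/4 x W/4 --> 4ndf x H/8 x W/8
        nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ndf * 4),
        nn.LeakyReLU(0.2, inplace=True),
        # 4ndf x H/8 x W/8 --> 8ndf x H/16 x W/16
        nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ndf * 8),
        nn.LeakyReLU(0.2, inplace=True))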
class eval256(nn.Module):
def __init__(self):
super(eval256, self).__init__()
self.df_dim = cfg.discriminatorDim
self.ef_dim = cfg.embeddingsDim
self.define_module()
def define_module(self):
ndf = self.df_dim
efg = self.ef_dim
self.img_code_s16 = encode_image_by_16times(ndf)
self.img_code_s32 = downBlock(ndf * 8, ndf * 16)
self.img_code_s64 = downBlock(ndf * 16, ndf * 32)
self.img_code_s64_1 = Block3x3_leakRelu(ndf * 32, ndf * 16)
self.img_code_s64_2 = Block3x3_leakRelu(ndf * 16, ndf * 8)
self.logits = nn.Sequential(
nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
nn.Sigmoid())
def forward(self, x_var, c_code=None):
x_code = self.img_code_s16(x_var)
x_code = self.img_code_s32(x_code)
x_code = self.img_code_s64(x_code)
x_code = self.img_code_s64_1(x_code)
x_code = self.img_code_s64_2(x_code)
h_c_code = x_code
output = self.logits(h_c_code)
return output.view(-1) | 39.093023 | 78 | 0.615507 | 4,614 | 0.914932 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |

hexsha: c8a9475637b6493e4ff65f91b1c3dca0e1d6f885 | size: 382 | ext: py | lang: Python
path: utils/agro_utils.py
repo: TiagoMarta/data_fusion_Vineyard-Segmentation @ de54e149d36027bb314b5890ea4a1e71ba472d17 | licenses: [Unlicense, MIT]
stars: 3 (2021-08-04T08:03:50.000Z to 2022-03-25T11:22:09.000Z)
issues: null | forks: null

import numpy as np
def NDVI(nir,red):
'''
# https://eos.com/make-an-analysis/ndvi/
Inputs: nxm numpy arrays
NIR – reflection in the near-infrared spectrum
RED – reflection in the red range of the spectrum
'''
num = nir-red
dom = nir+red
ndvi = np.divide(num,dom)
ndvi[np.isnan(ndvi)]=0 # Clean array with nan
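    # Note: np.divide yields NaN only for 0/0; a nonzero numerator over a
    # zero denominator still produces +/-inf, which this cleanup leaves as-is.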
    return(ndvi)

stats: avg_line_length=25.466667 | max_line_length=57 | alphanum_fraction=0.609948 | count_classes=0 | score_classes=0 | count_generators=0 | score_generators=0 | count_decorators=0 | score_decorators=0 | count_async_functions=0 | score_async_functions=0 | count_documentation=225 | score_documentation=0.582902

hexsha: c8a98f7aadc1b3bec71524384698aed558c36091 | size: 3,805 | ext: py | lang: Python
path: generator/api/routes.py
repo: horvathandris/phenoflow @ d0109f3702bc180954051170a56e017af52636fb | licenses: [MIT]
stars: null | issues: null | forks: null

from starlette.applications import Starlette
from starlette.responses import JSONResponse
from api import workflow
import oyaml as yaml
app = Starlette(debug=True)
def generateWorkflow(steps, nested=False):
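  # Walks the steps in order: a step with a concrete implementation language
  # becomes a CWL command-line tool step (python/knime/js), while a step whose
  # implementation holds nested 'steps' recurses into generateWorkflow and is
  # embedded as a sub-workflow, re-exposing its inputModule inputs on the parent.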
generatedWorkflow = workflow.initWorkflow();
generatedWorkflowInputs = {};
generatedSteps = [];
if (not 'external' in steps[0]['type']): generatedWorkflowInputs['potentialCases'] = {'class':'File', 'path':'replaceMe.csv'};
for step in steps:
if('language' in step['implementation']):
# Send extension of last step output to signify workflow output
extension = None;
language = step['implementation']['language'];
if(step==steps[len(steps) - 1]): extension = step['outputs'][0]['extension'];
generatedWorkflow = workflow.createWorkflowStep(generatedWorkflow, step['position'], step['name'], step['type'], language, extension, nested);
generatedWorkflowInputs['inputModule' + str(step['position'])] = {'class':'File', 'path':language + '/' + step['implementation']['fileName']};
# ~MDC For now, we only assume one variable input to each step, the potential cases; and one variable output, the filtered potential cases.
if(language=='python'):
generatedStep = workflow.createPythonStep(step['name'], step['type'], step['doc'], step['inputs'][0]['doc'], step['outputs'][0]['extension'], step['outputs'][0]['doc']).export_string()
elif(language=='knime'):
generatedStep = workflow.createKNIMEStep(step['name'], step['type'], step['doc'], step['inputs'][0]['doc'], step['outputs'][0]['extension'], step['outputs'][0]['doc']).export_string();
elif(language=='js'):
generatedStep = workflow.createJSStep(step['name'], step['type'], step['doc'], step['inputs'][0]['doc'], step['outputs'][0]['extension'], step['outputs'][0]['doc']).export_string();
else:
# Handle unknown language
generatedStep = '';
generatedSteps.append({'name':step['name'], 'type':step['type'], 'workflowId':step['workflowId'], 'content':generatedStep, 'fileName':step['implementation']['fileName']});
else:
nestedWorkflow = generateWorkflow(step['implementation']['steps'], True);
# Update parent workflow to accomodate nested implementation units
nestedWorkflowInputs = nestedWorkflow['workflowInputs'];
nestedWorkflowInputModules = [nestedWorkflowInput for nestedWorkflowInput in nestedWorkflowInputs if 'inputModule' in nestedWorkflowInput];
for workflowInput in nestedWorkflowInputModules: generatedWorkflowInputs['inputModule'+str(step['position'])+'-'+str(list(nestedWorkflowInputModules).index(workflowInput)+1)] = {'class':'File', 'path':nestedWorkflowInputs[workflowInput]['path']};
generatedWorkflow = workflow.createNestedWorkflowStep(generatedWorkflow, step['position'], step['name'], nestedWorkflow);
# If sent a nested workflow to generate, generate this and store it as a step (as opposed to a command line tool)
generatedSteps.append({'name':step['name'], 'type':step['type'], 'workflowId':step['workflowId'], 'content':yaml.dump(nestedWorkflow['workflow'], default_flow_style=False), 'steps':nestedWorkflow['steps']});
return {'workflow':generatedWorkflow.get_dict(), 'steps':generatedSteps, 'workflowInputs':generatedWorkflowInputs}
@app.route('/generate', methods=['POST'])
async def generate(request):
try:
steps = await request.json();
except:
steps = None;
if(steps):
generatedWorkflow = generateWorkflow(steps);
return JSONResponse({'workflow': yaml.dump(generatedWorkflow['workflow'], default_flow_style=False), 'steps': generatedWorkflow['steps'], 'workflowInputs': yaml.dump(generatedWorkflow['workflowInputs'], default_flow_style=False)});
else:
return JSONResponse({});
stats: avg_line_length=57.651515 | max_line_length=252 | alphanum_fraction=0.70276 | count_classes=0 | score_classes=0 | count_generators=0 | score_generators=0 | count_decorators=476 | score_decorators=0.125099 | count_async_functions=434 | score_async_functions=0.11406 | count_documentation=1,259 | score_documentation=0.33088

hexsha: c8abec201704fed99560906ddf5c95d5088bad9f | size: 840 | ext: py | lang: Python
path: heap/maxSlidingWindow.py
repo: saai/LeetcodePythonSolutions @ 201f2054dda3f303ae6a376b40cbc7f98688322c | licenses: [MIT]
stars: null | issues: null | forks: null

class Solution(object):
def maxSlidingWindow(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
res = []
tmp = [] # tmp[0] always save the current windows max
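        # tmp holds indices in decreasing value order: smaller values are
        # evicted from the back, indices that fell out of the window from the
        # front, so nums[tmp[0]] is always the current window's maximum.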
        for i in range(len(nums)):  # range, not xrange, for Python 3 compatibility
if i < k-1: # first k-1 numbers
while tmp and nums[tmp[-1]]<nums[i]: # keep tmp[0] the max
tmp.pop()
tmp.append(i)
continue
while tmp and nums[tmp[-1]] < nums[i]: # find proper location for nums[i]
tmp.pop()
tmp.append(i)
while tmp and tmp[0]<= i-k: #pop the old max values
tmp.pop(0)
res.append(nums[tmp[0]])
return res
stats: avg_line_length=31.111111 | max_line_length=85 | alphanum_fraction=0.42381 | count_classes=770 | score_classes=0.916667 | count_generators=0 | score_generators=0 | count_decorators=0 | score_decorators=0 | count_async_functions=0 | score_async_functions=0 | count_documentation=234 | score_documentation=0.278571

hexsha: c8adae8d9f3f33704f82f32bb3e323260ea0ba97 | size: 29,151 | ext: py | lang: Python
path: tccli/services/tsf/v20180326/help.py
repo: zyh911/tencentcloud-cli @ dfc5dbd660d4c60d265921c4edc630091478fc41 | licenses: [Apache-2.0]
stars: null | issues: null | forks: null

# -*- coding: utf-8 -*-
DESC = "tsf-2018-03-26"
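# INFO maps each TSF API action to its parameters and descriptions; the
# strings below are the Chinese help texts rendered by the tccli CLI.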
INFO = {
"DeletePublicConfig": {
"params": [
{
"name": "ConfigId",
"desc": "配置项ID"
}
],
"desc": "删除公共配置项"
},
"DescribeSimpleGroups": {
"params": [
{
"name": "GroupIdList",
"desc": "部署组ID列表,不填写时查询全量"
},
{
"name": "ApplicationId",
"desc": "应用ID,不填写时查询全量"
},
{
"name": "ClusterId",
"desc": "集群ID,不填写时查询全量"
},
{
"name": "NamespaceId",
"desc": "命名空间ID,不填写时查询全量"
},
{
"name": "Limit",
"desc": "每页条数"
},
{
"name": "Offset",
"desc": "起始偏移量"
},
{
"name": "GroupId",
"desc": "部署组ID,不填写时查询全量"
},
{
"name": "SearchWord",
"desc": "模糊查询,部署组名称,不填写时查询全量"
},
{
"name": "AppMicroServiceType",
"desc": "部署组类型,精确过滤字段,M:service mesh, P:原生应用, M:网关应用"
}
],
"desc": "查询简单部署组列表"
},
"CreateGroup": {
"params": [
{
"name": "ApplicationId",
"desc": "部署组所属的应用ID"
},
{
"name": "NamespaceId",
"desc": "部署组所属命名空间ID"
},
{
"name": "GroupName",
"desc": "部署组名称"
},
{
"name": "ClusterId",
"desc": "集群ID"
},
{
"name": "GroupDesc",
"desc": "部署组描述"
}
],
"desc": "创建容器部署组"
},
"CreateCluster": {
"params": [
{
"name": "ClusterName",
"desc": "集群名称"
},
{
"name": "ClusterType",
"desc": "集群类型"
},
{
"name": "VpcId",
"desc": "私有网络ID"
},
{
"name": "ClusterCIDR",
"desc": "分配给集群容器和服务IP的CIDR"
},
{
"name": "ClusterDesc",
"desc": "集群备注"
},
{
"name": "TsfRegionId",
"desc": "集群所属TSF地域"
},
{
"name": "TsfZoneId",
"desc": "集群所属TSF可用区"
},
{
"name": "SubnetId",
"desc": "私有网络子网ID"
}
],
"desc": "创建集群"
},
"DescribePkgs": {
"params": [
{
"name": "ApplicationId",
"desc": "应用ID(只传入应用ID,返回该应用下所有软件包信息)"
},
{
"name": "SearchWord",
"desc": "查询关键字(支持根据包ID,包名,包版本号搜索)"
},
{
"name": "OrderBy",
"desc": "排序关键字(默认为\"UploadTime\":上传时间)"
},
{
"name": "OrderType",
"desc": "升序:0/降序:1(默认降序)"
},
{
"name": "Offset",
"desc": "查询起始偏移"
},
{
"name": "Limit",
"desc": "返回数量限制"
}
],
"desc": "无"
},
"ModifyContainerReplicas": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID,部署组唯一标识"
},
{
"name": "InstanceNum",
"desc": "实例数量"
}
],
"desc": "修改容器部署组实例数"
},
"DescribeConfigSummary": {
"params": [
{
"name": "ApplicationId",
"desc": "应用ID,不传入时查询全量"
},
{
"name": "SearchWord",
"desc": "查询关键字,模糊查询:应用名称,配置项名称,不传入时查询全量"
},
{
"name": "Offset",
"desc": "偏移量,默认为0"
},
{
"name": "Limit",
"desc": "每页条数,默认为20"
}
],
"desc": "查询配置汇总列表"
},
"DeployContainerGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID,分组唯一标识"
},
{
"name": "Server",
"desc": "镜像server"
},
{
"name": "TagName",
"desc": "镜像版本名称,如v1"
},
{
"name": "InstanceNum",
"desc": "实例数量"
},
{
"name": "Reponame",
"desc": "旧版镜像名,如/tsf/nginx"
},
{
"name": "CpuLimit",
"desc": "最大的 CPU 核数,对应 K8S 的 limit;不填时默认为 request 的 2 倍"
},
{
"name": "MemLimit",
"desc": "最大的内存 MiB 数,对应 K8S 的 limit;不填时默认为 request 的 2 倍"
},
{
"name": "JvmOpts",
"desc": "jvm参数"
},
{
"name": "CpuRequest",
"desc": "分配的 CPU 核数,对应 K8S 的 request"
},
{
"name": "MemRequest",
"desc": "分配的内存 MiB 数,对应 K8S 的 request"
},
{
"name": "DoNotStart",
"desc": "是否不立即启动"
},
{
"name": "RepoName",
"desc": "(优先使用)新版镜像名,如/tsf/nginx"
},
{
"name": "UpdateType",
"desc": "更新方式:0:快速更新 1:滚动更新"
},
{
"name": "UpdateIvl",
"desc": "滚动更新必填,更新间隔"
}
],
"desc": "部署容器应用"
},
"AddClusterInstances": {
"params": [
{
"name": "ClusterId",
"desc": "集群ID"
},
{
"name": "InstanceIdList",
"desc": "云主机ID列表"
},
{
"name": "OsName",
"desc": "操作系统名称"
},
{
"name": "ImageId",
"desc": "操作系统镜像ID"
},
{
"name": "Password",
"desc": "重装系统密码设置"
},
{
"name": "KeyId",
"desc": "重装系统,关联密钥设置"
},
{
"name": "SgId",
"desc": "安全组设置"
},
{
"name": "InstanceImportMode",
"desc": "云主机导入方式,虚拟机集群必填,容器集群不填写此字段,R:重装TSF系统镜像,M:手动安装agent"
}
],
"desc": "添加云主机节点至TSF集群"
},
"DescribePodInstances": {
"params": [
{
"name": "GroupId",
"desc": "实例所属groupId"
},
{
"name": "Offset",
"desc": "偏移量,取值从0开始"
},
{
"name": "Limit",
"desc": "分页个数,默认为20, 取值应为1~50"
}
],
"desc": "获取部署组实例列表"
},
"DescribeServerlessGroups": {
"params": [
{
"name": "SearchWord",
"desc": "搜索字段,模糊搜索groupName字段"
},
{
"name": "ApplicationId",
"desc": "分组所属应用ID"
},
{
"name": "OrderBy",
"desc": "排序字段,默认为 createTime字段,支持id, name, createTime"
},
{
"name": "OrderType",
"desc": "排序方式,默认为1:倒序排序,0:正序,1:倒序"
},
{
"name": "Offset",
"desc": "偏移量,取值从0开始"
},
{
"name": "Limit",
"desc": "分页个数,默认为20, 取值应为1~50"
},
{
"name": "NamespaceId",
"desc": "分组所属名字空间ID"
},
{
"name": "ClusterId",
"desc": "分组所属集群ID"
}
],
"desc": "查询Serverless部署组列表"
},
"CreateNamespace": {
"params": [
{
"name": "NamespaceName",
"desc": "命名空间名称"
},
{
"name": "ClusterId",
"desc": "集群ID"
},
{
"name": "NamespaceDesc",
"desc": "命名空间描述"
},
{
"name": "NamespaceResourceType",
"desc": "命名空间资源类型(默认值为DEF)"
},
{
"name": "NamespaceType",
"desc": "是否是全局命名空间(默认是DEF,表示普通命名空间;GLOBAL表示全局命名空间)"
},
{
"name": "NamespaceId",
"desc": "命名空间ID"
}
],
"desc": "创建命名空间"
},
"DeleteApplication": {
"params": [
{
"name": "ApplicationId",
"desc": "应用ID"
}
],
"desc": "删除应用"
},
"DeleteMicroservice": {
"params": [
{
"name": "MicroserviceId",
"desc": "微服务ID"
}
],
"desc": "删除微服务"
},
"StartGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
}
],
"desc": "启动分组"
},
"DeleteNamespace": {
"params": [
{
"name": "NamespaceId",
"desc": "命名空间ID"
},
{
"name": "ClusterId",
"desc": "集群ID"
}
],
"desc": "删除命名空间"
},
"DescribeGroupInstances": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
},
{
"name": "SearchWord",
"desc": "搜索字段"
},
{
"name": "OrderBy",
"desc": "排序字段"
},
{
"name": "OrderType",
"desc": "排序类型"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "分页个数"
}
],
"desc": "查询虚拟机部署组云主机列表"
},
"DeleteConfig": {
"params": [
{
"name": "ConfigId",
"desc": "配置项ID"
}
],
"desc": "删除配置项"
},
"DescribePublicConfigSummary": {
"params": [
{
"name": "SearchWord",
"desc": "查询关键字,模糊查询:配置项名称,不传入时查询全量"
},
{
"name": "Offset",
"desc": "偏移量,默认为0"
},
{
"name": "Limit",
"desc": "每页条数,默认为20"
}
],
"desc": "查询公共配置汇总列表"
},
"DeletePkgs": {
"params": [
{
"name": "ApplicationId",
"desc": "应用ID"
},
{
"name": "PkgIds",
"desc": "需要删除的程序包ID列表"
}
],
"desc": "从软件仓库批量删除程序包。\n一次最多支持删除1000个包,数量超过1000,返回UpperDeleteLimit错误。"
},
"RevocationPublicConfig": {
"params": [
{
"name": "ConfigReleaseId",
"desc": "配置项发布ID"
}
],
"desc": "撤回已发布的公共配置"
},
"DescribePublicConfigs": {
"params": [
{
"name": "ConfigId",
"desc": "配置项ID,不传入时查询全量,高优先级"
},
{
"name": "Offset",
"desc": "偏移量,默认为0"
},
{
"name": "Limit",
"desc": "每页条数,默认为20"
},
{
"name": "ConfigIdList",
"desc": "配置项ID列表,不传入时查询全量,低优先级"
},
{
"name": "ConfigName",
"desc": "配置项名称,精确查询,不传入时查询全量"
},
{
"name": "ConfigVersion",
"desc": "配置项版本,精确查询,不传入时查询全量"
}
],
"desc": "查询公共配置项列表"
},
"DescribeSimpleClusters": {
"params": [
{
"name": "ClusterIdList",
"desc": "需要查询的集群ID列表,不填或不传入时查询所有内容"
},
{
"name": "ClusterType",
"desc": "需要查询的集群类型,不填或不传入时查询所有内容"
},
{
"name": "Offset",
"desc": "查询偏移量,默认为0"
},
{
"name": "Limit",
"desc": "分页个数,默认为20, 取值应为1~50"
},
{
"name": "SearchWord",
"desc": "对id和name进行关键词过滤"
}
],
"desc": "查询简单集群列表"
},
"CreateServerlessGroup": {
"params": [
{
"name": "ApplicationId",
"desc": "分组所属应用ID"
},
{
"name": "GroupName",
"desc": "分组名称字段,长度1~60,字母或下划线开头,可包含字母数字下划线"
},
{
"name": "NamespaceId",
"desc": "分组所属名字空间ID"
},
{
"name": "ClusterId",
"desc": "分组所属集群ID"
}
],
"desc": "创建Serverless部署组"
},
"DescribeConfigs": {
"params": [
{
"name": "ApplicationId",
"desc": "应用ID,不传入时查询全量"
},
{
"name": "ConfigId",
"desc": "配置项ID,不传入时查询全量,高优先级"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "每页条数"
},
{
"name": "ConfigIdList",
"desc": "配置项ID列表,不传入时查询全量,低优先级"
},
{
"name": "ConfigName",
"desc": "配置项名称,精确查询,不传入时查询全量"
},
{
"name": "ConfigVersion",
"desc": "配置项版本,精确查询,不传入时查询全量"
}
],
"desc": "查询配置项列表"
},
"DescribeConfig": {
"params": [
{
"name": "ConfigId",
"desc": "配置项ID"
}
],
"desc": "查询配置"
},
"DescribeMicroservices": {
"params": [
{
"name": "NamespaceId",
"desc": "命名空间ID"
},
{
"name": "SearchWord",
"desc": "搜索字段"
},
{
"name": "OrderBy",
"desc": "排序字段"
},
{
"name": "OrderType",
"desc": "排序类型"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "分页个数"
}
],
"desc": "获取微服务列表"
},
"StartContainerGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
}
],
"desc": "启动容器部署组"
},
"RemoveInstances": {
"params": [
{
"name": "ClusterId",
"desc": "集群 ID"
},
{
"name": "InstanceIdList",
"desc": "云主机 ID 列表"
}
],
"desc": "从 TSF 集群中批量移除云主机节点"
},
"ExpandGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
},
{
"name": "InstanceIdList",
"desc": "扩容的机器实例ID列表"
}
],
"desc": "虚拟机部署组添加实例"
},
"DeleteGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
}
],
"desc": "删除容器部署组"
},
"DescribeContainerGroupDetail": {
"params": [
{
"name": "GroupId",
"desc": "分组ID"
}
],
"desc": " 容器部署组详情"
},
"DeleteContainerGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID,分组唯一标识"
}
],
"desc": "删除容器部署组"
},
"RollbackConfig": {
"params": [
{
"name": "ConfigReleaseLogId",
"desc": "配置项发布历史ID"
},
{
"name": "ReleaseDesc",
"desc": "回滚描述"
}
],
"desc": "回滚配置"
},
"ModifyMicroservice": {
"params": [
{
"name": "MicroserviceId",
"desc": "微服务 ID"
},
{
"name": "MicroserviceDesc",
"desc": "微服务备注信息"
}
],
"desc": "修改微服务详情"
},
"CreatePublicConfig": {
"params": [
{
"name": "ConfigName",
"desc": "配置项名称"
},
{
"name": "ConfigVersion",
"desc": "配置项版本"
},
{
"name": "ConfigValue",
"desc": "配置项值,总是接收yaml格式的内容"
},
{
"name": "ConfigVersionDesc",
"desc": "配置项版本描述"
},
{
"name": "ConfigType",
"desc": "配置项类型"
}
],
"desc": "创建公共配置项"
},
"DescribeImageTags": {
"params": [
{
"name": "ApplicationId",
"desc": "应用Id"
},
{
"name": "Offset",
"desc": "偏移量,取值从0开始"
},
{
"name": "Limit",
"desc": "分页个数,默认为20, 取值应为1~100"
},
{
"name": "QueryImageIdFlag",
"desc": "不填和0:查询 1:不查询"
},
{
"name": "SearchWord",
"desc": "可用于搜索的 tag 名字"
}
],
"desc": "镜像版本列表"
},
"DescribeServerlessGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
}
],
"desc": "查询Serverless部署组明细"
},
"DescribeMicroservice": {
"params": [
{
"name": "MicroserviceId",
"desc": "微服务ID"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "分页个数"
}
],
"desc": "查询微服务详情"
},
"DescribePublicConfigReleaseLogs": {
"params": [
{
"name": "NamespaceId",
"desc": "命名空间ID,不传入时查询全量"
},
{
"name": "Offset",
"desc": "偏移量,默认为0"
},
{
"name": "Limit",
"desc": "每页条数,默认为20"
}
],
"desc": "查询公共配置发布历史"
},
"DescribeApplicationAttribute": {
"params": [
{
"name": "ApplicationId",
"desc": "应用ID"
}
],
"desc": "获取应用列表其它字段,如实例数量信息等"
},
"RevocationConfig": {
"params": [
{
"name": "ConfigReleaseId",
"desc": "配置项发布ID"
}
],
"desc": "撤回已发布的配置"
},
"ReleasePublicConfig": {
"params": [
{
"name": "ConfigId",
"desc": "配置ID"
},
{
"name": "NamespaceId",
"desc": "命名空间ID"
},
{
"name": "ReleaseDesc",
"desc": "发布描述"
}
],
"desc": "发布公共配置"
},
"ReleaseConfig": {
"params": [
{
"name": "ConfigId",
"desc": "配置ID"
},
{
"name": "GroupId",
"desc": "部署组ID"
},
{
"name": "ReleaseDesc",
"desc": "发布描述"
}
],
"desc": "发布配置"
},
"DescribeReleasedConfig": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
}
],
"desc": "查询group发布的配置"
},
"CreateContainGroup": {
"params": [
{
"name": "ApplicationId",
"desc": "分组所属应用ID"
},
{
"name": "NamespaceId",
"desc": "分组所属命名空间ID"
},
{
"name": "GroupName",
"desc": "分组名称字段,长度1~60,字母或下划线开头,可包含字母数字下划线"
},
{
"name": "InstanceNum",
"desc": "实例数量"
},
{
"name": "AccessType",
"desc": "0:公网 1:集群内访问 2:NodePort"
},
{
"name": "ProtocolPorts",
"desc": "数组对象,见下方定义"
},
{
"name": "ClusterId",
"desc": "集群ID"
},
{
"name": "CpuLimit",
"desc": "最大分配 CPU 核数,对应 K8S limit"
},
{
"name": "MemLimit",
"desc": "最大分配内存 MiB 数,对应 K8S limit"
},
{
"name": "GroupComment",
"desc": "分组备注字段,长度应不大于200字符"
},
{
"name": "UpdateType",
"desc": "更新方式:0:快速更新 1:滚动更新"
},
{
"name": "UpdateIvl",
"desc": "滚动更新必填,更新间隔"
},
{
"name": "CpuRequest",
"desc": "初始分配的 CPU 核数,对应 K8S request"
},
{
"name": "MemRequest",
"desc": "初始分配的内存 MiB 数,对应 K8S request"
}
],
"desc": "创建容器部署组"
},
"DescribePublicConfigReleases": {
"params": [
{
"name": "ConfigName",
"desc": "配置项名称,不传入时查询全量"
},
{
"name": "NamespaceId",
"desc": "命名空间ID,不传入时查询全量"
},
{
"name": "Limit",
"desc": "每页条数"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "ConfigId",
"desc": "配置项ID,不传入时查询全量"
}
],
"desc": "查询公共配置发布信息"
},
"DescribeGroups": {
"params": [
{
"name": "SearchWord",
"desc": "搜索字段"
},
{
"name": "ApplicationId",
"desc": "应用ID"
},
{
"name": "OrderBy",
"desc": "排序字段"
},
{
"name": "OrderType",
"desc": "排序方式"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "分页个数"
},
{
"name": "NamespaceId",
"desc": "命名空间ID"
},
{
"name": "ClusterId",
"desc": "集群ID"
},
{
"name": "GroupResourceTypeList",
"desc": "部署组资源类型列表"
}
],
"desc": "获取虚拟机部署组列表"
},
"DescribeSimpleNamespaces": {
"params": [
{
"name": "NamespaceIdList",
"desc": "命名空间ID列表,不传入时查询全量"
},
{
"name": "ClusterId",
"desc": "集群ID,不传入时查询全量"
},
{
"name": "Limit",
"desc": "每页条数"
},
{
"name": "Offset",
"desc": "起始偏移量"
},
{
"name": "NamespaceId",
"desc": "命名空间ID,不传入时查询全量"
},
{
"name": "NamespaceResourceTypeList",
"desc": "查询资源类型列表"
},
{
"name": "SearchWord",
"desc": "通过id和name进行过滤"
},
{
"name": "NamespaceTypeList",
"desc": "查询的命名空间类型列表"
},
{
"name": "NamespaceName",
"desc": "通过命名空间名精确过滤"
},
{
"name": "IsDefault",
"desc": "通过是否是默认命名空间过滤,不传表示拉取全部命名空间。0:默认,命名空间。1:非默认命名空间"
}
],
"desc": "查询简单命名空间列表 "
},
"DescribeConfigReleaseLogs": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID,不传入时查询全量"
},
{
"name": "Offset",
"desc": "偏移量,默认为0"
},
{
"name": "Limit",
"desc": "每页条数,默认为20"
},
{
"name": "NamespaceId",
"desc": "命名空间ID,不传入时查询全量"
},
{
"name": "ClusterId",
"desc": "集群ID,不传入时查询全量"
},
{
"name": "ApplicationId",
"desc": "应用ID,不传入时查询全量"
}
],
"desc": "查询配置发布历史"
},
"CreateMicroservice": {
"params": [
{
"name": "NamespaceId",
"desc": "命名空间ID"
},
{
"name": "MicroserviceName",
"desc": "微服务名称"
},
{
"name": "MicroserviceDesc",
"desc": "微服务描述信息"
}
],
"desc": "新增微服务"
},
"DescribeDownloadInfo": {
"params": [
{
"name": "ApplicationId",
"desc": "应用ID"
},
{
"name": "PkgId",
"desc": "程序包ID"
}
],
"desc": "TSF上传的程序包存放在腾讯云对象存储(COS)中,通过该API可以获取从COS下载程序包需要的信息,包括包所在的桶、存储路径、鉴权信息等,之后使用COS API(或SDK)进行下载。\nCOS相关文档请查阅:https://cloud.tencent.com/document/product/436"
},
"DeployServerlessGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
},
{
"name": "PkgId",
"desc": "程序包ID"
},
{
"name": "Memory",
"desc": "所需实例内存大小,取值为 1Gi 2Gi 4Gi 8Gi 16Gi,缺省为 1Gi,不传表示维持原态"
},
{
"name": "InstanceRequest",
"desc": "要求最小实例数,取值范围 [1, 4],缺省为 1,不传表示维持原态"
},
{
"name": "StartupParameters",
"desc": "部署组启动参数,不传表示维持原态"
}
],
"desc": "部署Serverless应用"
},
"DescribeGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
}
],
"desc": "查询虚拟机部署组详情"
},
"CreateConfig": {
"params": [
{
"name": "ConfigName",
"desc": "配置项名称"
},
{
"name": "ConfigVersion",
"desc": "配置项版本"
},
{
"name": "ConfigValue",
"desc": "配置项值"
},
{
"name": "ApplicationId",
"desc": "应用ID"
},
{
"name": "ConfigVersionDesc",
"desc": "配置项版本描述"
},
{
"name": "ConfigType",
"desc": "配置项值类型"
}
],
"desc": "创建配置项"
},
"DescribeContainerGroups": {
"params": [
{
"name": "SearchWord",
"desc": "搜索字段,模糊搜索groupName字段"
},
{
"name": "ApplicationId",
"desc": "分组所属应用ID"
},
{
"name": "OrderBy",
"desc": "排序字段,默认为 createTime字段,支持id, name, createTime"
},
{
"name": "OrderType",
"desc": "排序方式,默认为1:倒序排序,0:正序,1:倒序"
},
{
"name": "Offset",
"desc": "偏移量,取值从0开始"
},
{
"name": "Limit",
"desc": "分页个数,默认为20, 取值应为1~50"
},
{
"name": "ClusterId",
"desc": "集群ID"
},
{
"name": "NamespaceId",
"desc": "命名空间 ID"
}
],
"desc": "容器部署组列表"
},
"DeleteImageTags": {
"params": [
{
"name": "ImageTags",
"desc": "镜像版本数组"
}
],
"desc": "批量删除镜像版本"
},
"DescribeClusterInstances": {
"params": [
{
"name": "ClusterId",
"desc": "集群ID"
},
{
"name": "SearchWord",
"desc": "搜索字段"
},
{
"name": "OrderBy",
"desc": "排序字段"
},
{
"name": "OrderType",
"desc": "排序类型"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "分页个数"
}
],
"desc": "查询集群实例"
},
"CreateApplication": {
"params": [
{
"name": "ApplicationName",
"desc": "应用名称"
},
{
"name": "ApplicationType",
"desc": "应用类型,V:虚拟机应用;C:容器应用;S:serverless应用"
},
{
"name": "MicroserviceType",
"desc": "应用微服务类型,M:service mesh应用;N:普通应用;G:网关应用"
},
{
"name": "ApplicationDesc",
"desc": "应用描述"
},
{
"name": "ApplicationLogConfig",
"desc": "应用日志配置项,废弃参数"
},
{
"name": "ApplicationResourceType",
"desc": "应用资源类型,废弃参数"
},
{
"name": "ApplicationRuntimeType",
"desc": "应用runtime类型"
}
],
"desc": "创建应用"
},
"StopGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
}
],
"desc": "停止虚拟机部署组"
},
"ShrinkGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
}
],
"desc": "下线部署组所有机器实例"
},
"DeployGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
},
{
"name": "PkgId",
"desc": "程序包ID"
},
{
"name": "StartupParameters",
"desc": "部署组启动参数"
}
],
"desc": "部署虚拟机部署组应用"
},
"DescribeApplications": {
"params": [
{
"name": "SearchWord",
"desc": "搜索字段"
},
{
"name": "OrderBy",
"desc": "排序字段"
},
{
"name": "OrderType",
"desc": "排序类型"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "分页个数"
},
{
"name": "ApplicationType",
"desc": "应用类型"
},
{
"name": "MicroserviceType",
"desc": "应用的微服务类型"
},
{
"name": "ApplicationResourceTypeList",
"desc": "应用资源类型数组"
}
],
"desc": "获取应用列表"
},
"DeleteServerlessGroup": {
"params": [
{
"name": "GroupId",
"desc": "groupId,分组唯一标识"
}
],
"desc": "删除Serverless部署组"
},
"DescribeUploadInfo": {
"params": [
{
"name": "ApplicationId",
"desc": "应用ID"
},
{
"name": "PkgName",
"desc": "程序包名"
},
{
"name": "PkgVersion",
"desc": "程序包版本"
},
{
"name": "PkgType",
"desc": "程序包类型"
},
{
"name": "PkgDesc",
"desc": "程序包介绍"
}
],
"desc": "TSF会将软件包上传到腾讯云对象存储(COS)。调用此接口获取上传信息,如目标地域,桶,包Id,存储路径,鉴权信息等,之后请使用COS API(或SDK)进行上传。\nCOS相关文档请查阅:https://cloud.tencent.com/document/product/436"
},
"DescribeConfigReleases": {
"params": [
{
"name": "ConfigName",
"desc": "配置项名称,不传入时查询全量"
},
{
"name": "GroupId",
"desc": "部署组ID,不传入时查询全量"
},
{
"name": "NamespaceId",
"desc": "命名空间ID,不传入时查询全量"
},
{
"name": "ClusterId",
"desc": "集群ID,不传入时查询全量"
},
{
"name": "Limit",
"desc": "每页条数"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "ConfigId",
"desc": "配置ID,不传入时查询全量"
},
{
"name": "ApplicationId",
"desc": "应用ID,不传入时查询全量"
}
],
"desc": "查询配置发布信息"
},
"StopContainerGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
}
],
"desc": "停止容器部署组"
},
"DescribeSimpleApplications": {
"params": [
{
"name": "ApplicationIdList",
"desc": "应用ID列表"
},
{
"name": "ApplicationType",
"desc": "应用类型"
},
{
"name": "Limit",
"desc": "每页条数"
},
{
"name": "Offset",
"desc": "起始偏移量"
},
{
"name": "MicroserviceType",
"desc": "微服务类型"
},
{
"name": "ApplicationResourceTypeList",
"desc": "资源类型数组"
},
{
"name": "SearchWord",
"desc": "通过id和name进行关键词过滤"
}
],
"desc": "查询简单应用列表"
},
"DescribePublicConfig": {
"params": [
{
"name": "ConfigId",
"desc": "需要查询的配置项ID"
}
],
"desc": "查询公共配置(单条)"
},
"ModifyContainerGroup": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
},
{
"name": "AccessType",
"desc": "0:公网 1:集群内访问 2:NodePort"
},
{
"name": "ProtocolPorts",
"desc": "ProtocolPorts数组"
},
{
"name": "UpdateType",
"desc": "更新方式:0:快速更新 1:滚动更新"
},
{
"name": "UpdateIvl",
"desc": "更新间隔,单位秒"
}
],
"desc": "修改容器部署组"
},
"DescribeApplication": {
"params": [
{
"name": "ApplicationId",
"desc": "应用ID"
}
],
"desc": "获取应用详情"
},
"ShrinkInstances": {
"params": [
{
"name": "GroupId",
"desc": "部署组ID"
},
{
"name": "InstanceIdList",
"desc": "下线机器实例ID列表"
}
],
"desc": "虚拟机部署组下线实例"
},
"ModifyUploadInfo": {
"params": [
{
"name": "ApplicationId",
"desc": "应用ID"
},
{
"name": "PkgId",
"desc": "调用DescribeUploadInfo接口时返回的软件包ID"
},
{
"name": "Result",
"desc": "COS返回上传结果(默认为0:成功,其他值表示失败)"
},
{
"name": "Md5",
"desc": "程序包MD5"
},
{
"name": "Size",
"desc": "程序包大小(单位字节)"
}
],
"desc": "调用该接口和COS的上传接口后,需要调用此接口更新TSF中保存的程序包状态。\n调用此接口完成后,才标志上传包流程结束。"
},
"AddInstances": {
"params": [
{
"name": "ClusterId",
"desc": "集群ID"
},
{
"name": "InstanceIdList",
"desc": "云主机ID列表"
},
{
"name": "OsName",
"desc": "操作系统名称"
},
{
"name": "ImageId",
"desc": "操作系统镜像ID"
},
{
"name": "Password",
"desc": "重装系统密码设置"
},
{
"name": "KeyId",
"desc": "重装系统,关联密钥设置"
},
{
"name": "SgId",
"desc": "安全组设置"
},
{
"name": "InstanceImportMode",
"desc": "云主机导入方式,虚拟机集群必填,容器集群不填写此字段,R:重装TSF系统镜像,M:手动安装agent"
}
],
"desc": "添加云主机节点至TSF集群"
}
}

stats: avg_line_length=18.567516 | max_line_length=165 | alphanum_fraction=0.392028 | count_classes=0 | score_classes=0 | count_generators=0 | score_generators=0 | count_decorators=0 | score_decorators=0 | count_async_functions=0 | score_async_functions=0 | count_documentation=21,311 | score_documentation=0.597667

hexsha: c8b067f63a4c14a9b78ac5bf7aace3e8420c7a16 | size: 1,729 | ext: py | lang: Python
path: workflow_scripts/test_models.py | head_hexsha: 2fd86acdd51037570e1daefa03873237b76bd5a6 | licenses: [MIT]
stars (repo jcwchen/models): 1 (2020-12-19T14:46:23.000Z to 2020-12-19T14:46:23.000Z)
issues (repo sumit6597/models): null
forks (repo sumit6597/models): 1 (2021-08-08T11:47:35.000Z to 2021-08-08T11:47:35.000Z)

import onnx
from pathlib import Path
import subprocess
import sys
def run_lfs_install():
result = subprocess.run(['git', 'lfs', 'install'], cwd=cwd_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print("Git LFS install completed with return code=" + str(result.returncode))
def pull_lfs_file(file_name):
result = subprocess.run(['git', 'lfs', 'pull', '--include', file_name, '--exclude', '\"\"'], cwd=cwd_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print("LFS pull completed with return code=" + str(result.returncode))
cwd_path = Path.cwd()
# obtain list of added or modified files in this PR
obtain_diff = subprocess.Popen(['git', 'diff', '--name-only', '--diff-filter=AM', 'origin/master', 'HEAD'],
cwd=cwd_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdoutput, stderroutput = obtain_diff.communicate()
diff_list = stdoutput.split()
# identify list of changed onnx models in model Zoo
model_list = [model.decode() for model in diff_list if b".onnx" in model]
# run lfs install before starting the tests
run_lfs_install()
print("\n=== Running ONNX Checker on added models ===\n")
# run checker on each model
failed_models = []
for model_path in model_list:
model_name = model_path.split('/')[-1]
print("Testing:", model_name)
try:
pull_lfs_file(model_path)
model = onnx.load(model_path)
onnx.checker.check_model(model)
print("Model", model_name, "has been successfully checked!")
except Exception as e:
print(e)
failed_models.append(model_path)
if len(failed_models) != 0:
    print(str(len(failed_models)) + " model(s) failed the ONNX checker.")
sys.exit(1)
print(len(model_list), "model(s) checked.")
| 35.285714 | 156 | 0.707924 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 550 | 0.318103 |
c8b4dfd0fac657e7ac7e488ed975872bacfb263c | 25 | py | Python | manager/__init__.py | monocleface/viewer | 8ab47a9e846bd2716fe0208c34f33565513fc3f6 | [
"Apache-2.0"
]
| 6 | 2020-02-28T21:18:16.000Z | 2020-03-13T16:45:57.000Z | manager/__init__.py | monocleface/viewer | 8ab47a9e846bd2716fe0208c34f33565513fc3f6 | [
"Apache-2.0"
]
| 6 | 2020-02-28T12:42:52.000Z | 2020-03-16T03:49:09.000Z | manager/__init__.py | monocleface/viewer | 8ab47a9e846bd2716fe0208c34f33565513fc3f6 | [
"Apache-2.0"
]
| 6 | 2020-03-05T13:04:25.000Z | 2020-03-13T16:46:03.000Z | from .utils import Config | 25 | 25 | 0.84 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c8b5d127b254896268904720f95e3739d411d338 | 1,374 | py | Python | src/classifier/utils/create_data.py | maxscheijen/dutch-sentiment-classifier | 6b3149d906710fadc0b104a9f79ca389a7f5cba3 | [
"Apache-2.0"
]
| null | null | null | src/classifier/utils/create_data.py | maxscheijen/dutch-sentiment-classifier | 6b3149d906710fadc0b104a9f79ca389a7f5cba3 | [
"Apache-2.0"
]
| null | null | null | src/classifier/utils/create_data.py | maxscheijen/dutch-sentiment-classifier | 6b3149d906710fadc0b104a9f79ca389a7f5cba3 | [
"Apache-2.0"
]
| null | null | null | import glob
import pandas as pd
from tqdm import tqdm
from classifier import config
class Dataset:
"""Create dataset class"""
def __init__(self):
# Get all txt files
self.paths = sorted(glob.glob("data/*/*/*.txt"))
self.dataframe = None
def load_data(self):
dfs = [] # initialize list for dataframes
# Loop over all txt files
for filepath in tqdm(self.paths):
# Read text files
with open(filepath, "r") as f:
text = f.read()
# Create label from path
if "pos" in filepath:
sentiment = "positief"
else:
sentiment = "negatief"
# Append dataframe to list
dfs.append(pd.DataFrame({"text": [text],
"sentiment": [sentiment]}))
# Concat DataFrames
self.dataframe = pd.concat(dfs).reset_index(drop=True)
def save_data(self):
# Create train and test split
train_data = self.dataframe.sample(frac=config.SPLIT_SIZE,
random_state=config.SEED)
        # Hold out the rows that were not sampled into the training set
        # (the original `iloc[train_data.index]` reselected the training rows)
        test_data = self.dataframe.drop(train_data.index)
# Save data
        train_data.to_csv(config.TRAIN_DATA, index=False)
        test_data.to_csv(config.TEST_DATA, index=False)
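# Example usage (illustrative sketch; assumes classifier.config defines
# SPLIT_SIZE, SEED, TRAIN_DATA and TEST_DATA as referenced above):
if __name__ == "__main__":
    dataset = Dataset()
    dataset.load_data()
    dataset.save_data()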
| 28.625 | 68 | 0.54294 | 1,286 | 0.935953 | 0 | 0 | 0 | 0 | 0 | 0 | 289 | 0.210335 |
c8b602b1d86d1edc850b44d842ce6f3bb89f273d | 642 | py | Python | pip/setup.py | siphr/urdu-digit | 133fcea917ce4584c2f98b470f9e3063f9f03c99 | [
"MIT"
]
| null | null | null | pip/setup.py | siphr/urdu-digit | 133fcea917ce4584c2f98b470f9e3063f9f03c99 | [
"MIT"
]
| null | null | null | pip/setup.py | siphr/urdu-digit | 133fcea917ce4584c2f98b470f9e3063f9f03c99 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name="urdu_digit",
version="0.0.17",
keywords=["urdu", "numeric", "digit", "converter"],
description="English to Urdu numeric digit converter.",
long_description=open('README.md').read(),
project_urls={
'Homepage': 'https://www.techtum.dev/work-urdu-digit-211001.html',
'Source': 'https://github.com/siphr/urdu-digit',
'Tracker': 'https://github.com/siphr/urdu-digit/issues',
},
author="siphr",
author_email="[email protected]",
packages=['urdu_digit'],
platforms="any",
install_requires=[]
)
| 25.68 | 74 | 0.641745 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 330 | 0.514019 |
c8b68cb341dae475cc25f2d74d8dcd06d0f58623 | 1,682 | py | Python | algorithms/intervals.py | calebperkins/algorithms | 9f4a029261160e6b12b8bedd53f0a0ebf541237a | [
"MIT"
]
| null | null | null | algorithms/intervals.py | calebperkins/algorithms | 9f4a029261160e6b12b8bedd53f0a0ebf541237a | [
"MIT"
]
| null | null | null | algorithms/intervals.py | calebperkins/algorithms | 9f4a029261160e6b12b8bedd53f0a0ebf541237a | [
"MIT"
]
| null | null | null | import collections
Interval = collections.namedtuple("Interval", "start, end")
class AugmentedTree:
"""
An augmented tree for querying intervals. The nodes are ordered by the start interval. The high attribute is the
maximum end interval of the node and any of its children.
    This tree could become imbalanced. More advanced augmented trees should be based on a self-balancing BST.
"""
def __init__(self, interval):
self.interval = interval
self.high = interval.end
self.left = None
self.right = None
def overlaps(self, interval):
i = self.interval
return i.end >= interval.start and i.start <= interval.end
def intersecting(self, interval):
s = [self]
while s:
n = s.pop()
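            # Prune: if the subtree's maximum endpoint is below the query
            # start, nothing under this node can intersect the interval.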
if n.high < interval.start:
continue
if n.overlaps(interval):
yield n.interval
if n.right and n.right.interval.start <= interval.end:
s.append(n.right)
if n.left:
s.append(n.left)
def __lt__(self, other):
return self.interval.start < other.interval.start
def add(self, interval):
# Create a new node and add it to a leaf
m = AugmentedTree(interval)
n = self
while True:
n.high = max(n.high, m.high)
if m < n:
if n.left:
n = n.left
else:
n.left = m
return
else:
if n.right:
n = n.right
else:
n.right = m
return
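# Example usage (illustrative, not part of the original module):
if __name__ == "__main__":
    tree = AugmentedTree(Interval(5, 10))
    tree.add(Interval(1, 3))
    tree.add(Interval(8, 12))
    # Prints the intervals overlapping [9, 20]: (5, 10) and (8, 12)
    print(list(tree.intersecting(Interval(9, 20))))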
| 29 | 116 | 0.521998 | 1,599 | 0.950654 | 385 | 0.228894 | 0 | 0 | 0 | 0 | 365 | 0.217004 |
c8bd12730bd20c4875906f949b15caeb99026f0f | 4,874 | py | Python | utils/visualization.py | yigitozgumus/Polimi_Thesis | 711c1edcf1fdb92fc6c15bf5ab1be141c13995c3 | [
"MIT"
]
| 3 | 2019-07-27T14:00:42.000Z | 2020-01-17T17:07:51.000Z | utils/visualization.py | yigitozgumus/Polimi_Thesis | 711c1edcf1fdb92fc6c15bf5ab1be141c13995c3 | [
"MIT"
]
| null | null | null | utils/visualization.py | yigitozgumus/Polimi_Thesis | 711c1edcf1fdb92fc6c15bf5ab1be141c13995c3 | [
"MIT"
]
| 4 | 2019-10-22T02:58:26.000Z | 2020-10-06T09:59:26.000Z | import numpy as np
import matplotlib.pyplot as plt
def show_anomalies(patch_array):
num_figs = len(patch_array)
fig = plt.figure(figsize=(num_figs * 30, 30))
plt.tight_layout()
for i in range(len(patch_array)):
plt.subplot(num_figs, 1, i + 1)
plt.imshow(patch_array[i])
plt.axis("off")
def make_3_channel(image):
return np.array([[[s, s, s] for s in r] for r in image], dtype="u1")
def add_color_red_2d(image):
#return np.array([[[0.7, s, s] for s in r] for r in image], dtype="u1")
return np.array([[[s, 0, 0] for s in r] for r in image], dtype="u1")
def add_color_green_2d(image):
#return np.array([[[0.4, s, 0.9] for s in r] for r in image], dtype="u1")
return np.array([[[0, s, 0] for s in r] for r in image], dtype="u1")
def add_color_blue_2d(image):
#return np.array([[[s, 0.3, 0.3] for s in r] for r in image], dtype="u1")
return np.array([[[0, 0, s] for s in r] for r in image], dtype="u1")
def paint_image_anomalies(image_list, true_labels, pred_labels):
imgs = []
h_turns = 21
w_turns = 32
    # NOTE: the original loop read from an undefined `img_tag`; pairing each
    # image with its label mask from `true_labels` is an assumed fix
    # (`pred_labels` is kept in the signature but remains unused here).
    for img, img_tag in zip(image_list, true_labels):
        image = make_3_channel(img)
        for adv_h in range(h_turns):
            for adv_w in range(w_turns):
                tag = img_tag[adv_h * 32 : (adv_h + 1) * 32, adv_w * 32 : (adv_w + 1) * 32]
anomaly = np.sum(tag)
if anomaly:
mask = np.array(tag == 255)
image[adv_h * 32 : (adv_h + 1) * 32, adv_w * 32 : (adv_w + 1) * 32, 0][
mask
] = 255
imgs.append(image)
return imgs
def connect_imgs(imgs):
patch = np.squeeze(imgs[0])
for i in range(1, len(imgs)):
patch = np.vstack((patch, np.squeeze(imgs[i])))
return patch
def paint_anomalies(num, patches, scores_pred, tl_bool, statistics=False, show=False):
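    # Reassemble image `num` from its 21x32 grid of 32x32 patches, colouring
    # each patch by prediction outcome: green = true positive, red = false
    # positive, blue = false negative, grayscale = correctly predicted normal.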
patch_image = np.zeros(2064384, dtype=int)
patch_image = patch_image.reshape(672, 1024, 3)
# plt.imshow(patch_image)
tests = patches[672 * num : 672 * (num + 1)]
preds = scores_pred[672 * num : 672 * (num + 1)]
tl_bool = tl_bool.astype(bool)
real = tl_bool[672 * num : 672 * (num + 1)]
height = 21
width = 32
trues = 0
fps = 0
fns = 0
for i in range(height):
for j in range(width):
index = j + (width * i)
if preds[index] and real[index]:
# make it green, correct_guess
add = add_color_green_2d(tests[index] * 255)
trues += 1
elif preds[index]: # false positive
add = add_color_red_2d(tests[index] * 255)
fps += 1
elif real[index]: # False Negative
add = add_color_blue_2d(tests[index] * 255)
fns += 1
else:
add = make_3_channel(tests[index] * 255)
patch_image[i * 32 : (i + 1) * 32, j * 32 : (j + 1) * 32] += add
if statistics:
print("true predictions: {}".format(trues))
print("False Positives: {}".format(fps))
print("False Negatives: {}".format(fns))
if show:
plt.figure(figsize=(15, 15))
plt.imshow(patch_image)
return
return patch_image
def paint_anomalies_pixelwise(num, patches, scores_pred, true_scores, statistics=False, show=False):
patch_image = np.zeros(1972098, dtype=int)
patch_image = patch_image.reshape(662, 993, 3)
tests = patches[660345 * num : 660345 * (num + 1)]
preds = scores_pred[660345 * num : 660345 * (num + 1)]
true_scores = true_scores.astype(bool)
real = true_scores[660345 * num : 660345 * (num + 1)]
height = 662
width = 993
trues, fps, fns = 0, 0, 0
for h in range(height):
for w in range(width):
index = w + (width * h)
if preds[index] and real[index]:
add = add_color_green_2d(tests[index][15:16, 16:17] * 255)
trues += 1
elif preds[index]:
add = add_color_red_2d(tests[index][15:16, 16:17] * 255)
fps += 1
elif real[index]:
add = add_color_blue_2d(tests[index][15:16, 16:17] * 255)
fns += 1
else:
add = make_3_channel(tests[index][15:16, 16:17] * 255)
patch_image[h : (h + 1), w : (w + 1)] += add
if statistics:
print("true predictions: {}".format(trues))
print("False Positives: {}".format(fps))
print("False Negatives: {}".format(fns))
if show:
plt.figure(figsize=(15, 15))
plt.imshow(patch_image)
return
return patch_image
def compute_predictions(scores, percentile):
per = np.percentile(scores, percentile)
predictions = scores >= per
return predictions
| 34.083916 | 100 | 0.55437 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 453 | 0.092942 |
c8c0726d584812a525a610e545b5c0960badaf74 | 18,223 | py | Python | tests/unit/core/tensorrt_loaders.py | ParikhKadam/NeMo | ee11f7c4666d410d91f9da33c61f4819ea625013 | [
"Apache-2.0"
]
| 10 | 2020-03-17T08:32:06.000Z | 2021-04-19T19:03:50.000Z | tests/unit/core/tensorrt_loaders.py | dcmartin/NeMo | d2120a40bf23d3e38ff5677c2685c712f297e6b1 | [
"Apache-2.0"
]
| 1 | 2020-06-11T00:54:42.000Z | 2020-06-11T00:54:42.000Z | tests/unit/core/tensorrt_loaders.py | dcmartin/NeMo | d2120a40bf23d3e38ff5677c2685c712f297e6b1 | [
"Apache-2.0"
]
| 3 | 2020-03-10T05:10:07.000Z | 2020-12-08T01:33:35.000Z | # ! /usr/bin/python
# -*- coding: utf-8 -*-
# =============================================================================
# Copyright 2020 NVIDIA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import time
import warnings
from collections import OrderedDict
import numpy as np
import onnx
import tensorrt as trt
from .tensorrt_format import FormatManager
from .tensorrt_runner import (
DEFAULT_SHAPE_VALUE,
TRT_LOGGER,
TensorRTRunnerV2,
default_value,
find_in_dict,
get_input_metadata_from_profile,
is_dimension_dynamic,
is_shape_dynamic,
is_valid_shape_override,
send_on_queue,
write_timestamped,
)
from nemo import logging, logging_mode
def set_onnx_logging_level(sev):
if sev >= logging.INFO:
warnings.filterwarnings("ignore")
class BaseDataLoader(object):
"""
    Responsible for fetching or generating input data for runners.
"""
def __call__(self, index, input_metadata, input_example=None):
"""
Fetches or generates inputs.
Args:
index (int): The index of inputs to fetch. For any given index, the inputs should always be the same.
input_metadata (OrderedDict[str, Tuple[np.dtype, Tuple[int]]]): Mapping of input names to their data types and shapes.
Returns:
OrderedDict[str, np.ndarray]: Mapping of input names to numpy buffers containing data.
"""
raise NotImplementedError("BaseDataLoader is an abstract class")
class DefaultDataLoader(BaseDataLoader):
def __init__(
self,
seed=None,
default_shape_value=None,
default_shapes=None,
int_min=None,
int_max=None,
float_min=None,
float_max=None,
):
"""
Optional Args:
seed (int): The seed to use when generating random inputs.
default_shape_value (int): The default value to use when a dimension is dynamic.
default_shapes (Dict[str, Tuple[int]]): A mapping of input names to their corresponding shapes.
"""
self.seed = default_value(seed, int(time.time()))
self.default_shapes = default_value(default_shapes, {})
self.default_shape_value = default_value(default_shape_value, DEFAULT_SHAPE_VALUE)
self.int_min = default_value(int_min, 1)
self.int_max = default_value(int_max, 25)
self.float_min = default_value(float_min, -1.0)
self.float_max = default_value(float_max, 1.0)
def __call__(self, index, input_metadata, input_example=None):
logging.debug("Updating seed to: {:}".format(self.seed + index))
rng = np.random.RandomState(self.seed + index)
buffers = OrderedDict()
i = 0
for name, (dtype, shape) in input_metadata.items():
if input_example is not None and (not isinstance(input_example, tuple) or i < len(input_example)):
if isinstance(input_example, tuple):
static_shape = input_example[i].shape
elif isinstance(input_example, OrderedDict):
static_shape = tuple(input_example.values())[i].shape
else:
static_shape = [tuple(input_example.shape)]
elif is_shape_dynamic(shape):
if name in self.default_shapes:
static_shape = self.default_shapes[name]
else:
static_shape = [self.default_shape_value if is_dimension_dynamic(elem) else elem for elem in shape]
if static_shape != shape:
if not is_valid_shape_override(static_shape, shape):
logging.critical(
"Cannot override original shape: {:}, for input: {:} to {:}".format(
shape, name, static_shape
)
)
logging.warning(
"Input: {:}: Adjusted dynamic shape: {:} to: {:}".format(name, shape, static_shape),
mode=logging_mode.ONCE,
)
else:
if name in self.default_shapes:
logging.warning(
"Will not override static shape: {:}, for input: {:}".format(shape, name),
mode=logging_mode.ONCE,
)
static_shape = shape
if input_example is not None and (not isinstance(input_example, tuple) or i < len(input_example)):
if isinstance(input_example, OrderedDict):
buffers[name] = list(input_example.values())[i].cpu()
else:
buffers[name] = input_example[i].cpu() if isinstance(input_example, tuple) else input_example.cpu()
elif np.issubdtype(dtype, np.integer):
buffers[name] = rng.randint(low=self.int_min, high=self.int_max, size=static_shape, dtype=dtype)
elif np.issubdtype(dtype, np.bool_):
buffers[name] = rng.randint(low=0, high=2, size=static_shape).astype(dtype)
else:
buffers[name] = (
rng.random_sample(size=static_shape) * (self.float_max - self.float_min) + self.float_min
).astype(dtype)
buffers[name] = np.array(
buffers[name]
) # To handle scalars. The above functions return a float if shape is ().
# If the shape is 1D, and has a length equal to the rank of the provided default shape, it is
            # likely to be a TRT shape tensor, and so should be overridden such that its value (not shape) is the default shape.
is_shape_tensor = (
(not is_shape_dynamic(shape))
and (name in self.default_shapes)
and (len(shape) == 1)
and (shape[0] == len(self.default_shapes[name]))
)
if is_shape_tensor:
buffers[name] = np.array(self.default_shapes[name], dtype=dtype)
logging.warning(
"Assuming {:} is a shape tensor. Setting to: {:}".format(name, buffers[name]),
mode=logging_mode.ONCE,
)
i = i + 1
return buffers
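# Example (illustrative): generating random buffers for one fp32 input whose
# first dimension is dynamic, overriding the dynamic axis with the default:
#   loader = DefaultDataLoader(seed=1, default_shape_value=4)
#   buffers = loader(0, OrderedDict([("input", (np.float32, (-1, 3)))]))
#   buffers["input"].shape  # -> (4, 3)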
# Caches data loaded by a DataLoader for use across multiple runners.
class DataLoaderCache(object):
def __init__(self, data_loader):
self.data_loader = data_loader
self.cache = {} # Dict[int, OrderedDict[str, np.ndarray]]
def load(self, iteration, input_metadata, input_example=None):
"""
Load the specified iteration from the cache if present, or generate using the data loader.
Args:
iteration (int): The iteration whose data to retrieve.
input_metadata (OrderedDict[str, Tuple[np.dtype, Tuple[int]]]): Input Metadata, including shape and type information. The loader may attempt to match input_metadata when data in the cache does not exactly match a new set of input_metadata.
"""
if iteration not in self.cache:
logging.debug("Iteration {:} not found in cache, generating new buffers for all inputs".format(iteration))
self.cache[iteration] = self.data_loader(iteration, input_metadata, input_example)
if self.cache[iteration] is None:
logging.critical(
"Received no data from data_loader(iteration, input_metadata) for input_metadata: {:}".format(
input_metadata
)
)
else:
logging.info("Found iteration {:} in cache".format(iteration))
feed_dict = OrderedDict()
for index, (name, (dtype, shape)) in enumerate(input_metadata.items()):
cached_name = find_in_dict(name, self.cache[iteration], index)
if cached_name is None:
logging.warning("Could not find input: {:} in cache, regenerating buffers".format(name))
self.cache[iteration] = self.data_loader(iteration, input_metadata, input_example)
cached_name = name
buffer = self.cache[iteration][cached_name]
if dtype != buffer.dtype:
logging.warning(
"Cached buffer data type does not match data type for input: {:}. Note: Cached type: {:}, input type: {:}. Attempting to cast".format(
name, buffer.dtype, dtype
)
)
buffer = buffer.astype(dtype)
if not is_valid_shape_override(buffer.shape, shape):
logging.warning(
"Cached buffer shape does not match shape for input. Note: Cached shape: {:}, input shape: {:}.".format(
buffer.shape, shape
)
)
# Try to permute the shape to match
try:
perm = FormatManager.permutation(
FormatManager.deduce_format(buffer.shape), FormatManager.deduce_format(shape)
)
new_shape = FormatManager.convert(tuple(buffer.shape), FormatManager.deduce_format(shape))
logging.warning(
"Attempting to permute shape: {:} using permutation {:}. New shape: {:}".format(
buffer.shape, perm, new_shape
)
)
buffer = np.transpose(buffer, perm)
except NotImplementedError as err:
# If the FormatManager does not recognize the format, skip permutation.
logging.info("Skipping permutation due to {:}".format(err))
except KeyError as err:
# If the FormatManager cannot generate the permutation for the format combination, skip permutation.
logging.info("Skipping permutation due to {:}".format(err))
feed_dict[name] = buffer
return feed_dict
class BaseModelLoader(object):
"""
Loads a model for a runner.
"""
def __call__(self):
"""
Load the model.
Returns:
A model usable by the runner. The return type is dependent on the runner the loader has been implemented for.
"""
raise NotImplementedError("BaseModelLoader is an abstract class")
class BaseOnnxModelLoader(BaseModelLoader):
def check(self, model):
try:
onnx.checker.check_model(model)
logging.debug("ONNX Checker Passed")
except onnx.checker.ValidationError as err:
logging.warning("ONNX Checker exited with an error: {:}".format(err))
return model
# ONNX loaders return ONNX models in memory.
class OnnxFileLoader(BaseOnnxModelLoader):
def __init__(self, path):
"""
Loads an ONNX model from a file.
Args:
path (str): The path from which to load the model.
"""
self.path = path
def __call__(self):
logging.info("Loading {:}".format(self.path))
return self.check(onnx.load(self.path))
def __str__(self):
return "ONNX Model Loader: {:}".format(self.path)
def __repr__(self):
return self.__str__()
class OnnxNetworkLoader(BaseModelLoader):
def __init__(self, onnx_loader, explicit_precision=None):
"""
Parses an ONNX model to create an engine.
Args:
onnx_loader (Callable() -> onnx.ModelProto): A loader that can supply an ONNX model.
Optional Args:
explicit_precision (bool): Whether to create the network with explicit precision enabled.
"""
self.onnx_loader = onnx_loader
self.explicit_precision = default_value(explicit_precision, False)
def __call__(self):
network = TensorRTRunnerV2.create_network(explicit_precision=self.explicit_precision)
parser = trt.OnnxParser(network, TRT_LOGGER)
success = parser.parse(self.onnx_loader().SerializeToString())
if not success:
for index in range(parser.num_errors):
logging.error(parser.get_error(index))
logging.critical("Could not parse ONNX correctly")
return network, parser
class BuildEngineLoader(BaseModelLoader):
def __init__(
self,
network_loader,
max_workspace_size=None,
fp16_mode=None,
int8_mode=None,
profile_shapes=None,
write_engine=None,
calibrator=None,
preprocess_network=None,
layerwise=None,
):
"""
Uses a TensorRT INetworkDefinition to build an engine
Args:
            network_loader (Callable()->trt.INetworkDefinition): A callable capable of returning a TensorRT INetworkDefinition. The returned network is owned by the BuildEngineLoader and should not be freed manually. The callable may have at most 2 return values if another object needs to be kept alive for the duration of the network, e.g., in the case of a parser. BuildEngineLoader will take ownership of the second return value, and, like the network, it should not be freed by the callable. The first return value must always be the network.
Optional Args:
max_workspace_size (int): The maximum workspace size, in bytes, when building the engine.
fp16_mode (bool): Whether to build the engine with fp16 mode enabled.
int8_mode (bool): Whether to build the engine with int8 mode enabled.
profile_shapes (Dict[str, List[shape, shape, shape]]): A mapping of binding name to min/opt/max shapes. Only needed for networks with dynamic input shapes.
write_engine (str): A directory in which to save the engine.
calibrator (trt_smeagol.runners.tensorrt_runner_v2.Calibrator): An int8 calibrator. Only required in int8 mode when the network does not have explicit precision.
preprocess_network (Callable(trt.INetworkDefinition)): Preprocessing function for the network definition. May be used to modify the network after parsing. This is called before enabling layerwise outputs.
layerwise (bool): Whether to treat the output of every layer as an output of the network. Defaults to False.
"""
self.network_loader = network_loader
self.max_workspace_size = default_value(max_workspace_size, 1 << 24)
self.fp16_mode = default_value(fp16_mode, False)
self.int8_mode = default_value(int8_mode, False)
self.profile_shapes = default_value(profile_shapes, OrderedDict())
self.write_engine = write_engine
self.written_engine_path = None
self.calibrator = calibrator
self.preprocess_network = default_value(preprocess_network, None)
self.layerwise = default_value(layerwise, False)
def __call__(self):
class DummyContextManager(object):
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
return None
network_parser = self.network_loader()
try:
network, parser = network_parser
assert isinstance(network, trt.INetworkDefinition)
except (ValueError, AssertionError):
network = network_parser
parser = DummyContextManager()
with trt.Builder(TRT_LOGGER) as builder, network, parser:
if self.preprocess_network:
logging.debug("Applying network preprocessing: {:}".format(self.preprocess_network))
self.preprocess_network(network)
if self.layerwise:
TensorRTRunnerV2.mark_layerwise(network)
if logging.getEffectiveLevel() <= logging.DEBUG:
TensorRTRunnerV2.log_network(network)
config = builder.create_builder_config()
profile = TensorRTRunnerV2.build_profile(builder, network, self.profile_shapes)
config.add_optimization_profile(profile)
config.max_workspace_size = int(self.max_workspace_size)
if self.fp16_mode:
config.flags = 1 << int(trt.BuilderFlag.FP16)
if self.int8_mode:
config.flags = config.flags | 1 << int(trt.BuilderFlag.INT8)
if not network.has_explicit_precision:
if not self.calibrator:
logging.critical(
"Network does not have explicit precision. A calibrator must be provided in order to use int8 mode."
)
self.calibrator.set_input_metadata(get_input_metadata_from_profile(profile, network))
config.int8_calibrator = self.calibrator
logging.debug("Using builder configuration flags: {:}".format(config.flags))
logging.info(
"Building engine: max workspace size={:} bytes, fp16={:}, int8={:}, layerwise={:}".format(
self.max_workspace_size, self.fp16_mode, self.int8_mode, self.layerwise
)
)
engine = builder.build_engine(network, config)
self.written_engine_path = write_timestamped(
contents=lambda: engine.serialize(), dir=self.write_engine, name="tensorrt_runner_v2.engine"
)
return engine
def get_engine_path(self):
"""
Returns the path at which the engine was written, or None if write_engine was not specified.
"""
return self.written_engine_path
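# Example wiring (illustrative sketch; the composition only uses loaders
# defined in this module):
#   network_loader = OnnxNetworkLoader(OnnxFileLoader("model.onnx"))
#   engine_loader = BuildEngineLoader(network_loader, fp16_mode=True)
#   engine = engine_loader()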
| 43.70024 | 548 | 0.610492 | 16,690 | 0.915876 | 0 | 0 | 0 | 0 | 0 | 0 | 6,482 | 0.355704 |
c8c0d558d52b83f545c1d622f249b8f8181f6952 | 420 | py | Python | vstreamer_server/application/VideoStreamerServerApplication.py | artudi54/video-streamer | 66e5e722ed66abe5877488f177c0ac4f13325382 | [
"MIT"
]
| 2 | 2019-10-08T10:49:52.000Z | 2021-10-01T11:26:31.000Z | vstreamer_server/application/VideoStreamerServerApplication.py | artudi54/video-streamer | 66e5e722ed66abe5877488f177c0ac4f13325382 | [
"MIT"
]
| 1 | 2019-05-16T13:48:29.000Z | 2019-05-16T13:48:49.000Z | vstreamer_server/application/VideoStreamerServerApplication.py | artudi54/video-streamer | 66e5e722ed66abe5877488f177c0ac4f13325382 | [
"MIT"
]
| 1 | 2019-10-08T10:49:56.000Z | 2019-10-08T10:49:56.000Z | import logging
import signal
from PySide2 import QtCore
import vstreamer_utils
class VideoStreamerServerApplication(QtCore.QCoreApplication):
def __init__(self, argv):
super().__init__(argv)
self.setApplicationName("video_streamer_server")
self.logger = vstreamer_utils.make_logger()
vstreamer_utils.set_signal_handlers(self)
self.logger.info("Started server application")
| 28 | 62 | 0.754762 | 338 | 0.804762 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.121429 |
c8c12c77067e0a8b65aeb31d29a9acc363766542 | 2,345 | py | Python | serial_scripts/reset_config/test_reset_config.py | vkolli/5.0_contrail-test | 1793f169a94100400a1b2fafbad21daf5aa4d48a | [
"Apache-2.0"
]
| 1 | 2017-06-13T04:42:34.000Z | 2017-06-13T04:42:34.000Z | serial_scripts/reset_config/test_reset_config.py | vkolli/contrail-test-perf | db04b8924a2c330baabe3059788b149d957a7d67 | [
"Apache-2.0"
]
| 1 | 2021-06-01T22:18:29.000Z | 2021-06-01T22:18:29.000Z | serial_scripts/reset_config/test_reset_config.py | vkolli/contrail-test-perf | db04b8924a2c330baabe3059788b149d957a7d67 | [
"Apache-2.0"
]
| null | null | null | #Define environment variable FABRIC_UTILS_PATH and provide path to fabric_utils before running
import time
import os
from contrail_fixtures import *
import testtools
from tcutils.commands import *
from fabric.context_managers import settings
from tcutils.wrappers import preposttest_wrapper
from tcutils.util import *
from fabric.api import run
from fabric.state import connections
import test
from upgrade.verify import VerifyFeatureTestCases
from base import ResetConfigBaseTest
class TestResetConfig(ResetConfigBaseTest, VerifyFeatureTestCases):
''' Reset all the configurations '''
@classmethod
def setUpClass(cls):
super(TestResetConfig, cls).setUpClass()
        cls.res.setUp(cls.inputs, cls.connections, cls.logger)
def runTest(self):
pass
#end runTest
@preposttest_wrapper
def test_to_reset_config(self):
'''
        1) Create configurations and verify them
        2) Reset all the configurations
        3) Check that all the configurations have been reset
'''
result = True
self.inputs.fixture_cleanup = "no"
self.verify_config_before_feature_test()
username = self.inputs.host_data[self.inputs.cfgm_ip]['username']
password = self.inputs.host_data[self.inputs.cfgm_ip]['password']
with settings(
host_string='%s@%s' % (
username, self.inputs.cfgm_ips[0]),
password = password, warn_only=True, abort_on_prompts=False, debug=True):
fab_path = os.environ.get('FABRIC_UTILS_PATH', '/opt/contrail/utils')
reset_cmd = "cd " +fab_path +";fab reset_config "
self.logger.info("Starting reset configuration")
status = run(reset_cmd)
self.logger.debug("LOG for fab reset_config : %s" % status)
assert not(status.return_code), 'Failed in running : fab reset_config'
result = result and not(status.return_code)
self.logger.info("Reset configuration completed")
project_list = run("source /etc/contrail/openstackrc;keystone tenant-list")
if self.project.project_name in project_list:
assert False,'Failed to reset all the configurations'
self.logger.info("Successfully all the configurations has been reset")
return result
#end test_to_reset_config
| 40.431034 | 94 | 0.690405 | 1,862 | 0.79403 | 0 | 0 | 1,655 | 0.705757 | 0 | 0 | 688 | 0.29339 |
c8c174e66db5ae93829e5da36ac5e18a48241662 | 15,382 | py | Python | server/services/wiki/pages/overview_service.py | hotosm/oeg-reporter | f0c3da80ba380df907a818db224e9ca2ae0018b3 | [
"BSD-2-Clause"
]
| 1 | 2021-02-03T13:37:48.000Z | 2021-02-03T13:37:48.000Z | server/services/wiki/pages/overview_service.py | hotosm/oeg-reporter | f0c3da80ba380df907a818db224e9ca2ae0018b3 | [
"BSD-2-Clause"
]
| 8 | 2020-07-16T23:17:51.000Z | 2020-10-14T20:40:00.000Z | server/services/wiki/pages/overview_service.py | hotosm/oeg-reporter | f0c3da80ba380df907a818db224e9ca2ae0018b3 | [
"BSD-2-Clause"
]
| null | null | null | from server.services.wiki.pages.templates import OverviewPageTemplates
from server.services.wiki.pages.page_service import PageService
from server.services.wiki.mediawiki_service import MediaWikiService
from server.services.wiki.wiki_text_service import WikiTextService
from server.services.wiki.wiki_table_service import WikiTableService
from server.services.wiki.wiki_section_service import WikiSectionService
from server.models.serializers.document import OverviewPageSchema
class OverviewPageService(PageService):
def __init__(self):
self.templates = OverviewPageTemplates()
self.page_fields = [
"organisation.name",
"organisation.url",
"platform.name",
"platform.url",
]
def filter_page_data(self, document_data: dict) -> dict:
"""
Filter required data for the overview page from
document data
Keyword arguments:
document_data -- All required data for a project using
Organised Editing Guidelines
Returns:
overview_page_data -- Dict containing only the required data
for the overview page
"""
overview_page_data = {
"organisation": {
"name": document_data["organisation"]["name"],
"url": document_data["organisation"]["url"],
},
"platform": {
"name": document_data["platform"]["name"],
"url": document_data["platform"]["url"],
},
}
return overview_page_data
def generate_page_sections_dict(self, overview_page_data: dict) -> dict:
"""
Generate dict containing the document content parsed to wikitext
for all sections present in the overview page
Keyword arguments:
overview_page_data -- Dictionary containing the required data for the
overview page sections
Returns:
overview_page_sections -- Dictionary with the document content
parsed to wikitext for the overview
page sections
"""
new_row = self.generate_activities_list_table_row(overview_page_data)
activities_list_section = self.templates.activities_list_section_title
overview_page_sections = {activities_list_section: new_row}
return overview_page_sections
def generate_activities_list_table_row(self, overview_page_data: dict) -> str:
"""
        Generates a new table row for the activities list table
        Keyword arguments:
        overview_page_data -- Dict containing only the required data
                              for the overview page
Returns:
new_row -- String in wikitext format for a new table row
"""
wikitext = WikiTextService()
organisation_name = overview_page_data["organisation"]["name"].capitalize()
organisation_page_title = f"{self.templates.oeg_page}/" f"{organisation_name}"
organisation_link = wikitext.hyperlink_wiki_page(
organisation_page_title, organisation_name
)
platform_link = wikitext.hyperlink_external_link(
overview_page_data["platform"]["name"],
overview_page_data["platform"]["url"],
)
new_row = f"\n| {organisation_link}\n| {platform_link}\n|-"
return new_row
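    # Example row produced for organisation "Acme" on platform "TM" (names are
    # illustrative; the page prefix comes from templates.oeg_page):
    #   "\n| [[Organised_Editing/Activities/Auto_report/Acme|Acme]]\n| [https://tm.example TM]\n|-"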
def create_page(self, document_data: dict) -> None:
"""
Creates a wiki page
Keyword arguments:
document_data -- All required data for a project using
Organised Editing Guidelines
"""
mediawiki = MediaWikiService()
wikitext = WikiTextService()
token = mediawiki.get_token()
page_title = self.templates.oeg_page
overview_page_sections = self.document_to_page_sections(document_data)
sections_text = wikitext.generate_text_from_dict(
self.templates.page_template,
f"=={self.templates.page_initial_section}==",
overview_page_sections,
)
updated_text = WikiTableService().add_table_row(
page_text=sections_text,
new_row=self.generate_activities_list_table_row(document_data),
table_section_title=self.templates.activities_list_section_title,
table_template=self.templates.table_template,
)
if mediawiki.is_existing_page(page_title):
page_text = MediaWikiService().get_page_text(self.templates.oeg_page)
overview_page_table = (
WikiSectionService()
.get_section_table(
page_text, self.templates.activities_list_section_title
)
.string
)
updated_text = WikiTableService().add_table_row(
page_text=page_text,
new_row=self.generate_activities_list_table_row(document_data),
table_section_title=self.templates.activities_list_section_title,
table_template=overview_page_table,
)
mediawiki.edit_page(token, self.templates.oeg_page, updated_text)
else:
mediawiki.create_page(token, page_title, updated_text)
def enabled_to_report(self, document_data: dict):
if MediaWikiService().is_existing_page(self.templates.oeg_page):
overview_dictionary = self.wikitext_to_dict(self.templates.oeg_page)
serialized_overview_page = self.parse_page_to_serializer(
overview_dictionary
)
organisation_names = [
organisation_data["name"]
for organisation_data in serialized_overview_page["organisation"]
]
platform_names = [
platform_data["name"]
for platform_data in serialized_overview_page["platform"]
]
if (
document_data["organisation"]["name"].capitalize() in organisation_names
and document_data["platform"]["name"] in platform_names
):
return False
else:
return True
else:
return True
def edit_page_text(
self, update_fields: dict, overview_page_data: dict, document_data: dict
):
page_text = MediaWikiService().get_page_text(self.templates.oeg_page)
updated_table_fields = self.get_update_table_fields(
update_fields, overview_page_data
)
if updated_table_fields:
overview_page_table = WikiSectionService().get_section_table(
page_text, self.templates.activities_list_section_title
)
project_list_section_title = (
f"\n=={self.templates.page_initial_section}==\n"
f"==={self.templates.activities_list_section_title}===\n"
)
updated_text = WikiTableService().edit_table(
overview_page_table.string,
project_list_section_title,
updated_table_fields,
)
return updated_text
else:
return page_text
def edit_page(
self, document_data: dict, update_fields: dict, overview_page_data: dict
):
mediawiki = MediaWikiService()
token = mediawiki.get_token()
updated_text = self.edit_page_text(
update_fields, overview_page_data, document_data
)
mediawiki.edit_page(token, self.templates.oeg_page, updated_text)
def table_field_updated(self, update_fields: dict, overview_page_data: dict):
if "platform" in update_fields.keys():
return WikiTextService().hyperlink_external_link(
overview_page_data["platform"]["name"],
overview_page_data["platform"]["url"],
)
elif "organisation" in update_fields.keys():
organisation_page_title = (
f"{self.templates.oeg_page}/"
f"{overview_page_data['organisation']['name'].capitalize()}"
)
return WikiTextService().hyperlink_wiki_page(
organisation_page_title,
overview_page_data["organisation"]["name"].capitalize(),
)
else:
return False
def get_update_table_fields(self, update_fields, overview_page_data):
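        # Build a {column_title: {"current": ..., "update": ...}} mapping that
        # WikiTableService.edit_table uses to rewrite the affected table cells.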
current_organisation_page_title = (
"Organised_Editing/Activities/Auto_report/"
f"{overview_page_data['organisation']['name'].capitalize()}"
)
current_row_data = {
"organisation": WikiTextService().hyperlink_wiki_page(
current_organisation_page_title,
overview_page_data["organisation"]["name"].capitalize(),
),
"platform": WikiTextService().hyperlink_external_link(
overview_page_data["platform"]["name"],
overview_page_data["platform"]["url"],
),
}
if (
"platform" in update_fields.keys()
and "organisation" in update_fields.keys()
):
update_platform_name = (
update_fields["platform"]["name"]
if "name" in update_fields["platform"].keys()
else overview_page_data["platform"]["name"]
)
update_platform_url = (
update_fields["platform"]["url"]
if "url" in update_fields["platform"].keys()
else overview_page_data["platform"]["url"]
)
update_organisation_name = (
update_fields["organisation"]["name"].capitalize()
if "name" in update_fields["organisation"].keys()
else overview_page_data["organisation"]["name"].capitalize()
)
update_organisation_page_title = (
"Organised_Editing/Activities/Auto_report/"
f"{update_organisation_name.capitalize()}"
)
update_fields = {
self.templates.overview_list_organisation_name_column: {
"current": current_row_data["organisation"],
"update": WikiTextService().hyperlink_wiki_page(
update_organisation_page_title,
update_organisation_name.capitalize(),
),
},
self.templates.overview_list_platform_name_column: {
"current": current_row_data["platform"],
"update": WikiTextService().hyperlink_external_link(
update_platform_name, update_platform_url
),
},
}
return update_fields
elif "platform" in update_fields.keys():
update_platform_name = (
update_fields["platform"]["name"]
if "name" in update_fields["platform"].keys()
else overview_page_data["platform"]["name"]
)
update_platform_url = (
update_fields["platform"]["url"]
if "url" in update_fields["platform"].keys()
else overview_page_data["platform"]["url"]
)
update_fields = {
self.templates.overview_list_organisation_name_column: {
"current": current_row_data["organisation"],
"update": current_row_data["organisation"],
},
self.templates.overview_list_platform_name_column: {
"current": current_row_data["platform"],
"update": WikiTextService().hyperlink_external_link(
update_platform_name, update_platform_url
),
},
}
return update_fields
elif "organisation" in update_fields.keys():
update_organisation_name = (
update_fields["organisation"]["name"].capitalize()
if "name" in update_fields["organisation"].keys()
else overview_page_data["organisation"]["name"].capitalize()
)
update_organisation_page_title = (
"Organised_Editing/Activities/Auto_report/"
f"{update_organisation_name.capitalize()}"
)
update_fields = {
self.templates.overview_list_organisation_name_column: {
"current": current_row_data["organisation"],
"update": WikiTextService().hyperlink_wiki_page(
update_organisation_page_title,
update_organisation_name.capitalize(),
),
},
self.templates.overview_list_platform_name_column: {
"current": current_row_data["platform"],
"update": current_row_data["platform"],
},
}
return update_fields
else:
return False
def parse_page_to_serializer(self, page_dictionary: dict):
overview_page_data = {"organisation": [], "platform": []}
overview_page_table_text = page_dictionary[self.templates.page_initial_section][
self.templates.activities_list_section_title
]
(
platform_list,
organisation_list,
) = self.get_overview_page_platforms_and_organisations(overview_page_table_text)
overview_page_data["organisation"] = organisation_list
overview_page_data["platform"] = platform_list
# Validate
overview_page_schema = OverviewPageSchema(partial=True)
overview_page_schema.load(overview_page_data)
return overview_page_data
def get_overview_page_platforms_and_organisations(
self, overview_page_table_text: str
):
overview_page_table = WikiTableService().get_text_table(
overview_page_table_text
)
overview_page_table_data = overview_page_table.data(span=False)
organisation_list = []
platform_list = []
wikitext = WikiTextService()
for table_row_number, table_row_data in enumerate(
overview_page_table_data[1:], start=1
):
hyperlinked_organisation_url = overview_page_table.cells(
row=table_row_number,
column=self.templates.overview_list_organisation_name_column,
).value
hyperlinked_platform_url = overview_page_table.cells(
row=table_row_number,
column=self.templates.overview_list_platform_name_column,
).value
organisation_list.append(
{
"name": wikitext.get_page_link_and_text_from_wiki_page_hyperlink(
hyperlinked_organisation_url
)[1]
}
)
(
platform_url,
platform_name,
) = wikitext.get_page_link_and_text_from_external_hyperlink(
hyperlinked_platform_url
)
platform_list.append({"name": platform_name, "url": platform_url})
return platform_list, organisation_list
| 39.64433 | 88 | 0.594981 | 14,901 | 0.96873 | 0 | 0 | 0 | 0 | 0 | 0 | 3,046 | 0.198024 |
c8c21cc5ec4a4f6297ac9cc8b0615e326672a6bb | 414 | py | Python | App/migrations/0011_playlist_preferences.py | dlanghorne0428/StudioMusicPlayer | 54dabab896b96d90b68d6435edfd52fe6a866bc2 | [
"MIT"
]
| null | null | null | App/migrations/0011_playlist_preferences.py | dlanghorne0428/StudioMusicPlayer | 54dabab896b96d90b68d6435edfd52fe6a866bc2 | [
"MIT"
]
| 44 | 2022-01-21T01:33:59.000Z | 2022-03-26T23:35:25.000Z | App/migrations/0011_playlist_preferences.py | dlanghorne0428/StudioMusicPlayer | 54dabab896b96d90b68d6435edfd52fe6a866bc2 | [
"MIT"
]
| null | null | null | # Generated by Django 4.0 on 2022-03-06 02:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('App', '0010_remove_user_percentage_preferences_user_preferences'),
]
operations = [
migrations.AddField(
model_name='playlist',
name='preferences',
field=models.JSONField(null=True),
),
]
| 21.789474 | 76 | 0.628019 | 323 | 0.780193 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.316425 |
c8c3d449685f28e78f767aafb617c4bfc465febb | 2,779 | py | Python | emerald/database_operations.py | femmerling/EmeraldBox | 68f5776577f0c929ca1f5ba23f1dfe480f813037 | [
"MIT"
]
| 17 | 2015-01-15T21:41:16.000Z | 2021-01-10T15:34:09.000Z | emerald/database_operations.py | femmerling/EmeraldBox | 68f5776577f0c929ca1f5ba23f1dfe480f813037 | [
"MIT"
]
| null | null | null | emerald/database_operations.py | femmerling/EmeraldBox | 68f5776577f0c929ca1f5ba23f1dfe480f813037 | [
"MIT"
]
| 5 | 2015-02-07T02:41:18.000Z | 2016-11-11T02:50:21.000Z | import imp
import os.path
from app import db
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
def db_create():
# This creates the new database.
db.create_all()
# If no repo existed, the creation will prepare for the first migration.
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print '\nDatabase creation completed\n'
else:
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO))
def db_migrate():
# This is used for database migration. Newly created database should go through this as well.
migration = SQLALCHEMY_MIGRATE_REPO + '/versions/%03d_migration.py' % (api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO) + 1)
tmp_module = imp.new_module('old_model')
old_model = api.create_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
exec old_model in tmp_module.__dict__
script = api.make_update_script_for_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, tmp_module.meta, db.metadata)
open(migration, "wt").write(script)
api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print 'New migration saved as ' + migration
print 'Current database version: ' + str(api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)) + '\n'
def db_upgrade():
# This is used for database migration upgrade.
api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print 'Database upgrade completed!'
print 'Current database version is: ' + str(api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO))
def db_downgrade(version=None):
# This is used to downgrade the database schema to a certain version or to one version before.
# If you know exactly the version you wish to use then you can directly downgrade to that version.
if not version:
current_version = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
downgrade_version = current_version - 1
else:
downgrade_version = version
api.downgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, downgrade_version)
print 'Database downgrade completed!'
print 'Current database version: ' + str(api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO))
def db_version():
# this is used to get the latest version in the database
current_version = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print 'The current database version is ' + str(current_version)
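# Typical workflow (illustrative): create the database, then generate and
# apply a migration after model changes, or roll back one version:
#   db_create(); db_migrate(); db_upgrade()
#   db_downgrade()  # one version back, or db_downgrade(version) for a target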
# end of file | 41.477612 | 144 | 0.77366 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 812 | 0.292191 |
c8c574de241b0c8199ec3be2586cfc5532691047 | 5,253 | py | Python | xmuda/eval_sem_pcd.py | anhquancao/xmuda-extend | 4b670ec2f6766e3a624e81dbe5d97b209c1c4f76 | [
"Apache-2.0"
]
| null | null | null | xmuda/eval_sem_pcd.py | anhquancao/xmuda-extend | 4b670ec2f6766e3a624e81dbe5d97b209c1c4f76 | [
"Apache-2.0"
]
| null | null | null | xmuda/eval_sem_pcd.py | anhquancao/xmuda-extend | 4b670ec2f6766e3a624e81dbe5d97b209c1c4f76 | [
"Apache-2.0"
]
| null | null | null | from xmuda.models.SSC2d_proj3d2d import SSC2dProj3d2d
from xmuda.data.NYU.nyu_dm import NYUDataModule
from xmuda.data.semantic_kitti.kitti_dm import KittiDataModule
from xmuda.common.utils.sscMetrics import SSCMetrics
from xmuda.data.NYU.params import class_relation_freqs as NYU_class_relation_freqs, class_freq_1_4 as NYU_class_freq_1_4, class_freq_1_8 as NYU_class_freq_1_8, class_freq_1_16 as NYU_class_freq_1_16
import numpy as np
import torch
import torch.nn.functional as F
from xmuda.models.ssc_loss import get_class_weights
from tqdm import tqdm
import pickle
import os
#model_path = "/gpfsscratch/rech/kvd/uyl37fq/logs/no_mask_255/v12_removeCPThreshold_KLnonzeros_LRDecay30_NYU_1_0.0001_0.0001_CPThreshold0.0_CEssc_MCAssc_ProportionLoss_CERel_CRCP_Proj_2_4_8/checkpoints/epoch=030-val/mIoU=0.26983.ckpt"
model_path = "/gpfsscratch/rech/kvd/uyl37fq/logs/kitti/v12_ProjectScale2_CPAt1_8_1divlog_LargerFOV_kitti_1_FrusSize_4_WD0_lr0.0001_CEssc_MCAssc_ProportionLoss_CERel_CRCP_Proj_2_4_8/checkpoints/epoch=037-val/mIoU=0.11056.ckpt"
class_weights = {
'1_4': get_class_weights(NYU_class_freq_1_4).cuda(),
'1_8': get_class_weights(NYU_class_freq_1_8).cuda(),
'1_16': get_class_weights(NYU_class_freq_1_16).cuda(),
}
#dataset = "NYU"
dataset = "kitti"
if dataset == "NYU":
NYU_root = "/gpfswork/rech/kvd/uyl37fq/data/NYU/depthbin"
NYU_preprocess_dir = "/gpfsscratch/rech/kvd/uyl37fq/precompute_data/NYU"
kitti_root = "/gpfswork/rech/kvd/uyl37fq/data/semantic_kitti"
full_scene_size = (240, 144, 240)
output_scene_size = (60, 36, 60)
NYUdm = NYUDataModule(NYU_root, NYU_preprocess_dir, batch_size=4, num_workers=3)
NYUdm.setup()
_C = 12
data_loader = NYUdm.val_dataloader()
else:
kitti_root = "/gpfswork/rech/kvd/uyl37fq/data/semantic_kitti"
kitti_depth_root = "/gpfsscratch/rech/kvd/uyl37fq/Adabin/KITTI/"
kitti_logdir = '/gpfsscratch/rech/kvd/uyl37fq/logs/kitti'
kitti_tsdf_root = "/gpfsscratch/rech/kvd/uyl37fq/sketch_dataset/TSDF_pred_depth_adabin/kitti"
kitti_label_root = "/gpfsscratch/rech/kvd/uyl37fq/sketch_dataset/labels/kitti"
kitti_occ_root = "/gpfsscratch/rech/kvd/uyl37fq/sketch_dataset/occupancy_adabin/kitti"
kitti_sketch_root = "/gpfsscratch/rech/kvd/uyl37fq/sketch_dataset/sketch_3D/kitti"
kitti_mapping_root = "/gpfsscratch/rech/kvd/uyl37fq/sketch_dataset/mapping_adabin/kitti"
full_scene_size = (256, 256, 32)
KITTIdm = KittiDataModule(root=kitti_root,
data_aug=True,
TSDF_root=kitti_tsdf_root,
label_root=kitti_label_root,
mapping_root=kitti_mapping_root,
occ_root=kitti_occ_root,
depth_root=kitti_depth_root,
sketch_root=kitti_sketch_root,
batch_size=1,
num_workers=3)
KITTIdm.setup()
_C = 20
data_loader = KITTIdm.val_dataloader()
class_relation_weights = get_class_weights(NYU_class_relation_freqs)
model = SSC2dProj3d2d.load_from_checkpoint(model_path)
model.cuda()
model.eval()
count = 0
out_dict = {}
write_path = "/gpfsscratch/rech/kvd/uyl37fq/temp/draw_output/kitti"
with torch.no_grad():
for batch in tqdm(data_loader):
if dataset == "NYU":
y_true = batch['ssc_label_1_4'].detach().cpu().numpy()
valid_pix_4 = batch['valid_pix_4']
else:
y_true = batch['ssc_label_1_1'].detach().cpu().numpy()
# valid_pix_1 = batch['valid_pix_1']
valid_pix_1 = batch['valid_pix_double']
batch['img'] = batch['img'].cuda()
pred = model(batch)
y_pred = torch.softmax(pred['ssc'], dim=1).detach().cpu().numpy()
y_pred = np.argmax(y_pred, axis=1)
for i in range(y_true.shape[0]):
out_dict = {
"y_pred": y_pred[i].astype(np.uint16),
"y_true": y_true[i].astype(np.uint16),
}
if dataset == "NYU":
filepath = os.path.join(write_path, batch['name'][i] + ".pkl")
out_dict["cam_pose"] = batch['cam_pose'][i].detach().cpu().numpy()
out_dict["vox_origin"] = batch['vox_origin'][i].detach().cpu().numpy()
elif dataset == "kitti":
filepath = os.path.join(write_path, batch['sequence'][i], batch['frame_id'][i] + ".pkl")
out_dict['valid_pix_1'] = valid_pix_1[i].detach().cpu().numpy()
out_dict['cam_k'] = batch['cam_k'][i].detach().cpu().numpy()
out_dict['T_velo_2_cam'] = batch['T_velo_2_cam'][i].detach().cpu().numpy()
os.makedirs(os.path.join(write_path, batch['sequence'][i]), exist_ok=True)
with open(filepath, 'wb') as handle:
pickle.dump(out_dict, handle)
print("wrote to", filepath)
count += 1
# if count == 4:
# break
# write_path = "/gpfsscratch/rech/kvd/uyl37fq/temp/output"
# filepath = os.path.join(write_path, "output.pkl")
# with open(filepath, 'wb') as handle:
# pickle.dump(out_dict, handle)
# print("wrote to", filepath)
| 44.897436 | 234 | 0.663811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,744 | 0.332001 |
c8c6f7ca2165cf621b2f2448c66168d6e16e7af2 | 9,695 | py | Python | hnn/src/apps/dataparallel.py | anlewy/mt-dnn | eeb6f01ce0630e61a52b8c9c6f7537cd34978e45 | [
"MIT"
]
| 2,075 | 2019-02-25T08:54:38.000Z | 2022-03-31T10:44:50.000Z | hnn/src/apps/dataparallel.py | anlewy/mt-dnn | eeb6f01ce0630e61a52b8c9c6f7537cd34978e45 | [
"MIT"
]
| 176 | 2019-03-12T02:58:42.000Z | 2022-03-22T20:17:23.000Z | hnn/src/apps/dataparallel.py | anlewy/mt-dnn | eeb6f01ce0630e61a52b8c9c6f7537cd34978e45 | [
"MIT"
]
| 437 | 2019-03-11T21:36:21.000Z | 2022-03-29T02:40:53.000Z | # Author: [email protected]
# Date: 05/30/2019
#
""" Data parallel module
"""
from collections import OrderedDict
import numpy as np
import torch
from torch.cuda.comm import broadcast_coalesced
from torch.cuda.comm import reduce_add_coalesced
from torch.nn.parallel import parallel_apply
from torch.nn.parallel.scatter_gather import scatter_kwargs, gather
from bert.optimization import BertAdam
def replicate(network, devices):
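    # Make one full copy of the network on every device: parameters and
    # buffers are broadcast in coalesced chunks, then the module tree is
    # rebuilt per replica; copied parameters keep their requires_grad flags
    # so each replica remains trainable.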
devices = tuple(devices)
num_replicas = len(devices)
params = list(network.parameters())
param_indices = {param: idx for idx, param in enumerate(params)}
param_copies = broadcast_coalesced(params, devices)
buffers = list(network._all_buffers())
buffer_indices = {buf: idx for idx, buf in enumerate(buffers)}
buffer_copies = broadcast_coalesced(buffers, devices)
modules = list(network.modules())
module_copies = [[] for device in devices]
module_indices = {}
for i, module in enumerate(modules):
module_indices[module] = i
for j in range(num_replicas):
replica = module.__new__(type(module))
replica.__dict__ = module.__dict__.copy()
replica._parameters = replica._parameters.copy()
replica._buffers = replica._buffers.copy()
replica._modules = replica._modules.copy()
module_copies[j].append(replica)
for i, module in enumerate(modules):
for key, child in module._modules.items():
if child is None:
for j in range(num_replicas):
replica = module_copies[j][i]
replica._modules[key] = None
else:
module_idx = module_indices[child]
for j in range(num_replicas):
replica = module_copies[j][i]
replica._modules[key] = module_copies[j][module_idx]
for key, param in module._parameters.items():
if param is None:
for j in range(num_replicas):
replica = module_copies[j][i]
replica._parameters[key] = None
else:
param_idx = param_indices[param]
for j in range(num_replicas):
replica = module_copies[j][i]
replica._parameters[key] = param_copies[j][param_idx]
replica._parameters[key].requires_grad = param.requires_grad
for key, buf in module._buffers.items():
if buf is None:
for j in range(num_replicas):
replica = module_copies[j][i]
replica._buffers[key] = None
else:
buffer_idx = buffer_indices[buf]
for j in range(num_replicas):
replica = module_copies[j][i]
replica._buffers[key] = buffer_copies[j][buffer_idx]
return [module_copies[j][0] for j in range(num_replicas)]
class XDataParallel(torch.nn.Module):
def __init__(self, module):
super().__init__()
self.device_ids = [i for i in range(torch.cuda.device_count())]
module = module.cuda(self.device_ids[0])
self.replicas = replicate(module, self.device_ids)
self.output_device = self.device_ids[0]
self.dim = 0
self.module = module
def forward(self, *inputs, **kwargs):
#if not self.device_ids:
# return self.module(*inputs, **kwargs)
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
#if len(self.device_ids) == 1:
# return self.module(*inputs[0], **kwargs[0])
#replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
outputs = self.parallel_apply(self.replicas[:len(inputs)], inputs, kwargs)
return self.gather(outputs, self.output_device)
def state_dict(self, destination=None, prefix='', keep_vars=False):
sd = self.replicas[0].state_dict(destination, prefix, keep_vars)
return sd
def eval(self):
for m in self.replicas:
m.eval()
return self
def train(self, mode=True):
for m in self.replicas:
m.train(mode)
return self
def zero_grad(self):
for m in self.replicas:
for p in m.parameters():
p.grad = None
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
def parallel_apply(self, replicas, inputs, kwargs):
return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
def gather(self, outputs, output_device):
return gather(outputs, output_device, dim=self.dim)
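# Example usage (illustrative sketch; assumes CUDA devices are available and
# that `net`, `batch` and `args` are defined by the caller):
#   model = XDataParallel(net)
#   optimizer = XParallelOptimizer(model, optimizer_factory(args, total_steps))
#   loss = model(batch).mean()
#   loss.backward()
#   optimizer.step()          # returns False if gradients overflowed (inf/nan)
#   optimizer.zero_grad()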
class XParallelOptimizer():
def __init__(self, model, optimizer_fn, grad_clip_norm=1.0):
self.replicas = [model]
if hasattr(model, 'replicas'):
self.replicas = model.replicas
dcnt = torch.cuda.device_count()
total_size = sum([np.prod(p.size()) for p in self.replicas[0].parameters()])
quota = {i:0 for i in range(dcnt)}
#quota[0] = total_size//dcnt
param_groups = {i: [] for i in range(dcnt)}
self.named_parameters=[]
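        # Greedily assign each parameter to the currently least-loaded device
        # so optimizer state is balanced across GPUs; fp16 parameters get fp32
        # master copies on their owning device.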
for i,(n, param) in enumerate(self.replicas[0].named_parameters()):
ps = np.prod(param.size())
index = list(sorted(quota.items(), key=lambda x: x[1]))[0][0]
quota[index] += ps
if param.dtype==torch.half:
cp = param.clone().type(torch.cuda.FloatTensor).detach().to('cuda:{}'.format(index)).requires_grad_()
else:
cp = dict(self.replicas[index].named_parameters())[n]
name = n[len('module.'):] if n.startswith('module.') else n
param_groups[index].append((name, cp))
self.named_parameters.append((name, cp))
self.param_groups = param_groups
self.sub_optimizers = [DeviceOptimizer(self.replicas, p, i, optimizer_fn(p, max_grad_norm=0)) for i,p in self.param_groups.items()]
self.grad_clip_norm = grad_clip_norm
def parameters(self):
return OrderedDict(self.named_parameters)
def step(self, grad_scale=1):
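        # Reduce each device group's gradients in parallel and accumulate the
        # squared L2 norms, clip globally, then run the per-device optimizers.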
def bk(g):
return g.backward()
l2norm_square = parallel_apply([bk for _ in self.sub_optimizers], self.sub_optimizers, devices=[g.device for g in self.sub_optimizers])
l2norm = sum(l2norm_square)**0.5
if str(l2norm) in ['inf', 'nan']:
return False
if grad_scale != 1:
l2norm *= grad_scale
coef = self.grad_clip_norm/(l2norm+1e-6)
if coef<1:
grad_scale = grad_scale*coef
if grad_scale != 1:
for n,p in self.named_parameters:
if p.grad is not None:
p.grad.mul_(grad_scale)
def st(g):
return g.step(l2norm)
parallel_apply([st for _ in self.sub_optimizers], self.sub_optimizers, devices=[g.device for g in self.sub_optimizers])
return True
def zero_grad(self):
for m in self.replicas:
for p in m.parameters():
p.grad = None
for g in self.sub_optimizers:
g.zero_grad()
class DeviceOptimizer():
def __init__(self, replicas, param_group, device, optimizer):
self.param_group = param_group
self.device = device
self.optimizer = optimizer
self.replicas = replicas
self.named_params = [dict(m.named_parameters()) for m in replicas]
def backward(self):
group_params = [[(n,m[n]) for n,p in self.param_group if m[n].grad is not None] for m in self.named_params]
grad_params = [g for g in group_params if len(g)>0]
assert all([len(g)==len(grad_params[0]) for g in grad_params]), [len(g) for g in grad_params]
grad = [[p.grad for n,p in g] for g in grad_params]
reduced_grad = reduce_add_coalesced(grad, self.device)
grads = dict([(n,g) for ((n,p),g) in zip(grad_params[0], reduced_grad)])
l2norm = 0
for n,p in self.param_group:
if n in grads:
p.grad = grads[n].float() if grads[n].dtype==torch.half else grads[n]
l2norm += p.grad.norm().item()**2
else:
assert p.grad is None, n
return l2norm
def step(self, l2norm):
self.optimizer.step()
group_params = [(i, [(n,m[n]) for n,p in self.param_group]) for i,m in enumerate(self.named_params)]
group_params = sorted(group_params, key=lambda x:x[0] if x[0]!=self.device else -1)
params = dict(self.param_group)
for n,p in group_params[0][1]:
if p.data.dtype == torch.half:
p.data.copy_(params[n].data)
else:
p.data = params[n].data
param_list = [[p for n,p in g] for i,g in group_params]
device_list =[i for i,g in group_params]
outputs = broadcast_coalesced(param_list[0], device_list)
for o,p in zip(outputs, param_list):
for x,y in zip(o, p):
y.data.copy_(x.data)
def zero_grad(self):
for n,p in self.param_group:
p.grad = None
self.optimizer.zero_grad()
def optimizer_factory(args, training_steps=None, init_spec=None, no_decay=['bias', 'LayerNorm.weight']):
def optimizer_fn(param_group, max_grad_norm=None):
group0 = dict(params=[],
weight_decay_rate=args.weight_decay,
names=[])
group1 = dict(params=[],
weight_decay_rate=0.00,
names=[])
for (n,p) in param_group:
if not any(nd in n for nd in no_decay):
group0['params'].append(p)
group0['names'].append(n)
else:
group1['params'].append(p)
group1['names'].append(n)
optimizer_grouped_parameters = [group0, group1]
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
b1=args.adam_beta1,
b2=args.adam_beta2,
v1=args.qhadam_v1,
v2=args.qhadam_v2,
lr_ends=args.lr_schedule_ends,
warmup=args.warmup_proportion if args.warmup_proportion<1 else args.warmup_proportion/training_steps,
t_total=training_steps,
schedule=args.lr_schedule,
max_grad_norm = args.max_grad_norm if max_grad_norm is None else max_grad_norm,
global_grad_norm = args.global_grad_norm,
init_spec = init_spec,
weight_decay_rate = args.weight_decay)
return optimizer
return optimizer_fn
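# --- Added usage sketch (assumption, not from the original source) ---
# Minimal illustration of how the pieces above compose; `args`, `model`
# and `loader` are assumed to exist, and the hyper-parameter names follow
# optimizer_factory() above. A sketch, not the author's training loop:
#
# model = XDataParallel(model) # replicate once across all visible GPUs
# opt = XParallelOptimizer(model, optimizer_factory(args, training_steps=10000))
# for batch in loader:
# opt.zero_grad()
# model(batch).mean().backward() # populates per-replica gradients
# if not opt.step(): # False => inf/nan gradient norm
# pass # skip this update; caller may adjust loss scaling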
| 35.643382 | 139 | 0.664569 | 5,691 | 0.587004 | 0 | 0 | 0 | 0 | 0 | 0 | 417 | 0.043012 |
c8c808427fd949238223a24b72518b4c7f83bcd8 | 1,190 | py | Python | mall/serializers.py | turing0/mallProject | cc56d25c51fa03584f99a633a6f606622cfb1e5d | [
"MIT"
]
| null | null | null | mall/serializers.py | turing0/mallProject | cc56d25c51fa03584f99a633a6f606622cfb1e5d | [
"MIT"
]
| null | null | null | mall/serializers.py | turing0/mallProject | cc56d25c51fa03584f99a633a6f606622cfb1e5d | [
"MIT"
]
| null | null | null |
from rest_framework import serializers
from .models import User
from .models import Product
from django.contrib.auth import get_user_model
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'password', 'balance')
class ProductSerializer(serializers.ModelSerializer):
# owner_name = serializers.ReadOnlyField(source="owner.username")
class Meta:
model = Product
# fields = '__all__'
fields = ('id', 'name', 'price', 'owner', 'buyer', 'sell_date')
# def update(self, instance, validated_data):
# if validated_data.get("owner"):
# owner = validated_data.pop('owner')
# owner = Product.objects.get(id=self.initial_data["id"])
# owner_task = super(ProductSerializer, self, ).update(instance, validated_data)
# owner_task.owner = owner
# owner_task.save()
# return owner_task
# return super(ProductSerializer, self, ).update(instance, validated_data)
# Handle the foreign-key field
# def create(self, validated_data):
# return Product.objects.create(seller=self.context["seller"], **validated_data)
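# Added usage sketch (assumption, not from the original source):
# serializing a queryset with the classes above, e.g. inside a DRF view:
# data = ProductSerializer(Product.objects.all(), many=True).data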
| 36.060606 | 92 | 0.657143 | 1,057 | 0.879368 | 0 | 0 | 0 | 0 | 0 | 0 | 745 | 0.6198 |
c8ca44f18c6c1244335778442d0b31143cb496f7 | 811 | py | Python | ch02/multiSinal_button.py | you-know-who-2017/pythonQT | a713bfacbb53c5f23e9d7f61dc44592335a24423 | [
"MIT"
]
| null | null | null | ch02/multiSinal_button.py | you-know-who-2017/pythonQT | a713bfacbb53c5f23e9d7f61dc44592335a24423 | [
"MIT"
]
| null | null | null | ch02/multiSinal_button.py | you-know-who-2017/pythonQT | a713bfacbb53c5f23e9d7f61dc44592335a24423 | [
"MIT"
]
| null | null | null |
'''
Author: geekli
Date: 2020-12-27 10:38:46
LastEditTime: 2020-12-27 10:40:44
LastEditors: your name
Description:
FilePath: \pythonQT\ch02\multiSinal_button.py
'''
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton
class Demo(QWidget):
def __init__(self):
super(Demo, self).__init__()
self.button = QPushButton('Start', self)
self.button.pressed.connect(self.change_text) # 1
self.button.released.connect(self.change_text) # 2
# slot: toggles the button text
def change_text(self):
if self.button.text() == 'Start': # 3
self.button.setText('Stop')
else:
self.button.setText('Start')
if __name__ == '__main__':
app = QApplication(sys.argv)
demo = Demo()
demo.show()
sys.exit(app.exec_())
| 25.34375 | 62 | 0.630086 | 451 | 0.553374 | 0 | 0 | 0 | 0 | 0 | 0 | 218 | 0.267485 |
c8cbd8c6d4128ec1fba81659c9414d125347bfa3 | 105 | py | Python | archive/2021-03-7/results/notebooks/advb_article/get_hmod_sample.py | CambridgeSemiticsLab/BH_time_collocations | 2d1864b6e9cd26624c769ee1e970d69d19da7fbf | [
"CC-BY-4.0"
]
| 5 | 2019-06-19T19:42:21.000Z | 2021-04-20T22:43:45.000Z | archive/2021-03-7/results/notebooks/advb_article/get_hmod_sample.py | CambridgeSemiticsLab/BHTenseAndAspect | 2d1864b6e9cd26624c769ee1e970d69d19da7fbf | [
"CC-BY-4.0"
]
| 2 | 2020-02-25T10:19:40.000Z | 2020-03-13T15:29:01.000Z | archive/2021-03-7/results/notebooks/advb_article/get_hmod_sample.py | CambridgeSemiticsLab/BHTenseAndAspect | 2d1864b6e9cd26624c769ee1e970d69d19da7fbf | [
"CC-BY-4.0"
]
| null | null | null |
from __main__ import *
hm_df = functs_df[~((functs_df.head_type == 'prep') & (functs_df.suffix))].copy()
| 35 | 81 | 0.695238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.057143 |
c8cc6707f00bfb68eb5be0a694507e862c881eb3 | 1,123 | py | Python | autodc/components/hpo_optimizer/hpo_optimizer_builder.py | dingdian110/AutoDC | f5ccca6bea993bcff3e804fb859e8b25ae020b5c | [
"MIT"
]
| null | null | null | autodc/components/hpo_optimizer/hpo_optimizer_builder.py | dingdian110/AutoDC | f5ccca6bea993bcff3e804fb859e8b25ae020b5c | [
"MIT"
]
| null | null | null | autodc/components/hpo_optimizer/hpo_optimizer_builder.py | dingdian110/AutoDC | f5ccca6bea993bcff3e804fb859e8b25ae020b5c | [
"MIT"
]
| null | null | null |
from autodc.components.hpo_optimizer.smac_optimizer import SMACOptimizer
from autodc.components.hpo_optimizer.mfse_optimizer import MfseOptimizer
from autodc.components.hpo_optimizer.bohb_optimizer import BohbOptimizer
from autodc.components.hpo_optimizer.tpe_optimizer import TPEOptimizer
def build_hpo_optimizer(eval_type, evaluator, config_space,
per_run_time_limit=600, per_run_mem_limit=1024,
output_dir='./', inner_iter_num_per_iter=1, seed=1, n_jobs=1):
if eval_type == 'partial':
optimizer_class = MfseOptimizer
elif eval_type == 'partial_bohb':
optimizer_class = BohbOptimizer
elif eval_type == 'holdout_tpe':
optimizer_class = TPEOptimizer
else:
# TODO: Support asynchronous BO
optimizer_class = SMACOptimizer
return optimizer_class(evaluator, config_space,
output_dir=output_dir,
per_run_time_limit=per_run_time_limit,
inner_iter_num_per_iter=inner_iter_num_per_iter,
seed=seed, n_jobs=n_jobs)
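# Added usage sketch (assumption): `evaluator` and `cs` (a ConfigurationSpace)
# come from the surrounding AutoDC code; shows how eval_type picks a backend.
# optimizer = build_hpo_optimizer('partial_bohb', evaluator, cs, output_dir='./logs')
# # -> a BohbOptimizer built with the shared defaults above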
| 46.791667 | 86 | 0.688335 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.063224 |
c8ccf268808a95f71f44af0d1f8a0dcac8ac8aa6 | 835 | py | Python | record_voice.py | y1255018/voice-printer | cea33ae978a0709346bdbaf009f4fa07a97c7463 | [
"MIT"
]
| null | null | null | record_voice.py | y1255018/voice-printer | cea33ae978a0709346bdbaf009f4fa07a97c7463 | [
"MIT"
]
| 1 | 2020-05-10T12:57:46.000Z | 2020-05-10T12:59:27.000Z | record_voice.py | y1255018/voice-printer | cea33ae978a0709346bdbaf009f4fa07a97c7463 | [
"MIT"
]
| null | null | null |
# -*- coding: utf-8 -*-
import sys, select, termios,tty
import os
def getKey():
fd = sys.stdin.fileno()
old = termios.tcgetattr(fd)
new = termios.tcgetattr(fd)
new[3] &= ~termios.ICANON
new[3] &= ~termios.ECHO
try:
termios.tcsetattr(fd, termios.TCSANOW, new)
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSANOW, old)
print(ch)
return ch
def main():
try:
while 1:
key = getKey()
if key == 'r':
# record sound
os.system("arecord -d 5 -f cd 'test.wav'")
print("finish recording")
elif key == 'p':
#play sound
os.system("aplay 'test.wav'")
elif key == 'q':
break
elif key:
print(key)
except (KeyboardInterrupt, SystemExit):
print("SIGINT detected")
if __name__ == "__main__":
main()
| 19.880952 | 50 | 0.568862 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.179548 |
c8cd1764a3562bbf6dce2fed67c34407e35a1349 | 1,516 | py | Python | findpeak.py | BartMassey/pdx-cs-sound | 52f671f155f71eb75a635d9b125f9324889dd329 | [
"MIT"
]
| null | null | null | findpeak.py | BartMassey/pdx-cs-sound | 52f671f155f71eb75a635d9b125f9324889dd329 | [
"MIT"
]
| null | null | null | findpeak.py | BartMassey/pdx-cs-sound | 52f671f155f71eb75a635d9b125f9324889dd329 | [
"MIT"
]
| null | null | null |
#!/usr/bin/python3
# Copyright (c) 2019 Bart Massey
# [This program is licensed under the "MIT License"]
# Please see the file LICENSE in the source
# distribution of this software for license terms.
# Find maximum and minimum sample in an audio file.
import sys
import wave as wav
# Get the signal file.
wavfile = wav.open(sys.argv[1], 'rb')
# Channels per frame.
channels = wavfile.getnchannels()
# Bytes per sample.
width = wavfile.getsampwidth()
# Sample rate
rate = wavfile.getframerate()
# Number of frames.
frames = wavfile.getnframes()
# Length of a frame
frame_width = width * channels
# Get the signal and check it.
max_sample = None
min_sample = None
wave_bytes = wavfile.readframes(frames)
# Iterate over frames.
for f in range(0, len(wave_bytes), frame_width):
frame = wave_bytes[f : f + frame_width]
# Iterate over channels.
for c in range(0, len(frame), width):
# Build a sample.
sample_bytes = frame[c : c + width]
# XXX Eight-bit samples are unsigned
sample = int.from_bytes(sample_bytes,
byteorder='little',
signed=(width>1))
# Check extrema.
if max_sample == None:
max_sample = sample
if min_sample == None:
min_sample = sample
if sample > max_sample:
max_sample = sample
if sample < min_sample:
min_sample = sample
wavfile.close()
print("min: {} max: {}".format(min_sample, max_sample))
| 25.694915 | 56 | 0.638522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 534 | 0.352243 |
c8ce16cc98ba530c9d0d89640e062797670ba6af | 275 | py | Python | thywill_apps/src/thywill_apps/test/proof_of_concept/__init__.py | exratione/thywill-python | 2078d6f6fc12034eac60a7cc30bf2bc0d27a8732 | [
"MIT"
]
| 1 | 2015-04-26T19:49:35.000Z | 2015-04-26T19:49:35.000Z | thywill_apps/src/thywill_apps/test/proof_of_concept/__init__.py | exratione/thywill-python | 2078d6f6fc12034eac60a7cc30bf2bc0d27a8732 | [
"MIT"
]
| null | null | null | thywill_apps/src/thywill_apps/test/proof_of_concept/__init__.py | exratione/thywill-python | 2078d6f6fc12034eac60a7cc30bf2bc0d27a8732 | [
"MIT"
]
| null | null | null |
'''
A very simple test application to exercise a round trip of messages through the thywill system.
This also illustrates the bare, bare minimum implementation of the 'thywill_interface.py' module -
all it does is echo back incoming messages to the client who sent them.
'''
| 45.833333 | 98 | 0.789091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 275 | 1 |
c8ce9069c002bb7867b82767bde341a14df75d08 | 104 | py | Python | integration/tests/error_assert_variable.py | youhavethewrong/hurl | 91cc14882a5f1ef7fa86be09a9f5581cef680559 | [
"Apache-2.0"
]
| 1,013 | 2020-08-27T12:38:48.000Z | 2022-03-31T23:12:23.000Z | integration/tests/error_assert_variable.py | youhavethewrong/hurl | 91cc14882a5f1ef7fa86be09a9f5581cef680559 | [
"Apache-2.0"
]
| 217 | 2020-08-31T11:18:10.000Z | 2022-03-30T17:50:30.000Z | integration/tests/error_assert_variable.py | youhavethewrong/hurl | 91cc14882a5f1ef7fa86be09a9f5581cef680559 | [
"Apache-2.0"
]
| 54 | 2020-09-02T09:41:06.000Z | 2022-03-19T15:33:05.000Z |
from tests import app
@app.route("/error-assert-variable")
def error_assert_variable():
return ''
| 14.857143 | 36 | 0.721154 | 0 | 0 | 0 | 0 | 79 | 0.759615 | 0 | 0 | 26 | 0.25 |
c8d09ce36295ecfe93aeeecfaa8a003ce925b428 | 6,979 | py | Python | src/jk_sysinfo/get_proc_cpu_info.py | jkpubsrc/python-module-jk-sysinfo | 583c9e5d10f64a722ffa794d081aaf94354ba4fb | [
"Apache-1.1"
]
| null | null | null | src/jk_sysinfo/get_proc_cpu_info.py | jkpubsrc/python-module-jk-sysinfo | 583c9e5d10f64a722ffa794d081aaf94354ba4fb | [
"Apache-1.1"
]
| null | null | null | src/jk_sysinfo/get_proc_cpu_info.py | jkpubsrc/python-module-jk-sysinfo | 583c9e5d10f64a722ffa794d081aaf94354ba4fb | [
"Apache-1.1"
]
| null | null | null |
import typing
from jk_cachefunccalls import cacheCalls
from jk_cmdoutputparsinghelper import ValueParser_ByteWithUnit
from .parsing_utils import *
from .invoke_utils import run
#import jk_json
_parserColonKVP = ParseAtFirstDelimiter(delimiter=":", valueCanBeWrappedInDoubleQuotes=False, keysReplaceSpacesWithUnderscores=True)
#
# Returns:
#
# [
# {
# "<key>": "<value>",
# ...
# },
# ...
# ]
#
def parse_proc_cpu_info(stdout:str, stderr:str, exitcode:int) -> typing.Tuple[list,dict]:
"""
processor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 92
model name : Intel(R) Pentium(R) CPU J4205 @ 1.50GHz
stepping : 9
microcode : 0x38
cpu MHz : 1000.000
cache size : 1024 KB
physical id : 0
siblings : 4
core id : 0
cpu cores : 4
apicid : 0
initial apicid : 0
fpu : yes
fpu_exception : yes
cpuid level : 21
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg cx16 xtpr pdcm sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave rdrand lahf_lm 3dnowprefetch intel_pt ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust smep erms mpx rdseed smap clflushopt sha_ni xsaveopt xsavec xgetbv1 dtherm ida arat pln pts md_clear arch_capabilities
bugs : monitor spectre_v1 spectre_v2
bogomips : 2995.20
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
processor : 1
vendor_id : GenuineIntel
cpu family : 6
model : 92
model name : Intel(R) Pentium(R) CPU J4205 @ 1.50GHz
stepping : 9
microcode : 0x38
cpu MHz : 800.000
cache size : 1024 KB
physical id : 0
siblings : 4
core id : 1
cpu cores : 4
apicid : 2
initial apicid : 2
fpu : yes
fpu_exception : yes
cpuid level : 21
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg cx16 xtpr pdcm sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave rdrand lahf_lm 3dnowprefetch intel_pt ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust smep erms mpx rdseed smap clflushopt sha_ni xsaveopt xsavec xgetbv1 dtherm ida arat pln pts md_clear arch_capabilities
bugs : monitor spectre_v1 spectre_v2
bogomips : 2995.20
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
processor : 2
vendor_id : GenuineIntel
cpu family : 6
model : 92
model name : Intel(R) Pentium(R) CPU J4205 @ 1.50GHz
stepping : 9
microcode : 0x38
cpu MHz : 800.000
cache size : 1024 KB
physical id : 0
siblings : 4
core id : 2
cpu cores : 4
apicid : 4
initial apicid : 4
fpu : yes
fpu_exception : yes
cpuid level : 21
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg cx16 xtpr pdcm sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave rdrand lahf_lm 3dnowprefetch intel_pt ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust smep erms mpx rdseed smap clflushopt sha_ni xsaveopt xsavec xgetbv1 dtherm ida arat pln pts md_clear arch_capabilities
bugs : monitor spectre_v1 spectre_v2
bogomips : 2995.20
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
processor : 3
vendor_id : GenuineIntel
cpu family : 6
model : 92
model name : Intel(R) Pentium(R) CPU J4205 @ 1.50GHz
stepping : 9
microcode : 0x38
cpu MHz : 1100.000
cache size : 1024 KB
physical id : 0
siblings : 4
core id : 3
cpu cores : 4
apicid : 6
initial apicid : 6
fpu : yes
fpu_exception : yes
cpuid level : 21
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg cx16 xtpr pdcm sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave rdrand lahf_lm 3dnowprefetch intel_pt ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust smep erms mpx rdseed smap clflushopt sha_ni xsaveopt xsavec xgetbv1 dtherm ida arat pln pts md_clear arch_capabilities
bugs : monitor spectre_v1 spectre_v2
bogomips : 2995.20
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
"""
if exitcode != 0:
raise Exception("reading /proc/cpuinfo failed with exit code {}".format(exitcode))
cpuInfos = splitAtEmptyLines(stdout.split("\n"))
retExtra = {}
ret = []
for group in cpuInfos:
d = _parserColonKVP.parseLines(group)
if "processor" not in d:
for k, v in d.items():
retExtra[k.lower()] = v
continue
if "cache_size" in d:
d["cache_size_kb"] = ValueParser_ByteWithUnit.parse(d["cache_size"]) // 1024
del d["cache_size"]
if "bogomips" in d:
d["bogomips"] = float(d["apicid"])
elif "BogoMIPS" in d:
d["bogomips"] = float(d["BogoMIPS"])
del d["BogoMIPS"]
if "bugs" in d:
d["bugs"] = d["bugs"].split()
if "flags" in d:
d["flags"] = sorted(d["flags"].split())
elif "Features" in d:
d["flags"] = sorted(d["Features"].split())
del d["Features"]
# bool
for key in [ "fpu", "fpu_exception", "wp" ]:
if key in d:
d[key.lower()] = d[key] == "yes"
if key != key.lower():
del d[key]
# int
for key in [ "CPU_architecture", "CPU_revision", "physical_id", "initial_apicid", "cpu_cores", "core_id", "clflush_size", "cache_alignment", "apicid" ]:
if key in d:
d[key.lower()] = int(d[key])
if key != key.lower():
del d[key]
# float
for key in [ "cpu_MHz" ]:
if key in d:
d[key.lower()] = float(d[key])
if key != key.lower():
del d[key]
# str
for key in [ "CPU_implementer", "CPU_part", "CPU_variant" ]:
if key in d:
d[key.lower()] = d[key]
if key != key.lower():
del d[key]
d["processor"] = int(d["processor"])
if "siblings" in d:
d["siblings"] = int(d["siblings"])
#jk_json.prettyPrint(d)
ret.append(d)
return ret, retExtra
#
#
# Returns:
#
# [
# {
# "<key>": "<value>",
# ...
# },
# ...
# ]
#
@cacheCalls(seconds=3, dependArgs=[0])
def get_proc_cpu_info(c = None) -> typing.Tuple[list,dict]:
stdout, stderr, exitcode = run(c, "cat /proc/cpuinfo")
return parse_proc_cpu_info(stdout, stderr, exitcode)
#
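# Added usage sketch (assumption, not from the original module):
# cpus, extra = get_proc_cpu_info() # c=None runs against the local host
# print(cpus[0]["model_name"], cpus[0]["cpu_mhz"]) # keys normalised above
# The decorator caches the result for 3 seconds per host argument.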
| 29.572034 | 612 | 0.71271 | 0 | 0 | 0 | 0 | 208 | 0.029804 | 0 | 0 | 5,230 | 0.749391 |
c8d14c78402ef6d14f3e0943706f524623b640ce | 900 | py | Python | src/telegram/telegram.py | timepieces141/refactored-telegram | 02dce4b1273afb5fd8b80cbdc64a560dc75dbeec | [
"MIT"
]
| null | null | null | src/telegram/telegram.py | timepieces141/refactored-telegram | 02dce4b1273afb5fd8b80cbdc64a560dc75dbeec | [
"MIT"
]
| null | null | null | src/telegram/telegram.py | timepieces141/refactored-telegram | 02dce4b1273afb5fd8b80cbdc64a560dc75dbeec | [
"MIT"
]
| null | null | null |
'''
This module provides the Telegram.
'''
class Telegram:
'''
Telegram encapsulates the pieces and parts of a telegram.
'''
def __init__(self, sender, recipient, message):
'''
Constructs a Telegram instance.
:param sender: The sender of the telegram
:param recipient: The recipient of the telegram
:param message: The message contents
'''
self._sender = sender
self._recipient = recipient
self._message = message
@property
def sender(self):
'''
Provides access to the sender.
'''
return self._sender
@property
def recipient(self):
'''
Provides access to the recipient.
'''
return self._recipient
@property
def message(self):
'''
Retrieve the message.
'''
return self._message
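# Added usage sketch (assumption, not from the original module):
# t = Telegram("Ada", "Charles", "ARRIVING TUESDAY STOP")
# t.sender, t.recipient, t.message # read-only properties defined above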
| 20.930233 | 61 | 0.564444 | 854 | 0.948889 | 0 | 0 | 368 | 0.408889 | 0 | 0 | 486 | 0.54 |
c8d1af14aa978ccc8ecf4f4ebec0ffa36d951d1c | 345 | py | Python | test/test_report.py | aymatveev/testing_framework | 3e522d23b46ddb27b3b389210c244aaee5c3370e | [
"MIT"
]
| null | null | null | test/test_report.py | aymatveev/testing_framework | 3e522d23b46ddb27b3b389210c244aaee5c3370e | [
"MIT"
]
| null | null | null | test/test_report.py | aymatveev/testing_framework | 3e522d23b46ddb27b3b389210c244aaee5c3370e | [
"MIT"
]
| null | null | null |
from testing_framework.report import report
from typing import Tuple
import html
def test_report():
result = report(("test_report", "second line"))
expected_result = f"""
<!DOCTYPE html>
<html>
<body>
<div>test_report</div><div>second line</div>
</body>
</html>
"""
assert html.escape(expected_result) == html.escape(result)
| 23 | 62 | 0.695652 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.385507 |
c8d1c681c7ce88bcb176a7a0b8c693c830a7bc65 | 160 | py | Python | Python/mixedfractions/mixedfractions.py | rvrheenen/OpenKattis | 7fd59fcb54e86cdf10f56c580c218c62e584f391 | [
"MIT"
]
| 12 | 2016-10-03T20:43:43.000Z | 2021-06-12T17:18:42.000Z | Python/mixedfractions/mixedfractions.py | rvrheenen/OpenKattis | 7fd59fcb54e86cdf10f56c580c218c62e584f391 | [
"MIT"
]
| null | null | null | Python/mixedfractions/mixedfractions.py | rvrheenen/OpenKattis | 7fd59fcb54e86cdf10f56c580c218c62e584f391 | [
"MIT"
]
| 10 | 2017-11-14T19:56:37.000Z | 2021-02-02T07:39:57.000Z |
while(True):
inp = [int(x) for x in input().split()]
if inp[0] == 0 and inp[1] == 0:
break
print(inp[0]//inp[1], inp[0]%inp[1], "/", inp[1])
| 32 | 53 | 0.48125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.01875 |
c8d23bd00fcfedf98199c38fb1e64ea94cbde637 | 4,480 | py | Python | qr_rover_lost_comms/src/qr_rover_lost_comms/qr_rover_lost_comms.py | QuantumRoboticsURC/qrteam | bb28f4ad82eab6fb0706be13f8571e0b3261641e | [
"MIT"
]
| null | null | null | qr_rover_lost_comms/src/qr_rover_lost_comms/qr_rover_lost_comms.py | QuantumRoboticsURC/qrteam | bb28f4ad82eab6fb0706be13f8571e0b3261641e | [
"MIT"
]
| null | null | null | qr_rover_lost_comms/src/qr_rover_lost_comms/qr_rover_lost_comms.py | QuantumRoboticsURC/qrteam | bb28f4ad82eab6fb0706be13f8571e0b3261641e | [
"MIT"
]
| null | null | null |
#!/usr/bin/env python
import sys
import time
import rospy
import subprocess
import actionlib
from std_msgs.msg import Float32
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Twist, PoseWithCovarianceStamped
from actionlib_msgs.msg import GoalStatus, GoalStatusArray
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
def ping_host(host):
ping_fail_count = rospy.get_param('~ping_fail_count', 2)
ping_command = "ping -c %s -n -W 1 %s" % (ping_fail_count, host)
# TODO: don't shell out, use a more secure python library
p = subprocess.Popen(ping_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
(output, error) = p.communicate()
returncode = p.returncode
return output, error, returncode
class RecoveryController():
def __init__(self):
self.cmd_vel = rospy.Publisher('cmd_vel', Twist, queue_size=10)
self.joy_drive = rospy.Publisher('joy_drive', Joy, queue_size=10)
self.joy_arm = rospy.Publisher('joy_arm', Joy, queue_size=10)
self.vel_limit_lost_comms = rospy.Publisher('vel_limit_lost_comms', Float32, queue_size=10)
self.cmd_vel_sub = rospy.Subscriber('cmd_vel', Twist, self.cmd_vel_callback)
self.cmd_vel_twist = Twist()
def cmd_vel_callback(self, msg):
self.cmd_vel_twist = msg
def working_comms(self):
working_comms = False
if (self.ips != "no"):
for ip in self.ips.split(','):
(output, error, returncode) = ping_host(ip)
if returncode == 0:
#ping = int(output.split('/')[-1].split('.')[0])
ping = float(output.split('time=')[1].split(' ')[0])
rospy.loginfo("ping %s: %s" % (ip, ping))
twist = Twist()
if ping > 1000:
self.vel_limit_lost_comms.publish(0.3)
twist.linear.x = self.cmd_vel_twist.linear.x/4
twist.angular.z = self.cmd_vel_twist.angular.z/4
self.cmd_vel.publish(twist)
elif ping > 500:
self.vel_limit_lost_comms.publish(0.6)
twist.linear.x = self.cmd_vel_twist.linear.x/2
twist.angular.z = self.cmd_vel_twist.angular.z/2
self.cmd_vel.publish(twist)
elif ping < 500:
self.vel_limit_lost_comms.publish(1)
twist.linear.x = self.cmd_vel_twist.linear.x
twist.angular.z = self.cmd_vel_twist.angular.z
self.cmd_vel.publish(twist)
working_comms = True
else:
working_comms = True
return working_comms
def zero_joystick(self):
joyDrive = Joy()
joyArm = Joy()
if (self.joy_drive_model == 'xbox'):
joyDrive.axes = [0] * 8
joyDrive.buttons = [0] * 11
elif (self.joy_drive_model == 'ec'):
joyDrive.axes = [0] * 8
joyDrive.buttons = [0] * 15
elif (self.joy_drive_model == 'ps5'):
joyDrive.axes = [0] * 12
joyDrive.buttons = [0] * 12
joyArm.axes = [0] * 3
joyArm.buttons = [0] * 11
self.joy_drive.publish(joyDrive)
self.joy_arm.publish(joyArm)
def do_recovery(self):
if rospy.is_shutdown(): return
rospy.logerr('No connection to base station.')
#if self.connect_to_move_base():
#if self.goal_in_progress():
#rospy.loginfo("Navigation in progress, not recovering until finished...")
#return
#self.navigation_goal_to(self.recovery_pose)
self.zero_joystick()
self.stop_motors()
def stop_motors(self):
twist = Twist() # zero motion
self.cmd_vel.publish(twist)
def main_loop(self):
while not rospy.is_shutdown():
if not self.working_comms():
self.do_recovery()
time.sleep(1)
def main():
rospy.init_node("qr_rover_lost_comms")
qr_rover_lost_comms = RecoveryController()
qr_rover_lost_comms.ips = rospy.get_param('~ips_to_monitor')
qr_rover_lost_comms.joy_drive_model = rospy.get_param('~joy_drive_model')
rospy.loginfo('Monitoring base station on IP(s): %s.' % qr_rover_lost_comms.ips)
qr_rover_lost_comms.main_loop() # start monitoring
if __name__ == '__main__':
main()
| 39.646018 | 99 | 0.59375 | 3,320 | 0.741071 | 0 | 0 | 0 | 0 | 0 | 0 | 615 | 0.137277 |
c8d264727c0faf5a872f18da939f483862ce785c | 108 | py | Python | backend/application/contracts/schemas/__init__.py | uesleicarvalhoo/Ecommerce | 1d8d0f0c522dcd27fd90e315989b6fa93caf62b8 | [
"MIT"
]
| null | null | null | backend/application/contracts/schemas/__init__.py | uesleicarvalhoo/Ecommerce | 1d8d0f0c522dcd27fd90e315989b6fa93caf62b8 | [
"MIT"
]
| null | null | null | backend/application/contracts/schemas/__init__.py | uesleicarvalhoo/Ecommerce | 1d8d0f0c522dcd27fd90e315989b6fa93caf62b8 | [
"MIT"
]
| null | null | null |
from backend.domain.contracts import NewClient, NewOrder, NewOrderItem
from .new_product import NewProduct
| 27 | 70 | 0.851852 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c8d5d6f27303f0d53ce075025843560499c32f81 | 508 | py | Python | backend/swagger_server/helpers/_add_audit_entry.py | Lend88/libresign | 9537f39a696fa5f3433052406329d77d528b6cf9 | [
"MIT"
]
| 6 | 2019-01-29T05:58:37.000Z | 2021-11-02T22:47:02.000Z | backend/swagger_server/helpers/_add_audit_entry.py | Lend88/libresign | 9537f39a696fa5f3433052406329d77d528b6cf9 | [
"MIT"
]
| 9 | 2020-09-09T04:53:01.000Z | 2022-03-08T22:52:18.000Z | backend/swagger_server/helpers/_add_audit_entry.py | Lend88/libresign | 9537f39a696fa5f3433052406329d77d528b6cf9 | [
"MIT"
]
| 4 | 2019-01-29T07:38:55.000Z | 2021-10-16T21:06:42.000Z |
from uuid import UUID
import json
from ..mappings import *
def add_doc_audit_entry(session, doc_id, status, data):
""""Add an audit entry, requires that a commit
be run on the session afterwards
"""
if not isinstance(doc_id, UUID):
raise ValueError("Expecting UUID")
if not isinstance(data, dict):
raise ValueError("Expecting dict")
session.add(FileUsage(
document_id=doc_id.bytes,
fileusage_type=status,
data=json.dumps(data)
))
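# Added usage sketch (assumption): a SQLAlchemy-style session and a uuid.UUID
# document id; the helper only queues the row, the caller owns the commit.
# add_doc_audit_entry(session, doc_id, "signed", {"ip": "127.0.0.1"})
# session.commit()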
| 22.086957 | 55 | 0.65748 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 127 | 0.25 |
c8d758a027414f97b213413022804a7b0f68fe28 | 523 | py | Python | version.py | Jin-Tao-208/web_science_coursework | bb4ab2226b70e7b0f7bbef40ceb002900e757a31 | [
"MIT"
]
| null | null | null | version.py | Jin-Tao-208/web_science_coursework | bb4ab2226b70e7b0f7bbef40ceb002900e757a31 | [
"MIT"
]
| null | null | null | version.py | Jin-Tao-208/web_science_coursework | bb4ab2226b70e7b0f7bbef40ceb002900e757a31 | [
"MIT"
]
| null | null | null |
# versions of libraries used
import sys
import tweepy
import numpy as np
import pymongo
import emoji
import nltk.tokenize
import requests
print("Python version:{}".format(sys.version))
print("tweepy version:{}".format(tweepy.__version__))
print("pymongo version:{}".format(pymongo.__version__))
print("emoji version:{}".format(emoji.__version__))
print("requests version:{}".format(requests.__version__))
print("numpy version:{}".format(np.__version__))
print("nltk version:{}".format(nltk.__version__))
| 29.055556 | 58 | 0.745698 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 161 | 0.307839 |
c8d9772ef30de66f59d67a0dc784ccc67d52e59f | 94 | py | Python | python3/binary.py | eiadshahtout/Python | b2406b0806bc55a9d8f5482a304a8d6968249018 | [
"MIT"
]
| null | null | null | python3/binary.py | eiadshahtout/Python | b2406b0806bc55a9d8f5482a304a8d6968249018 | [
"MIT"
]
| null | null | null | python3/binary.py | eiadshahtout/Python | b2406b0806bc55a9d8f5482a304a8d6968249018 | [
"MIT"
]
| null | null | null |
def count_ones(num):
binary = bin(num)[2:] # bin() already returns a str
print(binary)
# count the set bits, as the function name promises
return binary.count('1')
print(count_ones(20)) # 0b10100 -> 2
| 15.666667 | 27 | 0.712766 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c8d9edb95baf53d14122148e741bd4d9e71e6992 | 6,968 | py | Python | adaboost.py | xxxzhi/AdaBoostClassifier | e5161cad03bdeb1c353b1c06dc32752a34c160d3 | [
"Apache-2.0"
]
| 1 | 2019-03-15T03:10:08.000Z | 2019-03-15T03:10:08.000Z | adaboost.py | xxxzhi/AdaBoostClassifier | e5161cad03bdeb1c353b1c06dc32752a34c160d3 | [
"Apache-2.0"
]
| null | null | null | adaboost.py | xxxzhi/AdaBoostClassifier | e5161cad03bdeb1c353b1c06dc32752a34c160d3 | [
"Apache-2.0"
]
| null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import dbm
from sklearn.datasets import load_iris
from classifer.base import BaseClassifier
from classifer.decision_tree import DecisionTreeClassifier
import numpy as np
class AbsAdaBoostClassifier(BaseClassifier):
def __init__(self, num_rounds):
super(AbsAdaBoostClassifier, self).__init__()
self.num_rounds = num_rounds
self.clf = None
def create_classifer(self, index=0):
"""
create a new classifer.
:return: BaseClassifier
"""
pass
def process_alpha(self, index, alpha):
"""
after a successful classifier training. this method will be called
:param index
:param alpha
:return:
"""
print '------------alpha-----------'
print alpha
def train(self, x, y):
"""
:param x:
:param y:
:return:
"""
num_rows = len(x)
classifiers = []
alphas = []
weights = np.ones(num_rows) * 1.0 / num_rows
for n in range(self.num_rounds):
error = 0.
random_indices = AbsAdaBoostClassifier.resample(weights)
resampled_entries_x = []
resampled_entries_y =[]
for i in range(num_rows):
resampled_entries_x.append(x[random_indices[i]])
resampled_entries_y.append(y[random_indices[i]])
print 'round ' + str(n + 1) + ' training...'
weak_classifier = self.create_classifer(n)
print len(resampled_entries_x)
weak_classifier.train(resampled_entries_x, resampled_entries_y)
# training and calculate the rate of error
classifications = weak_classifier.predict(x)
error = 0
for i in range(len(classifications)):
predicted = classifications[i]
# use the same label convention as the weight update below,
# where y[i] may be a one-hot vector or a plain class index
truth = np.argmax(y[i]) if np.size(y[i]) > 1 else y[i]
error += (predicted != truth) * weights[i]
print 'Error', error
if error == 0.:
alpha = 4.0
elif error > 0.7:
print 'Discarding learner'
print n
continue # discard classifier with error > 0.5
else:
alpha = 0.5 * np.log((1 - error) / error)
self.process_alpha(n, alpha)
alphas.append(alpha)
classifiers.append(weak_classifier)
print 'weak learner added'
for i in range(num_rows):
if np.size(y[i]) > 1:
ry = np.argmax(y[i])
else:
ry = y[i]
h = classifications[i]
h = (-1 if h == 0 else 1)
ry = (-1 if ry == 0 else 1)
weights[i] = weights[i] * np.exp(-alpha * h * ry)
sum_weights = sum(weights)
print 'Sum of weights', sum_weights
normalized_weights = [float(w) / sum_weights for w in weights]
weights = normalized_weights
print alphas
print '----------weight----------'
self.clf = zip(alphas, classifiers)
def predict(self, x):
"""
@:param x array-like features
@:return labels
"""
result_list = []
weight_list = []
for (weight, classifier) in self.clf:
res = classifier.predict(x)
result_list.append(res)
weight_list.append(weight)
res =[]
for i in range(len(result_list[0])):
result_map = {}
for j in range(len(result_list)):
if not result_map.has_key(str(result_list[j][i])):
result_map[str(result_list[j][i])] = 0
result_map[str(result_list[j][i])] = result_map[str(result_list[j][i])] + weight_list[j]
cur_max_value = -10000000000000000000000
max_key = ''
for key in result_map:
if result_map[key] > cur_max_value:
cur_max_value = result_map[key]
max_key = key
res.append(int(max_key))
return np.asarray(res)
def load(self, weight=[]):
"""
reload model base on weight of each classifier
:param weight:
:return:
"""
classifiers = []
for index in range(len(weight)):
classifiers.append(self.create_classifer(index))
self.clf = zip(weight, classifiers)
@staticmethod
def resample(weights):
t = np.cumsum(weights)
s = np.sum(weights)
result_arr = np.searchsorted(t, np.random.rand(len(weights))*s)
# add all dataset
# result_arr.append(np.arange(0, len(weights),step=1))
return result_arr
class DecisionAdaBoostClassifier(AbsAdaBoostClassifier):
def __init__(self, num_rounds):
super(DecisionAdaBoostClassifier, self).__init__(num_rounds)
def create_classifer(self, index=0):
return DecisionTreeClassifier()
def process_alpha(self, index, alpha):
super(DecisionAdaBoostClassifier, self).process_alpha(index, alpha)
db = dbm.open('params.pag','c')
key = 'alpha_'+str(index)
db[key] = str(alpha)
db.close()
def test():
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
import matplotlib.pyplot as plt
# We only take the two corresponding features
pairidx = 0
pair =[0,1]
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = DecisionAdaBoostClassifier(num_rounds=3)
# clf = DecisionTreeClassifier()
# print X
print y
clf.train(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
print '----'
print iris.data[:1, ]
values = np.c_[xx.ravel(), yy.ravel()]
Z = clf.predict(values)
print Z
print Z.shape
print xx.shape
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
plt.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
if __name__ == '__main__':
test()
| 28.325203 | 104 | 0.550373 | 5,023 | 0.720867 | 0 | 0 | 286 | 0.041045 | 0 | 0 | 1,118 | 0.160448 |
c8da9080a11e6c113c5b2a18202d6e7d74fba286 | 4,942 | py | Python | bioinfo/assembly/overlap.py | sohyongsheng/sequence_assembly | f2dea763da447f09f49de8fbf3ceaad8ed3e0559 | [
"MIT"
]
| 1 | 2022-02-02T07:49:58.000Z | 2022-02-02T07:49:58.000Z | bioinfo/assembly/overlap.py | sohyongsheng/sequence_assembly | f2dea763da447f09f49de8fbf3ceaad8ed3e0559 | [
"MIT"
]
| null | null | null | bioinfo/assembly/overlap.py | sohyongsheng/sequence_assembly | f2dea763da447f09f49de8fbf3ceaad8ed3e0559 | [
"MIT"
]
| null | null | null |
import numpy as np
from bioinfo.assembly.errors import InvalidPair
from bioinfo.molecules.sequence import Sequence
class LargestOverlapFinder:
def __init__(self):
pass
# Get indices a, b, c, d of longest substrings first,
# such that substring == first[a: b] == second[c: d].
# Also returns length of substring.
def get_substrings(self, counter):
while not np.all(counter == 0):
i, j = np.unravel_index(counter.argmax(), counter.shape)
length = counter[i, j]
for k in range(length):
counter[i - k, j - k] = 0
b, d = i + 1, j + 1
a, c = b - length, d - length
indices = a, b, c, d
yield indices, length
def is_overlap(self, indices, first, second):
a, b, c, d = indices
# First overlaps with second, e.g.
# 0123
# 1234
# ^^^
if b == len(first) and c == 0:
return True
# Second overlaps with first, e.g.
# 1234
# 0123
# ^^^
elif a == 0 and d == len(second):
return True
# First is within second, e.g.
# 123
# 01234
# ^^^
elif a == 0 and b == len(first):
return True
# Second is within first, e.g.
# 01234
# 123
# ^^^
elif c == 0 and d == len(second):
return True
else:
return False
# Taken from longest common substring problem. See
# following for tutorial on dynamic programming solution:
# https://www.youtube.com/watch?v=BysNXJHzCEs
def tally_counter(self, first, second):
num_rows = len(first) + 1
num_cols = len(second) + 1
counter = np.zeros((num_rows, num_cols), dtype = int)
for i, m in enumerate(first, start = 1):
for j, n in enumerate(second, start = 1):
if m == n:
counter[i, j] = counter[i - 1, j - 1] + 1
counter = self.remove_first_row_first_col(counter)
return counter
def find(self, first, second):
counter = self.tally_counter(first, second)
for indices, length in self.get_substrings(counter):
a, b, c, d = indices
assert first[a: b] == second[c: d]
if self.is_overlap(indices, first, second):
return indices, length
else:
indices, length = None, 0
return indices, length
def remove_first_row_first_col(self, x):
return x[1:, 1:]
class Pair:
finder = LargestOverlapFinder()
def __init__(self, first, second):
self.first = first
self.second = second
if self.first.is_dna != self.second.is_dna:
raise InvalidPair(
"Cannot compare DNA with RNA sequences."
)
self.indices, self.overlap_length = self.finder.find(
self.first.seq_str,
self.second.seq_str,
)
def combine(self):
first = self.first.seq_str
second = self.second.seq_str
# No overlap, so just concatenate.
if self.overlap_length == 0:
combined = first + second
return Sequence(
combined,
is_dna = self.first.is_dna,
)
else:
a, b, c, d = self.indices
# First overlaps with second, e.g.
# 0123
# 1234
# ^^^
if b == len(self.first) and c == 0:
prefix = first[:a]
assert first[a: b] == second[c: d]
overlap = first[a: b]
suffix = second[d:]
combined = prefix + overlap + suffix
return Sequence(
combined,
is_dna = self.first.is_dna,
)
# Second overlaps with first, e.g.
# 1234
# 0123
# ^^^
elif a == 0 and d == len(self.second):
prefix = second[:c]
assert second[c: d] == first[a: b]
overlap = second[c: d]
suffix = first[b:]
combined = prefix + overlap + suffix
return Sequence(
combined,
is_dna = self.first.is_dna,
)
# First is within second, e.g.
# 123
# 01234
# ^^^
elif a == 0 and b == len(self.first):
return Sequence(
second,
is_dna = self.second.is_dna,
)
# Second is within first, e.g.
# 01234
# 123
# ^^^
elif c == 0 and d == len(self.second):
return Sequence(
first,
is_dna = self.first.is_dna,
)
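# Added usage sketch (assumption; Sequence construction mirrors the calls
# in Pair.combine above):
# a = Sequence("ACGTAC", is_dna=True)
# b = Sequence("TACGGA", is_dna=True)
# Pair(a, b).combine() # overlap "TAC" -> Sequence("ACGTACGGA")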
| 31.679487 | 68 | 0.474909 | 4,820 | 0.975314 | 397 | 0.080332 | 0 | 0 | 0 | 0 | 776 | 0.157021 |
c8dab9e9589a6e0d7ec3775c63cd68cd42f91ee4 | 857 | py | Python | models/operations.py | NikolayXHD/tinkoff-api-python | 4a4b71f7af1d752b8566299c058b712b513fa92f | [
"MIT"
]
| null | null | null | models/operations.py | NikolayXHD/tinkoff-api-python | 4a4b71f7af1d752b8566299c058b712b513fa92f | [
"MIT"
]
| null | null | null | models/operations.py | NikolayXHD/tinkoff-api-python | 4a4b71f7af1d752b8566299c058b712b513fa92f | [
"MIT"
]
| null | null | null |
from __future__ import annotations
from . import _base
class Operations(_base.Model):
swagger_types: dict[str, str] = {'operations': 'list[Operation]'}
attribute_map: dict[str, str] = {'operations': 'operations'}
def __init__(self, operations=None):
self._operations = None
self.discriminator = None
self.operations = operations
@property
def operations(self):
"""
:rtype: list[clients.tinkoff.models.Operation]
"""
return self._operations
@operations.setter
def operations(self, operations):
"""
:param list[clients.tinkoff.models.Operation] operations:
"""
if operations is None:
raise ValueError(
'Invalid value for `operations`, must not be `None`'
)
self._operations = operations
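# Added usage sketch (assumption): the setter above rejects None, so even
# Operations() with the default operations=None raises ValueError; callers
# must pass a (possibly empty) list of Operation models:
# ops = Operations(operations=[])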
| 25.205882 | 69 | 0.611435 | 798 | 0.931155 | 0 | 0 | 475 | 0.554259 | 0 | 0 | 256 | 0.298716 |
c8dad2fb3e34935d8ee2d55f042a5e204873fdf4 | 187 | py | Python | tests/IT/fixtures/test_fixture_nested.py | testandconquer/pytest-conquer | da600c7f5bcd06aa62c5cca9b75370bf1a6ebf05 | [
"MIT"
]
| null | null | null | tests/IT/fixtures/test_fixture_nested.py | testandconquer/pytest-conquer | da600c7f5bcd06aa62c5cca9b75370bf1a6ebf05 | [
"MIT"
]
| 5 | 2018-12-27T02:52:01.000Z | 2019-01-02T01:52:55.000Z | tests/IT/fixtures/test_fixture_nested.py | testandconquer/pytest-conquer | da600c7f5bcd06aa62c5cca9b75370bf1a6ebf05 | [
"MIT"
]
| null | null | null | import pytest
@pytest.fixture
def fixture1():
return 1
@pytest.fixture
def fixture2(fixture1):
return fixture1 + 1
def test_with_fixture(fixture2):
assert fixture2 == 2
| 11.6875 | 32 | 0.71123 | 0 | 0 | 0 | 0 | 107 | 0.572193 | 0 | 0 | 0 | 0 |
c8dbf09302e48945dea0b1250add3f9a59269652 | 827 | py | Python | app/api/utils/remoteImageMapper.py | nurely/lxdui | 8cb31dc1117719b140f440f8a705282781db7b35 | [
"Apache-2.0"
]
| 589 | 2017-10-22T04:11:08.000Z | 2022-03-26T22:50:30.000Z | app/api/utils/remoteImageMapper.py | nurely/lxdui | 8cb31dc1117719b140f440f8a705282781db7b35 | [
"Apache-2.0"
]
| 134 | 2017-11-14T02:52:03.000Z | 2022-03-22T12:51:09.000Z | app/api/utils/remoteImageMapper.py | nurely/lxdui | 8cb31dc1117719b140f440f8a705282781db7b35 | [
"Apache-2.0"
]
| 170 | 2017-10-06T06:22:43.000Z | 2022-03-15T02:12:34.000Z |
def remoteImagesList(images):
response = []
aliasesProcessed = []
aliases = [alias[20:] for alias in images['metadata']]
for alias in aliases:
strippedAlias = alias.replace('/default','')
if strippedAlias not in aliasesProcessed:
aliasesDetails = alias.split('/')
if len(aliasesDetails) > 2:
image = prepRemoteImageObject(strippedAlias, aliasesDetails)
if image not in response: response.append(image)
aliasesProcessed.append(strippedAlias)
return response
def prepRemoteImageObject(alias, aliasesDetails):
image = {
'name': aliasesDetails[0].__str__(),
'distribution': aliasesDetails[1].__str__(),
'architecture': aliasesDetails[2].__str__(),
'image': alias
}
return image
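# Added usage sketch (assumption; `images` mirrors the LXD image-alias
# response, whose entries begin with the 20-character prefix
# "/1.0/images/aliases/" that remoteImagesList() slices off):
# remoteImagesList({'metadata': ['/1.0/images/aliases/ubuntu/18.04/amd64/default']})
# # -> [{'name': 'ubuntu', 'distribution': '18.04',
# # 'architecture': 'amd64', 'image': 'ubuntu/18.04/amd64'}]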
| 30.62963 | 76 | 0.628779 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 66 | 0.079807 |
c8e095e4b5a713605e60ac5cfbe8f9beb652c2f1 | 390 | py | Python | search.py | kanttouchthis/clip-search | 463c3f2849a6f5ae7ebc6bfe7a932ec82f2ab0c1 | [
"MIT"
]
| 1 | 2021-10-12T12:15:00.000Z | 2021-10-12T12:15:00.000Z | search.py | kanttouchthis/clip-search | 463c3f2849a6f5ae7ebc6bfe7a932ec82f2ab0c1 | [
"MIT"
]
| null | null | null | search.py | kanttouchthis/clip-search | 463c3f2849a6f5ae7ebc6bfe7a932ec82f2ab0c1 | [
"MIT"
]
| 1 | 2021-11-20T14:51:11.000Z | 2021-11-20T14:51:11.000Z |
from searcher import CLIPSearcher
from utils import get_args
if __name__ == "__main__":
args = get_args()
cs = CLIPSearcher(device=args.device, store_path=args.store_path)
cs.load_dir(args.dir, save_every=args.save_every, recursive=args.recursive, load_new=(not args.dont_load_new))
cs.search(texts=args.texts, images=args.images, results=args.results, outdir=args.outdir)
| 43.333333 | 114 | 0.769231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.025641 |
c8e2a3f8d1524fcc6efb93afc74fa20ef2432c75 | 2,049 | py | Python | gemd/entity/template/has_property_templates.py | CitrineInformatics/gemd-python | 4f80045c1b481269c7451f6a205755c22093eb74 | [
"Apache-2.0"
]
| 7 | 2020-04-02T11:11:09.000Z | 2022-02-05T23:19:51.000Z | gemd/entity/template/has_property_templates.py | CitrineInformatics/gemd-python | 4f80045c1b481269c7451f6a205755c22093eb74 | [
"Apache-2.0"
]
| 24 | 2020-04-22T16:55:09.000Z | 2022-03-30T20:44:39.000Z | gemd/entity/template/has_property_templates.py | CitrineInformatics/gemd-python | 4f80045c1b481269c7451f6a205755c22093eb74 | [
"Apache-2.0"
]
| 3 | 2020-05-08T00:50:02.000Z | 2020-12-19T00:48:56.000Z |
"""For entities that have a property template."""
from gemd.entity.link_by_uid import LinkByUID
from gemd.entity.setters import validate_list
from gemd.entity.template.base_template import BaseTemplate
from gemd.entity.template.property_template import PropertyTemplate
from gemd.entity.bounds.base_bounds import BaseBounds
from typing import Iterable
class HasPropertyTemplates(object):
"""
Mixin-trait for entities that include property templates.
Parameters
----------
properties: List[(PropertyTemplate, BaseBounds)]
A list of tuples containing this entity's property templates as well
as any restrictions on those templates' bounds.
"""
def __init__(self, properties):
self._properties = None
self.properties = properties
@property
def properties(self):
"""
Get the list of property template/bounds tuples.
Returns
-------
List[(PropertyTemplate, bounds)]
List of this entity's property template/bounds pairs
"""
return self._properties
@properties.setter
def properties(self, properties):
"""
Set the list of parameter templates.
Parameters
----------
properties: List[(PropertyTemplate, bounds)]
A list of tuples containing this entity's property templates as well
as any restrictions on those templates' bounds.
Returns
-------
List[(PropertyTemplate, bounds)]
List of this entity's property template/bounds pairs
"""
if isinstance(properties, Iterable):
if any(isinstance(x, BaseBounds) for x in properties):
properties = [properties] # It's a template/bounds tuple (probably)
self._properties = validate_list(properties,
(PropertyTemplate, LinkByUID, list, tuple),
trigger=BaseTemplate._homogenize_ranges
)
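# Added usage sketch (assumption; PropertyTemplate/RealBounds signatures
# follow the gemd package but are not taken from this file):
# bounds = RealBounds(0, 1000, 'kelvin')
# template = PropertyTemplate("temperature", bounds=bounds)
# HasPropertyTemplates(properties=[(template, RealBounds(0, 500, 'kelvin'))])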
| 32.52381 | 84 | 0.625671 | 1,694 | 0.826745 | 0 | 0 | 1,245 | 0.607613 | 0 | 0 | 1,026 | 0.500732 |
c8e3e5f641575e46034c6e7d21d6b9a28bd02474 | 1,547 | py | Python | app/main/forms.py | james-muriithi/blog | e653f2fbb3c1e5a873c393b4985cc12d726e451c | [
"Unlicense"
]
| null | null | null | app/main/forms.py | james-muriithi/blog | e653f2fbb3c1e5a873c393b4985cc12d726e451c | [
"Unlicense"
]
| null | null | null | app/main/forms.py | james-muriithi/blog | e653f2fbb3c1e5a873c393b4985cc12d726e451c | [
"Unlicense"
]
| null | null | null |
from app.models import Subscriber
from flask_wtf import FlaskForm
from wtforms import TextAreaField, StringField, IntegerField, EmailField
from wtforms.validators import InputRequired, ValidationError
from flask import flash
class BlogForm(FlaskForm):
title = StringField('Title', validators=[InputRequired()])
category = IntegerField('Category', validators=[InputRequired()])
content = StringField('Content', validators=[InputRequired()])
image_path = StringField('Image Path', validators=[InputRequired()])
class EditBlogForm(FlaskForm):
title = StringField('Title', validators=[InputRequired()])
category = IntegerField('Category', validators=[InputRequired()])
content = StringField('Content', validators=[InputRequired()])
image_path = StringField('Image Path')
# comment form
class CommentForm(FlaskForm):
comment = TextAreaField('Leave a Comment', validators=[InputRequired()])
# subscriber form
class SubscriberForm(FlaskForm):
name = StringField('Name', validators=[InputRequired()])
email = StringField('Email', validators=[InputRequired()])
def validate_email(self,data_field):
if Subscriber.query.filter_by(email = data_field.data).first():
flash('Email already subscribed', 'error')
raise ValidationError('Email already subscribed')
class ProfileForm(FlaskForm):
"""Profile form"""
email = EmailField('Email',validators=[InputRequired()])
name = StringField('Name',validators=[InputRequired()])
about = TextAreaField('About')
| 40.710526 | 76 | 0.728507 | 1,279 | 0.826761 | 0 | 0 | 0 | 0 | 0 | 0 | 228 | 0.147382 |
c8e4d42dd8ef4d4d14c2794784ca0f4e4747b37c | 278 | py | Python | miner/config.py | czhang-nbai/swan | 03a6ade93d9b8b193bd05bf851779784eb2ffde5 | [
"MIT"
]
| 6 | 2021-02-19T02:36:06.000Z | 2021-03-20T09:38:17.000Z | miner/config.py | czhang-nbai/swan | 03a6ade93d9b8b193bd05bf851779784eb2ffde5 | [
"MIT"
]
| 27 | 2021-01-13T06:43:44.000Z | 2021-05-12T04:55:28.000Z | miner/config.py | czhang-nbai/swan | 03a6ade93d9b8b193bd05bf851779784eb2ffde5 | [
"MIT"
]
| 7 | 2021-01-26T04:50:11.000Z | 2021-03-04T22:26:59.000Z |
import toml
def read_config(_config_path=None):
if _config_path is None:
_config_path = './config.toml'
# script_dir = os.path.dirname(__file__)
# file_path = os.path.join(script_dir, config_path)
_config = toml.load(_config_path)
return _config
| 21.384615 | 55 | 0.694245 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 106 | 0.381295 |
c8e6c52bd4d19fdf314e6096b12ca3b0f03e5a63 | 3,214 | py | Python | godaddy_dns.py | JohnMcSpedon/GoDaddy_DNS_migrator | e7439616f64a446254e4df05db115aaa0206691e | [
"MIT"
]
| 4 | 2021-03-01T18:28:34.000Z | 2021-03-11T12:20:16.000Z | godaddy_dns.py | JohnMcSpedon/GoDaddy_DNS_migrator | e7439616f64a446254e4df05db115aaa0206691e | [
"MIT"
]
| null | null | null | godaddy_dns.py | JohnMcSpedon/GoDaddy_DNS_migrator | e7439616f64a446254e4df05db115aaa0206691e | [
"MIT"
]
| null | null | null | """
Retrieve GoDaddy DNS settings via their developer API
See also:
https://developer.godaddy.com/doc/endpoint/domains#/
"""
import os
import time
from pprint import pprint
from typing import List
import requests
import credential_loaders
BASE_URL = "https://api.godaddy.com"
# You can easily replace these with a different CredentialLoader to match your key management system
API_KEY_CRED_LOADER = credential_loaders.EnvVarCredentialLoader("GODADDY_API_KEY")
API_SECRET_CRED_LOADER = credential_loaders.EnvVarCredentialLoader("GODADDY_API_SECRET")
# API_KEY_CRED_LOADER = credential_loaders.PlaintextCredentialLoader("./api_key.txt")
# API_SECRET_CRED_LOADER = credential_loaders.PlaintextCredentialLoader("./api_secret.txt")
def _get_headers() -> dict:
"""Get authorization header for GoDaddy Developer API.
https://developer.godaddy.com/keys
"""
api_key = API_KEY_CRED_LOADER.load_credentials()
api_secret = API_SECRET_CRED_LOADER.load_credentials()
return {"Authorization": "sso-key {}:{}".format(api_key, api_secret)}
def _call_endpoint(url_suffix: str, base_url: str = BASE_URL) -> dict:
"""Call GoDaddy developer API endpoint.
Only supports GET endpoints to keep access read-only.
"""
headers = _get_headers()
url = os.path.join(base_url, url_suffix)
resp = requests.get(url, headers=headers)
return resp.json()
def get_domains() -> List[str]:
"""Get list of Domains for this API key."""
ret = _call_endpoint("v1/domains")
# Example response:
# [{'createdAt': '2016-06-25T03:08:44.000Z',
# 'domain': 'mydomain.com',
# 'domainId': 12345678,
# 'expirationProtected': False,
# 'expires': '2020-06-25T03:08:44.000Z',
# 'holdRegistrar': False,
# 'locked': True,
# 'nameServers': None,
# 'privacy': False,
# 'renewAuto': True,
# 'renewDeadline': '2020-08-09T03:08:44.000Z',
# 'renewable': True,
# 'status': 'ACTIVE',
# 'transferProtected': False},]
domains = [d["domain"] for d in ret]
return domains
def get_domain_dns_records(domain):
"""Get DNS entries for a specific domain
Returns:
List with format (for example):
[ {'data': '160.153.162.20', 'name': '_dmarc', 'ttl': 3600, 'type': 'A'},
{'data': 'ns37.domaincontrol.com', 'name': '@', 'ttl': 3600, 'type': 'NS'}, ...]
"""
url_suffix = "v1/domains/{}/records".format(domain)
ret = _call_endpoint(url_suffix)
if isinstance(ret, dict) and ret.get('code', None) == "UNKNOWN_DOMAIN":
# e.g. {'code': 'UNKNOWN_DOMAIN', 'message': 'The given domain is not registered, or does not have a zone file'}
raise Exception(f"Can't find domain {domain}. Are you sure your API key and secret are correct?: {ret}")
return ret
def print_all_dns_records():
""" Print each domain and its DNS records (for domains linked to this API key)."""
for domain in sorted(get_domains()):
dns_records = get_domain_dns_records(domain)
print(domain)
pprint(dns_records)
print("*" * 50)
# TODO: poor man's rate limiter. improve?
time.sleep(2)
if __name__ == "__main__":
print_all_dns_records()
| 32.795918 | 120 | 0.671749 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,843 | 0.573429 |
c8e80bc7bd958f10a7a1f279ed0d99283b77f722 | 1,184 | py | Python | preprocessing.py | Alloooshe/facelib_modular_face_recognition_pipline | 0313214b6f919e49e84235c1a6a4a4838b813e73 | [
"MIT"
]
| 10 | 2019-12-29T13:38:56.000Z | 2021-03-15T07:21:52.000Z | preprocessing.py | Alloooshe/facelib_modular_face_recognition_pipline | 0313214b6f919e49e84235c1a6a4a4838b813e73 | [
"MIT"
]
| 1 | 2021-03-15T07:45:45.000Z | 2021-03-17T11:10:53.000Z | preprocessing.py | Alloooshe/facelib_modular_face_recognition_pipline | 0313214b6f919e49e84235c1a6a4a4838b813e73 | [
"MIT"
]
| 2 | 2020-05-03T08:33:39.000Z | 2021-02-06T16:49:54.000Z |
import cv2
import numpy as np
class preprocessing:
def process_image(self,image, rescale, recolor):
if rescale['req']:
image= self.rescale(image,rescale['width'], rescale['height'])
if recolor['req']:
image = self.rgb2gray(image)
return image
def rescale (self,image,width,height):
image= cv2.resize(image,(width,height))
return image
def rgb2gray(self,image):
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
return image
def crop (self,image,boxes ):
faces = []
for box in boxes :
x=int( round (box[0]))
y=int( round (box[1]))
w=int (round (box[2]) )
h=int (round ( box[3]))
cropped = image[y:h+y,x : w+x,:]
faces.append(cropped)
return faces
def resize2square (self,image,x,y):
resized= cv2.resize(image,(x,y),interpolation=cv2.INTER_AREA)
return resized
def preprocess_facenet(self, images):
ret = np.zeros([len(images), 160, 160, 3])
# write each resized face into its row of the output batch;
# np.append returns a new array, so its result must not be discarded
for i, image in enumerate(images):
ret[i] = self.resize2square(image, 160, 160)
return ret
| 28.878049 | 71 | 0.579392 | 1,138 | 0.961149 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.021115 |
c8e8ef9bc1df23fffd3b87a416935aa12a7c1e19 | 214 | py | Python | app/database/pronto_soccorso.py | nyxgear/PSD-e-service-pronto-soccorso | 92eb0586c2cfb12a844a106b71911c80e8e3e57b | [
"MIT"
]
| null | null | null | app/database/pronto_soccorso.py | nyxgear/PSD-e-service-pronto-soccorso | 92eb0586c2cfb12a844a106b71911c80e8e3e57b | [
"MIT"
]
| null | null | null | app/database/pronto_soccorso.py | nyxgear/PSD-e-service-pronto-soccorso | 92eb0586c2cfb12a844a106b71911c80e8e3e57b | [
"MIT"
]
| null | null | null |
# -*- coding: utf-8 -*-
from .tables.pronto_soccorsi import table
class ProntoSoccorso:
_table = table
def __init__(self, ps_dict):
# entity dict
self.e_d = ps_dict
def to_dict(self):
return self.e_d
| 14.266667 | 41 | 0.696262 | 144 | 0.672897 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.168224 |
c8ea55c5455ae4d69b07f53ce37792d7f4a82837 | 132 | py | Python | 3_binary_tree/__init__.py | freshklauser/LeeCodeSummary | d9d776ddfc44fee844123b848d43a78e9ba4117e | [
"MIT"
]
| null | null | null | 3_binary_tree/__init__.py | freshklauser/LeeCodeSummary | d9d776ddfc44fee844123b848d43a78e9ba4117e | [
"MIT"
]
| null | null | null | 3_binary_tree/__init__.py | freshklauser/LeeCodeSummary | d9d776ddfc44fee844123b848d43a78e9ba4117e | [
"MIT"
]
| 1 | 2021-11-18T01:58:29.000Z | 2021-11-18T01:58:29.000Z | # -*- coding: utf-8 -*-
# @Author : Administrator
# @DateTime : 2021/10/17 20:40
# @FileName : __init__.py
# @SoftWare : PyCharm
| 18.857143 | 30 | 0.621212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 126 | 0.954545 |
c8ebd9a417dcbfc90f2665cef2e143f107c15986 | 497 | py | Python | covid_19_stat.py | pavelkalinchuk/api | 3b2eccbb09b012ac2c841dd30c44a285a8f5bdc2 | [
"Apache-2.0"
]
| null | null | null | covid_19_stat.py | pavelkalinchuk/api | 3b2eccbb09b012ac2c841dd30c44a285a8f5bdc2 | [
"Apache-2.0"
]
| null | null | null | covid_19_stat.py | pavelkalinchuk/api | 3b2eccbb09b012ac2c841dd30c44a285a8f5bdc2 | [
"Apache-2.0"
]
| null | null | null | import requests
from datetime import date, timedelta
today = date.today()
yesterday = today - timedelta(days=1)
country = "Russia"
endpoint = f"https://api.covid19api.com/country/{country}/status/confirmed"
params = {"from": str(yesterday), "to": str(today)}
response = requests.get(endpoint, params=params).json()
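# Each record in the response looks roughly like this (illustrative, abbreviated):
#   {"Country": "Russia", "Cases": 1234567, "Status": "confirmed", "Date": "2021-01-01T00:00:00Z"}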
total_confirmed = 0
for day in response:
    # The endpoint reports cumulative confirmed cases for each day, so keep the
    # latest figure instead of summing (summing double-counts the running total).
    total_confirmed = max(total_confirmed, day.get("Cases", 0))
print("\n"f"Total Confirmed Covid-19 cases in {country}: {total_confirmed}")
| 29.235294 | 76 | 0.724346 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 158 | 0.317907 |
c8ec940438930475725da4b1624b8e42cb723947 | 157 | py | Python | core/models/__init__.py | Brain-Engine/ImageNet | 893a8008e0e8e373bc66a7cbb40813db8495426a | [
"Apache-2.0"
]
| 1 | 2021-05-17T11:49:12.000Z | 2021-05-17T11:49:12.000Z | core/models/__init__.py | Brain-Engine/ImageNet | 893a8008e0e8e373bc66a7cbb40813db8495426a | [
"Apache-2.0"
]
| null | null | null | core/models/__init__.py | Brain-Engine/ImageNet | 893a8008e0e8e373bc66a7cbb40813db8495426a | [
"Apache-2.0"
]
| 1 | 2021-05-17T11:49:22.000Z | 2021-05-17T11:49:22.000Z | # import models from torchvision
from torchvision.models import *
# import models from efficientnet
from .efficientnet import b0, b1, b2, b3, b4, b5, b6, b7
| 31.4 | 56 | 0.764331 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.414013 |
c8ee532a04ed15373dc8d2091c28d0c7dca10643 | 2,834 | py | Python | MPI/py/plot_mpi_timing.py | mlxd/myscripts | b8b7d6b270ef24b06028e21f066c2bb587f94cef | [
"MIT"
]
| null | null | null | MPI/py/plot_mpi_timing.py | mlxd/myscripts | b8b7d6b270ef24b06028e21f066c2bb587f94cef | [
"MIT"
]
| null | null | null | MPI/py/plot_mpi_timing.py | mlxd/myscripts | b8b7d6b270ef24b06028e21f066c2bb587f94cef | [
"MIT"
]
| null | null | null | #This file plots the results from the MPI timing runs
import sys
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib.markers as mkr
plt_style='ggplot'
plt.rcParams['font.size'] = 11
plt.rcParams['font.family'] = 'serif'
plt.rcParams['axes.labelsize'] = 11
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['xtick.labelsize'] = 9
plt.rcParams['ytick.labelsize'] = 9
plt.rcParams['figure.titlesize'] = 12
#We begin by loading the CSV file of rank pairings and times into the appropriate format
StartStr = str(sys.argv[1])
EndStr = str(sys.argv[2])
start = np.loadtxt(open(StartStr), delimiter=',', dtype={'names': ('A','B','t'), 'formats':('i4','i4','f8')})
end = np.loadtxt(open(EndStr), delimiter=',', dtype={'names': ('A','B','t'), 'formats':('i4','i4','f8')})
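#Both CSVs share the same three-column layout; illustrative rows (values assumed):
#  rank_base,rank_to_merge,unix_time_seconds
#  0,1,1523456789.12
#  2,3,1523456790.45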
ds=[{'%s:%s'%(a,b): (a,b,t) for a,b,t in zip(start['A'],start['B'],start['t']) }]
de=[{'%s:%s'%(a,b): (a,b,t) for a,b,t in zip(end['A'],end['B'],end['t']) }]
#We take note of the starting time over all ranks as a 0 offset
t0 = np.min(start['t'])
#3D Rank A:B vs time diagram
fig = plt.figure()
plt.style.use(plt_style)
fig.clf()
ax = fig.add_subplot(111, projection='3d')
ax.set_zlabel('time [s]')
ax.set_ylabel('Rank To Merge')
ax.set_xlabel('Rank Base')
#Plot the recorded times and connect ranks that have been merged toegther
for a in ds[0].keys():
ax.scatter( ds[0][a][0], ds[0][a][1], ds[0][a][2]-t0, c='r', marker='o') #Plot start
ax.scatter( de[0][a][0], de[0][a][1], de[0][a][2]-t0, c='b', marker='x') #Plot end
ax.plot( [ ds[0][a][0], de[0][a][0] ], [ ds[0][a][1], de[0][a][1] ], [ ds[0][a][2] - t0, de[0][a][2] - t0 ], c='k') #Draw line between start and finish
ax.set_zlim3d([ 0, np.max(end['t']) - t0 ])
ax.set_ylim3d([ np.min([end['A'], end['B']]), np.max([end['A'],end['B']]) ])
ax.set_xlim3d([ np.min([end['A'], end['B']]), np.max([end['A'],end['B']]) ])
#Save the 3D plot output before plt.show(): blocking backends may close the
#figure on show, which would leave a blank file if savefig ran afterwards
plt.savefig('3d_%s_%s.pdf'%(StartStr, EndStr))
plt.show()
plt.clf()
plt.style.use( plt_style )
#2D connections diagram
#Draw lines to mark the MPI ranks
for ii in range(np.max([start['A'],start['B']])):
plt.axhline(ii, xmin=0, xmax=1, linewidth=0.5)
#Draw lines between the start and end for reducing 2 data sets
for a in ds[0].keys():
plt.plot( [ ds[0][a][2] - t0, de[0][a][2] - t0] , [ds[0][a][1], de[0][a][0]], linestyle='-', linewidth=0.5, c='k', alpha=0.8)
plt.scatter( start['t'] - t0, start['B'], marker='x', c='r', alpha=0.8)
plt.scatter( end['t'] - t0, end['A'], marker='o', c='b', alpha=0.8)
plt.xlabel('time [s]')
plt.ylabel('MPI rank')
plt.title('%s_%s'%(StartStr, EndStr))
plt.xlim([ 0, np.max(end['t']) - t0 ])
plt.ylim([ np.min([end['A'], end['B']]), np.max([end['A'],end['B']]) ])
#Save the 2D plot output before showing it, for the same reason as above
plt.savefig('2d_%s_%s.pdf'%(StartStr, EndStr))
plt.show()
| 38.821918 | 155 | 0.61856 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 960 | 0.338744 |
c8efd5f50e23a88b242e0e5832ddd548e4a5108c | 1,809 | py | Python | src/entitykb/pipeline/filterers.py | genomoncology/entitykb | 61cf346a24f52fd8c1edea8827a816284ed6ecaf | [
"MIT"
]
| 25 | 2020-06-30T16:46:43.000Z | 2022-01-04T15:27:49.000Z | src/entitykb/pipeline/filterers.py | genomoncology/entitykb | 61cf346a24f52fd8c1edea8827a816284ed6ecaf | [
"MIT"
]
| 3 | 2020-11-25T15:09:33.000Z | 2021-05-08T11:25:14.000Z | src/entitykb/pipeline/filterers.py | genomoncology/entitykb | 61cf346a24f52fd8c1edea8827a816284ed6ecaf | [
"MIT"
]
| 2 | 2021-06-17T11:21:49.000Z | 2021-12-02T13:07:15.000Z | from typing import Iterator
from entitykb import Span, interfaces, Doc
class KeepExactNameOnly(interfaces.IFilterer):
""" Only keep spans that are an exact match. """
def is_keep(self, span: Span):
return span.name == span.text
class RemoveInexactSynonyms(interfaces.IFilterer):
""" Remove if not exact synonyms. """
def is_keep(self, span):
is_keep = span.name and (span.name.lower() == span.text.lower())
return is_keep or (span.text in span.synonyms)
class DedupeByKeyOffset(interfaces.IFilterer):
""" Keeps longest overlapping span sharing same key. """
def __init__(self, doc: Doc = None):
super().__init__(doc)
self.seen = set()
def span_tuple(self, span: Span, offset: int):
return span.entity_key, offset
def is_unique(self, span: Span) -> bool:
keys = {self.span_tuple(span, offset) for offset in span.offsets}
is_unique = self.seen.isdisjoint(keys)
if is_unique:
self.seen.update(keys)
return is_unique
@classmethod
def sort_key(cls, span: Span):
return (
-span.num_tokens,
span.match_type(),
span.offset,
span.label,
)
def filter(self, spans: Iterator[Span]) -> Iterator[Span]:
spans = sorted(spans, key=self.sort_key)
if len(spans) > 1:
spans = filter(self.is_unique, spans)
return spans
class DedupeByLabelOffset(DedupeByKeyOffset):
""" Keeps longest overlapping span sharing same label. """
def span_tuple(self, span: Span, offset: int):
return span.label, offset
class DedupeByOffset(DedupeByKeyOffset):
""" Keeps longest overlapping spans. """
def span_tuple(self, span: Span, offset: int):
return offset
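# Usage sketch (hypothetical wiring; `doc.spans` is an assumed attribute here):
#   deduper = DedupeByKeyOffset(doc)
#   kept = list(deduper.filter(doc.spans))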
| 27.409091 | 73 | 0.63571 | 1,722 | 0.951907 | 0 | 0 | 184 | 0.101714 | 0 | 0 | 239 | 0.132117 |
c8effc674c65f81f1f4c9fdac1c750120b3d16ef | 716 | py | Python | octavia-cli/unit_tests/test_entrypoint.py | pluralsh/airbyte | 9b1ed03fe482f5154f6c1843b1be76de87f3605d | [
"MIT"
]
| 1 | 2022-01-27T22:29:38.000Z | 2022-01-27T22:29:38.000Z | octavia-cli/unit_tests/test_entrypoint.py | pluralsh/airbyte | 9b1ed03fe482f5154f6c1843b1be76de87f3605d | [
"MIT"
]
| null | null | null | octavia-cli/unit_tests/test_entrypoint.py | pluralsh/airbyte | 9b1ed03fe482f5154f6c1843b1be76de87f3605d | [
"MIT"
]
| null | null | null | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import pytest
from click.testing import CliRunner
from octavia_cli import entrypoint
def test_octavia():
runner = CliRunner()
result = runner.invoke(entrypoint.octavia)
assert result.exit_code == 0
assert result.output.startswith("Usage: octavia [OPTIONS] COMMAND [ARGS]...")
@pytest.mark.parametrize(
"command",
[entrypoint.init, entrypoint.apply, entrypoint.create, entrypoint.delete, entrypoint._list, entrypoint._import],
)
def test_not_implemented_commands(command):
runner = CliRunner()
result = runner.invoke(command)
assert result.exit_code == 1
assert result.output.endswith("not yet implemented.\n")
| 27.538462 | 116 | 0.734637 | 0 | 0 | 0 | 0 | 357 | 0.498603 | 0 | 0 | 135 | 0.188547 |
c8f2a4e3254c600092c6d8f19d958953e7b804a3 | 5,261 | py | Python | src/device/eltako/fsr61_actor.py | rosenloecher-it/enocean-mqtt-bridge | d56e41a1a67e70bdeb1aa46d10f48ed5a12ca59c | [
"MIT"
]
| 1 | 2020-12-01T17:10:14.000Z | 2020-12-01T17:10:14.000Z | src/device/eltako/fsr61_actor.py | rosenloecher-it/enocean-mqtt-bridge | d56e41a1a67e70bdeb1aa46d10f48ed5a12ca59c | [
"MIT"
]
| 1 | 2021-09-19T13:38:02.000Z | 2021-09-19T13:38:02.000Z | src/device/eltako/fsr61_actor.py | rosenloecher-it/enocean-mqtt-bridge | d56e41a1a67e70bdeb1aa46d10f48ed5a12ca59c | [
"MIT"
]
| null | null | null | import json
import logging
import random
from datetime import datetime
from typing import Optional
from paho.mqtt.client import MQTTMessage
from enocean.protocol.constants import PACKET
from enocean.protocol.packet import RadioPacket
from src.command.switch_command import SwitchCommand
from src.common.json_attributes import JsonAttributes
from src.common.switch_state import SwitchState
from src.device.base.cyclic_device import CheckCyclicTask
from src.device.base.scene_actor import SceneActor
from src.device.eltako.fsr61_eep import Fsr61Eep, Fsr61Action, Fsr61Command
from src.device.misc.rocker_switch_tools import RockerSwitchTools, RockerAction, RockerButton
from src.enocean_connector import EnoceanMessage
from src.tools.enocean_tools import EnoceanTools
from src.tools.pickle_tools import PickleTools
class Fsr61Actor(SceneActor, CheckCyclicTask):
"""
Specialized for: Eltako FSR61-230V (an ON/OFF relay switch)
"""
DEFAULT_REFRESH_RATE = 300 # in seconds
def __init__(self, name):
SceneActor.__init__(self, name)
CheckCyclicTask.__init__(self)
self._current_switch_state = None # type: Optional[SwitchState]
self._last_status_request = None # type: Optional[datetime]
def process_enocean_message(self, message: EnoceanMessage):
packet = message.payload # type: RadioPacket
if packet.packet_type != PACKET.RADIO:
self._logger.debug("skipped packet with packet_type=%s", EnoceanTools.packet_type_to_string(packet.rorg))
return
if packet.rorg == RockerSwitchTools.DEFAULT_EEP.rorg:
props = RockerSwitchTools.extract_props(packet)
self._logger.debug("proceed_enocean - got=%s", props)
action = RockerSwitchTools.extract_action(props) # type: RockerAction
if action.button == RockerButton.ROCK3:
self._current_switch_state = SwitchState.ON
elif action.button == RockerButton.ROCK2:
self._current_switch_state = SwitchState.OFF
else:
self._current_switch_state = SwitchState.ERROR
else:
self._current_switch_state = SwitchState.ERROR
if self._current_switch_state not in [SwitchState.ON, SwitchState.OFF]:
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.debug("proceed_enocean - pickled error packet:\n%s", PickleTools.pickle_packet(packet))
self._logger.debug("proceed_enocean - switch_state=%s", self._current_switch_state)
self._last_status_request = self._now()
self._reset_offline_refresh_timer()
message = self._create_json_message(self._current_switch_state)
self._publish_mqtt(message)
def _create_json_message(self, switch_state: SwitchState):
data = {
JsonAttributes.DEVICE: self.name,
JsonAttributes.STATE: switch_state.value,
JsonAttributes.TIMESTAMP: self._now().isoformat(),
}
json_text = json.dumps(data)
return json_text
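    # Illustrative payload shape; the real key names come from the JsonAttributes
    # constants, so treat these as placeholders:
    #   {"device": "fsr61-kitchen", "state": "on", "timestamp": "2021-06-17T11:21:49"}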
def process_mqtt_message(self, message: MQTTMessage):
try:
self._logger.debug('process_mqtt_message: "%s"', message.payload)
command = SwitchCommand.parse(message.payload)
self._logger.debug("mqtt command: '{}'".format(repr(command)))
self._execute_actor_command(command)
except ValueError:
self._logger.error("cannot execute command! message: {}".format(message.payload))
def _execute_actor_command(self, command: SwitchCommand):
if command.is_toggle:
command = SwitchCommand.OFF if self._current_switch_state == SwitchState.ON else SwitchCommand.ON
if command.is_on_or_off:
action = Fsr61Action(
command=Fsr61Command.SWITCHING,
switch_state=SwitchState.ON if command.is_on else SwitchState.OFF,
)
elif command.is_update:
action = Fsr61Action(command=Fsr61Command.STATUS_REQUEST)
elif command.is_learn:
action = Fsr61Action(command=Fsr61Command.SWITCHING, switch_state=SwitchState.ON, learn=True)
else:
raise ValueError("SwitchCommand ({}) not supported!".format(command))
action.sender = self._enocean_sender
action.destination = self._enocean_target or 0xffffffff
props, packet = Fsr61Eep.create_props_and_packet(action)
self._logger.debug("sending '{}' => {}".format(action, props))
self._send_enocean_packet(packet)
def check_cyclic_tasks(self):
self._check_and_send_offline()
self._request_update()
def _request_update(self):
diff_seconds = None
now = self._now()
refresh_rate = self._randomized_refresh_rate
if self._last_status_request is not None:
diff_seconds = (now - self._last_status_request).total_seconds()
if diff_seconds is None or diff_seconds >= refresh_rate:
self._last_status_request = now
self._execute_actor_command(SwitchCommand.UPDATE)
@property
def _randomized_refresh_rate(self) -> int:
        # randint needs integer bounds; a float upper bound breaks on newer Python versions
        return self.DEFAULT_REFRESH_RATE + random.randint(0, int(self.DEFAULT_REFRESH_RATE * 0.1))
| 40.469231 | 117 | 0.698536 | 4,443 | 0.844516 | 0 | 0 | 150 | 0.028512 | 0 | 0 | 463 | 0.088006 |
c8f361858524234ea8e385c43bd790d28e9507fd | 1,960 | py | Python | neuroml/arraymorph_load_time_benchmark.py | NeuralEnsemble/libNeuroML | 75d1630a0c6354a3997c4068dc8cdc447491b6f8 | [
"BSD-3-Clause"
]
| 20 | 2015-03-11T11:21:32.000Z | 2021-10-11T16:03:27.000Z | neuroml/arraymorph_load_time_benchmark.py | NeuralEnsemble/libNeuroML | 75d1630a0c6354a3997c4068dc8cdc447491b6f8 | [
"BSD-3-Clause"
]
| 48 | 2015-01-15T18:41:01.000Z | 2022-01-05T13:53:58.000Z | neuroml/arraymorph_load_time_benchmark.py | NeuralEnsemble/libNeuroML | 75d1630a0c6354a3997c4068dc8cdc447491b6f8 | [
"BSD-3-Clause"
]
| 16 | 2015-01-14T21:53:46.000Z | 2019-09-04T23:05:27.000Z | import numpy as np
import neuroml
import neuroml.arraymorph as am
class Benchmark:
def __init__(self, num_segments):
self.num_segments = num_segments
def set_up(self):
        num_segments = self.num_segments  # per cell (was hard-coded to int(1e4), ignoring the constructor argument)
num_vertices = num_segments + 1
x = np.linspace(0, 10, num_vertices)
y = np.zeros(num_vertices)
z = np.zeros(num_vertices)
d = np.linspace(1, 0.01, num_vertices)
vertices = np.array([x, y, z, d]).T
connectivity = range(-1, num_segments)
big_arraymorph = am.ArrayMorphology(
vertices=vertices, connectivity=connectivity
)
transposed_x = x + 10
transposed_vertices = np.array([transposed_x, y, z, d]).T
transposed_arraymorph = am.ArrayMorphology(
vertices=transposed_vertices, connectivity=connectivity
)
bigger_d = d + 0.5
fatter_vertices = np.array([x, y, z, bigger_d]).T
fatter_arraymorph = am.ArrayMorphology(
vertices=fatter_vertices, connectivity=connectivity
)
neuroml_cell = neuroml.Cell(id="cell_4")
neuroml_morphology = neuroml.Morphology(id="my_morph")
neuroml_cell.morphology = neuroml_morphology
self.transposed_arraymorph = transposed_arraymorph
self.fatter_arraymorph = fatter_arraymorph
self.big_arraymorph = big_arraymorph
self.cell_1 = neuroml.Cell(id="cell_1")
self.cell_2 = neuroml.Cell(id="cell_2")
self.cell_3 = neuroml.Cell(id="cell_3")
self.cell_1.morphology = transposed_arraymorph
self.cell_2.morphology = fatter_arraymorph
self.cell_3.morphology = big_arraymorph
self.test_doc = neuroml.NeuroMLDocument(id="TestDocument")
self.test_doc.cells.append(self.cell_1)
self.test_doc.cells.append(self.cell_2)
self.test_doc.cells.append(self.cell_3)
self.test_doc.cells.append(neuroml_cell)
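# Usage sketch (timing harness left to the caller, e.g. timeit):
#   bench = Benchmark(num_segments=10000)
#   bench.set_up()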
| 31.612903 | 67 | 0.656122 | 1,891 | 0.964796 | 0 | 0 | 0 | 0 | 0 | 0 | 66 | 0.033673 |
c8f400891a861906013fd78e255d4aff2e9b28fa | 5,840 | py | Python | extractexamples.py | afcarl/contra | a82d14342b242cba7a8298d54aa0dabd9d77269d | [
"0BSD"
]
| 1 | 2019-04-22T16:56:07.000Z | 2019-04-22T16:56:07.000Z | extractexamples.py | afcarl/contra | a82d14342b242cba7a8298d54aa0dabd9d77269d | [
"0BSD"
]
| null | null | null | extractexamples.py | afcarl/contra | a82d14342b242cba7a8298d54aa0dabd9d77269d | [
"0BSD"
]
| null | null | null | #!/usr/bin/env python
# Extracts examples of given strings with context in a TAB-separated
# field format from given text documents.
from __future__ import with_statement
import sys
import re
from os import path
options = None
def argparser():
import argparse
ap=argparse.ArgumentParser(description="Extract examples of given strings with context from given texts")
ap.add_argument("-c", "--context", metavar="LEN", default="3", help="Context length (space-separated words)")
ap.add_argument("-s", "--strings", metavar="STR:LABEL", default=None, help="Strings to search for and labels to assign (format STR:LABEL[,STR:LABEL ...])")
ap.add_argument("-f", "--stringfile", metavar="FILE", default=None, help="File containing strings to search for and labels to assign (format STR<TAB>LABEL, one per line)")
ap.add_argument("-b", "--boundary", metavar="REGEX", default=r'\b', help="Regex string defining token boundaries for search")
ap.add_argument("-r", "--regex", default=False, action="store_true", help="Interpret input strings as regular expressions")
ap.add_argument("-i", "--ignorecase", default=False, action="store_true", help="Ignore case in string matching")
ap.add_argument("-v", "--verbose", default=False, action="store_true", help="Verbose output")
ap.add_argument("file", metavar="FILE", nargs='+', help="Source file(s)")
return ap
def string_to_regex(s):
global options
if options.ignorecase:
regex_flags = re.IGNORECASE
else:
regex_flags = 0
if not options.regex:
exp = options.boundary + re.escape(s) + options.boundary
else:
exp = options.boundary + s + options.boundary
return re.compile(exp, regex_flags)
def process(f, fn, str_re_lab):
text = f.read().rstrip('\n')
docid = path.basename(fn)
assert '\t' not in text, "ERROR: source text (%s) contains tab!" % fn
assert '\n' not in text, "ERROR: source text (%s) contains newline!" % fn
for s, re, label in str_re_lab:
for m in re.finditer(text):
# get contexts
left, right = text[:m.start()], text[m.end():]
lwords, rwords = left.split(' '), right.split(' ')
# cut, compensating for cases where the nearest "token" is empty
if len(lwords) != 0 and lwords[-1] == '':
loff = options.context+1
else:
loff = options.context
if len(rwords) != 0 and rwords[0] == '':
roff = options.context+1
else:
roff = options.context
left = ' '.join(lwords[-loff:])
right = ' '.join(rwords[:roff])
print "%s[%d:%d]\t%s\t%s\t%s\t%s" % (docid, m.start(), m.end(),
label,
left, m.group(), right)
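# Each emitted line is TAB-separated: docid[start:end], label, left context,
# match, right context. Illustrative example (hypothetical document and label):
#   mydoc.txt[128:134]<TAB>GENE<TAB>mutations in the<TAB>BRCA1<TAB>gene are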
def main(argv):
global options
options = argparser().parse_args(argv[1:])
# argument sanity check
if options.strings is None and options.stringfile is None:
print >> sys.stderr, "Please give either \"-s\" or \"-f\" argument."
return 1
if options.strings is not None and options.stringfile is not None:
print >> sys.stderr, "Please give either \"-s\" or \"-f\" argument, but not both."
return 1
try:
options.context = int(options.context)
assert options.context > 0
except Exception:
print >> sys.stderr, "Please give a positive integer for context length"
return 1
# determine strings to search for, store as (string, label) pairs
if options.strings is not None:
try:
strings = []
for string_label in options.strings.split(","):
s, l = string_label.split(":")
strings.append((s, l))
except ValueError:
print >> sys.stderr, "Failed to parse \"%s\" as a comma-separated list of STRING:LABEL pairs" % options.strings
return 1
else:
try:
strings = []
with open(options.stringfile, 'rU') as f:
for line in f:
try:
line = line.rstrip('\n')
s, l = line.split("\t")
strings.append((s, l))
except ValueError:
print >> sys.stderr, "Failed to parse \"%s\" in %s as a TAB-separated STRING:LABEL pair"
return 1
except IOError, e:
print >> sys.stderr, e
return 1
# check string and label sanity
if len(strings) == 0:
print >> sys.stderr, "No strings to search for defined."
return 1
seen = {}
for s, l in strings:
if s.strip() == "":
print >> sys.stderr, "Error: empty search string."
return 1
if l.strip() == "":
print >> sys.stderr, "Error: empty label."
return 1
if s.strip() != s:
print >> sys.stderr, "Warning: space in search string \"%s\"." % s
if s in seen:
print >> sys.stderr, "Warning: duplicate search string \"%s\"." % s
seen[s] = True
# create regular expressions for search
str_re_lab = []
for s, l in strings:
try:
str_re_lab.append((s, string_to_regex(s), l))
except Exception:
print >> sys.stderr, "Failed to compile \"%s\" as regular expression" % s
return 1
# primary processing
for fn in options.file:
try:
with open(fn, 'rU') as f:
process(f, fn, str_re_lab)
except IOError, e:
print >> sys.stderr, e
return 1
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 37.677419 | 175 | 0.55976 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,715 | 0.293664 |
c8f61ba84ff26314734e24f05cd833da5e3ee801 | 2,813 | py | Python | pymtl/tools/translation/verilog_bug_test.py | belang/pymtl | 4a96738724b007cbd684753aed0ac3de5b5dbebb | [
"BSD-3-Clause"
]
| 206 | 2015-01-05T21:53:56.000Z | 2022-03-14T08:04:49.000Z | pymtl/tools/translation/verilog_bug_test.py | belang/pymtl | 4a96738724b007cbd684753aed0ac3de5b5dbebb | [
"BSD-3-Clause"
]
| 84 | 2015-01-25T19:57:33.000Z | 2021-05-11T15:46:56.000Z | pymtl/tools/translation/verilog_bug_test.py | belang/pymtl | 4a96738724b007cbd684753aed0ac3de5b5dbebb | [
"BSD-3-Clause"
]
| 99 | 2015-02-17T17:43:44.000Z | 2022-02-14T17:58:18.000Z | #=======================================================================
# verilog_bug_test.py
#=======================================================================
import pytest
from pymtl import *
from exceptions import VerilatorCompileError
pytestmark = requires_verilator
#-----------------------------------------------------------------------
# Point BitStruct
#-----------------------------------------------------------------------
class Point( BitStructDefinition ):
def __init__( s ):
s.x = BitField(4)
s.y = BitField(4)
#-----------------------------------------------------------------------
# setup_sim
#-----------------------------------------------------------------------
def setup_sim( model ):
model = TranslationTool( model )
model.elaborate()
sim = SimulationTool( model )
return model, sim
#-----------------------------------------------------------------------
# test_bitstruct_tick_reg
#-----------------------------------------------------------------------
@pytest.mark.parametrize(
'config', ['Tick','TickFields','Comb','CombFields']
)
def test_bitstruct_reg( config ):
class AssignBitStruct( Model ):
def __init__( s, config=None ):
s.in_ = InPort ( Point() )
s.out = OutPort( Point() )
if config == 'Tick':
@s.tick_rtl
def block():
s.out.next = s.in_
elif config == 'TickFields':
@s.tick_rtl
def block():
s.out.x.next = s.in_.x
s.out.y.next = s.in_.y
elif config == 'Comb':
@s.combinational
def block():
s.out.value = s.in_
elif config == 'CombFields':
@s.combinational
def block():
s.out.x.value = s.in_.x
s.out.y.value = s.in_.y
      else: raise Exception( 'Invalid config = %s' % config )
# verify verilator simulation
model, sim = setup_sim( AssignBitStruct( config ) )
for i in range( 10 ):
input_value = concat( *2*[Bits(4,i)] )
model.in_.value = input_value
sim.cycle()
assert model.out == input_value
# read verilog to verify our output signal is being declared as a reg
# (required by Synopsys design compiler)
with open( model.__class__.__name__+'.v', 'r' ) as fp:
assert 'output reg' in fp.read()
#-----------------------------------------------------------------------
# test_verilator_compile_error
#-----------------------------------------------------------------------
def test_verilator_compile_error( ):
class TestVerilatorCompileError( Model ):
def __init__( s ):
s.in_ = InPort(8)
s.out = OutPort(8)
@s.combinational
def logic():
s.in_.value = s.out
with pytest.raises( VerilatorCompileError ):
model = TestVerilatorCompileError()
model, sim = setup_sim( model )
| 29 | 72 | 0.460363 | 974 | 0.34625 | 0 | 0 | 1,333 | 0.473871 | 0 | 0 | 1,079 | 0.383576 |
c8f667d55a6083981558407ab139318c270d5ca3 | 436 | py | Python | library/TraverseDirectory-M2.py | remytanx/python3-created-in-github | 83b3dd0f36da6fc4df7c1cc37cac12f178f985a3 | [
"MIT"
]
| null | null | null | library/TraverseDirectory-M2.py | remytanx/python3-created-in-github | 83b3dd0f36da6fc4df7c1cc37cac12f178f985a3 | [
"MIT"
]
| null | null | null | library/TraverseDirectory-M2.py | remytanx/python3-created-in-github | 83b3dd0f36da6fc4df7c1cc37cac12f178f985a3 | [
"MIT"
]
| null | null | null | import os
# Get the list of all files with a specific extension
# In this example, we will take a path of a directory and try to
# list all the files, with a specific extension .py here,
# in the directory and its sub-directories recursively.
path = r'C:\Users\10900225\Documents\Witch\BTX\Workspaces\Library'
for root, dirs, files in os.walk(path):
for file in files:
        if file.endswith(".py"):
print(os.path.join(root,file)) | 33.538462 | 66 | 0.733945 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 295 | 0.676606 |
c8f71840564fdc1ff2e1787b21b4d5173407d801 | 1,509 | py | Python | Modules/carlosma7/wizard/create_appointment.py | Carlosma7/Odoo | c234fcc18d15d4d8369e237286bee610fd76ceee | [
"CC0-1.0"
]
| null | null | null | Modules/carlosma7/wizard/create_appointment.py | Carlosma7/Odoo | c234fcc18d15d4d8369e237286bee610fd76ceee | [
"CC0-1.0"
]
| null | null | null | Modules/carlosma7/wizard/create_appointment.py | Carlosma7/Odoo | c234fcc18d15d4d8369e237286bee610fd76ceee | [
"CC0-1.0"
]
| null | null | null | # -*- coding: utf-8 -*-
from odoo import api, fields, models, _
# Wizard class
class CreateAppointmentWizard(models.TransientModel):
_name = "create.appointment.wizard"
_description = "Create Appointment Wizard"
date_appointment = fields.Date(string='Date', required=False)
patient_id = fields.Many2one('hospital.patient', string="Patient", required=True)
# Wizard function
def action_create_appointment(self):
print("Wizard button is clicked")
vals = {
'patient_id': self.patient_id.id,
'date_appointment': self.date_appointment
}
# Create a new record
appointment_rec = self.env['hospital.appointment'].create(vals)
return {
'name': _('Appointment'),
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'hospital.appointment',
'res_id': appointment_rec.id,
}
# View appointment
def action_view_appointment(self):
# Method 1
# action = self.env.ref('carlosma7.action_hospital_appointment').read()[0]
# action['domain'] = [('patient_id', '=', self.patient_id.id)]
# return action
# Method 2
        # action = self.env['ir.actions.actions']._for_xml_id('carlosma7.action_hospital_appointment')
# action['domain'] = [('patient_id', '=', self.patient_id.id)]
# return action
# Method 3
return {
'type': 'ir.actions.act_window',
'name': 'Appointments',
'res_model': 'hospital.appointment',
'view_type': 'form',
'domain': [('patient_id', '=', self.patient_id.id)],
'view_mode': 'tree,form',
'target': 'current',
} | 29.019231 | 97 | 0.686547 | 1,430 | 0.947647 | 0 | 0 | 0 | 0 | 0 | 0 | 876 | 0.580517 |
c8f838e818d81e237d9d5d8fa11595a921a6fae3 | 4,731 | py | Python | groups.py | davidmehren/udm_group_matrix | ae71feef4bf299588aa473c95e9073c7d2f5f23e | [
"MIT"
]
| null | null | null | groups.py | davidmehren/udm_group_matrix | ae71feef4bf299588aa473c95e9073c7d2f5f23e | [
"MIT"
]
| null | null | null | groups.py | davidmehren/udm_group_matrix | ae71feef4bf299588aa473c95e9073c7d2f5f23e | [
"MIT"
]
| 1 | 2019-12-06T14:59:39.000Z | 2019-12-06T14:59:39.000Z | #!/bin/env python3
import re
from typing import List
import numpy as np
import matplotlib.pyplot as plt
filtered_users = ["join-backup", "join-slave", "ucs-sso"]
filtered_groups = ["computers", "dc backup hosts", "dc slave hosts"]
class LDAPUser:
name: str
def __init__(self, name):
self.name = name
def __eq__(self, o: 'LDAPUser') -> bool:
return self.name == o.name
def __lt__(self, o: 'LDAPUser') -> bool:
return self.name < o.name
def __hash__(self) -> int:
return self.name.__hash__()
class LDAPGroupList:
content: List['LDAPGroup']
def __init__(self):
self.content = []
def add(self, group):
if group.name not in filtered_groups:
self.content.append(group)
def get_by_name(self, name):
for _group in self.content:
if _group.name == name:
return _group
return None
def get_user_list(self):
user_list = set()
for group in self.content:
user_list.update(group.members)
return list(user_list)
def tidy(self):
new_content = []
for group in self.content:
if group.samba_rid < 0:
continue
if len(group.members) > 0:
new_content.append(group)
self.content = sorted(new_content)
class LDAPGroup:
name: str
samba_rid: int
subgroups: List[str]
members: List[LDAPUser]
def __str__(self) -> str:
_repr = f"{self.name}\n Mitglieder:\n"
for member in self.members:
_repr = _repr + f" {member.name}\n"
_repr = _repr + " Untergruppen:\n"
for _group in self.subgroups:
_repr = _repr + f" {_group}\n"
return _repr
def __lt__(self, o: 'LDAPGroup') -> bool:
return self.name < o.name
    def __init__(self, name: str):
        self.name = name.lower()
        self.samba_rid = -1  # sentinel so tidy() can drop groups without a parsed sambaRID
        self.subgroups = []
        self.members = []
def add_subgroup(self, group: str):
self.subgroups.append(group.lower())
def parse_subgroups(self, global_groups: LDAPGroupList):
for group_name in self.subgroups:
ldap_group = global_groups.get_by_name(group_name)
if ldap_group is None:
print(f"can't find group '{group_name}'")
else:
for member in ldap_group.members:
if member not in self.members:
self.members.append(member)
def add_member(self, member):
if member.name not in filtered_users:
self.members.append(member)
def read_groupdump():
_group_list = LDAPGroupList()
with open("groupdump.txt", "r") as file:
current_group = None
for line in file:
if line == "\n":
continue
if line.startswith("DN"):
current_group = LDAPGroup(re.findall(r"cn=(.*?),", line)[0])
_group_list.add(current_group)
# print(current_user)
if current_group.name.startswith("dns-") or current_group.name.startswith(
"ucs-") or current_group.name.startswith("join-"):
continue
if line.startswith(" users"):
user = LDAPUser(re.findall(r"uid=(.*?),", line)[0])
# print(" ", group)
current_group.add_member(user)
if line.startswith(" nestedGroup"):
subgroup = re.findall(r"cn=(.*?),", line)[0]
# print(" ", group)
current_group.add_subgroup(subgroup)
if line.startswith(" sambaRID:"):
rid = re.findall(r"([0-9]{1,4})", line)[0]
current_group.samba_rid = int(rid)
return _group_list
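# Expected groupdump.txt shape (illustrative; an LDAP search dump of UDM groups):
#   DN: cn=mygroup,cn=groups,dc=example,dc=org
#     users: uid=alice,cn=users,dc=example,dc=org
#     nestedGroup: cn=subgroup,cn=groups,dc=example,dc=org
#     sambaRID: 513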
def paint_matrix(groups: LDAPGroupList):
user_list = sorted(groups.get_user_list(), reverse=True)
x_count = len(groups.content)
y_count = len(user_list)
matrix = np.zeros((x_count, y_count))
for g_index, group in enumerate(groups.content):
for user in group.members:
matrix[g_index][user_list.index(user)] = 1
plt.pcolor(matrix.T, edgecolors='k', cmap="Greys", vmin=0, vmax=1)
x_locations = [x + 0.5 for x in range(x_count)]
y_locations = [x + 0.5 for x in range(y_count)]
plt.xticks(x_locations, [group.name for group in groups.content], rotation=45, fontsize=4, ha="right")
plt.yticks(y_locations, [user.name for user in user_list], fontsize=2)
plt.tight_layout()
plt.savefig("groups.png", dpi=600)
if __name__ == '__main__':
groups = read_groupdump()
for group in groups.content:
group.parse_subgroups(groups)
groups.tidy()
paint_matrix(groups)
| 31.125 | 106 | 0.581484 | 2,375 | 0.502008 | 0 | 0 | 0 | 0 | 0 | 0 | 495 | 0.104629 |
c8f87a26e0ea3211d6cafee5a76cf221fb9382c8 | 107,098 | py | Python | src/dbobjects.py | 211tbc/synthesis | 55b4dcb85b7a2ed5fbc46b1740c8ca0ab80248a4 | [
"Unlicense"
]
| null | null | null | src/dbobjects.py | 211tbc/synthesis | 55b4dcb85b7a2ed5fbc46b1740c8ca0ab80248a4 | [
"Unlicense"
]
| 7 | 2016-08-12T15:12:43.000Z | 2020-06-07T03:19:13.000Z | src/dbobjects.py | 211tbc/synthesis | 55b4dcb85b7a2ed5fbc46b1740c8ca0ab80248a4 | [
"Unlicense"
]
| null | null | null | from sqlalchemy import create_engine, Column, Integer, BigInteger, String, Boolean, MetaData, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.types import DateTime, Date, Interval
from sqlalchemy.pool import NullPool
from .conf import settings
from logging import Logger
print("loaded dbobjects module")
class DB:
#print "loaded DB Class"
database_string = 'postgresql+psycopg2://' + settings.DB_USER + ':' + settings.DB_PASSWD + '@' + settings.DB_HOST + ':' + str(settings.DB_PORT) + '/' + settings.DB_DATABASE
pg_db_engine = create_engine(database_string, poolclass=NullPool, echo=settings.DEBUG_ALCHEMY)
mymetadata = MetaData(bind=pg_db_engine)
Base = declarative_base(metadata=mymetadata)
def __init__(self):
#postgresql[+driver]://<user>:<pass>@<host>/<dbname> #, server_side_cursors=True)
        self.Session = sessionmaker()  # was: sessionmaker(bind=self.pg_db_engine)  # JCS
loglevel = 'DEBUG'
self.log = Logger(settings.LOGGING_INI, loglevel)
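# Usage sketch (assumes settings provides valid DB_* credentials; Need is one of
# the mapped classes below):
#   db = DB()
#   session = db.Session(bind=DB.pg_db_engine)
#   open_needs = session.query(Need).filter(Need.need_status == 'open').all()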
class MapBase():
def __init__(self, field_dict):
if settings.DEBUG:
print("Base Class created: %s" % self.__class__.__name__)
        if settings.DEBUG:
            print(field_dict)
        for x, y in field_dict.items():  # iteritems() was Python 2 only
            self.__setattr__(x, y)
def __repr__(self):
field_dict = vars(self)
out = ''
if len(field_dict) > 0:
            for x, y in field_dict.items():
if x[0] != "_":
out = out + "%s = %s, " % (x,y)
return "<%s(%s)>" % (self.__class__.__name__, out)
else:
return ''
class SiteServiceParticipation(DB.Base, MapBase):
__tablename__ = 'site_service_participation'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
household_index_id = Column(Integer, ForeignKey('household.id'))
site_service_participation_idid_num = Column(String(32))
site_service_participation_idid_num_date_collected = Column(DateTime(timezone=False))
site_service_participation_idid_str = Column(String(32))
site_service_participation_idid_str_date_collected = Column(DateTime(timezone=False))
site_service_idid_num = Column(String(32)) # JCS
#site_service_idid_num_date_collected = Column(DateTime(timezone=False)) # JCS
destination = Column(String(32))
destination_date_collected = Column(DateTime(timezone=False))
destination_other = Column(String(32))
destination_other_date_collected = Column(DateTime(timezone=False))
destination_tenure = Column(String(32))
destination_tenure_date_collected = Column(DateTime(timezone=False))
disabling_condition = Column(String(32))
disabling_condition_date_collected = Column(DateTime(timezone=False))
participation_dates_start_date = Column(DateTime(timezone=False))
participation_dates_start_date_date_collected = Column(DateTime(timezone=False))
participation_dates_end_date = Column(DateTime(timezone=False))
participation_dates_end_date_date_collected = Column(DateTime(timezone=False))
veteran_status = Column(String(32))
veteran_status_date_collected = Column(DateTime(timezone=False))
#adding a reported column. Hopefully this will append the column to the table def.
reported = Column(Boolean)
site_service_participation_id_delete = Column(String(32))
site_service_participation_id_delete_occurred_date = Column(DateTime(timezone=False))
site_service_participation_id_delete_effective_date = Column(DateTime(timezone=False))
fk_participation_to_need = relationship('Need', backref='fk_need_to_participation')
fk_participation_to_serviceevent = relationship('ServiceEvent')
fk_participation_to_personhistorical = relationship('PersonHistorical')
fk_participation_to_person = Column(Integer, ForeignKey('person.id'))
useexisting = True
class Need(DB.Base, MapBase):
__tablename__ = 'need'
id = Column(Integer, primary_key=True)
site_service_index_id = Column(Integer, ForeignKey('site_service.id')) # JCS
site_service_participation_index_id = Column(Integer, ForeignKey('site_service_participation.id')) # JCS
export_index_id = Column(Integer, ForeignKey('export.id'))
need_idid_num = Column(String(32))
need_idid_num_date_collected = Column(DateTime(timezone=False))
need_idid_str = Column(String(32))
need_idid_str_date_collected = Column(DateTime(timezone=False))
site_service_idid_num = Column(String(32))
site_service_idid_num_date_collected = Column(DateTime(timezone=False))
site_service_idid_str = Column(String(32))
site_service_idid_str_date_collected = Column(DateTime(timezone=False))
service_event_idid_num = Column(String(32))
service_event_idid_num_date_collected = Column(DateTime(timezone=False))
service_event_idid_str = Column(String(32))
service_event_idid_str_date_collected = Column(DateTime(timezone=False))
need_status = Column(String(32))
need_status_date_collected = Column(DateTime(timezone=False))
taxonomy = Column(String(32))
reported = Column(Boolean)
## HUD 3.0
person_index_id = Column(Integer, ForeignKey('person.id'))
need_id_delete = Column(String(32))
need_id_delete_occurred_date = Column(DateTime(timezone=False))
need_id_delete_delete_effective_date = Column(DateTime(timezone=False))
need_effective_period_start_date = Column(DateTime(timezone=False))
need_effective_period_end_date = Column(DateTime(timezone=False))
need_recorded_date = Column(DateTime(timezone=False))
useexisting = True
class Races(DB.Base, MapBase):
__tablename__ = 'races'
id = Column(Integer, primary_key=True)
person_index_id = Column(Integer, ForeignKey('person.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
race_unhashed = Column(Integer)
race_hashed = Column(String(32))
race_date_collected = Column(DateTime(timezone=False))
reported = Column(Boolean)
## HUD 3.0
race_data_collection_stage = Column(String(32))
race_date_effective = Column(DateTime(timezone=False))
useexisting = True
class OtherNames(DB.Base, MapBase):
__tablename__ = 'other_names'
id = Column(Integer, primary_key=True)
person_index_id = Column(Integer, ForeignKey('person.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
other_first_name_unhashed = Column(String(50))
other_first_name_hashed = Column(String(50))
other_first_name_date_collected = Column(DateTime(timezone=False))
other_first_name_date_effective = Column(DateTime(timezone=False))
other_first_name_data_collection_stage = Column(String(32))
other_middle_name_unhashed = Column(String(50))
other_middle_name_hashed = Column(String(50))
other_middle_name_date_collected = Column(DateTime(timezone=False))
other_middle_name_date_effective = Column(DateTime(timezone=False))
other_middle_name_data_collection_stage = Column(String(32))
other_last_name_unhashed = Column(String(50))
other_last_name_hashed = Column(String(50))
other_last_name_date_collected = Column(DateTime(timezone=False))
other_last_name_date_effective = Column(DateTime(timezone=False))
other_last_name_data_collection_stage = Column(String(32))
other_suffix_unhashed = Column(String(50))
other_suffix_hashed = Column(String(50))
other_suffix_date_collected = Column(DateTime(timezone=False))
other_suffix_date_effective = Column(DateTime(timezone=False))
other_suffix_data_collection_stage = Column(String(32))
useexisting = True
class HUDHomelessEpisodes(DB.Base, MapBase):
__tablename__ = 'hud_homeless_episodes'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
start_date = Column(String(32))
start_date_date_collected = Column(DateTime(timezone=False))
end_date = Column(String(32))
end_date_date_collected = Column(DateTime(timezone=False))
useexisting = True
class Veteran(DB.Base, MapBase):
__tablename__ = 'veteran'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
service_era = Column(Integer)
service_era_date_collected = Column(DateTime(timezone=False))
military_service_duration = Column(Integer)
military_service_duration_date_collected = Column(DateTime(timezone=False))
served_in_war_zone = Column(Integer)
served_in_war_zone_date_collected = Column(DateTime(timezone=False))
war_zone = Column(Integer)
war_zone_date_collected = Column(DateTime(timezone=False))
war_zone_other = Column(String(50))
war_zone_other_date_collected = Column(DateTime(timezone=False))
months_in_war_zone = Column(Integer)
months_in_war_zone_date_collected = Column(DateTime(timezone=False))
received_fire = Column(Integer)
received_fire_date_collected = Column(DateTime(timezone=False))
military_branch = Column(Integer)
military_branch_date_collected = Column(DateTime(timezone=False))
military_branch_other = Column(String(50))
military_branch_other_date_collected = Column(DateTime(timezone=False))
discharge_status = Column(Integer)
discharge_status_date_collected = Column(DateTime(timezone=False))
discharge_status_other = Column(String(50))
discharge_status_other_date_collected = Column(DateTime(timezone=False))
reported = Column(Boolean)
useexisting = True
class DrugHistory(DB.Base, MapBase):
__tablename__ = 'drug_history'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
drug_history_id = Column(String(32))
drug_history_id_date_collected = Column(DateTime(timezone=False))
drug_code = Column(Integer)
drug_code_date_collected = Column(DateTime(timezone=False))
drug_use_frequency = Column(Integer)
drug_use_frequency_date_collected = Column(DateTime(timezone=False))
reported = Column(Boolean)
useexisting = True
class EmergencyContact(DB.Base, MapBase):
__tablename__ = 'emergency_contact'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
emergency_contact_id = Column(String(32))
emergency_contact_id_date_collected = Column(DateTime(timezone=False))
emergency_contact_name = Column(String(32))
emergency_contact_name_date_collected = Column(DateTime(timezone=False))
emergency_contact_phone_number_0 = Column(String(32))
emergency_contact_phone_number_date_collected_0 = Column(DateTime(timezone=False))
emergency_contact_phone_number_type_0 = Column(String(32))
emergency_contact_phone_number_1 = Column(String(32))
emergency_contact_phone_number_date_collected_1 = Column(DateTime(timezone=False))
emergency_contact_phone_number_type_1 = Column(String(32))
emergency_contact_address_date_collected = Column(DateTime(timezone=False))
emergency_contact_address_start_date = Column(DateTime(timezone=False))
emergency_contact_address_start_date_date_collected = Column(DateTime(timezone=False))
emergency_contact_address_end_date = Column(DateTime(timezone=False))
emergency_contact_address_end_date_date_collected = Column(DateTime(timezone=False))
emergency_contact_address_line1 = Column(String(32))
emergency_contact_address_line1_date_collected = Column(DateTime(timezone=False))
emergency_contact_address_line2 = Column(String(32))
emergency_contact_address_line2_date_collected = Column(DateTime(timezone=False))
emergency_contact_address_city = Column(String(32))
emergency_contact_address_city_date_collected = Column(DateTime(timezone=False))
emergency_contact_address_state = Column(String(32))
emergency_contact_address_state_date_collected = Column(DateTime(timezone=False))
emergency_contact_relation_to_client = Column(String(32))
emergency_contact_relation_to_client_date_collected = Column(DateTime(timezone=False))
reported = Column(Boolean)
useexisting = True
class PersonAddress(DB.Base, MapBase):
__tablename__ = 'person_address'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
address_period_start_date = Column(DateTime(timezone=False))
address_period_start_date_date_collected = Column(DateTime(timezone=False))
address_period_end_date = Column(DateTime(timezone=False))
address_period_end_date_date_collected = Column(DateTime(timezone=False))
pre_address_line = Column(String(100))
pre_address_line_date_collected = Column(DateTime(timezone=False))
pre_address_line_date_effective = Column(DateTime(timezone=False))
pre_address_line_data_collection_stage = Column(String(32))
line1 = Column(String(100))
line1_date_collected = Column(DateTime(timezone=False))
line1_date_effective = Column(DateTime(timezone=False))
line1_data_collection_stage = Column(String(32))
line2 = Column(String(100))
line2_date_collected = Column(DateTime(timezone=False))
line2_date_effective = Column(DateTime(timezone=False))
line2_data_collection_stage = Column(String(32))
city = Column(String(100))
city_date_collected = Column(DateTime(timezone=False))
city_date_effective = Column(DateTime(timezone=False))
city_data_collection_stage = Column(String(32))
county = Column(String(32))
county_date_collected = Column(DateTime(timezone=False))
county_date_effective = Column(DateTime(timezone=False))
county_data_collection_stage = Column(String(32))
state = Column(String(32))
state_date_collected = Column(DateTime(timezone=False))
state_date_effective = Column(DateTime(timezone=False))
state_data_collection_stage = Column(String(32))
zipcode = Column(String(10))
zipcode_date_collected = Column(DateTime(timezone=False))
zipcode_date_effective = Column(DateTime(timezone=False))
zipcode_data_collection_stage = Column(String(32))
country = Column(String(32))
country_date_collected = Column(DateTime(timezone=False))
country_date_effective = Column(DateTime(timezone=False))
country_data_collection_stage = Column(String(32))
is_last_permanent_zip = Column(Integer)
is_last_permanent_zip_date_collected = Column(DateTime(timezone=False))
is_last_permanent_zip_date_effective = Column(DateTime(timezone=False))
is_last_permanent_zip_data_collection_stage = Column(String(32))
zip_quality_code = Column(Integer)
zip_quality_code_date_collected = Column(DateTime(timezone=False))
zip_quality_code_date_effective = Column(DateTime(timezone=False))
zip_quality_code_data_collection_stage = Column(String(32))
reported = Column(Boolean)
## HUD 3.0
person_address_delete = Column(String(32))
person_address_delete_occurred_date = Column(DateTime(timezone=False))
person_address_delete_effective_date = Column(DateTime(timezone=False))
useexisting = True
class PersonHistorical(DB.Base, MapBase):
__tablename__ = 'person_historical'
id = Column(Integer, primary_key=True)
call_index_id = Column(Integer, ForeignKey('call.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
person_index_id = Column(Integer, ForeignKey('person.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id')) # JCS
site_service_participation_index_id = Column(Integer, ForeignKey('site_service_participation.id')) # JCS
person_historical_id_id_num = Column(String(32))
person_historical_id_id_str = Column(String(32))
person_historical_id_delete_effective_date = Column(DateTime(timezone=False))
person_historical_id_delete = Column(Integer)
person_historical_id_delete_occurred_date = Column(DateTime(timezone=False))
barrier_code = Column(String(32))
barrier_code_date_collected = Column(DateTime(timezone=False))
barrier_other = Column(String(32))
barrier_other_date_collected = Column(DateTime(timezone=False))
child_currently_enrolled_in_school = Column(String(32))
child_currently_enrolled_in_school_date_collected = Column(DateTime(timezone=False))
currently_employed = Column(String(32))
currently_employed_date_collected = Column(DateTime(timezone=False))
currently_in_school = Column(String(32))
currently_in_school_date_collected = Column(DateTime(timezone=False))
degree_code = Column(String(32))
degree_code_date_collected = Column(DateTime(timezone=False))
degree_other = Column(String(32))
degree_other_date_collected = Column(DateTime(timezone=False))
developmental_disability = Column(String(32))
developmental_disability_date_collected = Column(DateTime(timezone=False))
domestic_violence = Column(String(32))
domestic_violence_date_collected = Column(DateTime(timezone=False))
domestic_violence_how_long = Column(String(32))
domestic_violence_how_long_date_collected = Column(DateTime(timezone=False))
due_date = Column(String(32))
due_date_date_collected = Column(DateTime(timezone=False))
employment_tenure = Column(String(32))
employment_tenure_date_collected = Column(DateTime(timezone=False))
health_status = Column(String(32))
health_status_date_collected = Column(DateTime(timezone=False))
highest_school_level = Column(String(32))
highest_school_level_date_collected = Column(DateTime(timezone=False))
hivaids_status = Column(String(32))
hivaids_status_date_collected = Column(DateTime(timezone=False))
hours_worked_last_week = Column(String(32))
hours_worked_last_week_date_collected = Column(DateTime(timezone=False))
hud_chronic_homeless = Column(String(32))
hud_chronic_homeless_date_collected = Column(DateTime(timezone=False))
hud_homeless = Column(String(32))
hud_homeless_date_collected = Column(DateTime(timezone=False))
site_service_id = Column(Integer)
###HUDHomelessEpisodes (subtable)
###IncomeAndSources (subtable)
length_of_stay_at_prior_residence = Column(String(32))
length_of_stay_at_prior_residence_date_collected = Column(DateTime(timezone=False))
looking_for_work = Column(String(32))
looking_for_work_date_collected = Column(DateTime(timezone=False))
mental_health_indefinite = Column(String(32))
mental_health_indefinite_date_collected = Column(DateTime(timezone=False))
mental_health_problem = Column(String(32))
mental_health_problem_date_collected = Column(DateTime(timezone=False))
non_cash_source_code = Column(String(32))
non_cash_source_code_date_collected = Column(DateTime(timezone=False))
non_cash_source_other = Column(String(32))
non_cash_source_other_date_collected = Column(DateTime(timezone=False))
###PersonAddress (subtable)
person_email = Column(String(32))
person_email_date_collected = Column(DateTime(timezone=False))
person_phone_number = Column(String(32))
person_phone_number_date_collected = Column(DateTime(timezone=False))
physical_disability = Column(String(32))
physical_disability_date_collected = Column(DateTime(timezone=False))
pregnancy_status = Column(String(32))
pregnancy_status_date_collected = Column(DateTime(timezone=False))
prior_residence = Column(String(32))
prior_residence_date_collected = Column(DateTime(timezone=False))
prior_residence_other = Column(String(32))
prior_residence_other_date_collected = Column(DateTime(timezone=False))
reason_for_leaving = Column(String(32))
reason_for_leaving_date_collected = Column(DateTime(timezone=False))
reason_for_leaving_other = Column(String(32))
reason_for_leaving_other_date_collected = Column(DateTime(timezone=False))
school_last_enrolled_date = Column(String(32))
school_last_enrolled_date_date_collected = Column(DateTime(timezone=False))
school_name = Column(String(32))
school_name_date_collected = Column(DateTime(timezone=False))
school_type = Column(String(32))
school_type_date_collected = Column(DateTime(timezone=False))
subsidy_other = Column(String(32))
subsidy_other_date_collected = Column(DateTime(timezone=False))
subsidy_type = Column(String(32))
subsidy_type_date_collected = Column(DateTime(timezone=False))
substance_abuse_indefinite = Column(String(32))
substance_abuse_indefinite_date_collected = Column(DateTime(timezone=False))
substance_abuse_problem = Column(String(32))
substance_abuse_problem_date_collected = Column(DateTime(timezone=False))
total_income = Column(String(32))
total_income_date_collected = Column(DateTime(timezone=False))
###Veteran (subtable)
vocational_training = Column(String(32))
vocational_training_date_collected = Column(DateTime(timezone=False))
annual_personal_income = Column(Integer)
annual_personal_income_date_collected = Column(DateTime(timezone=False))
employment_status = Column(Integer)
employment_status_date_collected = Column(DateTime(timezone=False))
family_size = Column(Integer)
family_size_date_collected = Column(DateTime(timezone=False))
hearing_impaired = Column(Integer)
hearing_impaired_date_collected = Column(DateTime(timezone=False))
marital_status = Column(Integer)
marital_status_date_collected = Column(DateTime(timezone=False))
non_ambulatory = Column(Integer)
non_ambulatory_date_collected = Column(DateTime(timezone=False))
residential_status = Column(Integer)
residential_status_date_collected = Column(DateTime(timezone=False))
visually_impaired = Column(Integer)
visually_impaired_date_collected = Column(DateTime(timezone=False))
reported = Column(Boolean)
fk_person_historical_to_income_and_sources = relationship('IncomeAndSources',
backref='fk_income_and_sources_to_person_historical')
fk_person_historical_to_veteran = relationship('Veteran', backref='fk_veteran_to_person_historical')
fk_person_historical_to_hud_homeless_episodes = relationship('HUDHomelessEpisodes',
backref='fk_hud_homeless_episodes_to_person_historical')
fk_person_historical_to_person_address = relationship('PersonAddress', backref='fk_person_address_to_person_historical')
useexisting = True
class IncomeAndSources(DB.Base, MapBase):
__tablename__ = 'income_and_sources'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
amount = Column(Integer)
amount_date_collected = Column(DateTime(timezone=False))
income_source_code = Column(Integer)
income_source_code_date_collected = Column(DateTime(timezone=False))
income_source_other = Column(String(32))
income_source_other_date_collected = Column(DateTime(timezone=False))
## HUD 3.0
income_and_source_id_id_num = Column(String(32))
income_and_source_id_id_str = Column(String(32))
income_and_source_id_id_delete_occurred_date = Column(DateTime(timezone=False))
income_and_source_id_id_delete_effective_date = Column(DateTime(timezone=False))
income_source_code_date_effective = Column(DateTime(timezone=False))
income_source_other_date_effective = Column(DateTime(timezone=False))
receiving_income_source_date_collected = Column(DateTime(timezone=False))
receiving_income_source_date_effective = Column(DateTime(timezone=False))
income_source_amount_date_effective = Column(DateTime(timezone=False))
income_and_source_id_id_delete = Column(Integer)
income_source_code_data_collection_stage = Column(String(32))
income_source_other_data_collection_stage = Column(String(32))
receiving_income_source = Column(Integer)
receiving_income_source_data_collection_stage = Column(String(32))
income_source_amount_data_collection_stage = Column(String(32))
useexisting = True
class Members(DB.Base, MapBase):
__tablename__ = 'members'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
household_index_id = Column(Integer, ForeignKey('household.id'))
person_index_id = Column(Integer, ForeignKey('person.id'))
relationship_to_head_of_household = Column(String(32))
relationship_to_head_of_household_date_collected = Column(DateTime(timezone=False))
reported = Column(Boolean)
useexisting = True
class ReleaseOfInformation(DB.Base, MapBase):
__tablename__ = 'release_of_information'
id = Column(Integer, primary_key=True)
person_index_id = Column(Integer, ForeignKey('person.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
release_of_information_idid_num = Column(String(32))
release_of_information_idid_num_date_collected = Column(DateTime(timezone=False))
release_of_information_idid_str = Column(String(32))
release_of_information_idid_str_date_collected = Column(DateTime(timezone=False))
site_service_idid_num = Column(String(32))
site_service_idid_num_date_collected = Column(DateTime(timezone=False))
site_service_idid_str = Column(String(32))
site_service_idid_str_date_collected = Column(DateTime(timezone=False))
documentation = Column(String(32))
documentation_date_collected = Column(DateTime(timezone=False))
#EffectivePeriod (subtable)
start_date = Column(String(32))
start_date_date_collected = Column(DateTime(timezone=False))
end_date = Column(String(32))
end_date_date_collected = Column(DateTime(timezone=False))
release_granted = Column(String(32))
release_granted_date_collected = Column(DateTime(timezone=False))
reported = Column(Boolean)
## HUD 3.0
release_of_information_id_data_collection_stage = Column(String(32))
release_of_information_id_date_effective = Column(DateTime(timezone=False))
documentation_data_collection_stage = Column(String(32))
documentation_date_effective = Column(DateTime(timezone=False))
release_granted_data_collection_stage = Column(String(32))
release_granted_date_effective = Column(DateTime(timezone=False))
useexisting = True
class SourceExportLink(DB.Base, MapBase):
__tablename__ = 'source_export_link'
id = Column(Integer, primary_key=True)
source_index_id = Column(Integer, ForeignKey('source.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
report_index_id = Column(String(50), ForeignKey('report.report_id'))
useexisting = True
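
# SourceExportLink is a plain association table tying a Source row to an
# Export (and the owning Report), so listing the exports for one source is a
# two-hop join.  A sketch, assuming a live session and the Source/Export
# mappings declared elsewhere in this module:
def _example_exports_for_source(session, source_index_id):
    return (session.query(Export)
            .join(SourceExportLink,
                  SourceExportLink.export_index_id == Export.id)
            .filter(SourceExportLink.source_index_id == source_index_id)
            .all())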
class Region(DB.Base, MapBase):
__tablename__ = 'region'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
report_index_id = Column(String(50), ForeignKey('report.report_id'))
region_id_id_num = Column(String(50))
region_id_id_str = Column(String(32))
site_service_id = Column(String(50))
region_type = Column(String(50))
region_type_date_collected = Column(DateTime(timezone=False))
region_type_date_effective = Column(DateTime(timezone=False))
region_type_data_collection_stage = Column(String(32))
region_description = Column(String(30))
region_description_date_collected = Column(DateTime(timezone=False))
region_description_date_effective = Column(DateTime(timezone=False))
region_description_data_collection_stage = Column(String(32))
useexisting = True
class Agency(DB.Base, MapBase):
__tablename__ = 'agency'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
report_index_id = Column(String(50), ForeignKey('report.report_id'))
agency_delete = Column(Integer)
agency_delete_occurred_date = Column(DateTime(timezone=False))
agency_delete_effective_date = Column(DateTime(timezone=False))
airs_key = Column(String(50))
airs_name = Column(String(50))
agency_description = Column(String(50))
irs_status = Column(String(50))
source_of_funds = Column(String(50))
record_owner = Column(String(50))
fein = Column(String(50))
year_inc = Column(String(50))
annual_budget_total = Column(String(50))
legal_status = Column(String(50))
exclude_from_website = Column(String(50))
exclude_from_directory = Column(String(50))
useexisting = True
class AgencyChild(DB.Base, MapBase):
__tablename__ = 'agency_child'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
report_index_id = Column(String(50), ForeignKey('report.report_id'))
agency_index_id = Column(Integer, ForeignKey('agency.id'))
useexisting = True
class Service(DB.Base, MapBase):
__tablename__ = 'service'
id = Column(Integer, primary_key=True)
service_id = Column(String(50))
export_index_id = Column(Integer, ForeignKey('export.id'))
report_index_id = Column(String(50), ForeignKey('report.report_id'))
service_delete = Column(Integer)
service_delete_occurred_date = Column(DateTime(timezone=False))
service_delete_effective_date = Column(DateTime(timezone=False))
airs_key = Column(String(50))
airs_name = Column(String(50))
coc_code = Column(String(5))
configuration = Column(String(50))
direct_service_code = Column(String(50))
grantee_identifier = Column(String(10))
individual_family_code = Column(String(50))
residential_tracking_method = Column(String(50))
service_type = Column(String(50))
jfcs_service_type = Column(String(50))
service_effective_period_start_date = Column(DateTime(timezone=False))
service_effective_period_end_date = Column(DateTime(timezone=False))
service_recorded_date = Column(DateTime(timezone=False))
target_population_a = Column(String(50))
target_population_b = Column(String(50))
useexisting = True
class Site(DB.Base, MapBase):
__tablename__ = 'site'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
report_index_id = Column(String(50), ForeignKey('report.report_id'))
agency_index_id = Column(Integer, ForeignKey('agency.id'))
#agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
site_delete = Column(Integer)
site_delete_occurred_date = Column(DateTime(timezone=False))
site_delete_effective_date = Column(DateTime(timezone=False))
airs_key = Column(String(50))
airs_name = Column(String(50))
site_description = Column(String(50))
physical_address_pre_address_line = Column(String(100))
physical_address_line_1 = Column(String(100))
physical_address_line_2 = Column(String(100))
physical_address_city = Column(String(50))
physical_address_country = Column(String(50))
physical_address_state = Column(String(50))
physical_address_zip_code = Column(String(50))
physical_address_county = Column(String(50))
physical_address_reason_withheld = Column(String(50))
physical_address_confidential = Column(String(50))
physical_address_description = Column(String(50))
mailing_address_pre_address_line = Column(String(100))
mailing_address_line_1 = Column(String(100))
mailing_address_line_2 = Column(String(100))
mailing_address_city = Column(String(50))
mailing_address_country = Column(String(50))
mailing_address_state = Column(String(50))
mailing_address_zip_code = Column(String(50))
mailing_address_county = Column(String(50))
mailing_address_reason_withheld = Column(String(50))
mailing_address_confidential = Column(String(50))
mailing_address_description = Column(String(50))
no_physical_address_description = Column(String(50))
no_physical_address_explanation = Column(String(50))
disabilities_access = Column(String(50))
physical_location_description = Column(String(50))
bus_service_access = Column(String(50))
public_access_to_transportation = Column(String(50))
year_inc = Column(String(50))
annual_budget_total = Column(String(50))
legal_status = Column(String(50))
exclude_from_website = Column(String(50))
exclude_from_directory = Column(String(50))
agency_key = Column(String(50))
useexisting = True
class SiteService(DB.Base, MapBase):
__tablename__ = 'site_service'
id = Column(Integer, primary_key=True)
site_service_id = Column(String(50))
export_index_id = Column(Integer, ForeignKey('export.id'))
report_index_id = Column(String(50), ForeignKey('report.report_id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
service_index_id = Column(Integer, ForeignKey(Service.id))
agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
site_service_delete = Column(Integer)
site_service_delete_occurred_date = Column(DateTime(timezone=False))
site_service_delete_effective_date = Column(DateTime(timezone=False))
name = Column(String(50))
key = Column(String(50))
description = Column(String(50))
fee_structure = Column(String(50))
gender_requirements = Column(String(50))
area_flexibility = Column(String(50))
service_not_always_available = Column(String(50))
service_group_key = Column(String(50))
site_id = Column(String(50))
geographic_code = Column(String(50))
geographic_code_date_collected = Column(DateTime(timezone=False))
geographic_code_date_effective = Column(DateTime(timezone=False))
geographic_code_data_collection_stage = Column(String(50))
housing_type = Column(String(50))
housing_type_date_collected = Column(DateTime(timezone=False))
housing_type_date_effective = Column(DateTime(timezone=False))
housing_type_data_collection_stage = Column(String(50))
principal = Column(String(50))
site_service_effective_period_start_date = Column(DateTime(timezone=False))
site_service_effective_period_end_date = Column(DateTime(timezone=False))
site_service_recorded_date = Column(DateTime(timezone=False))
site_service_type = Column(String(50))
useexisting = True
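
# Note on the two ForeignKey spellings used above: the string form
# ForeignKey('site.id') and the class-attribute form ForeignKey(Service.id)
# are equivalent.  The string form defers resolution until the metadata is
# assembled, so it also works for tables declared later in this module; the
# attribute form requires the referenced class to be defined first.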
class FundingSource(DB.Base, MapBase):
__tablename__ = 'funding_source'
id = Column(Integer, primary_key=True)
service_index_id = Column(Integer, ForeignKey('service.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
service_event_index_id = Column(Integer, ForeignKey('service_event.id'))
funding_source_id_id_num = Column(String(50))
funding_source_id_id_str = Column(String(32))
funding_source_id_delete = Column(String(50))
funding_source_id_delete_occurred_date = Column(DateTime(timezone=False))
funding_source_id_delete_effective_date = Column(DateTime(timezone=False))
federal_cfda_number = Column(String(50))
receives_mckinney_funding = Column(String(50))
advance_or_arrears = Column(String(50))
financial_assistance_amount = Column(String(50))
useexisting = True
class ResourceInfo(DB.Base, MapBase):
__tablename__ = 'resource_info'
id = Column(Integer, primary_key=True)
agency_index_id = Column(Integer, ForeignKey('agency.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
resource_specialist = Column(String(50))
available_for_directory = Column(String(50))
available_for_referral = Column(String(50))
available_for_research = Column(String(50))
date_added = Column(DateTime(timezone=False))
date_last_verified = Column(DateTime(timezone=False))
date_of_last_action = Column(DateTime(timezone=False))
last_action_type = Column(String(50))
useexisting = True
class Inventory(DB.Base, MapBase):
__tablename__ = 'inventory'
id = Column(Integer, primary_key=True)
service_index_id = Column(Integer, ForeignKey(Service.id))
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
inventory_delete = Column(Integer)
inventory_delete_occurred_date = Column(DateTime(timezone=False))
inventory_delete_effective_date = Column(DateTime(timezone=False))
hmis_participation_period_start_date = Column(DateTime(timezone=False))
hmis_participation_period_end_date = Column(DateTime(timezone=False))
inventory_id_id_num = Column(String(50))
inventory_id_id_str = Column(String(32))
bed_inventory = Column(String(50))
bed_availability = Column(String(50))
bed_type = Column(String(50))
bed_individual_family_type = Column(String(50))
chronic_homeless_bed = Column(String(50))
domestic_violence_shelter_bed = Column(String(50))
household_type = Column(String(50))
hmis_participating_beds = Column(String(50))
inventory_effective_period_start_date = Column(DateTime(timezone=False))
inventory_effective_period_end_date = Column(DateTime(timezone=False))
inventory_recorded_date = Column(DateTime(timezone=False))
unit_inventory = Column(String(50))
useexisting = True
class AgeRequirements(DB.Base, MapBase):
__tablename__ = 'age_requirements'
id = Column(Integer, primary_key=True)
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
gender = Column(String(50))
minimum_age = Column(String(50))
maximum_age = Column(String(50))
useexisting = True
class AidRequirements(DB.Base, MapBase):
__tablename__ = 'aid_requirements'
id = Column(Integer, primary_key=True)
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
aid_requirements = Column(String(50))
useexisting = True
class Aka(DB.Base, MapBase):
__tablename__ = 'aka'
id = Column(Integer, primary_key=True)
agency_index_id = Column(Integer, ForeignKey('agency.id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
# SBB20100914 Added Agency Location foreign key
agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
name = Column(String(50))
confidential = Column(String(50))
description = Column(String(50))
useexisting = True
class ApplicationProcess(DB.Base, MapBase):
__tablename__ = 'application_process'
id = Column(Integer, primary_key=True)
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
step = Column(String(50))
description = Column(String(50))
useexisting = True
class Assignment(DB.Base, MapBase):
__tablename__ = 'assignment'
id = Column(Integer, primary_key=True)
hmis_asset_index_id = Column(Integer, ForeignKey('hmis_asset.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
assignment_id_id_num = Column(String(50))
assignment_id_id_str = Column(String(32))
assignment_id_delete = Column(Integer)
assignment_id_delete_occurred_date = Column(DateTime(timezone=False))
assignment_id_delete_effective_date = Column(DateTime(timezone=False))
person_id_id_num = Column(String(50))
person_id_id_str = Column(String(32))
household_id_id_num = Column(String(50))
household_id_id_str = Column(String(32))
useexisting = True
class AssignmentPeriod(DB.Base, MapBase):
__tablename__ = 'assignment_period'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
assignment_index_id = Column(Integer, ForeignKey(Assignment.id))
assignment_period_start_date = Column(DateTime(timezone=False))
assignment_period_end_date = Column(DateTime(timezone=False))
useexisting = True
class Call(DB.Base, MapBase):
__tablename__ = 'call'
id = Column(Integer, primary_key=True)
site_service_id = Column(String(50))
call_id_id_num = Column(String(50))
call_id_id_str = Column(String(32))
call_time = Column(DateTime(timezone=False))
call_duration = Column(Interval())
caseworker_id_id_num = Column(String(50))
caseworker_id_id_str = Column(String(32))
# FBY : TBC requested|required fields
caller_zipcode = Column(String(10))
caller_city = Column(String(128))
caller_state = Column(String(2))
caller_home_phone = Column(String(10))
useexisting = True
class ChildEnrollmentStatus(DB.Base, MapBase):
__tablename__ = 'child_enrollment_status'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
child_enrollment_status_id_id_num = Column(String(50))
child_enrollment_status_id_id_str = Column(String(32))
child_enrollment_status_id_delete = Column(Integer)
child_enrollment_status_id_delete_occurred_date = Column(DateTime(timezone=False))
child_enrollment_status_id_delete_effective_date = Column(DateTime(timezone=False))
child_currently_enrolled_in_school = Column(String(50))
child_currently_enrolled_in_school_date_effective = Column(DateTime(timezone=False))
child_currently_enrolled_in_school_date_collected = Column(DateTime(timezone=False))
child_currently_enrolled_in_school_data_collection_stage = Column(String(50))
child_school_name = Column(String(50))
child_school_name_date_effective = Column(DateTime(timezone=False))
child_school_name_date_collected = Column(DateTime(timezone=False))
child_school_name_data_collection_stage = Column(String(50))
child_mckinney_vento_liaison = Column(String(50))
child_mckinney_vento_liaison_date_effective = Column(DateTime(timezone=False))
child_mckinney_vento_liaison_date_collected = Column(DateTime(timezone=False))
child_mckinney_vento_liaison_data_collection_stage = Column(String(50))
child_school_type = Column(String(50))
child_school_type_date_effective = Column(DateTime(timezone=False))
child_school_type_date_collected = Column(DateTime(timezone=False))
child_school_type_data_collection_stage = Column(String(50))
child_school_last_enrolled_date = Column(DateTime(timezone=False))
child_school_last_enrolled_date_date_collected = Column(DateTime(timezone=False))
child_school_last_enrolled_date_data_collection_stage = Column(String(50))
useexisting = True
class ChildEnrollmentStatusBarrier(DB.Base, MapBase):
__tablename__ = 'child_enrollment_status_barrier'
id = Column(Integer, primary_key=True)
child_enrollment_status_index_id = Column(Integer, ForeignKey(ChildEnrollmentStatus.id))
export_index_id = Column(Integer, ForeignKey('export.id'))
barrier_id_id_num = Column(String(50))
barrier_id_id_str = Column(String(32))
barrier_id_delete = Column(Integer)
barrier_id_delete_occurred_date = Column(DateTime(timezone=False))
barrier_id_delete_effective_date = Column(DateTime(timezone=False))
barrier_code = Column(String(50))
barrier_code_date_collected = Column(DateTime(timezone=False))
barrier_code_date_effective = Column(DateTime(timezone=False))
barrier_code_data_collection_stage = Column(String(50))
barrier_other = Column(String(50))
barrier_other_date_collected = Column(DateTime(timezone=False))
barrier_other_date_effective = Column(DateTime(timezone=False))
barrier_other_data_collection_stage = Column(String(50))
useexisting = True
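
# ChildEnrollmentStatusBarrier hangs off ChildEnrollmentStatus through the
# class-attribute foreign key above, so the barriers recorded for one
# enrollment-status row come back from a single filtered query.  A sketch,
# assuming a live session:
def _example_barriers_for_status(session, child_enrollment_status_id):
    return (session.query(ChildEnrollmentStatusBarrier)
            .filter(ChildEnrollmentStatusBarrier.child_enrollment_status_index_id
                    == child_enrollment_status_id)
            .all())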
class ChronicHealthCondition(DB.Base, MapBase):
__tablename__ = 'chronic_health_condition'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
has_chronic_health_condition = Column(String(50))
has_chronic_health_condition_date_collected = Column(DateTime(timezone=False))
has_chronic_health_condition_date_effective = Column(DateTime(timezone=False))
has_chronic_health_condition_data_collection_stage = Column(String(50))
receive_chronic_health_services = Column(String(50))
receive_chronic_health_services_date_collected = Column(DateTime(timezone=False))
receive_chronic_health_services_date_effective = Column(DateTime(timezone=False))
receive_chronic_health_services_data_collection_stage = Column(String(50))
useexisting = True
class Contact(DB.Base, MapBase):
__tablename__ = 'contact'
id = Column(Integer, primary_key=True)
agency_index_id = Column(Integer, ForeignKey('agency.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
resource_info_index_id = Column(Integer, ForeignKey('resource_info.id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
title = Column(String(50))
name = Column(String(50))
type = Column(String(50))
useexisting = True
class ContactMade(DB.Base, MapBase):
__tablename__ = 'contact_made'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
contact_id_id_num = Column(String(50))
contact_id_id_str = Column(String(32))
contact_id_delete = Column(Integer)
contact_id_delete_occurred_date = Column(DateTime(timezone=False))
contact_id_delete_effective_date = Column(DateTime(timezone=False))
contact_date = Column(DateTime(timezone=False))
contact_date_data_collection_stage = Column(String(50))
contact_location = Column(String(50))
contact_location_data_collection_stage = Column(String(50))
useexisting = True
class CrossStreet(DB.Base, MapBase):
__tablename__ = 'cross_street'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
cross_street = Column(String(50))
useexisting = True
class CurrentlyInSchool(DB.Base, MapBase):
__tablename__ = 'currently_in_school'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
currently_in_school = Column(String(50))
currently_in_school_date_collected = Column(DateTime(timezone=False))
currently_in_school_date_effective = Column(DateTime(timezone=False))
currently_in_school_data_collection_stage = Column(String(50))
useexisting = True
class LicenseAccreditation(DB.Base, MapBase):
__tablename__ = 'license_accreditation'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
agency_index_id = Column(Integer, ForeignKey('agency.id'))
license = Column(String(50))
licensed_by = Column(String(50))
useexisting = True
class MentalHealthProblem(DB.Base, MapBase):
__tablename__ = 'mental_health_problem'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
has_mental_health_problem = Column(String(50))
has_mental_health_problem_date_collected = Column(DateTime(timezone=False))
has_mental_health_problem_date_effective = Column(DateTime(timezone=False))
has_mental_health_problem_data_collection_stage = Column(String(50))
mental_health_indefinite = Column(String(50))
mental_health_indefinite_date_collected = Column(DateTime(timezone=False))
mental_health_indefinite_date_effective = Column(DateTime(timezone=False))
mental_health_indefinite_data_collection_stage = Column(String(50))
receive_mental_health_services = Column(String(50))
receive_mental_health_services_date_collected = Column(DateTime(timezone=False))
receive_mental_health_services_date_effective = Column(DateTime(timezone=False))
receive_mental_health_services_data_collection_stage = Column(String(50))
useexisting = True
class NonCashBenefits(DB.Base, MapBase):
__tablename__ = 'non_cash_benefits'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
non_cash_benefit_id_id_num = Column(String(50))
non_cash_benefit_id_id_str = Column(String(32))
non_cash_benefit_id_id_delete = Column(Integer)
non_cash_benefit_id_id_delete_occurred_date = Column(DateTime(timezone=False))
non_cash_benefit_id_id_delete_effective_date = Column(DateTime(timezone=False))
non_cash_source_code = Column(String(50))
non_cash_source_code_date_collected = Column(DateTime(timezone=False))
non_cash_source_code_date_effective = Column(DateTime(timezone=False))
non_cash_source_code_data_collection_stage = Column(String(50))
non_cash_source_other = Column(String(50))
non_cash_source_other_date_collected = Column(DateTime(timezone=False))
non_cash_source_other_date_effective = Column(DateTime(timezone=False))
non_cash_source_other_data_collection_stage = Column(String(50))
receiving_non_cash_source = Column(String(50))
receiving_non_cash_source_date_collected = Column(DateTime(timezone=False))
receiving_non_cash_source_date_effective = Column(DateTime(timezone=False))
receiving_non_cash_source_data_collection_stage = Column(String(50))
useexisting = True
class AgencyLocation(DB.Base, MapBase):
__tablename__ = 'agency_location'
id = Column(Integer, primary_key=True)
agency_index_id = Column(Integer, ForeignKey('agency.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
key = Column(String(50))
name = Column(String(50))
site_description = Column(String(50))
physical_address_pre_address_line = Column(String(100))
physical_address_line_1 = Column(String(100))
physical_address_line_2 = Column(String(100))
physical_address_city = Column(String(50))
physical_address_country = Column(String(50))
physical_address_state = Column(String(50))
physical_address_zip_code = Column(String(50))
physical_address_county = Column(String(50))
physical_address_reason_withheld = Column(String(50))
physical_address_confidential = Column(String(50))
physical_address_description = Column(String(50))
mailing_address_pre_address_line = Column(String(100))
mailing_address_line_1 = Column(String(100))
mailing_address_line_2 = Column(String(100))
mailing_address_city = Column(String(50))
mailing_address_county = Column(String(50))
mailing_address_state = Column(String(50))
mailing_address_zip_code = Column(String(50))
mailing_address_country = Column(String(50))
mailing_address_reason_withheld = Column(String(50))
mailing_address_confidential = Column(String(50))
mailing_address_description = Column(String(50))
no_physical_address_description = Column(String(50))
no_physical_address_explanation = Column(String(50))
disabilities_access = Column(String(50))
physical_location_description = Column(String(50))
bus_service_access = Column(String(50))
public_access_to_transportation = Column(String(50))
year_inc = Column(String(50))
annual_budget_total = Column(String(50))
legal_status = Column(String(50))
exclude_from_website = Column(String(50))
exclude_from_directory = Column(String(50))
useexisting = True
class AgencyService(DB.Base, MapBase):
__tablename__ = 'agency_service'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
agency_index_id = Column(Integer, ForeignKey('agency.id'))
key = Column(String(50))
agency_key = Column(String(50))
name = Column(String(50))
useexisting = True
class NonCashBenefitsLast30Days(DB.Base, MapBase):
__tablename__ = 'non_cash_benefits_last_30_days'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
income_last_30_days = Column(String(50))
income_last_30_days_date_collected = Column(DateTime(timezone=False))
income_last_30_days_date_effective = Column(DateTime(timezone=False))
income_last_30_days_data_collection_stage = Column(String(50))
useexisting = True
class OtherAddress(DB.Base, MapBase):
__tablename__ = 'other_address'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
pre_address_line = Column(String(100))
line_1 = Column(String(100))
line_2 = Column(String(100))
city = Column(String(50))
county = Column(String(50))
state = Column(String(50))
zip_code = Column(String(50))
country = Column(String(50))
reason_withheld = Column(String(50))
confidential = Column(String(50))
description = Column(String(50))
useexisting = True
class OtherRequirements(DB.Base, MapBase):
__tablename__ = 'other_requirements'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
other_requirements = Column(String(50))
useexisting = True
class Phone(DB.Base, MapBase):
__tablename__ = 'phone'
id = Column(Integer, primary_key=True)
agency_index_id = Column(Integer, ForeignKey('agency.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
contact_index_id = Column(Integer, ForeignKey(Contact.id))
resource_info_index_id = Column(Integer, ForeignKey('resource_info.id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
phone_number = Column(String(50))
reason_withheld = Column(String(50))
extension = Column(String(50))
description = Column(String(50))
type = Column(String(50))
function = Column(String(50))
toll_free = Column(String(50))
confidential = Column(String(50))
person_phone_number = Column(String(50))
person_phone_number_date_collected = Column(DateTime(timezone=False))
person_phone_number_date_effective = Column(DateTime(timezone=False))
person_phone_number_data_collection_stage = Column(String(50))
useexisting = True
class PhysicalDisability(DB.Base, MapBase):
__tablename__ = 'physical_disability'
id = Column(Integer, primary_key=True)
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
has_physical_disability = Column(String(50))
has_physical_disability_date_collected = Column(DateTime(timezone=False))
has_physical_disability_date_effective = Column(DateTime(timezone=False))
has_physical_disability_data_collection_stage = Column(String(50))
receive_physical_disability_services = Column(String(50))
receive_physical_disability_services_date_collected = Column(DateTime(timezone=False))
receive_physical_disability_services_date_effective = Column(DateTime(timezone=False))
receive_physical_disability_services_data_collection_stage = Column(String(50))
useexisting = True
class PitCountSet(DB.Base, MapBase):
__tablename__ = 'pit_count_set'
id = Column(Integer, primary_key=True)
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
pit_count_set_id_id_num = Column(String(50))
pit_count_set_id_id_str = Column(String(32))
pit_count_set_id_delete = Column(Integer)
pit_count_set_id_delete_occurred_date = Column(DateTime(timezone=False))
pit_count_set_id_delete_effective_date = Column(DateTime(timezone=False))
hud_waiver_received = Column(String(50))
hud_waiver_date = Column(DateTime(timezone=False))
hud_waiver_effective_period_start_date = Column(DateTime(timezone=False))
hud_waiver_effective_period_end_date = Column(DateTime(timezone=False))
last_pit_sheltered_count_date = Column(DateTime(timezone=False))
last_pit_unsheltered_count_date = Column(DateTime(timezone=False))
useexisting = True
class PitCounts(DB.Base, MapBase):
__tablename__ = 'pit_counts'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
pit_count_set_index_id = Column(Integer, ForeignKey(PitCountSet.id))
pit_count_value = Column(String(50))
pit_count_effective_period_start_date = Column(DateTime(timezone=False))
pit_count_effective_period_end_date = Column(DateTime(timezone=False))
pit_count_recorded_date = Column(DateTime(timezone=False))
pit_count_household_type = Column(String(50))
useexisting = True
class Pregnancy(DB.Base, MapBase):
__tablename__ = 'pregnancy'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
pregnancy_id_id_num = Column(String(50))
pregnancy_id_id_str = Column(String(32))
pregnancy_id_id_delete = Column(Integer)
pregnancy_id_id_delete_occurred_date = Column(DateTime(timezone=False))
pregnancy_id_id_delete_effective_date = Column(DateTime(timezone=False))
pregnancy_status = Column(String(50))
pregnancy_status_date_collected = Column(DateTime(timezone=False))
pregnancy_status_date_effective = Column(DateTime(timezone=False))
pregnancy_status_data_collection_stage = Column(String(50))
due_date = Column(DateTime(timezone=False))
due_date_date_collected = Column(DateTime(timezone=False))
due_date_data_collection_stage = Column(String(50))
useexisting = True
class Degree(DB.Base, MapBase):
__tablename__ = 'degree'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
degree_id_id_num = Column(String(50))
degree_id_id_str = Column(String(32))
degree_id_delete = Column(Integer)
degree_id_delete_occurred_date = Column(DateTime(timezone=False))
degree_id_delete_effective_date = Column(DateTime(timezone=False))
degree_other = Column(String(50))
degree_other_date_collected = Column(DateTime(timezone=False))
degree_other_date_effective = Column(DateTime(timezone=False))
degree_other_data_collection_stage = Column(String(50))
useexisting = True
class PriorResidence(DB.Base, MapBase):
__tablename__ = 'prior_residence'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
prior_residence_id_id_num = Column(String(50))
prior_residence_id_id_str = Column(String(32))
prior_residence_id_delete = Column(Integer)
prior_residence_id_delete_occurred_date = Column(DateTime(timezone=False))
prior_residence_id_delete_effective_date = Column(DateTime(timezone=False))
prior_residence_code = Column(String(50))
prior_residence_code_date_collected = Column(DateTime(timezone=False))
prior_residence_code_date_effective = Column(DateTime(timezone=False))
prior_residence_code_data_collection_stage = Column(String(50))
prior_residence_other = Column(String(50))
prior_residence_other_date_collected = Column(DateTime(timezone=False))
prior_residence_other_date_effective = Column(DateTime(timezone=False))
prior_residence_other_data_collection_stage = Column(String(50))
useexisting = True
class DegreeCode(DB.Base, MapBase):
__tablename__ = 'degree_code'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
degree_index_id = Column(Integer, ForeignKey(Degree.id))
degree_code = Column(String(50))
degree_date_collected = Column(DateTime(timezone=False))
degree_date_effective = Column(DateTime(timezone=False))
degree_data_collection_stage = Column(String(50))
useexisting = True
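
# Degree/DegreeCode shows the recurring parent/child pattern for multi-valued
# coded answers: one Degree row plus any number of DegreeCode rows keyed to
# it.  A sketch, assuming a live session; flush() is used so the parent's
# autoincrement id exists before the child row references it.
def _example_add_degree_with_code(session, person_historical_id, code):
    degree = Degree(person_historical_index_id=person_historical_id)
    session.add(degree)
    session.flush()
    session.add(DegreeCode(degree_index_id=degree.id, degree_code=code))
    session.commit()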
class Destinations(DB.Base, MapBase):
__tablename__ = 'destinations'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
destination_id_id_num = Column(String(50))
destination_id_id_str = Column(String(32))
destination_id_delete = Column(Integer)
destination_id_delete_occurred_date = Column(DateTime(timezone=False))
destination_id_delete_effective_date = Column(DateTime(timezone=False))
destination_code = Column(String(50))
destination_code_date_collected = Column(DateTime(timezone=False))
destination_code_date_effective = Column(DateTime(timezone=False))
destination_code_data_collection_stage = Column(String(50))
destination_other = Column(String(50))
destination_other_date_collected = Column(DateTime(timezone=False))
destination_other_date_effective = Column(DateTime(timezone=False))
destination_other_data_collection_stage = Column(String(50))
useexisting = True
class ReasonsForLeaving(DB.Base, MapBase):
__tablename__ = 'reasons_for_leaving'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_participation_index_id = Column(Integer, ForeignKey('site_service_participation.id'))
reason_for_leaving_id_id_num = Column(String(50))
reason_for_leaving_id_id_str = Column(String(32))
reason_for_leaving_id_delete = Column(Integer)
reason_for_leaving_id_delete_occurred_date = Column(DateTime(timezone=False))
reason_for_leaving_id_delete_effective_date = Column(DateTime(timezone=False))
reason_for_leaving = Column(String(50))
reason_for_leaving_date_collected = Column(DateTime(timezone=False))
reason_for_leaving_date_effective = Column(DateTime(timezone=False))
reason_for_leaving_data_collection_stage = Column(String(50))
reason_for_leaving_other = Column(String(50))
reason_for_leaving_other_date_collected = Column(DateTime(timezone=False))
reason_for_leaving_other_date_effective = Column(DateTime(timezone=False))
reason_for_leaving_other_data_collection_stage = Column(String(50))
useexisting = True
class DevelopmentalDisability(DB.Base, MapBase):
__tablename__ = 'developmental_disability'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
has_developmental_disability = Column(String(50))
has_developmental_disability_date_collected = Column(DateTime(timezone=False))
has_developmental_disability_date_effective = Column(DateTime(timezone=False))
has_developmental_disability_data_collection_stage = Column(String(50))
receive_developmental_disability = Column(String(50))
receive_developmental_disability_date_collected = Column(DateTime(timezone=False))
receive_developmental_disability_date_effective = Column(DateTime(timezone=False))
receive_developmental_disability_data_collection_stage = Column(String(50))
useexisting = True
class DisablingCondition(DB.Base, MapBase):
__tablename__ = 'disabling_condition'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
disabling_condition = Column(String(50))
disabling_condition_date_collected = Column(DateTime(timezone=False))
disabling_condition_date_effective = Column(DateTime(timezone=False))
disabling_condition_data_collection_stage = Column(String(50))
useexisting = True
class DocumentsRequired(DB.Base, MapBase):
__tablename__ = 'documents_required'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
documents_required = Column(String(50))
description = Column(String(50))
useexisting = True
class ResidencyRequirements(DB.Base, MapBase):
__tablename__ = 'residency_requirements'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
residency_requirements = Column(String(50))
useexisting = True
class DomesticViolence(DB.Base, MapBase):
__tablename__ = 'domestic_violence'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
domestic_violence_survivor = Column(String(50))
domestic_violence_survivor_date_collected = Column(DateTime(timezone=False))
domestic_violence_survivor_date_effective = Column(DateTime(timezone=False))
domestic_violence_survivor_data_collection_stage = Column(String(50))
dv_occurred = Column(String(50))
dv_occurred_date_collected = Column(DateTime(timezone=False))
dv_occurred_date_effective = Column(DateTime(timezone=False))
dv_occurred_data_collection_stage = Column(String(50))
useexisting = True
class Email(DB.Base, MapBase):
__tablename__ = 'email'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
agency_index_id = Column(Integer, ForeignKey('agency.id'))
contact_index_id = Column(Integer, ForeignKey(Contact.id))
resource_info_index_id = Column(Integer, ForeignKey('resource_info.id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
address = Column(String(100))
note = Column(String(50))
person_email = Column(String(50))
person_email_date_collected = Column(DateTime(timezone=False))
person_email_date_effective = Column(DateTime(timezone=False))
person_email_data_collection_stage = Column(String(50))
useexisting = True
class Seasonal(DB.Base, MapBase):
__tablename__ = 'seasonal'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
description = Column(String(50))
start_date = Column(String(50))
end_date = Column(String(50))
useexisting = True
class Employment(DB.Base, MapBase):
__tablename__ = 'employment'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
employment_id_id_num = Column(String(50))
employment_id_id_str = Column(String(32))
employment_id_id_delete = Column(Integer)
employment_id_id_delete_occurred_date = Column(DateTime(timezone=False))
employment_id_id_delete_effective_date = Column(DateTime(timezone=False))
currently_employed = Column(String(50))
currently_employed_date_collected = Column(DateTime(timezone=False))
currently_employed_date_effective = Column(DateTime(timezone=False))
currently_employed_data_collection_stage = Column(String(50))
hours_worked_last_week = Column(String(50))
hours_worked_last_week_date_collected = Column(DateTime(timezone=False))
hours_worked_last_week_date_effective = Column(DateTime(timezone=False))
hours_worked_last_week_data_collection_stage = Column(String(50))
employment_tenure = Column(String(50))
employment_tenure_date_collected = Column(DateTime(timezone=False))
employment_tenure_date_effective = Column(DateTime(timezone=False))
employment_tenure_data_collection_stage = Column(String(50))
looking_for_work = Column(String(50))
looking_for_work_date_collected = Column(DateTime(timezone=False))
looking_for_work_date_effective = Column(DateTime(timezone=False))
looking_for_work_data_collection_stage = Column(String(50))
useexisting = True
class EngagedDate(DB.Base, MapBase):
__tablename__ = 'engaged_date'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
engaged_date = Column(DateTime(timezone=False))
engaged_date_date_collected = Column(DateTime(timezone=False))
engaged_date_data_collection_stage = Column(String(50))
useexisting = True
class ServiceEventNotes(DB.Base, MapBase):
__tablename__ = 'service_event_notes'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
service_event_index_id = Column(Integer, ForeignKey('service_event.id'))
note_id_id_num = Column(String(50))
note_id_id_str = Column(String(32))
note_delete = Column(Integer)
note_delete_occurred_date = Column(DateTime(timezone=False))
note_delete_effective_date = Column(DateTime(timezone=False))
note_text = Column(String(255))
note_text_date_collected = Column(DateTime(timezone=False))
note_text_date_effective = Column(DateTime(timezone=False))
note_text_data_collection_stage = Column(String(50))
useexisting = True
class FamilyRequirements(DB.Base, MapBase):
__tablename__ = 'family_requirements'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
family_requirements = Column(String(50))
useexisting = True
class ServiceGroup(DB.Base, MapBase):
__tablename__ = 'service_group'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
agency_index_id = Column(Integer, ForeignKey('agency.id'))
key = Column(String(50))
name = Column(String(50))
program_name = Column(String(50))
useexisting = True
class GeographicAreaServed(DB.Base, MapBase):
__tablename__ = 'geographic_area_served'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
zipcode = Column(String(50))
census_track = Column(String(50))
city = Column(String(50))
county = Column(String(50))
state = Column(String(50))
country = Column(String(50))
description = Column(String(50))
useexisting = True
class HealthStatus(DB.Base, MapBase):
__tablename__ = 'health_status'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
health_status = Column(String(50))
health_status_date_collected = Column(DateTime(timezone=False))
health_status_date_effective = Column(DateTime(timezone=False))
health_status_data_collection_stage = Column(String(50))
useexisting = True
class HighestSchoolLevel(DB.Base, MapBase):
__tablename__ = 'highest_school_level'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
highest_school_level = Column(String(50))
highest_school_level_date_collected = Column(DateTime(timezone=False))
highest_school_level_date_effective = Column(DateTime(timezone=False))
highest_school_level_data_collection_stage = Column(String(50))
useexisting = True
class HivAidsStatus(DB.Base, MapBase):
__tablename__ = 'hiv_aids_status'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
has_hiv_aids = Column(String(50))
has_hiv_aids_date_collected = Column(DateTime(timezone=False))
has_hiv_aids_date_effective = Column(DateTime(timezone=False))
has_hiv_aids_data_collection_stage = Column(String(50))
receive_hiv_aids_services = Column(String(50))
receive_hiv_aids_services_date_collected = Column(DateTime(timezone=False))
receive_hiv_aids_services_date_effective = Column(DateTime(timezone=False))
receive_hiv_aids_services_data_collection_stage = Column(String(50))
useexisting = True
class SpatialLocation(DB.Base, MapBase):
__tablename__ = 'spatial_location'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
description = Column(String(50))
datum = Column(String(50))
latitude = Column(String(50))
longitude = Column(String(50))
useexisting = True
class HmisAsset(DB.Base, MapBase):
__tablename__ = 'hmis_asset'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
asset_id_id_num = Column(String(50))
asset_id_id_str = Column(String(32))
asset_id_delete = Column(Integer)
asset_id_delete_occurred_date = Column(DateTime(timezone=False))
asset_id_delete_effective_date = Column(DateTime(timezone=False))
asset_count = Column(String(50))
asset_count_bed_availability = Column(String(50))
asset_count_bed_type = Column(String(50))
asset_count_bed_individual_family_type = Column(String(50))
asset_count_chronic_homeless_bed = Column(String(50))
asset_count_domestic_violence_shelter_bed = Column(String(50))
asset_count_household_type = Column(String(50))
asset_type = Column(String(50))
asset_effective_period_start_date = Column(DateTime(timezone=False))
asset_effective_period_end_date = Column(DateTime(timezone=False))
asset_recorded_date = Column(DateTime(timezone=False))
useexisting = True
class SubstanceAbuseProblem(DB.Base, MapBase):
__tablename__ = 'substance_abuse_problem'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
has_substance_abuse_problem = Column(String(50))
has_substance_abuse_problem_date_collected = Column(DateTime(timezone=False))
has_substance_abuse_problem_date_effective = Column(DateTime(timezone=False))
has_substance_abuse_problem_data_collection_stage = Column(String(50))
substance_abuse_indefinite = Column(String(50))
substance_abuse_indefinite_date_collected = Column(DateTime(timezone=False))
substance_abuse_indefinite_date_effective = Column(DateTime(timezone=False))
substance_abuse_indefinite_data_collection_stage = Column(String(50))
receive_substance_abuse_services = Column(String(50))
receive_substance_abuse_services_date_collected = Column(DateTime(timezone=False))
receive_substance_abuse_services_date_effective = Column(DateTime(timezone=False))
receive_substance_abuse_services_data_collection_stage = Column(String(50))
useexisting = True
class HousingStatus(DB.Base, MapBase):
__tablename__ = 'housing_status'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
housing_status = Column(String(50))
housing_status_date_collected = Column(DateTime(timezone=False))
housing_status_date_effective = Column(DateTime(timezone=False))
housing_status_data_collection_stage = Column(String(50))
useexisting = True
class Taxonomy(DB.Base, MapBase):
__tablename__ = 'taxonomy'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
need_index_id = Column(Integer, ForeignKey('need.id'))
code = Column(String(300))
useexisting = True
class HudChronicHomeless(DB.Base, MapBase):
__tablename__ = 'hud_chronic_homeless'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
hud_chronic_homeless = Column(String(50))
hud_chronic_homeless_date_collected = Column(DateTime(timezone=False))
hud_chronic_homeless_date_effective = Column(DateTime(timezone=False))
hud_chronic_homeless_data_collection_stage = Column(String(50))
useexisting = True
class TimeOpen(DB.Base, MapBase):
__tablename__ = 'time_open'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
languages_index_id = Column(Integer, ForeignKey('languages.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
notes = Column(String(50))
useexisting = True
class TimeOpenDays(DB.Base, MapBase):
__tablename__ = 'time_open_days'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
time_open_index_id = Column(Integer, ForeignKey(TimeOpen.id))
day_of_week = Column(String(50))
from_time = Column(String(50))
to_time = Column(String(50))
useexisting = True
class Url(DB.Base, MapBase):
__tablename__ = 'url'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
agency_index_id = Column(Integer, ForeignKey('agency.id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
address = Column(String(50))
note = Column(String(50))
useexisting = True
class VeteranMilitaryBranches(DB.Base, MapBase):
__tablename__ = 'veteran_military_branches'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
military_branch_id_id_num = Column(String(50))
military_branch_id_id_str = Column(String(32))
military_branch_id_id_delete = Column(Integer)
military_branch_id_id_delete_occurred_date = Column(DateTime(timezone=False))
military_branch_id_id_delete_effective_date = Column(DateTime(timezone=False))
discharge_status = Column(String(50))
discharge_status_date_collected = Column(DateTime(timezone=False))
discharge_status_date_effective = Column(DateTime(timezone=False))
discharge_status_data_collection_stage = Column(String(50))
discharge_status_other = Column(String(50))
discharge_status_other_date_collected = Column(DateTime(timezone=False))
discharge_status_other_date_effective = Column(DateTime(timezone=False))
discharge_status_other_data_collection_stage = Column(String(50))
military_branch = Column(String(50))
military_branch_date_collected = Column(DateTime(timezone=False))
military_branch_date_effective = Column(DateTime(timezone=False))
military_branch_data_collection_stage = Column(String(50))
military_branch_other = Column(String(50))
military_branch_other_date_collected = Column(DateTime(timezone=False))
military_branch_other_date_effective = Column(DateTime(timezone=False))
military_branch_other_data_collection_stage = Column(String(50))
useexisting = True
class IncomeLast30Days(DB.Base, MapBase):
__tablename__ = 'income_last_30_days'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
income_last_30_days = Column(String(50))
income_last_30_days_date_collected = Column(DateTime(timezone=False))
income_last_30_days_date_effective = Column(DateTime(timezone=False))
income_last_30_days_data_collection_stage = Column(String(50))
useexisting = True
class VeteranMilitaryServiceDuration(DB.Base, MapBase):
__tablename__ = 'veteran_military_service_duration'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
military_service_duration = Column(String(50))
military_service_duration_date_collected = Column(DateTime(timezone=False))
military_service_duration_date_effective = Column(DateTime(timezone=False))
military_service_duration_data_collection_stage = Column(String(50))
useexisting = True
class IncomeRequirements(DB.Base, MapBase):
__tablename__ = 'income_requirements'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
income_requirements = Column(String(50))
useexisting = True
class VeteranServedInWarZone(DB.Base, MapBase):
__tablename__ = 'veteran_served_in_war_zone'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
served_in_war_zone = Column(String(50))
served_in_war_zone_date_collected = Column(DateTime(timezone=False))
served_in_war_zone_date_effective = Column(DateTime(timezone=False))
served_in_war_zone_data_collection_stage = Column(String(50))
useexisting = True
class IncomeTotalMonthly(DB.Base, MapBase):
__tablename__ = 'income_total_monthly'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
income_total_monthly = Column(String(50))
income_total_monthly_date_collected = Column(DateTime(timezone=False))
income_total_monthly_date_effective = Column(DateTime(timezone=False))
income_total_monthly_data_collection_stage = Column(String(50))
useexisting = True
class VeteranServiceEra(DB.Base, MapBase):
__tablename__ = 'veteran_service_era'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
service_era = Column(String(50))
service_era_date_collected = Column(DateTime(timezone=False))
service_era_date_effective = Column(DateTime(timezone=False))
service_era_data_collection_stage = Column(String(50))
useexisting = True
class VeteranVeteranStatus(DB.Base, MapBase):
__tablename__ = 'veteran_veteran_status'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
veteran_status = Column(String(50))
veteran_status_date_collected = Column(DateTime(timezone=False))
veteran_status_date_effective = Column(DateTime(timezone=False))
veteran_status_data_collection_stage = Column(String(50))
useexisting = True
class Languages(DB.Base, MapBase):
__tablename__ = 'languages'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_index_id = Column(Integer, ForeignKey('site.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
agency_location_index_id = Column(Integer, ForeignKey('agency_location.id'))
name = Column(String(50))
notes = Column(String(50))
useexisting = True
class VeteranWarzonesServed(DB.Base, MapBase):
__tablename__ = 'veteran_warzones_served'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
war_zone_id_id_num = Column(String(50))
war_zone_id_id_str = Column(String(32))
war_zone_id_id_delete = Column(Integer)
war_zone_id_id_delete_occurred_date = Column(DateTime(timezone=False))
war_zone_id_id_delete_effective_date = Column(DateTime(timezone=False))
months_in_war_zone = Column(String(50))
months_in_war_zone_date_collected = Column(DateTime(timezone=False))
months_in_war_zone_date_effective = Column(DateTime(timezone=False))
months_in_war_zone_data_collection_stage = Column(String(50))
received_fire = Column(String(50))
received_fire_date_collected = Column(DateTime(timezone=False))
received_fire_date_effective = Column(DateTime(timezone=False))
received_fire_data_collection_stage = Column(String(50))
war_zone = Column(String(50))
war_zone_date_collected = Column(DateTime(timezone=False))
war_zone_date_effective = Column(DateTime(timezone=False))
war_zone_data_collection_stage = Column(String(50))
war_zone_other = Column(String(50))
war_zone_other_date_collected = Column(DateTime(timezone=False))
war_zone_other_date_effective = Column(DateTime(timezone=False))
war_zone_other_data_collection_stage = Column(String(50))
useexisting = True
class LengthOfStayAtPriorResidence(DB.Base, MapBase):
__tablename__ = 'length_of_stay_at_prior_residence'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
length_of_stay_at_prior_residence = Column(String(50))
length_of_stay_at_prior_residence_date_collected = Column(DateTime(timezone=False))
length_of_stay_at_prior_residence_date_effective = Column(DateTime(timezone=False))
length_of_stay_at_prior_residence_data_collection_stage = Column(String(50))
useexisting = True
# Generic __repr__ helper -- presumably intended for the shared MapBase mixin,
# since it takes `self` but is not nested under any single class above.  It
# renders every non-private attribute currently set on the instance.
def __repr__(self):
    field_dict = vars(self)
    out = ''
    if len(field_dict) > 0:
        for x, y in field_dict.items():
            if x[0] != "_":
                out = out + "%s = %s, " % (x, y)
        return "<%s(%s)>" % (self.__class__.__name__, out)
    else:
        return ''
class VocationalTraining(DB.Base, MapBase):
__tablename__ = 'vocational_training'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
vocational_training = Column(String(50))
vocational_training_date_collected = Column(DateTime(timezone=False))
vocational_training_date_effective = Column(DateTime(timezone=False))
vocational_training_data_collection_stage = Column(String(50))
useexisting = True
class Export(DB.Base, MapBase):
__tablename__ = 'export'
id = Column(Integer, primary_key=True)
export_id = Column(String(50), primary_key=False, unique=False)
export_id_date_collected = Column(DateTime(timezone=False))
export_date = Column(DateTime(timezone=False))
export_date_date_collected = Column(DateTime(timezone=False))
export_period_start_date = Column(DateTime(timezone=False))
export_period_start_date_date_collected = Column(DateTime(timezone=False))
export_period_end_date = Column(DateTime(timezone=False))
export_period_end_date_date_collected = Column(DateTime(timezone=False))
export_software_vendor = Column(String(50))
export_software_vendor_date_collected = Column(DateTime(timezone=False))
export_software_version = Column(String(10))
export_software_version_date_collected = Column(DateTime(timezone=False))
#HUD 3.0
export_id_id_num = Column(String(50))
export_id_id_str = Column(String(50))
export_id_delete_occurred_date = Column(DateTime(timezone=False))
export_id_delete_effective_date = Column(DateTime(timezone=False))
export_id_delete = Column(String(32))
fk_export_to_person = relationship('Person', backref='fk_person_to_export')
    #fk_export_to_household = relationship('Household', backref='fk_household_to_export')
# 'fk_export_to_database': relation(Source, backref='fk_database_to_export')
useexisting = True
class Report(DB.Base, MapBase):
__tablename__ = 'report'
report_id = Column(String(50), primary_key=True, unique=True)
report_id_date_collected = Column(DateTime(timezone=False))
report_date = Column(DateTime(timezone=False))
report_date_date_collected = Column(DateTime(timezone=False))
report_period_start_date = Column(DateTime(timezone=False))
report_period_start_date_date_collected = Column(DateTime(timezone=False))
report_period_end_date = Column(DateTime(timezone=False))
report_period_end_date_date_collected = Column(DateTime(timezone=False))
report_software_vendor = Column(String(50))
report_software_vendor_date_collected = Column(DateTime(timezone=False))
report_software_version = Column(String(10))
report_software_version_date_collected = Column(DateTime(timezone=False))
#HUD 3.0
report_id_id_num = Column(String(50))
report_id_id_str = Column(String(50))
report_id_id_delete_occurred_date = Column(DateTime(timezone=False))
report_id_id_delete_effective_date = Column(DateTime(timezone=False))
report_id_id_delete = Column(String(32))
export_index_id = Column(Integer, ForeignKey('export.id'))
#fk_report_to_person = relationship('Person', backref='fk_person_to_report')
#fk_report_to_household = relationship('Household', backref='fk_household_to_report')
#fk_report_to_database = relationship('Source', backref='fk_database_to_report')
useexisting = True
class FosterChildEver(DB.Base, MapBase):
__tablename__ = 'foster_child_ever'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
person_historical_index_id = Column(Integer, ForeignKey('person_historical.id'))
foster_child_ever = Column(Integer)
foster_child_ever_date_collected = Column(DateTime(timezone=False))
foster_child_ever_date_effective = Column(DateTime(timezone=False))
useexisting = True
class Household(DB.Base, MapBase):
__tablename__ = 'household'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
report_id = Column(String(50), ForeignKey('report.report_id'))
household_id_num = Column(String(32))
household_id_num_date_collected = Column(DateTime(timezone=False))
household_id_str = Column(String(32))
household_id_str_date_collected = Column(DateTime(timezone=False))
head_of_household_id_unhashed = Column(String(32))
head_of_household_id_unhashed_date_collected = Column(DateTime(timezone=False))
head_of_household_id_hashed = Column(String(32))
head_of_household_id_hashed_date_collected = Column(DateTime(timezone=False))
reported = Column(Boolean)
useexisting = True
fk_household_to_members = relationship('Members', backref='fk_members_to_household')
class Person(DB.Base, MapBase):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
report_id = Column(String(50), ForeignKey('report.report_id'))
person_id_hashed = Column(String(32))
person_id_unhashed = Column(String(50))
person_id_date_collected = Column(DateTime(timezone=False))
person_date_of_birth_hashed = Column(String(32))
person_date_of_birth_hashed_date_collected = Column(DateTime(timezone=False))
person_date_of_birth_unhashed = Column(DateTime(timezone=False))
person_date_of_birth_unhashed_date_collected = Column(DateTime(timezone=False))
person_ethnicity_hashed = Column(String(32))
person_ethnicity_unhashed = Column(Integer)
person_ethnicity_hashed_date_collected = Column(DateTime(timezone=False))
person_ethnicity_unhashed_date_collected = Column(DateTime(timezone=False))
person_gender_hashed = Column(String(32))
person_gender_unhashed = Column(Integer)
person_gender_hashed_date_collected = Column(DateTime(timezone=False))
person_gender_unhashed_date_collected = Column(DateTime(timezone=False))
person_gender_unhashed_date_effective = Column(DateTime(timezone=False))
person_gender_hashed_date_effective = Column(DateTime(timezone=False))
person_legal_first_name_hashed = Column(String(32))
person_legal_first_name_unhashed = Column(String(50))
person_legal_first_name_hashed_date_collected = Column(DateTime(timezone=False))
person_legal_first_name_hashed_date_effective = Column(DateTime(timezone=False))
person_legal_first_name_unhashed_date_collected = Column(DateTime(timezone=False))
person_legal_first_name_unhashed_date_effective = Column(DateTime(timezone=False)) # JCS Added
person_legal_last_name_hashed = Column(String(32))
person_legal_last_name_unhashed = Column(String(50))
person_legal_last_name_unhashed_date_collected = Column(DateTime(timezone=False))
person_legal_last_name_unhashed_date_effective = Column(DateTime(timezone=False))
person_legal_last_name_hashed_date_collected = Column(DateTime(timezone=False))
person_legal_middle_name_hashed = Column(String(32))
person_legal_middle_name_unhashed = Column(String(50))
person_legal_middle_name_unhashed_date_collected = Column(DateTime(timezone=False))
person_legal_middle_name_hashed_date_collected = Column(DateTime(timezone=False))
person_legal_suffix_hashed = Column(String(32))
person_legal_suffix_unhashed = Column(String(50))
person_legal_suffix_unhashed_date_collected = Column(DateTime(timezone=False))
person_legal_suffix_hashed_date_collected = Column(DateTime(timezone=False))
#OtherNames is in its own table as there can be multiple OtherNames
#Race is in its own table as there can be multiple races
person_social_security_number_hashed = Column(String(32))
person_social_security_number_unhashed = Column(String(9))
person_social_security_number_unhashed_date_collected = Column(DateTime(timezone=False))
person_social_security_number_hashed_date_effective = Column(DateTime(timezone=False))
person_social_security_number_unhashed_date_effective = Column(DateTime(timezone=False))
person_social_security_number_hashed_date_collected = Column(DateTime(timezone=False))
person_social_security_number_quality_code = Column(String(2))
person_social_security_number_quality_code_date_collected = Column(DateTime(timezone=False))
person_social_security_number_quality_code_date_effective = Column(DateTime(timezone=False))
#PersonHistorical has its own table
#SiteServiceParticipation has its own table
#ReleaseOfInformation has its own table
reported = Column(Boolean)
# HUD 3.0
person_id_id_num = Column(String(50))
person_id_id_str = Column(String(50))
person_id_delete = Column(String(32))
person_id_delete_occurred_date = Column(DateTime(timezone=False))
person_id_delete_effective_date = Column(DateTime(timezone=False))
person_date_of_birth_type = Column(Integer)
person_date_of_birth_type_date_collected = Column(DateTime(timezone=False))
fk_person_to_other_names = relationship('OtherNames', backref='fk_other_names_to_person')
site_service_participations = relationship("SiteServiceParticipation", backref="person")
fk_person_to_person_historical = relationship('PersonHistorical', backref='fk_person_historical_to_person')
fk_person_to_release_of_information = relationship('ReleaseOfInformation', backref='fk_release_of_information_to_person')
fk_person_to_races = relationship('Races', backref='fk_races_to_person')
useexisting = True
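#Annotation (added note, not in the original schema): most PII fields above are
#stored as hashed/unhashed pairs, each with its own *_date_collected /
#*_date_effective audit columns -- the same pattern repeats across the other
#person-level tables in this module.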
#class DeduplicationLink(DB.Base, MapBase):
class ServiceEvent(DB.Base, MapBase):
__tablename__ = 'service_event'
id = Column(Integer, primary_key=True)
export_index_id = Column(Integer, ForeignKey('export.id'))
site_service_index_id = Column(Integer, ForeignKey('site_service.id'))
household_index_id = Column(Integer, ForeignKey('household.id'))
person_index_id = Column(Integer, ForeignKey('person.id'))
need_index_id = Column(Integer, ForeignKey('need.id'))
site_service_participation_index_id = Column(Integer, ForeignKey('site_service_participation.id'))
service_event_idid_num = Column(String(32))
service_event_idid_num_date_collected = Column(DateTime(timezone=False))
service_event_idid_str = Column(String(32))
service_event_idid_str_date_collected = Column(DateTime(timezone=False))
household_idid_num = Column(String(32))
is_referral = Column(String(32))
is_referral_date_collected = Column(DateTime(timezone=False))
quantity_of_service = Column(String(32))
quantity_of_service_date_collected = Column(DateTime(timezone=False))
quantity_of_service_measure = Column(String(32))
quantity_of_service_measure_date_collected = Column(DateTime(timezone=False))
service_airs_code = Column(String(300))
service_airs_code_date_collected = Column(DateTime(timezone=False))
service_period_start_date = Column(DateTime(timezone=False))
service_period_start_date_date_collected = Column(DateTime(timezone=False))
service_period_end_date = Column(DateTime(timezone=False))
service_period_end_date_date_collected = Column(DateTime(timezone=False))
service_unit = Column(String(32))
service_unit_date_collected = Column(DateTime(timezone=False))
type_of_service = Column(String(32))
type_of_service_date_collected = Column(DateTime(timezone=False))
type_of_service_other = Column(String(32))
type_of_service_other_date_collected = Column(DateTime(timezone=False))
type_of_service_par = Column(Integer)
#adding a reported column. Hopefully this will append the column to the table def.
reported = Column(Boolean)
service_event_id_delete = Column(String(32))
service_event_ind_fam = Column(Integer)
site_service_id = Column(String(50))
hmis_service_event_code_type_of_service = Column(String(50))
hmis_service_event_code_type_of_service_other = Column(String(50))
hprp_financial_assistance_service_event_code = Column(String(50))
hprp_relocation_stabilization_service_event_code = Column(String(50))
service_event_id_delete_occurred_date = Column(DateTime(timezone=False))
service_event_id_delete_effective_date = Column(DateTime(timezone=False))
service_event_provision_date = Column(DateTime(timezone=False))
service_event_recorded_date = Column(DateTime(timezone=False))
useexisting = True
class Referral(DB.Base, MapBase):
__tablename__ = 'referral'
id = Column(Integer, primary_key=True)
service_event_index_id = Column(Integer, ForeignKey('service_event.id'))
export_index_id = Column(Integer, ForeignKey('export.id'))
person_index_id = Column(Integer, ForeignKey('person.id'))
need_index_id = Column(Integer, ForeignKey('need.id')) # ??
#referral_id_date_effective = Column(DateTime(timezone=False))
referral_idid_num = Column(String(50))
referral_idid_str = Column(String(32))
referral_delete = Column(Integer)
referral_delete_occurred_date = Column(DateTime(timezone=False))
referral_delete_effective_date = Column(DateTime(timezone=False))
referral_agency_referred_to_idid_num = Column(String(50))
referral_agency_referred_to_idid_str = Column(String(50))
referral_agency_referred_to_name = Column(String(50))
referral_agency_referred_to_name_data_collection_stage = Column(String(50))
referral_agency_referred_to_name_date_collected = Column(DateTime(timezone=False))
referral_agency_referred_to_name_date_effective = Column(DateTime(timezone=False))
referral_call_idid_num = Column(String(50))
referral_call_idid_str = Column(String(50))
referral_need_idid_num = Column(String(50)) # In TBC, these refer to an already defined Need
referral_need_idid_str = Column(String(50))
useexisting = True
# FBY : TBC requested|required field
referral_need_notes = Column(String)
class Source(DB.Base, MapBase):
__tablename__ = 'source'
id = Column(Integer, primary_key=True)
report_id = Column(String(50), ForeignKey('report.report_id'))
source_id = Column(String(50))
source_id_date_collected = Column(DateTime(timezone=False))
source_email = Column(String(255))
source_email_date_collected = Column(DateTime(timezone=False))
source_contact_extension = Column(String(10))
source_contact_extension_date_collected = Column(DateTime(timezone=False))
source_contact_first = Column(String(20))
source_contact_first_date_collected = Column(DateTime(timezone=False))
source_contact_last = Column(String(20))
source_contact_last_date_collected = Column(DateTime(timezone=False))
source_contact_phone = Column(String(20))
source_contact_phone_date_collected = Column(DateTime(timezone=False))
source_name = Column(String(50))
source_name_date_collected = Column(DateTime(timezone=False))
#HUD 3.0
schema_version = Column(String(50))
source_id_id_num = Column(String(50))
source_id_id_str = Column(String(50))
source_id_delete = Column(Integer)
source_id_delete_occurred_date = Column(DateTime(timezone=False))
source_id_delete_effective_date = Column(DateTime(timezone=False))
software_vendor = Column(String(50))
software_version = Column(String(50))
source_contact_email = Column(String(255))
useexisting = True
#properties={'fk_source_to_export': relation(Export, backref='fk_export_to_source')})
class SystemConfiguration(DB.Base, MapBase):
__tablename__ = 'system_configuration_table'
id = Column(Integer, primary_key=True)
vendor_name = Column(String(50))
processing_mode = Column(String(4)) # TEST or PROD
source_id = Column(String(50))
odbid = Column(Integer)
providerid = Column(Integer)
userid = Column(Integer)
useexisting = True
class LastDateTime(DB.Base, MapBase):
# FBY: This table is used to record the document lifecycle: received, shredded, transmitted via SOAP
__tablename__ = 'last_date_time'
id = Column(Integer, primary_key=True)
event = Column(String(50))
event_date_time = Column(DateTime(timezone=False))
useexisting = True
def test():
from . import postgresutils
utils = postgresutils.Utils()
utils.blank_database()
print("instantiating db")
db = DB()
session = db.Session()
db.Base.metadata.create_all(db.pg_db_engine)
new = Source(source_id_id_num = 1, source_name='Orange County Corrections')
session.add(new)
session.commit()
print("done")
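def example_query():
    #Illustrative sketch (added example, not part of the original module):
    #walk the Export -> Person relationship declared on Export above.
    db = DB()
    session = db.Session()
    for export in session.query(Export).all():
        for person in export.fk_export_to_person:
            print(person.id, person.person_id_id_num)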
if __name__ == "__main__":
import sys
sys.exit(test())
#The MIT License
#
#Copyright (c) 2011, Alexandria Consulting LLC
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE
| 49.789865 | 176 | 0.757755 | 104,305 | 0.973921 | 0 | 0 | 0 | 0 | 0 | 0 | 8,931 | 0.083391 |
c8f8cdcd902d01952d5a7b8a680f7b6b5e1cd1d5 | 3,154 | py | Python | ImageAnalysis/ImageAnalysis/python/references/bead-designer-test/beads.py | mikebourbeauart/PerlerPrinter | 8c5023de6bb9b3cbe2bc28c1c823030dfd708db4 | ["MIT"] | null | null | null | ImageAnalysis/ImageAnalysis/python/references/bead-designer-test/beads.py | mikebourbeauart/PerlerPrinter | 8c5023de6bb9b3cbe2bc28c1c823030dfd708db4 | ["MIT"] | 2 | 2021-09-07T23:43:53.000Z | 2022-01-13T00:39:55.000Z | ImageAnalysis/ImageAnalysis/python/references/bead-designer-test/beads.py | mikebourbeauart/PerlerPrinter | 8c5023de6bb9b3cbe2bc28c1c823030dfd708db4 | ["MIT"] | 1 | 2019-10-21T17:12:07.000Z | 2019-10-21T17:12:07.000Z |
import Image
from ImageColor import getrgb
from reportlab.pdfgen import canvas
from reportlab.lib.units import mm
from reportlab.lib.pagesizes import A4
import uuid
BEAD_RADIUS = 1.75*mm
BEAD_THICKNESS = 1*mm
BOARD_SPACING = 4.85*mm
BOARD_BORDER = 4*mm
#A4 60x43 = 2580
#A3 86x60 = 5160
#A2 86x120 = 10,320
#MARQUEE A4+A4 = 120x43
class beadColours():
def __init__(self):
self.palette = self.getColours()
def getColours(self, csv="colours\\all.csv"):
"""read colour table
CODE, NAME, R, G, B, TYPE, INCLUDE/EXCLUDE"""
palette = []
with open(csv, 'r') as f:
read_data = f.read()
lines = read_data.split("\n")
        #file is closed automatically by the with-block above
return lines
def bestMatch(self, r=0, g=0, b=0):
"""return nearest bead colour to the r,g,b value specified"""
tmp = []
for row in self.palette:
cell = row.split(",")
if cell[0] != 'CODE' and cell[6] != 'E': #ignore some lines
if cell[0][0] in ('H'): #Hama and Perler only for now
tmp_r = int(cell[2])
tmp_g = int(cell[3])
tmp_b = int(cell[4])
if tmp_r > r: dif_r = tmp_r - r
else: dif_r = r - tmp_r
if tmp_g > g: dif_g = tmp_g - g
else: dif_g = g - tmp_g
if tmp_b > b: dif_b = tmp_b - b
else: dif_b = b - tmp_b
difference = dif_r + dif_g + dif_b
tmp.append((difference, tmp_r, tmp_g, tmp_b))
tmp.sort()
return tmp[0][1:]
colours = beadColours()
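#Illustrative example (added, not in the original script): snap a single RGB
#value to the nearest catalogued bead colour, e.g. for pure red:
#print colours.bestMatch(255, 0, 0)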
#read image file header
try:
im = Image.open("images\\pikachu.gif")
image_width = im.size[0]
image_height = im.size[1]
image_format = im.format
except IOError:
print "Error opening file"
out_file = 'result%s.pdf' % uuid.uuid1()
pdf = canvas.Canvas(out_file, pagesize=A4)
##work out the best orientation
a4_width, a4_height = A4
#if (width - (BOARD_BORDER * 2)) < (image_width * BOARD_SPACING):
#width_temp = width
#width = height
#height = width_temp
#for now, just use generated page size
width = (image_width * BOARD_SPACING) + (BOARD_BORDER * 2)
height = (image_height * BOARD_SPACING) + (BOARD_BORDER * 2)
if width < a4_width and width < a4_height:
height = a4_height
pdf.setPageSize((width, height))
im = im.convert('RGB')
data = list(im.getdata())
list_pos = 0
for y in range(0, im.size[1]):
pos_y = height - BOARD_BORDER - (y * BOARD_SPACING)
for x in range(0, im.size[0]):
r = data[list_pos][0]
g = data[list_pos][1]
b = data[list_pos][2]
r, g, b = colours.bestMatch(r,g,b)
pos_x = BOARD_BORDER + (x * BOARD_SPACING)
pdf.setLineWidth(BEAD_THICKNESS)
pdf.setStrokeColorRGB(float(r)/255,float(g)/255,float(b)/255)
pdf.circle(pos_x, pos_y, BEAD_RADIUS, stroke=1, fill=0)
#for light colour we need a thin black border
if r + g + b >= 750:
pdf.setLineWidth(0.25*mm)
pdf.setStrokeColorRGB(0,0,0)
pdf.circle(pos_x, pos_y, BEAD_RADIUS + (BEAD_THICKNESS / 2), stroke=1, fill=0)
pdf.circle(pos_x, pos_y, BEAD_RADIUS - (BEAD_THICKNESS / 2), stroke=1, fill=0)
list_pos += 1
pdf.showPage()
pdf.save()
| 28.414414 | 84 | 0.616677 | 1,156 | 0.366519 | 0 | 0 | 0 | 0 | 0 | 0 | 617 | 0.195625 |
c8f8f117d6dace7d4b6c578a60f491f9e6393f0d | 1,836 | py | Python | common_tools/report_dialog.py | jamiecook/AequilibraE | b1013d59cbeaf6fc4e1a944cf31f20460a2a4156 | ["MIT"] | null | null | null | common_tools/report_dialog.py | jamiecook/AequilibraE | b1013d59cbeaf6fc4e1a944cf31f20460a2a4156 | ["MIT"] | null | null | null | common_tools/report_dialog.py | jamiecook/AequilibraE | b1013d59cbeaf6fc4e1a944cf31f20460a2a4156 | ["MIT"] | null | null | null |
"""
-----------------------------------------------------------------------------------------------------------
Package: AequilibraE
Name: Report dialog
Purpose: Dialog for showing the report from algorithm runs
Original Author: Pedro Camargo ([email protected])
Contributors:
Last edited by: Pedro Camargo
Website: www.AequilibraE.com
Repository: https://github.com/AequilibraE/AequilibraE
Created: 2014-03-19
Updated: 30/09/2016
Copyright: (c) AequilibraE authors
Licence: See LICENSE.TXT
-----------------------------------------------------------------------------------------------------------
"""
from qgis.core import *
from PyQt4 import QtGui, uic
from PyQt4.QtGui import *
import sys
import os
from auxiliary_functions import standard_path
FORM_CLASS, _ = uic.loadUiType(os.path.join(os.path.dirname(__file__), 'forms/ui_report.ui'))
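# uic.loadUiType returns a (form_class, qt_base_class) pair; only the generated
# form class is needed here, so the Qt base class is discarded.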
class ReportDialog(QtGui.QDialog, FORM_CLASS):
def __init__(self, iface, reporting):
QDialog.__init__(self)
self.iface = iface
self.setupUi(self)
self.path = standard_path()
self.reporting = reporting
for t in reporting:
self.all_data.append(t)
self.but_save_log.clicked.connect(self.save_log)
self.but_close.clicked.connect(self.exit_procedure)
def save_log(self):
file_types = "Text files(*.txt)"
new_name = QFileDialog.getSaveFileName(None, 'Save log', self.path, file_types)
if len(new_name) > 0:
            if new_name[-3:].upper() != 'TXT':
new_name = new_name + '.txt'
outp = open(new_name, 'w')
for t in self.reporting:
print >> outp, t
outp.flush()
outp.close()
self.exit_procedure()
def exit_procedure(self):
self.close()
| 30.6 | 108 | 0.56427 | 946 | 0.515251 | 0 | 0 | 0 | 0 | 0 | 0 | 706 | 0.384532 |
c8f9b47386e455dd9e70d1f591e4c141b1b8e828 | 21,580 | py | Python | gui/robot_data_visualizer.py | wh1210/robot-data-visualizer | ebb59687233a8d09c8ed327c66ed1d69c4623136 | ["MIT"] | null | null | null | gui/robot_data_visualizer.py | wh1210/robot-data-visualizer | ebb59687233a8d09c8ed327c66ed1d69c4623136 | ["MIT"] | 13 | 2018-11-20T22:55:39.000Z | 2022-03-11T23:36:18.000Z | gui/robot_data_visualizer.py | wh1210/robot-data-visualizer | ebb59687233a8d09c8ed327c66ed1d69c4623136 | ["MIT"] | 2 | 2018-11-09T01:48:07.000Z | 2018-12-29T23:10:53.000Z |
import os
import sys
sys.path.append('.')
sys.path.append('..')
import warnings
warnings.filterwarnings("ignore")
from datetime import datetime
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.lines as lines
import matplotlib.image as mpimg
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import tkinter as tk
from tools.get_dates_umich import get_dates_umich
from tools.staticmap_for_gps import map_for_gps
from tools.data_manager import DataManager
from tools.view_lidar import hokuyo_plot
from tools.view_lidar import threshold_lidar_pts
class VisualizerFrame(tk.Frame):
"""
This is the main window where the robot data is seen by the user.
"""
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
self.label = None
self.ax_map = None
self.ax_gps = None
self.ax_lidar = None
self.map_plot = None
self.gps_plot = None
self.lidar_plot = None
self.canvas = None
self.data_manager = None
self.gps_data = None
self.lidar_data = None
self.gps_on = False
self.map_on = False
self.lidar_on = False
self.map_image = None
self.widgets()
def widgets(self):
"""
Set up widgets for the frame.
:return: None
"""
self.label = tk.Label(self, text="Viewer")
self.label.pack(side=tk.TOP)
self.fig = Figure(figsize=(5, 4), dpi=100)
self.ax_map = self.fig.add_subplot(111)
self.ax_gps = self.fig.add_subplot(111)
self.ax_lidar = self.fig.add_subplot(111)
self.canvas = FigureCanvasTkAgg(self.fig, master=self.master)
self.canvas.draw()
self.canvas.get_tk_widget().pack(fill=tk.BOTH, expand=True)
def callback_initialize_data_manager(self):
"""
This callback responds to the *Load Data* button.
:return: None
"""
date = self.parent.toolbar.date.get()
if self.data_manager is None:
self.setup_data(date)
else:
if self.data_manager.date is not date:
os.chdir('../..') # TODO patched here - add this to end of load_gps() / load_lidar() functions
self.setup_data(date)
else:
pass
def setup_data(self, date):
"""
This function sets up all of the data (except lidar) needed by the application.
:param date: Determines which date from the robotics dataset to use.
:type date: str.
:return: None
"""
if self.data_manager is not None:
os.chdir(self.data_manager.owd)
self.ax_gps.clear()
self.ax_map.clear()
self.ax_lidar.clear()
self.canvas.draw()
self.gps_on = False
self.map_on = False
self.lidar_on = False
self.parent.set_status('DM_START', hold=True)
self.data_manager = DataManager(date)
self.data_manager.setup_data_files('sensor_data')
self.data_manager.load_gps()
x_coords, y_coords = map_for_gps(self.data_manager.data_dict, self.data_manager.data_dir)
self.lidar_data = None
self.gps_data = [x_coords, y_coords] # in image coords
self.map_image = mpimg.imread(os.path.join(self.data_manager.data_dir, 'map.png'))
self.label.config(text='Viewer')
self.parent.set_status('DM_READY')
def callback_gps_on(self):
"""
This callback responds to the *On* button under the *GPS Control* menu.
:return: None
"""
if not self.lidar_on:
if not self.gps_on:
self.gps_on = True
self.parent.set_status('GPS_START')
idx = self.get_idx_for_gps_update()
self.update_timestamp(idx)
self.gps_plot = self.ax_gps.plot(self.gps_data[0][:idx], self.gps_data[1][:idx], 'r')[0]
                self.canvas.draw()
self.parent.set_status('GPS_READY')
else:
pass
else:
self.callback_lidar_off()
self.callback_gps_on()
def callback_gps_off(self):
"""
This callback responds to the *Off* button under the *GPS Control* menu.
:return: None
"""
if self.gps_on:
self.gps_on = False
self.update_gps(0)
self.label.config(text='Viewer')
self.parent.set_status('GPS_REMOVE')
else:
pass
def callback_gps_slider_changed(self, event):
"""
This callback responds to the scale position changing under the *GPS Control* menu.
:return: None
"""
self.gps_on = True
idx = self.get_idx_for_gps_update()
self.update_gps(idx)
self.update_timestamp(idx)
self.parent.set_status('GPS_UPDATE')
def update_gps(self, idx):
"""
This function updates the GPS data that is displayed in the main viewing window.
:param idx: Index into the array of GPS data that is to be displayed.
:type idx: int.
:return: None
"""
if self.gps_data is not None:
self.gps_plot.set_xdata(self.gps_data[0][:idx])
self.gps_plot.set_ydata(self.gps_data[1][:idx])
self.canvas.draw()
else:
pass
def update_timestamp(self, idx):
"""
This function updates the timestamp in the main viewing window.
:param idx: Index into the array of GPS data to be used for retrieval of the time stamp.
:type idx: int.
:return: None
"""
curr_tstamp = self.get_timestamp_for_gps_update(idx)
self.label.config(text=str('time stamp: ' + curr_tstamp))
def get_idx_for_gps_update(self):
"""
This function returns the index to be used for updating the GPS data.
:return: int -- the index to be used for the GPS update
"""
slider_val = self.parent.control.gps_control.selection_scale.get()
idx_ratio = len(self.gps_data[0]) / 100
return int(slider_val * idx_ratio)
def get_timestamp_for_gps_update(self, gps_data_idx):
"""
This function returns the timestamp in a readable format for the given GPS data index.
:param gps_data_idx: Index into the array of GPS data to be used for retrieval of the time stamp.
:return: str -- the timestamp
"""
idx_ratio = len(self.data_manager.data_dict['gps']['tstamp']) / len(self.gps_data[0])
idx = int(gps_data_idx * idx_ratio) - 1
ts = int(self.data_manager.data_dict['gps']['tstamp'][idx] / 1000000)
return datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
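    # Note (added): the raw timestamps are evidently in microseconds, hence
    # the division by 1,000,000 before datetime.fromtimestamp above.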
def callback_map_on(self):
"""
This callback responds to the *On* button under the *Map Control* menu.
:return: None
"""
if not self.lidar_on:
if not self.map_on:
self.map_on = True
if self.map_image is not None:
self.ax_map.imshow(self.map_image)
# draw scale on the map
map_scale = self.get_map_scale()
line = lines.Line2D([0, 200], [0, 0], linewidth=4, color='b')
self.ax_map.add_line(line)
distance = map_scale * 200
if distance > 1000:
scale_str = "scale = " + str(float("%.2f" % (distance / 1000))) + " kilometers"
else:
scale_str = "scale = " + str(float("%.2f" % (distance))) + " meters"
self.ax_map.text(0, -10, scale_str, fontsize=8)
self.canvas.draw()
self.parent.set_status('MAP_READY')
else:
self.parent.set_status('MAP_ERROR')
else:
pass
else:
self.callback_lidar_off()
self.callback_map_on()
def callback_map_off(self):
"""
This callback responds to the *Off* button under the *Map Control* menu.
:return: None
"""
if self.map_on:
self.map_on = False
self.ax_map.clear()
if self.gps_on:
self.gps_on = False
self.callback_gps_on() # because the previous line clears both map and gps
self.canvas.draw()
else:
pass
def callback_date_changed(self):
"""
This callback responds to a change in the date selection menu in the toolbar.
:return: None
"""
new_date = self.parent.toolbar.date.get() # Need to call get() because this is a StringVar object
if self.parent.toolbar.date is not new_date:
self.parent.toolbar.date.set(new_date)
else:
pass
def get_map_scale(self):
"""
This function calculates the map scale in units of meters per pixel.
:return: float64 -- map scale (m/px)
"""
k = 111000 # meters per degree of latitude (approx.)
lat_range = self.data_manager.data_dict['gps_range'][0]
d_lat_range = abs(lat_range[0] - lat_range[1])
d_x_pixels = abs(max(self.gps_data[0]) - min(self.gps_data[0]))
map_scale = d_lat_range * k / d_x_pixels
return map_scale # units of meters per pixel
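    # Worked example (illustrative numbers, added): a latitude span of 0.01
    # degrees (~1110 m) rendered across 800 px gives
    # map_scale = 0.01 * 111000 / 800 ~= 1.39 m/px, so the 200 px scale bar
    # drawn in callback_map_on spans roughly 277 m.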
def callback_lidar_slider_changed(self, event):
"""
This callback responds to the scale position changing under the *Lidar Control* menu.
:return: None
"""
self.lidar_on = True
idx = self.get_idx_for_lidar_update()
self.update_lidar(idx)
# self.update_timestamp(idx)
self.parent.set_status('Lidar updated')
def get_idx_for_lidar_update(self):
"""
This function returns the index to be used for updating the Lidar data.
:return: int -- the index to be used for the Lidar update
"""
slider_val = self.parent.control.lidar_control.selection_scale.get()
idx_ratio = len(self.lidar_data) / 100
return max(int(slider_val * idx_ratio) - 1, 0)
def update_lidar(self, idx):
"""
This function updates the Lidar data that is displayed in the main viewing window.
:param idx: Index into the array of Lidar data that is to be displayed.
:type idx: int.
:return: None
"""
if self.lidar_data is not None:
yt, xt, _ = threshold_lidar_pts(self.lidar_data[idx])
self.lidar_plot.set_xdata(xt)
self.lidar_plot.set_ydata(yt)
self.canvas.draw()
else:
pass
def callback_lidar_on(self):
"""
This callback responds to the *On* button under the *Lidar Control* menu.
:return: None
"""
if not self.lidar_on:
self.lidar_on = True
self.callback_map_off()
self.callback_gps_off()
if self.data_manager is None:
self.callback_initialize_data_manager()
if not 'lidar' in self.data_manager.data_dict.keys():
self.data_manager.setup_data_files('hokuyo')
pickled = True
delete_pickle = False
self.data_manager.load_lidar(4000, pickled, delete_pickle) # TODO - global constant for lidar samples
self.lidar_data = self.data_manager.data_dict['lidar']
xlimits, ylimits = [-32, 32], [-32, 32]
self.ax_lidar.set_xlim(xlimits)
self.ax_lidar.set_ylim(ylimits)
hokuyo_plot(self.ax_lidar)
yt, xt, _ = threshold_lidar_pts(self.lidar_data[0])
self.lidar_plot = self.ax_lidar.plot(xt, yt, 'r.')[0]
            self.canvas.draw()
else:
pass
def callback_lidar_off(self):
"""
This callback responds to the *Off* button under the *Lidar Control* menu.
:return: None
"""
if self.lidar_on:
self.lidar_on = False
self.ax_lidar.clear()
self.canvas.draw()
else:
pass
class ToolbarFrame(tk.Frame):
"""
This class represents the toolbar at the top of the window.
"""
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
self.date = None
self.dates = get_dates_umich()
self.load_button = None
self.option_menu = None
self.widgets()
def widgets(self):
"""
Set up widgets for the frame.
:return: None
"""
self.dates = get_dates_umich()
self.load_button = tk.Button(self, text="Load Data")
self.load_button.pack(side=tk.LEFT, padx=2, pady=2)
self.date = tk.StringVar(self)
self.date.set(self.dates[24])
self.option_menu = tk.OptionMenu(self, self.date, *self.dates, command=self.callback_date_changed)
self.option_menu.pack(side=tk.LEFT, padx=2, pady=2)
def bind_widgets(self):
"""
Bind widgets to their callback functions.
:return: None
"""
self.load_button.config(command=self.parent.window.callback_initialize_data_manager)
def callback_date_changed(self, event):
self.parent.window.callback_date_changed()
class ControlFrame(tk.Frame):
"""
This class represents the controls on the right hand side of the main
    window. There are three nested classes for the GPS, map, and lidar controls.
"""
def __init__(self, parent):
tk.Frame.__init__(self, parent, width=400)
self.parent = parent
self.root = parent
        self.gps_control = None
self.map_control = None
self.lidar_control = None
self.widgets()
class GpsControlFrame(tk.Frame):
def __init__(self, parent, root):
tk.Frame.__init__(self, parent, width=400)
self.parent = parent
self.root = root
self.selection_scale = None
self.scale_val = None
self.on_button = None
self.off_button = None
self.widgets()
def widgets(self):
"""
Set up widgets for the frame.
:return: None
"""
label = tk.Label(self, text="GPS Control", bg="blue", fg="white")
label.pack(side=tk.TOP, fill=tk.X)
self.selection_scale = tk.Scale(self, orient=tk.HORIZONTAL, to=100, variable=self.scale_val)
self.selection_scale.set(100)
self.selection_scale.pack(side=tk.TOP)
self.on_button = tk.Button(self, text="On", bg="green", fg="white")
self.on_button.pack(side=tk.LEFT)
self.off_button = tk.Button(self, text="Off", bg="red", fg="white")
self.off_button.pack(side=tk.RIGHT)
def bind_widgets(self):
"""
Bind widgets to their callback functions.
:return: None
"""
self.on_button.config(command=self.root.window.callback_gps_on)
self.off_button.config(command=self.root.window.callback_gps_off)
self.selection_scale.bind("<ButtonRelease-1>", self.root.window.callback_gps_slider_changed)
class MapControlFrame(tk.Frame):
def __init__(self, parent, root):
tk.Frame.__init__(self, parent, width=400)
self.parent = parent
self.root = root
self.on_button = None
self.off_button = None
self.widgets()
def widgets(self):
"""
Set up widgets for the frame.
:return: None
"""
label = tk.Label(self, text="Map Control", bg="blue", fg="white")
label.pack(fill=tk.X)
self.on_button = tk.Button(self, text="On", bg="green", fg="white")
self.on_button.pack(side=tk.LEFT)
self.off_button = tk.Button(self, text="Off", bg="red", fg="white")
self.off_button.pack(side=tk.RIGHT)
def bind_widgets(self):
"""
Bind widgets to their callback functions.
:return: None
"""
self.on_button.config(command=self.root.window.callback_map_on)
self.off_button.config(command=self.root.window.callback_map_off)
class LidarControlFrame(tk.Frame):
def __init__(self, parent, root):
tk.Frame.__init__(self, parent, width=400)
self.parent = parent
self.root = root
self.scale_val = None
self.on_button = None
self.off_button = None
self.widgets()
def widgets(self):
"""
Set up widgets for the frame.
:return: None
"""
label = tk.Label(self, text="Lidar Control", bg="blue", fg="white")
label.pack(side=tk.TOP, fill=tk.X)
self.selection_scale = tk.Scale(self, orient=tk.HORIZONTAL, to=100, variable=self.scale_val)
self.selection_scale.set(100)
self.selection_scale.pack(side=tk.TOP)
self.on_button = tk.Button(self, text="On", bg="green", fg="white")
self.on_button.pack(side=tk.LEFT)
self.off_button = tk.Button(self, text="Off", bg="red", fg="white")
self.off_button.pack(side=tk.RIGHT)
def bind_widgets(self):
"""
Bind widgets to their callback functions.
:return: None
"""
self.on_button.config(command=self.root.window.callback_lidar_on)
self.off_button.config(command=self.root.window.callback_lidar_off)
self.selection_scale.bind("<ButtonRelease-1>", self.root.window.callback_lidar_slider_changed)
def widgets(self):
"""
Set up widgets for the frame.
:return: None
"""
self.gps_control = self.GpsControlFrame(self, self.root)
self.gps_control.pack(fill=tk.X)
self.map_control = self.MapControlFrame(self, self.root)
self.map_control.pack(fill=tk.X)
self.lidar_control = self.LidarControlFrame(self, self.root)
self.lidar_control.pack(fill=tk.X)
def bind_widgets(self):
"""
Bind widgets to their callback functions.
:return: None
"""
self.gps_control.bind_widgets()
self.map_control.bind_widgets()
self.lidar_control.bind_widgets()
class MainWindow(tk.Tk):
"""
This is the main window for the application. Here the main layout is
established using a combination of the above classes and individual
tkinter widgets.
"""
def __init__(self, parent):
tk.Tk.__init__(self, parent)
self.parent = parent
self.status_text = dict(READY="Ready",
DM_START="Initializing data manager ...",
DM_READY="Data is ready",
DM_NOT_READY="Data not loaded",
GPS_START="GPS loading ...",
GPS_READY="GPS is ready",
GPS_REMOVE="GPS removed",
GPS_UPDATE="GPS updated",
MAP_START="Map loading ...",
MAP_READY="Map is ready",
MAP_REMOVE="Map removed",
MAP_ERROR="Must load data before map can be displayed")
self.STATUS_DELAY = 2000 # (ms) delay between status changes
self.title("Robot Data Visualizer")
self.mainWidgets()
def mainWidgets(self):
"""
Set up widgets for the main window frame.
:return: None
"""
# Toolbar
self.toolbar = ToolbarFrame(self)
self.toolbar.pack(side=tk.TOP, fill=tk.X)
# Status bar
self.status = tk.Label(self, text=self.status_text['READY'], bd=1, relief=tk.SUNKEN, anchor=tk.W)
self.status.pack(side=tk.BOTTOM, fill=tk.X)
# Controls - GPS and Map
self.control = ControlFrame(self)
self.control.pack(side=tk.RIGHT, fill=tk.Y)
# Main viewing window
self.window = VisualizerFrame(self)
self.window.pack(side=tk.LEFT, padx=2, pady=2)
# Bind widgets to their callback functions
self.toolbar.bind_widgets()
self.control.bind_widgets()
def set_status(self, status, hold=False):
"""
This function sets the status bar at the bottom of the window (with a time delay).
:param status: Key to look up status message in the status_text dictionary.
:type status: str.
:param hold: When *hold=True*, the status update will not time out.
:type hold: bool.
:return: None
"""
        if status in self.status_text.keys():
            self.status.config(text=self.status_text[status])
        else:
            self.status.config(text=str(status))
        # Either way, revert to the READY message after the delay.
        if not hold:
            self.status.after(self.STATUS_DELAY, lambda: self.status.config(text=self.status_text['READY']))
if __name__ == '__main__':
app = MainWindow(None)
app.mainloop()
| 34.091627 | 117 | 0.580445 | 20,885 | 0.967794 | 0 | 0 | 0 | 0 | 0 | 0 | 6,058 | 0.280723 |
c8fa3bb594a67f398ad5e9f8e305ca9da2fda5ed | 1,780 | py | Python | day10/day10.py | BroderickCarlin/AdventOfCode | 52d12d16f3d291a51984e6d85dbe97e604abc005 | ["MIT"] | null | null | null | day10/day10.py | BroderickCarlin/AdventOfCode | 52d12d16f3d291a51984e6d85dbe97e604abc005 | ["MIT"] | null | null | null | day10/day10.py | BroderickCarlin/AdventOfCode | 52d12d16f3d291a51984e6d85dbe97e604abc005 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
lengths = "187,254,0,81,169,219,1,190,19,102,255,56,46,32,2,216"
suffix = [17, 31, 73, 47, 23]
num_rounds = 64
def puzzle1():
    knot = list(range(256))  # list() so the slice assignments below also work on Python 3
skip_size = 0
idx1 = 0
for l in [int(a) for a in lengths.split(",")]:
idx2 = idx1 + l
k = []
if idx2 >= len(knot):
k = knot[idx1:] + knot[:idx2 - len(knot)]
else:
k = knot[idx1:idx2]
k = list(reversed(k))
if idx2 >= len(knot):
knot[idx1:] = k[:len(knot) - idx1]
knot[:idx2 - len(knot)] = k[len(knot) - idx1:]
else:
knot[idx1:idx2] = k
idx1 += skip_size + l
while idx1 >= len(knot): idx1 -= len(knot)
skip_size += 1
return knot[0] * knot[1]
def puzzle2():
    knot = list(range(256))  # list() so the slice assignments below also work on Python 3
hash_knot = ""
skip_size = 0
idx1 = 0
for _ in range(num_rounds):
        for l in [ord(a) for a in lengths] + suffix:  # ASCII codes of the length string (Python 2/3 safe)
idx2 = idx1 + l
k = []
if idx2 >= len(knot):
k = knot[idx1:] + knot[:idx2 - len(knot)]
else:
k = knot[idx1:idx2]
k = list(reversed(k))
if idx2 >= len(knot):
knot[idx1:] = k[:len(knot) - idx1]
knot[:idx2 - len(knot)] = k[len(knot) - idx1:]
else:
knot[idx1:idx2] = k
idx1 += skip_size + l
while idx1 >= len(knot): idx1 -= len(knot)
skip_size += 1
for x in range(16):
s = 0
for y in range(16):
s ^= knot[x * 16 + y]
hash_knot += "%0.2X" % s
return hash_knot
if __name__ == "__main__":
print("1: {}".format(puzzle1()))
print("2: {}".format(puzzle2()))
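# Toy check (added; mirrors the AoC 2017 day 10 example rather than this
# puzzle's input): a 5-element knot [0..4] with lengths 3,4,1,5 ends up as
# [3, 4, 2, 1, 0], so the puzzle1-style product would be 3 * 4 = 12.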
| 24.383562 | 64 | 0.455056 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 134 | 0.075281 |
c8fc7cc35ebc665797970c840fc5d039b1988b5c | 1,914 | py | Python | 17tensorflow/tf2/2my_model.py | cheerfulwang/python-tutorial | d0f7348e1da4ff954e3add66e1aae55d599283ee | ["Apache-2.0"] | 2 | 2021-01-04T10:44:44.000Z | 2022-02-13T07:53:41.000Z | 17tensorflow/tf2/2my_model.py | zm79287/python-tutorial | d0f7348e1da4ff954e3add66e1aae55d599283ee | ["Apache-2.0"] | null | null | null | 17tensorflow/tf2/2my_model.py | zm79287/python-tutorial | d0f7348e1da4ff954e3add66e1aae55d599283ee | ["Apache-2.0"] | 2 | 2020-11-23T08:58:51.000Z | 2022-02-13T07:53:42.000Z |
# -*- coding: utf-8 -*-
"""
@author:XuMing([email protected])
@description:
"""
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.layers as layers
# Hyperparameters
num_words = 2000
num_tags = 12
num_departments = 4
# Inputs
body_input = keras.Input(shape=(None,), name='body')
title_input = keras.Input(shape=(None,), name='title')
tag_input = keras.Input(shape=(num_tags,), name='tag')
# Embedding layers
body_feat = layers.Embedding(num_words, 64)(body_input)
title_feat = layers.Embedding(num_words, 64)(title_input)
# Feature extraction layers
body_feat = layers.LSTM(32)(body_feat)
title_feat = layers.LSTM(128)(title_feat)
features = layers.concatenate([title_feat, body_feat, tag_input])
# Classification heads
priority_pred = layers.Dense(1, activation='sigmoid', name='priority')(features)
department_pred = layers.Dense(num_departments, activation='softmax', name='department')(features)
# Build the model
model = keras.Model(inputs=[body_input, title_input, tag_input],
outputs=[priority_pred, department_pred])
model.summary()
keras.utils.plot_model(model, 'multi_model.png', show_shapes=True)
model.compile(optimizer=keras.optimizers.RMSprop(1e-3),
loss={'priority': 'binary_crossentropy',
'department': 'categorical_crossentropy'},
loss_weights=[1., 0.2])
import numpy as np
# Load (here: randomly generate) the input data
title_data = np.random.randint(num_words, size=(1280, 10))
body_data = np.random.randint(num_words, size=(1280, 100))
tag_data = np.random.randint(2, size=(1280, num_tags)).astype('float32')
# Labels
priority_label = np.random.random(size=(1280, 1))
department_label = np.random.randint(2, size=(1280, num_departments))
# Train
history = model.fit(
{'title': title_data, 'body':body_data, 'tag':tag_data},
{'priority':priority_label, 'department':department_label},
batch_size=32,
epochs=5
)
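# Quick inference sketch (added example): run the trained model on a few
# samples and unpack the two output heads.
pred_priority, pred_department = model.predict(
    {'title': title_data[:4], 'body': body_data[:4], 'tag': tag_data[:4]})
print(pred_priority.shape, pred_department.shape)  # -> (4, 1) and (4, 4)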
model.save('model_save.h5')
del model
model = keras.models.load_model('model_save.h5')
 | 29.446154 | 98 | 0.719436 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 408 | 0.206687 |
c8ff0f334dbba342f0a95112a0a41bb1cc0f4aaf | 3,937 | py | Python | src/genie/libs/parser/nxos/tests/ShowIpOspf/cli/equal/golden_output_2_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | ["Apache-2.0"] | 204 | 2018-06-27T00:55:27.000Z | 2022-03-06T21:12:18.000Z | src/genie/libs/parser/nxos/tests/ShowIpOspf/cli/equal/golden_output_2_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | ["Apache-2.0"] | 468 | 2018-06-19T00:33:18.000Z | 2022-03-31T23:23:35.000Z | src/genie/libs/parser/nxos/tests/ShowIpOspf/cli/equal/golden_output_2_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | ["Apache-2.0"] | 309 | 2019-01-16T20:21:07.000Z | 2022-03-30T12:56:41.000Z |
expected_output = {
'vrf':
{'default':
{'address_family':
{'ipv4':
{'instance':
{'1':
{'areas':
{'0.0.0.0':
{'area_id': '0.0.0.0',
'area_type': 'normal',
'authentication': 'none',
'existed': '1w5d',
'numbers':
{'active_interfaces': 4,
'interfaces': 6,
'loopback_interfaces': 4,
'passive_interfaces': 0},
'statistics':
{'area_scope_lsa_cksum_sum': '1',
'area_scope_lsa_count': 1,
'spf_last_run_time': 0.000447,
'spf_runs_count': 2}}},
'auto_cost':
{'bandwidth_unit': 'mbps',
'enable': False,
'reference_bandwidth': 40000},
'enable': False,
'discard_route_external': True,
'discard_route_internal': True,
'graceful_restart':
{'ietf':
{'enable': True,
'exist_status': 'none',
'restart_interval': 60,
'state': 'Inactive',
'type': 'ietf'}},
'instance': 1,
'nsr':
{'enable': True},
'numbers':
{'active_areas':
{'normal': 1,
'nssa': 0,
'stub': 0,
'total': 1},
'areas':
{'normal': 1,
'nssa': 0,
'stub': 0,
'total': 1}},
'opaque_lsa_enable': True,
'preference':
{'single_value':
{'all': 110}},
'router_id': '10.100.2.2',
'single_tos_routes_enable': True,
'spf_control':
{'paths': 8,
'throttle':
{'lsa':
{'group_pacing': 10,
'hold': 5000,
'maximum': 5000,
'minimum': 1000,
'numbers':
{'external_lsas':
{'checksum': '0',
'total': 0},
'opaque_as_lsas':
{'checksum': '0',
'total': 0}},
'start': 0.0},
'spf':
{'hold': 1000,
'maximum': 5000,
'start': 200}}}}}}}}}}
| 49.2125 | 73 | 0.216916 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 928 | 0.235712 |
c8ffacba13563fc63e94eff5bc851a3e548d81b6 | 4,566 | py | Python | rain/cloud/system/system.py | SuPerCxyz/rain | 578b6d125f535414d3ea3fcfee4015b70fed560c | ["Apache-2.0"] | 2 | 2018-12-20T01:38:56.000Z | 2018-12-29T14:49:36.000Z | rain/cloud/system/system.py | SuPerCxyz/rain | 578b6d125f535414d3ea3fcfee4015b70fed560c | ["Apache-2.0"] | null | null | null | rain/cloud/system/system.py | SuPerCxyz/rain | 578b6d125f535414d3ea3fcfee4015b70fed560c | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import json
import platform
import time
from getdevinfo import getdevinfo
import psutil
from rain.common import rain_log
from rain.common import utils
from rain.common.utils import async_call
logger = rain_log.logg(__name__)
class SystemInfo(object):
"""system information.
Collect system information, including cpu, memory, hostname, boot time,
login information...
"""
def __init__(self):
self.thread = {}
def _load_stat(self):
"""Collecting system load.
"""
cpu_count = psutil.cpu_count()
with open("/proc/loadavg") as f:
con = f.read().split()
load_1 = con[0]
load_5 = con[1]
load_15 = con[2]
sys_load_1 = round(float(load_1)/cpu_count * 100, 2)
sys_load_5 = round(float(load_5)/cpu_count * 100, 2)
sys_load_15 = round(float(load_15)/cpu_count * 100, 2)
system_load = {
'sys_load_1': sys_load_1,
'sys_load_5': sys_load_5,
'sys_load_15': sys_load_15,
'load_1': load_1,
'load_5': load_5,
'load_15': load_15
}
logger.info('Collect system load.')
return system_load
@async_call
def _cpu_percent(self):
tmp = psutil.cpu_percent(interval=1, percpu=True)
self.thread['cpu_percent'] = tmp
@async_call
def _cpus_times_percent(self):
tmp = psutil.cpu_times_percent(interval=1, percpu=True)
self.thread['cpus_times_percent'] = tmp
def get_cpuinfo_info(self):
"""Collect the number of cpu and usage information and
return the dictionary type.
"""
cpu_count = psutil.cpu_count()
self._cpu_percent()
self._cpus_times_percent()
while True:
if len(self.thread.keys()) == 2:
break
time.sleep(0.1)
cpu_percent_info = []
for cpu in self.thread['cpus_times_percent']:
percent_info = {
'user': cpu.user,
'system': cpu.system,
'idle': cpu.idle,
'iowait': cpu.iowait
}
cpu_percent_info.append(percent_info)
system_load = self._load_stat()
cpu_info_dict = {
'cpu_count': cpu_count,
'cpu_percent': self.thread['cpu_percent'],
'cpu_percent_info': cpu_percent_info,
'system_load': system_load
}
logger.info('Collect cpu related information.')
return cpu_info_dict
def get_memcache_info(self):
"""Collect memory and swap information and return dictionary type.
"""
memcache_info = psutil.virtual_memory()
memcache_total = memcache_info.total / 1024 ** 2
memcache_used = memcache_info.used / 1024 ** 2
memcache_available = memcache_info.available / 1024 ** 2
memcache_buff = memcache_info.cached / 1024 ** 2
memcache_cached = memcache_info.cached / 1024 ** 2
memcache_percent = memcache_info.percent
memcache_info_dict = {
'memcache_total_MB': memcache_total,
'memcache_used_MB': memcache_used,
'memcache_available_MB': memcache_available,
'memcache_buff_MB': memcache_buff,
'memcache_cached_MB': memcache_cached,
'memcache_percent': memcache_percent
}
logger.info('Collect memory related information.')
return memcache_info_dict
def _get_user(self):
"""Collect login user information.
"""
user_info_list = []
user_list = psutil.users()
for user in user_list:
user_dict = {}
user_dict['name'] = user.name
user_dict['host'] = user.host
user_dict['conn_time'] = utils.str_time(user.started)
user_info_list.append(user_dict)
return user_info_list
def get_system_info(self):
"""Collect system information.
"""
system_info = {}
system_info['python_version'] = platform.python_version()
system_info['hostname'] = platform.node()
system_info['system_info'] = platform.platform()
system_info['boot_time'] = utils.str_time(psutil.boot_time())
system_info['time'] = time.asctime(time.localtime(time.time()))
system_info['user'] = self._get_user()
logger.info('Collect user login information.')
return system_info
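if __name__ == '__main__':
    # Usage sketch (added example; assumes the rain.common helpers imported
    # above are available in this environment).
    collector = SystemInfo()
    print(collector.get_cpuinfo_info())
    print(collector.get_memcache_info())
    print(collector.get_system_info())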
| 33.328467 | 75 | 0.592641 | 4,289 | 0.939334 | 0 | 0 | 296 | 0.064827 | 0 | 0 | 1,034 | 0.226456 |
c8ffe69de767e55075d5f9e090d7f69a2c93dd80 | 7,517 | py | Python | models.py | rudrasohan/Trust-Region-Policy-Optimization | bbaadf37aa3ea4ccc35907038eea4add9e5e050c | ["MIT"] | 3 | 2019-11-16T15:40:14.000Z | 2021-12-28T14:26:36.000Z | models.py | rudrasohan/Trust-Region-Policy-Optimization | bbaadf37aa3ea4ccc35907038eea4add9e5e050c | ["MIT"] | null | null | null | models.py | rudrasohan/Trust-Region-Policy-Optimization | bbaadf37aa3ea4ccc35907038eea4add9e5e050c | ["MIT"] | null | null | null |
"""Model definitions for TRPO."""
import gym
import numpy as np
import torch
import time
import scipy.optimize
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from distributions import DiagonalGaussian
from helpers import get_flat_params, set_flat_params, get_flat_grads
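# Assumed contract for the helpers above (their implementations live in
# helpers.py, which is not part of this file): they flatten a module's
# parameters/gradients into a single 1-D tensor, or write such a tensor back.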
#from helpers import sample_trajectories, compute_advantage_returns, get_flat_params
class Model(object):
"""Generic Model Template"""
def __init__(self,
observation_space,
action_space,
**kwargs):
#super(Model).__init__(**kwargs)
self.observation_space = observation_space
self.action_space = action_space
self.obs_dim = None
self.act_dim = None
if isinstance(self.observation_space, gym.spaces.Box):
self.obs_dim = np.prod(self.observation_space.shape)
else:
self.obs_dim = self.observation_space.n
if isinstance(self.action_space, gym.spaces.Box):
self.act_dim = np.prod(self.action_space.shape)
else:
self.act_dim = self.action_space.n
class MLP_Policy(nn.Module):
    """MLP model for the network"""
def __init__(self, input_dim, output_dim, name, **kwargs):
super(MLP_Policy, self).__init__()
self.name = name
self.use_new_head = False
self.fc1 = nn.Linear(input_dim, 128)
self.fc2 = nn.Linear(128, 64)
self.fc3 = nn.Linear(64, output_dim)
self.fc3.weight.data.mul_(0.1)
self.fc3.bias.data.mul_(0.0)
if bool(kwargs):
self.use_new_head = kwargs["use_new_head"]
self.fc4 = nn.Linear(64, output_dim)
else:
self.log_std = nn.Parameter(torch.zeros(output_dim))
#print(self.log_std.size())
#self.bn1 = nn.BatchNorm1d(64)
#self.bn2 = nn.BatchNorm1d(64)
def forward(self, x):
#print(self.fc1(x))
x = torch.tanh(self.fc1(x))
x = torch.tanh(self.fc2(x))
mean = self.fc3(x)
if self.use_new_head:
std = self.fc4(x)
else:
std = self.log_std.expand(mean.size())
#print(mean)
return mean, std
class MLP_Value(nn.Module):
    """MLP model for the network"""
def __init__(self, input_dim, output_dim, name, **kwargs):
super(MLP_Value, self).__init__()
self.name = name
self.fc1 = nn.Linear(input_dim, 128)
self.fc2 = nn.Linear(128, 64)
self.fc3 = nn.Linear(64, output_dim)
self.fc3.weight.data.mul_(0.1)
self.fc3.bias.data.mul_(0.0)
def forward(self, x):
#print(self.fc1(x))
x = torch.tanh(self.fc1(x))
x = torch.tanh(self.fc2(x))
out = self.fc3(x)
return out
class GaussianMLPPolicy(Model):
"""Gaussian MLP Policy"""
def __init__(self, observation_space, action_space, **kwargs):
Model.__init__(self, observation_space, action_space, **kwargs)
#self.mean_network = MLP(self.obs_dim, self.act_dim, "mean").type(torch.float64)
self.std_net = None
#self.std_network = None
#print(kwargs)
if bool(kwargs):
self.std_net = kwargs["use_std_net"]
if self.std_net:
self.network = MLP_Policy(self.obs_dim, self.act_dim, "MLP_policy", use_new_head=True)#.type(torch.float64)
else:
self.network = MLP_Policy(self.obs_dim, self.act_dim, "MLP_policy")#.type(torch.float64)
def actions(self, obs):
obs = torch.from_numpy(obs)
mean, log_std = self.network(obs)
dist = DiagonalGaussian(mean, log_std)
sample = dist.sample()
return sample, dist.logli(sample)
def get_dists(self, obs):
obs = torch.from_numpy(obs)
mean, log_std = self.network(obs)
dist = DiagonalGaussian(mean, log_std)
return dist
def clear_grads(self):
self.network.zero_grad()
class MLPBaseline(Model):
    """MLP Baseline"""
def __init__(self, observation_space, action_space, **kwargs):
Model.__init__(self, observation_space, action_space, **kwargs)
self.value = MLP_Value(self.obs_dim, 1, "MLP_baseline")
#self.criterion = nn.MSELoss()
#self.optimizer = torch.optim.LBFGS(self.value.parameters())
def predict(self, obs):
obs = torch.tensor(obs)
with torch.no_grad():
val = self.value(obs)
return val
def compute_baseline(self, obs):
obs = Variable(torch.tensor(obs))
return self.value(obs)
def clear_grads(self):
self.value.zero_grad()
def update(self, trajs):
obs = np.asarray(trajs["state"])
#obs = torch.from_numpy(obs)
returns = trajs["returns"]
baselines = trajs["baselines"]
targets = returns * 0.9 + 0.1 * baselines
#returns =
#targets = Variable(returns)
#print(targets)
'''
def closure():
self.clear_grads()
values = self.value(torch.from_numpy(obs))
self.optimizer.zero_grad()
loss = self.criterion(values, targets)
print("LBFGS_LOSS:{}".format(loss))
loss.backward()
return loss
'''
#self.optimizer.step(closure)
#curr_params = get_flat_params(self.value.parameters()).data.detach().double().numpy()
curr_flat_params = get_flat_params(self.value).detach().double().numpy()
def val_loss_grad(x):
set_flat_params(self.value, torch.tensor(x))
self.clear_grads()
#for param in self.value.parameters():
#if param.grad is not None:
#print("HHAHAHAHAHHA")
#param.grad.data.fill_(0)
#values_ = #self.value(torch.from_numpy(obs))
values_ = self.compute_baseline(obs)
#print("VALUES",values_.size())
#print("TARGETS",targets.size())
#print((values_-targets).size())
#time1 = time.time()
vf_loss = (values_ - targets).pow(2).mean()
#print("LBFGS_LOSS:{}".format(vf_loss))
#time2 = time.time()
#print("TIME:{}".format(time2-time1))
#for param in self.value.parameters():
# vf_loss += param.pow(2).sum() * 1e-2
vf_loss.backward()
flat_grad = get_flat_grads(self.value)
return (vf_loss.data.double().numpy(), flat_grad.data.double().numpy())
new_params, _, opt_info = scipy.optimize.fmin_l_bfgs_b(val_loss_grad, curr_flat_params, maxiter=25)
set_flat_params(self.value, torch.tensor(new_params))
print(opt_info)
def test_policy_value():
env = gym.make("MountainCarContinuous-v0")
policy = GaussianMLPPolicy(env.observation_space, env.action_space, use_std_net=True)
paths = sample_trajectories(env, policy, 1000)
print(len(paths["rewards"]))
baseline = MLPBaseline(env.observation_space, env.action_space)
compute_advantage_returns(paths, baseline, 0.9, 0.1)
print(paths.keys())
baseline.update(paths)
print(paths['dist'].keys())
    # The mean/std heads live on policy.network, and get_flat_params takes
    # the module itself (cf. its use in MLPBaseline.update).
    flat_params = get_flat_params(policy.network)
    print(flat_params)
#test_policy_value()
 | 33.261062 | 119 | 0.600905 | 6,435 | 0.85606 | 0 | 0 | 0 | 0 | 0 | 0 | 1,783 | 0.237196 |
7400b5e2ffa5344609e346cb8a0ec6ea5d60b0b6 | 148 | py | Python | sample_1/admin.py | JordanEC/django-rest-and-angular | 571eb2a7d966c2b7f1f520a764420207387709cd | ["MIT"] | null | null | null | sample_1/admin.py | JordanEC/django-rest-and-angular | 571eb2a7d966c2b7f1f520a764420207387709cd | ["MIT"] | null | null | null | sample_1/admin.py | JordanEC/django-rest-and-angular | 571eb2a7d966c2b7f1f520a764420207387709cd | ["MIT"] | null | null | null |
from django.contrib import admin
from sample_1.models import Author, Book
# Register your models here.
admin.site.register(Author)
admin.site.register(Book)
| 18.5 | 32 | 0.797297 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.189189 |
740167bc86931d3454e29781523554e25235165c | 2,217 | py | Python | irs_lqr/dynamical_system.py | lujieyang/irs_lqr | bc9cade6a3bb2fa2d76bdd5fe453030a7b28700f | [
"MIT"
]
| 6 | 2021-11-20T19:05:06.000Z | 2022-01-31T00:10:41.000Z | irs_lqr/dynamical_system.py | lujieyang/irs_lqr | bc9cade6a3bb2fa2d76bdd5fe453030a7b28700f | [
"MIT"
]
| 10 | 2021-07-24T19:50:36.000Z | 2021-11-20T19:06:40.000Z | irs_lqr/dynamical_system.py | lujieyang/irs_lqr | bc9cade6a3bb2fa2d76bdd5fe453030a7b28700f | [
"MIT"
]
| 1 | 2021-12-15T22:09:31.000Z | 2021-12-15T22:09:31.000Z | class DynamicalSystem:
def __init__(self):
"""
Base virtual dynamical systems class.
        Any dynamics used as an input to the solvers must inherit from this class.
TODO(terry-suh): Consider using ABC?
"""
self.h = 0
self.dim_x = 0
self.dim_u = 0
def dynamics(self, x, u):
"""
Numerical expression for dynamics in state-space form.
args:
- x_t (np.array, dim: n): state
- u_t (np.array, dim: m): action
        returns:
        - x_{t+1} (np.array, dim: n): next state.
"""
raise NotImplementedError("This class is virtual.")
def dynamics_batch(self, x, u):
"""
Special batch implementation of dynamics that allows
parallel evaluation. If the dynamics cannot be easily
batched, replace this method with a for loop over the
dynamics function.
args:
- x_t (np.array, dim: B x n): batched state
- u_t (np.array, dim: B x m): batched action
        returns:
- x_{t+1} (np.array, dim: B x n): next batched state.
"""
raise NotImplementedError("This class is virtual.")
def jacobian_xu(self, x, u):
"""
Numerical jacobian of dynamics w.r.t. x and u.
Should be a fat matrix with the first n columns corresponding
to dfdx, and the last m columns corresponding to dfdu.
args:
- x_t (np.array, dim: n): state
- u_t (np.array, dim: m): action
returns:
- J_xu (np.array, dim: n x (n + m)): df/dxu
"""
raise NotImplementedError("This class is virtual.")
def jacobian_xu_batch(self, x, u):
"""
Batch jacobian of dynamics w.r.t. x and u that allows for faster
parallelized computations. If Jacobian computation cannot be
easily batched, replace this method with a for loop over the
jacobian_xu function.
args:
- x_t (np.array, dim: B x n): state
- u_t (np.array, dim: B x m): action
returns:
- J_xu (np.array, dim: B x n x (n + m)): batched Jacobians.
"""
raise NotImplementedError("This class is virtual.")
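# --- Example (editor's sketch, not part of the original module) ---
# A minimal concrete subclass showing how the virtual interface above is
# meant to be implemented; the discrete-time single-integrator dynamics are
# illustrative only, and the NumPy import is added here for the example.
import numpy as np
class SingleIntegrator(DynamicalSystem):
    def __init__(self, h=0.1):
        super().__init__()
        self.h = h
        self.dim_x = 2
        self.dim_u = 2
    def dynamics(self, x, u):
        # x_{t+1} = x_t + h * u_t
        return x + self.h * u
    def dynamics_batch(self, x, u):
        # NumPy broadcasting makes the single-sample rule work on B x n input.
        return x + self.h * u
    def jacobian_xu(self, x, u):
        # [df/dx, df/du] = [I, h * I], an n x (n + m) "fat" matrix.
        return np.hstack([np.eye(self.dim_x), self.h * np.eye(self.dim_u)])
    def jacobian_xu_batch(self, x, u):
        # The Jacobian is state-independent here, so tile it across the batch.
        return np.tile(self.jacobian_xu(x, u)[None, :, :], (x.shape[0], 1, 1))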
| 33.089552 | 76 | 0.57014 | 2,216 | 0.999549 | 0 | 0 | 0 | 0 | 0 | 0 | 1,761 | 0.794317 |
740438f708cbfe346a44823a28bc4994e0b1022b | 196 | py | Python | uuid1/models.py | charlesDavid009/Uuid | 7553843c0112e7f0e248cd5692eccca72553e720 | [
"MIT"
]
| 1 | 2021-05-24T18:52:53.000Z | 2021-05-24T18:52:53.000Z | uuid1/models.py | charlesDavid009/Uuid | 7553843c0112e7f0e248cd5692eccca72553e720 | [
"MIT"
]
| null | null | null | uuid1/models.py | charlesDavid009/Uuid | 7553843c0112e7f0e248cd5692eccca72553e720 | [
"MIT"
]
| null | null | null | from django.db import models
import uuid
# Create your models here.
class Uuid(models.Model):
uuids = models.CharField(max_length=225)
created = models.DateTimeField(auto_now_add=True)
| 19.6 | 53 | 0.760204 | 124 | 0.632653 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.132653 |
74043e623f8ec052206750dba77f648adab74816 | 1,660 | py | Python | comedy-org.py | qedpi/file-organizer | 07bdc6fd8e752aae03078529dfefe4838f4f4c4e | [
"MIT"
]
| 2 | 2020-11-23T16:32:11.000Z | 2021-05-14T00:35:16.000Z | comedy-org.py | qedpi/file-organizer | 07bdc6fd8e752aae03078529dfefe4838f4f4c4e | [
"MIT"
]
| null | null | null | comedy-org.py | qedpi/file-organizer | 07bdc6fd8e752aae03078529dfefe4838f4f4c4e | [
"MIT"
]
| null | null | null | import os
from shutil import move, rmtree
from itertools import chain
from genres import genre_of, DOWNLOAD_DIR, DST_DIRS, VIDEO_EXTENSIONS
print(genre_of)
print(f'moving files from {DOWNLOAD_DIR}: \n'
# f'with keywords: {COMEDY_TAGS} \n'
# f'with extensions: {VIDEO_EXTENSIONS} \n'
)
files_moved = 0
for file_name in os.listdir(DOWNLOAD_DIR):
name_parts = file_name.split('.')
# check single & double word combos todo: generalize to more than 2
two_words = ('.'.join(name_parts[i:i + 2]) for i in range(len(name_parts) - 1))
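    # e.g. 'The.Office.S01' -> parts ['The', 'Office', 'S01'] and two-word
    # combos ['The.Office', 'Office.S01'] (show name is illustrative only)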
file_path = os.path.join(DOWNLOAD_DIR, file_name)
if os.path.isfile(file_path): # skip files
continue
# print(file_name, os.access(file_path, os.W_OK)) # todo: doesn't check if it's locked!
# move files to corresponding dir
try:
# print(f'Try {file_name}')
# with open(os.path.join(DOWNLOAD_DIR, file_name), 'r') as f:
if any((keyword := part) in genre_of for part in chain(name_parts, two_words)):
dst_dir = DST_DIRS[genre_of[keyword]]
            # move any video files out of the directory
            for maybe_vid in os.listdir(file_path):
                if any(maybe_vid.endswith(ext) for ext in VIDEO_EXTENSIONS):
                    move(os.path.join(file_path, maybe_vid), dst_dir)
                    print(f'moved {maybe_vid} to {dst_dir}')
            # remove the now-emptied source directory
            rmtree(file_path)
            files_moved += 1
except PermissionError:
print('permission denied')
continue # skip this file if locked (eg by qTorrent)
print(f'{files_moved = }') | 37.727273 | 92 | 0.638554 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 596 | 0.359036 |
7405313149ad1d453f1faa1ff9ea0b0aec012d46 | 3,572 | py | Python | keeper/v2api/projects.py | lsst-sqre/ltd-keeper | c658bcce726764e7416a8a386b418e83912b0f32 | [
"Apache-2.0",
"MIT"
]
| 5 | 2016-05-16T18:46:26.000Z | 2019-07-08T15:16:41.000Z | keeper/v2api/projects.py | lsst-sqre/ltd-keeper | c658bcce726764e7416a8a386b418e83912b0f32 | [
"Apache-2.0",
"MIT"
]
| 46 | 2016-02-18T16:54:36.000Z | 2022-03-25T19:43:45.000Z | keeper/v2api/projects.py | lsst-sqre/ltd-keeper | c658bcce726764e7416a8a386b418e83912b0f32 | [
"Apache-2.0",
"MIT"
]
| 4 | 2016-08-20T23:10:07.000Z | 2022-03-25T19:52:09.000Z | """Handlers for project-related APIs."""
from __future__ import annotations
from typing import Dict, Tuple
from flask import request
from flask_accept import accept_fallback
from keeper.auth import token_auth
from keeper.logutils import log_route
from keeper.models import Organization, Product, db
from keeper.services.createproduct import create_product
from keeper.services.updateproduct import update_product
from keeper.taskrunner import launch_tasks
from keeper.v2api import v2api
from ._models import (
ProjectPatchRequest,
ProjectPostRequest,
ProjectResponse,
ProjectsResponse,
)
from ._urls import url_for_project
__all__ = ["get_projects", "get_project", "create_project", "update_project"]
@v2api.route("/orgs/<org>/projects", methods=["GET"])
@accept_fallback
@log_route()
@token_auth.login_required
def get_projects(org: str) -> str:
products = (
Product.query.join(
Organization, Organization.id == Product.organization_id
)
.filter(Organization.slug == org)
.all()
)
response = ProjectsResponse.from_products(products)
return response.json()
@v2api.route("/orgs/<org>/projects/<slug>", methods=["GET"])
@accept_fallback
@log_route()
@token_auth.login_required
def get_project(org: str, slug: str) -> str:
product = (
Product.query.join(
Organization, Organization.id == Product.organization_id
)
.filter(Organization.slug == org)
.filter(Product.slug == slug)
.first_or_404()
)
response = ProjectResponse.from_product(product)
return response.json()
@v2api.route("/orgs/<org>/projects", methods=["POST"])
@accept_fallback
@log_route()
@token_auth.login_required
def create_project(org: str) -> Tuple[str, int, Dict[str, str]]:
request_data = ProjectPostRequest.parse_obj(request.json)
organization = Organization.query.filter(
Organization.slug == org
).first_or_404()
try:
product, default_edition = create_product(
org=organization,
slug=request_data.slug,
doc_repo=request_data.source_repo_url,
title=request_data.title,
            default_edition_mode=request_data.default_edition_mode,
)
except Exception:
db.session.rollback()
raise
task = launch_tasks()
response = ProjectResponse.from_product(product, task=task)
project_url = url_for_project(product)
return response.json(), 201, {"Location": project_url}
@v2api.route("/orgs/<org>/projects/<slug>", methods=["PATCH"])
@accept_fallback
@log_route()
@token_auth.login_required
def update_project(org: str, slug: str) -> Tuple[str, int, Dict[str, str]]:
request_data = ProjectPatchRequest.parse_obj(request.json)
product = (
Product.query.join(
Organization, Organization.id == Product.organization_id
)
.filter(Organization.slug == org)
.filter(Product.slug == slug)
.first_or_404()
)
try:
product = update_product(
product=product,
new_doc_repo=request_data.source_repo_url,
new_title=request_data.title,
)
except Exception:
db.session.rollback()
raise
task = launch_tasks()
response = ProjectResponse.from_product(product, task=task)
project_url = url_for_project(product)
return response.json(), 200, {"Location": project_url}
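# --- Endpoint summary (editor's gloss, not part of the original module) ---
# GET    /orgs/<org>/projects          -> list an organization's projects
# GET    /orgs/<org>/projects/<slug>   -> fetch a single project
# POST   /orgs/<org>/projects          -> create a project (201 + Location)
# PATCH  /orgs/<org>/projects/<slug>   -> update a project (200 + Location)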
| 28.349206 | 77 | 0.676932 | 0 | 0 | 0 | 0 | 2,837 | 0.794233 | 0 | 0 | 244 | 0.068309 |
7405685566287cf4e859fe85e98cb0c021c50b86 | 2,237 | py | Python | plugins/markdown_extensions/katex.py | raabrp/rraabblog | a1d47ede918f4838ac3bbcff9ef4e7c67f851c32 | [
"MIT"
]
| null | null | null | plugins/markdown_extensions/katex.py | raabrp/rraabblog | a1d47ede918f4838ac3bbcff9ef4e7c67f851c32 | [
"MIT"
]
| null | null | null | plugins/markdown_extensions/katex.py | raabrp/rraabblog | a1d47ede918f4838ac3bbcff9ef4e7c67f851c32 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
Allow server-side KaTeX rendering for Markdown through node.js.
The markdown extension adds regex patterns for `$` and `$$` in the source `.md`
file, and applies KaTeX to the intermediate text with a `python-bond` call to
node.js.
Requires:
* node
* npm
* katex (npm install katex)
* python-bond (pip3 install --user python-bond)
KaTeX: https://github.com/Khan/KaTeX
"""
import markdown
from markdown.util import etree
import bond
JS = bond.make_bond('JavaScript')
JS.eval_block(
r'''
katex = require('katex');
function render(s, is_block) {
return katex.renderToString(s, {
displayMode: is_block,
throwOnError: false
});
}
'''
)
katex = JS.callable('render')
memoise = {}
###############################################################################
class MathPattern(markdown.inlinepatterns.Pattern):
def __init__(self, tag, pattern):
super().__init__(pattern)
self.tag = tag
def handleMatch(self, m):
global memoise
node = markdown.util.etree.Element(self.tag)
node.set('class', 'math')
orig = m.group('math')
entry = (orig, self.tag == 'div')
if entry in memoise:
result = memoise[entry]
else:
result = katex(orig, self.tag == 'div')
memoise[entry] = result
node.text = result
return node
class Katex(markdown.Extension):
def extendMarkdown(self, md, md_globals):
# Regex to detect math delimiters
math_inline_regex = \
r'(?P<prefix>\$)(?P<math>.+?)(?P<suffix>(?<!\s)\2)'
math_block_regex = \
r'(?P<prefix>\$\$|\\begin\{(.+?)\}|\\\[)(?P<math>.+?)(?P<suffix>\2|\\end\{\3\}|\\\])'
# Process math before escapes are processed since escape processing
# will interfere. The order in which the displayed and inlined math
# is registered below matters
md.inlinePatterns.add(
'math_block',
MathPattern('div', math_block_regex),
'<escape'
)
md.inlinePatterns.add(
'math_inline',
MathPattern('span', math_inline_regex),
'<escape'
)
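# --- Usage sketch (editor's addition, not part of the original plugin) ---
# Registering the extension with python-markdown; this assumes node plus the
# katex npm package are installed, since the bond bridge above runs at import.
if __name__ == '__main__':
    md = markdown.Markdown(extensions=[Katex()])
    print(md.convert(r'Euler: $e^{i\pi} + 1 = 0$'))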
| 24.053763 | 97 | 0.565042 | 1,396 | 0.62405 | 0 | 0 | 0 | 0 | 0 | 0 | 1,104 | 0.493518 |
7407dcd338f0c898023c04aaa216c45c15fae02b | 247 | py | Python | 1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/05_While-Loop/00.Book-Exercises-7.1-Complex-Loops-02-Numbers-N-to-1.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | [
"MIT"
]
| null | null | null | 1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/05_While-Loop/00.Book-Exercises-7.1-Complex-Loops-02-Numbers-N-to-1.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | [
"MIT"
]
| null | null | null | 1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/05_While-Loop/00.Book-Exercises-7.1-Complex-Loops-02-Numbers-N-to-1.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | [
"MIT"
]
| null | null | null | # Numbers from N down to 1 in reverse order
# Write a program that prints the numbers from n down to 1 in reverse order (step -1).
# For example, if n = 100, the result will be: 100, 99, 98, …, 3, 2, 1.
n = int(input())
for i in range(n, 0, -1):
print(i) | 30.875 | 85 | 0.631579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 297 | 0.831933 |
7408452dfdbed6f56d0e2243de45d1e90b286cdf | 1,490 | py | Python | simpleclassroom/urls.py | cbetheridge/simpleclassroom | 9e99262ffdb4efc0e27566855866dfc26244bf26 | [
"MIT"
]
| null | null | null | simpleclassroom/urls.py | cbetheridge/simpleclassroom | 9e99262ffdb4efc0e27566855866dfc26244bf26 | [
"MIT"
]
| null | null | null | simpleclassroom/urls.py | cbetheridge/simpleclassroom | 9e99262ffdb4efc0e27566855866dfc26244bf26 | [
"MIT"
]
| null | null | null | """simpleclassroom URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from views import views
from views import io
urlpatterns = [
url(r'^$', views.display_classrooms, name='index'),
url(r'^classrooms/', views.display_classrooms, name='classrooms'),
url(r'^student_list/', views.display_students, name='student list'),
url(r'^student_details/', views.display_student_details, name='student view'),
url(r'^io/add_class/', io.add_classroom, name='add class'),
url(r'^io/del_class/', io.delete_classroom, name='delete class'),
url(r'^io/add_student/', io.add_student, name='add student'),
url(r'^io/del_student/', io.delete_student, name='delete student'),
url(r'^io/enroll/', io.enroll_student, name='enroll student'),
url(r'^io/unenroll/', io.unenroll_student, name='unenroll student'),
url(r'^admin/', admin.site.urls),
]
| 42.571429 | 80 | 0.713423 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 951 | 0.638255 |
cd9d087613da3991818c9538bda9aacfcb7b2302 | 714 | py | Python | Mechanize/checkWords.py | rpvnwnkl/DailyWriter | 7934d636219e46b9875f31e327bf52993e15e517 | [
"MIT"
]
| null | null | null | Mechanize/checkWords.py | rpvnwnkl/DailyWriter | 7934d636219e46b9875f31e327bf52993e15e517 | [
"MIT"
]
| null | null | null | Mechanize/checkWords.py | rpvnwnkl/DailyWriter | 7934d636219e46b9875f31e327bf52993e15e517 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
import sys, logging
import re
import mechanize
logger = logging.getLogger('mechanize')
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.DEBUG)
br = mechanize.Browser()
br.set_debug_http(True)
br.set_debug_responses(True)
br.set_debug_redirects(True)
br.open("https://750words.com/auth")
email = open('email.txt', 'r').read().strip()
password = open('password.txt', 'r').read().strip()
br.select_form(nr=0)
# use the credentials read from disk rather than hard-coded placeholders
br['person[email_address]'] = email
br['person[password]'] = password
response2 = br.submit()
print br.title()
print response2.geturl()
print response2.info()
print response2.read()
print br.select_form(nr=0)
print br['entry[body]']
| 23.032258 | 52 | 0.752101 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 173 | 0.242297 |
cd9ec9af338573f552a9119ee09d53bff7f7cebd | 4,939 | py | Python | simplereg/data_writer.py | gift-surg/SimpleReg | 9d9a774f5b7823c2256844c9d0260395604fb396 | [
"BSD-3-Clause"
]
| 18 | 2017-11-10T15:09:41.000Z | 2021-01-12T07:48:46.000Z | simplereg/data_writer.py | gift-surg/SimpleReg | 9d9a774f5b7823c2256844c9d0260395604fb396 | [
"BSD-3-Clause"
]
| null | null | null | simplereg/data_writer.py | gift-surg/SimpleReg | 9d9a774f5b7823c2256844c9d0260395604fb396 | [
"BSD-3-Clause"
]
| 3 | 2019-03-20T14:13:03.000Z | 2020-01-15T01:32:51.000Z | # \file DataWriter.py
#  \brief Class to write data
#
# \author Michael Ebner ([email protected])
# \date June 2018
import os
import sys
import numpy as np
import nibabel as nib
import SimpleITK as sitk
import pysitk.python_helper as ph
import pysitk.simple_itk_helper as sitkh
from simplereg.definitions import ALLOWED_IMAGES
from simplereg.definitions import ALLOWED_LANDMARKS
from simplereg.definitions import ALLOWED_TRANSFORMS
from simplereg.definitions import ALLOWED_TRANSFORMS_DISPLACEMENTS
class DataWriter(object):
@staticmethod
def write_image(image_sitk, path_to_file, verbose=0):
extension = ph.strip_filename_extension(path_to_file)[1]
if extension not in ALLOWED_IMAGES:
raise IOError("Image file extension must be of type %s " %
", or ".join(ALLOWED_IMAGES))
if isinstance(image_sitk, sitk.Image):
sitkh.write_nifti_image_sitk(
image_sitk=image_sitk,
path_to_file=path_to_file,
verbose=verbose)
else:
sitkh.write_nifti_image_itk(
image_itk=image_sitk,
path_to_file=path_to_file,
verbose=verbose)
@staticmethod
def write_vector_image(vector_image_sitk, path_to_file, verbose=0):
extension = ph.strip_filename_extension(path_to_file)[1]
if extension not in ALLOWED_IMAGES:
raise IOError("Image file extension must be of type %s " %
", or ".join(ALLOWED_IMAGES))
if isinstance(vector_image_sitk, sitk.Image):
sitkh.write_sitk_vector_image(
vector_image_sitk,
path_to_file,
verbose=verbose,
)
else:
raise ValueError("Only implemented for SimpleITK images")
@staticmethod
def write_landmarks(landmarks_nda, path_to_file, verbose=0):
extension = ph.strip_filename_extension(path_to_file)[1]
if extension not in ALLOWED_LANDMARKS:
raise IOError("Landmark file extension must be of type %s " %
", or ".join(ALLOWED_LANDMARKS))
ph.write_array_to_file(
path_to_file, landmarks_nda, delimiter=" ", access_mode="w",
verbose=verbose)
@staticmethod
def write_transform(transform_sitk, path_to_file, verbose=0):
extension = ph.strip_filename_extension(path_to_file)[1]
if extension not in ALLOWED_TRANSFORMS and \
extension not in ALLOWED_TRANSFORMS_DISPLACEMENTS:
raise IOError("Transform file extension must be of type "
"%s (transformation) or %s (displacements)" % (
", ".join(ALLOWED_TRANSFORMS),
", ".join(ALLOWED_TRANSFORMS_DISPLACEMENTS)))
if extension in ALLOWED_TRANSFORMS:
if isinstance(transform_sitk, sitk.Image):
raise IOError("Cannot convert displacement field (%s) to "
"transform (%s)" % (
", ".join(ALLOWED_TRANSFORMS_DISPLACEMENTS),
", ".join(ALLOWED_TRANSFORMS),
))
if isinstance(transform_sitk, sitk.Transform):
ph.create_directory(os.path.dirname(path_to_file))
sitk.WriteTransform(transform_sitk, path_to_file)
if verbose:
ph.print_info("Transform written to '%s'" % path_to_file)
elif isinstance(transform_sitk, np.ndarray):
ph.write_array_to_file(
path_to_file,
transform_sitk,
delimiter=" ",
access_mode="w",
verbose=verbose)
else:
raise IOError("Transform must be of type "
"sitk.Transform or np.ndarray")
else:
if isinstance(transform_sitk, sitk.Transform):
raise IOError("Cannot convert transform (%s) to "
"displacement field (%s)" % (
", ".join(ALLOWED_TRANSFORMS),
", ".join(ALLOWED_TRANSFORMS_DISPLACEMENTS),
))
elif isinstance(transform_sitk, sitk.Image):
sitkh.write_nifti_image_sitk(
image_sitk=transform_sitk,
path_to_file=path_to_file,
verbose=verbose)
elif isinstance(transform_sitk, nib.nifti1.Nifti1Image):
ph.create_directory(os.path.dirname(path_to_file))
nib.save(transform_sitk, path_to_file)
else:
raise IOError("Transform must be of type "
"sitk.Image or nibabel.nifti1.Nifti1Image")
| 40.154472 | 78 | 0.578255 | 4,423 | 0.895525 | 0 | 0 | 4,370 | 0.884794 | 0 | 0 | 707 | 0.143146 |
cd9f005c2266883ac0727dd4f11b65c0cc61acbf | 3,881 | py | Python | configman/datetime_util.py | peterbe/configman | 724d80b25a0ebbb2e75ad69e92a6611494cd68b4 | [
"BSD-3-Clause"
]
| null | null | null | configman/datetime_util.py | peterbe/configman | 724d80b25a0ebbb2e75ad69e92a6611494cd68b4 | [
"BSD-3-Clause"
]
| null | null | null | configman/datetime_util.py | peterbe/configman | 724d80b25a0ebbb2e75ad69e92a6611494cd68b4 | [
"BSD-3-Clause"
]
| null | null | null | # ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is configman
#
# The Initial Developer of the Original Code is
# Mozilla Foundation
# Portions created by the Initial Developer are Copyright (C) 2011
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# K Lars Lohn, [email protected]
# Peter Bengtsson, [email protected]
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import datetime
def datetime_from_ISO_string(s):
""" Take an ISO date string of the form YYYY-MM-DDTHH:MM:SS.S
and convert it into an instance of datetime.datetime
"""
try:
return datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S')
except ValueError:
try:
return datetime.datetime.strptime(s, '%Y-%m-%d')
except ValueError:
return datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%f')
def date_from_ISO_string(s):
""" Take an ISO date string of the form YYYY-MM-DD
and convert it into an instance of datetime.date
"""
return datetime.datetime.strptime(s, '%Y-%m-%d').date()
def datetime_to_ISO_string(aDate):
""" Take a datetime and convert to string of the form YYYY-MM-DDTHH:MM:SS.S
"""
return aDate.isoformat()
def date_to_ISO_string(aDate):
""" Take a datetime and convert to string of the form YYYY-MM-DD
"""
return aDate.strftime('%Y-%m-%d')
def hours_str_to_timedelta(hoursAsString):
return datetime.timedelta(hours=int(hoursAsString))
def timedelta_to_seconds(td):
return td.days * 24 * 60 * 60 + td.seconds
def str_to_timedelta(input_str):
""" a string conversion function for timedelta for strings in the format
DD:HH:MM:SS
"""
days, hours, minutes, seconds = 0, 0, 0, 0
details = input_str.split(':')
if len(details) >= 4:
days = int(details[-4])
if len(details) >= 3:
hours = int(details[-3])
if len(details) >= 2:
minutes = int(details[-2])
if len(details) >= 1:
seconds = int(details[-1])
return datetime.timedelta(days=days,
hours=hours,
minutes=minutes,
seconds=seconds)
def timedelta_to_str(aTimedelta):
""" a conversion function for time deltas to string in the form
DD:HH:MM:SS
"""
days = aTimedelta.days
temp_seconds = aTimedelta.seconds
    hours = temp_seconds // 3600  # floor division: true division breaks on Python 3
    minutes = (temp_seconds - hours * 3600) // 60
    seconds = temp_seconds - hours * 3600 - minutes * 60
return '%d:%d:%d:%d' % (days, hours, minutes, seconds)
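if __name__ == '__main__':
    # Editor's sketch: round-trip check of the converters defined above.
    td = str_to_timedelta('1:2:30:15')
    assert timedelta_to_str(td) == '1:2:30:15'
    assert timedelta_to_seconds(td) == ((24 + 2) * 60 + 30) * 60 + 15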
| 34.651786 | 79 | 0.67328 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,337 | 0.602164 |
cda50a569978706c9bec0db5233be29def4df294 | 2,952 | py | Python | src/tests/voluntario/test_api.py | Akijunior/Atados | 255c9c9137e48aa82fdea63f9d6d65a3720c3f92 | [
"MIT"
]
| null | null | null | src/tests/voluntario/test_api.py | Akijunior/Atados | 255c9c9137e48aa82fdea63f9d6d65a3720c3f92 | [
"MIT"
]
| null | null | null | src/tests/voluntario/test_api.py | Akijunior/Atados | 255c9c9137e48aa82fdea63f9d6d65a3720c3f92 | [
"MIT"
]
| null | null | null | from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase, APIClient
from rest_framework.views import status
from voluntario.models import Voluntario
class BaseViewTest(APITestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(username='3',
password='12test12',
email='[email protected]')
self.token = Token.objects.get(user=self.user)
self.voluntario = {
"nome": 'Eduardo',
"sobrenome": 'Costa',
"cidade": 'Teresina',
"bairro": 'Dirceu',
}
self.client = APIClient()
def test_usuario_nao_pode_cadastrar_novos_voluntarios_sem_estar_logado(self):
response = self.client.post('/api/v1/voluntarios/', self.voluntario)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_usuario_nao_pode_atualizar_dados_de_um_voluntario_sem_estar_logado(self):
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
self.client.login(username='3', password='12test12')
response = self.client.post('/api/v1/voluntarios/', self.voluntario)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.client.logout()
voluntario = Voluntario.objects.first()
sobrenome = {"sobrenome": "Fonseca"}
response = self.client.put(f'/api/v1/voluntarios/{voluntario.pk}/', sobrenome)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_usuario_tem_acesso_a_listagem_de_voluntarios_mesmo_sem_estar_logado(self):
response = self.client.get('/api/v1/voluntarios/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_usuario_pode_cadastrar_novos_voluntarios_se_estiver_logado(self):
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
self.client.login(username='3', password='12test12')
response = self.client.post('/api/v1/voluntarios/', self.voluntario)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_usuario_pode_atualizar_dados_de_um_voluntario_se_estiver_logado(self):
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
self.client.login(username='3', password='12test12')
response = self.client.post('/api/v1/voluntarios/', self.voluntario)
voluntario = Voluntario.objects.first()
sobrenome = {"sobrenome": "Fonseca"}
response = self.client.put(f'/api/v1/voluntarios/{voluntario.pk}/', sobrenome)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def tearDown(self):
User.objects.all().delete()
self.client.logout()
| 42.782609 | 87 | 0.686992 | 2,671 | 0.90481 | 0 | 0 | 0 | 0 | 0 | 0 | 389 | 0.131775 |
cda678a982b6a913bc586a56ae657d42e29745b5 | 508 | py | Python | main.py | ki-ljl/Scaffold-Federated-Learning | 12e04217df3af2c326ea90fef6cff47beaaec485 | [
"MIT"
]
| 9 | 2022-03-02T13:58:29.000Z | 2022-03-31T06:45:40.000Z | main.py | ki-ljl/Scaffold-Federated-Learning | 12e04217df3af2c326ea90fef6cff47beaaec485 | [
"MIT"
]
| null | null | null | main.py | ki-ljl/Scaffold-Federated-Learning | 12e04217df3af2c326ea90fef6cff47beaaec485 | [
"MIT"
]
| null | null | null | # -*- coding:utf-8 -*-
"""
@Time:2022/05/05 12:57
@Author:KI
@File:main.py
@Motto:Hungry And Humble
"""
from data_process import clients_wind
from server import Scaffold
def main():
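    # Hyper-parameters (editor's gloss): K clients, C participation fraction,
    # E local epochs, B batch size, r communication rounds.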
K, C, E, B, r = 10, 0.5, 30, 50, 10
input_dim = 28
lr = 0.08
options = {'K': K, 'C': C, 'E': E, 'B': B, 'r': r, 'clients': clients_wind,
'input_dim': input_dim, 'lr': lr}
scaffold = Scaffold(options)
scaffold.server()
scaffold.global_test()
if __name__ == '__main__':
main()
| 20.32 | 79 | 0.582677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 159 | 0.30814 |
cda77d54e2daf57e2851f4a131c3a29a3329d0d5 | 464 | py | Python | reddit/files/setup_cassandra.py | mitodl/reddit-formula | 68c597f5391b8bf960de3d701225de2fc45d04e4 | [
"BSD-3-Clause"
]
| null | null | null | reddit/files/setup_cassandra.py | mitodl/reddit-formula | 68c597f5391b8bf960de3d701225de2fc45d04e4 | [
"BSD-3-Clause"
]
| 4 | 2017-09-29T18:34:06.000Z | 2018-05-23T19:07:17.000Z | reddit/files/setup_cassandra.py | mitodl/reddit-formula | 68c597f5391b8bf960de3d701225de2fc45d04e4 | [
"BSD-3-Clause"
]
| null | null | null | #!/usr/bin/env python
import pycassa
sys = pycassa.SystemManager("cassandra.service.consul:9160")
if "reddit" not in sys.list_keyspaces():
print "creating keyspace 'reddit'"
sys.create_keyspace("reddit", "SimpleStrategy", {"replication_factor": "3"})
print "done"
if "permacache" not in sys.get_keyspace_column_families("reddit"):
print "creating column family 'permacache'"
sys.create_column_family("reddit", "permacache")
print "done"
| 30.933333 | 80 | 0.726293 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 224 | 0.482759 |
cda9eb07b967369dac4f17bb21af05cd80acf296 | 1,472 | py | Python | Data Analysis with Pandas Intermediate/Pandas Internals_ Series-145.py | vipmunot/Data-Analysis-using-Python | 34586d8cbbc336508c4a7a68abe14944f1096252 | [
"MIT"
]
| null | null | null | Data Analysis with Pandas Intermediate/Pandas Internals_ Series-145.py | vipmunot/Data-Analysis-using-Python | 34586d8cbbc336508c4a7a68abe14944f1096252 | [
"MIT"
]
| null | null | null | Data Analysis with Pandas Intermediate/Pandas Internals_ Series-145.py | vipmunot/Data-Analysis-using-Python | 34586d8cbbc336508c4a7a68abe14944f1096252 | [
"MIT"
]
| null | null | null | ## 1. Data Structures ##
import pandas as pd
fandango = pd.read_csv('fandango_score_comparison.csv')
print(fandango.head(2))
## 2. Integer Indexes ##
fandango = pd.read_csv('fandango_score_comparison.csv')
series_film = fandango['FILM']
series_rt = fandango['RottenTomatoes']
print(series_film[:5])
print(series_rt[:5])
## 3. Custom Indexes ##
# Import the Series object from pandas
from pandas import Series
film_names = series_film.values
rt_scores = series_rt.values
series_custom=pd.Series(index = film_names, data = rt_scores)
## 4. Integer Index Preservation ##
series_custom = Series(rt_scores , index=film_names)
series_custom[['Minions (2015)', 'Leviathan (2014)']]
fiveten = series_custom[5:10]
print(fiveten)
## 5. Reindexing ##
original_index = series_custom.index.tolist()
sorted_by_index = series_custom.reindex(index = sorted(original_index))
## 6. Sorting ##
sc2 = series_custom.sort_index()
sc3 = series_custom.sort_values()
print(sc2.head(10))
print(sc3.head(10))
## 7. Transforming Columns With Vectorized Operations ##
series_normalized = series_custom/20
## 8. Comparing and Filtering ##
criteria_one = series_custom > 50
criteria_two = series_custom < 75
both_criteria = series_custom[criteria_one & criteria_two]
## 9. Alignment ##
rt_critics = Series(fandango['RottenTomatoes'].values, index=fandango['FILM'])
rt_users = Series(fandango['RottenTomatoes_User'].values, index=fandango['FILM'])
rt_mean =(rt_users + rt_critics) / 2 | 25.37931 | 81 | 0.754076 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 452 | 0.307065 |
cdad55a9ce2a49755ae4b294972c1f2e61c115f9 | 425 | py | Python | problem/01000~09999/01058/1058.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
]
| 1 | 2019-04-19T16:37:44.000Z | 2019-04-19T16:37:44.000Z | problem/01000~09999/01058/1058.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
]
| 1 | 2019-04-20T11:42:44.000Z | 2019-04-20T11:42:44.000Z | problem/01000~09999/01058/1058.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
]
| 3 | 2019-04-19T16:37:47.000Z | 2021-10-25T00:45:00.000Z | n=int(input())
link=[[100]*n for i in range(n)]
for i in range(n):
x=input()
for j in range(n):
if x[j]=='Y': link[i][j]=1
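# Floyd-Warshall over the friendship graph: link[j][k] becomes the shortest
# "friend distance" between j and k (editor's comment).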
for i in range(n):
for j in range(n):
for k in range(n):
if link[j][i]+link[i][k]<link[j][k]:
link[j][k]=link[j][i]+link[i][k]
link[k][j]=link[j][k]
ans=0
for i in range(n):
t=0
for j in range(n):
if link[i][j]<=2 and i!=j: t+=1
ans=max(t,ans)
print(ans) | 20.238095 | 42 | 0.52 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.007059 |
cdae861a30ba2bb3bd941147a704995ddbb3e7b8 | 4,894 | py | Python | pytest_ipynb/plugin.py | kevingerman/pytest-ipynb | 04b5fed4f280983f64254b01e3b24b7733e99224 | [
"BSD-3-Clause"
]
| 104 | 2015-01-21T16:10:46.000Z | 2021-05-31T06:53:35.000Z | pytest_ipynb/plugin.py | kevingerman/pytest-ipynb | 04b5fed4f280983f64254b01e3b24b7733e99224 | [
"BSD-3-Clause"
]
| 26 | 2015-04-09T04:12:48.000Z | 2018-12-22T18:41:33.000Z | pytest_ipynb/plugin.py | kevingerman/pytest-ipynb | 04b5fed4f280983f64254b01e3b24b7733e99224 | [
"BSD-3-Clause"
]
| 21 | 2015-02-06T10:07:28.000Z | 2021-04-19T21:31:48.000Z | import pytest
import os,sys
import warnings
try:
from exceptions import Exception, TypeError, ImportError
except:
pass
from runipy.notebook_runner import NotebookRunner
wrapped_stdin = sys.stdin
sys.stdin = sys.__stdin__
sys.stdin = wrapped_stdin
try:
from Queue import Empty
except:
from queue import Empty
# code copied from runipy main.py
with warnings.catch_warnings():
try:
from IPython.utils.shimmodule import ShimWarning
warnings.filterwarnings('error', '', ShimWarning)
except ImportError:
class ShimWarning(Warning):
"""Warning issued by iPython 4.x regarding deprecated API."""
pass
try:
# IPython 3
from IPython.nbformat import reads, NBFormatError
except ShimWarning:
# IPython 4
from nbformat import reads, NBFormatError
except ImportError:
# IPython 2
from IPython.nbformat.current import reads, NBFormatError
finally:
warnings.resetwarnings()
class IPyNbException(Exception):
""" custom exception for error reporting. """
def pytest_collect_file(path, parent):
if path.fnmatch("test*.ipynb"):
return IPyNbFile(path, parent)
def get_cell_description(cell_input):
"""Gets cell description
Cell description is the first line of a cell,
    in one of these formats:
* single line docstring
* single line comment
* function definition
"""
try:
first_line = cell_input.split("\n")[0]
if first_line.startswith(('"', '#', 'def')):
return first_line.replace('"','').replace("#",'').replace('def ', '').replace("_", " ").strip()
except:
pass
return "no description"
class IPyNbFile(pytest.File):
def collect(self):
with self.fspath.open() as f:
payload = f.read()
self.notebook_folder = self.fspath.dirname
try:
# Ipython 3
self.nb = reads(payload, 3)
except (TypeError, NBFormatError):
# Ipython 2
self.nb = reads(payload, 'json')
self.runner = NotebookRunner(self.nb)
cell_num = 1
for cell in self.runner.iter_code_cells():
yield IPyNbCell(self.name, self, cell_num, cell)
cell_num += 1
def setup(self):
self.fixture_cell = None
def teardown(self):
self.runner.shutdown_kernel()
class IPyNbCell(pytest.Item):
def __init__(self, name, parent, cell_num, cell):
super(IPyNbCell, self).__init__(name, parent)
self.cell_num = cell_num
self.cell = cell
self.cell_description = get_cell_description(self.cell.input)
def runtest(self):
self.parent.runner.km.restart_kernel()
if self.parent.notebook_folder:
self.parent.runner.kc.execute(
"""import os
os.chdir("%s")""" % self.parent.notebook_folder)
if ("SKIPCI" in self.cell_description) and ("CI" in os.environ):
pass
else:
if self.parent.fixture_cell:
self.parent.runner.kc.execute(self.parent.fixture_cell.input, allow_stdin=False)
msg_id = self.parent.runner.kc.execute(self.cell.input, allow_stdin=False)
if self.cell_description.lower().startswith("fixture") or self.cell_description.lower().startswith("setup"):
self.parent.fixture_cell = self.cell
timeout = 20
while True:
try:
msg = self.parent.runner.kc.get_shell_msg(block=True, timeout=timeout)
if msg.get("parent_header", None) and msg["parent_header"].get("msg_id", None) == msg_id:
break
except Empty:
raise IPyNbException(self.cell_num, self.cell_description,
self.cell.input,
"Timeout of %d seconds exceeded executing cell: %s" % (timeout, self.cell.input))
reply = msg['content']
if reply['status'] == 'error':
raise IPyNbException(self.cell_num, self.cell_description, self.cell.input, '\n'.join(reply['traceback']))
def repr_failure(self, excinfo):
""" called when self.runtest() raises an exception. """
if isinstance(excinfo.value, IPyNbException):
return "\n".join([
"Notebook execution failed",
"Cell %d: %s\n\n"
"Input:\n%s\n\n"
"Traceback:\n%s\n" % excinfo.value.args,
])
else:
return "pytest plugin exception: %s" % str(excinfo.value)
def _makeid(self):
description = self.parent.nodeid + "::" + self.name
description += "::" + "cell %d" % self.cell_num
if self.cell_description:
description += ", " + self.cell_description
return description
| 32.845638 | 122 | 0.599918 | 3,379 | 0.690437 | 534 | 0.109113 | 0 | 0 | 0 | 0 | 842 | 0.172047 |
cdaf411884a90226584098d678014eeaecc826d5 | 90 | py | Python | test.py | stpwin/fb-groub-sentiment | 0c0c860bf7b405e7cc4a7fac5a337b751dddb910 | [
"bzip2-1.0.6"
]
| null | null | null | test.py | stpwin/fb-groub-sentiment | 0c0c860bf7b405e7cc4a7fac5a337b751dddb910 | [
"bzip2-1.0.6"
]
| 7 | 2019-08-25T01:53:14.000Z | 2022-03-11T23:57:08.000Z | test.py | stpwin/fb-groub-sentiment | 0c0c860bf7b405e7cc4a7fac5a337b751dddb910 | [
"bzip2-1.0.6"
]
| null | null | null | items = {"a": True, "b": False}
b = [v for v in items.values() if v]  # keys were unused; `== True` simplified to a truth test
print(b)
| 15 | 46 | 0.533333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.066667 |
cdb4d928fe81a97440ce0c56dea2317a5512f228 | 2,258 | py | Python | setup.py | vbrinnel/ztflc | b1ccab67e5e0e385d8406f179c1ad0c346afa129 | [
"Apache-2.0"
]
| 1 | 2020-04-07T14:36:49.000Z | 2020-04-07T14:36:49.000Z | setup.py | vbrinnel/ztflc | b1ccab67e5e0e385d8406f179c1ad0c346afa129 | [
"Apache-2.0"
]
| 3 | 2020-01-16T18:25:46.000Z | 2021-05-19T20:51:52.000Z | setup.py | vbrinnel/ztflc | b1ccab67e5e0e385d8406f179c1ad0c346afa129 | [
"Apache-2.0"
]
| 1 | 2021-03-31T19:47:33.000Z | 2021-03-31T19:47:33.000Z | #! /usr/bin/env python
#
DESCRIPTION = "ztflc: Force photometry lc fitter"
LONG_DESCRIPTION = """ Force photometry lc fitter"""
DISTNAME = "ztflc"
AUTHOR = "Mickael Rigault"
MAINTAINER = "Mickael Rigault"
MAINTAINER_EMAIL = "[email protected]"
URL = "https://github.com/MickaelRigault/ztflc/"
LICENSE = "BSD (3-clause)"
DOWNLOAD_URL = "https://github.com/MickaelRigault/ztflc/tarball/0.2"
VERSION = "0.2.3"
try:
from setuptools import setup, find_packages
_has_setuptools = True
except ImportError:
from distutils.core import setup
_has_setuptools = False
def check_dependencies():
install_requires = []
# Just make sure dependencies exist, I haven't rigorously
# tested what the minimal versions that will work are
# (help on that would be awesome)
try:
import ztfquery
except ImportError:
install_requires.append("ztfquery")
try:
import pandas
except ImportError:
install_requires.append("pandas")
return install_requires
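# Editor's note: the import probes above append a requirement only when a
# package is missing locally, so pre-installed versions are left untouched.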
if __name__ == "__main__":
install_requires = check_dependencies()
if _has_setuptools:
packages = find_packages()
print(packages)
else:
# This should be updated if new submodules are added
packages = ["ztflc"]
setup(
name=DISTNAME,
author=AUTHOR,
author_email=MAINTAINER_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
install_requires=install_requires,
scripts=["bin/forcephoto.py"],
packages=packages,
include_package_data=True,
# package_data={'pysedm': ['data/*.*']},
classifiers=[
"Intended Audience :: Science/Research",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"License :: OSI Approved :: BSD License",
"Topic :: Scientific/Engineering :: Astronomy",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS",
],
)
| 26.880952 | 68 | 0.637733 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 847 | 0.375111 |
cdb4fa7248d3772a040373832306d9f403d71783 | 311 | py | Python | ibm1.py | thovo/ibm1 | df00eca56827e294642d503972f29ab3e139caf0 | [
"MIT"
]
| null | null | null | ibm1.py | thovo/ibm1 | df00eca56827e294642d503972f29ab3e139caf0 | [
"MIT"
]
| null | null | null | ibm1.py | thovo/ibm1 | df00eca56827e294642d503972f29ab3e139caf0 | [
"MIT"
]
| null | null | null | #!/usr/bin/python
__author__ = 'thovo'
import sys
def ibm1():
#Check for arguments
args_length = len(sys.argv)
print "The number of arguments: "+str(args_length)
    for i in range(args_length):
        print "The argument number " + str(i) + " is " + str(sys.argv[i])
ibm1() | 18.294118 | 73 | 0.59164 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 99 | 0.318328 |
cdb6e8d6090040ad0dc31239d89e99153192bd44 | 1,927 | py | Python | wordfinds/raw.py | GrandMoff100/WordFinds | 4b56532f399178e5f2b18b246084644061c5bfc2 | [
"MIT"
]
| 2 | 2021-05-22T19:19:56.000Z | 2021-08-16T11:34:11.000Z | wordfinds/raw.py | GrandMoff100/WordFinds | 4b56532f399178e5f2b18b246084644061c5bfc2 | [
"MIT"
]
| null | null | null | wordfinds/raw.py | GrandMoff100/WordFinds | 4b56532f399178e5f2b18b246084644061c5bfc2 | [
"MIT"
]
| 1 | 2021-11-09T13:55:43.000Z | 2021-11-09T13:55:43.000Z | import random
from .utils import filler
from .array import RawWordFindArray, WordArray
class RawWordFind(RawWordFindArray):
def __init__(self, size, wordbank):
super().__init__(size, wordbank)
for word in wordbank.words:
if not self.valid_word_length(word):
                raise ValueError(
                    'The word "{}" cannot fit into a {}x{} array. '.format(word, *self.size) +
                    'Try using fewer or shorter words.')
total = sum([len(word) for word in wordbank.words])
w,h = size
if total > w * h:
            raise ValueError(f'Cannot fit {total} characters in a {w}x{h} array. Try using fewer or shorter words.')
self.letter_array = self.generate()
def directions(self, x, y, word):
return [
(x-len(word), y-len(word)),
(x-len(word), y),
(x-len(word),y+len(word)),
(x, y-len(word)),
(x, y),
(x,y+len(word)),
(x+len(word), y-len(word)),
(x+len(word), y),
(x+len(word),y+len(word)),
]
def find_spots(self, grid, word):
w, h = self.size
for x in range(w):
for y in range(h):
for end in self.directions(x,y,word):
try:
grid.place_word(word,(x,y),end,True)
yield (x,y), end
except (ValueError, IndexError):
pass
def generate(self):
w, h = self.size
grid = WordArray([['.' for _ in range(w)] for _ in range(h)])
for word in self.wordbank.words:
start, end = random.choice(list(self.find_spots(grid, word)))
grid.place_word(word, start, end)
return WordArray([[x if x != '.' else filler() for x in row] for row in grid])
class WordFind(RawWordFind):
pass
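# --- Usage sketch (editor's addition, not part of the original module) ---
# Assumes a wordbank object exposing a .words list, as the constructor above
# expects; WordBank below is a hypothetical stand-in for the package's own.
# find = WordFind((12, 12), WordBank(['python', 'puzzle', 'grid']))
# print(find.letter_array)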
| 32.661017 | 120 | 0.511676 | 1,833 | 0.95122 | 379 | 0.196679 | 0 | 0 | 0 | 0 | 181 | 0.093928 |
cdb7047c417fa314c5e02129e1672265cc3318ba | 2,969 | py | Python | src/neon/frontend/aeon_shim.py | MUTTERSCHIFF/ngraph-neon | 762e8ea639cdc671311ee4929bd1ee8cdf83e8bb | [
"Apache-2.0"
]
| 13 | 2018-03-17T00:27:18.000Z | 2020-06-18T01:36:34.000Z | src/neon/frontend/aeon_shim.py | MUTTERSCHIFF/ngraph-neon | 762e8ea639cdc671311ee4929bd1ee8cdf83e8bb | [
"Apache-2.0"
]
| 20 | 2018-03-17T14:49:04.000Z | 2018-04-19T17:47:38.000Z | src/neon/frontend/aeon_shim.py | NervanaSystems/ngraph-neon | 8988ab90ee81c8b219ea5c374702e56d7f383302 | [
"Apache-2.0"
]
| 5 | 2018-03-23T22:47:17.000Z | 2020-10-21T16:15:02.000Z | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import print_function, absolute_import
import logging
from builtins import object
import neon as ng
logger = logging.getLogger(__name__)
try:
from aeon import DataLoader
except ImportError:
msg = "\n".join(["",
"Unable to import Aeon module.",
"Please see installation instructions at:",
"*****************",
"https://github.com/NervanaSystems/aeon/blob/rc1-master/README.md",
"*****************",
""])
logger.error(msg)
raise ImportError(msg)
NAME_MAP = {"channels": "C",
"height": "H",
"width": "W",
"frames": "D"}
"""Converts aeon axis names to canonical ngraph axis types."""
class AeonDataLoader(object):
def __init__(self, config, *args, **kwargs):
self.config = config
self._dataloader = DataLoader(config)
self.ndata = self._dataloader.ndata
if self.ndata < self._dataloader.batch_size:
raise ValueError('Number of examples is smaller than the batch size')
def __next__(self):
bufs = next(self._dataloader)
bufs_dict = dict((key, val) for key, val in bufs)
if 'label' in bufs_dict:
bufs_dict['label'] = bufs_dict['label'].flatten()
return bufs_dict
def __iter__(self):
return self
def make_placeholders(self, include_iteration=False):
placeholders = {}
batch_axis = ng.make_axis(self._dataloader.batch_size, name="N")
for placeholder_name, axis_info in self._dataloader.axes_info:
p_axes = ng.make_axes([batch_axis])
for nm, sz in axis_info:
if placeholder_name == 'label':
continue
if nm in NAME_MAP:
nm = NAME_MAP[nm]
p_axes += ng.make_axis(name=nm, length=sz)
placeholders[placeholder_name] = ng.placeholder(p_axes)
if include_iteration:
placeholders['iteration'] = ng.placeholder(axes=())
return placeholders
def reset(self):
self._dataloader.reset()
    def ndata(self):
        return self._dataloader.ndata
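# --- Usage sketch (editor's addition, not part of the original module) ---
# `aeon_config` is a hypothetical aeon configuration dict; see the aeon README
# linked above for the real schema.
# loader = AeonDataLoader(aeon_config)
# placeholders = loader.make_placeholders(include_iteration=True)
# for batch in loader:
#     pass  # batch is a dict of buffers keyed by placeholder name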
| 35.345238 | 88 | 0.583025 | 1,440 | 0.485012 | 0 | 0 | 0 | 0 | 0 | 0 | 1,118 | 0.376558 |