id | content
480276
|
import os
from pynq import Overlay, DefaultIP
from utils.paths import lib_path
class DNAMemoryMap(DefaultIP):
def __init__(self, description):
super().__init__(description)
bindto = ['ABR:user:zynq_AXI_DNA:1.0']
# found in ip_dict above as 'type'
# Creating 'getter' for mmio registers
@property
def reg0(self):
return self.read(0x00)
@property
def reg1(self):
return self.read(0x04)
@property
def reg2(self):
return self.read(0x08)
@property
def reg3(self):
return self.read(0x0C)
class IDDriver(object):
def __init__(self):
bitstream = os.path.join(lib_path, 'dna_extractor.bit')
# Load bitstream
ol = Overlay(bitstream)
ol.download()
# Read both 32-bit registers containing the DNA
ID0 = ol.zynq_AXI_DNA_0.reg0
ID1 = ol.zynq_AXI_DNA_0.reg1
# Concatenate into a single 64b value
self.id_bytes = (ID0.to_bytes(4, 'big') + ID1.to_bytes(4, 'big'))
self.id_int = int.from_bytes(self.id_bytes, 'big')
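# Hedged usage sketch: assumes this runs on a PYNQ board where lib_path
# actually contains 'dna_extractor.bit'; it will not work on a plain
# development machine without that overlay.
if __name__ == '__main__':
    driver = IDDriver()
    print('Device DNA: 0x{:016x}'.format(driver.id_int))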
|
480284
|
import abc
from inspect import getargspec
from functools import wraps
from copy import deepcopy
from lizard.util.pretty_print import list_string_value
from lizard.bitutil import copy_bits
class HardwareModel(object):
__metaclass__ = abc.ABCMeta
def __init__(s, interface):
s.interface = interface
s.model_methods = {}
s.ready_methods = {}
s.state_element_names = []
s.saved_state = {}
s.state_reset_values = {}
s.anonymous_state_counter = 0
def reset(s):
s._pre_cycle_wrapper()
s._reset()
for name, state_element in s._state_elements():
if isinstance(state_element, HardwareModel):
state_element.reset()
else:
setattr(s, name, deepcopy(s.state_reset_values[name]))
s.cycle()
@abc.abstractmethod
def line_trace(s):
pass
@abc.abstractmethod
def _pre_call(s, method, call_index):
pass
@abc.abstractmethod
def _post_call(s, method, call_index):
pass
def _pre_cycle_wrapper(s):
s.back_prop_tracking = []
s._pre_cycle()
def _post_cycle_wrapper(s):
s._post_cycle()
@abc.abstractmethod
def _pre_cycle(s):
pass
@abc.abstractmethod
def _post_cycle(s):
pass
def _reset(s):
pass
def state(s, **kwargs):
for k, v in kwargs.iteritems():
if hasattr(s, k):
raise ValueError('Member already present: {}'.format(k))
else:
setattr(s, k, v)
s.state_element_names.append(k)
# save the initial value if not a hardware model
if not isinstance(v, HardwareModel):
s.state_reset_values[k] = deepcopy(v)
def register_state(s, hardware_model):
if not isinstance(hardware_model, HardwareModel):
raise ValueError('Must be HardwareModel')
name = '_anonymous_state_member_{}'.format(s.anonymous_state_counter)
s.anonymous_state_counter += 1
s.state(**{name: hardware_model})
def _state_elements(s):
return [(name, getattr(s, name)) for name in s.state_element_names]
def snapshot_model_state(s):
s.extra_model_state = s._snapshot_model_state()
for name, state_element in s._state_elements():
if isinstance(state_element, HardwareModel):
state_element.snapshot_model_state()
else:
s.saved_state[name] = deepcopy(state_element)
def restore_model_state(s):
s._pre_cycle_wrapper()
s._restore_model_state(s.extra_model_state)
for name, state_element in s._state_elements():
if isinstance(state_element, HardwareModel):
state_element.restore_model_state()
else:
setattr(s, name, deepcopy(s.saved_state[name]))
def _snapshot_model_state(s):
pass
def _restore_model_state(s, state):
pass
@staticmethod
def validate(func):
@wraps(func)
def validate_init(s, *args, **kwargs):
result = func(s, *args, **kwargs)
if len(s.model_methods) != len(s.interface.methods):
raise ValueError('Not all methods from interface implemented')
# Ensure every method that is supposed to have a ready signal has one
for name, method in s.interface.methods.iteritems():
if method.rdy and name not in s.ready_methods:
raise ValueError(
'Method has rdy signal but no ready method: {}'.format(name))
return result
return validate_init
def _check_method(s, name, method_dict):
if name in method_dict:
raise ValueError('Duplicate function: {}'.format(name))
if name not in s.interface.methods:
raise ValueError('Method not in interface: {}'.format(name))
def ready_method(s, func):
s.ready_method_explicit(func.__name__, func, True)
def ready_method_explicit(s, name, func_like, validate_args):
s._check_method(name, s.ready_methods)
if validate_args:
arg_spec = getargspec(func_like)
if len(
arg_spec.args
) != 1 or arg_spec.varargs is not None or arg_spec.keywords is not None:
raise ValueError(
'Ready function must take exactly 1 argument (call_index)')
s.ready_methods[name] = func_like
def model_method(s, func):
s.model_method_explicit(func.__name__, func, True)
def model_method_explicit(s, name, func_like, validate_args):
s._check_method(name, s.model_methods)
method = s.interface.methods[name]
if validate_args:
arg_spec = getargspec(func_like)
for arg in arg_spec.args:
if not isinstance(arg, str):
raise ValueError('Illegal nested argument in function: {}'.format(name))
if arg not in method.args:
raise ValueError('Argument not found: {} in function: {}'.format(
arg, name))
if len(arg_spec.args) != len(method.args):
raise ValueError(
'Incorrect number of arguments in function: {}'.format(name))
if arg_spec.varargs is not None:
raise ValueError('Function must have no *args: {}'.format(name))
if arg_spec.keywords is not None:
raise ValueError('Function must have no *kwargs: {}'.format(name))
s.model_methods[name] = func_like
def wrapper(_call_index, *args, **kwargs):
method = s.interface.methods[name]
s._pre_call(method, _call_index)
# check to see if the method is ready
if name in s.ready_methods and not s.ready_methods[name](_call_index):
result = not_ready_instance
else:
# call this method
result = func_like(*args, **kwargs)
if isinstance(result, NotReady):
raise ValueError(
'Method may not return not ready -- use ready_method decorator')
s._post_call(method, _call_index)
# interpret the result
if not isinstance(result, NotReady):
# Normalize an empty return into a length-0 result
if result is None:
result = Result()
returned_size = 1
if isinstance(result, Result):
returned_size = result._size
if len(method.rets) != returned_size:
raise ValueError(
'CL function {}: incorrect return size: expected: {} actual: {}'
.format(name, len(method.rets), returned_size))
if isinstance(
result,
Result) and set(method.rets.keys()) != set(result._data.keys()):
raise ValueError(
'CL function {}: incorrect return names: expected: {} actual: {}'
.format(name, list_string_value(method.rets.keys()),
list_string_value(result._data.keys())))
# Normalize a singleton return into a result
if not isinstance(result, Result):
result = Result(**{method.rets.keys()[0]: result})
# Log the result in the back_prop_tracking
# This is used to ensure that when a future method is called
# the result to a prior method doesn't mutate
s._back_prop_track(method.name, _call_index, result)
# Freeze the result so if the caller preserves it across multiple cycles it doesn't change
return s._freeze_result(result)
if hasattr(s, name):
raise ValueError('Internal wrapper error')
setattr(s, name, MethodDispatcher(name, wrapper, s.ready_methods))
def cycle(s):
s._post_cycle_wrapper()
s._pre_cycle_wrapper()
@staticmethod
def _freeze_result(result):
result = HardwareModel._freeze_result_to_dict(result)
if isinstance(result, NotReady):
return result
else:
return Result(**result)
@staticmethod
def _freeze_result_to_dict(result):
if isinstance(result, NotReady):
return result
frozen = {}
for name, value in result._data.iteritems():
frozen[name] = copy_bits(value)
return frozen
def _back_prop_track(s, method_name, call_index, result):
s.back_prop_tracking.append(
(method_name, call_index, result, s._freeze_result_to_dict(result)))
for method_name, call_index, result, frozen in s.back_prop_tracking:
if s._freeze_result_to_dict(result) != frozen:
raise ValueError(
'Illegal backpropagation detected on method: {}[{}]'.format(
method_name, call_index))
class NotReady(object):
_created = False
def __init__(s):
if NotReady._created:
raise ValueError('singleton')
else:
NotReady._created = True
not_ready_instance = NotReady()
class Result(object):
def __init__(s, **kwargs):
s._size = len(kwargs)
s._data = {}
for k, v in kwargs.items():
s._data[k] = v
setattr(s, k, v)
def copy(s):
temp = {}
for k, v in s._data.iteritems():
temp[k] = copy_bits(v)
return Result(**temp)
def __str__(s):
return '[{}]'.format(', '.join(
'{}={}'.format(k, v) for k, v in s._data.iteritems()))
class MethodDispatcher(object):
def __init__(s, name, wrapper_func, ready_dict):
s.name = name
s.wrapper_func = wrapper_func
s.ready_dict = ready_dict
def rdy(s, call_index=None):
return s.ready_dict[s.name](call_index)
def __getitem__(s, key):
def index_dispatch(*args, **kwargs):
return s.wrapper_func(key, *args, **kwargs)
return index_dispatch
def __call__(s, *args, **kwargs):
return s[None](*args, **kwargs)
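# Hedged usage sketch of the Result container above (Python 2 style, matching
# the iteritems() calls in this module; key order in __str__ is not guaranteed):
# r = Result(sum=5, carry=0)
# print r.sum, r.carry, r._size   # -> 5 0 2
# print r                         # -> e.g. [sum=5, carry=0]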
|
480306
|
from datetime import datetime
from dateutil.tz import tzutc
{
'total_results': 3,
'page': 1,
'per_page': 3,
'results': [
{
'id': 886482,
'login': 'niconoe',
'spam': False,
'suspended': False,
'created_at': datetime(2018, 4, 23, 17, 11, 14, tzinfo=tzutc()),
'login_autocomplete': 'niconoe',
'login_exact': 'niconoe',
'name': '<NAME>',
'name_autocomplete': '<NAME>',
'orcid': 'https://orcid.org/0000-0002-9503-4750',
'icon': 'https://static.inaturalist.org/attachments/users/icons/886482/thumb.jpg?1529671435',
'observations_count': 928,
'identifications_count': 118,
'journal_posts_count': 0,
'activity_count': 1046,
'species_count': 396,
'universal_search_rank': 928,
'roles': [],
'site_id': 1,
'icon_url': 'https://static.inaturalist.org/attachments/users/icons/886482/medium.jpg?1529671435',
},
{
'id': 2909130,
'login': 'niconoerbo',
'spam': False,
'suspended': False,
'created_at': datetime(2020, 5, 5, 6, 28, 32, tzinfo=tzutc()),
'login_autocomplete': 'niconoerbo',
'login_exact': 'niconoerbo',
'name': None,
'name_autocomplete': None,
'orcid': None,
'icon': None,
'observations_count': 6,
'identifications_count': 0,
'journal_posts_count': 0,
'activity_count': 6,
'universal_search_rank': 6,
'roles': [],
'site_id': 1,
'icon_url': None,
},
{
'id': 3358478,
'login': 'nicono',
'spam': False,
'suspended': False,
'created_at': datetime(2020, 7, 20, 18, 7, 44, tzinfo=tzutc()),
'login_autocomplete': 'nicono',
'login_exact': 'nicono',
'name': None,
'name_autocomplete': None,
'orcid': None,
'icon': None,
'observations_count': 0,
'identifications_count': 0,
'journal_posts_count': 0,
'activity_count': 0,
'universal_search_rank': 0,
'roles': [],
'site_id': 1,
'icon_url': None,
},
],
}
|
480322
|
from typing import List
import json
import e2e.Libs.Ristretto.Ristretto as Ristretto
from e2e.Libs.BLS import PrivateKey, PublicKey
from e2e.Classes.Transactions.Data import Data
from e2e.Classes.Consensus.Verification import SignedVerification
from e2e.Classes.Consensus.VerificationPacket import VerificationPacket
from e2e.Classes.Consensus.MeritRemoval import SignedMeritRemoval
from e2e.Classes.Consensus.SpamFilter import SpamFilter
from e2e.Vectors.Generation.PrototypeChain import PrototypeChain
proto: PrototypeChain = PrototypeChain(1, False)
edPrivKey: Ristretto.SigningKey = Ristretto.SigningKey(b'\0' * 32)
edPubKey: bytes = edPrivKey.get_verifying_key()
blsPrivKey: PrivateKey = PrivateKey(0)
blsPubKey: PublicKey = blsPrivKey.toPublicKey()
spamFilter: SpamFilter = SpamFilter(5)
#Create the initial Data and two competing Datas.
datas: List[Data] = [Data(bytes(32), edPubKey)]
datas.append(Data(datas[0].hash, b"Initial Data."))
datas.append(Data(datas[0].hash, b"Second Data."))
for data in datas:
data.sign(edPrivKey)
data.beat(spamFilter)
#Create Verifications for all 3.
verifs: List[SignedVerification] = []
packets: List[VerificationPacket] = []
for data in datas:
verifs.append(SignedVerification(data.hash, 0))
verifs[-1].sign(0, blsPrivKey)
packets.append(VerificationPacket(data.hash, [0]))
#Create a MeritRemoval out of the conflicting Verifications.
mr: SignedMeritRemoval = SignedMeritRemoval(verifs[1], verifs[2])
#Generate a Block containing the MeritRemoval.
proto.add(packets=packets)
with open("e2e/Vectors/Consensus/MeritRemoval/VerifyCompeting.json", "w") as vectors:
vectors.write(json.dumps({
"blockchain": proto.toJSON(),
"datas": [data.toJSON() for data in datas],
"verification": verifs[0].toSignedJSON(),
"removal": mr.toSignedJSON()
}))
|
480354
|
import math
def solve():
C, G = [int(i) for i in raw_input().split()]
gadgets = [[int(i) for i in raw_input().split()] for _ in xrange(G)]
sol = [[0] * (C+1) for _ in xrange(G+1)]
for i in xrange(1, G+1):
for j in xrange(1, C+1):
new_j = j - gadgets[i-1][0]
if new_j < 0:
sol[i][j] = sol[i-1][j]
continue
sol[i][j] = max(sol[i-1][j], gadgets[i-1][1]+sol[i-1][new_j])
print sol[-1][-1]
def main():
N = int(raw_input())
for _ in xrange(N):
solve()
if __name__ == "__main__":
main()
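# Hedged worked example (Code Jam-style stdin; the numbers are made up):
# for the input
#   1
#   10 3
#   3 7
#   4 8
#   5 9
# capacity C=10 and gadgets (cost, value) = (3,7), (4,8), (5,9); the best
# selection is (4,8)+(5,9) with total cost 9 <= 10, so the program prints 17.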
|
480396
|
from sys import argv
import json
vcfFile = open(argv[1], 'r')
outputExt = '.smf'
output = ""
for line in vcfFile.readlines():
if line[0] == '#':
continue
mutationDict = {}
components = line.split('\t')
mutationDict['chromosome'] = components[0]
mutationDict['pos'] = components[1]
mutationDict['ref'] = components[3]
mutationDict['found'] = components[4]
indexOfLastEquals = components[7].rfind('=')
mutTypeLong = components[7][(indexOfLastEquals + 1):]
mutTypeShort = mutTypeLong[:3]
mutationDict['type'] = mutTypeShort
if mutTypeShort == 'DEL':
mutationDict['ref'] = mutationDict['ref'][1:] # Drop the anchor base that was NOT deleted; pos is bumped by 1 below to compensate
mutationDict['found'] = '-'
mutationDict['pos'] = str(int(mutationDict['pos']) + 1)
indexOfFirstEquals = components[7].find('=')
indexOfSemicolon = components[7].find(';')
alleleFreq = float(components[7][indexOfFirstEquals + 1:indexOfSemicolon])
homoHeteroStr = ''
if alleleFreq == 1:
homoHeteroStr = '*'
else:
homoHeteroStr = '&'
mutationDict['alleles'] = homoHeteroStr
output += json.dumps(mutationDict).replace('\\r','') + '\n'
outputFile = open(argv[2] + outputExt , 'w')
outputFile.write(output)
outputFile.close()
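# Hedged usage sketch (script and file names are placeholders): run as
#   python vcf_to_smf.py input.vcf output_basename
# which writes output_basename.smf with one JSON object per non-header VCF line,
# carrying chromosome, pos, ref, found, type and the '*' / '&' allele flag.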
|
480480
|
from django.test import override_settings
from django.test.testcases import TestCase
from mock import patch
from robber.expect import expect
from document_cloud.factories import DocumentCrawlerFactory
class DocumentCrawlerTestCase(TestCase):
@override_settings(S3_BUCKET_CRAWLER_LOG='crawler_logs_bucket')
@patch('document_cloud.models.document_crawler.aws')
def test_log_url(self, aws_mock):
aws_mock.s3.generate_presigned_url.return_value = 'presigned_log_url'
document_crawler = DocumentCrawlerFactory(
id=1,
source_type='SUMMARY_REPORTS_COPA',
log_key='summary_reports_copa/2019-02-27-100142.txt'
)
expect(document_crawler.log_url).to.eq('presigned_log_url')
expect(aws_mock.s3.generate_presigned_url).to.be.called_with(
ClientMethod='get_object',
Params={
'Bucket': 'crawler_logs_bucket',
'Key': 'summary_reports_copa/2019-02-27-100142.txt',
},
ExpiresIn=604800
)
def test_log_url_with_empty_log_key(self):
document_crawler = DocumentCrawlerFactory(
id=1,
source_type='SUMMARY_REPORTS_COPA',
)
expect(document_crawler.log_url).to.be.none()
|
480541
|
import paginate
from flask import render_template, jsonify, flash, redirect, url_for, request
from flask.views import MethodView
from flask_login import login_required
from paginate_sqlalchemy import SqlalchemyOrmWrapper
from social_flask_sqlalchemy.models import UserSocialAuth
from sqlalchemy import func, desc
from werkzeug.datastructures import MultiDict
from nanumlectures.common import is_admin_role, paginate_link_tag
from nanumlectures.database import db_session
from nanumlectures.models import Roundtable, Lecture, Library, VoteBooks, User, OTandParty
class LectureListView(MethodView):
decorators = [is_admin_role, login_required]
def get(self):
current_page = request.args.get("page", 1, type=int)
search_option = request.args.get("search_option", '')
search_word = request.args.get("search_word", '')
if search_option and search_option in ['lecture_name', 'session_time', 'lecture_title']:
search_column = getattr(Lecture, search_option)
if search_option == "roundtable_num" and search_word and not search_word.isdecimal():
flash('The roundtable number must be numeric.')
search_word = None
page_url = url_for("admin.lecturer")
if search_word:
page_url = url_for("admin.lecturer", search_option=search_option, search_word=search_word)
page_url = str(page_url) + "&page=$page"
else:
page_url = str(page_url) + "?page=$page"
items_per_page = 10
records = db_session.query(Lecture).outerjoin(Roundtable).outerjoin(Library)
if search_word:
if search_option == 'roundtable_num':
records = records.filter(Roundtable.roundtable_num == search_word)
elif search_option == 'library_name':
records = records.filter(Library.library_name.ilike('%{}%'.format(search_word)))
elif search_option == 'session_time':
records = records.filter(search_column == int(search_word) - 1)
else:
records = records.filter(search_column.ilike('%{}%'.format(search_word)))
records = records.order_by(desc(Lecture.roundtable_id))
total_cnt = records.count()
paginator = paginate.Page(records, current_page, page_url=page_url,
items_per_page=items_per_page,
wrapper_class=SqlalchemyOrmWrapper)
return render_template("admin/lecturer.html", paginator=paginator,
paginate_link_tag=paginate_link_tag,
page_url=page_url, items_per_page=items_per_page,
total_cnt=total_cnt, page=current_page)
class LectureFindView(MethodView):
decorators = [is_admin_role, login_required]
def get(self):
roundtable_id = request.args.get("roundtable_id", -1, type=int)
library_id = request.args.get("library_id", -1, type=int)
lecture_time = request.args.get("lecture_time", -1, type=int)
if roundtable_id > -1 and lecture_time > -1:
lecture_record = db_session.query(Lecture).filter(
Lecture.roundtable_id == roundtable_id,
Lecture.library_id == library_id,
Lecture.session_time == lecture_time).first()
if not lecture_record:
return jsonify(success=True, msg='No registered lecture found.')
return jsonify(success=False,
lecture_title=lecture_record.lecture_title)
class LectureRegView(MethodView):
decorators = [is_admin_role, login_required]
def get(self):
# Fetch the currently active roundtable and pass it to the template.
# Assumes lecturer info never needs to be registered for a past roundtable.
main_roundtable = db_session.query(Roundtable).filter(Roundtable.is_active == True).first()
# If no roundtable has been registered yet, redirect to the roundtable registration page.
if not main_roundtable:
flash('You must register a roundtable before registering a lecturer.')
return redirect(url_for('admin.roundtable_reg'))
return render_template("admin/lecturer_reg.html", latest_roundtable=main_roundtable)
def post(self):
req_json = MultiDict(request.get_json())
user_record = db_session.query(User).outerjoin(UserSocialAuth).filter(
UserSocialAuth.uid == req_json.get("lectureID")).first()
lecturer_obj = Lecture()
lecturer_obj.roundtable_id = req_json.get('roundtable_id')
lecturer_obj.library_id = req_json.get('library').get('id')
lecturer_obj.session_time = req_json.get('lectureTime')
lecturer_obj.lecture_title = req_json.get('lectureTitle')
lecturer_obj.lecture_summary = req_json.get('lectureSummary')
lecturer_obj.lecture_expected_audience = req_json.get('lectureExpectedAudience')
lecturer_obj.lecture_user_id = (user_record and user_record.id) or None
lecturer_obj.lecture_name = req_json.get('lectureName')
lecturer_obj.lecture_belong = req_json.get('lectureBelong')
lecturer_obj.lecture_hp = req_json.get('lectureHp')
lecturer_obj.lecture_email = req_json.get('lectureEmail')
lecturer_obj.lecture_public_yn = req_json.get('lectureUserYn', type=bool)
db_session.add(lecturer_obj)
return jsonify(success=True)
class LectureEditView(MethodView):
decorators = [is_admin_role, login_required]
def get(self, lecturer):
return render_template("admin/lecturer_edit.html", lecturer=lecturer)
def post(self, lecturer):
req_json = MultiDict(request.get_json())
user_record = db_session.query(User).outerjoin(UserSocialAuth).filter(
UserSocialAuth.uid == req_json.get("lectureID")).first()
lecturer.library_id = req_json.get('library').get('id')
lecturer.lecture_time = req_json.get('lectureTime')
lecturer.lecture_title = req_json.get('lectureTitle')
lecturer.lecture_summary = req_json.get('lectureSummary')
lecturer.lecture_expected_audience = req_json.get('lectureExpectedAudience')
lecturer.lecture_user_id = (user_record and user_record.id) or None
lecturer.lecture_name = req_json.get('lectureName')
lecturer.lecture_belong = req_json.get('lectureBelong')
lecturer.lecture_hp = req_json.get('lectureHp')
lecturer.lecture_email = req_json.get('lectureEmail')
lecturer.lecture_public_yn = req_json.get('lecturePublicYn', type=bool)
return jsonify(success=True)
class LectureDetailView(MethodView):
decorators = [is_admin_role, login_required]
def get(self, lecturer):
vote_books = db_session.query(VoteBooks).filter(
VoteBooks.roundtable_id == lecturer.roundtable_id,
VoteBooks.lecture_user_id == lecturer.lecture_user_id).first()
return render_template("admin/lecturer_view.html", lecturer=lecturer, vote_books=vote_books)
def delete(self, lecturer):
vote_books = db_session.query(VoteBooks).filter(
VoteBooks.roundtable_id == lecturer.roundtable_id,
VoteBooks.lecture_user_id == lecturer.lecture_user_id).first()
if vote_books:
db_session.delete(vote_books)
db_session.query(OTandParty).filter(
OTandParty.party_user_id == lecturer.lecture_user_id,
OTandParty.roundtable_id == lecturer.roundtable_id).delete()
db_session.delete(lecturer)
return jsonify(success=True)
|
480568
|
from django.urls import path
from django.contrib.auth.views import LogoutView
from . import views
urlpatterns = [
# Paths inside UI
path('login/', views.login_view, name='login'),
path('logout/', LogoutView.as_view(), name='logout'),
path('register/', views.register_user, name='register'),
path('profile/', views.profile_view, name='profile')
]
|
480569
|
from imax import transforms
from jax import numpy as jnp
from utils import compare
# test_img_rgba = jnp.asarray(Image.open('./test.jpeg').convert('RGBA')).astype('uint8')
# test_img_rgb = jnp.asarray(Image.open('./test.jpeg').convert('RGB')).astype('uint8')
rgb_img = jnp.array(
[[[255, 0, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]], dtype='uint8')
rgba_img = jnp.array(
[[[255, 0, 0, 255],
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]
], dtype='uint8')
def test_data():
inputs = None
targets = rgb_img
outputs = rgba_img[:, :, :3]
compare(inputs, targets, outputs)
def test_horizontal_flip():
inputs = rgba_img
targets = rgba_img[:, ::-1]
outputs = transforms.apply_transform(rgba_img,
transforms.flip(horizontal=True,
vertical=False))
compare(inputs, targets, outputs)
def test_vertical_flip():
inputs = rgba_img
targets = rgba_img[::-1]
outputs = transforms.apply_transform(rgba_img,
transforms.flip(horizontal=False,
vertical=True))
compare(inputs, targets, outputs)
def test_rotate90():
inputs = rgba_img
targets = jnp.rot90(rgba_img, k=2)
outputs = transforms.apply_transform(rgba_img, transforms.rotate90(n=2))
compare(inputs, targets, outputs)
def test_scale():
factor = 3
inputs = jnp.pad(jnp.ones((1, 1, 4), dtype='uint8') * 255,
((1, 1), (1, 1), (0, 0)), constant_values=0)
targets = jnp.ones_like(rgba_img)*255
outputs = transforms.apply_transform(
jnp.pad(jnp.ones((1, 1, 4), dtype='uint8'),
((1, 1), (1, 1), (0, 0)), constant_values=0)*255,
transforms.scale_3d(scale_x=factor, scale_y=factor), bilinear=False)
compare(inputs, targets, outputs)
|
480583
|
from sklearn import tree
from sklearn.externals.six import StringIO
import pydotplus
X = [[0, 0], [1, 1]]
Y = [0, 1]
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X, Y)  # train the model
result = clf.predict([[0.2, 0.1]])  # predict ([0])
result_p = clf.predict_proba([[0.8, 0.1]])  # predict probability ([[ 0. 1.]])
print(result)
print(result_p)
dot_data = StringIO()
tree.export_graphviz(clf, out_file=dot_data)
print(dot_data.getvalue())
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_pdf("dtree.pdf")
|
480586
|
import textwrap
from hamcrest import assert_that
from hamcrest import ends_with
from allure_commons_test.report import has_test_case
from allure_commons_test.label import has_package
def test_path_with_dots_test(allured_testdir):
path = allured_testdir.testdir.mkpydir("path.with.dots")
path.join("test_path.py").write(
textwrap.dedent(
"""\
def test_path_with_dots_test_example():
pass
""")
)
allured_testdir.run_with_allure()
assert_that(allured_testdir.allure_report,
has_test_case("test_path_with_dots_test_example",
has_package(ends_with("path.with.dots.test_path"))
)
)
def test_with_no_package(allured_testdir):
"""
>>> def test_package_less(request):
... pass
"""
allured_testdir.parse_docstring_source()
allured_testdir.testdir.makeini("""[pytest]""")
allured_testdir.run_with_allure(allured_testdir.testdir.tmpdir)
assert_that(allured_testdir.allure_report,
has_test_case("test_package_less",
has_package("test_with_no_package"))
)
|
480636
|
import abc
import json
import logging
import os
import numpy as np
import tensorflow as tf
def zip_weights(model, ckpt, variables_mapping, self_weight_names, **kwargs):
weights, values = [], []
used_weights = [w for w in model.trainable_weights if w.name in self_weight_names]
for w in used_weights:
var = variables_mapping.get(w.name, None)
if var is None:
logging.warning("Model weight: %s not collected in weights mapping.", w.name)
continue
v = tf.train.load_variable(ckpt, var)
if w.name == "bert/nsp/dense/kernel:0":
v = v.T
weights.append(w)
values.append(v)
if kwargs.get("verbose", True):
logging.info("Load weight: {:60s} <-- {}".format(w.name, variables_mapping[w.name]))
mapped_values = zip(weights, values)
return mapped_values
def parse_pretrained_model_files(pretrained_model_dir):
config_file, ckpt, vocab = None, None, None
pretrained_model_dir = os.path.abspath(pretrained_model_dir)
if not os.path.exists(pretrained_model_dir):
logging.info("pretrain model dir: {} is not exists.".format(pretrained_model_dir))
return config_file, ckpt, vocab
for f in os.listdir(pretrained_model_dir):
if "config" in str(f) and str(f).endswith(".json"):
config_file = os.path.join(pretrained_model_dir, f)
if "vocab" in str(f):
vocab = os.path.join(pretrained_model_dir, f)
if "ckpt" in str(f):
n = ".".join(str(f).split(".")[:-1])
ckpt = os.path.join(pretrained_model_dir, n)
return config_file, ckpt, vocab
class AbstractAdapter(abc.ABC):
"""Abstract model weights adapter."""
@abc.abstractmethod
def adapte_config(self, model_path, **kwargs):
raise NotImplementedError()
@abc.abstractmethod
def adapte_weights(self, model, model_config, model_path, **kwargs):
raise NotImplementedError()
class BaseAdapter(AbstractAdapter):
"""Base adapter for pretrained models."""
def __init__(
self,
use_functional_api=True,
with_mlm=False,
with_nsp=False,
with_sop=False,
skip_token_embedding=False,
skip_position_embedding=False,
skip_segment_embedding=False,
skip_embedding_layernorm=False,
skip_pooler=False,
check_weights=True,
verbose=True,
**kwargs
):
self.use_functional_api = use_functional_api
self.with_mlm = with_mlm
self.with_nsp = with_nsp
self.with_sop = with_sop
self.check_weights = check_weights
self.verbose = verbose
self.model_files = None
self._pretrained_weights_map = {}
self.weights_to_skip = set()
# skip weights
self.skip_token_embedding = skip_token_embedding
self.skip_position_embedding = skip_position_embedding
self.skip_segment_embedding = skip_segment_embedding
self.skip_embedding_layernorm = skip_embedding_layernorm
self.skip_pooler = skip_pooler
logging.info(
"Adapter skipping config: %s",
json.dumps(
{
"skip_token_embedding": self.skip_token_embedding,
"skip_position_embedding": self.skip_position_embedding,
"skip_segment_embedding": self.skip_segment_embedding,
"skip_embedding_layernorm": self.skip_embedding_layernorm,
"skip_pooler": self.skip_pooler,
}
),
)
def _parse_files(self, model_path, **kwargs):
config_file, ckpt, vocab = parse_pretrained_model_files(model_path)
return {
"config_file": config_file,
"ckpt": ckpt,
"vocab_file": vocab,
}
def _read_pretrained_weights(self, model_path, **kwargs):
if self.model_files is None:
self.model_files = self._parse_files(model_path, **kwargs)
ckpt = self.model_files["ckpt"]
ckpt_weight_names = [w for (w, _) in tf.train.list_variables(ckpt)]
ckpt_weights_map = {w: tf.train.load_variable(ckpt, w) for w in ckpt_weight_names}
return ckpt_weights_map
def adapte_weights(self, model, model_config, model_path, **kwargs):
self._pretrained_weights_map = self._read_pretrained_weights(model_path, **kwargs)
weights_mapping = {}
bert_weights_mapping = self._adapte_backbone_weights(model, model_config, **kwargs)
weights_mapping.update(bert_weights_mapping)
if self.with_mlm:
mlm_weights = self._adapte_mlm_weights(model, model_config, **kwargs)
weights_mapping.update(mlm_weights)
if self.with_nsp:
nsp_weights = self._adapte_nsp_weights(model, model_config, **kwargs)
weights_mapping.update(nsp_weights)
if self.with_sop:
sop_weights = self._adapte_sop_weights(model, model_config, **kwargs)
weights_mapping.update(sop_weights)
# skip weights
self._skipping_weights(model, **kwargs)
take_values = set(weights_mapping.values())
for k in self._pretrained_weights_map.keys():
if k not in take_values:
logging.info("pretrained weight: {} not used.".format(k))
zipping_weights, zipping_values = self._zip_weights(model, model_config, weights_mapping, **kwargs)
tf.keras.backend.batch_set_value(zip(zipping_weights, zipping_values))
# check weights
self._check_weights(model, zipping_weights, zipping_values, weights_mapping, **kwargs)
@abc.abstractmethod
def _adapte_backbone_weights(self, model, model_config, **kwargs):
raise NotImplementedError()
@abc.abstractmethod
def _adapte_mlm_weights(self, model, model_config, **kwargs):
raise NotImplementedError()
@abc.abstractmethod
def _adapte_nsp_weights(self, model, model_config, **kwargs):
raise NotImplementedError()
@abc.abstractmethod
def _adapte_sop_weights(self, model, model_config, **kwargs):
raise NotImplementedError()
@abc.abstractmethod
def _zip_weights(self, model, model_config, weights_mapping, **kwargs):
raise NotImplementedError()
@abc.abstractmethod
def get_backbone_prefix(self, model):
raise NotImplementedError()
def _skipping_weights(self, model, **kwargs):
backbone_prefix = self.get_backbone_prefix(model)
def _skip(w):
self.weights_to_skip.add(w)
logging.info("Weights will be skipped to load: %s", w)
if self.skip_token_embedding:
_skip("{}/embeddings/word_embeddings:0".format(backbone_prefix))
if self.skip_position_embedding:
_skip("{}/embeddings/position_embeddings:0".format(backbone_prefix))
if self.skip_segment_embedding:
_skip("{}/embeddings/token_type_embeddings:0".format(backbone_prefix))
if self.skip_embedding_layernorm:
_skip("{}/embeddings/LayerNorm/gamma:0".format(backbone_prefix))
_skip("{}/embeddings/LayerNorm/beta:0".format(backbone_prefix))
if self.skip_pooler:
_skip("{}/pooler/dense/kernel:0".format(backbone_prefix))
_skip("{}/pooler/dense/bias:0".format(backbone_prefix))
def _check_weights(self, model, zipping_weights, zipping_values, weights_mapping, **kwargs):
if not self.check_weights:
logging.info("Skipped to check weights due to option `check_weights` set to `False`")
return
for k, v in zip(zipping_weights, zipping_values):
vv = self._pretrained_weights_map[weights_mapping[k.name]]
try:
assert np.allclose(v, vv)
except Exception as e:
logging.warning("{} & {} not close!".format(k, weights_mapping[k.name]))
logging.warning("{} -> \n {}".format(k, v))
logging.warning("{} -> \n {}".format(weights_mapping[k.name], vv))
logging.warning(e)
logging.warning("=" * 80)
class AbstractBertAdapter(BaseAdapter):
"""Abstract Bert adapter"""
def get_backbone_prefix(self, model):
return model.bert_model.name if self.use_functional_api else model.name + "/" + model.bert_model.name
def adapte_config(self, model_path, **kwargs):
if self.model_files is None:
self.model_files = self._parse_files(model_path, **kwargs)
config_file = self.model_files["config_file"]
with open(config_file, mode="rt", encoding="utf8") as fin:
config = json.load(fin)
model_config = {
"vocab_size": config["vocab_size"],
"activation": config["hidden_act"],
"max_positions": config["max_position_embeddings"],
"hidden_size": config["hidden_size"],
"type_vocab_size": config["type_vocab_size"],
"intermediate_size": config["intermediate_size"],
"hidden_dropout_rate": config["hidden_dropout_prob"],
"attention_dropout_rate": config["attention_probs_dropout_prob"],
"initializer_range": config["initializer_range"],
"num_layers": config["num_hidden_layers"],
"num_attention_heads": config["num_attention_heads"],
}
return model_config
class AbstractAlbertAdapter(BaseAdapter):
"""Abstract adapter for albert"""
def get_backbone_prefix(self, model):
return model.albert_model.name if self.use_functional_api else model.name + "/" + model.albert_model.name
def adapte_config(self, model_path, **kwargs):
if self.model_files is None:
self.model_files = self._parse_files(model_path, **kwargs)
config_file = self.model_files["config_file"]
with open(config_file, mode="rt", encoding="utf8") as fin:
config = json.load(fin)
model_config = {
"vocab_size": config["vocab_size"],
"max_positions": config["max_position_embeddings"],
"embedding_size": config["embedding_size"],
"type_vocab_size": config["type_vocab_size"],
"num_layers": config["num_hidden_layers"],
"num_groups": config["num_hidden_groups"],
"num_layers_each_group": config["inner_group_num"],
"hidden_size": config["hidden_size"],
"num_attention_heads": config["num_attention_heads"],
"intermediate_size": config["intermediate_size"],
"activation": config["hidden_act"],
"hidden_dropout_rate": config["hidden_dropout_prob"],
"attention_dropout_rate": config["attention_probs_dropout_prob"],
"initializer_range": config["initializer_range"],
}
return model_config
|
480661
|
import json
from difflib import get_close_matches
data=json.load(open("data.json"))
def definition(w):
w=w.lower()
title=w.title()
W=w.upper()
if w in data:
return data[w]
elif title in data:
return data[title]
elif W in data:
return data[W]
elif len(get_close_matches(w,data.keys(),cutoff=0.75))>0 :
ans=input( "Did you mean {} . type 'Y' if you mean it or type 'N' for no : ".format(get_close_matches(w,data.keys(),cutoff=0.75)[0].upper()))
if ans.lower()=='y':
return definition(get_close_matches(w,data.keys(),cutoff=0.75)[0])
else:
return "The word doesn't exist"
else:
return "The word doesn't exist . Please verify it"
word = input("Enter the word: ")
output=definition(word)
if type(output)==list:
for i in output:
print(i)
else:
print(output)
|
480665
|
import setuptools
from itomate.itomate import version
with open("readme.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="itomate",
version=version,
author="<NAME>",
author_email="<EMAIL>",
description="Automate your iTerm layouts and workflows",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/kamranahmedse/itomate",
packages=setuptools.find_packages(),
install_requires=[
"iterm2>=1.1",
"PyYAML>=5.3.1",
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: MacOS",
],
python_requires='>=3.7.0',
license="MIT",
entry_points="""
[console_scripts]
itomate=itomate.itomate:main
"""
)
|
480679
|
expected_output = {
"interface-policer-information": {
"physical-interface": [
{
"admin-status": "up",
"logical-interface": [
{
"admin-status": "up",
"name": "ge-0/0/2.0",
"oper-status": "up",
"policer-information": [
{
"policer-family": "inet",
"policer-input": "GE_1M-ge-0/0/2.0-log_int-i",
"policer-output": "GE_1M-ge-0/0/2.0-log_int-o",
},
{
"policer-family": "inet6",
"policer-input": "GE_1M-ge-0/0/2.0-log_int-i",
"policer-output": "GE_1M-ge-0/0/2.0-log_int-o",
},
{
"policer-family": "multiservice",
"policer-input": "__default_arp_policer__",
},
],
}
],
"name": "ge-0/0/2",
"oper-status": "up",
}
]
}
}
|
480702
|
from decimal import Decimal
from enum import Enum
from pathlib import Path
from math import pi, isclose
from PIL import Image
from awsimple import dict_to_dynamodb, dynamodb_to_dict
class TstClass(Enum):
a = 1
b = 2
def test_make_serializable():
values = {
"d": Decimal(1.0),
"s": "s",
"bool": True,
"a": TstClass.a,
"b": TstClass.b,
"binary": b"\0\1",
"ni": -100, # negative integer
"nbi": -100000000000000000000000000000000000, # negative big integer
"pi": pi,
}
values["image"] = Image.open(Path("test_awsimple", "280px-PNG_transparency_demonstration_1.png"))
values = dict_to_dynamodb(values)
serial_values = dynamodb_to_dict(values)
assert serial_values["d"] == 1.0
assert serial_values["s"] == "s"
assert serial_values["bool"] is True
assert serial_values["a"] == "a"
assert serial_values["b"] == "b"
assert len(serial_values["image"]) == 140065
assert serial_values["binary"] == "b'\\x00\\x01'"
assert isinstance(serial_values["ni"], int)
assert isinstance(serial_values["nbi"], float) # ends up being a float, even though we'd prefer it as an int
assert isclose(serial_values["pi"], pi)
|
480706
|
from edflow.iterators.template_iterator import TemplateIterator
class Iterator(TemplateIterator):
"""
Clean iterator skeleton for initialization.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def save(self, checkpoint_path):
"""
Function for saving the model at a given state
Parameters
----------
checkpoint_path: The path where the saved checkpoint should lie.
"""
def restore(self, checkpoint_path):
"""
Function for model restoration from a given checkpoint.
Parameters
----------
checkpoint_path: The path where the checkpoint for restoring lies.
Returns
-------
The restored model from the given checkpoint.
"""
pass
def step_op(self, model, **kwargs):
"""
The main method to be called for training by the iterator. Calculating the loss, optimizer step etc.
Parameters
----------
model : The given model class.
Returns
-------
A dictionary with `train_op`, `log_op` and `eval_op` keys and their returns as their values.
"""
inputs, labels = kwargs["inputs"], kwargs["labels"]
outputs = model(inputs)
def train_op():
"""Takes care of the training process."""
pass
def log_op():
"""
Takes care of the logging process.
Returns
-------
A dictionary whose values are to be logged.
"""
return {"inputs": inputs, "labels": labels}
def eval_op():
"""
Takes care of the evaluation.
Returns
-------
A dictionary with values to be evaluated.
"""
return {"outputs": outputs}
return {"train_op": train_op, "log_op": log_op, "eval_op": eval_op}
|
480711
|
import logging
import os
from typing import Dict, List
from common_utils.labels import UserLabels
from controller.invoker.invoker_task_base import TaskBaseInvoker
from controller.utils import utils
from id_definition.error_codes import CTLResponseCode
from proto import backend_pb2
class TaskExportingInvoker(TaskBaseInvoker):
def task_pre_invoke(self, sandbox_root: str, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp:
exporting_request = request.req_create_task.exporting
logging.info(f"exporting_requests: {exporting_request}")
asset_dir = exporting_request.asset_dir
if not asset_dir:
return utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED, message="empty asset_dir")
os.makedirs(asset_dir, exist_ok=True)
annotation_dir = exporting_request.annotation_dir
if exporting_request.format != backend_pb2.LabelFormat.NO_ANNOTATION:
if not annotation_dir:
return utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED,
message="empty annotation_dir")
os.makedirs(annotation_dir, exist_ok=True)
return utils.make_general_response(code=CTLResponseCode.CTR_OK, message="")
@classmethod
def subtask_weights(cls) -> List[float]:
return [1.0]
@classmethod
def subtask_invoke_0(cls, sandbox_root: str, repo_root: str, assets_config: Dict[str, str],
request: backend_pb2.GeneralReq, subtask_id: str, subtask_workdir: str,
previous_subtask_id: str, user_labels: UserLabels) -> backend_pb2.GeneralResp:
exporting_request = request.req_create_task.exporting
asset_dir = exporting_request.asset_dir
annotation_dir = exporting_request.annotation_dir
media_location = assets_config['assetskvlocation']
exporting_response = cls.exporting_cmd(repo_root=repo_root,
dataset_id=exporting_request.dataset_id,
annotation_format=utils.annotation_format_str(exporting_request.format),
asset_dir=asset_dir,
annotation_dir=annotation_dir,
media_location=media_location,
work_dir=subtask_workdir)
return exporting_response
@staticmethod
def exporting_cmd(repo_root: str,
dataset_id: str,
annotation_format: str,
asset_dir: str,
annotation_dir: str,
media_location: str,
work_dir: str,
keywords: List[str] = None) -> backend_pb2.GeneralResp:
exporting_cmd = [
utils.mir_executable(), 'export', '--root', repo_root, '--media-location', media_location, '--asset-dir',
asset_dir, '--annotation-dir', annotation_dir, '--src-revs', f"{dataset_id}@{dataset_id}", '--format',
annotation_format, '-w', work_dir
]
if keywords:
exporting_cmd.append('--cis')
exporting_cmd.append(';'.join(keywords))
return utils.run_command(exporting_cmd)
|
480738
|
import pickle as pkl
import design_bench as db
import numpy as np
from design_bench.oracles.feature_extractors.morgan_fingerprint_features import MorganFingerprintFeatures
if __name__ == "__main__":
with open("type_assay_pairs.pkl", "rb") as f:
type_assay_pairs = pkl.load(f)
all_rank_corr = []
for type_name, assay_name in type_assay_pairs:
task = db.make(
'ChEMBLMorganFingerprint-FullyConnected-v0',
dataset_kwargs=dict(
max_samples=None,
distribution=None,
max_percentile=50,
min_percentile=0,
assay_chembl_id=assay_name,
standard_type=type_name),
oracle_kwargs=dict(
noise_std=0.0,
max_samples=None,
distribution=None,
max_percentile=100,
min_percentile=0,
feature_extractor=MorganFingerprintFeatures(dtype=np.float32),
model_kwargs=dict(
hidden_size=512,
activation='relu',
num_layers=2,
epochs=5,
shuffle_buffer=5000,
learning_rate=0.0001),
split_kwargs=dict(
val_fraction=0.1,
subset=None,
shard_size=50000,
to_disk=True,
disk_target=f"chembl-{type_name}-{assay_name}/split",
is_absolute=False))
)
print(type_name, assay_name,
task.oracle.params['rank_correlation'])
all_rank_corr.append(task.oracle.params['rank_correlation'])
best_type_name, best_assay_name = \
type_assay_pairs[np.argmax(np.array(all_rank_corr))]
|
480745
|
from django.contrib import messages
from django.core.mail import send_mail
from django.shortcuts import redirect, render
from django.views.generic import View
class CustomerServiceView(View):
def get(self, request, *args, **kwargs):
links = [
{'page': 'orders', 'icon': 'shopping_cart', 'title': 'Commande'},
{'page': 'returns', 'icon': 'flight_landing', 'title': 'Retour'},
{'page': 'delivery', 'icon': 'flight_takeoff', 'title': 'Livraison'},
]
template = 'pages/customer_service.html'
if 'page_name' in kwargs:
page_name = kwargs['page_name']
if page_name == 'orders':
template = 'pages/customer_care/faq/orders.html'
elif page_name == 'delivery':
template = 'pages/customer_care/faq/delivery.html'
elif page_name == 'returns':
template = 'pages/customer_care/faq/returns.html'
context = {
'links': links,
}
return render(request, template, context=context)
def post(self, request, **kwargs):
email = request.POST.get('email')
reason = request.POST.get('reason')
order = request.POST.get('order')
message = request.POST.get('message')
if not email or not reason \
or not order or not message:
messages.error(
request, "Le message n'a pas pu รชtre envoyรฉ car des champs sont manquants")
return redirect('customer_care')
if len(message) < 50:
messages.error(
request, "Votre message doit faire au moins 50 charactรจres", extra_tags='alert-warning')
return redirect('customer_care')
authorized_reasons = ['where', 'missing', 'refund', 'payment-question',
'defectuous', 'refund-duration', 'other']
if reason not in authorized_reasons:
messages.error(
request, "Une erreur s'est produite - CUS-RE", extra_tags='alert-warning')
return redirect('customer_care')
try:
send_mail(
f"Customer Care - From: {email} - Order: {order}",
message=message,
from_email='<EMAIL>',
recipient_list=[
'<EMAIL>'
],
html_message="""
Hello,
"""
)
except Exception:
messages.error(
request, "An error occurred - CUS-NS", extra_tags='alert-warning')
return redirect('customer_care')
messages.error(request, "Merci de nous avoir contacter",
extra_tags='alert-success')
return redirect('customer_care')
|
480753
|
import glob
import imp
import inspect
import logging
import os
import uuid
import warnings
# py 2/3 compatibility
try:
import pathlib
except ImportError:
import pathlib2 as pathlib
try:
from importlib.machinery import SourceFileLoader
from types import ModuleType
def load_source(modname, fname):
loader = SourceFileLoader(modname, fname)
mod = ModuleType(loader.name)
loader.exec_module(mod)
return mod
except ImportError as err:
load_source = lambda modname, fname: imp.load_source(modname, fname)
from ipypublish import export_plugins
def _get_module_path(module):
"""return a directory path to a module"""
return pathlib.Path(os.path.dirname(os.path.abspath(inspect.getfile(module))))
def _get_modules(path):
""" get modules from a directory
Properties
----------
path : str or path-like
Returns
-------
modules : list of modules
load_errors: list of str
Examples
--------
>>> from jsonextended.utils import MockPath
>>> mod1 = MockPath('mod1.py', is_file=True,
... content="name='modname1'")
>>> dir = MockPath(structure=[mod1])
>>> modules, errors = _get_modules(dir)
>>> errors
[]
>>> list(modules.keys())
['mod1']
>>> modules['mod1'].name
'modname1'
"""
# get potential plugin python files
if hasattr(path, 'glob'):
pypaths = path.glob('*.py')
else:
pypaths = glob.glob(os.path.join(path, '*.py'))
modules = {}
load_errors = []
for pypath in pypaths:
# use uuid to ensure no conflicts in name space
mod_name = str(uuid.uuid4())
try:
if hasattr(pypath, 'resolve'):
# Make the path absolute, resolving any symlinks
pypath = pypath.resolve()
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("ignore", category=ImportWarning)
# for MockPaths
if hasattr(pypath, 'maketemp'):
with pypath.maketemp() as fpath:
module = load_source(mod_name, str(fpath))
pypath = pypath.name
else:
module = load_source(mod_name, str(pypath))
modules[os.path.splitext(os.path.basename(str(pypath)))[0]] = module
except Exception as err:
load_errors.append((str(pypath), 'Load Error: {}'.format(err)))
continue
return modules, load_errors
_plugins_dict = {}
def add_directory(path):
""" add a directory of export plugin modules to the existing dict
plugins must have: oformat, template and config attributes and a doc string
Properties
----------
path : str or path-like
"""
modules, load_errors = _get_modules(path)
for mod_name, mod in modules.items():
try:
descript = getattr(mod, '__doc__')
oformat = getattr(mod, 'oformat')
template = getattr(mod, 'template')
config = getattr(mod, 'config')
except AttributeError:
continue
_plugins_dict[mod_name] = {'descript': descript,
'oformat': oformat,
'template': template,
'config': config}
return load_errors
logging.debug('loading builtin plugins')
load_errors = add_directory(_get_module_path(export_plugins))
if load_errors:
raise IOError(
'errors in builtin plugins loading: {}'.format('\n'.join(['{0}: {1}'.format(a, b) for a, b in load_errors])))
def get():
""" return export plugins
"""
return _plugins_dict.copy()
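# Hedged usage sketch (the directory path is a placeholder):
# load_errors = add_directory('path/to/extra_plugins')
# for name, meta in get().items():
#     print(name, meta['oformat'])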
|
480758
|
from .utils import conditional_token_address, load_evm_abi, usdc_address
hash_zero = "0x0000000000000000000000000000000000000000000000000000000000000000"
def redeem(web3_provider, condition_id, num_outcomes):
conditional_token_abi = load_evm_abi('ConditionalTokens.json')
index_set = [1 << x for x in range(num_outcomes)]
contract = web3_provider.eth.contract(address=conditional_token_address, abi=conditional_token_abi)
trx_hash = contract.functions.redeemPositions(usdc_address, hash_zero, condition_id, index_set).transact()
web3_provider.eth.wait_for_transaction_receipt(trx_hash)
return trx_hash
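# Hedged usage sketch (assumes a web3.py provider with an unlocked default
# account; the node URL and condition_id below are placeholders, not real values):
# from web3 import Web3
# w3 = Web3(Web3.HTTPProvider("http://localhost:8545"))
# tx_hash = redeem(w3, condition_id=bytes(32), num_outcomes=2)
# print(tx_hash.hex())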
|
480781
|
import logging
import multiprocessing
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
def dataframe_transform_parallel(
df, transformer
):
cpu_count = multiprocessing.cpu_count()
workers_count = int(round(cpu_count))
logger.log(15, 'Dataframe_transform_parallel running pool with '+str(workers_count)+' workers')
df_chunks = np.array_split(df, workers_count)
df_list = execute_multiprocessing(workers_count=workers_count, transformer=transformer, chunks=df_chunks)
df_combined = pd.concat(df_list, axis=0, ignore_index=True)
return df_combined
# If multiprocessing_method is 'fork', initialization time scales linearly with current allocated memory, dramatically slowing down runs. forkserver makes this time constant
def execute_multiprocessing(workers_count, transformer, chunks, multiprocessing_method='forkserver'):
logger.log(15, 'Execute_multiprocessing starting worker pool...')
ctx = multiprocessing.get_context(multiprocessing_method)
with ctx.Pool(workers_count) as pool:
out = pool.map(transformer, chunks)
return out
def force_forkserver():
"""
Forces forkserver multiprocessing mode if not set. This is needed for HPO and CUDA.
The CUDA runtime does not support the fork start method: either the spawn or forkserver start method are required.
forkserver is used because spawn is still affected by locking issues
"""
if ('forkserver' in multiprocessing.get_all_start_methods()) & (not is_forkserver_enabled()):
logger.warning('WARNING: changing multiprocessing start method to forkserver')
multiprocessing.set_start_method('forkserver', force=True)
def is_forkserver_enabled():
"""
Return True if current multiprocessing start method is forkserver.
"""
return multiprocessing.get_start_method(allow_none=True) == 'forkserver'
def is_fork_enabled():
"""
Return True if current multiprocessing start method is fork.
"""
return multiprocessing.get_start_method(allow_none=True) == 'fork'
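# Hedged usage sketch (assumes a Unix-like OS where the 'forkserver' start
# method is available; the transformer must be picklable, i.e. module-level):
def _demo_transformer(chunk):
    """Toy transformer that doubles every numeric value in a chunk."""
    return chunk * 2

if __name__ == '__main__':
    demo_df = pd.DataFrame({'x': list(range(100))})
    doubled = dataframe_transform_parallel(demo_df, _demo_transformer)
    print(doubled.head())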
|
480794
|
import torch
import soundfile as sf
class VocoderWrapper():
def __init__(self, device):
self.device = device
self.vocoder = torch.hub.load('descriptinc/melgan-neurips', 'load_melgan')
self.n_mels = 80
self.sr = 22050
def mel2wav(self, mel, save=''):
device = self.device
with torch.no_grad():
if type(mel) is torch.Tensor:
mel = mel.squeeze()
mel = mel[None].to(device).float()
else:
mel = torch.from_numpy(mel[None]).to(device).float()
y = self.vocoder.inverse(mel).cpu().numpy().flatten()
if save != '':
# librosa.output.write_wav(path=save, y=y, sr=sr)
sf.write(file=save, data=y, samplerate=self.sr)
return y
def get_vocoder(device):
return VocoderWrapper(device=device)
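# Hedged usage sketch (downloads the MelGAN weights via torch.hub on first use;
# the random array below is a placeholder, not a real mel spectrogram):
if __name__ == '__main__':
    import numpy as np
    voc = get_vocoder('cuda' if torch.cuda.is_available() else 'cpu')
    dummy_mel = np.random.randn(80, 100).astype('float32')
    wav = voc.mel2wav(dummy_mel, save='demo.wav')
    print(wav.shape, voc.sr)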
|
480802
|
from seesaw.externalprocess import ExternalProcess
from seesaw.pipeline import Pipeline
from seesaw.runner import SimpleRunner
from seesaw.task import PrintItem, SimpleTask
from seesaw.test_base import BaseTestCase
class ExternalProcessTest(BaseTestCase):
def test_max_items(self):
pipeline = Pipeline(PrintItem(), PrintItem())
pipeline.has_failed = None
def fail_callback(task, item):
pipeline.has_failed = True
pipeline.on_fail_item += fail_callback
runner = SimpleRunner(pipeline, max_items=3)
def finish_item_callback(runner, pipeline, item):
if runner.item_count > 10:
raise Exception('Too many items.')
runner.on_pipeline_finish_item += finish_item_callback
runner.start()
self.assertFalse(pipeline.has_failed)
self.assertEqual(3, runner.item_count)
self.assertIOLoopOK()
def test_max_items_with_subproc(self):
pipeline = Pipeline(PrintItem(), PrintItem(),
ExternalProcess("pwd", ["pwd"]))
pipeline.has_failed = None
def fail_callback(task, item):
pipeline.has_failed = True
pipeline.on_fail_item += fail_callback
runner = SimpleRunner(pipeline, max_items=3)
def finish_item_callback(runner, pipeline, item):
if runner.item_count > 10:
raise Exception('Too many items.')
runner.on_pipeline_finish_item += finish_item_callback
runner.start()
self.assertFalse(pipeline.has_failed)
self.assertEqual(3, runner.item_count)
self.assertIOLoopOK()
def test_no_stack_overflow(self):
pipeline = Pipeline(PrintItem())
pipeline.has_failed = None
def fail_callback(task, item):
pipeline.has_failed = True
pipeline.on_fail_item += fail_callback
runner = SimpleRunner(pipeline, max_items=50)
def finish_item_callback(runner, pipeline, item):
if runner.item_count > 200:
raise Exception('Too many items.')
runner.on_pipeline_finish_item += finish_item_callback
runner.start()
self.assertFalse(pipeline.has_failed)
self.assertEqual(50, runner.item_count)
self.assertIOLoopOK()
def test_spurious_item_events(self):
class StupidTask(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "StupidTask")
def process(self, item):
item.log_output('Failing the item.')
self.fail_item(item)
item.log_output('Completing the item.')
self.complete_item(item)
item.log_output('Failing the item.')
self.fail_item(item)
pipeline = Pipeline(StupidTask())
pipeline.fail_count_test = 0
def fail_callback(task, item):
pipeline.fail_count_test += 1
pipeline.on_fail_item += fail_callback
runner = SimpleRunner(pipeline, max_items=1)
runner.start()
self.assertEqual(1, pipeline.fail_count_test)
self.assertIOLoopOK()
|
480807
|
import json, decimal, pytest
from flask_restplus import marshal, fields
from app.api.now_applications.response_models import NOW_APPLICATION_MODEL
from tests.now_application_factories import NOWApplicationIdentityFactory, NOWApplicationFactory
class TestNOWApplication:
"""PUT mines/now-applications/<guid>"""
@pytest.mark.skip(
reason='Application changes now fire a request to NROS so need to mock the service call.')
def test_put_application_field(self, test_client, db_session, auth_headers):
now_application = NOWApplicationFactory()
test_application = NOWApplicationIdentityFactory(now_application=now_application)
assert test_application.now_application
data = marshal(test_application.now_application, NOW_APPLICATION_MODEL)
new_latitude = '-55.111'
data['latitude'] = new_latitude
put_resp = test_client.put(
f'/now-applications/{test_application.now_application_guid}',
json=data,
headers=auth_headers['full_auth_header'])
assert put_resp.status_code == 200, put_resp.response
put_data = json.loads(put_resp.data.decode())
assert decimal.Decimal(put_data['latitude']) == decimal.Decimal(new_latitude)
|
480833
|
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.naive_bayes import GaussianNB
input_file = 'adult.data.txt'
# Reading the data
X = []
y = []
count_lessthan50k = 0
count_morethan50k = 0
num_images_threshold = 30000
with open(input_file, 'r') as f:
for line in f.readlines():
if '?' in line:
continue
data = line[:-1].split(', ')
if data[-1] == '<=50K' and count_lessthan50k < num_images_threshold:
X.append(data)
count_lessthan50k = count_lessthan50k + 1
elif data[-1] == '>50K' and count_morethan50k < num_images_threshold:
X.append(data)
count_morethan50k = count_morethan50k + 1
if count_lessthan50k >= num_images_threshold and count_morethan50k >= num_images_threshold:
break
X = np.array(X)
# Convert string data to numerical data
label_encoder = []
X_encoded = np.empty(X.shape)
for i,item in enumerate(X[0]):
if item.isdigit():
X_encoded[:, i] = X[:, i]
else:
label_encoder.append(preprocessing.LabelEncoder())
X_encoded[:, i] = label_encoder[-1].fit_transform(X[:, i])
X = X_encoded[:, :-1].astype(int)
y = X_encoded[:, -1].astype(int)
# Build a classifier
classifier_gaussiannb = GaussianNB()
classifier_gaussiannb.fit(X, y)
# Cross validation
from sklearn import cross_validation
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.25, random_state=5)
classifier_gaussiannb = GaussianNB()
classifier_gaussiannb.fit(X_train, y_train)
y_test_pred = classifier_gaussiannb.predict(X_test)
# compute F1 score of the classifier
f1 = cross_validation.cross_val_score(classifier_gaussiannb,
X, y, scoring='f1_weighted', cv=5)
print "F1 score: " + str(round(100*f1.mean(), 2)) + "%"
# Testing encoding on single data instance
input_data = ['39', 'State-gov', '77516', 'Bachelors', '13', 'Never-married', 'Adm-clerical', 'Not-in-family', 'White', 'Male', '2174', '0', '40', 'United-States']
count = 0
input_data_encoded = [-1] * len(input_data)
for i,item in enumerate(input_data):
if item.isdigit():
input_data_encoded[i] = int(input_data[i])
else:
input_data_encoded[i] = int(label_encoder[count].transform([input_data[i]])[0])
count = count + 1
input_data_encoded = np.array(input_data_encoded)
# Predict and print output for a particular datapoint
output_class = classifier_gaussiannb.predict([input_data_encoded])
print label_encoder[-1].inverse_transform(output_class)[0]
|
480912
|
import pytest
from os.path import isdir
from dafter.fetcher import DATASETS_FOLDER
from dafter.fetcher import DATASETS_CONFIG_FOLDER
def test_directories():
assert isdir(DATASETS_FOLDER) == True
assert isdir(DATASETS_CONFIG_FOLDER) == True
if __name__ == "__main__":
pytest.main([__file__])
|
480926
|
import argparse
import time
from copy import deepcopy
from pathlib import Path
import numpy as np
import pandas as pd
import sklearn
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
import qiqc
from qiqc.datasets import load_qiqc, build_datasets
from qiqc.preprocessing.modules import load_pretrained_vectors
from qiqc.training import classification_metrics, ClassificationResult
from qiqc.utils import set_seed, load_module
def main(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('--modelfile', '-m', type=Path, required=True)
_args, others = parser.parse_known_args(args)
modules = load_module(_args.modelfile)
config = modules.ExperimentConfigBuilder().build(args=args)
qiqc.utils.rmtree_after_confirmation(config.outdir, config.test)
train(config, modules)
def train(config, modules):
print(config)
start = time.time()
set_seed(config.seed)
config.outdir.mkdir(parents=True, exist_ok=True)
build_model = modules.build_model
Preprocessor = modules.Preprocessor
TextNormalizer = modules.TextNormalizer
TextTokenizer = modules.TextTokenizer
WordEmbeddingFeaturizer = modules.WordEmbeddingFeaturizer
WordExtraFeaturizer = modules.WordExtraFeaturizer
SentenceExtraFeaturizer = modules.SentenceExtraFeaturizer
Ensembler = modules.Ensembler
train_df, submit_df = load_qiqc(n_rows=config.n_rows)
datasets = build_datasets(train_df, submit_df, config.holdout, config.seed)
train_dataset, test_dataset, submit_dataset = datasets
print('Tokenize texts...')
preprocessor = Preprocessor()
normalizer = TextNormalizer(config)
tokenizer = TextTokenizer(config)
train_dataset.tokens, test_dataset.tokens, submit_dataset.tokens = \
preprocessor.tokenize(datasets, normalizer, tokenizer)
print('Build vocabulary...')
vocab = preprocessor.build_vocab(datasets, config)
print('Build token ids...')
train_dataset.tids, test_dataset.tids, submit_dataset.tids = \
preprocessor.build_tokenids(datasets, vocab, config)
print('Build sentence extra features...')
sentence_extra_featurizer = SentenceExtraFeaturizer(config)
train_dataset._X2, test_dataset._X2, submit_dataset._X2 = \
preprocessor.build_sentence_features(
datasets, sentence_extra_featurizer)
[d.build(config.device) for d in datasets]
print('Load pretrained vectors...')
pretrained_vectors = load_pretrained_vectors(
config.use_pretrained_vectors, vocab.token2id, test=config.test)
print('Build word embedding matrix...')
word_embedding_featurizer = WordEmbeddingFeaturizer(config, vocab)
embedding_matrices = preprocessor.build_embedding_matrices(
datasets, word_embedding_featurizer, vocab, pretrained_vectors)
print('Build word extra features...')
word_extra_featurizer = WordExtraFeaturizer(config, vocab)
word_extra_features = word_extra_featurizer(vocab)
print('Build models...')
word_features_cv = [
preprocessor.build_word_features(
word_embedding_featurizer, embedding_matrices, word_extra_features)
for i in range(config.cv)]
models = [
build_model(
config, word_features, sentence_extra_featurizer.n_dims
) for word_features in word_features_cv]
print('Start training...')
splitter = sklearn.model_selection.StratifiedKFold(
n_splits=config.cv, shuffle=True, random_state=config.seed)
train_results, valid_results = [], []
best_models = []
for i_cv, (train_indices, valid_indices) in enumerate(
splitter.split(train_dataset.df, train_dataset.df.target)):
if config.cv_part is not None and i_cv >= config.cv_part:
break
train_tensor = train_dataset.build_labeled_dataset(train_indices)
valid_tensor = train_dataset.build_labeled_dataset(valid_indices)
valid_iter = DataLoader(
valid_tensor, batch_size=config.batchsize_valid)
model = models.pop(0)
model = model.to_device(config.device)
model_snapshots = []
optimizer = torch.optim.Adam(model.parameters(), config.lr)
train_result = ClassificationResult('train', config.outdir, str(i_cv))
valid_result = ClassificationResult('valid', config.outdir, str(i_cv))
batchsize = config.batchsize
for epoch in range(config.epochs):
if epoch in config.scale_batchsize:
batchsize *= 2
print(f'Batchsize: {batchsize}')
epoch_start = time.time()
sampler = None
train_iter = DataLoader(
train_tensor, sampler=sampler, drop_last=True,
batch_size=batchsize, shuffle=sampler is None)
_summary = []
# Training loop
for i, batch in enumerate(
tqdm(train_iter, desc='train', leave=False)):
model.train()
optimizer.zero_grad()
loss, output = model.calc_loss(*batch)
loss.backward()
optimizer.step()
train_result.add_record(**output)
train_result.calc_score(epoch)
_summary.append(train_result.summary.iloc[-1])
# Validation loop
if epoch >= config.validate_from:
for i, batch in enumerate(
tqdm(valid_iter, desc='valid', leave=False)):
model.eval()
loss, output = model.calc_loss(*batch)
valid_result.add_record(**output)
valid_result.calc_score(epoch)
_summary.append(valid_result.summary.iloc[-1])
_model = deepcopy(model)
_model.threshold = valid_result.summary.threshold[epoch]
model_snapshots.append(_model)
summary = pd.DataFrame(_summary).set_index('name')
epoch_time = time.time() - epoch_start
pbar = '#' * (i_cv + 1) + '-' * (config.cv - 1 - i_cv)
tqdm.write(f'\n{pbar} cv: {i_cv} / {config.cv}, epoch {epoch}, '
f'time: {epoch_time}')
tqdm.write(str(summary))
train_results.append(train_result)
valid_results.append(valid_result)
best_indices = valid_result.summary.fbeta.argsort()[::-1]
best_models.extend([model_snapshots[i] for i in
best_indices[:config.ensembler_n_snapshots]])
# Build ensembler
train_X, train_X2, train_t = \
train_dataset.X, train_dataset.X2, train_dataset.t
ensembler = Ensembler(config, best_models, valid_results)
ensembler.fit(train_X, train_X2, train_t)
scores = dict(
valid_fbeta=np.array([r.best_fbeta for r in valid_results]).mean(),
valid_epoch=np.array([r.best_epoch for r in valid_results]).mean(),
threshold_cv=ensembler.threshold_cv,
threshold=ensembler.threshold,
elapsed_time=time.time() - start,
)
if config.holdout:
test_X, test_X2, test_t = \
test_dataset.X, test_dataset.X2, test_dataset._t
y, t = ensembler.predict_proba(test_X, test_X2), test_t
y_pred = y > ensembler.threshold
y_pred_cv = y > ensembler.threshold_cv
result = classification_metrics(y_pred, t)
result_cv = classification_metrics(y_pred_cv, t)
result_theoretical = classification_metrics(y, t)
scores.update(dict(
test_fbeta=result['fbeta'],
test_fbeta_cv=result_cv['fbeta'],
test_fbeta_theoretical=result_theoretical['fbeta'],
test_threshold_theoretical=result_theoretical['threshold'],
))
print(scores)
# Predict submit datasets
submit_y = ensembler.predict(submit_dataset.X, submit_dataset.X2)
submit_df['prediction'] = submit_y
submit_df = submit_df[['qid', 'prediction']]
submit_df.to_csv(config.outdir / 'submission.csv', index=False)
return scores
if __name__ == '__main__':
main()
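# Example invocation (a sketch; the script name and model file path are illustrative,
# --modelfile/-m is the only flag defined directly in this file):
#   python train.py --modelfile models/baseline.py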
|
480927
|
import sys
if len(sys.argv) != 2:
    raise Exception("{0} requires exactly one argument: the input/output file path".format(sys.argv[0]))
input_cmd = ""
output_cmd = {
"": "\nprint('empty input file')\n",
"first": "\nprint('first test')\n",
"second": "\nprint('second test')\n",
"multiline": "def multiline(req, opt = False):\n"
" if opt:\n"
" print('multiline -> optional')\n"
" \n"
" print('multiline test: {0}'.format(req))\n",
"temporary": "\ntemporary_file_path = r'{0}'\n".format(sys.argv[1]),
"extra \"quoted \\\"arguments\\\"\"": "\nprint('test with extra additional quoted arguments')\n",
"display": "def display():\n"
" print('display test')\n",
"js_statements": "js_one = 'first JS'\n"
"js_two = 'second JS'\n",
"sql_statements": "SELECT 'first SQL';\n"
"SELECT 'second SQL';\n",
"py_statements": "py_one = 'first Python'\n"
"py_two = 'second Python'\n",
"print('this')": "\nprint('that')\n"
}
with open(sys.argv[1], "r") as f:
input_cmd = f.read()
if not input_cmd in output_cmd:
output_cmd[input_cmd] = "print('unexpected test: \\'{0}\\'')\n".format(input_cmd)
with open(sys.argv[1], "w") as f:
f.write(output_cmd[input_cmd])
|
480962
|
def bar1():
"""__NATIVE__
PmReturn_t retval = PM_RET_OK;
/* If wrong number of args, raise TypeError */
if (NATIVE_GET_NUM_ARGS() != 0)
{
PM_RAISE(retval, PM_RET_EX_TYPE);
return retval;
}
NATIVE_SET_TOS(PM_NONE);
return retval;
"""
pass
def bar2():
return bar1()
|
480988
|
GITHUB_AUTH_TOKEN = ""
EVERNOTE_PROD_TOKEN = ""
EVERNOTE_SANDBOX_TOKEN = ""
NOTEBOOK_TO_SYNC = "gist-evernote"
|
480992
|
import json
from urllib2 import urlopen, Request
import ifcb
from ifcb.io import Timestamped
from ifcb.io.path import Resolver
from ifcb.io import TARGET_INFO
import re
from ifcb.io.pids import parse_id
import urllib2 as urllib
from PIL import Image
from cStringIO import StringIO
def get_json(pid):
print 'fetching ' + pid + '.json'
return json.loads(''.join(urlopen(Request(pid + '.json'))))
class Target(object):
info = None
pid = None
bin = None
def __init__(self, pid, bin=None, info=None):
self.pid = pid
self.bin = bin
self.info = info
def __getattribute__(self,name):
if name in TARGET_INFO:
return self.info[name]
elif name == 'info':
if object.__getattribute__(self,'info') is None:
self.info = get_json(object.__getattribute__(self,'pid'))
return object.__getattribute__(self,'info')
elif name == 'bin':
if object.__getattribute__(self,'bin') is None:
self.bin = Bin(self.binID)
return self.bin
else:
return object.__getattribute__(self,name)
def image(self):
img_file = urllib.urlopen(self.pid+'.png')
return Image.open(StringIO(img_file.read()))
class Bin(Timestamped):
bin_info = None # dict containing information about the bin
def __init__(self, pid, info=None):
self.pid = pid
#self.instrument = self.info()['instrument']
def __repr__(self):
return '{Bin ' + self.pid + '}'
def info(self):
if self.bin_info is None:
self.bin_info = get_json(self.pid+'/full')
return self.bin_info
def properties(self,include_pid=False):
props = self.info().copy()
del props['targets']
if not include_pid:
del props['pid']
return props
def headers(self):
return self.properties()
# generate all targets
def __iter__(self):
for t in self.info()['targets']:
yield Target(t['pid'], self, t)
def all_targets(self):
return list(self)
# retrieve the nth target (1-based!)
# more efficient than subscripting the result of all_targets
def target(self,n):
for target in self:
if n == target.targetNumber:
return target
# return number of targets
    def length(self):
        return len(self.all_targets())
## image access
def all_images(self):
for target in self:
yield target.image()
# convenience method for getting a specific image
def image(self,n):
return self.target(n).image()
# compute the pid of a target, given its index
    def target_pid(self,target_number):
        return self.target(target_number).pid
class Client(Resolver):
def latest_bins(self,n=10):
feed = get_json('http://ifcb-data.whoi.edu/feed')
length = min(len(feed),n)
return [Bin(info['pid']) for info in feed[:length]]
def latest_bin(self):
"""Return latest bin"""
return self.latest_bins(1)[0]
def resolve(self,pid):
oid = parse_id(pid)
if oid.isbin:
return Bin(pid)
elif oid.istarget:
return Target(pid)
# FIXME add day dir
raise KeyError('unrecognized pid '+pid)
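# A minimal usage sketch (illustrative only; it assumes the IFCB feed URL above is reachable):
#   client = Client()
#   latest = client.latest_bin()          # most recent Bin from the feed
#   print latest.properties()             # bin metadata without the target list
#   img = latest.image(1)                 # PIL image of target #1 (1-based)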
|
480999
|
from bson import ObjectId
from datetime import datetime
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from comments.models import Comment
from comments.signals import comment_done, comment_delete
from documents import get_collection
from documents.models import Document
from documents.signals import document_done, fork_done, star_done, document_delete, fork_delete
from profiles.management.signals import follow_done, unfollow_done
from newsfeed.constants import (NEWS_TYPE_COMMENT, NEWS_TYPE_DOCUMENT,
NEWS_TYPE_FORK, NEWS_TYPE_STAR,
NEWS_TYPE_FOLLOWING, NEWS_TYPE_REGISTRATION)
RELATED_MODELS = {
NEWS_TYPE_COMMENT: Comment,
NEWS_TYPE_DOCUMENT: Document,
NEWS_TYPE_FORK: Document,
NEWS_TYPE_STAR: Document
}
class EntryManager(object):
"""
A manager that allows you to manage newsfeed items.
"""
def __init__(self):
self.load()
def load(self):
self.collection = get_collection("newsfeed")
def create(self, object_id, news_type, sender, recipients=None,
related_object=None):
"""
Creates newsfeed item from provided parameters
"""
followers = sender.followers.values_list("follower_id", flat=True)
recipients = (recipients if recipients is not None
else list(followers) + [sender.pk])
entry_bundle = {
"object_id": object_id,
"news_type": news_type,
"date_created": datetime.now(),
"sender": {
"username": sender.username,
"email": sender.email # it's required for gravatar
},
"recipients": recipients
}
# sometimes we have to create custom related object bundle.
# for example: following actions. because user actions are
# holding on relational database.
if related_object is not None:
entry_bundle["related_object"] = related_object
self.collection.insert(entry_bundle)
def add_to_recipients(self, following, follower):
"""
Adds the id of follower to the recipients of followed profile's entries.
"""
self.collection.update(
{"sender.username": following.username},
{"$push": {"recipients": follower.id}}, multi=True)
def remove_from_recipients(self, following, follower):
"""
Removes follower id from the recipients of followed profile's entries.
"""
self.collection.update(
{"sender.username": following.username},
{"$pull": {"recipients": follower.id}}, multi=True)
def delete(self, object_type, object_id):
"""
Removes news entry from provided object type and object id.
"""
self.collection.remove({
"news_type": object_type,
"object_id": object_id})
class Entry(dict):
"""
A model that wraps mongodb document for newsfeed.
"""
objects = EntryManager()
@property
def related_object(self):
news_type = self.get("news_type")
object_id = self.get("object_id")
model = RELATED_MODELS.get(news_type)
if model is None:
return self.get("related_object")
return model.objects.get(_id=ObjectId(object_id))
@receiver(comment_done)
@receiver(fork_done)
@receiver(document_done)
def create_news_entry(instance, **kwargs):
"""
Creates news entries for the following types:
- Comments
- Forks
- Documents
That models have `get_news_type` method.
"""
if instance.is_public:
Entry.objects.create(
object_id=instance._id,
news_type=instance.get_news_type(),
sender=instance.user
)
@receiver(star_done)
def create_star_entry(instance, user, **kwargs):
"""
Creates news entry for document stargazers.
Actually, there is a no model for stargazers.
It's just an array that holds starred user ids on the document model.
For that reason, `star_done` signals provides `user` parameter.
"""
Entry.objects.create(
object_id=instance._id,
news_type=NEWS_TYPE_STAR,
sender=user
)
@receiver(follow_done)
def create_following_entry(follower, following, **kwargs):
"""
Creates news entry for following actions.
"""
Entry.objects.create(
object_id=following.id,
news_type=NEWS_TYPE_FOLLOWING,
sender=follower,
related_object=dict(username=following.username,
email=following.email)
)
@receiver(follow_done)
def add_to_recipients(follower, following, **kwargs):
"""
Adds the entries of followed profile to follower's newsfeed.
"""
Entry.objects.add_to_recipients(
following=following, follower=follower)
@receiver(unfollow_done)
def remove_from_recipients(follower, following, **kwargs):
"""
Removes the entries of unfollowed profile.
"""
Entry.objects.remove_from_recipients(following=following,
follower=follower)
@receiver(comment_delete)
@receiver(document_delete)
@receiver(fork_delete)
def remove_news_entry(instance, **kwargs):
Entry.objects.delete(
object_type=instance.get_news_type(),
object_id=instance._id
)
@receiver(post_save, sender=User)
def create_registration_entry(instance, created, **kwargs):
if created:
Entry.objects.create(
object_id=instance.id,
news_type=NEWS_TYPE_REGISTRATION,
sender=instance,
related_object=dict(username=instance.username,
email=instance.email),
recipients=[]
)
|
481017
|
from src.DataReader.KF_Data.KF_PrepData import DataManager
from scipy import signal
from src.Params import *
from src.Models.KF_Model.KF_BLock import *
from src.Models.KF_Model.KF_Model import *
import torch.optim as optim
import matplotlib.pyplot as plt
from src.Params import getNoiseLevel
dsName, subType, seq = 'kitti', 'none', [0, 2, 7, 10]
isTrain = True
wName = 'Weights/' + branchName() + '_' + dsName + '_' + subType + '_KF'
def preClamp(data):
if dsName=='kitti':
return data
N = data.shape[0]
for i in range(0, N):
row = data[i, :]
for j in range(0, 3):
val = row[j]
if val > 1:
val = 1
elif val < -1:
val = -1
row[j] = val
data[i] = row
return data
def filtfilt(data):
y = np.zeros_like(data)
b, a = signal.butter(8, 0.1)
for i in range(0, 3):
y[:, i] = signal.filtfilt(b, a, data[:, i], padlen=100)
return y
def plotter(filt, gt):
plt.figure()
plt.subplot(311)
plt.plot(gt[:, 0], 'r.')
plt.plot(filt[:, 0], 'b.')
plt.subplot(312)
plt.plot(gt[:, 1], 'r')
plt.plot(filt[:, 1], 'b.')
plt.subplot(313)
plt.plot(gt[:, 2], 'r')
plt.plot(filt[:, 2], 'b.')
posFilt = integrate(filt)
posGT = integrate(gt)
plt.figure()
plt.subplot(311)
plt.plot(posGT[:, 0], 'r')
plt.plot(posFilt[:, 0], 'g')
plt.subplot(312)
plt.plot(posGT[:, 1], 'r')
plt.plot(posFilt[:, 1], 'g')
plt.subplot(313)
plt.plot(posGT[:, 2], 'r')
plt.plot(posFilt[:, 2], 'g')
plt.figure()
plt.plot(posGT[:, 0], posGT[:, 2], 'r')
plt.plot(posFilt[:, 0], posFilt[:, 2], 'g')
return posFilt, posGT
def prepData(seqLocal = seq):
dm = DataManager()
dm.initHelper(dsName, subType, seqLocal)
dt = dm.dt
pSignal = dm.accdt_gnd
pSignal = preClamp(pSignal)
mSignal = dm.pr_dtr_gnd
mSignal = preClamp((mSignal))
mCov = dm.dtr_cov_gnd
gtSignal = preClamp(dm.gt_dtr_gnd)
gtSignal = filtfilt(gtSignal)
return gtSignal, dt, pSignal, mSignal, mCov
def main():
kfNumpy = KFBlock()
gtSignal, dt, pSignal, mSignal, mCov = prepData(seqLocal=seq)
posGT = np.cumsum(gtSignal, axis=0)
gnet = GuessNet()
    if not isTrain:
        # inference: restore the trained weights and switch to eval mode
        gnet.eval()
        checkPoint = torch.load(wName + '.pt')
        gnet.load_state_dict(checkPoint['model_state_dict'])
    else:
        gnet.train()
kf = TorchKFBLock(gtSignal, dt, pSignal, mSignal, mCov)
rmser = GetRMSE()
optimizer = optim.RMSprop(gnet.parameters(), lr=10 ** -4)
fig = plt.gcf()
fig.show()
fig.canvas.draw()
iterN = 50 if isTrain else 1
for epoch in range(0, iterN):
guess, sign = gnet()
filt = kf(guess, sign)
velRMSE, posRMSE = rmser(filt, gtSignal)
params = guess.data.numpy()
paramsSign = sign.data.numpy()
loss = posRMSE.data.numpy() + velRMSE.data.numpy()
theLOss = velRMSE + posRMSE
if isTrain:
if epoch == 10:
optimizer = optim.RMSprop(gnet.parameters(), lr=10 ** -4)
optimizer.zero_grad()
theLOss.backward(torch.ones_like(posRMSE))
optimizer.step()
temp = filt.data.numpy()
posKF = np.cumsum(temp, axis=0)
fig.clear()
plt.subplot(311)
plt.plot(posGT[:, 0], 'r')
plt.plot(posKF[:, 0], 'b')
plt.subplot(312)
plt.plot(posGT[:, 1], 'r')
plt.plot(posKF[:, 1], 'b')
plt.subplot(313)
plt.plot(posGT[:, 2], 'r')
plt.plot(posKF[:, 2], 'b')
plt.pause(0.001)
fig.canvas.draw()
plt.savefig('KFOptimHistory/'+dsName +' ' + subType + ' temp ' + str(epoch) + '.png')
#if np.mod(epoch, 10):
print('epoch: %d' % epoch)
print('params: ')
print(params)
print(paramsSign)
print('posRMSE: %.4f, %.4f, %.4f' %(loss[0], loss[1], loss[2]))
        torch.save({
            'model_state_dict': gnet.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
        }, wName + '.pt')
if isTrain:
kfRes = filt.data.numpy()
_, _ = plotter(kfRes, gtSignal)
else:
noise = getNoiseLevel()
for ii in range(5, 6):
gtSignal, dt, pSignal, mSignal, mCov = prepData(seqLocal=[ii])
kfNumpy.setR(params, paramsSign)
kfRes = kfNumpy.runKF(dt, pSignal, mSignal, mCov)
posFilt, posGT = plotter(kfRes, gtSignal)
np.savetxt('Results/Data/posFilt' + str(ii) + '_' + str(noise) + '.txt', posFilt)
np.savetxt('Results/Data/posGT' + str(ii) + '_' + str(noise) + '.txt', posGT)
plt.show()
if __name__ == '__main__':
main()
|
481055
|
from storage.team07.storageBeans.TableHash import Hash
# database functions
t = Hash()  # hash used to store the databases (initial structure)
# database functions, they only receive the database name
def createDatabase(database: str) -> int:  # receives the database name as a string
    try:
        if type(database) is str:
            if (t.search(database) is False):
                t.insert(database)
                return 0  # operation succeeded
            else:
                return 2  # nothing done because the database already exists
        else:
            return 1  # error because the argument is not a string
    except:
        return 1  # error during the operation
def showDatabases():  # returns a list
    return t.getData()  # returns a list with the names of the databases
def alterDatabase(databaseOld, databaseNew):  # renames a database (a node in the hash table)
    try:
        if t.search(databaseOld):  # check if the old database exists
            if t.search(databaseNew):  # check that the new database does not exist yet
                return 3  # the new database already exists, so this is an error
            else:  # if everything is correct, change the name of the database
                if t.updateName(databaseNew, databaseOld):
                    return 0  # successful
                else:
                    return 1  # error
        else:
            return 2  # database doesn't exist
    except:
        return 2  # error
def dropDatabase(database):
    try:
        if t.search(database):  # means that the database exists
            t.delete(database)
            return 0  # successful delete operation
        else:  # database doesn't exist
            return 2
    except:
        return 1  # error
# ----------------------------------------------------------
def createTable(database, table, numberColumns):
    try:
        if t.search(database):
            data = t.getDataBase(database)  # get the database
            if data.BHash.searchTable(table):
                return 3  # table already exists
            else:
                data.BHash.insertTable(table, numberColumns)  # a new table was inserted
                return 0
        else:
            return 2  # database doesn't exist
    except:
        return 1
def showTables(database):
if t.search(database):
data = t.getDataBase(database) # get the database
return data.BHash.getDataTables() # return name of tables
else:
return None
def dropTable(database, table):
    try:
        if t.search(database):
            data = t.getDataBase(database)
            if data.BHash.searchTable(table):
                data.BHash.deleteTable(table)
                return 0
            else:
                return 3  # table doesn't exist
        else:
            return 2  # database doesn't exist
    except:
        return 1
def alterTable(database, tableOld, tableNew):
    try:
        if t.search(database):
            data = t.getDataBase(database)  # get the database
            if data.BHash.searchTable(tableOld):  # old table exists
                if data.BHash.searchTable(tableNew):  # new table name already exists
                    return 4
                else:
                    data.BHash.updateNameTable(tableNew, tableOld)  # all data is correct -> rename the table
                    return 0
            else:  # table doesn't exist
                return 3
        else:
            return 2
    except:
        return 1
def insert(database, table, register):
    try:
        if t.search(database):
            data = t.getDataBase(database)  # get the database
            if data.BHash.searchTable(table):
                tabla = data.BHash.getTable(table)  # get the table
                avl = tabla.AVLtree
                if len(register) == avl.noColumnas:  # the register matches the column count
                    avl.agregar(register)  # insert a new register
                    # avl.preorden()
                    return 0
                else:  # columns out of bounds
                    return 5
            else:
                return 3  # table doesn't exist
        else:
            return 2  # database doesn't exist
    except:
        return 1  # any error
def alterAddPK(database, table, columns):  # 'columns' is a list of column indices
    try:
        if t.search(database):
            data = t.getDataBase(database)  # get the database
            if data.BHash.searchTable(table):
                tabla = data.BHash.getTable(table)  # get the table
                avl = tabla.AVLtree
                if avl.pk is None:
                    avl.pk = columns  # the primary key will be a list
                    # if avl.raiz is not None:
                    #     if len(columns) == avl.noColumnas:  # because data was already inserted
                    #         avl.recalcularIndices()  # TODO: implement this function
                    #         return 0
                    #     else:  # columns out of bounds
                    #         return 5
                    return 0
                else:  # error, the table already had a pk
                    return 4
            else:
                return 3  # table doesn't exist
        else:
            return 2  # database doesn't exist
    except:
        return 1  # any error
def extractTable(database, table):
    try:
        if t.search(database):
            data = t.getDataBase(database)  # get the database
            if data.BHash.searchTable(table):
                tabla = data.BHash.getTable(table)  # get the table
                avl = tabla.AVLtree  # get the tree that holds the tuples
                return avl.getTuplas()  # return all registered tuples
            else:
                return None  # table doesn't exist
        else:
            return None  # database doesn't exist
    except:
        return None
def truncate(database, table):
    from storage.team07.storageBeans.AVLtree import arbolAVL
    try:
        if t.search(database):
            data = t.getDataBase(database)  # get the database
            if data.BHash.searchTable(table):
                tabla = data.BHash.getTable(table)  # get the table
                avl = tabla.AVLtree
                noColumnas = avl.noColumnas
                tabla.AVLtree = None
                tabla.AVLtree = arbolAVL(noColumnas)  # a new, empty tree for the table
                return 0
            else:
                return 3  # table doesn't exist
        else:
            return 2  # database doesn't exist
    except:
        return 1  # any error
# the functions up to this point are used by the interface
def alterDropPK(database, table):
    try:
        if t.search(database):
            data = t.getDataBase(database)  # get the database
            if data.BHash.searchTable(table):
                tabla = data.BHash.getTable(table)  # get the table
                avl = tabla.AVLtree
                if avl.pk is None:
                    return 4  # the pk did not exist
                else:  # the pk exists
                    avl.pk = None
                    return 0
            else:
                return 3  # table doesn't exist
        else:
            return 2  # database doesn't exist
    except:
        return 1  # any error
def alterAddColumn(database, table, default):
    try:
        if t.search(database):
            data = t.getDataBase(database)  # get the database
            if data.BHash.searchTable(table):
                tabla = data.BHash.getTable(table)  # get the table
                avl = tabla.AVLtree
                avl.noColumnas += 1
                avl.addNewColumna(default)  # default is the value every existing tuple gets for the new column
                return 0
            else:
                return 3  # table doesn't exist
        else:
            return 2  # database doesn't exist
    except:
        return 1  # any error
def alterDropColumn(database, table, columnNumber):
    try:
        if t.search(database):
            data = t.getDataBase(database)  # get the database
            if data.BHash.searchTable(table):
                tabla = data.BHash.getTable(table)  # get the table
                avl = tabla.AVLtree
                if 0 <= columnNumber < avl.noColumnas:  # the column exists
                    if avl.noColumnas > 1:
                        if avl.pk is not None:
                            for i in avl.pk:
                                if i == columnNumber:  # trying to drop a column that is part of the pk
                                    return 4
                        # no conflicts, the column can be dropped
                        avl.eliminarColumna(columnNumber)
                        return 0
                    else:
                        return 4  # error because the table would be left without columns
                else:  # column out of bounds
                    return 5
            else:
                return 3  # table doesn't exist
        else:
            return 2  # database doesn't exist
    except:
        return 1  # any error
# the interface can call the functions up to this point
def loadCSV(file, database, table):
    lista = []
    try:
        archivo = open(file, "r")
        contador = 0
        for linea in archivo.readlines():
            # split each CSV line into a register (list of column values), insert it,
            # and collect the return code of every insert
            registro = linea.rstrip("\n").split(",")
            lista.append(insert(database, table, registro))
            contador += 1
        archivo.close()
        if contador > 0:
            return lista
        else:
            lista = []
            return lista
    except:
        lista = []
        return lista
def extractRow(database, table, columns):
    try:
        if t.search(database):
            data = t.getDataBase(database)  # get the database
            if data.BHash.searchTable(table):
                tabla = data.BHash.getTable(table)  # get the table
                avl = tabla.AVLtree
                if avl.search(columns):  # the register exists
                    return avl.getRegistro(columns)  # the method ran successfully
                else:
                    lista = []
                    return lista
            else:
                lista = []
                return lista  # table doesn't exist
        else:
            lista = []
            return lista  # database doesn't exist
    except:
        lista = []
        return lista  # any error
def update(database, table, register, columns):
    try:
        if t.search(database):
            data = t.getDataBase(database)  # get the database
            if data.BHash.searchTable(table):
                tabla = data.BHash.getTable(table)  # get the table
                avl = tabla.AVLtree
                if avl.search(columns):  # if the register exists, do the update
                    listaUpdate = []
                    for i in range(avl.noColumnas):
                        listaUpdate.append(None)
                    for clave in register:  # walk the dictionary that was sent
                        valor = register[clave]
                        id_clave = int(clave)
                        listaUpdate[id_clave] = valor
                    avl.updateTupla(listaUpdate)
                    return 0
                else:  # the register doesn't exist, the key that was sent doesn't exist
                    return 4
            else:
                return 3  # table doesn't exist
        else:
            return 2  # database doesn't exist
    except:
        return 1  # any error
def delete(database, table, columns):
    try:
        if t.search(database):
            data = t.getDataBase(database)  # get the database
            if data.BHash.searchTable(table):
                tabla = data.BHash.getTable(table)  # get the table
                avl = tabla.AVLtree
                if avl.search(columns):  # if the register exists, do the delete
                    avl.eliminarTupla(columns)
                    return 0
                else:  # the register doesn't exist, the key that was sent doesn't exist
                    return 4
            else:
                return 3  # table doesn't exist
        else:
            return 2  # database doesn't exist
    except:
        return 1  # any error
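# A minimal usage sketch of the return-code convention used above
# (0 = success, 1 = error, 2 = missing database, 3 = missing table; the sample
# names and values are illustrative only):
if __name__ == "__main__":
    print(createDatabase("db1"))                     # expected 0 on first creation
    print(createTable("db1", "people", 3))           # expected 0
    print(insert("db1", "people", [1, "Ana", 20]))   # expected 0 for a 3-column register
    print(extractTable("db1", "people"))             # all stored tuples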
|
481072
|
import planarity
import networkx as nx
# Example of the complete graph of 5 nodes, K5
G=nx.complete_graph(5)
# K5 is not planar
print(planarity.is_planar(G)) # False
# find forbidden Kuratowski subgraph
K=planarity.kuratowski_subgraph(G)
print(K.edges()) # K5 edges
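# A planar counterpart for comparison: K4, the complete graph on 4 nodes, is planar,
# so no Kuratowski subgraph needs to be extracted for it.
G2 = nx.complete_graph(4)
print(planarity.is_planar(G2))  # True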
|
481095
|
import gc
import glob
import os
import json
import random
import numpy as np
import scipy.ndimage.interpolation as inter
import tensorflow as tf
from keras import backend as K
from numpyencoder import NumpyEncoder
from scipy.signal import medfilt
from scipy.spatial.distance import cdist
from sklearn.metrics import auc, classification_report, confusion_matrix, roc_curve
from tqdm import tqdm
###################################################################################
def data_generator(T,C,result="classification"):
"""
Generates data for training, validation and testing.
Processes joint data into features.
:param T: joint data with labels
:param C: preset params
:param result: type of data to generate (for classification or regression task)
:return: featurized data for model input
"""
X_0 = []
X_1 = []
Y = []
for i in tqdm(range(len(T['pose']))):
p = np.copy(T['pose'][i])
p = zoom(p,target_l=C['frame_l'],joints_num=C['joint_n'],joints_dim=C['joint_d'])
label = np.zeros(C['clc_num'])
if result == "classification":
y_label_index = T['label'][i]
label[y_label_index] = 1
elif result == "regression":
label[0] = T['label'][i]
M = get_CG(p,C)
X_0.append(M)
X_1.append(p)
Y.append(label)
X_0 = np.stack(X_0)
X_1 = np.stack(X_1)
Y = np.stack(Y)
return X_0,X_1,Y
def save_json(filename, attributes, names):
"""
Save training parameters and evaluation results to json file.
:param filename: save filename
:param attributes: attributes to save
:param names: name of attributes to save in json file
"""
with open(filename, "w", encoding="utf8") as outfile:
d = {}
for i in range(len(attributes)):
name = names[i]
attribute = attributes[i]
d[name] = attribute
json.dump(d, outfile, indent=4, cls=NumpyEncoder)
def get_predictions(file):
"""
Returns prediction_list (class probabilities) and predicted_final_classes
:param file: file produced by save_json()
:return: prediction_list (class probabilities) and predicted_final_classes
"""
with open(file) as json_file:
data = json.load(json_file)
pred_probs = data['prediction_list']
pred_classes = data['predicted_final_classes']
return pred_classes, pred_probs
def get_predicted_class(preds):
"""
Get predicted classes for each clip in one video.
:param preds: predicted class probabilities for one video
:return: predicted class for one video
"""
p = np.array(preds)
pred_classes = []
for clip in p:
prediction = np.argmax(clip)
pred_classes.append(prediction)
return pred_classes
def single_vote(pred):
"""
Get majority vote of predicted classes for the clips in one video.
:param preds: list of predicted class for each clip of one video
:return: majority vote of predicted class for one video
"""
p = np.array(pred)
counts = np.bincount(p)
max_count = 0
max_index = 0
for i in range(len(counts)):
if max_count < counts[i]:
max_index = i
max_count = counts[i]
return max_index
def get_vote(pred_classes):
"""
Get majority vote of predicted class for list of videos.
:param preds: list of predicted class for each clip of each video
:return: list of majority votes of predicted class for each video
"""
majority_votes = []
for pred_class in pred_classes:
vote = single_vote(pred_class)
majority_votes.append(vote)
return majority_votes
def total_video_vote(pred):
"""
Get majority vote of all videos (one class prediction per video)
:param preds: class probabilities for all clips for given videos
:return: list of one majority vote prediction for each video
"""
pred_classes = get_predicted_class(pred)
return single_vote(pred_classes)
# Rescale to be 64 frames
def zoom(p,target_l=64,joints_num=25,joints_dim=3):
l = p.shape[0]
p_new = np.empty([target_l,joints_num,joints_dim])
    for m in range(joints_num):
        for n in range(joints_dim):
            # resample each joint coordinate to target_l frames, then smooth it with a median filter
            p_new[:,m,n] = inter.zoom(p[:,m,n],target_l/l)[:target_l]
            p_new[:,m,n] = medfilt(p_new[:,m,n],3)
return p_new
def sampling_frame(p,C):
full_l = p.shape[0] # full length
if random.uniform(0,1)<0.5: # aligment sampling
valid_l = np.round(np.random.uniform(0.85,1)*full_l)
s = random.randint(0, full_l-int(valid_l))
e = s+valid_l # sample end point
p = p[int(s):int(e),:,:]
else: # without aligment sampling
valid_l = np.round(np.random.uniform(0.9,1)*full_l)
index = np.sort(np.random.choice(range(0,full_l),int(valid_l),replace=False))
p = p[index,:,:]
p = zoom(p,C['frame_l'],C['joint_n'],C['joint_d'])
return p
def norm_scale(x):
return (x-np.mean(x))/np.mean(x)
def get_CG(p,C):
M = []
iu = np.triu_indices(C['joint_n'],1,C['joint_n'])
for f in range(C['frame_l']):
d_m = cdist(p[f],p[f],'euclidean')
d_m = d_m[iu]
M.append(d_m)
M = np.stack(M)
M = norm_scale(M)
return M
# Custom Keras loss function to calculate accuracy of regression predictions for classes
def acc(y_true, y_pred):
rounded = K.cast(tf.keras.backend.round(y_pred), dtype='int32')
equal = tf.keras.backend.equal(rounded, K.cast(y_true, dtype='int32'))
equal_int = tf.keras.backend.cast(equal,"int32")
num_correct = K.sum(equal_int)
ones = tf.keras.backend.cast(tf.keras.backend.ones_like(rounded), "int32")
num_total = tf.keras.backend.sum(ones)
return num_correct / num_total
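# A minimal sketch of how the helpers above fit together (illustrative shapes only;
# the preset dict C and the random pose array are hypothetical stand-ins for real data):
if __name__ == "__main__":
    C = {'frame_l': 64, 'joint_n': 25, 'joint_d': 3, 'clc_num': 10}
    p = np.random.rand(100, C['joint_n'], C['joint_d'])       # raw pose sequence, 100 frames
    p64 = zoom(p, C['frame_l'], C['joint_n'], C['joint_d'])   # resample to 64 frames
    M = get_CG(p64, C)                                        # pairwise joint distances, shape (64, 300)
    print(M.shape)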
|
481121
|
import cv2
def video_frame_generator(video_input_path):
"""Opens the video file and yields one frame at a time for decoding"""
active_video = cv2.VideoCapture(video_input_path)
total_video_frames = int(active_video.get(cv2.CAP_PROP_FRAME_COUNT))
current_frame_position = 1
yield total_video_frames
for frame in range(total_video_frames):
yield {'frame': active_video.read()[1], 'current_frame_position': current_frame_position}
current_frame_position += 1
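# A minimal usage sketch (the video path is illustrative):
#   frames = video_frame_generator("input.mp4")
#   total = next(frames)                              # first yield is the total frame count
#   for item in frames:
#       frame = item['frame']                         # decoded BGR frame (numpy array)
#       position = item['current_frame_position']     # 1-based frame index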
|
481133
|
from .update import Campaign
from .update import FirmwareImage
from .update import FirmwareManifest
from .update import UpdateAPI
|
481178
|
import argparse
import os
import time
import torch
from tensorboardX import SummaryWriter
from tqdm import tqdm
from dataset import problem
from utils.optimizer import LRScheduler
from utils import utils
def summarize_train(writer, global_step, last_time, model, opt,
inputs, targets, optimizer, loss, pred, ans):
if opt.summary_grad:
for name, param in model.named_parameters():
if not param.requires_grad:
continue
norm = torch.norm(param.grad.data.view(-1))
writer.add_scalar('gradient_norm/' + name, norm,
global_step)
writer.add_scalar('input_stats/batch_size',
targets.size(0), global_step)
if inputs is not None:
writer.add_scalar('input_stats/input_length',
inputs.size(1), global_step)
i_nonpad = (inputs != opt.src_pad_idx).view(-1).type(torch.float32)
writer.add_scalar('input_stats/inputs_nonpadding_frac',
i_nonpad.mean(), global_step)
writer.add_scalar('input_stats/target_length',
targets.size(1), global_step)
t_nonpad = (targets != opt.trg_pad_idx).view(-1).type(torch.float32)
writer.add_scalar('input_stats/target_nonpadding_frac',
t_nonpad.mean(), global_step)
writer.add_scalar('optimizer/learning_rate',
optimizer.learning_rate(), global_step)
writer.add_scalar('loss', loss.item(), global_step)
acc = utils.get_accuracy(pred, ans, opt.trg_pad_idx)
writer.add_scalar('training/accuracy',
acc, global_step)
steps_per_sec = 100.0 / (time.time() - last_time)
writer.add_scalar('global_step/sec', steps_per_sec,
global_step)
def train(train_data, model, opt, global_step, optimizer, t_vocab_size,
label_smoothing, writer):
model.train()
last_time = time.time()
pbar = tqdm(total=len(train_data.dataset), ascii=True)
for batch in train_data:
inputs = None
if opt.has_inputs:
inputs = batch.src
targets = batch.trg
pred = model(inputs, targets)
pred = pred.view(-1, pred.size(-1))
ans = targets.view(-1)
loss = utils.get_loss(pred, ans, t_vocab_size,
label_smoothing, opt.trg_pad_idx)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if global_step % 100 == 0:
summarize_train(writer, global_step, last_time, model, opt,
inputs, targets, optimizer, loss, pred, ans)
last_time = time.time()
pbar.set_description('[Loss: {:.4f}]'.format(loss.item()))
global_step += 1
pbar.update(targets.size(0))
pbar.close()
train_data.reload_examples()
return global_step
def validation(validation_data, model, global_step, t_vocab_size, val_writer,
opt):
model.eval()
total_loss = 0.0
total_cnt = 0
for batch in validation_data:
inputs = None
if opt.has_inputs:
inputs = batch.src
targets = batch.trg
with torch.no_grad():
pred = model(inputs, targets)
pred = pred.view(-1, pred.size(-1))
ans = targets.view(-1)
loss = utils.get_loss(pred, ans, t_vocab_size, 0,
opt.trg_pad_idx)
total_loss += loss.item() * len(batch)
total_cnt += len(batch)
val_loss = total_loss / total_cnt
print("Validation Loss", val_loss)
val_writer.add_scalar('loss', val_loss, global_step)
return val_loss
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--problem', required=True)
parser.add_argument('--train_step', type=int, default=200)
parser.add_argument('--batch_size', type=int, default=4096)
parser.add_argument('--max_length', type=int, default=100)
parser.add_argument('--n_layers', type=int, default=6)
parser.add_argument('--hidden_size', type=int, default=512)
parser.add_argument('--filter_size', type=int, default=2048)
parser.add_argument('--warmup', type=int, default=16000)
parser.add_argument('--val_every', type=int, default=5)
parser.add_argument('--dropout', type=float, default=0.1)
parser.add_argument('--label_smoothing', type=float, default=0.1)
parser.add_argument('--model', type=str, default='transformer')
parser.add_argument('--output_dir', type=str, default='./output')
parser.add_argument('--data_dir', type=str, default='./data')
parser.add_argument('--no_cuda', action='store_true')
parser.add_argument('--parallel', action='store_true')
parser.add_argument('--summary_grad', action='store_true')
opt = parser.parse_args()
device = torch.device('cpu' if opt.no_cuda else 'cuda')
if not os.path.exists(opt.output_dir + '/last/models'):
os.makedirs(opt.output_dir + '/last/models')
if not os.path.exists(opt.data_dir):
os.makedirs(opt.data_dir)
train_data, validation_data, i_vocab_size, t_vocab_size, opt = \
problem.prepare(opt.problem, opt.data_dir, opt.max_length,
opt.batch_size, device, opt)
if i_vocab_size is not None:
print("# of vocabs (input):", i_vocab_size)
print("# of vocabs (target):", t_vocab_size)
if opt.model == 'transformer':
from model.transformer import Transformer
model_fn = Transformer
elif opt.model == 'fast_transformer':
from model.fast_transformer import FastTransformer
model_fn = FastTransformer
if os.path.exists(opt.output_dir + '/last/models/last_model.pt'):
print("Load a checkpoint...")
last_model_path = opt.output_dir + '/last/models'
model, global_step = utils.load_checkpoint(last_model_path, device,
is_eval=False)
else:
model = model_fn(i_vocab_size, t_vocab_size,
n_layers=opt.n_layers,
hidden_size=opt.hidden_size,
filter_size=opt.filter_size,
dropout_rate=opt.dropout,
share_target_embedding=opt.share_target_embedding,
has_inputs=opt.has_inputs,
src_pad_idx=opt.src_pad_idx,
trg_pad_idx=opt.trg_pad_idx)
model = model.to(device=device)
global_step = 0
if opt.parallel:
print("Use", torch.cuda.device_count(), "GPUs")
model = torch.nn.DataParallel(model)
num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("# of parameters: {}".format(num_params))
optimizer = LRScheduler(
filter(lambda x: x.requires_grad, model.parameters()),
opt.hidden_size, opt.warmup, step=global_step)
writer = SummaryWriter(opt.output_dir + '/last')
val_writer = SummaryWriter(opt.output_dir + '/last/val')
best_val_loss = float('inf')
for t_step in range(opt.train_step):
print("Epoch", t_step)
start_epoch_time = time.time()
global_step = train(train_data, model, opt, global_step,
optimizer, t_vocab_size, opt.label_smoothing,
writer)
print("Epoch Time: {:.2f} sec".format(time.time() - start_epoch_time))
if t_step % opt.val_every != 0:
continue
val_loss = validation(validation_data, model, global_step,
t_vocab_size, val_writer, opt)
utils.save_checkpoint(model, opt.output_dir + '/last/models',
global_step, val_loss < best_val_loss)
best_val_loss = min(val_loss, best_val_loss)
if __name__ == '__main__':
main()
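# Example invocation (a sketch; the script name and problem name are illustrative,
# the flags are the ones defined in main() above):
#   python train.py --problem <problem_name> --batch_size 4096 --output_dir ./output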
|
481227
|
import threading
import time
import api
import db
DATABASE = db.getDb("bot.db")
def getTimer(bot, logger):
class Timer(threading.Thread):
def __init__(self, threadName):
super(Timer, self).__init__(name=threadName)
def run(self):
while True:
for item in DATABASE.getAllPackages():
information = api.TrackerApi.getPackageInformation(item[0], item[3])
if information["data"] and information["data"][0]["time"] != item[4]:
logger.info("Found update: " + item[0])
bot.send_message(item[2], item[5] + "\n" + information["data"][0]["data"] + "\nStatus: " +
api.getStatusFromCode(information["status"]))
DATABASE.update(item[2], item[0], information["status"], information["data"][0]["time"])
logger.info("Timer sleeping.")
time.sleep(60 * 10)
return Timer
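# A minimal usage sketch (bot and logger are provided by the surrounding application):
#   Timer = getTimer(bot, logger)
#   Timer("package-tracker-timer").start()   # polls for package updates every 10 minutes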
|
481237
|
expected_output={
"statistics":{
"vlan":{
"vlan":330,
"mdns_pkt_sent":{
"pkt_sent":1,
"ipv4_sent":{
"ipv4_sent_val":1,
"ipv4_adv_sent":0,
"ipv4_qry_sent":1
},
"ipv6_sent":{
"ipv6_sent_val":0,
"ipv6_adv_sent":0,
"ipv6_qry_sent":0
}
},
"mdns_rate_lim":0,
"mdns_pkt_rcvd":{
"pkt_rcvd":0,
"adv_rcvd":0,
"queries_rcvd":{
"qry_count":0,
"ipv4_rcvd":{
"ipv4_rcvd_val":0,
"ipv4_adv_rcvd":0,
"ipv4_qry_rcvd":0
},
"ipv6_rcvd":{
"ipv6_rcvd_val":0,
"ipv6_adv_rcvd":0,
"ipv6_qry_rcvd":0
}
}
},
"mdns_pkt_drop":0,
"qry_type":{
"PTR":{
"qry_type_val":0
},
"SRV":{
"qry_type_val":0
},
"A":{
"qry_type_val":0
},
"AAAA":{
"qry_type_val":0
},
"TXT":{
"qry_type_val":0
},
"ANY":{
"qry_type_val":0
}
}
}
}
}
|
481251
|
import os
import pytest
from S3Scanner.S3Service import S3Service
from S3Scanner.S3Bucket import BucketExists, Permission, S3BucketObject, S3Bucket
from TestUtils import TestBucketService
from S3Scanner.exceptions import AccessDeniedException, BucketMightNotExistException
from pathlib import Path
from urllib3 import disable_warnings
testingFolder = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'test/')
setupRan = False
"""
S3Service.py methods to test:
- init()
  - ✔️ Test service.aws_creds_configured is false when forceNoCreds = True
- check_bucket_exists()
  - ✔️ Test against one that exists
  - ✔️ Test against one that doesn't
- check_perm_read_acl()
  - ✔️ Test against bucket with AllUsers allowed
  - ✔️ Test against bucket with AuthUsers allowed
  - ✔️ Test against bucket with all denied
- check_perm_read()
  - ✔️ Test against bucket with AuthUsers read permission
  - ✔️ Test against bucket with AllUsers read permission
  - ✔️ Test against bucket with no read permission
- check_perm_write()
  - ✔️ Test against bucket with no write permissions
  - ✔️ Test against bucket with AuthUsers write permission
  - ✔️ Test against bucket with AllUsers write permission
  - ✔️ Test against bucket with AllUsers and AuthUsers write permission
- check_perm_write_acl()
  - ✔️ Test against bucket with AllUsers allowed
  - ✔️ Test against bucket with AuthUsers allowed
  - ✔️ Test against bucket with both AllUsers and AuthUsers allowed
  - ✔️ Test against bucket with no groups allowed
- enumerate_bucket_objects()
  - ✔️ Test against empty bucket
  - ✔️ Test against not empty bucket with read permission
  - ✔️ Test against bucket without read permission
- parse_found_acl()
  - ✔️ Test against JSON with FULL_CONTROL for AllUsers
  - ✔️ Test against JSON with FULL_CONTROL for AuthUsers
  - ✔️ Test against empty JSON
  - ✔️ Test against JSON with ReadACP for AuthUsers and Write for AllUsers
"""
def test_setup_new():
global setupRan
if setupRan: # We only need to run this once per test-run
return
# Create testingFolder if it doesn't exist
if not os.path.exists(testingFolder) or not os.path.isdir(testingFolder):
os.makedirs(testingFolder)
setupRan = True
def test_init():
test_setup_new()
s = S3Service(forceNoCreds=True)
assert s.aws_creds_configured is False
def test_bucket_exists():
test_setup_new()
s = S3Service()
# Bucket that does exist
b1 = S3Bucket('s3scanner-private')
s.check_bucket_exists(b1)
assert b1.exists is BucketExists.YES
# Bucket that doesn't exist (hopefully)
b2 = S3Bucket('asfasfasdfasdfasdf')
s.check_bucket_exists(b2)
assert b2.exists is BucketExists.NO
# Pass a thing that's not a bucket
with pytest.raises(ValueError):
s.check_bucket_exists("asdfasdf")
def test_check_perm_read():
test_setup_new()
s = S3Service()
# Bucket that no one can list
b1 = S3Bucket('s3scanner-private')
b1.exists = BucketExists.YES
s.check_perm_read(b1)
if s.aws_creds_configured:
assert b1.AuthUsersRead == Permission.DENIED
else:
assert b1.AllUsersRead == Permission.DENIED
# Bucket that only AuthenticatedUsers can list
b2 = S3Bucket('s3scanner-auth-read')
b2.exists = BucketExists.YES
s.check_perm_read(b2)
if s.aws_creds_configured:
assert b2.AuthUsersRead == Permission.ALLOWED
else:
assert b2.AllUsersRead == Permission.DENIED
# Bucket that Everyone can list
b3 = S3Bucket('s3scanner-long')
b3.exists = BucketExists.YES
s.check_perm_read(b3)
if s.aws_creds_configured:
assert b3.AuthUsersRead == Permission.ALLOWED
else:
assert b3.AllUsersRead == Permission.ALLOWED
def test_enumerate_bucket_objects():
test_setup_new()
s = S3Service()
# Empty bucket
b1 = S3Bucket('s3scanner-empty')
b1.exists = BucketExists.YES
s.check_perm_read(b1)
if s.aws_creds_configured:
assert b1.AuthUsersRead == Permission.ALLOWED
else:
assert b1.AllUsersRead == Permission.ALLOWED
s.enumerate_bucket_objects(b1)
assert b1.objects_enumerated is True
assert b1.bucketSize == 0
# Bucket with > 1000 items
if s.aws_creds_configured:
b2 = S3Bucket('s3scanner-auth-read')
b2.exists = BucketExists.YES
s.check_perm_read(b2)
assert b2.AuthUsersRead == Permission.ALLOWED
s.enumerate_bucket_objects(b2)
assert b2.objects_enumerated is True
assert b2.bucketSize == 4143
assert b2.get_human_readable_size() == "4.0KB"
else:
print("[test_enumerate_bucket_objects] Skipping test due to no AWS creds")
# Bucket without read permission
b3 = S3Bucket('s3scanner-private')
b3.exists = BucketExists.YES
s.check_perm_read(b3)
if s.aws_creds_configured:
assert b3.AuthUsersRead == Permission.DENIED
else:
assert b3.AllUsersRead == Permission.DENIED
try:
s.enumerate_bucket_objects(b3)
except AccessDeniedException:
pass
# Try to enumerate before checking if bucket exists
b4 = S3Bucket('s3scanner-enumerate-bucket')
with pytest.raises(Exception):
s.enumerate_bucket_objects(b4)
def test_check_perm_read_acl():
test_setup_new()
s = S3Service()
# Bucket with no read ACL perms
b1 = S3Bucket('s3scanner-private')
b1.exists = BucketExists.YES
s.check_perm_read_acl(b1)
if s.aws_creds_configured:
assert b1.AuthUsersReadACP == Permission.DENIED
else:
assert b1.AllUsersReadACP == Permission.DENIED
# Bucket that allows AuthenticatedUsers to read ACL
if s.aws_creds_configured:
b2 = S3Bucket('s3scanner-auth-read-acl')
b2.exists = BucketExists.YES
s.check_perm_read_acl(b2)
if s.aws_creds_configured:
assert b2.AuthUsersReadACP == Permission.ALLOWED
else:
assert b2.AllUsersReadACP == Permission.DENIED
# Bucket that allows AllUsers to read ACL
b3 = S3Bucket('s3scanner-all-readacp')
b3.exists = BucketExists.YES
s.check_perm_read_acl(b3)
assert b3.AllUsersReadACP == Permission.ALLOWED
assert b3.AllUsersWrite == Permission.DENIED
assert b3.AllUsersWriteACP == Permission.DENIED
assert b3.AuthUsersReadACP == Permission.DENIED
assert b3.AuthUsersWriteACP == Permission.DENIED
assert b3.AuthUsersWrite == Permission.DENIED
def test_check_perm_write(do_dangerous_test):
test_setup_new()
s = S3Service()
sAnon = S3Service(forceNoCreds=True)
# Bucket with no write perms
b1 = S3Bucket('flaws.cloud')
b1.exists = BucketExists.YES
s.check_perm_write(b1)
if s.aws_creds_configured:
assert b1.AuthUsersWrite == Permission.DENIED
else:
assert b1.AllUsersWrite == Permission.DENIED
if do_dangerous_test:
print("[test_check_perm_write] Doing dangerous test")
ts = TestBucketService()
danger_bucket_1 = ts.create_bucket(1) # Bucket with AuthUser Write, WriteACP permissions
try:
b2 = S3Bucket(danger_bucket_1)
b2.exists = BucketExists.YES
sAnon.check_perm_write(b2)
s.check_perm_write(b2)
assert b2.AuthUsersWrite == Permission.ALLOWED
assert b2.AllUsersWrite == Permission.DENIED
finally:
ts.delete_bucket(danger_bucket_1)
danger_bucket_2 = ts.create_bucket(2) # Bucket with AllUser Write, WriteACP permissions
try:
b3 = S3Bucket(danger_bucket_2)
b3.exists = BucketExists.YES
sAnon.check_perm_write(b3)
s.check_perm_write(b3)
assert b3.AllUsersWrite == Permission.ALLOWED
assert b3.AuthUsersWrite == Permission.UNKNOWN
finally:
ts.delete_bucket(danger_bucket_2)
# Bucket with AllUsers and AuthUser Write permissions
danger_bucket_4 = ts.create_bucket(4)
try:
b4 = S3Bucket(danger_bucket_4)
b4.exists = BucketExists.YES
sAnon.check_perm_write(b4)
s.check_perm_write(b4)
assert b4.AllUsersWrite == Permission.ALLOWED
assert b4.AuthUsersWrite == Permission.UNKNOWN
finally:
ts.delete_bucket(danger_bucket_4)
else:
print("[test_check_perm_write] Skipping dangerous test")
def test_check_perm_write_acl(do_dangerous_test):
test_setup_new()
s = S3Service()
sNoCreds = S3Service(forceNoCreds=True)
# Bucket with no permissions
b1 = S3Bucket('s3scanner-private')
b1.exists = BucketExists.YES
s.check_perm_write_acl(b1)
if s.aws_creds_configured:
assert b1.AuthUsersWriteACP == Permission.DENIED
assert b1.AllUsersWriteACP == Permission.UNKNOWN
else:
assert b1.AllUsersWriteACP == Permission.DENIED
assert b1.AuthUsersWriteACP == Permission.UNKNOWN
if do_dangerous_test:
print("[test_check_perm_write_acl] Doing dangerous tests...")
ts = TestBucketService()
# Bucket with WRITE_ACP enabled for AuthUsers
danger_bucket_3 = ts.create_bucket(3)
try:
b2 = S3Bucket(danger_bucket_3)
b2.exists = BucketExists.YES
# Check for read/write permissions so when we check for write_acl we
# send the same perms that it had originally
sNoCreds.check_perm_read(b2)
s.check_perm_read(b2)
sNoCreds.check_perm_write(b2)
s.check_perm_write(b2)
# Check for WriteACP
sNoCreds.check_perm_write_acl(b2)
s.check_perm_write_acl(b2)
# Grab permissions after our check so we can compare to original
sNoCreds.check_perm_write(b2)
s.check_perm_write(b2)
sNoCreds.check_perm_read(b2)
s.check_perm_read(b2)
if s.aws_creds_configured:
assert b2.AuthUsersWriteACP == Permission.ALLOWED
# Make sure we didn't change the original permissions
assert b2.AuthUsersWrite == Permission.ALLOWED
assert b2.AllUsersWrite == Permission.DENIED
assert b2.AllUsersRead == Permission.ALLOWED
assert b2.AuthUsersRead == Permission.UNKNOWN
else:
assert b2.AllUsersRead == Permission.ALLOWED
assert b2.AuthUsersWriteACP == Permission.UNKNOWN
except Exception as e:
raise e
finally:
ts.delete_bucket(danger_bucket_3)
# Bucket with WRITE_ACP enabled for AllUsers
danger_bucket_2 = ts.create_bucket(2)
try:
b3 = S3Bucket(danger_bucket_2)
b3.exists = BucketExists.YES
sNoCreds.check_perm_read(b3)
s.check_perm_read(b3)
sNoCreds.check_perm_write(b3)
s.check_perm_write(b3)
sNoCreds.check_perm_write_acl(b3)
s.check_perm_write_acl(b3)
sNoCreds.check_perm_write(b3)
s.check_perm_write(b3)
sNoCreds.check_perm_read(b3)
s.check_perm_read(b3)
if s.aws_creds_configured:
assert b3.AllUsersWriteACP == Permission.ALLOWED
assert b3.AuthUsersWriteACP == Permission.UNKNOWN
assert b3.AllUsersWrite == Permission.ALLOWED
else:
assert b3.AllUsersRead == Permission.ALLOWED
assert b3.AuthUsersWriteACP == Permission.UNKNOWN
except Exception as e:
raise e
finally:
ts.delete_bucket(danger_bucket_2)
# Bucket with WRITE_ACP enabled for both AllUsers and AuthUsers
danger_bucket_5 = ts.create_bucket(5)
try:
b5 = S3Bucket(danger_bucket_5)
b5.exists = BucketExists.YES
sNoCreds.check_perm_read(b5)
s.check_perm_read(b5)
sNoCreds.check_perm_write(b5)
s.check_perm_write(b5)
sNoCreds.check_perm_write_acl(b5)
s.check_perm_write_acl(b5)
sNoCreds.check_perm_write(b5)
s.check_perm_write(b5)
sNoCreds.check_perm_read(b5)
s.check_perm_read(b5)
assert b5.AllUsersWriteACP == Permission.ALLOWED
assert b5.AuthUsersWriteACP == Permission.UNKNOWN
assert b5.AllUsersWrite == Permission.DENIED
assert b5.AuthUsersWrite == Permission.DENIED
except Exception as e:
raise e
finally:
ts.delete_bucket(danger_bucket_5)
else:
print("[test_check_perm_write_acl] Skipping dangerous test...")
def test_parse_found_acl():
test_setup_new()
sAnon = S3Service(forceNoCreds=True)
b1 = S3Bucket('s3scanner-all-read-readacl')
b1.exists = BucketExists.YES
sAnon.check_perm_read_acl(b1)
assert b1.foundACL is not None
assert b1.AllUsersRead == Permission.ALLOWED
assert b1.AllUsersReadACP == Permission.ALLOWED
assert b1.AllUsersWrite == Permission.DENIED
assert b1.AllUsersWriteACP == Permission.DENIED
assert b1.AllUsersFullControl == Permission.DENIED
assert b1.AuthUsersReadACP == Permission.DENIED
assert b1.AuthUsersRead == Permission.DENIED
assert b1.AuthUsersWrite == Permission.DENIED
assert b1.AuthUsersWriteACP == Permission.DENIED
assert b1.AuthUsersFullControl == Permission.DENIED
test_acls_1 = {
'Grants': [
{
'Grantee': {
'Type': 'Group',
'URI': 'http://acs.amazonaws.com/groups/global/AllUsers'
},
'Permission': 'FULL_CONTROL'
}
]
}
b2 = S3Bucket('test-acl-doesnt-exist')
b2.exists = BucketExists.YES
b2.foundACL = test_acls_1
sAnon.parse_found_acl(b2)
assert b2.AllUsersRead == Permission.ALLOWED
assert b2.AllUsersReadACP == Permission.ALLOWED
assert b2.AllUsersWrite == Permission.ALLOWED
assert b2.AllUsersWriteACP == Permission.ALLOWED
assert b2.AllUsersFullControl == Permission.ALLOWED
assert b2.AuthUsersRead == Permission.DENIED
assert b2.AuthUsersReadACP == Permission.DENIED
assert b2.AuthUsersWrite == Permission.DENIED
assert b2.AuthUsersWriteACP == Permission.DENIED
assert b2.AuthUsersFullControl == Permission.DENIED
test_acls_2 = {
'Grants': [
{
'Grantee': {
'Type': 'Group',
'URI': 'http://acs.amazonaws.com/groups/global/AuthenticatedUsers'
},
'Permission': 'FULL_CONTROL'
}
]
}
b3 = S3Bucket('test-acl2-doesnt-exist')
b3.exists = BucketExists.YES
b3.foundACL = test_acls_2
sAnon.parse_found_acl(b3)
assert b3.AllUsersRead == Permission.DENIED
assert b3.AllUsersReadACP == Permission.DENIED
assert b3.AllUsersWrite == Permission.DENIED
assert b3.AllUsersWriteACP == Permission.DENIED
assert b3.AllUsersFullControl == Permission.DENIED
assert b3.AuthUsersRead == Permission.ALLOWED
assert b3.AuthUsersReadACP == Permission.ALLOWED
assert b3.AuthUsersWrite == Permission.ALLOWED
assert b3.AuthUsersWriteACP == Permission.ALLOWED
assert b3.AuthUsersFullControl == Permission.ALLOWED
test_acls_3 = {
'Grants': [
{
'Grantee': {
'Type': 'Group',
'URI': 'asdfasdf'
},
'Permission': 'READ'
}
]
}
b4 = S3Bucket('test-acl3-doesnt-exist')
b4.exists = BucketExists.YES
b4.foundACL = test_acls_3
sAnon.parse_found_acl(b4)
all_permissions = [b4.AllUsersRead, b4.AllUsersReadACP, b4.AllUsersWrite, b4.AllUsersWriteACP,
b4.AllUsersFullControl, b4.AuthUsersRead, b4.AuthUsersReadACP, b4.AuthUsersWrite,
b4.AuthUsersWriteACP, b4.AuthUsersFullControl]
for p in all_permissions:
assert p == Permission.DENIED
test_acls_4 = {
'Grants': [
{
'Grantee': {
'Type': 'Group',
'URI': 'http://acs.amazonaws.com/groups/global/AuthenticatedUsers'
},
'Permission': 'READ_ACP'
},
{
'Grantee': {
'Type': 'Group',
'URI': 'http://acs.amazonaws.com/groups/global/AllUsers'
},
'Permission': 'READ_ACP'
}
]
}
b5 = S3Bucket('test-acl4-doesnt-exist')
b5.exists = BucketExists.YES
b5.foundACL = test_acls_4
sAnon.parse_found_acl(b5)
assert b5.AllUsersRead == Permission.DENIED
assert b5.AllUsersReadACP == Permission.ALLOWED
assert b5.AllUsersWrite == Permission.DENIED
assert b5.AllUsersWriteACP == Permission.DENIED
assert b5.AllUsersFullControl == Permission.DENIED
assert b5.AuthUsersRead == Permission.DENIED
assert b5.AuthUsersReadACP == Permission.ALLOWED
assert b5.AuthUsersWrite == Permission.DENIED
assert b5.AuthUsersWriteACP == Permission.DENIED
assert b5.AuthUsersFullControl == Permission.DENIED
def test_check_perms_without_checking_bucket_exists():
test_setup_new()
sAnon = S3Service(forceNoCreds=True)
b1 = S3Bucket('blahblah')
with pytest.raises(BucketMightNotExistException):
sAnon.check_perm_read_acl(b1)
with pytest.raises(BucketMightNotExistException):
sAnon.check_perm_read(b1)
with pytest.raises(BucketMightNotExistException):
sAnon.check_perm_write(b1)
with pytest.raises(BucketMightNotExistException):
sAnon.check_perm_write_acl(b1)
def test_no_ssl():
test_setup_new()
S3Service(verify_ssl=False)
def test_download_file():
test_setup_new()
s = S3Service()
# Try to download a file that already exists
dest_folder = os.path.realpath(testingFolder)
Path(os.path.join(dest_folder, 'test_download_file.txt')).touch()
size = Path(os.path.join(dest_folder, 'test_download_file.txt')).stat().st_size
o = S3BucketObject(size=size, last_modified="2020-12-31_03-02-11z", key="test_download_file.txt")
b = S3Bucket("bucket-no-existo")
s.download_file(os.path.join(dest_folder, ''), b, True, o)
def test_validate_endpoint_url_nonaws():
disable_warnings()
s = S3Service()
# Test CenturyLink_Lumen
s.endpoint_url = 'https://useast.os.ctl.io'
assert s.validate_endpoint_url(use_ssl=True, verify_ssl=True, endpoint_address_style='path') is True
# Test DigitalOcean
s.endpoint_url = 'https://sfo2.digitaloceanspaces.com'
assert s.validate_endpoint_url(use_ssl=True, verify_ssl=True, endpoint_address_style='path') is True
# Test Dreamhost
s.endpoint_url = 'https://objects.dreamhost.com'
assert s.validate_endpoint_url(use_ssl=False, verify_ssl=False, endpoint_address_style='vhost') is True
# Test GCP
s.endpoint_url = 'https://storage.googleapis.com'
assert s.validate_endpoint_url(use_ssl=True, verify_ssl=True, endpoint_address_style='path') is True
# Test IBM
s.endpoint_url = 'https://s3.us-east.cloud-object-storage.appdomain.cloud'
assert s.validate_endpoint_url(use_ssl=True, verify_ssl=True, endpoint_address_style='path') is True
# Test Linode
s.endpoint_url = 'https://eu-central-1.linodeobjects.com'
assert s.validate_endpoint_url(use_ssl=True, verify_ssl=True, endpoint_address_style='path') is True
# Test Scaleway
s.endpoint_url = 'https://s3.nl-ams.scw.cloud'
assert s.validate_endpoint_url(use_ssl=True, verify_ssl=True, endpoint_address_style='path') is True
# Test Vultr
s.endpoint_url = 'https://ewr1.vultrobjects.com'
assert s.validate_endpoint_url(use_ssl=True, verify_ssl=True, endpoint_address_style='path') is True
# Test Wasabi
s.endpoint_url = 'https://s3.wasabisys.com'
assert s.validate_endpoint_url(use_ssl=True, verify_ssl=True, endpoint_address_style='path') is True
|
481279
|
class ExampleClass(object):
def __init__(self, number):
self.X = int
self.number = number
def get_number(self):
return self.X(self.number)
def main():
dict_example = {}
obj = ExampleClass(20.123)
print(type(ExampleClass))
dict_example[ExampleClass] = str(obj.get_number())
print(dict_example)
if __name__ == "__main__":
main()
|
481291
|
from tokenizers import (BertWordPieceTokenizer,
SentencePieceBPETokenizer,
ByteLevelBPETokenizer,
CharBPETokenizer)
from tokenizers import Tokenizer, models, pre_tokenizers, decoders, trainers, processors
tokenizer = BertWordPieceTokenizer("../data/bert-base-uncased-vocab.txt", lowercase=True)
print(tokenizer)
# Tokenizer(vocabulary_size=30522, model=BertWordPiece, unk_token=[UNK],
# sep_token=[SEP], cls_token=[CLS], pad_token=[PAD], mask_token=[MASK],
# clean_text=True, handle_chinese_chars=True, strip_accents=True,
# lowercase=True, wordpieces_prefix=##)
# Tokenizers provide exhaustive outputs: tokens, mapping to original string, attention/special token masks.
# They also handle model's max input lengths as well as padding (to directly encode in padded batches)
output = tokenizer.encode("Hello, y'all! How are you?")
print(output) # Encoding(num_tokens=12, attributes=[ids, type_ids, tokens, offsets, attention_mask, special_tokens_mask, overflowing])
print(f"ids: {output.ids}") # [101, 7592, 1010, 1061, 1005, 2035, 999, 2129, 2024, 2017, 1029, 102]
print(f"type_ids: {output.type_ids}") # [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
print(f"tokens: {output.tokens}") # ['[CLS]', 'hello', ',', 'y', "'", 'all', '!', 'how', 'are', 'you', '?', '[SEP]']
print(f"offsets: {output.offsets}") # [(0, 0), (0, 5), (5, 6), (7, 8), (8, 9), (9, 12), (12, 13),
# (14,17), (18, 21), (22, 25), (25, 26), (0, 0)]
print(f"attention_mask: {output.attention_mask}") # [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
print(f"special_tokens_mask: {output.special_tokens_mask}") # [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
print(f"overflowing: {output.overflowing}") # []
# Provided tokenizers
# CharBPETokenizer: The original BPE
# ByteLevelBPETokenizer: The byte level version of the BPE
# SentencePieceBPETokenizer: A BPE implementation compatible with the one used by SentencePiece
# BertWordPieceTokenizer: The famous Bert tokenizer, using WordPiece
DATAFILE = '../data/pg16457.txt'
MODELDIR = 'models'
input_text = 'This is a test'
# Training the tokenizers
print("========= CharBPETokenizer ==========")
# CharBPETokenizer
tokenizer = CharBPETokenizer()
tokenizer.train([DATAFILE], vocab_size=500)
tokenizer.save(MODELDIR, 'char_bpe')
output = tokenizer.encode(input_text)
print(output.tokens) # ['T', 'his</w>', 'is</w>', 'a</w>', 't', 'est</w>']
print("========= ByteLevelBPETokenizer ==========")
# ByteLevelBPETokenizer
tokenizer = ByteLevelBPETokenizer()
tokenizer.train([DATAFILE], vocab_size=500)
tokenizer.save(MODELDIR, 'byte_bpe')
output = tokenizer.encode(input_text)
print(output.tokens) # ['T', 'h', 'is', 'Ġis', 'Ġa', 'Ġt', 'est']
print("========= SentencePieceBPETokenizer ==========")
# SentencePieceBPETokenizer
tokenizer = SentencePieceBPETokenizer()
tokenizer.train([DATAFILE], vocab_size=500)
tokenizer.save(MODELDIR, 'tok_sp_bpe')
output = tokenizer.encode(input_text)
print(output.tokens) # ['▁T', 'h', 'is', '▁is', '▁a', '▁t', 'est']
print("========= BertWordPieceTokenizer ==========")
# BertWordPieceTokenizer
tokenizer = BertWordPieceTokenizer()
tokenizer.train([DATAFILE], vocab_size=500)
tokenizer.save(MODELDIR, 'bert_bpe')
output = tokenizer.encode(input_text)
print(output.tokens) # ['this', 'is', 'a', 't', '##est']
|
481306
|
from django.urls import path
from rest_framework import routers
from buildhistory import views
router = routers.DefaultRouter()
router.register('builders', views.BuilderAPIView, basename='builders')
router.register('builds', views.BuildHistoryAPIView, basename='builds')
router.register('files', views.InstalledFilesAPIView, basename='files')
urlpatterns = [
path('', views.all_builds, name='all_builds'),
path('buildbot2/submit', views.buildbot2_submit, name='buildbot2_submit')
]
|
481362
|
import torch as th
import torch.nn as nn
import numpy as np
class PositionalEncoding(nn.Module):
"Position Encoding module"
def __init__(self, dim_model, dropout, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = th.zeros(max_len, dim_model, dtype=th.float)
position = th.arange(0, max_len, dtype=th.float).unsqueeze(1)
div_term = th.exp(th.arange(0, dim_model, 2, dtype=th.float) *
-(np.log(10000.0) / dim_model))
pe[:, 0::2] = th.sin(position * div_term)
pe[:, 1::2] = th.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe) # Not a parameter but should be in state_dict
def forward(self, pos):
return th.index_select(self.pe, 1, pos).squeeze(0)
class Embeddings(nn.Module):
"Word Embedding module"
def __init__(self, vocab_size, dim_model):
super(Embeddings, self).__init__()
self.lut = nn.Embedding(vocab_size, dim_model)
self.dim_model = dim_model
def forward(self, x):
return self.lut(x) * np.sqrt(self.dim_model)
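# A minimal usage sketch (not part of the original module): combine the word
# embeddings with the positional signal for one toy sequence. The vocabulary size,
# model dimension and token ids below are made-up illustration values.
if __name__ == "__main__":
    dim_model, vocab_size = 16, 100
    emb = Embeddings(vocab_size, dim_model)
    pos_enc = PositionalEncoding(dim_model, dropout=0.1)
    token_ids = th.tensor([1, 5, 42, 7])        # a single toy sequence
    positions = th.arange(token_ids.size(0))    # positions 0..3
    x = emb(token_ids) + pos_enc(positions)     # shape (seq_len, dim_model)
    print(x.shape)                              # torch.Size([4, 16])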
|
481408
|
import re
S = input()
r = re.compile("^M{0,3}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})$")
print(True if r.search(S) else False)
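# Illustration (not part of the original solution): M{0,3} covers the thousands and the
# three inner groups cover hundreds, tens and units, trying the subtractive forms
# (CM, CD, XC, XL, IX, IV) before the additive ones. For example, with the same pattern r:
#   bool(r.search("MCMXCIV"))  -> True   (M + CM + XC + IV = 1994)
#   bool(r.search("IIII"))     -> False  (plain I may repeat at most three times)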
|
481409
|
from __future__ import annotations
from typing import List
import pandas as pd
from reamber.base.Property import list_props
from reamber.base.lists.notes.HoldList import HoldList
from reamber.osu.OsuHold import OsuHold
from reamber.osu.lists.notes.OsuNoteList import OsuNoteList
@list_props(OsuHold)
class OsuHoldList(HoldList[OsuHold], OsuNoteList[OsuHold]):
@staticmethod
    def read(strings: List[str], keys: int) -> OsuHoldList:
        """ A shortcut to reading OsuHold in a loop to create an OsuHoldList
:param strings: A List of strings to loop through OsuHold.read
:param keys: The number of keys
"""
return OsuHoldList(pd.DataFrame([OsuHold.read_string(s, keys, as_dict=True) for s in strings]) if strings else [])
def write(self, keys: int) -> List[str]:
return [h.write_string(keys) for h in self]
|
481418
|
import torch
import numbers
from vel.api.metrics.averaging_metric import AveragingNamedMetric
from vel.rl.api.base import OptimizerAlgoBase
from vel.math.functions import explained_variance
from vel.schedules.constant import ConstantSchedule
class PpoPolicyGradient(OptimizerAlgoBase):
""" Proximal Policy Optimization - https://arxiv.org/abs/1707.06347 """
def __init__(self, entropy_coefficient, value_coefficient, cliprange, max_grad_norm, normalize_advantage=True):
super().__init__(max_grad_norm)
self.entropy_coefficient = entropy_coefficient
self.value_coefficient = value_coefficient
self.normalize_advantage = normalize_advantage
if isinstance(cliprange, numbers.Number):
self.cliprange = ConstantSchedule(cliprange)
else:
self.cliprange = cliprange
def calculate_gradient(self, batch_info, device, model, rollout):
""" Calculate loss of the supplied rollout """
evaluator = model.evaluate(rollout)
# Part 0.0 - Rollout values
advantages = evaluator.get('rollout:estimated_advantages')
rollout_values = evaluator.get('rollout:estimated_values')
rollout_action_logprobs = evaluator.get('rollout:action:logprobs')
returns = evaluator.get('rollout:estimated_returns')
# PART 0.1 - Model evaluation
entropy = evaluator.get('model:entropy')
model_values = evaluator.get('model:estimated_values')
model_action_logprobs = evaluator.get('model:action:logprobs')
# Select the cliprange
current_cliprange = self.cliprange.value(batch_info['progress'])
# Normalize the advantages?
if self.normalize_advantage:
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
# PART 1 - policy entropy
policy_entropy = torch.mean(entropy)
# PART 2 - value function
value_output_clipped = rollout_values + torch.clamp(
model_values - rollout_values, -current_cliprange, current_cliprange
)
value_loss_part1 = (model_values - returns).pow(2)
value_loss_part2 = (value_output_clipped - returns).pow(2)
value_loss = 0.5 * torch.mean(torch.max(value_loss_part1, value_loss_part2))
# PART 3 - policy gradient loss
ratio = torch.exp(model_action_logprobs - rollout_action_logprobs)
pg_loss_part1 = -advantages * ratio
pg_loss_part2 = -advantages * torch.clamp(ratio, 1.0 - current_cliprange, 1.0 + current_cliprange)
policy_loss = torch.mean(torch.max(pg_loss_part1, pg_loss_part2))
loss_value = (
policy_loss - self.entropy_coefficient * policy_entropy + self.value_coefficient * value_loss
)
loss_value.backward()
with torch.no_grad():
approx_kl_divergence = 0.5 * torch.mean((model_action_logprobs - rollout_action_logprobs).pow(2))
clip_fraction = torch.mean((torch.abs(ratio - 1.0) > current_cliprange).to(dtype=torch.float))
return {
'policy_loss': policy_loss.item(),
'value_loss': value_loss.item(),
'policy_entropy': policy_entropy.item(),
'approx_kl_divergence': approx_kl_divergence.item(),
'clip_fraction': clip_fraction.item(),
'advantage_norm': torch.norm(advantages).item(),
'explained_variance': explained_variance(returns, rollout_values)
}
def metrics(self) -> list:
""" List of metrics to track for this learning process """
return [
AveragingNamedMetric("policy_loss"),
AveragingNamedMetric("value_loss"),
AveragingNamedMetric("policy_entropy"),
AveragingNamedMetric("approx_kl_divergence"),
AveragingNamedMetric("clip_fraction"),
AveragingNamedMetric("grad_norm"),
AveragingNamedMetric("advantage_norm"),
AveragingNamedMetric("explained_variance")
]
def create(entropy_coefficient, value_coefficient, cliprange, max_grad_norm, normalize_advantage=True):
return PpoPolicyGradient(
entropy_coefficient, value_coefficient, cliprange, max_grad_norm, normalize_advantage=normalize_advantage
)
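# A small self-contained sketch (illustration only, independent of vel) of the clipped
# surrogate objective computed in calculate_gradient above, on made-up numbers: the
# probability ratio is clamped to [1 - cliprange, 1 + cliprange] and the elementwise
# maximum (pessimistic bound) of the two losses is averaged.
if __name__ == "__main__":
    cliprange = 0.2
    advantages = torch.tensor([1.0, -0.5, 2.0])
    logprobs_new = torch.tensor([-0.9, -1.2, -0.3])
    logprobs_old = torch.tensor([-1.0, -1.0, -1.0])
    ratio = torch.exp(logprobs_new - logprobs_old)
    unclipped = -advantages * ratio
    clipped = -advantages * torch.clamp(ratio, 1.0 - cliprange, 1.0 + cliprange)
    print(torch.mean(torch.max(unclipped, clipped)))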
|
481461
|
from .window import LauncherWindow
from . import actions
__all__ = [
"LauncherWindow",
"actions"
]
|
481465
|
import pytest
from multielo import MultiElo
import numpy as np
from typing import List
@pytest.mark.parametrize(
"k, d, s, ratings, true_expected, true_new",
[
(32, 400, 1, np.array([1000, 1000]), [0.5, 0.5], [1016, 984]),
(32, 400, 1, [1200, 1000], [0.75974693, 0.24025307], [1207.68809835, 992.31190165]),
(32, 400, 1, [1000, 1200], [0.24025307, 0.75974693], [1024.31190165, 1175.68809835]),
(32, 400, 1, np.array([1200, 800]), [0.90909091, 0.09090909], [1202.90909091, 797.09090909]),
(64, 400, 1, [1200, 1000], [0.75974693, 0.24025307], [1215.37619669, 984.62380331]),
(64, 800, 1, [1200, 1000], [0.640065, 0.359935], [1223.03584001, 976.96415999]),
(32, 800, 1, [1200, 1000], [0.640065, 0.359935], [1211.51792001, 988.48207999]),
(32, 200, 1, [1200, 1000], [0.90909091, 0.09090909], [1202.90909091, 997.09090909]),
(32, 400, 1.5, [1200, 1000], [0.75974693, 0.24025307], [1207.68809835, 992.31190165]),
(32, 400, 1, [1200, 1000, 900], [0.53625579, 0.29343936, 0.17030485],
[1208.34629612, 1002.55321444, 889.10048944]),
(32, 400, 1, [1000, 1200, 900], [0.29343936, 0.53625579, 0.17030485],
[1023.88654777, 1187.01296279, 889.10048944]),
(32, 400, 1.25, [1200, 1000, 900], [0.53625579, 0.29343936, 0.17030485],
[1209.98732176, 1000.9121888, 889.10048944]),
(32, 400, 1.5, [1200, 1000, 900], [0.53625579, 0.29343936, 0.17030485],
[1211.39391517, 999.50559539, 889.10048944]),
(32, 400, 2, [1200, 1000, 900], [0.53625579, 0.29343936, 0.17030485],
[1213.67962945, 997.21988111, 889.10048944]),
(32, 400, 1.25, [1200, 1000, 900, 1050], [0.38535873, 0.21814249, 0.13458826, 0.26191052],
[1214.82857088, 1009.6423915, 900.67244749, 1024.85659012]),
]
)
def test_elo_changes(k, d, s, ratings, true_expected, true_new):
"""
Test some known values to make sure Elo is calculating the correct updates.
"""
elo = MultiElo(k_value=k, d_value=d, score_function_base=s)
assert np.allclose(elo.get_expected_scores(ratings), true_expected)
assert np.allclose(elo.get_new_ratings(ratings), true_new)
def test_zero_sum():
"""
make sure expected scores sum to 1 and rating changes are zero sum
"""
for n_players in [2, 3, 4, 10]:
for _ in range(10):
k = np.random.uniform(16, 64)
d = np.random.uniform(200, 800)
ratings = np.random.uniform(600, 1400, size=n_players)
elo = MultiElo(k_value=k, d_value=d)
assert np.allclose(elo.get_expected_scores(ratings).sum(), 1), \
f"expected ratings do not sum to 1 for k={k}, d={d}, ratings={ratings}"
assert np.allclose(elo.get_new_ratings(ratings).sum(), ratings.sum()), \
f"rating changes are not zero sum for k={k}, d={d}, ratings={ratings}"
@pytest.mark.parametrize(
"results, result_order, new_ratings",
[
([1000, 1000], [1, 1], [1000, 1000]),
([1200, 1000], [0, 0], [1191.6880983472654, 1008.3119016527346]),
([1200, 1000, 800], [1, 2, 2], [1207.06479284, 989.33333333, 803.60187383]),
([1200, 1000, 800], [1, 1, 2], [1196.39812617, 1010.66666667, 792.93520716]),
([1200, 1000, 800], [1, 1, 1], [1185.7314595, 1000, 814.2685405]),
]
)
def test_ties(results: List[float], result_order: List[int], new_ratings: List[float]):
elo = MultiElo(k_value=32, d_value=400, score_function_base=1)
assert np.allclose(elo.get_new_ratings(results, result_order=result_order), new_ratings)
def test_out_of_order_ratings():
    """If we change the order of the ratings and account for it in result_order,
    the new ratings should be the same"""
elo = MultiElo()
result_1 = elo.get_new_ratings([1200, 1000])
result_2 = elo.get_new_ratings([1000, 1200], result_order=[2, 1])
print(f"result_1: {result_1}")
print(f"result_2: {result_2}")
assert result_1[0] == result_2[1]
assert result_1[1] == result_2[0]
result_1 = elo.get_new_ratings([1200, 1000, 800], result_order=[1, 2, 2])
result_2 = elo.get_new_ratings([1000, 800, 1200], result_order=[2, 2, 1])
print(f"result_1: {result_1}")
print(f"result_2: {result_2}")
assert result_1[0] == result_2[2]
assert result_1[1] == result_2[0]
assert result_1[2] == result_2[1]
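# Reference sketch (not used by the tests): for two players, the expected scores in the
# parametrized cases above are consistent with the classic Elo formula below; MultiElo
# generalizes the idea to N players and configurable k/d/score-base parameters.
def _two_player_expected_score(rating_a: float, rating_b: float, d: float = 400) -> float:
    return 1 / (1 + 10 ** ((rating_b - rating_a) / d))
if __name__ == "__main__":
    print(_two_player_expected_score(1200, 1000))  # ~0.7597, matching the table above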
|
481473
|
import yaml
import random
import sys
import pprint
from decorator import decorator
from line_profiler import LineProfiler
color_ansi = {'yellow': '\x1b[33m',
'red': '\x1b[31m',
'blue': '\x1b[34m',
'green': '\x1b[32m',
'white': '\x1b[37m',
'black': '\x1b[30m',
'purple': '\x1b[35m',
'reset all': '\x1b[0m'}
@decorator
def profile_each_line(func, *args, **kwargs):
profiler = LineProfiler()
profiled_func = profiler(func)
retval = None
try:
retval = profiled_func(*args, **kwargs)
finally:
profiler.print_stats()
return retval
def get_supported_apps(apps_path='apps/'):
"""
    Returns a list of strings corresponding to the app_id's that are fully operational in the learning library.
Usage: ::\n
app_id_list = utils.get_supported_apps()
print app_id_list
>>> ['StochasticBanditsPureExploration', 'DuelingBanditsPureExploration', 'StochasticLinearBanditsExploreExploit', 'PoolBasedTripletMDS']
"""
import os
return [d for d in next(os.walk(os.path.dirname(apps_path)))[1] if d[0] not in {'.', '_'}]
def get_app(app_id, exp_uid, db, ell):
"""
    Returns an object corresponding to the app_id that contains methods like initExp, getQuery, etc.
Usage: ::\n
app = utils.get_app(app_id)
print app
>>> <next.apps.StochasticBanditsPureExploration.StochasticBanditsPureExploration.StochasticBanditsPureExploration object at 0x103c9dcd0>
"""
    app_id = str(app_id) # sometimes input is unicode formatted, which causes errors
next_path = 'next.apps.App'
app_module = __import__(next_path,fromlist=[''])
app_class = getattr(app_module, 'App')
return app_class(app_id, exp_uid, db, ell)
def get_app_alg(app_id,alg_id):
"""
    Returns an object corresponding to the alg_id that contains methods like initExp, getQuery, etc.
Note that each algorithm (with an alg_id) is a child of an app (with an app_id), hence the app_id input
Usage: ::\n
alg = utils.get_app_alg(app_id,alg_id)
print alg
>>> <next.apps.PoolBasedTripletMDS.RandomSampling.RandomSampling.RandomSampling object at 0x103cb7e10>
"""
    app_id = str(app_id) # sometimes input is unicode formatted, which causes errors
    alg_id = str(alg_id) # sometimes input is unicode formatted, which causes errors
    next_path = 'apps.{}.algs.{}'.format(app_id, alg_id)
alg_module = __import__(next_path, fromlist=[''])
alg_class = getattr(alg_module, 'MyAlg')
return alg_class()
def getDocUID(exp_uid,alg_uid=None):
"""
Each instance of an app (with an (app_id,exp_uid) pair) and an algorithm (with an (app_id,exp_uid,alg_id,alg_uid) tuple)
gets its own namespace. This method defines that namespace given the exp_uid, or (exp_uid,alg_uid)
Usage::\n
print utils.getDocUID(exp_uid)
>>> 'eee9d58c61d580029113ba593446d23a'
print utils.getDocUID(exp_uid,alg_uid)
>>> 'eee9d58c61d580029113ba593446d23a-f081d374abac6c009f5a74877f8b9f3c'
"""
if alg_uid==None:
return exp_uid
else:
return exp_uid + "-" + alg_uid
import os
def getNewUID():
"""
Returns length 32 string of random hex that is generated from machine state - good enough for cryptography
Probability of collision is 1 in 340282366920938463463374607431768211456
Used for unique identifiers all over the system
"""
uid = os.urandom(16).encode('hex')
return uid
from datetime import datetime
def datetimeNow(format='datetime'):
"""
Returns the current datetime in the format used throughout the system.
For consistency, one should ALWAYS call this method, do not make your own call to datetime.
Usage: ::\n
utils.datetimeNow()
>>> datetime.datetime(2015, 2, 17, 11, 5, 56, 27822)
"""
date = datetime.now()
if format=='string':
return datetime2str(date)
else:
return date
def datetime2str(obj_datetime):
"""
    Converts a datetime object into the string format used throughout the system.
For consistency, one should never use their own method of converting to string, always use this method.
Usage: ::\n
date = utils.datetimeNow()
date_str = utils.datetime2str(date)
print date_str
>>> '2015-02-17 11:11:07.489925'
"""
return str(obj_datetime)
def str2datetime(str_time):
"""
    Converts a datetime string back into a datetime object.
For consistency, one should never use their own method of converting to string, always use this method.
Usage: ::\n
date = utils.datetimeNow()
date_str = utils.datetime2str(date)
utils.str2datetime(date_str)
"""
try:
return datetime.strptime(str_time,'%Y-%m-%d %H:%M:%S.%f')
except:
return datetime.strptime(str_time,'%Y-%m-%d %H:%M:%S')
def _get_filename(target):
return target['alt_description']
def filenames_to_ids(filenames, targets):
_to_ids = filenames_to_ids
if isinstance(filenames[0], list):
return [_to_ids(files, targets) for files in filenames]
if isinstance(filenames[0], tuple):
return tuple([_to_ids(files, targets) for files in filenames])
if isinstance(filenames[0], dict):
return {k: _to_ids(v, targets) for k, v in filenames.items()}
ids = {_get_filename(target): target['target_id'] for target in targets}
not_in_targets = set(filenames) - set(ids)
if len(not_in_targets) > 0:
        msg = 'Filenames specified in init.yaml "{}" not found in the list of targets'
raise ValueError(msg.format(not_in_targets))
return [ids[filename] for filename in filenames]
def debug_print(*args, **kwargs):
color = kwargs.get('color', 'yellow')
for a in args:
if type(a) in {str}:
lines = a.split('\n')
for line in lines:
pprint_arg = pprint.pformat(line).split('\n')
for line2 in pprint_arg:
print '{}{}{}'.format(color_ansi[color],
line2,
color_ansi['reset all'])
else:
pprint_a = pprint.pformat(a).split('\n')
for line in pprint_a:
print '{}{}{}'.format(color_ansi[color],
line,
color_ansi['reset all'])
print ''
def random_string(length=20):
letters = list('qwertyuiopasdfghkjlzxcvbnm')
s = [random.choice(letters) for _ in range(length)]
s = ''.join(s)
return s
import time
def timeit(f):
"""
Utility used to time the duration of code execution. This script can be composed with any other script.
Usage::\n
def f(n):
return n**n
def g(n):
return n,n**n
answer0,dt = timeit(f)(3)
answer1,answer2,dt = timeit(g)(3)
"""
def timed(*args, **kw):
ts = time.time()
result = f(*args, **kw)
te = time.time()
# TODO: delete these three lines. Use
# `grep -Hnri ,.*,.* = .*utils.timeit` to find all locations this function
        # is used (typically in `a, b, c, dt = utils.timeit(...)(...)`). We want
# `a, dt = utils.timeit(...)(...)`.
return result, (te-ts)
return timed
|
481604
|
from typing import Dict, Generic, Optional, TypeVar, Union, List
from vkwave.api.token.token import AnyABCToken
from .strategy import ABCGetTokenStrategy, NotImplementedGetTokenStrategy
from .types import GroupId, UserId
import random
T = TypeVar("T", GroupId, UserId)
class TokenStorage(Generic[T]):
def __init__(
self,
available: Optional[Dict[T, AnyABCToken]] = None,
get_token_strategy: Optional[ABCGetTokenStrategy] = None,
):
self.tokens: Dict[T, AnyABCToken] = available or dict()
self.get_token_strategy: ABCGetTokenStrategy[T] = (
get_token_strategy or NotImplementedGetTokenStrategy[T]()
)
def append(self, id_to_add: T, token: AnyABCToken):
self.tokens[id_to_add] = token
def _get_cached(self, id_to_check: T) -> Optional[AnyABCToken]:
return self.tokens.get(id_to_check)
async def get_token(self, id_to_check: T) -> AnyABCToken:
cached = self._get_cached(id_to_check)
if cached:
return cached
token = await self.get_token_strategy.get_token(id_to_check)
self.tokens[id_to_check] = token
return token
class UserTokenStorage(Generic[T]):
def __init__(self, current_token: Union[List[AnyABCToken], AnyABCToken]):
super().__init__()
self.current_token = current_token
async def get_token(self):
if isinstance(self.current_token, list):
return random.choice(self.current_token)
else:
return self.current_token
|
481617
|
import pytest
from notion.block import NotionBlock, NotionBlockList
from notion.page import NotionPage
@pytest.fixture
def page() -> NotionPage:
return NotionPage(
blocks=NotionBlockList([
NotionBlock(id='block1', data={
'value': {
'content': ['block2', 'block3'],
},
}),
]),
)
def test_one_pass(notion, page, fetch_page, fetch_blocks):
fetch_page.return_value = page
fetch_blocks.return_value = NotionBlockList([NotionBlock(id='block2', data={}), NotionBlock(id='block3', data={})])
fetched = notion.fetch_page_recursively('100500')
assert fetched.blocks.have_block_with_id('block2')
assert fetched.blocks.have_block_with_id('block3')
def test_two_passes(notion, page, fetch_page, fetch_blocks):
fetch_page.return_value = page
fetch_blocks.side_effect = [
NotionBlockList([NotionBlock(id='block2', data={'value': {'content': ['block4', 'block5']}})]),
NotionBlockList([NotionBlock(id='block4', data={}), NotionBlock(id='block5', data={})]),
]
fetched = notion.fetch_page_recursively('100500')
assert fetched.blocks.have_block_with_id('block4')
assert fetched.blocks.have_block_with_id('block5')
def test_fetching_does_not_get_stuck_in_inifinite_loop_when_notion_does_not_return_one_of_requested_blocks(notion, page, fetch_page, fetch_blocks):
fetch_page.return_value = page
fetch_blocks.return_value = NotionBlockList([NotionBlock(id='block2', data={})]) # return only block2, despite requested block2 and block3
notion.fetch_page_recursively('100500')
fetch_page.assert_called_once()
|
481627
|
import pd_base_tests
import pdb, sys
from collections import OrderedDict
from ptf import config
from ptf.testutils import *
from ptf.thriftutils import *
from r2p2.p4_pd_rpc.ttypes import *
from res_pd_rpc.ttypes import *
from pal_rpc.ttypes import *
from mirror_pd_rpc.ttypes import *
from pkt_pd_rpc.ttypes import *
import time
dev_id = 0
handle_ipv4 = []
handle_mac = []
def addPorts(test):
swports = [188, 184, 180, 176, 172, 168, 164, 160, 156, 152, 148, 144]
test.pal.pal_port_add_all(dev_id, pal_port_speed_t.BF_SPEED_40G, pal_fec_type_t.BF_FEC_TYP_NONE)
test.pal.pal_port_enable_all(dev_id)
ports_not_up = True
print "Waiting for ports to come up..."
sys.stdout.flush()
num_tries = 12
i = 0
while ports_not_up:
ports_not_up = False
for p in swports:
x = test.pal.pal_port_oper_status_get(dev_id, p)
if x == pal_oper_status_t.BF_PORT_DOWN:
ports_not_up = True
print " port", p, "is down"
sys.stdout.flush()
time.sleep(3)
break
i = i + 1
if i >= num_tries:
break
assert ports_not_up == False
print "All ports up."
sys.stdout.flush()
return
class Test(pd_base_tests.ThriftInterfaceDataPlane):
def __init__(self):
pd_base_tests.ThriftInterfaceDataPlane.__init__(self, ["r2p2"])
def runTest(self):
sess_hdl = self.conn_mgr.client_init()
dev_tgt = DevTarget_t(0, hex_to_i16(0xFFFF))
addPorts(self)
start_ip = 0x0a010001
switch_portbase = 188
server_num = 12
core_num = 8
## write to register
jbsq_n = 3
self.client.register_write_jbsq_n(sess_hdl, dev_tgt, 0, jbsq_n)
time.sleep(1)
flags = r2p2_register_flags_t(read_hw_sync = True)
jbsq_value = self.client.register_read_jbsq_n(sess_hdl, dev_tgt, 0, flags)
print "jbsq_value", jbsq_value
for i in range(server_num):
match_spec = r2p2_ipv4_route_match_spec_t(start_ip+i)
action_spec = r2p2_act_rewrite_iface_action_spec_t(switch_portbase-4*i)
self.client.ipv4_route_table_add_with_act_rewrite_iface(sess_hdl,dev_tgt, match_spec, action_spec)
netx_mac_list = ["\x00\x00\x00\x00\x00\x01", "\x00\x00\x00\x00\x00\x02",\
"\x00\x00\x00\x00\x00\x03", "\x00\x00\x00\x00\x00\x04",\
"\x00\x00\x00\x00\x00\x05", "\x00\x00\x00\x00\x00\x06",\
"\x00\x00\x00\x00\x00\x07", "\x00\x00\x00\x00\x00\x08"]
for i in xrange(server_num):
for j in xrange(core_num):
match_spec = r2p2_send_to_curserver_match_spec_t(i, j)
dst_mac = netx_mac_list[j]
action_spec = r2p2_act_set_vfip_action_spec_t(switch_portbase-4*i,start_ip+i,dst_mac)
self.client.send_to_curserver_table_add_with_act_set_vfip(sess_hdl,dev_tgt, match_spec, action_spec)
# add entries for table "set_mac"
match_spec = r2p2_set_mac_match_spec_t(188)
action_spec = r2p2_act_set_mac_action_spec_t("\xa8\x2b\xb5\xde\x92\x2e", "\x3c\xfd\xfe\xab\xde\xd8")
result = self.client.set_mac_table_add_with_act_set_mac(sess_hdl, dev_tgt, match_spec, action_spec)
handle_mac.append(result)
match_spec = r2p2_set_mac_match_spec_t(184)
action_spec = r2p2_act_set_mac_action_spec_t("\xa8\x2b\xb5\xde\x92\x32", "\x3c\xfd\xfe\xa6\xeb\x10")
result = self.client.set_mac_table_add_with_act_set_mac(sess_hdl, dev_tgt, match_spec, action_spec)
handle_mac.append(result)
match_spec = r2p2_set_mac_match_spec_t(180)
action_spec = r2p2_act_set_mac_action_spec_t("\xa8\x2b\xb5\xde\x92\x36", "\x3c\xfd\xfe\xaa\x5d\x00")
result = self.client.set_mac_table_add_with_act_set_mac(sess_hdl, dev_tgt, match_spec, action_spec)
handle_mac.append(result)
match_spec = r2p2_set_mac_match_spec_t(176)
action_spec = r2p2_act_set_mac_action_spec_t("\xa8\x2b\xb5\xde\x92\x3a", "\x3c\xfd\xfe\xaa\x46\x68")
result = self.client.set_mac_table_add_with_act_set_mac(sess_hdl, dev_tgt, match_spec, action_spec)
handle_mac.append(result)
match_spec = r2p2_set_mac_match_spec_t(172)
action_spec = r2p2_act_set_mac_action_spec_t("\xa8\x2b\xb5\xde\x92\x3e", "\x3c\xfd\xfe\xab\xde\xf0")
result = self.client.set_mac_table_add_with_act_set_mac(sess_hdl, dev_tgt, match_spec, action_spec)
handle_mac.append(result)
match_spec = r2p2_set_mac_match_spec_t(168)
action_spec = r2p2_act_set_mac_action_spec_t("\xa8\x2b\xb5\xde\x92\x42", "\x3c\xfd\xfe\xab\xdf\x90")
result = self.client.set_mac_table_add_with_act_set_mac(sess_hdl, dev_tgt, match_spec, action_spec)
handle_mac.append(result)
match_spec = r2p2_set_mac_match_spec_t(164)
action_spec = r2p2_act_set_mac_action_spec_t("\xa8\x2b\xb5\xde\x92\x46", "\x3c\xfd\xfe\xab\xe0\x50")
result = self.client.set_mac_table_add_with_act_set_mac(sess_hdl, dev_tgt, match_spec, action_spec)
handle_mac.append(result)
match_spec = r2p2_set_mac_match_spec_t(160)
action_spec = r2p2_act_set_mac_action_spec_t("\xa8\x2b\xb5\xde\x92\x4a", "\x3c\xfd\xfe\xab\xd9\xf0")
result = self.client.set_mac_table_add_with_act_set_mac(sess_hdl, dev_tgt, match_spec, action_spec)
handle_mac.append(result)
match_spec = r2p2_set_mac_match_spec_t(156)
action_spec = r2p2_act_set_mac_action_spec_t("\xa8\x2b\xb5\xde\x92\x4e", "\x3c\xfd\xfe\xc3\xdf\xe0")
result = self.client.set_mac_table_add_with_act_set_mac(sess_hdl, dev_tgt, match_spec, action_spec)
handle_mac.append(result)
match_spec = r2p2_set_mac_match_spec_t(152)
action_spec = r2p2_act_set_mac_action_spec_t("\xa8\x2b\xb5\xde\x92\x52", "\x3c\xfd\xfe\xc3\xe9\xf0")
result = self.client.set_mac_table_add_with_act_set_mac(sess_hdl, dev_tgt, match_spec, action_spec)
handle_mac.append(result)
match_spec = r2p2_set_mac_match_spec_t(148)
action_spec = r2p2_act_set_mac_action_spec_t("\xa8\x2b\xb5\xde\x92\x56", "\x3c\xfd\xfe\xc3\xe0\x60")
result = self.client.set_mac_table_add_with_act_set_mac(sess_hdl, dev_tgt, match_spec, action_spec)
handle_mac.append(result)
match_spec = r2p2_set_mac_match_spec_t(144)
action_spec = r2p2_act_set_mac_action_spec_t("\xa8\x2b\xb5\xde\x92\x5a", "\x3c\xfd\xfe\xc3\xe9\xb0")
result = self.client.set_mac_table_add_with_act_set_mac(sess_hdl, dev_tgt, match_spec, action_spec)
handle_mac.append(result)
self.conn_mgr.complete_operations(sess_hdl);
queue_list = [0]*8
core_idx = 1
while True:
time.sleep(1)
flags = r2p2_register_flags_t(read_hw_sync = True)
for i in xrange(8):
if i == 0:
value_t = self.client.register_read_server_qlen1(sess_hdl, dev_tgt, core_idx, flags)
elif i == 1:
value_t = self.client.register_read_server_qlen2(sess_hdl, dev_tgt, core_idx, flags)
elif i == 2:
value_t = self.client.register_read_server_qlen3(sess_hdl, dev_tgt, core_idx, flags)
elif i == 3:
value_t = self.client.register_read_server_qlen4(sess_hdl, dev_tgt, core_idx, flags)
elif i == 4:
value_t = self.client.register_read_server_qlen5(sess_hdl, dev_tgt, core_idx, flags)
elif i == 5:
value_t = self.client.register_read_server_qlen6(sess_hdl, dev_tgt, core_idx, flags)
elif i == 6:
value_t = self.client.register_read_server_qlen7(sess_hdl, dev_tgt, core_idx, flags)
elif i == 7:
value_t = self.client.register_read_server_qlen8(sess_hdl, dev_tgt, core_idx, flags)
queue_list[i] = value_t[1].f1
print queue_list
print '\n'
sys.stdout.flush()
|
481632
|
import os
from tqdm import tqdm
import albumentations as A
import cv2
import matplotlib.pyplot as plt
import time
from visualizer import visualize
from PIL import Image
import pandas as pd
DIR_IMG_SRC = "data\\img\\ori"
DIR_MASK_SRC = "data\\img\\mask"
MASK_FORMAT = ".png"
IMG_FORMAT = ".jpg"
N_IMG = len(os.listdir(DIR_IMG_SRC))
N_AUG_PER_IMG = 0
DATASET = pd.read_csv("data\\label\\dataset.csv", sep=',', index_col=0)
pathDfAugmented = "data\\label\\datasetAugmented.csv"
DATASET_AUGMENTED = []
def askInfos():
global N_AUG_PER_IMG
os.system('cls')
print("##################")
print("# DATA AUGMENTER #")
print("##################\n")
    print("~~ Number of images: " + str(N_IMG) + "\n")
    print("~~ Number of copies per image: ")
    newAugMultiplier = input()
    if int(newAugMultiplier) in (0, 1):
        askInfos()
    print("~~ Total number of images after augmentation: " + str(N_IMG*int(newAugMultiplier)) + "\n")
    print("~~ Params OK? o/n: ")
confirm = input()
if(confirm == "o"):
N_AUG_PER_IMG = int(newAugMultiplier)
launchAugmentation()
elif(confirm == "n"):
askInfos()
def launchAugmentation():
transform = A.Compose([
A.Flip(p=0.5),
A.Transpose(p=0.5),
A.RandomRotate90(p=0.5)
])
for index, row in tqdm(DATASET.iterrows(), total=DATASET.shape[0]):
rawImgPath = row['x_path'].split('.')[0]
rawMaskPath = row['y_path'].split('.')[0]
baseImage = cv2.imread(row['x_path'], cv2.IMREAD_COLOR)
baseMask = cv2.imread(row['y_path'], cv2.IMREAD_GRAYSCALE)
for i in range(N_AUG_PER_IMG):
newImgPath = rawImgPath + "_aug_{:d}".format(i) + IMG_FORMAT
newMaskPath = rawMaskPath + "_aug_{:d}".format(i) + MASK_FORMAT
augmented = transform(image=baseImage, mask=baseMask)
cv2.imwrite(newImgPath, augmented['image'])
cv2.imwrite(newMaskPath, augmented['mask'])
DATASET_AUGMENTED.append([newImgPath, newMaskPath])
#print(DATASET_AUGMENTED)
df = pd.DataFrame(DATASET_AUGMENTED, columns=['x_path', 'y_path'], dtype=str)
globalDf = pd.concat([df, DATASET], ignore_index=True, sort=False, keys=['original', 'augmented'])
globalDf = globalDf.sample(frac=1).reset_index(drop=True)
    # merge the original dataset with the augmented one
globalDf.to_csv(pathDfAugmented, sep=',')
if __name__ == "__main__":
askInfos()
|
481637
|
from .coarsening import coarsen_adj
from .distortion import (perm_features, filter_adj, filter_features,
gray_color_threshold, degree_threshold,
area_threshold)
from .grid import grid_adj, grid_points, grid_mass
from .spatial import receptive_fields, fill_features
__all__ = [
'coarsen_adj',
'perm_features',
'filter_adj',
'filter_features',
'gray_color_threshold',
'degree_threshold',
'area_threshold',
'grid_adj',
'grid_points',
'grid_mass',
'receptive_fields',
'fill_features',
]
|
481654
|
import os
import json
import utils
from utils import dump_json
TESTING = utils.TESTING
dir = os.path.dirname(os.path.realpath(__file__))
class Cache:
def __init__(self, name, testing=None):
self.name = name
self.cache = None
        self.testing = TESTING if testing is None else testing
def file_path(self):
return os.path.join(dir, self.name + '.json')
def get(self, word):
if self.testing:
return None
if self.cache is None:
self.cache = {}
p = self.file_path()
if os.path.isfile(p):
with open(p, 'r') as f:
self.cache = json.load(f)
return self.cache[word] if word in self.cache else None
def put(self, word, record):
if self.testing:
return
        if self.cache is None:
            self.cache = {}
        self.cache[word] = record
with open(self.file_path(), 'w') as f:
f.write(dump_json(self.cache))
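# A small usage sketch (illustration only; the 'example' cache name and record contents
# are made up). Entries persist in <name>.json next to this module unless TESTING is set.
if __name__ == "__main__":
    cache = Cache('example')
    if cache.get('hello') is None:
        cache.put('hello', {'seen': True})
    print(cache.get('hello'))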
|
481655
|
import abc
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.multiclass import OneVsRestClassifier
from util import check_feature_array, check_multilabel_array, check_is_fitted
def decision_maker_from_args(args):
decision_makers = []
decision_maker_names = args.decision_maker
allow_multiple = False
if decision_maker_names is None:
decision_maker_names = []
elif type(decision_maker_names) == str:
decision_maker_names = [decision_maker_names]
elif type(decision_maker_names) == list:
allow_multiple = True
for decision_maker_name in decision_maker_names:
if decision_maker_name == 'max':
decision_maker = MaximumDecisionMaker()
elif decision_maker_name in ('mean', 'median', 'zero'):
decision_maker = CentralDecisionMaker(measure=decision_maker_name)
elif decision_maker_name == 'decision-tree':
decision_maker = DecisionTreeDecisionMaker()
elif decision_maker_name == 'random-forest':
decision_maker = RandomForestDecisionMaker()
elif decision_maker_name == 'k-neighbors':
decision_maker = KNeighborsDecisionMaker()
elif decision_maker_name == 'log-regression':
decision_maker = LogisticRegressionDecisionMaker()
elif decision_maker_name == 'svm':
decision_maker = SVMDecisionMaker()
elif decision_maker_name == 'naive-bayes':
decision_maker = NaiveBayesDecisionMaker()
elif decision_maker_name == 'perceptron':
decision_maker = PerceptronDecisionMaker()
else:
decision_maker = None
assert decision_maker is not None
decision_makers.append(decision_maker)
for decision_maker in decision_makers:
for arg_name in vars(args):
split = arg_name.split('decision_maker_')
if len(split) != 2:
continue
value = getattr(args, arg_name)
setattr(decision_maker, split[1], value)
if 'n_jobs' in vars(args):
decision_maker.n_jobs = args.n_jobs
if allow_multiple:
return decision_makers
else:
if len(decision_makers) == 0:
return None
else:
return decision_makers[0]
def add_decision_maker_parser_arguments(parser, allow_multiple=False):
decision_makers = ['max', 'mean', 'median', 'zero', 'decision-tree', 'random-forest', 'k-neighbors',
'log-regression', 'svm', 'naive-bayes', 'perceptron']
if allow_multiple:
parser.add_argument('--decision-maker', choices=decision_makers, nargs='+', default=decision_makers)
else:
parser.add_argument('--decision-maker', choices=[None] + decision_makers, default=None)
parser.add_argument('--decision-maker-penalty', choices=['l1', 'l2'], default='l2')
parser.add_argument('--decision-maker-C', type=float, default=1.)
parser.add_argument('--decision-maker-kernel', choices=['linear', 'poly', 'rbf', 'sigmoid'], default='linear')
parser.add_argument('--decision-maker-gamma', type=float, default=0.)
parser.add_argument('--decision-maker-max-depth', type=int, default=None)
parser.add_argument('--decision-maker-splitter', choices=['best', 'random'], default='best')
parser.add_argument('--decision-maker-criterion', choices=['gini', 'entropy'], default='gini')
parser.add_argument('--decision-maker-n-estimators', type=int, default=10)
    parser.add_argument('--decision-maker-bootstrap', type=bool, default=True)
class DecisionMaker(BaseEstimator, ClassifierMixin):
"""Base class for all DecisionMaker implementations. DecisionMakers are used to find a mapping from a multi-label
classifier to the binary predictions.
Concretely, a multi-label classifier calculates some sort of measure (e.g. the loglikelihood) for each possible
label which it provides to the DecisionMaker instance. The DecisionMaker then attempts to find a good mapping from
the scores (which can be any real number) to the binary predictions (where each value is either 1 or 0). Depending
on the DecisionMaker, more than one prediction value per sample can be set to 1.
To give an example, consider the following matrix of measures:
[[ 100, -100, 200],
[-300, 100, -300]]
A decision maker would then output a matrix that looks somewhat like this:
[[1, 0, 1],
[0, 1, 0]]
Notes
------
If the DecisionMaker implements fit(self, X, y), fit will be called when training the classifier. This allows to
implement supervised DecisionMakers that will be trained on the same training data that is used in the multi-label
classifier.
"""
__metaclass__ = abc.ABCMeta
def __init__(self):
self.n_jobs = 1
@abc.abstractmethod
def predict(self, X):
"""Decide for each feature in each sample if it is on or off.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
predictions : array, shape (n_samples, n_features)
For each sample, that is each row, the decision maker attempts to decide if the feature is on (encoded
as 1) or off (encoded as 0).
"""
pass
class MaximumDecisionMaker(DecisionMaker):
def predict(self, X):
"""Decide for each feature in each sample if it is on or off. ExtremumDecisionMaker calculates the
maximum for each row and turns on the feature with that extreme value.
Exactly one feature per row is turned on.
"""
X = check_feature_array(X)
max_indexes = np.argmax(X, axis=1)
predictions = np.zeros(X.shape, dtype=int)
predictions[range(predictions.shape[0]), max_indexes] = 1
return predictions
class CentralDecisionMaker(DecisionMaker):
def __init__(self, measure='median', threshold=0.0):
"""CentralDecisionMaker uses some measure of central tendency to make decisions.
Parameters
----------
measure : string, default: 'median'
The measure of central tendency to be used. Possible values are 'median', 'mean' and 'zero'.
threshold : float, default: 0.0
The threshold that is used to decide if a feature is on or off. The threshold can be used to introduce
bias towards either class by increasing it (bias towards off) or decreasing it (bias towards on).
"""
super(CentralDecisionMaker, self).__init__()
if measure not in ['median', 'mean', 'zero']:
raise ValueError('unknown measure %s' % measure)
self.measure = measure
self.threshold = threshold
def predict(self, X):
"""Decide for each feature in each sample if it is on or off. The decision is made by the following simple
calculation for each row x, where central_measure is the specified measure:
x_scaled = x - central_measure
predictions[x_scaled >= threshold] = 1
predictions[x_scaled < threshold] = 0
"""
X = check_feature_array(X)
central_measure = None
if self.measure == 'median':
central_measure = np.median(X, axis=1)
elif self.measure == 'mean':
central_measure = np.mean(X, axis=1)
elif self.measure == 'zero':
central_measure = 0.0
assert central_measure is not None
scaled_X = (X.T - central_measure).T
predictions = np.zeros(scaled_X.shape, dtype=int)
predictions[scaled_X >= self.threshold] = 1
return predictions
class _MultiLabelDecisionMaker(DecisionMaker):
def __init__(self):
super(_MultiLabelDecisionMaker, self).__init__()
self.model_ = None
self.n_features_ = None
def _init_model(self):
raise NotImplementedError()
def fit(self, X, y):
"""Fit the _MultiLabelDecisionMaker according to the given training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and n_features is the number of features.
y : array-like, shape (n_samples, n_features)
Binary target values, where for each sample a feature is either on (encoded as 1) or off (encoded as 0).
"""
X = check_feature_array(X)
y = check_multilabel_array(y)
if X.shape != y.shape:
raise ValueError('X (shape=%s) and y (shape=%s) must have equal shapes' % (X.shape, y.shape))
self.n_features_ = X.shape[1]
self.model_ = self._init_model().fit(X, y)
def predict(self, X):
"""Decide for each feature in each sample if it is on or off. _MultiLabelDecisionMaker uses a multi-label
classifier to predict the multi-labels for all features. However, the classifier must first be trained by
calling fit.
"""
check_is_fitted(self, 'model_', 'n_features_')
X = check_feature_array(X, self.n_features_)
predictions = self.model_.predict(X)
return predictions
class DecisionTreeDecisionMaker(_MultiLabelDecisionMaker):
def __init__(self, splitter='best', criterion='gini', max_depth=None):
super(DecisionTreeDecisionMaker, self).__init__()
self.splitter = splitter
self.criterion = criterion
self.max_depth = max_depth
def _init_model(self):
return DecisionTreeClassifier(splitter=self.splitter, criterion=self.criterion, max_depth=self.max_depth)
class RandomForestDecisionMaker(_MultiLabelDecisionMaker):
def __init__(self, n_estimators=10, bootstrap=True, criterion='gini', max_depth=None):
super(RandomForestDecisionMaker, self).__init__()
self.n_estimators = n_estimators
self.bootstrap = bootstrap
self.criterion = criterion
self.max_depth = max_depth
def _init_model(self):
return RandomForestClassifier(n_estimators=self.n_estimators, bootstrap=self.bootstrap,
criterion=self.criterion, max_depth=self.max_depth, n_jobs=self.n_jobs)
class KNeighborsDecisionMaker(_MultiLabelDecisionMaker):
def _init_model(self):
return KNeighborsClassifier()
class _BinaryRelevanceDecisionMaker(DecisionMaker):
def __init__(self):
super(_BinaryRelevanceDecisionMaker, self).__init__()
self.model_ = None
self.n_features_ = None
def _init_model(self):
raise NotImplementedError()
def fit(self, X, y):
X = check_feature_array(X)
y = check_multilabel_array(y)
if X.shape != y.shape:
raise ValueError('X (shape=%s) and y (shape=%s) must have equal shapes' % (X.shape, y.shape))
self.n_features_ = X.shape[1]
self.model_ = OneVsRestClassifier(self._init_model(), n_jobs=self.n_jobs).fit(X, y)
def predict(self, X):
check_is_fitted(self, 'model_', 'n_features_')
X = check_feature_array(X, self.n_features_)
predictions = self.model_.predict(X)
return predictions
class LogisticRegressionDecisionMaker(_BinaryRelevanceDecisionMaker):
def __init__(self, penalty='l2', C=1.):
super(LogisticRegressionDecisionMaker, self).__init__()
self.penalty = penalty
self.C = C
def _init_model(self):
return LogisticRegression(solver='liblinear', C=self.C, penalty=self.penalty)
class SVMDecisionMaker(_BinaryRelevanceDecisionMaker):
def __init__(self, C=1., penalty='l2'):
super(SVMDecisionMaker, self).__init__()
self.C = C
self.penalty = penalty
def _init_model(self):
return LinearSVC(C=self.C, dual=False, penalty=self.penalty, loss='squared_hinge')
class NaiveBayesDecisionMaker(_BinaryRelevanceDecisionMaker):
def _init_model(self):
return GaussianNB()
class PerceptronDecisionMaker(_BinaryRelevanceDecisionMaker):
def _init_model(self):
return Perceptron()
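# A minimal demonstration (not part of the module API) of the mapping described in the
# DecisionMaker docstring, on the same made-up matrix of measures. Only the unsupervised
# decision makers defined above are used, so nothing needs to be fitted.
if __name__ == "__main__":
    scores = np.array([[100., -100., 200.],
                       [-300., 100., -300.]])
    print(MaximumDecisionMaker().predict(scores))                # a single 1 per row, at the row maximum
    print(CentralDecisionMaker(measure='mean').predict(scores))  # 1 wherever a score clears the row mean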
|
481686
|
from django import forms
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.urls import reverse
from django.db.models import Q
from django.utils.translation import gettext_lazy as _
from dal import autocomplete
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, HTML, Hidden, Submit
from danceschool.core.models import Invoice
from .models import PayAtDoorFormModel
from .constants import ATTHEDOOR_PAYMENTMETHOD_CHOICES
class CashPaymentMixin(object):
'''
This mixin provides methods for cleaning fields that are used in all
cash payment forms.
'''
def clean_submissionUser(self):
user_id = self.data.get('submissionUser') or None
if user_id == 'None' or not user_id:
return None
user = User.objects.get(id=user_id)
if not user:
raise ValidationError(_('submissionUser not found.'))
return user
def clean_invoice(self):
invoice_id = self.data.get('invoice') or None
if invoice_id:
try:
invoice = Invoice.objects.get(id=invoice_id)
return invoice
except ObjectDoesNotExist:
raise ValidationError(_('Invoice not found.'))
class WillPayAtDoorForm(forms.Form):
'''
This is the form that customers fill out indicating
that they intend to provide a cash payment at-the-door.
When this form is submitted, the registration is allowed
to proceed, but the invoice is not yet marked as paid.
'''
invoice = forms.ModelChoiceField(queryset=Invoice.objects.all(), required=True)
submissionUser = forms.ModelChoiceField(queryset=User.objects.all(), required=False)
instance = forms.ModelChoiceField(queryset=PayAtDoorFormModel.objects.all(), required=True)
willPayAtDoor = forms.BooleanField(
label=_('I will pay at the door'),
required=True,
help_text=_(
'You will receive a registration confirmation email, but will be ' +
'required to complete your payment at the door to finalize your ' +
'registration.'
)
)
def __init__(self, *args, **kwargs):
subUser = kwargs.pop('user', '')
instance = kwargs.pop('instance', None)
invoiceId = kwargs.pop('invoice', None)
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_tag = False # Our template must explicitly include the <form tag>
self.helper.form_action = reverse('doorWillPayHandler')
subUser_layout = Layout(Hidden('submissionUser', subUser)) if subUser else Layout()
self.helper.layout = Layout(
HTML("""
<div class="card mt-4">
<h6 class="card-header" role="tab" id="door_headingOne">
""" + str(_('Pay at the door')) + """
</h6>
<div class="card-body">
"""),
Hidden('invoice', invoiceId),
subUser_layout,
Hidden('instance', instance),
'willPayAtDoor',
Submit('submit', _('Submit')),
HTML("""
</div>
</div>
"""),
)
super().__init__(*args, **kwargs)
class DoorPaymentForm(CashPaymentMixin, forms.Form):
'''
This is the form that staff users fill out to indicate
that they received a cash payment at-the-door.
'''
submissionUser = forms.ModelChoiceField(
queryset=User.objects.filter(Q(staffmember__isnull=False) | Q(is_staff=True)),
required=True
)
invoice = forms.ModelChoiceField(queryset=Invoice.objects.all(), required=True)
amountPaid = forms.FloatField(label=_('Amount Paid'), required=True, min_value=0)
paymentMethod = forms.ChoiceField(
label=_('Payment method'),
required=True,
initial='Cash',
choices=ATTHEDOOR_PAYMENTMETHOD_CHOICES,
)
payerEmail = forms.EmailField(label=_('Payer Email Address'), required=False)
receivedBy = forms.ModelChoiceField(
queryset=User.objects.filter(Q(staffmember__isnull=False) | Q(is_staff=True)),
label=_('Payment received by:'),
required=True,
widget=autocomplete.ModelSelect2(
url='autocompleteUser',
attrs={
# This will set the input placeholder attribute:
'data-placeholder': _('Enter a user name'),
# This will set the yourlabs.Autocomplete.minimumCharacters
# options, the naming conversion is handled by jQuery
'data-autocomplete-minimum-characters': 2,
'data-widget-maximum-values': 4,
'class': 'modern-style',
}
)
)
def __init__(self, *args, **kwargs):
subUser = kwargs.pop('user', '')
invoiceId = kwargs.pop('invoice', None)
initialAmount = kwargs.pop('initialAmount', None)
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_tag = False # Our template must explicitly include the <form tag>
self.helper.form_action = reverse('doorPaymentHandler')
self.helper.layout = Layout(
Hidden('submissionUser', subUser),
Hidden('invoice', invoiceId),
'amountPaid',
'paymentMethod',
'payerEmail',
'receivedBy',
Submit('submit', _('Submit')),
)
kwargs.update(initial={
'receivedBy': subUser,
'amountPaid': initialAmount
})
super().__init__(*args, **kwargs)
def clean_submissionUser(self):
user = super().clean_submissionUser()
if not user.has_perm('core.accept_door_payments'):
raise ValidationError(_('Invalid user submitted door payment.'))
return user
|
481688
|
import unittest
from emma import exceptions as ex
from emma.model.account import Account
from emma.model.webhook import WebHook
from tests.model import MockAdapter
class WebHookTest(unittest.TestCase):
def setUp(self):
Account.default_adapter = MockAdapter
self.webhook = WebHook(
Account(account_id="100", public_key="xxx", private_key="yyy"),
{
'webhook_id':200,
'url': u"http://example.com",
'method': u"POST",
'event': u"mailing_finish"
}
)
def test_can_delete_a_webhook(self):
del(self.webhook['webhook_id'])
with self.assertRaises(ex.NoWebHookIdError):
self.webhook.delete()
self.assertEquals(self.webhook.account.adapter.called, 0)
def test_can_delete_a_webhook2(self):
MockAdapter.expected = True
result = self.webhook.delete()
self.assertIsNone(result)
self.assertEquals(self.webhook.account.adapter.called, 1)
self.assertEquals(
self.webhook.account.adapter.call,
('DELETE', '/webhooks/200', {}))
def test_can_save_a_webhook(self):
del(self.webhook['webhook_id'])
MockAdapter.expected = 1024
result = self.webhook.save()
self.assertIsNone(result)
self.assertEquals(self.webhook.account.adapter.called, 1)
self.assertEquals(
self.webhook.account.adapter.call,
(
'POST',
'/webhooks',
{
'url': u"http://example.com",
'method': u"POST",
'event': u"mailing_finish"
}))
self.assertEquals(1024, self.webhook['webhook_id'])
def test_can_save_a_webhook2(self):
MockAdapter.expected = True
self.webhook['url'] = u"http://v2.example.com"
result = self.webhook.save()
self.assertIsNone(result)
self.assertEquals(self.webhook.account.adapter.called, 1)
self.assertEquals(
self.webhook.account.adapter.call,
(
'PUT',
'/webhooks/200',
{
'url': u"http://v2.example.com",
'method': u"POST",
'event': u"mailing_finish"
}))
|
481693
|
import os
import subprocess
from tqdm import tqdm
from multiprocessing import Pool
def convert(obj_path):
try:
load_folder = os.path.join(obj_path, 'parts_ply')
save_folder = os.path.join(obj_path, 'parts_off')
part_paths = [f.path for f in os.scandir(load_folder)]
if not os.path.exists(save_folder):
os.makedirs(save_folder)
for part in part_paths:
target_mesh = save_folder+'/'+part[-5:-3]+'off'
subprocess.run(["meshlabserver", "-i", part, "-o", target_mesh])
except Exception as ex:
return
cad_folder = './cad_sapien'
cad_classes = [f.name for f in os.scandir(cad_folder)]
for cad_category in cad_classes:
folder_path = os.path.join(cad_folder, cad_category)
object_paths = [f.path for f in os.scandir(folder_path)]
# Parallel
threads = 16 # number of threads in your computer
convert_iter = Pool(threads).imap(convert, object_paths)
for _ in tqdm(convert_iter, total=len(object_paths)):
pass
|
481722
|
import argparse, _pickle, math, os, random, sys, time, logging
random.seed(666)
import numpy as np
np.random.seed(666)
from collections import Counter
from antu.io.vocabulary import Vocabulary
from antu.io.ext_embedding_readers import glove_reader
from antu.io.datasets.single_task_dataset import DatasetSetting, SingleTaskDataset
from utils.conllu_reader import PTBReader
def main():
# Configuration file processing
...
# DyNet setting
...
# Build the dataset of the training process
## Build data reader
data_reader = PTBReader(
field_list=['word', 'tag', 'head', 'rel'],
root='0\t**root**\t_\t**rpos**\t_\t_\t0\t**rrel**\t_\t_',
spacer=r'[\t]',)
## Build vocabulary with pretrained glove
vocabulary = Vocabulary()
g_word, _ = glove_reader(cfg.GLOVE)
pretrained_vocabs = {'glove': g_word}
vocabulary.extend_from_pretrained_vocab(pretrained_vocabs)
## Setup datasets
datasets_settings = {
'train': DatasetSetting(cfg.TRAIN, True),
'dev': DatasetSetting(cfg.DEV, True),
'test': DatasetSetting(cfg.TEST, True),}
datasets = SingleTaskDataset(vocabulary, datasets_settings, data_reader)
counters = {'word': Counter(), 'tag': Counter(), 'rel': Counter()}
datasets.build_dataset(
counters, no_pad_namespace={'rel'}, no_unk_namespace={'rel'})
# Build model
...
# Train model
train_batch = datasets.get_batches('train', cfg.TRAIN_BATCH_SIZE, True, cmp, True)
valid_batch = datasets.get_batches('dev', cfg.TEST_BATCH_SIZE, True, cmp, False)
test_batch = datasets.get_batches('test', cfg.TEST_BATCH_SIZE, True, cmp, False)
if __name__ == '__main__':
main()
|
481740
|
from spacy.pipeline import EntityRuler
def prepare_patterns(patterns, label):
entity_ruler_patterns = []
for pattern in patterns:
entity_ruler_patterns += [{"label": label, "pattern": pattern}]
return entity_ruler_patterns
def init_entity_ruler(nlp, patterns, label):
string_store = nlp.vocab.strings
if label not in string_store:
string_store.add(label)
ruler = EntityRuler(nlp)
ruler.add_patterns(prepare_patterns(patterns, label))
return ruler
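# Example usage sketch (illustration only; assumes the spaCy v2-style pipeline API implied
# by the EntityRuler(nlp) constructor above, and "Acme Corp"/"ORG" are made-up values):
#
#   import spacy
#   nlp = spacy.blank("en")
#   ruler = init_entity_ruler(nlp, ["Acme Corp"], "ORG")
#   nlp.add_pipe(ruler)
#   doc = nlp("She joined Acme Corp last year.")
#   print([(ent.text, ent.label_) for ent in doc.ents])  # [('Acme Corp', 'ORG')]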
|
481743
|
from .test_array import *
from .test_document import *
from .test_scalar import *
from .test_capi import *
|
481819
|
import logging
import sys
from . import Genderize, GenderizeException
logging.basicConfig(level=logging.WARNING)
g = Genderize()
returncode = 0 # no error
for line in sys.stdin:
name = line.strip()
try:
data = g.get1(name)
print("{data[name]}: {data[gender]}".format(data=data))
except GenderizeException:
returncode = 1 # at least one lookup failed
logging.error("Couldn't look up gender for %r", name, exc_info=True)
exit(returncode)
|
481834
|
import torch
from torchvision import datasets
from torchvision.transforms import ToTensor, Compose, Normalize
from torch.utils.data import Dataset
import random
import numpy as np
import torch.nn.functional as F
from src.utils.custom_typing import ColoredMNISTData
from torch.utils.data import DataLoader
import os
class ColoredMNISTDataset(Dataset):
@staticmethod
def get_random_colors():
rgb_code_list = [
(255.0, 0.0, 0.0),
(255.0, 128.0, 0.0),
(255.0, 255.0, 0.0),
(128.0, 255.0, 0.0),
(0.0, 255.0, 0.0),
(0.0, 255.0, 128.0),
(0.0, 255.0, 255.0),
(0.0, 128.0, 255.0),
(0.0, 0.0, 255.0),
(128.0, 0.0, 255.0),
(255.0, 0.0, 255.0),
(255.0, 0.0, 128.0),
]
        length = len(rgb_code_list)
        bg_index = random.randint(0, length - 1)
        fg_index = random.randint(0, length - 1)
color_bg = rgb_code_list[bg_index]
color_fg = rgb_code_list[fg_index]
return color_bg, color_fg, bg_index, fg_index
@staticmethod
def create_colored_pairs(image, rgb_color_bg, rgb_color_fg):
"""
        Get an MNIST image and generate two new images by changing the background and foreground of the image
:param image: Array whose values are in the range of [0.0, 1.0]
"""
index_background = (image < 0.5).long()
index_foreground = (image >= 0.5).long()
keep_background = index_background * image
keep_foreground = index_foreground * image
index_background = index_background - keep_background
index_foreground = keep_foreground
colored_background = torch.stack(
[
rgb_color_bg[0] * index_background + keep_foreground * 255.0,
rgb_color_bg[1] * index_background + keep_foreground * 255.0,
rgb_color_bg[2] * index_background + keep_foreground * 255.0,
],
axis=2,
)
colored_foreground = torch.stack(
[
rgb_color_fg[0] * index_foreground + keep_background * 255.0,
rgb_color_fg[1] * index_foreground + keep_background * 255.0,
rgb_color_fg[2] * index_foreground + keep_background * 255.0,
],
axis=2,
)
return colored_background.permute(2, 0, 1), colored_foreground.permute(2, 0, 1)
def __init__(self, train=True, data_folder="data") -> None:
super().__init__()
if not os.path.isdir(data_folder):
os.makedirs(data_folder)
self.data = datasets.MNIST(
root=data_folder, train=train, download=True, transform=ToTensor()
)
def __getitem__(self, index):
image, digit_label = self.data[index]
# image /= 255
rgb_color_bg, rgb_color_fg, bg_label, fg_label = self.get_random_colors()
bg_digit, fg_digit = self.create_colored_pairs(
image=image.squeeze(0), rgb_color_bg=rgb_color_bg, rgb_color_fg=rgb_color_fg
)
fg_digit /= 255
bg_digit /= 255
fg_label = torch.tensor(fg_label, dtype=torch.float32)
bg_label = torch.tensor(bg_label, dtype=torch.float32)
digit_label = torch.tensor(digit_label, dtype=torch.float32)
return ColoredMNISTData(
bg=bg_digit,
fg=fg_digit,
fg_label=fg_label,
bg_label=bg_label,
digit_label=digit_label,
)
def __len__(self):
return len(self.data)
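# Hedged usage sketch (not part of the original module): the folder name is an assumption,
# and the field access assumes ColoredMNISTData exposes its constructor arguments as
# attributes; MNIST is downloaded on first use.
if __name__ == "__main__":
    dataset = ColoredMNISTDataset(train=True, data_folder="data")
    item = dataset[0]
    # bg/fg are 3-channel colored digits scaled to [0, 1]; expected shape torch.Size([3, 28, 28])
    print(item.bg.shape, item.fg.shape, item.digit_label)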
|
481871
|
from pycimc import UcsServer
import config
for address in config.SERVERS:
with UcsServer(address, config.USERNAME, config.PASSWORD) as server:
server.get_fw_versions()
out_string = server.ipaddress + ','
for key,value in server.inventory['fw'].items():
path_list = key.split('/')[2:]
path = '/'.join(path_list)
out_string += path + ',' + value + ','
        print(out_string)
|
481886
|
import os
import json
import logging
import random
from transformers import BertTokenizer, BertForMaskedLM, BertConfig
from transformers import AlbertTokenizer, AlbertForMaskedLM, AlbertConfig
from transformers import RobertaTokenizer, RobertaForMaskedLM, RobertaConfig
from transformers import AutoConfig
import torch
import torch.nn.functional as F
import numpy as np
logger = logging.getLogger(__name__)
class Prober():
def __init__(self, args, random_init='none'):
assert(random_init in ['none', 'all', 'embedding'])
super().__init__()
self._model_device = 'cpu'
model_name = args.model_name
vocab_name = model_name
if args.model_dir is not None:
# load bert model from file
model_name = str(args.model_dir) + "/"
vocab_name = model_name
logger.info("loading BERT model from {}".format(model_name))
# Load pre-trained model tokenizer (vocabulary)
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
if torch.cuda.device_count() > 1:
torch.cuda.manual_seed_all(args.seed)
config = AutoConfig.from_pretrained(model_name)
if isinstance(config, AlbertConfig):
self.model_type = 'albert'
self.tokenizer = AlbertTokenizer.from_pretrained(vocab_name)
self.mlm_model = AlbertForMaskedLM.from_pretrained(model_name)
if random_init == 'all':
logger.info('Random initialize model...')
self.mlm_model = AlbertForMaskedLM(self.mlm_model.config)
self.base_model = self.mlm_model.albert
elif isinstance(config, RobertaConfig):
self.model_type = 'roberta'
self.tokenizer = RobertaTokenizer.from_pretrained(vocab_name)
self.mlm_model = RobertaForMaskedLM.from_pretrained(model_name)
if random_init == 'all':
logger.info('Random initialize model...')
self.mlm_model = RobertaForMaskedLM(self.mlm_model.config)
self.base_model = self.mlm_model.roberta
elif isinstance(config, BertConfig):
self.model_type = 'bert'
self.tokenizer = BertTokenizer.from_pretrained(vocab_name)
self.mlm_model = BertForMaskedLM.from_pretrained(model_name)
if random_init == 'all':
logger.info('Random initialize model...')
self.mlm_model = BertForMaskedLM(self.mlm_model.config)
self.base_model = self.mlm_model.bert
else:
            raise ValueError('Model %s not supported yet!' % model_name)
self.mlm_model.eval()
if random_init == 'embedding':
logger.info('Random initialize embedding layer...')
self.mlm_model._init_weights(self.base_model.embeddings.word_embeddings)
# original vocab
self.map_indices = None
self.vocab = list(self.tokenizer.get_vocab().keys())
logger.info('Vocab size: %d'%len(self.vocab))
self._init_inverse_vocab()
self.MASK = self.tokenizer.mask_token
self.EOS = self.tokenizer.eos_token
self.CLS = self.tokenizer.cls_token
self.SEP = self.tokenizer.sep_token
self.UNK = self.tokenizer.unk_token
# print(self.MASK, self.EOS, self.CLS, self.SEP, self.UNK)
self.pad_id = self.inverse_vocab[self.tokenizer.pad_token]
self.unk_index = self.inverse_vocab[self.tokenizer.unk_token]
# used to output top-k predictions
self.k = args.k
def _cuda(self):
self.mlm_model.cuda()
def try_cuda(self):
"""Move model to GPU if one is available."""
if torch.cuda.is_available():
if self._model_device != 'cuda':
logger.info('Moving model to CUDA')
self._cuda()
self._model_device = 'cuda'
else:
logger.info('No CUDA found')
def init_indices_for_filter_logprobs(self, vocab_subset, logger=None):
index_list = []
new_vocab_subset = []
for word in vocab_subset:
tokens = self.tokenizer.tokenize(' '+word)
if (len(tokens) == 1) and (tokens[0] != self.UNK):
index_list.append(self.tokenizer.convert_tokens_to_ids(tokens)[0])
new_vocab_subset.append(word)
else:
msg = "word {} from vocab_subset not in model vocabulary!".format(word)
if logger is not None:
logger.warning(msg)
else:
logger.info("WARNING: {}".format(msg))
indices = torch.as_tensor(index_list)
return indices, index_list
def _init_inverse_vocab(self):
self.inverse_vocab = {w: i for i, w in enumerate(self.vocab)}
def get_id(self, string):
tokenized_text = self.tokenizer.tokenize(string)
indexed_string = self.tokenizer.convert_tokens_to_ids(tokenized_text)
if self.map_indices is not None:
# map indices to subset of the vocabulary
indexed_string = self.convert_ids(indexed_string)
return indexed_string
def _get_input_tensors_batch_train(self, sentences_list, samples_list):
tokens_tensors_list = []
segments_tensors_list = []
masked_indices_list = []
tokenized_text_list = []
mlm_labels_tensor_list = []
mlm_label_ids = []
max_tokens = 0
for (sentences, samples) in zip(sentences_list, samples_list):
tokens_tensor, segments_tensor, masked_indices, tokenized_text, mlm_labels_tensor, mlm_label_id = self.__get_input_tensors(sentences, mlm_label=samples['obj_label'])
tokens_tensors_list.append(tokens_tensor)
segments_tensors_list.append(segments_tensor)
masked_indices_list.append(masked_indices)
tokenized_text_list.append(tokenized_text)
mlm_labels_tensor_list.append(mlm_labels_tensor)
mlm_label_ids.append(mlm_label_id)
if (tokens_tensor.shape[1] > max_tokens):
max_tokens = tokens_tensor.shape[1]
# apply padding and concatenate tensors
# use [PAD] for tokens and 0 for segments
final_tokens_tensor = None
final_segments_tensor = None
final_attention_mask = None
final_mlm_labels_tensor = None
for tokens_tensor, segments_tensor, mlm_labels_tensor in zip(tokens_tensors_list, segments_tensors_list, mlm_labels_tensor_list):
dim_tensor = tokens_tensor.shape[1]
            pad_length = max_tokens - dim_tensor
            attention_tensor = torch.full([1, dim_tensor], 1, dtype=torch.long)
            if pad_length > 0:
                pad_1 = torch.full([1, pad_length], self.pad_id, dtype=torch.long)
                pad_2 = torch.full([1, pad_length], 0, dtype=torch.long)
                attention_pad = torch.full([1, pad_length], 0, dtype=torch.long)
                pad_3 = torch.full([1, pad_length], -100, dtype=torch.long)
                tokens_tensor = torch.cat((tokens_tensor, pad_1), dim=1)
                segments_tensor = torch.cat((segments_tensor, pad_2), dim=1)
                attention_tensor = torch.cat((attention_tensor, attention_pad), dim=1)
                mlm_labels_tensor = torch.cat((mlm_labels_tensor, pad_3), dim=1)
if final_tokens_tensor is None:
final_tokens_tensor = tokens_tensor
final_segments_tensor = segments_tensor
final_attention_mask = attention_tensor
final_mlm_labels_tensor = mlm_labels_tensor
else:
final_tokens_tensor = torch.cat((final_tokens_tensor,tokens_tensor), dim=0)
final_segments_tensor = torch.cat((final_segments_tensor,segments_tensor), dim=0)
final_attention_mask = torch.cat((final_attention_mask,attention_tensor), dim=0)
final_mlm_labels_tensor = torch.cat((final_mlm_labels_tensor,mlm_labels_tensor), dim=0)
return final_tokens_tensor, final_segments_tensor, final_attention_mask, masked_indices_list, tokenized_text_list, final_mlm_labels_tensor, mlm_label_ids
def __get_input_tensors_batch(self, sentences_list):
tokens_tensors_list = []
segments_tensors_list = []
masked_indices_list = []
tokenized_text_list = []
max_tokens = 0
for sentences in sentences_list:
tokens_tensor, segments_tensor, masked_indices, tokenized_text = self.__get_input_tensors(sentences)
tokens_tensors_list.append(tokens_tensor)
segments_tensors_list.append(segments_tensor)
masked_indices_list.append(masked_indices)
tokenized_text_list.append(tokenized_text)
if (tokens_tensor.shape[1] > max_tokens):
max_tokens = tokens_tensor.shape[1]
# logger.info("MAX_TOKENS: {}".format(max_tokens))
# apply padding and concatenate tensors
# use [PAD] for tokens and 0 for segments
final_tokens_tensor = None
final_segments_tensor = None
final_attention_mask = None
for tokens_tensor, segments_tensor in zip(tokens_tensors_list, segments_tensors_list):
dim_tensor = tokens_tensor.shape[1]
            pad_length = max_tokens - dim_tensor
            attention_tensor = torch.full([1, dim_tensor], 1, dtype=torch.long)
            if pad_length > 0:
                pad_1 = torch.full([1, pad_length], self.pad_id, dtype=torch.long)
                pad_2 = torch.full([1, pad_length], 0, dtype=torch.long)
                attention_pad = torch.full([1, pad_length], 0, dtype=torch.long)
                tokens_tensor = torch.cat((tokens_tensor, pad_1), dim=1)
                segments_tensor = torch.cat((segments_tensor, pad_2), dim=1)
                attention_tensor = torch.cat((attention_tensor, attention_pad), dim=1)
if final_tokens_tensor is None:
final_tokens_tensor = tokens_tensor
final_segments_tensor = segments_tensor
final_attention_mask = attention_tensor
else:
final_tokens_tensor = torch.cat((final_tokens_tensor,tokens_tensor), dim=0)
final_segments_tensor = torch.cat((final_segments_tensor,segments_tensor), dim=0)
final_attention_mask = torch.cat((final_attention_mask,attention_tensor), dim=0)
# logger.info(final_tokens_tensor)
# logger.info(final_segments_tensor)
# logger.info(final_attention_mask)
# logger.info(final_tokens_tensor.shape)
# logger.info(final_segments_tensor.shape)
# logger.info(final_attention_mask.shape)
return final_tokens_tensor, final_segments_tensor, final_attention_mask, masked_indices_list, tokenized_text_list
def __get_input_tensors(self, sentences, mlm_label=None):
if len(sentences) > 2:
logger.info(sentences)
raise ValueError("BERT accepts maximum two sentences in input for each data point")
first_tokenized_sentence = [self.tokenizer.tokenize(token) if ((not token.startswith('[unused')) and (token != self.MASK)) else [token] for token in sentences[0].split()]
first_tokenized_sentence = [item for sublist in first_tokenized_sentence for item in sublist]
if self.model_type == 'roberta':
first_tokenized_sentence = self.tokenizer.tokenize(sentences[0])
first_segment_id = np.zeros(len(first_tokenized_sentence), dtype=int).tolist()
# add [SEP] token at the end
first_tokenized_sentence.append(self.SEP)
first_segment_id.append(0)
        if len(sentences) > 1:
            second_tokenized_sentence = [self.tokenizer.tokenize(token) if not token.startswith('[unused') else [token] for token in sentences[1].split()]
            second_tokenized_sentence = [item for sublist in second_tokenized_sentence for item in sublist]
            if self.model_type == 'roberta':
                second_tokenized_sentence = self.tokenizer.tokenize(sentences[1])
            second_segment_id = np.full(len(second_tokenized_sentence), 1, dtype=int).tolist()
            # add [SEP] token at the end
            second_tokenized_sentence.append(self.SEP)
            second_segment_id.append(1)
            tokenized_text = first_tokenized_sentence + second_tokenized_sentence
segments_ids = first_segment_id + second_segment_id
else:
tokenized_text = first_tokenized_sentence
segments_ids = first_segment_id
# add [CLS] token at the beginning
tokenized_text.insert(0,self.CLS)
segments_ids.insert(0,0)
# look for masked indices
masked_indices = []
for i in range(len(tokenized_text)):
token = tokenized_text[i]
if token == self.MASK:
masked_indices.append(i)
indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)
# Convert inputs to PyTorch tensors
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
if mlm_label is None:
return tokens_tensor, segments_tensors, masked_indices, tokenized_text
# Handle mlm_label
mlm_labels = np.full(len(tokenized_text), -100, dtype=int).tolist()
tmp_ids = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(' '+mlm_label))
assert(len(tmp_ids) == 1)
mlm_labels[masked_indices[-1]] = tmp_ids[0]
mlm_labels_tensor = torch.tensor([mlm_labels])
return tokens_tensor, segments_tensors, masked_indices, tokenized_text, mlm_labels_tensor, tmp_ids[0]
def __get_token_ids_from_tensor(self, indexed_string):
token_ids = []
if self.map_indices is not None:
# map indices to subset of the vocabulary
indexed_string = self.convert_ids(indexed_string)
token_ids = np.asarray(indexed_string)
else:
token_ids = indexed_string
return token_ids
    def get_batch_generation(self, sentences_list, logger=None,
try_cuda=True):
if not sentences_list:
return None
if try_cuda:
self.try_cuda()
tokens_tensor, segments_tensor, attention_mask_tensor, masked_indices_list, tokenized_text_list = self.__get_input_tensors_batch(sentences_list)
if logger is not None:
logger.debug("\n{}\n".format(tokenized_text_list))
with torch.no_grad():
            # older transformers releases return a tuple here; take the prediction scores
            logits = self.mlm_model(
                input_ids=tokens_tensor.to(self._model_device),
                token_type_ids=segments_tensor.to(self._model_device),
                attention_mask=attention_mask_tensor.to(self._model_device),
            )[0]
log_probs = F.log_softmax(logits, dim=-1).cpu()
token_ids_list = []
for indexed_string in tokens_tensor.numpy():
token_ids_list.append(self.__get_token_ids_from_tensor(indexed_string))
return log_probs, token_ids_list, masked_indices_list
def run_batch(self, sentences_list, samples_list, try_cuda=True, training=True, filter_indices=None, index_list=None, vocab_to_common_vocab=None):
if try_cuda and torch.cuda.device_count() > 0:
self.try_cuda()
tokens_tensor, segments_tensor, attention_mask_tensor, masked_indices_list, tokenized_text_list, mlm_labels_tensor, mlm_label_ids = self._get_input_tensors_batch_train(sentences_list, samples_list)
if training:
self.mlm_model.train()
loss = self.mlm_model(
input_ids=tokens_tensor.to(self._model_device),
token_type_ids=segments_tensor.to(self._model_device),
attention_mask=attention_mask_tensor.to(self._model_device),
masked_lm_labels=mlm_labels_tensor.to(self._model_device),
)
loss = loss[0]
else:
self.mlm_model.eval()
with torch.no_grad():
loss, logits = self.mlm_model(
input_ids=tokens_tensor.to(self._model_device),
token_type_ids=segments_tensor.to(self._model_device),
attention_mask=attention_mask_tensor.to(self._model_device),
masked_lm_labels=mlm_labels_tensor.to(self._model_device),
)
log_probs = F.log_softmax(logits, dim=-1).cpu()
if training:
return loss
else:
# During testing, return accuracy and top-k predictions
tot = log_probs.shape[0]
cor = 0
preds = []
topk = []
common_vocab_loss = []
for i in range(log_probs.shape[0]):
masked_index = masked_indices_list[i][0]
log_prob = log_probs[i][masked_index]
mlm_label = mlm_label_ids[i]
if filter_indices is not None:
log_prob = log_prob.index_select(dim=0, index=filter_indices)
pred_common_vocab = torch.argmax(log_prob)
pred = index_list[pred_common_vocab]
# get top-k predictions
topk_preds = []
topk_log_prob, topk_ids = torch.topk(log_prob, self.k)
for log_prob_i, idx in zip(topk_log_prob, topk_ids):
ori_idx = index_list[idx]
token = self.vocab[ori_idx]
topk_preds.append({'token': token, 'log_prob': log_prob_i.item()})
topk.append(topk_preds)
# compute entropy on common vocab
common_logits = logits[i][masked_index].cpu().index_select(dim=0, index=filter_indices)
common_log_prob = -F.log_softmax(common_logits, dim=-1)
common_label_id = vocab_to_common_vocab[mlm_label]
common_vocab_loss.append(common_log_prob[common_label_id].item())
else:
pred = torch.argmax(log_prob)
topk.append([])
if pred == mlm_labels_tensor[i][masked_index]:
cor += 1
preds.append(1)
else:
preds.append(0)
return log_probs, cor, tot, preds, topk, loss, common_vocab_loss
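# Hedged usage sketch (kept as comments; the argparse wiring that normally builds `args`
# is not shown in this file, so the Namespace fields below are assumptions drawn from the
# attribute accesses above):
# from argparse import Namespace
# args = Namespace(model_name='bert-base-cased', model_dir=None, seed=42, k=10)
# prober = Prober(args)
# log_probs, token_ids, masked_indices = prober.get_batch_generation(
#     [['The capital of France is [MASK] .']], try_cuda=False)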
|
481904
|
import ujson
from marshmallow import ValidationError
from .utils import validator
def lambda_handler(event, context):
try:
body = ujson.loads(event['body'])
result = validator.UserLoginSchema()
res = not bool(result.validate(body))
if res:
return {
"statusCode": 200,
"body": ujson.dumps({
"message": "Welcome !",
"data": {
"token": result.load(body)['token']
}
})
}
else:
return {
"statusCode": 400,
"body": ujson.dumps({
"message": "Error !",
"data": result.validate(body)
})
}
except ValidationError as err:
return {
"statusCode": 400,
"body": ujson.dumps({
"message": err.messages
})
}
except KeyError as error:
return {
"statusCode": 400,
"body": ujson.dumps({
"message": "Something went wrong. Unable to parse data ! " + str(error)
})
}
def token_refresh(event, context):
try:
body = ujson.loads(event['body'])
result = validator.RefreshTokenSchema()
res = not bool(result.validate(body))
if res:
return {
"statusCode": 200,
"body": ujson.dumps({
"message": None,
"data": result.load(body)
})
}
else:
return {
"statusCode": 400,
"body": ujson.dumps({
"message": "Error !",
"data": result.validate(body)
})
}
except ValidationError as err:
return {
"statusCode": 400,
"body": ujson.dumps({
"message": err.messages
})
}
except KeyError:
return {
"statusCode": 400,
"body": ujson.dumps({"message": "Something went wrong. Unable to parse data !"})
}
|
481920
|
import chainer
import chainer.links as L
import chainer.functions as F
import env
class Audio_Stream_Light(chainer.Chain):
# @chainer.static_graph
def __call__(self, spec):
a = F.leaky_relu(self.bn1(self.conv1(spec)))
# a = F.leaky_relu(self.bn2(self.conv2(a)))
a = F.leaky_relu(self.bn3(self.conv3(a)))
a = F.leaky_relu(self.bn4(self.conv4(a)))
a = F.leaky_relu(self.bn5(self.conv5(a)))
a = F.leaky_relu(self.bn6(self.conv6(a)))
a = F.leaky_relu(self.bn7(self.conv7(a)))
a = F.leaky_relu(self.bn8(self.conv8(a)))
# a = F.leaky_relu(self.bn9(self.conv9(a)))
# a = F.leaky_relu(self.bn10(self.conv10(a)))
# a = F.leaky_relu(self.bn11(self.conv11(a)))
# a = F.leaky_relu(self.bn12(self.conv12(a)))
# a = F.leaky_relu(self.bn13(self.conv13(a)))
# a = F.leaky_relu(self.bn14(self.conv14(a)))
a = F.leaky_relu(self.bn15(self.conv15(a)))
return a # (b, 8, 301, 257)
def __init__(self, trained=None):
super(Audio_Stream_Light, self).__init__()
with self.init_scope():
initial = chainer.initializers.HeNormal()
self.conv1 = L.DilatedConvolution2D(
in_channels=env.AUDIO_CHANNELS, out_channels=96,
stride=1, ksize=(1, 7), dilate=(1, 1), pad=(0, 3),
nobias=True, initialW=initial)
# self.conv2 = L.DilatedConvolution2D(
# in_channels=96, out_channels=96,
# stride=1, ksize=(7, 1), dilate=(1, 1), pad=(3, 0),
# nobias=True, initialW=initial)
self.conv3 = L.DilatedConvolution2D(
in_channels=96, out_channels=96,
stride=1, ksize=(5, 5), dilate=(1, 1), pad=(2, 2),
nobias=True, initialW=initial)
self.conv4 = L.DilatedConvolution2D(
in_channels=96, out_channels=96,
stride=1, ksize=(5, 5), dilate=(2, 1), pad=(4, 2),
nobias=True, initialW=initial)
self.conv5 = L.DilatedConvolution2D(
in_channels=96, out_channels=96,
stride=1, ksize=(5, 5), dilate=(4, 1), pad=(8, 2),
nobias=True, initialW=initial)
self.conv6 = L.DilatedConvolution2D(
in_channels=96, out_channels=96,
stride=1, ksize=(5, 5), dilate=(8, 1), pad=(16, 2),
nobias=True, initialW=initial)
self.conv7 = L.DilatedConvolution2D(
in_channels=96, out_channels=96,
stride=1, ksize=(5, 5), dilate=(16, 1), pad=(32, 2),
nobias=True, initialW=initial)
self.conv8 = L.DilatedConvolution2D(
in_channels=96, out_channels=96,
stride=1, ksize=(5, 5), dilate=(32, 1), pad=(64, 2),
nobias=True, initialW=initial)
# self.conv9 = L.DilatedConvolution2D(
# in_channels=96, out_channels=96,
# stride=1, ksize=(5, 5), dilate=(1, 1), pad=(2, 2),
# nobias=True, initialW=initial)
# self.conv10 = L.DilatedConvolution2D(
# in_channels=96, out_channels=96,
# stride=1, ksize=(5, 5), dilate=(2, 2), pad=(4, 4),
# nobias=True, initialW=initial)
# self.conv11 = L.DilatedConvolution2D(
# in_channels=96, out_channels=96,
# stride=1, ksize=(5, 5), dilate=(4, 4), pad=(8, 8),
# nobias=True, initialW=initial)
# self.conv12 = L.DilatedConvolution2D(
# in_channels=96, out_channels=96,
# stride=1, ksize=(5, 5), dilate=(8, 8), pad=(16, 16),
# nobias=True, initialW=initial)
# self.conv13 = L.DilatedConvolution2D(
# in_channels=96, out_channels=96,
# stride=1, ksize=(5, 5), dilate=(16, 16), pad=(32, 32),
# nobias=True, initialW=initial)
# self.conv14 = L.DilatedConvolution2D(
# in_channels=96, out_channels=96,
# stride=1, ksize=(5, 5), dilate=(32, 32), pad=(64, 64),
# nobias=True, initialW=initial)
self.conv15 = L.DilatedConvolution2D(
in_channels=96, out_channels=8,
stride=1, ksize=(1, 1), dilate=(1, 1), pad=(0, 0),
nobias=True, initialW=initial)
self.bn1 = L.BatchNormalization(96)
# self.bn2 = L.BatchNormalization(96)
self.bn3 = L.BatchNormalization(96)
self.bn4 = L.BatchNormalization(96)
self.bn5 = L.BatchNormalization(96)
self.bn6 = L.BatchNormalization(96)
self.bn7 = L.BatchNormalization(96)
self.bn8 = L.BatchNormalization(96)
# self.bn9 = L.BatchNormalization(96)
# self.bn10 = L.BatchNormalization(96)
# self.bn11 = L.BatchNormalization(96)
# self.bn12 = L.BatchNormalization(96)
# self.bn13 = L.BatchNormalization(96)
# self.bn14 = L.BatchNormalization(96)
self.bn15 = L.BatchNormalization(8)
if trained is not None:
self.conv1.W = trained["conv1"].W
# self.conv2.W = trained["conv2"].W
self.conv3.W = trained["conv3"].W
self.conv4.W = trained["conv4"].W
self.conv5.W = trained["conv5"].W
self.conv6.W = trained["conv6"].W
self.conv7.W = trained["conv7"].W
self.conv8.W = trained["conv8"].W
# self.conv9.W = trained["conv9"].W
# self.conv10.W = trained["conv10"].W
# self.conv11.W = trained["conv11"].W
# self.conv12.W = trained["conv12"].W
# self.conv13.W = trained["conv13"].W
# self.conv14.W = trained["conv14"].W
self.conv15.W = trained["conv15"].W
self.bn1.gamma = trained["bn1"].gamma
self.bn1.beta = trained["bn1"].beta
self.bn1.avg_mean = trained["bn1"].avg_mean
self.bn1.avg_var = trained["bn1"].avg_var
# self.bn2.gamma = trained["bn2"].gamma
# self.bn2.beta = trained["bn2"].beta
# self.bn2.avg_mean = trained["bn2"].avg_mean
# self.bn2.avg_var = trained["bn2"].avg_var
self.bn3.gamma = trained["bn3"].gamma
self.bn3.beta = trained["bn3"].beta
self.bn3.avg_mean = trained["bn3"].avg_mean
self.bn3.avg_var = trained["bn3"].avg_var
self.bn4.gamma = trained["bn4"].gamma
self.bn4.beta = trained["bn4"].beta
self.bn4.avg_mean = trained["bn4"].avg_mean
self.bn4.avg_var = trained["bn4"].avg_var
self.bn5.gamma = trained["bn5"].gamma
self.bn5.beta = trained["bn5"].beta
self.bn5.avg_mean = trained["bn5"].avg_mean
self.bn5.avg_var = trained["bn5"].avg_var
self.bn6.gamma = trained["bn6"].gamma
self.bn6.beta = trained["bn6"].beta
self.bn6.avg_mean = trained["bn6"].avg_mean
self.bn6.avg_var = trained["bn6"].avg_var
self.bn7.gamma = trained["bn7"].gamma
self.bn7.beta = trained["bn7"].beta
self.bn7.avg_mean = trained["bn7"].avg_mean
self.bn7.avg_var = trained["bn7"].avg_var
self.bn8.gamma = trained["bn8"].gamma
self.bn8.beta = trained["bn8"].beta
self.bn8.avg_mean = trained["bn8"].avg_mean
self.bn8.avg_var = trained["bn8"].avg_var
# self.bn9.gamma = trained["bn9"].gamma
# self.bn9.beta = trained["bn9"].beta
# self.bn9.avg_mean = trained["bn9"].avg_mean
# self.bn9.avg_var = trained["bn9"].avg_var
# self.bn10.gamma = trained["bn10"].gamma
# self.bn10.beta = trained["bn10"].beta
# self.bn10.avg_mean = trained["bn10"].avg_mean
# self.bn10.avg_var = trained["bn10"].avg_var
# self.bn11.gamma = trained["bn11"].gamma
# self.bn11.beta = trained["bn11"].beta
# self.bn11.avg_mean = trained["bn11"].avg_mean
# self.bn11.avg_var = trained["bn11"].avg_var
# self.bn12.gamma = trained["bn12"].gamma
# self.bn12.beta = trained["bn12"].beta
# self.bn12.avg_mean = trained["bn12"].avg_mean
# self.bn12.avg_var = trained["bn12"].avg_var
# self.bn13.gamma = trained["bn13"].gamma
# self.bn13.beta = trained["bn13"].beta
# self.bn13.avg_mean = trained["bn13"].avg_mean
# self.bn13.avg_var = trained["bn13"].avg_var
# self.bn14.gamma = trained["bn14"].gamma
# self.bn14.beta = trained["bn14"].beta
# self.bn14.avg_mean = trained["bn14"].avg_mean
# self.bn14.avg_var = trained["bn14"].avg_var
self.bn15.gamma = trained["bn15"].gamma
self.bn15.beta = trained["bn15"].beta
self.bn15.avg_mean = trained["bn15"].avg_mean
self.bn15.avg_var = trained["bn15"].avg_var
|
481925
|
from .land import rlp
from .ahrweiler import ahrweiler
from .altenkirchen import altenkirchen
from .alzeyworms import alzeyworms
from .cochem import cochem
#from .kaiserslautern import kaiserslautern
from .koblenz import koblenz
from .landau import landau
from .mainz import mainz
from .neuwied import neuwied
from .rheinlahn import rheinlahn
from .vulkaneifel import vulkaneifel
from .westerwald import westerwald
|
481945
|
import datetime
from app_ccf.common import (
auto_update_application_statuses,
update_application_statuses
)
from app_ccf.config import CONFIG
from app_ccf.models import Application, VoucherCode, VoucherCodeBatch, PreapprovedAddress
from shared.test_utils import DEFAULT_CCF_APP_FIELDS
from parameterized import parameterized
from . import base_test
class AutoUpdateAppStatusTests(base_test.CcfBaseTest):
def setUp(self):
super().setUp()
self.ADDRESS_FIELDS = {
'addr1': '45 BROADWAY',
'city': 'NY',
'state': 'New York',
'zip_code': '10006',
'usps_verified': True,
}
self.OTHER_REQUIRED_FIELDS = DEFAULT_CCF_APP_FIELDS.copy()
for address_field in self.ADDRESS_FIELDS:
del self.OTHER_REQUIRED_FIELDS[address_field]
del self.OTHER_REQUIRED_FIELDS['first_name']
del self.OTHER_REQUIRED_FIELDS['last_name']
del self.OTHER_REQUIRED_FIELDS['phone_number']
def test_auto_update_application_statuses_fourDupAddresses_marksNewDupsForReview(
self):
# 4 apps with the same address, 1 already approved
app1 = Application.objects.create(
**self.OTHER_REQUIRED_FIELDS,
**self.ADDRESS_FIELDS,
first_name="A",
last_name="A",
addr2="APT A1",
phone_number='+12222222222',
status=Application.ApplicationStatus.APPROVED)
app2 = Application.objects.create(
**self.OTHER_REQUIRED_FIELDS,
**self.ADDRESS_FIELDS,
first_name="B",
last_name="B",
addr2="APT B2",
phone_number='+13333333333',
status=Application.ApplicationStatus.SUBMITTED)
app3 = Application.objects.create(
**self.OTHER_REQUIRED_FIELDS,
**self.ADDRESS_FIELDS,
first_name="C",
last_name="C",
addr2="APT C3",
phone_number='+14444444444',
status=Application.ApplicationStatus.SUBMITTED)
app4 = Application.objects.create(
**self.OTHER_REQUIRED_FIELDS,
**self.ADDRESS_FIELDS,
first_name="D",
last_name="D",
addr2="APT D4",
phone_number='+15555555555',
status=Application.ApplicationStatus.SUBMITTED)
auto_update_application_statuses()
# This one was already approved so it should stay the same
app1.refresh_from_db()
self.assertEqual(
app1.status,
Application.ApplicationStatus.APPROVED)
app2.refresh_from_db()
self.assertEqual(
app2.status,
Application.ApplicationStatus.NEEDS_REVIEW)
app3.refresh_from_db()
self.assertEqual(
app3.status,
Application.ApplicationStatus.NEEDS_REVIEW)
self.assertEqual(app3.note, 'duplicate address')
app4.refresh_from_db()
self.assertEqual(
app4.status,
Application.ApplicationStatus.NEEDS_REVIEW)
self.assertEqual(app4.note, 'duplicate address')
def test_auto_update_application_statuses_fourDupAddressesPreapproved_marksNewDupsForReview(
self):
PreapprovedAddress.objects.create(
addr1=self.ADDRESS_FIELDS["addr1"],
city=self.ADDRESS_FIELDS["city"],
state=self.ADDRESS_FIELDS["state"],
zip_code=self.ADDRESS_FIELDS["zip_code"],
note="Test Preapproved Address")
# 4 apps with the same address, 1 already approved
app1 = Application.objects.create(
**self.OTHER_REQUIRED_FIELDS,
**self.ADDRESS_FIELDS,
first_name="A",
last_name="A",
addr2="APT A1",
phone_number='+12222222222',
status=Application.ApplicationStatus.APPROVED)
app2 = Application.objects.create(
**self.OTHER_REQUIRED_FIELDS,
**self.ADDRESS_FIELDS,
first_name="B",
last_name="B",
addr2="APT B2",
phone_number='+13333333333',
status=Application.ApplicationStatus.SUBMITTED)
app3 = Application.objects.create(
**self.OTHER_REQUIRED_FIELDS,
**self.ADDRESS_FIELDS,
first_name="C",
last_name="C",
addr2="APT C3",
phone_number='+14444444444',
status=Application.ApplicationStatus.SUBMITTED)
app4 = Application.objects.create(
**self.OTHER_REQUIRED_FIELDS,
**self.ADDRESS_FIELDS,
first_name="D",
last_name="D",
addr2="APT D4",
phone_number='+15555555555',
status=Application.ApplicationStatus.SUBMITTED)
auto_update_application_statuses()
app1.refresh_from_db()
self.assertEqual(
app1.status,
Application.ApplicationStatus.APPROVED)
app2.refresh_from_db()
self.assertEqual(
app2.status,
Application.ApplicationStatus.APPROVED)
app3.refresh_from_db()
self.assertEqual(
app3.status,
Application.ApplicationStatus.APPROVED)
app4.refresh_from_db()
self.assertEqual(
app4.status,
Application.ApplicationStatus.APPROVED)
def test_auto_update_application_statuses_threeDupAddresses_marksDupsApproved(
self):
# 3 apps with the same address
app1 = Application.objects.create(
**self.OTHER_REQUIRED_FIELDS,
**self.ADDRESS_FIELDS,
first_name="A",
last_name="A",
phone_number='+12222222222',
status=Application.ApplicationStatus.SUBMITTED)
app2 = Application.objects.create(
**self.OTHER_REQUIRED_FIELDS,
**self.ADDRESS_FIELDS,
first_name="B",
last_name="B",
phone_number='+13333333333',
status=Application.ApplicationStatus.SUBMITTED)
app3 = Application.objects.create(
**self.OTHER_REQUIRED_FIELDS,
**self.ADDRESS_FIELDS,
first_name="C",
last_name="C",
phone_number='+14444444444',
status=Application.ApplicationStatus.SUBMITTED)
auto_update_application_statuses()
        # With only three applications sharing this address, all of them should be auto-approved
app1.refresh_from_db()
self.assertEqual(
app1.status,
Application.ApplicationStatus.APPROVED)
app2.refresh_from_db()
self.assertEqual(
app2.status,
Application.ApplicationStatus.APPROVED)
app3.refresh_from_db()
self.assertEqual(
app3.status,
Application.ApplicationStatus.APPROVED)
    def test_auto_update_application_statuses_dupNamePhone_marksDupsRejected(
self):
self.trigger_text_messages_mock.reset_mock()
app1 = Application.objects.create(
**self.OTHER_REQUIRED_FIELDS,
addr1='45 BROADWAY',
city='NY',
zip_code='10011',
state='NY',
first_name="FirstName",
last_name="LastName",
phone_number='+12222222222',
status=Application.ApplicationStatus.SUBMITTED)
app2 = Application.objects.create(
**self.OTHER_REQUIRED_FIELDS,
addr1='25 STATE ST',
city='NY',
zip_code='10011',
state='CA',
first_name="firstName",
last_name="lastName",
phone_number='+12222222222',
status=Application.ApplicationStatus.SUBMITTED)
auto_update_application_statuses()
app1.refresh_from_db()
self.assertEqual(
app1.status,
Application.ApplicationStatus.REJECTED)
self.assertEqual(app1.note, 'duplicate first/last/phone')
app2.refresh_from_db()
self.assertEqual(
app2.status,
Application.ApplicationStatus.REJECTED)
self.assertEqual(app2.note, 'duplicate first/last/phone')
self.assertEqual(
1,
self.trigger_text_messages_mock.call_count)
self.assertEqual(
{'+12222222222'},
self.trigger_text_messages_mock.call_args_list[0][0][0])
class UpdateAppStatusTests(base_test.CcfBaseTest):
def setUp(self):
super().setUp()
fields = DEFAULT_CCF_APP_FIELDS.copy()
self.models = [
Application.objects.create(
**fields,
status=Application.ApplicationStatus.APPROVED),
Application.objects.create(
**fields,
status=Application.ApplicationStatus.NEEDS_REVIEW),
Application.objects.create(
**fields,
status=Application.ApplicationStatus.NEEDS_REVIEW),
Application.objects.create(
**fields,
status=Application.ApplicationStatus.REJECTED),
Application.objects.create(
**fields,
status=Application.ApplicationStatus.SENT_FOR_PAYMENT),
]
@parameterized.expand([
(Application.ApplicationStatus.APPROVED, ),
(Application.ApplicationStatus.NEEDS_REVIEW, ),
(Application.ApplicationStatus.REJECTED, ),
(Application.ApplicationStatus.SENT_FOR_PAYMENT, ),
])
def test_update_application_statuses(self, status):
id_of_applications_to_update = [
self.models[2].application_id,
self.models[3].application_id,
self.models[4].application_id,
]
# Act
update_application_statuses(id_of_applications_to_update, status)
for m in self.models:
m.refresh_from_db()
# Verify updated models
self.assertEqual(status, self.models[2].status)
self.assertEqual(status, self.models[3].status)
self.assertEqual(status, self.models[4].status)
# Verify untouched models
self.assertEqual(Application.ApplicationStatus.APPROVED,
self.models[0].status)
self.assertEqual(
Application.ApplicationStatus.NEEDS_REVIEW, self.models[1].status)
@parameterized.expand([
(Application.Language.EN,),
(Application.Language.EN,),
(Application.Language.ES,),
])
def test_update_application_withTextMessaging(
self, language):
self.trigger_text_messages_mock.reset_mock()
self.models[0].language = language
self.models[0].save()
self.models[1].language = language
self.models[1].phone_number = '+16666666666'
self.models[1].save()
update_application_statuses(
(
self.models[0].application_id,
self.models[1].application_id),
Application.ApplicationStatus.PAYMENT_CONFIRMED)
self.assertEqual(
1,
self.trigger_text_messages_mock.call_count)
self.assertEqual(
{'+15555555555', '+16666666666'}, self.trigger_text_messages_mock.call_args[0][0])
|
481948
|
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import torch
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
import torchvision
from torchvision.datasets import ImageFolder
from torchvision import transforms
from torchvision import models
import os
import utils
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Loading the data')
data_dir = './data/hotdog'
print(os.listdir(data_dir))
train_imgs = ImageFolder(os.path.join(data_dir, 'train'))
test_imgs = ImageFolder(os.path.join(data_dir, 'test'))
print('Showing 16 images')
hotdogs = [train_imgs[i][0] for i in range(8)]
not_hotdogs = [train_imgs[-i - 1][0] for i in range(8)]
utils.show_images(hotdogs + not_hotdogs, 2, 8, scale=1.4)
print('Applying the same preprocessing as the pretrained model')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_augs = transforms.Compose([
transforms.RandomResizedCrop(size=224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
])
test_augs = transforms.Compose([
transforms.Resize(size=256),
transforms.CenterCrop(size=224),
transforms.ToTensor(),
normalize
])
print('Define and initialize the model')
print('The pretrained model is downloaded to the ~/.cache/torch/checkpoints folder by default; set the $TORCH_MODEL_ZOO environment variable to change the location')
pretrained_net = models.resnet18(pretrained=True)
'''
The download URLs in the source code are as follows; I downloaded the weights via my own NAS:
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
'''
print('The fc layer of the source model')
print(pretrained_net.fc)
print('It originally classifies into 1000 classes, so we need to change it to 2 classes')
pretrained_net.fc = nn.Linear(512, 2)
print(pretrained_net.fc)
print('Use different learning rates for different layers')
output_params = list(map(id, pretrained_net.fc.parameters()))
feature_params = filter(lambda p: id(p) not in output_params, pretrained_net.parameters())
lr = 0.01
optimizer = optim.SGD([
{'params': feature_params},
{'params': pretrained_net.fc.parameters(), 'lr': lr * 10}
], lr=lr, weight_decay=0.001)
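# The freshly replaced fc layer trains with 10x the base learning rate (0.1), while the
# pretrained backbone keeps the smaller base rate (0.01).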
print('Fine-tuning the model')
def train_fine_tuning(net, optimizer, batch_size=128, num_epochs=4):
train_iter = DataLoader(ImageFolder(os.path.join(data_dir, 'train'), transform=train_augs), batch_size, shuffle=True)
test_iter = DataLoader(ImageFolder(os.path.join(data_dir, 'test'), transform=test_augs), batch_size)
loss = torch.nn.CrossEntropyLoss()
utils.train(train_iter, test_iter, net, loss, optimizer, device, num_epochs)
train_fine_tuning(pretrained_net, optimizer)
print('Define a model with the same structure but randomly initialized')
scratch_net = models.resnet18(pretrained=False, num_classes=2)
lr = 0.1
optimizer = optim.SGD(scratch_net.parameters(), lr=lr, weight_decay=0.001)
train_fine_tuning(scratch_net, optimizer)
print('We can see that the fine-tuned model achieves higher accuracy')
'''
The fc layer of the source model
Linear(in_features=512, out_features=1000, bias=True)
It originally classifies into 1000 classes, so we need to change it to 2 classes
Linear(in_features=512, out_features=2, bias=True)
Use different learning rates for different layers
Fine-tuning the model
training on cpu
epoch 1, loss 4.0836, train acc 0.674, test acc 0.922, time 456.7 sec
epoch 2, loss 0.1991, train acc 0.904, test acc 0.934, time 474.0 sec
epoch 3, loss 0.1026, train acc 0.915, test acc 0.927, time 464.1 sec
epoch 4, loss 0.0606, train acc 0.921, test acc 0.920, time 463.7 sec
Define a model with the same structure but randomly initialized
training on cpu
epoch 1, loss 2.4249, train acc 0.654, test acc 0.776, time 458.2 sec
epoch 2, loss 0.2222, train acc 0.804, test acc 0.787, time 430.5 sec
epoch 3, loss 0.1286, train acc 0.841, test acc 0.814, time 429.5 sec
epoch 4, loss 0.1015, train acc 0.815, test acc 0.838, time 474.2 sec
We can see that the fine-tuned model achieves higher accuracy
'''
|
481963
|
import pathlib
import os
import tempfile
from zipfile import ZipFile
import pytest
from pytest_mock import MockerFixture
from ark.utils.deepcell_service_utils import create_deepcell_output
def mocked_run_deepcell(in_zip_path, output_dir, host, job_type, scale, timeout):
pathlib.Path(os.path.join(output_dir, 'fov1_feature_0.tif')).touch()
pathlib.Path(os.path.join(output_dir, 'fov2_feature_0.tif')).touch()
pathlib.Path(os.path.join(output_dir, 'fov3_feature_0.tif')).touch()
batch_num = int(in_zip_path.split('.')[0].split('_')[-1])
if batch_num < 2:
zip_path = os.path.join(output_dir, 'example_output.zip')
else:
zip_path = os.path.join(output_dir, f'example_output_{batch_num}.zip')
with ZipFile(zip_path, 'w') as zipObj:
if batch_num > 1:
return
for i in range(1, 4):
filename = os.path.join(output_dir, f'fov{i}_feature_0.tif')
zipObj.write(filename, os.path.basename(filename))
os.remove(filename)
def test_create_deepcell_output(mocker: MockerFixture):
with tempfile.TemporaryDirectory() as temp_dir:
mocker.patch('ark.utils.deepcell_service_utils.run_deepcell_direct', mocked_run_deepcell)
input_dir = os.path.join(temp_dir, 'input_dir')
os.makedirs(input_dir)
pathlib.Path(os.path.join(input_dir, 'fov1.tif')).touch()
pathlib.Path(os.path.join(input_dir, 'fov2.tif')).touch()
pathlib.Path(os.path.join(input_dir, 'fov3.tiff')).touch()
with tempfile.TemporaryDirectory() as output_dir:
with pytest.raises(ValueError):
# fail if non-existent fovs are specified
create_deepcell_output(deepcell_input_dir=input_dir,
deepcell_output_dir=output_dir, fovs=['fov1', 'fov1000'])
# test with specified fov list
create_deepcell_output(deepcell_input_dir=input_dir, deepcell_output_dir=output_dir,
fovs=['fov1', 'fov2'])
with pytest.raises(ValueError):
# fail if scale argument can not be converted to float
create_deepcell_output(deepcell_input_dir=input_dir,
deepcell_output_dir=output_dir, fovs=['fov1', 'fov2'],
scale='test')
# make sure DeepCell (.zip) output exists
assert os.path.exists(os.path.join(output_dir, 'example_output.zip'))
# DeepCell output .zip file should be extracted
assert os.path.exists(os.path.join(output_dir, 'fov1_feature_0.tif'))
assert os.path.exists(os.path.join(output_dir, 'fov2_feature_0.tif'))
with tempfile.TemporaryDirectory() as output_dir:
# test parallel
create_deepcell_output(deepcell_input_dir=input_dir, deepcell_output_dir=output_dir,
fovs=['fov1', 'fov2'], zip_size=1, parallel=True)
# make sure DeepCell (.zip's) output exists
assert os.path.exists(os.path.join(output_dir, 'example_output.zip'))
assert os.path.exists(os.path.join(output_dir, 'example_output_2.zip'))
with tempfile.TemporaryDirectory() as output_dir:
# test with mixed fov/file list
create_deepcell_output(deepcell_input_dir=input_dir, deepcell_output_dir=output_dir,
fovs=['fov1', 'fov2.tif', 'fov3.tiff'])
# make sure DeepCell (.zip) output exists
assert os.path.exists(os.path.join(output_dir, 'example_output.zip'))
# DeepCell output .zip file should be extracted
assert os.path.exists(os.path.join(output_dir, 'fov1_feature_0.tif'))
assert os.path.exists(os.path.join(output_dir, 'fov2_feature_0.tif'))
assert os.path.exists(os.path.join(output_dir, 'fov3_feature_0.tif'))
with tempfile.TemporaryDirectory() as output_dir:
# if fovs is None, all .tif files in input dir should be taken
create_deepcell_output(deepcell_input_dir=input_dir, deepcell_output_dir=output_dir)
# make sure DeepCell (.zip) output exists
assert os.path.exists(os.path.join(output_dir, 'example_output.zip'))
assert os.path.exists(os.path.join(output_dir, 'fov1_feature_0.tif'))
assert os.path.exists(os.path.join(output_dir, 'fov2_feature_0.tif'))
assert os.path.exists(os.path.join(output_dir, 'fov3_feature_0.tif'))
pathlib.Path(os.path.join(input_dir, 'fovs.zip')).touch()
# Warning should be displayed if fovs.zip file exists (will be overwritten)
with pytest.warns(UserWarning):
create_deepcell_output(deepcell_input_dir=input_dir,
deepcell_output_dir=output_dir, fovs=['fov1'])
# DeepCell output .tif file does not exist for some fov
with pytest.warns(UserWarning):
create_deepcell_output(deepcell_input_dir=input_dir,
deepcell_output_dir=output_dir, suffix='_other_suffix',
fovs=['fov1'])
# add additional fov for auto-batch testing
pathlib.Path(os.path.join(input_dir, 'fov4.tif')).touch()
create_deepcell_output(deepcell_input_dir=input_dir,
deepcell_output_dir=output_dir,
fovs=['fov1', 'fov2', 'fov3', 'fov4'], zip_size=3)
# check that there are two zip files with sizes 3, 1 respectively
assert os.path.exists(os.path.join(input_dir, 'fovs_batch_1.zip'))
assert os.path.exists(os.path.join(input_dir, 'fovs_batch_2.zip'))
with ZipFile(os.path.join(input_dir, 'fovs_batch_1.zip'), 'r') as zip_batch1:
assert zip_batch1.namelist() == ['fov1.tif', 'fov2.tif', 'fov3.tiff']
with ZipFile(os.path.join(input_dir, 'fovs_batch_2.zip'), 'r') as zip_batch2:
assert zip_batch2.namelist() == ['fov4.tif']
# ValueError should be raised if .tif file does not exists for some fov in fovs
with pytest.raises(ValueError):
create_deepcell_output(deepcell_input_dir=input_dir,
deepcell_output_dir=output_dir, fovs=['fov1', 'fov5'])
|
481967
|
import argparse
from codes.basic_functions.transferability import (interaction_reduced_attack,
leave_one_out)
from set_config import set_config
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--p", type=str, help="inf; 2", default='inf')
parser.add_argument("--epsilon", type=int, default=16)
parser.add_argument("--step_size", type=float, default=2.)
parser.add_argument("--num_steps", type=int, default=100)
parser.add_argument("--loss_root", type=str, default='./experiments/loss')
parser.add_argument(
"--adv_image_root", type=str, default='./experiments/adv_images')
parser.add_argument("--clean_image_root", type=str, default='data/images_1000')
parser.add_argument("--gpu", type=int, default=0)
parser.add_argument("--arch", type=str, default='resnet34')
parser.add_argument(
"--target_archs", type=str, default=['densenet201'], nargs='*')
parser.add_argument("--attack_method", type=str, default='PGD')
parser.add_argument("--gamma", type=float, default=1.)
parser.add_argument("--momentum", type=float, default=0.)
parser.add_argument("--m", type=int, default=0)
parser.add_argument("--sigma", type=float, default=15.)
parser.add_argument("--ti_size", type=int, default=1)
parser.add_argument("--lam", type=float, default=0.)
parser.add_argument("--grid_scale", type=int, default=16)
parser.add_argument("--sample_grid_num", type=int, default=32)
parser.add_argument("--sample_times", type=int, default=32)
args = parser.parse_args()
target_archs = [
"vgg16", "resnet152", "densenet201", "senet154", "inceptionv3",
"inceptionv4", "inceptionresnetv2"
]
def test_interaction_reduced_attack():
set_config(args)
interaction_reduced_attack.generate_adv_images(args)
for target_arch in target_archs:
args.target_arch = target_arch
interaction_reduced_attack.save_scores(args)
leave_one_out.evaluate(args)
test_interaction_reduced_attack()
|
482014
|
from __future__ import print_function, division
from tfr import Tuning
def test_pitch_to_relative_freq():
tuning_step1 = Tuning(steps_per_octave=1)
tuning_step12 = Tuning(steps_per_octave=12)
assert 1. == tuning_step1.pitch_to_relative_freq(0.)
assert 2. == tuning_step1.pitch_to_relative_freq(1.)
assert 4. == tuning_step1.pitch_to_relative_freq(2.)
assert 0.5 == tuning_step1.pitch_to_relative_freq(-1.)
assert 1. == tuning_step12.pitch_to_relative_freq(0.)
assert 2. == tuning_step12.pitch_to_relative_freq(12.)
assert 4. == tuning_step12.pitch_to_relative_freq(24.)
assert 0.5 == tuning_step12.pitch_to_relative_freq(-12.)
def test_pitch_to_freq():
tuning = Tuning()
assert 440. == tuning.pitch_to_freq(0.)
assert 880. == tuning.pitch_to_freq(12.)
assert 1760. == tuning.pitch_to_freq(24.)
assert 220. == tuning.pitch_to_freq(-12.)
assert abs(466.1637615180899 - tuning.pitch_to_freq(1.)) < 1e-10
assert abs(415.3046975799451 - tuning.pitch_to_freq(-1.)) < 1e-10
assert abs(1318.5102276514797 - tuning.pitch_to_freq(12 + 7)) < 1e-10
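    # These checks follow 12-tone equal temperament: pitch_to_freq(p) = 440 * 2 ** (p / 12),
    # e.g. 440 * 2 ** (1 / 12) ~= 466.1637615180899 Hz.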
# TODO: test:
# - freq_to_pitch()
# - PitchQuantizer
# - various configurations of Tuning
|
482023
|
import httpx
class OneSignalHTTPError(Exception):
"""
Exception raised for errors in the response of REST API calls to One Signal.
"""
def __init__(self, response: httpx.Response):
self.http_response = response
self.message = self._get_message(response)
self.status_code = response.status_code
def _get_message(self, response: httpx.Response) -> str:
message = f'Unexpected http status code {response.status_code}.'
response_body = response.json()
if response_body and 'errors' in response_body and len(response_body['errors']) > 0:
message = response_body['errors'][0]
return message
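# Hedged usage sketch (kept as comments; the endpoint and payload below are placeholders,
# not part of this module):
# response = httpx.post("https://onesignal.com/api/v1/notifications", json=payload)
# if response.status_code >= 400:
#     raise OneSignalHTTPError(response)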
|
482043
|
import torch.nn.functional as F
def depthwise_correlation(x, kernel):
"""Depthwise cross correlation.
This function is proposed in
`SiamRPN++ <https://arxiv.org/abs/1812.11703>`_.
Args:
x (Tensor): of shape (N, C, H_x, W_x).
kernel (Tensor): of shape (N, C, H_k, W_k).
Returns:
Tensor: of shape (N, C, H_o, W_o). H_o = H_x - H_k + 1. So does W_o.
"""
batch = kernel.size(0)
channel = kernel.size(1)
x = x.view(1, batch * channel, x.size(2), x.size(3))
kernel = kernel.view(batch * channel, 1, kernel.size(2), kernel.size(3))
out = F.conv2d(x, kernel, groups=batch * channel)
out = out.view(batch, channel, out.size(2), out.size(3))
return out
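# Minimal shape-check sketch (not part of the original module); the tensor sizes below are
# arbitrary illustrations of the (N, C, H, W) contract documented in the docstring.
if __name__ == '__main__':
    import torch
    x = torch.randn(2, 256, 29, 29)      # search features (N, C, H_x, W_x)
    kernel = torch.randn(2, 256, 5, 5)   # template features (N, C, H_k, W_k)
    out = depthwise_correlation(x, kernel)
    print(out.shape)  # torch.Size([2, 256, 25, 25]), since H_o = 29 - 5 + 1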
|
482091
|
import torch
import config.args as args
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from pytorch_pretrained_bert.tokenization import BertTokenizer
from preprocessing.data_processor import read_qa_examples, convert_examples_to_features
from util.Logginger import init_logger
logger = init_logger("bert_class", logging_path=args.log_path)
def init_params():
tokenizer = BertTokenizer(vocab_file=args.VOCAB_FILE)
return tokenizer
def create_batch_iter(mode):
"""ๆ้ ่ฟญไปฃๅจ"""
tokenizer = init_params()
if mode == "train":
examples = read_qa_examples(args.data_dir, "train")
batch_size = args.train_batch_size
elif mode == "dev":
examples = read_qa_examples(args.data_dir, "dev")
batch_size = args.eval_batch_size
else:
raise ValueError("Invalid mode %s" % mode)
    # Features
features = convert_examples_to_features(examples,
tokenizer,
mode,
args.max_seq_length,
args.doc_stride,
args.max_query_length,
is_training=True)
logger.info(" Num Features = %d", len(features))
logger.info(" Batch size = %d", batch_size)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_adj = torch.tensor([f.adj for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
answer_types = torch.tensor([f.answer_type for f in features], dtype=torch.long)
    # Dataset
    data = TensorDataset(all_input_ids, all_adj, all_input_mask, all_segment_ids, start_positions, end_positions, answer_types)
if mode == "train":
num_train_steps = int(
len(features) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
batch_size = args.train_batch_size
logger.info(" Num steps = %d", num_train_steps)
if args.local_rank == -1:
sampler = RandomSampler(data)
else:
sampler = DistributedSampler(data)
elif mode == "dev":
sampler = SequentialSampler(data)
else:
raise ValueError("Invalid mode %s" % mode)
    # Iterator
iterator = DataLoader(data, sampler=sampler, batch_size=batch_size)
if mode == "train":
return iterator, num_train_steps
elif mode == "dev":
return iterator
else:
raise ValueError("Invalid mode %s" % mode)
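# Hedged usage sketch (the surrounding training loop is not shown in this file):
# train_iter, num_train_steps = create_batch_iter("train")
# dev_iter = create_batch_iter("dev")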
|
482094
|
import math
from pythonfmu.fmi2slave import Fmi2Slave, Fmi2Causality, Real
from localmodule import get_amplitude, get_time_constant
class PythonSlaveWithDep(Fmi2Slave):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.realIn = 22.0
self.realOut = 0.0
self.register_variable(Real("realIn", causality=Fmi2Causality.input))
self.register_variable(Real("realOut", causality=Fmi2Causality.output))
def do_step(self, current_time, step_size):
self.realOut = self.realIn * get_amplitude() * math.exp((current_time + step_size) / get_time_constant())
return True
|
482140
|
import sys
import unittest
from os.path import dirname, join, abspath
def from_here(*parts):
return abspath(join(HERE, *parts))
HERE = dirname(__file__)
sys.path += [
from_here('..', '..'),
from_here('stubs')
]
def main():
loader = unittest.TestLoader()
suite = loader.discover(HERE)
result = unittest.TextTestRunner(verbosity=5).run(suite)
if result.wasSuccessful():
sys.exit(0)
else:
sys.exit(1)
if __name__ == '__main__':
main()
|
482145
|
import os
import datetime
def check_file_exist(path):
"""
:param path: absolute path to file
    :type path: str
    :return: True/False based on file existence
:rtype: bool
"""
file_exist = False
if os.path.isfile(path):
file_exist = True
return file_exist
def get_first_date_of_month():
"""
    Get the first date of the current month
    Returns:
        first_day_of_month: First date of the month in isoformat
"""
first_day_of_month = datetime.date.today().replace(day=1)
return first_day_of_month.isoformat()
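# For example, calling this on 2021-06-15 returns '2021-06-01'.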
def get_day_month_year():
"""
    Get today's date
    Returns:
        today: Today's date in isoformat
"""
today = datetime.date.today()
return today.isoformat()
def construct_error_response(context, api_request_id):
"""
Construct error response dict
:param context: AWS Context object, containing properties about the invocation, function, and execution environment.
:param api_request_id:
:return:
:rtype:
"""
error_response = {
"statusCode": 500,
"lambda_function_name": context.function_name,
"log_group_name": context.log_group_name,
"log_stream_name": context.log_stream_name,
"api_request_id": api_request_id,
"lambda_request_id": context.aws_request_id
}
return error_response
|
482174
|
from using_protected import *
f = FooBar()
f.x = 3
if f.blah(4) != 4:
    raise RuntimeError("blah(int)")
|
482219
|
import calendar
import csv
import operator
import os
import re
import sys
import tarfile
import zipfile
from datetime import date, datetime, timedelta
from ftplib import FTP
from io import StringIO
import requests
import sqlalchemy
from dateutil import parser, relativedelta
from geoalchemy2 import Geometry
from metar.metar import ParserError
from sqlalchemy import BigInteger, Column, Date, DateTime, Float, Integer, String, Table, and_, distinct, select, text
from sqlalchemy.dialects.postgresql import ARRAY
from plenario.database import postgres_base, postgres_engine as engine
from plenario.settings import DATA_DIR
from .weather_metar import getAllCurrentWeather, getCurrentWeather, getMetar, getMetarVals
# from http://stackoverflow.com/questions/7490660/converting-wind-direction-in-angles-to-text-words
def degToCardinal(num):
val = int((num / 22.5) + .5)
arr = ["N", "NNE", "NE", "ENE", "E", "ESE", "SE", "SSE", "S", "SSW", "SW", "WSW", "W", "WNW", "NW", "NNW"]
return arr[(val % 16)]
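# Each bucket spans 22.5 degrees centred on the listed direction, e.g.
# degToCardinal(0) -> "N", degToCardinal(90) -> "E", degToCardinal(225) -> "SW".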
class WeatherError(Exception):
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
class WeatherETL(object):
"""
Download, transform and insert weather data into plenario
"""
# contents:
# - initialize() functions (initialize(), initialize_month(), metar_initialize_current())
# - _do_etl() (_do_etl(), _metar_do_etl())
# - _cleanup_temp_tables, _metar_cleanup_temp_tables
# - _add_location() (not called?)
# - _update(), _update_metar():
# - Idea here is to create a new new_table which will represent the intersection (?) of src and dat -- only new records
# - We are eventually storing in dat_table
# - Raw incoming data is in src_table
# - make_tables(), metar_make_tables()
# - _extract(fname)
#
#
weather_type_dict = {'+FC': 'TORNADO/WATERSPOUT',
'FC': 'FUNNEL CLOUD',
'TS': 'THUNDERSTORM',
'GR': 'HAIL',
'RA': 'RAIN',
'DZ': 'DRIZZLE',
'SN': 'SNOW',
'SG': 'SNOW GRAINS',
'GS': 'SMALL HAIL &/OR SNOW PELLETS',
'PL': 'ICE PELLETS',
'IC': 'ICE CRYSTALS',
'FG': 'FOG', # 'FG+': 'HEAVY FOG (FG & LE.25 MILES VISIBILITY)',
'BR': 'MIST',
'UP': 'UNKNOWN PRECIPITATION',
'HZ': 'HAZE',
'FU': 'SMOKE',
'VA': 'VOLCANIC ASH',
'DU': 'WIDESPREAD DUST',
'DS': 'DUSTSTORM',
'PO': 'SAND/DUST WHIRLS',
'SA': 'SAND',
'SS': 'SANDSTORM',
'PY': 'SPRAY',
'SQ': 'SQUALL',
'DR': 'LOW DRIFTING',
'SH': 'SHOWER',
'FZ': 'FREEZING',
'MI': 'SHALLOW',
'PR': 'PARTIAL',
'BC': 'PATCHES',
'BL': 'BLOWING',
'VC': 'VICINITY'
# Prefixes:
# - LIGHT
# + HEAVY
# "NO SIGN" MODERATE
}
current_row = None
def __init__(self, data_dir=DATA_DIR, debug=False):
self.base_url = 'http://www.ncdc.noaa.gov/orders/qclcd'
self.data_dir = data_dir
self.debug_outfile = sys.stdout
        self.debug = debug
self.out_header = None
if (self.debug == True):
self.debug_filename = os.path.join(self.data_dir, 'weather_etl_debug_out.txt')
sys.stderr.write("writing out debug_file %s\n" % self.debug_filename)
self.debug_outfile = open(self.debug_filename, 'w+')
self.wban2callsign_map = self.build_wban2callsign_map()
def build_wban2callsign_map(self):
# stations_table = Table('weather_stations', Base.metadata,
# autoload=True, autoload_with=engine, extend_existing=True)
# ask stations_table for all rows where wban_code and call_sign are defined
sql = "SELECT wban_code, call_sign FROM weather_stations WHERE call_sign IS NOT NULL"
conn = engine.contextual_connect()
results = conn.execute(sql)
wban_callsigns = results.fetchall()
d = dict(wban_callsigns)
return d
# WeatherETL.initialize_last(): for debugging purposes, only initialize the most recent month of weather data.
def initialize_last(self, start_line=0, end_line=None):
self.make_tables()
fname = self._extract_last_fname()
raw_hourly, raw_daily, file_type = self._extract(fname)
t_daily = self._transform_daily(raw_daily, file_type, start_line=start_line, end_line=end_line)
self._load_daily(t_daily)
t_hourly = self._transform_hourly(raw_hourly, file_type, start_line=start_line, end_line=end_line)
self._load_hourly(t_hourly)
self._update(span='daily')
self._update(span='hourly')
self._cleanup_temp_tables()
def initialize(self):
# print "WeatherETL.initialize()"
self.make_tables()
fnames = self._extract_fnames()
for fname in fnames:
if (self.debug == True):
print(("INITIALIZE: doing fname", fname))
self._do_etl(fname)
def initialize_month(self, year, month, no_daily=False, no_hourly=False, weather_stations_list=None,
banned_weather_stations_list=None, start_line=0, end_line=None):
self.make_tables()
fname = self._extract_fname(year, month)
self._do_etl(fname, no_daily, no_hourly, weather_stations_list, banned_weather_stations_list, start_line,
end_line)
# Import current observations, whatever they may be, for the specified list of station WBANs and/or banned station WBANs.
def metar_initialize_current(self, weather_stations_list=None, banned_weather_stations_list=None):
self.metar_make_tables()
# we want to pass this to some _metar_do_etl() function
self._metar_do_etl(weather_stations_list, banned_weather_stations_list)
######################################################################
# do_etl: perform the ETL on a given tar/zip file
# weather_stations_list: a list of WBAN codes (as strings) in order to only read a subset of station observations
# start_line, end_line: optional parameters for testing which will only read a subset of lines from the file
######################################################################
def _do_etl(self, fname, no_daily=False, no_hourly=False, weather_stations_list=None,
banned_weather_stations_list=None, start_line=0, end_line=None):
raw_hourly, raw_daily, file_type = self._extract(fname)
if (self.debug):
self.debug_outfile.write("Extracting: %s\n" % fname)
if (not no_daily):
t_daily = self._transform_daily(raw_daily, file_type,
weather_stations_list=weather_stations_list,
banned_weather_stations_list=banned_weather_stations_list,
start_line=start_line, end_line=end_line)
if (not no_hourly):
t_hourly = self._transform_hourly(raw_hourly, file_type,
weather_stations_list=weather_stations_list,
banned_weather_stations_list=banned_weather_stations_list,
start_line=start_line, end_line=end_line)
if (not no_daily):
self._load_daily(t_daily) # this actually imports the transformed StringIO csv
self._update(span='daily')
# self._add_location(span='daily') # XXX mcc: hmm
if (not no_hourly):
self._load_hourly(t_hourly) # this actually imports the transformed StringIO csv
self._update(span='hourly')
# self._add_location(span='hourly') # XXX mcc: hmm
# self._cleanup_temp_tables()
def _metar_do_etl(self, weather_stations_list=None, banned_weather_stations_list=None):
# Below code hits the METAR server
# Don't bother calling any _extract_metar() function...
# metar_codes = getAllCurrentWeather()
if weather_stations_list:
# map wbans to call signs.
metar_codes = getCurrentWeather(wban_codes=weather_stations_list, wban2callsigns=self.wban2callsign_map)
else:
metar_codes = getAllCurrentWeather()
t_metars = self._transform_metars(metar_codes,
weather_stations_list,
banned_weather_stations_list)
# print "t_metars are: " ,t_metars
self._load_metar(t_metars)
self._update_metar()
self._metar_cleanup_temp_tables()
def _cleanup_temp_tables(self):
for span in ['daily', 'hourly']:
for tname in ['src', 'new']:
try:
table = getattr(self, '%s_%s_table' % (tname, span)) # possibly not getting dropped
table.drop(engine, checkfirst=True)
except AttributeError:
continue
def _metar_cleanup_temp_tables(self):
for tname in ['src', 'new']:
try:
table = getattr(self, '%s_metar_table' % tname)
table.drop(engine, checkfirst=True)
except AttributeError:
continue
def _add_location(self, span=None):
"""
Add latitude and longitude from weather station into observations table
"""
start_day, end_day = calendar.monthrange(self.current_year, self.current_month)
range_start = '%s-%s-%s' % (self.current_year, self.current_month, 1)
range_end = '%s-%s-%s' % (self.current_year, self.current_month, end_day)
date_col = 'date'
table_name = 'dat_weather_observations_%s' % span
if span == 'hourly' or span == 'metar':
date_col = 'datetime'
upd = text("""
UPDATE %s SET
longitude = subq.longitude,
latitude = subq.latitude
FROM (
SELECT
wban_code,
st_x(location) as longitude,
st_y(location) as latitude
FROM weather_stations
) as subq
WHERE %s.wban_code = subq.wban_code
AND %s.%s <= :range_end
AND %s.%s >= :range_start
AND %s.longitude IS NULL
AND %s.latitude IS NULL
""" % (table_name, table_name,
table_name, date_col,
table_name, date_col,
table_name, table_name)
)
conn = engine.contextual_connect()
conn.execute(upd, range_start=range_start, range_end=range_end)
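    # _update(span) syncs src_* into dat_* in two steps:
    #   1) fill new_weather_observations_<span> with the (wban_code, date/datetime)
    #      keys present in src but not yet in dat (left outer join, dat.id IS NULL);
    #   2) insert into dat the full src rows whose keys appear in that new table.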
def _update(self, span=None):
new_table = Table('new_weather_observations_%s' % span, postgres_base.metadata,
Column('wban_code', String(5)), keep_existing=True)
dat_table = getattr(self, '%s_table' % span)
src_table = getattr(self, 'src_%s_table' % span)
from_sel_cols = ['wban_code']
if span == 'daily':
from_sel_cols.append('date')
src_date_col = src_table.c.date
dat_date_col = dat_table.c.date
new_table.append_column(Column('date', Date))
new_date_col = new_table.c.date
elif span == 'hourly':
from_sel_cols.append('datetime')
src_date_col = src_table.c.datetime
dat_date_col = dat_table.c.datetime
new_table.append_column(Column('datetime', DateTime))
new_date_col = new_table.c.datetime
new_table.drop(engine, checkfirst=True)
new_table.create(engine)
ins = new_table.insert() \
.from_select(from_sel_cols,
select([src_table.c.wban_code, src_date_col]) \
.select_from(src_table.join(dat_table,
and_(src_table.c.wban_code == dat_table.c.wban_code,
src_date_col == dat_date_col),
isouter=True)
).where(dat_table.c.id == None)
)
# print "_update: span=%s: sql is'%s'" % (span, ins)
conn = engine.contextual_connect()
try:
conn.execute(ins)
new = True
except TypeError:
new = False
if new:
ins = dat_table.insert() \
.from_select([c for c in dat_table.columns if c.name != 'id'],
select([c for c in src_table.columns]) \
.select_from(src_table.join(new_table,
and_(src_table.c.wban_code == new_table.c.wban_code,
src_date_col == new_date_col)
))
)
# print "_update NEW : span=%s: sql is'%s'" % (span, ins)
conn.execute(ins)
def _update_metar(self):
# print "_update_metar()"
new_table = Table('new_weather_observations_metar', postgres_base.metadata,
Column('wban_code', String(5)),
keep_existing=True) # intersection of src and dat -- only new records
dat_table = getattr(self, 'metar_table') # where we are eventually storing things
src_table = getattr(self, 'src_metar_table') # raw incoming data
# print "we have new_table: '%s'" % new_table
# print "we have dat_table: '%s'" % dat_table
# print "we have src_table: '%s'" % src_table
from_sel_cols = ['wban_code', 'datetime']
src_date_col = src_table.c.datetime
dat_date_col = dat_table.c.datetime
new_table.append_column(Column('datetime', DateTime))
new_date_col = new_table.c.datetime
new_table.drop(engine, checkfirst=True)
try:
new_table.create(engine)
except sql.exc.ProgrammingError:
print("got ProgrammingError on new metar table create")
return None
## Essentially, insert into the new observations table for any src observations that are not in the current dat observations.
# ins = """
# INSERT INTO new_weather_observations_metar (wban_code, date)
# SELECT src_weather_observations_metar.wban_code, src_weather_observations_metar.datetime
# FROM src_weather_observations_metar
# LEFT OUTER JOIN dat_weather_observations_metar
# ON src_weather_observations_metar.wban_code = dat_weather_observations_metar.wban_code
# AND src_weather_observations_metar.datetime = dat_weather_observations_metar.datetime
# WHERE dat_weather_observations_metar.id IS NULL'
# """
ins = new_table.insert() \
.from_select(from_sel_cols,
select([src_table.c.wban_code, src_date_col]) \
.select_from(src_table.join(dat_table,
and_(src_table.c.wban_code == dat_table.c.wban_code,
src_date_col == dat_date_col),
isouter=True)
).where(dat_table.c.id == None)
)
# print "_update_metar(): sql is'%s'" % ins
conn = engine.contextual_connect()
try:
conn.execute(ins)
new = True
except TypeError:
new = False
except sql.exc.ProgrammingError:
print("got ProgrammingError on insert to new table")
if new:
            # The insert into the new-observations table succeeded, so copy into the dat
            # observations any src records which match records in new observations.
# 'INSERT INTO dat_weather_observations_daily (wban_code, date, temp_max, temp_min, temp_avg, departure_from_normal, dewpoint_avg, wetbulb_avg, weather_types, snowice_depth, snowice_waterequiv, snowfall, precip_total, station_pressure, sealevel_pressure, resultant_windspeed, resultant_winddirection, resultant_winddirection_cardinal, avg_windspeed, max5_windspeed, max5_winddirection, max5_direction_cardinal, max2_windspeed, max2_winddirection, max2_direction_cardinal, longitude, latitude)
# ins = """
# INSERT INTO dat_weather_observations_metar (wban_code, call_sign, datetime, sky_condition, sky_condition_top, visibility
# SELECT ...
# """
ins = dat_table.insert() \
.from_select([c for c in dat_table.columns if c.name != 'id'],
select([c for c in src_table.columns]) \
.select_from(src_table.join(new_table,
and_(src_table.c.wban_code == new_table.c.wban_code,
src_date_col == new_date_col)
))
)
conn.execute(ins)
def make_tables(self):
self._make_daily_table()
self._make_hourly_table()
def metar_make_tables(self):
self._make_metar_table()
########################################
########################################
# Extract (from filename / URL to some raw StringIO()
########################################
########################################
def _download_write(self, fname):
fpath = os.path.join(self.data_dir, fname)
url = '%s/%s' % (self.base_url, fname)
if (self.debug == True):
self.debug_outfile.write("Extracting: %s\n" % url)
r = requests.get(url, stream=True)
with open(fpath, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
f.close() # Explicitly close before re-opening to read.
def _extract(self, fname):
file_type = 'zipfile'
if fname.endswith('.zip'):
file_type = 'zipfile'
elif fname.endswith('tar.gz'):
file_type = 'tarfile'
else:
print(("file type for ", fname, "not found: quitting"))
return None
# extract the year and month from the QCLCD filename
fname_spl = fname.split('.')
        # take the base filename component (everything before the first '.')
fname_yearmonth = (fname_spl[:-1])[0]
yearmonth_str = fname_yearmonth[-6:]
year_str = yearmonth_str[0:4]
month_str = yearmonth_str[4:6]
fpath = os.path.join(self.data_dir, fname)
raw_weather_hourly = StringIO()
raw_weather_daily = StringIO()
now_month, now_year = str(datetime.now().month), str(datetime.now().year)
if '%s%s' % (now_year.zfill(2), now_month.zfill(2)) == yearmonth_str:
self._download_write(fname)
elif not os.path.exists(fpath):
self._download_write(fname)
if file_type == 'tarfile':
with tarfile.open(fpath, 'r') as tar:
for tarinfo in tar:
if tarinfo.name.endswith('hourly.txt') and (yearmonth_str in tarinfo.name):
raw_weather_hourly.write(tar.extractfile(tarinfo).read())
elif tarinfo.name.endswith('daily.txt') and (yearmonth_str in tarinfo.name):
# need this 2nd caveat above to handle ridiculous stuff like 200408.tar.gz containing 200512daily.txt for no reason
raw_weather_daily.write(tar.extractfile(tarinfo).read())
else:
if (self.debug == True):
self.debug_outfile.write("extract: fpath is %s\n" % fpath)
with zipfile.ZipFile(fpath, 'r') as zf:
for name in zf.namelist():
if name.endswith('hourly.txt'):
raw_weather_hourly.write(zf.open(name).read().decode("utf-8"))
elif name.endswith('daily.txt'):
raw_weather_daily.write(zf.open(name).read().decode("utf-8"))
return raw_weather_hourly, raw_weather_daily, file_type
########################################
########################################
# Transformations of daily data e.g. '200704daily.txt' (from tarfile) or '201101daily.txt' (from zipfile)
########################################
########################################
def _transform_daily(self, raw_weather, file_type, weather_stations_list=None, banned_weather_stations_list=None,
start_line=0, end_line=None):
raw_weather.seek(0)
raw_header = raw_weather.readline()
header = raw_header.strip().split(',')
header = [x.strip() for x in header]
self.clean_observations_daily = StringIO()
writer = csv.writer(self.clean_observations_daily)
self.out_header = ["wban_code", "date", "temp_max", "temp_min",
"temp_avg", "departure_from_normal",
"dewpoint_avg", "wetbulb_avg", "weather_types",
"snowice_depth", "snowice_waterequiv",
"snowfall", "precip_total", "station_pressure",
"sealevel_pressure",
"resultant_windspeed", "resultant_winddirection", "resultant_winddirection_cardinal",
"avg_windspeed",
"max5_windspeed", "max5_winddirection", "max5_winddirection_cardinal",
"max2_windspeed", "max2_winddirection", "max2_winddirection_cardinal"]
writer.writerow(self.out_header)
row_count = 0
while True:
try:
# row = reader.next()
                # DIY csv parsing for QCLCD to avoid buffering issues in UnicodeCSVReader
raw_row = raw_weather.readline()
if (raw_row == ''):
break
                raw_row = raw_row.strip()
row = raw_row.split(',')
self.current_row = row
if (row_count % 100 == 0):
if (self.debug == True):
self.debug_outfile.write("\rdaily parsing: row_count=%06d" % row_count)
self.debug_outfile.flush()
if (start_line > row_count):
row_count += 1
continue
if ((end_line is not None) and (row_count > end_line)):
break
row_count += 1
if (len(row) == 0):
continue
row_vals = getattr(self, '_parse_%s_row_daily' % file_type)(row, header, self.out_header)
row_dict = dict(list(zip(self.out_header, row_vals)))
if (weather_stations_list is not None):
# Only include observations from the specified list of wban_code values
if (row_dict['wban_code'] not in weather_stations_list):
continue
writer.writerow(row_vals)
except UnicodeDecodeError:
if (self.debug == True):
self.debug_outfile.write("UnicodeDecodeError caught\n")
self.debug_outfile.write(str(row))
self.debug_outfile.write(str(row_count) + "\n" + str(list(zip(self.out_header, row))) + "\n")
self.debug_outfile.flush()
                # Skip this line: it contains non-ASCII characters that it probably shouldn't.
                # We may not hit this error anymore (ironically) after rolling our own CSV parsing
                # in place of UnicodeCSVReader.
pass
continue
except StopIteration:
break
self.debug_outfile.write('finished %s rows\n' % row_count)
return self.clean_observations_daily
def _parse_zipfile_row_daily(self, row, header, out_header):
wban_code = row[header.index('WBAN')]
date = row[header.index('YearMonthDay')] # e.g. 20140801
temp_max = self.getTemp(row[header.index('Tmax')])
temp_min = self.getTemp(row[header.index('Tmin')])
temp_avg = self.getTemp(row[header.index('Tavg')])
departure_from_normal = self.floatOrNA(row[header.index('Depart')])
dewpoint_avg = self.floatOrNA(row[header.index('DewPoint')])
wetbulb_avg = self.floatOrNA(row[header.index('WetBulb')])
weather_types_list = self._parse_weather_types(row[header.index('CodeSum')])
snowice_depth = self.getPrecip(row[header.index('Depth')])
snowice_waterequiv = self.getPrecip(row[header.index('Water1')]) # predict 'heart-attack snow'!
snowfall = self.getPrecip(row[header.index('SnowFall')])
precip_total = self.getPrecip(row[header.index('PrecipTotal')])
station_pressure = self.floatOrNA(row[header.index('StnPressure')])
sealevel_pressure = self.floatOrNA(row[header.index('SeaLevel')])
resultant_windspeed = self.floatOrNA(row[header.index('ResultSpeed')])
resultant_winddirection, resultant_winddirection_cardinal = self.getWind(resultant_windspeed,
row[header.index('ResultDir')])
avg_windspeed = self.floatOrNA(row[header.index('AvgSpeed')])
max5_windspeed = self.floatOrNA(row[header.index('Max5Speed')])
max5_winddirection, max5_winddirection_cardinal = self.getWind(max5_windspeed, row[header.index('Max5Dir')])
max2_windspeed = self.floatOrNA(row[header.index('Max2Speed')])
max2_winddirection, max2_winddirection_cardinal = self.getWind(max2_windspeed, row[header.index('Max2Dir')])
vals = [wban_code, date, temp_max, temp_min,
temp_avg, departure_from_normal,
dewpoint_avg, wetbulb_avg, weather_types_list,
snowice_depth, snowice_waterequiv,
snowfall, precip_total, station_pressure,
sealevel_pressure,
resultant_windspeed, resultant_winddirection, resultant_winddirection_cardinal,
avg_windspeed,
max5_windspeed, max5_winddirection, max5_winddirection_cardinal,
max2_windspeed, max2_winddirection, max2_winddirection_cardinal]
assert (len(out_header) == len(vals))
return vals
def _parse_tarfile_row_daily(self, row, header, out_header):
wban_code = self.getWBAN(row[header.index('Wban Number')])
date = row[header.index('YearMonthDay')] # e.g. 20140801
temp_max = self.getTemp(row[header.index('Max Temp')])
temp_min = self.getTemp(row[header.index('Min Temp')])
temp_avg = self.getTemp(row[header.index('Avg Temp')])
departure_from_normal = self.floatOrNA(row[header.index('Dep from Normal')])
dewpoint_avg = self.floatOrNA(row[header.index('Avg Dew Pt')])
wetbulb_avg = self.floatOrNA(row[header.index('Avg Wet Bulb')])
weather_types_list = self._parse_weather_types(row[header.index('Significant Weather')])
snowice_depth = self.getPrecip(row[header.index('Snow/Ice Depth')])
snowice_waterequiv = self.getPrecip(row[header.index('Snow/Ice Water Equiv')]) # predict 'heart-attack snow'!
snowfall = self.getPrecip(row[header.index('Precipitation Snowfall')])
precip_total = self.getPrecip(row[header.index('Precipitation Water Equiv')])
station_pressure = self.floatOrNA(row[header.index('Pressue Avg Station')]) # XXX Not me -- typo in header!
sealevel_pressure = self.floatOrNA(row[header.index('Pressure Avg Sea Level')])
resultant_windspeed = self.floatOrNA(row[header.index('Wind Speed')])
resultant_winddirection, resultant_winddirection_cardinal = self.getWind(resultant_windspeed,
row[header.index('Wind Direction')])
avg_windspeed = self.floatOrNA(row[header.index('Wind Avg Speed')])
max5_windspeed = self.floatOrNA(row[header.index('Max 5 sec speed')])
max5_winddirection, max5_winddirection_cardinal = self.getWind(max5_windspeed,
row[header.index('Max 5 sec Dir')])
max2_windspeed = self.floatOrNA(row[header.index('Max 2 min speed')])
max2_winddirection, max2_winddirection_cardinal = self.getWind(max2_windspeed,
row[header.index('Max 2 min Dir')])
vals = [wban_code, date, temp_max, temp_min,
temp_avg, departure_from_normal,
dewpoint_avg, wetbulb_avg, weather_types_list,
snowice_depth, snowice_waterequiv,
snowfall, precip_total, station_pressure,
sealevel_pressure,
resultant_windspeed, resultant_winddirection, resultant_winddirection_cardinal,
avg_windspeed,
max5_windspeed, max5_winddirection, max5_winddirection_cardinal,
max2_windspeed, max2_winddirection, max2_winddirection_cardinal]
assert (len(out_header) == len(vals))
return vals
########################################
########################################
# Transformations of hourly data e.g. 200704hourly.txt (from tarfile) or 201101hourly.txt (from zipfile)
########################################
########################################
def _transform_hourly(self, raw_weather, file_type, weather_stations_list=None, banned_weather_stations_list=None,
start_line=0, end_line=None):
raw_weather.seek(0)
# XXX mcc: should probably convert this to DIY CSV parsing a la _transform_daily()
reader = csv.reader(raw_weather)
header = next(reader)
# strip leading and trailing whitespace from header (e.g. from tarfiles)
header = [x.strip() for x in header]
self.clean_observations_hourly = StringIO()
writer = csv.writer(self.clean_observations_hourly)
self.out_header = ["wban_code", "datetime", "old_station_type", "station_type", \
"sky_condition", "sky_condition_top", "visibility", \
"weather_types", "drybulb_fahrenheit", "wetbulb_fahrenheit", \
"dewpoint_fahrenheit", "relative_humidity", \
"wind_speed", "wind_direction", "wind_direction_cardinal", \
"station_pressure", "sealevel_pressure", "report_type", \
"hourly_precip"]
writer.writerow(self.out_header)
row_count = 0
while True:
try:
row = next(reader)
if (row_count % 1000 == 0):
if (self.debug == True):
self.debug_outfile.write("\rparsing: row_count=%06d" % row_count)
self.debug_outfile.flush()
if (start_line > row_count):
row_count += 1
continue
if ((end_line is not None) and (row_count > end_line)):
break
row_count += 1
if (len(row) == 0):
continue
# this calls either self._parse_zipfile_row_hourly
# or self._parse_tarfile_row_hourly
row_vals = getattr(self, '_parse_%s_row_hourly' % file_type)(row, header, self.out_header)
if (not row_vals):
continue
row_dict = dict(list(zip(self.out_header, row_vals)))
if (weather_stations_list is not None):
# Only include observations from the specified list of wban_code values
if (row_dict['wban_code'] not in weather_stations_list):
continue
if (banned_weather_stations_list is not None):
if (row_dict['wban_code'] in banned_weather_stations_list):
continue
writer.writerow(row_vals)
except StopIteration:
break
except Exception:
continue
return self.clean_observations_hourly
def _parse_zipfile_row_hourly(self, row, header, out_header):
# There are two types of report types (column is called "RecordType" for some reason).
# 1) AA - METAR (AVIATION ROUTINE WEATHER REPORT) - HOURLY
# 2) SP - METAR SPECIAL REPORT
# Special reports seem to occur at the same time (and have
# largely the same content) as hourly reports, but under certain
# adverse conditions (e.g. low visibility).
# As such, I believe it is sufficient to just use the 'AA' reports and keep
# our composite primary key of (wban_code, datetime).
report_type = row[header.index('RecordType')]
wban_code = row[header.index('WBAN')]
date = row[header.index('Date')] # e.g. 20140801
time = row[header.index('Time')] # e.g. '601' 6:01am
# pad this into a four digit number:
time_str = None
if (time):
time_int = self.integerOrNA(time)
time_str = '%04d' % time_int
weather_date = datetime.strptime('%s %s' % (date, time_str), '%Y%m%d %H%M')
station_type = row[header.index('StationType')]
old_station_type = None
sky_condition = row[header.index('SkyCondition')]
# Take the topmost atmospheric observation of clouds (e.g. in 'SCT013 BKN021 OVC029'
# (scattered at 1300 feet, broken clouds at 2100 feet, overcast at 2900)
# take OVC29 as the top layer.
sky_condition_top = sky_condition.split(' ')[-1]
visibility = self.floatOrNA(row[header.index('Visibility')])
visibility_flag = row[header.index('VisibilityFlag')]
# XX mcc consider handling visibility_flag =='s' for 'suspect'
weather_types_list = self._parse_weather_types(row[header.index('WeatherType')])
weather_types_flag = row[header.index('WeatherTypeFlag')]
# XX mcc consider handling weather_type_flag =='s' for 'suspect'
drybulb_F = self.floatOrNA(row[header.index('DryBulbFarenheit')])
wetbulb_F = self.floatOrNA(row[header.index('WetBulbFarenheit')])
dewpoint_F = self.floatOrNA(row[header.index('DewPointFarenheit')])
rel_humidity = self.integerOrNA(row[header.index('RelativeHumidity')])
wind_speed = self.integerOrNA(row[header.index('WindSpeed')])
# XX mcc consider handling WindSpeedFlag == 's' for 'suspect'
wind_direction, wind_cardinal = self.getWind(wind_speed, row[header.index('WindDirection')])
station_pressure = self.floatOrNA(row[header.index('StationPressure')])
sealevel_pressure = self.floatOrNA(row[header.index('SeaLevelPressure')])
hourly_precip = self.getPrecip(row[header.index('HourlyPrecip')])
vals = [wban_code,
weather_date,
old_station_type,
station_type,
sky_condition, sky_condition_top,
visibility,
weather_types_list,
drybulb_F,
wetbulb_F,
dewpoint_F,
rel_humidity,
wind_speed, wind_direction, wind_cardinal,
station_pressure, sealevel_pressure,
report_type,
hourly_precip]
assert (len(out_header) == len(vals))
# return hourly zipfile params
return vals
def _parse_tarfile_row_hourly(self, row, header, out_header):
report_type = row[header.index('Record Type')]
if (report_type == 'SP'):
return None
wban_code = row[header.index('Wban Number')]
wban_code = wban_code.lstrip('0') # remove leading zeros from WBAN
date = row[header.index('YearMonthDay')] # e.g. 20140801
time = row[header.index('Time')] # e.g. '601' 6:01am
# pad this into a four digit number:
time_str = None
if (time):
time_int = self.integerOrNA(time)
if not time_int:
time_str = None
# XX: maybe just continue and bail if this doesn't work
return None
time_str = '%04d' % time_int
try:
weather_date = datetime.strptime('%s %s' % (date, time_str), '%Y%m%d %H%M')
except ValueError:
# This means the date / time can't be parsed and is probably not reliable.
return None
old_station_type = row[header.index('Station Type')].strip() # either AO1, AO2, or '-' (XX: why '-'??)
station_type = None
sky_condition = row[header.index('Sky Conditions')].strip()
sky_condition_top = sky_condition.split(' ')[-1]
visibility = self._parse_old_visibility(row[header.index('Visibility')])
weather_types_list = self._parse_weather_types(row[header.index('Weather Type')])
drybulb_F = self.floatOrNA(row[header.index('Dry Bulb Temp')])
wetbulb_F = self.floatOrNA(row[header.index('Wet Bulb Temp')])
dewpoint_F = self.floatOrNA(row[header.index('Dew Point Temp')])
rel_humidity = self.integerOrNA(row[header.index('% Relative Humidity')])
wind_speed = self.integerOrNA(row[header.index('Wind Speed (kt)')])
wind_direction, wind_cardinal = self.getWind(wind_speed, row[header.index('Wind Direction')])
station_pressure = self.floatOrNA(row[header.index('Station Pressure')])
sealevel_pressure = self.floatOrNA(row[header.index('Sea Level Pressure')])
hourly_precip = self.getPrecip(row[header.index('Precip. Total')])
vals = [wban_code,
weather_date,
old_station_type, station_type,
sky_condition, sky_condition_top,
visibility,
weather_types_list,
drybulb_F,
wetbulb_F,
dewpoint_F,
rel_humidity,
wind_speed, wind_direction, wind_cardinal,
station_pressure, sealevel_pressure,
report_type,
hourly_precip]
assert (len(out_header) == len(vals))
return vals
def _transform_metars(self, metar_codes, weather_stations_list=None, banned_weather_stations_list=None):
metar_codes_idx = 0
self.clean_observations_metar = StringIO()
writer = csv.writer(self.clean_observations_metar)
self.out_header = ["wban_code", "call_sign", "datetime", "sky_condition", "sky_condition_top",
"visibility", "weather_types", "temp_fahrenheit", "dewpoint_fahrenheit",
"wind_speed", "wind_direction", "wind_direction_cardinal", "wind_gust",
"station_pressure", "sealevel_pressure",
"precip_1hr", "precip_3hr", "precip_6hr", "precip_24hr"]
writer.writerow(self.out_header)
row_count = 0
added_count = 0
for row in metar_codes:
row_count += 1
row_vals = self._parse_row_metar(row, self.out_header)
# XXX: convert row_dict['weather_types'] from a list of lists (e.g. [[None, None, None, '', 'BR', None]])
# to a string that looks like: "{{None, None, None, '', 'BR', None}}"
try:
weather_types_str = str(row_vals[self.out_header.index('weather_types')])
except IndexError:
print(('the offending row is', row_vals))
continue
# print "weather_types_str = '%s'" % weather_types_str
# literally just change '[' to '{' and ']' to '}'
weather_types_str = weather_types_str.replace('[', '{')
weather_types_str = weather_types_str.replace(']', '}')
row_vals[self.out_header.index('weather_types')] = weather_types_str
# print "_transform_metars(): changed row_vals[self.out_header.index('weather_types')] to ", weather_types_str
if (not row_vals):
continue
row_dict = dict(list(zip(self.out_header, row_vals)))
if (weather_stations_list is not None):
# Only include observations from the specified list of wban_code values
if (row_dict['wban_code'] not in weather_stations_list):
continue
if (banned_weather_stations_list is not None):
if (row_dict['wban_code'] in banned_weather_stations_list):
continue
if (self.debug == True):
print(("_transform_metars(): WRITING row_vals: '%s'" % str(row_vals)))
# Making sure there is a WBAN code
if not row_vals[0]:
# This will happen for stations outside of the USA.
# Discard for now.
continue
added_count += 1
writer.writerow(row_vals)
return self.clean_observations_metar
def _parse_row_metar(self, row, header):
try:
m = getMetar(row)
except ParserError:
return []
vals = getMetarVals(m)
# print "_parse_row_metar(): header=", header
# print "_parse_row_metar(): vals=",vals
assert (len(header) == len(vals))
return vals
# Help parse a 'present weather' string like 'FZFG' (freezing fog) or 'BLSN' (blowing snow) or '-RA' (light rain)
# When we are doing precip slurp as many as possible
def _do_weather_parse(self, pw, mapping, multiple=False, local_debug=False):
# Grab as many of the keys as possible consecutively in the string
retvals = []
while (multiple == True):
(pw, key) = self._do_weather_parse(pw, mapping, multiple=False, local_debug=True)
# print "got pw, key=", pw,key
retvals.append(key)
if ((pw == '') or (key == 'NULL')):
return pw, retvals
break
else:
continue
if (len(pw) == 0):
return ('', 'NULL')
# 2nd parse for descriptors
for (key, val) in mapping:
# print "key is '%s'" % key
q = pw[0:len(key)]
if (q == key):
# print "key found: ", q
pw2 = pw[len(key):]
# print "returning", l2
# return (l2, val)
return (pw2, key)
return (pw, 'NULL')
# Parse a 'present weather' string like 'FZFG' (freezing fog) or 'BLSN' (blowing snow) or '-RA' (light rain)
def _parse_present_weather(self, pw):
orig_pw = pw
l = pw
intensities = [('-', 'Light'),
('+', 'Heavy')]
(l, intensity) = self._do_weather_parse(l, intensities)
vicinities = [('VC', 'Vicinity')]
(l, vicinity) = self._do_weather_parse(l, vicinities)
descriptors = [('MI', 'Shallow'),
('PR', 'Partial'),
('BC', 'Patches'),
('DR', 'Low Drifting'),
('BL', 'Blowing'),
('SH', 'Shower(s)'),
('TS', 'Thunderstorm'),
('FZ', 'Freezing')]
(l, desc) = self._do_weather_parse(l, descriptors)
# 3rd parse for phenomena
precip_phenoms = [('DZ', 'Drizzle'),
('RA', 'Rain'),
('SN', 'Snow'),
('SG', 'Snow Grains'),
('IC', 'Ice Crystals'),
('PE', 'Ice Pellets'),
('PL', 'Ice Pellets'),
('GR', 'Hail'),
('GS', 'Small Hail'),
('UP', 'Unknown Precipitation')]
# We use arrays instead of hashmaps because we want to look for FG+ before FG (sigh)
obscuration_phenoms = [('BR', 'Mist'),
('FG+', 'Heavy Fog'),
('FG', 'Fog'),
('FU', 'Smoke'),
('VA', 'Volcanic Ash'),
('DU', 'Widespread Dust'),
('SA', 'Sand'),
('HZ', 'Haze'),
('PY', 'Spray')]
other_phenoms = [('PO', 'Dust Devils'),
('SQ', 'Squalls'),
('FC', 'Funnel Cloud'),
('+FC', 'Tornado Waterspout'),
('SS', 'Sandstorm'),
('DS', 'Duststorm'),
('GL', 'Glaze')]
(l, precips) = self._do_weather_parse(l, precip_phenoms, multiple=True)
(l, obscuration) = self._do_weather_parse(l, obscuration_phenoms)
(l, other) = self._do_weather_parse(l, other_phenoms)
# if l still has a length let's print it out and see what went wrong
if (self.debug == True):
if (len(l) > 0):
self.debug_outfile.write("\n")
self.debug_outfile.write(str(self.current_row))
self.debug_outfile.write("\ncould not fully parse present weather : '%s' '%s'\n\n" % (orig_pw, l))
wt_list = [intensity, vicinity, desc, precips[0], obscuration, other]
ret_wt_lists = []
ret_wt_lists.append(wt_list)
# if (len(precips) > 1):
# print "first precip: ", wt_list
for p in precips[1:]:
if p != 'NULL':
# print "extra precip!", p, orig_pw
wt_list = ['NULL', 'NULL', 'NULL', p, 'NULL', 'NULL']
# print "extra precip (precip):", wt_list
ret_wt_lists.append(wt_list)
return ret_wt_lists
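    # Worked example (traced from the parser above): _parse_present_weather('-TSRA')
    # matches intensity '-', descriptor 'TS' and precipitation 'RA', so it returns
    # [['-', 'NULL', 'TS', 'RA', 'NULL', 'NULL']]; _parse_weather_types() then renders
    # that as the postgres array literal "{{-, NULL, TS, RA, NULL, NULL}}".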
# Parse a list of 'present weather' strings and convert to multidimensional postgres array.
def _parse_weather_types(self, wt_str):
wt_str = wt_str.strip()
if ((wt_str == '') or (wt_str == '-')):
return None
if (not wt_str):
return None
else:
wt_list = wt_str.split(' ')
wt_list = [wt.strip() for wt in wt_list]
pw_lists = []
for wt in wt_list:
wts = self._parse_present_weather(wt)
# make all weather reports have the same length..
for obsv in wts:
wt_list3 = self.list_to_postgres_array(obsv)
pw_lists.append(wt_list3)
list_of_lists = "{" + ', '.join(pw_lists) + "}"
# print "list_of_lists: " , list_of_lists
return list_of_lists
def _parse_old_visibility(self, visibility_str):
visibility_str = visibility_str.strip()
visibility_str = re.sub('SM', '', visibility_str)
# has_slash = re.match('\/'), visibility_str)
# XX This is not worth it, too many weird, undocumented special cases on this particular column
return None
# list_to_postgres_array(list_string): convert to {blah, blah2, blah3} format for postgres.
def list_to_postgres_array(self, l):
return "{" + ', '.join(l) + "}"
def getWBAN(self, wban):
return wban
def getTemp(self, temp):
if temp[-1] == '*':
temp = temp[:-1]
return self.floatOrNA(temp)
def getWind(self, wind_speed, wind_direction):
wind_cardinal = None
wind_direction = wind_direction.strip()
if (wind_direction == 'VR' or wind_direction == 'M' or wind_direction == 'VRB'):
wind_direction = 'VRB'
wind_cardinal = 'VRB'
elif (wind_direction == '' or wind_direction == '-'):
wind_direction = None
wind_cardinal = None
else:
wind_direction_int = None
try:
# XXX: NOTE: rounding wind_direction to integer. Sorry.
# XXX: Examine this field more carefully to determine what its range is.
wind_direction_int = int(round(float(wind_direction)))
wind_direction = wind_direction_int
except ValueError as e:
if (self.debug == True):
if (self.current_row):
self.debug_outfile.write("\n")
zipped_row = list(zip(self.out_header, self.current_row))
for column in zipped_row:
self.debug_outfile.write(str(column) + "\n")
self.debug_outfile.write(
"\nValueError: [%s], could not convert wind_direction '%s' to int\n" % (e, wind_direction))
self.debug_outfile.flush()
return None, None
wind_cardinal = degToCardinal(wind_direction_int)
if (wind_speed == 0):
wind_direction = None
wind_cardinal = None
return wind_direction, wind_cardinal
def getPrecip(self, precip_str):
precip_total = None
precip_total = precip_str.strip()
if (precip_total == 'T'):
precip_total = .005 # 'Trace' precipitation = .005 inch or less
precip_total = self.floatOrNA(precip_total)
return precip_total
def floatOrNA(self, val):
val_str = str(val).strip()
if (val_str == 'M'):
return None
if (val_str == '-'):
return None
if (val_str == 'err'):
return None
if (val_str == 'null'):
return None
if (val_str == ''): # WindSpeed line
return None
else:
try:
fval = float(val_str)
except ValueError as e:
if (self.debug == True):
if (self.current_row):
self.debug_outfile.write("\n")
zipped_row = list(zip(self.out_header, self.current_row))
for column in zipped_row:
self.debug_outfile.write(str(column) + "\n")
self.debug_outfile.write("\nValueError: [%s], could not convert '%s' to float\n" % (e, val_str))
self.debug_outfile.flush()
return None
return fval
def integerOrNA(self, val):
val_str = str(val).strip()
if (val_str == 'M'):
return None
if (val_str == '-'):
return None
if (val_str == 'VRB'):
return None
if (val_str == 'err'):
return None
if (val_str == 'null'):
return None
if (val_str.strip() == ''): # WindSpeed line
return None
else:
try:
ival = int(val)
except ValueError as e:
if (self.debug == True):
if (self.current_row):
self.debug_outfile.write("\n")
zipped_row = list(zip(self.out_header, self.current_row))
for column in zipped_row:
self.debug_outfile.write(str(column) + "\n")
self.debug_outfile.write("\nValueError [%s] could not convert '%s' to int\n" % (e, val))
self.debug_outfile.flush()
return None
return ival
def _make_daily_table(self):
self.daily_table = self._get_daily_table()
self.daily_table.append_column(Column('id', BigInteger, primary_key=True))
self.daily_table.create(engine, checkfirst=True)
def _make_hourly_table(self):
self.hourly_table = self._get_hourly_table()
self.hourly_table.append_column(Column('id', BigInteger, primary_key=True))
self.hourly_table.create(engine, checkfirst=True)
def _make_metar_table(self):
self.metar_table = self._get_metar_table()
self.metar_table.append_column(Column('id', BigInteger, primary_key=True))
self.metar_table.create(engine, checkfirst=True)
def _get_daily_table(self, name='dat'):
return Table('%s_weather_observations_daily' % name, postgres_base.metadata,
Column('wban_code', String(5), nullable=False),
Column('date', Date, nullable=False),
Column('temp_max', Float, index=True),
Column('temp_min', Float, index=True),
Column('temp_avg', Float, index=True),
Column('departure_from_normal', Float),
Column('dewpoint_avg', Float),
Column('wetbulb_avg', Float),
# Column('weather_types', ARRAY(String(16))), # column 'CodeSum',
Column('weather_types', ARRAY(String)), # column 'CodeSum',
Column("snowice_depth", Float),
Column("snowice_waterequiv", Float),
# XX: Not sure about meaning of 'Cool' and 'Heat' columns in daily table,
# based on documentation.
Column('snowfall', Float),
Column('precip_total', Float, index=True),
Column('station_pressure', Float),
Column('sealevel_pressure', Float),
Column('resultant_windspeed', Float),
Column('resultant_winddirection', String(3)), # appears to be 00 (000) to 36 (360)
Column('resultant_winddirection_cardinal', String(3)), # e.g. NNE, NNW
Column('avg_windspeed', Float),
Column('max5_windspeed', Float),
Column('max5_winddirection', String(3)), # 000 through 360, M for missing
Column('max5_direction_cardinal', String(3)), # e.g. NNE, NNW
Column('max2_windspeed', Float),
Column('max2_winddirection', String(3)), # 000 through 360, M for missing
Column('max2_direction_cardinal', String(3)), # e.g. NNE, NNW
Column('longitude', Float),
Column('latitude', Float),
keep_existing=True)
def _get_hourly_table(self, name='dat'):
return Table('%s_weather_observations_hourly' % name, postgres_base.metadata,
Column('wban_code', String(5), nullable=False),
Column('datetime', DateTime, nullable=False),
# AO1: without precipitation discriminator, AO2: with precipitation discriminator
Column('old_station_type', String(5)),
Column('station_type', Integer),
Column('sky_condition', String),
Column('sky_condition_top', String), # top-level sky condition, e.g.
# if 'FEW018 BKN029 OVC100'
# we have overcast at 10,000 feet (100 * 100).
# BKN017TCU means broken clouds at 1700 feet w/ towering cumulonimbus
# BKN017CB means broken clouds at 1700 feet w/ cumulonimbus
Column('visibility', Float), # in Statute Miles
# XX in R: unique(unlist(strsplit(unlist(as.character(unique(x$WeatherType))), ' ')))
# Column('weather_types', ARRAY(String(16))),
Column('weather_types', ARRAY(String)),
Column('drybulb_fahrenheit', Float, index=True), # These can be NULL bc of missing data
Column('wetbulb_fahrenheit', Float), # These can be NULL bc of missing data
Column('dewpoint_fahrenheit', Float), # These can be NULL bc of missing data
Column('relative_humidity', Integer),
Column('wind_speed', Integer),
Column('wind_direction', String(3)), # 000 to 360
Column('wind_direction_cardinal', String(3)), # e.g. NNE, NNW
Column('station_pressure', Float),
Column('sealevel_pressure', Float),
Column('report_type', String), # Either 'AA' or 'SP'
Column('hourly_precip', Float, index=True),
Column('longitude', Float),
Column('latitude', Float),
keep_existing=True)
def _get_metar_table(self, name='dat'):
return Table('%s_weather_observations_metar' % name, postgres_base.metadata,
Column('wban_code', String(5), nullable=False),
Column('call_sign', String(5), nullable=False),
Column('datetime', DateTime, nullable=False),
Column('sky_condition', String),
Column('sky_condition_top', String), # top-level sky condition, e.g.
# if 'FEW018 BKN029 OVC100'
# we have overcast at 10,000 feet (100 * 100).
# BKN017TCU means broken clouds at 1700 feet w/ towering cumulonimbus
# BKN017CB means broken clouds at 1700 feet w/ cumulonimbus
Column('visibility', Float), # in Statute Miles
Column('weather_types', ARRAY(String)),
Column('temp_fahrenheit', Float, index=True), # These can be NULL bc of missing data
Column('dewpoint_fahrenheit', Float), # These can be NULL bc of missing data
Column('wind_speed', Integer),
Column('wind_direction', String(3)), # 000 to 360
Column('wind_direction_cardinal', String(3)), # e.g. NNE, NNW
Column('wind_gust', Integer),
Column('station_pressure', Float),
Column('sealevel_pressure', Float),
Column('precip_1hr', Float, index=True),
Column('precip_3hr', Float, index=True),
Column('precip_6hr', Float, index=True),
Column('precip_24hr', Float, index=True),
Column('longitude', Float),
Column('latitude', Float),
keep_existing=True)
def _extract_last_fname(self):
# XX: tar files are all old and not recent.
# tar_last =
# tar_last = datetime(2007, 5, 1, 0, 0)
# tar_filename = '%s.tar.gz' % tar_last.strftime('%Y%m')
# print 'tar_filename'
zip_last = datetime.now()
self.current_year = zip_last.year
self.current_month = zip_last.month
zip_filename = 'QCLCD%s.zip' % zip_last.strftime('%Y%m')
return zip_filename
def _extract_fname(self, year_num, month_num):
self.current_year = year_num
self.current_month = month_num
curr_dt = datetime(year_num, month_num, 1, 0, 0)
if ((year_num < 2007) or (year_num == 2007 and month_num < 5)):
tar_filename = '%s.tar.gz' % (curr_dt.strftime('%Y%m'))
return tar_filename
else:
zip_filename = 'QCLCD%s.zip' % curr_dt.strftime('%Y%m')
return zip_filename
def _extract_fnames(self):
tar_start = datetime(1996, 7, 1, 0, 0)
tar_end = datetime(2007, 5, 1, 0, 0)
zip_start = datetime(2007, 5, 1, 0, 0)
zip_end = datetime.now() + timedelta(days=30)
tar_filenames = ['%s.tar.gz' % d.strftime('%Y%m') for d in \
self._date_span(tar_start, tar_end)]
zip_filenames = ['QCLCD%s.zip' % d.strftime('%Y%m') for d in \
self._date_span(zip_start, zip_end)]
return tar_filenames + zip_filenames
def _load_hourly(self, transformed_input):
if (self.debug == True):
transformed_input.seek(0)
f = open(os.path.join(self.data_dir, 'weather_etl_dump_hourly.txt'), 'w')
f.write(transformed_input.getvalue())
f.close()
transformed_input.seek(0)
self.src_hourly_table = self._get_hourly_table(name='src')
self.src_hourly_table.drop(engine, checkfirst=True)
self.src_hourly_table.create(engine, checkfirst=True)
skip_cols = ['id', 'latitude', 'longitude']
names = [c.name for c in self.hourly_table.columns if c.name not in skip_cols]
ins_st = "COPY src_weather_observations_hourly ("
for idx, name in enumerate(names):
if idx < len(names) - 1:
ins_st += '%s, ' % name
else:
ins_st += '%s)' % name
else:
ins_st += "FROM STDIN WITH (FORMAT CSV, HEADER TRUE, DELIMITER ',')"
conn = engine.raw_connection()
cursor = conn.cursor()
if (self.debug == True):
self.debug_outfile.write("\nCalling: '%s'\n" % ins_st)
self.debug_outfile.flush()
cursor.copy_expert(ins_st, transformed_input)
conn.commit()
if (self.debug == True):
self.debug_outfile.write("Committed: '%s'" % ins_st)
self.debug_outfile.flush()
def _load_daily(self, transformed_input):
if (self.debug == True):
transformed_input.seek(0)
f = open(os.path.join(self.data_dir, 'weather_etl_dump_daily.txt'), 'w')
f.write(transformed_input.getvalue())
f.close()
transformed_input.seek(0)
skip_cols = ['id', 'latitude', 'longitude']
names = [c.name for c in self.daily_table.columns if c.name not in skip_cols]
self.src_daily_table = self._get_daily_table(name='src')
self.src_daily_table.drop(engine, checkfirst=True)
self.src_daily_table.create(engine, checkfirst=True)
ins_st = "COPY src_weather_observations_daily ("
for idx, name in enumerate(names):
if idx < len(names) - 1:
ins_st += '%s, ' % name
else:
ins_st += '%s)' % name
else:
ins_st += "FROM STDIN WITH (FORMAT CSV, HEADER TRUE, DELIMITER ',')"
conn = engine.raw_connection()
cursor = conn.cursor()
if (self.debug == True):
self.debug_outfile.write("\nCalling: '%s'\n" % ins_st)
self.debug_outfile.flush()
cursor.copy_expert(ins_st, transformed_input)
conn.commit()
if (self.debug == True):
self.debug_outfile.write("committed: '%s'" % ins_st)
self.debug_outfile.flush()
def _load_metar(self, transformed_input):
transformed_input.seek(0)
# print "_load_metar(): transformed_input is ", transformed_input.getvalue()
transformed_input.seek(0)
skip_cols = ['id', 'latitude', 'longitude']
names = [c.name for c in self.metar_table.columns if c.name not in skip_cols]
self.src_metar_table = self._get_metar_table(name='src')
self.src_metar_table.drop(engine, checkfirst=True)
try:
self.src_metar_table.create(engine, checkfirst=True)
except sqlalchemy.exc.ProgrammingError:
print("got ProgrammingError on src metar table create")
return None
ins_st = "COPY src_weather_observations_metar ("
for idx, name in enumerate(names):
if idx < len(names) - 1:
ins_st += '%s, ' % name
else:
ins_st += '%s)' % name
else:
ins_st += "FROM STDIN WITH (FORMAT CSV, HEADER TRUE, DELIMITER ',')"
# print "_load_metar() ins_st = ", ins_st
# print "transformed_input is", transformed_input.getvalue()
conn = engine.raw_connection()
cursor = conn.cursor()
if (self.debug == True):
self.debug_outfile.write("\nCalling: '%s'\n" % ins_st)
self.debug_outfile.flush()
cursor.copy_expert(ins_st, transformed_input)
conn.commit()
if (self.debug == True):
self.debug_outfile.write("committed: '%s'" % ins_st)
self.debug_outfile.flush()
pass
def _date_span(self, start, end):
delta = timedelta(days=30)
while (start.year, start.month) != (end.year, end.month):
yield start
start = self._add_month(start)
def _add_month(self, sourcedate):
month = sourcedate.month
        year = sourcedate.year + month // 12
month = month % 12 + 1
day = min(sourcedate.day, calendar.monthrange(year, month)[1])
return date(year, month, day)
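    # e.g. _add_month(date(2014, 12, 15)) -> date(2015, 1, 15): the integer month
    # arithmetic rolls December into January of the next year, and the day is
    # clamped to the length of the target month.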
def _get_distinct_weather_stations_by_month(self, year, month, daily_or_hourly='daily'):
table = Table('dat_weather_observations_%s' % daily_or_hourly, postgres_base.metadata, autoload=True,
autoload_with=engine)
column = None
if (daily_or_hourly == 'daily'):
column = table.c.date
elif (daily_or_hourly == 'hourly'):
column = table.c.datetime
        dt = datetime(year, month, 1)
dt_nextmonth = dt + relativedelta.relativedelta(months=1)
q = postgres_session.query(distinct(table.c.wban_code)).filter(and_(column >= dt,
column < dt_nextmonth))
station_list = list(map(operator.itemgetter(0), q.all()))
return station_list
    # After the most recent month of hourly data has been loaded, call this function:
    # it looks up the most recent hourly weather observation and deletes all METARs
    # recorded before that datetime.
def clear_metars(self):
        # find the max hourly observation datetime, then remove all metars before it
sql = "SELECT max (datetime) from dat_weather_observations_hourly;"
# given this time, delete all from dat_weather_observations_metar
#
print(("executing: ", sql))
conn = engine.contextual_connect()
results = conn.execute(sql)
res = results.fetchone()
if not res:
return
res_dt = res[0]
res_dt_str = datetime.strftime(res_dt, "%Y-%m-%d %H:%M:%S")
# given this most recent time, delete any metars from before that time
sql2 = "DELETE FROM dat_weather_observations_metar WHERE datetime < '%s'" % (res_dt_str)
print(("executing: ", sql2))
results = conn.execute(sql2)
class WeatherStationsETL(object):
"""
Download, transform and create table with info about weather stations
"""
def __init__(self):
self.stations_ftp = \
'ftp.ncdc.noaa.gov'
self.stations_file = \
'/pub/data/noaa/isd-history.csv'
def initialize(self):
self._extract()
self._transform()
self.make_station_table()
try:
self._load()
except:
print('weather stations already exist, updating instead')
self._update_stations()
def update(self):
self._extract()
self._transform()
# Doing this just so self.station_table is defined
self.make_station_table()
self._update_stations()
def _extract(self):
""" Download CSV of station info from NOAA """
try:
ftp = FTP(self.stations_ftp)
ftp.login()
stations = StringIO()
ftp.retrlines('RETR %s' % self.stations_file, stations.write)
self.station_raw_info = stations
self.station_raw_info.seek(0)
except:
self.station_info = None
raise WeatherError('Unable to fetch station data from NOAA.')
def _transform(self):
reader = csv.reader(self.station_raw_info)
header = ['wban_code', 'station_name', 'country',
'state', 'call_sign', 'location', 'elevation',
'begin', 'end']
next(reader)
self.clean_station_info = StringIO()
all_rows = []
wbans = []
for row in reader:
wban = row[1]
name = row[2]
country = row[3]
state = row[4]
call_sign = ''
lat = row[6].replace('+', '')
lon = row[7].replace('+', '')
elev = row[8].replace('+', '')
begin = parser.parse(row[9]).isoformat()
end = parser.parse(row[10]).isoformat()
if wban == '99999':
continue
elif wban in wbans:
continue
elif lat and lon:
location = 'SRID=4326;POINT(%s %s)' % (lon, lat)
wbans.append(wban)
all_rows.append([wban, name, country, state,
call_sign, location, elev, begin, end])
writer = csv.writer(self.clean_station_info)
writer.writerow(header)
writer.writerows(all_rows)
self.clean_station_info.seek(0)
def make_station_table(self):
self.station_table = Table('weather_stations', postgres_base.metadata,
Column('wban_code', String(5), primary_key=True),
Column('station_name', String(100), nullable=False),
Column('country', String(2)),
Column('state', String(2)),
Column('call_sign', String(5)),
Column('location', Geometry('POINT', srid=4326)),
Column('elevation', Float),
Column('begin', Date),
Column('end', Date))
self.station_table.create(engine, checkfirst=True)
def _load(self):
names = [c.name for c in self.station_table.columns]
ins_st = "COPY weather_stations FROM STDIN WITH (FORMAT CSV, HEADER TRUE, DELIMITER ',')"
conn = engine.raw_connection()
cursor = conn.cursor()
cursor.copy_expert(ins_st, self.clean_station_info)
conn.commit()
return 'bluh'
def _update_stations(self):
reader = csv.DictReader(self.clean_station_info)
conn = engine.connect()
for row in reader:
station = postgres_session.query(self.station_table).filter(
self.station_table.c.wban_code == row['wban_code']).all()
if not station:
ins = self.station_table.insert().values(**row)
conn.execute(ins)
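# Minimal driving sketch (illustrative, not part of the original module): it assumes
# a configured plenario database/engine and network access, and the WBAN code
# '94846' is only an example value.
if __name__ == '__main__':
    stations_etl = WeatherStationsETL()
    stations_etl.initialize()  # weather_stations must exist first: WeatherETL.__init__
                               # queries it to build the WBAN -> call sign map
    weather_etl = WeatherETL()
    weather_etl.initialize_month(2011, 1, weather_stations_list=['94846'])
    weather_etl.metar_initialize_current(weather_stations_list=['94846'])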
|
482241
|
from .light import VEXMixin
from ....utils.constants import DEFAULT_STATEMENT
class VEXSlicingMixin(VEXMixin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__no_exit_sliced = False
self._skip_stmts = 0
self._last_stmt = None
self._whitelist = None
__tls = ('__no_exit_sliced', '_skip_stmts', '_last_stmt', '_whitelist')
def process(self, *args, skip_stmts=0, last_stmt=None, whitelist=None, **kwargs):
self._skip_stmts = skip_stmts
self._last_stmt = last_stmt
self._whitelist = whitelist
return super().process(*args, **kwargs)
def handle_vex_block(self, irsb):
self.__no_exit_sliced = not self._check_vex_slice(DEFAULT_STATEMENT) and \
not any(self._check_vex_slice(stmt_idx) \
for stmt_idx, stmt in enumerate(irsb.statements) \
if stmt.tag == 'Ist_Exit')
super().handle_vex_block(irsb)
def _handle_vex_stmt(self, stmt):
if self._check_vex_slice(self.stmt_idx):
super()._handle_vex_stmt(stmt)
def _handle_vex_defaultexit(self, expr, jumpkind):
if self.__no_exit_sliced:
super()._handle_vex_defaultexit(None, 'Ijk_Boring')
elif self._check_vex_slice(DEFAULT_STATEMENT):
super()._handle_vex_defaultexit(expr, jumpkind)
def _check_vex_slice(self, stmt_idx):
if stmt_idx == DEFAULT_STATEMENT:
if self._last_stmt is not None and self._last_stmt != DEFAULT_STATEMENT:
return False
if self._whitelist is not None and DEFAULT_STATEMENT not in self._whitelist:
return False
else:
if stmt_idx < self._skip_stmts:
return False
if self._last_stmt is not None and self._last_stmt != DEFAULT_STATEMENT and stmt_idx > self._last_stmt:
return False
if self._whitelist is not None and stmt_idx not in self._whitelist:
return False
return True
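# Usage sketch (illustrative; the concrete engine class comes from the rest of the
# angr engine stack, not from this file): an engine whose MRO includes
# VEXSlicingMixin can be asked to execute only part of a block, e.g.
#
#     engine.process(state, skip_stmts=3, last_stmt=20, whitelist={3, 7, 11})
#
# Statements outside [skip_stmts, last_stmt] or missing from the whitelist are
# skipped by _handle_vex_stmt(); if neither the default exit nor any conditional
# exit statement survives the slice, _handle_vex_defaultexit() falls back to a
# plain 'Ijk_Boring' exit.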
|
482288
|
import os
import requests
WEIGHT_NAME_TO_CKPT = {
"detr": [
"https://storage.googleapis.com/visualbehavior-publicweights/detr/checkpoint",
"https://storage.googleapis.com/visualbehavior-publicweights/detr/detr.ckpt.data-00000-of-00001",
"https://storage.googleapis.com/visualbehavior-publicweights/detr/detr.ckpt.index"
]
}
def load_weights(model, weights: str):
""" Load weight on a given model
weights are supposed to be sotred in the weight folder at the root of the repository. If weights
does not exists, but are publicly known, the weight will be download from gcloud.
"""
if not os.path.exists('weights'):
os.makedirs('weights')
if "ckpt" in "weights":
model.load(weights)
elif weights in WEIGHT_NAME_TO_CKPT:
wdir = f"weights/{weights}"
if not os.path.exists(wdir):
os.makedirs(wdir)
for f in WEIGHT_NAME_TO_CKPT[weights]:
fname = f.split("/")[-1]
if not os.path.exists(os.path.join(wdir, fname)):
print("Download....", f)
r = requests.get(f, allow_redirects=True)
                with open(os.path.join(wdir, fname), 'wb') as fout:
                    fout.write(r.content)
print("Load weights from", os.path.join(wdir, f"{weights}.ckpt"))
l = model.load_weights(os.path.join(wdir, f"{weights}.ckpt"))
l.expect_partial()
else:
raise Exception(f'Cant load the weights: {weights}')
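# Usage sketch (illustrative): given a DETR Keras model built elsewhere in the
# repository (the builder name below is hypothetical), the public weights are
# fetched into weights/detr/ on first use and then restored:
#
#     model = build_detr_model()       # hypothetical helper
#     load_weights(model, "detr")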
|
482303
|
from collections import defaultdict
import math
from dataclasses import dataclass, field
from transformers import logging
from transformers.training_args import TrainingArguments
from src.sampling import negative_sampling_strategy
@dataclass
class OntoProteinModelArguments:
protein_model_file_name: str = field(
default=None,
metadata={"help": "The directory of protein sequence pretrained model."}
)
text_model_file_name: str = field(
default=None,
metadata={"help": "The directory of text sequence pretrained model."}
)
protein_model_config_name: str = field(
default=None,
metadata={'help': "Protein pretrained config name or path if not the same as protein_model_file_name"}
)
text_model_config_name: str = field(
default=None,
metadata={"help": "Text pretrained config name or path if not the same as text_model_file_name"}
)
protein_tokenizer_name: str = field(
default=None,
metadata={"help": "Protein sequence tokenizer name or path if not the same as protein_model_file_name"}
)
text_tokenizer_name: str = field(
default=None,
metadata={"help": "Text sequence tokenizer name or path if not the same as text_model_file_name"}
)
# For OntoModel
go_encoder_cls: str = field(
default='embedding',
metadata={"help": "The class of Go term description encoder"}
)
protein_encoder_cls: str = field(
default='bert',
metadata={'help': 'The class of protein encoder.'}
)
ke_embedding_size: int = field(
default=1024,
metadata={"help": "Size of knowledge embedding when using `Embedding` as Go encoder."}
)
double_entity_embedding_size: bool = field(
default=False,
metadata={"help": "Whether or not to set the entity embedding size to double."}
)
@dataclass
class OntoProteinTrainingArguments(TrainingArguments):
optimize_memory: bool = field(
default=False,
metadata={"help": "Whether or not to optimize memory when computering the loss function of negative samples. "}
)
use_seq: bool = field(
default=True,
metadata={"help": "Whether or not to use protein sequence, which its pooler output through encoder as protein representation."}
)
use_desc: bool = field(
default=False,
metadata={"help": "Whether or not to use description of Go term, which its pooler output through encoder as Go term embedding."}
)
dataloader_protein_go_num_workers: int = field(
default=1,
metadata={"help": "Number of workers to collate protein-go dataset."}
)
dataloader_go_go_num_workers: int = field(
default=1,
metadata={"help": "Number of workers to collate go-go dataset."}
)
dataloader_protein_seq_num_workers: int = field(
default=1,
metadata={'help': "Number of workers to collate protein sequence dataset."}
)
# number of negative sampling
num_protein_go_neg_sample: int = field(
default=1,
metadata={"help": "Number of negatve sampling for Protein-Go"}
)
num_go_go_neg_sample: int = field(
default=1,
metadata={"help": "Number of negative sampling for Go-Go"}
)
# Weight of KE loss and MLM loss in total loss
ke_lambda: float = field(
default=1.0,
metadata={"help": "Weight of KE loss."}
)
mlm_lambda: float = field(
default=1.0,
metadata={"help": "Weight of KE loss."}
)
# margin in KE score function.
ke_score_fn: str = field(
default=1.0,
metadata={"help": "Type of score function."}
)
ke_max_score: float = field(
default=1.0,
metadata={"help": "Margin in KE score function."}
)
# respectively set learning rate to training of protein language model and knowledge embedding
lm_learning_rate: float = field(
default=5e-5,
metadata={"help": "The initial MLM learning rate for AdamW."}
)
ke_learning_rate: float = field(
default=1e-4,
metadata={"help": "the initial KE learning rate for AdamW."}
)
num_protein_seq_epochs: int = field(
default=3,
metadata={"help": "Total number of training epochs of Protein MLM to perform."}
)
num_protein_go_epochs: int = field(
default=3,
metadata={"help": "Total number of training epochs of Protein-Go KE to perform."}
)
num_go_go_epochs: int = field(
default=3,
metadata={"help": "Total number of training epochs of Go-Go KE to perform."}
)
per_device_train_protein_seq_batch_size: int = field(
default=8,
metadata={"help": "Batch size per GPU/TPU core/CPU for training of Protein MLM."}
)
per_device_train_protein_go_batch_size: int = field(
default=8,
metadata={"help": "Batch size per GPU/TPU core/CPU for training of Protein-Go KE."}
)
per_device_train_go_go_batch_size: int = field(
default=8,
metadata={"help": "Batch size per GPU/TPU core/CPU for training of Go-Go KE."}
)
max_steps: int = field(
default=-1,
metadata={"help": "If > 0: set total number of training steps to perform. Override num_train_epochs."}
)
# distinguish steps of linear warmup on LM and KE.
lm_warmup_steps: int = field(
default=0,
metadata={"help": "Linear warmup over warmup_steps for LM."}
)
ke_warmup_steps: int = field(
default=0,
metadata={"help": "Linear warmup over warmup_steps for KE."}
)
lm_warmup_ratio: float = field(
default=0.0,
metadata={"help": "Linear warmup over warmup_ratio fraction of total steps for LM."}
)
ke_warmup_ratio: float = field(
default=0.0,
metadata={"help": "Linear warmup over warmup_ratio fraction of total steps for KE."}
)
def __post_init__(self):
super().__post_init__()
self.per_device_train_protein_seq_batch_size = self.per_device_train_batch_size
self.per_device_train_go_go_batch_size = self.per_device_train_batch_size
self.per_device_train_protein_go_batch_size = self.per_device_train_batch_size
# if self.deepspeed:
# # - must be run very last in arg parsing, since it will use a lot of these settings.
# # - must be run before the model is created.
# from src.op_deepspeed import OntoProteinTrainerDeepSpeedConfig
# # will be used later by the Trainer
# # note: leave self.deepspeed unmodified in case a user relies on it not to be modified)
# self.hf_deepspeed_config = OntoProteinTrainerDeepSpeedConfig(self.deepspeed)
# self.hf_deepspeed_config.trainer_config_process(self)
@property
def train_protein_seq_batch_size(self) -> int:
"""
The actual batch size for training of Protein MLM.
"""
per_device_batch_size = self.per_device_train_protein_seq_batch_size
train_batch_size = per_device_batch_size * max(1, self.n_gpu)
return train_batch_size
@property
def train_protein_go_batch_size(self) -> int:
"""
The actual batch size for training of Protein-Go KE.
"""
per_device_batch_size = self.per_device_train_protein_go_batch_size
train_batch_size = per_device_batch_size * max(1, self.n_gpu)
return train_batch_size
@property
def train_go_go_batch_size(self) -> int:
"""
The actual batch size for training of Go-Go KE.
"""
per_device_batch_size = self.per_device_train_go_go_batch_size
train_batch_size = per_device_batch_size * max(1, self.n_gpu)
return train_batch_size
def get_warmup_steps(self, num_training_steps: int):
"""
Get number of steps used for a linear warmup.
"""
warmup_steps = (
self.warmup_steps if self.warmup_steps > 0 else math.ceil(num_training_steps * self.warmup_ratio)
)
return warmup_steps
def get_lm_warmup_steps(self, num_training_steps: int):
"""
Get number of steps used for a linear warmup on LM.
"""
warmup_steps = (
self.lm_warmup_steps if self.lm_warmup_steps > 0 else math.ceil(num_training_steps * self.lm_warmup_ratio)
)
return warmup_steps
def get_ke_warmup_steps(self, num_training_steps: int):
"""
Get number of steps used for a linear warmup on KE.
"""
warmup_steps = (
self.ke_warmup_steps if self.ke_warmup_steps > 0 else math.ceil(num_training_steps * self.ke_warmup_ratio)
)
return warmup_steps
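# Sketch (not from the original code): one way the warmup helpers above could feed two
# separate linear schedules, one for the language-model parameters and one for the
# knowledge-embedding parameters. Assumes the `transformers` scheduler API; `lm_optimizer`,
# `ke_optimizer` and `num_training_steps` are hypothetical inputs supplied by the caller.
def build_dual_schedulers(args, lm_optimizer, ke_optimizer, num_training_steps):
    from transformers import get_linear_schedule_with_warmup
    # LM schedule driven by lm_warmup_steps / lm_warmup_ratio.
    lm_scheduler = get_linear_schedule_with_warmup(
        lm_optimizer,
        num_warmup_steps=args.get_lm_warmup_steps(num_training_steps),
        num_training_steps=num_training_steps,
    )
    # KE schedule driven by ke_warmup_steps / ke_warmup_ratio.
    ke_scheduler = get_linear_schedule_with_warmup(
        ke_optimizer,
        num_warmup_steps=args.get_ke_warmup_steps(num_training_steps),
        num_training_steps=num_training_steps,
    )
    return lm_scheduler, ke_scheduler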
@dataclass
class OntoProteinDataTrainingArguments:
# Dataset use
# Note: We only consider the following combinations of datasets for several types of model:
# ProtBert: protein_seq
# OntoProtein w/o seq: protein_go + go_go
# OntoProtein w/ seq: protein_seq + protein_go + go_go
model_protein_seq_data: bool = field(
default=True,
metadata={"help": "Whether or not to model protein sequence data."}
)
model_protein_go_data: bool = field(
default=True,
metadata={"help": "Whether or not to model triplet data of `Protein-Go`"}
)
model_go_go_data: bool = field(
default=True,
metadata={"help": "Whether or not to model triplet data of `Go-Go`"}
)
# Pretrain data directory and specific file name
# Note: The directory needs to contain the following files:
# - {protein sequence data}
# - data.mdb
# - lock.mdb
# - go_def.txt
# - go_type.txt
# - go_go_triplet.txt
# - protein_go_triplet.txt
# - protein_seq.txt
# - protein2id.txt
# - go2id.txt
# - relation2id.txt
pretrain_data_dir: str = field(
default='data/pretrain_data',
metadata={"help": "The directory path of the pretrain data."}
)
protein_seq_data_file_name: str = field(
default='swiss_seq',
metadata={"help": "The directory name of the specific protein sequence data."}
)
in_memory: bool = field(
default=False,
metadata={"help": "Whether or not to keep the data in memory during sampling."}
)
# negative sampling
negative_sampling_fn: str = field(
default="simple_random",
metadata={"help": f"Strategy of negative sampling. Choices: {', '.join(negative_sampling_strategy.keys())}."}
)
protein_go_sample_head: bool = field(
default=False,
metadata={"help": "Whether or not to sample head entity in triplet of `protein-go`"}
)
protein_go_sample_tail: bool = field(
default=True,
metadata={"help": "Whether or not to sample tail entity in triplet of `protein-go`"}
)
go_go_sample_head: bool = field(
default=False,
metadata={"help": "Whether or not to sample head entity in triplet of `go-go`"}
)
go_go_sample_tail: bool = field(
default=False,
metadata={"help": "Whether or not to sample tail entity in triplet of `go-go`"}
)
# max length of protein sequence and Go term description
max_protein_seq_length: int = field(
default=None,
metadata={"help": "Maximum length of protein sequence."}
)
max_text_seq_length: int = field(
default=512,
metadata={"help": "Maximum length of Go term description."}
)
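# Illustrative usage (not part of the original file): argument dataclasses like the one above
# are typically parsed with Hugging Face's HfArgumentParser; the CLI entry point shown here
# is an assumption, only the dataclass name comes from this file.
if __name__ == "__main__":
    from transformers import HfArgumentParser
    parser = HfArgumentParser(OntoProteinDataTrainingArguments)
    (data_args,) = parser.parse_args_into_dataclasses()
    print(data_args.pretrain_data_dir, data_args.max_text_seq_length)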
|
482329
|
from .h5_utils import (merge_h5_trajectory, load_h5_as_dict_array, load_h5s_as_list_dict_array, generate_chunked_h5_replay)
from .serialization import *
from .hash_utils import check_md5sum, md5sum
|
482372
|
import FWCore.ParameterSet.Config as cms
import DQM.TrackingMonitor.TrackingMonitor_cfi
MonitorTrackGLBMuons = DQM.TrackingMonitor.TrackingMonitor_cfi.TrackMon.clone()
MonitorTrackGLBMuons.TrackProducer = 'globalMuons'
MonitorTrackGLBMuons.AlgoName = 'glb'
MonitorTrackGLBMuons.FolderName = 'Muons/globalMuons'
MonitorTrackGLBMuons.doBeamSpotPlots = False
MonitorTrackGLBMuons.BSFolderName = 'Muons/globalCosmicMuons/BeamSpotParameters'
MonitorTrackGLBMuons.doSeedParameterHistos = False
MonitorTrackGLBMuons.doProfilesVsLS = True
MonitorTrackGLBMuons.doAllPlots = False
MonitorTrackGLBMuons.doGeneralPropertiesPlots = True
MonitorTrackGLBMuons.doHitPropertiesPlots = True
MonitorTrackGLBMuons.doTrackerSpecific = True
MonitorTrackGLBMuons.doDCAPlots = True
MonitorTrackGLBMuons.doDCAwrtPVPlots = True
MonitorTrackGLBMuons.doDCAwrt000Plots = False
MonitorTrackGLBMuons.doSIPPlots = True
MonitorTrackGLBMuons.doEffFromHitPatternVsPU = True
MonitorTrackGLBMuons.doEffFromHitPatternVsBX = False
MonitorTrackGLBMuons.doEffFromHitPatternVsLUMI = cms.bool(True)
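# Illustrative only (not part of the original config): a cloned monitor like this is normally
# attached to a DQM sequence or path; the sequence name below is hypothetical.
MonitorTrackGLBMuonsSequence = cms.Sequence(MonitorTrackGLBMuons)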
|
482395
|
from fastapi import FastAPI
from fastapi.testclient import TestClient
from unittest.mock import patch, ANY
from a2ml.server.server import app, API_SCHEMA
from a2ml.tasks_queue.tasks_api import new_project_task, list_projects_task, delete_project_task, select_project_task, \
new_dataset_task, list_datasets_task, delete_dataset_task, select_dataset_task, \
import_data_task
client = TestClient(app)
class TestServer():
def test_hello(self):
response = client.get('/hello')
assert response.status_code == 200
assert response.json() == {'Hello': 'World'}
@classmethod
def define_tests(cls, schema):
for path in schema.keys():
for verb in schema[path].keys():
task = schema[path][verb]
cls.define_test(verb, path, task)
@classmethod
def define_test(cls, http_verb, path, task):
def test(self):
with patch.object(task, 'delay') as mock_method:
http_method = getattr(client, http_verb)
response = http_method(path + '?arg1=11', json={'arg2': 22})
assert response.status_code == 200
body = response.json()
assert body['meta'] == { 'status': 200 }
assert isinstance(body['data']['request_id'], str)
mock_method.assert_called_once_with({
'arg1': '11',
'arg2': 22,
'_request_id': ANY
})
setattr(cls, f'test_{http_verb}' + path.replace('/', '_'), test)
TestServer.define_tests(API_SCHEMA)
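# Illustrative only: define_tests expects a mapping of URL path -> HTTP verb -> Celery task,
# which is what a2ml.server.server.API_SCHEMA provides. The path below is hypothetical; the
# tasks are the ones imported at the top of this file.
_EXAMPLE_SCHEMA_SHAPE = {
    '/api/v1/projects': {'post': new_project_task, 'get': list_projects_task},
}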
|
482422
|
sGenerateLightVersionForDebugging = False
# sGenerateLightVersionForDebugging = True
sDatasetFolder = "../dataset/hand/"
sDisplayDebugging = sGenerateLightVersionForDebugging
import cv2
import json
from coco_util import *
def runHands(jsonOutput, imageAndAnnotFolder):
data = load_json(imageAndAnnotFolder + "hands_v143_14817.json")
totalWriteCount = len(data["root"])
printEveryXIterations = max(1, round(totalWriteCount / 10))
print("Initial #annotations: " + str(totalWriteCount))
ii=-1
pid=-1
annotations= []
images = []
for item in data["root"]:
ii+=1
pid+=1
if ii % printEveryXIterations == 0:
print('Sample %d of %d' % (ii+1, totalWriteCount))
elif sGenerateLightVersionForDebugging:
continue
img = cv2.imread(imageAndAnnotFolder + item["img_paths"])
# Fill hand keypoints
all_points = []
# Option a) Remove empty left hand
# pass
# # Option b) Add empty left hand
# left_hand_pts = []
# if len(left_hand_pts) == 0:
# for i in range(0, 21): left_hand_pts.append([0,0,0])
# all_points.extend(left_hand_pts)
right_hand_pts = item["joint_self"]
all_points.extend(right_hand_pts)
for pt in all_points:
if pt[2] == 0: continue
cv2.circle(img, (int(pt[0]), int(pt[1])), 2, (0,0,255), -1)
# Convert into caffe visibility?
caffe_points = []
for point in all_points:
caffe_points.append(point[0])
caffe_points.append(point[1])
caffe_points.append(point[2])
# Add Image
image_path = item["img_paths"]
# Image
img_width = img.shape[1]
img_height = img.shape[0]
# Add Image
image_object = dict()
image_object["id"] = ii
image_object["file_name"] = image_path.split("/")[-1]
image_object["width"] = img_width
image_object["height"] = img_height
images.append(image_object)
# Get rectangle
rect = get_rect_from_points_only_bigger(right_hand_pts, img_width, img_height, 10)
rectW = rect[2]-rect[0]
rectH = rect[3]-rect[1]
# Store Person Data
data = dict()
data["segmentation"] = [] # DONT HAVE
data["num_keypoints"] = len(all_points)
data["img_path"] = image_path.split("/")[-1]
data["bbox"] = [rect[0], rect[1], rect[2]-rect[0], rect[3]-rect[1]]
data["area"] = data["bbox"][2]*data["bbox"][3]
data["iscrowd"] = 0
data["keypoints"] = caffe_points
data["img_width"] = img_width
data["img_height"] = img_height
data["category_id"] = 1
data["image_id"] = ii
data["id"] = pid
annotations.append(data)
# Display
if sDisplayDebugging:
cv2.rectangle(img, (int(rect[0]), int(rect[1])), (int(rect[2]), int(rect[3])), 255, 2)
show_image(img)
cv2.waitKey(-1)
# Json Object
json_object = dict()
json_object["info"] = dict()
json_object["info"]["version"] = 1.0
json_object["info"]["description"] = "Hands Dome Dataset in COCO Json Format"
json_object["licenses"] = []
json_object["images"] = images
json_object["annotations"] = annotations
# JSON writing
print("Saving " + jsonOutput + "...")
print("Final #Images: " + str(len(json_object["images"])))
print("Final #Annotations: " + str(len(json_object["annotations"])))
open(jsonOutput, 'w').close()
with open(jsonOutput, 'w') as outfile:
json.dump(json_object, outfile)
print("Saved!")
sImageAndAnnotFolder = sDatasetFolder + "hand143_panopticdb/"
sJsonOutput = sDatasetFolder + 'json/hand21_dome_train.json'
runHands(sJsonOutput, sImageAndAnnotFolder)
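# Optional sanity check (illustrative, not part of the original script): the JSON written above
# follows the COCO layout, so it can be loaded back with pycocotools when that package is
# available; pycocotools is an assumed extra dependency here.
def verifyCocoJson(jsonPath):
    from pycocotools.coco import COCO
    coco = COCO(jsonPath)
    print("Loaded %d images and %d annotations" % (len(coco.imgs), len(coco.anns)))
# verifyCocoJson(sJsonOutput)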
|
482428
|
from re import search
from ducktape.mark.resource import cluster
from ducktape.mark import parametrize
from ducktape.cluster.cluster_spec import ClusterSpec
from waltz_ducktape.tests.produce_consume_validate import ProduceConsumeValidateTest
class BenchmarkTest(ProduceConsumeValidateTest):
"""
A benchmark of Waltz producer/consumer performance.
"""
MIN_CLUSTER_SPEC = ClusterSpec.from_list([
{'cpu':1, 'mem':'1GB', 'disk':'25GB', 'additional_disks':{'/dev/sdb':'100GB'}, 'num_nodes':3},
{'cpu':1, 'mem':'3GB', 'disk':'15GB', 'num_nodes':2},
{'cpu':1, 'mem':'1GB', 'disk':'25GB', 'num_nodes':1}])
def __init__(self, test_context):
super(BenchmarkTest, self).__init__(test_context=test_context)
@cluster(cluster_spec=MIN_CLUSTER_SPEC)
@parametrize(txn_size=512, txn_per_thread=1000, num_thread=100, interval=10, lock_pool_size=0, num_active_partitions=1, timeout=360)
@parametrize(txn_size=512, txn_per_thread=1000, num_thread=100, interval=20, lock_pool_size=0, num_active_partitions=1, timeout=360)
@parametrize(txn_size=512, txn_per_thread=2000, num_thread=50, interval=10, lock_pool_size=0, num_active_partitions=1, timeout=360)
@parametrize(txn_size=1024, txn_per_thread=1000, num_thread=100, interval=10, lock_pool_size=0, num_active_partitions=1, timeout=360)
@parametrize(txn_size=512, txn_per_thread=100, num_thread=100, interval=10, lock_pool_size=64, num_active_partitions=1, timeout=360)
@parametrize(txn_size=512, txn_per_thread=100, num_thread=100, interval=10, lock_pool_size=128, num_active_partitions=1, timeout=360)
@parametrize(txn_size=512, txn_per_thread=100, num_thread=100, interval=10, lock_pool_size=128, num_active_partitions=2, timeout=360)
def test_producer_performance(self, txn_size, txn_per_thread, num_thread, interval, lock_pool_size, num_active_partitions, timeout):
test_cmd = self.performance_cli.producer_test_cmd(self.log_file_path, txn_size, txn_per_thread, num_thread,
interval, lock_pool_size, num_active_partitions)
test_output = self.run_produce_consume_validate(lambda: self.simple_validation_func(test_cmd, timeout))
self.print_producer_performance(test_output)
@cluster(cluster_spec=MIN_CLUSTER_SPEC)
@parametrize(txn_size=512, num_txn=100000, num_active_partitions=1, timeout=360)
@parametrize(txn_size=512, num_txn=100000, num_active_partitions=4, timeout=360)
@parametrize(txn_size=1024, num_txn=100000, num_active_partitions=1, timeout=360)
def test_consumer_performance(self, txn_size, num_txn, num_active_partitions, timeout):
test_cmd = self.performance_cli.consumer_test_cmd(self.log_file_path, txn_size, num_txn, num_active_partitions)
test_output = self.run_produce_consume_validate(lambda: self.simple_validation_func(test_cmd, timeout))
self.print_consumer_performance(test_output)
def print_producer_performance(self, test_output):
performance = search(r".*transactions(.|\n)*MilliSec/Transaction.*", test_output).group(0)
print("\n####################### PRODUCER PERFORMANCE REPORT #######################\n" + \
"\n{performance}\n".format(performance=performance) + \
"\n###########################################################################\n")
def print_consumer_performance(self, test_output):
performance = search(".*transactions(.|\n)*MB/sec.*", test_output).group(0)
print("\n####################### CONSUMER PERFORMANCE REPORT #######################\n" + \
"\n{performance}\n".format(performance=performance) + \
"\n###########################################################################\n")
|
482431
|
from aioamqp_consumer import RpcServer, json_rpc
amqp_url = 'amqp://guest:[email protected]:5672//'
@json_rpc(queue_name='random_queue')
async def square(*, x):
ret = x ** 2
print(x, ret)
return ret
if __name__ == '__main__':
RpcServer(amqp_url, method=square).run()
|
482444
|
import io
import os
import time
from .utils import download_dataset
import zipfile
import numpy as np
from scipy.io.wavfile import read as wav_read
from tqdm import tqdm
import pretty_midi
import soundfile as sf
_urls = {
"https://storage.googleapis.com/magentadata/datasets/groove/groove-v1.0.0.zip": "groove-v1.0.0.zip"
}
def load(path=None):
"""
The Groove MIDI Dataset (GMD) is composed of 13.6 hours of aligned MIDI and (synthesized) audio of human-performed, tempo-aligned expressive drumming. The dataset contains 1,150 MIDI files and over 22,000 measures of drumming.
Size: 4.76GB
License
-------
Creative Commons License
The dataset is made available by Google LLC under a Creative Commons Attribution 4.0 International (CC BY 4.0) License.
Dataset
-------
Update: If you're looking for a dataset suitable for drum transcription or other audio-focused applications, see our Expanded Groove MIDI Dataset.
To enable a wide range of experiments and encourage comparisons between methods on the same data, we created a new dataset of drum performances recorded in MIDI format. We hired professional drummers and asked them to perform in multiple styles to a click track on a Roland TD-11 electronic drum kit. We also recorded the aligned, high-quality synthesized audio from the TD-11 and include it in the release.
The Groove MIDI Dataset (GMD), has several attributes that distinguish it from existing ones:
The dataset contains about 13.6 hours, 1,150 MIDI files, and over 22,000 measures of drumming.
Each performance was played along with a metronome set at a specific tempo by the drummer.
The data includes performances by a total of 10 drummers, with more than 80% of duration coming from hired professionals. The professionals were able to improvise in a wide range of styles, resulting in a diverse dataset.
The drummers were instructed to play a mix of long sequences (several minutes of continuous playing) and short beats and fills.
Each performance is annotated with a genre (provided by the drummer), tempo, and anonymized drummer ID.
Most of the performances are in 4/4 time, with a few examples from other time signatures.
Four drummers were asked to record the same set of 10 beats in their own style. These are included in the test set split, labeled eval-session/groove1-10.
In addition to the MIDI recordings that are the primary source of data for the experiments in this work, we captured the synthesized audio outputs of the drum set and aligned them to within 2ms of the corresponding MIDI files.
A train/validation/test split configuration is provided for easier comparison of model accuracy on various tasks.
Split Beats Fills Measures (approx.) Hits Duration (minutes)
Train 378 519 17752 357618 648.5
Validation 48 76 2269 44044 82.2
Test 77 52 2193 43832 84.3
Total 503 647 22214 445494 815.0
For more information about how the dataset was created and several applications of it, please see the paper where it was introduced: Learning to Groove with Inverse Sequence Transformations.
For an example application of the dataset, see our blog post on GrooVAE.
MIDI Data
Format
The Roland TD-11 splits the recorded data into separate tracks: one for meta-messages (tempo, time signature, key signature), one for control changes (hi-hat pedal position), and one for notes. The control changes are set on channel 0 and the notes on channel 9 (the canonical drum channel). To simplify processing of this data, we made two adjustments to the raw MIDI files before distributing:
We merged all messages (meta, control change, and note) to a single track.
We set all messages to channel 9 (10 if 1-indexed).
Drum Mapping
------------
The Roland TD-11 used to record the performances in MIDI uses some pitch values that differ from the General MIDI (GM) Specifications. Below we show how the Roland mapping compares to GM. Please take note of these discrepancies during playback and training. The final column shows the simplified mapping we used in our paper.
Pitch Roland Mapping GM Mapping Paper Mapping Frequency
36 Kick Bass Drum 1 Bass (36) 88067
38 Snare (Head) Acoustic Snare Snare (38) 102787
40 Snare (Rim) Electric Snare Snare (38) 22262
37 Snare X-Stick Side Stick Snare (38) 9696
48 Tom 1 Hi-Mid Tom High Tom (50) 13145
50 Tom 1 (Rim) High Tom High Tom (50) 1561
45 Tom 2 Low Tom Low-Mid Tom (47) 3935
47 Tom 2 (Rim) Low-Mid Tom Low-Mid Tom (47) 1322
43 Tom 3 (Head) High Floor Tom High Floor Tom (43) 11260
58 Tom 3 (Rim) Vibraslap High Floor Tom (43) 1003
46 HH Open (Bow) Open Hi-Hat Open Hi-Hat (46) 3905
26 HH Open (Edge) N/A Open Hi-Hat (46) 10243
42 HH Closed (Bow) Closed Hi-Hat Closed Hi-Hat (42) 31691
22 HH Closed (Edge) N/A Closed Hi-Hat (42) 34764
44 HH Pedal Pedal Hi-Hat Closed Hi-Hat (42) 52343
49 Crash 1 (Bow) Crash Cymbal 1 Crash Cymbal (49) 720
55 Crash 1 (Edge) Splash Cymbal Crash Cymbal (49) 5567
57 Crash 2 (Bow) Crash Cymbal 2 Crash Cymbal (49) 1832
52 Crash 2 (Edge) Chinese Cymbal Crash Cymbal (49) 1046
51 Ride (Bow) Ride Cymbal 1 Ride Cymbal (51) 43847
59 Ride (Edge) Ride Cymbal 2 Ride Cymbal (51) 2220
53 Ride (Bell) Ride Bell Ride Cymbal (51) 5567
Control Changes
The TD-11 also records control changes specifying the position of the hi-hat pedal on each hit. We have preserved this information under control 4.
How to Cite
-----------
If you use the Groove MIDI Dataset in your work, please cite the paper where it was introduced:
<NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
"Learning to Groove with Inverse Sequence Transformations."
International Conference on Machine Learning (ICML), 2019.
You can also use the following BibTeX entry:
@inproceedings{groove2019,
Author = {<NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
Title = {Learning to Groove with Inverse Sequence Transformations},
Booktitle = {International Conference on Machine Learning (ICML)},
Year = {2019},
}
Acknowledgements
----------------
We'd like to thank the following primary contributors to the dataset:
<NAME> (of Never Weather)
<NAME> (of Phish)
<NAME> (of Wild Mango)
<NAME> (of SF Contemporary Music Players)
<NAME> (of El Duo)
Additional drumming provided by: <NAME>, <NAME>, <NAME>, and <NAME>.
"""
if path is None:
path = os.environ["DATASET_PATH"]
download_dataset(path, "groove_MIDI", _urls)
t0 = time.time()
# load wavs
f = zipfile.ZipFile(os.path.join(path, "groove_MIDI", "groove-v1.0.0.zip"))
columns = "drummer,session,id,style,bpm,beat_type,time_signature,midi_filename,audio_filename,duration,split".split(
","
)
info_file = f.open("groove/info.csv")
infos = np.loadtxt(info_file, delimiter=",", dtype=str)
data = [[] for i in range(len(columns))]
indices = list(range(7)) + [9, 10]
for row in tqdm(infos[1:], ascii=True):
try:
wav = f.read("groove/" + row[8])
byt = io.BytesIO(wav)
data[8].append(sf.read(byt)[0].astype("float32"))
except RuntimeError:
print("...skipping ", row[8])
continue
for column in indices:
data[column].append(row[column])
data[7].append(pretty_midi.PrettyMIDI(io.BytesIO(f.read("groove/" + row[7]))))
data = {col: data for col, data in zip(columns, data)}
return data
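# Usage sketch (not part of the original module): load() returns a dict keyed by the CSV
# columns above, with decoded waveforms under "audio_filename" and PrettyMIDI objects under
# "midi_filename"; relying on the DATASET_PATH environment variable is the module's own default.
if __name__ == "__main__":
    groove = load()
    print(len(groove["midi_filename"]), "performances loaded")
    first_audio = groove["audio_filename"][0]  # float32 waveform decoded with soundfile
    first_midi = groove["midi_filename"][0]    # pretty_midi.PrettyMIDI instance
    print(first_audio.shape, first_midi.get_end_time())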
|
482464
|
from bounties import settings
from utils.functional_tools import wrapped_partial, pipe, pluck
from std_bounties.slack_templates import templates
from std_bounties.slack_client_helpers import notify_slack, get_base_bounty_values, format_message
from slackclient import SlackClient
sc = SlackClient(settings.SLACK_TOKEN)
channel = settings.NOTIFICATIONS_SLACK_CHANNEL
class SlackMessageClient:
def __init__(self):
pass
def bounty_issued(self, bounty):
message = pipe(bounty, [get_base_bounty_values, wrapped_partial(pluck, fields=[
'title',
'id',
'usd_price',
'total_value',
'token_symbol',
'token_price',
'deadline',
'link',
'total_value'
]), wrapped_partial(format_message, msg_string=templates['BountyIssued'])])
notify_slack(sc, channel, 'Bounty Issued', message)
def bounty_issued_and_activated(self, bounty):
message = pipe(bounty, [get_base_bounty_values, wrapped_partial(pluck, fields=[
'title',
'id',
'usd_price',
'total_value',
'token_symbol',
'token_price',
'deadline',
'link',
'total_value'
]), wrapped_partial(format_message, msg_string=templates['BountyIssued'])])
notify_slack(sc, channel, 'Bounty Issued and Activated', message)
def bounty_activated(self, bounty):
message = pipe(bounty, [get_base_bounty_values, wrapped_partial(pluck, fields=[
'title',
'id',
'usd_price',
'total_value',
'token_symbol',
'token_price',
'link'
]), wrapped_partial(format_message, msg_string=templates['BountyActivated'])])
notify_slack(sc, channel, 'Bounty Activated', message)
def bounty_fulfilled(self, bounty, fulfillment_id):
message = pipe(
bounty, [
get_base_bounty_values, wrapped_partial(
pluck, fields=[
'title', 'id', 'link']), wrapped_partial(
format_message, msg_string=templates['BountyFulfilled'], fulfillment_id=fulfillment_id)])
notify_slack(sc, channel, 'Bounty Fulfilled', message)
def fulfillment_updated(self, bounty, fulfillment_id):
message = pipe(
bounty, [
get_base_bounty_values, wrapped_partial(
pluck, fields=[
'title', 'id', 'link']), wrapped_partial(
format_message, msg_string=templates['FulfillmentUpdated'], fulfillment_id=fulfillment_id)])
notify_slack(sc, channel, 'Fulfillment Updated', message)
def fulfillment_accepted(self, bounty, fulfillment_id):
message = pipe(bounty,
[get_base_bounty_values,
wrapped_partial(pluck,
fields=['title',
'id',
'usd_price',
'total_value',
'token_symbol',
'token_price',
'deadline',
'link',
'token_lock_price']),
wrapped_partial(format_message,
msg_string=templates['FulfillmentAccepted'],
fulfillment_id=fulfillment_id)])
notify_slack(sc, channel, 'Fulfillment Accepted', message)
def bounty_killed(self, bounty):
message = pipe(
bounty, [
get_base_bounty_values, wrapped_partial(
pluck, fields=[
'title', 'id', 'link']), wrapped_partial(
format_message, msg_string=templates['BountyKilled'])])
notify_slack(sc, channel, 'Bounty Killed', message)
def contribution_added(self, bounty):
message = pipe(
bounty, [
get_base_bounty_values, wrapped_partial(
pluck, fields=[
'title', 'id', 'link']), wrapped_partial(
format_message, msg_string=templates['ContributionAdded'])])
notify_slack(sc, channel, 'Contribution Added', message)
def deadline_extended(self, bounty):
message = pipe(
bounty, [
get_base_bounty_values, wrapped_partial(
pluck, fields=[
'title', 'id', 'deadline', 'link']), wrapped_partial(
format_message, msg_string=templates['DeadlineExtended'])])
notify_slack(sc, channel, 'Deadline Extended', message)
def bounty_changed(self, bounty):
message = pipe(
bounty, [
get_base_bounty_values, wrapped_partial(
pluck, fields=[
'title', 'id', 'link']), wrapped_partial(
format_message, msg_string=templates['BountyChanged'])])
notify_slack(sc, channel, 'Bounty Changed', message)
def issuer_transferred(self, bounty):
message = pipe(
bounty, [
get_base_bounty_values, wrapped_partial(
pluck, fields=[
'title', 'id', 'link']), wrapped_partial(
format_message, msg_string=templates['IssuerTransferred'])])
notify_slack(sc, channel, 'Issuer Transferred', message)
def payout_increased(self, bounty):
message = pipe(
bounty, [
get_base_bounty_values, wrapped_partial(
pluck, fields=[
'title', 'id', 'link']), wrapped_partial(
format_message, msg_string=templates['PayoutIncreased'])])
notify_slack(sc, channel, 'Payout Increased', message)
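# Usage sketch (illustrative, not part of the original module): each notifier takes a bounty
# object that get_base_bounty_values knows how to unpack; the bounty and fulfillment_id passed
# in here are hypothetical caller-supplied values.
def notify_bounty_lifecycle(bounty, fulfillment_id):
    client = SlackMessageClient()
    client.bounty_issued(bounty)
    client.bounty_fulfilled(bounty, fulfillment_id)
    client.fulfillment_accepted(bounty, fulfillment_id)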
|
482489
|
from osim.env import ArmEnv, HopEnv, GaitEnv, StandEnv, CrouchEnv
import joblib
import argparse
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.envs.gym_env import GymEnv
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import stub, run_experiment_lite
parser = argparse.ArgumentParser(description='Test a policy')
parser.add_argument('-p', action="store", dest="params_file")
parsed = parser.parse_args()
params = joblib.load(parsed.params_file)
env = params['env']
env.test = True
obs = env.reset()
total_rew = 0.0
for i in range(200):
action = params['policy'].get_action(obs)
obs,reward,_,_ = env.step(action[0])
total_rew += reward
if env._wrapped_env.__class__.__name__ == "ArmEnv" and i % 200 == 0:
env._wrapped_env.new_target()
print(env._wrapped_env.shoulder, env._wrapped_env.elbow)
print(total_rew)
|