id | content
---|---|
81813
|
from math import inf
from typing import List
class Solution:
def findRestaurant(self, list1: List[str], list2: List[str]) -> List[str]:
h = {v: i for i, v in enumerate(list1)}
result = []
m = inf
for i, v in enumerate(list2):
if v in h:
r = h[v] + i
if r < m:
m = r
result = [v]
elif r == m:
result.append(v)
return result
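# Illustrative usage (added; not part of the original solution): "Shogun" has the
# smallest index sum (0 + 1 = 1), so it is the only restaurant returned.
if __name__ == "__main__":
    print(Solution().findRestaurant(
        ["Shogun", "Tapioca Express", "Burger King", "KFC"],
        ["KFC", "Shogun", "Burger King"],
    ))  # ['Shogun']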
|
81818
|
import time
import os
def get_format_time():
    '''
    Return the current local date formatted as "YYYY-MM-DD".
    :return: formatted date string
    '''
now = time.time()
tl = time.localtime(now)
format_time = time.strftime("%Y-%m-%d", tl)
return format_time
def write_file(filepath, content, filename):
filename += '.log'
path = os.path.join(filepath, filename)
with open(path, 'a+') as f:
f.write(content)
f.write('\n')
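# Illustrative usage (added): append one line to a log file named after today's
# date in the current working directory; the message text is an arbitrary example.
if __name__ == "__main__":
    write_file(os.getcwd(), "service started", get_format_time())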
|
81869
|
import itertools
import pytest
import tubes
def test_static_tube_takes_a_list():
tube = tubes.Each([1, 2, 3])
assert list(tube) == [1, 2, 3]
def test_static_tube_takes_an_iter():
tube = tubes.Each(itertools.count(10)).first(3)
assert list(tube) == [10, 11, 12]
def test_static_tube_with_strings():
tube = tubes.Each(['a', 'b', 'c'])
assert list(tube) == ['a', 'b', 'c']
def test_static_tube_with_strings_conversion():
tube = tubes.Each(['a', 'b', 'c'])
assert list(tube.to(str)) == ['a', 'b', 'c']
assert list(tube.to(bytes)) == [b'a', b'b', b'c']
def test_static_tube_with_encoding():
tube = tubes.Each(['£', '😃', ''])
assert list(tube.to(str)) == ['£', '😃', '']
assert list(tube.to(bytes)) == [b'\xc2\xa3', b'\xf0\x9f\x98\x83', b'']
with pytest.raises(UnicodeEncodeError):
list(tube.to(bytes, codec='ascii'))
|
81885
|
from fairwork_server.settings import *
import django_heroku
import dj_database_url
import os
DEBUG = False
SECURE_SSL_REDIRECT = True
CSRF_COOKIE_SECURE = True
ADMIN_NAME = os.environ['ADMIN_NAME']
ADMIN_EMAIL = os.environ['ADMIN_EMAIL']
ADMINS = [(os.environ['ADMIN_NAME'], os.environ['ADMIN_EMAIL']), ]
HOSTNAME = os.environ['HOSTNAME']
WORKER_IRB_TEMPLATE = os.environ['WORKER_IRB_TEMPLATE']
REQUESTER_IRB_TEMPLATE = os.environ['REQUESTER_IRB_TEMPLATE']
ALLOWED_HOSTS = ['0.0.0.0', '127.0.0.1', 'localhost', 'fairwork.herokuapp.com', 'fairwork.stanford.edu']
TIME_ZONE = 'America/Los_Angeles'
DATABASES['default'] = dj_database_url.config()
SECRET_KEY = os.environ['SECRET_KEY']
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = os.environ['SENDGRID_USERNAME']
EMAIL_HOST_PASSWORD = os.environ['SENDGRID_PASSWORD']
EMAIL_PORT = 587
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = os.environ['ADMIN_EMAIL']
SERVER_EMAIL = os.environ['ADMIN_EMAIL']
django_heroku.settings(locals())
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'ERROR'),
},
},
}
|
81934
|
import time
import shutil
from HSTB.kluster.fqpr_intelligence import *
from HSTB.kluster.fqpr_project import *
def get_testfile_paths():
testfile = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'test_data', '0009_20170523_181119_FA2806.all')
testsv = os.path.join(os.path.dirname(testfile), '2020_036_182635.svp')
expected_data_folder = 'em2040_40111_05_23_2017'
expected_data_folder_path = os.path.join(os.path.dirname(testfile), expected_data_folder)
return testfile, testsv, expected_data_folder, expected_data_folder_path
def cleanup_after_tests():
testfile, testsv, expected_data_folder, expected_data_folder_path = get_testfile_paths()
proj_path = os.path.join(os.path.dirname(testfile), 'kluster_project.json')
if os.path.exists(proj_path):
os.remove(proj_path)
vessel_file = os.path.join(os.path.dirname(testfile), 'vessel_file.kfc')
if os.path.exists(vessel_file):
os.remove(vessel_file)
if os.path.exists(expected_data_folder_path):
shutil.rmtree(expected_data_folder_path)
def setup_intel(include_vessel_file=True):
testfile, testsv, expected_data_folder, expected_data_folder_path = get_testfile_paths()
proj_path = os.path.join(os.path.dirname(testfile), 'kluster_project.json')
vessel_file = os.path.join(os.path.dirname(testfile), 'vessel_file.kfc')
if os.path.exists(proj_path):
os.remove(proj_path)
if os.path.exists(vessel_file):
os.remove(vessel_file)
proj = create_new_project(os.path.dirname(testfile))
if include_vessel_file:
proj.add_vessel_file(vessel_file)
fintel = FqprIntel(proj)
fintel.set_settings({'coord_system': 'NAD83'})
return proj, fintel, proj_path, vessel_file, testfile, testsv, expected_data_folder_path
def test_intel_add_multibeam():
proj, fintel, proj_path, vessel_file, testfile, testsv, expected_data_folder_path = setup_intel()
updated_type, new_data, new_project = fintel.add_file(testfile)
assert os.path.exists(proj_path)
assert os.path.exists(vessel_file)
assert updated_type == 'multibeam'
assert new_data['file_path'] == testfile
assert new_data['type'] == 'kongsberg_all'
assert new_data['data_start_time_utc'] == datetime(2017, 5, 23, 18, 11, 19, 364000, tzinfo=timezone.utc)
assert new_data['data_end_time_utc'] == datetime(2017, 5, 23, 18, 12, 13, 171000, tzinfo=timezone.utc)
assert new_data['primary_serial_number'] == 40111
assert new_data['secondary_serial_number'] == 0
assert new_data['sonar_model_number'] == 'em2040'
assert 'last_modified_time_utc' in new_data # can't check content of this, depends on the env
assert 'created_time_utc' in new_data # can't check content of this, depends on the env
assert 'time_added' in new_data # can't check content of this, depends on the env
assert new_data['unique_id'] == 0
assert new_data['file_name'] == '0009_20170523_181119_FA2806.all'
assert not new_project
assert fintel.multibeam_intel.line_groups == {expected_data_folder_path: [testfile]}
assert fintel.multibeam_intel.unmatched_files == {}
assert fintel.multibeam_intel.file_name == {testfile: '0009_20170523_181119_FA2806.all'}
assert fintel.multibeam_intel.matching_fqpr[testfile] == ''
fintel.clear()
assert fintel.multibeam_intel.line_groups == {}
assert fintel.multibeam_intel.unmatched_files == {}
assert fintel.multibeam_intel.file_name == {}
assert fintel.multibeam_intel.matching_fqpr == {}
proj.close()
fintel = None
proj = None
cleanup_after_tests()
def test_intel_remove_multibeam():
proj, fintel, proj_path, vessel_file, testfile, testsv, expected_data_folder_path = setup_intel()
updated_type, new_data, new_project = fintel.add_file(testfile)
assert updated_type == 'multibeam' # file was added
updated_type, uid = fintel.remove_file(testfile)
assert updated_type == 'multibeam'
assert uid == 0
assert fintel.multibeam_intel.line_groups == {}
assert fintel.multibeam_intel.unmatched_files == {}
assert fintel.multibeam_intel.file_name == {}
assert fintel.multibeam_intel.matching_fqpr == {}
fintel.clear()
proj.close()
fintel = None
proj = None
cleanup_after_tests()
def test_intel_add_sv():
proj, fintel, proj_path, vessel_file, testfile, testsv, expected_data_folder_path = setup_intel()
updated_type, new_data, new_project = fintel.add_file(testsv)
assert os.path.exists(proj_path)
assert updated_type == 'svp'
assert new_data['file_path'] == testsv
assert new_data['type'] == 'caris_svp'
assert new_data['profiles'] == [[(0.031, 1487.619079),
(1.031, 1489.224413),
(2.031, 1490.094255),
(3.031, 1490.282542),
(4.031, 1490.455471),
(5.031, 1490.606669),
(6.031, 1490.694613),
(7.031, 1490.751968),
(8.031, 1490.811492),
(9.031, 1490.869682),
(10.031, 1490.923819),
(11.031, 1490.981475),
(12.031, 1491.058214),
(13.031, 1491.107904),
(14.031, 1491.156586),
(15.031, 1491.22292),
(16.031, 1491.26239),
(17.031, 1491.306912),
(18.031, 1491.355384),
(19.031, 1491.414501),
(20.031, 1491.45854),
(21.031, 1491.480412),
(22.031, 1491.504141),
(23.031, 1491.519287)]]
assert new_data['number_of_profiles'] == 1
assert new_data['number_of_layers'] == [24]
assert new_data['julian_day'] == ['2020-036']
assert new_data['time_utc'] == [datetime(2020, 2, 5, 18, 26, tzinfo=timezone.utc)]
assert new_data['time_utc_seconds'] == [1580927160.0]
assert new_data['latitude'] == [37.85094444]
assert new_data['longitude'] == [-122.46491667]
assert new_data['source_epsg'] == [4326]
assert new_data['utm_zone'] == [10]
assert new_data['utm_hemisphere'] == ['N']
assert 'last_modified_time_utc' in new_data # can't check content of this, depends on the env
assert 'created_time_utc' in new_data # can't check content of this, depends on the env
assert 'time_added' in new_data # can't check content of this, depends on the env
assert new_data['unique_id'] == 0
assert new_data['file_name'] == '2020_036_182635.svp'
assert not new_project
assert fintel.svp_intel.file_paths == [testsv]
assert fintel.svp_intel.file_path == {'2020_036_182635.svp': testsv}
assert fintel.svp_intel.file_name == {testsv: '2020_036_182635.svp'}
assert fintel.svp_intel.unique_id_reverse == {0: testsv}
assert fintel.svp_intel.type == {testsv: 'caris_svp'}
fintel.clear()
assert fintel.svp_intel.file_paths == []
assert fintel.svp_intel.file_path == {}
assert fintel.svp_intel.file_name == {}
assert fintel.svp_intel.unique_id_reverse == {}
assert fintel.svp_intel.type == {}
proj.close()
fintel = None
proj = None
cleanup_after_tests()
def test_intel_remove_sv():
proj, fintel, proj_path, vessel_file, testfile, testsv, expected_data_folder_path = setup_intel()
updated_type, new_data, new_project = fintel.add_file(testsv)
assert updated_type == 'svp' # file was added
updated_type, uid = fintel.remove_file(testsv)
assert updated_type == 'svp'
assert uid == 0
assert fintel.svp_intel.file_paths == []
assert fintel.svp_intel.file_path == {}
assert fintel.svp_intel.file_name == {}
assert fintel.svp_intel.unique_id_reverse == {}
assert fintel.svp_intel.type == {}
fintel.clear()
proj.close()
fintel = None
proj = None
cleanup_after_tests()
def test_intel_modes():
proj, fintel, proj_path, vessel_file, testfile, testsv, expected_data_folder_path = setup_intel(include_vessel_file=False)
updated_type, new_data, new_project = fintel.add_file(testfile)
# convert multibeam file
fintel.execute_action()
# normal mode will have a new processing action for that day
assert fintel.has_actions
assert fintel.action_container.actions[0].text == 'Run all processing on em2040_40111_05_23_2017'
# convert only will have no actions, since we've already converted
fintel.set_auto_processing_mode('convert_only')
assert not fintel.has_actions
# concatenate will have a new action to only convert this one line
fintel.set_auto_processing_mode('concatenate')
assert fintel.has_actions
assert fintel.action_container.actions[0].text == 'Run all processing on em2040_40111_05_23_2017'
assert fintel.action_container.actions[0].kwargs['only_this_line'] == '0009_20170523_181119_FA2806.all'
fintel.clear()
proj.close()
fintel = None
proj = None
cleanup_after_tests()
def test_intel_vessel_file():
proj, fintel, proj_path, vessel_file, testfile, testsv, expected_data_folder_path = setup_intel()
updated_type, new_data, new_project = fintel.add_file(testfile)
# convert multibeam file
fintel.execute_action()
vf = fintel.project.return_vessel_file()
converted_fqpr = list(fintel.project.fqpr_instances.values())[0]
# after conversion, the offsets from this converted data will be stored in the vessel file
expected_offsets = {'beam_opening_angle': {'1495563079': 1.3}, 'heading_patch_error': {'1495563079': 0.5},
'heading_sensor_error': {'1495563079': 0.02}, 'heave_error': {'1495563079': 0.05},
'horizontal_positioning_error': {'1495563079': 1.5}, 'imu_h': {'1495563079': 0.4},
'latency': {'1495563079': 0.0}, 'imu_p': {'1495563079': -0.18},
'imu_r': {'1495563079': -0.16}, 'imu_x': {'1495563079': 0.0},
'imu_y': {'1495563079': 0.0}, 'imu_z': {'1495563079': 0.0},
'latency_patch_error': {'1495563079': 0.0}, 'pitch_patch_error': {'1495563079': 0.1},
'pitch_sensor_error': {'1495563079': 0.001}, 'roll_patch_error': {'1495563079': 0.1},
'roll_sensor_error': {'1495563079': 0.001}, 'rx_h': {'1495563079': 0.0},
'rx_p': {'1495563079': 0.0}, 'rx_r': {'1495563079': 0.0},
'rx_x': {'1495563079': -0.1}, 'rx_x_0': {'1495563079': 0.011}, 'rx_x_1': {'1495563079': 0.011},
'rx_x_2': {'1495563079': 0.011}, 'rx_y': {'1495563079': -0.304}, 'rx_y_0': {'1495563079': 0.0},
'rx_y_1': {'1495563079': 0.0}, 'rx_y_2': {'1495563079': 0.0}, 'rx_z': {'1495563079': -0.016},
'rx_z_0': {'1495563079': -0.006}, 'rx_z_1': {'1495563079': -0.006},
'rx_z_2': {'1495563079': -0.006}, 'separation_model_error': {'1495563079': 0.0},
'sonar_type': {'1495563079': 'em2040'}, 'source': {'1495563079': 'em2040_40111_05_23_2017'},
'surface_sv_error': {'1495563079': 0.5}, 'timing_latency_error': {'1495563079': 0.001},
'tx_h': {'1495563079': 0.0}, 'tx_p': {'1495563079': 0.0}, 'tx_r': {'1495563079': 0.0},
'tx_to_antenna_x': {'1495563079': 0.0}, 'tx_to_antenna_y': {'1495563079': 0.0},
'tx_to_antenna_z': {'1495563079': 0.0}, 'tx_x': {'1495563079': 0.0},
'tx_x_0': {'1495563079': 0.0}, 'tx_x_1': {'1495563079': 0.0}, 'tx_x_2': {'1495563079': 0.0},
'tx_y': {'1495563079': 0.0}, 'tx_y_0': {'1495563079': -0.0554},
'tx_y_1': {'1495563079': 0.0131}, 'tx_y_2': {'1495563079': 0.0554}, 'tx_z': {'1495563079': 0.0},
'tx_z_0': {'1495563079': -0.012}, 'tx_z_1': {'1495563079': -0.006},
'tx_z_2': {'1495563079': -0.012}, 'vertical_positioning_error': {'1495563079': 1.0},
'vessel_speed_error': {'1495563079': 0.1}, 'waterline': {'1495563079': -0.64},
'waterline_error': {'1495563079': 0.02}, 'x_offset_error': {'1495563079': 0.2},
'y_offset_error': {'1495563079': 0.2}, 'z_offset_error': {'1495563079': 0.2}}
assert vf.data[converted_fqpr.multibeam.raw_ping[0].system_identifier] == expected_offsets
fintel.execute_action()
assert not fintel.has_actions
vf.update('40111', {'beam_opening_angle': {'1495563079': 999}}, carry_over_tpu=False)
vf.save()
fintel.regenerate_actions()
# after regenerating actions, we have a new compute tpu action since we changed this tpu value
assert fintel.has_actions
assert fintel.action_container.actions[0].text == 'Process em2040_40111_05_23_2017 only computing TPU'
vf.update('40111', {'rx_p': {'1495563079': 999}})
vf.save()
fintel.regenerate_actions()
# after regenerating actions, we have a new all processing action since we changed a patch test angle
assert fintel.has_actions
assert fintel.action_container.actions[0].text == 'Run all processing on em2040_40111_05_23_2017'
vf.update('40111', {'rx_p': {'1495563079': 0.0}})
vf.save()
fintel.regenerate_actions()
# after regenerating actions, we are back to the compute tpu action, since we reverted the patch test change
assert fintel.has_actions
assert fintel.action_container.actions[0].text == 'Process em2040_40111_05_23_2017 only computing TPU'
vf.update('40111', {'rx_x': {'1495563079': 999}})
vf.save()
fintel.regenerate_actions()
# after regenerating actions, we have a new georeferencing action since we changed a lever arm, it overrides the tpu
# action, as we will do a tpu process after georeferencing for the lever arm change anyway
assert fintel.has_actions
assert fintel.action_container.actions[0].text == 'Process em2040_40111_05_23_2017 starting with sound velocity'
vf.update('40111', {'rx_x': {'1495563079': -0.1}})
vf.save()
fintel.regenerate_actions()
# after regenerating actions, we are back to the compute tpu action, since we reverted the lever arm change
assert fintel.has_actions
assert fintel.action_container.actions[0].text == 'Process em2040_40111_05_23_2017 only computing TPU'
converted_fqpr.multibeam.raw_ping[0].attrs['xyzrph']['waterline']['1495563079'] = 999
fintel.keep_waterline_changes = False
fintel.regenerate_actions()
# after regenerating actions, we have no new action as we have disabled retaining waterline changes
assert fintel.has_actions
assert fintel.action_container.actions[0].text == 'Process em2040_40111_05_23_2017 only computing TPU'
vf = fintel.project.return_vessel_file()
assert vf.data['40111']['waterline']['1495563079'] == -0.64
converted_fqpr.multibeam.raw_ping[0].attrs['xyzrph']['waterline']['1495563079'] = 999
fintel.keep_waterline_changes = True
fintel.regenerate_actions()
# after regenerating actions, we have a new sound velocity process as we adjusted the existing waterline value, and
# waterline changes in existing data are honored.
assert fintel.has_actions
assert fintel.action_container.actions[0].text == 'Process em2040_40111_05_23_2017 starting with sound velocity'
vf = fintel.project.return_vessel_file()
assert vf.data['40111']['waterline']['1495563079'] == 999
# reverting the waterline action requires regenerating actions twice for now...
converted_fqpr.multibeam.raw_ping[0].attrs['xyzrph']['waterline']['1495563079'] = -0.64
fintel.keep_waterline_changes = True
fintel.regenerate_actions()
fintel.regenerate_actions()
# after regenerating actions, we have a new sound velocity process as we adjusted the existing waterline value, and
# waterline changes in existing data are honored.
assert fintel.has_actions
assert fintel.action_container.actions[0].text == 'Process em2040_40111_05_23_2017 only computing TPU'
vf = fintel.project.return_vessel_file()
assert vf.data['40111']['waterline']['1495563079'] == -0.64
# reverting the tpu action
vf.update('40111', {'beam_opening_angle': {'1495563079': 1.3}}, carry_over_tpu=False)
vf.save()
fintel.regenerate_actions()
assert not fintel.has_actions
fintel.clear()
proj.close()
fintel = None
proj = None
cleanup_after_tests()
# pytest hangs intermittently when the folder monitoring code is exercised here.
# Stopping/joining the observer, as the docs recommend, has not resolved it, so this test stays disabled.
# def test_intel_monitor():
# testfile, testsv, expected_data_folder, expected_data_folder_path = get_testfile_paths()
#
# proj_path = os.path.join(os.path.dirname(testfile), 'kluster_project.json')
# if os.path.exists(proj_path):
# os.remove(proj_path)
# proj = create_new_project(os.path.dirname(testfile))
# fintel = FqprIntel(proj)
#
# fintel.start_folder_monitor(os.path.dirname(testfile))
# time.sleep(5)
# if not fintel.svp_intel.file_paths or not fintel.multibeam_intel.file_paths: # might need a bit longer
# time.sleep(10)
# fintel.stop_folder_monitor(os.path.dirname(testfile))
#
# assert fintel.svp_intel.file_paths == [testsv]
# assert fintel.svp_intel.file_path == {'2020_036_182635.svp': testsv}
# assert fintel.svp_intel.file_name == {testsv: '2020_036_182635.svp'}
# assert fintel.svp_intel.type == {testsv: 'caris_svp'}
#
# assert fintel.multibeam_intel.line_groups == {expected_data_folder_path: [testfile]}
# assert fintel.multibeam_intel.unmatched_files == {}
# assert fintel.multibeam_intel.file_name == {testfile: '0009_20170523_181119_FA2806.all'}
# assert fintel.multibeam_intel.matching_fqpr[testfile] == ''
#
# fintel.clear()
# proj.close()
# fintel = None
# proj = None
# cleanup_after_tests()
|
81951
|
from flask import Blueprint, jsonify, request
from cocoa.web.views.utils import userid, format_message
from web.main.backend import get_backend
action = Blueprint('action', __name__)
@action.route('/_select_option/', methods=['GET'])
def select():
backend = get_backend()
selection_id = int(request.args.get('selection'))
    if selection_id == -1:
        # A Flask view must not return None, so return an empty JSON payload instead.
        return jsonify(message=None)
selected_item = backend.select(userid(), selection_id)
ordered_item = backend.schema.get_ordered_item(selected_item)
displayed_message = format_message("You selected: {}".format(", ".join([v[1] for v in ordered_item])), True)
return jsonify(message=displayed_message)
|
81956
|
import unittest
from nymms.reactor.Reactor import Reactor
from nymms.reactor.handlers.Handler import Handler
enabled_config = {'handler_class': 'nymms.reactor.handlers.Handler.Handler',
'enabled': True}
disabled_config = {'handler_class': 'nymms.reactor.handlers.Handler.Handler',
'enabled': False}
class TestReactor(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.reactor = Reactor()
def test_load_enabled_handler(self):
handler = self.reactor.load_handler('dummy_handler', enabled_config)
self.assertIsInstance(handler, Handler)
def test_load_disabled_handler(self):
handler = self.reactor.load_handler('dummy_handler', disabled_config)
self.assertIs(handler, None)
|
81984
|
from datetime import datetime
from elasticsearch_dsl import DocType, String, Date, Integer, Float
from elasticsearch_dsl.connections import connections
# Define a default Elasticsearch client
connections.create_connection(hosts=['localhost'])
class Extension(DocType):
name = String()
url = String()
description = String()
user_count = Integer()
review_count = Float()
review_score = Float()
class Meta:
index = 'exts'
# create the mappings in elasticsearch
Extension.init()
import json
exts = json.load(open('data/PAGES.json'))
# TODO source code extract
# rob query: all ext with this permission in manifest and this regex in source code
# https://www.elastic.co/guide/en/elasticsearch/guide/current/nested-query.html
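# Hypothetical placeholder (added) for the "source code extract" step marked TODO
# above; the real extraction logic is not part of this script.
def extract_sources(ext_id):
    """Return the list of source files for the given extension id (stub)."""
    return []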
for ext in exts:
print(ext['name'])
sources = extract_sources(ext['id'])
# create and save
ext = Extension(meta={'id': ext['ext_id']},
name=ext['name'],
sources=sources,
url=ext['url'],
review_count=ext['aggregateRating.properties.ratingCount'],
review_score=ext['aggregateRating.properties.ratingValue'],
description=ext['full_description'],
user_count=int(ext['user_count']))
ext.save()
# Display cluster health
print(connections.get_connection().cluster.health())
|
82015
|
import os
from pyaedt import aedt_exception_handler
from pyaedt.modeler.GeometryOperators import GeometryOperators
class Part(object):
"""Manages 3D component placement and definition.
Parameters
----------
part_folder : str
Path to the folder with the A3DCOMP files.
part_dict : dict
Defines relevant properties of the class with the following keywords:
* 'comp_name': str, Name of the A3DCOMP file.
* 'offset': list or str, Offset coordinate system definition relative to the parent.
* 'rotation_cs': list or str, Rotation coordinate system relative to the parent.
* 'rotation': str or numeric, Rotation angle.
* 'compensation_angle': str or numeric, Initial angle.
* 'rotation_axis': str, Rotation axis (``"X"``, ``"Y"``, or ``"Z"``).
* 'duplicate_number': str or int, Number of instances for linear duplication.
* 'duplicate_vector': list, Vector for duplication relative to the parent coordinate system.
parent : str
The default is ``None``.
name : str, optional
Name of the A3DCOMP file without the extension. The default is ``None``.
"""
# List of known keys for a part and default values:
allowed_keys = {
"comp_name": None, # *.a3dcomp file name
"offset": None,
"rotation_cs": None,
"rotation": 0.0,
"compensation_angle": None,
"rotation_axis": "Z",
"tire_radius": None,
"duplicate_number": None,
"duplicate_vector": None,
"antenna_type": None, # Antenna only
"ffd_name": None, # Antenna only
"mode": None, # Antenna only
"aedt_name": None,
"beamwidth_elevation": None, # Antenna only
"beamwidth_azimuth": None, # Antenna only
"polarization": None,
} # Antenna only
def __init__(self, part_folder, part_dict, parent=None, name=None):
# Default values:
self._compdef = dict()
self._multiparts = parent
# Extract the 3D component name and part folder
# from the file name.
# Use this as the default value for comp_name. Ensure that the correct extension is used.
self._compdef["part_folder"] = part_folder
for k in Part.allowed_keys:
if k in part_dict:
self._compdef[k] = part_dict[k]
else:
self._compdef[k] = Part.allowed_keys[k]
self._motion = False
if parent: # Inherit _motion directly from parent.
self._motion = self._multiparts.motion
# make sure self._name is unique if it is not passed as an argument.
if name:
self._name = name # Part name should be unique. No checking here.
elif "name" in part_dict:
self._name = part_dict["name"]
else:
self._name = "radar" # TODO: Need to fix this!
# Update self._compdef from the library definition in the *.json file.
for kw, val in part_dict.items():
if kw in self._compdef:
self._compdef[kw] = val
else:
raise KeyError("Key " + kw + " not allowed.")
# Instantiate yaw, pitch and roll. Might want to change
# how this is handled. Make "rotation" a list instead of
# using .yaw, .pitch, .roll properties?
self.rot_axis = [False, False, False] # [X, Y, Z] rotation Boolean
if self._compdef["rotation_axis"]:
rotations_axis = self._compdef["rotation_axis"].split(",")
if self._compdef["rotation"]:
rotations = self._compdef["rotation"].split(",")
else:
rotations = []
y = "0"
p = "0"
r = "0"
for a in rotations:
if rotations_axis[rotations.index(a)].lower() == "x": # roll
r = a
self.rot_axis[2] = True
elif rotations_axis[rotations.index(a)].lower() == "y": # pitch
p = a
self.rot_axis[1] = True
elif rotations_axis[rotations.index(a)].lower() == "z": # yaw
y = a
self.rot_axis[0] = True
self._yaw = y
self._pitch = p
self._roll = r
else:
self._yaw = "0"
self._pitch = "0"
self._roll = "0"
def __setitem__(self, key, value):
self._compdef[key] = value
def __getitem__(self, key):
if key == "rotation_cs":
cs = self._compdef[key]
if cs == "Global" or cs is None:
self._compdef[key] = ["0", "0", "0"]
else:
                self._compdef[key] = [str(i) if not isinstance(i, str) else i for i in cs]
return self._compdef[key]
@aedt_exception_handler
def zero_offset(self, kw): # Returns True if cs at kw is at [0, 0, 0]
"""Check if the coordinate system defined by kw is [0, 0, 0].
Parameters
----------
kw : str
Coordinate system for kw. Options are ``offset`` and ``rotation_cs``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
if kw in ["offset", "rotation_cs"]:
s = []
if self[kw]:
s = [GeometryOperators.is_small(c) for c in self[kw]]
if len(s) > 0:
return all(s)
else:
return True
return False
@property
def file_name(self):
"""Antenna file name.
Returns
-------
str
Full name of the A3DCOMP file.
"""
return os.path.join(self._compdef["part_folder"], self["comp_name"])
# Create a unique coordinate system name for the part.
@property
def cs_name(self):
"""Coordinate system name.
Returns
-------
str
Name of the coordinate system.
"""
if self._motion or not self.zero_offset("offset") or not self.zero_offset("rotation_cs"):
return self.name + "_cs"
else:
return self._multiparts.cs_name
# Define the variable names for angles in the app:
@property
def yaw_name(self):
"""Yaw variable name. Yaw is the rotation about the object's Z-axis.
Returns
-------
str
            Name of the yaw variable.
"""
return self.name + "_yaw"
@property
def pitch_name(self):
"""Pitch variable name. Pitch is the rotation about the object's Y-axis.
Returns
-------
str
Name of the pitch variable.
"""
return self.name + "_pitch"
@property
def roll_name(self):
"""Roll variable name. Roll is the rotation about the object's X-axis.
Returns
-------
str
Name of the roll variable.
"""
return self.name + "_roll"
# Always return the local origin as a list:
@property
def local_origin(self):
"""Local part offset values.
Returns
-------
list
List of offset values for the local part.
"""
if self["offset"]:
if self.zero_offset("offset") or self["offset"] == "Global":
return [0, 0, 0]
else:
if self._multiparts._local_units:
units = self._multiparts._local_units
else:
units = self._multiparts.modeler_units
offset = [str(i) + units for i in self["offset"]]
return offset
else:
return [0, 0, 0]
@property
def rotate_origin(self):
"""Origin rotation list.
Returns
-------
list
List of offset values for the rotation.
"""
if self["rotation_cs"]:
if self.zero_offset("rotation_cs") or self["rotation_cs"] == "Global":
return self.local_origin
else:
return self["rotation_cs"]
else:
return [0, 0, 0]
@property
def _do_rotate(self): # True if any rotation angles are non-zero or 'rotation_cs' is defined.
return any(self.rot_axis)
@property
    def _do_offset(self):  # True if any component of the local origin offset is non-zero.
return any(GeometryOperators.numeric_cs(self.local_origin))
    # Allowed expressions: a valid angle given either as a string
    # or as a numeric value.
@property
def yaw(self):
"""Yaw variable value.
Returns
-------
str
Value for the yaw variable.
"""
return self._yaw
@yaw.setter
def yaw(self, yaw):
self._yaw = yaw
@property
def pitch(self):
"""Pitch variable value.
Returns
-------
str
Value of the pitch variable.
"""
return self._pitch
@pitch.setter
def pitch(self, pitch):
self._pitch = pitch
@property
def roll(self):
"""Roll variable value.
Returns
-------
str
Value of the roll variable.
"""
return self._roll
@roll.setter
def roll(self, roll):
self._roll = roll
@property
def name(self):
"""Part name.
Returns
-------
str
Name of the part.
"""
return self._multiparts.name + "_" + self._name
@aedt_exception_handler
def set_relative_cs(self, app):
"""Create a parametric coordinate system.
Parameters
----------
app : pyaedt.Hfss
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
# Set x, y, z offset variables in app. But check first to see if the CS
# has already been defined.
if self.cs_name not in app.modeler.oeditor.GetCoordinateSystems() and self.cs_name != "Global":
x_pointing = [1, 0, 0]
y_pointing = [0, 1, 0]
app.modeler.create_coordinate_system(
origin=self.local_origin,
x_pointing=x_pointing,
y_pointing=y_pointing,
reference_cs=self._multiparts.cs_name,
mode="axis",
name=self.cs_name,
)
return True
@property
def rot_cs_name(self):
"""Rotation coordinate system name.
Returns
-------
str
Name of the rotation coordinate system.
"""
return self.name + "_rot_cs"
@aedt_exception_handler
def do_rotate(self, app, aedt_object):
"""Set the rotation coordinate system relative to the parent coordinate system.
This method should only be called if there is rotation in the component.
The rotation coordinate system is offset from the parent coordinate system.
Parameters
----------
app : pyaedt.Hfss
HFSS application instance.
aedt_object : str
Name of the HFSS design.
"""
x_pointing = [1, 0, 0]
y_pointing = [0, 1, 0]
app.modeler.create_coordinate_system(
origin=self.rotate_origin,
x_pointing=x_pointing,
y_pointing=y_pointing,
reference_cs=self._multiparts.cs_name,
mode="axis",
name=self.rot_cs_name,
)
if self.rot_axis[0]:
app[self.yaw_name] = self.yaw
app.modeler.rotate(aedt_object, "Z", angle=self.yaw_name)
if self.rot_axis[1]:
app[self.pitch_name] = self.pitch
app.modeler.rotate(aedt_object, "Y", angle=self.pitch_name)
if self.rot_axis[2]:
app[self.roll_name] = self.roll
app.modeler.rotate(aedt_object, "X", angle=self.roll_name)
return True
@aedt_exception_handler
def insert(self, app):
"""Insert 3D component in AEDT.
Parameters
----------
app : pyaedt.Hfss
Returns
-------
        list
            List of names of the inserted objects.
"""
aedt_objects = []
# TODO: Why the inconsistent syntax for cs commands?
if self._do_offset:
self.set_relative_cs(app) # Create coordinate system, if needed.
aedt_objects.append(app.modeler.primitives.insert_3d_component(self.file_name, targetCS=self.cs_name))
else:
aedt_objects.append(
app.modeler.primitives.insert_3d_component(self.file_name, targetCS=self._multiparts.cs_name)
)
if self._do_rotate:
self.do_rotate(app, aedt_objects[0])
# Duplication occurs in parent coordinate system.
app.modeler.set_working_coordinate_system(self._multiparts.cs_name)
if self["duplicate_vector"]:
d_vect = [float(i) for i in self["duplicate_vector"]]
duplicate_result = app.modeler.duplicate_along_line(
aedt_objects[0], d_vect, nclones=int(self["duplicate_number"]), is_3d_comp=True
)
if duplicate_result[0]:
for d in duplicate_result[1]:
aedt_objects.append(d)
return aedt_objects
class Antenna(Part, object):
"""Manages antennas.
This class is derived from :class:`Part`.
Parameters
----------
root_folder : str
Root directory
ant_dict : dict
Antenna dictionary
parent : str, optional
The default is ``None``.
name : str, optional
The default is ``None``.
"""
def __init__(self, root_folder, ant_dict, parent=None, name=None):
super(Antenna, self).__init__(root_folder, ant_dict, parent=parent, name=name)
def _antenna_type(self, app):
if self._compdef["antenna_type"] == "parametric":
return app.SbrAntennas.ParametricBeam
if self._compdef["antenna_type"] == "ffd":
return "file"
@property
def params(self):
"""Multi-part component parameters.
Returns
-------
dict
Dictionary of parameters for a multi-part component.
"""
p = {}
if self._compdef["antenna_type"] == "parametric":
p["Vertical BeamWidth"] = self._compdef["beamwidth_elevation"]
p["Horizontal BeamWidth"] = self._compdef["beamwidth_azimuth"]
p["Polarization"] = self._compdef["polarization"]
return p
@aedt_exception_handler
def _insert(self, app, target_cs=None, units=None):
if not target_cs:
target_cs = self._multiparts.cs_name
if not units:
if self._multiparts._local_units:
units = self._multiparts._local_units
else:
units = self._multiparts.units
if self._compdef["ffd_name"]:
ffd = os.path.join(self._compdef["part_folder"], self._compdef["ffd_name"] + ".ffd")
a = app.create_sbr_file_based_antenna(
ffd_full_path=ffd, model_units=units, target_cs=target_cs, antenna_name=self.name
)
else:
a = app.create_sbr_antenna(
self._antenna_type(app),
model_units=units,
parameters_dict=self.params,
target_cs=target_cs,
antenna_name=self.name,
)
return a
@aedt_exception_handler
def insert(self, app, units=None):
"""Insert antenna in HFSS SBR+.
Parameters
----------
app : pyaedt.Hfss
        units : str, optional
            Model units used for the antenna. The default is ``None``.
Returns
-------
str
Name of the inserted object.
"""
if self._do_offset:
self.set_relative_cs(app)
antenna_object = self._insert(app, units=units) # Create coordinate system, if needed.
else:
antenna_object = self._insert(app, target_cs=self._multiparts.cs_name, units=units)
if self._do_rotate and antenna_object:
self.do_rotate(app, antenna_object.antennaname)
return antenna_object
|
82029
|
import dispatch.plugins.kandbox_planner.util.kandbox_date_util as date_util
from dispatch.plugins.bases.kandbox_planner import KandboxRulePlugin
class KandboxRulePluginRequestedSkills(KandboxRulePlugin):
"""
Has the following members
"""
# rule_code = "check_job_skill"
# rule_name = "Worker can handle skills requested by job"
    error_message_template = "Job ({}) requires skill ({}), which worker {} does not have."
    success_message_template = "Job ({}) requires skill ({}), which worker {} has."
"""
result = {
'score': 0,
'message':'',
}
"""
title = "Requested Skills"
slug = "kandbox_rule_requested_skills"
author = "Kandbox"
author_url = "https://github.com/alibaba/easydispatch"
    description = "Rule requested_skills for GYM for RL."
version = "0.1.0"
default_config = {}
config_form_spec = {
"type": "object",
"properties": {},
}
def evalute_normal_single_worker_n_job(self, env, job=None): # worker = None,
# return score, violated_rules (negative values)
# return self.weight * 1
# Now check if this new job can fit into existing
worker_code = job["scheduled_primary_worker_id"]
worker = env.workers_dict[worker_code]
res = {}
overall_message = "Job ({}) requires skill ({}), checking workers {}".format(
job["job_code"], job["requested_skills"], worker_code
)
score = 1
for skill_key in job["requested_skills"].keys():
# if(not all(skill in job['requested_skills'][skill_key] for skill in worker['skills'][skill_key])):
for skill in job["requested_skills"][skill_key]:
if not skill in worker["skills"][skill_key]:
overall_message += "(skill_key={}, skill={}) is not found!".format(
skill_key, skill
)
score = -1
break
overall_message += "(skill_key={}) found!".format(skill_key)
res["message"] = overall_message
res["score"] = score
return res
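# Minimal illustrative sketch (added): the env, job and worker structures below are
# assumptions inferred only from the lookups made above (env.workers_dict,
# job["scheduled_primary_worker_id"], job["requested_skills"], worker["skills"]).
if __name__ == "__main__":
    from types import SimpleNamespace
    env = SimpleNamespace(workers_dict={
        "worker_1": {"skills": {"product": ["fiber", "copper"]}},
    })
    job = {
        "job_code": "job_42",
        "scheduled_primary_worker_id": "worker_1",
        "requested_skills": {"product": ["fiber"]},
    }
    # The method does not use `self`, so it is called unbound here to avoid
    # depending on the plugin base-class constructor, whose signature is not shown.
    result = KandboxRulePluginRequestedSkills.evalute_normal_single_worker_n_job(
        None, env, job=job
    )
    print(result)  # expected: score == 1, since worker_1 has the "fiber" skill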
|
82034
|
from random import randint
import numpy as np
try:
import tensorflow as tf
except ImportError:
tf = None
# ToDo: we are using a lot of tf.keras.backend modules below, can we use tf core instead?
class MaskingDense(tf.keras.layers.Layer):
""" Just copied code from keras Dense layer and added masking and a few other tricks:
- Direct auto-regressive connections to output
- Allows a second (non-autoregressive) input that is fully connected to first hidden
- Either 1 output or 2 outputs (concatenated) that are separately
auto-regressive wrt to the input
"""
def __init__(self, units, out_units,
hidden_layers=1,
dropout_rate=0.0,
random_input_order=False,
activation='elu',
out_activation='linear',
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
out_kernel_initializer='glorot_uniform',
out_bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
name=None,
batchnorm=False,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(MaskingDense, self).__init__(name=name, **kwargs)
self.input_sel = None
self.random_input_order = random_input_order
self.rate = min(1., max(0., dropout_rate))
self.kernel_sels = []
self.units = units
self.out_units = out_units
self.hidden_layers = hidden_layers
self.activation = tf.keras.activations.get(activation)
self.out_activation = tf.keras.activations.get(out_activation) # None gives linear activation
self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self.bias_initializer = tf.keras.initializers.get(bias_initializer)
self.out_kernel_initializer = tf.keras.initializers.get(out_kernel_initializer)
self.out_bias_initializer = tf.keras.initializers.get(out_bias_initializer)
self.kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self.bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self.activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self.kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self.bias_constraint = tf.keras.constraints.get(bias_constraint)
self.batchnorm = batchnorm
def dropout_wrapper(self, inputs, training):
if 0. < self.rate < 1.:
def dropped_inputs():
return tf.keras.backend.dropout(inputs, self.rate, noise_shape=None, seed=None)
return tf.keras.backend.in_train_phase(dropped_inputs, inputs, training=training)
return inputs
def build_layer_weights(
self,
input_dim,
units,
use_bias=True,
is_output=False,
id=''
):
kernel_initializer = (self.kernel_initializer if not is_output
else self.out_kernel_initializer)
bias_initializer = (self.bias_initializer if not is_output
else self.out_bias_initializer)
kernel = self.add_weight(shape=(input_dim, units),
initializer=kernel_initializer,
name='kernel' + id,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if use_bias:
bias = self.add_weight(shape=(units,),
initializer=bias_initializer,
name='bias' + id,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
bias = None
return kernel, bias
def build_mask(self, shape, prev_sel, is_output):
if is_output:
if shape[-1] == len(self.input_sel):
input_sel = self.input_sel
else:
input_sel = self.input_sel * 2
else:
# Disallow D-1 because it would violate auto-regressive property
# Disallow unconnected units by sampling min from previous layer
input_sel = [randint(np.min(prev_sel), shape[-1] - 2) for i in range(shape[-1])]
def vals():
in_len = len(self.input_sel)
for x in range(shape[-2]):
for y in range(shape[-1]):
if is_output:
yield 1 if prev_sel[x] < input_sel[y % in_len] else 0
else:
yield 1 if prev_sel[x] <= input_sel[y] else 0
return tf.keras.backend.constant(list(vals()), dtype='float32', shape=shape), input_sel
def build(self, input_shape):
if isinstance(input_shape, list):
if len(input_shape) != 2:
                raise ValueError('A list input is only supported for exactly two inputs')
input_shape, other_input_shape = input_shape
# Build weights for other (non-autoregressive) vector
other_shape = (other_input_shape[-1], self.units)
self.other_kernel, self.other_bias = self.build_layer_weights(*other_shape, id='_h')
assert len(input_shape) >= 2
assert self.out_units == input_shape[-1] or self.out_units == 2 * input_shape[-1]
self.kernels, self.biases = [], []
self.kernel_masks, self.kernel_sels = [], []
self.batch_norms = []
shape = (input_shape[-1], self.units)
self.input_sel = np.arange(input_shape[-1])
if self.random_input_order:
np.random.shuffle(self.input_sel)
prev_sel = self.input_sel
for i in range(self.hidden_layers):
# Hidden layer
kernel, bias = self.build_layer_weights(*shape, id=str(i))
self.kernels.append(kernel)
self.biases.append(bias)
# Hidden layer mask
kernel_mask, kernel_sel = self.build_mask(shape, prev_sel, is_output=False)
self.kernel_masks.append(kernel_mask)
self.kernel_sels.append(kernel_sel)
prev_sel = kernel_sel
shape = (self.units, self.units)
self.batch_norms.append(tf.keras.layers.BatchNormalization(center=True, scale=True))
# Direct connection between input/output
if self.hidden_layers > 0:
direct_shape = (input_shape[-1], self.out_units)
self.direct_kernel, _ = self.build_layer_weights(
*direct_shape,
use_bias=False,
is_output=True,
id='_direct')
self.direct_kernel_mask, self.direct_sel = self.build_mask(direct_shape, self.input_sel,
is_output=True)
# Output layer
out_shape = (self.units, self.out_units)
self.out_kernel, self.out_bias = self.build_layer_weights(
*out_shape,
is_output=True,
id='_out')
self.out_kernel_mask, self.out_sel = self.build_mask(out_shape, prev_sel, is_output=True)
self.built = True
def call(self, inputs, training=None):
other_input = None
if isinstance(inputs, list):
assert len(inputs) == 2
assert self.hidden_layers > 0, "other input not supported if no hidden layers"
assert hasattr(self, 'other_kernel')
inputs, other_input = inputs
output = inputs
if other_input is not None:
other = tf.keras.backend.dot(other_input, self.other_kernel)
other = tf.keras.backend.bias_add(other, self.other_bias)
other = self.activation(other)
# Hidden layer + mask
for i in range(self.hidden_layers):
# i=0: input_dim -> masking_dim
# i>0: masking_dim -> masking_dim
weight = self.kernels[i] * self.kernel_masks[i]
output = tf.keras.backend.dot(output, weight)
# "other" input
if i == 0 and other_input is not None:
output = output + other
output = tf.keras.backend.bias_add(output, self.biases[i])
output = self.activation(output)
if self.batchnorm:
output = self.batch_norms[i](output)
output = self.dropout_wrapper(output, training)
# out_act(bias + (V dot M_v)h(x) + (A dot M_a)x + (other dot M_other)other)
# masking_dim -> input_dim
output = tf.keras.backend.dot(output, self.out_kernel * self.out_kernel_mask)
# Direct connection
if self.hidden_layers > 0:
# input_dim -> input_dim
direct = tf.keras.backend.dot(inputs, self.direct_kernel * self.direct_kernel_mask)
output = output + direct
output = tf.keras.backend.bias_add(output, self.out_bias)
output = self.out_activation(output)
output = self.dropout_wrapper(output, training)
return output
def compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
return (input_shape[0], self.out_units)
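# Standalone illustrative sketch (added): the hidden-layer rule in build_mask above
# keeps weight (x, y) only when prev_sel[x] <= input_sel[y]; this toy snippet builds
# such an autoregressive (MADE-style) mask with plain numpy for a 4-input, 6-unit layer.
if __name__ == "__main__":
    d, h = 4, 6
    m_in = np.arange(d)                              # input degrees 0..d-1
    m_hid = np.random.randint(0, d - 1, size=h)      # toy hidden-unit degrees
    mask = (m_in[:, None] <= m_hid[None, :]).astype("float32")  # shape (d, h)
    print(mask)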
|
82038
|
import uuid
import os
from datetime import datetime
from django.db import transaction
from api.management.data_script import OperationalDataScript
from api.models.CarbonIntensityLimit import CarbonIntensityLimit
from api.models.CompliancePeriod import CompliancePeriod
from api.models.DefaultCarbonIntensity import DefaultCarbonIntensity
from api.models.DefaultCarbonIntensityCategory import DefaultCarbonIntensityCategory
from api.models.EnergyDensity import EnergyDensity
from api.models.EnergyDensityCategory import EnergyDensityCategory
from api.models.EnergyEffectivenessRatio import EnergyEffectivenessRatio
from api.models.EnergyEffectivenessRatioCategory import EnergyEffectivenessRatioCategory
from api.models.ExpectedUse import ExpectedUse
from api.models.FuelClass import FuelClass
from api.models.Organization import Organization
from api.models.OrganizationActionsType import OrganizationActionsType
from api.models.OrganizationBalance import OrganizationBalance
from api.models.OrganizationStatus import OrganizationStatus
from api.models.OrganizationType import OrganizationType
from api.models.PetroleumCarbonIntensity import PetroleumCarbonIntensity
from api.models.PetroleumCarbonIntensityCategory import PetroleumCarbonIntensityCategory
from api.models.Role import Role
from api.models.User import User
from api.models.UserRole import UserRole
class FunctionalTestDataLoad(OperationalDataScript):
comment = 'Functional Test Data Setup'
is_revertable = False
_usernames = ['functest_fs1',
'functest_fs2',
                  'functest_analyst',
'functest_director',
'functest_tfrsadmin'
]
_orgs = ['Test Fuel Supplier 1', 'Test Fuel Supplier 2']
def check_run_preconditions(self):
for username in self._usernames:
if User.objects.filter(username=username).exists():
print('Found an existing user {}'.format(username))
return False
for org in self._orgs:
if Organization.objects.filter(name=org).exists():
                print('Found an existing organization {}'.format(org))
return False
return True
@transaction.atomic
def run(self):
Organization(name=self._orgs[0],
actions_type=OrganizationActionsType.objects.get_by_natural_key("Buy And Sell"),
type=OrganizationType.objects.get_by_natural_key("Part3FuelSupplier"),
status=OrganizationStatus.objects.get_by_natural_key('Active')).save()
Organization(name=self._orgs[1],
actions_type=OrganizationActionsType.objects.get_by_natural_key("Buy And Sell"),
type=OrganizationType.objects.get_by_natural_key("Part3FuelSupplier"),
status=OrganizationStatus.objects.get_by_natural_key('Active')).save()
OrganizationBalance(organization=Organization.objects.get_by_natural_key(self._orgs[0]), credit_trade=None,
validated_credits=1000, effective_date=datetime.today().strftime('%Y-%m-%d')).save()
OrganizationBalance(organization=Organization.objects.get_by_natural_key(self._orgs[1]), credit_trade=None,
validated_credits=1000, effective_date=datetime.today().strftime('%Y-%m-%d')).save()
User(email='<EMAIL>', username='functest_fs1',
first_name='Functest1', last_name='Supplier', display_name='Test 1 Supplier',
organization=Organization.objects.get_by_natural_key(self._orgs[0])).save()
User(email='<EMAIL>', username='functest_fs2',
first_name='Functest2', last_name='Supplier', display_name='Test 2 Supplier',
organization=Organization.objects.get_by_natural_key(self._orgs[1])).save()
User(email='<EMAIL>', username='functest_analyst',
first_name='Analyst', last_name='Government', display_name='functest_analyst',
organization=Organization.objects.get(id=1)).save()
User(email='<EMAIL>', username='functest_director',
first_name='Director', last_name='Government', display_name='Director',
organization=Organization.objects.get(id=1)).save()
User(email='<EMAIL>', username='functest_tfrsadmin',
first_name='TfrsAdmin', last_name='Government', display_name='TfrsAdmin',
organization=Organization.objects.get(id=1)).save()
UserRole(user=User.objects.get(username='functest_fs1'), role=Role.objects.get_by_natural_key('FSManager')).save()
UserRole(user=User.objects.get(username='functest_fs1'), role=Role.objects.get_by_natural_key('ComplianceReporting')).save()
UserRole(user=User.objects.get(username='functest_fs1'), role=Role.objects.get_by_natural_key('FSDocSubmit')).save()
UserRole(user=User.objects.get(username='functest_fs2'), role=Role.objects.get_by_natural_key('FSManager')).save()
UserRole(user=User.objects.get(username='functest_fs2'), role=Role.objects.get_by_natural_key('FSDocSubmit')).save()
UserRole(user=User.objects.get(username='functest_analyst'), role=Role.objects.get_by_natural_key('GovUser')).save()
UserRole(user=User.objects.get(username='functest_analyst'), role=Role.objects.get_by_natural_key('GovDoc')).save()
UserRole(user=User.objects.get(username='functest_director'), role=Role.objects.get_by_natural_key('GovDirector')).save()
UserRole(user=User.objects.get(username='functest_tfrsadmin'), role=Role.objects.get_by_natural_key('Admin')).save()
script_class = FunctionalTestDataLoad
|
82070
|
import dataclasses
import click
import datetime
import neuro_extras
from collections import defaultdict
from graphviz import Digraph
from neuro_cli import __version__ as cli_version
from neuro_sdk import Client, ResourceNotFound, __version__ as sdk_version
from operator import attrgetter
from rich import box
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from types import TracebackType
from typing import (
AbstractSet,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
cast,
)
from typing_extensions import AsyncContextManager, AsyncIterator
from yarl import URL
import neuro_flow
from . import ast
from .batch_executor import BatchExecutor, LocalsBatchExecutor, get_running_flow
from .colored_topo_sorter import ColoredTopoSorter
from .commands import CmdProcessor
from .config_loader import BatchLocalCL
from .context import (
EMPTY_ROOT,
EarlyBatch,
EarlyLocalCall,
ProjectCtx,
RunningBatchFlow,
setup_project_ctx,
)
from .expr import EvalError, MultiError
from .parser import ConfigDir
from .storage.base import (
Attempt,
Bake,
BakeImage,
BakeMeta,
BakeStorage,
ProjectStorage,
Storage,
)
from .types import FullID, LocalPath, TaskStatus
from .utils import (
CommandRunner,
GlobalOptions,
collect_git_info,
encode_global_options,
fmt_datetime,
fmt_timedelta,
make_cmd_exec,
)
EXECUTOR_IMAGE = f"ghcr.io/neuro-inc/neuro-flow:{neuro_flow.__version__}"
GRAPH_COLORS = {
TaskStatus.PENDING: "skyblue",
TaskStatus.RUNNING: "steelblue",
TaskStatus.SUCCEEDED: "limegreen",
TaskStatus.CANCELLED: "orange",
TaskStatus.SKIPPED: "magenta",
TaskStatus.CACHED: "yellowgreen",
TaskStatus.FAILED: "orangered",
TaskStatus.UNKNOWN: "crimson",
}
class BakeFailedError(Exception):
def __init__(self, status: TaskStatus):
self.status = status
async def iter_flows(top_flow: EarlyBatch) -> AsyncIterator[Tuple[FullID, EarlyBatch]]:
to_check: List[Tuple[FullID, EarlyBatch]] = [((), top_flow)]
while to_check:
prefix, flow = to_check.pop(0)
yield prefix, flow
for tid in flow.graph:
if await flow.is_action(tid):
sub_flow = await flow.get_action_early(tid)
to_check.append((prefix + (tid,), sub_flow))
async def check_no_cycles(top_flow: EarlyBatch) -> None:
async for _, flow in iter_flows(top_flow):
ColoredTopoSorter(flow.graph)
async def check_local_deps(top_flow: EarlyBatch) -> None:
    # This method works in O(kn^3), where:
    # - n is the number of tasks in the flow
    # - k is the maximal depth of actions
    # This complexity arises because:
    # for each task (n), for each of the task's dependencies (n), and for each remote task (n),
    # we do a prefix check (k). Note that tasks usually have only a few dependencies,
    # so in real cases one of those n is effectively constant.
    #
    # If performance becomes a problem, this can be replaced
    # with a Trie (prefix tree) to reduce the time complexity to O(kn^2)
    # (for each task (n), for each of the task's dependencies (n), do a Trie check (k)).
runs_on_remote: Set[FullID] = set()
async for prefix, flow in iter_flows(top_flow):
runs_on_remote.update(
{prefix + (tid,) for tid in flow.graph if await flow.is_task(tid)}
)
def _is_prefix(item: FullID, prefix: FullID) -> bool:
if len(item) < len(prefix):
return False
return all(x == y for (x, y) in zip(item, prefix))
def _remote_deps(prefix: FullID, deps: Iterable[str]) -> Iterable[FullID]:
return (
remote
for dep in deps
for remote in runs_on_remote
if _is_prefix(remote, prefix + (dep,))
)
async for prefix, flow in iter_flows(top_flow):
early_locals = cast(
AsyncIterator[EarlyLocalCall],
(
await flow.get_local_early(tid)
for tid in flow.graph
if await flow.is_local(tid)
),
)
with_bad_deps = (
(early_local, remote)
async for early_local in early_locals
for remote in _remote_deps(prefix, early_local.needs)
)
async for early_local, remote in with_bad_deps:
early_local_str = ".".join(prefix + (early_local.real_id,))
remote_str = ".".join(remote)
raise Exception(
f"Local action '{early_local_str}' depends on remote "
f"task '{remote_str}'. This is not supported because "
"all local action should succeed before "
"remote executor starts."
)
async def check_expressions(top_flow: RunningBatchFlow) -> None:
errors: List[EvalError] = []
async for _, flow in iter_flows(top_flow):
errors += flow.validate_expressions()
if errors:
raise MultiError(errors)
class ImageRefNotUniqueError(Exception):
@dataclasses.dataclass
class ImageInfo:
context: Optional[Union[URL, LocalPath]]
dockerfile: Optional[Union[URL, LocalPath]]
ast: ast.Image
def __init__(self, ref: str, images: Sequence[ImageInfo]) -> None:
self._ref = ref
self._images = images
def __str__(self) -> str:
return (
f"Image with ref '{self._ref}' defined multiple times "
f"with different attributes:\n"
+ "\n".join(
f"at {EvalError.format_pos(image.ast._start)} with params:\n"
f" context: {image.context or '<empty>'}\n"
f" dockerfile: {image.dockerfile or '<empty>'}"
for image in self._images
)
)
async def check_image_refs_unique(top_flow: RunningBatchFlow) -> None:
_tmp: Dict[str, List[ImageRefNotUniqueError.ImageInfo]] = defaultdict(list)
async for _, flow in iter_flows(top_flow):
for image in flow.early_images.values():
if image.ref.startswith("image:"):
_tmp[image.ref].append(
ImageRefNotUniqueError.ImageInfo(
context=image.context,
dockerfile=image.dockerfile,
ast=flow.get_image_ast(image.id),
)
)
errors = []
for ref, images in _tmp.items():
contexts_differ = len({it.context for it in images}) > 1
dockerfiles_differ = len({it.dockerfile for it in images}) > 1
if contexts_differ or dockerfiles_differ:
errors.append(ImageRefNotUniqueError(ref, images))
if errors:
raise MultiError(errors)
async def build_graphs(
top_flow: RunningBatchFlow,
) -> Mapping[FullID, Mapping[FullID, AbstractSet[FullID]]]:
graphs = {}
async for prefix, flow in iter_flows(top_flow):
graphs[prefix] = {
prefix + (key,): {prefix + (node,) for node in nodes}
for key, nodes in flow.graph.items()
}
return graphs
async def upload_image_data(
top_flow: RunningBatchFlow,
neuro_runner: CommandRunner,
storage: BakeStorage,
) -> List[BakeImage]:
@dataclasses.dataclass
class _TmpData:
context_on_storage: Optional[URL]
dockerfile_rel: Optional[str]
yaml_defs: List[FullID]
_tmp: Dict[str, _TmpData] = {}
async for prefix, flow in iter_flows(top_flow):
for image in flow.early_images.values():
if isinstance(image.context, LocalPath):
# Reusing image ref between bakes introduces
# race condition anyway, so we can safely use it
# as remote context dir name
storage_context_dir: Optional[URL] = URL(
f"storage:.flow/{top_flow.project_id}/{image.ref.replace(':', '/')}"
)
else:
storage_context_dir = image.context
dockerfile_rel = None
if image.dockerfile_rel:
dockerfile_rel = str(image.dockerfile_rel.as_posix())
prev_entry = _tmp.get(image.ref)
if prev_entry is not None:
# Validation is done before
prev_entry.yaml_defs.append(prefix + (image.id,))
else:
if isinstance(image.context, LocalPath):
await neuro_runner(
"mkdir",
"--parents",
str(storage_context_dir),
)
await neuro_runner(
"cp",
"--recursive",
"--update",
"--no-target-directory",
str(image.context),
str(storage_context_dir),
)
_tmp[image.ref] = _TmpData(
yaml_defs=[prefix + (image.id,)],
context_on_storage=storage_context_dir,
dockerfile_rel=dockerfile_rel,
)
return [
await storage.create_bake_image(
ref=ref,
yaml_defs=entry.yaml_defs,
context_on_storage=entry.context_on_storage,
dockerfile_rel=entry.dockerfile_rel,
)
for ref, entry in _tmp.items()
]
class BatchRunner(AsyncContextManager["BatchRunner"]):
def __init__(
self,
config_dir: ConfigDir,
console: Console,
client: Client,
storage: Storage,
global_options: GlobalOptions,
run_neuro_cli: Optional[CommandRunner] = None,
) -> None:
self._config_dir = config_dir
self._console = console
self._client = client
self._storage = storage
self._project_storage: Optional[ProjectStorage] = None
self._config_loader: Optional[BatchLocalCL] = None
self._project: Optional[ProjectCtx] = None
self._run_neuro_cli = run_neuro_cli or make_cmd_exec(
"neuro", global_options=encode_global_options(global_options)
)
self._global_options = global_options
@property
def project_id(self) -> str:
assert self._project is not None
return self._project.id
@property
def project_role(self) -> Optional[str]:
assert self._project is not None
return self._project.role
@property
def config_loader(self) -> BatchLocalCL:
assert self._config_loader is not None
return self._config_loader
@property
def storage(self) -> ProjectStorage:
assert self._project_storage is not None
return self._project_storage
async def close(self) -> None:
if self._config_loader is not None:
await self._config_loader.close()
async def __aenter__(self) -> "BatchRunner":
self._config_loader = BatchLocalCL(self._config_dir, self._client)
self._project = await setup_project_ctx(EMPTY_ROOT, self._config_loader)
project = await self._storage.get_or_create_project(self._project.id)
self._project_storage = self._storage.project(id=project.id)
return self
async def __aexit__(
self,
exc_typ: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
await self.close()
# Next function is also used in tests:
async def _setup_bake(
self,
batch_name: str,
params: Optional[Mapping[str, str]] = None,
name: Optional[str] = None,
tags: Sequence[str] = (),
) -> Tuple[Bake, RunningBatchFlow]:
# batch_name is a name of yaml config inside self._workspace / .neuro
# folder without the file extension
self._console.log(f"[bright_black]neuro_sdk=={sdk_version}")
self._console.log(f"[bright_black]neuro_cli=={cli_version}")
self._console.log(f"[bright_black]neuro-extras=={neuro_extras.__version__}")
self._console.log(f"[bright_black]neuro-flow=={neuro_flow.__version__}")
self._console.log(f"Use config file {self.config_loader.flow_path(batch_name)}")
# Check that the yaml is parseable
flow = await RunningBatchFlow.create(
self.config_loader, batch_name, "fake-bake-id", params
)
for volume in flow.volumes.values():
if volume.local is not None:
# TODO: sync volumes if needed
raise NotImplementedError("Volumes sync is not supported")
await check_no_cycles(flow)
await check_local_deps(flow)
await check_expressions(flow)
await check_image_refs_unique(flow)
graphs = await build_graphs(flow)
self._console.log(
"Check config... [green]ok[/green]",
)
self._console.log("Create bake...")
bake = await self.storage.create_bake(
batch=batch_name,
graphs=graphs,
params=flow.params,
name=name,
tags=tags,
meta=BakeMeta(
git_info=await collect_git_info(),
),
)
bake_storage = self.storage.bake(id=bake.id)
config_meta = await self.config_loader.collect_configs(batch_name, bake_storage)
await bake_storage.create_attempt(number=1, configs_meta=config_meta)
self._console.log(
f"Bake [b]{bake.name or bake.id}[/b] of "
f"project [b]{self.project_id}[/b] is created"
)
self._console.log("Uploading image contexts/dockerfiles...")
await upload_image_data(flow, self._run_neuro_cli, bake_storage)
return bake, flow
# Next function is also used in tests:
async def _run_locals(
self,
bake_id: str,
) -> TaskStatus:
async with LocalsBatchExecutor.create(
self._console,
bake_id,
self._client,
self._storage,
project_role=self.project_role,
) as executor:
return await executor.run()
async def bake(
self,
batch_name: str,
local_executor: bool = False,
params: Optional[Mapping[str, str]] = None,
name: Optional[str] = None,
tags: Sequence[str] = (),
) -> None:
self._console.print(
Panel(f"[bright_blue]Bake [b]{batch_name}[/b]", padding=1),
justify="center",
)
bake, flow = await self._setup_bake(batch_name, params, name, tags)
await self._run_bake(bake, flow, local_executor)
async def _run_bake(
self,
bake: Bake,
flow: RunningBatchFlow,
local_executor: bool,
) -> None:
self._console.rule("Run local actions")
locals_result = await self._run_locals(bake.id)
if locals_result != TaskStatus.SUCCEEDED:
return
self._console.rule("Run main actions")
if local_executor:
self._console.log(f"[bright_black]Using local executor")
await self.process(bake.id)
else:
self._console.log(f"[bright_black]Starting remote executor")
if flow.life_span:
life_span = fmt_timedelta(flow.life_span)
else:
life_span = "7d"
run_args = [
"run",
"--pass-config",
f"--volume=storage:.flow/logs/{bake.id}/:/root/.neuro/logs"
f"--life-span={life_span}",
f"--tag=project:{self.project_id}",
f"--tag=flow:{bake.batch}",
f"--tag=bake_id:{bake.id}",
f"--tag=remote_executor",
]
project_role = self.project_role
if project_role is not None:
run_args.append(f"--share={project_role}")
run_args += [
EXECUTOR_IMAGE,
"--",
"neuro-flow",
*encode_global_options(self._global_options),
"--fake-workspace",
"execute",
bake.id,
]
await self._run_neuro_cli(*run_args)
async def process(
self,
bake_id: str,
) -> None:
async with BatchExecutor.create(
self._console,
bake_id,
self._client,
self._storage,
project_role=self.project_role,
) as executor:
status = await executor.run()
if status != TaskStatus.SUCCEEDED:
raise BakeFailedError(status)
def get_bakes(self) -> AsyncIterator[Bake]:
return self.storage.list_bakes()
async def get_bake_attempt(self, bake_id: str, *, attempt_no: int = -1) -> Attempt:
return await self._storage.bake(id=bake_id).attempt(number=attempt_no).get()
async def list_bakes(
self,
tags: AbstractSet[str] = frozenset(),
since: Optional[datetime.datetime] = None,
until: Optional[datetime.datetime] = None,
recent_first: bool = False,
) -> None:
def _setup_table() -> Table:
table = Table(box=box.MINIMAL_HEAVY_HEAD)
table.add_column(
"ID",
style="bold",
width=len("bake-f6bd815b-3a3b-4ea1-b5ec-e8ab13678e3e"),
)
table.add_column("NAME", min_width=12)
table.add_column("BATCH", min_width=20)
table.add_column(
"EXECUTOR", width=len("job-f6bd815b-3a3b-4ea1-b5ec-e8ab13678e3e")
)
table.add_column("STATUS", width=9)
table.add_column("WHEN", min_width=10)
table.show_edge = False
return table
header_table = _setup_table()
self._console.print(header_table)
async for bake in self.storage.list_bakes(
tags=tags,
since=since,
until=until,
recent_first=recent_first,
):
if bake.last_attempt is None:
self._console.print(
f"[yellow]Bake [b]{bake.id}[/b] is malformed, skipping"
)
else:
row_table = _setup_table()
row_table.show_header = False
row_table.add_row(
bake.id,
bake.name or "",
bake.batch,
bake.last_attempt.executor_id or "",
bake.last_attempt.result,
fmt_datetime(bake.last_attempt.created_at),
)
self._console.print(row_table)
async def inspect(
self,
bake_id: str,
*,
attempt_no: int = -1,
output: Optional[LocalPath] = None,
save_dot: bool = False,
save_pdf: bool = False,
view_pdf: bool = False,
) -> None:
bake_storage = self.storage.bake(id=bake_id)
try:
bake = await bake_storage.get()
except ResourceNotFound:
self._console.print("[yellow]Bake not found")
self._console.print(
f"Please make sure that the bake [b]{bake_id}[/b] and "
f"project [b]{self.project_id}[/b] are correct."
)
exit(1)
assert False, "unreachable"
attempt_storage = bake_storage.attempt(number=attempt_no)
attempt = await attempt_storage.get()
self._console.print(f"[b]Bake id: {bake_id}[/b]")
self._console.print(f"[b]Attempt #{attempt.number}[/b]", attempt.result)
if attempt.executor_id:
info = await self._client.jobs.status(attempt.executor_id)
self._console.print(
f"[b]Executor {attempt.executor_id}[/b]", TaskStatus(info.status)
)
task_table = Table(box=box.MINIMAL_HEAVY_HEAD)
task_table.add_column("ID", style="bold")
task_table.add_column("STATUS")
task_table.add_column("RAW ID", style="bright_black")
task_table.add_column("STARTED")
task_table.add_column("FINISHED")
tasks = [task async for task in attempt_storage.list_tasks()]
for task in sorted(tasks, key=attrgetter("created_at")):
task_table.add_row(
".".join(task.yaml_id),
task.status,
task.raw_id,
fmt_datetime(task.created_at),
fmt_datetime(task.finished_at),
)
self._console.print("Tasks:")
self._console.print(task_table)
image_table = Table(box=box.MINIMAL_HEAVY_HEAD)
image_table.add_column("REF", style="bold")
image_table.add_column("STATUS")
image_table.add_column("BUILDER ID", style="bright_black")
async for image in bake_storage.list_bake_images():
image_table.add_row(
image.ref,
image.status,
image.builder_job_id or "",
)
if image_table.rows:
self._console.print("Images:")
self._console.print(image_table)
if output is None:
output = LocalPath(f"{bake.id}_{attempt.number}").with_suffix(".gv")
graphs = bake.graphs
dot = Digraph(bake.batch, filename=str(output), strict=True, engine="dot")
dot.attr(compound="true")
dot.node_attr = {"style": "filled"}
await self._subgraph(
dot, graphs, (), {}, {task.yaml_id: task.status for task in tasks}
)
if save_dot:
self._console.print(f"Saving file {dot.filename}")
dot.save()
if save_pdf:
self._console.print(f"Rendering {dot.filename}.pdf")
dot.render(view=view_pdf)
elif view_pdf:
self._console.print(f"Opening {dot.filename}.pdf")
dot.view()
async def _subgraph(
self,
dot: Digraph,
graphs: Mapping[FullID, Mapping[FullID, AbstractSet[FullID]]],
prefix: FullID,
anchors: Dict[str, str],
statuses: Dict[FullID, TaskStatus],
) -> None:
lhead: Optional[str]
ltail: Optional[str]
color: Optional[str]
first = True
graph = graphs[prefix]
for task_id, deps in graph.items():
tgt = ".".join(task_id)
name = task_id[-1]
if first:
anchors[".".join(prefix)] = tgt
first = False
if task_id in statuses:
color = GRAPH_COLORS.get(statuses[task_id])
else:
color = None
if task_id in graphs:
lhead = "cluster_" + tgt
with dot.subgraph(name=lhead) as subdot:
subdot.attr(label=f"{name}")
subdot.attr(compound="true")
subdot.attr(color=color)
await self._subgraph(
subdot,
graphs,
task_id,
anchors,
statuses,
)
tgt = anchors[tgt]
else:
dot.node(tgt, name, color=color)
lhead = None
for dep in deps:
src = ".".join(dep)
if src in anchors:
# src is a subgraph
ltail = "cluster_" + src
src = anchors[src]
else:
ltail = None
dot.edge(src, tgt, ltail=ltail, lhead=lhead)
async def logs(
self, bake_id: str, task_id: str, *, attempt_no: int = -1, raw: bool = False
) -> None:
attempt_storage = self.storage.bake(id=bake_id).attempt(number=attempt_no)
attempt = await attempt_storage.get()
full_id = tuple(task_id.split("."))
try:
task = await attempt_storage.task(yaml_id=full_id).get()
except ResourceNotFound:
raise click.BadArgumentUsage(f"Unknown task {task_id}")
if not task.status.is_finished:
raise click.BadArgumentUsage(f"Task {task_id} is not finished")
self._console.print(f"[b]Attempt #{attempt.number}[/b]", attempt.result)
self._console.print(f"Task [b]{task_id}[/b]", task.status)
if not task.raw_id:
return
if raw:
async for chunk in self._client.jobs.monitor(task.raw_id):
self._console.print(chunk.decode("utf-8", "replace"), end="")
else:
async with CmdProcessor() as proc:
async for chunk in self._client.jobs.monitor(task.raw_id):
async for line in proc.feed_chunk(chunk):
self._console.print(line.decode("utf-8", "replace"), end="")
async for line in proc.feed_eof():
self._console.print(line.decode("utf-8", "replace"), end="")
async def cancel(self, bake_id: str, *, attempt_no: int = -1) -> None:
attempt_storage = self.storage.bake(id=bake_id).attempt(number=attempt_no)
attempt = await attempt_storage.get()
if attempt.result.is_finished:
raise click.BadArgumentUsage(
f"Attempt #{attempt.number} of {attempt.bake_id} is already stopped."
)
await attempt_storage.update(result=TaskStatus.CANCELLED)
self._console.print(
f"[b]Attempt #{attempt.number}[/b] of bake "
f"[b]{attempt.bake_id}[/b] was cancelled."
)
async def clear_cache(
self, batch: Optional[str] = None, task_id: Optional[str] = None
) -> None:
full_id: Optional[FullID] = None
if task_id:
full_id = tuple(task_id.split("."))
await self.storage.delete_cache_entries(batch, full_id)
async def restart(
self,
bake_id: str,
*,
attempt_no: int = -1,
from_failed: bool = True,
local_executor: bool = False,
) -> None:
bake, flow = await self._restart(
bake_id, attempt_no=attempt_no, from_failed=from_failed
)
await self._run_bake(bake, flow, local_executor)
async def _restart(
self,
bake_id: str,
*,
attempt_no: int = -1,
from_failed: bool = True,
) -> Tuple[Bake, RunningBatchFlow]:
bake_storage = self.storage.bake(id=bake_id)
bake = await bake_storage.get()
if bake.last_attempt and attempt_no == -1:
last_attempt = attempt = bake.last_attempt
else:
attempt = await bake_storage.attempt(number=attempt_no).get()
last_attempt = await bake_storage.last_attempt().get()
if not attempt.result.is_finished:
raise click.BadArgumentUsage(
f"Cannot re-run still running attempt #{attempt.number} "
f"of {bake.id}."
)
if not last_attempt.result.is_finished:
raise click.BadArgumentUsage(
f"Cannot re-run bake when last attempt #{last_attempt.number} "
f"of {bake.id} is still running."
)
if attempt.result == TaskStatus.SUCCEEDED and from_failed:
raise click.BadArgumentUsage(
f"Cannot re-run successful attempt #{attempt.number} "
f"of {bake.id} with `--from-failed` flag set.\n"
"Hint: Try adding --no-from-failed to restart bake from the beginning."
)
if attempt.number >= 99:
raise click.BadArgumentUsage(
f"Cannot re-run {bake.id}, the number of attempts exceeded."
)
new_attempt = await bake_storage.create_attempt(
number=last_attempt.number + 1,
configs_meta=attempt.configs_meta,
)
if from_failed:
new_attempt_storage = bake_storage.attempt(id=new_attempt.id)
graphs = bake.graphs
            handled = set() # a set of successfully finished and not cached tasks
tasks = {
task.yaml_id: task
async for task in bake_storage.attempt(id=attempt.id).list_tasks()
}
for task in sorted(tasks.values(), key=attrgetter("created_at")):
if task.status == TaskStatus.SUCCEEDED:
                    # check deps so we don't re-run post-actions that have an
                    # always() precondition
prefix = task.yaml_id[:-1]
graph = graphs[prefix]
deps = graph[task.yaml_id]
if not deps or all(dep in handled for dep in deps):
if (
prefix in tasks
and tasks[prefix].status != TaskStatus.SUCCEEDED
):
                            # If the action didn't succeed, we should create the task manually
await new_attempt_storage.create_task(
yaml_id=prefix,
status=TaskStatus.PENDING,
raw_id=None,
)
# TODO allow to create task with multiple statuses
# and copy them from old task
await new_attempt_storage.create_task(
yaml_id=task.yaml_id,
status=TaskStatus.SUCCEEDED,
raw_id=task.raw_id,
outputs=task.outputs,
state=task.state,
)
                        handled.add(task.yaml_id)
self._console.print(f"[b]Attempt #{new_attempt.number}[/b] is created")
flow = await get_running_flow(
bake, self._client, bake_storage, new_attempt.configs_meta
)
return bake, flow
|
82075
|
from autoencoder import Lambda
import torch.nn as nn
import torch
def create_block(in_channels, out_channels=None):
if out_channels is None:
out_channels = in_channels
return nn.Sequential(
nn.Conv2d(in_channels = in_channels, out_channels = out_channels, kernel_size = 3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def QualityNetwork(output_size = 3):
return nn.Sequential(
nn.AvgPool2d(2), # 128 -> 64
create_block(3, 8),
nn.MaxPool2d(2), # 64 -> 32
create_block(8, 16),
nn.MaxPool2d(2), # 32 -> 16
create_block(16, 16),
nn.MaxPool2d(2), # 16 -> 8
create_block(16, 16),
nn.MaxPool2d(2), # 8 -> 4
create_block(16, 32),
nn.MaxPool2d(2), # 4 -> 2
Lambda(lambda x: x.reshape(x.shape[0], -1)),
nn.Linear(128, 64),
nn.BatchNorm1d(64),
nn.ReLU(inplace=True),
nn.Linear(64, output_size),
nn.Softmax(dim=1),
)
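# --- Hedged usage sketch (not part of the original file): forward a random batch.
# The pooling comments above imply 128x128 inputs, so the flattened vector has
# 32 * 2 * 2 = 128 entries, matching nn.Linear(128, 64).
if __name__ == "__main__":
    net = QualityNetwork(output_size=3)
    net.eval()  # use running BatchNorm statistics so any batch size works
    with torch.no_grad():
        probs = net(torch.randn(4, 3, 128, 128))
    print(probs.shape)  # expected: torch.Size([4, 3]); each row sums to 1 (Softmax)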
|
82097
|
from __future__ import print_function
from distutils import log
from setuptools import setup, find_packages
import os
from jupyter_packaging import (
create_cmdclass,
install_npm,
ensure_targets,
combine_commands,
get_version,
skip_if_exists
)
# Name of the project
name = 'keplergl'
here = os.path.dirname(os.path.abspath(__file__))
long_description = 'Keplergl Jupyter Extension'
log.info('setup.py entered')
log.info('$PATH=%s' % os.environ['PATH'])
# Get version
version = get_version(os.path.join(name, '_version.py'))
js_dir = os.path.join(here, 'js')
# Representative files that should exist after a successful build
jstargets = [
os.path.join('keplergl', 'static', 'index.js'),
os.path.join('keplergl-jupyter', 'labextension', 'package.json'),
]
data_files_spec = [
('share/jupyter/nbextensions/keplergl-jupyter',
'keplergl/static', '**'),
('share/jupyter/labextensions/keplergl-jupyter',
'keplergl-jupyter/labextension', "**"),
('etc/jupyter/nbconfig/notebook.d', '.', 'keplergl-jupyter.json'),
]
cmdclass = create_cmdclass('jsdeps', data_files_spec=data_files_spec)
js_command = combine_commands(
install_npm(js_dir, npm=["yarn"], build_cmd='build'), ensure_targets(jstargets),
)
is_repo = os.path.exists(os.path.join(here, '.git'))
if is_repo:
cmdclass['jsdeps'] = js_command
else:
cmdclass['jsdeps'] = skip_if_exists(jstargets, js_command)
LONG_DESCRIPTION = 'A jupyter widget for kepler.gl, an advanced geospatial visualization tool, to render large-scale interactive maps.'
setup_args = {
'name': 'keplergl',
'version': version,
'description': 'This is a simple jupyter widget for kepler.gl, an advanced geospatial visualization tool, to render large-scale interactive maps.',
'long_description': LONG_DESCRIPTION,
'include_package_data': True,
'install_requires': [
'ipywidgets>=7.0.0,<8',
'traittypes>=0.2.1',
'geopandas>=0.5.0',
'pandas>=0.23.0',
'Shapely>=1.6.4.post2'
],
'packages': find_packages(),
'zip_safe': False,
'cmdclass': cmdclass,
'author': '<NAME>',
'author_email': '<EMAIL>',
'url': 'https://github.com/keplergl/kepler.gl/tree/master/bindings/kepler.gl-jupyter',
'keywords': [
'ipython',
'jupyter',
'widgets',
'geospatial',
'visualization',
'webGL'
],
'classifiers': [
'Development Status :: 4 - Beta',
'Framework :: IPython',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Multimedia :: Graphics',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
}
setup(**setup_args)
|
82098
|
import random
passlen = int(input("enter the length of password"))
s="abcdefghijklmnopqrstuvwxyz01234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ!@#$%^&*()?"
p = "".join(random.sample(s,passlen ))
print (p)
|
82169
|
from __future__ import print_function
import math
import numpy
import theano
import itertools
from theano import tensor, Op
from theano.gradient import disconnected_type
from fuel.utils import do_not_pickle_attributes
from picklable_itertools.extras import equizip
from collections import defaultdict, deque
from toposort import toposort_flatten
from lvsr.error_rate import (
reward_matrix, gain_matrix, edit_distance, _edit_distance_matrix, _bleu)
class RewardOp(Op):
__props__ = ()
def __init__(self, eos_label, alphabet_size):
"""Computes matrices of rewards and gains."""
self.eos_label = eos_label
self.alphabet_size = alphabet_size
def perform(self, node, inputs, output_storage):
groundtruth, recognized = inputs
if (groundtruth.ndim != 2 or recognized.ndim != 2
or groundtruth.shape[1] != recognized.shape[1]):
raise ValueError
batch_size = groundtruth.shape[1]
all_rewards = numpy.zeros(
recognized.shape + (self.alphabet_size,), dtype='int64')
all_gains = numpy.zeros(
recognized.shape + (self.alphabet_size,), dtype='int64')
alphabet = list(range(self.alphabet_size))
for index in range(batch_size):
y = list(groundtruth[:, index])
y_hat = list(recognized[:, index])
try:
eos_pos = y.index(self.eos_label)
y = y[:eos_pos + 1]
            except ValueError:
# Sometimes groundtruth is in fact also a prediction
# and in this case it might not have EOS label
pass
if self.eos_label in y_hat:
y_hat_eos_pos = y_hat.index(self.eos_label)
y_hat_trunc = y_hat[:y_hat_eos_pos + 1]
else:
y_hat_trunc = y_hat
rewards_trunc = reward_matrix(
y, y_hat_trunc, alphabet, self.eos_label)
# pass freshly computed rewards to gain_matrix to speed things up
# a bit
gains_trunc = gain_matrix(y, y_hat_trunc, alphabet,
given_reward_matrix=rewards_trunc)
gains = numpy.ones((len(y_hat), len(alphabet))) * -1000
gains[:(gains_trunc.shape[0] - 1), :] = gains_trunc[:-1, :]
rewards = numpy.ones((len(y_hat), len(alphabet))) * -1
rewards[:(rewards_trunc.shape[0] - 1), :] = rewards_trunc[:-1, :]
all_rewards[:, index, :] = rewards
all_gains[:, index, :] = gains
output_storage[0][0] = all_rewards
output_storage[1][0] = all_gains
def grad(self, *args, **kwargs):
return disconnected_type(), disconnected_type()
def make_node(self, groundtruth, recognized):
recognized = tensor.as_tensor_variable(recognized)
groundtruth = tensor.as_tensor_variable(groundtruth)
return theano.Apply(
self, [groundtruth, recognized], [tensor.ltensor3(), tensor.ltensor3()])
def trim(y, mask):
try:
return y[:mask.index(0.)]
except ValueError:
return y
class EditDistanceOp(Op):
__props__ = ()
def __init__(self, bos_label, eos_label, deltas=False):
self.bos_label = bos_label
self.eos_label = eos_label
self.deltas = deltas
def perform(self, node, inputs, output_storage):
prediction, prediction_mask, groundtruth, groundtruth_mask = inputs
if (groundtruth.ndim != 2 or prediction.ndim != 2
or groundtruth.shape[1] != prediction.shape[1]):
raise ValueError
batch_size = groundtruth.shape[1]
results = numpy.zeros_like(prediction[:, :, None])
for index in range(batch_size):
y = trim(list(groundtruth[:, index]),
list(groundtruth_mask[:, index]))
y_hat = trim(list(prediction[:, index]),
list(prediction_mask[:, index]))
if self.deltas:
matrix = _edit_distance_matrix(
y, y_hat, special_tokens={self.bos_label, self.eos_label})
row = matrix[-1, :].copy()
results[:len(y_hat), index, 0] = row[1:] - matrix[-1, :-1]
else:
results[len(y_hat) - 1, index, 0] = edit_distance(y, y_hat)
output_storage[0][0] = results
    def grad(self, *args, **kwargs):
        # one disconnected gradient per input, mirroring BleuOp below
        return [theano.gradient.disconnected_type()] * 4
def make_node(self, prediction, prediction_mask,
groundtruth, groundtruth_mask):
prediction = tensor.as_tensor_variable(prediction)
prediction_mask = tensor.as_tensor_variable(prediction_mask)
groundtruth = tensor.as_tensor_variable(groundtruth)
groundtruth_mask = tensor.as_tensor_variable(groundtruth_mask)
return theano.Apply(
self, [prediction, prediction_mask,
groundtruth, groundtruth_mask], [tensor.ltensor3()])
class BleuOp(Op):
__props__ = ()
def __init__(self, bos_label, eos_label, deltas=False):
self.n = 4
self.deltas = deltas
self.special_tokens = set([bos_label, eos_label])
def grad(self, *args, **kwargs):
return [theano.gradient.disconnected_type()] * 4
def perform(self, node, inputs, output_storage):
prediction, prediction_mask, groundtruth, groundtruth_mask = inputs
if (groundtruth.ndim != 2 or prediction.ndim != 2
or groundtruth.shape[1] != prediction.shape[1]):
raise ValueError
batch_size = groundtruth.shape[1]
results = numpy.zeros_like(prediction[:, :, None]).astype('float32')
for index in range(batch_size):
y = trim(list(groundtruth[:, index]),
list(groundtruth_mask[:, index]))
y_no_special = [token for token in y
if token not in self.special_tokens]
y_hat = trim(list(prediction[:, index]),
list(prediction_mask[:, index]))
y_hat_no_special = [token for token in y_hat
if token not in self.special_tokens]
blues, _, _, _ = _bleu(y_no_special, y_hat_no_special, self.n)
reward = blues[:, self.n - 1].copy()
if self.deltas:
reward[1:] = reward[1:] - reward[:-1]
pos = -1
for i in range(len(y_hat)):
if y_hat[i] not in self.special_tokens:
pos = pos + 1
results[i, index, 0] = reward[pos]
else:
results[i, index, 0] = 0.
elif len(reward):
results[len(y_hat) - 1, index, 0] = reward[-1]
output_storage[0][0] = results
def make_node(self, prediction, prediction_mask,
groundtruth, groundtruth_mask):
prediction = tensor.as_tensor_variable(prediction)
prediction_mask = tensor.as_tensor_variable(prediction_mask)
groundtruth = tensor.as_tensor_variable(groundtruth)
groundtruth_mask = tensor.as_tensor_variable(groundtruth_mask)
return theano.Apply(
self,
[prediction, prediction_mask,
groundtruth, groundtruth_mask],
[tensor.tensor3()])
|
82187
|
import boto3
from botocore.config import Config
import json
import os
TABLE_NAME = os.environ['TABLE_NAME']
config = Config(connect_timeout=5, read_timeout=5, retries={'max_attempts': 1})
dynamodb = boto3.client('dynamodb', config=config)
def assemble(response):
body = {
'quotes': []
}
for item in response['Items']:
if 'quote' in item:
body['quotes'].append({
'responder': item['responder']['S'],
'quote': item['quote']['N']
})
else:
body['rfq-id'] = item['id']['S']
body['from'] = item['from']['S']
body['to'] = item['to']['S']
body['customer'] = item['customer']['S']
return body
def lambda_handler(event, context):
print('received event: {}'.format(json.dumps(event)))
id = event['pathParameters']['id']
# query DynamoDB with the rfq-id provided in the request
response = dynamodb.query(
TableName = TABLE_NAME,
KeyConditionExpression = 'id = :id',
ExpressionAttributeValues = {':id': {'S': id}}
)
body = assemble(response)
return {
'statusCode': 200,
'body': json.dumps(body)
}
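# --- Hedged local sketch (not part of the deployed handler; values are synthetic,
# and the module-level boto3/TABLE_NAME setup above still requires TABLE_NAME to be set).
# It shows the item layout assemble() expects: one metadata item plus quote items.
if __name__ == '__main__':
    sample_response = {
        'Items': [
            {'id': {'S': 'rfq-123'}, 'from': {'S': 'AMS'}, 'to': {'S': 'JFK'},
             'customer': {'S': 'acme'}},
            {'responder': {'S': 'carrier-a'}, 'quote': {'N': '1450'}},
        ]
    }
    print(json.dumps(assemble(sample_response)))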
|
82204
|
import numpy as np
class BayesDiscri:
def __init__(self):
        '''
        :__init__: Initialize the BayesDiscri class
        '''
        self.varipro=[] # conditional probability of each feature xk given each class yi
        self.priorpro={} # prior probability of each class yi
        self.respro=[] # probability of each test-set sample vector belonging to each class
def train(self, data, rowvar=False):
        '''
        :train: Train the classifier on a training set
        :param data: Training data matrix; entries may be numbers or strings representing feature values. The last row or last column holds the class labels. The matrix must contain at least two samples and two features
        :type data: np.array
        :param rowvar: Specifies whether each row or each column represents a variable; rowvar=True means each row is a variable and each column is a sample vector; rowvar=False means each column is a variable and each row is a sample vector. Defaults to rowvar=False
        :type rowvar: bool
        '''
        # 1. Convert the training matrix to the rowvar=False convention, i.e. each row is a sample vector
        if rowvar==True:
            data=data.T
        # 2. Compute the prior probability of each class yi; the last column holds the sample labels
        size=np.shape(data)[0] # number of samples
        count=np.shape(data)[1] # number of features
dic={}
for i in range(size):
if data[i][count-1] in dic.keys():
dic[str(data[i][count-1])]+=1
else:
dic[str(data[i][count-1])]=1
for i in dic.keys():
dic[i]/=size
self.priorpro=dic
        # 3. Compute the conditional probability of each feature xk given each class yi
for i in range(count-1):
dic={}
for k in range(size):
                temp=str(data[k][i])+'|'+str(data[k][count-1]) # dic keys have the form: feature value+'|'+class label, i.e. the conditional probability p(feature value|class label)
if temp in dic.keys():
dic[temp]+=1
else:
dic[temp]=1
for k in dic.keys():
                kind=k.split('|')[1] # extract the class label
                dic[k]/=data[:,count-1].tolist().count(kind) # count the samples with that class label
self.varipro.append(dic)
# print(self.priorpro)
# print(self.varipro)
return
def discriminate(self, data, rowvar=False):
        '''
        :discriminate: Classify the test set
        :param data: Test data matrix; entries may be numbers or strings representing feature values
        :type data: np.array
        :param rowvar: Specifies whether each row or each column represents a variable; rowvar=True means each row is a variable and each column is a sample vector; rowvar=False means each column is a variable and each row is a sample vector. Defaults to rowvar=False
        :type rowvar: bool
        :return: Tuple (res, respro)
        :        res: List of classification results, where res[i] is the class label of the sample vector at row or column index i (0-based)
        :        respro: List of per-class probabilities, where respro[i] is a dict with the probability that the sample vector at row or column index i (0-based) belongs to each class
        :        Example: for two test samples, a possible return value is (res, respro) with res=['class A','class A'] and respro=[{'class A':0.22,'class B':0.78}, {'class A':0.99,'class B':0.01}]
        :rtype: tuple
        '''
        # 1. Convert the test matrix to the rowvar=False convention, i.e. each row is a sample vector
        if rowvar==True:
            data=data.T
        if data.ndim==1:
            data=np.array([data])
        # 2. For each test sample vector and each class value yi, first compute p(x|yi)p(yi)=p(x1|yi)*p(x2|yi)*...*p(xn|yi)p(yi); the class with the largest value is taken as the classification result
        size=np.shape(data)[0]
        count=np.shape(data)[1]
        res=[] # classification results
for i in range(size):
p=[]
kind=[]
for k in self.priorpro.keys():
prior=self.priorpro[k]
for m in range(count):
name=str(data[i][m])+'|'+str(k)
if name in self.varipro[m].keys():
prior*=self.varipro[m][name]
else:
prior*=0
break
                p.append(prior) # numerator of the posterior probability of class yi: p(x|yi)p(yi)
                kind.append(k) # label corresponding to class yi
res.append(kind[p.index(max(p))])
add=sum(p)
            p=[x/add for x in p] # compute the posterior probabilities; the denominator is the same for every class (the sum of the numerators), so it need not be computed separately
self.respro.append(dict(zip(kind,p)))
return (res,self.respro)
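# --- Hedged usage sketch (not part of the original class): a tiny categorical
# dataset whose last column is the class label, as the train() docstring describes.
if __name__ == '__main__':
    train_data = np.array([
        ['sunny', 'hot', 'no'],
        ['sunny', 'mild', 'no'],
        ['rainy', 'mild', 'yes'],
        ['rainy', 'hot', 'yes'],
    ])
    clf = BayesDiscri()
    clf.train(train_data)  # rows are samples (rowvar=False)
    res, respro = clf.discriminate(np.array([['rainy', 'hot'], ['sunny', 'hot']]))
    print(res)     # predicted labels, e.g. ['yes', 'no']
    print(respro)  # posterior probability of each class for every test sample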
|
82207
|
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from horch.common import tuplify
from horch.models import get_default_activation, get_default_norm_layer
from horch.config import cfg
# sigmoid = torch.nn.Sigmoid()
class SwishFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
result = i * torch.sigmoid(i)
ctx.save_for_backward(i)
return result
@staticmethod
def backward(ctx, grad_output):
i = ctx.saved_variables[0]
sigmoid_i = torch.sigmoid(i)
return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
swish = SwishFunction.apply
class Swish(nn.Module):
def forward(self, x):
return swish(x)
def hardsigmoid(x, inplace=True):
return F.relu6(x + 3, inplace=inplace) / 6
def hardswish(x, inplace=True):
return x * hardsigmoid(x, inplace)
# def swish(x):
# return x * torch.sigmoid(x)
# class Swish(nn.Module):
# def __init__(self):
# super().__init__()
#
# def forward(self, x):
# return swish(x)
class HardSigmoid(nn.Module):
def __init__(self, inplace=True):
super().__init__()
self.inplace = inplace
def forward(self, x):
return hardsigmoid(x, self.inplace)
def extra_repr(self):
inplace_str = 'inplace=True' if self.inplace else ''
return inplace_str
class HardSwish(nn.Module):
def __init__(self, inplace=True):
super().__init__()
self.inplace = inplace
def forward(self, x):
return hardswish(x, self.inplace)
def extra_repr(self):
inplace_str = 'inplace=True' if self.inplace else ''
return inplace_str
def upsample_add(x, y):
r"""
Upsample x and add it to y.
Parameters
----------
x : torch.Tensor
tensor to upsample
y : torch.Tensor
tensor to be added
"""
h, w = y.size()[2:4]
return F.interpolate(x, size=(h, w), mode='bilinear', align_corners=False) + y
def upsample_concat(x, y):
h, w = y.size()[2:4]
x = F.interpolate(x, size=(h, w), mode='bilinear', align_corners=False)
return torch.cat((x, y), dim=1)
def get_groups(channels, ref=32):
if channels == 1:
return 1
xs = filter(lambda x: channels % x == 0, range(2, channels + 1))
c = min(xs, key=lambda x: abs(x - ref))
if c < 8:
c = max(c, channels // c)
return channels // c
def get_norm_layer(channels, name='default', **kwargs):
assert channels is not None and isinstance(channels, int)
if isinstance(name, nn.Module):
return name
elif hasattr(name, '__call__'):
return name(channels)
elif name == 'default':
return get_norm_layer(channels, get_default_norm_layer(), **kwargs)
elif name == 'bn':
if 'affine' in kwargs:
cfg_bn = {**cfg.bn, 'affine': kwargs['affine']}
else:
cfg_bn = cfg.bn
return nn.BatchNorm2d(channels, **cfg_bn)
elif name == 'gn':
num_groups = get_groups(channels, 32)
return nn.GroupNorm(num_groups, channels)
else:
raise NotImplementedError("No normalization named %s" % name)
def get_activation(name='default'):
if isinstance(name, nn.Module):
return name
if name == 'default':
return get_activation(get_default_activation())
elif name == 'relu':
return nn.ReLU(**cfg.relu)
elif name == 'relu6':
return nn.ReLU6(**cfg.relu6)
elif name == 'leaky_relu':
return nn.LeakyReLU(**cfg.leaky_relu)
elif name == 'sigmoid':
return nn.Sigmoid()
elif name == 'hswish':
return HardSwish(**cfg.hswish)
elif name == 'swish':
return Swish()
else:
raise NotImplementedError("Activation not implemented: %s" % name)
def PreConv2d(in_channels, out_channels,
kernel_size, stride=1,
padding='same', dilation=1, groups=1, bias=False,
norm_layer='default', activation='default'):
if padding == 'same':
if isinstance(kernel_size, tuple):
kh, kw = kernel_size
ph = (kh - 1) // 2
pw = (kw - 1) // 2
padding = (ph, pw)
else:
padding = (kernel_size - 1) // 2
return nn.Sequential(OrderedDict([
("bn", get_norm_layer(norm_layer, in_channels)),
("relu", get_activation(activation)),
("conv", nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=bias))
]))
def Conv2d(in_channels, out_channels,
kernel_size, stride=1,
padding='same', dilation=1, groups=1, bias=None,
norm_layer=None, activation=None, depthwise_separable=False, mid_norm_layer=None, transposed=False):
if depthwise_separable:
assert kernel_size != 1, "No need to use depthwise separable convolution in 1x1"
# if norm_layer is None:
# assert mid_norm_layer is not None, "`mid_norm_layer` must be provided when `norm_layer` is None"
# else:
if mid_norm_layer is None:
mid_norm_layer = norm_layer
return DWConv2d(in_channels, out_channels, kernel_size, stride, padding, bias, norm_layer, activation)
if padding == 'same':
if isinstance(kernel_size, tuple):
if dilation == 1:
dilation = (1, 1)
else:
assert isinstance(dilation, tuple)
kh, kw = kernel_size
dh, dw = dilation
ph = (kh + (kh - 1) * (dh - 1) - 1) // 2
pw = (kw + (kw - 1) * (dw - 1) - 1) // 2
padding = (ph, pw)
else:
padding = (kernel_size + (kernel_size - 1) * (dilation - 1) - 1) // 2
layers = []
if bias is None:
bias = norm_layer is None
if transposed:
conv = nn.ConvTranspose2d(
in_channels, out_channels,
kernel_size, stride, padding, dilation=dilation, groups=groups, bias=bias)
else:
conv = nn.Conv2d(
in_channels, out_channels,
kernel_size, stride, padding, dilation=dilation, groups=groups, bias=bias)
if activation is not None:
if activation == 'sigmoid':
nn.init.xavier_normal_(conv.weight)
elif activation == 'leaky_relu':
nn.init.kaiming_normal_(conv.weight, a=cfg.leaky_relu.negative_slope, nonlinearity='leaky_relu')
else:
try:
nn.init.kaiming_normal_(conv.weight, nonlinearity=activation)
except ValueError:
nn.init.kaiming_normal_(conv.weight, nonlinearity='relu')
else:
nn.init.kaiming_normal_(conv.weight, nonlinearity='relu')
if bias:
nn.init.zeros_(conv.bias)
if norm_layer is not None:
if norm_layer == 'default':
norm_layer = get_default_norm_layer()
layers.append(get_norm_layer(out_channels, norm_layer))
if activation is not None:
layers.append(get_activation(activation))
layers = [conv] + layers
if len(layers) == 1:
return layers[0]
else:
return nn.Sequential(*layers)
def Linear(in_channels, out_channels, bias=None, norm_layer=None, activation=None):
layers = []
if bias is None:
bias = norm_layer is None
fc = nn.Linear(
in_channels, out_channels, bias=bias)
if activation is not None:
if activation == 'sigmoid':
nn.init.xavier_normal_(fc.weight)
elif activation == 'leaky_relu':
nn.init.kaiming_normal_(fc.weight, a=0.1, nonlinearity='leaky_relu')
else:
try:
nn.init.kaiming_normal_(fc.weight, nonlinearity=activation)
except ValueError:
nn.init.kaiming_normal_(fc.weight, nonlinearity='relu')
else:
nn.init.kaiming_normal_(fc.weight, nonlinearity='relu')
if bias:
nn.init.zeros_(fc.bias)
if norm_layer == 'default' or norm_layer == 'bn':
layers.append(nn.BatchNorm1d(out_channels))
if activation is not None:
layers.append(get_activation(activation))
layers = [fc] + layers
if len(layers) == 1:
return layers[0]
else:
return nn.Sequential(*layers)
def Pool(name, kernel_size, stride=1, padding='same', ceil_mode=False):
if padding == 'same':
if isinstance(kernel_size, tuple):
kh, kw = kernel_size
ph = (kh - 1) // 2
pw = (kw - 1) // 2
padding = (ph, pw)
else:
padding = (kernel_size - 1) // 2
if name == 'avg':
return nn.AvgPool2d(kernel_size, stride, padding, ceil_mode=ceil_mode, count_include_pad=False)
elif name == 'max':
return nn.MaxPool2d(kernel_size, stride, padding, ceil_mode=ceil_mode)
else:
raise NotImplementedError("No activation named %s" % name)
def DWConv2d(in_channels, out_channels,
kernel_size=3, stride=1,
padding='same', bias=True,
norm_layer=None, activation=None):
mid_norm_layer = None
return nn.Sequential(
Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=in_channels,
norm_layer=mid_norm_layer),
Conv2d(in_channels, out_channels, kernel_size=1,
norm_layer=norm_layer, bias=bias, activation=activation),
)
class Sequential(nn.Sequential):
def __init__(self, *args, **_kwargs):
super().__init__(*args)
def forward(self, *xs):
for module in self._modules.values():
xs = module(*tuplify(xs))
return xs
class Identity(nn.Module):
def __init__(self, *_args, **_kwargs):
super().__init__()
def forward(self, x):
return x
class Flatten(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x.view(x.size(0), -1)
def seq(*modules):
mods = []
for k, v in modules:
if v is not None:
mods.append((k, v))
return nn.Sequential(OrderedDict(mods))
class L2Norm(nn.Module):
def __init__(self, n_channels, scale):
super(L2Norm, self).__init__()
self.n_channels = n_channels
self.gamma = scale
self.eps = 1e-10
self.weight = nn.Parameter(torch.zeros(self.n_channels, dtype=torch.float32), requires_grad=True)
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.weight, self.gamma)
def forward(self, x):
norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
x = torch.div(x, norm)
out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x) * x
return out
class SelfAttention(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.conv_theta = Conv2d(in_channels, in_channels // 8, kernel_size=1)
self.conv_phi = Conv2d(in_channels, in_channels // 8, kernel_size=1)
self.pool_phi = nn.MaxPool2d(kernel_size=2, stride=(2, 2))
self.conv_g = Conv2d(in_channels, in_channels // 2, kernel_size=1)
self.pool_g = nn.MaxPool2d(kernel_size=2, stride=(2, 2))
self.conv_attn = Conv2d(in_channels // 2, in_channels, kernel_size=1)
self.sigma = nn.Parameter(torch.zeros(1), requires_grad=True)
def forward(self, x):
b, c, h, w = x.size()
theta = self.conv_theta(x)
theta = theta.view(b, -1, h * w)
phi = self.conv_phi(x)
phi = self.pool_phi(phi)
phi = phi.view(b, -1, h * w // 4)
attn = torch.bmm(theta.permute(0, 2, 1), phi)
attn = F.softmax(attn, dim=-1)
g = self.conv_g(x)
g = self.pool_g(g)
g = g.view(b, -1, h * w // 4)
attn_g = torch.bmm(g, attn.permute(0, 2, 1))
attn_g = attn_g.view(b, -1, h, w)
attn_g = self.conv_attn(attn_g)
x = x + self.sigma * attn_g
return x
class SelfAttention2(nn.Module):
def __init__(self, in_channels, reduction=8):
super().__init__()
channels = in_channels // reduction
self.conv_theta = Conv2d(in_channels, channels, kernel_size=1)
self.conv_phi = Conv2d(in_channels, channels, kernel_size=1)
self.conv_g = Conv2d(in_channels, channels, kernel_size=1)
self.conv_attn = Conv2d(channels, in_channels, kernel_size=1)
self.sigma = nn.Parameter(torch.zeros(1), requires_grad=True)
def forward(self, x):
b, c, h, w = x.size()
theta = self.conv_theta(x)
theta = theta.view(b, -1, h * w)
phi = self.conv_phi(x)
phi = phi.view(b, -1, h * w)
attn = torch.bmm(theta.permute(0, 2, 1), phi)
attn = F.softmax(attn, dim=-1)
g = self.conv_g(x)
g = g.view(b, -1, h * w)
attn_g = torch.bmm(g, attn.permute(0, 2, 1))
attn_g = attn_g.view(b, -1, h, w)
attn_g = self.conv_attn(attn_g)
x = x + self.sigma * attn_g
return x
class ConditionalBatchNorm2d(nn.Module):
def __init__(self, num_features, num_classes, momentum=0.001):
super().__init__()
self.num_features = num_features
self.bn = nn.BatchNorm2d(num_features, affine=False, momentum=momentum)
self.embed = nn.Embedding(num_classes, num_features * 2)
self.embed.weight.data[:, :num_features].normal_(1, 0.02) # Initialise scale at N(1, 0.02)
self.embed.weight.data[:, num_features:].zero_() # Initialise bias at 0
def forward(self, x, y):
out = self.bn(x)
gamma, beta = self.embed(y).chunk(2, 1)
out = gamma.view(-1, self.num_features, 1, 1) * out + beta.view(-1, self.num_features, 1, 1)
return out
class SharedConditionalBatchNorm2d(nn.Module):
def __init__(self, num_features, embedding, momentum=0.001):
super().__init__()
self.num_features = num_features
self.bn = nn.BatchNorm2d(num_features, affine=False, momentum=momentum)
self.embedding = embedding
def forward(self, x, y):
out = self.bn(x)
        gamma, beta = self.embedding(y).chunk(2, 1)
out = gamma.view(-1, self.num_features, 1, 1) * out + beta.view(-1, self.num_features, 1, 1)
return out
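# --- Hedged usage sketch (not part of the original module; assumes the default
# horch cfg entries used above, e.g. cfg.bn and cfg.relu, are available).
# Conv2d with padding='same' keeps the spatial size at stride 1 and appends BN + ReLU.
if __name__ == "__main__":
    block = Conv2d(3, 16, kernel_size=3, norm_layer='bn', activation='relu')
    out = block(torch.randn(2, 3, 32, 32))
    print(out.shape)  # expected: torch.Size([2, 16, 32, 32])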
|
82254
|
try:
raise
except:
pass
try:
raise NotImplementedError('User Defined Error Message.')
except NotImplementedError as err:
print('NotImplementedError')
except:
print('Error :')
try:
raise KeyError('missing key')
except KeyError as ex:
print('KeyError')
except:
print('Error :')
try:
1 // 0
except ZeroDivisionError as ex:
print('ZeroDivisionError')
except:
print('Error :')
try:
raise RuntimeError("runtime!")
except RuntimeError as ex:
print('RuntimeError :', ex)
except:
print('Error :')
|
82267
|
import scadnano as sc
import modifications as mod
import dataclasses
def create_design():
stap_left_ss1 = sc.Domain(1, True, 0, 16)
stap_left_ss0 = sc.Domain(0, False, 0, 16)
stap_right_ss0 = sc.Domain(0, False, 16, 32)
stap_right_ss1 = sc.Domain(1, True, 16, 32)
scaf_ss1_left = sc.Domain(1, False, 0, 16)
scaf_ss0 = sc.Domain(0, True, 0, 32)
scaf_ss1_right = sc.Domain(1, False, 16, 32)
stap_left = sc.Strand([stap_left_ss1, stap_left_ss0])
stap_right = sc.Strand([stap_right_ss0, stap_right_ss1])
scaf = sc.Strand([scaf_ss1_left, scaf_ss0, scaf_ss1_right], color=sc.default_scaffold_color)
strands = [scaf, stap_left, stap_right]
design = sc.Design(strands=strands, grid=sc.square)
design.add_deletion(helix=0, offset=11)
design.add_deletion(helix=0, offset=12)
design.add_deletion(helix=0, offset=24)
design.add_deletion(helix=1, offset=12)
design.add_deletion(helix=1, offset=24)
design.add_insertion(helix=0, offset=29, length=1)
design.add_insertion(helix=1, offset=2, length=1)
design.assign_dna(scaf, 'AACT' * 16)
# biotin_mod_5p = dataclasses.replace(mod.biotin_5p, font_size=30)
# cy3_mod_3p = dataclasses.replace(mod.cy3_3p, font_size=30)
stap_left.set_modification_5p(mod.biotin_5p)
stap_left.set_modification_3p(mod.cy3_3p)
stap_left.set_modification_internal(9, mod.cy3_int)
stap_left.set_modification_internal(10, mod.biotin_int)
stap_left.set_modification_internal(11, mod.cy3_int)
stap_left.set_modification_internal(12, mod.cy5_int)
stap_left.set_modification_internal(4, mod.cy3_int)
stap_left.set_modification_internal(26, mod.cy5_int)
stap_right.set_modification_5p(mod.cy5_5p)
stap_right.set_modification_internal(5, mod.cy3_int)
stap_right.set_modification_3p(mod.biotin_3p)
scaf.set_modification_5p(mod.biotin_5p)
scaf.set_modification_3p(mod.cy3_3p)
scaf.set_modification_internal(5, mod.cy5_int)
scaf.set_modification_internal(32, mod.cy3_int)
return design
if __name__ == '__main__':
design = create_design()
design.write_scadnano_file(directory='output_designs')
|
82278
|
def extractBersekerTranslations(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if 'Because the world has changed into a death game is funny' in item['tags'] and (chp or vol or 'Prologue' in postfix):
return buildReleaseMessageWithType(item, 'Sekai ga death game ni natta no de tanoshii desu', vol, chp, frag=frag, postfix=postfix)
return False
|
82333
|
import sys
import time
import random
from .address import Address
__all__ = [
'NameServers',
'NoNameServer',
]
class NoNameServer(Exception):
pass
class IterMixIn:
def iter(self):
if not self.data: raise NoNameServer
return iter(self.data)
def success(self, item):
pass
def fail(self, item):
pass
class WeightMixIn:
def __init__(self, *k, **kw):
self._failures = [0] * len(self.data)
self.ts = 0
self._update()
def _update(self):
if time.time() > self.ts + 60:
self.ts = time.time()
self._sorted = list(self.data[i] for i in sorted(range(len(self.data)), key=lambda i: self._failures[i]))
self._last_min_failures = self._failures
self._failures = [0] * len(self.data)
def success(self, item):
self._update()
def fail(self, item):
self._update()
index = self.data.index(item)
self._failures[index] += 1
def iter(self):
if not self.data: raise NoNameServer
return iter(self._sorted)
class NameServers(WeightMixIn, IterMixIn):
def __init__(self, nameservers=[], **kw):
self.data = [Address.parse(item, default_protocol='udp', allow_domain=True) for item in nameservers]
super().__init__(**kw)
def __bool__(self):
return len(self.data) > 0
def __iter__(self):
return iter(self.data)
def __repr__(self):
return '<NameServers [%s]>' % ','.join(map(str, self.data))
|
82383
|
import numpy as np
try:
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
except ImportError:
import pyfits
import pywcs
def fits_overlap(file1,file2):
"""
Create a header containing the exact overlap region between two .fits files
Does NOT check to make sure the FITS files are in the same coordinate system!
Parameters
----------
file1,file2 : str,str
files from which to extract header strings
"""
hdr1 = pyfits.getheader(file1)
hdr2 = pyfits.getheader(file2)
return header_overlap(hdr1,hdr2)
def header_overlap(hdr1,hdr2,max_separation=180):
"""
Create a header containing the exact overlap region between two .fits files
Does NOT check to make sure the FITS files are in the same coordinate system!
Parameters
----------
hdr1,hdr2 : pyfits.Header
Two pyfits headers to compare
max_separation : int
Maximum number of degrees between two headers to consider before flipping
signs on one of them (this to deal with the longitude=0 region)
"""
wcs1 = pywcs.WCS(hdr1)
wcs2 = pywcs.WCS(hdr2)
((xmax1,ymax1),) = wcs1.wcs_pix2world([[hdr1['NAXIS1'],hdr1['NAXIS2']]],1)
((xmax2,ymax2),) = wcs2.wcs_pix2world([[hdr2['NAXIS1'],hdr2['NAXIS2']]],1)
((xmin1,ymin1),) = wcs1.wcs_pix2world([[1,1]],1)
((xmin2,ymin2),) = wcs2.wcs_pix2world([[1,1]],1)
# make sure the edges are all in the same quadrant-ish
    xmlist = [ xm - 360 if xm > max_separation else
               xm + 360 if xm < -max_separation else xm
               for xm in (xmin1,xmax1,xmin2,xmax2)]
xmin1,xmax1,xmin2,xmax2 = xmlist
if xmin2 > xmax2:
xmax2,xmin2 = xmin2,xmax2
if xmin1 > xmax1:
xmax1,xmin1 = xmin1,xmax1
if ymin2 > ymax2:
ymax2,ymin2 = ymin2,ymax2
if ymin1 > ymax1:
ymax1,ymin1 = ymin1,ymax1
xmin = min(xmin1,xmin2)
xmax = max(xmax1,xmax2)
ymin = min(ymin1,ymin2)
ymax = max(ymax1,ymax2)
try:
cdelt1,cdelt2 = np.abs(np.vstack([wcs1.wcs.cd.diagonal(), wcs2.wcs.cd.diagonal()])).min(axis=0) * np.sign(wcs1.wcs.cd).diagonal()
except AttributeError:
cdelt1,cdelt2 = np.abs(np.vstack([wcs1.wcs.cdelt, wcs2.wcs.cdelt])).min(axis=0) * np.sign(wcs1.wcs.cdelt)
# no overlap at all
if ((xmin1 > xmax2) or
(xmin2 > xmax1)):
naxis1 = 0
else:
naxis1 = np.ceil(np.abs((xmax-xmin)/cdelt1))
if ymin1 > ymax2 or ymin2 > ymax1:
naxis2 = 0
else:
naxis2 = np.ceil(np.abs((ymax-ymin)/cdelt2))
# may want to change this later...
new_header = hdr1.copy()
new_header['CRVAL1'] = (xmin+xmax)/2.
new_header['CRVAL2'] = (ymin+ymax)/2.
new_header['CDELT1'] = cdelt1
new_header['CDELT2'] = cdelt2
new_header['NAXIS1'] = naxis1
new_header['NAXIS2'] = naxis2
new_header['CRPIX1'] = new_header['NAXIS1']/2
new_header['CRPIX2'] = new_header['NAXIS2']/2
return new_header
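# --- Hedged usage sketch (not part of the original module): two synthetic TAN
# headers offset by half a degree in RA; all keyword values are illustrative only.
if __name__ == '__main__':
    hdr_a = pyfits.Header()
    hdr_a['NAXIS'] = 2
    hdr_a['NAXIS1'] = 100
    hdr_a['NAXIS2'] = 100
    hdr_a['CTYPE1'] = 'RA---TAN'
    hdr_a['CTYPE2'] = 'DEC--TAN'
    hdr_a['CRVAL1'] = 10.0
    hdr_a['CRVAL2'] = 0.0
    hdr_a['CRPIX1'] = 50.0
    hdr_a['CRPIX2'] = 50.0
    hdr_a['CDELT1'] = -0.01
    hdr_a['CDELT2'] = 0.01
    hdr_b = hdr_a.copy()
    hdr_b['CRVAL1'] = 10.5
    print(repr(header_overlap(hdr_a, hdr_b)))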
|
82417
|
import glm, random
def generateVoxelPositions(width, height, depth):
blockSize = 1.0
noiseScale = 20.0
amplitude = 20.0
offset = random.randrange(0, 1000000)
data = []
for x in range(width):
for y in range(height):
for z in range(depth):
noise = glm.perlin(glm.vec3(x/noiseScale + offset, y/noiseScale + offset, z/noiseScale + offset)) * amplitude
if noise >= 0.5:
data.append([x*blockSize, y*blockSize, z*blockSize])
return data
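# --- Hedged usage sketch (not part of the original file): count how many of the
# 16*16*16 candidate positions pass the Perlin-noise threshold above.
if __name__ == "__main__":
    voxels = generateVoxelPositions(16, 16, 16)
    print("kept %d of %d voxel positions" % (len(voxels), 16 * 16 * 16))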
|
82430
|
import os
import sys
from pathlib import Path
import numpy as np
import pandas as pd
from data_pipeline.config import settings
from data_pipeline.utils import (
download_file_from_url,
get_module_logger,
)
logger = get_module_logger(__name__)
def check_score_data_source(
score_csv_data_path: Path,
score_data_source: str,
) -> None:
"""Checks if census data is present, and exits gracefully if it doesn't exist. It will download it from S3
if census_data_source is set to "aws"
Args:
score_csv_data_path (str): Path for local Score CSV data
score_data_source (str): Source for the score data
Options:
- local: fetch census data from the local data directory
- aws: fetch census from AWS S3 J40 data repository
Returns:
None
"""
TILE_SCORE_CSV_S3_URL = (
settings.AWS_JUSTICE40_DATAPIPELINE_URL
+ "/data/score/csv/tiles/usa.csv"
)
TILE_SCORE_CSV = score_csv_data_path / "tiles" / "usa.csv"
    # download from s3 if score_data_source is aws
if score_data_source == "aws":
logger.info("Fetching Score Tile data from AWS S3")
download_file_from_url(
file_url=TILE_SCORE_CSV_S3_URL, download_file_name=TILE_SCORE_CSV
)
else:
# check if score data is found locally
if not os.path.isfile(TILE_SCORE_CSV):
logger.info(
"No local score tiles data found. Please use '-d aws` to fetch from AWS"
)
sys.exit()
def floor_series(series: pd.Series, number_of_decimals: int) -> pd.Series:
"""Floors all non-null numerical values to a specific number of decimal points
Args:
series (pd.Series): Input pandas series
number_of_decimals (int): Number of decimal points to floor all numerical values to
Returns:
floored_series (pd.Series): A Pandas Series of numerical values with appropriate number of decimal points
"""
# we perform many operations using the division operator
# as well as elementwise multiplication. The result of such
# operations can introduce such values, below, due to numerical
# instability. This results in unsafe type inference for numpy
# float types - exacerbated by panda's type inference engine.
# Hence, to handle such offending values we default to None
# Please see the reference, below, on nullable integer types for more details
unacceptable_values = [-np.inf, np.inf, "None", np.nan]
mapping = {
unacceptable_value: None for unacceptable_value in unacceptable_values
}
# ensure we are working with a numpy array (which is really what a pandas series is)
if not isinstance(series, pd.Series):
raise TypeError(
f"Argument series must be of type pandas series, not of type {type(series).__name__}."
)
# raise exception for handling empty series
if series.empty:
raise ValueError("Empty series provided.")
# if we have any values, just replace them with None
if series.isin(unacceptable_values).any():
series.replace(mapping, regex=False, inplace=True)
multiplication_factor = 10 ** number_of_decimals
# In order to safely cast NaNs
# First coerce series to float type: series.astype(float)
# Please see here:
# https://pandas.pydata.org/pandas-docs/stable/user_guide/integer_na.html#nullable-integer-data-type
product_for_numerator = np.floor(
series.astype(float) * multiplication_factor
)
floored_series = np.where(
series.isnull(),
# For all null values default to null
None,
# The other default condition - floor non-null values
product_for_numerator / multiplication_factor,
)
return floored_series
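# --- Hedged usage sketch (not part of the original module; values are synthetic
# and it assumes the data_pipeline imports above resolve). floor_series() floors
# finite values to the requested precision and maps inf/NaN entries to None.
if __name__ == "__main__":
    demo = pd.Series([1.2345, 2.9999, np.inf, np.nan])
    print(floor_series(demo, number_of_decimals=2))
    # prints a numpy object array: [1.23 2.99 None None]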
|
82453
|
import numpy as np
#from cvxopt import matrix
import pickle
from l1ls import l1ls
import copy
import pdb
import os
feats_file = 'gt_feats.pkl'
mode = 'gt'
with open(feats_file, 'rb') as f:
feats = pickle.load(f)
num_classes = len(feats)
dicts_list = []
dicts_num = 192
max_iters = 25
min_tol = 1e-2
lamda = 1e-3
learn_dicts_list = []
learn_alpha_list = []
error_pre = 0
error_now = 0
error_list = []
for i in range(num_classes):
error = []
feat = feats[i]
init_dict = np.random.randn(feat.shape[0], dicts_num)
learn_dict = None
norm = np.linalg.norm(init_dict, axis=0, keepdims=True)
init_dict = init_dict / norm
print('Begin learn class {} \n'.format(i))
num_sample = feat.shape[1]
for k in range(max_iters):
alpha = []
if k == 0:
dict = init_dict
else:
dict = learn_dict
for j in range(feat.shape[1]):
[x, status, hist] = l1ls(dict, feat[:,j], lamda, quiet=True)
if 'Failed' in status:
print('L1 normalization not solved!')
alpha.append(x.reshape(-1,1))
alpha = np.concatenate(alpha, axis=1)
recon_feat = np.matmul(dict, alpha)
learn_dict = []
for j in range(dict.shape[1]):
y = feat - (recon_feat - dict[:,[j]].reshape(-1,1) * alpha[[j],:].reshape(1,-1))
d_j = np.matmul(y, alpha[j, :].reshape(-1, 1))
norm_d_j = d_j / np.linalg.norm(d_j)
learn_dict.append(norm_d_j.reshape(-1, 1))
learn_dict = np.concatenate(learn_dict, axis=1)
recon_error = ((feat - np.matmul(learn_dict, alpha))**2).sum() / num_sample
co_error = np.abs(alpha).sum() * lamda / num_sample
error.append([recon_error, co_error])
error_pre = error_now
error_now = recon_error + co_error
print('iter: {} error: {} {} \n'.format(k, recon_error, co_error))
if abs(error_now - error_pre) < min_tol:
break
learn_dicts_list.append(learn_dict)
learn_alpha_list.append(alpha)
error_list.append(error)
dict_file = os.path.join(os.path.dirname(feats_file), mode + '_learn_dicts_'+ str(lamda) +'.pkl')
alpha_file = os.path.join(os.path.dirname(feats_file), mode +'_alpha_' + str(lamda) +'.pkl')
error_file = os.path.join(os.path.dirname(feats_file), mode +'_error_' + str(lamda) +'.pkl')
with open(dict_file, 'wb') as f:
pickle.dump(learn_dicts_list, f)
with open(alpha_file, 'wb') as f:
pickle.dump(learn_alpha_list, f)
with open(error_file, 'wb') as f:
pickle.dump(error_list, f)
|
82483
|
import seaborn as sns
import pandas
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
# Load the file
salary = pandas.read_csv('data-salary.txt')
# Check the first 5 rows of the data
print(salary.head())
# Check a summary of the data
print(salary.describe())
# Plot the data
sns.scatterplot(
x='X',
y='Y',
data=salary
)
plt.show()
# Simple linear regression with Seaborn
sns.lmplot("X","Y",salary)
plt.show()
# Simple linear regression with sklearn
x = salary[['X']]
y = salary[['Y']]
model_lr = LinearRegression()
model_lr.fit(x, y)
# Coefficient
print(model_lr.coef_)
# Intercept
print(model_lr.intercept_)
# Coefficient of determination (R^2)
print(model_lr.score(x, y))
|
82497
|
import os
import sys
from multiprocessing import Process
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "src"))
cwd = os.getcwd()
from supervisor import supervisor
if __name__ == "__main__":
args = {}
dir_path = os.path.dirname(os.path.abspath(__file__))
args["ext_config_file_path"] = os.path.join(dir_path, "coco_squeezeDet_config.json")
supervisor.start(args)
|
82526
|
from torchmm.models.retrieval.scan import SCAN
from torchmm.models.retrieval.sgraf import SGRAF
from torchmm.models.retrieval.vsepp import VSEPP
from torchmm.models.retrieval.imram import IMRAM
from torchmm.models.retrieval.bfan import BFAN
from torchmm.models.captioning.aoanet import AoANet
from torchmm.models.multitask.vilbert import VILBERTFinetune, VILBERTPretrain
from torchmm.models.multitask.layers.bert_config import BertConfig
from torchmm.models.fusion.early import EarlyFusion
from torchmm.models.fusion.late import LateFusion
from torchmm.models.fusion.lmf import LMFFusion
from torchmm.models.fusion.tmc import TMCFusion
from torchmm.models.fusion.cmml import CMML
from torchmm.models.captioning.nic import NIC
__all__ = [
'SCAN',
'SGRAF',
'VSEPP',
'IMRAM',
'BFAN',
'CMML',
'NIC',
'AoANet',
'BertConfig',
'VILBERTPretrain',
'VILBERTFinetune',
'EarlyFusion',
'LateFusion',
'LMFFusion',
'TMCFusion'
]
|
82545
|
import os
import sys
import numpy as np
import scipy.io as sio
import more_itertools as mit
chan = ['Fp1','AF3','F3','F7','FC5','FC1','C3','T7','CP5','CP1','P3','P7','PO3','O1','Oz','Pz','Fp2','AF4','Fz','F4','F8','FC6','FC2','Cz','C4','T8','CP6','CP2','P4','P8','PO4','O2']
nLabel, nTrial, nUser, nChannel, nTime = 4, 40, 1, 32, 8064
print ("Program started \n")
m=[]
fout_labels0 = open("labels_0.dat",'w')
fout_labels1 = open("labels_1.dat",'w')
for i in range(nUser):#4, 40, 32, 32, 8064
if i < 10:
name = '%0*d' % (2,i+1)
else:
name = i+1
fname = "s"+str(name)+".mat"
x = sio.loadmat(fname)
print (fname)
for tr in range(nTrial):
fout_data = open("features_raw.csv",'w')
for dat in range(384,nTime):
for ch in range(nChannel):
m.append(str(x['data'][tr][ch][dat]))
windows = list(mit.windowed(m, n=512, step=256))
if(x['labels'][tr][0]<4.5):
fout_labels0.write(str(1) + "\n");
else:
fout_labels0.write(str(2) + "\n");
if(x['labels'][tr][1]<4.5):
fout_labels1.write(str(1) + "\n");
else:
fout_labels1.write(str(2) + "\n");
#Normalizing the data between [0,1]
windows.append(tuple([x for x in range(512)]))
for l in range(928):
for n in range(512):
if n==511:
fout_data.write(str(windows[l][n]))
else:
fout_data.write(str(windows[l][n])+",")
fout_data.write("\n")
fout_data.close()
#maximum=np.amax(array)
#minimum=np.amin(array)
#normalise all data in the array except the first value of each row
array = np.genfromtxt('features_raw.csv',delimiter=',')
maximum=array[:928, 1:].max()
minimum=array[:928, 1:].min()
#normalise all data in the array except the first value of each row
a = (array[:928,:] - minimum)/(maximum - minimum)
np.savetxt("features_raw.csv", a, delimiter=",", fmt='%s')
os.system('python entropy1.py')
print("user "+ str(i+1) +" trail"+ str(tr+1))
fout_labels0.close()
fout_labels1.close()
print ("\n"+"Print Successful")
|
82556
|
def reverse(string):
return string[::-1]
print('Gimmie some word')
s = input()
print(reverse(s))
|
82568
|
import numpy as np
import tensorflow as tf
from DeepSparseCoding.tf1x.ops.init_ops import L2NormalizedTruncatedNormalInitializer
from DeepSparseCoding.tf1x.utils.trainable_variable_dict import TrainableVariableDict
class AeModule(object):
def __init__(self, data_tensor, layer_types, enc_channels, dec_channels, patch_size,
conv_strides, w_decay_mult, w_norm_mult, act_funcs, dropout, tie_dec_weights,
w_init_type, variable_scope="ae"):
"""
Autoencoder module
Inputs:
data_tensor
enc_channels [list of ints] the number of output channels per encoder layer
Last entry is the number of latent units
dec_channels [list of ints] the number of output channels per decoder layer
Last entry must be the number of input pixels for FC layers and channels for CONV layers
w_decay_mult: tradeoff multiplier for weight decay loss
w_norm_mult: tradeoff multiplier for weight norm loss (asks weight norm to == 1)
act_funcs: activation functions
dropout: specifies the keep probability or None
conv: if True, do convolution
conv_strides: list of strides for convolution [batch, y, x, channels]
patch_size: number of (y, x) inputs for convolutional patches
w_init_type: [str] which w_init to use, options are 'normal', 'xavier', or 'l2_normed'
variable_scope: specifies the variable_scope for the module
Outputs:
dictionary
"""
self.conv_strides = conv_strides
self.variable_scope = variable_scope
self.trainable_variables = TrainableVariableDict()
self.data_tensor = data_tensor
self.enc_channels = enc_channels
self.dec_channels = dec_channels
self.patch_size_y = [int(size[0]) for size in patch_size]
self.patch_size_x = [int(size[1]) for size in patch_size]
self.dropout = dropout
self.w_decay_mult = w_decay_mult
self.w_norm_mult = w_norm_mult
self.act_funcs = act_funcs
self.num_enc_layers = len(self.enc_channels)
self.num_dec_layers = len(self.dec_channels)
self.tie_dec_weights = tie_dec_weights
self.enc_layer_types = layer_types[:self.num_enc_layers]
self.dec_layer_types = layer_types[self.num_enc_layers:]
self.layer_types = [self.enc_layer_types, self.dec_layer_types]
self.num_enc_conv_layers = self.enc_layer_types.count("conv")
self.num_dec_conv_layers = self.dec_layer_types.count("conv")
self.num_conv_layers = self.num_enc_conv_layers + self.num_dec_conv_layers
self.num_enc_fc_layers = self.enc_layer_types.count("fc")
self.num_dec_fc_layers = self.dec_layer_types.count("fc")
self.num_fc_layers = self.num_enc_fc_layers + self.num_dec_fc_layers
self.num_layers = self.num_enc_layers + self.num_dec_layers
data_ndim = len(data_tensor.get_shape().as_list())
self.all_strides = [] # Full list of strides, including FC layers
for enc_conv_id in range(self.num_enc_conv_layers):
self.all_strides.append(self.conv_strides[enc_conv_id])
for enc_fc_id in range(self.num_enc_fc_layers):
self.all_strides.append(None)
for dec_fc_id in range(self.num_dec_fc_layers):
self.all_strides.append(None)
for dec_conv_id in range(self.num_dec_conv_layers):
self.all_strides.append(self.conv_strides[self.num_enc_conv_layers + dec_conv_id])
if data_ndim == 2:
self.batch_size, self.num_pixels = self.data_tensor.get_shape()
else:
self.batch_size, self.num_pixels_y, self.num_pixels_x, self.num_channels = \
self.data_tensor.get_shape()
self.num_pixels = self.num_pixels_y * self.num_pixels_x * self.num_channels
self.w_init_type = w_init_type
# Parameter checks
if self.enc_layer_types[0] == "conv":
assert data_ndim == 4, (
"Module requires data_tensor to have shape" +
" [batch, num_pixels_y, num_pixels_x, num_features] if first layer is conv")
else:
assert data_ndim == 2, (
"Module requires data_tensor to have shape [batch, num_pixels]")
if(self.tie_dec_weights):
assert self.num_enc_layers == self.num_dec_layers, (
"num_enc_layers must equal num_dec_layers, but are %g and %g"%(
self.num_enc_layers, self.num_dec_layers))
if self.num_enc_conv_layers > 0 and self.num_enc_fc_layers > 0:
assert np.all("conv" in self.enc_layer_types[:self.num_enc_conv_layers]), \
("Encoder conv layers must come before fc layers")
if self.num_dec_conv_layers > 0 and self.num_dec_fc_layers > 0:
assert np.all("fc" in self.dec_layer_types[:self.num_dec_fc_layers]), \
("Decoder fc layers must come before conv layers")
assert self.num_enc_layers == len(self.enc_layer_types), \
("The number of encoder channels must match the number of encoder layer types")
assert self.num_dec_layers == len(self.dec_layer_types), \
("The number of decoder channels must match the number of decoder layer types")
assert all([layer_type in ["conv", "fc"] for layer_type in layer_types]), \
("All layer_types must be conv or fc")
assert len(self.patch_size_y) == self.num_conv_layers, \
("patch_size_y must be a list of size " + str(self.num_conv_layers))
assert len(self.patch_size_x) == self.num_conv_layers, \
("patch_size_x must be a list of size " + str(self.num_conv_layers))
assert len(self.conv_strides) == self.num_conv_layers, \
("conv_strides must be a list of size " + str(self.num_conv_layers))
assert len(self.act_funcs) == self.num_layers, \
("act_funcs parameter must be a list of size " + str(self.num_layers))
self.build_graph()
def compute_weight_norm_loss(self):
with tf.compat.v1.variable_scope("w_norm"):
w_norm_list = []
for w in self.w_list:
reduc_axis = np.arange(1, len(w.get_shape().as_list()))
w_norm = tf.reduce_sum(input_tensor=tf.square(1 - tf.reduce_sum(input_tensor=tf.square(w), axis=reduc_axis)))
w_norm_list.append(w_norm)
norm_loss = tf.multiply(0.5 * self.w_norm_mult, tf.add_n(w_norm_list))
return norm_loss
def compute_weight_decay_loss(self):
with tf.compat.v1.variable_scope("unsupervised"):
w_decay_list = [tf.reduce_sum(input_tensor=tf.square(w)) for w in self.w_list]
decay_loss = tf.multiply(0.5*self.w_decay_mult, tf.add_n(w_decay_list))
return decay_loss
def compute_recon_loss(self, reconstruction):
with tf.compat.v1.variable_scope("unsupervised"):
# If the encoder and decoder are different types (conv vs fc) then there may be a shape mismatch
recon_shape = reconstruction.get_shape()
data_shape = self.data_tensor.get_shape()
if(recon_shape.ndims != data_shape.ndims):
if(np.prod(recon_shape.as_list()[1:]) == np.prod(data_shape.as_list()[1:])):
reconstruction = tf.reshape(reconstruction, tf.shape(input=self.data_tensor))
else:
assert False, ("Reconstructiion and input must have the same size")
reduc_dim = list(range(1, len(reconstruction.shape)))# We want to avg over batch
recon_loss = 0.5 * tf.reduce_mean(
input_tensor=tf.reduce_sum(input_tensor=tf.square(tf.subtract(reconstruction, self.data_tensor)),
axis=reduc_dim), name="recon_loss")
return recon_loss
def compute_total_loss(self):
with tf.compat.v1.variable_scope("loss") as scope:
self.loss_dict = {"recon_loss":self.compute_recon_loss(self.reconstruction),
"weight_decay_loss":self.compute_weight_decay_loss(),
"weight_norm_loss":self.compute_weight_norm_loss()}
self.total_loss = tf.add_n([loss for loss in self.loss_dict.values()], name="total_loss")
def flatten_feature_map(self, feature_map):
"""
Flatten input tensor from [batch, y, x, f] to [batch, y*x*f]
"""
map_shape = feature_map.get_shape()
if(map_shape.ndims == 4):
(batch, y, x, f) = map_shape
prev_input_features = int(y * x * f)
resh_map = tf.reshape(feature_map, [-1, prev_input_features])
elif(map_shape.ndims == 2):
resh_map = feature_map
else:
assert False, ("Input feature_map has incorrect ndims")
return resh_map
def get_dec_shapes(self, input_shape):
# The following assumes decoder fc->conv operation mirrors encoder conv->fc
conv_output_length = tf.python.keras.utils.conv_utils.conv_output_length
in_y, in_x, in_f = input_shape[1:]
dec_conv_strides = self.conv_strides[:-self.num_dec_conv_layers]
filter_size_y = self.patch_size_y[:-self.num_dec_conv_layers]
filter_size_x = self.patch_size_x[:-self.num_dec_conv_layers]
dec_channels = self.dec_channels[:self.num_dec_conv_layers][::-1]
last_enc_conv_channels = self.enc_channels[self.num_enc_conv_layers-1]
dec_channels[-1] = last_enc_conv_channels
layer_shapes = [[int(in_y), int(in_x), int(in_f)]]
for layer_id in range(self.num_dec_conv_layers):
out_y = conv_output_length(
input_length=layer_shapes[layer_id][0],
filter_size=filter_size_y[layer_id],
padding="same",
stride=dec_conv_strides[layer_id][1])
out_x = conv_output_length(
input_length=layer_shapes[layer_id][1],
filter_size=filter_size_x[layer_id],
padding="same",
stride=dec_conv_strides[layer_id][2])
layer_shapes.append([int(out_y), int(out_x), int(dec_channels[layer_id])])
return layer_shapes[::-1]
def compute_pre_activation(self, layer_id, input_tensor, w, b, conv, decode):
if conv:
strides = self.all_strides[layer_id]
if decode:
height_const = tf.shape(input=input_tensor)[1] % strides[1]
out_height = (tf.shape(input=input_tensor)[1] * strides[1]) - height_const
width_const = tf.shape(input=input_tensor)[2] % strides[2]
out_width = (tf.shape(input=input_tensor)[2] * strides[2]) - width_const
out_shape = tf.stack([tf.shape(input=input_tensor)[0], # Batch
out_height, # Height
out_width, # Width
tf.shape(input=w)[2]]) # Channels
pre_act = tf.add(tf.nn.conv2d_transpose(input_tensor, w, out_shape, strides,
padding="SAME"), b)
else:
pre_act = tf.add(tf.nn.conv2d(input=input_tensor, filters=w, strides=strides, padding="SAME"), b)
else:
pre_act = tf.add(tf.matmul(input_tensor, w), b)
return pre_act
def layer_maker(self, layer_id, input_tensor, activation_function, w_shape,
keep_prob=1.0, conv=False, decode=False, tie_dec_weights=False, name_suffix=""):
"""
Make layer that does act(u*w+b) where * is a dot product or convolution
Example case for w_read_id logic:
layer_id: [0 1 2 3 4] [5 6 7 8 9]
10-6 10-7 10-8 10-9 10-10
weight_id: [0 1 2 3 4] [ 4 3 2 1 0 ]
num_layers: 10
weight_id = num_layers - (layer_id + 1)
"""
with tf.compat.v1.variable_scope("layer"+str(layer_id), reuse=tf.compat.v1.AUTO_REUSE) as scope:
if tie_dec_weights:
w_read_id = self.num_layers - (layer_id+1)
else:
w_read_id = layer_id
name_prefix = "conv_" if conv else "fc_"
w_name = name_prefix+"w_"+str(w_read_id)+name_suffix
if self.w_init_type.lower() == "normal":
w = tf.compat.v1.get_variable(name=w_name, shape=w_shape, dtype=tf.float32,
initializer=self.w_normal_init, trainable=True)
elif self.w_init_type.lower() == "xavier":
w = tf.compat.v1.get_variable(name=w_name, shape=w_shape, dtype=tf.float32,
initializer=self.w_xavier_init, trainable=True)
elif self.w_init_type.lower() == "l2_normed":
if decode:
w = tf.compat.v1.get_variable(name=w_name, shape=w_shape, dtype=tf.float32,
initializer=self.w_normed_dec_init, trainable=True)
else:
w = tf.compat.v1.get_variable(name=w_name, shape=w_shape, dtype=tf.float32,
initializer=self.w_normed_enc_init, trainable=True)
else:
assert False, ("w_init_type parameter must be 'normal', 'xavier', or 'l2_normed', not %s"%(
self.w_init_type))
b_name = name_prefix+"b_"+str(layer_id)+name_suffix
if conv and decode:
b_shape = w_shape[-2]
else:
b_shape = w_shape[-1]
b = tf.compat.v1.get_variable(name=b_name, shape=b_shape,
dtype=tf.float32, initializer=self.b_init, trainable=True)
pre_act = self.compute_pre_activation(layer_id, input_tensor, w, b, conv, decode)
output_tensor = activation_function(pre_act)
output_tensor = tf.nn.dropout(output_tensor, rate=1-keep_prob)
return output_tensor, w, b
def build_encoder(self, input_tensor, activation_functions):
enc_u_list = [input_tensor]
enc_w_list = []
enc_b_list = []
prev_input_features = input_tensor.get_shape().as_list()[-1]
# Make conv layers first
for layer_id in range(self.num_enc_conv_layers):
w_shape = [self.patch_size_y[layer_id], self.patch_size_x[layer_id],
int(prev_input_features), int(self.enc_channels[layer_id])]
u_out, w, b = self.layer_maker(layer_id, enc_u_list[layer_id],
activation_functions[layer_id], w_shape, keep_prob=self.dropout[layer_id],
conv=True, decode=False, tie_dec_weights=self.tie_dec_weights)
enc_u_list.append(u_out)
enc_w_list.append(w)
enc_b_list.append(b)
prev_input_features = int(self.enc_channels[layer_id])
# Make fc layers second
for enc_fc_layer_id in range(self.num_enc_fc_layers):
layer_id = enc_fc_layer_id + self.num_enc_conv_layers
if enc_fc_layer_id == 0: # Input needs to be reshaped to [batch, num_units] for FC layers
in_tensor = self.flatten_feature_map(enc_u_list[-1])
prev_input_features = in_tensor.get_shape().as_list()[1]
else:
in_tensor = enc_u_list[layer_id]
w_shape = [int(prev_input_features), int(self.enc_channels[layer_id])]
u_out, w, b = self.layer_maker(layer_id, in_tensor, activation_functions[layer_id],
w_shape, keep_prob=self.dropout[layer_id], conv=False, decode=False,
tie_dec_weights=self.tie_dec_weights)
enc_u_list.append(u_out)
enc_w_list.append(w)
enc_b_list.append(b)
prev_input_features = int(self.enc_channels[layer_id])
return enc_u_list, enc_w_list, enc_b_list
def build_decoder(self, input_tensor, activation_functions):
dec_u_list = [input_tensor]
dec_w_list = []
dec_b_list = []
# Build FC layers first
for dec_layer_id in range(self.num_dec_fc_layers):
layer_id = self.num_enc_layers + dec_layer_id
input_shape = dec_u_list[dec_layer_id].get_shape()
if input_shape.ndims == 4: # if final enc layer was conv then flatten
in_tensor = self.flatten_feature_map(dec_u_list[dec_layer_id])
else: # final enc layer was fc
in_tensor = dec_u_list[dec_layer_id]
if dec_layer_id == self.num_dec_fc_layers - 1 and self.num_dec_conv_layers > 0:
# If there are decoder conv layers, then
# the last decoder FC layer needs to output a vector of the correct length
# correct_length = feature_map_y * feature_map_x * feature_map_f
# where feature_map_f = self.dec_channels[dec_layer_id]
conv_layer_shapes = self.get_dec_shapes(self.data_tensor.get_shape())
out_channels = np.prod(conv_layer_shapes[0])
else:
out_channels = self.dec_channels[dec_layer_id]
w_shape = [in_tensor.get_shape()[-1], out_channels]
u_out, w, b = self.layer_maker(layer_id, in_tensor,
activation_functions[dec_layer_id], w_shape, keep_prob=self.dropout[layer_id],
conv=False, decode=True, tie_dec_weights=self.tie_dec_weights)
dec_u_list.append(u_out)
dec_w_list.append(w)
dec_b_list.append(b)
# Build conv layers second
for dec_conv_layer_id in range(self.num_dec_conv_layers):
dec_layer_id = self.num_dec_fc_layers + dec_conv_layer_id
layer_id = self.num_enc_layers + dec_layer_id
input_shape = dec_u_list[dec_layer_id].get_shape()
if input_shape.ndims == 4: # prev layer was conv
(batch, y, x, f) = input_shape
in_tensor = dec_u_list[dec_layer_id]
w_shape = [
self.patch_size_y[self.num_enc_conv_layers + dec_conv_layer_id],
self.patch_size_x[self.num_enc_conv_layers + dec_conv_layer_id],
self.dec_channels[dec_layer_id],
f]
else: # prev layer was fc
conv_layer_shapes = self.get_dec_shapes(self.data_tensor.get_shape())
new_shape = [-1] + conv_layer_shapes[dec_conv_layer_id]
in_tensor = tf.reshape(dec_u_list[dec_layer_id], new_shape)
w_shape = [
self.patch_size_y[self.num_enc_conv_layers + dec_conv_layer_id],
self.patch_size_x[self.num_enc_conv_layers + dec_conv_layer_id],
self.dec_channels[dec_layer_id],
new_shape[-1]]
u_out, w, b = self.layer_maker(layer_id, in_tensor, activation_functions[dec_layer_id],
w_shape, keep_prob=self.dropout[layer_id], conv=True, decode=True,
tie_dec_weights=self.tie_dec_weights)
dec_u_list.append(u_out)
dec_w_list.append(w)
dec_b_list.append(b)
return dec_u_list, dec_w_list, dec_b_list
def build_graph(self):
with tf.compat.v1.variable_scope(self.variable_scope) as scope:
with tf.compat.v1.variable_scope("weight_inits") as scope:
self.w_normal_init = tf.compat.v1.initializers.truncated_normal(mean=0.0, stddev=0.001)
self.w_xavier_init = tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="truncated_normal")
self.w_normed_enc_init = L2NormalizedTruncatedNormalInitializer(mean=0.0, stddev=0.001,
axis=0, epsilon=1e-12, dtype=tf.float32) #TODO: Fix axis to be general to conv layers
self.w_normed_dec_init = L2NormalizedTruncatedNormalInitializer(mean=0.0, stddev=0.001,
axis=-1, epsilon=1e-12, dtype=tf.float32)
self.b_init = tf.compat.v1.initializers.constant(1e-4)
self.u_list = [self.data_tensor]
self.w_list = []
self.b_list = []
enc_u_list, enc_w_list, enc_b_list = self.build_encoder(self.u_list[0],
self.act_funcs[:self.num_enc_layers])
self.u_list += enc_u_list[1:] # build_encoder() will place self.u_list[0] as enc_u_list[0]
self.w_list += enc_w_list
self.b_list += enc_b_list
with tf.compat.v1.variable_scope("inference") as scope:
self.a = tf.identity(enc_u_list[-1], name="activity")
dec_u_list, dec_w_list, dec_b_list = self.build_decoder(self.a,
self.act_funcs[self.num_enc_layers:])
self.u_list += dec_u_list[1:] # build_decoder() will place self.u_list[-1] as dec_u_list[0]
if not self.tie_dec_weights:
self.w_list += dec_w_list
self.b_list += dec_b_list
with tf.compat.v1.variable_scope("norm_weights") as scope:
w_enc_norm_dim = list(range(len(self.w_list[0].get_shape().as_list())-1))
self.norm_enc_w = self.w_list[0].assign(tf.nn.l2_normalize(self.w_list[0],
axis=w_enc_norm_dim, epsilon=1e-8, name="row_l2_norm"))
self.norm_dec_w = self.w_list[-1].assign(tf.nn.l2_normalize(self.w_list[-1],
axis=-1, epsilon=1e-8, name="col_l2_norm"))
self.norm_w = tf.group(self.norm_enc_w, self.norm_dec_w, name="l2_norm_weights")
for w,b in zip(self.w_list, self.b_list):
self.trainable_variables[w.name] = w
self.trainable_variables[b.name] = b
with tf.compat.v1.variable_scope("output") as scope:
self.reconstruction = tf.identity(self.u_list[-1], name="reconstruction")
self.compute_total_loss()
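# --- Illustrative sketch (not part of the original module) ---
# The values below are made up; they only demonstrate two pieces of bookkeeping
# described in the docstrings above: how `layer_types` is split into encoder and
# decoder halves, and how `layer_maker` maps a decoder layer back to its tied
# encoder weight via weight_id = num_layers - (layer_id + 1).
if __name__ == "__main__":
    layer_types = ["conv", "conv", "fc", "fc", "conv", "conv"]  # hypothetical 3 enc + 3 dec layers
    enc_channels = [32, 64, 128]
    dec_channels = [64, 32, 3]
    num_enc_layers = len(enc_channels)
    num_layers = num_enc_layers + len(dec_channels)
    print("encoder layer types:", layer_types[:num_enc_layers])
    print("decoder layer types:", layer_types[num_enc_layers:])
    for layer_id in range(num_layers):
        print("layer", layer_id, "reads tied weight", num_layers - (layer_id + 1))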
|
82602
|
import collections
import logging
import runpy
import typing
from pathlib import Path
import HABApp
log = logging.getLogger('HABApp.Rules')
class RuleFile:
def __init__(self, rule_manager, name: str, path: Path):
from .rule_manager import RuleManager
assert isinstance(rule_manager, RuleManager)
self.rule_manager = rule_manager
self.name: str = name
self.path: Path = path
self.rules = {} # type: typing.Dict[str, HABApp.Rule]
self.class_ctr: typing.Dict[str, int] = collections.defaultdict(lambda: 1)
def suggest_rule_name(self, obj) -> str:
# if there is already a name set we make no suggestion
if getattr(obj, 'rule_name', '') != '':
return obj.rule_name.replace('ü', 'ue').replace('ö', 'oe').replace('ä', 'ae')
# create unique name
# <class '__main__.MyRule'>
parts = str(type(obj)).split('.')
name = parts[-1][:-2]
found = self.class_ctr[name]
self.class_ctr[name] += 1
return f'{name:s}.{found:d}' if found > 1 else f'{name:s}'
def check_all_rules(self):
for rule in self.rules.values(): # type: HABApp.Rule
rule._check_rule()
def unload(self):
# If we don't have any rules we can not unload
if not self.rules:
return None
# unload all registered callbacks
for rule in self.rules.values(): # type: HABApp.Rule
rule._unload()
log.debug(f'File {self.name} successfully unloaded!')
return None
def __process_tc(self, tb: list):
tb.insert(0, f"Could not load {self.path}!")
return [line.replace('<module>', self.path.name) for line in tb]
def create_rules(self, created_rules: list):
# It seems like python 3.8 doesn't allow path like objects any more:
# https://github.com/spacemanspiff2007/HABApp/issues/111
runpy.run_path(str(self.path), run_name=str(self.path), init_globals={
'__HABAPP__RUNTIME__': self.rule_manager.runtime,
'__HABAPP__RULE_FILE__': self,
'__HABAPP__RULES': created_rules,
})
def load(self) -> bool:
created_rules: typing.List[HABApp.rule.Rule] = []
ign = HABApp.core.wrapper.ExceptionToHABApp(logger=log)
ign.proc_tb = self.__process_tc
with ign:
self.create_rules(created_rules)
if ign.raised_exception:
# unload all rule instances which might have already been created otherwise they might
# still listen to events and do stuff
for rule in created_rules:
with ign:
rule._unload()
return False
if not created_rules:
log.warning(f'Found no instances of HABApp.Rule in {str(self.path)}')
return True
with ign:
for rule in created_rules:
# ensure that we have a rule name
rule.rule_name = self.suggest_rule_name(rule)
# rule name must be unique for every file
if rule.rule_name in self.rules:
raise ValueError(f'Rule name must be unique!\n"{rule.rule_name}" is already used!')
self.rules[rule.rule_name] = rule
log.info(f'Added rule "{rule.rule_name}" from {self.name}')
if ign.raised_exception:
# unload all rule instances which might have already been created otherwise they might
# still listen to events and do stuff
for rule in created_rules:
with ign:
rule._unload()
return False
return True
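# --- Illustrative sketch (not part of the original file) ---
# suggest_rule_name() derives a default name from str(type(obj)), which looks like
# "<class '__main__.MyRule'>". The class below is a made-up stand-in, used only to
# show the string manipulation; it is not a real HABApp rule.
if __name__ == "__main__":
    class MyRule:
        pass
    parts = str(type(MyRule())).split('.')
    print(parts[-1][:-2])  # strips the trailing "'>" -> MyRule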
|
82638
|
from plugin.scrobbler.methods.s_logging import Logging
from plugin.scrobbler.methods.s_websocket import WebSocket
__all__ = ['Logging', 'WebSocket']
|
82655
|
import os
import sys
import getopt
"""
Extracts the audio from our game videos. This script expects that ffmpeg is installed and available on the PATH.
Usage: python extract_wav.py -i <path_to_folder_where_mp4_files_are>
"""
def parse_arguments(argv):
input_path = ''
try:
opts, args = getopt.getopt(argv, "hi:", ["ifile="])
except getopt.GetoptError:
print('python extract_wav.py -i <path>')
sys.exit(2)
if not opts:
print('python extract_wav.py -i <path>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('python extract_wav.py -i <path>')
sys.exit()
elif opt in ("-i", "--ifile"):
input_path = arg
return input_path
def extract_wav(input_path):
for file in os.listdir(input_path):
if file.endswith(".mp4") or file.endswith(".MP4"):
file = os.path.join(input_path, file)
filename = os.path.splitext(file)[0]
print("Filename: ", filename)
"""
-map_channel:
The first 0 is the input file id
The next 1 is the stream specifier - should be the audio stream, 0 is video
The next 0 is the channel id
-ar 8000 resamples the channel to 8kHz
"""
os.system("ffmpeg -i {0} -map_channel 0.1.0 -ar 8000 {1}.wav".format(file, filename))
else:
continue
if __name__ == '__main__':
path = parse_arguments(sys.argv[1:])
extract_wav(path)
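# --- Illustrative helper (not part of the original script) ---
# Returns the ffmpeg command that extract_wav() would run for a single, made-up
# file path ("game_clip.mp4"), so the -map_channel / -ar flags explained above can
# be inspected without actually invoking ffmpeg.
def _example_ffmpeg_command():
    file = "game_clip.mp4"  # hypothetical input file
    filename = os.path.splitext(file)[0]
    return "ffmpeg -i {0} -map_channel 0.1.0 -ar 8000 {1}.wav".format(file, filename)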
|
82668
|
strategies = {}
def strategy(strategy_name: str):
"""Register a strategy name and strategy Class.
Use as a decorator.
Example:
@strategy('id')
class FindById:
...
Strategy Classes are used to build Elements Objects.
Arguments:
strategy_name (str): Name of the strategy to be registered.
"""
def wrapper(finder_class):
global strategies
strategies[strategy_name] = finder_class
return finder_class
return wrapper
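# --- Illustrative usage sketch (not part of the original module) ---
# "FindById" and the 'id' strategy name are made up; they only show how the
# decorator registers a class in the module-level `strategies` dict.
if __name__ == "__main__":
    @strategy('id')
    class FindById:
        pass
    print(strategies['id'] is FindById)  # -> True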
|
82671
|
from .dvd import DVD
from .gopro import GOPRO
from .reds import REDS
#
from .build import build_dataset, list_datasets
__all__ = [k for k in globals().keys() if not k.startswith("_")]
|
82673
|
import pickle
import pytest
from taxi import projects
def test_legacy_projects_db(tmpdir):
projects_db_file = tmpdir.join(projects.ProjectsDb.PROJECTS_FILE)
local_projects_db = projects.LocalProjectsDb()
foo = pickle.dumps(local_projects_db)
with projects_db_file.open(mode='wb') as f:
f.write(foo)
p = projects.ProjectsDb(tmpdir.strpath)
with pytest.raises(projects.OutdatedProjectsDbException):
p.get_projects()
def test_outdated_projects_db(tmpdir):
# Simulate a projects db version change
projects.LocalProjectsDb.VERSION = 1
try:
p = projects.ProjectsDb(tmpdir.strpath)
p.update([])
finally:
projects.LocalProjectsDb.VERSION = 2
with pytest.raises(projects.OutdatedProjectsDbException):
p.get_projects()
|
82682
|
import h5py
import numpy as np
file = h5py.File('/data2/wt/openimages/vc_feature/1coco_train_all_bu_2.hdf5', 'r')
for keys in file:
feature = file[keys]['feature'][:]
np.save('/data2/wt/openimages/vc_feature/coco_vc_all_bu/'+keys+'.npy', feature)
|
82738
|
from FrameLibDocs.utils import write_json
from FrameLibDocs.classes import dParseAndBuild, Documentation
def main(docs):
"""
A simplified version of the qlookup used to display information about specific objects when hovered over in the umenu.
"""
docs.interfaces_dir.mkdir(exist_ok=True)
obj_lookup = docs.interfaces_dir / "FrameLib-obj-dlookup.json"
worker = dParseAndBuild()
refpages = [x for x in docs.refpages_dir.rglob("fl.*.xml")]
for ref in refpages:
worker.extract_from_refpage(ref)
write_json(obj_lookup, worker.d_master_dict)
if __name__ == "__main__":
main(Documentation())
|
82739
|
from joern.shelltool.TraversalTool import TraversalTool
from py2neo import neo4j
DEFAULT_TAGNAME = 'tag'
BATCH_SIZE = 1000
class JoernTag(TraversalTool):
def __init__(self, DESCRIPTION):
TraversalTool.__init__(self, DESCRIPTION)
self.argParser.add_argument("-t", "--tag", default = DEFAULT_TAGNAME)
self.inputPairs = []
def processLine(self, line):
# [nodeId, tagValue]
X = line.split('\t')
X = [int(X[0]), X[1]]
self.inputPairs.append(X)
if len(self.inputPairs) == BATCH_SIZE:
self.processBatch(self.inputPairs)
self.inputPairs = []
def processBatch(self, pairs):
self.writePairsToDatabase(pairs, self.args.tag)
def writePairsToDatabase(self, pairs, tagName):
batch = neo4j.WriteBatch(self.dbInterface.j.graphDb)
graphDbURL = self.dbInterface.j.getGraphDbURL()
if graphDbURL[-1] == '/': graphDbURL = graphDbURL[:-1]
for (nodeId, tagVal) in pairs:
nodeURL = graphDbURL + '/node/' + str(nodeId)
node = neo4j.Node(nodeURL)
batch.set_property(node, tagName, tagVal)
batch.submit()
def streamEnd(self):
if len(self.inputPairs) != 0:
self.processBatch(self.inputPairs)
|
82757
|
import os
import pathlib
def model_path(config, root="./saved"):
root = pathlib.Path(root)
filename = "{}".format(config.dataset)
# Dataset-specific keys
if config.dataset in ["CIFAR10"]:
filename += "_augm_{}".format(
config.augment,
)
# Model-specific keys
filename += "_model_{}".format(
config.model,
)
if "sa" in config.model:
filename += "_type_{}".format(config.attention_type)
if config.attention_type == "Local":
filename += "_patch_{}".format(config.patch_size)
filename += "_dpatt_{}_dpval_{}_activ_{}_norm_{}_white_{}".format(
config.dropout_att,
config.dropout_values,
config.activation_function,
config.norm_type,
config.whitening_scale,
)
# Optimization arguments
filename += "_optim_{}".format(config.optimizer)
if config.optimizer == "SGD":
filename += "_momentum_{}".format(config.optimizer_momentum)
filename += "_lr_{}_bs_{}_ep_{}_wd_{}_seed_{}_sched_{}".format(
config.lr,
config.batch_size,
config.epochs,
config.weight_decay,
config.seed,
config.scheduler,
)
if config.scheduler not in ["constant", "linear_warmup_cosine"]:
filename += "_schdec_{}".format(config.sched_decay_factor)
if config.scheduler == "multistep":
filename += "_schsteps_{}".format(config.sched_decay_steps)
# Comment
if config.comment != "":
filename += "_comment_{}".format(config.comment)
# Add correct termination
filename += ".pt"
# Create the output directory if needed and warn the user if the model file already exists and train is used.
os.makedirs(root, exist_ok=True)
path = root / filename
config.path = str(path)
if config.train and path.exists():
print("WARNING! The model exists in directory and will be overwritten")
|
82780
|
import can
bus1 = can.interface.Bus('can0', bustype='virtual')
bus2 = can.interface.Bus('can0', bustype='virtual')
msg1 = can.Message(arbitration_id=0xabcde, data=[1,2,3])
bus1.send(msg1)
msg2 = bus2.recv()
print(hex(msg1.arbitration_id))
print(hex(msg2.arbitration_id))
assert msg1.arbitration_id == msg2.arbitration_id
|
82798
|
import os
import Chamaeleo
from Chamaeleo.methods.default import BaseCodingAlgorithm
from Chamaeleo.methods.ecc import Hamming, ReedSolomon
from Chamaeleo.methods.fixed import Church
from Chamaeleo.utils.pipelines import RobustnessPipeline
if __name__ == "__main__":
root_path = os.path.dirname(Chamaeleo.__file__)
file_paths = {
"<NAME>.jpg": os.path.join(root_path, "data", "pictures", "<NAME>.jpg")
}
coding_schemes = {
"Base": BaseCodingAlgorithm(), "Church et al.": Church()
}
error_corrections = {
"None": None, "Hamming": Hamming(), "ReedSolomon": ReedSolomon()
}
needed_indices = [
True, True
]
pipeline = RobustnessPipeline(
coding_schemes=coding_schemes,
error_corrections=error_corrections,
needed_indices=needed_indices,
file_paths=file_paths,
nucleotide_insertion=0.001,
nucleotide_mutation=0.001,
nucleotide_deletion=0.001,
sequence_loss=0.001,
iterations=3,
segment_length=120,
index_length=16,
need_logs=True
)
pipeline.evaluate()
pipeline.output_records(type="string")
|
82877
|
import datetime
from collections.abc import Iterable
from dateutil import tz
from atpy.data.iqfeed.iqfeed_level_1_provider import get_splits_dividends
from atpy.data.iqfeed.util import *
from atpy.data.splits_dividends import adjust_df
from pyevents.events import EventFilter
class IQFeedBarDataListener(iq.SilentBarListener):
"""Real-time bar data"""
def __init__(self, listeners, interval_len, interval_type='s', mkt_snapshot_depth=0, adjust_history=True, update_interval=0):
"""
:param listeners: listeners to notify for incoming bars
:param interval_len: interval length
:param interval_type: interval type
:param mkt_snapshot_depth: construct and maintain dataframe representing the current market snapshot with depth. If 0, then don't construct, otherwise construct for the past periods
:param adjust_history: adjust historical bars for splits and dividends
:param update_interval: how often to update each bar
"""
super().__init__(name="Bar data listener %d%s" % (interval_len, interval_type))
self.listeners = listeners
self.listeners += self.on_event
self.conn = None
self.streaming_conn = None
self.interval_len = interval_len
self.interval_type = interval_type
self.mkt_snapshot_depth = mkt_snapshot_depth
self.adjust_history = adjust_history
self.update_interval = update_interval
self.watched_symbols = dict()
self.bar_updates = 0
def __enter__(self):
launch_service()
self.conn = iq.BarConn()
self.conn.add_listener(self)
self.conn.connect()
# streaming conn for fundamental data
if self.adjust_history:
self.streaming_conn = iq.QuoteConn()
self.streaming_conn.connect()
return self
def __exit__(self, exception_type, exception_value, traceback):
"""Disconnect connection etc"""
self.conn.remove_listener(self)
self.conn.disconnect()
self.conn = None
if self.streaming_conn is not None:
self.streaming_conn.disconnect()
self.streaming_conn = None
def __del__(self):
if self.conn is not None:
self.conn.remove_listener(self)
if self.own_conn:
self.conn.disconnect()
if self.streaming_conn is not None:
self.streaming_conn.disconnect()
def __getattr__(self, name):
if self.conn is not None:
return getattr(self.conn, name)
else:
raise AttributeError
def process_invalid_symbol(self, bad_symbol: str) -> None:
if bad_symbol in self.watched_symbols:
del self.watched_symbols[bad_symbol]
def _process_bar_update(self, bar_data: np.array) -> pd.DataFrame:
bar_data = bar_data[0] if len(bar_data) == 1 else bar_data
symbol = bar_data[0].decode("ascii")
df = self.watched_symbols[symbol]
if df is None:
self.watched_symbols[symbol] = self._bar_to_df(bar_data)
else:
bar_timestamp = (bar_data[1] + np.timedelta64(bar_data[2], 'us')) \
.astype(datetime.datetime) \
.replace(tzinfo=tz.gettz('US/Eastern')) \
.astimezone(tz.gettz('UTC'))
timestamp_ind = df.index.names.index('timestamp')
df_timestamp = df.index[-1][timestamp_ind]
if df_timestamp != bar_timestamp:
data = self._bar_to_df(bar_data)
df = df.append(data) if self.mkt_snapshot_depth > 0 else data
if df.shape[0] > self.mkt_snapshot_depth:
df = df.iloc[df.shape[0] - self.mkt_snapshot_depth:]
df.index = df.index.set_levels(pd.to_datetime(df.index.levels[timestamp_ind], utc=True), level='timestamp')
self.watched_symbols[symbol] = df
else:
df.iloc[-1] = bar_data['open_p'], \
bar_data['high_p'], \
bar_data['low_p'], \
bar_data['close_p'], \
bar_data['tot_vlm'], \
bar_data['prd_vlm'], \
bar_data['num_trds']
self.bar_updates = (self.bar_updates + 1) % 1000000007
if self.bar_updates % 100 == 0:
logging.getLogger(__name__).debug("%d bar updates" % self.bar_updates)
return df
def process_latest_bar_update(self, bar_data: np.array) -> None:
df = self._process_bar_update(bar_data)
symbol = bar_data[0].decode("ascii")
self.listeners({'type': 'latest_bar_update',
'data': df,
'symbol': symbol,
'interval_type': self.interval_type,
'interval_len': self.interval_len})
def process_live_bar(self, bar_data: np.array) -> None:
df = self._process_bar_update(bar_data)
symbol = bar_data[0].decode("ascii")
self.listeners({'type': 'live_bar',
'data': df,
'symbol': symbol,
'interval_type': self.interval_type,
'interval_len': self.interval_len})
def process_history_bar(self, bar_data: np.array) -> None:
bar_data = (bar_data[0] if len(bar_data) == 1 else bar_data).copy()
symbol = bar_data[0].decode("ascii")
if self.watched_symbols[symbol] is None:
self.watched_symbols[symbol] = list()
self.watched_symbols[symbol].append(bar_data)
if len(self.watched_symbols[symbol]) == self.mkt_snapshot_depth:
df = self._bars_to_df(self.watched_symbols[symbol])
if self.adjust_history:
adjust_df(df, get_splits_dividends(symbol, self.streaming_conn))
self.watched_symbols[symbol] = df
self.listeners({'type': 'history_bars',
'data': df,
'symbol': symbol,
'interval_type': self.interval_type,
'interval_len': self.interval_len})
def bar_updates_event_stream(self):
return EventFilter(listeners=self.listeners,
event_filter=
lambda e: True if 'type' in e
and e['type'] == 'latest_bar_update'
and e['interval_type'] == self.interval_type
and e['interval_len'] == self.interval_len
else False,
event_transformer=lambda e: (e['data'], e['symbol']))
def all_full_bars_event_stream(self):
return EventFilter(listeners=self.listeners,
event_filter=
lambda e: True if 'type' in e and e['type'] in ('history_bars', 'live_bar') and e['interval_type'] == self.interval_type and e['interval_len'] == self.interval_len else False,
event_transformer=lambda e: (e['data'], e['symbol']))
def on_event(self, event):
if event['type'] == 'watch_bars':
self.watch_bars(event['data']['symbol'] if isinstance(event['data'], dict) else event['data'])
def watch_bars(self, symbol: typing.Union[str, Iterable]):
data_copy = {'symbol': symbol,
'interval_type': self.interval_type,
'interval_len': self.interval_len,
'update': self.update_interval,
'lookback_bars': self.mkt_snapshot_depth}
if isinstance(symbol, str) and symbol not in self.watched_symbols:
self.watched_symbols[symbol] = None
self.conn.watch(**data_copy)
elif isinstance(symbol, Iterable):
for s in [s for s in data_copy['symbol'] if s not in self.watched_symbols]:
data_copy['symbol'] = s
self.watched_symbols[s] = None
self.conn.watch(**data_copy)
@staticmethod
def _bars_to_df(bars: list) -> pd.DataFrame:
if len(bars) == 0:
return pd.DataFrame()
df = iqfeed_to_df(bars)
df['timestamp'] = pd.Index(df['date'] + pd.to_timedelta(df['time'], unit='us')) \
.tz_localize('US/Eastern') \
.tz_convert('UTC')
df = df.rename(index=str,
columns={'open_p': 'open',
'high_p': 'high',
'low_p': 'low',
'close_p': 'close',
'tot_vlm': 'total_volume',
'prd_vlm': 'volume',
'num_trds': 'number_of_trades'}) \
.set_index('timestamp', drop=True, append=False) \
.drop(['date', 'time'], axis=1)
return df
@staticmethod
def _bar_to_df(bar_data) -> pd.DataFrame:
result = dict()
bar_data = (bar_data[0] if len(bar_data) == 1 else bar_data).copy()
result['timestamp'] = (datetime.datetime.combine(bar_data['date'].astype(datetime.datetime), datetime.datetime.min.time())
+ datetime.timedelta(microseconds=bar_data['time'].astype(np.uint64) / 1)) \
.replace(tzinfo=tz.gettz('US/Eastern')).astimezone(tz.gettz('UTC'))
result['open'] = bar_data['open_p']
result['high'] = bar_data['high_p']
result['low'] = bar_data['low_p']
result['close'] = bar_data['close_p']
result['total_volume'] = bar_data['tot_vlm']
result['volume'] = bar_data['prd_vlm']
result['number_of_trades'] = bar_data['num_trds']
result = pd.DataFrame(result, index=result['timestamp']).drop('timestamp', axis=1)
return result
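# --- Illustrative sketch (not part of the original module) ---
# _bars_to_df()/_bar_to_df() stamp bars with US/Eastern wall-clock time and convert
# them to UTC via tz_localize/tz_convert. The date/time values below are made up
# (09:30:00 ET expressed as microseconds since midnight); pandas is assumed to be
# available as `pd`, as in the rest of this module.
if __name__ == "__main__":
    example = pd.DataFrame({'date': pd.to_datetime(['2021-01-04']),
                            'time': [34200000000]})
    idx = pd.Index(example['date'] + pd.to_timedelta(example['time'], unit='us')) \
        .tz_localize('US/Eastern') \
        .tz_convert('UTC')
    print(idx)  # -> DatetimeIndex(['2021-01-04 14:30:00+00:00'], ...)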
|
82902
|
from django import VERSION
from django.conf.urls import url
from .views import GetPopupView
"""
JavaScriptCatalog is new in Django 1.10,
so for older Django versions the view is imported as a function
and for Django >= 1.10 the class-based view is used
"""
if VERSION < (1, 10):
from django.views.i18n import javascript_catalog
else:
from django.views.i18n import JavaScriptCatalog
app_name = 'django_popup_view_field'
if VERSION < (1, 10):
urlpatterns = [
url(r'^jsi18n/$', javascript_catalog, name='javascript-catalog'),
]
else:
urlpatterns = [
url(
r'^jsi18n/$',
JavaScriptCatalog.as_view(),
name='javascript-catalog'
)
]
urlpatterns += [
url(r'^(?P<view_class_name>\w+)/$', GetPopupView.as_view(), name="get_popup_view"),
]
|
82937
|
import torch
import numpy as np
from tqdm import tqdm
from typing import Union, List, Tuple, Any, Dict
from easydict import EasyDict
from .dataset import preprocess, InferenceDataset, InferenceDatasetWithKeypoints
from .network import build_spin
from .. import BasePose3dRunner, BasePose3dRefiner, ACTIONS
from iPERCore.tools.human_digitalizer.bodynets import SMPL
from iPERCore.tools.utils.dataloaders import build_inference_loader
from iPERCore.tools.utils.geometry.boxes import cal_head_bbox
from iPERCore.tools.utils.geometry.cam_pose_utils import cam_init2orig, cam_norm
from iPERCore.tools.utils.filesio.persistence import load_toml_file
__all__ = ["SPINRunner"]
class SPINRunner(BasePose3dRunner):
def __init__(self,
cfg_or_path: Union[EasyDict, str],
device=torch.device("cpu")):
"""
Args:
cfg_or_path (EasyDict or str): the configuration EasyDict or the cfg_path with `toml` file.
If it is an EasyDict instance, it must contains the followings,
--ckpt_path (str): the path of the pre-trained checkpoints;
--smpl_path (str): the path of the smpl model;
--smpl_mean_params (str): the path of the mean parameters of SMPL.
Otherwise if it is a `toml` file, an example could be the followings,
ckpt_path = "./assets/pretrains/spin_ckpt.pth"
smpl_path = "./assets/pretrains/smpl_model.pkl"
smpl_mean_params = "./assets/pretrains/smpl_mean_params.npz"
device (torch.device):
"""
self.device = device
# RGB
self.MEAN = torch.as_tensor([0.485, 0.456, 0.406])[None, :, None, None].to(self.device)
self.STD = torch.as_tensor([0.229, 0.224, 0.225])[None, :, None, None].to(self.device)
if isinstance(cfg_or_path, str):
cfg = EasyDict(load_toml_file(cfg_or_path))
else:
cfg = cfg_or_path
self.model = build_spin(pretrained=False)
checkpoint = torch.load(cfg["ckpt_path"])
self.model.load_state_dict(checkpoint, strict=True)
self.model.eval()
self._smpl = SMPL(cfg["smpl_path"]).to(self.device)
self.model = self.model.to(self.device)
def __call__(self, image: np.ndarray,
boxes: Union[np.ndarray, List, Tuple, Any],
action: ACTIONS = ACTIONS.SPLIT) -> Dict[str, Any]:
"""
Args:
image (np.ndarray): (H, W, C), color intensity [0, 255] with BGR color channel;
boxes (np.ndarray or List, or Tuple or None): (N, 4)
action:
-- 0: only return `cams`, `pose` and `shape` of SMPL;
-- 1: return `cams`, `pose`, `shape` and `verts`.
-- 2: return `cams`, `pose`, `shape`, `verts`, `j2d` and `j3d`.
Returns:
result (dict):
"""
image = np.copy(image)
proc_img, proc_info = preprocess(image, boxes)
proc_img = torch.tensor(proc_img).to(device=self.device)[None]
with torch.no_grad():
proc_img = (proc_img - self.MEAN) / self.STD
smpls = self.model(proc_img)
cams_orig = cam_init2orig(smpls[:, 0:3], proc_info["scale"],
torch.tensor(proc_info["start_pt"], device=self.device).float())
cams = cam_norm(cams_orig, proc_info["im_shape"][0])
smpls[:, 0:3] = cams
if action == ACTIONS.SPLIT:
result = self.body_model.split(smpls)
elif action == ACTIONS.SKIN:
result = self.body_model.skinning(smpls)
elif action == ACTIONS.SMPL:
result = {"theta": smpls}
else:
result = self.body_model.get_details(smpls)
result["proc_info"] = proc_info
return result
def run_with_smplify(self, image_paths: List[str], boxes: List[Union[List, Tuple, np.ndarray]],
keypoints_info: Dict, smplify_runner: BasePose3dRefiner,
batch_size: int = 16, num_workers: int = 4,
filter_invalid: bool = True, temporal: bool = True):
"""
Args:
image_paths (list of str): the image paths;
boxes (list of Union[np.ndarray, list, tuple]): the bounding boxes of each image;
keypoints_info (Dict): the keypoints information of each image;
smplify_runner (BasePose3dRefiner): the SMPLify instance; it must contain the keypoint_formater;
batch_size (int): the mini-batch size;
num_workers (int): the number of processes;
filter_invalid (bool): the flag to control whether filter invalid frames or not;
temporal (bool): use temporal smooth optimization or not.
Returns:
smpl_infos (dict): the estimated smpl information, it contains,
--all_init_smpls (torch.Tensor): (num, 85), the initialized smpls;
--all_opt_smpls (torch.Tensor): (num, 85), the optimized smpls;
--all_valid_ids (torch.Tensor): (num of valid frames,), the valid indexes.
"""
def head_is_valid(head_boxes):
return (head_boxes[:, 1] - head_boxes[:, 0]) * (head_boxes[:, 3] - head_boxes[:, 2]) > 10 * 10
dataset = InferenceDatasetWithKeypoints(image_paths, boxes, keypoints_info,
smplify_runner.keypoint_formater, image_size=224, temporal=temporal)
data_loader = build_inference_loader(dataset, batch_size=batch_size, num_workers=num_workers)
"""
sample (dict): the sample information, it contains,
--image (torch.Tensor): (3, 224, 224) is the cropped image range of [0, 1] and normalized
by MEAN and STD, RGB channel;
--orig_image (torch.Tensor): (3, height, width) is in the range of [0, 1], RGB channel;
--im_shape (torch.Tensor): (height, width)
--keypoints (dict): (num_joints, 3), and num_joints could be [75,].
--center (torch.Tensor): (2,);
--start_pt (torch.Tensor): (2,);
--scale (torch.Tensor): (1,);
--img_path (str): the image path.
"""
all_init_smpls = []
all_opt_smpls = []
all_pose3d_img_ids = []
for sample in tqdm(data_loader):
images = sample["image"].to(self.device)
start_pt = sample["start_pt"].to(self.device)
scale = sample["scale"][:, None].to(self.device).float()
im_shape = sample["im_shape"][:, 0:1].to(self.device)
keypoints_info = sample["keypoints"].to(self.device)
img_ids = sample["img_id"]
with torch.no_grad():
init_smpls = self.model(images)
cams_orig = cam_init2orig(init_smpls[:, 0:3], scale, start_pt)
cams = cam_norm(cams_orig, im_shape)
init_smpls[:, 0:3] = cams
smplify_results = smplify_runner(
keypoints_info, cams, init_smpls[:, -10:], init_smpls[:, 3:-10], proc_kps=False, temporal=temporal
)
opt_smpls = torch.cat([cams, smplify_results["new_opt_pose"], smplify_results["new_opt_betas"]], dim=1)
if filter_invalid:
opt_smpls_info = self.get_details(opt_smpls)
head_boxes = cal_head_bbox(opt_smpls_info["j2d"], image_size=512)
valid = head_is_valid(head_boxes).nonzero(as_tuple=False)
valid.squeeze_(-1)
img_ids = img_ids[valid]
all_init_smpls.append(init_smpls.cpu())
all_opt_smpls.append(opt_smpls.cpu())
all_pose3d_img_ids.append(img_ids.cpu())
all_init_smpls = torch.cat(all_init_smpls, dim=0)
all_opt_smpls = torch.cat(all_opt_smpls, dim=0)
all_valid_ids = torch.cat(all_pose3d_img_ids, dim=0)
smpl_infos = {
"all_init_smpls": all_init_smpls,
"all_opt_smpls": all_opt_smpls,
"all_valid_ids": all_valid_ids
}
return smpl_infos
def run(self, image_paths: List[str], boxes: List[List],
batch_size: int = 16, num_workers: int = 4,
filter_invalid: bool = True, temporal: bool = True):
"""
Args:
image_paths (list of str): the image paths;
boxes (list of list): the bounding boxes of each image;
batch_size (int): the mini-batch size;
num_workers (int): the number of processes;
filter_invalid (bool): the flag to control whether filter invalid frames or not;
temporal (bool): use temporal smooth optimization or not.
Returns:
smpl_infos (dict): the estimated smpl information, it contains,
--all_init_smpls (torch.Tensor): (num, 85), the initialized smpls;
--all_opt_smpls (torch.Tensor): None
--all_valid_ids (torch.Tensor): (num of valid frames,), the valid indexes.
"""
def head_is_valid(head_boxes):
return (head_boxes[:, 1] - head_boxes[:, 0]) * (head_boxes[:, 3] - head_boxes[:, 2]) > 10 * 10
dataset = InferenceDataset(image_paths, boxes, image_size=224)
data_loader = build_inference_loader(dataset, batch_size=batch_size, num_workers=num_workers)
"""
sample (dict): the sample information, it contains,
--image (torch.Tensor): (3, 224, 224) is the cropped image range of [0, 1] and normalized
by MEAN and STD, RGB channel;
--orig_image (torch.Tensor): (3, height, width) is in the range of [0, 1], RGB channel;
--im_shape (torch.Tensor): (height, width)
--keypoints (dict): (num_joints, 3), and num_joints could be [75,].
--center (torch.Tensor): (2,);
--start_pt (torch.Tensor): (2,);
--scale (torch.Tensor): (1,);
--img_path (str): the image path.
"""
all_init_smpls = []
all_pose3d_img_ids = []
for sample in tqdm(data_loader):
images = sample["image"].to(self.device)
start_pt = sample["start_pt"].to(self.device)
scale = sample["scale"][:, None].to(self.device).float()
im_shape = sample["im_shape"][:, 0:1].to(self.device)
img_ids = sample["img_id"]
with torch.no_grad():
init_smpls = self.model(images)
cams_orig = cam_init2orig(init_smpls[:, 0:3], scale, start_pt)
cams = cam_norm(cams_orig, im_shape)
init_smpls[:, 0:3] = cams
if filter_invalid:
init_smpls_info = self.get_details(init_smpls)
head_boxes = cal_head_bbox(init_smpls_info["j2d"], image_size=512)
valid = head_is_valid(head_boxes).nonzero(as_tuple=False)
valid.squeeze_(-1)
img_ids = img_ids[valid]
all_init_smpls.append(init_smpls.cpu())
all_pose3d_img_ids.append(img_ids.cpu())
all_init_smpls = torch.cat(all_init_smpls, dim=0)
all_valid_ids = torch.cat(all_pose3d_img_ids, dim=0)
smpl_infos = {
"all_init_smpls": all_init_smpls,
"all_opt_smpls": None,
"all_valid_ids": all_valid_ids
}
return smpl_infos
def get_details(self, smpls):
return self._smpl.get_details(smpls)
@property
def mean_theta(self):
mean_cam = self.model.init_cam
mean_pose = self.model.init_pose
mean_shape = self.model.init_shape
mean_theta = torch.cat([mean_cam, mean_pose, mean_shape], dim=-1)[0]
return mean_theta
@property
def body_model(self):
return self._smpl
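# --- Illustrative sketch (not part of the original module) ---
# SPINRunner accepts either a toml path or an EasyDict. The paths below are the
# ones quoted in the __init__ docstring and are assumptions about the asset layout;
# the runner is not instantiated here because that requires real pretrained weights.
if __name__ == "__main__":
    cfg = EasyDict({
        "ckpt_path": "./assets/pretrains/spin_ckpt.pth",
        "smpl_path": "./assets/pretrains/smpl_model.pkl",
        "smpl_mean_params": "./assets/pretrains/smpl_mean_params.npz",
    })
    print(sorted(cfg.keys()))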
|
82969
|
import pytest
import sqlite3
from unittest.mock import call, Mock
from allennlp.common.testing import AllenNlpTestCase
from scripts.ai2_internal.resume_daemon import (
BeakerStatus,
create_table,
handler,
logger,
resume,
start_autoresume,
)
# Don't spam the log in tests.
logger.removeHandler(handler)
class ResumeDaemonTest(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.connection = sqlite3.connect(":memory:")
create_table(self.connection)
def test_create_beaker_status_works(self):
status = BeakerStatus("stopped")
assert status.name == "stopped"
def test_create_beaker_status_throws(self):
with pytest.raises(ValueError):
status = BeakerStatus("garbage")
assert status.name == "garbage"
def test_does_nothing_on_empty_db(self):
beaker = Mock()
resume(self.connection, beaker)
assert not beaker.method_calls
def test_does_not_resume_a_running_experiment(self):
beaker = Mock()
experiment_id = "foo"
start_autoresume(self.connection, experiment_id, 5)
beaker.get_status.return_value = BeakerStatus.running
resume(self.connection, beaker)
beaker.get_status.assert_called()
assert len(beaker.method_calls) == 1
def test_does_not_resume_a_finished_experiment(self):
beaker = Mock()
experiment_id = "foo"
start_autoresume(self.connection, experiment_id, 5)
beaker.get_status.return_value = BeakerStatus.succeeded
resume(self.connection, beaker)
beaker.get_status.assert_called()
assert len(beaker.method_calls) == 1
def test_does_resume_a_preempted_experiment(self):
beaker = Mock()
experiment_id = "foo"
start_autoresume(self.connection, experiment_id, 5)
beaker.get_status.return_value = BeakerStatus.preempted
beaker.resume.return_value = "foo2"
resume(self.connection, beaker)
beaker.get_status.assert_called()
beaker.resume.assert_called()
assert len(beaker.method_calls) == 2
def test_respects_upper_bound_on_resumes(self):
beaker = Mock()
experiment_id = "foo"
start_autoresume(self.connection, experiment_id, 5)
beaker.get_status.return_value = BeakerStatus.preempted
for i in range(10):
beaker.resume.return_value = f"foo{i}"
resume(self.connection, beaker)
calls = [
call.get_status("foo"),
call.resume("foo"),
call.get_status("foo0"),
call.resume("foo0"),
call.get_status("foo1"),
call.resume("foo1"),
call.get_status("foo2"),
call.resume("foo2"),
call.get_status("foo3"),
call.resume("foo3"),
call.get_status("foo4"),
]
beaker.assert_has_calls(calls)
def test_handles_a_realistic_scenario(self):
beaker = Mock()
experiment_id = "foo"
start_autoresume(self.connection, experiment_id, 5)
beaker.get_status.return_value = BeakerStatus.preempted
for i in range(10):
beaker.resume.return_value = f"foo{i}"
if i == 2:
beaker.get_status.return_value = BeakerStatus.succeeded
resume(self.connection, beaker)
calls = [
call.get_status("foo"),
call.resume("foo"),
call.get_status("foo0"),
call.resume("foo0"),
call.get_status("foo1"),
]
beaker.assert_has_calls(calls)
|
83003
|
import pygame
import pystage
from pystage.core.constants import KEY_MAPPINGS
from pystage.core._base_sprite import BaseSprite
class _Sensing(BaseSprite):
def __init__(self):
super().__init__()
def sensing_askandwait(self, question):
# an input field, answer needs to be available somehow
pass
def sensing_answer(self):
# Answer of the last question
pass
def sensing_keypressed(self, key):
return pygame.key.get_pressed()[KEY_MAPPINGS[key]]
def sensing_mousedown(self):
return any(pygame.mouse.get_pressed())
def sensing_mousex(self):
x = pygame.mouse.get_pos()[0]
return ((x - self.stage.offset_x) / self.stage.scale_factor) - self.stage.width / 2
def sensing_mousey(self):
y = pygame.mouse.get_pos()[1]
return -(y - self.stage.offset_y) / self.stage.scale_factor + self.stage.height / 2
def sensing_loudness(self):
# See events area, not sure if we support microphone access
pass
def sensing_timer(self):
return self.stage.timer
def sensing_resettimer(self):
self.stage.timer = 0
def sensing_setdragmode_draggable(self):
pass
sensing_setdragmode_draggable.opcode="sensing_setdragmode"
sensing_setdragmode_draggable.param="DRAG_MODE"
sensing_setdragmode_draggable.value="draggable"
def sensing_setdragmode_notdraggable(self):
pass
sensing_setdragmode_notdraggable.opcode="sensing_setdragmode"
sensing_setdragmode_notdraggable.param="DRAG_MODE"
sensing_setdragmode_notdraggable.value="not draggable"
# Here follows the mess of our favorite block, where a lot of stuff
# from other sprites and the stage can be retrieved.
# This is all redundant, as in Python you would simply do something like
# self.stage.get_backdrop_name(). Along these lines, we need good
# explanations and examples in the documentation that show why these functions
# should not be used.
def sensing_of_xposition(self, sprite):
pass
sensing_of_xposition.opcode="sensing_of"
sensing_of_xposition.param="PROPERTY"
sensing_of_xposition.value="x position"
def sensing_of_yposition(self, sprite):
pass
sensing_of_yposition.opcode="sensing_of"
sensing_of_yposition.param="PROPERTY"
sensing_of_yposition.value="y position"
def sensing_of_direction(self, sprite):
pass
sensing_of_direction.opcode="sensing_of"
sensing_of_direction.param="PROPERTY"
sensing_of_direction.value="direction"
def sensing_of_costumenumber(self, sprite):
pass
sensing_of_costumenumber.opcode="sensing_of"
sensing_of_costumenumber.param="PROPERTY"
sensing_of_costumenumber.value="costume #"
def sensing_of_costumename(self, sprite):
pass
sensing_of_costumename.opcode="sensing_of"
sensing_of_costumename.param="PROPERTY"
sensing_of_costumename.value="costume name"
def sensing_of_size(self, sprite):
pass
sensing_of_size.opcode="sensing_of"
sensing_of_size.param="PROPERTY"
sensing_of_size.value="size"
def sensing_of_volume(self, sprite="_stage_"):
pass
sensing_of_volume.opcode="sensing_of"
sensing_of_volume.param="PROPERTY"
sensing_of_volume.value="volume"
def sensing_of_variable(self, variable, sprite="_stage_"):
pass
sensing_of_variable.opcode="sensing_of"
def sensing_of_backdropnumber(self, stage="_stage_"):
pass
sensing_of_backdropnumber.opcode="sensing_of"
sensing_of_backdropnumber.param="PROPERTY"
sensing_of_backdropnumber.value="backdrop #"
def sensing_of_backdropname(self, stage="_stage_"):
pass
sensing_of_backdropname.opcode="sensing_of"
sensing_of_backdropname.param="PROPERTY"
sensing_of_backdropname.value="backdrop name"
def sensing_current_year(self):
pass
sensing_current_year.opcode="sensing_current"
sensing_current_year.param="CURRENTMENU"
sensing_current_year.value="YEAR"
def sensing_current_month(self):
pass
sensing_current_month.opcode="sensing_current"
sensing_current_month.param="CURRENTMENU"
sensing_current_month.value="MONTH"
def sensing_current_date(self):
pass
sensing_current_date.opcode="sensing_current"
sensing_current_date.param="CURRENTMENU"
sensing_current_date.value="DATE"
def sensing_current_dayofweek(self):
pass
sensing_current_dayofweek.opcode="sensing_current"
sensing_current_dayofweek.param="CURRENTMENU"
sensing_current_dayofweek.value="DAYOFWEEK"
def sensing_current_hour(self):
pass
sensing_current_hour.opcode="sensing_current"
sensing_current_hour.param="CURRENTMENU"
sensing_current_hour.value="HOUR"
def sensing_current_minute(self):
pass
sensing_current_minute.opcode="sensing_current"
sensing_current_minute.param="CURRENTMENU"
sensing_current_minute.value="MINUTE"
def sensing_current_second(self):
pass
sensing_current_second.opcode="sensing_current"
sensing_current_second.param="CURRENTMENU"
sensing_current_second.value="SECOND"
def sensing_dayssince2000(self):
pass
def sensing_username(self):
# Does not make a lot of sense here, maybe kept for compatibility?
pass
class _SensingSprite(BaseSprite):
def __init__(self):
super().__init__()
def sensing_touchingobject_pointer(self):
pass
sensing_touchingobject_pointer.opcode="sensing_touchingobject"
sensing_touchingobject_pointer.param="TOUCHINGOBJECTMENU"
sensing_touchingobject_pointer.value="_mouse_"
def sensing_touchingobject_edge(self):
return not self.stage.rect.contains(self.rect)
sensing_touchingobject_edge.opcode="sensing_touchingobject"
sensing_touchingobject_edge.param="TOUCHINGOBJECTMENU"
sensing_touchingobject_edge.value="_edge_"
def sensing_touchingobject_sprite(self, sprite):
if sprite.rect.colliderect(self.rect):
offset = (self.rect.left - sprite.rect.left, self.rect.top - sprite.rect.top)
return sprite.mask.overlap(self.mask, offset) is not None
return False
sensing_touchingobject_sprite.opcode="sensing_touchingobject"
def sensing_touchingcolor(self, color):
pass
def sensing_coloristouchingcolor(self, sprite_color, color):
pass
def sensing_distanceto_pointer(self):
pass
sensing_distanceto_pointer.opcode="sensing_distanceto"
sensing_distanceto_pointer.param="DISTANCETOMENU"
sensing_distanceto_pointer.value="_mouse_"
def sensing_distanceto_sprite(self, sprite):
pass
sensing_distanceto_sprite.opcode="sensing_distanceto"
|
83009
|
from models import *
from pokeapi import PokeAPI
import logging
# logger = logging.getLogger('peewee')
# logger.setLevel(logging.DEBUG)
# logger.addHandler(logging.StreamHandler())
def main():
api = PokeAPI()
for i in range(1, api.get_count(), 1):
try:
raw_pokemon = api.get_pokemon(i)
if raw_pokemon:
pokemon, created = Pokemon.get_or_create(**raw_pokemon)
print(f"Pokemon {'Created' if created else 'Existing'}: {pokemon.nombre}")
except Exception as e:
print(e)
if __name__ == '__main__':
main()
myDB.close()
|
83057
|
from __future__ import print_function
from tdda.rexpy import extract
from tdda.rexpy.seq import common_string_sequence
from tdda.rexpy.relib import re
x = extract(['Roger', 'Coger', 'Doger'], tag=True, as_object=True)
print(x)
patternToExamples = x.pattern_matches()
sequences = []
for j, (pattern, examples) in enumerate(patternToExamples.items()):
N = len(examples)
if N < 1:
print('%s:%s' % (pattern, examples))
else:
eparts = [re.match(x.results.rex[j], e).groups() for e in examples]
nparts = len(eparts[0])
for i in range(nparts):
(L, R) = (eparts[0][i], eparts[1][i])
n = 2
s = common_string_sequence(L, R)
while n < N and s != '':
s = common_string_sequence(s, eparts[n][i])
n += 1
sequences.append(s)
print(sequences)
|
83059
|
from abc import ABC
import numpy as np
import gym
import mujoco_py
from gym.envs.registration import register
def change_fetch_model(change_model):
import os
import shutil
gym_folder = os.path.dirname(gym.__file__)
xml_folder = 'envs/robotics/assets/fetch'
full_folder_path = os.path.join(gym_folder, xml_folder)
xml_file_path = os.path.join(full_folder_path, 'shared.xml')
backup_file_path = os.path.join(full_folder_path, 'shared_backup.xml')
if change_model:
if not os.path.exists(backup_file_path):
shutil.copy2(xml_file_path, backup_file_path)
shutil.copy2('fetch_yellow_obj.xml', xml_file_path)
else:
if os.path.exists(backup_file_path):
shutil.copy2(backup_file_path, xml_file_path)
def make(domain_name, task_name, seed, from_pixels, height, width, cameras=range(1),
visualize_reward=False, frame_skip=None, reward_type='dense', change_model=False):
if 'RealArm' not in domain_name:
change_fetch_model(change_model)
env = gym.make(domain_name, reward_type=reward_type)
env = GymEnvWrapper(env, from_pixels=from_pixels, cameras=cameras, height=height, width=width)
else:
import gym_xarm
env = gym.make(domain_name)
env.env.set_reward_mode(reward_type)
env = RealEnvWrapper(env, from_pixels=from_pixels, cameras=cameras, height=height, width=width)
env.seed(seed)
return env
class EnvWrapper(gym.Env, ABC):
def __init__(self, env, cameras, from_pixels=True, height=100, width=100, channels_first=True):
camera_0 = {'trackbodyid': -1, 'distance': 1.5, 'lookat': np.array((0.0, 0.6, 0)),
'elevation': -45.0, 'azimuth': 90}
camera_1 = {'trackbodyid': -1, 'distance': 1.5, 'lookat': np.array((0.0, 0.6, 0)),
'elevation': -45.0, 'azimuth': 135}
camera_2 = {'trackbodyid': -1, 'distance': 1.5, 'lookat': np.array((0.0, 0.6, 0)),
'elevation': -45.0, 'azimuth': 180}
camera_3 = {'trackbodyid': -1, 'distance': 1.5, 'lookat': np.array((0.0, 0.6, 0)),
'elevation': -45.0, 'azimuth': 225}
camera_4 = {'trackbodyid': -1, 'distance': 1.5, 'lookat': np.array((0.0, 0.6, 0)),
'elevation': -45.0, 'azimuth': 270}
camera_5 = {'trackbodyid': -1, 'distance': 1.5, 'lookat': np.array((0.0, 0.6, 0)),
'elevation': -45.0, 'azimuth': 315}
camera_6 = {'trackbodyid': -1, 'distance': 1.5, 'lookat': np.array((0.0, 0.6, 0)),
'elevation': -45.0, 'azimuth': 0}
camera_7 = {'trackbodyid': -1, 'distance': 1.5, 'lookat': np.array((0.0, 0.6, 0)),
'elevation': -45.0, 'azimuth': 45}
self.all_cameras = [camera_0, camera_1, camera_2, camera_3, camera_4, camera_5, camera_6, camera_7]
self._env = env
self.cameras = cameras
self.from_pixels = from_pixels
self.height = height
self.width = width
self.channels_first = channels_first
self.special_reset = None
self.special_reset_save = None
self.hybrid_obs = False
self.viewer = None
self.metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': int(np.round(1.0 / self.dt))
}
shape = [3 * len(cameras), height, width] if channels_first else [height, width, 3 * len(cameras)]
self._observation_space = gym.spaces.Box(
low=0, high=255, shape=shape, dtype=np.uint8
)
self._state_obs = None
self.change_camera()
self.reset()
def change_camera(self):
return
@property
def observation_space(self):
if self.from_pixels:
return self._observation_space
else:
return self._env.observation_space
@property
def action_space(self):
return self._env.action_space
def seed(self, seed=None):
return self._env.seed(seed)
def reset_model(self):
self._env.reset()
def viewer_setup(self, camera_id=0):
for key, value in self.all_cameras[camera_id].items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
def set_hybrid_obs(self, mode):
self.hybrid_obs = mode
def _get_obs(self):
if self.from_pixels:
imgs = []
for c in self.cameras:
imgs.append(self.render(mode='rgb_array', camera_id=c))
if self.channels_first:
pixel_obs = np.concatenate(imgs, axis=0)
else:
pixel_obs = np.concatenate(imgs, axis=2)
if self.hybrid_obs:
return [pixel_obs, self._get_hybrid_state()]
else:
return pixel_obs
else:
return self._get_state_obs()
def _get_state_obs(self):
return self._state_obs
def _get_hybrid_state(self):
return self._state_obs
@property
def hybrid_state_shape(self):
if self.hybrid_obs:
return self._get_hybrid_state().shape
else:
return None
def step(self, action):
self._state_obs, reward, done, info = self._env.step(action)
return self._get_obs(), reward, done, info
def reset(self, save_special_steps=False):
self._state_obs = self._env.reset()
return self._get_obs()
def set_state(self, qpos, qvel):
self._env.set_state(qpos, qvel)
@property
def dt(self):
if hasattr(self._env, 'dt'):
return self._env.dt
else:
return 1
@property
def _max_episode_steps(self):
return self._env.max_path_length
def do_simulation(self, ctrl, n_frames):
        self._env.do_simulation(ctrl, n_frames)
def render(self, mode='human', camera_id=0, height=None, width=None):
if mode == 'human':
self._env.render()
if height is None:
height = self.height
if width is None:
width = self.width
if mode == 'rgb_array':
if isinstance(self, GymEnvWrapper):
self._env.unwrapped._render_callback()
viewer = self._get_viewer(camera_id)
# Calling render twice to fix Mujoco change of resolution bug.
viewer.render(width, height, camera_id=-1)
viewer.render(width, height, camera_id=-1)
# window size used for old mujoco-py:
data = viewer.read_pixels(width, height, depth=False)
# original image is upside-down, so flip it
data = data[::-1, :, :]
if self.channels_first:
data = data.transpose((2, 0, 1))
return data
def close(self):
if self.viewer is not None:
self.viewer = None
self._env.close()
def _get_viewer(self, camera_id):
if self.viewer is None:
from mujoco_py import GlfwContext
GlfwContext(offscreen=True)
self.viewer = mujoco_py.MjRenderContextOffscreen(self._env.sim, -1)
self.viewer_setup(camera_id)
return self.viewer
def get_body_com(self, body_name):
return self._env.get_body_com(body_name)
def state_vector(self):
        return self._env.state_vector()
class GymEnvWrapper(EnvWrapper):
def change_camera(self):
for c in self.all_cameras:
c['lookat'] = np.array((1.3, 0.75, 0.4))
c['distance'] = 1.2
# Zoomed out cameras
camera_8 = {'trackbodyid': -1, 'distance': 1.8, 'lookat': np.array((1.3, 0.75, 0.4)),
'elevation': -45.0, 'azimuth': 135}
camera_9 = {'trackbodyid': -1, 'distance': 1.8, 'lookat': np.array((1.3, 0.75, 0.4)),
'elevation': -45.0, 'azimuth': 225}
# Gripper head camera
camera_10 = {'trackbodyid': -1, 'distance': 0.2, 'lookat': np.array((1.3, 0.75, 0.4)),
'elevation': -90, 'azimuth': 0}
self.all_cameras.append(camera_8)
self.all_cameras.append(camera_9)
self.all_cameras.append(camera_10)
def update_tracking_cameras(self):
gripper_pos = self._state_obs['observation'][:3].copy()
self.all_cameras[10]['lookat'] = gripper_pos
def _get_obs(self):
self.update_tracking_cameras()
return super()._get_obs()
@property
def _max_episode_steps(self):
return self._env._max_episode_steps
def set_special_reset(self, mode):
self.special_reset = mode
def register_special_reset_move(self, action, reward):
if self.special_reset_save is not None:
self.special_reset_save['obs'].append(self._get_obs())
self.special_reset_save['act'].append(action)
self.special_reset_save['reward'].append(reward)
def go_to_pos(self, pos):
grip_pos = self._state_obs['observation'][:3]
action = np.zeros(4)
for i in range(10):
if np.linalg.norm(grip_pos - pos) < 0.02:
break
action[:3] = (pos - grip_pos) * 10
            self._state_obs, r, d, _ = self._env.step(action)
self.register_special_reset_move(action, r)
grip_pos = self._state_obs['observation'][:3]
def raise_gripper(self):
grip_pos = self._state_obs['observation'][:3]
raised_pos = grip_pos.copy()
raised_pos[2] += 0.1
self.go_to_pos(raised_pos)
def open_gripper(self):
action = np.array([0, 0, 0, 1])
for i in range(2):
            self._state_obs, r, d, _ = self._env.step(action)
self.register_special_reset_move(action, r)
def close_gripper(self):
action = np.array([0, 0, 0, -1])
for i in range(2):
            self._state_obs, r, d, _ = self._env.step(action)
self.register_special_reset_move(action, r)
def reset(self, save_special_steps=False):
self._state_obs = self._env.reset()
if save_special_steps:
self.special_reset_save = {'obs': [], 'act': [], 'reward': []}
self.special_reset_save['obs'].append(self._get_obs())
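        # Special reset modes (inferred from the branches below): 'close' moves the gripper next to
        # the object, offset away from the goal; 'grip' approaches the object from above and grasps it.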
if self.special_reset == 'close' and self._env.has_object:
obs = self._state_obs['observation']
goal = self._state_obs['desired_goal']
obj_pos = obs[3:6]
goal_distance = np.linalg.norm(obj_pos - goal)
desired_reset_pos = obj_pos + (obj_pos - goal) / goal_distance * 0.06
desired_reset_pos_raised = desired_reset_pos.copy()
desired_reset_pos_raised[2] += 0.1
self.raise_gripper()
self.go_to_pos(desired_reset_pos_raised)
self.go_to_pos(desired_reset_pos)
elif self.special_reset == 'grip' and self._env.has_object and not self._env.block_gripper:
obs = self._state_obs['observation']
obj_pos = obs[3:6]
above_obj = obj_pos.copy()
above_obj[2] += 0.1
self.open_gripper()
self.raise_gripper()
self.go_to_pos(above_obj)
self.go_to_pos(obj_pos)
self.close_gripper()
return self._get_obs()
def _get_state_obs(self):
obs = np.concatenate([self._state_obs['observation'],
self._state_obs['achieved_goal'],
self._state_obs['desired_goal']])
return obs
def _get_hybrid_state(self):
grip_pos = self._env.sim.data.get_site_xpos('robot0:grip')
dt = self._env.sim.nsubsteps * self._env.sim.model.opt.timestep
grip_velp = self._env.sim.data.get_site_xvelp('robot0:grip') * dt
robot_qpos, robot_qvel = gym.envs.robotics.utils.robot_get_obs(self._env.sim)
gripper_state = robot_qpos[-2:]
gripper_vel = robot_qvel[-2:] * dt # change to a scalar if the gripper is made symmetric
robot_info = np.concatenate([grip_pos, gripper_state, grip_velp, gripper_vel])
hybrid_obs_list = []
if 'robot' in self.hybrid_obs:
hybrid_obs_list.append(robot_info)
if 'goal' in self.hybrid_obs:
hybrid_obs_list.append(self._state_obs['desired_goal'])
return np.concatenate(hybrid_obs_list)
@property
def observation_space(self):
shape = self._get_state_obs().shape
return gym.spaces.Box(-np.inf, np.inf, shape=shape, dtype='float32')
class RealEnvWrapper(GymEnvWrapper):
def render(self, mode='human', camera_id=0, height=None, width=None):
if mode == 'human':
self._env.render()
if height is None:
height = self.height
if width is None:
width = self.width
if mode == 'rgb_array':
data = self._env.render(mode='rgb_array', height=height, width=width)
if self.channels_first:
data = data.transpose((2, 0, 1))
if camera_id == 8:
data = data[3:]
return data
def _get_obs(self):
return self.render(mode='rgb_array', height=self.height, width=self.width)
def _get_state_obs(self):
return self._get_obs()
def reset(self, save_special_steps=False):
self._state_obs = self._env.reset(rand_pos=True)
return self._get_obs()
|
83060
|
from django.db import models
from django.db.models import Case, F, Q, Value, When
from psqlextra.expressions import HStoreRef
from psqlextra.fields import HStoreField
from .fake_model import get_fake_model
def test_query_annotate_hstore_key_ref():
"""Tests whether annotating using a :see:HStoreRef expression works
correctly.
This allows you to select an individual hstore key.
"""
model_fk = get_fake_model({"title": HStoreField()})
model = get_fake_model(
{"fk": models.ForeignKey(model_fk, on_delete=models.CASCADE)}
)
fk = model_fk.objects.create(title={"en": "english", "ar": "arabic"})
model.objects.create(fk=fk)
queryset = (
model.objects.annotate(english_title=HStoreRef("fk__title", "en"))
.values("english_title")
.first()
)
assert queryset["english_title"] == "english"
def test_query_annotate_rename():
"""Tests whether field names can be overwritten with a annotated field."""
model = get_fake_model({"title": models.CharField(max_length=12)})
model.objects.create(title="swen")
obj = model.objects.annotate(title=F("title")).first()
assert obj.title == "swen"
def test_query_annotate_rename_chain():
"""Tests whether annotations are behaving correctly after a QuerySet
chain."""
model = get_fake_model(
{
"name": models.CharField(max_length=10),
"value": models.IntegerField(),
}
)
model.objects.create(name="test", value=23)
obj = model.objects.values("name").annotate(value=F("value"))[:1]
assert "value" in obj[0]
assert obj[0]["value"] == 23
def test_query_annotate_rename_order():
"""Tests whether annotation order is preserved after a rename."""
model = get_fake_model(
{
"name": models.CharField(max_length=10),
"value": models.IntegerField(),
}
)
qs = model.objects.annotate(value=F("value"), value_2=F("value"))
assert list(qs.query.annotations.keys()) == ["value", "value_2"]
def test_query_annotate_in_expression():
"""Tests whether annotations can be used in expressions."""
model = get_fake_model({"name": models.CharField(max_length=10)})
model.objects.create(name="henk")
result = model.objects.annotate(
real_name=F("name"),
is_he_henk=Case(
When(Q(real_name="henk"), then=Value("really henk")),
default=Value("definitely not henk"),
output_field=models.CharField(),
),
).first()
assert result.real_name == "henk"
assert result.is_he_henk == "really henk"
def test_query_hstore_value_update_f_ref():
"""Tests whether F(..) expressions can be used in hstore values when
performing update queries."""
model = get_fake_model(
{"name": models.CharField(max_length=255), "name_new": HStoreField()}
)
model.objects.create(name="waqas", name_new=dict(en="swen"))
model.objects.update(name_new=dict(en=models.F("name")))
inst = model.objects.all().first()
assert inst.name_new.get("en") == "waqas"
def test_query_hstore_value_update_cast():
"""Tests whether values in a HStore field are automatically cast to strings
when doing updates."""
model = get_fake_model({"title": HStoreField()})
model.objects.create(title=dict(en="test"))
model.objects.update(title=dict(en=2))
inst = model.objects.all().first()
assert inst.title.get("en") == "2"
def test_query_hstore_value_update_escape():
"""Tests whether values in a HStore field are properly escaped using
prepared statement values."""
model = get_fake_model({"title": HStoreField()})
model.objects.create(title=dict(en="test"))
model.objects.update(title=dict(en="console.log('test')"))
inst = model.objects.all().first()
assert inst.title.get("en") == "console.log('test')"
|
83093
|
import random
from cmath import exp
import math
def rotation_scale(origin, theta, scale, points):
return [origin + (point-origin)*scale*e(theta) for point in points]
def translate(origin, points):
# offset = points[0]-origin
return [point-origin for point in points]
def flip(z1, z2, points):
z1z2 = z1+z2
return [z1z2-point for point in points]
def reverse(z1, z2, points):
mid = (z1+z2)/2
return [mid - (point-mid).conjugate() for point in points]
def flip_and_reverse(z1,z2, points):
mid = (z1+z2)/2
return [mid - (mid-point).conjugate() for point in points]
def e(theta):
return exp(1j*theta)
rules = [(0, 1 / 3, False, False), (math.pi / 3, 1 / 3, False, False),
         (-math.pi / 3, 1 / 3, False, False), (0, 1 / 3, False, False)]
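# Each rule is a (theta, scale, flipped, reversed) tuple: rotate the copied segment by theta,
# scale it by the given factor, and optionally mirror and/or reverse it (format inferred from
# how get_base() and fractal() unpack the tuples).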
def get_base(rules, base_length, startpoint):
result = [startpoint]
for theta, fac, _, _ in rules:
result.append( result[-1]*fac*e(theta) )
return result
def fractal(n, rules, base_length, startpoint=0+0j):
if n == 1:
return get_base(rules, base_length, startpoint)
else:
a = fractal(n - 1, rules, base_length, startpoint)
lastpoint = a[0]
ret = []
for theta, fac, flipped, is_reversed in rules:
if flipped is None and is_reversed is None:
ret.append(lastpoint*base_length*e(theta))
continue
t = rotation_scale(
lastpoint,
theta,
fac,
translate(lastpoint,a),
)
if flipped and is_reversed:
t = flip_and_reverse(t[0], t[-1], t)
elif flipped:
t = flip(t[0], t[-1], t)
elif is_reversed:
t = reverse(t[0], t[-1], t)
ret.extend(t)
lastpoint = ret[-1]
return ret
def left_index(points):
min_ind=0
for i,point in enumerate(points[1:],1):
if point.real < points[min_ind].real:
min_ind=i
elif point.real == points[min_ind].real:
if point.imag > points[min_ind].imag:
min_ind=i
return min_ind
def orientation(p, q, r):
val = (q.imag - p.imag)*(r.real - q.real) \
- (q.real-p.real)*(r.imag-q.imag)
return val<0
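# Gift-wrapping (Jarvis march) convex hull over complex-number points, starting from the
# leftmost point returned by left_index.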
def convexhull(points):
l_ind = left_index(points)
hull = []
p,q,n = l_ind,0,len(points)
while True:
hull.append(p)
q = (p+1)%n
for i in range(n):
if orientation(points[p], points[i], points[q]):
q=i
p=q
if p==l_ind:
break
return [points[i] for i in hull]
# window = tk.Tk()
# cv = tk.Canvas(window, width=2000, height=2000)
# cv.pack()
rules = [(0, 1 / 3, False, False), (math.pi / 3, 1 / 3, False, False),
(-math.pi / 3, 1 / 3, False, False), (0, 1 / 3, False, False)]
# "l:90,f:-1:1:1,r:90,f:-1:-1:0.5773,f:1:1:0.5773,r:120,f:1:1:0.5773,l:90,f:1:-1:1,l:30"
rules1 = [(math.pi / 2, 1, False, True), (0, 0.5773, True, True),
(0, 0.5773, False, False), (-2 * math.pi / 3, 0.5773, False, False),
(-math.pi / 6, 1, True, False)]
rules2 = [(math.pi / 2, 1, False, False), (math.pi / 4, 0.707, True, False),
(0, 1, False, False), (-math.pi / 4, 0.707, True, False),
(-math.pi / 2, 1, False, False)]
rules3 = [(math.pi / 2, 1, False, False), (-math.pi / 4, 1.414, False, False),
(0, 1, True, True)]
rules4 = [(math.pi / 2, 1, False, False), (math.pi, 1, False, False),
(math.pi / 4, 1.414, False, False), (-math.pi / 4, 1.414, True, True)]
startpoint = 400+300j
import time
start_time = time.time()
l = fractal(15, rules3, 1, startpoint)
print(time.time()-start_time)
# cv.create_polygon(l, fill="red")
# cv.pack()
# l = fractal(12,rules)
# for i in range(0,len(l),1000000):
# cv.create_line(l[i:i+1000000])
# cv.pack()
# points = [(0,100),(500,100),(750,math.sqrt(3)*250+100 ),(1000,100),(1500,100)]
# points = sorted([ [random.randint(100,700) for _ in range(2)] for i in range(6) ])
# cv.create_line(*points)
# reversed = reverse(*points[0],*points[-1],points)
# cv.create_line(*reversed, fill = "red")
# flipped = flip(*points[0],*points[-1],points)
# # cv.create_line(*flipped,fill = "blue")
# # print(flipped)
# # cv.create_line(*translate(0,300,flipped))
# cv.pack()
# test = ( (i,j) for i,j in [(1,0), (1,1), (0,1), (-1,1), (0,-1)] )
# a= rotation_translation_gen(0,0,math.pi/2,test,2)
# list(a)
|
83100
|
import sys
sys.path.append("../../")
from appJar import gui
def showPositions():
for widg in app.getContainer().grid_slaves():
row, column = widg.grid_info()["row"], widg.grid_info()["column"]
print(widg, row, column)
with gui("Grid Demo", "300x300", sticky="news", expand="both") as app:
for x in range(5):
for y in range(5):
app.label(str(x)+str(y), row=x, column=y)
app.button("PRESS", showPositions, colspan=5)
|
83105
|
from django.test import TestCase
from model_bakery import baker
from django_cradmin.cradmin_testhelpers import TestCaseMixin
from django_cradmin.viewhelpers import listbuilderview
from django_cradmin.django_cradmin_testapp import models as testmodels
class ListBuilderViewWithoutPaging(listbuilderview.View):
model = testmodels.SomeItem
def get_queryset_for_role(self):
return testmodels.SomeItem.objects.all().order_by('id')
class ListBuilderViewWithPaging(ListBuilderViewWithoutPaging):
paginate_by = 3
class TestListBuilderView(TestCase, TestCaseMixin):
viewclass = ListBuilderViewWithoutPaging
def test_empty(self):
mockresponse = self.mock_http200_getrequest_htmls()
self.assertFalse(mockresponse.selector.exists('.test-cradmin-listbuilder-list'))
self.assertEqual(
mockresponse.selector.one('.test-cradmin-no-items-message').alltext_normalized,
'No some items')
def test_not_empty(self):
baker.make('django_cradmin_testapp.SomeItem',
name='Test name')
mockresponse = self.mock_http200_getrequest_htmls()
self.assertFalse(mockresponse.selector.exists('.test-cradmin-no-items-message'))
self.assertTrue(mockresponse.selector.exists('.test-cradmin-listbuilder-list'))
def test_item_rendering(self):
baker.make('django_cradmin_testapp.SomeItem',
name='Test name')
mockresponse = self.mock_http200_getrequest_htmls()
# mockresponse.selector.prettyprint()
self.assertEqual(1, mockresponse.selector.count('.test-cradmin-listbuilder-item-frame-renderer'))
self.assertEqual('Test name',
mockresponse.selector.one('.test-cradmin-listbuilder-item-frame-renderer').alltext_normalized)
class TestListBuilderPaginationView(TestCase, TestCaseMixin):
viewclass = ListBuilderViewWithPaging
def test_paginate_by_singlepage(self):
baker.make('django_cradmin_testapp.SomeItem', _quantity=3)
mockresponse = self.mock_http200_getrequest_htmls()
self.assertEqual(3, mockresponse.selector.count('.test-cradmin-listbuilder-item-frame-renderer'))
self.assertFalse(mockresponse.selector.exists('.django-cradmin-loadmorepager'))
def test_paginate_by_firstpage(self):
baker.make('django_cradmin_testapp.SomeItem', _quantity=8)
mockresponse = self.mock_http200_getrequest_htmls()
# mockresponse.selector.one('#django_cradmin_contentwrapper').prettyprint()
self.assertEqual(3, mockresponse.selector.count('.test-cradmin-listbuilder-item-frame-renderer'))
self.assertTrue(mockresponse.selector.exists('.django-cradmin-loadmorepager'))
def test_paginate_by_middlepage(self):
baker.make('django_cradmin_testapp.SomeItem', _quantity=8)
mockresponse = self.mock_http200_getrequest_htmls(requestkwargs={'data': {'page': 2}})
self.assertEqual(3, mockresponse.selector.count('.test-cradmin-listbuilder-item-frame-renderer'))
self.assertTrue(mockresponse.selector.exists('.django-cradmin-loadmorepager'))
def test_paginate_by_lastpage(self):
baker.make('django_cradmin_testapp.SomeItem', _quantity=8)
mockresponse = self.mock_http200_getrequest_htmls(requestkwargs={'data': {'page': 3}})
self.assertEqual(2, mockresponse.selector.count('.test-cradmin-listbuilder-item-frame-renderer'))
self.assertFalse(mockresponse.selector.exists('.django-cradmin-loadmorepager'))
|
83115
|
from cowait.tasks import Task
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, GlobalAveragePooling1D, Dense
class ImdbTask(Task):
async def run(self):
# get the pre-processed data
data_train, data_test = self.get_data()
# train and evaluate model
model = self.fit(data_train, data_test)
# test model on example data point
test_index = 500
test_example = data_test[0][test_index]
pred = model.predict(test_example.reshape(1, len(test_example)))
pred = 1 if pred[0][0] > 0.5 else 0
print("##### Test example #####")
print("Correct class:", data_test[1][test_index]) # gt=1
print("Predicted class:", pred)
return int(pred)
def get_data(self):
# load the data
# the text reviews have already been converted to numerical features
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.imdb.load_data()
# pad all sequences to the same length
pad_length = 1000
x_train = pad_sequences(x_train, pad_length)
x_test = pad_sequences(x_test, pad_length)
return (x_train, y_train), (x_test, y_test)
def create_model(self):
# size of the vocabulary
vocab_size = len(tf.keras.datasets.imdb.get_word_index().keys()) + 3
# size of the embedding vectors learned by the model
embedding_dim = 16
# set up the model structure
model = Sequential([
Embedding(input_dim=vocab_size, output_dim=embedding_dim), # embedding layer
GlobalAveragePooling1D(), # pooling layer
Dense(1, activation='sigmoid') # single sigmoid output node
])
# compile model with optimizer, loss function and evaluation metrics
model.compile(
optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy']
)
return model
def fit(self, data_train, data_test):
# get the data
(x_train, y_train), (x_test, y_test) = data_train, data_test
# create the model
model = self.create_model()
# fit the model to the train set
print("##### Train model #####")
model.fit(
x=x_train,
y=y_train,
epochs=10
)
# evaluate the model on the test set
print("##### Evaluate model #####")
model.evaluate(
x=x_test,
y=y_test
)
return model
|
83169
|
from rustypy.pywrapper import rust_bind
@rust_bind
def first_module() -> None:
print('... called from first module')
if __name__ == "__main__":
first_module()
|
83179
|
import torch
from functools import partial
from easydict import EasyDict as edict
from albumentations import *
from isegm.data.datasets import *
from isegm.model.losses import *
from isegm.data.transforms import *
from isegm.engine.trainer import ISTrainer
from isegm.model.metrics import AdaptiveIoU
from isegm.data.points_sampler import MultiPointSampler
from isegm.utils.log import logger
from isegm.model import initializer
from isegm.model.is_hrnet_model import HRNetModel
from isegm.model.is_deeplab_model import DeeplabModel
from isegm.model.is_segformer_model import SegformerModel
from isegm.model.is_hrformer_model import HRFormerModel
from isegm.model.is_swinformer_model import SwinformerModel
|
83188
|
from .snpeff_dump import SnpeffDumper
from .snpeff_upload import SnpeffHg19Uploader, SnpeffHg38Uploader
|
83194
|
import dash_bootstrap_components as dbc
from dash import html
from .util import make_subheading
spinner = html.Div(
[
make_subheading("Spinner", "spinner"),
html.Div(
[
dbc.Spinner(color=col)
for col in [
"primary",
"secondary",
"success",
"warning",
"danger",
]
],
className="mb-2",
),
html.Div(
[
dbc.Spinner(color=col, type="grow")
for col in [
"primary",
"secondary",
"success",
"warning",
"danger",
]
],
className="mb-2",
),
html.Div(
[
dbc.Button(
dbc.Spinner(size="sm"),
color="primary",
disabled=True,
className="me-1",
),
dbc.Button(
[dbc.Spinner(size="sm"), " Loading..."],
color="primary",
disabled=True,
),
]
),
],
className="mb-4",
)
|
83228
|
import os
main_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
empty_python_output = """\
# -*- encoding: utf-8 -*-
# This file has been generated by prophyc.
import sys
import prophy
"""
def test_showing_version(call_prophyc):
ret, out, err = call_prophyc(["--version"])
expected_version = '1.2.4'
assert ret == 0
assert out == 'prophyc %s\n' % expected_version
assert err == ""
def test_missing_input(call_prophyc):
ret, out, err = call_prophyc([])
assert ret == 1
assert out == ""
assert err == "prophyc: error: missing input file\n"
def test_no_output_directory(call_prophyc, dummy_file):
ret, out, err = call_prophyc(["--python_out", "no_dir", dummy_file])
assert ret == 1
assert out == ""
assert err == "prophyc: error: argument --python_out: no_dir directory not found\n"
def test_no_input_file(call_prophyc):
ret, out, err = call_prophyc(["path/that/does/not/exist"])
assert ret == 1
assert out == ""
assert err == "prophyc: error: argument INPUT_FILE: path/that/does/not/exist file not found\n"
def test_missing_output(call_prophyc, dummy_file):
ret, out, err = call_prophyc(["--isar", dummy_file])
assert ret == 1
assert out == ""
assert err == "prophyc: error: missing output directives\n"
def test_passing_isar_and_sack(call_prophyc, dummy_file):
ret, out, err = call_prophyc(["--isar", "--sack", "--python_out", ".", dummy_file])
assert ret == 1
assert out == ""
assert err == "prophyc: error: argument --sack: not allowed with argument --isar\n"
def test_including_isar_with_isar(call_prophyc, dummy_file):
ret, out, err = call_prophyc(["--isar", "--include_isar", dummy_file, "--python_out", ".",
dummy_file])
assert ret == 1
assert out == ""
assert err == 'prophyc: error: Isar defines inclusion is supported only in "sack" compilation mode.\n'
def test_isar_compiles_single_empty_xml(call_prophyc, tmpdir_cwd):
tmpdir_cwd.join("input.xml").write("<struct/>")
ret, out, err = call_prophyc(["--isar", "--python_out", str(tmpdir_cwd),
os.path.join(str(tmpdir_cwd), "input.xml")])
assert ret == 0
assert out == ""
assert err == ""
assert tmpdir_cwd.join("input.py").read() == empty_python_output
def test_isar_compiles_multiple_empty_xmls(call_prophyc, tmpdir_cwd):
tmpdir_cwd.join("input1.xml").write("<struct/>")
tmpdir_cwd.join("input2.xml").write("<struct/>")
tmpdir_cwd.join("input3.xml").write("<struct/>")
ret, out, err = call_prophyc(["--isar",
"--python_out",
str(tmpdir_cwd),
os.path.join(str(tmpdir_cwd), "input1.xml"),
os.path.join(str(tmpdir_cwd), "input2.xml"),
os.path.join(str(tmpdir_cwd), "input3.xml")])
assert ret == 0
assert out == ""
assert err == ""
assert tmpdir_cwd.join("input1.py").read() == empty_python_output
assert tmpdir_cwd.join("input2.py").read() == empty_python_output
assert tmpdir_cwd.join("input3.py").read() == empty_python_output
def test_outputs_to_correct_directory(call_prophyc, tmpdir_cwd):
tmpdir_cwd.join("input.xml").write("<struct/>")
os.mkdir("output")
ret, out, err = call_prophyc(["--isar", "--python_out",
os.path.join(str(tmpdir_cwd), "output"),
os.path.join(str(tmpdir_cwd), "input.xml")])
assert ret == 0
assert out == ""
assert err == ""
assert tmpdir_cwd.join(os.path.join("output", "input.py")).read() == empty_python_output
def test_isar_patch(call_prophyc, tmpdir_cwd):
tmpdir_cwd.join("input.xml").write("""\
<x>
<struct name="B">
<member name="a" type="u8"/>
</struct>
<struct name="A">
<member name="a" type="u8"/>
</struct>
</x>
""")
tmpdir_cwd.join("patch").write("""\
B insert 999 b A
B dynamic b a
""")
ret, out, err = call_prophyc(["--isar", "--patch",
os.path.join(str(tmpdir_cwd), "patch"),
"--python_out",
str(tmpdir_cwd),
os.path.join(str(tmpdir_cwd), "input.xml")])
assert ret == 0
assert out == ""
assert err == ""
assert tmpdir_cwd.join("input.py").read() == empty_python_output + """\
class A(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [
('a', prophy.u8),
]
class B(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [
('a', prophy.u8),
('b', prophy.array(A, bound='a')),
]
"""
def test_isar_cpp(call_prophyc, tmpdir_cwd):
tmpdir_cwd.join("input.xml").write("""
<xml>
<struct name="Test">
<member name="x" type="u32">
<dimension isVariableSize="true"/>
</member>
</struct>
</xml>
""")
ret, out, err = call_prophyc(["--isar",
"--cpp_out", str(tmpdir_cwd),
os.path.join(str(tmpdir_cwd), "input.xml")])
assert ret == 0
assert out == ""
assert err == ""
assert """\
PROPHY_STRUCT(4) Test
{
uint32_t x_len;
uint32_t x[1]; /// dynamic array, size in x_len
};
""" in tmpdir_cwd.join("input.pp.hpp").read()
assert """\
template <>
Test* swap<Test>(Test* payload)
{
swap(&payload->x_len);
return cast<Test*>(swap_n_fixed(payload->x, payload->x_len));
}
""" in tmpdir_cwd.join("input.pp.cpp").read()
def test_isar_warnings(call_prophyc, tmpdir_cwd):
tmpdir_cwd.join("input.xml").write("""
<xml>
<system xmlns:xi="http://www.xyz.com/1984/XInclude">
<xi:include href="include.xml"/>
</system>
</xml>
""")
ret, out, err = call_prophyc(["--isar",
"--python_out", str(tmpdir_cwd),
os.path.join(str(tmpdir_cwd), "input.xml")])
assert ret == 0
assert out == ""
assert err == "prophyc: warning: file include.xml not found\n"
def test_quiet_warnings(call_prophyc, tmpdir_cwd):
tmpdir_cwd.join("input.xml").write("""
<xml>
<system xmlns:xi="http://www.xyz.com/1984/XInclude">
<xi:include href="include.xml"/>
</system>
</xml>
""")
ret, out, err = call_prophyc(["--isar",
"--quiet",
"--python_out", str(tmpdir_cwd),
os.path.join(str(tmpdir_cwd), "input.xml")])
assert ret == 0
assert out == ""
assert err == ""
def test_isar_with_includes(call_prophyc, tmpdir_cwd):
tmpdir_cwd.join("input.xml").write("""
<xml>
<system xmlns:xi="http://www.xyz.com/1984/XInclude">
<xi:include href="helper.xml"/>
</system>
<struct name="X">
<member name="a" type="Y"/>
</struct>
</xml>
""")
tmpdir_cwd.join("helper.xml").write("""
<xml>
<struct name="Y">
<member name="a" type="u64"/>
</struct>
</xml>
""")
ret, out, err = call_prophyc(["--isar",
"-I", str(tmpdir_cwd),
"--cpp_full_out", str(tmpdir_cwd),
os.path.join(str(tmpdir_cwd), "input.xml")])
assert ret == 0
assert out == ""
assert err == ""
assert """\
struct X : public prophy::detail::message<X>
{
enum { encoded_byte_size = 8 };
Y a;
X() { }
X(const Y& _1): a(_1) { }
size_t get_byte_size() const
{
return 8;
}
};
""" in tmpdir_cwd.join("input.ppf.hpp").read()
def test_sack_compiles_single_empty_hpp(if_clang_installed, call_prophyc, tmpdir_cwd):
tmpdir_cwd.join("input.hpp").write("")
ret, out, err = call_prophyc(["--sack", "--python_out",
str(tmpdir_cwd),
os.path.join(str(tmpdir_cwd), "input.hpp")])
assert ret == 0
assert out == ""
assert err == ""
assert tmpdir_cwd.join("input.py").read() == empty_python_output
def test_sack_patch(if_clang_installed, call_prophyc, tmpdir_cwd):
tmpdir_cwd.join("input.hpp").write("""\
struct X
{
int x;
};
""")
tmpdir_cwd.join("patch").write("""\
X type x r64
""")
ret, out, err = call_prophyc(["--sack", "--patch",
os.path.join(str(tmpdir_cwd), "patch"),
"--python_out", str(tmpdir_cwd),
os.path.join(str(tmpdir_cwd), "input.hpp")])
assert ret == 0
assert out == ""
assert err == ""
assert tmpdir_cwd.join("input.py").read() == empty_python_output + """\
class X(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [
('x', prophy.r64),
]
"""
def test_multiple_outputs(if_clang_installed, call_prophyc, tmpdir_cwd):
tmpdir_cwd.join("input.xml").write("""
<xml>
<struct name="Test">
<member name="x" type="u32"/>
</struct>
</xml>
""")
ret, out, err = call_prophyc(["--isar",
"--python_out", str(tmpdir_cwd),
"--cpp_out", str(tmpdir_cwd),
os.path.join(str(tmpdir_cwd), "input.xml")])
assert ret == 0
assert out == ""
assert err == ""
assert tmpdir_cwd.join("input.py").read() == """\
# -*- encoding: utf-8 -*-
# This file has been generated by prophyc.
import sys
import prophy
class Test(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [
('x', prophy.u32),
]
"""
assert tmpdir_cwd.join("input.pp.hpp").read() == """\
#ifndef _PROPHY_GENERATED_input_HPP
#define _PROPHY_GENERATED_input_HPP
#include <prophy/prophy.hpp>
PROPHY_STRUCT(4) Test
{
uint32_t x;
};
namespace prophy
{
template <> Test* swap<Test>(Test*);
} // namespace prophy
#endif /* _PROPHY_GENERATED_input_HPP */
"""
assert tmpdir_cwd.join("input.pp.cpp").read() == """\
#include <prophy/detail/prophy.hpp>
#include "input.pp.hpp"
using namespace prophy::detail;
namespace prophy
{
template <>
Test* swap<Test>(Test* payload)
{
swap(&payload->x);
return payload + 1;
}
} // namespace prophy
"""
def test_clang_not_installed(if_clang_not_installed, call_prophyc, tmpdir_cwd):
tmpdir_cwd.join("input.hpp").write("")
ret, out, err = call_prophyc(["--sack",
"--python_out", str(tmpdir_cwd),
os.path.join(str(tmpdir_cwd), "input.hpp")])
assert ret == 1
assert out == ""
assert err == "prophyc: error: %s\n" % if_clang_not_installed.error
def test_prophy_language(call_prophyc, tmpdir_cwd):
tmpdir_cwd.join("input.prophy").write("""\
struct X
{
u32 x[5];
u64 y<2>;
};
union U
{
1: X x;
2: u32 y;
};
""")
ret, out, err = call_prophyc(["--python_out", str(tmpdir_cwd),
"--cpp_out", str(tmpdir_cwd),
os.path.join(str(tmpdir_cwd), "input.prophy")])
assert ret == 0
assert out == ""
assert err == ""
assert tmpdir_cwd.join("input.py").read() == """\
# -*- encoding: utf-8 -*-
# This file has been generated by prophyc.
import sys
import prophy
class X(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [
('x', prophy.array(prophy.u32, size=5)),
('num_of_y', prophy.u32),
('y', prophy.array(prophy.u64, bound='num_of_y', size=2)),
]
class U(prophy.with_metaclass(prophy.union_generator, prophy.union)):
_descriptor = [
('x', X, 1),
('y', prophy.u32, 2),
]
"""
assert tmpdir_cwd.join("input.pp.hpp").read() == """\
#ifndef _PROPHY_GENERATED_input_HPP
#define _PROPHY_GENERATED_input_HPP
#include <prophy/prophy.hpp>
PROPHY_STRUCT(8) X
{
uint32_t x[5];
uint32_t num_of_y;
uint64_t y[2]; /// limited array, size in num_of_y
};
PROPHY_STRUCT(8) U
{
enum _discriminator
{
discriminator_x = 1,
discriminator_y = 2
} discriminator;
uint32_t _padding0; /// manual padding to ensure natural alignment layout
union
{
X x;
uint32_t y;
};
};
namespace prophy
{
template <> X* swap<X>(X*);
template <> U* swap<U>(U*);
} // namespace prophy
#endif /* _PROPHY_GENERATED_input_HPP */
"""
assert tmpdir_cwd.join("input.pp.cpp").read() == """\
#include <prophy/detail/prophy.hpp>
#include "input.pp.hpp"
using namespace prophy::detail;
namespace prophy
{
template <>
X* swap<X>(X* payload)
{
swap_n_fixed(payload->x, 5);
swap(&payload->num_of_y);
swap_n_fixed(payload->y, payload->num_of_y);
return payload + 1;
}
template <>
U* swap<U>(U* payload)
{
swap(reinterpret_cast<uint32_t*>(&payload->discriminator));
switch (payload->discriminator)
{
case U::discriminator_x: swap(&payload->x); break;
case U::discriminator_y: swap(&payload->y); break;
default: break;
}
return payload + 1;
}
} // namespace prophy
"""
def test_prophy_parse_errors(call_prophyc, tmpdir_cwd):
tmpdir_cwd.join("input.prophy").write("""\
struct X {};
union Y {};
constant
""")
ret, out, err = call_prophyc(["--python_out", str(tmpdir_cwd),
os.path.join(str(tmpdir_cwd), "input.prophy")])
assert ret == 1
assert out == ""
errlines = err.splitlines()
assert len(errlines) == 2
assert errlines[0].endswith("input.prophy:1:11: error: syntax error at '}'")
assert errlines[1].endswith("input.prophy:2:10: error: syntax error at '}'")
assert not os.path.exists("input.py")
def test_sack_parse_warnings(if_clang_installed, call_prophyc, tmpdir_cwd):
tmpdir_cwd.join("input.cpp").write("""\
int foo() { int x; }
rubbish;
""")
ret, out, err = call_prophyc(['--python_out', str(tmpdir_cwd), '--sack',
os.path.join(str(tmpdir_cwd), 'input.cpp')])
assert ret == 0
assert out == ""
errlines = err.splitlines()
assert len(errlines) == 2
assert 'input.cpp:1:20: warning: control reaches end of non-void function' in errlines[0]
assert 'input.cpp:2:1: warning: C++ requires a type specifier for all declarations' in errlines[1]
assert os.path.exists("input.py")
def test_sack_parse_errors(if_clang_installed, call_prophyc, tmpdir_cwd):
tmpdir_cwd.join("input.unknown").write("")
ret, out, err = call_prophyc(['--python_out', str(tmpdir_cwd), '--sack',
os.path.join(str(tmpdir_cwd), 'input.unknown')])
assert ret == 1
assert out == ""
assert 'input.unknown: error: error parsing translation unit' in err
assert not os.path.exists("input.py")
def test_cpp_full_out(call_prophyc, tmpdir_cwd):
tmpdir_cwd.join("input.prophy").write("""
typedef i16 TP;
const MAX = 4;
struct X {
u32 x;
TP y<MAX>;
};
""")
ret, out, err = call_prophyc(["--cpp_full_out", str(tmpdir_cwd),
os.path.join(str(tmpdir_cwd), "input.prophy")])
assert ret == 0
assert out == ""
assert err == ""
assert tmpdir_cwd.join("input.ppf.hpp").read() == """\
#ifndef _PROPHY_GENERATED_FULL_input_HPP
#define _PROPHY_GENERATED_FULL_input_HPP
#include <stdint.h>
#include <numeric>
#include <vector>
#include <string>
#include <prophy/array.hpp>
#include <prophy/endianness.hpp>
#include <prophy/optional.hpp>
#include <prophy/detail/byte_size.hpp>
#include <prophy/detail/message.hpp>
#include <prophy/detail/mpl.hpp>
namespace prophy
{
namespace generated
{
typedef int16_t TP;
enum { MAX = 4u };
struct X : public prophy::detail::message<X>
{
enum { encoded_byte_size = 16 };
uint32_t x;
std::vector<TP> y; /// limit 4
X(): x() { }
X(uint32_t _1, const std::vector<TP>& _2): x(_1), y(_2) { }
size_t get_byte_size() const
{
return 16;
}
};
} // namespace generated
} // namespace prophy
#endif /* _PROPHY_GENERATED_FULL_input_HPP */
"""
assert tmpdir_cwd.join("input.ppf.cpp").read() == """\
#include "input.ppf.hpp"
#include <algorithm>
#include <prophy/detail/encoder.hpp>
#include <prophy/detail/decoder.hpp>
#include <prophy/detail/printer.hpp>
#include <prophy/detail/align.hpp>
using namespace prophy::generated;
namespace prophy
{
namespace detail
{
template <>
template <endianness E>
uint8_t* message_impl<X>::encode(const X& x, uint8_t* pos)
{
pos = do_encode<E>(pos, x.x);
pos = do_encode<E>(pos, uint32_t(std::min(x.y.size(), size_t(4))));
do_encode<E>(pos, x.y.data(), uint32_t(std::min(x.y.size(), size_t(4))));
pos = pos + 8;
return pos;
}
template uint8_t* message_impl<X>::encode<native>(const X& x, uint8_t* pos);
template uint8_t* message_impl<X>::encode<little>(const X& x, uint8_t* pos);
template uint8_t* message_impl<X>::encode<big>(const X& x, uint8_t* pos);
template <>
template <endianness E>
bool message_impl<X>::decode(X& x, const uint8_t*& pos, const uint8_t* end)
{
return (
do_decode<E>(x.x, pos, end) &&
do_decode_resize<E, uint32_t>(x.y, pos, end, 4) &&
do_decode_in_place<E>(x.y.data(), x.y.size(), pos, end) &&
do_decode_advance(8, pos, end)
);
}
template bool message_impl<X>::decode<native>(X& x, const uint8_t*& pos, const uint8_t* end);
template bool message_impl<X>::decode<little>(X& x, const uint8_t*& pos, const uint8_t* end);
template bool message_impl<X>::decode<big>(X& x, const uint8_t*& pos, const uint8_t* end);
template <>
void message_impl<X>::print(const X& x, std::ostream& out, size_t indent)
{
do_print(out, indent, "x", x.x);
do_print(out, indent, "y", x.y.data(), std::min(x.y.size(), size_t(4)));
}
template void message_impl<X>::print(const X& x, std::ostream& out, size_t indent);
} // namespace detail
} // namespace prophy
"""
def test_cpp_full_out_error(call_prophyc, tmpdir_cwd):
tmpdir_cwd.join("input.xml").write("""
<xml>
<struct name="Test">
<member name="x" type="Unknown">
<dimension isVariableSize="true"/>
</member>
</struct>
</xml>
""")
ret, out, err = call_prophyc(["--isar", "--cpp_full_out", str(tmpdir_cwd),
os.path.join(str(tmpdir_cwd), "input.xml")])
assert ret == 1
assert out == ""
assert err == """\
prophyc: warning: type 'Unknown' not found
prophyc: warning: Test::x has unknown type "Unknown"
prophyc: error: Test byte size unknown
"""
def test_model_evaluation_warnings(call_prophyc, tmpdir_cwd):
tmpdir_cwd.join("input.prophy").write("""
struct X
{
u32 x;
u32 y[2];
};
""")
tmpdir_cwd.join("patch").write("""\
X type x Unknown
X static y UNKNOWN
""")
ret, out, err = call_prophyc(["--cpp_out", str(tmpdir_cwd),
"--patch", os.path.join(str(tmpdir_cwd), "patch"),
os.path.join(str(tmpdir_cwd), "input.prophy")])
assert ret == 1
assert out == ""
assert err == """\
prophyc: warning: type 'Unknown' not found
prophyc: warning: numeric constant 'UNKNOWN' not found
prophyc: warning: X::x has unknown type "Unknown"
prophyc: warning: X::y array has unknown size "UNKNOWN"
prophyc: error: X byte size unknown
"""
def test_python_package_cross_module_imports(call_prophyc, tmpdir_cwd, monkeypatch):
codecs_package_dir = tmpdir_cwd.join("some_codecs")
codecs_package_dir.mkdir()
codecs_package_dir.join("__init__.py").ensure()
tmpdir_cwd.join("a.prophy").write("""\
struct A {
u32 a;
};
""")
tmpdir_cwd.join("b.prophy").write("""\
#include "a.prophy"
struct B {
A a;
};
""")
ret, _, _ = call_prophyc(["--python_out", str(codecs_package_dir),
str(tmpdir_cwd.join("a.prophy")),
str(tmpdir_cwd.join("b.prophy"))])
assert ret == 0
monkeypatch.syspath_prepend(str(tmpdir_cwd))
from some_codecs import b
assert hasattr(b, 'A')
assert hasattr(b.B().a, 'a')
|
83230
|
import uuid
import datetime
from typing import List, Union, Dict
from plugins.adversary.app.engine.database import EncryptedDictField
from plugins.adversary.app.engine.objects import Log
from plugins.adversary.app.util import tz_utcnow
version = 1.1
class Operation(dict):
def __init__(self):
super().__init__()
self['id'] = str(uuid.uuid4())
self['steps'] = []
self['nodetype'] = 'operation'
class AttackReference(dict):
def __init__(self, technique_id, technique_name, tactics):
super().__init__()
self['technique_id'] = technique_id
self['technique_name'] = technique_name
self["tactic"] = tactics
class Step(dict):
def __init__(self, attack_info: List[AttackReference], dest_hosts: List[str] = None, description: str = None):
super().__init__()
self['id'] = str(uuid.uuid4())
self['nodetype'] = 'step'
self['attack_info'] = attack_info
self['events'] = []
self['key_technique'] = attack_info[0]['technique_id'] if len(attack_info) else None
self['key_event'] = None
self['host'] = None
self['time'] = None
if dest_hosts is not None:
self['dest_hosts'] = dest_hosts
if description is not None:
self['description'] = description
class Event(dict):
def __init__(self, obj, action, host, start_time=None, fields=None):
if start_time is None:
start_time = tz_utcnow().isoformat()
if fields is None:
fields = {}
super().__init__()
self['id'] = str(uuid.uuid4())
self['nodetype'] = 'event'
self['host'] = host
self['object'] = obj
self['action'] = action
self['happened_after'] = start_time
self.update(**fields)
def end(self, successful):
self['happened_before'] = tz_utcnow().isoformat()
# self['successful'] = successful
if not successful:
return None
return self
class ProcessEvent(Event):
def __init__(self, host, ppid, pid, command_line, action='create'):
args = {'fqdn': host,
'ppid': ppid,
'pid': pid,
'command_line': command_line}
super().__init__("process", action, host, fields=args)
class FileEvent(Event):
def __init__(self, fqdn, file_path, action='create'):
args = {'fqdn': fqdn,
'file_path': file_path}
super().__init__('file', action, fqdn, fields=args)
class CredentialDump(Event):
def __init__(self, fqdn, pid, typ, usernames):
args = {'fqdn': fqdn,
'pid': pid,
'type': typ,
'usernames': usernames}
super().__init__('cred', 'dump', fqdn, fields=args)
class RegistryEvent(Event):
def __init__(self, fqdn, key, data, value, action="add"):
args = {'fqdn': fqdn,
'key': key,
'value': value,
'data': data}
super().__init__('registry', action, fqdn, fields=args)
class ProcessOpen(Event):
def __init__(self, fqdn, file_path, actor_pid):
args = {'fqdn': fqdn,
'file_path': file_path,
'actor_pid': actor_pid}
super().__init__('process', 'open', fqdn, fields=args)
class BSFEmitter(object):
def __init__(self, log: Log):
"""
An object that handles emitting BSF events
Args:
log: the log to emit log entries to
"""
self.log = log
self.is_done = False
self.encrypt = EncryptedDictField.encrypt_dict
def append_to_log_stream(self, bsf_node):
enc = self.encrypt(bsf_node)
self.log.modify(push__event_stream=enc)
def start_operation(self):
self.log.modify(active_operation=Operation())
def _pick_step_key_event(self) -> Union[Dict, None]:
"""
        Select a key event from the active step's events and return it.
        :return: The key event dict, or None if the active step has no events
"""
if not len(self.log.active_step['events']):
return None
events = list(filter(lambda e: e['id'] in self.log.active_step['events'], self.log.event_stream))
new_files = list(filter(lambda e: e['object'] == 'file' and e['action'] == 'create', events))
new_processes = list(filter(lambda e: e['object'] == 'process' and e['action'] == 'create', events))
if new_processes:
# Prefer the first process:create
return new_processes[0]
elif new_files:
# If there are no process:create events, then prefer the first file:create
return new_files[0]
elif events:
# just get the first event if there is one
return events[0]
@staticmethod
def _avg_time(happened_before: str, happened_after: str):
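        # Returns the midpoint of the two ISO-8601 timestamps, e.g.
        # ('2020-01-01T00:00:10', '2020-01-01T00:00:00') -> '2020-01-01T00:00:05'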
before = datetime.datetime.fromisoformat(happened_before)
after = datetime.datetime.fromisoformat(happened_after)
return (before + (after - before) / 2).isoformat()
def _push_active_step(self):
key_event = self._pick_step_key_event()
if key_event:
avg_key_time = self._avg_time(key_event['happened_before'], key_event['happened_after'])
self.log.modify(active_step__key_event=key_event['id'],
active_step__host=key_event['host'],
active_step__time=avg_key_time)
self.append_to_log_stream(self.log.active_step)
def add_step(self, step: Step):
if self.log.active_step and len(self.log.active_step['events']) > 0:
self._push_active_step()
self.log.modify(push__active_operation__steps=step['id'])
self.log.modify(active_step=step)
def add_event(self, event):
if not isinstance(event, CredentialDump):
self.log.modify(push__active_step__events=event['id'])
self.append_to_log_stream(event)
def done(self):
if self.is_done:
# This BSF Log has already been marked done.
return
if self.log.active_step:
self._push_active_step()
if self.log.active_operation:
self.append_to_log_stream(self.log.active_operation)
self.is_done = True
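# Illustrative usage sketch (the Log instance and the ATT&CK metadata below are assumptions,
# not defined in this module):
# emitter = BSFEmitter(log)
# emitter.start_operation()
# emitter.add_step(Step([AttackReference('T1059', 'Command-Line Interface', ['execution'])]))
# event = ProcessEvent(host='host-1', ppid=1, pid=42, command_line='whoami').end(successful=True)
# if event:
#     emitter.add_event(event)
# emitter.done()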
|
83245
|
from datetime import datetime
from os.path import dirname, join
import pytest
from city_scrapers_core.constants import COMMISSION, PASSED
from city_scrapers_core.utils import file_response
from freezegun import freeze_time
from city_scrapers.spiders.chi_ssa_51 import ChiSsa51Spider
test_response = file_response(
join(dirname(__file__), "files", "chi_ssa_51.html"),
url="http://www.cbatechworks.org/",
)
spider = ChiSsa51Spider()
freezer = freeze_time("2019-07-19")
freezer.start()
parsed_items = [item for item in spider.parse(test_response)]
freezer.stop()
def test_start():
assert parsed_items[0]["start"] == datetime(2019, 3, 13, 12, 0)
def test_end():
assert parsed_items[0]["end"] == datetime(2019, 3, 13, 13, 0)
def test_id():
assert parsed_items[0]["id"] == "chi_ssa_51/201903131200/x/commission"
def test_status():
assert parsed_items[0]["status"] == PASSED
@pytest.mark.parametrize("item", parsed_items)
def test_all_day(item):
assert item["all_day"] is False
@pytest.mark.parametrize("item", parsed_items)
def test_title(item):
assert item["title"] == "Commission"
@pytest.mark.parametrize("item", parsed_items)
def test_description(item):
assert item["description"] == ""
@pytest.mark.parametrize("item", parsed_items)
def test_time_notes(item):
assert item["time_notes"] == ""
@pytest.mark.parametrize("item", parsed_items)
def test_location(item):
assert item["location"] == {
"address": "806 East 78th Street, Chicago IL 60619",
"name": "<NAME>",
}
@pytest.mark.parametrize("item", parsed_items)
def test_source(item):
assert item["source"] == "http://www.cbatechworks.org/"
@pytest.mark.parametrize("item", parsed_items)
def test_links(item):
assert item["links"] == []
@pytest.mark.parametrize("item", parsed_items)
def test_classification(item):
assert item["classification"] == COMMISSION
|
83255
|
import attr
import pandas
from sarif_om import *
from src.exception.VulnerabilityNotFoundException import VulnerabilityNotFoundException
VERSION = "2.1.0"
SCHEMA = "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json"
class SarifHolder:
def __init__(self):
self.sarif = SarifLog(runs=[], version=VERSION, schema_uri=SCHEMA)
self.translationDict = dict()
# each analysis is defined by a Run
def addRun(self, newRun):
        # Check whether an analysis performed by the same tool already exists
for run in self.sarif.runs:
if run.tool.driver.name == newRun.tool.driver.name:
# Append Unique Rules
for rule in newRun.tool.driver.rules:
if isNotDuplicateRule(rule, run.tool.driver.rules):
run.tool.driver.rules.append(rule)
# Append Unique Artifacts
for artifact in newRun.artifacts:
if isNotDuplicateArtifact(artifact, run.artifacts):
run.artifacts.append(artifact)
# Append Unique Logical Locations
if newRun.logical_locations is not None:
for logicalLocation in newRun.logical_locations:
if isNotDuplicateLogicalLocation(logicalLocation, run.logical_locations):
run.logical_locations.append(logicalLocation)
# Append Results
for result in newRun.results:
run.results.append(result)
return
self.sarif.runs.append(newRun)
# to print the analysis from a given tool
def printToolRun(self, tool):
run = -1
for i in range(len(self.sarif.runs)):
if self.sarif.runs[i].tool.driver.name.lower() == tool.lower():
run = i
sarifIndividual = SarifLog(runs=[], version=VERSION, schema_uri=SCHEMA)
if run != -1:
sarifIndividual.runs.append(self.sarif.runs[run])
return self.serializeSarif(sarifIndividual)
# print json formatted the SARIF file
def print(self):
return self.serializeSarif(self.sarif)
# creates dictionary to fix variable names from sarif_om to standard sarif
def serialize(self, inst, field, value):
if field is not None:
self.translationDict[field.name] = field.metadata['schema_property_name']
return value
# filters SARIF keys to discard default values in output
def filterUnusedKeys(self, field, value):
return not (value is None or (field.default == value and field.name != "level") or (
isinstance(field.default, attr.Factory) and field.default.factory() == value))
# returns a dictionary based on the schema_property_name and the values of the SARIF object
def serializeSarif(self, sarifObj):
valuesDict = attr.asdict(sarifObj, filter=self.filterUnusedKeys, value_serializer=self.serialize)
return self.recursiveSarif(valuesDict)
# uses translationDict to fix variable names from sarif_om to standard SARIF
def recursiveSarif(self, serializedSarif):
if isinstance(serializedSarif, (int, str)):
return serializedSarif
if isinstance(serializedSarif, dict):
dic = dict()
for key, value in serializedSarif.items():
dic[self.translationDict[key]] = self.recursiveSarif(value)
return dic
if isinstance(serializedSarif, list):
lis = list()
for item in serializedSarif:
lis.append(self.recursiveSarif(item))
return lis
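# Illustrative usage sketch (the tool name is an assumption; a Run would normally be assembled
# from a tool's parsed output using parseRule/parseResult/parseArtifact below):
# holder = SarifHolder()
# holder.addRun(some_run)                 # a sarif_om Run built elsewhere
# full_log = holder.print()               # entire SARIF log as a plain dict
# one_tool = holder.printToolRun("oyente")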
def parseRule(tool, vulnerability, full_description=None):
vuln_info = findVulnerabilityOnTable(tool, vulnerability)
if full_description is None:
return ReportingDescriptor(id=vuln_info["RuleId"],
short_description=MultiformatMessageString(
vuln_info["Vulnerability"]),
name=vuln_info["Type"] + "Vulnerability")
return ReportingDescriptor(id=vuln_info["RuleId"],
short_description=MultiformatMessageString(
vuln_info["Vulnerability"]),
full_description=MultiformatMessageString(full_description),
name=vuln_info["Type"] + "Vulnerability")
def parseResult(tool, vulnerability, level="warning", uri=None, line=None, end_line=None, column=None, snippet=None,
logicalLocation=None):
vuln_info = findVulnerabilityOnTable(tool, vulnerability)
level = parseLevel(level)
locations = [
Location(physical_location=PhysicalLocation(artifact_location=ArtifactLocation(uri=uri),
region=Region(start_line=line,
end_line=end_line,
start_column=column,
snippet=ArtifactContent(text=snippet))))
]
if logicalLocation is not None:
locations[0].logical_locations = [logicalLocation]
return Result(rule_id=vuln_info["RuleId"],
message=Message(text=vulnerability),
level=level,
locations=locations)
def parseArtifact(uri, source_language="Solidity"):
return Artifact(location=ArtifactLocation(uri=uri), source_language=source_language)
def parseLogicalLocation(name, kind="contract"):
return LogicalLocation(name=name, kind=kind)
# returns the row from the table for a given vulnerability and tool
def findVulnerabilityOnTable(tool, vulnerability_found):
table = pandas.read_csv("src/output_parser/sarif_vulnerability_mapping.csv")
tool_table = table.loc[table["Tool"] == tool]
    # Because messages may carry extra information (for example the line where the vulnerability was
    # found), this loop checks whether the table's vulnerability string and the reported one contain each other
for index, row in tool_table.iterrows():
if row["Vulnerability"] in vulnerability_found or vulnerability_found in row["Vulnerability"]:
return row
raise VulnerabilityNotFoundException(tool=tool, vulnerability=vulnerability_found)
# given a level produced by a tool, returns the level in SARIF format
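# e.g. parseLevel("High") -> "error", parseLevel("Conflicts") -> "note", parseLevel(2) -> "warning"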
def parseLevel(level):
if isinstance(level, int):
return "warning"
if level.lower() == "warning" or level.lower() == "warnings" or level.lower() == "medium":
return "warning"
if level.lower() == "error" or level.lower() == "violations" or level.lower() == "high":
return "error"
if level.lower() == "note" or level.lower() == "conflicts" or level.lower() == "informational":
return "note"
if level.lower == "none" or level.lower() == "safe":
return "none"
return "warning"
# Returns True when rule is unique
def isNotDuplicateRule(newRule, rulesList):
for rule in rulesList:
if rule.id == newRule.id:
return False
return True
# Returns True when artifact is unique
def isNotDuplicateArtifact(newArtifact, artifactsList):
for artifact in artifactsList:
if artifact.location.uri == newArtifact.location.uri:
return False
return True
# Returns True when LogicalLocation is unique
def isNotDuplicateLogicalLocation(newLogicalLocation, logicalLocationList):
for logicalLocation in logicalLocationList:
if logicalLocation.name == newLogicalLocation.name:
return False
return True
|
83265
|
import matplotlib.pyplot as plt
import numpy as np
class RefDataType:
def __init__(self,length,steps,coverage,cv,marker,color,label):
self.length = length
self.steps = steps
self.coverage = coverage
self.cv = cv
self.marker = marker
self.color = color
self.label = label
def get_prop(self, prop_str):
if prop_str == 'length':
return self.length
elif prop_str == 'steps':
return self.steps
elif prop_str == 'coverage':
return self.coverage
elif prop_str == 'cv':
return self.cv
elif prop_str == 'marker':
return self.marker
elif prop_str == 'color':
return self.color
elif prop_str == 'label':
return self.label
else:
return "oh crap!"
def filter_prob_vals(vals, cnts):
""" replaces cases having no instances
with last value observed, notes where they occur
for plotting
"""
prob_vals = []
for ind, v in enumerate(vals):
if v == 0 and ind != 0 and cnts[ind]==0:
vals[ind] = vals[ind-1]
prob_vals.append(ind)
return vals, prob_vals
def ref_prop_plot(ref, prop, prop_ind, ranges):
if prop == 'steps':
x_vals = get_exp_x_mids(ranges[prop_ind],2)
elif prop == 'coverage':
x_vals = get_exp_x_mids(ranges[prop_ind],10)
else:
x_vals = get_x_mids(ranges[prop_ind])
num_meas = sum([b[1] for b in ref.get_prop(prop)])
# s=[a*1000./num_meas for a in [b[1] for b in ref.get_prop(prop)]]
y_vals = [b[2] for b in ref.get_prop(prop)]
y_vals, probs = filter_prob_vals(y_vals, [b[1] for b in ref.get_prop(prop)])
for i in range(len(y_vals)):
if i == len(y_vals)-1 and prop_ind==3:
axarr[0][prop_ind].scatter(x_vals[i], y_vals[i],
marker=ref.marker, label = ref.get_prop('label'),
facecolors=ref.color, edgecolors=ref.color, s=100)
elif i not in probs:
axarr[0][prop_ind].scatter(x_vals[i], y_vals[i],
marker=ref.marker,
facecolors=ref.color, edgecolors=ref.color, s=100)
else:
axarr[0][prop_ind].scatter(x_vals[i], y_vals[i],
marker=ref.marker,
facecolors='none', edgecolors=ref.color, s=1000)
axarr[0][prop_ind].plot(x_vals, y_vals, c=ref.color)
def ref_counts_plot(ref, prop, prop_ind, ranges):
x_vals = get_x_mids(ranges[prop_ind])
row = ref.get_prop(prop)
instances = np.array([a[1] for a in row])
instances = np.true_divide(instances, sum(instances))
axarr[1][prop_ind].scatter(x_vals, instances,
c=ref.color, marker=ref.marker, s=100)
axarr[1][prop_ind].plot(x_vals, instances, c=ref.color)
def get_x_mids(rng):
return 0.5 * ( np.array(rng[:-1]) + np.array(rng[1:]) )
def get_exp_x_mids(rng, base):
if base == 10:
vals = np.log10(rng)
else:
vals = np.log2(rng)
return base**get_x_mids(vals)
length_x_rng = [0,4000,8000,12000,16000,20000]
step_x_rng = [1,2,4,8,16,32]
cov_x_rng = [1,10,100,1e3,1e4,1e5]
cv_x_rng = [0, 0.05, 0.1, 0.15, 0.20, 0.25]
ranges = [length_x_rng, step_x_rng, cov_x_rng, cv_x_rng]
# input data - from alignment summary (makefile) output
# rows 0 = length, 1 = steps, 2 = coverage, 3 = CV
# plotting args: 4 = marker, 5 = color, 6 = label
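# each tuple appears to be (matched, total, fraction) for one bin, e.g. (40, 46, 0.87) ~ 40/46
# (inferred from the values; not stated in the original)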
ref_100 = RefDataType(
[(40, 46, 0.87), (19, 21, 0.9), (3, 6, 0.5), (4, 6, 0.67), (6, 7, 0.86)],
[(59, 59, 1.0), (6, 11, 0.55), (3, 10, 0.3), (3, 4, 0.75), (1, 2, 0.5)],
[(4, 4, 1.0), (39, 49, 0.8), (22, 26, 0.85), (7, 7, 1.0), (0, 0, 0)],
[(67, 73, 0.92), (4, 5, 0.8), (1, 4, 0.25), (0, 3, 0.0), (0, 1, 0.0)],
'^', 'y', '100'
)
ref_200 = RefDataType(
[(59, 73, 0.81), (35, 46, 0.76), (6, 11, 0.55), (3, 6, 0.5), (4, 6, 0.67)],
[(82, 83, 0.99), (3, 12, 0.25), (12, 24, 0.5), (7, 16, 0.44), (3, 7, 0.43)],
[(6, 8, 0.75), (55, 71, 0.77), (37, 52, 0.71), (8, 10, 0.8), (1, 1, 1.0)],
[(90, 95, 0.95), (7, 17, 0.41), (4, 11, 0.36), (3, 7, 0.43), (3, 11, 0.27)],
'v', 'g', '200'
)
ref_400 = RefDataType(
[(98, 118, 0.83), (62, 79, 0.78), (22, 27, 0.81), (13, 18, 0.72), (10, 12, 0.83)],
[(146, 147, 0.99), (24, 35, 0.69), (17, 39, 0.44), (11, 19, 0.58), (7, 13, 0.54)],
[(17, 22, 0.77), (105, 135, 0.78), (67, 77, 0.87), (11, 14, 0.79), (5, 6, 0.83)],
[(174, 188, 0.93), (13, 23, 0.57), (7, 14, 0.5), (8, 14, 0.57), (3, 15, 0.2)],
'h', 'c', '400'
)
ref_800 = RefDataType(
[(193, 236, 0.82), (107, 145, 0.74), (39, 55, 0.71), (18, 28, 0.64), (8, 13, 0.62)],
[(271, 278, 0.97), (39, 83, 0.47), (30, 58, 0.52), (10, 24, 0.42), (13, 27, 0.48)],
[(30, 46, 0.65), (174, 230, 0.76), (127, 162, 0.78), (21, 26, 0.81), (6, 6, 1.0)],
[(310, 345, 0.9), (25, 53, 0.47), (12, 27, 0.44), (7, 23, 0.3), (11, 27, 0.41)],
's', 'r', '800'
)
ref_1600 = RefDataType(
[(325, 404, 0.8), (181, 225, 0.8), (46, 72, 0.64), (35, 53, 0.66), (19, 25, 0.76)],
[(432, 442, 0.98), (72, 130, 0.55), (48, 95, 0.51), (29, 58, 0.5), (19, 40, 0.47)],
[(70, 104, 0.67), (253, 328, 0.77), (134, 173, 0.77), (119, 137, 0.87), (20, 23, 0.87)],
[(500, 548, 0.91), (46, 71, 0.65), (25, 50, 0.5), (23, 62, 0.37), (12, 48, 0.25)],
'o', 'b', '1600'
)
# plots
props = ['length', 'steps', 'coverage', 'cv']
f, axarr = plt.subplots(2, len(props), sharey='row', sharex='col')
# axarr[0].set_xticklabels(labels)
for ind, prop in enumerate(props):
for ref in [ref_100, ref_200, ref_400, ref_800, ref_1600]:
# print ref, prop, ind
ref_prop_plot(ref, prop, ind, ranges)
ref_counts_plot(ref, prop, ind, ranges)
if prop == 'cv':
axarr[0][ind].set_xlim(left=0, right=0.25)
axarr[1][ind].set_xlim(left=0, right=0.25)
if prop == 'steps':
axarr[0][ind].set_xscale('log', basex=2)
axarr[0][ind].set_xlim(1,32)
axarr[1][ind].set_xscale('log', basex=2)
axarr[1][ind].set_xlim(1,32)
if prop == 'coverage':
axarr[0][ind].set_xscale('log', basex=10)
axarr[1][ind].set_xscale('log', basex=10)
axarr[0][ind].set_title(prop.upper())
# x_vals = get_x_mids(ranges[ind])
# row = ref_1600.get_prop(prop)
# instances = np.array([a[1] for a in row])
# instances = np.true_divide(instances, sum(instances))
# axarr[1][ind].scatter(x_vals, instances,
# c=ref_1600.color, marker=ref_1600.marker, s=100)
# axarr[1][ind].plot(x_vals, instances, c=ref_1600.color)
# legend - put it on the rightmost
axarr[0][ind].legend()
plt.show()
|
83291
|
import pandas
import pkg_resources
from unittest import TestCase
from dfs.nba.expansion import get_expansion_targets, encode_names, expand_nba_data, discretize_data
class ExpansionTestCase(TestCase):
def setUp(self):
# A little test data from the past few years, useful for testing BREF data
testfn = pkg_resources.resource_filename(__name__, 'test.pickle')
self.data = pandas.read_pickle(testfn)
# More recent test data -- used for testing external data
recentfn = pkg_resources.resource_filename(__name__, 'recent.pickle')
self.recentdata = pandas.read_pickle(recentfn)
# grab only one entry from each for super simple testing
self.ezdata = pandas.concat([self.data.tail(1), self.recentdata.tail(1)])
def test_get_expansion_targets(self):
live_targets = list(get_expansion_targets(expanding_live=False))
old_targets = list(get_expansion_targets())
# Check types
for name, (fn, targets) in live_targets + old_targets:
assert isinstance(name, basestring)
assert hasattr(fn, '__call__')
assert isinstance(targets, list)
for i in targets:
assert isinstance(i, basestring)
def test_encode_names(self):
self.assertDictEqual({"A":"FeatureName:A", "B":"FeatureName:B", "C":"FeatureName:C"},
encode_names("FeatureName",["A","B","C"]))
def test_expansion(self):
# basically just make sure these don't crash
expand_nba_data(self.data, live=False)
expand_nba_data(self.recentdata, live=True)
ez_expand = expand_nba_data(self.ezdata, live=False)
ez_expand_live = expand_nba_data(self.ezdata, live=True)
        # These exact-count assertions kept breaking whenever the test data was updated, so they are disabled.
#self.maxDiff = None
#count_dict = {'bref_id': 2, u'FT': 2, 'NF:STL': 1, 'OpponentLast2Weeks:MaxFPPMP': 2, u'3P': 2, u'TOV': 2, 'OpponentLast2Weeks:MaxFPPG': 2, u'Tm': 2, u'GmSc': 2, u'FG': 2, u'3PA': 2, u'DRB': 2, u'Rk': 2, 'NF:BLK': 1, u'Opp': 2, u'AST': 2, u'HomeAway': 0, u'FT%': 1, 'NF:Minutes': 1, u'PF': 2, 'NF:TOV': 1, u'PTS': 2, u'FGA': 2, 'Vegas:Spread': 2, 'OpponentLast2Weeks:AvgFPPG': 2, u'GS': 2, u'G': 2, 'NF:FP': 1, u'STL': 2, 'Last5:PPG': 2, u'Age': 2, u'TRB': 2, u'DFS': 1, u'FTA': 2, u'BLK': 2, 'date': 2, u'FG%': 2, 'OpponentLast2Weeks:AvgFPPMP': 2, 'Vegas:OverUnder': 2, u'+/-': 2, u'WinLoss': 2, 'NF:PTS': 1, 'Target:FDFP': 2, 'NF:REB': 1, 'NF:AST': 1, u'MP': 2, 'NF:PF': 1, 'OpponentLast2Weeks:FPPMP': 2, u'ORB': 2, u'3P%': 2, 'Salary:FanDuel Salary':1}
#self.assertDictEqual(count_dict, ez_expand.count().to_dict())
#live_count_dict = {'bref_id': 2, u'FT': 2, 'NF:STL': 1, 'OpponentLast2Weeks:MaxFPPMP': 2, u'3P': 2, u'TOV': 2, 'OpponentLast2Weeks:MaxFPPG': 2, u'Tm': 2, u'GmSc': 2, u'FG': 2, u'3PA': 2, u'DRB': 2, u'Rk': 2, 'NF:BLK': 1, u'Opp': 2, u'AST': 2, u'HomeAway': 0, u'FT%': 1, 'NF:Minutes': 1, u'PF': 2, 'NF:TOV': 1, u'PTS': 2, u'FGA': 2, 'Vegas:Spread': 2, 'OpponentLast2Weeks:AvgFPPG': 2, u'GS': 2, u'G': 2, 'NF:FP': 1, u'STL': 2, 'Last5:PPG': 2, u'Age': 2, u'TRB': 2, u'DFS': 1, u'FTA': 2, u'BLK': 2, 'date': 2, u'FG%': 2, 'OpponentLast2Weeks:AvgFPPMP': 2, 'Vegas:OverUnder': 1, u'+/-': 2, u'WinLoss': 2, 'NF:PTS': 1, 'NF:PF': 1, 'NF:REB': 1, 'NF:AST': 1, u'MP': 2, 'OpponentLast2Weeks:FPPMP': 2, u'ORB': 2, u'3P%': 2}
#self.assertDictEqual(live_count_dict, ez_expand_live.count().to_dict())
def test_discretization(self):
stadium_series = pandas.Series(data=["Lambeau", "Levis", "Qwest"]) # Pretend this is an expanded field
awesomeness_series = pandas.Series(data=[100, 30, 0]) # this is a continuous field
name_series = pandas.Series(data=["Packers", "49ers", "Seahawks"]) # and this is a not-expanded discrete field
df = pandas.DataFrame.from_dict({"Team:Stadium": stadium_series,
"Awesomeness": awesomeness_series,
"Team Name": name_series})
discretized = discretize_data(df)
# make sure only the expanded discrete fields were discretized
self.assertItemsEqual(["Team:Stadium=Lambeau","Team:Stadium=Levis","Team:Stadium=Qwest","Awesomeness","Team Name"],
discretized.columns)
|
83301
|
from __future__ import annotations
import argparse
import json
import logging
import os
from datetime import datetime
from datetime import timedelta
import git
import humanfriendly
from datalad.plugin import export_archive
from github import Github
from scripts.datalad_utils import get_dataset
from scripts.datalad_utils import install_dataset
from scripts.datalad_utils import uninstall_dataset
from scripts.log import get_logger
from tests.functions import get_proper_submodules
logger = get_logger(
"CONP-Archive", filename="conp-archive.log", file_level=logging.DEBUG
)
class ArchiveFailed(Exception):
pass
def parse_args():
example_text = """Example:
PYTHONPATH=$PWD python scripts/auto_archive.py <out_dir>
"""
parser = argparse.ArgumentParser(
description="Archiver for the CONP-datasets.",
epilog=example_text,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"--out_dir", "-o", type=str, help="Path to store the archived datasets."
)
parser.add_argument(
"--max-size",
type=float,
help="Maximum size of dataset to archive in GB.",
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--all",
action="store_true",
help="Archive all the datasets rather than those modified since the last time.",
)
group.add_argument(
"--dataset",
"-d",
type=str,
nargs="+",
help="Restrict the archive to the specified dataset paths.",
)
return parser.parse_args()
def get_datasets_path():
return {
os.path.basename(submodule.path): submodule.path
for submodule in git.Repo().submodules
if submodule.path.startswith("projects")
}
def get_modified_datasets(
*,
since: datetime | None = None,
until: datetime | None = None,
) -> set[str]:
"""Retrieve the modified datasets.
    Requires GITHUB_ACCESS_TOKEN to be set as an environment variable.
Parameters
----------
since : Optional[datetime], optional
        Start date from which commits are retrieved; defaults to the date of the previous crawl, or to
        one week ago if never crawled.
until : Optional[datetime], optional
        Latest date at which commits are retrieved, by default `now`
Returns
-------
set[str]
Path of the dataset folders.
"""
now = datetime.now().astimezone()
if since is None:
if os.path.exists(".conp-archive"):
with open(".conp-archive") as fin:
since = datetime.fromisoformat(fin.read())
else:
since = now - timedelta(weeks=1)
if until is None:
until = now
try:
gh_access_token = os.environ.get("GITHUB_ACCESS_TOKEN", None)
if gh_access_token is None:
raise OSError("GITHUB_ACCESS_TOKEN is not defined.")
except OSError as e:
        # The program is not stopped since GitHub still allows 60 queries per hour without
        # authentication. However, the program will most likely fail.
logger.critical(e)
logger.info(f"Retrieving modified datasets since {since}")
repo = Github(gh_access_token).get_repo("CONP-PCNO/conp-dataset")
commits = repo.get_commits(since=since, until=until)
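    # Record this crawl's timestamp so the next run only considers commits made after it.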
with open(".conp-archive", "w") as fout:
fout.write(now.isoformat())
modified_datasets: set[str] = {
os.path.basename(file_.filename)
for commit in commits
for file_ in commit.files
if file_.filename.startswith("projects/")
}
return modified_datasets
def archive_dataset(
dataset_path: str, out_dir: str, archive_name: str, version: str
) -> None:
os.makedirs(out_dir, mode=0o755, exist_ok=True)
out_filename = os.path.join(out_dir, f"{archive_name}_version-{version}.tar.gz")
logger.info(f"Archiving dataset: {dataset_path} to {out_filename}")
cwd = os.getcwd()
try:
datalad_archiver = export_archive.ExportArchive()
dataset_repo = git.Repo(dataset_path)
with open(os.path.join(dataset_path, ".git.log"), "w") as fout:
fout.write(dataset_repo.git.log(pretty="format:%H %s"))
# Export is performed from the dataset root.
# This is to avoid failure when a submodule is not downloaded; e.g. for parent
# dataset in dataset derivative.
os.chdir(os.path.join(cwd, dataset_path))
datalad_archiver(".", filename=out_filename)
except Exception as e:
raise ArchiveFailed(
f"FAILURE: could not archive dataset: {dataset_path} to {out_filename}\n{e}"
)
finally:
os.chdir(cwd)
if __name__ == "__main__":
args = parse_args()
# Only archive the datasets available locally.
datasets_path = get_datasets_path()
datasets = datasets_path.keys()
if args.dataset:
target_datasets = {os.path.basename(os.path.normpath(d)) for d in args.dataset}
logger.warning(
f"The following dataset were not found locally: {target_datasets - datasets}"
)
datasets &= target_datasets
elif not args.all:
modified_datasets = get_modified_datasets()
logger.warning(
f"The following dataset were not found locally: {modified_datasets - datasets}"
)
datasets &= modified_datasets
for dataset_name in datasets:
dataset = datasets_path[dataset_name]
try:
logger.info(f"Installing dataset: {dataset}")
install_dataset(dataset)
is_public = False
version = ""
dataset_size = 0.0
with open(os.path.join(dataset, "DATS.json")) as fin:
metadata = json.load(fin)
is_public = (
metadata.get("distributions", [{}])[0]
.get("access", {})
.get("authorizations", [{}])[0]
.get("value")
== "public"
)
version = metadata.get("version")
for distribution in metadata.get("distributions", list()):
dataset_size += humanfriendly.parse_size(
f"{distribution['size']} {distribution['unit']['value']}",
)
dataset_size //= 1024 ** 3 # Convert to GB
# Only archive public dataset less than a specific size if one is provided to the script
if is_public:
if args.max_size is None or dataset_size <= args.max_size:
logger.info(f"Downloading dataset: {dataset}")
get_dataset(dataset)
for submodule in get_proper_submodules(dataset):
get_dataset(submodule)
archive_name = "__".join(
os.path.relpath(dataset, "projects").split("/")
)
archive_dataset(
dataset,
out_dir=args.out_dir,
archive_name=archive_name,
version=version,
)
# to save space on the VM that archives the dataset, need to uninstall
# the datalad dataset. `datalad drop` does not free up enough space
# unfortunately. See https://github.com/datalad/datalad/issues/6009
uninstall_dataset(dataset)
logger.info(f"SUCCESS: archive created for {dataset}")
else:
logger.info(f"SKIPPED: {dataset} larger than {args.max_size} GB")
else:
logger.info(
f"SKIPPED: archive not needed for {dataset}. Non-public dataset."
)
except Exception as e:
# TODO implement notification system.
# This will alert when a dataset fails the archiving process.
logger.exception(
f"FAILURE: could not archive dataset: {dataset} to {args.out_dir}.tar.gz\n{e}"
)
logger.info("Done archiving the datasets.")
|
83331
|
from __future__ import print_function
from six import iteritems, integer_types
from itertools import chain
import ipyparallel as ipp
from ipyparallel.client.client import ExecuteReply
# Remotely-called function; imports requirement internally.
def dummyTask(key):
from os import getpid
from time import sleep
from ipyparallel.datapub import publish_data
publish_data({key: 'running'})
sleep(2 * key)
publish_data({key: 'finishing'})
sleep(2)
return {'key': key, 'pid': getpid()}
# Default time to wait in main hub polling/result processing loop
SLEEP_SECONDS = 3
class BaseMaster(object):
instance = None
@classmethod
def getInstance(cls, profile=None, cluster_id=None, sleepSeconds=SLEEP_SECONDS):
"""
Return singleton instance of this class
"""
if not cls.instance:
cls.instance = cls(profile=profile, cluster_id=cluster_id)
cls.instance.setSleepSeconds(sleepSeconds)
return cls.instance
def __init__(self, profile=None, cluster_id=None):
self.client = ipp.Client(profile=profile, cluster_id=cluster_id)
self.statusDict = {}
self.sleepSeconds = SLEEP_SECONDS
self.keyField = 'key'
def setSleepSeconds(self, secs):
self.sleepSeconds = secs
def clearStatus(self):
self.statusDict = {}
def setStatus(self, key, status):
self.statusDict[key] = status
def queueTotals(self):
"""
Return totals for queue status across all engines
"""
try:
dv = self.client[:]
except ipp.NoEnginesRegistered as e:
print('queueTotals: %s' % e)
return
qstatus = dv.queue_status()
totals = dict(queue=0, completed=0, tasks=0)
for id, stats in iteritems(qstatus):
if id == u'unassigned':
totals[id] = stats
else:
for key, count in iteritems(stats):
totals[key] += count
return totals
def runningTasks(self):
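        # Engine entries are keyed by integer id; the string 'unassigned' entry is skipped by the isinstance check below.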
qstatus = self.client.queue_status(verbose=True)
        ids = [rec['tasks'] for key, rec in iteritems(qstatus) if isinstance(key, integer_types)]
return list(chain.from_iterable(ids))
def completedTasks(self):
recs = self.client.db_query({'completed': {'$ne': None}}, keys=['msg_id'])
ids = [rec['msg_id'] for rec in recs] if recs else None
return ids
def getResults(self, tasks):
if not tasks:
return None
client = self.client
ar = client.get_result(tasks, owner=True, block=False)
try:
results = ar.get()
except Exception as e:
print('getResults: %s' % e)
return
client.purge_results(jobs=tasks) # so we don't see them again
# filter out results from execute commands (e.g. imports)
results = [r[0] for r in results if r and not isinstance(r, ExecuteReply)]
return results
def runTasks(self, count, clearStatus=False):
if clearStatus:
self.clearStatus()
view = self.client.load_balanced_view()
arList = []
for key in range(1, count + 1):
ar = view.apply_async(dummyTask, key)
arList.append(ar)
self.setStatus(key, 'queued')
return arList
def checkRunning(self):
running = self.runningTasks()
if running:
try:
# _logger.debug("Found %d running tasks", len(running))
ar = self.client.get_result(running, block=False)
statusDict = self.statusDict
# print('statusDict:', statusDict)
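                # ar.data aggregates the dicts that the running engines published via publish_data().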
for dataDict in ar.data:
for key, status in iteritems(dataDict):
currStatus = statusDict.get(key)
if currStatus != status:
self.setStatus(key, status)
except Exception as e:
print("checkRunning: %s" % e)
return
def processResult(self, result):
key = result[self.keyField]
self.setStatus(key, 'completed')
print("Completed", key)
def processResults(self):
from time import sleep
while True:
sleep(self.sleepSeconds)
self.checkRunning()
tot = self.queueTotals()
print(tot)
if not (tot['queue'] or tot['tasks'] or tot['unassigned']) and \
not self.completedTasks():
return
completed = self.completedTasks()
if completed:
results = self.getResults(completed)
if not results:
print("Completed tasks have no results: engine died?")
continue # is this recoverable?
for result in results:
self.processResult(result)
if __name__ == '__main__':
#
# Test with custom worker func and subclass
#
def runTrial(argDict):
from time import sleep
from random import random
from ipyparallel.datapub import publish_data
def randomSleep(minSleep, maxSleep):
delay = minSleep + random() * (maxSleep - minSleep)
sleep(delay)
argDict['slept'] = '%.2f' % delay
runId = argDict['runId']
publish_data({runId: 'running'})
randomSleep(10, 15)
publish_data({runId: 'finishing'})
sleep(2)
return argDict
class NewMaster(BaseMaster):
def __init__(self, profile=None, cluster_id=None):
super(NewMaster, self).__init__(profile=profile, cluster_id=cluster_id)
self.keyField = 'runId'
def runTrials(self, tuples, clearStatus=False):
if clearStatus:
self.clearStatus()
view = self.client.load_balanced_view(retries=1) # in case engine fails, retry job once only
asyncResults = []
argDict = {}
try:
for runId, trialNum in tuples:
argDict['trialNum'] = trialNum
argDict['runId'] = runId
# Easier to deal with a list of AsyncResults than a single
# instance that contains info about all "future" results.
result = view.map_async(runTrial, [argDict])
asyncResults.append(result)
self.setStatus(runId, 'queued')
except Exception as e:
print("Exception running 'runTrial': %s", e)
def processResult(self, result):
key = result[self.keyField]
self.setStatus(key, 'completed')
print("Completed", result)
testBase = False
profile = None
cluster_id = None
if testBase:
m = BaseMaster.getInstance(sleepSeconds=5)
m.runTasks(6, clearStatus=True)
else:
m = NewMaster.getInstance(sleepSeconds=3, profile=profile, cluster_id=cluster_id)
tuples = [(runId, trialNum) for runId, trialNum in enumerate(range(1000, 1020))]
m.runTrials(tuples, clearStatus=True)
m.processResults()
print('Status:')
d = m.statusDict
for runId in sorted(d.keys()):
print(' runId %s: %s' %(runId, d[runId]))
|
83343
|
import jiwer
import jiwer.transforms as tr
from jiwer import compute_measures
from typing import List
def compute_wer(predictions=None, references=None, concatenate_texts=False):
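    # WER = (substitutions + deletions + insertions) / (substitutions + deletions + hits)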
if concatenate_texts:
        return compute_measures(references, predictions)["wer"]
else:
incorrect = 0
total = 0
for prediction, reference in zip(predictions, references):
measures = compute_measures(reference, prediction)
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
class SentencesToListOfCharacters(tr.AbstractTransform):
def process_string(self,s):
return list(s)
def process_list(self, inp: List[str]):
chars = []
for sentence in inp:
chars.extend(self.process_string(sentence))
return chars
cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
SentencesToListOfCharacters(),
]
)
def compute_cer(predictions, references, concatenate_texts=False):
if concatenate_texts:
return jiwer.wer(
references,
predictions,
truth_transform=cer_transform,
hypothesis_transform=cer_transform,
)
incorrect = 0
total = 0
for prediction, reference in zip(predictions, references):
measures = jiwer.compute_measures(
reference,
prediction,
truth_transform=cer_transform,
hypothesis_transform=cer_transform,
)
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
if __name__ == "__main__":
print(compute_wer(['my name is'],['my name']))
|
83366
|
from powerlift.bench import Experiment, Store
from powerlift.executors.docker import InsecureDocker
from powerlift.executors.localmachine import LocalMachine
from powerlift.executors.azure_ci import AzureContainerInstance
import pytest
import os
def _add(x, y):
return x + y
def _err_handler(e):
raise e
def _trials(task):
if task.problem == "binary" and task.scalar_measure("n_rows") <= 10000:
return ["rf", "svm"]
return []
def _benchmark(trial):
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, FunctionTransformer
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
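    # Only benchmark binary-classification tasks that originate from OpenML.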
if trial.task.problem == "binary" and trial.task.origin == "openml":
X, y, meta = trial.task.data(["X", "y", "meta"])
# Holdout split
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3)
# Build preprocessor
is_cat = meta["categorical_mask"]
cat_cols = [idx for idx in range(X.shape[1]) if is_cat[idx]]
num_cols = [idx for idx in range(X.shape[1]) if not is_cat[idx]]
cat_ohe_step = ("ohe", OneHotEncoder(sparse=True, handle_unknown="ignore"))
cat_pipe = Pipeline([cat_ohe_step])
num_pipe = Pipeline([("identity", FunctionTransformer())])
transformers = [("cat", cat_pipe, cat_cols), ("num", num_pipe, num_cols)]
ct = Pipeline(
[
("ct", ColumnTransformer(transformers=transformers)),
(
"missing",
SimpleImputer(add_indicator=True, strategy="most_frequent"),
),
]
)
# Connect preprocessor with target learner
if trial.method.name == "svm":
clf = Pipeline([("ct", ct), ("est", CalibratedClassifierCV(LinearSVC()))])
else:
clf = Pipeline([("ct", ct), ("est", RandomForestClassifier())])
# Train
clf.fit(X_tr, y_tr)
# Predict
predictions = clf.predict_proba(X_te)[:, 1]
# Score
auc = roc_auc_score(y_te, predictions)
trial.log("auc", auc)
def test_multiprocessing():
"""This tests exists to ensure there is no hang in pytest."""
from multiprocessing.pool import Pool
pool = Pool()
results = []
num_tasks = 32
for i in range(num_tasks):
result = pool.apply_async(_add, (i, i), error_callback=_err_handler)
results.append(result)
counter = 0
for i in range(num_tasks):
counter += results[i].get()
assert counter == 992
pool.close()
# def test_scikit_experiment_aci(populated_azure_store):
@pytest.mark.skip("Remove this when testing ACI.")
def test_scikit_experiment_aci():
"""
As of 2022-06-09:
- Takes roughly 20 seconds to submit 10 tasks.
- Roughly 80 seconds for first runs to return.
- 180 seconds to complete (5 parallel containers).
"""
from dotenv import load_dotenv
load_dotenv()
azure_tenant_id = os.getenv("AZURE_TENANT_ID")
azure_client_id = os.getenv("AZURE_CLIENT_ID")
azure_client_secret = os.getenv("AZURE_CLIENT_SECRET")
subscription_id = os.getenv("AZURE_SUBSCRIPTION_ID")
resource_group = os.getenv("AZURE_RESOURCE_GROUP")
store = Store(os.getenv("AZURE_DB_URL"), force_recreate=False)
# store = populated_azure_store
executor = AzureContainerInstance(
store,
azure_tenant_id,
azure_client_id,
azure_client_secret,
subscription_id,
resource_group,
n_running_containers=5,
num_cores=1,
mem_size_gb=2,
raise_exception=True,
)
experiment = Experiment(store)
executor = experiment.run(_benchmark, _trials, timeout=10, executor=executor)
executor.join()
def test_scikit_experiment_debug(populated_store):
store = populated_store
executor = LocalMachine(store, n_cpus=1, raise_exception=True)
experiment = Experiment(store, name="scikit")
executor = experiment.run(_benchmark, _trials, timeout=10, executor=executor)
executor.join()
def test_scikit_experiment_local(populated_store):
store = populated_store
executor = LocalMachine(store, n_cpus=2)
experiment = Experiment(store, name="scikit")
executor = experiment.run(_benchmark, _trials, timeout=10, executor=executor)
executor.join()
def test_scikit_experiment_docker(populated_store):
from dotenv import load_dotenv
load_dotenv()
uri = os.getenv("DOCKER_DB_URL")
executor = InsecureDocker(
populated_store, n_running_containers=2, docker_db_uri=uri
)
experiment = Experiment(populated_store, name="scikit")
executor = experiment.run(_benchmark, _trials, timeout=10, executor=executor)
executor.join()
|
83387
|
from struct import pack
from World.WorldPacket.Constants.WorldOpCode import WorldOpCode
from Server.Connection.Connection import Connection
class InitialSpells(object):
def __init__(self, **kwargs):
self.data = kwargs.pop('data', bytes())
self.connection: Connection = kwargs.pop('connection')
async def process(self) -> tuple:
response = self._get_response()
return WorldOpCode.SMSG_INITIAL_SPELLS, [response]
def _get_response(self):
data = bytes()
num_spells = len(self.connection.player.spells)
data += pack(
'<BH',
0, # unk
num_spells # spell count
)
count = 1
for spell in self.connection.player.spells:
data += pack(
'<2H',
spell.spell_template.entry,
0
)
count += 1
data += pack(
'<2H',
num_spells,
0
)
# now = int(time.time())
#
# for spell in session.player.spells:
# values = 0
# data += pack(
# '<3H2I',
# spell.entry, # spell entry
# 0, # spell category
# 0, # item id
# now, # cooldown
# 0 | 0x80000000 # cooldown category
# )
# data += pack(
# '<2H',
# 0,
# 0
# )
return data
|
83446
|
class VehiclesDataset:
def __init__(self):
self.num_vehicle = 0
self.num_object = 0
self.num_object_with_kp = 0
self.vehicles = dict()
self.valid_ids = set()
self.mean_shape = None
self.pca_comp = None
self.camera_mtx = None
self.image_names = None
self.data_dir = None
self.mean_traj = None
self.cov_traj = None
self.plane = None
def __str__(self):
return "Vehicle Dataset: {} vehicles, {} objects".format(self.num_vehicle, self.num_of_objects())
def insert_vehicle(self, id, vehicle):
self.vehicles[id] = vehicle
self.valid_ids.add(id)
        # Note: valid_ids is a set and therefore unordered; sort it on demand where ordering matters.
self.num_vehicle += 1
def get_vehicle(self, query_id):
if query_id not in self.valid_ids:
return None
else:
return self.vehicles[query_id]
def size(self):
return self.num_vehicle
def contains(self, query_id):
return query_id in self.valid_ids
def num_of_objects(self):
num = 0
for k, v in self.vehicles.items():
num += v.num_objects
self.num_object = num
return num
def num_of_objects_with_kp(self):
num = 0
for k, v in self.vehicles.items():
num += v.num_objects_with_kp
self.num_object_with_kp = num
return num
class Vehicle:
def __init__(self, image_path, keypoint, bbox, image_id, keypoint_pool):
self.num_objects = 0
self.num_objects_with_kp = 0
self.id = None
self.frames = dict()
self.image_paths = dict()
self.keypoints = dict()
self.keypoints_backup = dict()
self.keypoints_det2 = dict()
self.keypoints_proj2 = dict()
self.bboxs = dict()
self.image_ids = dict()
self.rt = dict()
self.keypoints_pool = dict()
self.insert_object(image_path, keypoint, bbox, image_id, keypoint_pool)
self.pca = [0.0] * 5
        self.shape = [[0.0, 0.0, 0.0] for _ in range(12)]  # 12 keypoints, each an [x, y, z] triple
self.spline = None # np.zeros((6, ))
self.spline_points = None
self.spline_predict = None # np.zeros((6, ))
self.spline_points_predict = None
self.rt_traj = dict()
self.rotation_world2cam = None
self.translation_world2cam = None
self.first_appearance_frame_id = None
self.stop_frame_range = None
self.first_move_frame_id = None
self.first_appearance_frame_time_pred = None
self.traj_cluster_id = None
def __str__(self):
return "ID: {}, with {} objects".format(self.id, self.num_objects) + ', PCA: [' + \
', '.join(["{0:0.2f}".format(i) for i in self.pca]) + ']'
def insert_object(self, image_path, keypoint, bbox, image_id, keypoint_pool=None, backup=False):
if image_path in self.image_paths:
print('{} is already contained, discard!'.format(image_path))
return None
else:
object_id = self.num_objects
self.image_paths[object_id] = image_path
self.frames[object_id] = int(image_path[-8:-4])
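            # Frame number is taken from the last four digits of the filename stem (assumes a 4-character extension such as .jpg).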
self.image_ids[object_id] = image_id
if backup:
self.keypoints_backup[object_id] = keypoint
else:
self.keypoints_backup[object_id] = None
self.keypoints[object_id] = keypoint
self.bboxs[object_id] = bbox
self.rt[object_id] = None
self.keypoints_pool[object_id] = keypoint_pool
self.num_objects += 1
if keypoint is not None:
self.num_objects_with_kp += 1
return object_id
def set_id(self, init_id):
self.id = init_id
def set_pca(self, pca):
        if type(pca) is not list or len(pca) != 5:
raise Warning("PCA component should be list of length 5")
else:
self.pca = pca
def set_3d_shape(self, shape):
        if type(shape) is not list or len(shape) != 12:
raise Warning("3D shape should be list of length 12, each has [x, y, z]")
else:
self.shape = shape
def set_rt(self, obj_id, rvec, tvec):
        if type(rvec) is not list or len(rvec) != 3 or type(tvec) is not list or len(tvec) != 3:
raise Warning("rvec and tvec should be list of length 3.")
elif obj_id >= self.num_objects:
raise Warning("object id doesnot exist.")
else:
self.rt[obj_id] = [rvec, tvec]
def set_keypoints(self, obj_id, keypoints, backup=False):
        if len(keypoints) != 12:
# if type(keypoints) is not list or len(keypoints) is not 12:
raise Warning("keypoints should be list of length 12.")
elif obj_id >= self.num_objects:
raise Warning("object id doesnot exist.")
else:
if backup:
self.keypoints_backup[obj_id] = self.keypoints[obj_id]
self.keypoints[obj_id] = keypoints
def set_keypoints_cam2(self, obj_id, keypoints, det=True):
        if len(keypoints) != 12:
raise Warning("keypoints should be list of length 12.")
elif obj_id >= self.num_objects:
raise Warning("object id doesnot exist.")
else:
if det:
self.keypoints_det2[obj_id] = keypoints
else:
self.keypoints_proj2[obj_id] = keypoints
|
83448
|
import logging.config
import os
import structlog
from node_launcher.constants import NODE_LAUNCHER_DATA_PATH, OPERATING_SYSTEM
timestamper = structlog.processors.TimeStamper(fmt='%Y-%m-%d %H:%M:%S')
pre_chain = [
# Add the log level and a timestamp to the event_dict if the log entry
# is not from structlog.
structlog.stdlib.add_log_level,
timestamper,
]
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'plain': {
'()': structlog.stdlib.ProcessorFormatter,
'processor': structlog.dev.ConsoleRenderer(colors=False),
'foreign_pre_chain': pre_chain,
},
'colored': {
'()': structlog.stdlib.ProcessorFormatter,
'processor': structlog.dev.ConsoleRenderer(colors=True),
'foreign_pre_chain': pre_chain,
},
},
'handlers': {
'default': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'colored',
},
'file': {
'level': 'DEBUG',
'class': 'logging.handlers.WatchedFileHandler',
'filename': os.path.join(NODE_LAUNCHER_DATA_PATH[OPERATING_SYSTEM],
'debug.log'),
'formatter': 'plain',
},
},
'loggers': {
'': {
'handlers': ['default', 'file'],
'level': 'DEBUG',
'propagate': True,
},
}
})
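# Processor that redacts any logged value whose key contains 'rpcpass' before it is rendered.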
def dropper(logger, method_name, event_dict):
for key in event_dict[0][0].keys():
if 'rpcpass' in key:
event_dict[0][0][key] = '<PASSWORD>'
return event_dict
structlog.configure(
processors=[
structlog.stdlib.add_log_level,
structlog.stdlib.PositionalArgumentsFormatter(),
timestamper,
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
dropper
],
context_class=dict,
logger_factory=structlog.stdlib.LoggerFactory(),
wrapper_class=structlog.stdlib.BoundLogger,
cache_logger_on_first_use=True,
)
log = structlog.get_logger()
|
83455
|
from .__init__ import EntroQ
from . import entroq_pb2 as pb
import click
import datetime
from datetime import timezone
import grpc
import json
from google.protobuf import json_format
class _ClickContext: pass
def _task_str_raw(task):
return EntroQ.to_dict(task)
def _task_str_json(task):
return EntroQ.to_dict(task, value_type='json')
@click.group()
@click.option('--svcaddr', default='localhost:37706', show_default=True, help='EntroQ service address')
@click.option('--json', '-j', is_flag=True, default=False, help='Values are JSON, unpack as such for display')
@click.pass_context
def main(ctx, svcaddr, json):
ctx.ensure_object(_ClickContext)
ctx.obj.addr = svcaddr
ctx.obj.task_to_str = _task_str_raw
if json:
ctx.obj.task_to_str = _task_str_json
@main.command()
@click.pass_context
@click.option('--queue', '-q', required=True, help='Queue in which to insert a task')
@click.option('--val', '-v', default='', help='Value in task to be inserted')
def ins(ctx, queue, val):
cli = EntroQ(ctx.obj.addr)
ins, _ = cli.modify(inserts=[pb.TaskData(queue=queue, value=val.encode('utf-8'))])
for t in ins:
print(ctx.obj.task_to_str(t))
@main.command()
@click.pass_context
@click.option('--prefix', '-p', default='', multiple=True, help='Queue match prefix, if filtering on prefix.')
@click.option('--queue', '-q', default='', multiple=True, help='Exact queue name, if filtering on name.')
@click.option('--limit', '-n', default=0, help='Limit number of results to return.')
def qs(ctx, prefix, queue, limit):
cli = EntroQ(ctx.obj.addr)
qs = cli.queues(prefixmatches=prefix, exactmatches=queue, limit=limit)
qdict = {s.name: json_format.MessageToDict(s) for s in qs}
print(json.dumps(qdict))
@main.command()
@click.pass_context
@click.option('--task', '-t', required=True, help='Task ID to remove')
@click.option('--force', '-f', is_flag=True, help='UNSAFE: delete task even if claimed already.')
@click.option('--retries', '-r', default=10, help='Number of times to retry if task is claimed.')
def rm(ctx, task, force, retries):
cli = EntroQ(ctx.obj.addr)
t = cli.task_by_id(task)
tid = pb.TaskID(id=t.id, version=t.version)
cli.delete(task_id=tid,
unsafe_claimant_id=t.claimant_id if force else None)
print(json_format.MessageToJson(tid))
@main.command()
@click.pass_context
@click.option('--queue', '-q', required=True, help='Queue to clear')
@click.option('--force', '-f', is_flag=True, help='UNSAFE: delete tasks even if claimed')
def clear(ctx, queue, force):
cli = EntroQ(ctx.obj.addr)
for t in cli.pop_all(queue, force=force):
print(ctx.obj.task_to_str(t))
@main.command()
@click.pass_context
@click.option('--queue', '-q', multiple=True, help='Queue to claim from (can be multiple)')
@click.option('--try', 'try_', is_flag=True, help="Only try to claim, don't block")
@click.option('--duration', '-d', default=30, help='Seconds of claim duration')
def claim(ctx, queue, try_, duration):
cli = EntroQ(ctx.obj.addr)
claim_func = cli.try_claim if try_ else cli.claim
t = claim_func(queue, duration=duration)
print(ctx.obj.task_to_str(t))
@main.command()
@click.pass_context
@click.option('--millis', '-m', is_flag=True, help="Return time in milliseconds since the Epoch UTC")
@click.option('--local', '-l', is_flag=True, help='Show local time (when not using milliseconds)')
def time(ctx, millis, local):
cli = EntroQ(ctx.obj.addr)
time_ms = cli.time()
if millis:
print(time_ms)
return
tz = None
if not local:
tz = timezone.utc
dt = datetime.datetime.fromtimestamp(time_ms / 1000.0, tz=tz)
print(dt)
@main.command()
@click.pass_context
@click.option('--queue', '-q', default='', help='Queue to list')
@click.option('--task', '-t', multiple=True, help='Task ID to list')
@click.option('--limit', '-n', default=0, help='Limit returned tasks')
@click.option('--omit_values', '-V', is_flag=True, help='Omit values in returned tasks')
def ts(ctx, queue, task, limit, omit_values):
cli = EntroQ(ctx.obj.addr)
for task in cli.tasks(queue=queue, task_ids=task, limit=limit, omit_values=omit_values):
print(ctx.obj.task_to_str(task))
@main.command()
@click.pass_context
@click.option('--task', '-t', required=True, help='Task ID to modify - modifies whatever version it finds. Use with care.')
@click.option('--queue_to', '-Q', default='', help='Change queue to this value.')
@click.option('--val', '-v', default='', help='Change to this value.')
@click.option('--force', '-f', is_flag=True, help='UNSAFE: force modification even if this task is claimed.')
def mod(ctx, task, queue_to, val, force):
cli = EntroQ(ctx.obj.addr)
t = cli.task_by_id(task)
old_id = pb.TaskID(id=t.id, version=t.version)
    new_data = pb.TaskData(queue=queue_to or t.queue,
                           value=val.encode('utf-8') if val else t.value)
_, chg = cli.modify(changes=[pb.TaskChange(old_id=old_id, new_data=new_data)],
unsafe_claimant_id=t.claimant_id if force else None)
for t in chg:
print(ctx.obj.task_to_str(t))
if __name__ == '__main__':
main(obj=_ClickContext())
|
83472
|
import numpy as np
from noduleCADEvaluationLUNA16 import noduleCADEvaluation
import os
import csv
from multiprocessing import Pool
import functools
import SimpleITK as sitk
from config_testing import config
from layers import nms
annotations_filename = './labels/new_nodule.csv'
annotations_excluded_filename = './labels/new_non_nodule.csv'# path for excluded annotations for the fold
seriesuids_filename = './labels/LIDCTestID.csv'# path for seriesuid for the fold
datapath = config['LIDC_data']
sideinfopath = '/data/LunaProj/LIDC/processed/'
nmsthresh = 0.1
bboxpath = './test_results/baseline_se_focal_newparam/bbox/' #for baseline
frocpath = './test_results/baseline_se_focal_newparam/bbox/nms' + str(nmsthresh) + '/' #_focal
outputdir = './bboxoutput/se_focal/nms' + str(nmsthresh) + '/'
#detp = [0.3, 0.4, 0.5, 0.6, 0.7]
detp = [0.3]
nprocess = 38#4
firstline = ['seriesuid', 'coordX', 'coordY', 'coordZ', 'probability']
def VoxelToWorldCoord(voxelCoord, origin, spacing):
strechedVocelCoord = voxelCoord * spacing
worldCoord = strechedVocelCoord + origin
return worldCoord
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def convertcsv(bboxfname, bboxpath, detp):
resolution = np.array([1, 1, 1])
origin = np.load(sideinfopath+bboxfname[:-8]+'_origin.npy', mmap_mode='r')
spacing = np.load(sideinfopath+bboxfname[:-8]+'_spacing.npy', mmap_mode='r')
extendbox = np.load(sideinfopath+bboxfname[:-8]+'_extendbox.npy', mmap_mode='r')
pbb = np.load(bboxpath+bboxfname, mmap_mode='r')
    check = sigmoid(pbb[:, 0]) > detp
    pbbold = np.array(pbb[check])
    # pbbold = np.array(pbb[pbb[:,0] > detp])
    pbbold = np.array(pbbold[pbbold[:, -1] > 3])  # add new 9 15
    pbb = nms(pbbold, nmsthresh)
    diam = pbb[:, -1]  # diameters of the NMS-kept candidates, aligned with the rows written below
    pbb = np.array(pbb[:, :-1])
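    # Map detections back from the cropped, 1 mm-resampled volume to original voxel indices, then to world coordinates.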
pbb[:, 1:] = np.array(pbb[:, 1:] + np.expand_dims(extendbox[:,0], 1).T)
pbb[:, 1:] = np.array(pbb[:, 1:] * np.expand_dims(resolution, 1).T / np.expand_dims(spacing, 1).T)
pos = VoxelToWorldCoord(pbb[:, 1:], origin, spacing)
rowlist = []
for nk in range(pos.shape[0]): # pos[nk, 2], pos[nk, 1], pos[nk, 0]
rowlist.append([bboxfname[:-8], pos[nk, 2], pos[nk, 1], pos[nk, 0], diam[nk], 1/(1+np.exp(-pbb[nk,0]))])
return rowlist
def getfrocvalue(results_filename, outputdir):
return noduleCADEvaluation(annotations_filename,annotations_excluded_filename,seriesuids_filename,results_filename,outputdir)
def getcsv(detp):
if not os.path.exists(frocpath):
os.makedirs(frocpath)
for detpthresh in detp:
print ('detp', detpthresh)
f = open(frocpath + 'predanno'+ str(detpthresh) + '.csv', 'w')
fwriter = csv.writer(f)
fwriter.writerow(firstline)
fnamelist = []
for fname in os.listdir(bboxpath):
if fname.endswith('_pbb.npy'):
fnamelist.append(fname)
# print fname
# for row in convertcsv(fname, bboxpath, k):
# fwriter.writerow(row)
# # return
print(len(fnamelist))
predannolist = p.map(functools.partial(convertcsv, bboxpath=bboxpath, detp=detpthresh), fnamelist)
# print len(predannolist), len(predannolist[0])
for predanno in predannolist:
# print predanno
for row in predanno:
# print row
fwriter.writerow(row)
f.close()
def getfroc(detp):
predannofnamalist = []
outputdirlist = []
for detpthresh in detp:
predannofnamalist.append(outputdir + 'predanno'+ str(detpthresh) + '.csv')
outputpath = outputdir + 'predanno'+ str(detpthresh) +'/'
outputdirlist.append(outputpath)
if not os.path.exists(outputpath):
os.makedirs(outputpath)
# froclist = p.map(getfrocvalue, predannofnamalist, outputdirlist)
froclist = []
for i in range(len(predannofnamalist)):
froclist.append(getfrocvalue(predannofnamalist[i], outputdirlist[i]))
np.save(outputdir+'froclist.npy', froclist)
if __name__ == '__main__':
p = Pool(nprocess)
getcsv(detp)
# getfroc(detp)
p.close()
print('finished!')
|
83474
|
import argparse
import cv2
import numpy as np
import torch
import kornia as K
from kornia.contrib import FaceDetector, FaceDetectorResult, FaceKeypoint
def draw_keypoint(img: np.ndarray, det: FaceDetectorResult, kpt_type: FaceKeypoint) -> np.ndarray:
kpt = det.get_keypoint(kpt_type).int().tolist()
return cv2.circle(img, kpt, 2, (255, 0, 0), 2)
def scale_image(img: np.ndarray, size: int) -> np.ndarray:
h, w = img.shape[:2]
scale = 1. * size / w
return cv2.resize(img, (int(w * scale), int(h * scale)))
def my_app():
# select the device
device = torch.device('cpu')
if args.cuda and torch.cuda.is_available():
device = torch.device('cuda:0')
torch.backends.cudnn.benchmark = True
# create the video capture object
cap = cv2.VideoCapture(0)
# compute scale
width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
fps = cap.get(cv2.CAP_PROP_FPS)
print(f"Video: h/w: {height}/{width} fps:{fps}")
scale = 1. * args.image_size / width
w, h = int(width * scale), int(height * scale)
# create the video writer object
fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
out = cv2.VideoWriter(args.video_out, fourcc, fps, (w, h))
# create the detector object
face_detection = FaceDetector().to(device)
cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
draw_keypoints: bool = False
    while True:
# Capture the video frame
# by frame
_, frame = cap.read()
start = cv2.getTickCount()
# preprocess
frame = scale_image(frame, args.image_size)
img = K.image_to_tensor(frame, keepdim=False).to(device)
img = K.color.bgr_to_rgb(img.float())
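        # The frame is now a 1xCxHxW float RGB tensor on the target device.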
# detect !
with torch.no_grad():
dets = face_detection(img)
dets = [FaceDetectorResult(o) for o in dets]
fps: float = cv2.getTickFrequency() / (cv2.getTickCount() - start)
# show image
frame_vis = frame.copy()
frame_vis = cv2.putText(
frame_vis, f"FPS: {fps:.1f}", (10, 20), cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))
for b in dets:
if b.score < args.vis_threshold:
continue
# draw face bounding box
line_thickness = 2
line_length = 10
x1, y1 = b.top_left.int().tolist()
frame_vis = cv2.line(frame_vis, (x1, y1), (x1 + line_length, y1), (0, 255, 0), thickness=line_thickness)
frame_vis = cv2.line(frame_vis, (x1, y1), (x1, y1 + line_length), (0, 255, 0), thickness=line_thickness)
x1, y1 = b.top_right.int().tolist()
frame_vis = cv2.line(frame_vis, (x1, y1), (x1 - line_length, y1), (0, 255, 0), thickness=line_thickness)
frame_vis = cv2.line(frame_vis, (x1, y1), (x1, y1 + line_length), (0, 255, 0), thickness=line_thickness)
x1, y1 = b.bottom_right.int().tolist()
frame_vis = cv2.line(frame_vis, (x1, y1), (x1 - line_length, y1), (0, 255, 0), thickness=line_thickness)
frame_vis = cv2.line(frame_vis, (x1, y1), (x1, y1 - line_length), (0, 255, 0), thickness=line_thickness)
x1, y1 = b.bottom_left.int().tolist()
frame_vis = cv2.line(frame_vis, (x1, y1), (x1 + line_length, y1), (0, 255, 0), thickness=line_thickness)
frame_vis = cv2.line(frame_vis, (x1, y1), (x1, y1 - line_length), (0, 255, 0), thickness=line_thickness)
if draw_keypoints:
# draw facial keypoints
frame_vis = draw_keypoint(frame_vis, b, FaceKeypoint.EYE_LEFT)
frame_vis = draw_keypoint(frame_vis, b, FaceKeypoint.EYE_RIGHT)
frame_vis = draw_keypoint(frame_vis, b, FaceKeypoint.NOSE)
frame_vis = draw_keypoint(frame_vis, b, FaceKeypoint.MOUTH_LEFT)
frame_vis = draw_keypoint(frame_vis, b, FaceKeypoint.MOUTH_RIGHT)
# draw the text score and FPS
pt = b.top_left.int().tolist()
frame_vis = cv2.putText(
frame_vis, f"{b.score:.2f}", (pt[0], pt[1] - 12), cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))
# write the processed frame
out.write(frame_vis)
# Display the resulting frame
cv2.imshow('frame', frame_vis)
# the 's' button is set as the
# switching button to draw the face keypoints
if cv2.waitKey(1) == ord('s'):
draw_keypoints = not draw_keypoints
        # the 'q' button is set as the quitting button;
        # you may use any key of your choice
if cv2.waitKey(1) == ord('q'):
break
# After the loop release the cap and writing objects
cap.release()
out.release()
# Destroy all the windows
cv2.destroyAllWindows()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Face and Landmark Detection')
parser.add_argument('--video_out', required=True, type=str, help='the file path to write the output.')
parser.add_argument('--image_size', default=320, type=int, help='the image size to process.')
parser.add_argument('--vis_threshold', default=0.8, type=float, help='visualization_threshold')
parser.add_argument('--vis_keypoints', dest='vis_keypoints', action='store_true')
parser.add_argument('--cuda', dest='cuda', action='store_true')
args = parser.parse_args()
my_app()
|
83477
|
import os
import re
import io
import sys
import glob
import json
import argparse
import datetime
from operator import itemgetter
from bs4 import BeautifulSoup
DURATION_RE = r'PT(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?'
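# Matches ISO 8601-style durations such as PT1H2M3S found in the Google Voice export markup.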
def convert_to_type(s):
return s.replace('http://www.google.com/voice#', '')
def convert_to_tel(s):
return s.replace('tel:', '')
def convert_to_duration(s):
r = re.search(DURATION_RE, s)
td = datetime.timedelta(hours=int(r.group(1) or 0),
minutes=int(r.group(2) or 0),
seconds=int(r.group(3) or 0))
return td.total_seconds() * 1000
def serialize_general_to_record(raw):
soup = BeautifulSoup(raw, 'html.parser')
contributors = []
for contributor in soup.find_all('div', class_='contributor'):
contributors.append({
'name': contributor.find('span', class_='fn').string or '',
'tel': convert_to_tel(contributor.find('a', class_='tel')['href'])
})
record = {
'tags': [convert_to_type(a['href']) for a in
soup.find_all('a', rel='tag')],
'date': soup.find('abbr', class_='published')['title'],
'contributors': contributors
}
if soup.find('abbr', class_='duration') is not None:
record['duration'] = convert_to_duration(
soup.find('abbr', class_='duration')['title'])
return record
def serialize_text_messages_to_record(raw):
soup = BeautifulSoup(raw, 'html.parser')
sender = []
messages = []
dates = []
conversation = []
contributors = []
for contributor in soup.find_all('cite', class_='sender'):
# Messages from others are in the "span" tag and messages from you
# are in the "abbr" tag
if contributor.find('span', class_='fn'):
sender.append({
'name': contributor.find('span', class_='fn').string or '',
'tel': convert_to_tel(
contributor.find('a', class_='tel')['href'])
})
if contributor.find('abbr', class_='fn'):
sender.append({
'name': contributor.find('abbr', class_='fn').string or '',
'tel': convert_to_tel(
contributor.find('a', class_='tel')['href'])
})
for message in soup.find_all('q'):
messages.append(message.text)
for date in soup.find_all('abbr', class_='dt'):
dates.append(date['title'])
for item in sender:
if item not in contributors:
contributors.append(item)
# A message where the other side didn't respond.
# Tel is not given and will have to map later :/
if len(contributors) == 1 and contributors[0]['name'] == 'Me':
title = soup.find('title').text.split('\n')[-1]
if '+' in title:
contributors.append({
'name': title,
'tel': title
})
else:
contributors.append({
'name': title,
'tel': ''
})
for i in range(0, len(messages)):
conversation.append({
'sender': sender[i],
'message': messages[i],
'date': dates[i]
})
record = {
'date': dates[0],
'contributors': contributors,
'conversation': conversation,
'tags': [convert_to_type(a['href']) for a in
soup.find_all('a', rel='tag')]
}
return record
def serialize_files_to_json(paths):
records = []
for path in paths:
with io.open(path, 'r', encoding='utf8') as f:
if 'Text' in path:
serialized = serialize_text_messages_to_record(f.read())
records.append(serialized)
else:
serialized = serialize_general_to_record(f.read())
records.append(serialized)
records.sort(key=itemgetter('date'))
return json.dumps({'records': records}, indent=4)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('source',
help='Directory of call & text HTML files to convert')
parser.add_argument('output',
nargs='?',
type=argparse.FileType('w'),
default=sys.stdout,
help='Where to write JSON output (default: stdout)')
args = parser.parse_args()
files = glob.glob(os.path.join(args.source, '*.html'))
    json_output = serialize_files_to_json(files)
    with args.output as f:
        f.write(json_output)
if __name__ == '__main__':
main()
|
83509
|
from __future__ import unicode_literals
import codecs
def encode_hex(value):
return '0x' + codecs.decode(codecs.encode(value, 'hex'), 'utf8')
def decode_hex(value):
_, _, hex_part = value.rpartition('x')
return codecs.decode(hex_part, 'hex')
|
83545
|
import random
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
"""
Generating squares
This example will generate 25 squares each in a randomly chosen grayvalue.
The grayvalue is chosen out of 25 different possiblities. Every redraw of the
window will create a new set of squares.
http://www.de-brauwer.be/wiki/wikka.php?wakka=PyOpenGLSquares
"""
def initFun():
glClearColor(1.0, 1.0, 1.0, 0.0)
glColor3f(0.0, 0.0, 0.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluOrtho2D(0.0, 640.0, 0.0, 480.0)
def displayFun():
glClear(GL_COLOR_BUFFER_BIT)
for i in range(0, 25):
        gray = random.randint(0, 25) / 25.0
glColor3f(gray, gray, gray)
glRecti(random.randint(0, 640), random.randint(0, 480),
random.randint(0, 640), random.randint(0, 480))
glFlush()
if __name__ == '__main__':
glutInit()
glutInitWindowSize(640, 480)
glutCreateWindow(b"DrawSquares")
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
glutDisplayFunc(displayFun)
initFun()
glutMainLoop()
|
83571
|
import abc
import asyncio
import logging
from typing import Any, Callable, List, Optional, TYPE_CHECKING, Union
from discord import Message, Reaction, TextChannel, User
from discord.abc import GuildChannel
from discord.ext.commands import Context
from dpymenus import Page, PagesError, Session, SessionError
from dpymenus.hooks import HookEvent, HookWhen, call_hook
from dpymenus.settings import BUTTON_DELAY, HISTORY_CACHE_LIMIT, REPLY_AS_DEFAULT, TIMEOUT
if TYPE_CHECKING:
from dpymenus import Template
from dpymenus.types import PageType
class BaseMenu(abc.ABC):
"""The abstract base menu object. All menu types derive from this class. Implements generic properties,
menu loop handling, and defines various helper methods."""
_timeout: int
_command_message: bool
_persist: bool
_reply: bool
_custom_check: Optional[Callable]
_replies_disabled: bool
_start_page_index: int
def __init__(self, ctx: Context):
self._id: int = -1
self.ctx: Context = ctx
self.pages: List[Page] = []
self.page: Optional[Page] = None
self.active: bool = True
self.input: Optional[Union[Message, Reaction]] = None
self.output: Optional[Message] = None
self.history: List[int] = []
@abc.abstractmethod
async def open(self):
pass
@property
def timeout(self) -> int:
return getattr(self, '_timeout', TIMEOUT)
def set_timeout(self, duration: int) -> 'BaseMenu':
"""Sets the timeout on a menu. Returns itself for fluent-style chaining.
:param duration: Specifies how long, in seconds, before the menu will time out.
:rtype: :class:`BaseMenu`
"""
setattr(self, '_timeout', duration)
return self
@property
def destination(self) -> Union[Context, User, TextChannel]:
return getattr(self, '_destination', self.ctx)
def set_destination(self, dest: Union[User, TextChannel]) -> 'BaseMenu':
"""Sets the message destination for the menu. Returns itself for fluent-style chaining.
:param dest: Where, in Discord, to send and display the menu.
:rtype: :class:`BaseMenu`
"""
setattr(self, '_destination', dest)
return self
@property
def replies_disabled(self) -> bool:
return getattr(self, '_replies_disabled', False)
def disable_replies(self) -> 'BaseMenu':
"""Disables the Reply feature on Discord from being used with this menu. Overrides the global settings.
Returns itself for fluent-style chaining.
:rtype: :class:`BaseMenu`
"""
self._replies_disabled = True
return self
@property
def command_message(self) -> bool:
return getattr(self, '_command_message', False)
def show_command_message(self) -> 'BaseMenu':
"""Persists user command invocation messages in the chat instead of deleting them after execution.
Returns itself for fluent-style chaining.
:rtype: :class:`BaseMenu`
"""
self._command_message = True
return self
@property
def persist(self) -> bool:
return getattr(self, '_persist', False)
def persist_on_close(self) -> 'BaseMenu':
"""Prevents message cleanup from running when a menu closes.
Returns itself for fluent-style chaining.
:rtype: :class:`BaseMenu`
"""
self._persist = True
return self
@property
def custom_check(self) -> Optional[Callable]:
return getattr(self, '_custom_check', None)
def set_custom_check(self, fn: Callable) -> 'BaseMenu':
"""Overrides the default check method for user responses.
Returns itself for fluent-style chaining.
:param fn: A reference to a predicate function.
:rtype: :class:`BaseMenu`
"""
setattr(self, '_custom_check', fn)
return self
@property
def start_page_index(self) -> int:
return getattr(self, '_start_page_index', 0)
def set_initial_page(self, index: int) -> 'BaseMenu':
"""Sets the initial page of the menu when opened based on a pages index in the `add_pages` list.
Defaults to 0.
:param index: Which page index to start on.
:rtype: :class:`BaseMenu`
"""
self._start_page_index = index
return self
def add_hook(self, when: HookWhen, event: HookEvent, callback: Callable) -> 'BaseMenu':
"""Sets various callback attributes on the menu so users can hook into
specific events. See https://dpymenus.com/lifecycle for the full list of events
and hook structure.
:param when: Defines which point in the menu lifetime the callback will be executed.
:param event: Defines which event in the menu lifetime the callback will be executed on.
:param callback: References a function or method which will be executed based on the `when`
and `event` params.
:rtype: :class:`BaseMenu`
"""
setattr(self, f'_hook_{when.name.lower()}_{event.name.lower()}', callback)
return self
# Helper Methods
async def close(self):
"""Gracefully exits out of the menu, performing necessary cleanup of sessions, reactions, and messages."""
await call_hook(self, '_hook_before_close')
Session.get(self).kill_or_freeze()
self.active = False
if self.output.reactions:
await asyncio.sleep(BUTTON_DELAY)
await self._safe_clear_reactions()
await self._safe_delete_output()
await call_hook(self, '_hook_after_close')
async def next(self):
"""Transitions to the next page."""
if self.page.index + 1 > len(self.pages) - 1:
return
self.page = self.pages[self.page.index + 1]
await self._next()
async def previous(self):
"""Transitions to the previous page."""
if self.page.index - 1 < 0:
return
self.page = self.pages[self.page.index - 1]
await self._next()
async def to_first(self):
"""Transitions to the first page."""
self.page = self.pages[0]
await self._next()
async def to_last(self):
"""Transitions to the last page."""
        self.page = self.pages[-1]
await self._next()
async def go_to(self, page: Optional[Union[str, int]] = None):
"""Transitions to a specific page.
:param page: The name of the `on_next` function for a particular page or its page number. If this is not set,
the next page in the list will be called.
"""
if isinstance(page, int):
self.page = self.pages[page]
elif isinstance(page, str):
# get a page index from its on_next callback function name and assign it
for p in self.pages:
if p.on_next_event.__name__ == page:
self.page = p
break
await self._next()
def last_visited_page(self) -> int:
"""Returns the last visited page index.
:rtype: int
"""
return self.history[-2] if len(self.history) > 1 else 0
def add_pages(self, pages: List['PageType'], template: 'Template' = None) -> 'BaseMenu':
"""Adds a list of pages to a menu, setting their index based on the position in the list.
Returns itself for fluent-style chaining.
:param pages: A list of pages to display; ordered from first to last in linear menus.
:param template: An optional :class:`Template` to define a menu style.
:rtype: :class:`BaseMenu`
"""
self._validate_pages(pages)
for i, page in enumerate(pages):
if not isinstance(page, Page):
page = Page.convert_from(page)
if template:
page = page._apply_template(template)
page.index = i
self.pages.append(page)
self.page = self.pages[0]
return self
async def send_message(self, page: 'PageType'):
"""Updates the output message if it can be edited, otherwise sends a new message.
:param page: A :class:`PageType` to send to Discord.
"""
safe_embed = page.as_safe_embed() if type(page) == Page else page
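        # Only Page instances are converted to safe embeds; other PageType values are sent as-is.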
if isinstance(self.output.channel, GuildChannel):
return await self.output.edit(embed=safe_embed)
else:
await self.output.delete()
self.output = await self.destination.send(embed=safe_embed)
# Internal Methods
async def _open(self):
"""This method runs for ALL menus after their own open method. Session handling and initial setup is
performed in here; it should NEVER be handled inside specific menus."""
try:
session = await Session.create(self)
except SessionError as exc:
logging.info(exc.message)
else:
self.history = session.history
if self.history:
self.page = self.pages[session.history[-1]]
else:
self.page = self.pages[self.start_page_index]
await call_hook(self, '_hook_before_open')
if REPLY_AS_DEFAULT and self.replies_disabled is False:
self.output = await self.destination.reply(embed=self.page.as_safe_embed())
else:
self.output = await self.destination.send(embed=self.page.as_safe_embed())
self.input = self.ctx.message
self._update_history()
await self._safe_delete_input()
async def _safe_delete_input(self):
"""Safely deletes a message if the bot has permissions and show command messages is set to false."""
if self.command_message is False:
if isinstance(self.output.channel, GuildChannel):
await self.input.delete()
async def _safe_delete_output(self):
"""Safely deletes a message if the bot has permissions and persist is set to false."""
if self.persist is False:
await self.output.delete()
self.output = None
def _update_history(self):
"""Adds the most recent page index to the menus history cache. If the history is longer than
the cache limit, defined globally, then the oldest item is popped before updating the history."""
if len(self.history) >= HISTORY_CACHE_LIMIT:
self.history.pop(0)
self.history.append(self.page.index)
def _check(self, message: Message) -> bool:
"""Returns true if the event author and channel are the same as the initial values in the menu context."""
return message.author == self.ctx.author and self.output.channel == message.channel
async def _cancel_menu(self):
"""Closes the menu as a user-defined 'cancel' event. Checks if an on_cancel_event callback exists first."""
if self.page.on_cancel_event:
await self.page.on_cancel_event()
return
if cancel_page := getattr(self, 'cancel_page', None):
await self.output.edit(embed=cancel_page)
await self.close()
async def _timeout_menu(self):
"""Closes the menu on an asyncio.TimeoutError event. If an on_timeout_event callback exists, that function
will be run instead of the default behaviour."""
await call_hook(self, '_hook_before_timeout')
if self.page.on_timeout_event:
await self.page.on_timeout_event()
return
if timeout_page := getattr(self, 'timeout_page', None):
await self.output.edit(embed=timeout_page)
await self.close()
await call_hook(self, '_hook_after_timeout')
async def _next(self):
"""Sends a message after the `next` method is called. Closes the menu instance if there is no callback for
the on_next_event on the current page."""
if self.__class__.__name__ != 'PaginatedMenu':
if self.page.on_next_event is None:
Session.get(self).kill()
self.active = False
self._update_history()
await self.send_message(self.page)
# Validation Methods
@staticmethod
def _validate_pages(pages: List[Any]):
"""Checks that the Menu contains at least one pages."""
if len(pages) == 0:
raise PagesError(f'There must be at least one page in a menu. Expected at least 1, found {len(pages)}.')
|
83598
|
import os
import mock
import pytest
from prequ._pip_compat import PIP_10_OR_NEWER, PIP_192_OR_NEWER, path_to_url
from prequ.exceptions import DependencyResolutionFailed
from prequ.repositories.pypi import PyPIRepository
from prequ.scripts._repo import get_pip_command
PY27_LINUX64_TAGS = [
('cp27', 'cp27mu', 'manylinux1_x86_64'),
('cp27', 'cp27mu', 'linux_x86_64'),
('cp27', 'none', 'manylinux1_x86_64'),
('cp27', 'none', 'linux_x86_64'),
('py2', 'none', 'manylinux1_x86_64'),
('py2', 'none', 'linux_x86_64'),
('cp27', 'none', 'any'),
('cp2', 'none', 'any'),
('py27', 'none', 'any'),
('py2', 'none', 'any'),
('py26', 'none', 'any'),
('py25', 'none', 'any'),
('py24', 'none', 'any'),
('py23', 'none', 'any'),
('py22', 'none', 'any'),
('py21', 'none', 'any'),
('py20', 'none', 'any'),
]
def _patch_supported_tags(func):
if PIP_10_OR_NEWER:
def get_supported_tags(versions=None, noarch=False, platform=None,
impl=None, abi=None):
return PY27_LINUX64_TAGS
to_patch = 'pip._internal.' + (
'models.target_python.get_supported' if PIP_192_OR_NEWER else
'pep425tags.get_supported')
return mock.patch(
to_patch,
new=get_supported_tags)(func)
else:
return mock.patch(
'pip.pep425tags.supported_tags',
new=PY27_LINUX64_TAGS)(func)
@_patch_supported_tags
def test_resolving_respects_platform(from_line):
repository = get_repository()
if hasattr(repository.finder, 'candidate_evaluator'):
repository.finder.candidate_evaluator._valid_tags = PY27_LINUX64_TAGS
else:
repository.finder.valid_tags = PY27_LINUX64_TAGS
ireq = from_line('cryptography==2.0.3')
deps = repository.get_dependencies(ireq)
assert 'enum34' in set(x.name for x in deps)
assert 'ipaddress' in set(x.name for x in deps)
def test_generate_hashes_only_current_platform(from_line):
all_cffi_191_hashes = {
'sha256:04b133ef629ae2bc05f83d0b079a964494a9cd17914943e690c57209b44aae20',
'sha256:0f1b3193c17b93c75e73eeac92f22eec4c98a021d9969b1c347d1944fae0d26b',
'sha256:1fb1cf40c315656f98f4d3acfb1bd031a14a9a69d155e9a180d5f9b52eaf745a',
'sha256:20af85d8e154b50f540bc8d517a0dbf6b1c20b5d06e572afda919d5dafd1d06b',
'sha256:2570f93b42c61013ab4b26e23aa25b640faf5b093ad7dd3504c3a8eadd69bc24',
'sha256:2f4e2872833ee3764dfc168dea566b7dd83b01ac61b377490beba53b5ece57f7',
'sha256:31776a37a67424e7821324b9e03a05aa6378bbc2bccc58fa56402547f82803c6',
'sha256:353421c76545f1d440cacc137abc865f07eab9df0dd3510c0851a2ca04199e90',
'sha256:36d06de7b09b1eba54b1f5f76e2221afef7489cc61294508c5a7308a925a50c6',
'sha256:3f1908d0bcd654f8b7b73204f24336af9f020b707fb8af937e3e2279817cbcd6',
'sha256:5268de3a18f031e9787c919c1b9137ff681ea696e76740b1c6c336a26baaa58a',
'sha256:563e0bd53fda03c151573217b3a49b3abad8813de9dd0632e10090f6190fdaf8',
'sha256:5e1368d13f1774852f9e435260be19ad726bbfb501b80472f61c2dc768a0692a',
'sha256:60881c79eb72cb75bd0a4be5e31c9e431739146c4184a2618cabea3938418984',
'sha256:6120b62a642a40e47eb6c9ff00c02be69158fc7f7c5ff78e42a2c739d1c57cd6',
'sha256:65c223e77f87cb463191ace3398e0a6d84ce4ac575d42eb412a220b099f593d6',
'sha256:6fbf8db55710959344502b58ab937424173ad8b5eb514610bcf56b119caa350a',
'sha256:74aadea668c94eef4ceb09be3d0eae6619e28b4f1ced4e29cd43a05bb2cfd7a4',
'sha256:7be1efa623e1ed91b15b1e62e04c536def1d75785eb930a0b8179ca6b65ed16d',
'sha256:83266cdede210393889471b0c2631e78da9d4692fcca875af7e958ad39b897ee',
'sha256:86c68a3f8246495962446c6f96f6a27f182b91208187b68f1e87ec3dfd29fa32',
'sha256:9163f7743cf9991edaddf9cf886708e288fab38e1b9fec9c41c15c85c8f7f147',
'sha256:97d9f338f91b7927893ea6500b953e4b4b7e47c6272222992bb76221e17056ff',
'sha256:a7930e73a4359b52323d09de6d6860840314aa09346cbcf4def8875e1b07ebc7',
'sha256:ada8a42c493e4934a1a8875c2bc9efcb1b88c09883f70375bfa053ab32d6a118',
'sha256:b0bc2d83cc0ba0e8f0d9eca2ffe07f72f33bec7d84547071e7e875d4cca8272d',
'sha256:b5412a65605c642adf3e1544b59b8537daf5696dedadd2b3cbebc42e24da45ed',
'sha256:ba6b5205fced1625b6d9d55f9ef422f9667c5d95f18f07c0611eb964a3355331',
'sha256:bcaf3d86385daaab0ae51c9c53ebe70a6c1c5dfcb9e311b13517e04773ddf6b6',
'sha256:cfa15570ecec1ea6bee089e86fd4deae6208c96a811344ce246de5e5c9ac824a',
'sha256:d3e3063af1fa6b59e255da9a812891cdaf24b90fbaf653c02797871069b7c4c9',
'sha256:d9cfe26ecea2fec320cd0cac400c9c2435328994d23596ee6df63945fe7292b0',
'sha256:e5ef800ef8ef9ee05ae9a5b7d7d9cf7d6c936b32e312e54823faca3034ee16ab',
'sha256:f1366150acf611d09d37ffefb3559ed3ffeb1713643d3cd10716d6c5da3f83fb',
'sha256:f4eb9747a37120b35f59c8e96265e87b0c432ff010d32fc0772992aa14659502',
'sha256:f8264463cc08cd696ad17e4bf3c80f3344628c04c11ffdc545ddf0798bc17316',
'sha256:f8ba54848dfe280b1be0d6e699544cee4ba10d566f92464538063d9e645aed3e',
'sha256:f93d1edcaea7b6a7a8fbf936f4492a9a0ee0b4cb281efebd5e1dd73e5e432c71',
'sha256:fc8865c7e0ac25ddd71036c2b9a799418b32d9acb40400d345b8791b6e1058cb',
'sha256:fce6b0cb9ade1546178c031393633b09c4793834176496c99a94de0bfa471b27',
'sha256:fde17c52d7ce7d55a9fb263b57ccb5da6439915b5c7105617eb21f636bb1bd9c',
}
repository = get_repository()
ireq = from_line('cffi==1.9.1')
hashes = repository.get_hashes(ireq)
assert hashes != all_cffi_191_hashes, "No platform could support them all"
assert hashes.issubset(all_cffi_191_hashes)
def test_generate_hashes_without_interfering_with_each_other(from_line):
repository = get_repository()
repository.get_hashes(from_line('cffi==1.9.1'))
repository.get_hashes(from_line('matplotlib==2.0.2'))
def test_get_hashes_non_pinned(from_line):
repository = get_repository()
with pytest.raises(ValueError):
repository.get_hashes(from_line('prequ'))
def test_get_dependencies_non_pinned_non_editable(from_line):
repository = get_repository()
with pytest.raises(TypeError):
repository.get_dependencies(from_line('prequ'))
def test_failing_setup_script(from_editable):
repository = get_repository()
failing_package_dir = os.path.join(
os.path.split(__file__)[0], 'test_data', 'failing_package')
failing_package_url = path_to_url(failing_package_dir)
ireq = from_editable(failing_package_url)
with pytest.raises(DependencyResolutionFailed) as excinfo:
repository.get_dependencies(ireq)
# Check the contents of the error message
error_message = '{}'.format(excinfo.value)
assert error_message.startswith(
'Dependency resolution of {url} failed:\n'.format(
url=failing_package_url))
egg_info_failed = 'Command "python setup.py egg_info" failed'
command_errored = (
'Command errored out with exit status 1:'
' python setup.py egg_info Check the logs for full command output.')
assert (
egg_info_failed in error_message
or
command_errored in error_message)
import_error = "No module named 'non_existing_setup_helper'"
import_error2 = import_error.replace("'", '') # On Python 2
assert (import_error in error_message) or (import_error2 in error_message)
def get_repository():
pip_command = get_pip_command()
pip_options, _ = pip_command.parse_args([
'--index-url', PyPIRepository.DEFAULT_INDEX_URL
])
session = pip_command._build_session(pip_options)
return PyPIRepository(pip_options, session)
def test_get_hashes_editable_empty_set(from_editable):
pip_command = get_pip_command()
pip_options, _ = pip_command.parse_args([
'--index-url', PyPIRepository.DEFAULT_INDEX_URL
])
session = pip_command._build_session(pip_options)
repository = PyPIRepository(pip_options, session)
ireq = from_editable('git+https://github.com/django/django.git#egg=django')
assert repository.get_hashes(ireq) == set()
|
83648
|
import pandas as pd
import numpy as np
import pytest
from nltk.metrics.distance import masi_distance
from pandas.testing import assert_series_equal
from crowdkit.aggregation.utils import get_accuracy
from crowdkit.metrics.data import alpha_krippendorff, consistency, uncertainty
from crowdkit.metrics.performers import accuracy_on_aggregates
def test_consistency(toy_answers_df):
assert consistency(toy_answers_df) == 0.9384615384615385
class TestUncertaintyMetric:
def test_uncertainty_mean_per_task_skills(self, toy_answers_df):
performers_skills = pd.Series(
[0.6, 0.8, 1.0, 0.4, 0.8],
index=pd.Index(['w1', 'w2', 'w3', 'w4', 'w5'], name='performer'),
)
assert uncertainty(toy_answers_df, performers_skills) == 0.6308666201949331
def test_uncertainty_raises_wrong_compte_by(self, toy_answers_df):
performers_skills = pd.Series(
[0.6, 0.8, 1.0, 0.4, 0.8],
index=pd.Index(['w1', 'w2', 'w3', 'w4', 'w5'], name='performer'),
)
with pytest.raises(KeyError):
uncertainty(toy_answers_df, performers_skills, compute_by='invalid')
def test_uncertainty_docstring_examples(self):
assert uncertainty(
pd.DataFrame.from_records(
[
{'task': 'X', 'performer': 'A', 'label': 'Yes'},
{'task': 'X', 'performer': 'B', 'label': 'Yes'},
]
)
) == 0.0
assert uncertainty(
pd.DataFrame.from_records(
[
{'task': 'X', 'performer': 'A', 'label': 'Yes'},
{'task': 'X', 'performer': 'B', 'label': 'No'},
{'task': 'X', 'performer': 'C', 'label': 'Maybe'},
]
)
) == 1.0986122886681096
np.testing.assert_allclose(
uncertainty(
pd.DataFrame.from_records(
[
{'task': 'X', 'performer': 'A', 'label': 'Yes'},
{'task': 'X', 'performer': 'B', 'label': 'No'},
{'task': 'Y', 'performer': 'A', 'label': 'Yes'},
{'task': 'Y', 'performer': 'B', 'label': 'Yes'},
]
),
compute_by="task",
aggregate=False
), pd.Series([0.693147, 0.0], index=['X', 'Y'], name='task'), atol=1e-3
)
np.testing.assert_allclose(
uncertainty(
pd.DataFrame.from_records(
[
{'task': 'X', 'performer': 'A', 'label': 'Yes'},
{'task': 'X', 'performer': 'B', 'label': 'No'},
{'task': 'Y', 'performer': 'A', 'label': 'Yes'},
{'task': 'Y', 'performer': 'B', 'label': 'Yes'},
]
),
compute_by="performer",
aggregate=False
), pd.Series([0.0, 0.693147], index=['A', 'B'], name='performer'), atol=1e-3
)
def test_uncertainty_raises_skills_not_found(self):
answers = pd.DataFrame.from_records(
[
{'task': '1', 'performer': 'A', 'label': frozenset(['dog'])},
{'task': '1', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '1', 'performer': 'C', 'label': frozenset(['cat'])},
]
)
performers_skills = pd.Series(
[1, 1],
index=pd.Index(['A', 'B'], name='performer'),
)
with pytest.raises(AssertionError):
uncertainty(answers, performers_skills)
def test_uncertainty_per_performer(self):
answers = pd.DataFrame.from_records(
[
{'task': '1', 'performer': 'A', 'label': frozenset(['dog'])},
{'task': '1', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '1', 'performer': 'C', 'label': frozenset(['cat'])},
{'task': '2', 'performer': 'A', 'label': frozenset(['cat'])},
{'task': '2', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '2', 'performer': 'C', 'label': frozenset(['cat'])},
{'task': '3', 'performer': 'A', 'label': frozenset(['dog'])},
{'task': '3', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '3', 'performer': 'C', 'label': frozenset(['dog'])},
{'task': '4', 'performer': 'A', 'label': frozenset(['cat'])},
{'task': '4', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '4', 'performer': 'C', 'label': frozenset(['cat'])},
]
)
performers_skills = pd.Series(
[1, 1, 1],
index=pd.Index(['A', 'B', 'C'], name='performer'),
)
entropies = uncertainty(
answers,
performers_skills,
compute_by='performer',
aggregate=False
)
assert isinstance(entropies, pd.Series)
assert sorted(np.unique(entropies.index).tolist()) == ['A', 'B', 'C']
# B always answers the same, entropy = 0
np.testing.assert_allclose(entropies['B'], 0, atol=1e-6)
# A answers uniformly, entropy = max possible
np.testing.assert_allclose(entropies['A'], 0.693147, atol=1e-6)
# C answers non-uniformly, entropy = between B and A
assert entropies['A'] > entropies['C'] > entropies['B']
assert entropies.mean() == uncertainty(
answers,
performers_skills,
compute_by='performer',
aggregate=True
)
def test_uncertainty_per_task(self):
answers = pd.DataFrame.from_records(
[
{'task': '1', 'performer': 'A', 'label': frozenset(['dog'])},
{'task': '1', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '1', 'performer': 'C', 'label': frozenset(['cat'])},
{'task': '2', 'performer': 'A', 'label': frozenset(['cat'])},
{'task': '2', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '2', 'performer': 'C', 'label': frozenset(['cat'])},
{'task': '3', 'performer': 'A', 'label': frozenset(['dog'])},
{'task': '3', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '3', 'performer': 'C', 'label': frozenset(['dog'])},
{'task': '4', 'performer': 'A', 'label': frozenset(['cat'])},
{'task': '4', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': '4', 'performer': 'C', 'label': frozenset(['cat'])},
{'task': '4', 'performer': 'A', 'label': frozenset(['cat'])},
{'task': '5', 'performer': 'A', 'label': frozenset(['cat'])},
{'task': '5', 'performer': 'B', 'label': frozenset(['dog'])},
]
)
performers_skills = pd.Series(
[1, 1, 1],
index=pd.Index(['A', 'B', 'C'], name='performer'),
)
entropies = uncertainty(answers,
performers_skills,
compute_by='task',
aggregate=False)
assert isinstance(entropies, pd.Series)
assert sorted(np.unique(entropies.index).tolist()) == ['1', '2', '3', '4', '5']
# Everybody answered same on tasks 2 and 4
np.testing.assert_allclose(entropies['2'], 0, atol=1e-6)
np.testing.assert_allclose(entropies['4'], 0, atol=1e-6)
# On tasks 1 and 3, 2 performers agreed and one answered differently
assert entropies['1'] > 0
np.testing.assert_allclose(entropies['1'], entropies['3'], atol=1e-6)
# Complete disagreement on task 5, max possible entropy
np.testing.assert_allclose(entropies['5'], 0.693147, atol=1e-6)
assert entropies.mean() == uncertainty(
answers,
performers_skills,
compute_by='task',
aggregate=True
)
def test_golden_set_accuracy(toy_answers_df, toy_gold_df):
assert get_accuracy(toy_answers_df, toy_gold_df) == 5 / 9
assert get_accuracy(toy_answers_df, toy_gold_df, by='performer').equals(pd.Series(
[0.5, 1.0, 1.0, 0.5, 0.0],
index=['w1', 'w2', 'w3', 'w4', 'w5'],
))
def test_accuracy_on_aggregates(toy_answers_df):
expected_performers_accuracy = pd.Series(
[0.6, 0.8, 1.0, 0.4, 0.8],
index=pd.Index(['w1', 'w2', 'w3', 'w4', 'w5'], name='performer'),
)
assert_series_equal(accuracy_on_aggregates(toy_answers_df, by='performer'), expected_performers_accuracy)
assert accuracy_on_aggregates(toy_answers_df) == 0.7083333333333334
def test_alpha_krippendorff(toy_answers_df):
assert alpha_krippendorff(pd.DataFrame.from_records([
{'task': 'X', 'performer': 'A', 'label': 'Yes'},
{'task': 'X', 'performer': 'B', 'label': 'Yes'},
{'task': 'Y', 'performer': 'A', 'label': 'No'},
{'task': 'Y', 'performer': 'B', 'label': 'No'},
])) == 1.0
assert alpha_krippendorff(pd.DataFrame.from_records([
{'task': 'X', 'performer': 'A', 'label': 'Yes'},
{'task': 'X', 'performer': 'B', 'label': 'Yes'},
{'task': 'Y', 'performer': 'A', 'label': 'No'},
{'task': 'Y', 'performer': 'B', 'label': 'No'},
{'task': 'Z', 'performer': 'A', 'label': 'Yes'},
{'task': 'Z', 'performer': 'B', 'label': 'No'},
])) == 0.4444444444444444
assert alpha_krippendorff(toy_answers_df) == 0.14219114219114215
def test_alpha_krippendorff_with_distance():
whos_on_the_picture = pd.DataFrame.from_records([
{'task': 'X', 'performer': 'A', 'label': frozenset(['dog'])},
{'task': 'X', 'performer': 'B', 'label': frozenset(['dog'])},
{'task': 'Y', 'performer': 'A', 'label': frozenset(['cat'])},
{'task': 'Y', 'performer': 'B', 'label': frozenset(['cat'])},
{'task': 'Z', 'performer': 'A', 'label': frozenset(['cat'])},
{'task': 'Z', 'performer': 'B', 'label': frozenset(['cat', 'mouse'])},
])
assert alpha_krippendorff(whos_on_the_picture) == 0.5454545454545454
assert alpha_krippendorff(whos_on_the_picture, masi_distance) == 0.6673336668334168
|
83652
|
import pandas as pd
def correct_lr(data):
'''
    Swap receptor-ligand (RL) pairs into ligand-receptor (LR) orientation,
    and reorder receptor-receptor (R1R2) pairs as R2R1.
'''
def swap(a,b): return b,a
data = data.to_dict('index')
for k,v in data.items():
if v['isReceptor_fst'] and v['isReceptor_scn']:
v['isReceptor_fst'],v['isReceptor_scn'] = swap(v['isReceptor_fst'],v['isReceptor_scn'])
v['Ligand'],v['Receptor'] = swap(v['Ligand'],v['Receptor'])
v['Ligand.Cluster'],v['Receptor.Cluster'] = swap(v['Ligand.Cluster'],v['Receptor.Cluster'])
elif v['isReceptor_fst'] and not v['isReceptor_scn']:
v['isReceptor_fst'],v['isReceptor_scn'] = swap(v['isReceptor_fst'],v['isReceptor_scn'])
v['Ligand'],v['Receptor'] = swap(v['Ligand'],v['Receptor'])
v['Ligand.Cluster'],v['Receptor.Cluster'] = swap(v['Ligand.Cluster'],v['Receptor.Cluster'])
res_df = pd.DataFrame.from_dict(data,orient='index')
return (res_df)
def cpdb2df(data):
data = data.fillna(0)
df_data = {}
df_data['Ligand'] = []
df_data['Receptor'] = []
df_data['Ligand.Cluster'] = []
df_data['Receptor.Cluster'] = []
df_data['isReceptor_fst'] = []
df_data['isReceptor_scn'] = []
df_data['MeanLR'] = []
    # Columns from index 12 onward are assumed to hold the per cluster-pair mean values:
    # 'interacting_pair' is 'ligand_receptor' and the cluster-pair headers are 'clusterA|clusterB'.
    for i in range(data.shape[0]):
        pair = list(data['interacting_pair'])[i].split('_')
        for j in range(data.iloc[:, 12:].shape[1]):
            c_pair = list(data.columns)[j + 12].split('|')
if float(data.iloc[i,j+12]) != 0.0:
df_data['Ligand'].append(pair[0])
df_data['Receptor'].append(pair[1])
df_data['Ligand.Cluster'].append(c_pair[0])
df_data['Receptor.Cluster'].append(c_pair[1])
df_data['isReceptor_fst'].append(list(data['receptor_a'])[i])
df_data['isReceptor_scn'].append(list(data['receptor_b'])[i])
df_data['MeanLR'].append(data.iloc[i,j+12])
data_final = pd.DataFrame.from_dict(df_data)
return(data_final)
import os
os.chdir('/home/nagai/Documents/sarscov/LR')
s1 = pd.read_csv('./CTR_filtered/significant_means.txt',sep='\t')
s2 = pd.read_csv('./COVID_filtered/significant_means.txt',sep='\t')
#dict with the mapping
s1_filtered = cpdb2df(s1)
s2_filtered = cpdb2df(s2)
s1_filtered = correct_lr(s1_filtered)
s2_filtered = correct_lr(s2_filtered)
s1_filtered.to_csv('s1_filtered_corrected.csv')
s2_filtered.to_csv('s2_filtered_corrected.csv')
|
83655
|
import albumentations
from albumentations.pytorch import ToTensorV2
import cv2
import numpy as np
def crop_image_from_gray(img, tol=7):
if img.ndim == 2:
mask = img > tol
return img[np.ix_(mask.any(1), mask.any(0))]
elif img.ndim == 3:
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
mask = gray_img > tol
check_shape = img[:, :, 0][np.ix_(mask.any(1), mask.any(0))].shape[0]
if (check_shape == 0):
return img
else:
img1 = img[:, :, 0][np.ix_(mask.any(1), mask.any(0))]
img2 = img[:, :, 1][np.ix_(mask.any(1), mask.any(0))]
img3 = img[:, :, 2][np.ix_(mask.any(1), mask.any(0))]
img = np.stack([img1, img2, img3], axis=-1)
return img
def crop_maskImg(image, sigmaX=10):
image = crop_image_from_gray(image)
#image = cv2.addWeighted(image, 4, cv2.GaussianBlur(image, (0, 0), sigmaX), -4, 128)
return image
def get_riadd_train_transforms(args):
image_size = args.img_size
transforms_train = albumentations.Compose([
#albumentations.RandomResizedCrop(image_size, image_size, scale=(0.85, 1), p=1),
albumentations.Resize(image_size, image_size),
albumentations.HorizontalFlip(p=0.5),
albumentations.VerticalFlip(p=0.5),
albumentations.MedianBlur(blur_limit = 7, p=0.3),
albumentations.IAAAdditiveGaussianNoise(scale = (0,0.15*255), p = 0.5),
albumentations.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=10, val_shift_limit=10, p=0.3),
albumentations.RandomBrightnessContrast(brightness_limit=(-0.2,0.2), contrast_limit=(-0.2, 0.2), p=0.3),
albumentations.Cutout(max_h_size=20, max_w_size=20, num_holes=5, p=0.5),
albumentations.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
),
ToTensorV2(),
])
return transforms_train
def get_riadd_valid_transforms(args):
image_size = args.img_size
valid_transforms = albumentations.Compose([
albumentations.Resize(image_size, image_size),
albumentations.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
),
ToTensorV2(),
])
return valid_transforms
def get_riadd_test_transforms(args):
image_size = args['img_size']
test_transforms = albumentations.Compose([
albumentations.Resize(image_size, image_size),
albumentations.HorizontalFlip(p=0.5),
albumentations.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=10, val_shift_limit=10, p=0.5),
albumentations.RandomBrightnessContrast(brightness_limit=(-0.2,0.2), contrast_limit=(-0.2, 0.2), p=0.5),
albumentations.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
),
ToTensorV2(),
])
return test_transforms
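# Hedged usage sketch (illustrative; SimpleNamespace(img_size=224) stands in for the real
# args object and 'fundus.png' is a placeholder path, neither is part of this file):
#
# from types import SimpleNamespace
# tfm = get_riadd_train_transforms(SimpleNamespace(img_size=224))
# img = cv2.cvtColor(cv2.imread('fundus.png'), cv2.COLOR_BGR2RGB)
# tensor = tfm(image=crop_maskImg(img))['image']  # CHW torch.Tensor ready for a model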
# if __name__ == '__main__':
# img = cv2.imread('/media/ExtDiskB/Hanson/datasets/wheat/RIADD/valid/1.png')
# img1 = preprocessing(img)
# # result=color_seperate(hsv_img, thresh_image)
# cv2.imwrite('1222.png',img1)
|
83681
|
import requests
login_url = "http://shop2.q.2019.volgactf.ru/loginProcess"
target_url = "http://shop2.q.2019.volgactf.ru/profile"
payload0 = {'name': 'wani', 'pass': '<PASSWORD>'}
payload1 = {'name': 'wani', 'CartItems[0].id': 4}
s = requests.Session()
r = s.post(login_url, data=payload0)
r = s.post(target_url, data=payload1)
print(r.text)
|
83707
|
from django.contrib.auth.models import User
from demoproject.filter.forms import UserListForm
from django_genericfilters.views import FilteredListView
class UserListView(FilteredListView):
# Normal ListView options
template_name = "user/user_list.html"
paginate_by = 10
context_object_name = "users"
model = User
# FilteredListView options
form_class = UserListForm
search_fields = ["first_name", "last_name", "username", "email"]
filter_fields = ["is_active", "is_staff", "is_superuser"]
default_order = "last_name"
user_list_view = UserListView.as_view()
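# Hedged wiring sketch (not part of the original module; URL path and name are placeholders):
#
# from django.urls import path
# urlpatterns = [
#     path("users/", user_list_view, name="user-list"),
# ]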
|
83726
|
import time
import cv2
import numpy as np
from chainer import serializers, Variable
import chainer.functions as F
import argparse
from darknet19 import *
from yolov2 import *
from yolov2_grid_prob import *
from yolov2_bbox import *
n_classes = 10
n_boxes = 5
partial_layer = 18
def copy_conv_layer(src, dst, layers):
for i in layers:
src_layer = eval("src.conv%d" % i)
dst_layer = eval("dst.conv%d" % i)
dst_layer.W = src_layer.W
dst_layer.b = src_layer.b
def copy_bias_layer(src, dst, layers):
for i in layers:
src_layer = eval("src.bias%d" % i)
dst_layer = eval("dst.bias%d" % i)
dst_layer.b = src_layer.b
def copy_bn_layer(src, dst, layers):
for i in layers:
src_layer = eval("src.bn%d" % i)
dst_layer = eval("dst.bn%d" % i)
dst_layer.N = src_layer.N
dst_layer.avg_var = src_layer.avg_var
dst_layer.avg_mean = src_layer.avg_mean
dst_layer.gamma = src_layer.gamma
dst_layer.eps = src_layer.eps
# load model
print("loading original model...")
input_weight_file = "./backup/darknet19_448_final.model"
output_weight_file = "./backup/partial.model"
model = Darknet19Predictor(Darknet19())
serializers.load_hdf5(input_weight_file, model) # load saved model
yolov2 = YOLOv2(n_classes=n_classes, n_boxes=n_boxes)
copy_conv_layer(model.predictor, yolov2, range(1, partial_layer+1))
copy_bias_layer(model.predictor, yolov2, range(1, partial_layer+1))
copy_bn_layer(model.predictor, yolov2, range(1, partial_layer+1))
model = YOLOv2Predictor(yolov2)
print("saving model to %s" % (output_weight_file))
serializers.save_hdf5("%s" % (output_weight_file), model)
|
83734
|
import numpy as np
import brainscore
from brainio.assemblies import DataAssembly
from brainscore.benchmarks._properties_common import PropertiesBenchmark, _assert_grating_activations
from brainscore.benchmarks._properties_common import calc_spatial_frequency_tuning
from brainscore.metrics.ceiling import NeuronalPropertyCeiling
from brainscore.metrics.distribution_similarity import BootstrapDistributionSimilarity, ks_similarity
from result_caching import store
ASSEMBLY_NAME = 'schiller.Schiller1976c'
REGION = 'V1'
TIMEBINS = [(70, 170)]
PARENT = 'V1-spatial_frequency'
PROPERTY_NAMES = ['spatial_frequency_selective', 'spatial_frequency_bandwidth']
BIBTEX = """@article{Schiller1976,
author = {<NAME> <NAME> <NAME>.},
doi = {10.1152/jn.1976.39.6.1352},
issn = {0022-3077},
journal = {Journal of neurophysiology},
number = {6},
pages = {1334--1351},
pmid = {825624},
title = {{Quantitative studies of single-cell properties in monkey striate cortex. III. Spatial Frequency}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/825624},
volume = {39},
year = {1976}
}
"""
RESPONSE_THRESHOLD = 5
def _MarquesSchiller1976V1Property(property_name):
assembly = brainscore.get_assembly(ASSEMBLY_NAME)
similarity_metric = BootstrapDistributionSimilarity(similarity_func=ks_similarity, property_name=property_name)
ceil_func = NeuronalPropertyCeiling(similarity_metric)
parent = PARENT
return PropertiesBenchmark(identifier=f'dicarlo.Marques_schiller1976-{property_name}', assembly=assembly,
neuronal_property=schiller1976_properties, similarity_metric=similarity_metric,
timebins=TIMEBINS,
parent=parent, ceiling_func=ceil_func, bibtex=BIBTEX, version=1)
def MarquesSchiller1976V1SpatialFrequencySelective():
property_name = 'spatial_frequency_selective'
return _MarquesSchiller1976V1Property(property_name=property_name)
def MarquesSchiller1976V1SpatialFrequencyBandwidth():
property_name = 'spatial_frequency_bandwidth'
return _MarquesSchiller1976V1Property(property_name=property_name)
@store(identifier_ignore=['responses', 'baseline'])
def schiller1976_properties(model_identifier, responses, baseline):
_assert_grating_activations(responses)
radius = np.array(sorted(set(responses.radius.values)))
spatial_frequency = np.array(sorted(set(responses.spatial_frequency.values)))
orientation = np.array(sorted(set(responses.orientation.values)))
phase = np.array(sorted(set(responses.phase.values)))
responses = responses.values
baseline = baseline.values
assert responses.shape[0] == baseline.shape[0]
n_neuroids = responses.shape[0]
responses = responses.reshape((n_neuroids, len(radius), len(spatial_frequency), len(orientation), len(phase)))
responses = responses.mean(axis=4)
max_response = responses.reshape((n_neuroids, -1)).max(axis=1, keepdims=True)
spatial_frequency_bandwidth = np.zeros((n_neuroids, 1))
spatial_frequency_selective = np.ones((n_neuroids, 1))
for neur in range(n_neuroids):
pref_radius, pref_spatial_frequency, pref_orientation = \
np.unravel_index(np.argmax(responses[neur, :, :, :]),
(len(radius), len(spatial_frequency), len(orientation)))
spatial_frequency_curve = responses[neur, pref_radius, :, pref_orientation]
spatial_frequency_bandwidth[neur] = \
calc_spatial_frequency_tuning(spatial_frequency_curve, spatial_frequency, thrsh=0.707, filt_type='smooth',
mode='ratio')[0]
spatial_frequency_selective[np.isnan(spatial_frequency_bandwidth)] = 0
properties_data = np.concatenate((spatial_frequency_selective, spatial_frequency_bandwidth), axis=1)
good_neuroids = max_response > baseline + RESPONSE_THRESHOLD
properties_data = properties_data[np.argwhere(good_neuroids)[:, 0], :]
properties_data = DataAssembly(properties_data, coords={'neuroid_id': ('neuroid', range(properties_data.shape[0])),
'region': ('neuroid', ['V1'] * properties_data.shape[0]),
'neuronal_property': PROPERTY_NAMES},
dims=['neuroid', 'neuronal_property'])
return properties_data
|
83750
|
import asyncio
import copy
import inspect
import logging
from typing import Dict, Any, Callable, List, Tuple
log = logging.getLogger(__name__)
class Parser:
"""
    deals with a list of tokens produced by the Lexer, converting their types to match the command handler's signature
"""
_parse_funcs: Dict[Any, Callable] = {
str: lambda token: token,
int: lambda token: int(token),
float: lambda token: float(token)
# TODO: tag -> User/Channel/Role...
}
def __init__(self):
self._parse_funcs = copy.copy(Parser._parse_funcs)
def parse(self, tokens: List[str], handler: Callable) -> List[Any]:
"""
        parse tokens into args whose types correspond to the handler's requirements
        :param tokens: output of Lexer.lex()
        :param handler: parse target
        :return: List of args
        :raise: Parser.TooMuchArgs, Parser.ParseFuncNotExists, Parser.ParseFuncException
"""
s = inspect.signature(handler)
params = list(s.parameters.items())[1:] # the first param is `msg: Message`
# check
if len(tokens) > len(params):
raise Parser.TooMuchArgs(len(params), len(tokens), handler)
# parse
ret = []
for i in range(len(tokens)):
t = params[i][1].annotation # arg type
# no type hint for t
if t == inspect.Parameter.empty:
ret.append(tokens[i])
continue
if t not in self._parse_funcs:
raise Parser.ParseFuncNotExists(params[i], handler)
try:
ret.append(self._parse_funcs[t](tokens[i]))
except Exception as e:
raise Parser.ParseFuncException(e)
return ret
def register(self, func): # TODO: global register
"""
        decorator, registers func as the parse function for its return-type annotation
        in this object's _parse_funcs, replacing any existing entry for that type
:param func: parse func
"""
s = inspect.signature(func)
# check: 1. not coroutine, 2. len matches
if asyncio.iscoroutinefunction(func):
raise TypeError('parse function should not be async')
if len(s.parameters) != 1 or list(s.parameters.values())[0].annotation != str:
raise TypeError('parse function should own only one param, and the param type is str')
# insert, remember this is a replace
self._parse_funcs[s.return_annotation] = func
return func
class ParserException(Exception):
pass
class TooMuchArgs(ParserException):
def __init__(self, expected: int, exact: int, func: Callable):
self.expected = expected
self.exact = exact
self.func = func
class ParseFuncNotExists(ParserException):
def __init__(self, expected: Tuple[str, inspect.Parameter], func: Callable):
self.expected = expected
self.func = func
class ParseFuncException(ParserException):
def __init__(self, err: Exception):
self.err = err
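# Hedged usage sketch (illustrative; the real first argument would be the framework's
# Message object, which this module does not define, so a plain placeholder is used here):
if __name__ == '__main__':
    demo_parser = Parser()

    @demo_parser.register
    def parse_bool(token: str) -> bool:
        # map common truthy strings to True, everything else to False
        return token.lower() in ('1', 'true', 'yes', 'on')

    def handler(msg, count: int, flag: bool, rest):
        return count, flag, rest

    # tokens as the Lexer would emit them; types follow handler's annotations
    print(demo_parser.parse(['3', 'yes', 'anything'], handler))  # [3, True, 'anything']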
|
83755
|
import math
layouts = []
def register_layout(cls):
layouts.append(cls)
return cls
def node(percent, layout, swallows, children):
result = {
"border": "normal",
# "current_border_width": 2,
"floating": "auto_off",
# "name": "fish <</home/finkernagel>>",
"percent": percent,
"type": "con",
"layout": layout,
}
if swallows:
result["swallows"] = ([{"class": "."}],)
if children:
result["nodes"] = children
return result
def get_stack(window_count, split):
return get_stack_unequal([1.0 / window_count] * window_count, split)
def get_stack_unequal(percentages, split):
elements = []
for p in percentages:
elements.append(node(p, split, True, False))
return [{"layout": split, "type": "con", "nodes": elements}]
@register_layout
class Layout_vStack:
name = "vStack"
aliases = ["1col", "1c"]
description = """\
One column / a vertical stack.
---------
| 1 |
---------
| 2 |
---------
| 3 |
---------
"""
def get_json(self, window_count):
return get_stack(window_count, "splitv")
@register_layout
class Layout_hStack:
name = "hStack"
aliases = ["1row", "1r"]
description = """\
One row / a horizontal stack
-------------
| | | |
| 1 | 2 | 3 |
| | | |
-------------
"""
def get_json(self, window_count):
return get_stack(window_count, "splith")
@register_layout
class Layout_tabbed:
name = "tabbed"
aliases = []
description = """\
Tabbed
---------
| |
| 1/2/3 |
| |
---------
"""
def get_json(self, window_count):
return get_stack(window_count, "tabbed")
@register_layout
class Layout_v2Stack:
name = "v2Stack"
aliases = ["2col", "2c", "2v"]
description = """\
Two columns of stacks
-------------
| 1 | 4 |
-------------
| 2 | 5 |
-------------
| 3 | 6 |
-------------
"""
def get_json(self, window_count):
s = int(math.ceil(window_count / 2))
left = get_stack(s, "splitv")
right = get_stack(s if window_count % 2 == 0 else s - 1, "splitv")
return [{"layout": "splith", "type": "con", "nodes": [left, right]}]
@register_layout
class Layout_h2Stack:
name = "h2Stack"
aliases = ["2row", "2r", "2h"]
description = """\
Two rows of stacks
-------------------
| 1 | 2 | 3 |
-------------------
| 4 | 5 | 6 |
-------------------
"""
def get_json(self, window_count):
s = int(math.ceil(window_count / 2))
left = get_stack(s, "splith")
right = get_stack(s if window_count % 2 == 0 else s - 1, "splith")
return [{"layout": "splitv", "type": "con", "nodes": [left, right]}]
@register_layout
class Layout_v3Stack:
name = "v3Stack"
aliases = ["3col", "3c", "3v"]
description = """\
Three columns of stacks
-------------------
| 1 | 3 | 5 |
-------------------
| 2 | 4 | 6 |
-------------------
"""
def get_json(self, window_count):
s = window_count // 3
a = get_stack(s + window_count % 3, "splitv")
b = get_stack(s, "splitv")
c = get_stack(s, "splitv")
return [{"layout": "splith", "type": "con", "nodes": [a, b, c]}]
@register_layout
class Layout_h3Stack:
name = "h3Stack"
aliases = ["3row", "3r", "3h"]
description = """\
Three rows of stacks
-------------------
| 1 | 2 | 3 |
-------------------
| 4 | 5 | 6 |
-------------------
| 7 | 8 | 9 |
-------------------
"""
def get_json(self, window_count):
s = window_count // 3
a = get_stack(s + window_count % 3, "splith")
b = get_stack(s, "splith")
c = get_stack(s, "splith")
return [{"layout": "splitv", "type": "con", "nodes": [a, b, c]}]
@register_layout
class Layout_Max:
name = "max"
aliases = ["maxTabbed"]
description = """\
One large container,
in tabbed mode.
---------------
| |
| 1,2,3,4, |
| |
---------------
"""
def get_json(self, window_count):
return get_stack(window_count, "tabbed")
@register_layout
class Layout_MainLeft:
name = "mainLeft"
aliases = ["ml", "mv", "MonadTall"]
description = """\
One large window to the left at 50%,
all others stacked to the right vertically.
-------------
| | 2 |
| |-----|
| 1 | 3 |
| |-----|
| | 4 |
-------------
"""
def get_json(self, window_count):
return node(
1,
"splith",
False,
[node(0.5, "splitv", True, []), get_stack(window_count - 1, "splitv")],
)
@register_layout
class Layout_MainRight:
name = "mainRight"
aliases = ["mr", "vm", "MonadTallFlip"]
description = """\
One large window to the right at 50%,
    all others stacked to the left vertically.
-------------
| 2 | |
|-----| |
| 3 | 1 |
|-----| |
| 4 | |
-------------
"""
def get_json(self, window_count):
return (
node(
1,
"splith",
False,
[get_stack(window_count - 1, "splitv"), node(0.75, "splitv", True, [])],
),
list(range(1, window_count)) + [0],
)
@register_layout
class Layout_MainMainVStack:
name = "MainMainVStack"
aliases = ["mmv"]
description = """\
Two large windows to the left at 30%,
all others stacked to the right vertically.
-------------------
| | | 3 |
| | |-----|
| 1 | 2 | 4 |
| | |-----|
| | | 5 |
-------------------
"""
def get_json(self, window_count):
return node(
1,
"splith",
False,
[
node(1 / 3, "splitv", True, []),
node(1 / 3, "splitv", True, []),
get_stack(window_count - 2, "splitv"),
],
)
@register_layout
class Layout_MainVStackMain:
name = "MainVStackMain"
aliases = ["mvm"]
description = """\
Two large windows at 30% to the left and right,
a vstack in the center
-------------------
| | 3 | |
| |-----| |
| 1 | 4 | 2 |
| |-----| |
| | 5 | |
-------------------
"""
def get_json(self, window_count):
return (
node(
1,
"splith",
False,
[
node(1 / 3, "splitv", True, []),
get_stack(window_count - 2, "splitv"),
node(1 / 3, "splitv", True, []),
],
),
[0] + list(range(2, window_count)) + [1],
)
@register_layout
class Layout_Matrix:
name = "matrix"
aliases = []
description = """\
Place windows in a n * n matrix.
The matrix will place swallow-markers
if you have less than n*n windows.
N is math.ceil(math.sqrt(window_count))
"""
def get_json(self, window_count):
n = int(math.ceil(math.sqrt(window_count)))
stacks = [get_stack(n, "splith") for stack in range(n)]
return node(1, "splitv", False, stacks)
@register_layout
class Layout_VerticalTileTop:
name = "VerticalTileTop"
aliases = ["vtt"]
description = """\
Large master area (66%) on top,
horizontal stacking below
"""
def get_json(self, window_count):
return node(
1,
"splitv",
False,
[
node(0.66, "splitv", True, []),
node(
0.33,
"splitv",
False,
get_stack_unequal(
[0.33 / (window_count - 1)] * (window_count - 1), "splitv",
),
),
],
)
@register_layout
class Layout_VerticalTileBottom:
name = "VerticalTileBottom"
aliases = ["vtb"]
description = """\
Large master area (66%) on bottom,
horizontal stacking above
"""
def get_json(self, window_count):
return (
node(
1,
"splitv",
False,
[
node(
0.33,
"splitv",
False,
get_stack_unequal(
[0.33 / (window_count - 1)] * (window_count - 1), "splitv",
),
),
node(0.66, "splitv", True, []),
],
),
list(range(1, window_count)) + [0],
)
@register_layout
class Nested:
name = "NestedRight"
aliases = ["nr"]
description = """\
Nested layout, starting with a full left half.
-------------------------
| | |
| | 2 |
| | |
| 1 |-----------|
| | | 4 |
| | 3 |-----|
| | |5 | 6|
-------------------------
"""
def get_json(self, window_count):
dir = "h"
parent = node(1, "splith", False, [])
root = parent
parent["nodes"] = []
for ii in range(window_count):
parent["nodes"].append(get_stack_unequal([0.5], "split" + dir))
n = node(1, "splith", False, [])
if dir == "h":
dir = "v"
else:
dir = "h"
n["layout"] = "split" + dir
n["nodes"] = []
if ii < window_count - 1:
parent["nodes"].append(n)
parent = n
return root
@register_layout
class Smart:
name = "SmartNestedRight"
aliases = ["snr"]
description = """\
Nested layout, starting with a full left half,
but never going below 1/16th of the size.
2 windows
-------------------------
| | |
| | |
| | |
| 1 | 2 |
| | |
| | |
| | |
-------------------------
5 windows
-------------------------
| | |
| | 2 |
| | |
| 1 |-----------|
| | | 4 |
| | 3 |-----|
| | | 5 |
-------------------------
6 windows
-------------------------
| | |
| | 2 |
| | |
| 1 |-----------|
| | 3 | 4 |
| |-----|-----|
| | 5 | 6 |
-------------------------
7 windows
-------------------------
| | | |
| | 2 | 3 |
| | | |
| 1 |-----------|
| | 4 | 5 |
| |-----|-----|
| | 6 | 7 |
-------------------------
15 windows
-------------------------
| | 2 | 4 | 6 |
| 1 |-----|-----|-----|
| | 3 | 5 | 7 |
|-----------|-----------|
| 8 | A | C | E |
|-----|-----|-----|-----|
| 9 | B | D | F |
-------------------------
Falls back to matrix layout above 16 windows.
"""
def get_json(self, window_count):
def nest_1():
return node(1, "splith", True, [])
def nest_2():
return get_stack(2, "splith")
def nest_3():
return node(
1,
"splith",
False,
[node(0.5, "splitv", True, []), get_stack(2, "splitv")],
)
def nest_4():
return node(
1, "splith", False, [get_stack(2, "splitv"), get_stack(2, "splitv")],
)
if window_count == 1:
return nest_1()
elif window_count == 2:
return nest_2()
elif window_count == 3:
return nest_3()
elif window_count == 4:
return nest_4()
elif window_count == 5:
return node(
1,
"splith",
False,
[
node(0.5, "splitv", True, [],),
node(
0.5, "splitv", False, [node(0.5, "split", True, []), nest_3()]
),
],
)
elif window_count == 6:
return node(
1,
"splith",
False,
[
node(0.5, "splitv", True, [],),
node(
0.5, "splitv", False, [node(0.5, "split", True, []), nest_4()]
),
],
)
elif window_count == 7:
return node(
1,
"splith",
False,
[
node(0.5, "splitv", True, [],),
node(0.5, "splitv", False, [nest_2(), nest_4()]),
],
)
elif window_count == 8:
return node(
1,
"splith",
False,
[
node(0.5, "splitv", True, [],),
node(0.5, "splitv", False, [nest_3(), nest_4()]),
],
)
elif window_count == 9:
return node(
1,
"splith",
False,
[
node(0.5, "splitv", True, [],),
node(0.5, "splitv", False, [nest_4(), nest_4()]),
],
)
elif window_count == 10:
return node(
1,
"splith",
False,
[
node(
0.5,
"splitv",
False,
[node(0.5, "splitv", True, []), node(0.5, "splitv", True, [])],
),
node(0.5, "splitv", False, [nest_4(), nest_4()]),
],
)
elif window_count == 11:
return node(
1,
"splith",
False,
[
node(
0.5, "splitv", False, [node(0.5, "splitv", True, []), nest_2()],
),
node(0.5, "splitv", False, [nest_4(), nest_4()]),
],
)
elif window_count == 12:
return node(
1,
"splith",
False,
[
node(
0.5, "splitv", False, [node(0.5, "splitv", True, []), nest_3()],
),
node(0.5, "splitv", False, [nest_4(), nest_4()]),
],
)
elif window_count == 13:
return node(
1,
"splith",
False,
[
node(
0.5, "splitv", False, [node(0.5, "splitv", True, []), nest_4()],
),
node(0.5, "splitv", False, [nest_4(), nest_4()]),
],
)
elif window_count == 14:
return node(
1,
"splith",
False,
[
node(0.5, "splitv", False, [nest_2(), nest_4()],),
node(0.5, "splitv", False, [nest_4(), nest_4()]),
],
)
elif window_count == 15:
return node(
1,
"splith",
False,
[
node(0.5, "splitv", False, [nest_3(), nest_4()],),
node(0.5, "splitv", False, [nest_4(), nest_4()]),
],
)
elif window_count == 16:
return node(
1,
"splith",
False,
[
node(0.5, "splitv", False, [nest_4(), nest_4()],),
node(0.5, "splitv", False, [nest_4(), nest_4()]),
],
)
else:
return Layout_Matrix().get_json(window_count)
@register_layout
class Layout_MainCenter:
name = "mainCenter"
aliases = ["mc", "vmv"]
description = """\
One large window in the midle at 50%,
all others stacked to the left/right vertically.
-------------------
| 2 | | 5 |
|-----| |-----|
| 3 | 1 | 6 |
|-----| |-----|
| 4 | | 7 |
-------------------
"""
def get_json(self, window_count):
lr = window_count - 1
left = math.ceil(lr / 2)
right = math.floor(lr / 2)
nodes = []
if left:
nodes.append(node(0.25, 'splith', False, get_stack(left, 'splitv')))
nodes.append(node(0.5, 'splitv', True, []))
if right:
nodes.append(node(0.25, 'splith', False, get_stack(right, 'splitv')))
order = list(range(1, left+1)) + [0] + list(range(left+1, left+1+right))
print(order)
return node(1, 'splith', False, nodes),order
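# Hedged usage sketch (illustrative; the i3 `append_layout` plumbing that consumes this
# JSON is not part of this file): look a layout up by name or alias and dump its
# placeholder tree for a given window count.
if __name__ == "__main__":
    import json

    def _find_layout(name):
        for cls in layouts:
            if cls.name == name or name in cls.aliases:
                return cls()
        raise KeyError(name)

    print(json.dumps(_find_layout("vStack").get_json(3), indent=2))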
|
83778
|
import pytest
import mantle
from ..harness import show
def com(name, main):
import magma
from magma.testing import check_files_equal
name += '_' + magma.mantle_target
build = 'build/' + name
gold = 'gold/' + name
magma.compile(build, main)
assert check_files_equal(__file__, build+'.v', gold+'.v')
assert check_files_equal(__file__, build+'.pcf', gold+'.pcf')
@pytest.mark.parametrize("width", [3,4,5,6,7,8])
def test_lfsr(width):
from mantle.util.lfsr import DefineLFSR
Test = DefineLFSR(width)
com(f'LFSR{width}', show(Test, width))
|
83783
|
import json
import os
import asyncio
from aiohttp.client import ClientSession
from ln_address import LNAddress
import logging
###################################
logging.basicConfig(filename='lnaddress.log', level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logging.getLogger("lnaddress").setLevel(level=logging.WARNING)
logger = logging.getLogger(__name__)
###################################
async def main():
email = '<EMAIL>'
amount = 150 # amount in sats
#email = '<EMAIL>' # non working ln address
#amount = None
# Get environment variables
invoice_key = os.getenv('INVOICE_KEY')
admin_key = os.getenv('ADMIN_KEY')
base_url = os.getenv('BASE_URL')
config = { 'invoice_key': invoice_key,
'admin_key': admin_key,
'base_url': base_url }
print(config)
try:
async with ClientSession() as session:
logging.info("in ClientSession")
lnaddy = LNAddress(config, session)
bolt11 = await lnaddy.get_bolt11(email, amount)
logging.info(bolt11)
payhash = await lnaddy.get_payhash(bolt11)
logging.info("response from get_payhash: " + str(payhash))
# check payment hash status -
output = await lnaddy.check_invoice(payhash)
if 'paid' in output:
pay_status = output['paid']
pay_preimage = output['preimage']
paid_status= 'paid status:'+ str(pay_status)
img_status = "image: " + str(pay_preimage)
logging.info(paid_status)
logging.info(img_status)
else:
logging.info(output)
# pay invoice
#result = await lnaddy.pay_invoice(bolt11)
#logging.info("pay invoice result:")
#logging.info(result)
# check payment hash status -
"""
payment_hash = result['payment_hash']
output = await lnaddy.check_invoice(payment_hash)
if 'paid' in output:
pay_status = output['paid']
pay_preimage = output['preimage']
paid_status= 'paid status:'+ str(pay_status)
img_status = "image: " + str(pay_preimage)
logging.info(paid_status)
logging.info(img_status)
else:
logging.info(output)
"""
except Exception as e:
logging.error(e)
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
83805
|
import datetime
import numpy as np
import os
import pandas as pd
import shutil
import urllib.request
import zipfile
__all__ = [
'fetch_ml_ratings',
]
VARIANTS = {
'100k': {'filename': 'u.data', 'sep': '\t'},
'1m': {'filename': 'ratings.dat', 'sep': r'::'},
'10m': {'filename': 'ratings.dat', 'sep': r'::'},
'20m': {'filename': 'ratings.csv', 'sep': ','}
}
def fetch_ml_ratings(data_dir_path=None, variant='20m', verbose=False):
"""Fetches MovieLens ratings dataset.
Parameters
----------
data_dir_path : str, default=None
Explicit data directory path to MovieLens ratings file.
variant : {'100k', '1m', '10m', '20m'}, default='20m'
Movie lens dataset variant.
verbose : bool, default=False
        Whether or not to print progress while downloading and unzipping the dataset.
Returns
-------
df : pandas.DataFrame
The MovieLens ratings dataset.
"""
if data_dir_path is None:
data_dir_path = _get_data_dir_path(data_dir_path)
dirname = 'ml-' + variant
filename = VARIANTS[variant]['filename']
csv_path = os.path.join(data_dir_path, dirname, filename)
zip_path = os.path.join(data_dir_path, dirname) + '.zip'
url = 'http://files.grouplens.org/datasets/movielens/ml-' + variant + \
'.zip'
else:
csv_path = data_dir_path
if os.path.exists(csv_path):
# Return data loaded into a DataFrame
df = _ml_ratings_csv_to_df(csv_path, variant)
return df
elif os.path.exists(zip_path):
# Unzip file before calling back itself
if verbose:
print('Unzipping data...')
with zipfile.ZipFile(zip_path, 'r') as zf:
zf.extractall(data_dir_path)
if variant == '10m':
os.rename(os.path.join(data_dir_path, 'ml-10M100K'),
os.path.join(data_dir_path, dirname))
os.remove(zip_path)
return fetch_ml_ratings(variant=variant, verbose=verbose)
else:
# Download the ZIP file before calling back itself
if verbose:
print('Downloading data...')
with urllib.request.urlopen(url) as r, open(zip_path, 'wb') as f:
shutil.copyfileobj(r, f)
return fetch_ml_ratings(variant=variant, verbose=verbose)
def _get_data_dir_path(data_dir_path=None):
"""Returns the path of the funk-svd data directory.
This folder is used to store large datasets to avoid downloading them
several times.
By default the data dir is set to a folder named 'funk_svd_data' in the
user home folder. Alternatively, it can be set by the `FUNK_SVD_DATA`
environment variable or programmatically by giving an explicit
`data_dir_path`.
If the folder does not already exist, it is automatically created.
Parameters
----------
data_dir_path : str, default=None
Explicit data directory path for large datasets.
Returns
-------
data_dir_path: str
Explicit data directory path for large datasets.
"""
if data_dir_path is None:
default = os.path.join('~', 'funk_svd_data')
data_dir_path = os.environ.get('FUNK_SVD_DATA', default=default)
data_dir_path = os.path.expanduser(data_dir_path)
if not os.path.exists(data_dir_path):
os.makedirs(data_dir_path)
return data_dir_path
def _ml_ratings_csv_to_df(csv_path, variant):
names = ['u_id', 'i_id', 'rating', 'timestamp']
dtype = {'u_id': np.uint32, 'i_id': np.uint32, 'rating': np.float64}
def date_parser(time):
return datetime.datetime.fromtimestamp(float(time))
df = pd.read_csv(csv_path, names=names, dtype=dtype, header=0,
sep=VARIANTS[variant]['sep'], parse_dates=['timestamp'],
date_parser=date_parser, engine='python')
df.sort_values(by='timestamp', inplace=True)
df.reset_index(drop=True, inplace=True)
return df
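# Hedged usage sketch (not part of the original module; requires network access and a few
# megabytes of disk space under the funk_svd data dir):
#
# >>> df = fetch_ml_ratings(variant='100k', verbose=True)
# >>> df.columns.tolist()
# ['u_id', 'i_id', 'rating', 'timestamp']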
|
83824
|
import os
import sys
from PyQt5 import QtGui
from PyQt5.QtCore import QStandardPaths, QSettings
from PyQt5.QtWidgets import QApplication, QWidget, QMessageBox, QFileDialog
from PyQt5.QtXml import QDomDocument, QDomNode
try:
from krita import *
CONTEXT_KRITA = True
Krita = Krita # to stop Eric ide complaining about unknown Krita
EXTENSION = krita.Extension
except ImportError: # script being run in testing environment without Krita
CONTEXT_KRITA = False
EXTENSION = QWidget
from durraext import DURRAExt
def main():
# this includes when the script is run from the command line or
# from the Scripter plugin.
if CONTEXT_KRITA:
# scripter plugin
# give up - has the wrong context to create widgets etc.
# maybe in the future change this.
pass
else:
app = QApplication([])
extension = DURRAExt(None)
extension.setup()
extension.action_triggered()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
83832
|
import os
import scipy as sp
import scipy.misc
import imreg_dft as ird
basedir = os.path.join('..', 'examples')
# the TEMPLATE
im0 = sp.misc.imread(os.path.join(basedir, "sample1.png"), True)
# the image to be transformed
im1 = sp.misc.imread(os.path.join(basedir, "sample2.png"), True)
result = ird.translation(im0, im1)
tvec = result["tvec"].round(4)
# the Transformed IMaGe.
timg = ird.transform_img(im1, tvec=tvec)
# Maybe we don't want to show plots all the time
if os.environ.get("IMSHOW", "yes") == "yes":
import matplotlib.pyplot as plt
ird.imshow(im0, im1, timg)
plt.show()
print("Translation is {}, success rate {:.4g}"
.format(tuple(tvec), result["success"]))
|
83868
|
import json
def load_tools(file_path='/definitions/workers.json'):
tools = {}
with open(file_path) as json_file:
tools = json.load(json_file)
return tools
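# Hedged usage sketch: the default path assumes the definitions file is mounted at
# /definitions (e.g. inside a container); pass an explicit path otherwise.
#
# tools = load_tools('./definitions/workers.json')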
|
83876
|
import os
import argparse
import sys
from Image_extractor import *
from Lidar_pointcloud_extractor import *
from Calibration_extractor import *
from Label_extractor import *
from Label_ext_with_occlusion import *
def File_names_and_path(source_folder):
dir_list = os.listdir(source_folder)
files = list()
for directory in dir_list:
path = os.path.join(source_folder, directory)
if os.path.isdir(path):
files = files + File_names_and_path(path)
else:
files.append(path)
tf_files = [f for f in files if f.endswith('.' + 'tfrecord')]
return tf_files
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--source', help = 'provide source path', type=str)
    parser.add_argument('--dest', help='provide destination path', type=str)
parser.add_argument('--velo',action="store_true", help='extract only lidar data points, calibration parameters and labels')
parser.add_argument('--img',action="store_true", help='extract only camera images, calibration parameters and labels')
parser.add_argument('--all',action="store_true", help='extract lidar data points, camera images, calibration parameters and labels')
parser.add_argument('--oclu',action="store_true", help='extract lidar data points, camera images, calibration parameters and labels with occlusion details')
args = parser.parse_args()
source_folder = args.source
dest_folder = args.dest
# files = [f for f in os.listdir(source_folder)]
# path = [os.path.join(source_folder, f) for f in files]
path = File_names_and_path(source_folder)
isExist = os.path.exists(os.path.join(dest_folder, 'Output'))
if isExist:
pass
else:
os.makedirs(os.path.join(dest_folder, "Output/Velodyne"))
os.makedirs(os.path.join(dest_folder, "Output/Calibration/Calib_all"))
os.makedirs(os.path.join(dest_folder, "Output/Labels/Label_all"))
subfolder_names1 = ['Output/Labels/Label/0', 'Output/Labels/Label/1', 'Output/Labels/Label/2', 'Output/Labels/Label/3', 'Output/Labels/Label/4']
for folder_name in subfolder_names1:
os.makedirs(os.path.join(dest_folder, folder_name))
subfolder_names1 = ['Output/Calibration/Calib/0', 'Output/Calibration/Calib/1', 'Output/Calibration/Calib/2', 'Output/Calibration/Calib/3', 'Output/Calibration/Calib/4']
for folder_name in subfolder_names1:
os.makedirs(os.path.join(dest_folder, folder_name))
subfolder_names = ['Output/Camera/Front', 'Output/Camera/Front_left', 'Output/Camera/Side_left', 'Output/Camera/Front_right', 'Output/Camera/Side_right']
for folder_name in subfolder_names:
os.makedirs(os.path.join(dest_folder, folder_name))
Front = os.path.join(dest_folder, "Output/Camera/Front/")
Front_left = os.path.join(dest_folder, "Output/Camera/Front_left/")
Side_left = os.path.join(dest_folder, "Output/Camera/Side_left/")
Front_right = os.path.join(dest_folder, "Output/Camera/Front_right/")
Side_right = os.path.join(dest_folder, "Output/Camera/Side_right/")
lidar = os.path.join(dest_folder, "Output/Velodyne/")
Calib_all = os.path.join(dest_folder, "Output/Calibration/Calib_all/")
Calib = os.path.join(dest_folder, "Output/Calibration/Calib/")
Label_all = os.path.join(dest_folder, "Output/Labels/Label_all/")
Label = os.path.join(dest_folder, "Output/Labels/Label/")
i, j, k, l = 0, 0, 0, 0
print('Extraction process started:')
if args.velo:
for filename in path:
j = point_cloud_extractor(j, filename, lidar)
k = calibration_extractor(k, filename, Calib_all, Calib)
l = label_extractor(l, filename, Label_all, Label)
if args.img:
for filename in path:
i = image_extractor(i, filename, Front, Front_left, Side_left, Front_right, Side_right)
k = calibration_extractor(k, filename, Calib_all, Calib)
l = label_extractor(l, filename, Label_all, Label)
if args.all:
for filename in path:
i = image_extractor(i, filename, Front, Front_left, Side_left, Front_right, Side_right)
j = point_cloud_extractor(j, filename, lidar)
k = calibration_extractor(k, filename, Calib_all, Calib)
l = label_extractor(l, filename, Label_all, Label)
if args.oclu:
for filename in path:
i = image_extractor(i, filename, Front, Front_left, Side_left, Front_right, Side_right)
j = point_cloud_extractor(j, filename, lidar)
k = calibration_extractor(k, filename, Calib_all, Calib)
l = label_ext_with_occlusion(l, filename, Label_all, Label)
print('Number of images extracted:', i)
print('Number of point clouds extracted:', j)
print('Number of calibration parameters extracted:', k)
print('Number of labels extracted:', l)
print('Extraction process complete:')
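    # Example invocation (hedged; paths and the script filename are placeholders):
    #   python waymo_extractor.py --source /data/waymo/tfrecords --dest /data/waymo --all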
|