ext | sha | content |
---|---|---|
py | 1a3b3d4a1374d500172b05783bb5e7af1a42bffc | '''
Implementation: Mahnoor Anjum
Description:
Intersection Test
By:
www.geeksforgeeks.org
'''
import geopandas as gpd
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy import stats
data = pd.read_csv('data/combined_kmeans25_100.csv')
method = 'median_test'
def custom(a, b):
    _, p, _, _ = stats.median_test(a, b)
    return p
corr_mat = data.corr(method = custom)
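# Note: pandas' DataFrame.corr accepts a callable that takes two 1-D arrays and returns a scalar,
# so corr_mat holds the Mood's median-test p-value for every pair of columns rather than a correlation.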
fig, ax = plt.subplots(1,1, figsize = (10,4))
ax = sns.heatmap(corr_mat, cmap = 'YlGnBu', linewidths=.5, annot=True)
ax.set_title(str(method))
plt.savefig(str(method) + '.png')
|
py | 1a3b3e772ccf40ee0b1cbe0e25259985103d8734 | from dataclasses import dataclass
@dataclass
class SlotFeatures:
    docid: str
    trigger: str
    trigger_id: str
    trigger_type: str  # Event type
    trigger_sent_idx: int
    arg: str
    arg_id: str
    arg_type: str  # NER
    arg_sent_idx: int
    role: str
    pair_type: str
    context: str
    prediction: str = None
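# Illustrative construction (every field value below is hypothetical):
# feat = SlotFeatures(docid="doc-001", trigger="attack", trigger_id="T1",
#                     trigger_type="Conflict.Attack", trigger_sent_idx=0,
#                     arg="soldiers", arg_id="E5", arg_type="PER", arg_sent_idx=0,
#                     role="Attacker", pair_type="Conflict.Attack:PER",
#                     context="...", prediction=None)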
|
py | 1a3b3f71a904fb93e1d19c9e3536ce318a88fe74 | #!/bin/python3
import os
import re
# Complete the happyLadybugs function below.
def happyLadybugs(b):
    # With no empty cell the board cannot be rearranged, so it must already be happy:
    # deleting every run of two or more equal letters has to leave nothing behind.
    if b.count('_') == 0 and len(re.sub(r'((.)\2+)', '', b)) != 0:
        return 'NO'
    # A colour that appears exactly once can never sit next to a same-coloured ladybug.
    for a in set(b):
        if a != '_' and b.count(a) == 1:
            return 'NO'
    return 'YES'
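# Quick sanity checks implied by the rules above:
#   happyLadybugs("RBY_YBR") -> 'YES'  (an empty cell allows moves; every colour occurs at least twice)
#   happyLadybugs("AABBC")   -> 'NO'   ('C' occurs exactly once)
#   happyLadybugs("BBRRBB")  -> 'YES'  (already happy, no empty cell needed)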
if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    g = int(input())
    for g_itr in range(g):
        n = int(input())
        b = input()
        result = happyLadybugs(b)
        fptr.write(result + '\n')
    fptr.close()
|
py | 1a3b40c483cbc9da60e1a095280a55777b33dd39 | """
Demonstrates testing the equality of lists.
"""
numbers1 = [3, 5, 7, 9]
numbers2 = [9, 7, 5, 3]
numbers3 = [3, 5, 7, 9]
numbers4 = [3, 5]
if numbers1 == numbers4:  # == compares lists element by element
    print("numbers1 and numbers4 are equal")
else:
    print("numbers1 and numbers4 are not equal")
if numbers1 == numbers3:
    print("numbers1 and numbers3 are equal")
else:
    print("numbers1 and numbers3 are not equal")
if numbers1 == numbers2:  # the same values in a different order are not equal
    print("numbers1 and numbers2 are equal")
else:
    print("numbers1 and numbers2 are not equal")
if numbers2 == numbers3:
    print("numbers2 and numbers3 are equal")
else:
    print("numbers2 and numbers3 are not equal")
if numbers3 == numbers4:
    print("numbers3 and numbers4 are equal")
else:
    print("numbers3 and numbers4 are not equal")
print()
numbers5 = numbers1  # numbers5 is an alias: both names refer to the same list object
if numbers5 is numbers1:  # 'is' tests object identity, not element equality
    print("numbers5 is a shallow copy of numbers1")
else:
    print("numbers5 is not a shallow copy of numbers1")
|
py | 1a3b41f74bd9935cfc256c49935442786102f6bf | """Receive signals from a keyboard and use it as a remote control."""
# pylint: disable=import-error
import threading
import logging
import os
import time
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP
_LOGGER = logging.getLogger(__name__)
DEVICE_DESCRIPTOR = "device_descriptor"
DEVICE_ID_GROUP = "Device description"
DEVICE_NAME = "device_name"
DOMAIN = "keyboard_remote"
ICON = "mdi:remote"
KEY_CODE = "key_code"
KEY_VALUE = {"key_up": 0, "key_down": 1, "key_hold": 2}
KEYBOARD_REMOTE_COMMAND_RECEIVED = "keyboard_remote_command_received"
KEYBOARD_REMOTE_CONNECTED = "keyboard_remote_connected"
KEYBOARD_REMOTE_DISCONNECTED = "keyboard_remote_disconnected"
TYPE = "type"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Exclusive(DEVICE_DESCRIPTOR, DEVICE_ID_GROUP): cv.string,
vol.Exclusive(DEVICE_NAME, DEVICE_ID_GROUP): cv.string,
vol.Optional(TYPE, default="key_up"): vol.All(
cv.string, vol.Any("key_up", "key_down", "key_hold")
),
}
)
],
)
},
extra=vol.ALLOW_EXTRA,
)
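# A configuration.yaml entry matching the schema above might look like this
# (the device path and key type are illustrative, not taken from this file):
#
# keyboard_remote:
#   - device_descriptor: '/dev/input/by-id/usb-Example_Keyboard-event-kbd'
#     type: 'key_up'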
def setup(hass, config):
"""Set up the keyboard_remote."""
config = config.get(DOMAIN)
keyboard_remote = KeyboardRemote(hass, config)
def _start_keyboard_remote(_event):
keyboard_remote.run()
def _stop_keyboard_remote(_event):
keyboard_remote.stop()
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, _start_keyboard_remote)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _stop_keyboard_remote)
return True
class KeyboardRemoteThread(threading.Thread):
"""This interfaces with the inputdevice using evdev."""
def __init__(self, hass, device_name, device_descriptor, key_value):
"""Construct a thread listening for events on one device."""
self.hass = hass
self.device_name = device_name
self.device_descriptor = device_descriptor
self.key_value = key_value
if self.device_descriptor:
self.device_id = self.device_descriptor
else:
self.device_id = self.device_name
self.dev = self._get_keyboard_device()
if self.dev is not None:
_LOGGER.debug("Keyboard connected, %s", self.device_id)
else:
_LOGGER.debug(
"Keyboard not connected, %s. " "Check /dev/input/event* permissions",
self.device_id,
)
id_folder = "/dev/input/by-id/"
if os.path.isdir(id_folder):
from evdev import InputDevice, list_devices
device_names = [
InputDevice(file_name).name for file_name in list_devices()
]
_LOGGER.debug(
"Possible device names are: %s. "
"Possible device descriptors are %s: %s",
device_names,
id_folder,
os.listdir(id_folder),
)
threading.Thread.__init__(self)
self.stopped = threading.Event()
self.hass = hass
def _get_keyboard_device(self):
"""Get the keyboard device."""
from evdev import InputDevice, list_devices
if self.device_name:
devices = [InputDevice(file_name) for file_name in list_devices()]
for device in devices:
if self.device_name == device.name:
return device
elif self.device_descriptor:
try:
device = InputDevice(self.device_descriptor)
except OSError:
pass
else:
return device
return None
def run(self):
"""Run the loop of the KeyboardRemote."""
from evdev import categorize, ecodes
if self.dev is not None:
self.dev.grab()
_LOGGER.debug("Interface started for %s", self.dev)
while not self.stopped.isSet():
# Sleeps to ease load on processor
time.sleep(0.05)
if self.dev is None:
self.dev = self._get_keyboard_device()
if self.dev is not None:
self.dev.grab()
self.hass.bus.fire(
KEYBOARD_REMOTE_CONNECTED,
{
DEVICE_DESCRIPTOR: self.device_descriptor,
DEVICE_NAME: self.device_name,
},
)
_LOGGER.debug("Keyboard re-connected, %s", self.device_id)
else:
continue
try:
event = self.dev.read_one()
except IOError: # Keyboard Disconnected
self.dev = None
self.hass.bus.fire(
KEYBOARD_REMOTE_DISCONNECTED,
{
DEVICE_DESCRIPTOR: self.device_descriptor,
DEVICE_NAME: self.device_name,
},
)
_LOGGER.debug("Keyboard disconnected, %s", self.device_id)
continue
if not event:
continue
if event.type is ecodes.EV_KEY and event.value is self.key_value:
_LOGGER.debug(categorize(event))
self.hass.bus.fire(
KEYBOARD_REMOTE_COMMAND_RECEIVED,
{
KEY_CODE: event.code,
DEVICE_DESCRIPTOR: self.device_descriptor,
DEVICE_NAME: self.device_name,
},
)
class KeyboardRemote:
"""Sets up one thread per device."""
def __init__(self, hass, config):
"""Construct a KeyboardRemote interface object."""
self.threads = []
for dev_block in config:
device_descriptor = dev_block.get(DEVICE_DESCRIPTOR)
device_name = dev_block.get(DEVICE_NAME)
key_value = KEY_VALUE.get(dev_block.get(TYPE, "key_up"))
if device_descriptor is not None or device_name is not None:
thread = KeyboardRemoteThread(
hass, device_name, device_descriptor, key_value
)
self.threads.append(thread)
def run(self):
"""Run all event listener threads."""
for thread in self.threads:
thread.start()
def stop(self):
"""Stop all event listener threads."""
for thread in self.threads:
thread.stopped.set()
|
py | 1a3b424a58a6e2a7cb681aa90abe7e9141501497 | #!/usr/bin/env python
__author__ = 'Florian Hase'
#========================================================================
import copy
import numpy as np
import pickle
from DatabaseManager.database import Database
from Utilities.misc import Printer
#========================================================================
class ResultsHandler(Printer):
DB_ATTRIBUTES = {'status': 'string',
'job_id': 'string',
'repetition': 'integer',
'work_dir': 'string',
'exp_identifier': 'string',
'parameters': 'pickle',
'objectives': 'pickle',
'author': 'pickle'}
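# Shape of a results_dict handed to process_results(), inferred from DB_ATTRIBUTES above
# (the concrete values are hypothetical):
# {'job_id': '42', 'repetition': 0, 'work_dir': '/tmp/job_42', 'exp_identifier': 'exp_A',
#  'parameters': {...}, 'objectives': {'yield': 0.87}, 'author': {...}}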
PROCESSED_JOBS = []
def __init__(self, settings, verbose = True):
Printer.__init__(self, 'RESULTS HANDLER', color = 'yellow')
self.settings = settings
self.verbose = verbose
self._create_database()
def _create_database(self):
db_settings = self.settings['results_database']
self.database = Database(db_settings['path'], self.DB_ATTRIBUTES,
db_settings['database_type'], verbose = self.verbose)
def process_results(self, results_dict):
results_dict['status'] = 'new'
self.database.add(results_dict)
def remove_results(self, identifier):
self._print('removing feedback for %s' % identifier)
condition = {'exp_identifier': identifier}
self.database.remove_all(condition)
def get_new_results(self):
condition = {'status': 'new'}
new_results_list = self.database.fetch_all(condition)
# check, if:
# - for a given experiment
# - and a given job_id
# --> all repetitions are executed
new_results = {}
for result in new_results_list:
exp_identifier = result['exp_identifier']
job_id = result['job_id']
if exp_identifier in new_results.keys():
if job_id in new_results[exp_identifier]:
new_results[exp_identifier][job_id].append(result)
else:
new_results[exp_identifier][job_id] = [result]
else:
new_results[exp_identifier] = {job_id: [result]}
# get those jobs, for which we have all the results
completed_jobs = []
for exp_identifier in new_results.keys():
# get experiment
for experiment in self.settings['experiments']:
if experiment['name'] == exp_identifier:
break
num_repetitions = experiment['repetitions']
for job_id in new_results[exp_identifier]:
if len(new_results[exp_identifier][job_id]) == num_repetitions:
completed_jobs.append(job_id)
return completed_jobs
# separate the new feedbacks by name and by repetition
# new_results = {}
# condition = {'status': 'new'}
# new_result_list = self.database.fetch_all(condition)
# separate the new feedbacks by name
# new_results = {}
# for result in new_result_list:
# if result['exp_identifier'] in new_results.keys():
# new_results[result['exp_identifier']].append(result)
# else:
# new_results[result['exp_identifier']] = [result]
# return new_results
def analyze_new_results(self, job_id):
# get experiments with the defined job_id
condition = {'job_id': job_id}
results = self.database.fetch_all(condition)
# copy information to the processed dictionary
processed = {}
for att in ['job_id', 'work_dir', 'exp_identifier', 'parameters', 'author']:
processed[att] = copy.deepcopy(results[0][att])
processed['loss'] = {}
# perform operations on results
exp_identifier = results[0]['exp_identifier']
for experiment in self.settings['experiments']:
if experiment['name'] == exp_identifier:
break
for objective in experiment['objectives']:
name = objective['name']
operation = objective['operation']
# get all results
# print('RESULT', results)
values = np.array([result['objectives'][name] for result in results])
if operation == 'average':
value = np.mean(values)
elif operation == 'std_rel':
value = np.std(values) / np.mean(values)
else:
raise NotImplementedError()
processed['loss']['%s_%s' % (name, operation)] = value
setattr(self, 'info_dict_%s' % job_id, copy.deepcopy(processed))
self.PROCESSED_JOBS.append(job_id)
def set_all_to_used(self, job_id):
condition = {'job_id': job_id, 'status': 'new'}
update = {'status': 'used'}
self.database.update(condition, update)
# def set_all_to_used(self, exp_identifier):
# condition = {'exp_identifier': exp_identifier, 'status': 'new'}
# update = {'status': 'used'}
# self.database.update(condition, update)
#========================================================================
|
py | 1a3b429ea0025964d1590b8eae2fc783455d5308 | #!/usr/bin/python
from Solution import Solution
obj = Solution()
#A = [1,1,1,2,2,3]
#A = [0,0,1,1,1,1,2,3,3]
#A = [1, 1, 1, 1]
A = [1]
print(obj.removeDuplicates(A)) |
py | 1a3b440481f92a89429e5d143494a7f4404182ea | import itertools
import argparse
import datetime
import os
import sys
import re
import time
import numpy as np
def filldict(listKeys, listValues):
mydict = {}
for key, value in zip(listKeys, listValues):
mydict[key] = value
return mydict
def generate_script_body(param_dict):
script_body='''#!/bin/bash
cd /home/babbatem/projects/skills_kin/ben_dapg
source /home/babbatem/envs/skills_kin/bin/activate
export GYM_ENV={}
echo $GYM_ENV
python my_job_script.py --config {} --output {}
'''
script_body=script_body.format(param_dict['env'],
param_dict['config'],
param_dict['output'])
return script_body
def get_config_file_dapg():
config= \
"""{
# general inputs
'env' : '%s',
'algorithm' : 'DAPG',
'seed' : %i,
'num_cpu' : 3,
'save_freq' : 25,
'eval_rollouts' : 1,
# Demonstration data and behavior cloning
'demo_file' : '%s',
'bc_batch_size' : 32,
'bc_epochs' : 5,
'bc_learn_rate' : 1e-3,
# RL parameters (all params related to PG, value function, DAPG etc.)
'policy_size' : (32, 32),
'vf_batch_size' : 64,
'vf_epochs' : 2,
'vf_learn_rate' : 1e-3,
'rl_step_size' : 0.05,
'rl_gamma' : 0.995,
'rl_gae' : 0.97,
'rl_num_traj' : 20,
'rl_num_iter' : 10,
'lam_0' : 1e-2,
'lam_1' : 0.95,
'init_log_std' : 1
}
"""
return config
def get_config_file_npg():
config= \
"""{
# general inputs
'env' : '%s',
'algorithm' : 'NPG',
'seed' : %i,
'num_cpu' : 3,
'save_freq' : 25,
'eval_rollouts' : 1,
# RL parameters (all params related to PG, value function, DAPG etc.)
'policy_size' : (32, 32),
'vf_batch_size' : 64,
'vf_epochs' : 2,
'vf_learn_rate' : 1e-3,
'rl_step_size' : 0.05,
'rl_gamma' : 0.995,
'rl_gae' : 0.97,
'rl_num_traj' : 20,
'rl_num_iter' : 10,
'lam_0' : 0,
'lam_1' : 0,
'init_log_std' : 1,
}
"""
return config
def submit(param_dict, job_details):
script_body = generate_script_body(param_dict)
objectname = param_dict['algo'] + '-' \
+ param_dict['env-short'] + '-' \
+ str(param_dict['seed'])
jobfile = "scripts/{}/{}".format(param_dict['name'], objectname)
with open(jobfile, 'w') as f:
f.write(script_body)
cmd="qsub {} {}".format(job_details, jobfile)
os.system(cmd)
return 0
def main(args):
KEYS = ['seed', 'env', 'algo', 'config', 'output', 'name', 'env-short']
SEEDS = np.arange(5)
# TODO: make this mapping correct
full_env_names_dict = {'drawer': 'kuka_gym:KukaDrawer-v0',
'microwave': 'kuka_gym:KukaCabinet-v0',
'dynamic': 'kuka_gym:KukaDynamic-v0'}
full_env_name = full_env_names_dict[args.env]
if args.gpu:
request = '-l long -l vf=32G -l gpus=1 -q gpus*'
else:
request = '-l long -l vf=32G -pe smp 3'
os.makedirs('experiments' + '/' + args.exp_name, exist_ok=True)
config_root = 'experiments' + '/' + args.exp_name + '/' + args.env + '/configs/'
output_root = 'experiments' + '/' + args.exp_name + '/' + args.env + '/outputs/'
os.makedirs('scripts/%s' % args.exp_name, exist_ok=True)
os.makedirs(config_root, exist_ok=True)
os.makedirs(output_root, exist_ok=True)
k=0
for i in range(len(SEEDS)):
# get the config text
if args.algo == 'dapg':
config = get_config_file_dapg()
elif args.algo == 'npg':
config = get_config_file_npg()
else:
print('Invalid algorithm name [dapg, npg]')
raise ValueError
demo_path = '/home/babbatem/projects/skills_kin/sim/data/kuka_%s_demo.pickle'
demo_path = demo_path % args.env
if args.algo == 'dapg':
config=config % (full_env_name, SEEDS[i], demo_path)
else:
config=config % (full_env_name, SEEDS[i])
config_path = config_root + args.algo + str(SEEDS[i]) + '.txt'
config_writer = open(config_path,'w')
config_writer.write(config)
config_writer.close()
output_path = output_root + args.algo + str(SEEDS[i])
element = [SEEDS[i],
full_env_name,
args.algo,
config_path,
output_path,
args.exp_name,
args.env]
param_dict = filldict(KEYS, element)
submit(param_dict, request)
k+=1
print(k)
if __name__ == '__main__':
parser=argparse.ArgumentParser()
parser.add_argument('-t', '--test', action='store_true', help='don\'t submit, just count')
parser.add_argument('-n', '--exp-name', required=True, type=str, help='parent directory for jobs')
parser.add_argument('-g', '--gpu', action='store_true', help='request gpus')
parser.add_argument('-e', '--env', type=str, help='microwave, drawer, or dynamic')
parser.add_argument('-a', '--algo', type=str, help='dapg or npg')
args=parser.parse_args()
main(args)
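# Illustrative invocation (argument values are hypothetical; use whatever name this file is saved under):
#   python submit_experiments.py --exp-name test_run --env drawer --algo dapg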
|
py | 1a3b441eb6543c31685358e846502bd783191ad8 | # Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
import six
import webob
import webob.dec
import webob.exc
from nova.api import openstack as openstack_api
from nova.api.openstack import wsgi
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
class APITest(test.NoDBTestCase):
def setUp(self):
super(APITest, self).setUp()
self.wsgi_app = fakes.wsgi_app()
def _wsgi_app(self, inner_app):
# simpler version of the app than fakes.wsgi_app
return openstack_api.FaultWrapper(inner_app)
def test_malformed_json(self):
req = webob.Request.blank('/')
req.method = 'POST'
req.body = '{'
req.headers["content-type"] = "application/json"
res = req.get_response(self.wsgi_app)
self.assertEqual(res.status_int, 400)
def test_malformed_xml(self):
req = webob.Request.blank('/')
req.method = 'POST'
req.body = '<hi im not xml>'
req.headers["content-type"] = "application/xml"
res = req.get_response(self.wsgi_app)
self.assertEqual(res.status_int, 415)
def test_vendor_content_type_json(self):
ctype = 'application/vnd.openstack.compute+json'
req = webob.Request.blank('/')
req.headers['Accept'] = ctype
res = req.get_response(self.wsgi_app)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, ctype)
jsonutils.loads(res.body)
def test_exceptions_are_converted_to_faults_webob_exc(self):
@webob.dec.wsgify
def raise_webob_exc(req):
raise webob.exc.HTTPNotFound(explanation='Raised a webob.exc')
# api.application = raise_webob_exc
api = self._wsgi_app(raise_webob_exc)
resp = webob.Request.blank('/').get_response(api)
self.assertEqual(resp.status_int, 404, resp.body)
def test_exceptions_are_converted_to_faults_api_fault(self):
@webob.dec.wsgify
def raise_api_fault(req):
exc = webob.exc.HTTPNotFound(explanation='Raised a webob.exc')
return wsgi.Fault(exc)
# api.application = raise_api_fault
api = self._wsgi_app(raise_api_fault)
resp = webob.Request.blank('/').get_response(api)
self.assertIn('itemNotFound', resp.body)
self.assertEqual(resp.status_int, 404, resp.body)
def test_exceptions_are_converted_to_faults_exception(self):
@webob.dec.wsgify
def fail(req):
raise Exception("Threw an exception")
# api.application = fail
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertIn('{"computeFault', resp.body)
self.assertEqual(resp.status_int, 500, resp.body)
def _do_test_exception_safety_reflected_in_faults(self, expose):
class ExceptionWithSafety(exception.NovaException):
safe = expose
@webob.dec.wsgify
def fail(req):
raise ExceptionWithSafety('some explanation')
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertIn('{"computeFault', resp.body)
expected = ('ExceptionWithSafety: some explanation' if expose else
'The server has either erred or is incapable '
'of performing the requested operation.')
self.assertIn(expected, resp.body)
self.assertEqual(resp.status_int, 500, resp.body)
def test_safe_exceptions_are_described_in_faults(self):
self._do_test_exception_safety_reflected_in_faults(True)
def test_unsafe_exceptions_are_not_described_in_faults(self):
self._do_test_exception_safety_reflected_in_faults(False)
def _do_test_exception_mapping(self, exception_type, msg):
@webob.dec.wsgify
def fail(req):
raise exception_type(msg)
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertIn(msg, resp.body)
self.assertEqual(resp.status_int, exception_type.code, resp.body)
if hasattr(exception_type, 'headers'):
for (key, value) in six.iteritems(exception_type.headers):
self.assertIn(key, resp.headers)
self.assertEqual(resp.headers[key], str(value))
def test_quota_error_mapping(self):
self._do_test_exception_mapping(exception.QuotaError, 'too many used')
def test_non_nova_notfound_exception_mapping(self):
class ExceptionWithCode(Exception):
code = 404
self._do_test_exception_mapping(ExceptionWithCode,
'NotFound')
def test_non_nova_exception_mapping(self):
class ExceptionWithCode(Exception):
code = 417
self._do_test_exception_mapping(ExceptionWithCode,
'Expectation failed')
def test_exception_with_none_code_throws_500(self):
class ExceptionWithNoneCode(Exception):
code = None
@webob.dec.wsgify
def fail(req):
raise ExceptionWithNoneCode()
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertEqual(500, resp.status_int)
class APITestV21(APITest):
def setUp(self):
super(APITestV21, self).setUp()
self.wsgi_app = fakes.wsgi_app_v21()
# TODO(alex_xu): Get rid of the case translate NovaException to
# HTTPException after V2 api code removed. Because V2.1 API required raise
# HTTPException explicitly, so V2.1 API needn't such translation.
|
py | 1a3b4489bc30219bdf9a88e6e324dac3d3f5a979 | # -*- coding: utf-8 -*-
from abc import abstractmethod
from saltchannel.util import SingletonABCMeta
class SaltLibBase(metaclass=SingletonABCMeta):
crypto_sign_PUBLICKEYBYTES = 32
crypto_sign_SECRETKEYBYTES = 64
crypto_sign_BYTES = 64
crypto_sign_SEEDBYTES = 32
crypto_box_PUBLICKEYBYTES = 32
crypto_box_SECRETKEYBYTES = 32
crypto_box_SHAREDKEYBYTES = 32
crypto_box_BEFORENMBYTES = 32
crypto_box_NONCEBYTES = 24
crypto_box_ZEROBYTES = 32
crypto_box_BOXZEROBYTES = 16
crypto_box_OVERHEADBYTES = 16
crypto_box_INTERNALOVERHEADBYTES = 32
crypto_hash_BYTES = 64
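# These sizes mirror the standard NaCl/libsodium constants: Ed25519 signing keys and
# signatures (32/64/64 bytes), Curve25519 box keys with 24-byte nonces, and a 64-byte
# (SHA-512) hash.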
@staticmethod
#@abstractmethod
def isAvailable(): pass
#@abstractmethod
def getName(self): pass
@abstractmethod
def crypto_sign_keypair_not_random(self, sk): pass
@abstractmethod
def crypto_sign(self, m, sk): pass
@abstractmethod
def crypto_sign_open(self, sm, pk): pass
@abstractmethod
def crypto_box_keypair_not_random(self, sk): pass
@abstractmethod
def crypto_box_beforenm(self, pk, sk): pass
@abstractmethod
def crypto_box_afternm(self, m, n, k): pass
@abstractmethod
def crypto_box_open_afternm(self, c, n, k): pass
@abstractmethod
def crypto_hash(self, m): pass
@abstractmethod
def randombytes(self, n): pass
|
py | 1a3b44a2996ccc8473bec7a267c1edbf344525ff | # Author: Yi Jiang, <[email protected]>, Institute of Physics, Chinese Academy of Sciences
# Adapted from the kdotp-symmetry package by: Dominik Gresch <[email protected]> © 2017-2018, ETH Zurich, Institut für Theoretische Physik
"""
Defines functions to construct the basis of the symmetry-constrained Hamiltonian.
"""
import sympy as sp
from sympy.physics.quantum import TensorProduct
import numpy as np
from functools import reduce
import scipy.linalg as la
from ._expr_utils import monomial_basis, expr_to_vector, matrix_to_expr_operator
from ._repr_utils import hermitian_to_vector, hermitian_basis, repr_to_matrix_operator, check_orthogonal, frobenius_product, solve_linear_system_numpy
from ._repr_utils import hermitian_pauli_basis, hermitian_pauli_basis_symbols
from ._linalg import intersection_basis, nullspace_blocked
from ._to_matrix import to_matrix
from ._logging_setup import LOGGER
from ._decompose_kp import decompose_kp
def symmetric_hamiltonian(
symmetry_operations,
kp_variable = 'k',
order = [0],
repr_basis = 'pauli',
msg_num = None,
kvec = None,
):
r"""
Calculates the basis of the symmetric Hamiltonian for a given set of symmetry operations.
:param symmetry_operations: The symmetry operations that the Hamiltonian should respect.
:type symmetry_operations: :py:class: `dict` with keys 'rotation_matrix', 'repr_matrix', 'repr_has_cc'.
:param kp_variable: The variable of the hamiltonian, can be anyone of 'k', 'E', 'B', 'e', 'k E', 'k B', 'E B', 'k E B'
:type kp_variable: :py:class:str
:param order: The list of orders of the monomials. Each number in the list specifies the order of a variable.
:type order: :py:class:`list` of :py:class:`int`
:param repr_basis: The basis for the hermitian matrices, with the same size as the representations.
By default, the :py:func:`.hermitian_pauli_basis` of the appropriate size is used.
:type repr_basis: :py:class:`list` of :py:mod:`sympy` matrices
:param msg_num & kvec: two string used to denote the magnetic space group and little group k,
used to locate linear representations in order to decompose kp hamiltonian.
:type msg_num & kvec: :py:class:str
:returns: Basis for the symmetric Hamiltonian, as a :py:class:`list` of :py:mod:`sympy` matrix expressions.
# Modified by YJ: if msg_num and kvec is specified, also return lists of decomposed repr and expr basis, otherwise return empty lists.
"""
# for sympy or numpy matrices
try:
repr_matrix_size = symmetry_operations[0]['repr_matrix'].shape[0]
# for plain lists -- this doesn't work for sympy matrices because
# their 'len' is the total number of elements
except AttributeError:
repr_matrix_size = len(symmetry_operations[0]['repr_matrix'])
repr_basis_type = 'pauli' if repr_basis == 'pauli' else None
if repr_basis == 'auto':
repr_basis = hermitian_basis(repr_matrix_size)
elif repr_basis == 'pauli':
repr_basis = hermitian_pauli_basis(repr_matrix_size)
repr_basis_symbols = hermitian_pauli_basis_symbols(repr_matrix_size)
if repr_basis not in ['auto', 'pauli']:
check_orthogonal(repr_basis)
Base_vec = ''
for t in kp_variable.split():
if t == 'k':
Base_vec += 'kx ky kz '
elif t == 'E':
Base_vec += 'Ex Ey Ez '
elif t == 'B':
Base_vec += 'Bx By Bz '
elif t == 'e':
Base_vec += 'ex ey ez '
Base_vec = sp.symbols(Base_vec)
expr_basis = monomial_basis(order, kp_variable)
expr_dim = len(expr_basis)
repr_dim = len(repr_basis)
repr_basis_norm_squares = [frobenius_product(b, b) for b in repr_basis]
full_dim = expr_dim * repr_dim
full_basis = [
sp.Matrix(x) for x in np.outer(expr_basis, repr_basis).
reshape(full_dim, repr_matrix_size, repr_matrix_size).tolist()
]
invariant_bases = []
expr_mat_collection = []
repr_mat_collection = []
for isym_op, sym_op in enumerate(symmetry_operations):
LOGGER.info('Calculating matrix form of expression.')
expr_mat = to_matrix(
operator=matrix_to_expr_operator(
sym_op['rotation_matrix'], repr_has_cc = sym_op['repr_has_cc'],
K_VEC = Base_vec
),
basis=expr_basis,
to_vector_fct=expr_to_vector,
K_VEC = Base_vec
)
expr_mat_collection.append(expr_mat)
LOGGER.info('Calculating matrix form of representation.')
repr_mat = to_matrix(
operator=repr_to_matrix_operator(
sym_op['repr_matrix'], complex_conjugate = sym_op['repr_has_cc']
),
basis=repr_basis,
to_vector_fct=hermitian_to_vector,
to_vector_kwargs=dict(basis_norm_squares=repr_basis_norm_squares)
)
repr_mat_collection.append(repr_mat)
# outer product
LOGGER.info('Calculating outer product.')
full_mat = TensorProduct(expr_mat, repr_mat)
# get Eig(F \ocross G, 1) basis
mat = full_mat - sp.eye(full_dim)
LOGGER.info('Calculating nullspace.')
nullspace_basis = nullspace_blocked(mat, simplify=sp.nsimplify)
# Modified by YJ: reshape here is necessary. The original np.array(nullspace_basis).tolist() will run into bugs for python>3.8
curr_basis = [ bs.reshape(1, expr_dim*repr_dim) for bs in nullspace_basis ]
if len(curr_basis) != _numeric_nullspace_dim(mat):
raise ValueError(
'Analytic and numeric dimensions of the nullspace of the matrix {mat} do not match'
.format(mat=mat)
)
invariant_bases.append(curr_basis)
LOGGER.info('Calculating basis intersection.')
basis_vectors = intersection_basis(*invariant_bases)
# ===== Added by YJ: decompose the kp model into symmetric basis ===== #
decomposed_repr_vec, decomposed_repr_mat, decomposed_expr, ir_str_list = [], [], [], []
for basis_vector in basis_vectors:
tmp_repr_vec, tmp_repr_mat, tmp_expr, linear_ir_str = decompose_kp(basis_vector, repr_basis, expr_basis, symmetry_operations, Base_vec, msg_num, kvec)
decomposed_repr_vec.append(tmp_repr_vec)
decomposed_repr_mat.append(tmp_repr_mat)
decomposed_expr.append(tmp_expr)
ir_str_list.append(linear_ir_str)
LOGGER.info('Expanding basis vectors.')
basis_vectors_expanded, decomposed_repr_symbols = [], []
for full_vec, repr_vec in zip(basis_vectors, decomposed_repr_vec):
basis_vectors_expanded.append( sum((v * b for v, b in zip(full_vec, full_basis)), sp.zeros(repr_matrix_size)) )
decomposed_repr_symbols.append([ reduce(lambda x, y : x+' + '+y, [str(sp.nsimplify(v)) + '* ' + b if v != 1 else b\
for v, b in zip(tmp, repr_basis_symbols) if v != 0]) for tmp in repr_vec ]) \
if repr_basis_type == 'pauli' else [None] * len(repr_vec)
_print_result(basis_vectors_expanded, basis_vectors, decomposed_expr, decomposed_repr_mat, decomposed_repr_symbols, ir_str_list)
return basis_vectors_expanded, decomposed_expr, decomposed_repr_mat
def _numeric_nullspace_dim(mat):
"""Numerically computes the nullspace dimension of a matrix."""
mat_numeric = np.array(mat.evalf().tolist(), dtype=complex)
eigenvals = la.eigvals(mat_numeric)
return np.sum(np.isclose(eigenvals, np.zeros_like(eigenvals)))
def _print_result(kpmodels, basis_vecs, expr_basis_vecs, repr_basis_mats, repr_basis_symbols, ir_str_list):
""" Print the result of kp models and decompoed basis"""
if len(kpmodels) == 0:
print('No symmetry-allowed kp models.')
else:
print('Number of independent kp models:', len(kpmodels))
for ith, kp, base_vec, rep, exp, rep_sym, ir in zip(range(len(kpmodels)), kpmodels, basis_vecs, repr_basis_mats, expr_basis_vecs, repr_basis_symbols, ir_str_list):
print('-----------------------------------------------------')
print('%d-th kp model:'%(ith+1))
print(kp)
print('Basis vector:', base_vec)
if exp is None:
print('Fail to decompose kp.')
else:
if ir:
print('\nDecomposed basis using linear IR:', ir)
else:
print('\nDecomposed basis (not symmetric):')
print('Coefficient basis:')
for ie in exp:
print(ie)
print('\nMatrix basis:')
for isym, ib in zip(rep_sym, rep):
print('Symbol:',isym, ' Expression:', ib, '\n')
print('-----------------------------------------------------')
|
py | 1a3b455add73cb9ba6bbe2885feb3410168e8faa | import json
from .models import *
def cookieCart(request):
try:
cart = json.loads(request.COOKIES['cart'])
except:
cart = {}
print('Cart:', cart)
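# The 'cart' cookie is expected to map item ids to quantity dicts, e.g. {"3": {"quantity": 2}}
# (shape inferred from the loop below; the concrete id and quantity are made up).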
items = []
order = {'cart_items' :0, 'cart_total' :0}
cartItems = order['cart_total']
for i in cart:
try:
cartItems += cart[i]['quantity']
item = Items.objects.get(id=i)
total = (item.price * cart[i]['quantity'])
order['cart_items'] += cart[i]['quantity']
order['cart_total'] += total
item = {
'item':{
'id': item.id,
'name': item.name,
'price': item.price,
'imageURL': item.imageURL,
},
'quantity': cart[i]['quantity'],
'get_total': total
}
items.append(item)
except:
pass
return {'items':items, 'order':order, 'cartItems':cartItems}
def cartData(request):
if request.user.is_authenticated:
customer = request.user.customer
order, created = Order.objects.get_or_create(customer=customer, complete=False)
items = order.orderitem_set.all()
cartItems = order.cart_items
else:
cookieData = cookieCart(request)
items = cookieData['items']
order = cookieData['order']
cartItems = cookieData['cartItems']
return {'items':items, 'order':order, 'cartItems':cartItems}
def guestOrder(request, data):
print('User not logged in.')
print('Cookies:', request.COOKIES)
name = data['form']['name']
email = data['form']['email']
cookieData = cookieCart(request)
items = cookieData['items']
customer, created = Customer.objects.get_or_create(email = email)
customer.name = name
customer.save()
order, created = Order.objects.get_or_create(customer=customer, complete=False)
for i in items:
item = Items.objects.get(id=i['item']['id'])
orderItem = OrderItem.objects.create(
item = item,
order = order,
quantity = i['quantity']
)
return customer, order |
py | 1a3b45afb56f940e4f610aa06d50787dd3d1db45 | import numpy as np
from panel.planet_data import PLANET_DATA
from panel.telemetry.telemetry import Telemetry
class EllipseData(Telemetry):
@property
def _c(self):
return (self.apoapsis - self.periapsis) / 2.0
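# _c is the linear eccentricity: the distance from the ellipse's centre to a focus,
# which is where the orbited body sits (r_apo = a + c, r_peri = a - c).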
@property
def focus_x(self):
return self._c
@property
def focus_y(self):
return 0
@property
def width(self):
return 2 * self.semi_major_axis
@property
def height(self):
return 2 * self.semi_minor_axis
@property
def proj_equ_width(self):
x, y = self.projection(self.width, 0)
return np.sqrt(x**2 + y**2)
@property
def proj_equ_height(self):
x, y = self.projection(0, self.height)
return np.sqrt(x**2 + y**2)
|
py | 1a3b4620a6de8ee829216e5b1f78b0682d5aa83d | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import time
from typing import Optional
import paddle
from yacs.config import CfgNode
from paddlespeech.cli.asr.infer import ASRExecutor
from paddlespeech.cli.log import logger
from paddlespeech.cli.utils import MODEL_HOME
from paddlespeech.resource import CommonTaskResource
from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer
from paddlespeech.s2t.modules.ctc import CTCDecoder
from paddlespeech.s2t.utils.utility import UpdateConfig
from paddlespeech.server.engine.base_engine import BaseEngine
from paddlespeech.server.utils.paddle_predictor import init_predictor
from paddlespeech.server.utils.paddle_predictor import run_model
__all__ = ['ASREngine', 'PaddleASRConnectionHandler']
class ASRServerExecutor(ASRExecutor):
def __init__(self):
super().__init__()
self.task_resource = CommonTaskResource(
task='asr', model_format='static')
def _init_from_path(self,
model_type: str='wenetspeech',
am_model: Optional[os.PathLike]=None,
am_params: Optional[os.PathLike]=None,
lang: str='zh',
sample_rate: int=16000,
cfg_path: Optional[os.PathLike]=None,
decode_method: str='attention_rescoring',
am_predictor_conf: dict=None):
"""
Init model and other resources from a specific path.
"""
self.max_len = 50
sample_rate_str = '16k' if sample_rate == 16000 else '8k'
tag = model_type + '-' + lang + '-' + sample_rate_str
self.task_resource.set_task_model(model_tag=tag)
if cfg_path is None or am_model is None or am_params is None:
self.res_path = self.task_resource.res_dir
self.cfg_path = os.path.join(
self.res_path, self.task_resource.res_dict['cfg_path'])
self.am_model = os.path.join(self.res_path,
self.task_resource.res_dict['model'])
self.am_params = os.path.join(self.res_path,
self.task_resource.res_dict['params'])
logger.info(self.res_path)
logger.info(self.cfg_path)
logger.info(self.am_model)
logger.info(self.am_params)
else:
self.cfg_path = os.path.abspath(cfg_path)
self.am_model = os.path.abspath(am_model)
self.am_params = os.path.abspath(am_params)
self.res_path = os.path.dirname(
os.path.dirname(os.path.abspath(self.cfg_path)))
#Init body.
self.config = CfgNode(new_allowed=True)
self.config.merge_from_file(self.cfg_path)
with UpdateConfig(self.config):
if "deepspeech2" in model_type:
self.vocab = self.config.vocab_filepath
if self.config.spm_model_prefix:
self.config.spm_model_prefix = os.path.join(
self.res_path, self.config.spm_model_prefix)
self.text_feature = TextFeaturizer(
unit_type=self.config.unit_type,
vocab=self.vocab,
spm_model_prefix=self.config.spm_model_prefix)
self.config.decode.lang_model_path = os.path.join(
MODEL_HOME, 'language_model',
self.config.decode.lang_model_path)
lm_url = self.task_resource.res_dict['lm_url']
lm_md5 = self.task_resource.res_dict['lm_md5']
self.download_lm(
lm_url,
os.path.dirname(self.config.decode.lang_model_path), lm_md5)
elif "conformer" in model_type or "transformer" in model_type:
raise Exception("wrong type")
else:
raise Exception("wrong type")
# AM predictor
self.am_predictor_conf = am_predictor_conf
self.am_predictor = init_predictor(
model_file=self.am_model,
params_file=self.am_params,
predictor_conf=self.am_predictor_conf)
# decoder
self.decoder = CTCDecoder(
odim=self.config.output_dim, # <blank> is in vocab
enc_n_units=self.config.rnn_layer_size * 2,
blank_id=self.config.blank_id,
dropout_rate=0.0,
reduction=True, # sum
batch_average=True, # sum / batch_size
grad_norm_type=self.config.get('ctc_grad_norm_type', None))
@paddle.no_grad()
def infer(self, model_type: str):
"""
Model inference and result stored in self.output.
"""
cfg = self.config.decode
audio = self._inputs["audio"]
audio_len = self._inputs["audio_len"]
if "deepspeech2" in model_type:
decode_batch_size = audio.shape[0]
# init once
self.decoder.init_decoder(
decode_batch_size, self.text_feature.vocab_list,
cfg.decoding_method, cfg.lang_model_path, cfg.alpha, cfg.beta,
cfg.beam_size, cfg.cutoff_prob, cfg.cutoff_top_n,
cfg.num_proc_bsearch)
output_data = run_model(self.am_predictor,
[audio.numpy(), audio_len.numpy()])
probs = output_data[0]
eouts_len = output_data[1]
batch_size = probs.shape[0]
self.decoder.reset_decoder(batch_size=batch_size)
self.decoder.next(probs, eouts_len)
trans_best, trans_beam = self.decoder.decode()
# self.model.decoder.del_decoder()
self._outputs["result"] = trans_best[0]
elif "conformer" in model_type or "transformer" in model_type:
raise Exception("invalid model name")
else:
raise Exception("invalid model name")
class ASREngine(BaseEngine):
"""ASR server engine
Args:
metaclass: Defaults to Singleton.
"""
def __init__(self):
super(ASREngine, self).__init__()
def init(self, config: dict) -> bool:
"""init engine resource
Args:
config_file (str): config file
Returns:
bool: init failed or success
"""
self.executor = ASRServerExecutor()
self.config = config
self.engine_type = "inference"
try:
if self.config.am_predictor_conf.device is not None:
self.device = self.config.am_predictor_conf.device
else:
self.device = paddle.get_device()
paddle.set_device(self.device)
except Exception as e:
logger.error(
"Set device failed, please check if device is already used and the parameter 'device' in the yaml file"
)
logger.error(e)
return False
self.executor._init_from_path(
model_type=self.config.model_type,
am_model=self.config.am_model,
am_params=self.config.am_params,
lang=self.config.lang,
sample_rate=self.config.sample_rate,
cfg_path=self.config.cfg_path,
decode_method=self.config.decode_method,
am_predictor_conf=self.config.am_predictor_conf)
logger.info("Initialize ASR server engine successfully.")
return True
class PaddleASRConnectionHandler(ASRServerExecutor):
def __init__(self, asr_engine):
"""The PaddleSpeech ASR Server Connection Handler
This connection process every asr server request
Args:
asr_engine (ASREngine): The ASR engine
"""
super().__init__()
self.input = None
self.output = None
self.asr_engine = asr_engine
self.executor = self.asr_engine.executor
self.config = self.executor.config
self.max_len = self.executor.max_len
self.decoder = self.executor.decoder
self.am_predictor = self.executor.am_predictor
self.text_feature = self.executor.text_feature
def run(self, audio_data):
"""engine run
Args:
audio_data (bytes): base64.b64decode
"""
if self._check(
io.BytesIO(audio_data), self.asr_engine.config.sample_rate,
self.asr_engine.config.force_yes):
logger.info("start running asr engine")
self.preprocess(self.asr_engine.config.model_type,
io.BytesIO(audio_data))
st = time.time()
self.infer(self.asr_engine.config.model_type)
infer_time = time.time() - st
self.output = self.postprocess() # Retrieve result of asr.
logger.info("end inferring asr engine")
else:
logger.info("file check failed!")
self.output = None
logger.info("inference time: {}".format(infer_time))
logger.info("asr engine type: paddle inference")
|
py | 1a3b462243b240ebf9f87d3e3515c3b8802eac6d | import pathlib
import pandas as pd
import panda_scripts as ps
import joint_pca as pca
from argument_validators import alphanumeric
from shutil import rmtree
from math import floor
from numpy.random import randint
from os import remove
import inspect
from __utils import *
# This is a wrapper for all the data-processing scripts below.
#
# Arguments:
# MONTH_DICT A dictionary from the scope above this level, to be filled by this function
# PARAMS_FILE _______
# LOG_FILE Path for the log file
# SM_FILE File with sm data
# COV_FILE File with cov data
# COV_LAYERS List of strings, length must match the number of layers in the COV_FILE
# EVAL_FILE File with eval data
# SHAPE_DIR Folder path where shape .rds files are (to be) stored
# REG_LIST List of regions to cut out of the sm file
# BUFFER km of buffer around each region
# TRAIN_DIR The directory of training files
# MONTH Numeric month to use in the train data
# EVAL_DIR The directory of the evaluation files
# USE_PCA Run PCA dimension reduction on both train and eval files
# Assumes same covariate columns are present in same order
# VALIDATE 1 to save SM values from TEST for residuals
# 2 to save SM values from EVAL for comparison
# STATS_FILE Path to function for computing statistics on test data
# Default empty string, no stats computed
# RAND Random seed; default 0 generates new seed
# SUPER If 1/True, expands test file of ecoregions to one level up;
# Default 0, nothing changed
# MIN_T_POINTS Minimum number of training points required in each region;
# Default -1 doesn't check
#
# Output:
# The output folder depends on which preprocessing steps are taken
# A log file is generated in LOG_DIR/proc-log#.txt,
# where # is the least unused natural number
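# Illustrative call (every path, layer name, and region below is hypothetical):
# curate(MONTH_DICT={}, PARAMS_FILE=None, LOG_FILE=pathlib.Path("logs/proc-log0.txt"),
#        SM_FILE=pathlib.Path("sm.nc"), COV_FILE=pathlib.Path("cov.nc"), COV_LAYERS=["elev", "slope"],
#        EVAL_FILE=None, SHAPE_DIR=pathlib.Path("shapes"), REG_LIST=[("STATE", "Kansas")], BUFFER=0,
#        TRAIN_DIR=pathlib.Path("train"), MONTH=6, EVAL_DIR=pathlib.Path("eval"),
#        USE_PCA=1, VALIDATE=1.2)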
def curate(MONTH_DICT, PARAMS_FILE, LOG_FILE, SM_FILE, COV_FILE, COV_LAYERS, EVAL_FILE, SHAPE_DIR,
REG_LIST, BUFFER, TRAIN_DIR, MONTH,
EVAL_DIR, USE_PCA, VALIDATE, STATS_FILE="", RAND=0, SUPER=0, MIN_T_POINTS=-1):
MASK_PATH = pathlib.Path("create_shape.R").resolve()
CROP_PATH = pathlib.Path("crop_to_shape.R").resolve()
ADD_COV_PATH = pathlib.Path("add_topos.R").resolve()
DROP_COLS_PATH = pathlib.Path("drop_cols.py").resolve()
# Prepare shape for cropping.
def create_shape(reg_type, reg, SHAPE_DIR=SHAPE_DIR):
if not SHAPE_DIR.is_dir():
SHAPE_DIR.mkdir(parents=True)
SHAPE_FILE = SHAPE_DIR.joinpath(f"{reg}.rds")
if SHAPE_FILE.is_file():
log.write(f"shape for {reg} exists in {SHAPE_DIR}\n")
else:
shape_args = [MASK_PATH, reg_type, reg, SHAPE_FILE]
log.write(f"{shape_args}\n")
#print(shape_args)
bash(shape_args)
log.write(f"Created shape for {reg} in {SHAPE_DIR}")
return SHAPE_FILE
print(f"Curation log file: {LOG_FILE}")
with open(LOG_FILE, "w") as log:
log.write("----------------------------------------\n")
log.write("Begin data processing with the following arguments...\n")
#https://stackoverflow.com/questions/582056/getting-list-of-parameter-names-inside-python-function
frame = inspect.currentframe()
args, _, _, vals = inspect.getargvalues(frame)
for i in args:
log.write(f"{i}={vals[i]}\n")
log.write("----------------------------------------\n")
# Establish random seed:
if (VALIDATE<=1) or (VALIDATE>=2):
seed=0
else:
if RAND:
seed = int(RAND)
else:
seed = randint(2**16)
log.write(f"For randomization, using {seed}.\n")
#suffix = ""
MONTH_DICT[MONTH] = {}
suffix = f"month{MONTH}"
if SUPER:
suffix += "-LvlUp"
if BUFFER:
suffix += f"-{BUFFER}meter"
MONTH_DICT[MONTH]["buffer"] = BUFFER
if USE_PCA:
suffix += "-PCA"
if seed:
suffix += f"-{VALIDATE-1:.2f}_{seed}"
MONTH_DICT[MONTH]["seed"] = seed
########################################
# Create train and eval files
if VALIDATE:
SM_BEFORE = TRAIN_DIR.parent.joinpath("original_sm-"+suffix)
log.write(f"Soil Moisture data from before preprocessing will go in {SM_BEFORE}\n")
if not SM_BEFORE.is_dir():
SM_BEFORE.mkdir(parents=True)
else:
SM_BEFORE = None
if SM_FILE:
log.write("Extracting sm data from the specified source.\n")
for reg_type,reg in REG_LIST:
if not TRAIN_DIR.is_dir():
TRAIN_DIR.mkdir(parents=True)
REG_TR_FILE = TRAIN_DIR.joinpath(f"{reg_type}_{reg}.csv")
if SUPER and (reg_type=="ECOREGION" or reg_type=="CEC"):
reg = ".".join(reg.split(".")[:-1])
SHAPE_FILE = create_shape(reg_type, reg)
# Crop soil moisture file to shape.
crop_args = [CROP_PATH, SM_FILE, SHAPE_FILE, REG_TR_FILE, BUFFER]
#print(crop_args)
log.write(f"{crop_args}\n")
bash(crop_args)
if COV_FILE:
cov_args = [ADD_COV_PATH, REG_TR_FILE, COV_FILE, REG_TR_FILE] + COV_LAYERS
log.write(f"{cov_args}\n")
bash(cov_args)
else:
log.write("No SM_FILE specified, so train folder assumed populated.\n")
if EVAL_FILE:
log.write("Creating eval files from specified source.\n")
for reg_type, reg in REG_LIST:
log.write(f"Creating EVAL file for {reg}.\n")
if not EVAL_DIR.is_dir():
EVAL_DIR.mkdir(parents=True)
REG_EV_FILE = EVAL_DIR.joinpath(f"{reg_type}_{reg}.csv")
SHAPE_FILE = create_shape(reg_type, reg)
# Crop evaluation file to shape.
crop_args = [CROP_PATH, EVAL_FILE, SHAPE_FILE, REG_EV_FILE]
log.write(f"{crop_args}\n")
bash(crop_args)
if VALIDATE==2:
VALID_FILE = SM_BEFORE.joinpath(REG_EV_FILE.name)
log.write(f"cp {REG_EV_FILE} {VALID_FILE}")
bash(["cp", REG_EV_FILE, VALID_FILE])
print(f"{DROP_COLS_PATH} {REG_EV_FILE} {REG_EV_FILE} -k 0,1")
bash([DROP_COLS_PATH, REG_EV_FILE, REG_EV_FILE, "-k", "0,1"])
if COV_FILE:
cov_args = [ADD_COV_PATH, REG_EV_FILE, COV_FILE, REG_EV_FILE] + COV_LAYERS
log.write(f"{cov_args}\n")
bash(cov_args)
elif COV_FILE:
log.write("Extracting covariate data from the specified source.\n")
for reg_type, reg in REG_LIST:
log.write(f"Creating EVAL file for {reg}.\n")
if not EVAL_DIR.is_dir():
EVAL_DIR.mkdir(parents=True)
REG_EV_FILE = EVAL_DIR.joinpath(f"{reg_type}_{reg}.csv")
SHAPE_FILE = create_shape(reg_type, reg)
# Crop covariate file to shape.
crop_args = [CROP_PATH, COV_FILE, SHAPE_FILE, REG_EV_FILE, 0] + COV_LAYERS
#print(crop_args)
log.write(f"{crop_args}\n")
bash(crop_args)
if VALIDATE==2:
VALID_FILE = SM_BEFORE.joinpath(REG_EV_FILE.name)
log.write(f"cp {REG_EV_FILE} {VALID_FILE}")
bash(["cp", REG_EV_FILE, VALID_FILE])
else:
log.write("No EVAL_FILE or COV_FILE specified, so eval folder assumed populated.\n")
########################################
# Compute statistics on train files
if STATS_FILE:
stat_args = [STATS_FILE, TRAIN_DIR]
log.write(f"{stat_args}\n")
bash(stat_args)
########################################
# Process train and eval files
TRAIN_DIR_TEMP = append_to_folder(TRAIN_DIR, "-postproc-"+suffix)
log.write(f"Processed training data to go in {TRAIN_DIR_TEMP}\n")
if TRAIN_DIR_TEMP.is_dir():
rmtree(TRAIN_DIR_TEMP)
TRAIN_DIR_TEMP.mkdir(parents=True)
EVAL_DIR_TEMP = append_to_folder(EVAL_DIR, "-postproc-"+suffix)
log.write(f"Processed evaluation data to go in {EVAL_DIR_TEMP}\n")
if EVAL_DIR_TEMP.is_dir():
rmtree(EVAL_DIR_TEMP)
EVAL_DIR_TEMP.mkdir(parents=True)
for reg_type, reg in REG_LIST:
region = f"{reg_type}_{reg}.csv"
if not os.path.isfile(TRAIN_DIR.joinpath(region)):
continue
tdf = pd.read_csv(TRAIN_DIR.joinpath(region))#, dtype=float)#.astype(object).infer_objects()
#print(f"before: {tdf.columns}")
tdf.rename(columns=alphanumeric, inplace=True)
#print(f"after: {tdf.columns}")
if not os.path.isfile(EVAL_DIR.joinpath(region)):
continue
edf = pd.read_csv(EVAL_DIR.joinpath(region))#, dtype=float)#.astype(object).infer_objects()
log.write(f"imported edf; first 3 rows:\n{edf.head(3)}\n")
#print(f"before: {edf.columns}")
edf.rename(columns=alphanumeric, inplace=True)
ecols = {edf.columns[0]: tdf.columns[0], edf.columns[1]: tdf.columns[1]}
edf.rename(columns=ecols, inplace=True)
#print(f"after: {edf.columns}")
if MONTH:
replacements = ps.monthify(tdf.columns)
tdf = tdf.rename(columns=replacements)
tdf = ps.keep_month(tdf, MONTH)
######################################################
## Dealing with NAs
######################################################
# Show how many non-NA's there are in each column
log.write(f"Number of non-NA values in tdf by column:\n{tdf.count()}\n")
log.write(f"Number of non-NA values in edf by column:\n{edf.count()}\n")
# LSF is mostly NA in this region; replace it with 0, appropriate for a costal pixel
# Dict of cols with specified NA replacement value
bad_cols = {"LSF":0}
tdf.fillna(value=bad_cols, inplace=True)#[["LSF"]] = tdf[["LSF"]].fillna(0)
edf.fillna(value=bad_cols, inplace=True)#[["LSF"]] = edf[["LSF"]].fillna(0)
for col in bad_cols:
log.write(f"NA's in '{col}' replaced with {bad_cols[col]}.\n")
# Show how many non-NA's there are in each column
#log.write(f"Number of non-NA values in tdf by column:\n{tdf.count()}\n")
#log.write(f"Number of non-NA values in edf by column:\n{edf.count()}\n")
tdf = tdf.dropna()#thresh=4).fillna(0)
log.write(f"First 3 rows of tdf:\n{tdf.head(3)}\n")
#log.write(f"Number of non-NA values in tdf by column:\n{tdf.count()}\n")
edf = edf.dropna()#thresh=4).fillna(0)
log.write(f"First 3 rows of edf:\n{edf.head(3)}\n")
#log.write(f"Number of non-NA values in edf by column:\n{edf.count()}\n")
############################################
trows = tdf.shape[0]
if trows:
log.write(f"There are {trows} training points in {region}.\n")
else:
log.write(f"Warning: there are no training points in {region}!\n")
continue
erows = edf.shape[0]
if erows:
log.write(f"There are {erows} evaluation points in {region}.\n")
else:
log.write(f"Warning: there are no evaluation points in {region}!\n")
continue
if floor(VALIDATE)==1:
before = tdf[tdf.columns[:3]]#.dropna()
if VALIDATE>1:
log.write(f"For before.sample, {seed}.\n")
before = before.sample(frac=(VALIDATE - 1), random_state=seed)
tdf.drop(before.index.tolist(), inplace=True)
trows = tdf.shape[0]
if trows:
log.write(f"There are {trows} training points in {region}.\n")
else:
log.write(f"Warning: there are no training points in {region}!\n")
continue
brows = before.shape[0]
if brows:
log.write(f"There are {brows} validation points in {region}.\n")
else:
log.write(f"Warning: there are no validation points in {region}!\n")
continue
before_path = SM_BEFORE.joinpath(region)
before.to_csv(path_or_buf=before_path, index=False, header=False, na_rep="NA")
if BUFFER or SUPER:
log.write("Trimming validation file back down to {region}.\n")
crop_args = [CROP_PATH, before_path, before_path, r]
log.write(f"{crop_args}\n")
bash(crop_args)
if USE_PCA:
params = pca.get_params(tdf)
log.write(f"Performing PCA.\n")
#log.write(f"tdf pre-PCA: {tdf.shape}\n{tdf.head(3)}\n")
#log.write(f"edf pre-PCA: {edf.shape}\n{edf.head(3)}\n")
log.write(f"pre-PCA:\n{params}\n")
if len(params) > min(tdf.shape[0], edf.shape[0]):
log.write(f"Error: region {region} skipped! You have {tdf.shape[0]} rows of training data and {edf.shape[0]} rows of evaluation data, but you need at least {len(params)} of each to perform PCA on your params.\n")
continue
tdf, edf, comps = pca.joint_pca(tdf, edf, params)
log.write(f"post-PCA:\n{tdf.shape}\n{tdf.head(3)}\n{edf.shape}\n{edf.head(3)}\n{comps}\n")
log.write(f"Completed PCA for {region} with these eigenvalues:\n{comps}\n")
trows = tdf.shape[0]
if trows:
log.write(f"There are {trows} training points in {region}.\n")
else:
log.write(f"Warning: there are no training points in {region}!\n")
continue
erows = edf.shape[0]
if erows:
log.write(f"There are {erows} evaluation points in {region}.\n")
else:
log.write(f"Warning: there are no evaluation points in {region}!\n")
continue
tdf.to_csv(path_or_buf=TRAIN_DIR_TEMP.joinpath(region), index=False)
edf.to_csv(path_or_buf=EVAL_DIR_TEMP.joinpath(region), index=False)
TRAIN_DIR = TRAIN_DIR_TEMP
EVAL_DIR = EVAL_DIR_TEMP
# Update region list to only include those regions with at least a minimum number of test points
if (MIN_T_POINTS > -1):
NEW_REG_LIST = []
for reg_type, reg in REG_LIST:
REG_TR_FILE = TRAIN_DIR.joinpath(f"{reg_type}_{reg}.csv")
if REG_TR_FILE.is_file():
with open(REG_TR_FILE ,'r') as regtrfile:
num_lines = sum(1 for line in regtrfile)
if num_lines > MIN_T_POINTS:
NEW_REG_LIST.append((reg_type, reg))
log.write(f"Region {reg} has {num_lines - 1} data points ({MIN_T_POINTS} required). Kept in region list.\n")
else:
log.write(f"Warning! Region {reg} only has {num_lines - 1} data points ({MIN_T_POINTS} required). Removed from region list.\n")
remove(REG_TR_FILE)
else:
log.write(f"Warning! Region {reg} does not have a test file. Removed from region list.\n")
REG_LIST = NEW_REG_LIST
NEW_REG_FILE = LOG_FILE.with_suffix(f".{MONTH}reg")
with open(NEW_REG_FILE, "w") as reg_out:
for reg_type, reg in REG_LIST:
reg_out.write(f"{reg_type},{reg}\n")
###############################################
log.write("Data curation complete!!\n")
return(SM_BEFORE, TRAIN_DIR, EVAL_DIR, REG_LIST, seed, suffix)
|
py | 1a3b4886a39680ed4d293d3b1116d5a19aac9db6 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extracts bounding boxes from a list of images, saving them to files.
The images must be in JPG format. The program checks if boxes already
exist, and skips computation for those.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import time
from absl import app
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from delf import box_io
from delf import utils
from delf import detector
cmd_args = None
# Extension/suffix of produced files.
_BOX_EXT = '.boxes'
_VIZ_SUFFIX = '_viz.jpg'
# Used for plotting boxes.
_BOX_EDGE_COLORS = ['r', 'y', 'b', 'm', 'k', 'g', 'c', 'w']
# Pace to report extraction log.
_STATUS_CHECK_ITERATIONS = 100
def _ReadImageList(list_path):
"""Helper function to read image paths.
Args:
list_path: Path to list of images, one image path per line.
Returns:
image_paths: List of image paths.
"""
with tf.io.gfile.GFile(list_path, 'r') as f:
image_paths = f.readlines()
image_paths = [entry.rstrip() for entry in image_paths]
return image_paths
def _FilterBoxesByScore(boxes, scores, class_indices, score_threshold):
"""Filter boxes based on detection scores.
Boxes with detection score >= score_threshold are returned.
Args:
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
scores: [N] float array with detection scores.
class_indices: [N] int array with class indices.
score_threshold: Float detection score threshold to use.
Returns:
selected_boxes: selected `boxes`.
selected_scores: selected `scores`.
selected_class_indices: selected `class_indices`.
"""
selected_boxes = []
selected_scores = []
selected_class_indices = []
for i, box in enumerate(boxes):
if scores[i] >= score_threshold:
selected_boxes.append(box)
selected_scores.append(scores[i])
selected_class_indices.append(class_indices[i])
return np.array(selected_boxes), np.array(selected_scores), np.array(
selected_class_indices)
def _PlotBoxesAndSaveImage(image, boxes, output_path):
"""Plot boxes on image and save to output path.
Args:
image: Numpy array containing image.
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
output_path: String containing output path.
"""
height = image.shape[0]
width = image.shape[1]
fig, ax = plt.subplots(1)
ax.imshow(image)
for i, box in enumerate(boxes):
scaled_box = [
box[0] * height, box[1] * width, box[2] * height, box[3] * width
]
rect = patches.Rectangle([scaled_box[1], scaled_box[0]],
scaled_box[3] - scaled_box[1],
scaled_box[2] - scaled_box[0],
linewidth=3,
edgecolor=_BOX_EDGE_COLORS[i %
len(_BOX_EDGE_COLORS)],
facecolor='none')
ax.add_patch(rect)
ax.axis('off')
plt.savefig(output_path, bbox_inches='tight')
plt.close(fig)
def main(argv):
if len(argv) > 1:
raise RuntimeError('Too many command-line arguments.')
# Read list of images.
print('Reading list of images...')
image_paths = _ReadImageList(cmd_args.list_images_path)
num_images = len(image_paths)
print(f'done! Found {num_images} images')
# Create output directories if necessary.
if not tf.io.gfile.exists(cmd_args.output_dir):
tf.io.gfile.makedirs(cmd_args.output_dir)
if cmd_args.output_viz_dir and not tf.io.gfile.exists(
cmd_args.output_viz_dir):
tf.io.gfile.makedirs(cmd_args.output_viz_dir)
detector_fn = detector.MakeDetector(cmd_args.detector_path)
start = time.time()
for i, image_path in enumerate(image_paths):
# Report progress once in a while.
if i == 0:
print('Starting to detect objects in images...')
elif i % _STATUS_CHECK_ITERATIONS == 0:
elapsed = (time.time() - start)
print(f'Processing image {i} out of {num_images}, last '
f'{_STATUS_CHECK_ITERATIONS} images took {elapsed} seconds')
start = time.time()
# If the boxes file already exists, skip its computation.
base_boxes_filename, _ = os.path.splitext(os.path.basename(image_path))
out_boxes_filename = base_boxes_filename + _BOX_EXT
out_boxes_fullpath = os.path.join(cmd_args.output_dir, out_boxes_filename)
if tf.io.gfile.exists(out_boxes_fullpath):
print(f'Skipping {image_path}')
continue
im = np.expand_dims(np.array(utils.RgbLoader(image_paths[i])), 0)
# Extract and save boxes.
(boxes_out, scores_out, class_indices_out) = detector_fn(im)
(selected_boxes, selected_scores,
selected_class_indices) = _FilterBoxesByScore(boxes_out[0], scores_out[0],
class_indices_out[0],
cmd_args.detector_thresh)
box_io.WriteToFile(out_boxes_fullpath, selected_boxes, selected_scores,
selected_class_indices)
if cmd_args.output_viz_dir:
out_viz_filename = base_boxes_filename + _VIZ_SUFFIX
out_viz_fullpath = os.path.join(cmd_args.output_viz_dir, out_viz_filename)
_PlotBoxesAndSaveImage(im[0], selected_boxes, out_viz_fullpath)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--detector_path',
type=str,
default='/tmp/d2r_frcnn_20190411/',
help="""
Path to exported detector model.
""")
parser.add_argument(
'--detector_thresh',
type=float,
default=.0,
help="""
Detector threshold. Any box with confidence score lower than this is not
returned.
""")
parser.add_argument(
'--list_images_path',
type=str,
default='list_images.txt',
help="""
Path to list of images to undergo object detection.
""")
parser.add_argument(
'--output_dir',
type=str,
default='test_boxes',
help="""
Directory where bounding boxes will be written to. Each image's boxes
will be written to a file with same name, and extension replaced by
.boxes.
""")
parser.add_argument(
'--output_viz_dir',
type=str,
default='',
help="""
Optional. If set, a visualization of the detected boxes overlaid on the
image is produced, and saved to this directory. Each image is saved with
_viz.jpg suffix.
""")
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
py | 1a3b48988b9ed70191f9685b9e8abf838a6b4f6f | # -*- coding: utf-8 -*-
'''
tests for user state
user absent
user present
user present with custom homedir
'''
# Import python libs
from __future__ import absolute_import
import os
import sys
from random import randint
import grp
# Import Salt Testing libs
import tests.integration as integration
from tests.support.unit import skipIf
from tests.support.helpers import destructiveTest, requires_system_grains
# Import salt libs
import salt.utils
if salt.utils.is_darwin():
USER = 'macuser'
GROUP = 'macuser'
GID = randint(400, 500)
NOGROUPGID = randint(400, 500)
else:
USER = 'nobody'
GROUP = 'nobody'
GID = 'nobody'
NOGROUPGID = 'nogroup'
class UserTest(integration.ModuleCase,
integration.SaltReturnAssertsMixIn):
'''
test for user absent
'''
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def setUp(self):
if salt.utils.is_darwin():
# On macOS we need to add the user, because there is
# no creation time for the nobody user.
add_user = self.run_function('user.add', [USER], gid=GID)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_absent(self):
ret = self.run_state('user.absent', name='unpossible')
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_if_present(self):
ret = self.run_state('user.present', name=USER)
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_if_present_with_gid(self):
if self.run_function('group.info', [USER]):
ret = self.run_state('user.present', name=USER, gid=GID)
elif self.run_function('group.info', ['nogroup']):
ret = self.run_state('user.present', name=USER, gid=NOGROUPGID)
else:
self.skipTest(
'Neither \'nobody\' nor \'nogroup\' are valid groups'
)
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_not_present(self):
'''
This is a DESTRUCTIVE TEST; it creates a new user on the minion.
And then destroys that user.
Assume that it will break any system you run it on.
'''
ret = self.run_state('user.present', name='salt_test')
self.assertSaltTrueReturn(ret)
ret = self.run_state('user.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_present_when_home_dir_does_not_18843(self):
'''
This is a DESTRUCTIVE TEST; it creates a new user on the minion.
And then destroys that user.
Assume that it will break any system you run it on.
'''
if salt.utils.is_darwin():
HOMEDIR = '/Users/home_of_salt_test'
else:
HOMEDIR = '/home/home_of_salt_test'
ret = self.run_state('user.present', name='salt_test',
home=HOMEDIR)
self.assertSaltTrueReturn(ret)
self.run_function('file.absent', name=HOMEDIR)
ret = self.run_state('user.present', name='salt_test',
home=HOMEDIR)
self.assertSaltTrueReturn(ret)
ret = self.run_state('user.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_present_nondefault(self):
'''
This is a DESTRUCTIVE TEST; it creates a new user on the minion.
'''
ret = self.run_state('user.present', name='salt_test',
home='/var/lib/salt_test')
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isdir('/var/lib/salt_test'))
ret = self.run_state('user.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
@requires_system_grains
def test_user_present_gid_from_name_default(self, grains=None):
'''
This is a DESTRUCTIVE TEST. It creates a new user on the minion.
This is an integration test. Not all systems will automatically create
a group of the same name as the user, but I don't have access to any that don't.
If you run the test and it fails, please fix the code it's testing to
work on your operating system.
'''
# MacOS users' primary group defaults to staff (20), not the name of
# user
gid_from_name = False if grains['os_family'] == 'MacOS' else True
ret = self.run_state('user.present', name='salt_test',
gid_from_name=gid_from_name, home='/var/lib/salt_test')
self.assertSaltTrueReturn(ret)
ret = self.run_function('user.info', ['salt_test'])
self.assertReturnNonEmptySaltType(ret)
group_name = grp.getgrgid(ret['gid']).gr_name
self.assertTrue(os.path.isdir('/var/lib/salt_test'))
if grains['os_family'] in ('Suse',):
self.assertEqual(group_name, 'users')
elif grains['os_family'] == 'MacOS':
self.assertEqual(group_name, 'staff')
else:
self.assertEqual(group_name, 'salt_test')
ret = self.run_state('user.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_present_gid_from_name(self):
'''
This is a DESTRUCTIVE TEST; it creates a new user on the minion.
This is a unit test, NOT an integration test. We create a group of the
same name as the user beforehand, so it should all run smoothly.
'''
ret = self.run_state('group.present', name='salt_test')
self.assertSaltTrueReturn(ret)
ret = self.run_state('user.present', name='salt_test',
gid_from_name=True, home='/var/lib/salt_test')
self.assertSaltTrueReturn(ret)
ret = self.run_function('user.info', ['salt_test'])
self.assertReturnNonEmptySaltType(ret)
group_name = grp.getgrgid(ret['gid']).gr_name
self.assertTrue(os.path.isdir('/var/lib/salt_test'))
self.assertEqual(group_name, 'salt_test')
ret = self.run_state('user.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
ret = self.run_state('group.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
@skipIf(sys.getfilesystemencoding().startswith('ANSI'), 'A system encoding which supports Unicode characters must be set. Current setting is: {0}. Try setting $LANG=\'en_US.UTF-8\''.format(sys.getfilesystemencoding()))
def test_user_present_unicode(self):
'''
This is a DESTRUCTIVE TEST; it creates a new user on the minion.
It ensures that unicode GECOS data will be properly handled, without
any encoding-related failures.
'''
ret = self.run_state(
'user.present', name='salt_test', fullname=u'Sålt Test', roomnumber=u'①②③',
workphone=u'١٢٣٤', homephone=u'६७८'
)
self.assertSaltTrueReturn(ret)
# Ensure updating a user also works
ret = self.run_state(
'user.present', name='salt_test', fullname=u'Sølt Test', roomnumber=u'①③②',
workphone=u'٣٤١٢', homephone=u'६८७'
)
self.assertSaltTrueReturn(ret)
# ret = self.run_state('user.absent', name='salt_test')
# self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_present_gecos(self):
'''
This is a DESTRUCTIVE TEST; it creates a new user on the minion.
It ensures that numeric GECOS data will be properly coerced to strings,
otherwise the state will fail because the GECOS fields are written as
strings (and show up in the user.info output as such). Thus the
comparison will fail, since '12345' != 12345.
'''
ret = self.run_state(
'user.present', name='salt_test', fullname=12345, roomnumber=123,
workphone=1234567890, homephone=1234567890
)
self.assertSaltTrueReturn(ret)
ret = self.run_state('user.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_present_gecos_none_fields(self):
'''
This is a DESTRUCTIVE TEST; it creates a new user on the minion.
It ensures that if no GECOS data is supplied, the fields will be coerced
into empty strings as opposed to the string "None".
'''
ret = self.run_state(
'user.present', name='salt_test', fullname=None, roomnumber=None,
workphone=None, homephone=None
)
self.assertSaltTrueReturn(ret)
ret = self.run_function('user.info', ['salt_test'])
self.assertReturnNonEmptySaltType(ret)
self.assertEqual('', ret['fullname'])
# MacOS does not supply the following GECOS fields
if not salt.utils.is_darwin():
self.assertEqual('', ret['roomnumber'])
self.assertEqual('', ret['workphone'])
self.assertEqual('', ret['homephone'])
ret = self.run_state('user.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def tearDown(self):
if salt.utils.is_darwin():
check_user = self.run_function('user.list_users')
if USER in check_user:
del_user = self.run_function('user.delete', [USER], remove=True)
|
py | 1a3b489e759122d56a92ea8be9fd6856e062779d | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'CreateKeyDoneDialog.ui'
#
# Created: Mon Oct 09 13:21:17 2006
# by: PyQt4 UI code generator 4.0.1
#
# WARNING! All changes made in this file will be lost!
import sys
from PyQt4 import QtCore, QtGui
class Ui_CreateKeyDoneDialog(object):
def setupUi(self, CreateKeyDoneDialog):
CreateKeyDoneDialog.setObjectName("CreateKeyDoneDialog")
CreateKeyDoneDialog.resize(QtCore.QSize(QtCore.QRect(0,0,314,172).size()).expandedTo(CreateKeyDoneDialog.minimumSizeHint()))
CreateKeyDoneDialog.setWindowIcon(QtGui.QIcon(":/images/register32.png"))
self.vboxlayout = QtGui.QVBoxLayout(CreateKeyDoneDialog)
self.vboxlayout.setMargin(9)
self.vboxlayout.setSpacing(6)
self.vboxlayout.setObjectName("vboxlayout")
self.label = QtGui.QLabel(CreateKeyDoneDialog)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.vboxlayout.addWidget(self.label)
self.groupBox = QtGui.QGroupBox(CreateKeyDoneDialog)
self.groupBox.setObjectName("groupBox")
self.vboxlayout1 = QtGui.QVBoxLayout(self.groupBox)
self.vboxlayout1.setMargin(9)
self.vboxlayout1.setSpacing(6)
self.vboxlayout1.setObjectName("vboxlayout1")
self.label_2 = QtGui.QLabel(self.groupBox)
self.label_2.setWordWrap(True)
self.label_2.setObjectName("label_2")
self.vboxlayout1.addWidget(self.label_2)
self.hboxlayout = QtGui.QHBoxLayout()
self.hboxlayout.setMargin(0)
self.hboxlayout.setSpacing(6)
self.hboxlayout.setObjectName("hboxlayout")
self.label_3 = QtGui.QLabel(self.groupBox)
self.label_3.setObjectName("label_3")
self.hboxlayout.addWidget(self.label_3)
self.keyId = QtGui.QLineEdit(self.groupBox)
self.keyId.setReadOnly(True)
self.keyId.setObjectName("keyId")
self.hboxlayout.addWidget(self.keyId)
self.vboxlayout1.addLayout(self.hboxlayout)
self.vboxlayout.addWidget(self.groupBox)
self.hboxlayout1 = QtGui.QHBoxLayout()
self.hboxlayout1.setMargin(0)
self.hboxlayout1.setSpacing(6)
self.hboxlayout1.setObjectName("hboxlayout1")
self.rememberKey = QtGui.QCheckBox(CreateKeyDoneDialog)
self.rememberKey.setObjectName("rememberKey")
self.hboxlayout1.addWidget(self.rememberKey)
spacerItem = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
self.hboxlayout1.addItem(spacerItem)
self.goOnlineButton = QtGui.QPushButton(CreateKeyDoneDialog)
self.goOnlineButton.setObjectName("goOnlineButton")
self.hboxlayout1.addWidget(self.goOnlineButton)
self.vboxlayout.addLayout(self.hboxlayout1)
self.retranslateUi(CreateKeyDoneDialog)
QtCore.QMetaObject.connectSlotsByName(CreateKeyDoneDialog)
def retranslateUi(self, CreateKeyDoneDialog):
CreateKeyDoneDialog.setWindowTitle(QtGui.QApplication.translate("CreateKeyDoneDialog", "Private Key Created", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("CreateKeyDoneDialog", "<html><head><meta name=\"qrichtext\" content=\"1\" /></head><body style=\" white-space: pre-wrap; font-family:MS Shell Dlg 2; font-size:8.25pt; font-weight:400; font-style:normal; text-decoration:none;\"><p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt; font-weight:600;\">Your RSA Private Key has been created.</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("CreateKeyDoneDialog", "KeyID", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("CreateKeyDoneDialog", "<html><head><meta name=\"qrichtext\" content=\"1\" /></head><body style=\" white-space: pre-wrap; font-family:MS Shell Dlg 2; font-size:8.25pt; font-weight:400; font-style:normal; text-decoration:none;\"><p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt; font-weight:600;\">Please send your KeyID to your friends so that they can add you to their contact list.</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("CreateKeyDoneDialog", "KeyID:", None, QtGui.QApplication.UnicodeUTF8))
self.rememberKey.setText(QtGui.QApplication.translate("CreateKeyDoneDialog", "&Remember password for this key", None, QtGui.QApplication.UnicodeUTF8))
self.goOnlineButton.setText(QtGui.QApplication.translate("CreateKeyDoneDialog", "&Go Online", None, QtGui.QApplication.UnicodeUTF8))
|
py | 1a3b4afce1361e775fb41e0cf84ef4391e7b2826 | from __future__ import unicode_literals
from collections import defaultdict
import datetime
import json
from moto.compat import OrderedDict
from moto.core import BaseBackend, BaseModel, CloudFormationModel
from moto.core.utils import unix_time
from moto.core import ACCOUNT_ID
from .comparisons import get_comparison_func
class DynamoJsonEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, "to_json"):
return obj.to_json()
def dynamo_json_dump(dynamo_object):
return json.dumps(dynamo_object, cls=DynamoJsonEncoder)
class DynamoType(object):
"""
http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelDataTypes
"""
def __init__(self, type_as_dict):
self.type = list(type_as_dict.keys())[0]
self.value = list(type_as_dict.values())[0]
def __hash__(self):
return hash((self.type, self.value))
def __eq__(self, other):
return self.type == other.type and self.value == other.value
def __repr__(self):
return "DynamoType: {0}".format(self.to_json())
def to_json(self):
return {self.type: self.value}
def compare(self, range_comparison, range_objs):
"""
Compares this type against comparison filters
"""
range_values = [obj.value for obj in range_objs]
comparison_func = get_comparison_func(range_comparison)
return comparison_func(self.value, *range_values)
class Item(BaseModel):
def __init__(self, hash_key, hash_key_type, range_key, range_key_type, attrs):
self.hash_key = hash_key
self.hash_key_type = hash_key_type
self.range_key = range_key
self.range_key_type = range_key_type
self.attrs = {}
for key, value in attrs.items():
self.attrs[key] = DynamoType(value)
def __repr__(self):
return "Item: {0}".format(self.to_json())
def to_json(self):
attributes = {}
for attribute_key, attribute in self.attrs.items():
attributes[attribute_key] = attribute.value
return {"Attributes": attributes}
def describe_attrs(self, attributes):
if attributes:
included = {}
for key, value in self.attrs.items():
if key in attributes:
included[key] = value
else:
included = self.attrs
return {"Item": included}
class Table(CloudFormationModel):
def __init__(
self,
name,
hash_key_attr,
hash_key_type,
range_key_attr=None,
range_key_type=None,
read_capacity=None,
write_capacity=None,
):
self.name = name
self.hash_key_attr = hash_key_attr
self.hash_key_type = hash_key_type
self.range_key_attr = range_key_attr
self.range_key_type = range_key_type
self.read_capacity = read_capacity
self.write_capacity = write_capacity
self.created_at = datetime.datetime.utcnow()
self.items = defaultdict(dict)
@property
def has_range_key(self):
return self.range_key_attr is not None
@property
def describe(self):
results = {
"Table": {
"CreationDateTime": unix_time(self.created_at),
"KeySchema": {
"HashKeyElement": {
"AttributeName": self.hash_key_attr,
"AttributeType": self.hash_key_type,
}
},
"ProvisionedThroughput": {
"ReadCapacityUnits": self.read_capacity,
"WriteCapacityUnits": self.write_capacity,
},
"TableName": self.name,
"TableStatus": "ACTIVE",
"ItemCount": len(self),
"TableSizeBytes": 0,
}
}
if self.has_range_key:
results["Table"]["KeySchema"]["RangeKeyElement"] = {
"AttributeName": self.range_key_attr,
"AttributeType": self.range_key_type,
}
return results
@staticmethod
def cloudformation_name_type():
return "TableName"
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html
return "AWS::DynamoDB::Table"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
key_attr = [
i["AttributeName"]
for i in properties["KeySchema"]
if i["KeyType"] == "HASH"
][0]
key_type = [
i["AttributeType"]
for i in properties["AttributeDefinitions"]
if i["AttributeName"] == key_attr
][0]
spec = {
"name": properties["TableName"],
"hash_key_attr": key_attr,
"hash_key_type": key_type,
}
# TODO: optional properties still missing:
# range_key_attr, range_key_type, read_capacity, write_capacity
return Table(**spec)
def __len__(self):
count = 0
for key, value in self.items.items():
if self.has_range_key:
count += len(value)
else:
count += 1
return count
def __nonzero__(self):
return True
def __bool__(self):
return self.__nonzero__()
def put_item(self, item_attrs):
hash_value = DynamoType(item_attrs.get(self.hash_key_attr))
if self.has_range_key:
range_value = DynamoType(item_attrs.get(self.range_key_attr))
else:
range_value = None
item = Item(
hash_value, self.hash_key_type, range_value, self.range_key_type, item_attrs
)
if range_value:
self.items[hash_value][range_value] = item
else:
self.items[hash_value] = item
return item
def get_item(self, hash_key, range_key):
if self.has_range_key and not range_key:
raise ValueError(
"Table has a range key, but no range key was passed into get_item"
)
try:
if range_key:
return self.items[hash_key][range_key]
else:
return self.items[hash_key]
except KeyError:
return None
def query(self, hash_key, range_comparison, range_objs):
results = []
last_page = True # Once pagination is implemented, change this
if self.range_key_attr:
possible_results = self.items[hash_key].values()
else:
possible_results = list(self.all_items())
if range_comparison:
for result in possible_results:
if result.range_key.compare(range_comparison, range_objs):
results.append(result)
else:
# If we're not filtering on range key, return all values
results = possible_results
return results, last_page
def all_items(self):
for hash_set in self.items.values():
if self.range_key_attr:
for item in hash_set.values():
yield item
else:
yield hash_set
def scan(self, filters):
results = []
scanned_count = 0
last_page = True # Once pagination is implemented, change this
for result in self.all_items():
scanned_count += 1
passes_all_conditions = True
for (
attribute_name,
(comparison_operator, comparison_objs),
) in filters.items():
attribute = result.attrs.get(attribute_name)
if attribute:
# Attribute found
if not attribute.compare(comparison_operator, comparison_objs):
passes_all_conditions = False
break
elif comparison_operator == "NULL":
# Comparison is NULL and we don't have the attribute
continue
else:
# No attribute found and comparison is no NULL. This item
# fails
passes_all_conditions = False
break
if passes_all_conditions:
results.append(result)
return results, scanned_count, last_page
def delete_item(self, hash_key, range_key):
try:
if range_key:
return self.items[hash_key].pop(range_key)
else:
return self.items.pop(hash_key)
except KeyError:
return None
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "StreamArn":
region = "us-east-1"
time = "2000-01-01T00:00:00.000"
return "arn:aws:dynamodb:{0}:{1}:table/{2}/stream/{3}".format(
region, ACCOUNT_ID, self.name, time
)
raise UnformattedGetAttTemplateException()
class DynamoDBBackend(BaseBackend):
def __init__(self):
self.tables = OrderedDict()
def create_table(self, name, **params):
table = Table(name, **params)
self.tables[name] = table
return table
def delete_table(self, name):
return self.tables.pop(name, None)
def update_table_throughput(self, name, new_read_units, new_write_units):
table = self.tables[name]
table.read_capacity = new_read_units
table.write_capacity = new_write_units
return table
def put_item(self, table_name, item_attrs):
table = self.tables.get(table_name)
if not table:
return None
return table.put_item(item_attrs)
def get_item(self, table_name, hash_key_dict, range_key_dict):
table = self.tables.get(table_name)
if not table:
return None
hash_key = DynamoType(hash_key_dict)
range_key = DynamoType(range_key_dict) if range_key_dict else None
return table.get_item(hash_key, range_key)
def query(self, table_name, hash_key_dict, range_comparison, range_value_dicts):
table = self.tables.get(table_name)
if not table:
return None, None
hash_key = DynamoType(hash_key_dict)
range_values = [DynamoType(range_value) for range_value in range_value_dicts]
return table.query(hash_key, range_comparison, range_values)
def scan(self, table_name, filters):
table = self.tables.get(table_name)
if not table:
return None, None, None
scan_filters = {}
for key, (comparison_operator, comparison_values) in filters.items():
dynamo_types = [DynamoType(value) for value in comparison_values]
scan_filters[key] = (comparison_operator, dynamo_types)
return table.scan(scan_filters)
def delete_item(self, table_name, hash_key_dict, range_key_dict):
table = self.tables.get(table_name)
if not table:
return None
hash_key = DynamoType(hash_key_dict)
range_key = DynamoType(range_key_dict) if range_key_dict else None
return table.delete_item(hash_key, range_key)
dynamodb_backend = DynamoDBBackend()
|
py | 1a3b4b69ffa4dda3c548f67f305db21a798959ee | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui4/printdialog_base.ui'
#
# Created: Mon May 4 14:30:35 2009
# by: PyQt4 UI code generator 4.4.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.setWindowModality(QtCore.Qt.ApplicationModal)
Dialog.resize(700, 500)
self.gridlayout = QtGui.QGridLayout(Dialog)
self.gridlayout.setObjectName("gridlayout")
self.StackedWidget = QtGui.QStackedWidget(Dialog)
self.StackedWidget.setObjectName("StackedWidget")
self.page = QtGui.QWidget()
self.page.setObjectName("page")
self.gridlayout1 = QtGui.QGridLayout(self.page)
self.gridlayout1.setObjectName("gridlayout1")
self.label_2 = QtGui.QLabel(self.page)
font = QtGui.QFont()
font.setPointSize(16)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.gridlayout1.addWidget(self.label_2, 0, 0, 1, 1)
self.line = QtGui.QFrame(self.page)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName("line")
self.gridlayout1.addWidget(self.line, 1, 0, 1, 1)
self.Files = FileTable(self.page)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Files.sizePolicy().hasHeightForWidth())
self.Files.setSizePolicy(sizePolicy)
self.Files.setObjectName("Files")
self.gridlayout1.addWidget(self.Files, 2, 0, 1, 1)
self.StackedWidget.addWidget(self.page)
self.page_2 = QtGui.QWidget()
self.page_2.setObjectName("page_2")
self.gridlayout2 = QtGui.QGridLayout(self.page_2)
self.gridlayout2.setObjectName("gridlayout2")
self.label_3 = QtGui.QLabel(self.page_2)
font = QtGui.QFont()
font.setPointSize(16)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.gridlayout2.addWidget(self.label_3, 0, 0, 1, 1)
self.line_2 = QtGui.QFrame(self.page_2)
self.line_2.setFrameShape(QtGui.QFrame.HLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.gridlayout2.addWidget(self.line_2, 1, 0, 1, 1)
self.PrinterName = PrinterNameComboBox(self.page_2)
self.PrinterName.setObjectName("PrinterName")
self.gridlayout2.addWidget(self.PrinterName, 2, 0, 1, 1)
self.OptionsToolBox = PrintSettingsToolbox(self.page_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.OptionsToolBox.sizePolicy().hasHeightForWidth())
self.OptionsToolBox.setSizePolicy(sizePolicy)
self.OptionsToolBox.setObjectName("OptionsToolBox")
self.gridlayout2.addWidget(self.OptionsToolBox, 3, 0, 1, 1)
self.StackedWidget.addWidget(self.page_2)
self.gridlayout.addWidget(self.StackedWidget, 0, 0, 1, 5)
self.line_3 = QtGui.QFrame(Dialog)
self.line_3.setFrameShape(QtGui.QFrame.HLine)
self.line_3.setFrameShadow(QtGui.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.gridlayout.addWidget(self.line_3, 1, 0, 1, 5)
self.StepText = QtGui.QLabel(Dialog)
self.StepText.setObjectName("StepText")
self.gridlayout.addWidget(self.StepText, 2, 0, 1, 1)
spacerItem = QtGui.QSpacerItem(251, 28, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridlayout.addItem(spacerItem, 2, 1, 1, 1)
self.BackButton = QtGui.QPushButton(Dialog)
self.BackButton.setObjectName("BackButton")
self.gridlayout.addWidget(self.BackButton, 2, 2, 1, 1)
self.NextButton = QtGui.QPushButton(Dialog)
self.NextButton.setObjectName("NextButton")
self.gridlayout.addWidget(self.NextButton, 2, 3, 1, 1)
self.CancelButton = QtGui.QPushButton(Dialog)
self.CancelButton.setObjectName("CancelButton")
self.gridlayout.addWidget(self.CancelButton, 2, 4, 1, 1)
self.retranslateUi(Dialog)
self.StackedWidget.setCurrentIndex(1)
self.OptionsToolBox.setCurrentIndex(-1)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "HP Device Manager - Print", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("Dialog", "Select Files to Print", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("Dialog", "Select Printer and Options", None, QtGui.QApplication.UnicodeUTF8))
self.StepText.setText(QtGui.QApplication.translate("Dialog", "Step %1 of %2", None, QtGui.QApplication.UnicodeUTF8))
self.BackButton.setText(QtGui.QApplication.translate("Dialog", "< Back", None, QtGui.QApplication.UnicodeUTF8))
self.NextButton.setText(QtGui.QApplication.translate("Dialog", "Next >", None, QtGui.QApplication.UnicodeUTF8))
self.CancelButton.setText(QtGui.QApplication.translate("Dialog", "Cancel", None, QtGui.QApplication.UnicodeUTF8))
from .printsettingstoolbox import PrintSettingsToolbox
from .printernamecombobox import PrinterNameComboBox
from .filetable import FileTable
|
py | 1a3b4c3536f61c443997e3f3cb00a0a3f8d4a9ee | import warnings
from platon import Account
from platon import Web3
from platon_typing import Address
from platon_aide.utils import send_transaction, get_transaction_result
class Module:
address: None
def __init__(self, web3: Web3):
self.web3 = web3
self.default_account: Account = None
# todo: set the default address
self.default_address: Address = None
# Module type; currently only used to decide whether events can be returned. Known value: 'inner-contract'
self._module_type = ''
# Result type, one of: txn, hash, receipt, event (event is only available for inner contracts)
self._result_type = 'receipt'
def set_default_account(self, account):
self.default_account = account
def _get_node_info(self):
node_info = self.web3.node.admin.node_info()
# self._node_id = node_info['id']  # todo: add a note explaining why the 'id' field is not used
self._node_id = node_info['enode'].split('//')[1].split('@')[0]  # use the node id taken from the enode URI
self._bls_pubkey = node_info['blsPubKey']
self._bls_proof = self.web3.node.admin.get_schnorr_NIZK_prove()
version_info = self.web3.node.admin.get_program_version()
self._version = version_info['Version']
self._version_sign = version_info['Sign']
def send_transaction(self, txn, private_key, result_type=''):
result_type = result_type or self._result_type
if not private_key and self.default_account:
private_key = self.default_account.privateKey.hex()[2:]
tx_hash = send_transaction(self.web3, txn, private_key)
return self._get_transaction_result(tx_hash, result_type)
def _get_transaction_result(self, tx_hash, result_type):
if result_type == 'event' and self._module_type != 'inner-contract':
raise TypeError('result type "event" only support inner contract')
return get_transaction_result(self.web3, tx_hash, result_type)
def set_result_type(self, result_type):
if result_type not in ('txn', 'hash', 'receipt', 'event'):
raise ValueError('Unrecognized value')
if result_type == 'event' and self._module_type != 'inner-contract':
warnings.warn(f'result type "event" only support inner contract, '
f'try set {self.__class__.__name__} result type to "receipt"', RuntimeWarning)
result_type = 'receipt'
self._result_type = result_type
|
py | 1a3b4c4bf988a81dff3d1c56c48c2829f01da6ba | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_waas_protection_rules
short_description: Manage a ProtectionRules resource in Oracle Cloud Infrastructure
description:
- This module allows the user to update a ProtectionRules resource in Oracle Cloud Infrastructure
version_added: "2.9.0"
author: Oracle (@oracle)
options:
waas_policy_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the WAAS policy.
type: str
aliases: ["id"]
required: true
protection_rules:
description:
- ""
type: list
elements: dict
required: true
suboptions:
key:
description:
- The unique key of the protection rule.
- This parameter is updatable.
type: str
required: true
action:
description:
- The action to apply to the protection rule. If unspecified, defaults to `OFF`.
- This parameter is updatable.
type: str
choices:
- "OFF"
- "DETECT"
- "BLOCK"
required: true
exclusions:
description:
- The types of requests excluded from the protection rule action. If the requests matches the criteria in the `exclusions`, the protection
rule action will not be executed.
type: list
elements: dict
suboptions:
target:
description:
- The target of the exclusion.
- This parameter is updatable.
type: str
choices:
- "REQUEST_COOKIES"
- "REQUEST_COOKIE_NAMES"
- "ARGS"
- "ARGS_NAMES"
exclusions:
description:
- ""
- This parameter is updatable.
type: list
elements: str
state:
description:
- The state of the ProtectionRules.
- Use I(state=present) to update an existing a ProtectionRules.
type: str
required: false
default: 'present'
choices: ["present"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Update protection_rules
oci_waas_protection_rules:
waas_policy_id: "ocid1.waaspolicy.oc1..xxxxxxEXAMPLExxxxxx"
protection_rules:
- key: key_example
action: OFF
"""
RETURN = """
protection_rules:
description:
- Details of the ProtectionRules resource acted upon by the current operation
returned: on success
type: complex
contains:
key:
description:
- The unique key of the protection rule.
returned: on success
type: str
sample: key_example
mod_security_rule_ids:
description:
- The list of the ModSecurity rule IDs that apply to this protection rule. For more information about ModSecurity's open source WAF rules, see
L(Mod Security's documentation,https://www.modsecurity.org/CRS/Documentation/index.html).
returned: on success
type: list
sample: []
name:
description:
- The name of the protection rule.
returned: on success
type: str
sample: name_example
description:
description:
- The description of the protection rule.
returned: on success
type: str
sample: description_example
action:
description:
- The action to take when the traffic is detected as malicious. If unspecified, defaults to `OFF`.
returned: on success
type: str
sample: OFF
labels:
description:
- The list of labels for the protection rule.
- "**Note:** Protection rules with a `ResponseBody` label will have no effect unless `isResponseInspected` is true."
returned: on success
type: list
sample: []
exclusions:
description:
- ""
returned: on success
type: complex
contains:
target:
description:
- The target of the exclusion.
returned: on success
type: str
sample: REQUEST_COOKIES
exclusions:
description:
- ""
returned: on success
type: list
sample: []
sample: {
"key": "key_example",
"mod_security_rule_ids": [],
"name": "name_example",
"description": "description_example",
"action": "OFF",
"labels": [],
"exclusions": [{
"target": "REQUEST_COOKIES",
"exclusions": []
}]
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
try:
from oci.waas import WaasClient
from oci.waas.models import ProtectionRuleAction
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class ProtectionRulesHelperGen(OCIResourceHelperBase):
"""Supported operations: update, get and list"""
def get_module_resource_id_param(self):
return "waas_policy_id"
def get_module_resource_id(self):
return self.module.params.get("waas_policy_id")
def get_get_fn(self):
return self.client.get_protection_rule
def get_resource(self):
return oci_common_utils.get_default_response_from_resource(
oci_common_utils.list_all_resources(
self.client.get_protection_rule,
waas_policy_id=self.module.params.get("waas_policy_id"),
protection_rule_key=self.module.params.get("protection_rule_key"),
)
)
def get_required_kwargs_for_list(self):
required_list_method_params = [
"waas_policy_id",
]
return dict(
(param, self.module.params[param]) for param in required_list_method_params
)
def get_optional_kwargs_for_list(self):
return dict()
def list_resources(self):
required_kwargs = self.get_required_kwargs_for_list()
optional_kwargs = self.get_optional_kwargs_for_list()
kwargs = oci_common_utils.merge_dicts(required_kwargs, optional_kwargs)
return oci_common_utils.list_all_resources(
self.client.list_protection_rules, **kwargs
)
def get_update_model_class(self):
return ProtectionRuleAction
def get_update_model(self):
if self.module.params.get("protection_rules"):
return [
oci_common_utils.convert_input_data_to_model_class(
resource, self.get_update_model_class()
)
for resource in self.module.params["protection_rules"]
]
return []
def update_resource(self):
update_details = self.get_update_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.update_protection_rules,
call_fn_args=(),
call_fn_kwargs=dict(
waas_policy_id=self.module.params.get("waas_policy_id"),
protection_rules=update_details,
),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation=oci_common_utils.UPDATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
ProtectionRulesHelperCustom = get_custom_class("ProtectionRulesHelperCustom")
class ResourceHelper(ProtectionRulesHelperCustom, ProtectionRulesHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=False, supports_wait=True
)
module_args.update(
dict(
waas_policy_id=dict(aliases=["id"], type="str", required=True),
protection_rules=dict(
type="list",
elements="dict",
required=True,
options=dict(
key=dict(type="str", required=True, no_log=True),
action=dict(
type="str", required=True, choices=["OFF", "DETECT", "BLOCK"]
),
exclusions=dict(
type="list",
elements="dict",
options=dict(
target=dict(
type="str",
choices=[
"REQUEST_COOKIES",
"REQUEST_COOKIE_NAMES",
"ARGS",
"ARGS_NAMES",
],
),
exclusions=dict(type="list", elements="str"),
),
),
),
),
state=dict(type="str", default="present", choices=["present"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="protection_rules",
service_client_class=WaasClient,
namespace="waas",
)
result = dict(changed=False)
if resource_helper.is_update():
result = resource_helper.update()
module.exit_json(**result)
if __name__ == "__main__":
main()
|
py | 1a3b4cce79ae175f3ec5456cc45f50d377d4e6e3 | #!/usr/bin/env python3
import re
import sys
import sqlite3
import traceback
import os
__location__ = os.path.realpath(
os.path.join(
os.getcwd(),
os.path.dirname(__file__)
)
)
input_failures = 0
try:
DATABASE_NAME = os.path.join(__location__, 'data.sqlite')
conn = sqlite3.connect(DATABASE_NAME)
i = 0
for line in sys.stdin:
l = line.strip()
# Groups: 1 2 3 4 5
match = re.search('^(\w+)\s+([\w\-\:]+)\s+([\w\-]+)\s+(\w+|-)\s+OK(.*)$', l)
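# A line this regex is expected to match looks like the following (hypothetical values):
#   ZH 2020-03-25T15:30 1234 43 OK # Extras: current_hosp=200,current_icu=48 # URLs: https://example.org/bulletin
# i.e. area, ISO date (optionally with a T-separated time), cumulative confirmed,
# cumulative deceased, the literal "OK", then optional extras and source URLs.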
if not match:
input_failures += 1
print(f'Error: input line did not match the expected format: {l}')
continue
date_part = match.group(2).split('T')
data = {
'date': date_part[0],
'time': '',
'area': match.group(1),
'tested': '',
'confirmed': match.group(3),
'new_hospitalized': '',
'hospitalized': '',
'icu': '',
'vent': '',
'released': '',
'deceased': match.group(4),
'source': '',
}
if len(date_part) == 2:
data['time'] = date_part[1]
if data['confirmed'] == '-':
data['confirmed'] = ''
else:
data['confirmed'] = int(data['confirmed'])
if data['deceased'] == '-':
data['deceased'] = ''
else:
data['deceased'] = int(data['deceased'])
# Parse optional data.
rest = match.group(5)
extras_match = re.search('# Extras: ([^#]+)', rest)
if extras_match:
try:
extras = extras_match.group(1).strip()
extras = extras.split(',')
extras = { kv.split('=', 2)[0]: int(kv.split('=', 2)[1]) for kv in extras }
if 'current_hosp' in extras:
data['hospitalized'] = extras['current_hosp']
if 'current_icu' in extras:
data['icu'] = extras['current_icu']
if 'current_vent' in extras:
data['vent'] = extras['current_vent']
if 'ncumul_released' in extras:
data['released'] = extras['ncumul_released']
except Exception as e:
print(f'Error: Parsing optional data failed, ignoring: {extras_match.group(1)}')
# Parse URLs
url_match = re.search('# URLs: ([^#]+)', rest)
try:
url_source = url_match.group(1).strip().split(', ')[-1]
except (AttributeError, TypeError, IndexError):
url_source = ''
if 'SCRAPER_SOURCE' in os.environ:
data['source'] = os.environ['SCRAPER_SOURCE']
elif url_source:
data['source'] = url_source
c = conn.cursor()
try:
print(data)
c.execute(
'''
INSERT INTO data (
date,
time,
abbreviation_canton_and_fl,
ncumul_tested,
ncumul_conf,
new_hosp,
current_hosp,
current_icu,
current_vent,
ncumul_released,
ncumul_deceased,
source
)
VALUES
(?,?,?,?,?,?,?,?,?,?,?,?)
''',
[
data['date'],
data['time'],
data['area'],
data['tested'],
data['confirmed'],
data['new_hospitalized'],
data['hospitalized'],
data['icu'],
data['vent'],
data['released'],
data['deceased'],
data['source'],
]
)
print("Successfully added new entry.")
except sqlite3.IntegrityError:
if os.environ.get('SCRAPER_OVERWRITE') == 'yes':
c.execute(
'''
UPDATE data
SET
time = ? ,
ncumul_tested = ? ,
ncumul_conf = ? ,
new_hosp = ? ,
current_hosp = ? ,
current_icu = ? ,
current_vent = ? ,
ncumul_released = ? ,
ncumul_deceased = ?,
source = ?
WHERE date = ?
AND abbreviation_canton_and_fl = ?
''',
[
data['time'],
data['tested'],
data['confirmed'],
data['new_hospitalized'],
data['hospitalized'],
data['icu'],
data['vent'],
data['released'],
data['deceased'],
data['source'],
data['date'],
data['area'],
]
)
print("Successfully updated entry.")
else:
print("Error: Data for this date has already been added")
finally:
conn.commit()
except Exception as e:
print("Error: %s" % e, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
sys.exit(1)
finally:
conn.close()
if input_failures:
sys.exit(1)
|
py | 1a3b4d3160e20c07fde97471bb9e1828cae905b6 | import asyncio
import base64
from pathlib import Path
from subprocess import Popen
from tempfile import mkstemp
import concurrent.futures
import urllib.parse
from nbconvert.exporters import Exporter, HTMLExporter
import aiohttp
from ._screenshot import get_chrome_path
async def handler(ws, data, key=None):
await ws.send_json(data)
async for msg in ws:
msg_json = msg.json()
if 'result' in msg_json:
result = msg_json['result'].get(key)
break
return result
async def main(file_name, p):
async with aiohttp.ClientSession() as session:
connected = False
await asyncio.sleep(1)
for _ in range(10):
try:
resp = await session.get('http://localhost:9222/json')
data = await resp.json()
page_url = data[0]['webSocketDebuggerUrl']
connected = True
except:
await asyncio.sleep(1)
if connected:
break
if not connected:
p.kill()
raise Exception('Could not connect to chrome server')
async with session.ws_connect(page_url, receive_timeout=3, max_msg_size=0) as ws:
# first - navigate to html page
params = {'url': file_name}
data = {'id': 1, 'method': 'Page.navigate', 'params': params}
frameId = await handler(ws, data, 'frameId')
# second - enable page
# await asyncio.sleep(1)
data = {'id': 2, 'method': 'Page.enable'}
await handler(ws, data)
# third - get html
params = {'frameId': frameId, 'url': file_name}
data = {'id': 3, 'method': 'Page.getResourceContent', 'params': params}
await handler(ws, data, 'content')
# fourth - get pdf
await asyncio.sleep(1)
params = {'displayHeaderFooter': False, 'printBackground': True}
data = {'id': 4, 'method': 'Page.printToPDF', 'params': params}
pdf_data = await handler(ws, data, 'data')
pdf_data = base64.b64decode(pdf_data)
return pdf_data
def launch_chrome():
chrome_path = get_chrome_path()
args = [chrome_path,
'--headless',
'--disable-gpu',
'--run-all-compositor-stages-before-draw',
'--remote-debugging-port=9222'
]
p = Popen(args=args)
return p
def get_html_data(nb, resources, **kw):
he = HTMLExporter()
html_data, resources = he.from_notebook_node(nb, resources, **kw)
html_data = html_data.replace('@media print', '@media xxprintxx')
return html_data
def get_pdf_data(file_name, p):
try:
from asyncio import run
except ImportError:
from ._my_asyncio import run
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
future = executor.submit(run, main(file_name, p))
return future.result()
class BrowserExporter(Exporter):
def _file_extension_default(self):
return '.pdf'
def from_notebook_node(self, nb, resources=None, **kw):
resources['output_extension'] = '.pdf'
nb_home = resources['metadata']['path']
p = launch_chrome()
html_data = get_html_data(nb, resources, **kw)
_, tf_name = mkstemp(dir=nb_home, suffix='.html')
with open(tf_name, 'w') as f:
f.write(html_data)
tf_path = Path(tf_name)
full_file_name = 'file://' + urllib.parse.quote(tf_name)
pdf_data = get_pdf_data(full_file_name, p)
import os
os.remove(tf_path)
p.kill()
return pdf_data, resources
|
py | 1a3b4dca8a2c81a6b80a140347433d710eda37f4 | # Import dependencies
from flask import Flask, render_template, redirect, url_for, request
from flask_login import LoginManager, login_required
import logging
import pxc_modules.plcnextAPI as API
import socket
import json
from flask_socketio import SocketIO, emit
# Get the local IP address of the PLCnext
def get_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except:
IP = '127.0.0.1'
finally:
s.close()
return IP
# Initial setup of flask server
app = Flask(__name__, static_folder='public', template_folder='views')
app.config['SECRET_KEY'] = 'Secret!'
socketio = SocketIO(app)
# loginManager = LoginManager()
# loginManager.init_app(app)
# Setting IP address, WBM address, and eHMI address for redirects.
ip = get_ip()
wbm = 'https://'+ip+'/wbm'
ehmi = 'https://'+ip+'/ehmi'
#@loginManager.user_loader
#def load_user(user_id):
# with open('config/user.json') as file:
# users = json.load(file)
# return users[user_id]
@app.route('/')
def goToPage():
#if loginManager.unauthorized():
return render_template('login.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
message = ''
if request.method == 'POST':
if request.form['username'] != 'admin' or request.form['password'] != 'admin':
message = 'Invalid Credentials. Please Try Again.'
else:
return redirect(url_for('dashboard'))
return render_template('login.html', message=message, ip=ip, wbm=wbm, ehmi=ehmi)
@app.route('/dashboard')
def dashboard():
return render_template('dashboard.html')
@app.route('/change-pass')
def changePass():
return render_template('change-pass.html', message='')
@app.route('/logout')
def logout():
return render_template('login.html', message='')
@socketio.on('connect')
def connection():
emit('welcomeMessage', 'Welcome to the Phoenix Contact Node Web Kit for PLCnext!')
if __name__ == '__main__':
socketio.run(app, host=ip, port=5000, debug=True)
plcnextAPI = API.getData(waitTime=1)
|
py | 1a3b50499c919d72d1b891c19cffae4703a0e10f | """
Precisely APIs
Enhance & enrich your data, applications, business processes, and workflows with rich location, information, and identify APIs. # noqa: E501
The version of the OpenAPI document: 11.9.3
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from com.precisely.apis.api_client import ApiClient, Endpoint as _Endpoint
from com.precisely.apis.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from com.precisely.apis.model.geo_location_access_point import GeoLocationAccessPoint
from com.precisely.apis.model.geo_location_ip_addr import GeoLocationIpAddr
class GeolocationServiceApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.get_location_by_ip_address_endpoint = _Endpoint(
settings={
'response_type': (GeoLocationIpAddr,),
'auth': [
'oAuth2Password'
],
'endpoint_path': '/geolocation/v1/location/byipaddress',
'operation_id': 'get_location_by_ip_address',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'ip_address',
],
'required': [
'ip_address',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'ip_address':
(str,),
},
'attribute_map': {
'ip_address': 'ipAddress',
},
'location_map': {
'ip_address': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/xml',
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_location_by_wi_fi_access_point_endpoint = _Endpoint(
settings={
'response_type': (GeoLocationAccessPoint,),
'auth': [
'oAuth2Password'
],
'endpoint_path': '/geolocation/v1/location/byaccesspoint',
'operation_id': 'get_location_by_wi_fi_access_point',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'mac',
'ssid',
'rsid',
'speed',
'access_point',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'mac':
(str,),
'ssid':
(str,),
'rsid':
(str,),
'speed':
(str,),
'access_point':
(str,),
},
'attribute_map': {
'mac': 'mac',
'ssid': 'ssid',
'rsid': 'rsid',
'speed': 'speed',
'access_point': 'accessPoint',
},
'location_map': {
'mac': 'query',
'ssid': 'query',
'rsid': 'query',
'speed': 'query',
'access_point': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/xml',
'application/json'
],
'content_type': [],
},
api_client=api_client
)
def get_location_by_ip_address(
self,
ip_address,
**kwargs
):
"""Location By IP Address. # noqa: E501
This service accepts an IP address and returns the location coordinates corresponding to that IP address. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_location_by_ip_address(ip_address, async_req=True)
>>> result = thread.get()
Args:
ip_address (str): This is the ip address of network connected device. It must be a standard IPv4 octet and a valid external address.
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
GeoLocationIpAddr
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['ip_address'] = \
ip_address
return self.get_location_by_ip_address_endpoint.call_with_http_info(**kwargs)
def get_location_by_wi_fi_access_point(
self,
**kwargs
):
"""Location by WiFi Access Point. # noqa: E501
This service accepts a WiFi access point MAC address and returns the location coordinates corresponding to that access point. Only mac or accessPoint are mandatory parameters (one of them has to be provided), rest are optional. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_location_by_wi_fi_access_point(async_req=True)
>>> result = thread.get()
Keyword Args:
mac (str): This should be the 48 bit mac address (or BSSID) of wireless access point. Accepted format is Six groups of two hexadecimal digits, separated by hyphens (-) or colons.. [optional]
ssid (str): The service set identifier for wi-fi access point. It should be alphanumeric with maximum 32 characters.. [optional]
rsid (str): This is the received signal strength indicator from particular wi-fi access point. It should be a number from -113 to 0 and the unit of this strength is dBm.. [optional]
speed (str): This is the connection speed for wi-fi. It should be a number from 0 to 6930 and the unit should be Mbps.. [optional]
access_point (str): This is the JSON based list of wifi access points in the vicinity of device to be located. This parameter is helpful in case, multiple wifi points are visible and we want to make sure that the location of device is best calculated considering all the access points location.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
GeoLocationAccessPoint
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
return self.get_location_by_wi_fi_access_point_endpoint.call_with_http_info(**kwargs)
|
py | 1a3b50ab89eca6f98c4da90cda88cbf5ebe668ed | #!/usr/bin/env python
"""
For a given motif annotated VCF file (already run through motifs.py) and a bed-like file for loci of interest and some
value for each loci for each sample, find loci that overlap a variant and compare the value of samples with the variant
to those without the variant. Report robut z-scores for each loci overlapped in an output VCF and report the variants
for each loci in a bed-like, loci-centric output file as well.
Usage: activity.py -i <input.vcf> -a <activity.bed> -ov <output.vcf> -ob <output.bed> [OPTIONS]
Args:
-i (str): Path to sorted variant file to process.
-a (str): Path to activity 'bed' file.
-ov (str): Path to VCF output file to be created.
-ob (str): Path to loci output file to be created.
-th (float, optional): Z-score magnitude threshold that must be met for variants/loci to be reported to output.
Default is 0, so all loci a variant overlaps will be reported.
-fan (int, optional): Set number of samples that must meet z-score threshold for a locus to be reported to
bed output file. So this number of samples must have the variant and have the locus's activity be significantly
affected by it. Default is 0, so a locus will be reported if its activity is altered in even one sample above
the robust z-score threshold.
-ib (bool, optional): Should loci that don't contain any variants that significantly affect their activity
be included in the bed output? False by default, set to True if wanted.
-iv (bool, optional): Should variants that don't significantly alter a locus's activity be included in the
vcf output? False by default, set to True if wanted.
"""
from __future__ import print_function # so Ninja IDE will stop complaining & show symbols
import argparse
import time
from statistics import median
from sequence import read_line2sample_list
from utils import Position, timeString
# TODO - Move into a utils.py file and import as appropriate. Add doc_string.
class Variant(object):
"""
Use to process and handle variant records from a VCF more easily. Create from line of VCF file.
"""
def __init__(self, line, all_sample_names):
self.line_list = line.strip().split("\t")
self.pos = Position(self.line_list[0], int(self.line_list[1]),
(int(self.line_list[1]) + len(self.line_list[3])))
self.ref_allele = self.line_list[3]
self.var_allele = self.line_list[4]
self.iden = self.line_list[2]
self.orig_line = line.strip()
self.info_fields = self.line_list[7].split(";")
self.var_samples, self.motif_fields = self.parse_info_fields()
self.ref_samples = [x for x in all_sample_names if x not in self.var_samples]
self.loci = []
if self.var_samples is not None: # Should never evaluate to False.
self.num_var_samps = len(self.var_samples)
else:
self.num_var_samps = 0
def parse_info_fields(self):
"""
Get names of samples containing variant and motif INFO fields from a variant record's INFO fields.
Args:
self (Variant): Variant object.
Returns:
samples (list of str): List of samples in which variant was called.
motif_fields (list of str): List of INFO fields for variant that contain MOTIF related information.
"""
samples = None
motif_fields = []
for field in self.info_fields:
if field != "INDEL": # Take care of INDEL flag.
field_info = field.split("=")
# TODO - This is a hack work around a bug that's messing up the MOTIFN field in tf_expression.py.
# Go back and actually figure out why the MOTIFN field is getting split up sometimes.
try:
name, data = (field_info[0], field_info[1])
except:
name, data = "BROKEN", None
else:
name, data = "INDEL", None
# TODO - Write method that parses header to determine # samples with variant rather than this lazy method.
if name == "set":
samples = data.split("-")
elif name.startswith("MOTIF"):
motif_fields.append(field)
return (samples, motif_fields)
def get_variant_output(self, include_vcf=False):
"""
Create VCF output line for given Variant object.
Args:
include_vcf (bool): True if variants that don't pass the z-score threshold for any Locus should still be
included in the VCF output. False if they should be excluded.
Returns:
output (str): Line for Variant in appropriate VCF format.
or
None: If no Locus that the Variant overlaps hits the z-score threshold.
"""
info = self.info_fields
info.insert(0, "SAMPSTV=" + ",".join(self.var_samples))
info.insert(0, "SAMPSR=" + ",".join([x.ref_samples[self] for x in self.loci][0]))
info.insert(0, "SAMPSV=" + ",".join([x.var_samples[self] for x in self.loci][0]))
# TODO - Check and make sure next two lines are functioning properly.
info.insert(0, "SAMPSNR=" + ",".join([str(x.num_valid_ref[self]) for x in self.loci]))
info.insert(0, "SAMPSNV=" + ",".join([str(x.num_valid_var[self]) for x in self.loci]))
# Use lists to maintain order in output so that LOCIID, LOCIVZ, SAMPTHN fields can all be matched up.
z_scores = []
pass_thresh = []
loci_idens = []
for item in self.loci:
loci_idens.append(item.iden)
pass_thresh.append(item.num_pass_thresh[self])
tmp = "(" + ",".join([str(round(x, 4)) for x in item.z_scores[self][0]]) + ")"
z_scores.append(tmp)
info.insert(0, "SAMPTHN=" + ",".join([str(x) for x in pass_thresh]))
info.insert(0, "LOCIVZ=" + ",".join(z_scores))
info.insert(0, "LOCIID=" + ",".join(loci_idens))
# Check if any loci have samples that pass the Z-score threshold.
# TODO - Change this so it checks that the NUMBER OF SAMPLES reaching the z-score threshold is enough.
if any([x >= 1 for x in pass_thresh]):
self.info_fields = info
self.line_list[7] = ";".join(self.info_fields)
output = "\t".join(self.line_list)
return output
else:
return None
# TODO - Move into a utils.py file and import as appropriate. Make ActLocus a sub-class of Locus along with GeneLocus.
class Locus(object):
"""
Use to process and handle loci records from an activity file more easily.
Args:
pos (Position): Position object holding genomic position of locus.
orig_line (str): String from which the object was originally created.
iden (str): Unique identifier for the locus.
data (list of float): Data values for each sample for the record.
"""
def __init__(self, line):
line_list = line.strip().split("\t")
self.pos = Position(line_list[0], int(line_list[1]), int(line_list[2]))
self.orig_line = line.strip()
self.iden = str(line_list[3])
self.data = [float(x) for x in line_list[4:]]
self.var_samples = {}
self.ref_samples = {}
self.ref_scores = {}
self.var_scores = {}
self.num_valid_ref = {}
self.num_valid_var = {}
self.num_pass_thresh = {}
self.variants = []
self.z_scores = {}
def add_variant(self, variant, var_samples, ref_samples):
"""
Add Variant object variant to list of Variants that overlap the Locus.
"""
self.variants.append(variant)
self.ref_scores[variant] = []
self.var_scores[variant] = []
self.var_samples[variant] = var_samples
self.ref_samples[variant] = ref_samples
self.num_valid_ref[variant] = len(ref_samples)
self.num_valid_var[variant] = len(var_samples)
self.num_pass_thresh[variant] = 0
self.z_scores[variant] = []
def calc_z_score(self, ref_ind, var_ind, variant, thresh=0):
"""
Calculate a robust z-score for the given locus and variant.
This uses the median absolute deviation (MAD):
https://en.wikipedia.org/wiki/Median_absolute_deviation
"""
self.num_valid_ref[variant] = len(ref_ind)
self.num_valid_var[variant] = len(var_ind)
for entry in ref_ind:
scores = self.ref_scores[variant]
scores.append(self.data[int(entry)])
self.ref_scores[variant] = scores
for entry in var_ind:
scores = self.var_scores[variant]
scores.append(self.data[int(entry)])
self.var_scores[variant] = scores
# MAD calculation.
all_scores = self.ref_scores[variant] + self.var_scores[variant]
med = median(all_scores)
abs_score = [abs(x - med) for x in all_scores]
mad = median(abs_score) * 1.4826
# 1.4826 is a constant that assumes a normal distribution to use the MAD as a consistent estimator
# of standard deviation.
robust_z_scores = [((x - med) / mad) for x in self.var_scores[variant]]
for item in robust_z_scores:
if abs(item) >= thresh: # Check number of variant samples that passed the threshold.
passed = self.num_pass_thresh[variant]
passed += 1
self.num_pass_thresh[variant] = passed
vals = self.z_scores[variant]
vals.append(robust_z_scores)
self.z_scores[variant] = vals
return
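# Worked sketch of the robust z-score above (the numbers are invented for illustration):
#   all_scores = [8, 9, 10, 11, 30]  ->  med = 10
#   abs_score  = [2, 1, 0, 1, 20]    ->  median(abs_score) = 1, mad = 1 * 1.4826 = 1.4826
#   a variant sample scoring 30 gets z = (30 - 10) / 1.4826 ~= 13.5 and would pass any
#   reasonable magnitude threshold, while a sample scoring 11 gets z ~= 0.67 and would not.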
def get_locus_output(self, include_bed=False, filter_num=0):
"""
Create list of output lines for given Locus object.
Args:
include_bed (bool): True if Variants that don't meet the z-score threshold in enough samples should still
be reported for this Locus. False if they should be skipped.
filter_num (int): Number of samples that must meet the z-score threshold for a Variant to be included in output.
Returns:
output (list of str): List of lines for Locus in appropriate BED-like format.
or
None: If include_bed is False and the Locus doesn't contain any Variants that hit the z-score threshold
in enough samples.
"""
output = []
chrom = self.pos.chrom
start = self.pos.start
end = self.pos.end
iden = self.iden
# Get info for each Variant that is necessary for output.
for item in self.variants:
num_meet_thresh = int(self.num_pass_thresh[item])
if num_meet_thresh < filter_num and include_bed is False:
continue # Just go to next Variant if required number of samples didn't meet z-score threshold.
out_line = [chrom, str(start), str(end), str(iden)]
motifs_out = ";".join(item.motif_fields)
out_line.append(item.pos.chrom + ":" + str(item.pos.start) + "_" + item.ref_allele + ">" + item.var_allele)
val_var_samps = self.var_samples[item]
num_val_var = len(self.var_samples[item])
val_ref_samps = ",".join(self.ref_samples[item])
num_val_ref = len(self.ref_samples[item])
all_var_samps = ",".join(item.var_samples)
num_all_var_samps = len(item.var_samples)
all_ref_samps = ",".join(item.ref_samples)
num_all_ref_samps = len(item.ref_samples)
# Handle z_scores.
scores_out = []
scores = self.z_scores[item]
for x in range(len(val_var_samps)):
samp = val_var_samps[x]
score = round(scores[0][x], 4)  # z_scores[variant] holds a single list of per-sample scores
scores_out.append(samp + "=" + str(score))
out_line.append(",".join(scores_out))
out_line.append(str(num_meet_thresh))
out_line.append(",".join(val_var_samps))
out_line.append(str(num_val_var))
out_line.append(val_ref_samps)
out_line.append(str(num_val_ref))
out_line.append(all_var_samps)
out_line.append(str(num_all_var_samps))
out_line.append(all_ref_samps)
out_line.append(str(num_all_ref_samps))
out_line.append(motifs_out)
output.append("\t".join(out_line))
if output: # If no variants are in the output list, just return None.
return output
else:
return None
def get_activity_samples(header_line):
"""
Parse header of activity file to return sample names and column indices.
Args:
header_line (str): Header line from activity file.
Returns:
act_samples (dict): Dictionary of {sample_name (str): sample_data_index (int)}.
sample_data_index is index for data in sample list, not the line as a whole.
e.g.: [samp1, samp2, samp3] & [20, 10, 5] for data values, then {'samp1': 0}.
"""
line_list = header_line.strip().split("\t")
samples = line_list[4:]
act_samples = {}
for item in samples:
samp_idx = samples.index(item)
sample = item.split(".")[0]
act_samples[sample] = samp_idx
return act_samples
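# Illustrative example of the mapping above (the header fields are hypothetical):
#   header = "chr\tstart\tend\tid\tsampA.rep1\tsampB.rep1"
#   get_activity_samples(header)  ->  {'sampA': 0, 'sampB': 1}
# i.e. the indices refer to positions in each Locus.data list, not to columns of the raw line.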
def compare_samples(act_samples, vcf_samples):
"""
Compare samples from activity file and vcf file.
Return only the samples found in both as a list, along with a new activity-sample dict restricted to those samples.
Args:
act_samples (dict): {(act_sample_names (str)): sample_indices (int)}
vcf_samples (list of str): List of samples found in VCF file.
Returns:
common_samps (list of str): List of names of samples found in both the activity file and VCF file.
valid_act_samps (dict): Dict of {sample_names (str): activity file data column index (int)} for samples found
in both the activity file and VCF file.
"""
common_samps = list(set(list(act_samples)) & set(vcf_samples))
valid_act_samples = {}
# Create new dict for activity samples containing only those found in VCF file as well.
for x in common_samps:
valid_act_samples[x] = act_samples[x]
return (common_samps, valid_act_samples)
def parse_activity_file(activity_file):
"""
Parse activity file to get data values for each record along with sample
names and indices.
Args:
activity_file (str): Path to activity file to process.
Returns:
act_samples (dict): Dict of {sample_name: index for activity vals}.
act_data (list of Locus): List of Locus objects.
"""
with open(activity_file) as f:
header = f.readline().strip()
act_samples = get_activity_samples(header) # Get sample names/indices.
act_data = []
for line in f:
record = Locus(line)
act_data.append(record)
return (act_samples, act_data)
def reduce_activity_names(act_samps, split_string="_"):
"""
Return only unique part of names in the passed list set.
Code assumes either start or end is unique based on unique set size
Args:
act_samps: list of strings for the activity samples
or dictionary with keys being strings for activity samples
split_string: String to split individual act_samps on
assumes single split is relevant
default = "_"
Returns:
act_samps modified to not include the non-unique part of the input strings
returns same type as the input act_samps, list or dict
"""
split_one = []
split_two = []
for sample in act_samps:
# 1. split on split_string
splitList = sample.split(split_string)
# 2. put first split and all remaining splits in 2 arrays
split_one.append(splitList[0])
if len(splitList) == 1:
# because otherwise it adds an empty list item and breaks below
split_two.append("")
else:
split_two.append(split_string.join(splitList[1:]))
# 3. determine the unique set size (just making it a set makes them unique)
s1 = set(split_one)
s2 = set(split_two)
if len(s1) > len(s2):
# s2 is the non-unique part; ie s1 is unique
act_samps_temp = list(s1)
else:
# s1 is the non-unique part; ie s2 is unique
act_samps_temp = list(s2)
if type(act_samps) is list:
# do nothing just return
return (act_samps_temp)
elif type(act_samps) is dict:
# must rebuild the dictionary
act_samps_rebuild = {}
ind = -1
for sample in act_samps:
ind = ind + 1
act_samps_rebuild[act_samps_temp[ind]] = act_samps[sample]
return (act_samps_rebuild)
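# Illustrative behaviour (the sample names are hypothetical):
#   reduce_activity_names(["GM12878_H3K27ac", "K562_H3K27ac"]) keeps only the unique
#   prefixes, i.e. the set {"GM12878", "K562"} returned as a list (order follows the
#   underlying set); a dict input is rebuilt with the reduced names as keys (the key/value
#   pairing relies on the iteration order of that set and of the dict).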
def main(vcf_file, act_file, out_vcf, out_bed, thresh=0, filter_num=0, include_bed=False, include_vcf=False,
drop_act_=1):
"""
Compare activity of loci for samples harboring a variant within a given locus to those samples that do not.
For a given motif annotated VCF file (already run through motifs.py) and a bed-like file for loci of interest and
some value for each loci for each sample, find loci that overlap a variant and compare the value of samples with
the variant to those without the variant. Report z-scores for each loci overlapped in an output VCF and report the
variants for each loci in a bed-like, loci-centric output file as well.
Args:
vcf_file (str): Path to sorted variant file to process.
act_file (str): Path to activity 'bed' file.
out_vcf (str): Path to VCF output file to be created.
out_bed (str): Path to loci output file to be created.
thresh (float, optional): Z-score magnitude that must be met for variants/loci to be reported to output.
filter_num (int, optional): Set number of samples that must meet z-score threshold for locus to be reported to
bed output file. So this number of samples must have the variant and be significantly affected by it.
include_bed (bool, optional): True if loci should be reported in the bed output even if they don't have a
variant in them that significantly affects their activity.
include_vcf (bool, optional): True if variants should be reported in the VCF output even if they don't lie in
a Locus and significantly affect its activity.
drop_act_ (integer, optional): If > 0 then break activity items on _,
return only unique part of name.
code assumes either start or end is unique based on unique set size
once dropped reruns comparison to the vcf samples
if 1: only runs if prior vcf comparison results in no overlap
if 2: runs no matter what
"""
print("Parsing activity data file: " + timeString() + ".")
act_samps, act_data = parse_activity_file(act_file)
output_vcf = open(out_vcf, "w")
output_bed = open(out_bed, "w")
loci_out = [] # Use to hold all Locus objects that overlap a Variant.
with open(vcf_file) as f:
# Add new INFO lines.
line = f.readline().strip()
now = time.strftime("%c")
info_needed = True
# TODO - Refactor this so output isn't such an enormous mess. One info field, multiple sub-fields per motif.
# TODO - Add sample names for those that pass threshold.
info = '##INFO=<ID=LOCIID,Number=.,Type=String,Description="IDs for loci that variant overlaps.">\n'
info += '##INFO=<ID=SAMPSTV,Number=.,Type=String,Description="All samples with the variant allele.">\n'
info += ('##INFO=<ID=SAMPSR,Number=.,Type=String,Description="Samples with the reference allele and loci data'
'.">\n')
info += ('##INFO=<ID=SAMPSV,Number=.,Type=String,Description="Samples with the variant allele and loci data.'
'">\n')
info += ('##INFO=<ID=LOCIVZ,Number=.,Type=String,Description="Robust z-score for each loci '
'containing the variant. Calculated for each sample containing the variant for each loci.">\n')
info += ('##INFO=<ID=SAMPTHN,Number=.,Type=Integer,Description="Number of samples in which the variant meets'
' the z-score magnitude threshold.">\n')
info += ('##INFO=<ID=SAMPSNV,Number=1,Type=Integer,Description="Number of samples containing variant'
' and having loci data.">\n')
info += ('##INFO=<ID=SAMPSNR,Number=1,Type=Integer,Description="Number of samples containing reference'
' and having loci data.">')
command = ('##venusaur=<ID=activity,Date="' + now + '",CommandLineOptions="--input ' + vcf_file +
' --activity ' + act_file + ' --outputvcf ' + out_vcf + ' --outputbed ' + out_bed +
' --threshold ' + str(thresh) + ' --filter_act_num ' + str(filter_num) + ' --include_bed ' +
str(include_bed) + ' --include_vcf ' + str(include_vcf) + '">')
# Print new info lines at the top of the ##INFO section.
while line.startswith("##"):
if info_needed and line.startswith("##INFO"):
print(command, file=output_vcf)
print(command, file=output_bed)
print(info, file=output_vcf)
info_needed = False
print(line, file=output_vcf)
line = f.readline().strip()
vcf_samples = read_line2sample_list(line) # Parse VCF sample header line to get samples present in file.
print(line, file=output_vcf)
print("Comparing samples in VCF file and activity file to find commonalities.\n")
print("VCF samples: ", *vcf_samples, end="\n\n")
print("Activity samples: ", *list(act_samps.keys()), end="\n\n")
common_samps, valid_act_samps = compare_samples(act_samps, vcf_samples) # Get common samples b/twn the two.
print("Common samples: ", *common_samps, end="\n\n")
if drop_act_ > 0:
if drop_act_ == 1 and len(common_samps) == 0:
redo_compare = True
act_samps = reduce_activity_names(act_samps)
elif drop_act_ == 2:
redo_compare = True
# merge old and new samps to match when compare_samples is run below
# if they were just lists the following would work but they are not
# act_samps = list(set(reduce_activity_names(act_samps)) | set(list(act_samps)))
extend_dict = reduce_activity_names(act_samps)
for extdictkey in extend_dict:
act_samps[extdictkey] = extend_dict[extdictkey]
else:
redo_compare = False
if redo_compare:
# Get common samples b/twn the two input sets: vcf and activity.
common_samps, valid_act_samps = compare_samples(act_samps, vcf_samples)
print("Updated Common samples: ", *common_samps, end="\n\n")
print("Processing variants. This may take some time.")
# TODO - Progress bar might actually be a decent addition.
for line in f:
current_var = Variant(line, vcf_samples)
loci_ovlp_var = []
# Check if any of the variant samples actually have activity data as well, skip if not.
for x in current_var.var_samples:
if x in common_samps:
for item in act_data:
if current_var.pos.chrom != item.pos.chrom:
continue
elif current_var.pos.overlaps(item.pos):
loci_ovlp_var.append(item)
break
# If variant overlaps no loci, print to output only if include_vcf option used.
if not loci_ovlp_var:
if include_vcf:
print(line.strip(), file=output_vcf)
continue
else:
continue
# Get activity data indices for both samples with variant and without.
var_act_indices = [valid_act_samps[x] for x in current_var.var_samples if x in valid_act_samps]
ref_act_indices = [valid_act_samps[x] for x in valid_act_samps if x not in current_var.var_samples]
# Calculate z-scores.
for x, loc in enumerate(loci_ovlp_var):
var_samples = [x for x in current_var.var_samples if x in valid_act_samps]
ref_samples = [x for x in valid_act_samps if x not in current_var.var_samples]
loc.add_variant(current_var, var_samples, ref_samples) # Add Variant to Locus object.
loc.calc_z_score(ref_act_indices, var_act_indices, current_var, thresh)
current_var.loci.append(loc) # Add Locus object to given Variant.
loci_ovlp_var[x] = loc
if loc not in loci_out:
loci_out.append(loc) # These will be used for eventual BED output.
vcf_out_line = current_var.get_variant_output(include_vcf)
if vcf_out_line is not None:
print(vcf_out_line, file=output_vcf)
elif include_vcf:
print(line.strip(), file=output_vcf)
print("Filtering loci and creating BED output.")
print("CHR", "START", "END", "ID", "VARIANT", "Z_SCORES", "NUM_PASS_THRESH", "COMMON_VAR_SAMPS",
"NUM_COMMON_VAR_SAMPS", "COMMON_REF_SAMPS", "NUM_COMMON_REF_SAMPS", "ALL_VAR_SAMPS", "NUM_ALL_VAR_SAMPS",
"ALL_REF_SAMPS", "NUM_COMMON_REF_SAMPS"
"MOTIF_INFO", sep="\t", file=output_bed)
for item in loci_out:
bed_out_line = item.get_locus_output(include_bed, filter_num)
if bed_out_line is not None:
print(*bed_out_line, sep="\n", file=output_bed)
print("Complete: " + timeString() + ".")
if __name__ == '__main__':
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument("-i", "--input", dest="input_file", required=True)
parser.add_argument("-a", "--activity", dest="activity_file", required=True)
parser.add_argument("-ov", "--outputvcf", dest="output_vcf", required=True)
parser.add_argument("-ob", "--outputbed", dest="output_bed", required=True)
parser.add_argument("-th", "--threshold", dest="threshold", required=False, default=0)
parser.add_argument("-fan", "--filter_act_num", dest="filter_a_n", required=False, default=0)
parser.add_argument("-ib", "--include_bed", action="store_true", required=False)
parser.add_argument("-iv", "--include_vcf", action="store_true", required=False)
args = parser.parse_args()
inp_file = args.input_file
act_file = args.activity_file
vcf_out = args.output_vcf
bed_out = args.output_bed
th = float(args.threshold)
filter_bed_num = int(args.filter_a_n)
include_bed = args.include_bed
include_vcf = args.include_vcf
main(inp_file, act_file, vcf_out, bed_out, th, filter_bed_num, include_bed, include_vcf)
|
py | 1a3b50d1e9e96db59c70174608801e79eef8f55b | #!/usr/bin/env python
"""
Fetches github linguist repository, process its information
and store it in database
"""
import os
import rethinkdb as r
import schedule
import shutil
import subprocess
import sys
import time
import yaml
DEVNULL = open(os.devnull, 'wb')
LANGUAGES_REPO = "https://github.com/github/linguist.git"
LANGUAGES_PATH = "./lib/linguist/languages.yml"
REPO_DIR = "/tmp/linguist"
DB_HOST = os.getenv('DB', 'localhost')
DB = r.connect(DB_HOST, 28015)
def main():
"""
Executes the job once at startup and then re-runs it on the hourly schedule
"""
try:
run()
schedule.every().hour.do(run)
while True:
schedule.run_pending()
time.sleep(1)
except:
clean()
def run():
prepare()
dates = langs_dates()
metadata = languages_metadata()
languages = []
for l in metadata:
object = {}
object['name'] = l
object['timestamp'] = dates[l]
if metadata[l].get('type', None):
object['type'] = metadata[l]['type']
if metadata[l].get('group', None):
object['group'] = metadata[l]['group']
languages.append(object)
sorted_languages = sorted(languages,
key = lambda lang: lang["timestamp"],
reverse=True)
store(sorted_languages)
clean()
def prepare():
"""
Clone the linguist repo and change the working directory to it.
It also deletes the linguist directory if it was already present
"""
clean()
subprocess.call(["git", "clone", LANGUAGES_REPO, REPO_DIR],
stdout=DEVNULL, stderr=DEVNULL)
os.chdir(REPO_DIR)
def clean():
"""
Return to the previous working directory and remove the linguist directory
"""
if os.path.exists(REPO_DIR):
os.chdir("/")
shutil.rmtree(REPO_DIR)
def langs_dates():
"""
Returns the list of languages available in the language file
with the date in which it was added
"""
language_history = set()
result = {}
for i, commit in enumerate(commits()):
actual = languages_in_commit(commit)
if i == 0:
timestamp = commit_time(commit)
for language in actual:
result[language] = timestamp
language_history = set(actual)
else:
old = language_history
language_history = language_history.union(set(actual))
diff = language_history - old
if diff:
timestamp = commit_time(commit)
for language in diff:
result[language] = timestamp
filtered = filter_deleted(result)
return filtered  # drop languages that are no longer present in the latest commit
def languages_metadata():
yaml = read_langs_file()
metadata_keys = ('type', 'group')
result = {}
for languages in yaml:
result[languages] = {k: yaml[languages][k] for k in yaml[languages] if k in metadata_keys}
return result
def commits():
"""
Returns the list of commits in ascending order that changed
the languages file without counting the commit merges
"""
commits_b = subprocess.check_output(["git", "log", "--no-merges", "--pretty=%H", LANGUAGES_PATH], stderr=DEVNULL)
commits_reverse = commits_b.decode().strip().split('\n')
return commits_reverse[::-1]
def languages_lang_file():
"""
Returns the list of language names present in the language file
"""
yaml = read_langs_file()
return list(yaml.keys())
def read_langs_file():
"""
Reads the language file
"""
with open(LANGUAGES_PATH) as langs_file:
try:
languages_yaml = yaml.load(langs_file)
return languages_yaml
except:
return {}
def languages_in_commit(commit):
"""
Returns the list of languages
present in the language file for a specific commit
"""
subprocess.call(["git", "checkout", commit, LANGUAGES_PATH],
stdout=DEVNULL, stderr=DEVNULL)
return languages_lang_file()
def commit_time(commit):
"""
Returns the commit time in epoch format of a specific commit
"""
output_b = subprocess.check_output(["git", "show", "-s", "--format=%ct",
commit])
output = output_b.decode().strip()
return int(output)
def filter_deleted(languages):
"""
Returns a hash with the languages that are in the languages argument
minus the ones that are no longer present in the last commit
"""
subprocess.call(["git", "reset", "--hard", "master"],
stdout=DEVNULL, stderr=DEVNULL)
last_languages = languages_lang_file()
filtered_languages = {}
for lang in languages:
if lang in last_languages:
filtered_languages[lang] = languages[lang]
return filtered_languages
def store(languages):
"""
Stores in database the result.
If the result is equal to the latest row in the db
it only updates the timestamp
"""
table = r.db('indielangs').table("languages")
latest, latest_id = latest_result()
if latest == languages:
table.get(latest_id).update({'timestamp': r.now()}).run(DB)
else:
row = {'languages': languages, 'timestamp': r.now()}
table.insert(row).run(DB)
def latest_result():
"""
Returns the latest row with the list of languages
available in the database and the id of the row
"""
table = r.db('indielangs').table("languages")
latest = table.order_by(r.desc('timestamp')).limit(1).run(DB)
if latest:
return latest[0]['languages'], latest[0]['id']
else:
return {}, None
if __name__ == "__main__":
sys.exit(main())
|
py | 1a3b52382054e38b54612bb83300b93fe267a8b8 | from __future__ import unicode_literals
from django.db import models
from django.db.migrations.operations.base import Operation
from django.db.migrations.state import ModelState
from django.db.models.options import normalize_together
from django.utils import six
from django.utils.functional import cached_property
from .fields import (
AddField, AlterField, FieldOperation, RemoveField, RenameField,
)
def _check_for_duplicates(arg_name, objs):
used_vals = set()
for val in objs:
if val in used_vals:
raise ValueError(
"Found duplicate value %s in CreateModel %s argument." % (val, arg_name)
)
used_vals.add(val)
class ModelOperation(Operation):
def __init__(self, name):
self.name = name
@cached_property
def name_lower(self):
return self.name.lower()
def references_model(self, name, app_label=None):
return name.lower() == self.name_lower
def reduce(self, operation, in_between, app_label=None):
return (
super(ModelOperation, self).reduce(operation, in_between, app_label=app_label) or
not operation.references_model(self.name, app_label)
)
class CreateModel(ModelOperation):
"""
Create a model's table.
"""
serialization_expand_args = ['fields', 'options', 'managers']
def __init__(self, name, fields, options=None, bases=None, managers=None):
self.fields = fields
self.options = options or {}
self.bases = bases or (models.Model,)
self.managers = managers or []
super(CreateModel, self).__init__(name)
# Sanity-check that there are no duplicated field names, bases, or
# manager names
_check_for_duplicates('fields', (name for name, _ in self.fields))
_check_for_duplicates('bases', (
base._meta.label_lower if hasattr(base, '_meta') else
base.lower() if isinstance(base, six.string_types) else base
for base in self.bases
))
_check_for_duplicates('managers', (name for name, _ in self.managers))
def deconstruct(self):
kwargs = {
'name': self.name,
'fields': self.fields,
}
if self.options:
kwargs['options'] = self.options
if self.bases and self.bases != (models.Model,):
kwargs['bases'] = self.bases
if self.managers and self.managers != [('objects', models.Manager())]:
kwargs['managers'] = self.managers
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
state.add_model(ModelState(
app_label,
self.name,
list(self.fields),
dict(self.options),
tuple(self.bases),
list(self.managers),
))
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = from_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def describe(self):
return "Create %smodel %s" % ("proxy " if self.options.get("proxy", False) else "", self.name)
def references_model(self, name, app_label=None):
name_lower = name.lower()
if name_lower == self.name_lower:
return True
# Check we didn't inherit from the model
models_to_check = [base for base in self.bases if base is not models.Model]
# Check we have no FKs/M2Ms with it
for fname, field in self.fields:
if field.remote_field:
models_to_check.append(field.remote_field.model)
# Now go over all the models and check against them
for model in models_to_check:
model_app_label, model_name = self.model_to_key(model)
if model_name.lower() == name_lower:
if app_label is None or not model_app_label or model_app_label == app_label:
return True
return False
def model_to_key(self, model):
"""
Take either a model class or an "app_label.ModelName" string
and return (app_label, object_name).
"""
if isinstance(model, six.string_types):
return model.split(".", 1)
else:
return model._meta.app_label, model._meta.object_name
def reduce(self, operation, in_between, app_label=None):
if (isinstance(operation, DeleteModel) and
self.name_lower == operation.name_lower and
not self.options.get("proxy", False)):
return []
elif isinstance(operation, RenameModel) and self.name_lower == operation.old_name_lower:
return [
CreateModel(
operation.new_name,
fields=self.fields,
options=self.options,
bases=self.bases,
managers=self.managers,
),
]
elif isinstance(operation, FieldOperation) and self.name_lower == operation.model_name_lower:
if isinstance(operation, AddField):
# Don't allow optimizations of FKs through models they reference
if hasattr(operation.field, "remote_field") and operation.field.remote_field:
for between in in_between:
# Check that it doesn't point to the model
app_label, object_name = self.model_to_key(operation.field.remote_field.model)
if between.references_model(object_name, app_label):
return False
# Check that it's not through the model
if getattr(operation.field.remote_field, "through", None):
app_label, object_name = self.model_to_key(operation.field.remote_field.through)
if between.references_model(object_name, app_label):
return False
return [
CreateModel(
self.name,
fields=self.fields + [(operation.name, operation.field)],
options=self.options,
bases=self.bases,
managers=self.managers,
),
]
elif isinstance(operation, AlterField):
return [
CreateModel(
self.name,
fields=[
(n, operation.field if n == operation.name else v)
for n, v in self.fields
],
options=self.options,
bases=self.bases,
managers=self.managers,
),
]
elif isinstance(operation, RemoveField):
return [
CreateModel(
self.name,
fields=[
(n, v)
for n, v in self.fields
if n.lower() != operation.name_lower
],
options=self.options,
bases=self.bases,
managers=self.managers,
),
]
elif isinstance(operation, RenameField):
return [
CreateModel(
self.name,
fields=[
(operation.new_name if n == operation.old_name else n, v)
for n, v in self.fields
],
options=self.options,
bases=self.bases,
managers=self.managers,
),
]
return super(CreateModel, self).reduce(operation, in_between, app_label=app_label)
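# For reference, a minimal hand-written migration using CreateModel looks like this
# (the app, model and field names below are illustrative only):
#
#   class Migration(migrations.Migration):
#       initial = True
#       operations = [
#           migrations.CreateModel(
#               name='Author',
#               fields=[
#                   ('id', models.AutoField(primary_key=True)),
#                   ('name', models.CharField(max_length=100)),
#               ],
#           ),
#       ]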
class DeleteModel(ModelOperation):
"""
Drops a model's table.
"""
def deconstruct(self):
kwargs = {
'name': self.name,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
state.remove_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = from_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def describe(self):
return "Delete model %s" % (self.name, )
class RenameModel(ModelOperation):
"""
Renames a model.
"""
def __init__(self, old_name, new_name):
self.old_name = old_name
self.new_name = new_name
super(RenameModel, self).__init__(old_name)
@cached_property
def old_name_lower(self):
return self.old_name.lower()
@cached_property
def new_name_lower(self):
return self.new_name.lower()
def deconstruct(self):
kwargs = {
'old_name': self.old_name,
'new_name': self.new_name,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
apps = state.apps
model = apps.get_model(app_label, self.old_name)
model._meta.apps = apps
# Get all of the related objects we need to repoint
all_related_objects = (
f for f in model._meta.get_fields(include_hidden=True)
if f.auto_created and not f.concrete and (not f.hidden or f.many_to_many)
)
# Rename the model
state.models[app_label, self.new_name_lower] = state.models[app_label, self.old_name_lower]
state.models[app_label, self.new_name_lower].name = self.new_name
state.remove_model(app_label, self.old_name_lower)
# Repoint the FKs and M2Ms pointing to us
for related_object in all_related_objects:
if related_object.model is not model:
# The model being renamed does not participate in this relation
# directly. Rather, a superclass does.
continue
# Use the new related key for self referential related objects.
if related_object.related_model == model:
related_key = (app_label, self.new_name_lower)
else:
related_key = (
related_object.related_model._meta.app_label,
related_object.related_model._meta.model_name,
)
new_fields = []
for name, field in state.models[related_key].fields:
if name == related_object.field.name:
field = field.clone()
field.remote_field.model = "%s.%s" % (app_label, self.new_name)
new_fields.append((name, field))
state.models[related_key].fields = new_fields
state.reload_model(*related_key)
# Repoint M2Ms with through pointing to us
related_models = {
f.remote_field.model for f in model._meta.fields
if getattr(f.remote_field, 'model', None)
}
model_name = '%s.%s' % (app_label, self.old_name)
for related_model in related_models:
if related_model == model:
related_key = (app_label, self.new_name_lower)
else:
related_key = (related_model._meta.app_label, related_model._meta.model_name)
new_fields = []
changed = False
for name, field in state.models[related_key].fields:
if field.is_relation and field.many_to_many and field.remote_field.through == model_name:
field = field.clone()
field.remote_field.through = '%s.%s' % (app_label, self.new_name)
changed = True
new_fields.append((name, field))
if changed:
state.models[related_key].fields = new_fields
state.reload_model(*related_key)
state.reload_model(app_label, self.new_name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.new_name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.old_name)
# Move the main table
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
# Alter the fields pointing to us
for related_object in old_model._meta.related_objects:
if related_object.related_model == old_model:
model = new_model
related_key = (app_label, self.new_name_lower)
else:
model = related_object.related_model
related_key = (
related_object.related_model._meta.app_label,
related_object.related_model._meta.model_name,
)
to_field = to_state.apps.get_model(
*related_key
)._meta.get_field(related_object.field.name)
schema_editor.alter_field(
model,
related_object.field,
to_field,
)
# Rename M2M fields whose name is based on this model's name.
fields = zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many)
for (old_field, new_field) in fields:
# Skip self-referential fields as these are renamed above.
if new_field.model == new_field.related_model or not new_field.remote_field.through._meta.auto_created:
continue
# Rename the M2M table that's based on this model's name.
old_m2m_model = old_field.remote_field.through
new_m2m_model = new_field.remote_field.through
schema_editor.alter_db_table(
new_m2m_model,
old_m2m_model._meta.db_table,
new_m2m_model._meta.db_table,
)
# Rename the column in the M2M table that's based on this
# model's name.
schema_editor.alter_field(
new_m2m_model,
old_m2m_model._meta.get_field(old_model._meta.model_name),
new_m2m_model._meta.get_field(new_model._meta.model_name),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower
self.new_name, self.old_name = self.old_name, self.new_name
self.database_forwards(app_label, schema_editor, from_state, to_state)
self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower
self.new_name, self.old_name = self.old_name, self.new_name
def references_model(self, name, app_label=None):
return (
name.lower() == self.old_name_lower or
name.lower() == self.new_name_lower
)
def describe(self):
return "Rename model %s to %s" % (self.old_name, self.new_name)
def reduce(self, operation, in_between, app_label=None):
if (isinstance(operation, RenameModel) and
self.new_name_lower == operation.old_name_lower):
return [
RenameModel(
self.old_name,
operation.new_name,
),
]
# Skip `ModelOperation.reduce` as we want to run `references_model`
# against self.new_name.
return (
super(ModelOperation, self).reduce(operation, in_between, app_label=app_label) or
not operation.references_model(self.new_name, app_label)
)
class AlterModelTable(ModelOperation):
"""
Renames a model's table
"""
def __init__(self, name, table):
self.table = table
super(AlterModelTable, self).__init__(name)
def deconstruct(self):
kwargs = {
'name': self.name,
'table': self.table,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
state.models[app_label, self.name_lower].options["db_table"] = self.table
state.reload_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.name)
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
# Rename M2M fields whose name is based on this model's db_table
for (old_field, new_field) in zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many):
if new_field.remote_field.through._meta.auto_created:
schema_editor.alter_db_table(
new_field.remote_field.through,
old_field.remote_field.through._meta.db_table,
new_field.remote_field.through._meta.db_table,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def describe(self):
return "Rename table for %s to %s" % (self.name, self.table)
def reduce(self, operation, in_between, app_label=None):
if isinstance(operation, (AlterModelTable, DeleteModel)) and self.name_lower == operation.name_lower:
return [operation]
return super(AlterModelTable, self).reduce(operation, in_between, app_label=app_label)
class ModelOptionOperation(ModelOperation):
def reduce(self, operation, in_between, app_label=None):
if isinstance(operation, (self.__class__, DeleteModel)) and self.name_lower == operation.name_lower:
return [operation]
return super(ModelOptionOperation, self).reduce(operation, in_between, app_label=app_label)
class FieldRelatedOptionOperation(ModelOptionOperation):
def reduce(self, operation, in_between, app_label=None):
if (isinstance(operation, FieldOperation) and
self.name_lower == operation.model_name_lower and
not self.references_field(operation.model_name, operation.name)):
return [operation, self]
return super(FieldRelatedOptionOperation, self).reduce(operation, in_between, app_label=app_label)
class AlterUniqueTogether(FieldRelatedOptionOperation):
"""
Changes the value of unique_together to the target one.
Input value of unique_together must be a set of tuples.
"""
option_name = "unique_together"
def __init__(self, name, unique_together):
unique_together = normalize_together(unique_together)
self.unique_together = set(tuple(cons) for cons in unique_together)
super(AlterUniqueTogether, self).__init__(name)
def deconstruct(self):
kwargs = {
'name': self.name,
'unique_together': self.unique_together,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name_lower]
model_state.options[self.option_name] = self.unique_together
state.reload_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.name)
schema_editor.alter_unique_together(
new_model,
getattr(old_model._meta, self.option_name, set()),
getattr(new_model._meta, self.option_name, set()),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_field(self, model_name, name, app_label=None):
return (
self.references_model(model_name, app_label) and
(
not self.unique_together or
any((name in together) for together in self.unique_together)
)
)
def describe(self):
return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.unique_together or ''))
class AlterIndexTogether(FieldRelatedOptionOperation):
"""
Changes the value of index_together to the target one.
Input value of index_together must be a set of tuples.
"""
option_name = "index_together"
def __init__(self, name, index_together):
index_together = normalize_together(index_together)
self.index_together = set(tuple(cons) for cons in index_together)
super(AlterIndexTogether, self).__init__(name)
def deconstruct(self):
kwargs = {
'name': self.name,
'index_together': self.index_together,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name_lower]
model_state.options[self.option_name] = self.index_together
state.reload_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.name)
schema_editor.alter_index_together(
new_model,
getattr(old_model._meta, self.option_name, set()),
getattr(new_model._meta, self.option_name, set()),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_field(self, model_name, name, app_label=None):
return (
self.references_model(model_name, app_label) and
(
not self.index_together or
any((name in together) for together in self.index_together)
)
)
def describe(self):
return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.index_together or ''))
class AlterOrderWithRespectTo(FieldRelatedOptionOperation):
"""
Represents a change with the order_with_respect_to option.
"""
def __init__(self, name, order_with_respect_to):
self.order_with_respect_to = order_with_respect_to
super(AlterOrderWithRespectTo, self).__init__(name)
def deconstruct(self):
kwargs = {
'name': self.name,
'order_with_respect_to': self.order_with_respect_to,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name_lower]
model_state.options['order_with_respect_to'] = self.order_with_respect_to
state.reload_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, to_model):
from_model = from_state.apps.get_model(app_label, self.name)
# Remove a field if we need to
if from_model._meta.order_with_respect_to and not to_model._meta.order_with_respect_to:
schema_editor.remove_field(from_model, from_model._meta.get_field("_order"))
# Add a field if we need to (altering the column is untouched as
# it's likely a rename)
elif to_model._meta.order_with_respect_to and not from_model._meta.order_with_respect_to:
field = to_model._meta.get_field("_order")
if not field.has_default():
field.default = 0
schema_editor.add_field(
from_model,
field,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_field(self, model_name, name, app_label=None):
return (
self.references_model(model_name, app_label) and
(
self.order_with_respect_to is None or
name == self.order_with_respect_to
)
)
def describe(self):
return "Set order_with_respect_to on %s to %s" % (self.name, self.order_with_respect_to)
class AlterModelOptions(ModelOptionOperation):
"""
Sets new model options that don't directly affect the database schema
(like verbose_name, permissions, ordering). Python code in migrations
may still need them.
"""
# Model options we want to compare and preserve in an AlterModelOptions op
ALTER_OPTION_KEYS = [
"get_latest_by",
"managed",
"ordering",
"permissions",
"default_permissions",
"select_on_save",
"verbose_name",
"verbose_name_plural",
]
def __init__(self, name, options):
self.options = options
super(AlterModelOptions, self).__init__(name)
def deconstruct(self):
kwargs = {
'name': self.name,
'options': self.options,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name_lower]
model_state.options = dict(model_state.options)
model_state.options.update(self.options)
for key in self.ALTER_OPTION_KEYS:
if key not in self.options and key in model_state.options:
del model_state.options[key]
state.reload_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
pass
def database_backwards(self, app_label, schema_editor, from_state, to_state):
pass
def describe(self):
return "Change Meta options on %s" % (self.name, )
class AlterModelManagers(ModelOptionOperation):
"""
Alters the model's managers
"""
serialization_expand_args = ['managers']
def __init__(self, name, managers):
self.managers = managers
super(AlterModelManagers, self).__init__(name)
def deconstruct(self):
return (
self.__class__.__name__,
[self.name, self.managers],
{}
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name_lower]
model_state.managers = list(self.managers)
state.reload_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
pass
def database_backwards(self, app_label, schema_editor, from_state, to_state):
pass
def describe(self):
return "Change managers on %s" % (self.name, )
|
py | 1a3b5274e033c56bb23563b3dbdf31d22faef3ef | from typing import Tuple
import os
import torch as th
import numpy as np
import gym
import copy
from torch.utils.tensorboard import SummaryWriter
from auto_rl.learning.policy import MLPActorCritic
from auto_rl.learning.buffer import OnPolicyBuffer
from auto_rl.learning.rewards import single_worker_gae, mc_reward_estimation
from auto_rl.utils.torch import change_optim_lr, grad_clip
from auto_rl.utils.gym import infer_action_size, infer_action_type
from auto_rl.simulation.run import single_worker_rollout, rollout_rew, eval_with_render
from auto_rl.utils.tensorboard import log_actor_critic_graph
from auto_rl.utils.logger import Logger
class PPO:
def __init__(self,
env: gym.Env,
policy: MLPActorCritic,
device: str,
log_dir=None):
assert policy.device == device
self.env = env
self.policy = policy
self.device = device
# general logger
self.logger = Logger()
# Tensorboard writer
self.enable_tensorboard = False
if log_dir is not None:
self.enable_tensorboard = True
if self.enable_tensorboard:
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
self.tb_writer = SummaryWriter(log_dir)
# log computational graph
log_actor_critic_graph(self.tb_writer, self.env, self.policy, self.device)
# Initialize optimizer
self.optimizer = th.optim.Adam(params=self.policy.parameters(), lr=1e-4)
# Old policy
self.policy_old = copy.deepcopy(self.policy)
self.policy_old.eval()
self.mse_loss = th.nn.MSELoss()
self.buffer = OnPolicyBuffer()
# Action type and size
self.action_type = infer_action_type(self.env)
self.action_size = infer_action_size(self.env, self.action_type)
def predict(self, obs: np.ndarray, deterministic=False):
action = self.policy_old.predict(obs, deterministic)
return action
def rollout(self, rollout_steps):
single_worker_rollout(self.env, self.policy, self.buffer, rollout_steps)
# Log
rew_mean, rew_min, rew_max = rollout_rew(self.buffer)
self.logger.add("rew. avg.", rew_mean)
self.logger.add("rew. min", rew_min)
self.logger.add("rew. max", rew_max)
if self.enable_tensorboard:
self.tb_writer.add_scalar("reward/mean", rew_mean)
self.tb_writer.add_scalar("reward/min", rew_min)
self.tb_writer.add_scalar("reward/max", rew_max)
def update(self, lr, optimize_epoch_num, batch_size,
gamma, gae_lam, ratio_clip_cnst,
entropy_coef, value_coef, grad_clip_cnst):
change_optim_lr(self.optimizer, lr)
loss, value, entropy = None, None, None
for _ in range(optimize_epoch_num):
loss, value, entropy = self.compute_loss(batch_size, gamma, gae_lam,
ratio_clip_cnst, entropy_coef, value_coef)
self.optimizer.zero_grad()
loss.backward()
if grad_clip_cnst is not None:
grad_clip(self.policy, grad_clip_cnst)
self.optimizer.step()
# Log
if loss is not None:
self.logger.add("loss", loss.detach().cpu().numpy())
self.logger.add("value", value.detach().cpu().numpy())
self.logger.add("entropy", entropy.detach().cpu().numpy())
if self.enable_tensorboard:
self.tb_writer.add_scalar("loss/loss", loss)
self.tb_writer.add_scalar("loss/value", value)
self.tb_writer.add_scalar("loss/entropy", entropy)
# Copy new weights into old policy
self.policy_old.load_state_dict(self.policy.state_dict())
assert not self.policy_old.training
self.buffer.clear()
def learn(self, total_steps,
rollout_steps,
lr,
optimize_epoch_num,
batch_size,
gamma,
gae_lam,
ratio_clip_cnst,
entropy_coef,
value_coef,
grad_clip_cnst=None,
eval_intv=None):
for i in range(total_steps // rollout_steps + 1):
self.rollout(rollout_steps)
self.update(lr, optimize_epoch_num, batch_size,
gamma, gae_lam, ratio_clip_cnst,
entropy_coef, value_coef,
grad_clip_cnst)
# Log output
self.logger.dump()
# evaluate with video
if eval_intv is not None and i % eval_intv == 0:
eval_with_render(self.env, self.policy)
def compute_loss(self, batch_size, gamma, gae_lam,
ratio_clip_cnst,
entropy_coef, value_coef, use_gae=False) \
-> Tuple[th.Tensor, th.Tensor, th.Tensor]:
if batch_size is None:
# read all data, no batch
s1, actions, rewards, dones, s2 = self.buffer.read()
else:
s1, actions, rewards, dones, s2 = self.buffer.sample(batch_size)
assert not use_gae, "Inefficient to compute GAE from random sample."
s1 = th.from_numpy(s1).float().to(self.device)
actions = th.from_numpy(actions).float().to(self.device)
_, old_log_probs, _ = self.policy_old.eval_policy(s1, actions)
assert self.policy.training
values, log_probs, entropy = self.policy.eval_policy(s1, actions)
advantages, value_estimation = self.compute_advantage(gae_lam, dones, rewards, values, gamma)
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
value_estimation = (value_estimation - value_estimation.mean()) / (value_estimation.std() + 1e-8)
ratios = th.exp(log_probs - old_log_probs.detach())
surr1 = ratios * advantages
surr2 = th.clamp(ratios, 1 - ratio_clip_cnst, 1 + ratio_clip_cnst) * advantages
loss = -th.min(surr1, surr2).mean() - entropy_coef * entropy.mean()
loss = loss + value_coef * self.mse_loss(values, value_estimation)
return loss, values.mean(), entropy.mean()
def compute_advantage(self, gae_lam, dones, rewards, values, gamma):
# FIXME: Understand GAE fully and write this part
"""if gae_lam is not None:
dones = th.from_numpy(dones).float().to(self.device)
rewards = th.from_numpy(rewards).float().to(self.device)
advantages, value_estimation = single_worker_gae(values, dones, rewards, gamma, gae_lam, self.device)
else:"""
value_estimation = mc_reward_estimation(rewards, dones, gamma)
value_estimation = th.tensor(value_estimation).float().to(self.device)
advantages = value_estimation - values.detach()
return advantages, value_estimation
def __del__(self):
if self.enable_tensorboard:
self.tb_writer.close()
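# Minimal usage sketch, assuming MLPActorCritic can be constructed from the environment's
# spaces (its exact constructor signature is not shown in this file and is an assumption):
#
#   env = gym.make("CartPole-v1")
#   policy = MLPActorCritic(env.observation_space, env.action_space, device="cpu")
#   agent = PPO(env, policy, device="cpu", log_dir="runs/ppo_cartpole")
#   agent.learn(total_steps=100_000, rollout_steps=2048, lr=3e-4, optimize_epoch_num=10,
#               batch_size=64, gamma=0.99, gae_lam=0.95, ratio_clip_cnst=0.2,
#               entropy_coef=0.01, value_coef=0.5)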
|
py | 1a3b53115fe71c53d527f38b1271964b5495e784 | import turtle
# turtle object
t = turtle.Turtle()
t.pensize(6)
turtle.bgcolor("#5383C1")
t.speed(9)
# function for creation of eye
def eye(col, rad):
t.down()
t.fillcolor(col)
t.begin_fill()
t.circle(rad)
t.end_fill()
t.up()
# function for cheeks
def cheek():
t.down()
t.fillcolor("#D03D3D");
t.begin_fill()
t.circle(20)
t.end_fill()
t.up()
# draw face
t.fillcolor('yellow')
t.begin_fill()
t.circle(100)
t.end_fill()
t.up()
# draw eyes
t.goto(-40, 120)
eye('white', 10)
t.goto(-37, 125)
eye('black', 5)
t.goto(40, 120)
eye('white', 10)
t.goto(37, 125)
eye('black', 5)
# draw nose
t.goto(0, 75)
eye('black', 8)
#draw cheek
t.goto(-80, 80)
cheek()
t.goto(80, 80)
cheek()
# draw mouth
t.goto(-40, 85)
t.down()
t.right(90)
t.circle(20, 180)
t.up()
t.goto(0, 85)
t.down()
t.right(180)
t.circle(20, 180)
t.up()
# Drawing left Ear
t.goto(-67,180)
t.down()
t.left(58)
t.fillcolor('#C29349')
t.begin_fill()
t.circle(30, 180)
t.end_fill()
t.up()
# Drawing right ear
t.goto(85, 150)
t.down()
t.right(-73)
t.fillcolor('#C29349')
t.begin_fill()
t.circle(30, 180)
t.end_fill()
t.up()
# draw tongue
t.goto(-30, 65)
t.down()
t.right(-48)
t.fillcolor('white')
t.begin_fill()
t.circle(30, 180)
t.lt(90)
t.fd(60)
t.end_fill()
t.hideturtle()
turtle.done() |
py | 1a3b53803cf9f9b3ff4799c9090782674b8265f5 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Zope Publisher-based FTP Server
This FTP server uses the Zope 3 Publisher to execute commands.
"""
import posixpath
from io import BytesIO
from zope.server.interfaces.ftp import IFileSystem
from zope.server.interfaces.ftp import IFileSystemAccess
from zope.server.ftp.server import FTPServer
from zope.publisher.publish import publish
from zope.interface import implementer
@implementer(IFileSystem)
class PublisherFileSystem(object):
"""Generic Publisher FileSystem implementation."""
def __init__(self, credentials, request_factory):
self.credentials = credentials
self.request_factory = request_factory
def type(self, path):
if path == '/':
return 'd'
return self._execute(path, 'type')
def readfile(self, path, outstream, start=0, end=None):
return self._execute(path, 'readfile',
outstream=outstream, start=start, end=end)
_name = None
for _name in ('names', 'ls'):
f = locals()[_name] = lambda self, path, filter=None, _name=_name: self._execute(
path,
_name,
split=False,
filter=filter)
f.__name__ = _name
for _name in ('lsinfo', 'mtime', 'size', 'mkdir', 'remove', 'rmdir'):
f = locals()[_name] = lambda self, path, _name=_name: self._execute(path, _name)
f.__name__ = _name
del _name
def rename(self, old, new):
'See IWriteFileSystem'
old = self._translate(old)
new = self._translate(new)
path0, old = posixpath.split(old)
path1, new = posixpath.split(new)
assert path0 == path1
return self._execute(path0, 'rename', split=False, old=old, new=new)
def writefile(self, path, instream, start=None, end=None, append=False):
'See IWriteFileSystem'
return self._execute(
path, 'writefile',
instream=instream, start=start, end=end, append=append)
def writable(self, path):
'See IWriteFileSystem'
return self._execute(path, 'writable')
def _execute(self, path, command, split=True, **kw):
env = {}
env.update(kw)
env['command'] = command
path = self._translate(path)
if split:
env['path'], env['name'] = posixpath.split(path)
else:
env['path'] = path
env['credentials'] = self.credentials
request = self.request_factory(BytesIO(b''), env)
# Note that publish() calls close() on request, which deletes the
# response from the request, so that we need to keep track of it.
# agroszer: 2008.feb.1.: currently the above seems not to be true
# request will KEEP the response on close()
# even more if a retry occurs in the publisher,
# the response will be LOST, so we must accept the returned request
request = publish(request)
return request.response.getResult()
def _translate(self, path):
# Normalize
path = posixpath.normpath(path)
if path.startswith('..'):
# Someone is trying to get lower than the permitted root.
# We just ignore it.
path = '/'
return path
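# For illustration, the normalisation above behaves roughly as follows
# (the inputs are hypothetical):
#   _translate('/a/b/../c')  -> '/a/c'
#   _translate('../../etc')  -> '/'   (attempts to escape the root collapse to '/')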
class PublisherFTPServer(FTPServer):
"""Generic FTP Server"""
def __init__(self, request_factory, name, ip, port, *args, **kw):
fs_access = PublisherFileSystemAccess(request_factory)
super(PublisherFTPServer, self).__init__(ip, port, fs_access,
*args, **kw)
@implementer(IFileSystemAccess)
class PublisherFileSystemAccess(object):
def __init__(self, request_factory):
self.request_factory = request_factory
def authenticate(self, credentials):
# We can't actually do any authentication initially, as the
# user may not be defined at the root.
pass
def open(self, credentials):
return PublisherFileSystem(credentials, self.request_factory)
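# Hedged wiring sketch (not from the original module): the request factory is
# application-specific and the name below is an assumption; it must build a
# publishable FTP request from a body stream and the environment dict that
# PublisherFileSystem._execute assembles.
#
#   def my_request_factory(body_instream, environ):
#       ...  # construct and return the application's FTP request object
#
#   server = PublisherFTPServer(my_request_factory, 'ftp', '127.0.0.1', 8021)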
|
py | 1a3b538982e98e8c042f83d0a33bb898419da21d | from Child import Child
from Node import Node # noqa: I201
ATTRIBUTE_NODES = [
# token-list -> token? token-list?
Node('TokenList', kind='SyntaxCollection',
element='Token'),
# token-list -> token token-list?
Node('NonEmptyTokenList', kind='SyntaxCollection',
element='Token', omit_when_empty=True),
Node('CustomAttribute', kind='Syntax',
description='''
A custom `@` attribute.
''',
children=[
Child('AtSignToken', kind='AtSignToken',
description='The `@` sign.'),
Child('AttributeName', kind='Type', classification='Attribute',
description='The name of the attribute.'),
Child('LeftParen', kind='LeftParenToken',
is_optional=True),
Child('ArgumentList', kind='FunctionCallArgumentList',
collection_element_name='Argument', is_optional=True),
Child('RightParen', kind='RightParenToken',
is_optional=True),
]),
# attribute -> '@' identifier '('?
# ( identifier
# | string-literal
# | integer-literal
# | availability-spec-list
# | specialize-attr-spec-list
# | implements-attr-arguments
# | differentiable-attr-arguments
# | named-attribute-string-argument
# )? ')'?
Node('Attribute', kind='Syntax',
description='''
An `@` attribute.
''',
children=[
Child('AtSignToken', kind='AtSignToken',
description='The `@` sign.'),
Child('AttributeName', kind='Token', classification='Attribute',
description='The name of the attribute.'),
Child('LeftParen', kind='LeftParenToken', is_optional=True,
description='''
If the attribute takes arguments, the opening parenthesis.
'''),
Child('Argument', kind='Syntax', is_optional=True,
node_choices=[
Child('Identifier', kind='IdentifierToken'),
Child('String', kind='StringLiteralToken'),
Child('Integer', kind='IntegerLiteralToken'),
Child('Availability', kind='AvailabilitySpecList'),
Child('SpecializeArguments',
kind='SpecializeAttributeSpecList'),
Child('ObjCName', kind='ObjCSelector'),
Child('ImplementsArguments',
kind='ImplementsAttributeArguments'),
# SWIFT_ENABLE_TENSORFLOW
Child('DifferentiableArguments',
kind='DifferentiableAttributeArguments'),
# SWIFT_ENABLE_TENSORFLOW
Child('DifferentiatingArguments',
kind='DifferentiatingAttributeArguments'),
# SWIFT_ENABLE_TENSORFLOW
Child('TransposingArguments',
kind='DifferentiatingAttributeArguments'),
Child('NamedAttributeString',
kind='NamedAttributeStringArgument'),
], description='''
The arguments of the attribute. In case the attribute takes \
multiple arguments, they are gathered in the appropriate child \
node of the attribute.
'''),
Child('RightParen', kind='RightParenToken', is_optional=True,
description='''
If the attribute takes arguments, the closing parenthesis.
'''),
# TokenList to gather remaining tokens of invalid attributes
# FIXME: Remove this recovery option entirely
Child('TokenList', kind='TokenList',
collection_element_name='Token', is_optional=True),
]),
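    # Illustrative decomposition (not part of the gyb definitions): for the Swift
    # source `@available(iOS 10.0, *)` this node would roughly break down as
    #   AtSignToken   -> '@'
    #   AttributeName -> 'available'
    #   LeftParen     -> '('
    #   Argument      -> an AvailabilitySpecList covering `iOS 10.0, *`
    #   RightParen    -> ')'
    # The exact choice for `Argument` is made by the parser; this mapping is an
    # assumption based on the node_choices listed above.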
# attribute-list -> attribute attribute-list?
Node('AttributeList', kind='SyntaxCollection',
element='Syntax', element_name='Attribute',
element_choices=[
'Attribute',
'CustomAttribute',
]),
# The argument of '@_specialize(...)'
# specialize-attr-spec-list -> labeled-specialize-entry
# specialize-spec-attr-list?
# | generic-where-clause
# specialize-spec-attr-list?
Node('SpecializeAttributeSpecList', kind='SyntaxCollection',
description='''
A collection of arguments for the `@_specialize` attribute
''',
element='Syntax', element_name='SpecializeAttribute',
element_choices=[
'LabeledSpecializeEntry',
'GenericWhereClause',
]),
# Representation of e.g. 'exported: true,'
# labeled-specialize-entry -> identifier ':' token ','?
Node('LabeledSpecializeEntry', kind='Syntax',
description='''
A labeled argument for the `@_specialize` attribute like \
`exported: true`
''',
traits=['WithTrailingComma'],
children=[
Child('Label', kind='IdentifierToken',
description='The label of the argument'),
Child('Colon', kind='ColonToken',
description='The colon separating the label and the value'),
Child('Value', kind='Token',
description='The value for this argument'),
Child('TrailingComma', kind='CommaToken',
is_optional=True, description='''
A trailing comma if this argument is followed by another one
'''),
]),
# The argument of '@_dynamic_replacement(for:)' or '@_private(sourceFile:)'
# named-attribute-string-arg -> 'name': string-literal
Node('NamedAttributeStringArgument', kind='Syntax',
description='''
The argument for the `@_dynamic_replacement` or `@_private` \
attribute of the form `for: "function()"` or `sourceFile: \
"Src.swift"`
''',
children=[
Child('NameTok', kind='Token',
description='The label of the argument'),
Child('Colon', kind='ColonToken',
description='The colon separating the label and the value'),
Child('StringOrDeclname', kind='Syntax', node_choices=[
Child('String', kind='StringLiteralToken'),
Child('Declname', kind='DeclName'),
]),
]),
Node('DeclName', kind='Syntax', children=[
Child('DeclBaseName', kind='Syntax', description='''
The base name of the protocol\'s requirement.
''',
node_choices=[
Child('Identifier', kind='IdentifierToken'),
Child('Operator', kind='PrefixOperatorToken'),
]),
Child('DeclNameArguments', kind='DeclNameArguments',
is_optional=True, description='''
The argument labels of the protocol\'s requirement if it \
is a function requirement.
'''),
]),
# The argument of '@_implements(...)'
# implements-attr-arguments -> simple-type-identifier ','
# (identifier | operator) decl-name-arguments
Node('ImplementsAttributeArguments', kind='Syntax',
description='''
The arguments for the `@_implements` attribute of the form \
`Type, methodName(arg1Label:arg2Label:)`
''',
children=[
Child('Type', kind='SimpleTypeIdentifier', description='''
The type for which the method with this attribute \
implements a requirement.
'''),
Child('Comma', kind='CommaToken',
description='''
The comma separating the type and method name
'''),
Child('DeclBaseName', kind='Syntax', description='''
The base name of the protocol\'s requirement.
''',
node_choices=[
Child('Identifier', kind='IdentifierToken'),
Child('Operator', kind='PrefixOperatorToken'),
]),
Child('DeclNameArguments', kind='DeclNameArguments',
is_optional=True, description='''
The argument labels of the protocol\'s requirement if it \
is a function requirement.
'''),
]),
# SWIFT_ENABLE_TENSORFLOW
# The argument of '@differentiable(...)'.
# differentiable-attr-arguments ->
# differentiation-params-clause? ','?
# differentiable-attr-func-specifier? # primal
# differentiable-attr-func-specifier? # adjoint
# differentiable-attr-func-specifier? # jvp
# differentiable-attr-func-specifier? # vjp
# generic-where-clause?
# FIXME: There is currently no guarantee that 'MaybePrimal' is in fact
# the primal specifier, it could be any specifier. The current syntax
# definitions only ensure that there are between 0 and 4 function
# specifiers. A more robust definition would enforce that specific function
# specifiers appear only once, in order.
Node('DifferentiableAttributeArguments', kind='Syntax',
description='''
The arguments for the `@differentiable` attribute: an optional \
differentiation parameter list and associated functions.
''',
children=[
Child('DiffParams', kind='DifferentiationParamsClause',
is_optional=True),
Child('DiffParamsComma', kind='CommaToken', description='''
The comma following the differentiation parameters clause,
if it exists.
''', is_optional=True),
Child('MaybePrimal', kind='DifferentiableAttributeFuncSpecifier',
is_optional=True),
Child('MaybeAdjoint', kind='DifferentiableAttributeFuncSpecifier',
is_optional=True),
Child('MaybeJVP', kind='DifferentiableAttributeFuncSpecifier',
is_optional=True),
Child('MaybeVJP', kind='DifferentiableAttributeFuncSpecifier',
is_optional=True),
Child('WhereClause', kind='GenericWhereClause', is_optional=True),
]),
# differentiation-params-clause ->
# 'wrt' ':' (differentiation-param | differentiation-params)
Node('DifferentiationParamsClause', kind='Syntax',
description='A clause containing differentiation parameters.',
children=[
Child('WrtLabel', kind='IdentifierToken',
text_choices=['wrt'], description='The "wrt" label.'),
Child('Colon', kind='ColonToken', description='''
The colon separating "wrt" and the parameter list.
'''),
Child('Parameters', kind='Syntax',
node_choices=[
Child('Parameter', kind='DifferentiationParam'),
Child('ParameterList', kind='DifferentiationParams'),
]),
]),
# differentiation-params -> '(' differentiation-param-list ')'
Node('DifferentiationParams', kind='Syntax',
description='The differentiation parameters.',
children=[
Child('LeftParen', kind='LeftParenToken'),
Child('DiffParams', kind='DifferentiationParamList',
collection_element_name='DifferentiationParam',
description='The parameters for differentiation.'),
Child('RightParen', kind='RightParenToken'),
]),
# differentiation-param-list ->
# differentiation-param differentiation-param-list?
Node('DifferentiationParamList', kind='SyntaxCollection',
element='DifferentiationParam'),
# differentiation-param -> ('self' | identifier) ','?
Node('DifferentiationParam', kind='Syntax',
description='''
A differentiation parameter: either the "self" identifier or a \
function parameter name.
''',
traits=['WithTrailingComma'],
children=[
Child('Parameter', kind='Syntax',
node_choices=[
Child('Self', kind='SelfToken'),
Child('Name', kind='IdentifierToken'),
]),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
# differentiable-attr-func-specifier ->
# ('jvp' | 'vjp') ':' func-decl-name ','?
Node('DifferentiableAttributeFuncSpecifier', kind='Syntax',
description='''
A function specifier, consisting of an identifier, colon, and a \
function declaration name (e.g. `vjp: foo(_:_:)`).
''',
traits=['WithTrailingComma'],
children=[
Child('Label', kind='IdentifierToken',
text_choices=['jvp', 'vjp']),
Child('Colon', kind='ColonToken'),
Child('FunctionDeclName', kind='FunctionDeclName',
description='The referenced function name.'),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
# func-decl-name -> (identifier | operator) decl-name-arguments?
# NOTE: This is duplicated with `DeclName` above. Change `DeclName`
# description and use it if possible.
Node('FunctionDeclName', kind='Syntax',
description='A function declaration name (e.g. `foo(_:_:)`).',
children=[
Child('Name', kind='Syntax', description='''
The base name of the referenced function.
''',
node_choices=[
Child('Identifier', kind='IdentifierToken'),
Child('PrefixOperator', kind='PrefixOperatorToken'),
Child('SpacedBinaryOperator',
kind='SpacedBinaryOperatorToken'),
]),
Child('Arguments', kind='DeclNameArguments',
is_optional=True, description='''
The argument labels of the referenced function, optionally \
specified.
'''),
]),
# SWIFT_ENABLE_TENSORFLOW
# The argument of '@differentiating(...)'.
# differentiating-attr-arguments ->
# func-decl-name ','? differentiable-attr-parameters?
Node('DifferentiatingAttributeArguments', kind='Syntax',
description='''
The arguments for the `@differentiating` attribute: the original
function and an optional differentiation parameter list.
''',
children=[
Child('Original', kind='FunctionDeclName',
description='The referenced original function.'),
Child('Comma', kind='CommaToken', is_optional=True),
Child('DiffParams', kind='DifferentiationParamsClause',
is_optional=True),
]),
# SWIFT_ENABLE_TENSORFLOW
# The argument of '@transposing(...)'.
# transposing-attr-arguments ->
# func-decl-name ','? differentiable-attr-parameters?
Node('TransposingAttributeArguments', kind='Syntax',
description='''
The arguments for the `@transposing` attribute: the original
function and an optional differentiation parameter list.
''',
children=[
Child('Original', kind='FunctionDeclName',
description='The referenced original function.'),
Child('Comma', kind='CommaToken', is_optional=True),
Child('DiffParams', kind='DifferentiationParamsClause',
is_optional=True),
]),
# objc-selector-piece -> identifier? ':'?
Node('ObjCSelectorPiece', kind='Syntax',
description='''
A piece of an Objective-C selector. Either consisting of just an \
identifier for a nullary selector, an identifier and a colon for a \
labeled argument or just a colon for an unlabeled argument
''',
children=[
Child('Name', kind='IdentifierToken', is_optional=True),
Child('Colon', kind='ColonToken', is_optional=True),
]),
# objc-selector -> objc-selector-piece objc-selector?
Node('ObjCSelector', kind='SyntaxCollection', element='ObjCSelectorPiece')
]
|
py | 1a3b541398c77f4527ceec753b432338fe12b9c6 | from protocolbuffers import UI_pb2
from careers.career_enums import CareerCategory, WORK_CAREER_CATEGORIES
from careers.career_ops import CareerTimeOffReason
from date_and_time import TimeSpan, DateAndTime
from distributor.shared_messages import build_icon_info_msg, IconInfoData
from drama_scheduler.drama_node import BaseDramaNode, DramaNodeUiDisplayType, DramaNodeRunOutcome
from drama_scheduler.drama_node_types import DramaNodeType
from holidays.holiday_globals import HolidayState, HolidayTuning
from sims4.localization import TunableLocalizedStringFactory
from sims4.tuning.instances import lock_instance_tunables
from sims4.tuning.tunable import TunableReference, OptionalTunable
from sims4.utils import classproperty
from situations.bouncer.bouncer_types import RequestSpawningOption, BouncerRequestPriority
from situations.situation_guest_list import SituationGuestList, SituationGuestInfo
from situations.situation_types import SituationCallbackOption
from tunable_time import TunableTimeSpan
import alarms
import services
import sims4.log
import sims4.resources
logger = sims4.log.Logger('HolidayDramaNode', default_owner='nsavalani')
HOLIDAY_START_TIME_TOKEN = 'holiday_start_time_ticks'
HOLIDAY_END_TIME_TOKEN = 'holiday_end_time_ticks'
class HolidayDramaNode(BaseDramaNode):
INSTANCE_TUNABLES = {'pre_holiday_duration': TunableTimeSpan(description="\n This duration is used to calculate the drama node's start time for\n main holidays by subtracting the tuned amount from the globally \n tuned start time. The player is notified with a reminder for the\n holiday, and decorations will be put up in the neighborhood.\n For surprise holidays, this should be set to 0, as surprise \n holidays have no pre-holiday state.\n ", default_hours=23, locked_args={'days': 0, 'minutes': 0}), 'holiday': TunableReference(description='\n The holiday that this drama node starts.\n ', manager=services.get_instance_manager(sims4.resources.Types.HOLIDAY_DEFINITION))}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._holiday_alarm = None
self._state = None
self._situation_ids = []
self._holiday_end_time = None
self._active_household_id = None
self._holiday_start_time = None
@classproperty
def drama_node_type(cls):
return DramaNodeType.HOLIDAY
@classproperty
def persist_when_active(cls):
return True
@classproperty
def simless(cls):
return True
@property
def is_in_preholiday(self):
return self._state == HolidayState.PRE_DAY
@property
def is_running(self):
return self._state == HolidayState.RUNNING
@property
def holiday_id(self):
return self.holiday.guid64
@property
def day(self):
if self._holiday_start_time is not None:
return int(self._holiday_start_time.absolute_days())
actual_start_time = self._selected_time + self.pre_holiday_duration()
return int(actual_start_time.absolute_days())
def get_time_off_reason(self, sim_info, career_category, career_end_time):
holiday_service = services.holiday_service()
if self._state == HolidayState.SHUTDOWN or holiday_service is None:
return CareerTimeOffReason.NO_TIME_OFF
take_time_off = False
if career_category == CareerCategory.School:
take_time_off = holiday_service.get_holiday_time_off_school(self.holiday_id)
elif career_category in WORK_CAREER_CATEGORIES:
take_time_off = holiday_service.get_holiday_time_off_work(self.holiday_id)
elif career_category == CareerCategory.Volunteer or career_category == CareerCategory.UniversityCourse:
take_time_off = False
else:
logger.error('Unexpected CareerCategory {} when determining if a holiday should give Sims time off.', career_category)
if take_time_off and (self.is_running or self.get_calendar_start_time() < career_end_time):
return HolidayTuning.HOLIDAY_TIME_OFF_REASON
return CareerTimeOffReason.NO_TIME_OFF
def create_calendar_entry(self):
calendar_entry = super().create_calendar_entry()
active_household = services.active_household()
if active_household is not None:
holiday_service = services.holiday_service()
build_icon_info_msg(IconInfoData(icon_resource=holiday_service.get_holiday_display_icon(self.holiday_id)), holiday_service.get_holiday_display_name(self.holiday_id), calendar_entry.icon_info)
calendar_entry.holiday_id = self.holiday_id
for tradition in holiday_service.get_holiday_traditions(self.holiday_id):
calendar_entry.tradition_ids.append(tradition.guid64)
return calendar_entry
def create_calendar_alert(self):
if self.ui_display_type == DramaNodeUiDisplayType.POP_UP_HOLIDAY:
return
holiday_service = services.holiday_service()
calendar_alert = super().create_calendar_alert()
calendar_alert_description = holiday_service.get_holiday_calendar_alert_notification(self.holiday_id)
if calendar_alert_description is not None:
calendar_alert.description = calendar_alert_description(holiday_service.get_holiday_display_name(self.holiday_id))
build_icon_info_msg(IconInfoData(icon_resource=holiday_service.get_holiday_display_icon(self.holiday_id)), holiday_service.get_holiday_display_name(self.holiday_id), calendar_alert.calendar_icon)
for tradition in holiday_service.get_holiday_traditions(self.holiday_id):
calendar_alert.tradition_ids.append(tradition.guid64)
return calendar_alert
def get_calendar_start_time(self):
return self.selected_time.time_of_next_day_time(HolidayTuning.MAIN_HOLIDAY_START_TIME)
def get_calendar_end_time(self):
return self.get_calendar_start_time() + HolidayTuning.HOLIDAY_DURATION()
def _run_pre_holiday(self, from_load=False):
self._state = HolidayState.PRE_DAY
now = services.time_service().sim_now
time_to_holiday_start = now.time_till_next_day_time(HolidayTuning.MAIN_HOLIDAY_START_TIME)
self._holiday_start_time = now + time_to_holiday_start
self._holiday_alarm = alarms.add_alarm(self, time_to_holiday_start, lambda _: self._run_holiday())
active_household = services.active_household()
active_household.holiday_tracker.preactivate_holiday(self.holiday_id)
self._active_household_id = active_household.id
lot_decoration_service = services.lot_decoration_service()
if lot_decoration_service is not None:
lot_decoration_service.request_holiday_decorations(self, from_load=from_load)
def _on_holiday_situation_ended(self, situation_id, callback_option, _):
current_zone = services.current_zone()
if current_zone.is_zone_shutting_down:
return
self._unregister_situation_ended_callbacks()
self._end_holiday()
active_household = services.active_household()
if active_household is not None:
active_household.holiday_tracker.cancel_holiday(self.holiday_id)
def _register_situation_ended_callbacks(self):
situation_manager = services.get_zone_situation_manager()
for situation_id in self._situation_ids:
situation_manager.register_for_callback(situation_id, SituationCallbackOption.END_OF_SITUATION, self._on_holiday_situation_ended)
def _unregister_situation_ended_callbacks(self):
situation_manager = services.get_zone_situation_manager()
for situation_id in self._situation_ids:
situation_manager.unregister_callback(situation_id, SituationCallbackOption.END_OF_SITUATION, self._on_holiday_situation_ended)
def _run_holiday(self, from_load=False):
self._state = HolidayState.RUNNING
if not from_load:
self._holiday_end_time = services.time_service().sim_now + HolidayTuning.HOLIDAY_DURATION()
holiday_duration = HolidayTuning.HOLIDAY_DURATION()
else:
holiday_duration = self._holiday_end_time - services.time_service().sim_now
self._holiday_alarm = alarms.add_alarm(self, holiday_duration, self._holiday_end_callback)
active_household = services.active_household()
holiday_tracker = active_household.holiday_tracker
if holiday_tracker.is_holiday_cancelled(self.holiday_id):
return
holiday_tracker.activate_holiday(self.holiday_id, from_load=from_load)
self._active_household_id = active_household.id
lot_decoration_service = services.lot_decoration_service()
if lot_decoration_service is not None:
lot_decoration_service.request_holiday_decorations(self, from_load=from_load)
if from_load:
(situation_ids, sims_needing_situations) = holiday_tracker.load_holiday_situations(self.holiday_id)
self._situation_ids.extend(situation_ids)
if not sims_needing_situations:
self._register_situation_ended_callbacks()
return
else:
sims_needing_situations = [sim_info for sim_info in active_household.sim_infos if sim_info.is_human]
holiday_service = services.holiday_service()
holiday_goals = list(tradition.situation_goal for tradition in holiday_service.get_holiday_traditions(self.holiday_id))
for sim_info in sims_needing_situations:
situation_id = self._create_holiday_situation(sim_info, holiday_goals)
self._register_situation_ended_callbacks()
def on_sim_added(self, sim_info):
if self._state != HolidayState.RUNNING:
return
holiday_goals = list(tradition.situation_goal for tradition in services.holiday_service().get_holiday_traditions(self.holiday_id))
situation_id = self._create_holiday_situation(sim_info, holiday_goals)
if situation_id:
services.get_zone_situation_manager().register_for_callback(situation_id, SituationCallbackOption.END_OF_SITUATION, self._on_holiday_situation_ended)
def _create_holiday_situation(self, sim_info, holiday_goals):
guest_list = SituationGuestList(invite_only=True, host_sim_id=sim_info.id)
guest_list.add_guest_info(SituationGuestInfo(sim_info.id, HolidayTuning.HOLIDAY_JOB, RequestSpawningOption.DONT_CARE, BouncerRequestPriority.EVENT_VIP))
situation_id = services.get_zone_situation_manager().create_situation(HolidayTuning.HOLIDAY_SITUATION, guest_list=guest_list, linked_sim_id=sim_info.id, dynamic_goals=holiday_goals)
if situation_id:
self._situation_ids.append(situation_id)
return situation_id
def _give_time_off_loot(self, sim_info, time_off_loot):
if sim_info is not None and time_off_loot is not None:
resolver = sim_info.get_resolver()
time_off_loot.apply_to_resolver(resolver)
def _end_holiday(self):
active_household = services.active_household()
if not active_household.holiday_tracker.is_holiday_cancelled(self.holiday_id):
self._unregister_situation_ended_callbacks()
for situation_id in self._situation_ids:
services.get_zone_situation_manager().destroy_situation_by_id(situation_id)
active_household.holiday_tracker.deactivate_holiday()
def _holiday_end_callback(self, _):
self._state = HolidayState.SHUTDOWN
self._unregister_situation_ended_callbacks()
self._end_holiday()
services.drama_scheduler_service().complete_node(self.uid)
def schedule(self, resolver, specific_time=None, time_modifier=TimeSpan.ZERO):
self._state = HolidayState.INITIALIZED
success = super().schedule(resolver, specific_time=specific_time, time_modifier=time_modifier)
if success:
services.calendar_service().mark_on_calendar(self, advance_notice_time=HolidayTuning.HOLIDAY_DURATION())
return success
def cleanup(self, from_service_stop=False):
if self._holiday_alarm is not None:
self._holiday_alarm.cancel()
self._holiday_alarm = None
services.calendar_service().remove_on_calendar(self.uid)
super().cleanup(from_service_stop=from_service_stop)
if self._state == HolidayState.PRE_DAY:
household = services.household_manager().get(self._active_household_id)
if household is not None:
household.holiday_tracker.deactivate_pre_holiday()
elif self._state == HolidayState.RUNNING:
household = services.household_manager().get(self._active_household_id)
if household is not None and not household.holiday_tracker.is_holiday_cancelled(self.holiday_id):
household.holiday_tracker.deactivate_holiday()
if self._state in (HolidayState.PRE_DAY, HolidayState.RUNNING, HolidayState.SHUTDOWN):
lot_decoration_service = services.lot_decoration_service()
if lot_decoration_service is not None:
lot_decoration_service.cancel_decoration_requests_for(self)
def _select_time(self, specific_time=None, time_modifier=TimeSpan.ZERO):
if specific_time is None:
result = super()._select_time(time_modifier=time_modifier)
if not result:
return result
drama_scheduler_service = services.drama_scheduler_service()
for drama_node in drama_scheduler_service.scheduled_nodes_gen():
if drama_node.drama_node_type != DramaNodeType.HOLIDAY and drama_node.drama_node_type != DramaNodeType.PLAYER_PLANNED:
continue
if drama_node.day == self.day:
return False
return True
holiday_start_time = specific_time.time_of_next_day_time(HolidayTuning.MAIN_HOLIDAY_START_TIME)
now = services.time_service().sim_now
if holiday_start_time < now:
return False
selected_time = holiday_start_time + self.pre_holiday_duration()*-1
if selected_time < now:
selected_time = now + TimeSpan.ONE
self._selected_time = selected_time
return True
def _save_custom_data(self, writer):
if self._holiday_start_time is not None:
writer.write_uint64(HOLIDAY_START_TIME_TOKEN, self._holiday_start_time.absolute_ticks())
if self._holiday_end_time is not None:
writer.write_uint64(HOLIDAY_END_TIME_TOKEN, self._holiday_end_time.absolute_ticks())
def _load_custom_data(self, reader):
holiday_start_time_ticks = reader.read_uint64(HOLIDAY_START_TIME_TOKEN, None)
if holiday_start_time_ticks is not None:
self._holiday_start_time = DateAndTime(holiday_start_time_ticks)
holiday_end_time_ticks = reader.read_uint64(HOLIDAY_END_TIME_TOKEN, None)
if holiday_end_time_ticks is not None:
self._holiday_end_time = DateAndTime(holiday_end_time_ticks)
if self._holiday_start_time and not self._holiday_end_time and self._holiday_start_time + HolidayTuning.HOLIDAY_DURATION() < services.time_service().sim_now:
return False
return True
def resume(self):
now = services.time_service().sim_now
if now < self._holiday_start_time:
self._run_pre_holiday(from_load=True)
else:
self._run_holiday(from_load=True)
def _run(self):
if self.pre_holiday_duration().in_ticks() == 0:
self._run_holiday()
self._holiday_start_time = services.time_service().sim_now
else:
self._run_pre_holiday()
return DramaNodeRunOutcome.SUCCESS_NODE_INCOMPLETE
def load(self, drama_node_proto, schedule_alarm=True):
super_success = super().load(drama_node_proto, schedule_alarm=schedule_alarm)
if not super_success:
return False
services.calendar_service().mark_on_calendar(self, advance_notice_time=HolidayTuning.HOLIDAY_DURATION())
return True
lock_instance_tunables(HolidayDramaNode, ui_display_data=None)
HOLIDAY_ID_TOKEN = 'holiday_id'
class CustomHolidayDramaNode(HolidayDramaNode):
REMOVE_INSTANCE_TUNABLES = ('holiday',)
def __init__(self, *args, holiday_id=None, **kwargs):
super().__init__(*args, **kwargs)
self._holiday_id = holiday_id
@property
def holiday_id(self):
return self._holiday_id
def _save_custom_data(self, writer):
super()._save_custom_data(writer)
writer.write_uint64(HOLIDAY_ID_TOKEN, self._holiday_id)
def _load_custom_data(self, reader):
self._holiday_id = reader.read_uint64(HOLIDAY_ID_TOKEN, None)
return super()._load_custom_data(reader)
|
py | 1a3b5453ff7c725870a10902aa1cf1d4f1dd7d7d | import ray
from copy import deepcopy
from leaderboard.leaderboard_evaluator import LeaderboardEvaluator
from leaderboard.utils.statistics_manager import StatisticsManager
class ChallengeRunner():
def __init__(self, args, scenario, route, port=1000, tm_port=1002, debug=False):
args = deepcopy(args)
# Inject args
args.scenario_class = 'route_scenario'
args.port = port
args.trafficManagerPort = tm_port
args.scenarios = scenario
args.routes = route
args.debug = debug
args.record = ''
self.runner = LeaderboardEvaluator(args, StatisticsManager())
self.args = args
def run(self):
return self.runner.run(self.args)
|
py | 1a3b547ab31e4700c551f17e8be3e30300d9f363 | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 30, transform = "BoxCox", sigma = 0.0, exog_count = 0, ar_order = 0); |
py | 1a3b54f105b65c24ed5f2400f7dc1d8ade4c2aaa | import os
import boto3
from base import LambdaFunctionBase
class CWScheduledEventManageEC2State(LambdaFunctionBase):
"""
Class starting or stopping EC2 instances not part of a AutoScaling group.
"""
# Section specific to the lambda.
ACTION = os.environ['PARAM_ACTION']
RESOURCE_TAG_KEY = os.environ['PARAM_RESOURCE_TAG_KEY']
RESOURCE_TAG_VALUE = os.environ['PARAM_RESOURCE_TAG_VALUE']
AWS_REGIONS = os.environ['PARAM_AWS_REGIONS'].split(',')
def _get_ec2_instance_ids_by_tag(self, aws_region_name, instance_state, tag_key, tag_value):
""" Returns all resources identifiers linked to tag. """
ec2_client = boto3.client('ec2', region_name=aws_region_name)
autoscaling_client = boto3.client('autoscaling', region_name=aws_region_name)
# Finds EC2 instances.
resource_pages = ec2_client.get_paginator('describe_instances').paginate(
Filters=[
{
'Name': f'tag:{tag_key}',
'Values': [
tag_value
]
},
{
'Name': 'instance-state-name',
'Values': [
instance_state
]
}
]
)
# Browse EC2 instances and exclude EC2 member of a AutoScalingGroup.
ec2_instance_ids = []
for resource_page in resource_pages:
for resource in resource_page['Reservations']:
for ec2_instance in resource['Instances']:
ec2_instance_id = ec2_instance['InstanceId']
# Check if part of an autoscaling group.
is_part_of_autoscaling_group = len(autoscaling_client.describe_auto_scaling_instances(
InstanceIds=[
ec2_instance_id,
]
)['AutoScalingInstances']) > 0
# If not, the instance is eligible.
if not is_part_of_autoscaling_group:
self.logger.debug('>> Instance %s is eligible.', ec2_instance_id)
ec2_instance_ids.append(ec2_instance_id)
else:
self.logger.debug('>> Instance %s is not eligible as part of an AutoScaling Group.', ec2_instance_id)
return ec2_instance_ids
def _stop_ec2_instances(self, aws_region_name, ec2_instance_ids):
""" Stop the EC2 instances. """
ec2_client = boto3.client('ec2', region_name=aws_region_name)
self.logger.info('> Stopping EC2 instances.')
for ec2_instance_id in ec2_instance_ids:
self.logger.debug('>> Stopping instance %s.', ec2_instance_id)
ec2_client.stop_instances(InstanceIds=[ec2_instance_id])
self.logger.info('>> EC2 Instance %s => [STOPPED].', ec2_instance_id)
def _start_ec2_instances(self, aws_region_name, ec2_instance_ids):
""" Start the EC2 instances. """
ec2_client = boto3.client('ec2', region_name=aws_region_name)
self.logger.info('> Starting EC2 instances.')
for ec2_instance_id in ec2_instance_ids:
self.logger.debug('>> Starting instance %s.', ec2_instance_id)
ec2_client.start_instances(InstanceIds=[ec2_instance_id])
self.logger.info('>> EC2 Instance %s => [RUNNING].', ec2_instance_id)
def _execute(self, event, context): # pylint: disable=W0613
""" Execute the method. """
self.logger.info('Starting the operation.')
if self.ACTION in ['enable', 'start']:
ec2_instance_state = 'stopped'
elif self.ACTION in ['disable', 'stop']:
ec2_instance_state = 'running'
else:
raise Exception('Unexpected action.')
for aws_region_name in self.AWS_REGIONS:
self.logger.info('> Searching EC2 instances in region %s having tag %s=%s and state=%s.',
aws_region_name, self.RESOURCE_TAG_KEY, self.RESOURCE_TAG_VALUE, ec2_instance_state)
# Get EC2 by tag.
ec2_instance_ids = self._get_ec2_instance_ids_by_tag(aws_region_name, ec2_instance_state, self.RESOURCE_TAG_KEY, self.RESOURCE_TAG_VALUE)
self.logger.info('> Found %s EC2 instances in region %s having tag %s=%s and state=%s.',
str(len(ec2_instance_ids)), aws_region_name, self.RESOURCE_TAG_KEY, self.RESOURCE_TAG_VALUE, ec2_instance_state)
# Start/Stop
if len(ec2_instance_ids) > 0:
if self.ACTION in ['enable', 'start']:
self._start_ec2_instances(aws_region_name, ec2_instance_ids)
elif self.ACTION in ['disable', 'stop']:
self._stop_ec2_instances(aws_region_name, ec2_instance_ids)
self.logger.info('Operation completed successfully.')
return self._build_response_ok()
def lambda_handler(event, context):
""" Function invoked by AWS. """
return CWScheduledEventManageEC2State().process_event(event, context)
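# Hedged local-test sketch (not part of the deployed handler): the PARAM_*
# environment variables are read at import time, so they must be exported
# before importing this module; the values and the module name below are
# assumptions for illustration only.
#
#   PARAM_ACTION=stop PARAM_RESOURCE_TAG_KEY=scheduler \
#   PARAM_RESOURCE_TAG_VALUE=office-hours PARAM_AWS_REGIONS=eu-west-1 \
#   python -c "import handler; print(handler.lambda_handler({'source': 'aws.events'}, None))"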
|
py | 1a3b56185707f50f9553067ee74ce5232b7db405 | # -*- coding: utf-8 -*-
task = Task("MainStory69",desc = u"自动主线69",pretask = ["MainStory15"])
#task.addSetupActionSet("RefreshGame",tag="pre1",desc="RefreshGame")
#task.addTeardownActionSet("TypeCommand",tag= "end1",desc = u"清除所有任务",mp = {'command':'$cleartask 1'})
#task.addTeardownActionSet("TypeCommand",tag= "end2",desc = u"重置等级为1级",mp ={'command':'$r who.Base.m_Grade = 1;who.Base.m_Exp=0;who.CalculateProp();who.SendPropChange();'})
#task.addTeardownActionSet("TypeCommand",tag= "end3",desc = u"添加主线任务",mp = {'command':'$task 10000'})
tasksuit.addTask(task)
step = Step("step0.5",u"预处理")
task.addStep(step)
#step.addActionSet("InputNamePsd",tag = "login",desc = "login input username and password", mp={'username':'autotest1','password':'123456'})
#step.addActionSet("TypeCommand",tag = "0.5",desc = u"获得万劫不复技能", mp = {'command':'$setskill 2099 1'})
#action 0.1
arg = { "detectRegion": gl.AREA_ICON_MAP ,"imagePattern" : "icon_map.png",
"clk_region" : GetRegionFromGrid(16) ,"clk_ptn" : Location(GetRegionFromGrid(16).getX()+40, GetRegionFromGrid(16).getY()+40),
"failResponse" : "Fail"}
act = ClickAction(tag = "0.1",desc=u"点击人物角色信息", **arg)
step.addAction(act)
arg = { "detectRegion": GetRegionFromGrid(140,141) ,"imagePattern" : "levelupbutton.png",
"failResponse" : "Fail"}
act = ClickAction(tag = "0.2",desc=u"点击升级按钮", **arg)
step.addAction(act)
arg = { "detectRegion": GetRegionFromGrid(89,91) ,"imagePattern" : "btn_queren.png",
"failResponse" : "Fail"}
act = ClickAction(tag = "0.3",desc=u"点击确认按钮", **arg)
step.addAction(act)
arg = { "time" : 1}
act = SleepAction(tag = "0.4",desc=u"wait 1s", **arg)
step.addAction(act)
# Note the similarity threshold of the close button: the smaller the image, the more it needs to be verified with the Sikuli IDE match preview to avoid false matches.
arg = { "detectRegion": GetRegionFromGrid(13,30) ,"imagePattern" : Pattern("btn_close_welfare_center.png").similar(0.80),
"failResponse" : "Fail"}
act = ClickAction(tag = "0.41",desc=u"关闭人物信息", **arg)
step.addAction(act)
arg = { "time" : 1}
act = SleepAction(tag = "0.42",desc=u"wait 1s", **arg)
step.addAction(act)
step.addActionSet("TypeCommand",tag = "0.5",desc = u"升一级", mp = {'command':'$addexp 1000000000'})
step.addActionSet("TypeCommand",tag = "0.6",desc = u"升一级", mp = {'command':'$addexp 1'})
step.addActionSet("TypeCommand",tag = "0.7",desc = u"升一级", mp = {'command':'$addexp 1'})
step.addActionSet("TypeCommand",tag = "0.8",desc = u"升一级", mp = {'command':'$addexp 1'})
step.addActionSet("TypeCommand",tag = "0.8",desc = u"升一级", mp = {'command':'$addexp 1'})
step.addActionSet("TypeCommand",tag = "0.8",desc = u"升一级", mp = {'command':'$addexp 1'})
step.addActionSet("TypeCommand",tag = "0.9",desc = u"升一级", mp = {'command':'$r who.Base.m_Grade = 92;who.Base.m_Exp=0;who.CalculateProp();who.SendPropChange();'})
#step1
step = Step("step1",u"主循环")
task.addStep(step)
#action1.7
arg = { "detectRegion": GetRegionFromGrid(13,30) ,"imagePattern" : Pattern("btn_close_welfare_center.png").similar(0.80),
"failResponse" : "Ignore" ,"loopWaitingTime": 0 }
act = ClickAction(tag = "1.7",desc=u"关闭人物信息(防止误操作)", **arg)
step.addAction(act)
#act2
arg = { "detectRegion": gl.AREA_BTN_USEITEM ,"imagePattern" : "btn_equip.png",
"loopWaitingTime": 0 ,"successNext" : ["step","step2"],
"failResponse" : "Ignore" ,"loopSleepTime" : 0.1,
"loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "2", desc = u"是否有装备窗口", **arg)
step.addAction(act)
#act3
arg = { "detectRegion": gl.AREA_BTN_USEITEM ,"imagePattern" : "btn_useitem.png",
"loopWaitingTime": 0 ,"successNext" : ["step","step2"],
"failResponse" : "Ignore","loopSleepTime" : 0.1,
"loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "3", desc = u"是否有使用道具窗口", **arg)
step.addAction(act)
#act4
arg = { "detectRegion": gl.AREA_BTN_SKIPSTORY ,"imagePattern" : Pattern("btn_skip_story.png").similar(0.60),
"loopWaitingTime": 0 ,"successNext" : ["step","step3"],
"failResponse" : "Ignore","loopSleepTime" : 0.1,
"loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "4", desc = u"是否在剧情或对话中", **arg)
step.addAction(act)
#act5
arg = { "detectRegion": GetRegionFromGrid(45, 128) ,"imagePattern" : Pattern("main_story.png").similar(0.60),
"loopWaitingTime": 0 ,"successNext" : ["step","step4"],
"failResponse" : "Ignore","loopSleepTime" : 0.1, "failNext" : ["step","step1"],
"loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "5", desc = u"任务栏是否有主线", **arg)
step.addAction(act)
#Step2
step = Step("step2",u"处理道具装备")
task.addStep(step)
#action1
arg = { "detectRegion" : gl.AREA_BTN_USEITEM, "imagePattern" : "btn_useitem.PNG",
"loopWaitingTime" : 0 , "failResponse" : "Ignore" ,
"loopRegion": gl.AREA_BTN_USEITEM ,"loopPattern" :"btn_useitem.PNG",
"loopTime" : 5 ,"loopType" : 0 ,
"loopSleepTime" : 0.1 ,"saveImage" : True}
act = ClickAction(tag = "1", desc = u"使用道具", **arg)
step.addAction(act)
#action2
arg = { "detectRegion" : gl.AREA_BTN_USEITEM, "imagePattern" : "btn_equip.PNG",
"loopWaitingTime" : 0 , "failResponse" : "Ignore" ,
"loopRegion": gl.AREA_BTN_USEITEM ,"loopPattern" :"btn_equip.PNG",
"loopSleepTime" : 0.1 ,"saveImage" : True,
"loopTime" : 5 ,"loopType" : 0 }
act = ClickAction(tag = "2", desc = u"使用装备", **arg)
step.addAction(act)
#action3
act = Jump(tag = "3", desc = u"返回主循环", target = ["step","step1"])
step.addAction(act)
#Step3
step = Step("step3", desc = u"处理剧情中")
task.addStep(step)
#action1
arg = { "detectRegion": gl.AREA_BTN_SKIPSTORY ,"imagePattern" : Pattern("btn_skip_story.png").similar(0.60),
"loopWaitingTime": 0 ,"failNext" : ["step","step1"],
"failResponse" : "Ignore","loopSleepTime" : 0.1,
"loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "1", desc = u"是否在剧情或对话中,不在则返回主循环Step1", **arg)
step.addAction(act)
#action2
arg = { "detectRegion": GetRegionFromGrid(76, 112) ,"imagePattern" : "enterbattle.png",
"loopWaitingTime": 0 ,"next" : ["step","step6"],
"failResponse" : "Ignore" ,"saveImage" : True,
"loopSleepTime" : 0.1}
act = ClickAction(tag = "2", desc = u"如果有开始战斗则点击直到消失,并进入战斗Step", **arg)
step.addAction(act)
#action3
arg = { "detectRegion": GetRegionFromGrid(45, 128) ,"imagePattern" : Pattern("main_story.png").similar(0.60),
"loopWaitingTime": 0 ,"next" : ["step","step4"],
"failResponse" : "Ignore","loopSleepTime" : 0.1 ,"saveImage" : True,
"loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "3", desc = u"如果有主线则点主线", **arg)
step.addAction(act)
#action 4
arg = { "detectRegion" : gl.AREA_BTN_SKIPSTORY, "imagePattern" : Pattern("btn_skip_story.png").similar(0.60),
"loopWaitingTime" : 0 , "failResponse" : "Ignore" ,
"loopSleepTime" : 0.3, "saveImage" : False,
"loopRegion": gl.AREA_BTN_SKIPSTORY ,"loopPattern" :Pattern("btn_skip_story.png").similar(0.60),
"loopTime" : 8 ,"loopType" : 0 }
act = ClickAction(tag = "4",desc=u"点击跳过", **arg)
step.addAction(act)
#action 5
arg = { "time":1}
act = SleepAction(tag = "5",desc=u"sleep 1s", **arg)
step.addAction(act)
#action 6
act = Jump(tag = "6", desc = u"返回继续处理剧情", target = ["action","1"])
step.addAction(act)
#Step4
step = Step("step4",u"处理剧情追踪")
task.addStep(step)
#act0.5
arg = { "detectRegion": GetRegionFromGrid(45, 128) ,"imagePattern" : Pattern("main_story.png").similar(0.60),
"loopWaitingTime": 0 ,"saveImage" : True,
"failNext" : ["step","step3"] ,"failResponse" : "Ignore",
"loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "0.5", desc = u"任务栏是否有主线", **arg)
step.addAction(act)
#action0.6
arg = { "detectRegion": GetRegionFromGrid(60, 112) ,"imagePattern" : "special_zhuxian.png",
"loopWaitingTime": 0 ,
"failResponse" : "Ignore"}
act = ClickAction(tag = "0.6", desc = u"特殊主线", **arg)
step.addAction(act)
#act1
arg = { "detectRegion": GetRegionFromGrid(45, 128) ,"imagePattern" : Pattern("jingqingqidai.png").similar(0.80),
"loopWaitingTime": 0 ,"successNext" : ["step","step7"],
"failResponse" : "Ignore","loopSleepTime" : 0.1,
"loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "1", desc = u"主线任务为敬请期待则结束任务完成", **arg)
step.addAction(act)
#action2
arg = { "detectRegion" : GetRegionFromGrid(45, 128), "imagePattern" : Pattern("main_story.png").similar(0.60),
"loopWaitingTime" : 1 , "failResponse" : "Ignore" ,
"loopSleepTime" : 0.1, "saveImage" : False,
"loopRegion": GetRegionFromGrid(45, 128) ,"loopPattern" :Pattern("main_story.png").similar(0.60),
"loopTime" : 5 ,"loopType" : 0 }
act = ClickAction(tag = "2", desc = u"循环点击主线直到消失", **arg)
step.addAction(act)
#action3
act = Jump(tag = "3", desc = u"jump to step4 action1",target=["action","0.5"])
step.addAction(act)
#Step6
step = Step("step6",u"自动战斗")
task.addStep(step)
step.addActionSet("AutoBattle",tag = "1",desc = u"自动战斗actionset", mp = {})
act = Jump(tag = "1", desc = u"jump to step1", target=['step','step1'])
step.addAction(act)
#Step7
step = Step("step7",u"结束Task")
task.addStep(step)
arg = {'time':1}
act = SleepAction(tag="end sleep",desc = u"准备结束该任务",**arg)
step.addAction(act) |
py | 1a3b56668c0823d219b3b3761bb22a393c534f6e | #!/usr/bin/env python3
import json
import os
import sys
import requests
base_url = "https://api.northwell.edu/"
url = "https://api.northwell.edu/v2/vax-locations/all"
def get_paginated_urls():
response = requests.get(url)
data = response.json()
return [page_url["url"] for page_url in data["response"]["pagination"]["display"]]
def get_locations(page_url):
response = requests.get(base_url + page_url)
data = response.json()
return data["response"]["locations"]
def main():
output_dir = sys.argv[1] if len(sys.argv) > 1 else None
if output_dir is None:
raise Exception("Must pass an output_dir as first argument")
page_urls = get_paginated_urls()
for index, page_url in enumerate(page_urls):
locations = get_locations(page_url)
output_file_path = os.path.join(output_dir, f"output{index}.json")
with open(output_file_path, "w", encoding="utf-8") as f:
json.dump(locations, f, ensure_ascii=False, indent=4)
if __name__ == "__main__":
sys.exit(main())
|
py | 1a3b567146169d54457ff3db4fc0c57950fad78b | import yaml
from g import versions_file_path
from .misc import generate_random_string
def validate(conf, **kwargs):
protocol = conf.get("protocol")
if protocol != "https" and kwargs.get('notary_mode'):
raise Exception(
"Error: the protocol must be https when Harbor is deployed with Notary")
if protocol == "https":
if not conf.get("cert_path"):
raise Exception("Error: The protocol is https but attribute ssl_cert is not set")
if not conf.get("cert_key_path"):
raise Exception("Error: The protocol is https but attribute ssl_cert_key is not set")
# Storage validate
valid_storage_drivers = ["filesystem", "azure", "gcs", "s3", "swift", "oss"]
storage_provider_name = conf.get("storage_provider_name")
if storage_provider_name not in valid_storage_drivers:
raise Exception("Error: storage driver %s is not supported, only the following ones are supported: %s" % (
storage_provider_name, ",".join(valid_storage_drivers)))
storage_provider_config = conf.get("storage_provider_config") ## original is registry_storage_provider_config
if storage_provider_name != "filesystem":
if storage_provider_config == "":
raise Exception(
"Error: no provider configurations are provided for provider %s" % storage_provider_name)
# Redis validate
redis_host = conf.get("redis_host")
if redis_host is None or len(redis_host) < 1:
raise Exception(
"Error: redis_host in harbor.cfg needs to point to an endpoint of Redis server or cluster.")
redis_port = conf.get("redis_port")
if redis_host is None or (redis_port < 1 or redis_port > 65535):
raise Exception(
"Error: redis_port in harbor.cfg needs to point to the port of Redis server or cluster.")
def parse_versions():
if not versions_file_path.is_file():
return {}
with open(versions_file_path) as f:
versions = yaml.safe_load(f)
return versions
def parse_yaml_config(config_file_path):
'''
:param config_file_path: path to the yaml configuration file
:returns: dict of configs
'''
with open(config_file_path) as f:
configs = yaml.safe_load(f)
config_dict = {
'adminserver_url': "http://adminserver:8080",
'registry_url': "http://registry:5000",
'registry_controller_url': "http://registryctl:8080",
'core_url': "http://core:8080",
'token_service_url': "http://core:8080/service/token",
'jobservice_url': 'http://jobservice:8080',
'clair_url': 'http://clair:6060',
'notary_url': 'http://notary-server:4443',
'chart_repository_url': 'http://chartmuseum:9999'
}
config_dict['hostname'] = configs["hostname"]
config_dict['protocol'] = 'http'
http_config = configs.get('http') or {}
config_dict['http_port'] = http_config.get('port', 80)
https_config = configs.get('https')
if https_config:
config_dict['protocol'] = 'https'
config_dict['https_port'] = https_config.get('port', 443)
config_dict['cert_path'] = https_config["certificate"]
config_dict['cert_key_path'] = https_config["private_key"]
config_dict['public_url'] = configs.get('external_url') or '{protocol}://{hostname}'.format(**config_dict)
# DB configs
db_configs = configs.get('database')
if db_configs:
config_dict['db_host'] = 'postgresql'
config_dict['db_port'] = 5432
config_dict['db_user'] = 'postgres'
config_dict['db_password'] = db_configs.get("password") or ''
config_dict['ssl_mode'] = 'disable'
# Data path volume
config_dict['data_volume'] = configs['data_volume']
# Initial Admin Password
config_dict['harbor_admin_password'] = configs["harbor_admin_password"]
# Registry storage configs
storage_config = configs.get('storage_service') or {}
config_dict['registry_custom_ca_bundle_path'] = storage_config.get('ca_bundle') or ''
if storage_config.get('filesystem'):
config_dict['storage_provider_name'] = 'filesystem'
config_dict['storage_provider_config'] = storage_config['filesystem']
elif storage_config.get('azure'):
config_dict['storage_provider_name'] = 'azure'
config_dict['storage_provider_config'] = storage_config['azure']
elif storage_config.get('gcs'):
config_dict['storage_provider_name'] = 'gcs'
config_dict['storage_provider_config'] = storage_config['gcs']
elif storage_config.get('s3'):
config_dict['storage_provider_name'] = 's3'
config_dict['storage_provider_config'] = storage_config['s3']
elif storage_config.get('swift'):
config_dict['storage_provider_name'] = 'swift'
config_dict['storage_provider_config'] = storage_config['swift']
elif storage_config.get('oss'):
config_dict['storage_provider_name'] = 'oss'
config_dict['storage_provider_config'] = storage_config['oss']
else:
config_dict['storage_provider_name'] = 'filesystem'
config_dict['storage_provider_config'] = {}
# Clair configs
clair_configs = configs.get("clair") or {}
config_dict['clair_db'] = 'postgres'
config_dict['clair_updaters_interval'] = clair_configs.get("updaters_interval") or 12
config_dict['clair_http_proxy'] = clair_configs.get('http_proxy') or ''
config_dict['clair_https_proxy'] = clair_configs.get('https_proxy') or ''
config_dict['clair_no_proxy'] = clair_configs.get('no_proxy') or '127.0.0.1,localhost,core,registry'
# jobservice config
js_config = configs.get('jobservice') or {}
config_dict['max_job_workers'] = js_config["max_job_workers"]
config_dict['jobservice_secret'] = generate_random_string(16)
# Log configs
log_configs = configs.get('log') or {}
config_dict['log_location'] = log_configs["location"]
config_dict['log_rotate_count'] = log_configs["rotate_count"]
config_dict['log_rotate_size'] = log_configs["rotate_size"]
config_dict['log_level'] = log_configs['level']
# external DB, if external_db enabled, it will cover the database config
external_db_configs = configs.get('external_database') or {}
if external_db_configs:
config_dict['db_password'] = external_db_configs.get('password') or ''
config_dict['db_host'] = external_db_configs['host']
config_dict['db_port'] = external_db_configs['port']
config_dict['db_user'] = external_db_configs['username']
if external_db_configs.get('ssl_mode'):
config_dict['db_ssl_mode'] = external_db_configs['ssl_mode']
# redis config
redis_configs = configs.get("external_redis")
if redis_configs:
# using external_redis
config_dict['redis_host'] = redis_configs['host']
config_dict['redis_port'] = redis_configs['port']
config_dict['redis_password'] = redis_configs.get("password") or ''
config_dict['redis_db_index_reg'] = redis_configs.get('registry_db_index') or 1
config_dict['redis_db_index_js'] = redis_configs.get('jobservice_db_index') or 2
config_dict['redis_db_index_chart'] = redis_configs.get('chartmuseum_db_index') or 3
else:
## Using local redis
config_dict['redis_host'] = 'redis'
config_dict['redis_port'] = 6379
config_dict['redis_password'] = ''
config_dict['redis_db_index_reg'] = 1
config_dict['redis_db_index_js'] = 2
config_dict['redis_db_index_chart'] = 3
# redis://[arbitrary_username:password@]ipaddress:port/database_index
if config_dict.get('redis_password'):
config_dict['redis_url_js'] = "redis://anonymous:%s@%s:%s/%s" % (config_dict['redis_password'], config_dict['redis_host'], config_dict['redis_port'], config_dict['redis_db_index_js'])
config_dict['redis_url_reg'] = "redis://anonymous:%s@%s:%s/%s" % (config_dict['redis_password'], config_dict['redis_host'], config_dict['redis_port'], config_dict['redis_db_index_reg'])
else:
config_dict['redis_url_js'] = "redis://%s:%s/%s" % (config_dict['redis_host'], config_dict['redis_port'], config_dict['redis_db_index_js'])
config_dict['redis_url_reg'] = "redis://%s:%s/%s" % (config_dict['redis_host'], config_dict['redis_port'], config_dict['redis_db_index_reg'])
# auto generated secret string for core
config_dict['core_secret'] = generate_random_string(16)
# Admiral configs
config_dict['admiral_url'] = configs.get("admiral_url") or ""
return config_dict |
py | 1a3b57c9a3729a6cec29133cfe685acb2d7c82f4 | import pytest
from deepctr.models import AFM
from ..utils import check_model, get_test_data,SAMPLE_SIZE
@pytest.mark.parametrize(
'use_attention,sparse_feature_num,dense_feature_num',
[(True, 1, 1), (False, 3, 3),
]
)
def test_AFM(use_attention, sparse_feature_num, dense_feature_num):
model_name = "AFM"
sample_size = SAMPLE_SIZE
x, y, feature_columns = get_test_data(sample_size, sparse_feature_num, dense_feature_num)
print('xxxxxx',feature_columns)
model = AFM(feature_columns, feature_columns, use_attention=use_attention, afm_dropout=0.5)
check_model(model, model_name, x, y)
if __name__ == "__main__":
pass
|
py | 1a3b57ebfb0cd410efb33c3a4a72338e2c1bd16a | #!/usr/bin/env python
import argparse
import boto3
import pandas as pd
import sagemaker
import json
from sagemaker.deserializers import JSONDeserializer
from sagemaker.serializers import JSONSerializer
from botocore.exceptions import ClientError
import logging
import traceback
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
if __name__=='__main__':
parser = argparse.ArgumentParser()
# parameters sent by the client are passed as command-line arguments to the script.
parser.add_argument("--ticker-cik", type=str, default='amzn')
parser.add_argument("--endpoint-name", type=str)
parser.add_argument("--region", type=str)
args, _ = parser.parse_known_args()
sagemaker_session = sagemaker.Session(boto3.session.Session(region_name=args.region))
#get the json data
f = open(f'/opt/ml/processing/input/10k10q/{args.ticker_cik}_10k_10q_summary.json',)
# returns JSON object as
# a dictionary
sec_summary = json.load(f)
sec_summary['inputs'] = sec_summary['inputs'][:2500]
sec_summary['source'] = f'{args.ticker_cik} SEC Report'
sec_df = pd.json_normalize(sec_summary)
sec_df = sec_df[['source', 'inputs']]
articles_df = pd.read_csv(f'/opt/ml/processing/input/articles/{args.ticker_cik}_articles.csv')
articles_df = articles_df[['source.name', 'content', 'description']]
articles_df['inputs'] = articles_df[['content', 'description']].apply(lambda x: ''.join(x), axis=1)
articles_df.drop(['content', 'description'], axis=1, inplace=True)
articles_df.rename(columns={'source.name': 'source'}, inplace=True)
df = sec_df.append(articles_df,ignore_index=True)
data={}
data['inputs'] = df['inputs'].tolist()
#initialize predictor from Endpoint
predictor = sagemaker.predictor.Predictor(endpoint_name=args.endpoint_name,
sagemaker_session=sagemaker_session,
serializer=JSONSerializer(),
deserializer=JSONDeserializer())
# predict for all chunks
try:
response = predictor.predict(data)
response_df = pd.json_normalize(response)
response_df['source'] = df['source']
response_df=response_df[['source', 'label', 'score']]
response_df.to_csv(f'/opt/ml/processing/output/{args.ticker_cik}_sentiment_result.csv', index=False)
except ClientError as e:
stacktrace = traceback.format_exc()
error_message = e.response["Error"]["Message"]
LOGGER.error("{}".format(stacktrace))
raise Exception(error_message) |
py | 1a3b5b75744f8e6d84abc32c7b867bf7e885566e | """
OpenVINO DL Workbench
Endpoints to work with environments
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from flask import jsonify
from wb.extensions_factories.database import get_db_session_for_app
from wb.main.api_endpoints.v1 import V1_ENVIRONMENT_API
from wb.main.environment.handler import EnvironmentAPIHandler
from wb.main.utils.safe_runner import safe_run
@V1_ENVIRONMENT_API.route('/environment/frameworks/status')
@safe_run
def get_all_environments():
session = get_db_session_for_app()
return jsonify(EnvironmentAPIHandler.get_framework_specific_environments_status(session=session))
@V1_ENVIRONMENT_API.route('environment/setup/stop', methods=['DELETE'])
@safe_run
def stop_setup_environment():
session = get_db_session_for_app()
return jsonify(EnvironmentAPIHandler.stop_all_running_environments(session))
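# Example requests (hedged): the URL prefix depends on how V1_ENVIRONMENT_API is
# registered on the Flask app and host/port depend on the deployment, so both
# are assumptions here; only the route suffixes come from this file.
#
#   curl http://<host>:<port>/<api-prefix>/environment/frameworks/status
#   curl -X DELETE http://<host>:<port>/<api-prefix>/environment/setup/stop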
|
py | 1a3b5e43aa3c45d845b9ce670fd4ea38349175f8 | # -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class DataUsageStats(object):
"""Implementation of the 'DataUsageStats' model.
Specifies the data usage metric of the data stored on the Cohesity
Cluster or Storage Domains (View Boxes).
Attributes:
cloud_data_written_bytes (long|int): Specifies the total data written
on cloud tiers, as computed by the Cohesity Cluster.
cloud_data_written_bytes_timestamp_usec (long|int): Specifies
Timestamp of CloudDataWrittenBytes.
cloud_total_physical_usage_bytes (long|int): Specifies the total cloud
capacity, as computed by the Cohesity Cluster, after the size of
the data has been reduced by change-block tracking, compression
and deduplication.
cloud_total_physical_usage_bytes_timestamp_usec (long|int): Specifies
Timestamp of CloudTotalPhysicalUsageBytes.
data_in_bytes (long|int): Specifies the data read from the protected
objects by the Cohesity Cluster before any data reduction using
deduplication and compression.
data_in_bytes_after_dedup (long|int): Specifies the size of
the data has been reduced by change-block tracking and
deduplication but before compression or data is replicated to
other nodes as per RF or Erasure Coding policy.
data_in_bytes_after_dedup_timestamp_usec (long|int): Specifies
Timestamp of DataInBytesAfterDedup.
data_in_bytes_timestamp_usec (long|int): Specifies Timestamp of
DataInBytes.
data_protect_logical_usage_bytes (long|int): Specifies the logical
data used by Data Protect on Cohesity cluster.
data_protect_logical_usage_bytes_timestamp_usec (long|int): Specifies
Timestamp of DataProtectLogicalUsageBytes.
data_protect_physical_usage_bytes (long|int): Specifies the physical
data used by Data Protect on Cohesity cluster.
data_protect_physical_usage_bytes_timestamp_usec (long|int): Specifies
Timestamp of DataProtectPhysicalUsageBytes.
data_written_bytes (long|int): Specifies the data written after it has
been reduced by deduplication and compression. This does not
include resiliency impact.
data_written_bytes_timestamp_usec (long|int): Specifies Timestamp of
DataWrittenBytes.
file_services_logical_usage_bytes (long|int): Specifies the logical
data used by File services on Cohesity cluster.
file_services_logical_usage_bytes_timestamp_usec (long|int): Specifies
Timestamp of FileServicesLogicalUsageBytes.
file_services_physical_usage_bytes (long|int): Specifies the physical
data used by File services on Cohesity cluster.
file_services_physical_usage_bytes_timestamp_usec (long|int):
Specifies Timestamp of FileServicesPhysicalUsageBytes.
local_data_written_bytes (long|int): Specifies the total data written
on local tiers, as computed by the Cohesity Cluster, after the
size of the data has been reduced by change-block tracking,
deduplication and compression. This does not include resiliency
impact.
local_data_written_bytes_timestamp_usec (long|int): Specifies
Timestamp of LocalDataWrittenBytes.
local_tier_resiliency_impact_bytes (long|int): Specifies the size of
the data has been replicated to other nodes as per RF or Erasure
Coding policy.
local_tier_resiliency_impact_bytes_timestamp_usec (long|int):
Specifies Timestamp of LocalTierResiliencyImpactBytes.
local_total_physical_usage_bytes (long|int): Specifies the total local
capacity, as computed by the Cohesity Cluster, after the size of
the data has been reduced by change-block tracking, compression
and deduplication.
local_total_physical_usage_bytes_timestamp_usec (long|int): Specifies
Timestamp of LocalTotalPhysicalUsageBytes.
outdated_logical_usage_bytes_timestamp_usec (long|int): Specifies
Timestamp of OutdatedLogicalUsageBytes.
storage_consumed_bytes (long|int): Specifies the total capacity, as
computed by the Cohesity Cluster, after the size of the data has
been reduced by change-block tracking, compression and
deduplication. This includes resiliency impact.
storage_consumed_bytes_timestamp_usec (long|int): Specifies Timestamp
of StorageConsumedBytes.
total_logical_usage_bytes (long|int): Provides the combined data
residing on protected objects. The size of data before reduction
by deduplication and compression.
total_logical_usage_bytes_timestamp_usec (long|int): Specifies
Timestamp of TotalLogicalUsageBytes.
        unique_physical_data_bytes (int): Specifies the unique physical data
usage in bytes.
"""
# Create a mapping from Model property names to API property names
_names = {
"cloud_data_written_bytes":'cloudDataWrittenBytes',
"cloud_data_written_bytes_timestamp_usec":'cloudDataWrittenBytesTimestampUsec',
"cloud_total_physical_usage_bytes":'cloudTotalPhysicalUsageBytes',
"cloud_total_physical_usage_bytes_timestamp_usec":'cloudTotalPhysicalUsageBytesTimestampUsec',
"data_in_bytes":'dataInBytes',
"data_in_bytes_after_dedup":'dataInBytesAfterDedup',
"data_in_bytes_after_dedup_timestamp_usec":'dataInBytesAfterDedupTimestampUsec',
"data_in_bytes_timestamp_usec":'dataInBytesTimestampUsec',
"data_protect_logical_usage_bytes":'dataProtectLogicalUsageBytes',
"data_protect_logical_usage_bytes_timestamp_usec":'dataProtectLogicalUsageBytesTimestampUsec',
"data_protect_physical_usage_bytes":'dataProtectPhysicalUsageBytes',
"data_protect_physical_usage_bytes_timestamp_usec":'dataProtectPhysicalUsageBytesTimestampUsec',
"data_written_bytes":'dataWrittenBytes',
"data_written_bytes_timestamp_usec":'dataWrittenBytesTimestampUsec',
"file_services_logical_usage_bytes":'fileServicesLogicalUsageBytes',
"file_services_logical_usage_bytes_timestamp_usec":'fileServicesLogicalUsageBytesTimestampUsec',
"file_services_physical_usage_bytes":'fileServicesPhysicalUsageBytes',
"file_services_physical_usage_bytes_timestamp_usec":'fileServicesPhysicalUsageBytesTimestampUsec',
"local_data_written_bytes":'localDataWrittenBytes',
"local_data_written_bytes_timestamp_usec":'localDataWrittenBytesTimestampUsec',
"local_tier_resiliency_impact_bytes":'localTierResiliencyImpactBytes',
"local_tier_resiliency_impact_bytes_timestamp_usec":'localTierResiliencyImpactBytesTimestampUsec',
"local_total_physical_usage_bytes":'localTotalPhysicalUsageBytes',
"local_total_physical_usage_bytes_timestamp_usec":'localTotalPhysicalUsageBytesTimestampUsec',
"outdated_logical_usage_bytes_timestamp_usec":'outdatedLogicalUsageBytesTimestampUsec',
"storage_consumed_bytes":'storageConsumedBytes',
"storage_consumed_bytes_timestamp_usec":'storageConsumedBytesTimestampUsec',
"total_logical_usage_bytes":'totalLogicalUsageBytes',
"total_logical_usage_bytes_timestamp_usec":'totalLogicalUsageBytesTimestampUsec',
"unique_physical_data_bytes":'uniquePhysicalDataBytes'
}
def __init__(self,
cloud_data_written_bytes=None,
cloud_data_written_bytes_timestamp_usec=None,
cloud_total_physical_usage_bytes=None,
cloud_total_physical_usage_bytes_timestamp_usec=None,
data_in_bytes=None,
data_in_bytes_after_dedup=None,
data_in_bytes_after_dedup_timestamp_usec=None,
data_in_bytes_timestamp_usec=None,
data_protect_logical_usage_bytes=None,
data_protect_logical_usage_bytes_timestamp_usec=None,
data_protect_physical_usage_bytes=None,
data_protect_physical_usage_bytes_timestamp_usec=None,
data_written_bytes=None,
data_written_bytes_timestamp_usec=None,
file_services_logical_usage_bytes=None,
file_services_logical_usage_bytes_timestamp_usec=None,
file_services_physical_usage_bytes=None,
file_services_physical_usage_bytes_timestamp_usec=None,
local_data_written_bytes=None,
local_data_written_bytes_timestamp_usec=None,
local_tier_resiliency_impact_bytes=None,
local_tier_resiliency_impact_bytes_timestamp_usec=None,
local_total_physical_usage_bytes=None,
local_total_physical_usage_bytes_timestamp_usec=None,
outdated_logical_usage_bytes_timestamp_usec=None,
storage_consumed_bytes=None,
storage_consumed_bytes_timestamp_usec=None,
total_logical_usage_bytes=None,
total_logical_usage_bytes_timestamp_usec=None,
unique_physical_data_bytes=None):
"""Constructor for the DataUsageStats class"""
# Initialize members of the class
self.cloud_data_written_bytes = cloud_data_written_bytes
self.cloud_data_written_bytes_timestamp_usec = cloud_data_written_bytes_timestamp_usec
self.cloud_total_physical_usage_bytes = cloud_total_physical_usage_bytes
self.cloud_total_physical_usage_bytes_timestamp_usec = cloud_total_physical_usage_bytes_timestamp_usec
self.data_in_bytes = data_in_bytes
self.data_in_bytes_after_dedup = data_in_bytes_after_dedup
self.data_in_bytes_after_dedup_timestamp_usec = data_in_bytes_after_dedup_timestamp_usec
self.data_in_bytes_timestamp_usec = data_in_bytes_timestamp_usec
self.data_protect_logical_usage_bytes = data_protect_logical_usage_bytes
self.data_protect_logical_usage_bytes_timestamp_usec = data_protect_logical_usage_bytes_timestamp_usec
self.data_protect_physical_usage_bytes = data_protect_physical_usage_bytes
self.data_protect_physical_usage_bytes_timestamp_usec = data_protect_physical_usage_bytes_timestamp_usec
self.data_written_bytes = data_written_bytes
self.data_written_bytes_timestamp_usec = data_written_bytes_timestamp_usec
self.file_services_logical_usage_bytes = file_services_logical_usage_bytes
self.file_services_logical_usage_bytes_timestamp_usec = file_services_logical_usage_bytes_timestamp_usec
self.file_services_physical_usage_bytes = file_services_physical_usage_bytes
self.file_services_physical_usage_bytes_timestamp_usec = file_services_physical_usage_bytes_timestamp_usec
self.local_data_written_bytes = local_data_written_bytes
self.local_data_written_bytes_timestamp_usec = local_data_written_bytes_timestamp_usec
self.local_tier_resiliency_impact_bytes = local_tier_resiliency_impact_bytes
self.local_tier_resiliency_impact_bytes_timestamp_usec = local_tier_resiliency_impact_bytes_timestamp_usec
self.local_total_physical_usage_bytes = local_total_physical_usage_bytes
self.local_total_physical_usage_bytes_timestamp_usec = local_total_physical_usage_bytes_timestamp_usec
self.outdated_logical_usage_bytes_timestamp_usec = outdated_logical_usage_bytes_timestamp_usec
self.storage_consumed_bytes = storage_consumed_bytes
self.storage_consumed_bytes_timestamp_usec = storage_consumed_bytes_timestamp_usec
self.total_logical_usage_bytes = total_logical_usage_bytes
self.total_logical_usage_bytes_timestamp_usec = total_logical_usage_bytes_timestamp_usec
self.unique_physical_data_bytes = unique_physical_data_bytes
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
cloud_data_written_bytes = dictionary.get('cloudDataWrittenBytes')
cloud_data_written_bytes_timestamp_usec = dictionary.get('cloudDataWrittenBytesTimestampUsec')
cloud_total_physical_usage_bytes = dictionary.get('cloudTotalPhysicalUsageBytes')
cloud_total_physical_usage_bytes_timestamp_usec = dictionary.get('cloudTotalPhysicalUsageBytesTimestampUsec')
data_in_bytes = dictionary.get('dataInBytes')
data_in_bytes_after_dedup = dictionary.get('dataInBytesAfterDedup')
data_in_bytes_after_dedup_timestamp_usec = dictionary.get('dataInBytesAfterDedupTimestampUsec')
data_in_bytes_timestamp_usec = dictionary.get('dataInBytesTimestampUsec')
data_protect_logical_usage_bytes = dictionary.get('dataProtectLogicalUsageBytes')
data_protect_logical_usage_bytes_timestamp_usec = dictionary.get('dataProtectLogicalUsageBytesTimestampUsec')
data_protect_physical_usage_bytes = dictionary.get('dataProtectPhysicalUsageBytes')
data_protect_physical_usage_bytes_timestamp_usec = dictionary.get('dataProtectPhysicalUsageBytesTimestampUsec')
data_written_bytes = dictionary.get('dataWrittenBytes')
data_written_bytes_timestamp_usec = dictionary.get('dataWrittenBytesTimestampUsec')
file_services_logical_usage_bytes = dictionary.get('fileServicesLogicalUsageBytes')
file_services_logical_usage_bytes_timestamp_usec = dictionary.get('fileServicesLogicalUsageBytesTimestampUsec')
file_services_physical_usage_bytes = dictionary.get('fileServicesPhysicalUsageBytes')
file_services_physical_usage_bytes_timestamp_usec = dictionary.get('fileServicesPhysicalUsageBytesTimestampUsec')
local_data_written_bytes = dictionary.get('localDataWrittenBytes')
local_data_written_bytes_timestamp_usec = dictionary.get('localDataWrittenBytesTimestampUsec')
local_tier_resiliency_impact_bytes = dictionary.get('localTierResiliencyImpactBytes')
local_tier_resiliency_impact_bytes_timestamp_usec = dictionary.get('localTierResiliencyImpactBytesTimestampUsec')
local_total_physical_usage_bytes = dictionary.get('localTotalPhysicalUsageBytes')
local_total_physical_usage_bytes_timestamp_usec = dictionary.get('localTotalPhysicalUsageBytesTimestampUsec')
outdated_logical_usage_bytes_timestamp_usec = dictionary.get('outdatedLogicalUsageBytesTimestampUsec')
storage_consumed_bytes = dictionary.get('storageConsumedBytes')
storage_consumed_bytes_timestamp_usec = dictionary.get('storageConsumedBytesTimestampUsec')
total_logical_usage_bytes = dictionary.get('totalLogicalUsageBytes')
total_logical_usage_bytes_timestamp_usec = dictionary.get('totalLogicalUsageBytesTimestampUsec')
unique_physical_data_bytes = dictionary.get('uniquePhysicalDataBytes')
# Return an object of this model
return cls(cloud_data_written_bytes,
cloud_data_written_bytes_timestamp_usec,
cloud_total_physical_usage_bytes,
cloud_total_physical_usage_bytes_timestamp_usec,
data_in_bytes,
data_in_bytes_after_dedup,
data_in_bytes_after_dedup_timestamp_usec,
data_in_bytes_timestamp_usec,
data_protect_logical_usage_bytes,
data_protect_logical_usage_bytes_timestamp_usec,
data_protect_physical_usage_bytes,
data_protect_physical_usage_bytes_timestamp_usec,
data_written_bytes,
data_written_bytes_timestamp_usec,
file_services_logical_usage_bytes,
file_services_logical_usage_bytes_timestamp_usec,
file_services_physical_usage_bytes,
file_services_physical_usage_bytes_timestamp_usec,
local_data_written_bytes,
local_data_written_bytes_timestamp_usec,
local_tier_resiliency_impact_bytes,
local_tier_resiliency_impact_bytes_timestamp_usec,
local_total_physical_usage_bytes,
local_total_physical_usage_bytes_timestamp_usec,
outdated_logical_usage_bytes_timestamp_usec,
storage_consumed_bytes,
storage_consumed_bytes_timestamp_usec,
total_logical_usage_bytes,
total_logical_usage_bytes_timestamp_usec,
unique_physical_data_bytes)
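# Illustrative usage of the factory above (not part of the original client code).
# The payload keys follow the `_names` mapping; the values are made-up samples.
#   payload = {'dataInBytes': 10485760, 'dataWrittenBytes': 5242880}
#   stats = DataUsageStats.from_dictionary(payload)
#   print(stats.data_in_bytes, stats.data_written_bytes)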
|
py | 1a3b5e833efb28d0ac8eb67806a2ee35895ebb1c | # Copyright 2021 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides ``kedro.config`` with the functionality to load one
or more configuration files from specified paths.
"""
import logging
from glob import iglob
from pathlib import Path
from typing import AbstractSet, Any, Dict, Iterable, List, Set, Union
from warnings import warn
SUPPORTED_EXTENSIONS = [
".yml",
".yaml",
".json",
".ini",
".pickle",
".properties",
".xml",
]
class MissingConfigException(Exception):
"""Raised when no configuration files can be found within a config path"""
pass
class BadConfigException(Exception):
"""Raised when a configuration file cannot be loaded, for instance
due to wrong syntax or poor formatting.
"""
pass
class ConfigLoader:
"""Recursively scan the directories specified in ``conf_paths`` for
configuration files with a ``yaml``, ``yml``, ``json``, ``ini``,
``pickle``, ``xml`` or ``properties`` extension, load them,
and return them in the form of a config dictionary.
When the same top-level key appears in any 2 config files located in
the same ``conf_path`` (sub)directory, a ``ValueError`` is raised.
When the same key appears in any 2 config files located in different
``conf_path`` directories, the last processed config path takes
precedence and overrides this key.
For example, if your ``conf_path`` looks like this:
::
.
`-- conf
|-- README.md
|-- base
| |-- catalog.yml
| |-- logging.yml
| `-- experiment1
| `-- parameters.yml
`-- local
|-- catalog.yml
|-- db.ini
|-- experiment1
| |-- parameters.yml
| `-- model_parameters.yml
`-- experiment2
`-- parameters.yml
You can access the different configurations as follows:
::
>>> import logging.config
>>> from kedro.config import ConfigLoader
>>>
>>> conf_paths = ['conf/base', 'conf/local']
>>> conf_loader = ConfigLoader(conf_paths)
>>>
>>> conf_logging = conf_loader.get('logging*')
>>> logging.config.dictConfig(conf_logging) # set logging conf
>>>
>>> conf_catalog = conf_loader.get('catalog*', 'catalog*/**')
>>> conf_params = conf_loader.get('**/parameters.yml')
"""
def __init__(self, conf_paths: Union[str, Iterable[str]]):
"""Instantiate a ConfigLoader.
Args:
conf_paths: Non-empty path or list of paths to configuration
directories.
Raises:
ValueError: If ``conf_paths`` is empty.
"""
if not conf_paths:
raise ValueError(
"`conf_paths` must contain at least one path to "
"load configuration files from."
)
if isinstance(conf_paths, str):
conf_paths = [conf_paths]
self.conf_paths = _remove_duplicates(conf_paths)
self.logger = logging.getLogger(__name__)
@staticmethod
def _load_config_file(config_file: Path) -> Dict[str, Any]:
"""Load an individual config file using `anyconfig` as a backend.
Args:
config_file: Path to a config file to process.
Raises:
BadConfigException: If configuration is poorly formatted and
cannot be loaded.
Returns:
Parsed configuration.
"""
# for performance reasons
import anyconfig # pylint: disable=import-outside-toplevel
try:
# Default to UTF-8, which is Python 3 default encoding, to decode the file
with open(config_file, encoding="utf8") as yml:
return {
k: v
for k, v in anyconfig.load(yml).items()
if not k.startswith("_")
}
except AttributeError as exc:
raise BadConfigException(
f"Couldn't load config file: {config_file}"
) from exc
def _load_configs(self, config_filepaths: List[Path]) -> Dict[str, Any]:
"""Recursively load all configuration files, which satisfy
a given list of glob patterns from a specific path.
Args:
config_filepaths: Configuration files sorted in the order of precedence.
Raises:
ValueError: If 2 or more configuration files contain the same key(s).
BadConfigException: If configuration is poorly formatted and
cannot be loaded.
Returns:
Resulting configuration dictionary.
"""
aggregate_config = {}
seen_file_to_keys = {} # type: Dict[Path, AbstractSet[str]]
for config_filepath in config_filepaths:
single_config = self._load_config_file(config_filepath)
_check_duplicate_keys(seen_file_to_keys, config_filepath, single_config)
seen_file_to_keys[config_filepath] = single_config.keys()
aggregate_config.update(single_config)
return aggregate_config
def _lookup_config_filepaths(
self, conf_path: Path, patterns: Iterable[str], processed_files: Set[Path]
) -> List[Path]:
config_files = _path_lookup(conf_path, patterns)
seen_files = config_files & processed_files
if seen_files:
self.logger.warning(
"Config file(s): %s already processed, skipping loading...",
", ".join(str(seen) for seen in sorted(seen_files)),
)
config_files -= seen_files
return sorted(config_files)
def get(self, *patterns: str) -> Dict[str, Any]:
"""Recursively scan for configuration files, load and merge them, and
return them in the form of a config dictionary.
Args:
patterns: Glob patterns to match. Files, which names match
any of the specified patterns, will be processed.
Raises:
ValueError: If 2 or more configuration files inside the same
config path (or its subdirectories) contain the same
top-level key.
MissingConfigException: If no configuration files exist within
a specified config path.
BadConfigException: If configuration is poorly formatted and
cannot be loaded.
Returns:
Dict[str, Any]: A Python dictionary with the combined
configuration from all configuration files. **Note:** any keys
that start with `_` will be ignored.
"""
if not patterns:
raise ValueError(
"`patterns` must contain at least one glob "
"pattern to match config filenames against."
)
config = {} # type: Dict[str, Any]
processed_files = set() # type: Set[Path]
for conf_path in self.conf_paths:
if not Path(conf_path).is_dir():
raise ValueError(
f"Given configuration path either does not exist "
f"or is not a valid directory: {conf_path}"
)
logging.info("IN GET. STARTGIN __LOOKUP_CONFIG_FILE_PATHS")
config_filepaths = self._lookup_config_filepaths(
Path(conf_path), patterns, processed_files
)
logging.info("COMPLETED _LOOKUP_CONFIG_FILE_PATHS. STARTING _LOAD_CONFIGS.")
new_conf = self._load_configs(config_filepaths)
logging.info("COMPLETED _LOAD_CONFIGS.")
common_keys = config.keys() & new_conf.keys()
if common_keys:
sorted_keys = ", ".join(sorted(common_keys))
msg = (
"Config from path `%s` will override the following "
"existing top-level config keys: %s"
)
self.logger.info(msg, conf_path, sorted_keys)
config.update(new_conf)
processed_files |= set(config_filepaths)
logging.info("COMPLETED CONFIG UPDATE")
if not processed_files:
raise MissingConfigException(
f"No files found in {self.conf_paths} matching the glob "
f"pattern(s): {list(patterns)}"
)
return config
def _check_duplicate_keys(
processed_files: Dict[Path, AbstractSet[str]], filepath: Path, conf: Dict[str, Any]
) -> None:
duplicates = []
for processed_file, keys in processed_files.items():
overlapping_keys = conf.keys() & keys
if overlapping_keys:
sorted_keys = ", ".join(sorted(overlapping_keys))
if len(sorted_keys) > 100:
sorted_keys = sorted_keys[:100] + "..."
duplicates.append(f"{processed_file}: {sorted_keys}")
if duplicates:
dup_str = "\n- ".join(duplicates)
raise ValueError(f"Duplicate keys found in {filepath} and:\n- {dup_str}")
def _path_lookup(conf_path: Path, patterns: Iterable[str]) -> Set[Path]:
"""Return a set of all configuration files from ``conf_path`` or
its subdirectories, which satisfy a given list of glob patterns.
Args:
conf_path: Path to configuration directory.
patterns: List of glob patterns to match the filenames against.
Returns:
A set of paths to configuration files.
"""
config_files = set()
conf_path = conf_path.resolve()
for pattern in patterns:
# `Path.glob()` ignores the files if pattern ends with "**",
# therefore iglob is used instead
for each in iglob(str(conf_path / pattern), recursive=True):
path = Path(each).resolve()
if path.is_file() and path.suffix in SUPPORTED_EXTENSIONS:
config_files.add(path)
return config_files
def _remove_duplicates(items: Iterable[str]):
"""Remove duplicates while preserving the order."""
unique_items = [] # type: List[str]
for item in items:
if item not in unique_items:
unique_items.append(item)
else:
warn(
f"Duplicate environment detected! "
f"Skipping re-loading from configuration path: {item}"
)
return unique_items
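# Minimal sketch of the precedence rules documented in `ConfigLoader.get` (the
# paths and patterns are placeholders): duplicate top-level keys across files in
# the *same* conf path raise ValueError, while a later conf path overrides an
# earlier one.
#   loader = ConfigLoader(["conf/base", "conf/local"])
#   catalog = loader.get("catalog*", "catalog*/**")  # conf/local wins on clashes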
|
py | 1a3b5ef7df1baf8a8dbca49e4bb62b59191c9f3f | from django.views.generic import ListView
from constance import config
from learning.models import Tutorial, Category
from learning.filters import TutorialArchiveFilterSet
class TutorialListView(ListView):
model = Tutorial
template_name = "learning/tutorials_archive.html"
page_kwarg = "page"
context_object_name = "tutorials"
def get_paginate_by(self, queryset):
return config.LEARNING_TUTORIAL_ARCHIVE_PAGINATE_BY
def get_queryset(self):
# All confirmed and active tutorials
tutorials = (
Tutorial.objects.order_by("-create_date")
.only_main_fields()
.active_and_confirmed_tutorials()
)
# Filter and order tutorials, then annonate comments_count
tutorials = TutorialArchiveFilterSet(
self.request.GET, tutorials
).qs.annonate_comments_count()
return tutorials
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
category_slug = self.request.GET.get("category")
if category_slug:
context["category"] = (
Category.objects.active_categories()
.filter(slug=category_slug)
.first()
)
return context
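# A possible URLconf hook-up for this view (illustrative; the pattern and route
# name are assumptions, not taken from the project's actual urls.py):
#   from django.urls import path
#   from learning.views import TutorialListView
#   urlpatterns = [
#       path("tutorials/", TutorialListView.as_view(), name="tutorials_archive"),
#   ]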
|
py | 1a3b5f921f357b4648bad07df1595c8ed7ea9f32 | #!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
# test API provided through BaseSktimeForecaster
"""SKtime Forecasters Test."""
__author__ = ["@mloning"]
__all__ = [
"test_different_fh_in_fit_and_predict_req",
"test_fh_in_fit_opt",
"test_fh_in_fit_req",
"test_fh_in_predict_opt",
"test_no_fh_in_fit_req",
"test_no_fh_opt",
"test_oh_setting",
"test_same_fh_in_fit_and_predict_opt",
"test_same_fh_in_fit_and_predict_req",
]
import numpy as np
import pytest
from sktime.forecasting.base import BaseForecaster
from sktime.forecasting.base._sktime import _BaseWindowForecaster
from sktime.forecasting.model_selection import temporal_train_test_split
from sktime.registry import all_estimators
from sktime.utils._testing.forecasting import _get_n_columns, make_forecasting_problem
from sktime.utils._testing.series import _make_series
# get all forecasters
FORECASTERS = [
forecaster
for (name, forecaster) in all_estimators(estimator_types="forecaster")
if issubclass(forecaster, BaseForecaster)
]
FH0 = 1
WINDOW_FORECASTERS = [
forecaster
for (name, forecaster) in all_estimators(estimator_types="forecaster")
if issubclass(forecaster, _BaseWindowForecaster)
]
# testing data
y = make_forecasting_problem()
y_train, y_test = temporal_train_test_split(y, train_size=0.75)
# test _y setting
@pytest.mark.parametrize("Forecaster", FORECASTERS)
def test_oh_setting(Forecaster):
"""Check cuttoff and _y."""
# check _y and cutoff is None after construction
f = Forecaster.create_test_instance()
n_columns_list = _get_n_columns(f.get_tag("scitype:y"))
for n_columns in n_columns_list:
f = Forecaster.create_test_instance()
y = _make_series(n_columns=n_columns)
y_train, y_test = temporal_train_test_split(y, train_size=0.75)
assert f._y is None
assert f.cutoff is None
# check that _y and cutoff is updated during fit
f.fit(y_train, fh=FH0)
# assert isinstance(f._y, pd.Series)
        # action: uncomment the line above
        # why: fails for multivariates because they are DataFrames
# solution: look for a general solution for Series and DataFrames
assert len(f._y) > 0
assert f.cutoff == y_train.index[-1]
# check data pointers
np.testing.assert_array_equal(f._y.index, y_train.index)
# check that _y and cutoff is updated during update
f.update(y_test, update_params=False)
np.testing.assert_array_equal(
f._y.index, np.append(y_train.index, y_test.index)
)
assert f.cutoff == y_test.index[-1]
# check setting/getting API for forecasting horizon
# divide Forecasters into groups based on when fh is required
FORECASTERS_REQUIRED = [
f for f in FORECASTERS if f.get_class_tag("requires-fh-in-fit", True)
]
FORECASTERS_OPTIONAL = [
f for f in FORECASTERS if not f.get_class_tag("requires-fh-in-fit", True)
]
# testing Forecasters which require fh during fitting
@pytest.mark.parametrize("Forecaster", FORECASTERS_REQUIRED)
def test_no_fh_in_fit_req(Forecaster):
"""Check if fh is required in fit."""
f = Forecaster.create_test_instance()
# fh required in fit, raises error if not passed
with pytest.raises(ValueError):
f.fit(y_train)
@pytest.mark.parametrize("Forecaster", FORECASTERS_REQUIRED)
def test_fh_in_fit_req(Forecaster):
"""Checks if fh is requred in fit."""
f = Forecaster.create_test_instance()
f.fit(y_train, fh=FH0)
np.testing.assert_array_equal(f.fh, FH0)
f.predict()
np.testing.assert_array_equal(f.fh, FH0)
@pytest.mark.parametrize("Forecaster", FORECASTERS_REQUIRED)
def test_same_fh_in_fit_and_predict_req(Forecaster):
"""Check if fh is the same in fit and predict."""
f = Forecaster.create_test_instance()
f.fit(y_train, fh=FH0)
np.testing.assert_array_equal(f.fh, FH0)
f.predict(FH0)
np.testing.assert_array_equal(f.fh, FH0)
@pytest.mark.parametrize("Forecaster", FORECASTERS_REQUIRED)
def test_different_fh_in_fit_and_predict_req(Forecaster):
"""Check if fh is different in fit and predict."""
f = Forecaster.create_test_instance()
f.fit(y_train, fh=FH0)
np.testing.assert_array_equal(f.fh, FH0)
# updating fh during predict raises error as fitted model depends on fh
# seen in fit
with pytest.raises(ValueError):
f.predict(fh=FH0 + 1)
# testing Forecasters which take fh either during fitting or predicting
@pytest.mark.parametrize("Forecaster", FORECASTERS_OPTIONAL)
def test_no_fh_opt(Forecaster):
"""Check if fh is optional in fit."""
f = Forecaster.create_test_instance()
n_columns_list = _get_n_columns(f.get_tag("scitype:y"))
for n_columns in n_columns_list:
f = Forecaster.create_test_instance()
y_train = _make_series(n_columns=n_columns)
f.fit(y_train)
# not passing fh to either fit or predict raises error
with pytest.raises(ValueError):
f.predict()
@pytest.mark.parametrize("Forecaster", FORECASTERS_OPTIONAL)
def test_fh_in_fit_opt(Forecaster):
"""Check if fh is optional in fit."""
f = Forecaster.create_test_instance()
n_columns_list = _get_n_columns(f.get_tag("scitype:y"))
for n_columns in n_columns_list:
f = Forecaster.create_test_instance()
y_train = _make_series(n_columns=n_columns)
f.fit(y_train, fh=FH0)
np.testing.assert_array_equal(f.fh, FH0)
f.predict()
np.testing.assert_array_equal(f.fh, FH0)
@pytest.mark.parametrize("Forecaster", FORECASTERS_OPTIONAL)
def test_fh_in_predict_opt(Forecaster):
"""Check if fh is optional in predict."""
f = Forecaster.create_test_instance()
n_columns_list = _get_n_columns(f.get_tag("scitype:y"))
for n_columns in n_columns_list:
f = Forecaster.create_test_instance()
y_train = _make_series(n_columns=n_columns)
f.fit(y_train)
f.predict(FH0)
np.testing.assert_array_equal(f.fh, FH0)
@pytest.mark.parametrize("Forecaster", FORECASTERS_OPTIONAL)
def test_same_fh_in_fit_and_predict_opt(Forecaster):
"""Check if fh is the same in fit and predict."""
f = Forecaster.create_test_instance()
n_columns_list = _get_n_columns(f.get_tag("scitype:y"))
for n_columns in n_columns_list:
f = Forecaster.create_test_instance()
y_train = _make_series(n_columns=n_columns)
# passing the same fh to both fit and predict works
f.fit(y_train, fh=FH0)
f.predict(FH0)
np.testing.assert_array_equal(f.fh, FH0)
@pytest.mark.parametrize("Forecaster", WINDOW_FORECASTERS)
def test_last_window(Forecaster):
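    """Check that the last window matches the tail of the training series."""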
f = Forecaster.create_test_instance()
n_columns_list = _get_n_columns(f.get_tag("scitype:y"))
for n_columns in n_columns_list:
f = Forecaster.create_test_instance()
y_train = _make_series(n_columns=n_columns)
# passing the same fh to both fit and predict works
f.fit(y_train, fh=FH0)
actual, _ = f._get_last_window()
expected = y_train.iloc[-f.window_length_ :]
np.testing.assert_array_equal(actual, expected)
assert len(actual) == f.window_length_
|
py | 1a3b60aca5457984df0934bea8c6995a99e13290 | import numpy as np
from autoencirt.irt.grm import GRModel
from bayesianquilts.dense import DenseHorseshoe
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python import util as tfp_util
from tensorflow_probability.python.mcmc.transformed_kernel import (
make_transform_fn, make_transformed_log_prob, make_log_det_jacobian_fn)
from tensorflow_probability.python.bijectors import softplus as softplus_lib
from bayesianquilts.util import (
clip_gradients
)
tfd = tfp.distributions
tfd = tfp.distributions
tfb = tfp.bijectors
class AEGRModel(GRModel):
def __init__(self,
auxiliary_parameterization=True,
xi_scale=1e-2,
eta_scale=1e-2,
kappa_scale=1e-2,
weight_exponent=1.0,
dim=2,
decay=0.25,
positive_discriminations=True,
hidden_layers=[100, 100],
num_items=1,
):
super(AEGRModel, self).__init__(
auxiliary_parameterization=True,
xi_scale=xi_scale,
eta_scale=eta_scale,
kappa_scale=kappa_scale,
weight_exponent=weight_exponent,
dim=dim,
decay=decay,
positive_discriminations=positive_discriminations
)
        self.num_items = num_items
self.hidden_layers = hidden_layers
self.grm_vars = self.var_list
def initialize_nn(self, hidden_layers=None):
if hidden_layers is not None:
self.hidden_layers = hidden_layers
else:
hidden_layers = self.hidden_layers
self.nn = DenseHorseshoe(
self.num_items,
hidden_layers + [self.dimensions],
reparameterized=True)
self.nn_var_list = self.nn.var_list
def load_data(self, *args, **kwargs):
super(AEGRModel, self).load_data(*args, **kwargs)
self.initialize_nn()
def joint_log_prob(self, **x):
prior = self.joint_log_prior(**x)
d0 = tf.concat(
[x['difficulties0'], x['ddifficulties']],
axis=-1)
difficulties = tf.cumsum(
d0, axis=-1)
likelihood = tf.reduce_sum(
self.log_likelihood(
self.calibration_data,
x['discriminations'],
difficulties,
x['abilities']
),
axis=[-1, -2]
)
return prior + likelihood
def joint_log_prior(
self, **x):
weight_tensors = {v: x[v] for v in self.nn.weight_var_list}
abilities = self.nn.assemble_networks(
weight_tensors)(self.calibration_data)
grm_vars = {k: x[k] for k in self.grm_vars}
grm_vars["abilities"] = abilities[..., tf.newaxis, tf.newaxis]
grm_vars["responses"] = self.calibration_data
nn_log_prior = self.nn.log_prob(weight_tensors)
grm_log_prior = (
super(
AEGRModel, self
).joint_log_prob_auxiliary(**grm_vars) if self.auxiliary_parameterization
else
super(
AEGRModel, self
).joint_log_prob(**grm_vars)
)
return nn_log_prior + grm_log_prior
def sample(self, *args, **kwargs):
nn_sample = self.nn.sample(*args, **kwargs)
grm_sample = self.surrogate_posterior.sample(*args, **kwargs)
return {**nn_sample, **grm_sample}
def create_distributions(self, *args, **kwargs):
super(
AEGRModel, self
).create_distributions(
*args, **kwargs
)
self.surrogate_distribution_hybrid = (
tfd.JointDistributionNamed({
**self.surrogate_distribution_dict,
**self.nn.surrogate_distribution_dict
})
)
def calibrate_advi(
self, num_steps=10, initial_learning_rate=5e-3,
decay_rate=0.99, learning_rate=None,
opt=None, clip=None):
if learning_rate is None:
learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=initial_learning_rate,
decay_steps=num_steps,
decay_rate=decay_rate,
staircase=True)
if opt is None:
opt = tf.optimizers.Adam(
learning_rate=learning_rate)
@tf.function
def run_approximation(num_steps):
losses = tfp.vi.fit_surrogate_posterior(
target_log_prob_fn=(
self.joint_log_prob if clip is None
else clip_gradients(
self.joint_log_prob, clip)),
surrogate_posterior=self.surrogate_distribution_hybrid,
optimizer=opt,
num_steps=num_steps,
sample_size=25
)
return(losses)
losses = run_approximation(num_steps)
print(losses)
if (not np.isnan(losses[-1])) and (not np.isinf(losses[-1])):
self.set_calibration_expectations()
return(losses)
def main():
from autoencirt.data.rwa import get_data
aegrm = AEGRModel(hidden_layers=[20, 30])
aegrm.load_data(get_data())
aegrm.create_distributions()
sample = aegrm.sample([2, 3])
prob = aegrm.joint_log_prob(**sample)
print(prob)
aegrm.calibrate_advi(10, clip=1.)
return
if __name__ == "__main__":
main()
|
py | 1a3b635a0374579e2eb791cab9311deb5bae2e3b | from setuptools import find_packages, setup
setup(
name="graphene-mongo",
version="0.2.12",
description="Graphene Mongoengine integration",
long_description=open("README.rst").read(),
url="https://github.com/graphql-python/graphene-mongo",
author="Abaw Chen",
author_email="[email protected]",
license="MIT",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: PyPy",
"License :: OSI Approved :: MIT License",
],
keywords="api graphql protocol rest relay graphene mongo mongoengine",
packages=find_packages(exclude=["tests"]),
install_requires=[
"graphene>=2.1.3,<3",
"mongoengine>=0.15.0",
"singledispatch>=3.4.0.3",
"iso8601>=0.1.12",
],
python_requires=">=2.7",
zip_safe=True,
tests_require=["pytest>=3.3.2", "mongomock", "mock"],
)
|
py | 1a3b6413d2cc6a73673dfa855b56260259bb86ae | """
Support for local control of entities by emulating the Phillips Hue bridge.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/emulated_hue/
"""
import asyncio
import logging
import voluptuous as vol
from homeassistant import util
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.components.http import REQUIREMENTS # NOQA
from homeassistant.components.http import HomeAssistantWSGI
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.deprecation import get_deprecated
import homeassistant.helpers.config_validation as cv
from homeassistant.util.json import load_json, save_json
from .hue_api import (
HueUsernameView, HueAllLightsStateView, HueOneLightStateView,
HueOneLightChangeView)
from .upnp import DescriptionXmlView, UPNPResponderThread
DOMAIN = 'emulated_hue'
_LOGGER = logging.getLogger(__name__)
NUMBERS_FILE = 'emulated_hue_ids.json'
CONF_HOST_IP = 'host_ip'
CONF_LISTEN_PORT = 'listen_port'
CONF_ADVERTISE_IP = 'advertise_ip'
CONF_ADVERTISE_PORT = 'advertise_port'
CONF_UPNP_BIND_MULTICAST = 'upnp_bind_multicast'
CONF_OFF_MAPS_TO_ON_DOMAINS = 'off_maps_to_on_domains'
CONF_EXPOSE_BY_DEFAULT = 'expose_by_default'
CONF_EXPOSED_DOMAINS = 'exposed_domains'
CONF_TYPE = 'type'
TYPE_ALEXA = 'alexa'
TYPE_GOOGLE = 'google_home'
DEFAULT_LISTEN_PORT = 8300
DEFAULT_UPNP_BIND_MULTICAST = True
DEFAULT_OFF_MAPS_TO_ON_DOMAINS = ['script', 'scene']
DEFAULT_EXPOSE_BY_DEFAULT = True
DEFAULT_EXPOSED_DOMAINS = [
'switch', 'light', 'group', 'input_boolean', 'media_player', 'fan'
]
DEFAULT_TYPE = TYPE_GOOGLE
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_HOST_IP): cv.string,
vol.Optional(CONF_LISTEN_PORT, default=DEFAULT_LISTEN_PORT): cv.port,
vol.Optional(CONF_ADVERTISE_IP): cv.string,
vol.Optional(CONF_ADVERTISE_PORT): cv.port,
vol.Optional(CONF_UPNP_BIND_MULTICAST): cv.boolean,
vol.Optional(CONF_OFF_MAPS_TO_ON_DOMAINS): cv.ensure_list,
vol.Optional(CONF_EXPOSE_BY_DEFAULT): cv.boolean,
vol.Optional(CONF_EXPOSED_DOMAINS): cv.ensure_list,
vol.Optional(CONF_TYPE, default=DEFAULT_TYPE):
vol.Any(TYPE_ALEXA, TYPE_GOOGLE)
})
}, extra=vol.ALLOW_EXTRA)
ATTR_EMULATED_HUE = 'emulated_hue'
ATTR_EMULATED_HUE_HIDDEN = 'emulated_hue_hidden'
def setup(hass, yaml_config):
"""Activate the emulated_hue component."""
config = Config(hass, yaml_config.get(DOMAIN, {}))
server = HomeAssistantWSGI(
hass,
server_host=config.host_ip_addr,
server_port=config.listen_port,
api_password=None,
ssl_certificate=None,
ssl_key=None,
cors_origins=None,
use_x_forwarded_for=False,
trusted_networks=[],
login_threshold=0,
is_ban_enabled=False
)
server.register_view(DescriptionXmlView(config))
server.register_view(HueUsernameView)
server.register_view(HueAllLightsStateView(config))
server.register_view(HueOneLightStateView(config))
server.register_view(HueOneLightChangeView(config))
upnp_listener = UPNPResponderThread(
config.host_ip_addr, config.listen_port,
config.upnp_bind_multicast, config.advertise_ip,
config.advertise_port)
@asyncio.coroutine
def stop_emulated_hue_bridge(event):
"""Stop the emulated hue bridge."""
upnp_listener.stop()
yield from server.stop()
@asyncio.coroutine
def start_emulated_hue_bridge(event):
"""Start the emulated hue bridge."""
upnp_listener.start()
yield from server.start()
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, stop_emulated_hue_bridge)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_emulated_hue_bridge)
return True
class Config(object):
"""Hold configuration variables for the emulated hue bridge."""
def __init__(self, hass, conf):
"""Initialize the instance."""
self.hass = hass
self.type = conf.get(CONF_TYPE)
self.numbers = None
self.cached_states = {}
if self.type == TYPE_ALEXA:
_LOGGER.warning("Alexa type is deprecated and will be removed in a"
" future version")
# Get the IP address that will be passed to the Echo during discovery
self.host_ip_addr = conf.get(CONF_HOST_IP)
if self.host_ip_addr is None:
self.host_ip_addr = util.get_local_ip()
_LOGGER.info(
"Listen IP address not specified, auto-detected address is %s",
self.host_ip_addr)
# Get the port that the Hue bridge will listen on
self.listen_port = conf.get(CONF_LISTEN_PORT)
if not isinstance(self.listen_port, int):
self.listen_port = DEFAULT_LISTEN_PORT
_LOGGER.info(
"Listen port not specified, defaulting to %s",
self.listen_port)
if self.type == TYPE_GOOGLE and self.listen_port != 80:
_LOGGER.warning("When targeting Google Home, listening port has "
"to be port 80")
# Get whether or not UPNP binds to multicast address (239.255.255.250)
# or to the unicast address (host_ip_addr)
self.upnp_bind_multicast = conf.get(
CONF_UPNP_BIND_MULTICAST, DEFAULT_UPNP_BIND_MULTICAST)
# Get domains that cause both "on" and "off" commands to map to "on"
# This is primarily useful for things like scenes or scripts, which
# don't really have a concept of being off
self.off_maps_to_on_domains = conf.get(CONF_OFF_MAPS_TO_ON_DOMAINS)
if not isinstance(self.off_maps_to_on_domains, list):
self.off_maps_to_on_domains = DEFAULT_OFF_MAPS_TO_ON_DOMAINS
# Get whether or not entities should be exposed by default, or if only
# explicitly marked ones will be exposed
self.expose_by_default = conf.get(
CONF_EXPOSE_BY_DEFAULT, DEFAULT_EXPOSE_BY_DEFAULT)
# Get domains that are exposed by default when expose_by_default is
# True
self.exposed_domains = conf.get(
CONF_EXPOSED_DOMAINS, DEFAULT_EXPOSED_DOMAINS)
# Calculated effective advertised IP and port for network isolation
self.advertise_ip = conf.get(
CONF_ADVERTISE_IP) or self.host_ip_addr
self.advertise_port = conf.get(
CONF_ADVERTISE_PORT) or self.listen_port
def entity_id_to_number(self, entity_id):
"""Get a unique number for the entity id."""
if self.type == TYPE_ALEXA:
return entity_id
if self.numbers is None:
self.numbers = _load_json(self.hass.config.path(NUMBERS_FILE))
# Google Home
for number, ent_id in self.numbers.items():
if entity_id == ent_id:
return number
number = '1'
if self.numbers:
number = str(max(int(k) for k in self.numbers) + 1)
self.numbers[number] = entity_id
save_json(self.hass.config.path(NUMBERS_FILE), self.numbers)
return number
def number_to_entity_id(self, number):
"""Convert unique number to entity id."""
if self.type == TYPE_ALEXA:
return number
if self.numbers is None:
self.numbers = _load_json(self.hass.config.path(NUMBERS_FILE))
# Google Home
assert isinstance(number, str)
return self.numbers.get(number)
def is_entity_exposed(self, entity):
"""Determine if an entity should be exposed on the emulated bridge.
Async friendly.
"""
if entity.attributes.get('view') is not None:
# Ignore entities that are views
return False
domain = entity.domain.lower()
explicit_expose = entity.attributes.get(ATTR_EMULATED_HUE, None)
explicit_hidden = entity.attributes.get(ATTR_EMULATED_HUE_HIDDEN, None)
if explicit_expose is True or explicit_hidden is False:
expose = True
elif explicit_expose is False or explicit_hidden is True:
expose = False
else:
expose = None
get_deprecated(entity.attributes, ATTR_EMULATED_HUE_HIDDEN,
ATTR_EMULATED_HUE, None)
domain_exposed_by_default = \
self.expose_by_default and domain in self.exposed_domains
# Expose an entity if the entity's domain is exposed by default and
# the configuration doesn't explicitly exclude it from being
# exposed, or if the entity is explicitly exposed
is_default_exposed = \
domain_exposed_by_default and expose is not False
return is_default_exposed or expose
def _load_json(filename):
"""Wrapper, because we actually want to handle invalid json."""
try:
return load_json(filename)
except HomeAssistantError:
pass
return {}
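# Example configuration.yaml entry accepted by CONFIG_SCHEMA above (values are
# illustrative, not recommendations; Google Home requires listen_port 80 as
# warned in Config.__init__):
#   emulated_hue:
#     type: google_home
#     host_ip: 192.168.1.10
#     listen_port: 80
#     expose_by_default: true
#     exposed_domains:
#       - light
#       - switch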
|
py | 1a3b64c0cc5763998d9fb6e50019da87078d0d95 | """mysite2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('admin/', admin.site.urls),
path('test_get',views.test_get),
path('test_post',views.test_post),
path('birthday',views.birthday),
path('test_html',views.test_html),
path('mycalc',views.test_calc),
path("",views.test_html)
]
|
py | 1a3b6568d5ea44fb80a8961282e72d5a2ac92e53 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_initializers import V1Initializers
class TestV1Initializers(unittest.TestCase):
""" V1Initializers unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1Initializers(self):
"""
Test V1Initializers
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_initializers.V1Initializers()
pass
if __name__ == '__main__':
unittest.main()
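# Note on the FIXME above: in this API version V1Initializers requires the
# `pending` field, so a minimal construction could look like the sketch below
# (unverified against the generated client):
#   from kubernetes.client.models.v1_initializer import V1Initializer
#   model = V1Initializers(pending=[V1Initializer(name="example.io/initializer")])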
|
py | 1a3b65690ab61b456403fe3998d720d09f720c78 | # -*- coding: utf-8 -*-
# pylint: disable=invalid-name
###############################################################################
# Copyright (c), The AiiDA-CP2K authors. #
# SPDX-License-Identifier: MIT #
# AiiDA-CP2K is hosted on GitHub at https://github.com/aiidateam/aiida-cp2k #
# For further information on the license, see the LICENSE.txt file. #
###############################################################################
"""Run simple DFT calculation"""
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import ase.build
import click
from aiida.engine import run
from aiida.orm import (Code, Dict, SinglefileData, StructureData)
from aiida.common import NotExistent
from aiida.plugins import CalculationFactory
Cp2kCalculation = CalculationFactory('cp2k')
def example_structure_through_file(cp2k_code):
"""Run simple DFT calculation"""
print("Testing CP2K ENERGY on H2O (DFT). Water molecule is provided through a file input...")
pwd = os.path.dirname(os.path.realpath(__file__))
# structure
atoms = ase.build.molecule('H2O')
atoms.center(vacuum=2.0)
structure = StructureData(ase=atoms)
# basis set
basis_file = SinglefileData(file=os.path.join(pwd, "..", "files", "BASIS_MOLOPT"))
# pseudopotentials
pseudo_file = SinglefileData(file=os.path.join(pwd, "..", "files", "GTH_POTENTIALS"))
# parameters
parameters = Dict(
dict={
'FORCE_EVAL': {
'METHOD': 'Quickstep',
'DFT': {
'BASIS_SET_FILE_NAME': 'BASIS_MOLOPT',
'POTENTIAL_FILE_NAME': 'GTH_POTENTIALS',
'QS': {
'EPS_DEFAULT': 1.0e-12,
'WF_INTERPOLATION': 'ps',
'EXTRAPOLATION_ORDER': 3,
},
'MGRID': {
'NGRIDS': 4,
'CUTOFF': 280,
'REL_CUTOFF': 30,
},
'XC': {
'XC_FUNCTIONAL': {
'_': 'LDA',
},
},
'POISSON': {
'PERIODIC': 'none',
'PSOLVER': 'MT',
},
},
'SUBSYS': {
'TOPOLOGY': {
'COORD_FILE_NAME': 'water.xyz',
'COORD_FILE_FORMAT': 'XYZ'
},
'CELL': {
'ABC': '{:<15} {:<15} {:<15}'.format(*atoms.cell.diagonal()),
},
'KIND': [
{
'_': 'O',
'BASIS_SET': 'DZVP-MOLOPT-SR-GTH',
'POTENTIAL': 'GTH-LDA-q6'
},
{
'_': 'H',
'BASIS_SET': 'DZVP-MOLOPT-SR-GTH',
'POTENTIAL': 'GTH-LDA-q1'
},
],
},
}
})
# Construct process builder
builder = Cp2kCalculation.get_builder()
builder.parameters = parameters
builder.code = cp2k_code
builder.file = {
'basis': basis_file,
'pseudo': pseudo_file,
'water': structure,
}
builder.metadata.options.resources = {
"num_machines": 1,
"num_mpiprocs_per_machine": 1,
}
builder.metadata.options.max_wallclock_seconds = 1 * 3 * 60
print("Submitted calculation...")
run(builder)
@click.command('cli')
@click.argument('codelabel')
def cli(codelabel):
"""Click interface"""
try:
code = Code.get_from_string(codelabel)
except NotExistent:
print("The code '{}' does not exist".format(codelabel))
sys.exit(1)
example_structure_through_file(code)
if __name__ == '__main__':
cli() # pylint: disable=no-value-for-parameter
|
py | 1a3b65bd21e395c6bd7b61fccbccd02f554a8a2e | import subprocess as sp
import sys
import os
sp.run(["pip", "install", "-e", "."], check=True)
sp.run(["pytest", "blobfile"] + sys.argv[1:], check=True)
os.environ["BLOBFILE_FORCE_GOOGLE_ANONYMOUS_AUTH"] = "1"
sp.run(["pytest", "blobfile", "-k", "test_gcs_public"] + sys.argv[1:], check=True)
|
py | 1a3b66582194707228bce4c9b3bf2bfa59faf2e1 | _base_ = './pspnet_unet_s5-d16_128x128_40k_stare.py'
model = dict(
decode_head=dict(loss_decode=[
dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0),
dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0)
]))
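# Typical usage with the standard mmsegmentation tooling (the config path below
# is a placeholder for wherever this file lives under configs/):
#   python tools/train.py configs/unet/<this_config>.py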
|
py | 1a3b6735d8c4e899a7439b81042747017013e1d1 | #! /user/bin/python
import sys, os, subprocess
from config import OPT,LLVMPASS_FOLDER
############################
irPath = sys.argv[1]
targetIndex = int(sys.argv[2])
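# Example invocation (illustrative; the IR file and the target instruction index
# are placeholders): python <this_script>.py program.ll 397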
############################
# Model: SIM, LM, MM
instCountDic = {}
smDic = {} # store masking dic
# Read "profile_cmp_prob_result.txt"
with open("profile_cmp_prob_result.txt", 'r') as cmpf:
pcLines = cmpf.readlines()
for pcLine in pcLines:
index = int(pcLine.split(" ")[0].replace(":", ""))
c1 = int(pcLine.split(" ")[1])
c2 = int(pcLine.split(" ")[2])
totalC = c1 + c2
instCountDic[index] = totalC
cmpf.close()
# Read "profile_call_prob_result.txt"
with open("profile_call_prob_result.txt", 'r') as callf:
pcLines = callf.readlines()
for pcLine in pcLines:
index = int(pcLine.split(" ")[0].replace(":", ""))
totalC = int(pcLine.split(" ")[1])
instCountDic[index] = totalC
callf.close()
# Read "store_masking.txt"
with open("store_masking.txt", 'r') as sf:
sLines = sf.readlines()
for sLine in sLines:
index = int(sLine.split(" ")[0])
sm = float(sLine.split(" ")[1])
totalC = int(sLine.split(" ")[2])
instCountDic[index] = totalC
smDic[index] = sm # masking rate of store
sf.close()
os.system("rm null")
############################################################################
# Run Static-instruction-level Masking (SIM)
############################################################################
selectIndexStr = "-select_index=" + `targetIndex`
command = [OPT, "-S", "-load", LLVMPASS_FOLDER + "/SIM.so", "-bishe_insert", selectIndexStr, irPath, "-o", "null", "-select_stuples_file=simplified_inst_tuples.txt"]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
simOutput = p.stdout.read()
totalTmnInstCount = 0
accumSdc = 0
accumCrash = 0
for opLine in simOutput.split("\n"): # Each line is a leaf node of SIM and needs to be weighted at the end
if " " in opLine:
indexNType = opLine.split(":")[0]
instIndex = int(indexNType.split(" ")[0])
instType = indexNType.split(" ")[1]
# Get all rates from SIM
# 397 cmp: 0.015324, 0.219051, 0.765625
# 400 store: 0.054932, 0.179443, 0.765625
simPR = float( opLine.split(":")[1].split(", ")[0] )
simMR = float( opLine.split(":")[1].split(", ")[1] )
simCR = float( opLine.split(":")[1].split(", ")[2].replace("\n", "") ) # Crash rate of SIM is used as final crash rate
instCount = 0
if instIndex in instCountDic:
instCount = instCountDic[instIndex]
if "cmp" in instType:
############################################################################
# RUN Logic-level Masking:Get logic masking and final benign rate
############################################################################
llCommand = ["python", "getCmpLogicMasking.py", irPath, `instIndex`]
p = subprocess.Popen(llCommand, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
llOutput = p.stdout.read()
llBenign = float(llOutput.split("\n")[-2])
llSdc = 1 - llBenign
sdcContr = llSdc * simPR
totalTmnInstCount += instCount
accumSdc += sdcContr * instCount
accumCrash += simCR * instCount
print "SDC: cmp " + `instIndex` + " ------> " + `sdcContr`
if "store" in instType:
############################################################################
# Get Store masking rate
############################################################################
sm = 0 # store masking rate
sPr = 1 # store sdc rate
if instIndex in smDic:
sm = smDic[instIndex]
sPr = 1 - sm
llBenign = float(sm)
llSdc = 1 - llBenign
sdcContr = llSdc * simPR
totalTmnInstCount += instCount
accumSdc += sdcContr * instCount
accumCrash += simCR * instCount
print "SDC: store " + `instIndex` + " ------> " + `sdcContr`
if "call" in instType:
if len( indexNType.split(" ") ) >= 3:
funcName = indexNType.split(" ")[2]
# Specify SDC
if "fopen" in funcName or "fputs" in funcName or "fwrite" in funcName or "_IO_putc" in funcName or "fputc" in funcName or "puts" in funcName or "fprintf" in funcName:
cPr = 1
cMr = 0
sdcContr = cPr * simPR
totalTmnInstCount += instCount
accumSdc += sdcContr * instCount
accumCrash += simCR * instCount
print "SDC: call " + `instIndex` + " ------> " + `sdcContr`
fSdc = 0
fCrash = 0
fBenign = 1
if totalTmnInstCount != 0:
fSdc = accumSdc / float(totalTmnInstCount)
fCrash = accumCrash / float(totalTmnInstCount)
fBenign = 1 - fSdc - fCrash
print "\n***************************"
print "Final SDC: " + `fSdc`
print "Final Benign: " + `fBenign`
print "Final Crash: " + `fCrash`
|
py | 1a3b67742b1e8e32905d64c8e1775a158a2c4334 | from flask import Flask
from flask_restful import Api
from flask_cors import CORS
from src.api import Movies , Categories , Upload
from src.models import Database
from os import getcwd
def create_app():
static_folder = getcwd() + '/img/'
app = Flask(
__name__,
static_folder=static_folder
)
CORS(app)
api = Api(app)
Database().migrate()
api.add_resource(Upload,'/upload')
api.add_resource(Movies,'/movies','/movies/<string:id>')
api.add_resource(Categories,'/categories','/categories/<string:id>')
return app
if __name__ == '__main__':
app = create_app()
host = '0.0.0.0'
port = 8000
debug = True
app.run(host,port,debug)
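# Illustrative requests against the routes registered in create_app (the id is
# made up):
#   curl http://localhost:8000/movies
#   curl http://localhost:8000/movies/1
#   curl http://localhost:8000/categories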
|
py | 1a3b6983f350852d276f0f4a59f9c0f5829812de | import discord
import logging
import pprint
import socket
from aiohttp import web
from json import JSONDecodeError
from logging.config import fileConfig
from typing import List, Union
from utils.match import Match
class WebServer:
def __init__(self, bot):
from bot import ICL_bot
fileConfig('logging.conf')
self.logger = logging.getLogger(f'ICL_bot.{__name__}')
self.bot: ICL_bot = bot
self.IP: str = socket.gethostbyname(socket.gethostname())
self.port: int = self.bot.bot_port
self.site: web.TCPSite = None
async def _handler(self, request: web.Request) -> Union[web.Response, web.FileResponse]:
"""
Super simple HTTP handler.
Parameters
----------
request : web.Request
AIOHTTP request object.
"""
if request.method == 'GET':
self.logger.debug(f'{request.remote} accessed {self.IP}:{self.port}{request.path}')
return WebServer._http_error_handler()
# Auth check for json
elif request.method == 'POST':
try:
faceit = await request.json()
except JSONDecodeError:
                self.logger.warning(f'{request.remote} sent an invalid json POST')
return WebServer._http_error_handler('json-body')
self.logger.debug(f'webhook = \n {pprint.pformat(faceit)}')
if faceit['retry_count'] == 0:
if faceit['event'] == 'match_status_ready':
self.logger.debug(f'{faceit["payload"]["id"]} is ready')
match_exists = False
for match_check in self.bot.matches:
self.logger.debug(f'{match_check.match_id}')
if match_check.match_id == str(faceit['payload']['id']):
match_exists = True
self.logger.error('Match already exists')
break
if not match_exists:
self.logger.info('Creating channels')
team1_channel: discord.VoiceChannel = await self.bot.get_channel(
787774505854042132).create_voice_channel(
name=faceit["payload"]["teams"][0]["name"], user_limit=6)
team2_channel: discord.VoiceChannel = await self.bot.get_channel(
787774505854042132).create_voice_channel(
name=faceit["payload"]["teams"][1]["name"], user_limit=6)
team1_roster = []
for team1_player in faceit["payload"]["teams"][0]["roster"]:
team1_roster.append((team1_player['id'], team1_player['nickname']))
team2_roster = []
for team2_player in faceit["payload"]["teams"][1]["roster"]:
team2_roster.append((team2_player['id'], team2_player['nickname']))
team1_invite = await team1_channel.create_invite(max_age=7200)
team2_invite = await team2_channel.create_invite(max_age=7200)
new_match = Match(faceit['payload']['id'], team1_channel, team2_channel, team1_invite, team2_invite,
faceit["payload"]["teams"][0]["name"], faceit["payload"]["teams"][1]["name"],
team1_roster, team2_roster)
self.bot.matches.append(new_match)
self.logger.debug(len(self.bot.matches))
self.logger.debug('finishing creating the match')
if not self.bot.cogs['CSGO'].update_scorecard.is_running():
self.logger.debug('starting loop thingy')
self.bot.cogs['CSGO'].update_scorecard.start()
if faceit['event'] == 'match_status_finished' or faceit['event'] == 'match_status_aborted' or faceit['event'] == 'match_status_cancelled':
self.logger.debug(f'{faceit["payload"]["id"]} is over')
match: Match = None
for match_check in self.bot.matches:
self.logger.debug(f'{match_check.match_id}')
if match_check.match_id == str(faceit['payload']['id']):
match = match_check
self.logger.debug(f'Found match {match.match_id}')
break
if match is not None:
for member in match.team1_channel.members + match.team2_channel.members:
try:
await member.move_to(channel=self.bot.get_channel(784164015122546751), reason=f'Match Complete')
except (discord.HTTPException, discord.Forbidden):
self.logger.error(f'Could not move {member}')
await match.team1_channel.delete(reason=f'{faceit["payload"]["id"]} Complete')
await match.team2_channel.delete(reason=f'{faceit["payload"]["id"]} Complete')
self.bot.matches.remove(match)
self.logger.debug('Sending 200')
return web.Response(status=200)
else:
            # Used to decline any request that doesn't match what our
            # API expects.
self.logger.warning(f'{request.remote} sent an invalid request.')
return WebServer._http_error_handler("request-type")
async def http_start(self) -> None:
"""
Used to start the webserver inside the same context as the bot.
"""
server = web.Server(self._handler)
runner = web.ServerRunner(server)
await runner.setup()
self.site = web.TCPSite(runner, self.IP, self.port)
await self.site.start()
self.logger.info(f'Webserver Started on {self.IP}:{self.port}')
async def http_stop(self) -> None:
"""
Used to stop the webserver inside the same context as the bot.
"""
self.logger.warning(f'Webserver Stopping on {self.IP}:{self.port}')
await self.site.stop()
@staticmethod
def _http_error_handler(error: str = 'Undefined Error') -> web.Response:
"""
Used to handle HTTP error response.
Parameters
----------
        error : str, optional
            Error message to include in the response, by default 'Undefined Error'.
            A falsy value results in a 200 status instead of 400.
Returns
-------
web.Response
AIOHTTP web server response.
"""
return web.json_response(
{"error": error},
status=400 if error else 200
)
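# Minimal usage sketch, assuming an ICL_bot instance with a configured
# bot_port; the on_ready hook below is illustrative wiring, not the
# project's verified entry point.
#
#     from bot import ICL_bot
#
#     bot = ICL_bot(...)            # constructed elsewhere in the project
#     server = WebServer(bot)
#
#     @bot.event
#     async def on_ready():
#         await server.http_start()     # begin accepting FACEIT webhooks
#
#     # on shutdown:
#     #     await server.http_stop()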
|
py | 1a3b69be730766d1ee7032c8298cf73293135961 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Weight loader."""
import numpy as np
from mindspore.train.serialization import load_checkpoint
def load_infer_weights(config):
"""
Load weights from ckpt or npz.
Args:
config (TransformerConfig): Config.
Returns:
dict, weights.
"""
model_path = config.checkpoint_file_path
if model_path.endswith(".npz"):
ms_ckpt = np.load(model_path)
is_npz = True
else:
ms_ckpt = load_checkpoint(model_path)
is_npz = False
weights = {}
with open("variable_after_deal.txt", "a") as f:
for param_name in ms_ckpt:
infer_name = param_name.replace("transformer.transformer.", "")
if not infer_name.startswith("encoder"):
if infer_name.startswith("decoder.layers."):
infer_name = infer_name.replace("decoder.layers.", "decoder.layer")
infer_name = "decoder.decoder." + infer_name
if is_npz:
weights[infer_name] = ms_ckpt[param_name]
else:
weights[infer_name] = ms_ckpt[param_name].data.asnumpy()
f.write(infer_name)
f.write("\n")
return weights
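# Minimal usage sketch, assuming a config object that only needs to expose
# checkpoint_file_path; the .npz path below is a placeholder.
if __name__ == "__main__":
    from types import SimpleNamespace
    demo_config = SimpleNamespace(checkpoint_file_path="transformer_weights.npz")  # placeholder path
    infer_weights = load_infer_weights(demo_config)
    print("loaded %d parameters" % len(infer_weights))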
|
py | 1a3b6a74fd80c7b4c7954e2af71cc0fdf60b262f | from django.db import models
from django.contrib.auth.models import User
from django_resized import ResizedImageField
from hood.models import Neighborhood
class Profile(models.Model):
user = models.OneToOneField(
User, on_delete=models.CASCADE)
id_number = models.IntegerField(default=0)
neighborhood = models.ForeignKey(
Neighborhood, null=True, on_delete=models.CASCADE, blank=True)
profile_picture = ResizedImageField(size=[300, 300], quality=75,
default='default.jpg', upload_to='profile_pics/')
def __str__(self):
return f'{self.user.username} UserProfile'
|
py | 1a3b6b2e1910e17680cca25a32858934da1cdfcb | from collections import Counter
from itertools import combinations
import json
import warnings
import networkx as nx
class CrossCorrelationGraph:
"""CrossCorrelationGraph for computing correlation between clusters
Attributes
----------
window : float
Threshold for the window size in seconds
correlation : float
Threshold for the minimum required correlation
graph : nx.Graph
Cross correlation graph containing all correlations
Note that each node in the graph represents an 'activity signature'
to avoid duplicates. The NetworkDestinations corresponding to each
signature are stored in the 'mapping' attribute.
Note
----
IMPORTANT: The CrossCorrelation.graph object is an optimised graph.
Each node does not represent a network destination, but represents
an activity fingerprint. E.g. when destinations A and B are both
only active at time slices 3 and 7, then these destinations are
represented by a single node. We use the self.mapping to extract the
network destinations from each graph node.
This is a huge optimisation for finding cliques as the number of
different network destinations theoretically covers the entire IP
space, whereas the number of activity fingerprints is bounded by
2^(batch / window), in our work 2^(300/30) = 2^10 = 1024. If these
parameters change, the complexity may increase, but never beyond the
original bounds. Hence, this optimisation never has a worse time
complexity.
mapping : dict
NetworkDestinations corresponding to each node in the graph
"""
def __init__(self, window=30, correlation=0.1):
"""CrossCorrelationGraph for computing correlation between clusters
Parameters
----------
window : float, default=30
Threshold for the window size in seconds
correlation : float, default=0.1
Threshold for the minimum required correlation
"""
# Set parameters
self.window = window
self.correlation = correlation
self.mapping = dict()
self.graph = nx.Graph()
def fit(self, cluster, y=None):
"""Fit Cross Correlation Graph.
Parameters
----------
cluster : Cluster
Cluster to fit graph, cluster must be populated with flows
y : ignored
Returns
-------
result : self
Returns self
"""
# Compute cross correlations within cluster
correlations, self.mapping = self.cross_correlation(cluster)
if self.correlation <= 0: # Create a fully connected graph
self.graph = nx.complete_graph(list(self.mapping.keys()))
else:
self.graph = nx.Graph()
self.graph.add_nodes_from(list(self.mapping.keys()))
for (u, v), weight in correlations.items():
if weight >= self.correlation:
self.graph.add_edge(u, v, weight=weight)
return self
def predict(self, X=None, y=None):
"""Fit Cross Correlation Graph and return cliques.
Parameters
----------
X : ignored
y : ignored
Returns
-------
result : Generator of cliques
Generator of all cliques in the graph
"""
cliques = nx.find_cliques(self.graph)
return (set.union(*[self.mapping.get(n) for n in c]) for c in cliques)
def fit_predict(self, cluster, y=None):
"""Fit cross correlation graph with clusters from X and return cliques.
Parameters
----------
cluster : Cluster
Cluster to fit graph, cluster must be populated with flows
y : ignored
Returns
-------
result : Generator of cliques
Generator of all cliques in the graph
"""
return self.fit(cluster).predict(cluster)
def export(self, outfile, dense=True, format="gexf"):
"""Export CrossCorrelationGraph to outfile for further analysis
Parameters
----------
outfile : string
File to export CrossCorrelationGraph
dense : boolean, default=True
If True export the dense graph (see IMPORTANT note at graph),
this means that each node is represented by the time slices in
which they were active. Each node still has the information of
all correlated nodes.
If False export the complete graph. Note that these graphs can
get very large with lots of edges, therefore, for manual
inspection it is recommended to use dense=True instead.
format : ('gexf'|'gml'), default='gexf'
Format in which to export, currently only 'gexf', 'gml' are
supported.
"""
if dense:
graph = self.graph
# Initialise human-readable mapping of nodes
mapping = dict()
# Fill mapping
for node in graph:
info = {
"window": list(sorted(node)),
"ips": set(),
"certs": set(),
"labels": Counter(),
}
# Loop over corresponding network destinations
for destination in self.mapping.get(node):
info["ips"] = info.get("ips", set()) | destination.destinations
info["certs"] = info.get("certs", set()) | destination.certificates
info["labels"] = info.get("labels", Counter()) + destination.labels
# Remove None from certificates
info["certs"] = info.get("certs", set()) - {None}
# Transform sets into lists
info["ips"] = list(info.get("ips", set()))
info["certs"] = list(info.get("certs", set()))
# Store mapping as text
mapping[node] = json.dumps(info, sort_keys=True)
graph = nx.relabel_nodes(graph, mapping)
# Make graph not dense
else:
graph = nx.Graph()
for node in self.graph:
for destination in self.mapping.get(node):
graph.add_node(destination)
for node in self.graph:
for source in self.mapping.get(node):
for destination in self.mapping.get(node):
if source == destination:
continue
graph.add_edge(source, destination, weight=1)
# Add all edges to other nodes
for connected in nx.neighbors(self.graph, node):
# Get edge get_edge_data
data = self.graph.get_edge_data(node, connected)
# Get all destinations
for destination in self.mapping.get(connected):
graph.add_edge(source, destination, data=data)
# Transform network destinations to human readable format
mapping = dict()
for node in self.graph:
for destination in self.mapping.get(node):
info = {
"window": list(sorted(node)),
"ips": list(destination.destinations),
"certs": list(destination.certificates - {None}),
"labels": destination.labels,
}
mapping[destination] = json.dumps(info, sort_keys=True)
graph = nx.relabel_nodes(graph, mapping)
# Export graph to file
if format.lower() == "gexf":
nx.write_gexf(graph, outfile)
elif format.lower() == "gml":
nx.write_gml(graph, outfile)
else:
# Warn user of unknown format
warnings.warn(f"Unknown export format '{format}', defaulting to 'gexf'")
# Export as gexf
nx.write_gexf(graph, outfile)
def cross_correlation(self, cluster):
"""Compute cross correlation between clusters
Parameters
----------
cluster : Cluster
Cluster to fit graph, cluster must be populated with flows
Returns
-------
correlation : dict
Dictionary of cross correlation values between each
NetworkDestination inside cluster.
mapping : dict
Mapping of activity fingerprint -> clusters
"""
correlation = dict()
# Get activity of samples
activity = self.activity(cluster)
# Get inverted mapping
mapping = dict()
for destination, active in activity.items():
mapping[frozenset(active)] = mapping.get(frozenset(active), set()) | set(
[destination]
)
# Compute cross correlation values
for x, y in combinations(mapping, 2):
            # Jaccard index: size of the intersection over size of the union
            intersection = len(x & y)
            if intersection:
                union = len(x | y)
                correlation[x, y] = intersection / union
return correlation, mapping
def activity(self, cluster):
"""Extracts sets of active clusters by time.
Parameters
----------
cluster : Cluster
Cluster to fit graph, cluster must be populated with flows
Returns
-------
mapping : dict
Dictionary of NetworkDestination -> activity
"""
X = cluster.samples
start = min(x.time_start for x in X)
# Initialise mapping of NetworkDestination -> activity
mapping = dict()
for destination in cluster.clusters():
for flow in destination.samples:
activity = set()
for timestamp in flow.timestamps:
activity.add(int((timestamp - start) // self.window))
mapping[destination] = mapping.get(destination, set()) | activity
return mapping
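# Minimal usage sketch showing the duck-typed interface this class expects:
# a cluster with `samples` (flows carrying time_start/timestamps) and
# `clusters()` (network destinations holding flows). The _Flow/_Destination/
# _Cluster stand-ins are simplified assumptions, not the project's real classes.
if __name__ == "__main__":
    class _Flow:
        def __init__(self, timestamps):
            self.timestamps = timestamps
            self.time_start = min(timestamps)
    class _Destination:
        def __init__(self, name, flows):
            self.name = name
            self.samples = flows
    class _Cluster:
        def __init__(self, destinations):
            self._destinations = destinations
            self.samples = [f for d in destinations for f in d.samples]
        def clusters(self):
            return self._destinations
    # a and b are active in the same 30-second windows, c in different ones
    a = _Destination("a", [_Flow([0, 35, 70])])
    b = _Destination("b", [_Flow([5, 40, 75])])
    c = _Destination("c", [_Flow([200, 250])])
    ccg = CrossCorrelationGraph(window=30, correlation=0.5)
    for clique in ccg.fit_predict(_Cluster([a, b, c])):
        print(sorted(d.name for d in clique))  # expect ['a', 'b'] and ['c']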
|
py | 1a3b6b7df0b7a9d73d809337899a312041873647 | from PyQt5 import QtCore, QtWidgets, QtGui
import numpy as np
import brainbox as bb
class FilterGroup:
def __init__(self):
self.reset_filter_button = QtWidgets.QPushButton('Reset Filters')
self.contrast_options_text = QtWidgets.QLabel('Stimulus Contrast')
self.contrasts = [1, 0.25, 0.125, 0.0625, 0]
self.contrast_options = QtWidgets.QListWidget()
for val in self.contrasts:
item = QtWidgets.QListWidgetItem(str(val * 100) + ' %')
item.setFlags(item.flags() | QtCore.Qt.ItemIsUserCheckable)
item.setCheckState(QtCore.Qt.Checked)
self.contrast_options.addItem(item)
self.hold_button = QtWidgets.QCheckBox('Hold')
self.hold_button.setCheckState(QtCore.Qt.Checked)
self.filter_buttons = QtWidgets.QButtonGroup()
self.filter_group = QtWidgets.QGroupBox('Filter Options')
self.filter_layout = QtWidgets.QVBoxLayout()
self.filter_layout.setSpacing(5)
#self.filter_buttons.setExclusive(False)
filter_options = ['all', 'correct', 'incorrect', 'left', 'right', 'left correct', 'left incorrect', 'right correct', 'right incorrect']
for i, val in enumerate(filter_options):
button = QtWidgets.QCheckBox(val)
if val == 'all':
button.setCheckState(QtCore.Qt.Checked)
else:
button.setCheckState(QtCore.Qt.Unchecked)
self.filter_buttons.addButton(button, id=i)
self.filter_layout.addWidget(button)
self.filter_group.setLayout(self.filter_layout)
self.trial_buttons = QtWidgets.QButtonGroup()
self.trial_group = QtWidgets.QGroupBox('Sort Trials By:')
self.trial_layout = QtWidgets.QHBoxLayout()
trial_options = ['trial no.', 'correct vs incorrect', 'left vs right', 'correct vs incorrect and left vs right']
for i, val in enumerate(trial_options):
button = QtWidgets.QRadioButton(val)
if i == 0:
button.setChecked(True)
else:
button.setChecked(False)
self.trial_buttons.addButton(button, id = i)
self.trial_layout.addWidget(button)
self.trial_group.setLayout(self.trial_layout)
# Print out no. of trials for each filter condition
self.ntrials_text = QtWidgets.QLabel('No. of trials = ')
self.filter_options_group = QtWidgets.QGroupBox()
self.group_filter_widget()
self.filter_options_group.setFixedSize(250, 380)
def group_filter_widget(self):
group_layout = QtWidgets.QVBoxLayout()
group_layout.addWidget(self.reset_filter_button)
group_layout.addWidget(self.contrast_options_text)
group_layout.addWidget(self.contrast_options)
group_layout.addWidget(self.hold_button)
group_layout.addWidget(self.filter_group)
group_layout.addWidget(self.ntrials_text)
self.filter_options_group.setLayout(group_layout)
def get_checked_contrasts(self):
'''
Finds the contrast options that are selected. Called by on_contrast_list_changed in gui_main.
Returns
----------
stim_contrast: list
A list of the contrast options that are selected
'''
stim_contrast = []
for idx in range(self.contrast_options.count()):
if self.contrast_options.item(idx).checkState() == QtCore.Qt.Checked:
stim_contrast.append(self.contrasts[idx])
return stim_contrast
def compute_and_sort_trials(self, stim_contrast):
#Precompute trials for a given contrast set
#All
all_trials = bb.core.Bunch()
all_trials['colour'] = QtGui.QColor('#808080')
all_trials['fill'] = QtGui.QColor('#808080')
all_trials['linestyle'] = QtGui.QPen(QtCore.Qt.SolidLine)
trials_id = np.empty(0,int)
idx= np.empty(0, int)
for c in stim_contrast:
idx = np.where((self.trials['contrastLeft'] == c) | (self.trials['contrastRight'] == c))[0]
trials_id = np.append(trials_id, idx)
trials_id = np.setdiff1d(trials_id, self.nan_trials)
trials_no = bb.core.Bunch()
trials_no['trials'] = trials_id
trials_no['lines'] = []
trials_no['linecolours'] = []
trials_no['text'] = []
all_trials['trial no.'] = trials_no
trials_ic = bb.core.Bunch()
correct = np.intersect1d(trials_id, self.correct_idx)
incorrect = np.intersect1d(trials_id, self.incorrect_idx)
trials_ic['trials'] = np.append(correct, incorrect)
trials_ic['lines'] = [[0, len(correct)], [len(correct), len(trials_ic['trials'])]]
trials_ic['linecolours'] = [QtGui.QColor('#1f77b4'), QtGui.QColor('#d62728')]
trials_ic['text'] = ['correct', 'incorrect']
all_trials['correct vs incorrect'] = trials_ic
trials_lf = bb.core.Bunch()
left = np.intersect1d(trials_id, self.left_idx)
right = np.intersect1d(trials_id, self.right_idx)
trials_lf['trials'] = np.append(left, right)
trials_lf['lines'] = [[0, len(left)], [len(left), len(trials_lf['trials'])]]
trials_lf['linecolours'] = [QtGui.QColor('#2ca02c'), QtGui.QColor('#bcbd22')]
trials_lf['text'] = ['left', 'right']
all_trials['left vs right'] = trials_lf
trials_iclf = bb.core.Bunch()
correct_right = np.intersect1d(trials_id, self.correct_right_idx)
correct_left = np.intersect1d(trials_id, self.correct_left_idx)
incorrect_right = np.intersect1d(trials_id, self.incorrect_right_idx)
incorrect_left = np.intersect1d(trials_id, self.incorrect_left_idx)
trials_iclf['trials'] = np.concatenate((correct_left, correct_right, incorrect_left, incorrect_right))
trials_iclf['lines'] = [[0, len(correct_left)], [len(correct_left), len(correct_left)
+ len(correct_right)], [len(correct_left) + len(correct_right), len(correct_left)
+ len(correct_right) + len(incorrect_left)],[len(correct_left) + len(correct_right)
+ len(incorrect_left), len(trials_iclf['trials'])]]
trials_iclf['linecolours'] = [QtGui.QColor('#17becf'), QtGui.QColor('#9467bd'), QtGui.QColor('#8c564b'), QtGui.QColor('#ff7f0e')]
trials_iclf['text'] = ['left correct', 'right correct', 'left incorrect', 'right incorrect']
all_trials['correct vs incorrect and left vs right'] = trials_iclf
#Correct
correct_trials = bb.core.Bunch()
correct_trials['colour'] = QtGui.QColor('#1f77b4')
correct_trials['fill'] = QtGui.QColor('#1f77b4')
correct_trials['linestyle'] = QtGui.QPen(QtCore.Qt.SolidLine)
trials_id = np.empty(0,int)
idx= np.empty(0, int)
for c in stim_contrast:
idx = np.where(((self.trials['contrastLeft'] == c) | (self.trials['contrastRight'] == c)) & (self.trials['feedbackType'] == 1))[0]
trials_id = np.append(trials_id, idx)
trials_id = np.setdiff1d(trials_id, self.nan_trials)
trials_no = bb.core.Bunch()
trials_no['trials'] = trials_id
trials_no['lines'] = []
trials_no['linecolours'] = []
trials_no['text'] = []
correct_trials['trial no.'] = trials_no
trials_ic = bb.core.Bunch()
trials_ic['trials'] = trials_id
trials_ic['lines'] = [[0, len(trials_ic['trials'])]]
trials_ic['linecolours'] = [QtGui.QColor('#1f77b4')]
trials_ic['text'] = ['correct']
correct_trials['correct vs incorrect'] = trials_ic
trials_lf = bb.core.Bunch()
left = np.intersect1d(trials_id, self.correct_left_idx)
right = np.intersect1d(trials_id, self.correct_right_idx)
trials_lf['trials'] = np.append(left, right)
trials_lf['lines'] = [[0, len(left)], [len(left), len(trials_lf['trials'])]]
trials_lf['linecolours'] = [QtGui.QColor('#17becf'), QtGui.QColor('#9467bd')]
trials_lf['text'] = ['left correct', 'right correct']
correct_trials['left vs right'] = trials_lf
correct_trials['correct vs incorrect and left vs right'] = trials_lf
#Incorrect
incorrect_trials = bb.core.Bunch()
incorrect_trials['colour'] = QtGui.QColor('#d62728')
incorrect_trials['fill'] = QtGui.QColor('#d62728')
incorrect_trials['linestyle'] = QtGui.QPen(QtCore.Qt.SolidLine)
trials_id = np.empty(0,int)
idx= np.empty(0, int)
for c in stim_contrast:
idx = np.where(((self.trials['contrastLeft'] == c) | (self.trials['contrastRight'] == c)) & (self.trials['feedbackType'] == -1))[0]
trials_id = np.append(trials_id, idx)
trials_id = np.setdiff1d(trials_id, self.nan_trials)
trials_no = bb.core.Bunch()
trials_no['trials'] = trials_id
trials_no['lines'] = []
trials_no['linecolours'] = []
trials_no['text'] = []
incorrect_trials['trial no.'] = trials_no
trials_ic = bb.core.Bunch()
trials_ic['trials'] = trials_id
trials_ic['lines'] = [[0, len(trials_ic['trials'])]]
trials_ic['linecolours'] = [QtGui.QColor('#d62728')]
trials_ic['text'] = ['incorrect']
incorrect_trials['correct vs incorrect'] = trials_ic
trials_lf = bb.core.Bunch()
trials_iclf = bb.core.Bunch()
left = np.intersect1d(trials_id, self.incorrect_left_idx)
right = np.intersect1d(trials_id, self.incorrect_right_idx)
trials_lf['trials'] = np.append(left, right)
trials_lf['lines'] = [[0, len(left)], [len(left), len(trials_lf['trials'])]]
trials_lf['linecolours'] = [QtGui.QColor('#8c564b'), QtGui.QColor('#ff7f0e')]
trials_lf['text'] = ['left incorrect', 'right incorrect']
incorrect_trials['left vs right'] = trials_lf
incorrect_trials['correct vs incorrect and left vs right'] = trials_lf
#Left
left_trials = bb.core.Bunch()
left_trials['colour'] = QtGui.QColor('#2ca02c')
left_trials['fill'] = QtGui.QColor('#2ca02c')
left_trials['linestyle'] = QtGui.QPen(QtCore.Qt.SolidLine)
trials_id = np.empty(0,int)
idx= np.empty(0, int)
for c in stim_contrast:
idx = np.where(self.trials['contrastLeft'] == c)[0]
trials_id = np.append(trials_id, idx)
trials_id = np.setdiff1d(trials_id, self.nan_trials)
trials_no = bb.core.Bunch()
trials_no['trials'] = trials_id
trials_no['lines'] = []
trials_no['linecolours'] = []
trials_no['text'] = []
left_trials['trial no.'] = trials_no
trials_lf = bb.core.Bunch()
trials_lf['trials'] = trials_id
trials_lf['lines'] = [[0, len(trials_lf['trials'])]]
trials_lf['linecolours'] = [QtGui.QColor('#2ca02c')]
trials_lf['text'] = ['left']
left_trials['left vs right'] = trials_lf
trials_ic = bb.core.Bunch()
correct = np.intersect1d(trials_id, self.correct_left_idx)
incorrect = np.intersect1d(trials_id, self.incorrect_left_idx)
trials_ic['trials'] = np.append(correct, incorrect)
trials_ic['lines'] = [[0, len(correct)], [len(correct), len(trials_ic['trials'])]]
trials_ic['linecolours'] = [QtGui.QColor('#17becf'), QtGui.QColor('#8c564b')]
trials_ic['text'] = ['left correct', 'left incorrect']
left_trials['correct vs incorrect'] = trials_ic
left_trials['correct vs incorrect and left vs right'] = trials_ic
#Right
right_trials = bb.core.Bunch()
right_trials['colour'] = QtGui.QColor('#bcbd22')
right_trials['fill'] = QtGui.QColor('#bcbd22')
right_trials['linestyle'] = QtGui.QPen(QtCore.Qt.SolidLine)
trials_id = np.empty(0,int)
idx= np.empty(0, int)
for c in stim_contrast:
idx = np.where(self.trials['contrastRight'] == c)[0]
trials_id = np.append(trials_id, idx)
trials_id = np.setdiff1d(trials_id, self.nan_trials)
trials_no = bb.core.Bunch()
trials_no['trials'] = trials_id
trials_no['lines'] = []
trials_no['linecolours'] = []
trials_no['text'] = []
right_trials['trial no.'] = trials_no
trials_lf = bb.core.Bunch()
trials_lf['trials'] = trials_id
trials_lf['lines'] = [[0, len(trials_lf['trials'])]]
trials_lf['linecolours'] = [QtGui.QColor('#bcbd22')]
trials_lf['text'] = ['right']
right_trials['left vs right'] = trials_lf
trials_ic = bb.core.Bunch()
correct = np.intersect1d(trials_id, self.correct_right_idx)
incorrect = np.intersect1d(trials_id, self.incorrect_right_idx)
trials_ic['trials'] = np.append(correct, incorrect)
trials_ic['lines'] = [[0, len(correct)], [len(correct), len(trials_ic['trials'])]]
trials_ic['linecolours'] = [QtGui.QColor('#9467bd'), QtGui.QColor('#ff7f0e')]
trials_ic['text'] = ['right correct', 'right incorrect']
right_trials['correct vs incorrect'] = trials_ic
right_trials['correct vs incorrect and left vs right'] = trials_ic
#Left Correct
left_correct_trials = bb.core.Bunch()
left_correct_trials['colour'] = QtGui.QColor('#17becf')
left_correct_trials['fill'] = QtGui.QColor('#17becf')
left_correct_trials['linestyle'] = QtGui.QPen(QtCore.Qt.DashLine)
trials_id = np.empty(0,int)
idx= np.empty(0, int)
for c in stim_contrast:
idx = np.where((self.trials['contrastLeft'] == c) & (self.trials['feedbackType'] == 1))[0]
trials_id = np.append(trials_id, idx)
trials_id = np.setdiff1d(trials_id, self.nan_trials)
trials_no = bb.core.Bunch()
trials_no['trials'] = trials_id
trials_no['lines'] = []
trials_no['linecolours'] = []
trials_no['text'] = []
left_correct_trials['trial no.'] = trials_no
trials_lf = bb.core.Bunch()
trials_lf['trials'] = trials_id
trials_lf['lines'] = [[0, len(trials_lf['trials'])]]
trials_lf['linecolours'] = [QtGui.QColor('#17becf')]
trials_lf['text'] = ['left correct']
left_correct_trials['left vs right'] = trials_lf
left_correct_trials['correct vs incorrect'] = trials_lf
left_correct_trials['correct vs incorrect and left vs right'] = trials_lf
#Left Incorrect
left_incorrect_trials = bb.core.Bunch()
left_incorrect_trials['colour'] = QtGui.QColor('#8c564b')
left_incorrect_trials['fill'] = QtGui.QColor('#8c564b')
left_incorrect_trials['linestyle'] = QtGui.QPen(QtCore.Qt.DashLine)
trials_id = np.empty(0,int)
idx= np.empty(0, int)
for c in stim_contrast:
idx = np.where((self.trials['contrastLeft'] == c) & (self.trials['feedbackType'] == -1))[0]
trials_id = np.append(trials_id, idx)
trials_id = np.setdiff1d(trials_id, self.nan_trials)
trials_no = bb.core.Bunch()
trials_no['trials'] = trials_id
trials_no['lines'] = []
trials_no['linecolours'] = []
trials_no['text'] = []
left_incorrect_trials['trial no.'] = trials_no
trials_lf = bb.core.Bunch()
trials_lf['trials'] = trials_id
trials_lf['lines'] = [[0, len(trials_lf['trials'])]]
trials_lf['linecolours'] = [QtGui.QColor('#8c564b')]
trials_lf['text'] = ['left incorrect']
left_incorrect_trials['left vs right'] = trials_lf
left_incorrect_trials['correct vs incorrect'] = trials_lf
left_incorrect_trials['correct vs incorrect and left vs right'] = trials_lf
#Right Correct
right_correct_trials = bb.core.Bunch()
right_correct_trials['colour'] = QtGui.QColor('#9467bd')
right_correct_trials['fill'] = QtGui.QColor('#9467bd')
right_correct_trials['linestyle'] = QtGui.QPen(QtCore.Qt.DashLine)
trials_id = np.empty(0,int)
idx= np.empty(0, int)
for c in stim_contrast:
idx = np.where((self.trials['contrastRight'] == c) & (self.trials['feedbackType'] == 1))[0]
trials_id = np.append(trials_id, idx)
trials_id = np.setdiff1d(trials_id, self.nan_trials)
trials_no = bb.core.Bunch()
trials_no['trials'] = trials_id
trials_no['lines'] = []
trials_no['linecolours'] = []
trials_no['text'] = []
right_correct_trials['trial no.'] = trials_no
trials_lf = bb.core.Bunch()
trials_lf['trials'] = trials_id
trials_lf['lines'] = [[0, len(trials_lf['trials'])]]
trials_lf['linecolours'] = [QtGui.QColor('#9467bd')]
trials_lf['text'] = ['right correct']
right_correct_trials['left vs right'] = trials_lf
right_correct_trials['correct vs incorrect'] = trials_lf
right_correct_trials['correct vs incorrect and left vs right'] = trials_lf
#Right Incorrect
right_incorrect_trials = bb.core.Bunch()
right_incorrect_trials['colour'] = QtGui.QColor('#ff7f0e')
right_incorrect_trials['fill'] = QtGui.QColor('#ff7f0e')
right_incorrect_trials['linestyle'] = QtGui.QPen(QtCore.Qt.DashLine)
trials_id = np.empty(0,int)
idx= np.empty(0, int)
for c in stim_contrast:
idx = np.where((self.trials['contrastRight'] == c) & (self.trials['feedbackType'] == -1))[0]
trials_id = np.append(trials_id, idx)
trials_id = np.setdiff1d(trials_id, self.nan_trials)
trials_no = bb.core.Bunch()
trials_no['trials'] = trials_id
trials_no['lines'] = []
trials_no['linecolours'] = []
trials_no['text'] = []
right_incorrect_trials['trial no.'] = trials_no
trials_lf = bb.core.Bunch()
trials_lf['trials'] = trials_id
trials_lf['lines'] = [[0, len(trials_lf['trials'])]]
trials_lf['linecolours'] = [QtGui.QColor('#ff7f0e')]
trials_lf['text'] = ['right incorrect']
right_incorrect_trials['left vs right'] = trials_lf
right_incorrect_trials['correct vs incorrect'] = trials_lf
right_incorrect_trials['correct vs incorrect and left vs right'] = trials_lf
trials = bb.core.Bunch()
trials['all'] = all_trials
trials['correct'] = correct_trials
trials['incorrect'] = incorrect_trials
trials['left'] = left_trials
trials['right'] = right_trials
trials['left correct'] = left_correct_trials
trials['left incorrect'] = left_incorrect_trials
trials['right correct'] = right_correct_trials
trials['right incorrect'] = right_incorrect_trials
return trials
def get_sort_method(self, case):
if case == 'all':
sort_method = 'trial no.'
id = 0
        elif (case == 'correct') or (case == 'incorrect'):
            sort_method = 'correct vs incorrect'
            id = 1
        elif (case == 'left') or (case == 'right'):
sort_method = 'left vs right'
id = 2
else:
sort_method = 'correct vs incorrect and left vs right'
id = 3
return sort_method, id
def compute_trial_options(self, trials):
self.trials = trials
nan_feedback = np.where(np.isnan(self.trials['feedback_times']))[0]
nan_goCue = np.where(np.isnan(self.trials['goCue_times']))[0]
self.nan_trials = np.unique(np.append(nan_feedback, nan_goCue))
self.n_trials = len(np.setdiff1d(np.arange(len(self.trials['feedbackType'])), self.nan_trials))
self.correct_idx = np.setdiff1d(np.where(self.trials['feedbackType'] == 1)[0], self.nan_trials)
self.incorrect_idx = np.setdiff1d(np.where(self.trials['feedbackType'] == -1)[0], self.nan_trials)
self.right_idx = np.setdiff1d(np.where(np.isfinite(self.trials['contrastRight']))[0], self.nan_trials)
self.left_idx = np.setdiff1d(np.where(np.isfinite(self.trials['contrastLeft']))[0], self.nan_trials)
self.correct_right_idx = np.setdiff1d(np.intersect1d(self.correct_idx, self.right_idx), self.nan_trials)
self.correct_left_idx = np.setdiff1d(np.intersect1d(self.correct_idx, self.left_idx), self.nan_trials)
self.incorrect_right_idx = np.setdiff1d(np.intersect1d(self.incorrect_idx, self.right_idx), self.nan_trials)
self.incorrect_left_idx = np.setdiff1d(np.intersect1d(self.incorrect_idx, self.left_idx), self.nan_trials)
return self.nan_trials
def reset_filters(self, stim = True):
stim_contrast = [1, 0.25, 0.125, 0.0625, 0]
case = 'all'
sort_method = 'trial no.'
if stim is True:
for idx in range(self.contrast_options.count()):
item = self.contrast_options.item(idx)
item.setCheckState(QtCore.Qt.Checked)
for idx, but in enumerate(self.filter_buttons.buttons()):
if idx == 0:
but.setCheckState(QtCore.Qt.Checked)
else:
but.setCheckState(QtCore.Qt.Unchecked)
for idx, but in enumerate(self.trial_buttons.buttons()):
if idx == 0:
but.setChecked(True)
else:
but.setChecked(False)
return stim_contrast, case, sort_method
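# Minimal usage sketch with invented trial data, assuming IBL-style trial
# fields where contrastLeft/contrastRight are NaN on the unused side; this is
# not data from a real session.
if __name__ == "__main__":
    import sys
    qt_app = QtWidgets.QApplication(sys.argv)
    trials = bb.core.Bunch()
    trials['contrastLeft'] = np.array([1.0, np.nan, 0.25, np.nan])
    trials['contrastRight'] = np.array([np.nan, 1.0, np.nan, 0.0625])
    trials['feedbackType'] = np.array([1, -1, 1, 1])
    trials['feedback_times'] = np.array([0.5, 1.2, 2.0, 3.1])
    trials['goCue_times'] = np.array([0.1, 0.9, 1.7, 2.8])
    filters = FilterGroup()
    filters.compute_trial_options(trials)
    sorted_trials = filters.compute_and_sort_trials([1, 0.25, 0.125, 0.0625, 0])
    print(sorted_trials['all']['trial no.']['trials'])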
|
py | 1a3b6bba16f277814caddbc15d6cb30e16db1ed0 | #
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
from collections import namedtuple
import functools
import logging
import re
import attr
from commoncode.datautils import choices
from commoncode.datautils import Boolean
from commoncode.datautils import Date
from commoncode.datautils import Integer
from commoncode.datautils import List
from commoncode.datautils import Mapping
from commoncode.datautils import String
from commoncode.datautils import TriBoolean
from textcode import analysis
"""
Handle Gemfile.lock Rubygems lockfile.
Since there is no specifications of the Gemfile.lock format, this
script is based on and contains code derived from Ruby Bundler:
https://raw.githubusercontent.com/bundler/bundler/77e7050364367d98f9bc96911ea2769b69a4735c/lib/bundler/lockfile_parser.rb
TODO: update to latest https://github.com/bundler/bundler/compare/77e7050364367d98f9bc96911ea2769b69a4735c...master#diff-3c625d3cd7d7604b3e2e3c5487a5ede6
Portions copyright (c) 2010 Andre Arko
Portions copyright (c) 2009 Engine Yard
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
Some examples:
SVN
remote: file://#{lib_path('foo-1.0')}
revision: 1
ref: HEAD
glob: some globs
specs:
foo (1.0)
GIT
remote: #{lib_path("foo-1.0")}
revision: #{git.ref_for('omg')}
branch: omg
ref: xx
tag: xxx
submodules: xxx
glob:xxx
specs:
foo (1.0)
PATH
remote: relative-path
glob:
specs:
foo (1.0)
"""
TRACE = False
def logger_debug(*args):
pass
logger = logging.getLogger(__name__)
if TRACE:
import sys
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, str) and a or repr(a) for a in args))
# Section headings: these are also used as switches to track a parsing state
PATH = u'PATH'
GIT = u'GIT'
SVN = u'SVN'
GEM = u'GEM'
PLATFORMS = u'PLATFORMS'
DEPENDENCIES = u'DEPENDENCIES'
SPECS = u' specs:'
# types of Gems, which is really where they are provisioned from
# RubyGems repo, local path or VCS
GEM_TYPES = (GEM, PATH, GIT, SVN,)
@attr.s()
class GemDependency(object):
name = String()
version = String()
@attr.s()
class Gem(object):
"""
A Gem can be packaged as a .gem archive, or it can be a source gem either
fetched from GIT or SVN or from a local path.
"""
supported_opts = 'remote', 'ref', 'revision', 'branch', 'submodules', 'tag',
name = String()
version = String()
platform = String(
help='Gem platform')
remote = String(
help='remote can be a path, git, svn or Gem repo url. One of GEM, PATH, GIT or SVN')
type = String(
# validator=choices(GEM_TYPES),
help='the type of this Gem: One of: {}'.format(', '.join(GEM_TYPES))
)
pinned = Boolean()
spec_version = String()
# relative path
path = String()
revision = String(
help='A version control full revision (e.g. a Git commit hash).'
)
ref = String(
help='A version control ref (such as a tag, a shortened revision hash, etc.).'
)
branch = String()
submodules = String()
tag = String()
requirements = List(
item_type=String,
help='list of constraints such as ">= 1.1.9"'
)
dependencies = Mapping(
help='a map of direct dependent Gems, keyed by name',
value_type='Gem',
)
def refine(self):
"""
Apply some refinements to the Gem based on its type:
- fix version and revisions for Gems checked-out from VCS
"""
if self.type == PATH:
self.path = self.remote
if self.type in (GIT, SVN,):
# FIXME: this likely WRONG
# TODO: this may not be correct for SVN BUT SVN has been abandoned
self.spec_version = self.version
if self.revision and not self.ref:
self.version = self.revision
elif self.revision and self.ref:
self.version = self.revision
elif not self.revision and self.ref:
self.version = self.ref
def as_nv_tree(self):
"""
Return a tree of name/versions dependency tuples from self as nested
dicts. The tree root is self. Each key is a name/version tuple.
Values are dicts.
"""
tree = {}
root = (self.name, self.version,)
tree[root] = {}
for _name, gem in self.dependencies.items():
tree[root].update(gem.as_nv_tree())
return tree
def flatten(self):
"""
Return a sorted flattened list of unique parent/child tuples.
"""
flattened = []
seen = set()
for gem in self.dependencies.values():
snv = self.type, self.name, self.version
gnv = gem.type, gem.name, gem.version
rel = self, gem
rel_key = snv, gnv
if rel_key not in seen:
flattened.append(rel)
seen.add(rel_key)
for rel in gem.flatten():
parent, child = rel
pnv = parent.type, parent.name, parent.version
cnv = child.type, child.name, child.version
rel_key = pnv, cnv
if rel_key not in seen:
flattened.append(rel)
seen.add(rel_key)
return sorted(flattened)
def dependency_tree(self):
"""
Return a tree of dependencies as nested mappings.
Each key is a "name@version" string and values are dicts.
"""
tree = {}
root = '{}@{}'.format(self.name or '', self.version or '')
tree[root] = {}
for _name, gem in self.dependencies.items():
tree[root].update(gem.dependency_tree())
return tree
def to_dict(self):
"""
Return a native mapping for this Gem.
"""
return dict([
('name', self.name),
('version', self.version),
('platform', self.platform),
('pinned', self.pinned),
('remote', self.remote),
('type', self.type),
('path', self.path),
('revision', self.revision),
('ref', self.ref),
('branch', self.branch),
('submodules', self.submodules),
('tag', self.tag),
('requirements', self.requirements),
('dependencies', self.dependency_tree()),
])
@property
def gem_name(self):
return '{}-{}.gem'.format(self.name, self.version)
OPTIONS = re.compile(r'^ (?P<key>[a-z]+): (?P<value>.*)$').match
def get_option(s):
"""
Parse Gemfile.lock options such as remote, ref, revision, etc.
"""
key = None
value = None
opts = OPTIONS(s)
if opts:
key = opts.group('key') or None
value = opts.group('value') or None
# normalize truth
if value == 'true':
value = True
if value == 'false':
value = False
# only keep known options, discard others
if key not in Gem.supported_opts:
key = None
value = None
return key, value
# parse name/version/platform
NAME_VERSION = (
# negative lookahead: not a space
'(?! )'
# a Gem name: several chars are not allowed
'(?P<name>[^ \\)\\(,!:]+)?'
# a space then opening parens (
'(?: \\('
# the version proper which is anything but a dash
'(?P<version>[^-]*)'
# and optionally some non-captured dash followed by anything, once
# pinned version can have this form:
# version-platform
# json (1.8.0-java) alpha (1.9.0-x86-mingw32) and may not contain a !
'(?:-(?P<platform>[^!]*))?'
# closing parens )
'\\)'
# NV is zero or one time
')?')
# parse direct dependencies
DEPS = re.compile(
# two spaces at line start
'^ {2}'
# NV proper
'%(NAME_VERSION)s'
# optional bang pinned
'(?P<pinned>!)?'
'$' % locals()).match
# parse spec-level dependencies
SPEC_DEPS = re.compile(
# four spaces at line start
'^ {4}'
'%(NAME_VERSION)s'
'$' % locals()).match
# parse direct dependencies on spec
SPEC_SUB_DEPS = re.compile(
# six spaces at line start
'^ {6}'
'%(NAME_VERSION)s'
'$' % locals()).match
PLATS = re.compile('^ (?P<platform>.*)$').match
class GemfileLockParser(object):
"""
Parse a Gemfile.lock. Code originally derived from Bundler's
/bundler/lib/bundler/lockfile_parser.rb parser
The parsing use a simple state machine, switching states based on sections
headings. The result is a tree of Gems objects stored in
self.dependencies.
"""
def __init__(self, lockfile):
self.lockfile = lockfile
# map of a line start string to the next parsing state function
self.STATES = {
DEPENDENCIES: self.parse_dependency,
PLATFORMS: self.parse_platform,
GIT: self.parse_options,
PATH: self.parse_options,
SVN: self.parse_options,
GEM: self.parse_options,
SPECS: self.parse_spec
}
# the final tree of dependencies, keyed by name
self.dependency_tree = {}
# a flat dict of all gems, keyed by name
self.all_gems = {}
self.platforms = []
# init parsing state
self.reset_state()
# parse proper
for line in analysis.unicode_text_lines(lockfile):
line = line.rstrip()
# reset state
if not line:
self.reset_state()
continue
# switch to new state
if line in self.STATES:
if line in GEM_TYPES:
self.current_type = line
self.state = self.STATES[line]
continue
# process state
if self.state:
self.state(line)
# finally refine the collected data
self.refine()
    def reset_state(self):
self.state = None
self.current_options = {}
self.current_gem = None
self.current_type = None
def refine(self):
for gem in self.all_gems.values():
gem.refine()
def get_or_create(self, name, version=None, platform=None):
"""
Return an existing gem if it exists or creates a new one.
Update the all_gems registry.
"""
if name in self.all_gems:
gem = self.all_gems[name]
gem.version = gem.version or version
gem.platform = gem.platform or platform
else:
gem = Gem(name, version, platform)
self.all_gems[name] = gem
return gem
def parse_options(self, line):
key, value = get_option(line)
if key:
self.current_options[key] = value
def parse_spec(self, line):
spec_dep = SPEC_DEPS(line)
if spec_dep:
name = spec_dep.group('name')
# first level dep is always an exact version
version = spec_dep.group('version')
platform = spec_dep.group('platform') or 'ruby'
# always set a new current gem
self.current_gem = self.get_or_create(name, version, platform)
self.current_gem.type = self.current_type
if version:
self.current_gem.version = version
self.current_gem.platform = platform
for k, v in self.current_options.items():
setattr(self.current_gem, k, v)
return
spec_sub_dep = SPEC_SUB_DEPS(line)
if spec_sub_dep:
name = spec_sub_dep.group('name')
if name == 'bundler':
return
# second level dep is always a version constraint
requirements = spec_sub_dep.group('version') or []
if requirements:
requirements = [d.strip() for d in requirements.split(',')]
if name in self.current_gem.dependencies:
dep = self.current_gem.dependencies[name]
else:
dep = self.get_or_create(name)
self.current_gem.dependencies[name] = dep
# unless set , a sub dep is always a gem
if not dep.type:
dep.type = GEM
for v in requirements:
if v not in dep.requirements:
dep.requirements.append(v)
def parse_dependency(self, line):
deps = DEPS(line)
if not deps:
if TRACE:
logger_debug('ERROR: parse_dependency: '
'line not matched: %(line)r' % locals())
return
name = deps.group('name')
# at this stage ALL gems should already exist except possibly
# for bundler: not finding one is an error
try:
gem = self.all_gems[name]
except KeyError as e:
gem = Gem(name)
self.all_gems[name] = gem
if name != 'bundler' and TRACE:
logger_debug('ERROR: parse_dependency: '
                             'gem %(name)r does not yet exist in all_gems: '
'%(line)r' % locals())
if name in self.dependency_tree:
if TRACE:
logger_debug('WARNING: parse_dependency: '
                             'dependency %(name)r already declared. '
'line: %(line)r' % locals())
else:
self.dependency_tree[name] = gem
version = deps.group('version') or []
if version:
version = [v.strip() for v in version.split(',')]
# the version of a direct dep is always a constraint
# we append these at the top of the list as this is
# the main constraint
for v in version:
gem.requirements.insert(0, v)
# assert gem.version == version
gem.pinned = True if deps.group('pinned') else False
def parse_platform(self, line):
plat = PLATS(line)
if not plat:
if TRACE:
logger_debug('ERROR: parse_platform: '
'line not matched: %(line)r' % locals())
return
plat = plat.group('platform')
self.platforms.append(plat.strip())
def flatten(self):
"""
Return the Gems dependency_tree as a sorted list of unique
of tuples (parent Gem / child Gem) relationships.
"""
flattened = []
for direct in self.dependency_tree.values():
flattened.append((None, direct,))
flattened.extend(direct.flatten())
return sorted(set(flattened))
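# Minimal usage sketch parsing a tiny, hand-written lockfile from a temporary
# file; the gem names and versions are invented, and the specs header is built
# from the SPECS constant above so its indentation matches the parser's state
# switching and regexes.
if __name__ == "__main__":
    import tempfile
    sample = "\n".join([
        "GEM",
        SPECS,
        "    rake (13.0.6)",
        "    rspec (3.12.0)",
        "      rake (>= 10.0)",
        "",
        "DEPENDENCIES",
        "  rspec (~> 3.12)",
        "",
    ])
    with tempfile.NamedTemporaryFile("w", suffix=".lock", delete=False) as tmp:
        tmp.write(sample)
        lock_path = tmp.name
    parsed = GemfileLockParser(lock_path)
    for name, gem in parsed.dependency_tree.items():
        print(name, gem.requirements, gem.dependency_tree())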
|
py | 1a3b6bcf75566e920dd4b17157464b9ebac7c5e8 | # -*- coding: utf-8 -*-
"""Create EMR steps and upload files."""
import os
import tempfile
import zipfile
REMOTE_DIR = '/home/hadoop/'
S3_KEY_PREFIX = 'sparksteps/sources/'
S3_URI_FMT = "s3://{bucket}/{key}"
def get_basename(path):
return os.path.basename(os.path.normpath(path))
def ls_recursive(dirname):
"""Recursively list files in a directory."""
for (dirpath, dirnames, filenames) in os.walk(os.path.expanduser(dirname)):
for f in filenames:
yield os.path.join(dirpath, f)
def zip_to_s3(s3_resource, dirpath, bucket, key):
"""Zip folder and upload to S3."""
with tempfile.SpooledTemporaryFile() as tmp:
with zipfile.ZipFile(tmp, 'w', zipfile.ZIP_DEFLATED) as archive:
for fpath in ls_recursive(dirpath):
archive.write(fpath, get_basename(fpath))
tmp.seek(0) # Reset file pointer
response = s3_resource.Bucket(bucket).put_object(Key=key, Body=tmp)
return response
class CmdStep(object):
on_failure = 'CANCEL_AND_WAIT'
@property
def step_name(self):
raise NotImplementedError()
@property
def cmd(self):
raise NotImplementedError()
@property
def step(self):
return {
'Name': self.step_name,
'ActionOnFailure': self.on_failure,
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': self.cmd
}
}
class CopyStep(CmdStep):
def __init__(self, bucket, filename):
self.bucket = bucket
self.filename = filename
@property
def step_name(self):
return "Copy {}".format(self.filename)
@property
def cmd(self):
return ['aws', 's3', 'cp', self.s3_uri, REMOTE_DIR]
@property
def key(self):
return S3_KEY_PREFIX + self.filename
@property
def s3_uri(self):
return S3_URI_FMT.format(bucket=self.bucket, key=self.key)
class DebugStep(CmdStep):
on_failure = 'TERMINATE_CLUSTER'
@property
def step_name(self):
return "Setup - debug"
@property
def cmd(self):
return ['state-pusher-script']
class SparkStep(CmdStep):
def __init__(self, app_path, submit_args=None, app_args=None):
self.app = get_basename(app_path)
self.submit_args = submit_args or []
self.app_args = app_args or []
@property
def step_name(self):
return "Run {}".format(self.app)
@property
def cmd(self):
return (['spark-submit'] + self.submit_args + [self.remote_app] +
self.app_args)
@property
def remote_app(self):
return os.path.join(REMOTE_DIR, self.app)
class UnzipStep(CmdStep):
def __init__(self, dirpath):
self.dirpath = dirpath
@property
def step_name(self):
return "Unzip {}".format(self.zipfile)
@property
def cmd(self):
return ['unzip', '-o', self.remote_zipfile, '-d', self.remote_dirpath]
@property
def zipfile(self):
return self.dirname + '.zip'
@property
def remote_zipfile(self):
return os.path.join(REMOTE_DIR, self.zipfile)
@property
def dirname(self):
return get_basename(self.dirpath)
@property
def remote_dirpath(self):
return os.path.join(REMOTE_DIR, self.dirname)
class S3DistCp(CmdStep):
on_failure = 'CONTINUE'
def __init__(self, s3_dist_cp):
self.s3_dist_cp = s3_dist_cp
@property
def step_name(self):
return "S3DistCp step"
@property
def cmd(self):
return ['s3-dist-cp'] + self.s3_dist_cp
def upload_steps(s3_resource, bucket, path):
"""Upload files to S3 and get steps."""
steps = []
basename = get_basename(path)
if os.path.isdir(path): # zip directory
copy_step = CopyStep(bucket, basename + '.zip')
zip_to_s3(s3_resource, path, bucket, key=copy_step.key)
steps.extend([copy_step, UnzipStep(path)])
else:
copy_step = CopyStep(bucket, basename)
s3_resource.meta.client.upload_file(path, bucket, copy_step.key)
steps.append(copy_step)
return steps
def setup_steps(s3, bucket, app_path, submit_args=None, app_args=None,
uploads=None, s3_dist_cp=None):
cmd_steps = []
paths = uploads or []
paths.append(app_path)
for path in paths:
cmd_steps.extend(upload_steps(s3, bucket, path))
cmd_steps.append(SparkStep(app_path, submit_args, app_args))
if s3_dist_cp is not None:
cmd_steps.append(S3DistCp(s3_dist_cp))
return [s.step for s in cmd_steps]
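# Minimal usage sketch, shown as comments because it needs AWS credentials and
# a running EMR cluster; the bucket, paths and JobFlowId are placeholders.
#
#     import boto3
#
#     s3 = boto3.resource("s3")
#     emr = boto3.client("emr")
#     steps = setup_steps(
#         s3,
#         bucket="my-spark-bucket",                 # placeholder bucket
#         app_path="jobs/wordcount.py",             # local script or directory
#         submit_args=["--deploy-mode", "cluster"],
#         app_args=["s3://my-spark-bucket/input/"],
#     )
#     emr.add_job_flow_steps(JobFlowId="j-XXXXXXXXXXXXX", Steps=steps)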
|
py | 1a3b6c5324c2f749239450f2c25c825692d17ab0 | #!/usr/bin/env python
import asyncio
import logging
import time
from collections import deque
from typing import List, Dict, Optional, Tuple, Deque
from hummingbot.client.command import __all__ as commands
from hummingbot.client.tab import __all__ as tab_classes
from hummingbot.core.clock import Clock
from hummingbot.exceptions import ArgumentParserError
from hummingbot.logger import HummingbotLogger
from hummingbot.logger.application_warning import ApplicationWarning
from hummingbot.model.sql_connection_manager import SQLConnectionManager
from hummingbot.connector.exchange.paper_trade import create_paper_trade_market
from hummingbot.client.ui.keybindings import load_key_bindings
from hummingbot.client.ui.parser import load_parser, ThrowingArgumentParser
from hummingbot.client.ui.hummingbot_cli import HummingbotCLI
from hummingbot.client.ui.completer import load_completer
from hummingbot.client.config.global_config_map import global_config_map
from hummingbot.client.config.config_helpers import (
get_strategy_config_map,
get_connector_class,
get_eth_wallet_private_key,
)
from hummingbot.strategy.strategy_base import StrategyBase
from hummingbot.strategy.cross_exchange_market_making import CrossExchangeMarketPair
from hummingbot.core.utils.kill_switch import KillSwitch
from hummingbot.core.utils.trading_pair_fetcher import TradingPairFetcher
from hummingbot.data_feed.data_feed_base import DataFeedBase
from hummingbot.notifier.notifier_base import NotifierBase
from hummingbot.notifier.telegram_notifier import TelegramNotifier
from hummingbot.strategy.market_trading_pair_tuple import MarketTradingPairTuple
from hummingbot.connector.markets_recorder import MarketsRecorder
from hummingbot.client.config.security import Security
from hummingbot.connector.exchange_base import ExchangeBase
from hummingbot.client.settings import AllConnectorSettings, ConnectorType
from hummingbot.client.tab.data_types import CommandTab
s_logger = None
class HummingbotApplication(*commands):
KILL_TIMEOUT = 10.0
APP_WARNING_EXPIRY_DURATION = 3600.0
APP_WARNING_STATUS_LIMIT = 6
_main_app: Optional["HummingbotApplication"] = None
@classmethod
def logger(cls) -> HummingbotLogger:
global s_logger
if s_logger is None:
s_logger = logging.getLogger(__name__)
return s_logger
@classmethod
def main_application(cls) -> "HummingbotApplication":
if cls._main_app is None:
cls._main_app = HummingbotApplication()
return cls._main_app
def __init__(self):
# This is to start fetching trading pairs for auto-complete
TradingPairFetcher.get_instance()
self.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
command_tabs = self.init_command_tabs()
self.parser: ThrowingArgumentParser = load_parser(self, command_tabs)
self.app = HummingbotCLI(
input_handler=self._handle_command,
bindings=load_key_bindings(self),
completer=load_completer(self),
command_tabs=command_tabs
)
self.markets: Dict[str, ExchangeBase] = {}
# strategy file name and name get assigned value after import or create command
self._strategy_file_name: str = None
self.strategy_name: str = None
self.strategy_task: Optional[asyncio.Task] = None
self.strategy: Optional[StrategyBase] = None
self.market_pair: Optional[CrossExchangeMarketPair] = None
self.market_trading_pair_tuples: List[MarketTradingPairTuple] = []
self.clock: Optional[Clock] = None
self.market_trading_pairs_map = {}
self.token_list = {}
self.init_time: float = time.time()
self.start_time: Optional[int] = None
self.placeholder_mode = False
self.log_queue_listener: Optional[logging.handlers.QueueListener] = None
self.data_feed: Optional[DataFeedBase] = None
self.notifiers: List[NotifierBase] = []
self.kill_switch: Optional[KillSwitch] = None
self._app_warnings: Deque[ApplicationWarning] = deque()
self._trading_required: bool = True
self._last_started_strategy_file: Optional[str] = None
self.trade_fill_db: Optional[SQLConnectionManager] = None
self.markets_recorder: Optional[MarketsRecorder] = None
self._script_iterator = None
self._binance_connector = None
# gateway variables
self._shared_client = None
@property
def strategy_file_name(self) -> str:
return self._strategy_file_name
@strategy_file_name.setter
def strategy_file_name(self, value: Optional[str]):
self._strategy_file_name = value
if value is not None:
db_name = value.split(".")[0]
self.trade_fill_db = SQLConnectionManager.get_trade_fills_instance(db_name)
else:
self.trade_fill_db = None
@property
def strategy_config_map(self):
if self.strategy_name is not None:
return get_strategy_config_map(self.strategy_name)
return None
def _notify(self, msg: str):
self.app.log(msg)
for notifier in self.notifiers:
notifier.add_msg_to_queue(msg)
def _handle_command(self, raw_command: str):
        # unset the to_stop_config flag if it was triggered before loading any command
if self.app.to_stop_config:
self.app.to_stop_config = False
raw_command = raw_command.lower().strip()
# NOTE: Only done for config command
if raw_command.startswith("config"):
command_split = raw_command.split(maxsplit=2)
else:
command_split = raw_command.split()
try:
if self.placeholder_mode:
pass
else:
# Check if help is requested, if yes, print & terminate
if len(command_split) > 1 and any(arg in ["-h", "--help"] for arg in command_split[1:]):
self.help(command_split[0])
return
shortcuts = global_config_map.get("command_shortcuts").value
shortcut = None
# see if we match against shortcut command
if shortcuts is not None:
for s in shortcuts:
if command_split[0] == s['command']:
shortcut = s
break
# perform shortcut expansion
if shortcut is not None:
# check number of arguments
num_shortcut_args = len(shortcut['arguments'])
if len(command_split) == num_shortcut_args + 1:
# notify each expansion if there's more than 1
verbose = True if len(shortcut['output']) > 1 else False
# do argument replace and re-enter this function with the expanded command
for output_cmd in shortcut['output']:
final_cmd = output_cmd
for i in range(1, num_shortcut_args + 1):
final_cmd = final_cmd.replace(f'${i}', command_split[i])
if verbose is True:
self._notify(f' >>> {final_cmd}')
self._handle_command(final_cmd)
else:
self._notify('Invalid number of arguments for shortcut')
# regular command
else:
args = self.parser.parse_args(args=command_split)
kwargs = vars(args)
if not hasattr(args, "func"):
self.app.handle_tab_command(self, command_split[0], kwargs)
else:
f = args.func
del kwargs["func"]
f(**kwargs)
except ArgumentParserError as e:
if not self.be_silly(raw_command):
self._notify(str(e))
except NotImplementedError:
self._notify("Command not yet implemented. This feature is currently under development.")
except Exception as e:
self.logger().error(e, exc_info=True)
async def _cancel_outstanding_orders(self) -> bool:
success = True
try:
kill_timeout: float = self.KILL_TIMEOUT
self._notify("Cancelling outstanding orders...")
for market_name, market in self.markets.items():
cancellation_results = await market.cancel_all(kill_timeout)
uncancelled = list(filter(lambda cr: cr.success is False, cancellation_results))
if len(uncancelled) > 0:
success = False
uncancelled_order_ids = list(map(lambda cr: cr.order_id, uncancelled))
self._notify("\nFailed to cancel the following orders on %s:\n%s" % (
market_name,
'\n'.join(uncancelled_order_ids)
))
except Exception:
self.logger().error("Error canceling outstanding orders.", exc_info=True)
success = False
if success:
self._notify("All outstanding orders cancelled.")
return success
async def run(self):
await self.app.run()
def add_application_warning(self, app_warning: ApplicationWarning):
self._expire_old_application_warnings()
self._app_warnings.append(app_warning)
def clear_application_warning(self):
self._app_warnings.clear()
@staticmethod
def _initialize_market_assets(market_name: str, trading_pairs: List[str]) -> List[Tuple[str, str]]:
market_trading_pairs: List[Tuple[str, str]] = [(trading_pair.split('-')) for trading_pair in trading_pairs]
return market_trading_pairs
def _initialize_markets(self, market_names: List[Tuple[str, List[str]]]):
# aggregate trading_pairs if there are duplicate markets
for market_name, trading_pairs in market_names:
if market_name not in self.market_trading_pairs_map:
self.market_trading_pairs_map[market_name] = []
for hb_trading_pair in trading_pairs:
self.market_trading_pairs_map[market_name].append(hb_trading_pair)
for connector_name, trading_pairs in self.market_trading_pairs_map.items():
conn_setting = AllConnectorSettings.get_connector_settings()[connector_name]
if connector_name.endswith("paper_trade") and conn_setting.type == ConnectorType.Exchange:
connector = create_paper_trade_market(conn_setting.parent_name, trading_pairs)
paper_trade_account_balance = global_config_map.get("paper_trade_account_balance").value
for asset, balance in paper_trade_account_balance.items():
connector.set_balance(asset, balance)
else:
Security.update_config_map(global_config_map)
keys = {key: config.value for key, config in global_config_map.items()
if key in conn_setting.config_keys}
init_params = conn_setting.conn_init_parameters(keys)
init_params.update(trading_pairs=trading_pairs, trading_required=self._trading_required)
if conn_setting.use_ethereum_wallet:
ethereum_rpc_url = global_config_map.get("ethereum_rpc_url").value
                    # Todo: Hard-coded this exception for now until we figure out how to handle all ethereum connectors.
if connector_name in ["balancer", "uniswap", "uniswap_v3", "perpetual_finance"]:
private_key = get_eth_wallet_private_key()
init_params.update(wallet_private_key=private_key, ethereum_rpc_url=ethereum_rpc_url)
connector_class = get_connector_class(connector_name)
connector = connector_class(**init_params)
self.markets[connector_name] = connector
self.markets_recorder = MarketsRecorder(
self.trade_fill_db,
list(self.markets.values()),
self.strategy_file_name,
self.strategy_name,
)
self.markets_recorder.start()
def _initialize_notifiers(self):
if global_config_map.get("telegram_enabled").value:
# TODO: refactor to use single instance
if not any([isinstance(n, TelegramNotifier) for n in self.notifiers]):
self.notifiers.append(
TelegramNotifier(
token=global_config_map["telegram_token"].value,
chat_id=global_config_map["telegram_chat_id"].value,
hb=self,
)
)
for notifier in self.notifiers:
notifier.start()
def init_command_tabs(self) -> Dict[str, CommandTab]:
"""
Initiates and returns a CommandTab dictionary with mostly defaults and None values. These values will be
populated later on by HummingbotCLI.
"""
command_tabs: Dict[str, CommandTab] = {}
for tab_class in tab_classes:
name = tab_class.get_command_name()
command_tabs[name] = CommandTab(name, None, None, None, tab_class)
return command_tabs
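# A minimal standalone sketch of the "$1"-style shortcut argument substitution
# performed in _handle_command above; the function name, command strings and
# arguments below are illustrative, not part of the original shortcut config.
def _expand_shortcut_sketch(output_cmds, args):
    expanded = []
    for output_cmd in output_cmds:
        final_cmd = output_cmd
        for i, arg in enumerate(args, start=1):
            final_cmd = final_cmd.replace(f"${i}", arg)
        expanded.append(final_cmd)
    return expanded
# _expand_shortcut_sketch(["buy $1 $2 limit $3"], ["binance", "BTC-USDT", "40000"])
# -> ["buy binance BTC-USDT limit 40000"]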
|
py | 1a3b6db978e9399cbbaa4227208e3befa4c14abc | # Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
import numpy as np
import json
import argparse
import sys
import open3d as o3d
sys.path.append("../Utility")
from file import *
from visualization import *
sys.path.append(".")
from initialize_config import *
def list_posegraph_files(folder_posegraph):
pose_graph_paths = get_file_list(folder_posegraph, ".json")
for pose_graph_path in pose_graph_paths:
pose_graph = o3d.io.read_pose_graph(pose_graph_path)
n_nodes = len(pose_graph.nodes)
n_edges = len(pose_graph.edges)
print(
"Fragment o3d.registration.PoseGraph %s has %d nodes and %d edges" %
(pose_graph_path, n_nodes, n_edges))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="visualize pose graph")
parser.add_argument("config", help="path to the config file")
parser.add_argument("--source_id", type=int, help="ID of source fragment")
parser.add_argument("--target_id", type=int, help="ID of target fragment")
parser.add_argument("--adjacent",
help="visualize adjacent pairs",
action="store_true")
parser.add_argument("--all",
help="visualize all pairs",
action="store_true")
parser.add_argument("--list_posegraphs",
help="list number of node and edges of all pose graphs",
action="store_true")
parser.add_argument("--before_optimized",
help="visualize posegraph edges that is not optimized",
action="store_true")
args = parser.parse_args()
with open(args.config) as json_file:
config = json.load(json_file)
initialize_config(config)
ply_file_names = get_file_list(
join(config["path_dataset"], config["folder_fragment"]), ".ply")
if (args.list_posegraphs):
list_posegraph_files(
join(config["path_dataset"], config["folder_fragment"]))
list_posegraph_files(
join(config["path_dataset"], config["folder_scene"]))
if (args.before_optimized):
global_pose_graph_name = join(config["path_dataset"],
config["template_global_posegraph"])
else:
global_pose_graph_name = join(
config["path_dataset"],
config["template_refined_posegraph_optimized"])
print("Reading posegraph")
print(global_pose_graph_name)
pose_graph = o3d.io.read_pose_graph(global_pose_graph_name)
n_nodes = len(pose_graph.nodes)
n_edges = len(pose_graph.edges)
print("Global o3d.registration.PoseGraph having %d nodes and %d edges" % \
(n_nodes, n_edges))
# visualize alignment of posegraph edges
for edge in pose_graph.edges:
print("o3d.registration.PoseGraphEdge %d-%d" % \
(edge.source_node_id, edge.target_node_id))
if ((args.adjacent and \
edge.target_node_id - edge.source_node_id == 1)) or \
(not args.adjacent and
(args.source_id == edge.source_node_id and \
args.target_id == edge.target_node_id)) or \
args.all:
print(" confidence : %.3f" % edge.confidence)
source = o3d.io.read_point_cloud(
ply_file_names[edge.source_node_id])
target = o3d.io.read_point_cloud(
ply_file_names[edge.target_node_id])
source_down = source.voxel_down_sample(config["voxel_size"])
target_down = target.voxel_down_sample(config["voxel_size"])
print("original registration")
draw_registration_result(source_down, target_down,
edge.transformation)
print("optimized registration")
source_down.transform(
pose_graph.nodes[edge.source_node_id].pose)
target_down.transform(
pose_graph.nodes[edge.target_node_id].pose)
draw_registration_result(source_down, target_down,
np.identity(4))
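# Illustrative invocation (script name and config path are placeholders):
#   python visualize_pose_graph.py config.json --adjacent
# visualizes every pair of adjacent fragments; --source_id/--target_id select a
# single edge instead, and --list_posegraphs additionally prints node/edge
# counts for every fragment and scene pose graph.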
|
py | 1a3b6f1d305e79f55c7a99dfaab1db9711c08c6a | import cv2
import csv
global clickCoordinates
clickCoordinates = list()
def click_point(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
clickCoordinates.append((x, y))
image = cv2.imread("../protos/textures/rover_circuit.jpg")
cv2.namedWindow("image")
cv2.setMouseCallback("image", click_point)
while True:
cv2.imshow("image", image)
key = cv2.waitKey(1)
if key == ord(" "):
break
with open('waypoints.csv', 'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(["waypointID", "coordinateX", "coordinateY"])
for coordinateID, eachClickCoordinatePair in enumerate(clickCoordinates):
writer.writerow([coordinateID, eachClickCoordinatePair[0], eachClickCoordinatePair[1]]) |
py | 1a3b6f305505ad981c80c0942666f34319b20c3b | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
#
# This application is an example on how to use aioblescan
#
# Copyright (c) 2017 François Wautier
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
# IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
import sys
import asyncio
import argparse
import re
import aioblescan as aiobs
from aioblescan.plugins import EddyStone
from aioblescan.plugins import RuuviWeather
from aioblescan.plugins import ATCMiThermometer
from aioblescan.plugins import ThermoBeacon
# global
opts = None
def check_mac(val):
try:
if re.match("[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", val.lower()):
return val.lower()
except:
pass
raise argparse.ArgumentTypeError("%s is not a MAC address" % val)
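# Illustrative values: check_mac accepts colon- or dash-separated hex pairs and
# normalizes them to lower case, e.g.
#   check_mac("AA:BB:CC:DD:EE:FF") -> "aa:bb:cc:dd:ee:ff"
# while a malformed value such as "not-a-mac" raises argparse.ArgumentTypeError.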
def my_process(data):
global opts
ev = aiobs.HCI_Event()
xx = ev.decode(data)
if opts.mac:
goon = False
mac = ev.retrieve("peer")
for x in mac:
if x.val in opts.mac:
goon = True
break
if not goon:
return
if opts.raw:
print("Raw data: {}".format(ev.raw_data))
noopt = True
if opts.eddy:
noopt = False
xx = EddyStone().decode(ev)
if xx:
print("Google Beacon {}".format(xx))
return
if opts.ruuvi:
noopt = False
xx = RuuviWeather().decode(ev)
if xx:
print("Weather info {}".format(xx))
return
if opts.atcmi:
noopt = False
xx = ATCMiThermometer().decode(ev)
if xx:
print("Temperature info {}".format(xx))
return
if opts.thermobeacon:
noopt = False
xx = ThermoBeacon().decode(ev)
if xx:
print("Temperature info {}".format(xx))
return
if noopt:
ev.show(0)
def main(args=None):
global opts
parser = argparse.ArgumentParser(description="Track BLE advertised packets")
parser.add_argument(
"-e",
"--eddy",
action="store_true",
default=False,
help="Look specificaly for Eddystone messages.",
)
parser.add_argument(
"-m",
"--mac",
type=check_mac,
action="append",
help="Look for these MAC addresses.",
)
parser.add_argument(
"-r",
"--ruuvi",
action="store_true",
default=False,
help="Look only for Ruuvi tag Weather station messages",
)
parser.add_argument(
"-A",
"--atcmi",
action="store_true",
default=False,
help="Look only for ATC_MiThermometer tag messages",
)
parser.add_argument(
"-T",
"--thermobeacon",
action="store_true",
default=False,
help="Look only for ThermoBeacon messages",
)
parser.add_argument(
"-R",
"--raw",
action="store_true",
default=False,
help="Also show the raw data.",
)
parser.add_argument(
"-a",
"--advertise",
type=int,
default=0,
help="Broadcast like an EddyStone Beacon. Set the interval between packet in millisec",
)
parser.add_argument(
"-u",
"--url",
type=str,
default="",
help="When broadcasting like an EddyStone Beacon, set the url.",
)
parser.add_argument(
"-t",
"--txpower",
type=int,
default=0,
help="When broadcasting like an EddyStone Beacon, set the Tx power",
)
parser.add_argument(
"-D",
"--device",
type=int,
default=0,
help="Select the hciX device to use (default 0, i.e. hci0).",
)
try:
opts = parser.parse_args()
except Exception as e:
parser.error("Error: " + str(e))
sys.exit()
event_loop = asyncio.get_event_loop()
# First create and configure a raw socket
mysocket = aiobs.create_bt_socket(opts.device)
# create a connection with the raw socket
# This used to work but now requires a STREAM socket.
# fac=event_loop.create_connection(aiobs.BLEScanRequester,sock=mysocket)
# Thanks to martensjacobs for this fix
fac = event_loop._create_connection_transport(
mysocket, aiobs.BLEScanRequester, None, None
)
# Start it
conn, btctrl = event_loop.run_until_complete(fac)
# Attach your processing
btctrl.process = my_process
if opts.advertise:
command = aiobs.HCI_Cmd_LE_Advertise(enable=False)
event_loop.run_until_complete(btctrl.send_command(command))
command = aiobs.HCI_Cmd_LE_Set_Advertised_Params(
interval_min=opts.advertise, interval_max=opts.advertise
)
event_loop.run_until_complete(btctrl.send_command(command))
if opts.url:
myeddy = EddyStone(param=opts.url)
else:
myeddy = EddyStone()
if opts.txpower:
myeddy.power = opts.txpower
command = aiobs.HCI_Cmd_LE_Set_Advertised_Msg(msg=myeddy)
event_loop.run_until_complete(btctrl.send_command(command))
command = aiobs.HCI_Cmd_LE_Advertise(enable=True)
event_loop.run_until_complete(btctrl.send_command(command))
# Probe
event_loop.run_until_complete(btctrl.send_scan_request())
try:
# event_loop.run_until_complete(coro)
event_loop.run_forever()
except KeyboardInterrupt:
print("keyboard interrupt")
finally:
print("closing event loop")
event_loop.run_until_complete(btctrl.stop_scan_request())
command = aiobs.HCI_Cmd_LE_Advertise(enable=False)
event_loop.run_until_complete(btctrl.send_command(command))
conn.close()
event_loop.close()
if __name__ == "__main__":
main()
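# Illustrative invocation (script name and MAC are placeholders):
#   python ble_scan.py -e -m 11:22:33:44:55:66 -D 0
# scans hci0 for EddyStone frames from a single MAC address; -a/-u/-t
# additionally make the adapter advertise as an EddyStone beacon
# (interval in ms, URL and Tx power respectively).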
|
py | 1a3b6f54348b303cd3675d4ead21ff517a0f107b | # PyAudio : Python Bindings for PortAudio.
# Copyright (c) 2006 Hubert Pham
# Copyright (c) 2020 Svein Seldal
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
PyAudio provides Python bindings for PortAudio, the cross-platform
audio I/O library. With PyAudio, you can easily use Python to play and
record audio on a variety of platforms. PyAudio is inspired by:
* pyPortAudio/fastaudio: Python bindings for PortAudio v18 API.
* tkSnack: cross-platform sound toolkit for Tcl/Tk and Python.
.. include:: ../sphinx/examples.rst
Overview
--------
**Classes**
:py:class:`PyAudio`, :py:class:`Stream`
.. only:: pamac
**Host Specific Classes**
:py:class:`PaMacCoreStreamInfo`
**Stream Conversion Convenience Functions**
:py:func:`get_sample_size`, :py:func:`get_format_from_width`
**PortAudio version**
:py:func:`get_portaudio_version`, :py:func:`get_portaudio_version_text`
.. |PaSampleFormat| replace:: :ref:`PortAudio Sample Format <PaSampleFormat>`
.. _PaSampleFormat:
**Portaudio Sample Formats**
:py:data:`paFloat32`, :py:data:`paInt32`, :py:data:`paInt24`,
:py:data:`paInt16`, :py:data:`paInt8`, :py:data:`paUInt8`,
:py:data:`paCustomFormat`
.. |PaHostAPI| replace:: :ref:`PortAudio Host API <PaHostAPI>`
.. _PaHostAPI:
**PortAudio Host APIs**
:py:data:`paInDevelopment`, :py:data:`paDirectSound`, :py:data:`paMME`,
:py:data:`paASIO`, :py:data:`paSoundManager`, :py:data:`paCoreAudio`,
:py:data:`paOSS`, :py:data:`paALSA`, :py:data:`paAL`, :py:data:`paBeOS`,
:py:data:`paWDMKS`, :py:data:`paJACK`, :py:data:`paWASAPI`,
:py:data:`paNoDevice`
.. |PaErrorCode| replace:: :ref:`PortAudio Error Code <PaErrorCode>`
.. _PaErrorCode:
**PortAudio Error Codes**
:py:data:`paNoError`, :py:data:`paNotInitialized`,
:py:data:`paUnanticipatedHostError`, :py:data:`paInvalidChannelCount`,
:py:data:`paInvalidSampleRate`, :py:data:`paInvalidDevice`,
:py:data:`paInvalidFlag`, :py:data:`paSampleFormatNotSupported`,
:py:data:`paBadIODeviceCombination`, :py:data:`paInsufficientMemory`,
:py:data:`paBufferTooBig`, :py:data:`paBufferTooSmall`,
:py:data:`paNullCallback`, :py:data:`paBadStreamPtr`,
:py:data:`paTimedOut`, :py:data:`paInternalError`,
:py:data:`paDeviceUnavailable`,
:py:data:`paIncompatibleHostApiSpecificStreamInfo`,
:py:data:`paStreamIsStopped`, :py:data:`paStreamIsNotStopped`,
:py:data:`paInputOverflowed`, :py:data:`paOutputUnderflowed`,
:py:data:`paHostApiNotFound`, :py:data:`paInvalidHostApi`,
:py:data:`paCanNotReadFromACallbackStream`,
:py:data:`paCanNotWriteToACallbackStream`,
:py:data:`paCanNotReadFromAnOutputOnlyStream`,
:py:data:`paCanNotWriteToAnInputOnlyStream`,
:py:data:`paIncompatibleStreamHostApi`
.. |PaCallbackReturnCodes| replace:: :ref:`PortAudio Callback Return Code <PaCallbackReturnCodes>`
.. _PaCallbackReturnCodes:
**PortAudio Callback Return Codes**
:py:data:`paContinue`, :py:data:`paComplete`, :py:data:`paAbort`
.. |PaCallbackFlags| replace:: :ref:`PortAutio Callback Flag <PaCallbackFlags>`
.. _PaCallbackFlags:
**PortAudio Callback Flags**
:py:data:`paInputUnderflow`, :py:data:`paInputOverflow`,
:py:data:`paOutputUnderflow`, :py:data:`paOutputOverflow`,
:py:data:`paPrimingOutput`
"""
__author__ = "Hubert Pham"
__version__ = "0.2.11"
__docformat__ = "restructuredtext en"
import sys
# attempt to import PortAudio
try:
import _portaudio as pa
except ImportError:
print("Could not import the PyAudio C module '_portaudio'.")
raise
############################################################
# GLOBALS
############################################################
##### PaSampleFormat Sample Formats #####
paFloat32 = pa.paFloat32 #: 32 bit float
paInt32 = pa.paInt32 #: 32 bit int
paInt24 = pa.paInt24 #: 24 bit int
paInt16 = pa.paInt16 #: 16 bit int
paInt8 = pa.paInt8 #: 8 bit int
paUInt8 = pa.paUInt8 #: 8 bit unsigned int
paCustomFormat = pa.paCustomFormat #: a custom data format
###### HostAPI TypeId #####
paInDevelopment = pa.paInDevelopment #: Still in development
paDirectSound = pa.paDirectSound #: DirectSound (Windows only)
paMME = pa.paMME #: Multimedia Extension (Windows only)
paASIO = pa.paASIO #: Steinberg Audio Stream Input/Output
paSoundManager = pa.paSoundManager #: SoundManager (OSX only)
paCoreAudio = pa.paCoreAudio #: CoreAudio (OSX only)
paOSS = pa.paOSS #: Open Sound System (Linux only)
paALSA = pa.paALSA #: Advanced Linux Sound Architecture (Linux only)
paAL = pa.paAL #: Open Audio Library
paBeOS = pa.paBeOS #: BeOS Sound System
paWDMKS = pa.paWDMKS #: Windows Driver Model (Windows only)
paJACK = pa.paJACK #: JACK Audio Connection Kit
paWASAPI = pa.paWASAPI #: Windows Vista Audio stack architecture
paNoDevice = pa.paNoDevice #: Not actually an audio device
###### portaudio error codes #####
paNoError = pa.paNoError
paNotInitialized = pa.paNotInitialized
paUnanticipatedHostError = pa.paUnanticipatedHostError
paInvalidChannelCount = pa.paInvalidChannelCount
paInvalidSampleRate = pa.paInvalidSampleRate
paInvalidDevice = pa.paInvalidDevice
paInvalidFlag = pa.paInvalidFlag
paSampleFormatNotSupported = pa.paSampleFormatNotSupported
paBadIODeviceCombination = pa.paBadIODeviceCombination
paInsufficientMemory = pa.paInsufficientMemory
paBufferTooBig = pa.paBufferTooBig
paBufferTooSmall = pa.paBufferTooSmall
paNullCallback = pa.paNullCallback
paBadStreamPtr = pa.paBadStreamPtr
paTimedOut = pa.paTimedOut
paInternalError = pa.paInternalError
paDeviceUnavailable = pa.paDeviceUnavailable
paIncompatibleHostApiSpecificStreamInfo = pa.paIncompatibleHostApiSpecificStreamInfo
paStreamIsStopped = pa.paStreamIsStopped
paStreamIsNotStopped = pa.paStreamIsNotStopped
paInputOverflowed = pa.paInputOverflowed
paOutputUnderflowed = pa.paOutputUnderflowed
paHostApiNotFound = pa.paHostApiNotFound
paInvalidHostApi = pa.paInvalidHostApi
paCanNotReadFromACallbackStream = pa.paCanNotReadFromACallbackStream
paCanNotWriteToACallbackStream = pa.paCanNotWriteToACallbackStream
paCanNotReadFromAnOutputOnlyStream = pa.paCanNotReadFromAnOutputOnlyStream
paCanNotWriteToAnInputOnlyStream = pa.paCanNotWriteToAnInputOnlyStream
paIncompatibleStreamHostApi = pa.paIncompatibleStreamHostApi
###### portaudio callback return codes ######
paContinue = pa.paContinue #: There is more audio data to come
paComplete = pa.paComplete #: This was the last block of audio data
paAbort = pa.paAbort #: An error occurred, stop playback/recording
###### portaudio callback flags ######
paInputUnderflow = pa.paInputUnderflow #: Buffer underflow in input
paInputOverflow = pa.paInputOverflow #: Buffer overflow in input
paOutputUnderflow = pa.paOutputUnderflow #: Buffer underflow in output
paOutputOverflow = pa.paOutputOverflow #: Buffer overflow in output
paPrimingOutput = pa.paPrimingOutput #: Just priming, not playing yet
############################################################
# Convenience Functions
############################################################
def get_sample_size(format):
"""
Returns the size (in bytes) for the specified
sample *format*.
:param format: A |PaSampleFormat| constant.
:raises ValueError: on invalid specified `format`.
:rtype: integer
"""
return pa.get_sample_size(format)
def get_format_from_width(width, unsigned=True):
"""
Returns a PortAudio format constant for the specified *width*.
:param width: The desired sample width in bytes (1, 2, 3, or 4)
:param unsigned: For 1 byte width, specifies signed or unsigned format.
:raises ValueError: when invalid *width*
:rtype: A |PaSampleFormat| constant
"""
if width == 1:
if unsigned:
return paUInt8
else:
return paInt8
elif width == 2:
return paInt16
elif width == 3:
return paInt24
elif width == 4:
return paFloat32
else:
raise ValueError("Invalid width: %d" % width)
############################################################
# Versioning
############################################################
def get_portaudio_version():
"""
Returns portaudio version.
:rtype: string
"""
return pa.get_version()
def get_portaudio_version_text():
"""
Returns PortAudio version as a text string.
:rtype: string
"""
return pa.get_version_text()
############################################################
# Wrapper around _portaudio Stream (Internal)
############################################################
# Note: See PyAudio class below for main export.
class Stream:
"""
PortAudio Stream Wrapper. Use :py:func:`PyAudio.open` to make a new
:py:class:`Stream`.
**Opening and Closing**
:py:func:`__init__`, :py:func:`close`
**Stream Info**
:py:func:`get_input_latency`, :py:func:`get_output_latency`,
:py:func:`get_time`, :py:func:`get_cpu_load`
**Stream Management**
:py:func:`start_stream`, :py:func:`stop_stream`, :py:func:`is_active`,
:py:func:`is_stopped`
**Input Output**
:py:func:`write`, :py:func:`read`, :py:func:`get_read_available`,
:py:func:`get_write_available`
"""
def __init__(self,
PA_manager,
rate,
input_channels,
output_channels,
format,
input=False,
output=False,
input_device_index=None,
output_device_index=None,
frames_per_buffer=1024,
start=True,
input_host_api_specific_stream_info=None,
output_host_api_specific_stream_info=None,
stream_callback=None):
"""
Initialize a stream; this should be called by
:py:func:`PyAudio.open`. A stream can either be input, output,
or both.
:param PA_manager: A reference to the managing :py:class:`PyAudio`
instance
:param rate: Sampling rate
:param input_channels: Number of input channels
:param output_channels: Number of output channels
:param format: Sampling size and format. See |PaSampleFormat|.
:param input: Specifies whether this is an input stream.
Defaults to ``False``.
:param output: Specifies whether this is an output stream.
Defaults to ``False``.
:param input_device_index: Index of Input Device to use.
Unspecified (or ``None``) uses default device.
Ignored if `input` is ``False``.
:param output_device_index:
Index of Output Device to use.
Unspecified (or ``None``) uses the default device.
Ignored if `output` is ``False``.
:param frames_per_buffer: Specifies the number of frames per buffer.
:param start: Start the stream running immediately.
Defaults to ``True``. In general, there is no reason to set
this to ``False``.
:param input_host_api_specific_stream_info: Specifies a host API
specific stream information data structure for input.
.. only:: pamac
See :py:class:`PaMacCoreStreamInfo`.
:param output_host_api_specific_stream_info: Specifies a host API
specific stream information data structure for output.
.. only:: pamac
See :py:class:`PaMacCoreStreamInfo`.
:param stream_callback: Specifies a callback function for
*non-blocking* (callback) operation. Default is
``None``, which indicates *blocking* operation (i.e.,
:py:func:`Stream.read` and :py:func:`Stream.write`). To use
non-blocking operation, specify a callback that conforms
to the following signature:
.. code-block:: python
callback(in_data, # recorded data if input=True; else None
frame_count, # number of frames
time_info, # dictionary
status_flags) # PaCallbackFlags
``time_info`` is a dictionary with the following keys:
``input_buffer_adc_time``, ``current_time``, and
``output_buffer_dac_time``; see the PortAudio
documentation for their meanings. ``status_flags`` is one
of |PaCallbackFlags|.
The callback must return a tuple:
.. code-block:: python
(out_data, flag)
``out_data`` is a byte array whose length should be the
(``frame_count * output_channels * bytes-per-channel``) if
``output=True`` or ``None`` if ``output=False``. ``flag``
must be either :py:data:`paContinue`, :py:data:`paComplete` or
:py:data:`paAbort` (one of |PaCallbackReturnCodes|).
When ``output=True`` and ``out_data`` does not contain at
least ``frame_count`` frames, :py:data:`paComplete` is
assumed for ``flag``.
**Note:** ``stream_callback`` is called in a separate
thread (from the main thread). Exceptions that occur in
the ``stream_callback`` will:
1. print a traceback on standard error to aid debugging,
2. queue the exception to be thrown (at some point) in
the main thread, and
3. return `paAbort` to PortAudio to stop the stream.
**Note:** Do not call :py:func:`Stream.read` or
:py:func:`Stream.write` if using non-blocking operation.
**See:** PortAudio's callback signature for additional
details: http://portaudio.com/docs/v19-doxydocs/portaudio_8h.html#a8a60fb2a5ec9cbade3f54a9c978e2710
:raise ValueError: Neither input nor output are set True.
"""
# no stupidity allowed
if not (input or output):
raise ValueError("Must specify an input or output " + "stream.")
# remember parent
self._parent = PA_manager
# remember if we are an: input, output (or both)
self._is_input = input
self._is_output = output
# are we running?
self._is_running = start
# remember some parameters
self._rate = rate
self._input_channels = input_channels
self._output_channels = output_channels
self._format = format
self._frames_per_buffer = frames_per_buffer
arguments = {
'rate' : rate,
'input_channels' : input_channels,
'output_channels' : output_channels,
'format' : format,
'input' : input,
'output' : output,
'input_device_index' : input_device_index,
'output_device_index' : output_device_index,
'frames_per_buffer' : frames_per_buffer}
if input_host_api_specific_stream_info:
_l = input_host_api_specific_stream_info
arguments[
'input_host_api_specific_stream_info'
] = _l._get_host_api_stream_object()
if output_host_api_specific_stream_info:
_l = output_host_api_specific_stream_info
arguments[
'output_host_api_specific_stream_info'
] = _l._get_host_api_stream_object()
if stream_callback:
arguments['stream_callback'] = stream_callback
# calling pa.open returns a stream object
self._stream = pa.open(**arguments)
self._input_latency = self._stream.inputLatency
self._output_latency = self._stream.outputLatency
if self._is_running:
pa.start_stream(self._stream)
def close(self):
""" Close the stream """
pa.close(self._stream)
self._is_running = False
self._parent._remove_stream(self)
############################################################
# Stream Info
############################################################
def get_input_latency(self):
"""
Return the input latency.
:rtype: float
"""
return self._stream.inputLatency
def get_output_latency(self):
"""
Return the output latency.
:rtype: float
"""
return self._stream.outputLatency
def get_time(self):
"""
Return stream time.
:rtype: float
"""
return pa.get_stream_time(self._stream)
def get_cpu_load(self):
"""
Return the CPU load. This is always 0.0 for the
blocking API.
:rtype: float
"""
return pa.get_stream_cpu_load(self._stream)
############################################################
# Stream Management
############################################################
def start_stream(self):
""" Start the stream. """
if self._is_running:
return
pa.start_stream(self._stream)
self._is_running = True
def stop_stream(self):
"""
Stop the stream. Once the stream is stopped, one may not call
write or read. Call :py:func:`start_stream` to resume the
stream.
"""
if not self._is_running:
return
pa.stop_stream(self._stream)
self._is_running = False
def is_active(self):
"""
Returns whether the stream is active.
:rtype: bool
"""
return pa.is_stream_active(self._stream)
def is_stopped(self):
"""
Returns whether the stream is stopped.
:rtype: bool
"""
return pa.is_stream_stopped(self._stream)
############################################################
# Reading/Writing
############################################################
def write(self, frames, num_frames=None,
exception_on_underflow=False):
"""
Write samples to the stream. Do not call when using
*non-blocking* mode.
:param frames:
The frames of data.
:param num_frames:
The number of frames to write.
Defaults to None, in which case this value will be
automatically computed.
:param exception_on_underflow:
Specifies whether an IOError exception should be thrown
(or silently ignored) on buffer underflow. Defaults
to False for improved performance, especially on
slower platforms.
:raises IOError: if the stream is not an output stream
or if the write operation was unsuccessful.
:rtype: `None`
"""
if not self._is_output:
raise IOError("Not output stream",
paCanNotWriteToAnInputOnlyStream)
if num_frames == None:
# determine how many frames to read
width = get_sample_size(self._format)
num_frames = int(len(frames) / (self._output_channels * width))
#print len(frames), self._output_channels, self._width, num_frames
pa.write_stream(self._stream, frames, num_frames,
exception_on_underflow)
def read(self, num_frames, exception_on_overflow=True):
"""
Read samples from the stream. Do not call when using
*non-blocking* mode.
:param num_frames: The number of frames to read.
:param exception_on_overflow:
Specifies whether an IOError exception should be thrown
(or silently ignored) on input buffer overflow. Defaults
to True.
:raises IOError: if stream is not an input stream
or if the read operation was unsuccessful.
:rtype: string
"""
if not self._is_input:
raise IOError("Not input stream",
paCanNotReadFromAnOutputOnlyStream)
return pa.read_stream(self._stream, num_frames, exception_on_overflow)
def get_read_available(self):
"""
Return the number of frames that can be read without waiting.
:rtype: integer
"""
return pa.get_stream_read_available(self._stream)
def get_write_available(self):
"""
Return the number of frames that can be written without
waiting.
:rtype: integer
"""
return pa.get_stream_write_available(self._stream)
############################################################
# Main Export
############################################################
class PyAudio:
"""
Python interface to PortAudio. Provides methods to:
- initialize and terminate PortAudio
- open and close streams
- query and inspect the available PortAudio Host APIs
- query and inspect the available PortAudio audio
devices
Use this class to open and close streams.
**Stream Management**
:py:func:`open`, :py:func:`close`
**Host API**
:py:func:`get_host_api_count`, :py:func:`get_default_host_api_info`,
:py:func:`get_host_api_info_by_type`,
:py:func:`get_host_api_info_by_index`,
:py:func:`get_device_info_by_host_api_device_index`
**Device API**
:py:func:`get_device_count`, :py:func:`is_format_supported`,
:py:func:`get_default_input_device_info`,
:py:func:`get_default_output_device_info`,
:py:func:`get_device_info_by_index`
**Stream Format Conversion**
:py:func:`get_sample_size`, :py:func:`get_format_from_width`
**Details**
"""
############################################################
# Initialization and Termination
############################################################
def __init__(self):
"""Initialize PortAudio."""
pa.initialize()
self._streams = set()
def terminate(self):
"""
Terminate PortAudio.
:attention: Be sure to call this method for every instance of
this object to release PortAudio resources.
"""
for stream in self._streams.copy():
stream.close()
self._streams = set()
pa.terminate()
############################################################
# Stream Format
############################################################
def get_sample_size(self, format):
"""
Returns the size (in bytes) for the specified
sample `format` (a |PaSampleFormat| constant).
:param format: A |PaSampleFormat| constant.
:raises ValueError: Invalid specified `format`.
:rtype: integer
"""
return pa.get_sample_size(format)
def get_format_from_width(self, width, unsigned=True):
"""
Returns a PortAudio format constant for the specified `width`.
:param width: The desired sample width in bytes (1, 2, 3, or 4)
:param unsigned: For 1 byte width, specifies signed or unsigned format.
:raises ValueError: for invalid `width`
:rtype: A |PaSampleFormat| constant.
"""
if width == 1:
if unsigned:
return paUInt8
else:
return paInt8
elif width == 2:
return paInt16
elif width == 3:
return paInt24
elif width == 4:
return paFloat32
else:
raise ValueError("Invalid width: %d" % width)
############################################################
# Stream Factory
############################################################
def open(self, *args, **kwargs):
"""
Open a new stream. See constructor for
:py:func:`Stream.__init__` for parameter details.
:returns: A new :py:class:`Stream`
"""
stream = Stream(self, *args, **kwargs)
self._streams.add(stream)
return stream
def close(self, stream):
"""
Close a stream. Typically use :py:func:`Stream.close` instead.
:param stream: An instance of the :py:class:`Stream` object.
:raises ValueError: if stream does not exist.
"""
if stream not in self._streams:
raise ValueError("Stream `%s' not found" % str(stream))
stream.close()
def _remove_stream(self, stream):
"""
Internal method. Removes a stream.
:param stream: An instance of the :py:class:`Stream` object.
"""
if stream in self._streams:
self._streams.remove(stream)
############################################################
# Host API Inspection
############################################################
def get_host_api_count(self):
"""
Return the number of available PortAudio Host APIs.
:rtype: integer
"""
return pa.get_host_api_count()
def get_default_host_api_info(self):
"""
Return a dictionary containing the default Host API
parameters. The keys of the dictionary mirror the data fields
of PortAudio's ``PaHostApiInfo`` structure.
:raises IOError: if no default input device is available
:rtype: dict
"""
defaultHostApiIndex = pa.get_default_host_api()
return self.get_host_api_info_by_index(defaultHostApiIndex)
def get_host_api_info_by_type(self, host_api_type):
"""
Return a dictionary containing the Host API parameters for the
host API specified by the `host_api_type`. The keys of the
dictionary mirror the data fields of PortAudio's ``PaHostApiInfo``
structure.
:param host_api_type: The desired |PaHostAPI|
:raises IOError: for invalid `host_api_type`
:rtype: dict
"""
index = pa.host_api_type_id_to_host_api_index(host_api_type)
return self.get_host_api_info_by_index(index)
def get_host_api_info_by_index(self, host_api_index):
"""
Return a dictionary containing the Host API parameters for the
host API specified by the `host_api_index`. The keys of the
dictionary mirror the data fields of PortAudio's ``PaHostApiInfo``
structure.
:param host_api_index: The host api index
:raises IOError: for invalid `host_api_index`
:rtype: dict
"""
return self._make_host_api_dictionary(
host_api_index,
pa.get_host_api_info(host_api_index)
)
def get_device_info_by_host_api_device_index(self,
host_api_index,
host_api_device_index):
"""
Return a dictionary containing the Device parameters for a
given Host API's n'th device. The keys of the dictionary
mirror the data fields of PortAudio's ``PaDeviceInfo`` structure.
:param host_api_index: The Host API index number
:param host_api_device_index: The n'th device of the host API
:raises IOError: for invalid indices
:rtype: dict
"""
long_method_name = pa.host_api_device_index_to_device_index
device_index = long_method_name(host_api_index,
host_api_device_index)
return self.get_device_info_by_index(device_index)
def _make_host_api_dictionary(self, index, host_api_struct):
"""
Internal method to create Host API dictionary that mirrors
PortAudio's ``PaHostApiInfo`` structure.
:rtype: dict
"""
return {'index' : index,
'structVersion' : host_api_struct.structVersion,
'type' : host_api_struct.type,
'name' : host_api_struct.name,
'deviceCount' : host_api_struct.deviceCount,
'defaultInputDevice' : host_api_struct.defaultInputDevice,
'defaultOutputDevice' : host_api_struct.defaultOutputDevice}
############################################################
# Device Inspection
############################################################
def get_device_count(self):
"""
Return the number of PortAudio devices.
:rtype: integer
"""
return pa.get_device_count()
def is_format_supported(self, rate,
input_device=None,
input_channels=None,
input_format=None,
input_host_api_specific_stream_info=None,
output_device=None,
output_channels=None,
output_format=None,
output_host_api_specific_stream_info=None):
"""
Check to see if specified device configuration
is supported. Returns True if the configuration
is supported; throws a ValueError exception otherwise.
:param rate:
Specifies the desired rate (in Hz)
:param input_device:
The input device index. Specify ``None`` (default) for
half-duplex output-only streams.
:param input_channels:
The desired number of input channels. Ignored if
`input_device` is not specified (or ``None``).
:param input_format:
PortAudio sample format constant defined
in this module
:param output_device:
The output device index. Specify ``None`` (default) for
half-duplex input-only streams.
:param output_channels:
The desired number of output channels. Ignored if
`output_device` is not specified (or ``None``).
:param output_format:
|PaSampleFormat| constant.
:rtype: bool
:raises ValueError: tuple containing (error string, |PaErrorCode|).
"""
if input_device == None and output_device == None:
raise ValueError("must specify stream format for input, " +\
"output, or both", paInvalidDevice);
kwargs = {}
if input_device != None:
kwargs['input_device'] = input_device
kwargs['input_channels'] = input_channels
kwargs['input_format'] = input_format
if input_host_api_specific_stream_info:
kwargs['input_host_api_specific_stream_info'] = (
input_host_api_specific_stream_info._get_host_api_stream_object()
)
if output_device != None:
kwargs['output_device'] = output_device
kwargs['output_channels'] = output_channels
kwargs['output_format'] = output_format
if output_host_api_specific_stream_info:
kwargs['output_host_api_specific_stream_info'] = (
output_host_api_specific_stream_info._get_host_api_stream_object()
)
return pa.is_format_supported(rate, **kwargs)
def get_default_input_device_info(self):
"""
Return the default input Device parameters as a
dictionary. The keys of the dictionary mirror the data fields
of PortAudio's ``PaDeviceInfo`` structure.
:raises IOError: No default input device available.
:rtype: dict
"""
device_index = pa.get_default_input_device()
return self.get_device_info_by_index(device_index)
def get_default_output_device_info(self):
"""
Return the default output Device parameters as a
dictionary. The keys of the dictionary mirror the data fields
of PortAudio's ``PaDeviceInfo`` structure.
:raises IOError: No default output device available.
:rtype: dict
"""
device_index = pa.get_default_output_device()
return self.get_device_info_by_index(device_index)
def get_device_info_by_index(self, device_index):
"""
Return the Device parameters for device specified in
`device_index` as a dictionary. The keys of the dictionary
mirror the data fields of PortAudio's ``PaDeviceInfo``
structure.
:param device_index: The device index
:raises IOError: Invalid `device_index`.
:rtype: dict
"""
return self._make_device_info_dictionary(
device_index,
pa.get_device_info(device_index)
)
def _make_device_info_dictionary(self, index, device_info):
"""
Internal method to create Device Info dictionary that mirrors
PortAudio's ``PaDeviceInfo`` structure.
:rtype: dict
"""
device_name = device_info.name
# Attempt to decode device_name
for codec in ["utf-8", "cp1252"]:
try:
device_name = device_name.decode(codec)
break
except:
pass
# If we fail to decode, we return the raw bytes and let the caller
# deal with the encoding.
return {'index' : index,
'structVersion' : device_info.structVersion,
'name' : device_name,
'hostApi' : device_info.hostApi,
'maxInputChannels' : device_info.maxInputChannels,
'maxOutputChannels' : device_info.maxOutputChannels,
'defaultLowInputLatency' :
device_info.defaultLowInputLatency,
'defaultLowOutputLatency' :
device_info.defaultLowOutputLatency,
'defaultHighInputLatency' :
device_info.defaultHighInputLatency,
'defaultHighOutputLatency' :
device_info.defaultHighOutputLatency,
'defaultSampleRate' :
device_info.defaultSampleRate
}
######################################################################
# Host Specific Stream Info
######################################################################
try:
paMacCoreStreamInfo = pa.paMacCoreStreamInfo
except AttributeError:
pass
else:
class PaMacCoreStreamInfo:
"""
Mac OS X-only: PaMacCoreStreamInfo is a PortAudio Host API
Specific Stream Info data structure for specifying Mac OS
X-only settings. Instantiate this class (if desired) and pass
the instance as the argument in :py:func:`PyAudio.open` to parameters
``input_host_api_specific_stream_info`` or
``output_host_api_specific_stream_info``.
(See :py:func:`Stream.__init__`.)
:note: Mac OS X only.
.. |PaMacCoreFlags| replace:: :ref:`PortAudio Mac Core Flags <PaMacCoreFlags>`
.. _PaMacCoreFlags:
**PortAudio Mac Core Flags**
:py:data:`paMacCoreChangeDeviceParameters`,
:py:data:`paMacCoreFailIfConversionRequired`,
:py:data:`paMacCoreConversionQualityMin`,
:py:data:`paMacCoreConversionQualityMedium`,
:py:data:`paMacCoreConversionQualityLow`,
:py:data:`paMacCoreConversionQualityHigh`,
:py:data:`paMacCoreConversionQualityMax`,
:py:data:`paMacCorePlayNice`,
:py:data:`paMacCorePro`,
:py:data:`paMacCoreMinimizeCPUButPlayNice`,
:py:data:`paMacCoreMinimizeCPU`
**Settings**
:py:func:`get_flags`, :py:func:`get_channel_map`
"""
paMacCoreChangeDeviceParameters = pa.paMacCoreChangeDeviceParameters
paMacCoreFailIfConversionRequired = pa.paMacCoreFailIfConversionRequired
paMacCoreConversionQualityMin = pa.paMacCoreConversionQualityMin
paMacCoreConversionQualityMedium = pa.paMacCoreConversionQualityMedium
paMacCoreConversionQualityLow = pa.paMacCoreConversionQualityLow
paMacCoreConversionQualityHigh = pa.paMacCoreConversionQualityHigh
paMacCoreConversionQualityMax = pa.paMacCoreConversionQualityMax
paMacCorePlayNice = pa.paMacCorePlayNice
paMacCorePro = pa.paMacCorePro
paMacCoreMinimizeCPUButPlayNice = pa.paMacCoreMinimizeCPUButPlayNice
paMacCoreMinimizeCPU = pa.paMacCoreMinimizeCPU
def __init__(self, flags=None, channel_map=None):
"""
Initialize with flags and channel_map. See PortAudio
documentation for more details on these parameters; they are
passed almost verbatim to the PortAudio library.
:param flags: |PaMacCoreFlags| OR'ed together.
See :py:class:`PaMacCoreStreamInfo`.
:param channel_map: An array describing the channel mapping.
See PortAudio documentation for usage.
"""
kwargs = {"flags" : flags,
"channel_map" : channel_map}
if flags == None:
del kwargs["flags"]
if channel_map == None:
del kwargs["channel_map"]
self._paMacCoreStreamInfo = paMacCoreStreamInfo(**kwargs)
def get_flags(self):
"""
Return the flags set at instantiation.
:rtype: integer
"""
return self._paMacCoreStreamInfo.flags
def get_channel_map(self):
"""
Return the channel map set at instantiation.
:rtype: tuple or None
"""
return self._paMacCoreStreamInfo.channel_map
def _get_host_api_stream_object(self):
"""Private method."""
return self._paMacCoreStreamInfo
try:
paWasapiStreamInfo = pa.paWasapiStreamInfo
except AttributeError:
pass
else:
class PaWasapiStreamInfo:
paWinWasapiExclusive = pa.paWinWasapiExclusive
# paWinWasapiRedirectHostProcessor = pa.paWinWasapiRedirectHostProcessor
# paWinWasapiUseChannelMask = pa.paWinWasapiUseChannelMask
paWinWasapiPolling = pa.paWinWasapiPolling
# paWinWasapiThreadPriority = pa.paWinWasapiThreadPriority
paWinWasapiExplicitSampleFormat = pa.paWinWasapiExplicitSampleFormat
paWinWasapiAutoConvert = pa.paWinWasapiAutoConvert
def __init__(self, flags=None):
"""
Initialize with flags. See PortAudio
documentation for more details on these parameters; they are
passed almost verbatim to the PortAudio library.
:param flags: |PaWasapiFlags| OR'ed together.
See :py:class:`PaWasapiStreamInfo`.
"""
kwargs = {"flags" : flags}
if flags == None:
del kwargs["flags"]
self._paWasapiStreamInfo = paWasapiStreamInfo(**kwargs)
def get_flags(self):
"""
Return the flags set at instantiation.
:rtype: integer
"""
return self._paWasapiStreamInfo.flags
def _get_host_api_stream_object(self):
"""Private method."""
return self._paWasapiStreamInfo
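# A minimal blocking-mode recording sketch using this fork's API, which takes
# separate input_channels/output_channels arguments instead of a single
# "channels" parameter. The function name and all parameter values below are
# illustrative and untested against the patched _portaudio module; real code
# should pick a rate/format supported by the chosen device.
def _example_blocking_record(seconds=1, rate=16000, frames_per_buffer=1024):
    p = PyAudio()
    stream = p.open(rate=rate,
                    input_channels=1,
                    output_channels=0,
                    format=paInt16,
                    input=True,
                    frames_per_buffer=frames_per_buffer)
    frames = []
    for _ in range(int(rate / frames_per_buffer * seconds)):
        frames.append(stream.read(frames_per_buffer))
    stream.stop_stream()
    stream.close()
    p.terminate()
    return b"".join(frames)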
|
py | 1a3b6f716056ef2c98d067c3c5ba021968331ad6 | class ProxyType:
def __init__(self,obj):
self.obj = obj
CallableProxyType = ProxyType
ProxyTypes = [ProxyType,CallableProxyType]
class ReferenceType:
def __init__(self,obj,callback):
self.obj = obj
self.callback = callback
class ref:
def __init__(self,obj,callback=None):
self.obj = ReferenceType(obj,callback)
self.callback=callback
def __call__(self):
return self.obj.obj
def __hash__(self):
return hash(self.obj.obj)
def __eq__(self, other):
return self.obj.obj == other.obj.obj
def getweakrefcount(obj):
return 1
def getweakrefs(obj):
return obj
def proxy(obj,callback=None):
return ProxyType(obj)
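# Illustrative behaviour of this stub: ref() simply stores the object, so
#   r = ref(obj); r() is obj  ->  True
# and proxy(obj) returns a ProxyType wrapper that keeps obj in its .obj attribute.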
|
py | 1a3b703f7e1797a1ba5070151cefafae85dab64b | """
File: test.py
Author: Jens Petit
Email: [email protected]
Github: https://github.com/j-petit
Description: Class for
"""
def modelParse(filename):
"""Parses the standard mplus model into single lines. Model refers to the
concept defined after the model: keyword in mplus.
Each line of the model is translated into a lines with only one dependency.
It looks for the line containing "model:" and start parsing there until the
next empty line.
Parameters
----------
filename : The mplus model file to parse
Returns
-------
new_lines : list of strings representing a single line of the model
j : the line number where the model stopped
"""
key_words = ['on', 'with']
found_model = False
with open(filename) as fp:
new_lines = []
model_line = 0
for j, line in enumerate(fp):
line = line.strip(None)
if line.lower() == "model:":
found_model = True
continue
if found_model:
if line == "":
model_line = j
break
line = line.rstrip(";")
split_line = line.split(" ")
if (("on" in line or "with" in line) and len(split_line) > 3):
if ("on" in line):
key_word = "on"
else:
key_word = "with"
index = split_line.index(key_word)
if index == 1:
r_list = split_line[2:]
for i in range(len(r_list)):
line = "{} {} {}".format(split_line[0], key_word, r_list[i])
new_lines.append(line)
else:
l_list = split_line[:index]
for i in range(len(l_list)):
line = "{} {} {}".format(l_list[i], key_word, split_line[-1])
new_lines.append(line)
else:
new_lines.append(line)
if not found_model:
raise Exception("No model found in this file")
return new_lines, j
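# Illustrative input/output (hypothetical model line): given a line
#   "y on x1 x2 x3"
# inside the model: section, modelParse returns the single-dependency lines
#   ["y on x1", "y on x2", "y on x3"]
# while lines without "on"/"with", or with fewer than four tokens, pass through
# unchanged.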
def appendToFile(filename, model):
"""Appends a model to a file.
Parameters
----------
filename : string which specifies the path.
model : mplus model object
"""
with open(filename, 'a') as f:
f.write(model.name + ":\n")
for i, line in enumerate(model.model):
if model.labels:
f.write(line + " (" + model.labels[i] + ");\n")
else:
f.write(line + ";\n")
f.write("\n")
def combineModels(model1, model2, label, same_indices):
"""Combines the labels of two model inplace.
Parameters
----------
model1 : mplus model object
model2 : mplus model object
label : string for the combined model parts
same_indices : list of ints
"""
for i, index in enumerate(same_indices):
model1.labels[index] = label + str(i)
model2.labels[index] = label + str(i)
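# Illustrative call (model objects and label are made up):
#   combineModels(m1, m2, "b", [0, 2])
# sets m1.labels[0] = m2.labels[0] = "b0" and m1.labels[2] = m2.labels[2] = "b1",
# so that mplus treats the corresponding parameters as shared between the models.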
|
py | 1a3b70d26eca1b48db6eb2b69a7b397550463b95 | from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
sourcefiles = ["wave_propogation.pyx", "wp.cpp"]
extensions = [Extension("wave_propogation", sourcefiles, language="c++")]
setup(ext_modules=cythonize(extensions, language_level=3))
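# Typically built with, e.g.:
#   python setup.py build_ext --inplace
# which assumes Cython, a C++ toolchain and the listed wave_propogation.pyx /
# wp.cpp sources are available next to this script.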
|
py | 1a3b720fa32471eb22b3deac9af0130401b45a93 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-25 13:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hood', '0004_auto_20180525_0913'),
]
operations = [
migrations.AddField(
model_name='profile',
name='email_confirmed',
field=models.BooleanField(default=False),
),
]
|
py | 1a3b72d2f736671289080788016ead7f9543371c | """
Starling setup script.
See license in LICENSE.txt.
"""
import setuptools
import os
from starling_sim.version import __version__
# short description of the project
DESC = "Agent-based framework for mobility simulation"
# get long description from README.md
# with open("README.md", "r") as fh:
# LONG_DESC = fh.read()
LONG_DESC = "Long description of the Starling project (TODO)"
# list of classifiers from the PyPI classifiers trove
CLASSIFIERS = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: GIS",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: CeCILL-B Free Software License Agreement (CECILL-B)"
]
# only specify install_requires if not in RTD environment
if os.getenv("READTHEDOCS") == "True":
INSTALL_REQUIRES = []
else:
with open("requirements.txt") as f:
INSTALL_REQUIRES = [line.strip() for line in f.readlines()]
# call setup
setuptools.setup(
name="starling-sim",
version=__version__,
license="CECILL-B",
author="Tellae",
author_email="[email protected]",
description=DESC,
long_description=LONG_DESC,
long_description_content_type="text/markdown",
url="https://github.com/tellae/starling",
packages=setuptools.find_packages() + ["starling_sim/schemas"],
classifiers=CLASSIFIERS,
python_requires='>=3.6',
install_requires=INSTALL_REQUIRES,
include_package_data=True
)
|
py | 1a3b736884198ec3b8f19d9ab7d0c7883f28fbb9 | # Enter your code here
def triple(num):
num = num*3
print(num)
triple(6)
triple(99)
|
py | 1a3b738925dc79852fac1cd35d89da00f3cbfad5 | """
Instantiate a variation font. Run, eg:
$ fonttools varLib.mutator ./NotoSansArabic-VF.ttf wght=140 wdth=85
"""
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc.fixedTools import floatToFixedToFloat, otRound, floatToFixed
from fontTools.pens.boundsPen import BoundsPen
from fontTools.ttLib import TTFont, newTable
from fontTools.ttLib.tables import ttProgram
from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
from fontTools.varLib import _GetCoordinates, _SetCoordinates
from fontTools.varLib.models import (
supportScalar,
normalizeLocation,
piecewiseLinearMap,
)
from fontTools.varLib.merger import MutatorMerger
from fontTools.varLib.varStore import VarStoreInstancer
from fontTools.varLib.mvar import MVAR_ENTRIES
from fontTools.varLib.iup import iup_delta
import fontTools.subset.cff
import os.path
import logging
log = logging.getLogger("fontTools.varlib.mutator")
# map 'wdth' axis (1..200) to OS/2.usWidthClass (1..9), rounding to closest
OS2_WIDTH_CLASS_VALUES = {}
percents = [50.0, 62.5, 75.0, 87.5, 100.0, 112.5, 125.0, 150.0, 200.0]
for i, (prev, curr) in enumerate(zip(percents[:-1], percents[1:]), start=1):
half = (prev + curr) / 2
OS2_WIDTH_CLASS_VALUES[half] = i
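# Worked example: a user-space 'wdth' of 85 lies between the midpoints 81.25 and
# 93.75, i.e. closest to the 87.5 stop, so the lookup in instantiateVariableFont
# maps it to OS/2.usWidthClass 4.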
def interpolate_cff2_PrivateDict(topDict, interpolateFromDeltas):
pd_blend_lists = ("BlueValues", "OtherBlues", "FamilyBlues",
"FamilyOtherBlues", "StemSnapH",
"StemSnapV")
pd_blend_values = ("BlueScale", "BlueShift",
"BlueFuzz", "StdHW", "StdVW")
for fontDict in topDict.FDArray:
pd = fontDict.Private
vsindex = pd.vsindex if (hasattr(pd, 'vsindex')) else 0
for key, value in pd.rawDict.items():
if (key in pd_blend_values) and isinstance(value, list):
delta = interpolateFromDeltas(vsindex, value[1:])
pd.rawDict[key] = otRound(value[0] + delta)
elif (key in pd_blend_lists) and isinstance(value[0], list):
"""If any argument in a BlueValues list is a blend list,
then they all are. The first value of each list is an
absolute value. The delta tuples are calculated from
relative master values, hence we need to append all the
deltas to date to each successive absolute value."""
delta = 0
for i, val_list in enumerate(value):
delta += otRound(interpolateFromDeltas(vsindex,
val_list[1:]))
value[i] = val_list[0] + delta
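# Worked example (the deltas are assumed values, not real font data): if
# BlueValues starts as [[500, ...], [530, ...]] and interpolateFromDeltas yields
# 4 for the first entry and -2 for the second, the instanced list becomes
# [504, 532]; the first delta (4) is carried forward, so the second entry adds
# 4 + (-2) = 2 to its absolute value 530.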
def interpolate_cff2_charstrings(topDict, interpolateFromDeltas, glyphOrder):
charstrings = topDict.CharStrings
for gname in glyphOrder:
# Interpolate charstring
charstring = charstrings[gname]
pd = charstring.private
vsindex = pd.vsindex if (hasattr(pd, 'vsindex')) else 0
num_regions = pd.getNumRegions(vsindex)
numMasters = num_regions + 1
new_program = []
last_i = 0
for i, token in enumerate(charstring.program):
if token == 'blend':
num_args = charstring.program[i - 1]
""" The stack is now:
..args for following operations
num_args values from the default font
num_args tuples, each with numMasters-1 delta values
num_blend_args
'blend'
"""
argi = i - (num_args*numMasters + 1)
end_args = tuplei = argi + num_args
while argi < end_args:
next_ti = tuplei + num_regions
deltas = charstring.program[tuplei:next_ti]
delta = interpolateFromDeltas(vsindex, deltas)
charstring.program[argi] += otRound(delta)
tuplei = next_ti
argi += 1
new_program.extend(charstring.program[last_i:end_args])
last_i = i + 1
if last_i != 0:
new_program.extend(charstring.program[last_i:])
charstring.program = new_program
def interpolate_cff2_metrics(varfont, topDict, glyphOrder, loc):
"""Unlike TrueType glyphs, neither advance width nor bounding box
info is stored in a CFF2 charstring. The width data exists only in
the hmtx and HVAR tables. Since LSB data cannot be interpolated
reliably from the master LSB values in the hmtx table, we traverse
the charstring to determine the actual bounding box. """
charstrings = topDict.CharStrings
boundsPen = BoundsPen(glyphOrder)
hmtx = varfont['hmtx']
hvar_table = None
if 'HVAR' in varfont:
hvar_table = varfont['HVAR'].table
fvar = varfont['fvar']
varStoreInstancer = VarStoreInstancer(hvar_table.VarStore, fvar.axes, loc)
for gid, gname in enumerate(glyphOrder):
entry = list(hmtx[gname])
# get width delta.
if hvar_table:
if hvar_table.AdvWidthMap:
width_idx = hvar_table.AdvWidthMap.mapping[gname]
else:
width_idx = gid
width_delta = otRound(varStoreInstancer[width_idx])
else:
width_delta = 0
# get LSB.
boundsPen.init()
charstring = charstrings[gname]
charstring.draw(boundsPen)
if boundsPen.bounds is None:
# Happens with non-marking glyphs
lsb_delta = 0
else:
lsb = boundsPen.bounds[0]
lsb_delta = entry[1] - lsb
if lsb_delta or width_delta:
if width_delta:
entry[0] += width_delta
if lsb_delta:
entry[1] = lsb
hmtx[gname] = tuple(entry)
def instantiateVariableFont(varfont, location, inplace=False):
""" Generate a static instance from a variable TTFont and a dictionary
defining the desired location along the variable font's axes.
The location values must be specified as user-space coordinates, e.g.:
{'wght': 400, 'wdth': 100}
By default, a new TTFont object is returned. If ``inplace`` is True, the
input varfont is modified and reduced to a static font.
"""
if not inplace:
# make a copy to leave input varfont unmodified
stream = BytesIO()
varfont.save(stream)
stream.seek(0)
varfont = TTFont(stream)
fvar = varfont['fvar']
axes = {a.axisTag:(a.minValue,a.defaultValue,a.maxValue) for a in fvar.axes}
loc = normalizeLocation(location, axes)
if 'avar' in varfont:
maps = varfont['avar'].segments
loc = {k: piecewiseLinearMap(v, maps[k]) for k,v in loc.items()}
# Quantize to F2Dot14, to avoid surprise interpolations.
loc = {k:floatToFixedToFloat(v, 14) for k,v in loc.items()}
# Location is normalized now
log.info("Normalized location: %s", loc)
if 'gvar' in varfont:
log.info("Mutating glyf/gvar tables")
gvar = varfont['gvar']
glyf = varfont['glyf']
# get list of glyph names in gvar sorted by component depth
glyphnames = sorted(
gvar.variations.keys(),
key=lambda name: (
glyf[name].getCompositeMaxpValues(glyf).maxComponentDepth
if glyf[name].isComposite() else 0,
name))
for glyphname in glyphnames:
variations = gvar.variations[glyphname]
coordinates,_ = _GetCoordinates(varfont, glyphname)
origCoords, endPts = None, None
for var in variations:
scalar = supportScalar(loc, var.axes)
if not scalar: continue
delta = var.coordinates
if None in delta:
if origCoords is None:
origCoords,control = _GetCoordinates(varfont, glyphname)
endPts = control[1] if control[0] >= 1 else list(range(len(control[1])))
delta = iup_delta(delta, origCoords, endPts)
coordinates += GlyphCoordinates(delta) * scalar
_SetCoordinates(varfont, glyphname, coordinates)
else:
glyf = None
if 'cvar' in varfont:
log.info("Mutating cvt/cvar tables")
cvar = varfont['cvar']
cvt = varfont['cvt ']
deltas = {}
for var in cvar.variations:
scalar = supportScalar(loc, var.axes)
if not scalar: continue
for i, c in enumerate(var.coordinates):
if c is not None:
deltas[i] = deltas.get(i, 0) + scalar * c
for i, delta in deltas.items():
cvt[i] += otRound(delta)
if 'CFF2' in varfont:
log.info("Mutating CFF2 table")
glyphOrder = varfont.getGlyphOrder()
CFF2 = varfont['CFF2']
topDict = CFF2.cff.topDictIndex[0]
vsInstancer = VarStoreInstancer(topDict.VarStore.otVarStore, fvar.axes, loc)
interpolateFromDeltas = vsInstancer.interpolateFromDeltas
interpolate_cff2_PrivateDict(topDict, interpolateFromDeltas)
CFF2.desubroutinize()
interpolate_cff2_charstrings(topDict, interpolateFromDeltas, glyphOrder)
interpolate_cff2_metrics(varfont, topDict, glyphOrder, loc)
del topDict.rawDict['VarStore']
del topDict.VarStore
if 'MVAR' in varfont:
log.info("Mutating MVAR table")
mvar = varfont['MVAR'].table
varStoreInstancer = VarStoreInstancer(mvar.VarStore, fvar.axes, loc)
records = mvar.ValueRecord
for rec in records:
mvarTag = rec.ValueTag
if mvarTag not in MVAR_ENTRIES:
continue
tableTag, itemName = MVAR_ENTRIES[mvarTag]
delta = otRound(varStoreInstancer[rec.VarIdx])
if not delta:
continue
setattr(varfont[tableTag], itemName,
getattr(varfont[tableTag], itemName) + delta)
log.info("Mutating FeatureVariations")
for tableTag in 'GSUB','GPOS':
if not tableTag in varfont:
continue
table = varfont[tableTag].table
if not hasattr(table, 'FeatureVariations'):
continue
variations = table.FeatureVariations
for record in variations.FeatureVariationRecord:
applies = True
for condition in record.ConditionSet.ConditionTable:
if condition.Format == 1:
axisIdx = condition.AxisIndex
axisTag = fvar.axes[axisIdx].axisTag
Min = condition.FilterRangeMinValue
Max = condition.FilterRangeMaxValue
v = loc[axisTag]
if not (Min <= v <= Max):
applies = False
else:
applies = False
if not applies:
break
if applies:
assert record.FeatureTableSubstitution.Version == 0x00010000
for rec in record.FeatureTableSubstitution.SubstitutionRecord:
table.FeatureList.FeatureRecord[rec.FeatureIndex].Feature = rec.Feature
break
del table.FeatureVariations
if 'GDEF' in varfont and varfont['GDEF'].table.Version >= 0x00010003:
log.info("Mutating GDEF/GPOS/GSUB tables")
gdef = varfont['GDEF'].table
instancer = VarStoreInstancer(gdef.VarStore, fvar.axes, loc)
merger = MutatorMerger(varfont, loc)
merger.mergeTables(varfont, [varfont], ['GDEF', 'GPOS'])
# Downgrade GDEF.
del gdef.VarStore
gdef.Version = 0x00010002
if gdef.MarkGlyphSetsDef is None:
del gdef.MarkGlyphSetsDef
gdef.Version = 0x00010000
if not (gdef.LigCaretList or
gdef.MarkAttachClassDef or
gdef.GlyphClassDef or
gdef.AttachList or
(gdef.Version >= 0x00010002 and gdef.MarkGlyphSetsDef)):
del varfont['GDEF']
addidef = False
if glyf:
for glyph in glyf.glyphs.values():
if hasattr(glyph, "program"):
instructions = glyph.program.getAssembly()
# If GETVARIATION opcode is used in bytecode of any glyph add IDEF
addidef = any(op.startswith("GETVARIATION") for op in instructions)
if addidef:
break
if addidef:
log.info("Adding IDEF to fpgm table for GETVARIATION opcode")
asm = []
if 'fpgm' in varfont:
fpgm = varfont['fpgm']
asm = fpgm.program.getAssembly()
else:
fpgm = newTable('fpgm')
fpgm.program = ttProgram.Program()
varfont['fpgm'] = fpgm
asm.append("PUSHB[000] 145")
asm.append("IDEF[ ]")
args = [str(len(loc))]
for a in fvar.axes:
args.append(str(floatToFixed(loc[a.axisTag], 14)))
asm.append("NPUSHW[ ] " + ' '.join(args))
asm.append("ENDF[ ]")
fpgm.program.fromAssembly(asm)
# Change maxp attributes as IDEF is added
if 'maxp' in varfont:
maxp = varfont['maxp']
if hasattr(maxp, "maxInstructionDefs"):
maxp.maxInstructionDefs += 1
else:
setattr(maxp, "maxInstructionDefs", 1)
if hasattr(maxp, "maxStackElements"):
maxp.maxStackElements = max(len(loc), maxp.maxStackElements)
else:
setattr(maxp, "maxStackElements", len(loc))
if 'name' in varfont:
log.info("Pruning name table")
exclude = {a.axisNameID for a in fvar.axes}
for i in fvar.instances:
exclude.add(i.subfamilyNameID)
exclude.add(i.postscriptNameID)
varfont['name'].names[:] = [
n for n in varfont['name'].names
if n.nameID not in exclude
]
if "wght" in location and "OS/2" in varfont:
varfont["OS/2"].usWeightClass = otRound(
max(1, min(location["wght"], 1000))
)
if "wdth" in location:
wdth = location["wdth"]
for percent, widthClass in sorted(OS2_WIDTH_CLASS_VALUES.items()):
if wdth < percent:
varfont["OS/2"].usWidthClass = widthClass
break
else:
varfont["OS/2"].usWidthClass = 9
if "slnt" in location and "post" in varfont:
varfont["post"].italicAngle = max(-90, min(location["slnt"], 90))
log.info("Removing variable tables")
for tag in ('avar','cvar','fvar','gvar','HVAR','MVAR','VVAR','STAT'):
if tag in varfont:
del varfont[tag]
return varfont
def main(args=None):
from fontTools import configLogger
import argparse
parser = argparse.ArgumentParser(
"fonttools varLib.mutator", description="Instantiate a variable font")
parser.add_argument(
"input", metavar="INPUT.ttf", help="Input variable TTF file.")
parser.add_argument(
"locargs", metavar="AXIS=LOC", nargs="*",
help="List of space-separated locations. A location consists of "
"the name of a variation axis, followed by '=' and a number, e.g. "
"wght=700 wdth=80. The default is the location of the base master.")
parser.add_argument(
"-o", "--output", metavar="OUTPUT.ttf", default=None,
help="Output instance TTF file (default: INPUT-instance.ttf).")
logging_group = parser.add_mutually_exclusive_group(required=False)
logging_group.add_argument(
"-v", "--verbose", action="store_true", help="Run more verbosely.")
logging_group.add_argument(
"-q", "--quiet", action="store_true", help="Turn verbosity off.")
options = parser.parse_args(args)
varfilename = options.input
outfile = (
os.path.splitext(varfilename)[0] + '-instance.ttf'
if not options.output else options.output)
configLogger(level=(
"DEBUG" if options.verbose else
"ERROR" if options.quiet else
"INFO"))
loc = {}
for arg in options.locargs:
try:
tag, val = arg.split('=')
assert len(tag) <= 4
loc[tag.ljust(4)] = float(val)
except (ValueError, AssertionError):
parser.error("invalid location argument format: %r" % arg)
log.info("Location: %s", loc)
log.info("Loading variable font")
varfont = TTFont(varfilename)
instantiateVariableFont(varfont, loc, inplace=True)
log.info("Saving instance font %s", outfile)
varfont.save(outfile)
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
sys.exit(main())
import doctest
sys.exit(doctest.testmod().failed)
|
py | 1a3b7402e2b5662d3c674ddc2c7f9d59f3acf4f7 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import time
from datetime import datetime
from decimal import Decimal
try:
from string import letters
except ImportError:
from string import ascii_letters as letters
from odps.tests.core import TestBase as Base, to_str, tn, pandas_case
from odps import types
from odps.df.backends.frame import ResultFrame
class TestBase(Base):
def _gen_random_bigint(self, value_range=None):
return random.randint(*(value_range or types.bigint._bounds))
def _gen_random_string(self, max_length=15):
gen_letter = lambda: letters[random.randint(0, 51)]
return to_str(''.join([gen_letter() for _ in range(random.randint(1, max_length))]))
def _gen_random_double(self):
return random.uniform(-2**32, 2**32)
def _gen_random_datetime(self):
dt = datetime.fromtimestamp(random.randint(0, int(time.time())))
if 1986 <= dt.year <= 1992: # ignore years when daylight saving time is used
return dt.replace(year=1996)
else:
return dt
def _gen_random_boolean(self):
return random.uniform(-1, 1) > 0
def _gen_random_decimal(self):
return Decimal(str(self._gen_random_double()))
def assertListAlmostEqual(self, first, second, **kw):
self.assertEqual(len(first), len(second))
only_float = kw.pop('only_float', True)
for f, s in zip(first, second):
if only_float:
self.assertAlmostEqual(f, s, **kw)
else:
if isinstance(f, float) and isinstance(s, float):
self.assertAlmostEqual(f, s, **kw)
elif isinstance(f, list) and isinstance(s, list):
self.assertListAlmostEqual(f, s, only_float=False, **kw)
else:
self.assertEqual(f, s)
__all__ = ['TestBase', 'to_str', 'tn', 'pandas_case']
|
py | 1a3b7712fc258b551b04a6c331bea689233a70a1 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 25 14:03:22 2019
@author: Rafael Arenhart
"""
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
'''
use '%matplotlib qt5' in the IPython console to open the interactive plot
'''
PI = np.pi
SAMPLES = 1000
theta = np.random.random(SAMPLES) * 2 * PI
polar_rho = np.random.random(SAMPLES) * 2 * PI
uniform_rho = np.arccos(2*np.random.random(SAMPLES) - 1)
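# Why the two samplings differ: the surface-area element of the unit sphere is
# sin(rho) d(rho) d(theta), so drawing rho uniformly over-samples the poles
# (the "Concentrated" cloud below). Drawing cos(rho) uniformly on [-1, 1],
# i.e. rho = arccos(2u - 1) with u ~ U(0, 1), cancels the sin(rho) factor and
# yields points uniformly distributed over the sphere.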
def polar_to_cartesian(theta, rho, radius=1):
x = np.sin(rho) * np.cos(theta)
y = np.sin(rho) * np.sin(theta)
z = np.cos(rho)
return (x, y, z)
fig = plt.figure()
axs = [fig.add_subplot(121, projection='3d'),
fig.add_subplot(122, projection='3d')]
axs[0].set_axis_off()
axs[0].set_title('Concentrated')
axs[1].set_axis_off()
axs[1].set_title('Distributed')
polar_points = polar_to_cartesian(theta, polar_rho)
#ax.scatter(polar_points[0], polar_points[1], polar_points[2])
#plt.show()
uniform_points = polar_to_cartesian(theta, uniform_rho)
#ax.scatter(uniform_points[0], uniform_points[1], uniform_points[2])
#plt.show()
#fig, axs = plt.subplots(2)
axs[0].scatter(polar_points[0], polar_points[1], polar_points[2])
axs[1].scatter(uniform_points[0], uniform_points[1], uniform_points[2])
plt.show()
|
py | 1a3b7752a492c3e9822116e0d4e63620f5c317e9 | # Write a Python program to measure the running time (difference between start and end time) of a piece of code.
import timeit
# timeit.default_timer() returns the current value of a monotonic, high-resolution clock.
start_time = timeit.default_timer()
sum(range(10))
end_time = timeit.default_timer()
print(end_time - start_time)
py | 1a3b779246024c08c6d642254fbf15c8451a0fed | # -*- coding: utf-8 -*-
#
# django-ldapdb
# Copyright (c) 2009-2011, Bolloré telecom
# All rights reserved.
#
# See AUTHORS file for a full list of contributors.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Bolloré telecom nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from django.db.models import fields, SubfieldBase
from ldapdb import escape_ldap_filter
class CharField(fields.CharField):
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 200
super(CharField, self).__init__(*args, **kwargs)
def from_ldap(self, value, connection):
if len(value) == 0:
return ''
else:
return value[0].decode(connection.charset)
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
"Returns field's value prepared for database lookup."
if lookup_type in ['endswith','iendswith']:
return ["*%s" % escape_ldap_filter(value)]
elif lookup_type in ['startswith','istartswith']:
return ["%s*" % escape_ldap_filter(value)]
elif lookup_type in ['contains', 'icontains']:
return ["*%s*" % escape_ldap_filter(value)]
elif lookup_type in ['exact','iexact']:
return [escape_ldap_filter(value)]
elif lookup_type == 'in':
return [escape_ldap_filter(v) for v in value]
elif lookup_type == 'isnull':
return ["" if value else "*"]
raise TypeError("CharField has invalid lookup: %s" % lookup_type)
def get_db_prep_save(self, value, connection):
return [value.encode(connection.charset)]
def get_prep_lookup(self, lookup_type, value):
"Perform preliminary non-db specific lookup checks and conversions"
if lookup_type in ['endswith','iendswith']:
return "*%s" % escape_ldap_filter(value)
elif lookup_type in ['startswith','istartswith']:
return "%s*" % escape_ldap_filter(value)
elif lookup_type in ['contains', 'icontains']:
return "*%s*" % escape_ldap_filter(value)
elif lookup_type in ['exact','iexact']:
return escape_ldap_filter(value)
elif lookup_type == 'in':
return [escape_ldap_filter(v) for v in value]
elif lookup_type == 'isnull':
return "" if value else "*"
raise TypeError("CharField has invalid lookup: %s" % lookup_type)
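# Illustrative examples of the LDAP filter fragments produced above (the field
# name "cn" is hypothetical); values are escaped before substitution:
#   cn__contains="bob"   -> "*bob*"
#   cn__startswith="bo"  -> "bo*"
#   cn__iexact="bob"     -> "bob"
#   cn__isnull=True      -> ""   (and "*" when isnull=False)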
class ImageField(fields.Field):
def from_ldap(self, value, connection):
if len(value) == 0:
return ''
else:
return value[0]
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
"Returns field's value prepared for database lookup."
return [self.get_prep_lookup(lookup_type, value)]
def get_db_prep_save(self, value, connection):
return [value]
def get_prep_lookup(self, lookup_type, value):
"Perform preliminary non-db specific lookup checks and conversions"
raise TypeError("ImageField has invalid lookup: %s" % lookup_type)
class IntegerField(fields.IntegerField):
def from_ldap(self, value, connection):
if len(value) == 0:
return 0
else:
return int(value[0])
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
"Returns field's value prepared for database lookup."
return [self.get_prep_lookup(lookup_type, value)]
def get_db_prep_save(self, value, connection):
return [str(value)]
def get_prep_lookup(self, lookup_type, value):
"Perform preliminary non-db specific lookup checks and conversions"
if lookup_type in ('exact', 'gte', 'lte'):
return value
elif lookup_type == 'isnull':
return '' if value else '*'
raise TypeError("IntegerField has invalid lookup: %s" % lookup_type)
class ListField(fields.Field):
__metaclass__ = SubfieldBase
def from_ldap(self, value, connection):
return value
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
"Returns field's value prepared for database lookup."
return [self.get_prep_lookup(lookup_type, value)]
def get_db_prep_save(self, value, connection):
return [x.encode(connection.charset) for x in value]
def get_prep_lookup(self, lookup_type, value):
"Perform preliminary non-db specific lookup checks and conversions"
if lookup_type in ['endswith', 'iendswith']:
return "*%s" % escape_ldap_filter(value)
elif lookup_type in ['startswith', 'istartswith']:
return "%s*" % escape_ldap_filter(value)
elif lookup_type in ['contains', 'icontains']:
return "*%s*" % escape_ldap_filter(value)
elif lookup_type in ['exact', 'iexact']:
return escape_ldap_filter(value)
elif lookup_type == 'in':
return [escape_ldap_filter(v) for v in value]
elif lookup_type == 'isnull':
return "" if value else "*"
raise TypeError("ListField has invalid lookup: %s" % lookup_type)
def to_python(self, value):
if not value:
return []
return value
|
py | 1a3b78f40b5225b6deb97ddbf72478a51ffc9de1 | import get_pic_from_video as gp
import directory_tree_traversal
import scor_onlie
import op_log
import ftp_op
import time
import configparser
import os.path
# 记录开始运行时间
start_time = time.strftime("%Y-%m-%d %X", time.localtime())
try:
# 读取配置文件
conf = configparser.ConfigParser()
conf.read('arr_config.ini')
# 要处理的本地目录
local_path = conf.get("local", "local_path")
# 筛选文件类型
file_type = conf.get("local", "video_type")
video_type = [x for x in file_type.split(', ')]
# 截图存放位置
save_picture_path = conf.get("local", "save_picture_path")
# 成功上传ftp后,是否删除本地的文件
del_localfile = conf.get("local", "del_file_afterupload")
# nswf 接口地址
scor_url = conf.get("nsfw", "nsfw_api")
# 截图总数
pic_cnt = int(conf.get("nsfw", "pic_cnt"))
# 判断图片是否为 NSFW 的阈值
threshold = conf.get("nsfw", "threshold")
# ftp 信息
ip = conf.get("ftp", "ip")
port = conf.get("ftp", "port")
username = conf.get("ftp", "username")
pwd = conf.get("ftp", "pwd")
except Exception as write_err:
op_log.log_to_file("Failed to read config file. %s" % write_err)
try:
# FTP connection used for uploads
ftp_conn = ftp_op.ftpconnect(ip, int(port), username, pwd)
except:
print("FTP connection failed (%s:%s %s %s)" % (ip, port, username, pwd))
op_log.log_to_file("FTP connection failed (%s:%s %s %s)" % (ip, port, username, pwd))
exit(1)
try:
# Connect to the database that records the operation log
tb_name = 'video_arrange'
db_name = 'arr_file.db3'
con, cur = op_log.open_sqlite(db_name, tb_name)
# Get the list of local files of the specified types
videos = directory_tree_traversal.file_list(local_path, video_type)
if not videos:
print('no files found')
log_txt = "started at %s, but found no files to process." % (start_time)
op_log.log_to_file(log_txt)
exit(0)
cnt = 0
for local_file in videos:
cnt += 1
print("-->:handling %s of %s " % (cnt, len(videos)))
# Grab screenshots from the video file
if not os.path.exists(save_picture_path):
os.makedirs(save_picture_path)
images = gp.get_frame(local_file, save_picture_path, pic_cnt)
if not images:
op_log.log_to_file("%s: failed to grab screenshots" % local_file)
continue
# Count how many screenshots exceed the threshold
scors_cnt = 0
nsfw_flag = 0
for ims in images:
scors = scor_onlie.scor(scor_url, ims)
if float(scors) > float(threshold):
scors_cnt += 1
success = 0
if scors_cnt > 1:
nsfw_flag = 1
# Upload the file to FTP
remote_file = os.path.split(local_file)[-1]
upresult = ftp_op.uploadfile(ftp_conn, local_file, remote_file)
if upresult is True:
result_txt = local_file + ' -- FTP upload succeeded'
success = 1
if int(del_localfile) == 1:
os.remove(local_file)
op_log.log_to_file("Deleted file: %s" % local_file)
else:
result_txt = local_file + ' -- FTP upload failed: ' + upresult
success = 0
# op_log.log_to_file(result_txt)
txt = "%s| |upfile: %s| | %s" % (str(time.asctime()), local_file, result_txt)
data = (os.path.split(local_file)[-1], local_file, int(nsfw_flag), result_txt, success, time.strftime("%Y-%m-%d %X", time.localtime()), '')
else:
result_txt = "Not an NSFW file, skipping upload"
data = (os.path.split(local_file)[-1], local_file, int(nsfw_flag), result_txt, success, time.strftime("%Y-%m-%d %X", time.localtime()), '')
op_log.insert_data(con, cur, tb_name, data)
con.commit()
end_time = time.strftime("%Y-%m-%d %X", time.localtime())
log_txt = "complete | started at %s, finished at %s, handled %s files." % (start_time, end_time, cnt)
op_log.log_to_file(log_txt)
except Exception as op_err:
op_log.log_to_file("操作失败: %s" % op_err)
finally:
ftp_conn.close()
cur.close()
con.close()
|
py | 1a3b798181f561c6bfbd86b3037c3002c5eb2f04 | # coding: utf-8
"""
convertapi
Convert API lets you effortlessly convert file formats and types. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cloudmersive_convert_api_client
from cloudmersive_convert_api_client.models.docx_top_level_comment import DocxTopLevelComment # noqa: E501
from cloudmersive_convert_api_client.rest import ApiException
class TestDocxTopLevelComment(unittest.TestCase):
"""DocxTopLevelComment unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDocxTopLevelComment(self):
"""Test DocxTopLevelComment"""
# FIXME: construct object with mandatory attributes with example values
# model = cloudmersive_convert_api_client.models.docx_top_level_comment.DocxTopLevelComment() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a3b79d3c905d62a51c0276c1db3df96030ac743 | from flask_login import login_required
from ui import app
from flask import Response, request
from utils.json_encoder import JSONEncoder
from service.job_service import cancel_job, dao_list_jobs, dao_count_jobs, get_job_dao
__author__ = 'tomas'
# returns all the jobs (and their state)
# @app.route("/api/workspace/<workspace_id>/job", methods=["GET"])
# @login_required
# def get_jobs_api(workspace_id):
# in_doc = get_jobs_by_workspace(workspace_id)
# out_doc = JSONEncoder().encode(in_doc)
# return Response(out_doc, mimetype="application/json")
@app.route("/api/workspace/<workspace_id>/job/<job_id>", methods=["GET"])
@login_required
def get_job_by_id_api(workspace_id, job_id):
job = get_job_dao(job_id)
out_doc = JSONEncoder().encode(job)
return Response(out_doc, mimetype="application/json")
@app.route("/api/workspace/<workspace_id>/job/<job_id>", methods=["DELETE"])
@login_required
def delete_job_api(workspace_id, job_id):
cancel_job(job_id)
return Response("{}", mimetype="application/json")
@app.route("/api/workspace/<workspace_id>/job", methods=['GET'])
@login_required
def get_jobs_api(workspace_id):
search_query = {}
search_query["workspace_id"] = workspace_id
if request.args.get('orderBy') is not None:
order = request.args.get('orderBy')
if order[:1] == "-":
order_by = order[1:]
reverse = -1
else:
order_by = order
reverse = 1
else:
order_by = "score"
reverse = 1
search_query["orderBy"] = order_by
search_query["reverse"] = reverse
search = None
if request.args.get('search') is not None:
search = request.args.get('search')
search_query["search_text"] = search
if request.args.get('limit') is not None:
limit = int(request.args.get('limit'))
search_query["limit"] = limit
else:
search_query["limit"] = 10
limit = 10
begin = 1
if request.args.get('page') is not None:
begin = int(request.args.get('page'))
search_query["begin"] = (begin - 1) * limit
# in_doc = list_workspace(search_query)
# count = dao_count_workspace()
in_doc = dao_list_jobs(search_query)
count = dao_count_jobs(search_query)
results = {}
results["count"] = count
results["list"] = in_doc
out_doc = JSONEncoder().encode(results)
return Response(out_doc, mimetype="application/json")
|
py | 1a3b7a06f85cd31aa2ca713fd3253bb6cdb6739a | A="100000"
B="110000"
C="100100"
D="100110"
E="100010"
F="110100"
G="110110"
H="110010"
I="010100"
J="010110"
K="101000"
L="111000"
M="101100"
N="101110"
O="101010"
P="111100"
Q="111110"
R="111010"
S="011100"
T="011110"
U="101001"
V="111001"
W="010111"
X="101101"
Y="101111"
Z="101011"
space="000000"
zero="001011"
one="010000"
two="011000"
three="010010"
four="010011"
five="010001"
six="011010"
seven="011011"
eight="011001"
nine="001010"
exc="011101"
quote="000010"
pound="001111"
dlr="110101"
pct="100101"
ap="111101"
ast="100001"
plus="001101"
comma="000001"
minus="001001"
colon="100011"
scolon="000011"
fslash="001100"
lpar="111011"
rpar="011111"
per="000101"
equ="111111"
qst="100111"
bslash="110011"
ans=True
while ans==True:
s=input("Please enter a string to translate to braille: ")
s=s.upper()
braille=""
for x in range(len(s)):
if s[x]=='A':
braille+=A
elif s[x]=='B':
braille+=B
elif s[x]=='C':
braille+=C
elif s[x]=='D':
braille+=D
elif s[x]=='E':
braille+=E
elif s[x]=='F':
braille+=F
elif s[x]=='G':
braille+=G
elif s[x]=='H':
braille+=H
elif s[x]=='I':
braille+=I
elif s[x]=='J':
braille+=J
elif s[x]=='K':
braille+=K
elif s[x]=='L':
braille+=L
elif s[x]=='M':
braille+=M
elif s[x]=='N':
braille+=N
elif s[x]=='O':
braille+=O
elif s[x]=='P':
braille+=P
elif s[x]=='Q':
braille+=Q
elif s[x]=='R':
braille+=R
elif s[x]=='S':
braille+=S
elif s[x]=='T':
braille+=T
elif s[x]=='U':
braille+=U
elif s[x]=='V':
braille+=V
elif s[x]=='W':
braille+=W
elif s[x]=='X':
braille+=X
elif s[x]=='Y':
braille+=Y
elif s[x]=='Z':
braille+=Z
elif s[x]==' ' or s[x]=='':
braille+=space
elif s[x]=='0':
braille+=zero
elif s[x]=='1':
braille+=one
elif s[x]=='2':
braille+=two
elif s[x]=='3':
braille+=three
elif s[x]=='4':
braille+=four
elif s[x]=='5':
braille+=five
elif s[x]=='6':
braille+=six
elif s[x]=='7':
braille+=seven
elif s[x]=='8':
braille+=eight
elif s[x]=='9':
braille+=nine
elif s[x]=='!':
braille+=exc
elif s[x]=='\"':
braille+=quote
elif s[x]=='#':
braille+=pound
elif s[x]=='$':
braille+=dlr
elif s[x]=='%':
braille+=pct
elif s[x]=='&':
braille+=ap
elif s[x]=='*':
braille+=ast
elif s[x]=='+':
braille+=plus
elif s[x]==',':
braille+=comma
elif s[x]=='-':
braille+=minus
elif s[x]==':':
braille+=colon
elif s[x]==';':
braille+=scolon
elif s[x]=='/':
braille+=fslash
elif s[x]=='(':
braille+=lpar
elif s[x]==')':
braille+=rpar
elif s[x]=='.':
braille+=per
elif s[x]=='=':
braille+=equ
elif s[x]=='?':
braille+=qst
elif s[x]=='\\':
braille+=bslash
print(braille)
y=True
while y == True:
z=input("Do you have another string to translate? Enter (YES) or (NO): ")
z=z.upper()
if z == "YES":
ans=True
y=False
elif z=="NO":
ans=False
y=False
print("Thank you, goodbye!")
else:
print("Not a valid response.")
y=True |
py | 1a3b7b25d7fa5f0ed3a44643f900e25768ffb54a | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['ServiceArgs', 'Service']
@pulumi.input_type
class ServiceArgs:
def __init__(__self__, *,
producer_project_id: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Service resource.
:param pulumi.Input[str] producer_project_id: ID of the project that produces and owns this service.
:param pulumi.Input[str] service_name: The name of the service. See the [overview](/service-management/overview) for naming requirements.
"""
if producer_project_id is not None:
pulumi.set(__self__, "producer_project_id", producer_project_id)
if service_name is not None:
pulumi.set(__self__, "service_name", service_name)
@property
@pulumi.getter(name="producerProjectId")
def producer_project_id(self) -> Optional[pulumi.Input[str]]:
"""
ID of the project that produces and owns this service.
"""
return pulumi.get(self, "producer_project_id")
@producer_project_id.setter
def producer_project_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "producer_project_id", value)
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the service. See the [overview](/service-management/overview) for naming requirements.
"""
return pulumi.get(self, "service_name")
@service_name.setter
def service_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_name", value)
class Service(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
producer_project_id: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Creates a new managed service. A managed service is immutable, and is subject to mandatory 30-day data retention. You cannot move a service or recreate it within 30 days after deletion. One producer project can own no more than 500 services. For security and reliability purposes, a production service should be hosted in a dedicated producer project. Operation
Auto-naming is currently not supported for this resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] producer_project_id: ID of the project that produces and owns this service.
:param pulumi.Input[str] service_name: The name of the service. See the [overview](/service-management/overview) for naming requirements.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[ServiceArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates a new managed service. A managed service is immutable, and is subject to mandatory 30-day data retention. You cannot move a service or recreate it within 30 days after deletion. One producer project can own no more than 500 services. For security and reliability purposes, a production service should be hosted in a dedicated producer project. Operation
Auto-naming is currently not supported for this resource.
:param str resource_name: The name of the resource.
:param ServiceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ServiceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
producer_project_id: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ServiceArgs.__new__(ServiceArgs)
__props__.__dict__["producer_project_id"] = producer_project_id
__props__.__dict__["service_name"] = service_name
super(Service, __self__).__init__(
'google-native:servicemanagement/v1:Service',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Service':
"""
Get an existing Service resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ServiceArgs.__new__(ServiceArgs)
__props__.__dict__["producer_project_id"] = None
__props__.__dict__["service_name"] = None
return Service(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="producerProjectId")
def producer_project_id(self) -> pulumi.Output[str]:
"""
ID of the project that produces and owns this service.
"""
return pulumi.get(self, "producer_project_id")
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> pulumi.Output[str]:
"""
The name of the service. See the [overview](/service-management/overview) for naming requirements.
"""
return pulumi.get(self, "service_name")
|
py | 1a3b7b28cf336755bf0be86f4ab52eba95f24478 | # needs:check_deprecation_status
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
service_opts = [
# TODO(johngarbutt) we need a better default and minimum, in a backwards
# compatible way for report_interval
cfg.IntOpt('report_interval',
default=10,
help="""
Number of seconds indicating how frequently the state of services on a
given hypervisor is reported. Nova needs to know this to determine the
overall health of the deployment.
Related Options:
* service_down_time
report_interval should be less than service_down_time. If service_down_time
is less than report_interval, services will routinely be considered down,
because they report in too rarely.
"""),
# TODO(johngarbutt) the code enforces the min value here, but we could
# do to add some min value here, once we sort out report_interval
cfg.IntOpt('service_down_time',
default=60,
help="""
Maximum time in seconds since last check-in for up service
Each compute node periodically updates their database status based on the
specified report interval. If the compute node hasn't updated the status
for more than service_down_time, then the compute node is considered down.
Related Options:
* report_interval (service_down_time should not be less than report_interval)
"""),
cfg.BoolOpt('periodic_enable',
default=True,
help="""
Enable periodic tasks.
If set to true, this option allows services to periodically run tasks
on the manager.
In case of running multiple schedulers or conductors you may want to run
periodic tasks on only one host - in this case disable this option for all
hosts but one.
"""),
cfg.IntOpt('periodic_fuzzy_delay',
default=60,
min=0,
help="""
Number of seconds to randomly delay when starting the periodic task
scheduler to reduce stampeding.
When compute workers are restarted in unison across a cluster,
they all end up running the periodic tasks at the same time
causing problems for the external services. To mitigate this
behavior, periodic_fuzzy_delay option allows you to introduce a
random initial delay when starting the periodic task scheduler.
Possible Values:
* Any positive integer (in seconds)
* 0 : disable the random delay
"""),
cfg.ListOpt('enabled_apis',
item_type=cfg.types.String(choices=['osapi_compute',
'metadata']),
default=['osapi_compute', 'metadata'],
help="List of APIs to be enabled by default."),
cfg.ListOpt('enabled_ssl_apis',
default=[],
help="""
List of APIs with enabled SSL.
Nova provides SSL support for the API servers. enabled_ssl_apis option
allows configuring the SSL support.
"""),
cfg.StrOpt('osapi_compute_listen',
default="0.0.0.0",
help="""
IP address on which the OpenStack API will listen.
The OpenStack API service listens on this IP address for incoming
requests.
"""),
cfg.PortOpt('osapi_compute_listen_port',
default=8774,
help="""
Port on which the OpenStack API will listen.
The OpenStack API service listens on this port number for incoming
requests.
"""),
cfg.IntOpt('osapi_compute_workers',
min=1,
help="""
Number of workers for OpenStack API service. The default will be the number
of CPUs available.
OpenStack API services can be configured to run as multi-process (workers).
This overcomes the problem of reduction in throughput when API request
concurrency increases. OpenStack API service will run in the specified
number of processes.
Possible Values:
* Any positive integer
* None (default value)
"""),
cfg.StrOpt('metadata_listen',
default="0.0.0.0",
help="""
IP address on which the metadata API will listen.
The metadata API service listens on this IP address for incoming
requests.
"""),
cfg.PortOpt('metadata_listen_port',
default=8775,
help="""
Port on which the metadata API will listen.
The metadata API service listens on this port number for incoming
requests.
"""),
cfg.IntOpt('metadata_workers',
min=1,
help="""
Number of workers for metadata service. If not specified the number of
available CPUs will be used.
The metadata service can be configured to run as multi-process (workers).
This overcomes the problem of reduction in throughput when API request
concurrency increases. The metadata service will run in the specified
number of processes.
Possible Values:
* Any positive integer
* None (default value)
"""),
# NOTE(sdague): the network_manager has a bunch of different in
# tree classes that are still legit options. In Newton we should
# turn this into a selector.
cfg.StrOpt('network_manager',
choices=[
'nova.network.manager.FlatManager',
'nova.network.manager.FlatDHCPManager',
'nova.network.manager.VlanManager',
],
default='nova.network.manager.VlanManager',
help='Full class name for the Manager for network'),
cfg.BoolOpt('service_enabled_flag',
default=True,
help='Write flag file /var/run/nova/.nova_<service>_enabled'),
]
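# Illustrative nova.conf fragment for a few of these options; the values shown
# are simply the defaults documented above, not a recommendation:
#   [DEFAULT]
#   report_interval = 10
#   service_down_time = 60
#   enabled_apis = osapi_compute,metadata
#   osapi_compute_listen_port = 8774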
def register_opts(conf):
conf.register_opts(service_opts)
def list_opts():
return {'DEFAULT': service_opts}
|
py | 1a3b7b7956097d71fc3c8b9bf262fd10942f37fc | import click
@click.command()
@click.argument("name")
@click.version_option()
def hello(name):
"""Print a greeting for NAME"""
click.echo(f"Hello, {name}!")
|
py | 1a3b7bf1898eecdc2ae2fc4a58ecd6ea562068cc | print("t3., This is %s" % __file__)
def f3():
print("t3.f3() called!") |
py | 1a3b7c25f7c2d52da916c89f40e2337cc1f9b7b6 | # -*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np
__all__ = ["Move"]
class Move(object):
def tune(self, state, accepted):
pass
def update(self,
old_state,
new_state,
accepted,
subset=None):
"""Update a given subset of the ensemble with an accepted proposal
Args:
old_state: The original ensemble state, holding the coordinates,
log probabilities and blobs of the walkers.
new_state: The proposed ensemble state.
accepted: A vector of booleans indicating which walkers were
accepted.
subset (Optional): A boolean mask indicating which walkers were
included in the subset. This can be used, for example, when
updating only the primary ensemble in a :class:`RedBlueMove`.
"""
if subset is None:
subset = np.ones(len(old_state.coords), dtype=bool)
m1 = subset & accepted
m2 = accepted[subset]
old_state.coords[m1] = new_state.coords[m2]
old_state.log_prob[m1] = new_state.log_prob[m2]
if new_state.blobs is not None:
if old_state.blobs is None:
raise ValueError(
"If you start sampling with a given log_prob, "
"you also need to provide the current list of "
"blobs at that position.")
old_state.blobs[m1] = new_state.blobs[m2]
return old_state
|
py | 1a3b7d25726d6e705054aed976bafe7242dd445d | import pydicom
from pydicom.filereader import read_dicomdir, read_dataset
from pydicom.data import get_testdata_files
from pydicom.errors import InvalidDicomError
from os.path import dirname, join
from pprint import pprint
import matplotlib.pyplot as plt
from pydicom.dataset import Dataset
from pathlib import Path
from plugins.dicom_loader.dicom_record import DicomDir, DicomPatient, DicomStudy, DicomSeries, DicomImage
class DicomLoader:
def __init__(self):
pass
def load_dicom(self, dicom_path: Path):
dicom_dir = DicomDir(dicom_path)
file_paths = [file_path for file_path in dicom_path.glob('**/*') if file_path.is_file()]
for file_path in file_paths:
try:
file_dataset = pydicom.dcmread(str(file_path))
except InvalidDicomError as exception:
print('DICOM file loading exception:', exception, '\n\tFile:', file_path)
continue
# print('file_dataset', file_dataset)
if file_dataset.PatientID not in dicom_dir.children:
dicom_dir.children[file_dataset.PatientID] = DicomPatient(file_dataset.PatientID)
patient_record = dicom_dir.children[file_dataset.PatientID]
if file_dataset.StudyID not in patient_record.children:
patient_record.children[file_dataset.StudyID] = DicomStudy(file_dataset.StudyID)
study_record = patient_record.children[file_dataset.StudyID]
if file_dataset.SeriesNumber not in study_record.children:
study_record.children[file_dataset.SeriesNumber] = DicomSeries(file_dataset.SeriesNumber)
series_record = study_record.children[file_dataset.SeriesNumber]
series_record.children[file_path.name] = DicomImage(file_path.name, file_dataset)
# print(dicom_dir)
return dicom_dir
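# Illustrative usage sketch (the directory path is hypothetical):
#   loader = DicomLoader()
#   dicom_dir = loader.load_dicom(Path('D:/DicomDatasets/Patient01'))
#   # dicom_dir.children maps PatientID -> DicomPatient -> DicomStudy
#   # -> DicomSeries -> DicomImage, mirroring the nesting built above.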
def load_dicom_dir_file(self):
# plt.imshow(dcm_dataset.pixel_array, cmap=plt.cm.bone)
# plt.show()
# fetch the path to the test data
# filepath = get_testdata_files('DICOMDIR')[0]
# filepath = 'D:/Projects/C++/Qt/5/BodySnitches/Builds/BodySnitches/!DicomDatasets/FantasticNine/09-Kydryavcev/2011.12.09/DICOMDIR'
filepath = 'd:/Projects/BodySnitches/Builds/BodySnitches/DicomDatasets/FantasticNine/09-Kydryavcev/2011.12.09/DICOMDIR'
print('Path to the DICOM directory: {}'.format(filepath))
# load the data
dicom_dir = read_dicomdir(filepath)
print('dicom_dir', dicom_dir)
base_dir = dirname(filepath)
print('base_dir', base_dir)
# go through the patient record and print information
print('patient_records type', type(dicom_dir.patient_records))
for patient_record in dicom_dir.patient_records:
print('rrr:', type(patient_record))
if (hasattr(patient_record, 'PatientID') and
hasattr(patient_record, 'PatientName')):
print("Patient: {}: {}".format(patient_record.PatientID,
patient_record.PatientName))
studies = patient_record.children
# got through each serie
for study in studies:
print('sss:', type(study))
print(" " * 4 + "Study {}: {}: {}".format(study.StudyID,
study.StudyDate,
study.StudyDescription))
all_series = study.children
# go through each serie
for series in all_series:
image_count = len(series.children)
plural = ('', 's')[image_count > 1]
# Write basic series info and image count
# Put N/A in if no Series Description
if 'SeriesDescription' not in series:
series.SeriesDescription = "N/A"
print(" " * 8 + "Series {}: {}: {} ({} image{})".format(
series.SeriesNumber, series.Modality, series.SeriesDescription,
image_count, plural))
# Open and read something from each image, for demonstration
# purposes. For file quick overview of DICOMDIR, leave the
# following out
print(" " * 12 + "Reading images...")
image_records = series.children
image_filenames = [join(base_dir, *image_rec.ReferencedFileID)
for image_rec in image_records]
datasets = [pydicom.dcmread(image_filename)
for image_filename in image_filenames]
patient_names = set(ds.PatientName for ds in datasets)
patient_IDs = set(ds.PatientID for ds in datasets)
# List the image filenames
print("\n" + " " * 12 + "Image filenames:")
print(" " * 12, end=' ')
pprint(image_filenames, indent=12)
# Expect all images to have same patient name, id
# Show the set of all names, IDs found (should each have one)
print(" " * 12 + "Patient Names in images..: {}".format(
patient_names))
print(" " * 12 + "Patient IDs in images..: {}".format(
patient_IDs))
|
py | 1a3b7d8b62af69510b7b9bfddff42e985e5b1206 | from sheets.base import *
from sheets.options import *
from sheets.columns import *
|
py | 1a3b7f9c066f7028145e9498373c6a406a5fb3ff | #!/usr/bin/env python2.7
"""
Updates 'gene_id' entries in a GTF file downloaded from UCSC Table Browser
to have gene IDs as values instead of transcript IDs.
Two types annotation sources can be used to replace the 'gene_id' values:
1. Local annotation source. To use this, supply a file name to the
'--local' argument. The file must only have two columns denoting
the transcript - gene ID mapping (the first column contain the
transcript IDs).
2. Remote annotation source (UCSC). To use this, supply the UCSC
database to use (e.g. 'hg19') to the '--db' argument and the annotation
source to the '--annot' argument. Annotation source is either 'ucsc',
'ensembl', 'refseq', 'gencode14', or 'gencode17'.
You can only choose local or remote sources, not both.
Requirements:
* Python == 2.7.x
* MySQLdb >= 1.2.3
* track >= 1.3.0-dev (dev version from github.com/xapple/track)
Copyright (c) 2013 Wibowo Arindrarto <[email protected]>
Copyright (c) 2013 LUMC Sequencing Analysis Support Core <[email protected]>
MIT License <http://opensource.org/licenses/MIT>
"""
RELEASE = False
__version_info__ = ('0', '1', )
__version__ = '.'.join(__version_info__)
__version__ += '-dev' if not RELEASE else ''
import argparse
import os
import warnings
import track
import MySQLdb
# Credentials for connecting to database
CRED = {
'host': 'genome-mysql.cse.ucsc.edu',
'user': 'genome',
}
# Queries that return ('transcript ID', 'gene ID') on various tables
QUERIES = {
'ucsc': 'SELECT knownGene.name, kgXref.geneSymbol FROM ' \
'knownGene, kgXref WHERE knownGene.name = kgID',
'ensembl': 'SELECT name, name2 FROM ensGene',
'refseq': 'SELECT name, name2 FROM refGene',
'gencode17': 'SELECT name, name2 FROM wgEncodeGencodeBasicV17',
'gencode14': 'SELECT name, name2 FROM wgEncodeGencodeBasicV14',
}
def get_ucsc_transcript_gene_mapping(annot_src, db, cred=CRED):
"""Returns the transcript-gene name mapping for an annotation source
from a given database source.
The function does not know whether the annotation source exists within
the given database nor does it do any check before trying connect.
:param annot_src: name of annotation source
:type annot_src: str
:param db: UCSC database name to use
:param cred: database login credentials, must contain entries for at
least 'host' and 'user', defaults to credentials for
public UCSC server
:type cred: dict
:returns: transcript-gene name mapping
:rtype: dict(str: str)
"""
con = MySQLdb.connect(db=db, **cred)
cur = con.cursor()
cur.execute(QUERIES[annot_src])
return {tid: gid for tid, gid in cur.fetchall()}
def get_local_transcript_gene_mapping(fname):
"""Loads a two-column file (transcript ID - gene ID) as a dict.
:param fname: path to file
:type fname: str
:returns: mapping of column 1 and column 2 in the file
:rtype: dict(str: str)
"""
mapping = {}
with open(fname, 'r') as src:
for line in src:
if not line.strip():
break
elif not line:
continue
key, value = line.strip().split()
if key in mapping:
if value != mapping[key]:
raise ValueError("Duplicate transcript ID ({0}) with "
"ambiguous gene ID ({1} vs {2}).".format(
key, value, mapping[key]))
mapping[key] = value
return mapping
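# Illustrative mapping file contents (IDs shown for illustration only):
# two whitespace-separated columns, transcript ID then gene ID.
#   NM_000546          TP53
#   ENST00000269305    TP53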
def update_gene_id_attr(chrom_recs, mapping):
"""Given an iterator for `track` records, yield `track` records with
updated gene ID.
:param chrom_recs: iterator returning `track` records for one chromosome
:type chrom_recs: iterator
:returns: generator yielding single `track` records
:rtype: (yield) `track.pyrow.SuperRow`
"""
for rec in chrom_recs:
data = list(rec.data)
# gene ID is always at index 7 (first index of attributes)
init_gid = data[7]
try:
map_gid = mapping[init_gid]
except KeyError:
warnings.warn("Transcript ID {0} not found in the given "
"mapping, initial value is left unchanged.".format(
init_gid))
else:
data[7] = map_gid
yield data
def ucsc_geneid_fix(in_gtf, out_gtf, remote=None, local=None):
"""Updates 'gene_id' entries in GTF files downloaded from UCSC
Table Browser to contain gene IDs instead of transcript IDs.
If the output GTF file name already exists, it will be overwritten.
:param in_gtf: path to input GTF file
:type in_gtf: str
:param out_gtf: path to output GTF file
:type out_gtf: str
:param remote: UCSC database and annotation source to use
:type remote: dict('db': str, 'annot_src': str)
:param local: two-column file name containing transcript-gene mapping,
only when `db` and `annot_src` are None
:type local: str
:returns: None
"""
# remote not defined
if remote is None:
# then local must be defined
if local is None:
raise ValueError("Missing `remote` or `local` arguments")
mapping = get_local_transcript_gene_mapping(local)
# remote defined
else:
# then local can not be defined
if local is not None:
raise ValueError("Only supply `remote` or `local` argument, "
"not both.")
# remote must have 'db'
if 'db' not in remote:
raise ValueError("Missing remote database name")
# and 'annot_src'
if 'annot' not in remote:
raise ValueError("Missing remote annotation source name")
db = remote['db']
annot = remote['annot']
if annot not in QUERIES.keys():
raise ValueError("Invalid annotation source "
"name: {0}".format(annot))
mapping = get_ucsc_transcript_gene_mapping(annot, db, cred=CRED)
# remove output file if it exists
if os.path.exists(out_gtf):
os.remove(out_gtf)
with track.load(in_gtf, readonly=True) as in_track, \
track.new(out_gtf, format='gtf') as out_track:
# since GTF has custom fields, need to set the out_track to use
# in_track's fields
out_track.fields = in_track.fields
for chrom in in_track.chromosomes:
chrom_rec = in_track.read(chrom)
out_track.write(chrom, update_gene_id_attr(chrom_rec, mapping))
if __name__ == '__main__':
usage = __doc__.split('\n\n\n')
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=usage[0], epilog=usage[1])
parser.add_argument('input', type=str, help='Path to input GTF file')
parser.add_argument('output', type=str, help='Path to output GTF file')
parser.add_argument('--local', type=str, dest='local', default=None,
help='Path to transcript ID - gene ID mapping file')
parser.add_argument('--db', type=str, dest='db', default=None,
help='UCSC database name to use')
parser.add_argument('--annot', type=str, dest='annot', default=None,
choices=QUERIES.keys(), help='UCSC annotation source')
parser.add_argument('--version', action='version', version='%(prog)s ' +
__version__)
args = parser.parse_args()
remote = None
if args.db is not None or args.annot is not None:
remote = {'db': args.db, 'annot': args.annot}
ucsc_geneid_fix(args.input, args.output, remote=remote, local=args.local)
|
py | 1a3b8059dffaeec8c421922c71512de954b4e0cd | from typing import Optional
import tensorflow as tf
from kerastuner.applications import resnet
from kerastuner.applications import xception
from tensorflow.keras import layers
from tensorflow.python.util import nest
from autokeras.blocks import reduction
from autokeras.engine import block as block_module
from autokeras.utils import layer_utils
from autokeras.utils import utils
def set_hp_value(hp, name, value):
full_name = hp._get_name(name)
hp.values[full_name] = value or hp.values[full_name]
class DenseBlock(block_module.Block):
"""Block for Dense layers.
# Arguments
num_layers: Int. The number of Dense layers in the block.
If left unspecified, it will be tuned automatically.
use_batchnorm: Boolean. Whether to use BatchNormalization layers.
If left unspecified, it will be tuned automatically.
dropout_rate: Float. The dropout rate for the layers.
If left unspecified, it will be tuned automatically.
"""
def __init__(self,
num_layers: Optional[int] = None,
use_batchnorm: Optional[bool] = None,
dropout_rate: Optional[float] = None,
**kwargs):
super().__init__(**kwargs)
self.num_layers = num_layers
self.use_batchnorm = use_batchnorm
self.dropout_rate = dropout_rate
def get_config(self):
config = super().get_config()
config.update({
'num_layers': self.num_layers,
'use_batchnorm': self.use_batchnorm,
'dropout_rate': self.dropout_rate})
return config
def build(self, hp, inputs=None):
inputs = nest.flatten(inputs)
utils.validate_num_inputs(inputs, 1)
input_node = inputs[0]
output_node = input_node
output_node = reduction.Flatten().build(hp, output_node)
num_layers = self.num_layers or hp.Choice('num_layers', [1, 2, 3], default=2)
use_batchnorm = self.use_batchnorm
if use_batchnorm is None:
use_batchnorm = hp.Boolean('use_batchnorm', default=False)
if self.dropout_rate is not None:
dropout_rate = self.dropout_rate
else:
dropout_rate = hp.Choice('dropout_rate', [0.0, 0.25, 0.5], default=0)
for i in range(num_layers):
units = hp.Choice(
'units_{i}'.format(i=i),
[16, 32, 64, 128, 256, 512, 1024],
default=32)
output_node = layers.Dense(units)(output_node)
if use_batchnorm:
output_node = layers.BatchNormalization()(output_node)
output_node = layers.ReLU()(output_node)
if dropout_rate > 0:
output_node = layers.Dropout(dropout_rate)(output_node)
return output_node
class RNNBlock(block_module.Block):
"""An RNN Block.
# Arguments
return_sequences: Boolean. Whether to return the last output in the
output sequence, or the full sequence. Defaults to False.
bidirectional: Boolean. Bidirectional RNN. If left unspecified, it will be
tuned automatically.
num_layers: Int. The number of layers in RNN. If left unspecified, it will
be tuned automatically.
layer_type: String. 'gru' or 'lstm'. If left unspecified, it will be tuned
automatically.
"""
def __init__(self,
return_sequences: bool = False,
bidirectional: Optional[bool] = None,
num_layers: Optional[int] = None,
layer_type: Optional[int] = None,
**kwargs):
super().__init__(**kwargs)
self.return_sequences = return_sequences
self.bidirectional = bidirectional
self.num_layers = num_layers
self.layer_type = layer_type
def get_config(self):
config = super().get_config()
config.update({
'return_sequences': self.return_sequences,
'bidirectional': self.bidirectional,
'num_layers': self.num_layers,
'layer_type': self.layer_type})
return config
def build(self, hp, inputs=None):
inputs = nest.flatten(inputs)
utils.validate_num_inputs(inputs, 1)
input_node = inputs[0]
shape = input_node.shape.as_list()
if len(shape) != 3:
raise ValueError(
'Expect the input tensor to have '
'at least 3 dimensions for rnn models, '
'but got {shape}'.format(shape=input_node.shape))
feature_size = shape[-1]
output_node = input_node
bidirectional = self.bidirectional
if bidirectional is None:
bidirectional = hp.Boolean('bidirectional', default=True)
layer_type = self.layer_type or hp.Choice('layer_type',
['gru', 'lstm'],
default='lstm')
num_layers = self.num_layers or hp.Choice('num_layers',
[1, 2, 3],
default=2)
rnn_layers = {
'gru': layers.GRU,
'lstm': layers.LSTM
}
in_layer = rnn_layers[layer_type]
for i in range(num_layers):
return_sequences = True
if i == num_layers - 1:
return_sequences = self.return_sequences
if bidirectional:
output_node = layers.Bidirectional(
in_layer(feature_size,
return_sequences=return_sequences))(output_node)
else:
output_node = in_layer(
feature_size,
return_sequences=return_sequences)(output_node)
return output_node
class ConvBlock(block_module.Block):
"""Block for vanilla ConvNets.
# Arguments
kernel_size: Int. If left unspecified, it will be tuned automatically.
num_blocks: Int. The number of conv blocks, each of which may contain
convolutional, max pooling, dropout, and activation. If left unspecified,
it will be tuned automatically.
num_layers: Int. The number of convolutional layers in each block. If left
unspecified, it will be tuned automatically.
max_pooling: Boolean. Whether to use max pooling layer in each block. If left
unspecified, it will be tuned automatically.
separable: Boolean. Whether to use separable conv layers.
If left unspecified, it will be tuned automatically.
dropout_rate: Float. Between 0 and 1. The dropout rate for after the
convolutional layers. If left unspecified, it will be tuned
automatically.
"""
def __init__(self,
kernel_size: Optional[int] = None,
num_blocks: Optional[int] = None,
num_layers: Optional[int] = None,
max_pooling: Optional[bool] = None,
separable: Optional[bool] = None,
dropout_rate: Optional[float] = None,
**kwargs):
super().__init__(**kwargs)
self.kernel_size = kernel_size
self.num_blocks = num_blocks
self.num_layers = num_layers
self.max_pooling = max_pooling
self.separable = separable
self.dropout_rate = dropout_rate
def get_config(self):
config = super().get_config()
config.update({
'kernel_size': self.kernel_size,
'num_blocks': self.num_blocks,
'num_layers': self.num_layers,
'max_pooling': self.max_pooling,
'separable': self.separable,
'dropout_rate': self.dropout_rate})
return config
def build(self, hp, inputs=None):
inputs = nest.flatten(inputs)
utils.validate_num_inputs(inputs, 1)
input_node = inputs[0]
output_node = input_node
kernel_size = self.kernel_size or hp.Choice('kernel_size',
[3, 5, 7],
default=3)
num_blocks = self.num_blocks or hp.Choice('num_blocks',
[1, 2, 3],
default=2)
num_layers = self.num_layers or hp.Choice('num_layers',
[1, 2],
default=2)
separable = self.separable
if separable is None:
separable = hp.Boolean('separable', default=False)
if separable:
conv = layer_utils.get_sep_conv(input_node.shape)
else:
conv = layer_utils.get_conv(input_node.shape)
max_pooling = self.max_pooling
if max_pooling is None:
max_pooling = hp.Boolean('max_pooling', default=True)
pool = layer_utils.get_max_pooling(input_node.shape)
if self.dropout_rate is not None:
dropout_rate = self.dropout_rate
else:
dropout_rate = hp.Choice('dropout_rate', [0.0, 0.25, 0.5], default=0)
for i in range(num_blocks):
for j in range(num_layers):
output_node = conv(
hp.Choice('filters_{i}_{j}'.format(i=i, j=j),
[16, 32, 64, 128, 256, 512],
default=32),
kernel_size,
padding=self._get_padding(kernel_size, output_node),
activation='relu')(output_node)
if max_pooling:
output_node = pool(
kernel_size - 1,
padding=self._get_padding(kernel_size - 1,
output_node))(output_node)
if dropout_rate > 0:
output_node = layers.Dropout(dropout_rate)(output_node)
return output_node
@staticmethod
def _get_padding(kernel_size, output_node):
if all([kernel_size * 2 <= length
for length in output_node.shape[1:-1]]):
return 'valid'
return 'same'
class MultiHeadSelfAttention(block_module.Block):
"""Block for Multi-Head Self-Attention.
# Arguments
head_size: Int. Dimensionality of the `query`, `key` and `value` tensors
after the linear transformation. If left unspecified, it will be
tuned automatically.
num_heads: Int. The number of attention heads. If left unspecified,
it will be tuned automatically.
"""
def __init__(self,
head_size: Optional[int] = None,
num_heads: Optional[int] = 8,
**kwargs):
super().__init__(**kwargs)
self.head_size = head_size
self.num_heads = num_heads
def get_config(self):
config = super().get_config()
config.update({
'head_size': self.head_size,
'num_heads': self.num_heads})
return config
def build(self, hp, inputs=None):
"""
# Arguments
hp: HyperParameters. The hyperparameters for building the model.
inputs: Tensor of Shape [batch_size, seq_len, embedding_dim]
# Returns
Self-Attention outputs of shape `[batch_size, seq_len, embedding_dim]`.
"""
inputs = nest.flatten(inputs)
utils.validate_num_inputs(inputs, 1)
input_node = inputs[0]
shape = input_node.shape.as_list()
if len(shape) != 3:
raise ValueError(
'Expect the input tensor to have '
'3 dimensions for multi-head self-attention, '
'but got {shape}'.format(shape=input_node.shape))
# input.shape = [batch_size, seq_len, embedding_dim]
head_size = self.head_size or hp.Choice(
'head_size',
[32, 64, 128, 256, 512],
default=128)
num_heads = self.num_heads
if num_heads is None:
num_heads = 8
        if head_size % num_heads != 0:  # head_size must be divisible by num_heads
raise ValueError(
f"embedding dimension = {head_size} should be "
f"divisible by number of heads = {num_heads}"
)
projection_dim = head_size // num_heads
query_dense = layers.Dense(head_size)
key_dense = layers.Dense(head_size)
value_dense = layers.Dense(head_size)
combine_heads = layers.Dense(head_size)
batch_size = tf.shape(input_node)[0]
query = query_dense(input_node) # (batch_size, seq_len, head_size)
key = key_dense(input_node) # (batch_size, seq_len, head_size)
value = value_dense(input_node) # (batch_size, seq_len, head_size)
query, key, value = [self.separate_heads(
var, batch_size, num_heads, projection_dim
) for var in [query, key, value]]
attention, weights = self.attention(query, key, value)
attention = tf.transpose(
attention, perm=[0, 2, 1, 3]
) # (batch_size, seq_len, num_heads, projection_dim)
concat_attention = tf.reshape(
            attention, (batch_size, tf.shape(attention)[1], head_size)
) # (batch_size, seq_len, head_size)
output = combine_heads(
concat_attention
) # (batch_size, seq_len, head_size)
return output
@staticmethod
def attention(query, key, value):
score = tf.matmul(query, key, transpose_b=True)
dim_key = tf.cast(tf.shape(key)[-1], tf.float32)
scaled_score = score / tf.math.sqrt(dim_key)
weights = tf.nn.softmax(scaled_score, axis=-1)
output = tf.matmul(weights, value)
return output, weights
@staticmethod
def separate_heads(x, batch_size, num_heads, projection_dim):
x = tf.reshape(x, (batch_size, -1, num_heads, projection_dim))
return tf.transpose(x, perm=[0, 2, 1, 3])
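# Shape sketch for the scaled dot-product helper above (illustrative only; the
# batch/head/sequence/projection sizes are arbitrary example values).
def _example_attention_shapes():
    import tensorflow as tf
    batch, heads, seq_len, projection_dim = 2, 8, 16, 32
    query = tf.random.normal((batch, heads, seq_len, projection_dim))
    key = tf.random.normal((batch, heads, seq_len, projection_dim))
    value = tf.random.normal((batch, heads, seq_len, projection_dim))
    output, weights = MultiHeadSelfAttention.attention(query, key, value)
    # output: (batch, heads, seq_len, projection_dim); weights: (batch, heads, seq_len, seq_len)
    return output.shape, weights.shape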
class Transformer(block_module.Block):
"""Block for Transformer.
The input should be tokenized sequences with the same length, where each element
of a sequence should be the index of the word.
# Example
```python
# Using the Transformer Block with AutoModel.
import autokeras as ak
from tensorflow.keras import losses
text_input = ak.TextInput()
output_node = ak.TextToIntSequence(output_sequence_length=200)(text_input)
output_node = ak.Transformer(embedding_dim=32,
pretraining='none',
num_heads=2,
dense_dim=32,
dropout_rate = 0.25)(output_node)
output_node = ak.SpatialReduction(reduction_type='global_avg')(output_node)
output_node = ak.DenseBlock(num_layers=1, use_batchnorm = False)(output_node)
output_node = ak.ClassificationHead(
loss=losses.SparseCategoricalCrossentropy(),
dropout_rate = 0.25)(output_node)
clf = ak.AutoModel(inputs=text_input, outputs=output_node, max_trials=2)
```
# Arguments
max_features: Int. Size of the vocabulary. Must be set if not using
TextToIntSequence before this block. Defaults to 20001.
        pretraining: String. 'random' (use random weights instead of any pretrained
            model), 'glove', 'fasttext' or 'word2vec'. Use pretrained word embedding.
If left unspecified, it will be tuned automatically.
embedding_dim: Int. Output dimension of the Attention block.
If left unspecified, it will be tuned automatically.
num_heads: Int. The number of attention heads. If left unspecified,
it will be tuned automatically.
dense_dim: Int. The output dimension of the Feed-Forward Network. If left
unspecified, it will be tuned automatically.
dropout_rate: Float. Between 0 and 1. If left unspecified, it will be
tuned automatically.
"""
def __init__(self,
max_features: int = 20001,
pretraining: Optional[str] = None,
embedding_dim: Optional[int] = None,
num_heads: Optional[int] = None,
dense_dim: Optional[int] = None,
                 dropout_rate: Optional[float] = None,
**kwargs):
super().__init__(**kwargs)
self.max_features = max_features
self.pretraining = pretraining
self.embedding_dim = embedding_dim
self.num_heads = num_heads
        self.dense_dim = dense_dim
self.dropout_rate = dropout_rate
def get_config(self):
config = super().get_config()
config.update({
'max_features': self.max_features,
'pretraining': self.pretraining,
'embedding_dim': self.embedding_dim,
'num_heads': self.num_heads,
'dense_dim': self.dense_dim,
'dropout_rate': self.dropout_rate})
return config
def build(self, hp, inputs=None):
"""
# Arguments
hp: HyperParameters. The hyperparameters for building the model.
inputs: Tensor of Shape [batch_size, seq_len]
# Returns
Output Tensor of shape `[batch_size, seq_len, embedding_dim]`.
"""
inputs = nest.flatten(inputs)
utils.validate_num_inputs(inputs, 1)
pretraining = self.pretraining or hp.Choice(
'pretraining',
['random', 'glove', 'fasttext', 'word2vec', 'none'],
default='none')
embedding_dim = self.embedding_dim or hp.Choice(
'embedding_dim',
[32, 64, 128, 256, 512],
default=128)
num_heads = self.num_heads or hp.Choice('num_heads', [8, 16, 32], default=8)
dense_dim = self.dense_dim or hp.Choice('dense_dim',
[128, 256, 512, 1024, 2048],
default=2048)
dropout_rate = self.dropout_rate or hp.Choice('dropout_rate',
[0.0, 0.25, 0.5],
default=0)
ffn = tf.keras.Sequential(
[layers.Dense(dense_dim, activation="relu"),
layers.Dense(embedding_dim), ]
)
layernorm1 = layers.LayerNormalization(epsilon=1e-6)
layernorm2 = layers.LayerNormalization(epsilon=1e-6)
dropout1 = layers.Dropout(dropout_rate)
dropout2 = layers.Dropout(dropout_rate)
# Token and Position Embeddings
input_node = nest.flatten(inputs)[0]
token_embedding = Embedding(max_features=self.max_features,
pretraining=pretraining,
embedding_dim=embedding_dim,
dropout_rate=dropout_rate).build(hp, input_node)
maxlen = input_node.shape[-1]
batch_size = tf.shape(input_node)[0]
positions = self.pos_array_funct(maxlen, batch_size)
position_embedding = Embedding(max_features=maxlen,
pretraining=pretraining,
embedding_dim=embedding_dim,
dropout_rate=dropout_rate).build(hp,
positions)
output_node = tf.keras.layers.Add()([token_embedding,
position_embedding])
attn_output = MultiHeadSelfAttention(
embedding_dim, num_heads).build(hp, output_node)
attn_output = dropout1(attn_output)
add_inputs_1 = tf.keras.layers.Add()([output_node, attn_output])
out1 = layernorm1(add_inputs_1)
ffn_output = ffn(out1)
ffn_output = dropout2(ffn_output)
add_inputs_2 = tf.keras.layers.Add()([out1, ffn_output])
output = layernorm2(add_inputs_2)
return output
@staticmethod
def pos_array_funct(maxlen, batch_size):
pos_ones = tf.ones((batch_size, 1), dtype=tf.int32)
positions = tf.range(start=0, limit=maxlen, delta=1)
positions = tf.expand_dims(positions, 0)
positions = tf.matmul(pos_ones, positions)
return positions
class ResNetBlock(resnet.HyperResNet, block_module.Block):
"""Block for ResNet.
# Arguments
version: String. 'v1', 'v2' or 'next'. The type of ResNet to use.
If left unspecified, it will be tuned automatically.
pooling: String. 'avg', 'max'. The type of pooling layer to use.
If left unspecified, it will be tuned automatically.
"""
def __init__(self,
version: Optional[str] = None,
pooling: Optional[str] = None,
**kwargs):
if 'include_top' in kwargs:
raise ValueError(
'Argument "include_top" is not supported in ResNetBlock.')
if 'input_shape' in kwargs:
raise ValueError(
'Argument "input_shape" is not supported in ResNetBlock.')
super().__init__(include_top=False, input_shape=(10,), **kwargs)
self.version = version
self.pooling = pooling
def get_config(self):
config = super().get_config()
config.update({
'version': self.version,
'pooling': self.pooling})
return config
def build(self, hp, inputs=None):
self.input_tensor = nest.flatten(inputs)[0]
self.input_shape = None
hp.Choice('version', ['v1', 'v2', 'next'], default='v2')
hp.Choice('pooling', ['avg', 'max'], default='avg')
set_hp_value(hp, 'version', self.version)
set_hp_value(hp, 'pooling', self.pooling)
model = super().build(hp)
return model.outputs
class XceptionBlock(xception.HyperXception, block_module.Block):
"""XceptionBlock.
An Xception structure, used for specifying your model with specific datasets.
The original Xception architecture is from https://arxiv.org/abs/1610.02357.
The data first goes through the entry flow, then through the middle flow which
is repeated eight times, and finally through the exit flow.
This XceptionBlock returns a similar architecture as Xception except without
the last (optional) fully connected layer(s) and logistic regression.
The size of this architecture could be decided by `HyperParameters`, to get an
architecture with a half, an identical, or a double size of the original one.
# Arguments
activation: String. 'selu' or 'relu'. If left unspecified, it will be tuned
automatically.
initial_strides: Int. If left unspecified, it will be tuned automatically.
num_residual_blocks: Int. If left unspecified, it will be tuned
automatically.
        pooling: String. 'avg', 'flatten', or 'max'. If left unspecified, it will be
tuned automatically.
"""
def __init__(self,
activation: Optional[str] = None,
initial_strides: Optional[int] = None,
num_residual_blocks: Optional[int] = None,
pooling: Optional[str] = None,
**kwargs):
if 'include_top' in kwargs:
raise ValueError(
'Argument "include_top" is not supported in XceptionBlock.')
if 'input_shape' in kwargs:
raise ValueError(
'Argument "input_shape" is not supported in XceptionBlock.')
super().__init__(include_top=False, input_shape=(10,), **kwargs)
self.activation = activation
self.initial_strides = initial_strides
self.num_residual_blocks = num_residual_blocks
self.pooling = pooling
def get_config(self):
config = super().get_config()
config.update({
'classes': self.classes,
'activation': self.activation,
'initial_strides': self.initial_strides,
'num_residual_blocks': self.num_residual_blocks,
'pooling': self.pooling})
return config
def build(self, hp, inputs=None):
self.input_tensor = nest.flatten(inputs)[0]
self.input_shape = None
hp.Choice('activation', ['relu', 'selu'])
hp.Choice('initial_strides', [2])
hp.Int('num_residual_blocks', 2, 8, default=4)
hp.Choice('pooling', ['avg', 'flatten', 'max'])
set_hp_value(hp, 'activation', self.activation)
set_hp_value(hp, 'initial_strides', self.initial_strides)
set_hp_value(hp, 'num_residual_blocks', self.num_residual_blocks)
set_hp_value(hp, 'pooling', self.pooling)
model = super().build(hp)
return model.outputs
class Embedding(block_module.Block):
"""Word embedding block for sequences.
The input should be tokenized sequences with the same length, where each element
of a sequence should be the index of the word.
# Arguments
max_features: Int. Size of the vocabulary. Must be set if not using
TextToIntSequence before this block. Defaults to 20001.
        pretraining: String. 'random' (use random weights instead of any pretrained
            model), 'glove', 'fasttext' or 'word2vec'. Use pretrained word embedding.
If left unspecified, it will be tuned automatically.
embedding_dim: Int. If left unspecified, it will be tuned automatically.
        dropout_rate: Float. The dropout rate applied after the Embedding layer.
If left unspecified, it will be tuned automatically.
"""
def __init__(self,
max_features: int = 20001,
pretraining: Optional[str] = None,
embedding_dim: Optional[int] = None,
dropout_rate: Optional[float] = None,
**kwargs):
super().__init__(**kwargs)
self.max_features = max_features
self.pretraining = pretraining
self.embedding_dim = embedding_dim
self.dropout_rate = dropout_rate
def get_config(self):
config = super().get_config()
config.update({
'max_features': self.max_features,
'pretraining': self.pretraining,
'embedding_dim': self.embedding_dim,
'dropout_rate': self.dropout_rate})
return config
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
# TODO: support more pretrained embedding layers.
# glove, fasttext, and word2vec
pretraining = self.pretraining or hp.Choice(
'pretraining',
['random', 'glove', 'fasttext', 'word2vec', 'none'],
default='none')
embedding_dim = self.embedding_dim or hp.Choice(
'embedding_dim',
[32, 64, 128, 256, 512],
default=128)
if pretraining != 'none':
# TODO: load from pretrained weights
layer = layers.Embedding(
input_dim=self.max_features,
output_dim=embedding_dim,
input_length=input_node.shape[1])
# trainable=False,
# weights=[embedding_matrix])
else:
layer = layers.Embedding(
input_dim=self.max_features,
output_dim=embedding_dim)
# input_length=input_node.shape[1],
# trainable=True)
output_node = layer(input_node)
if self.dropout_rate is not None:
dropout_rate = self.dropout_rate
else:
dropout_rate = hp.Choice('dropout_rate', [0.0, 0.25, 0.5], default=0.25)
if dropout_rate > 0:
output_node = layers.Dropout(dropout_rate)(output_node)
return output_node
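# Illustrative usage sketch for the Embedding block (assumed pipeline: TextInput ->
# TextToIntSequence -> Embedding -> reduction -> head; the hyperparameter values and
# max_trials are example choices, not defaults of this module).
def _example_embedding_usage():
    import autokeras as ak
    text_input = ak.TextInput()
    output_node = ak.TextToIntSequence(output_sequence_length=200)(text_input)
    output_node = ak.Embedding(max_features=20001, embedding_dim=64)(output_node)
    output_node = ak.SpatialReduction(reduction_type='global_avg')(output_node)
    output_node = ak.ClassificationHead()(output_node)
    return ak.AutoModel(inputs=text_input, outputs=output_node, max_trials=2)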
|
py | 1a3b8069040869384d0e10fe3fad619f6390179f | import glob
import itertools as it
import numpy as np
import os
import sys
import xgboost as xgb
try:
from sklearn import datasets
from sklearn.preprocessing import scale
except ImportError:
    pass
class Dataset:
def __init__(self, name, get_dataset, objective, metric,
has_weights=False, use_external_memory=False):
self.name = name
self.objective = objective
self.metric = metric
if has_weights:
self.X, self.y, self.w = get_dataset()
else:
self.X, self.y = get_dataset()
self.w = None
self.use_external_memory = use_external_memory
def get_boston():
data = datasets.load_boston()
return data.data, data.target
def get_digits():
data = datasets.load_digits()
return data.data, data.target
def get_cancer():
data = datasets.load_breast_cancer()
return data.data, data.target
def get_sparse():
rng = np.random.RandomState(199)
n = 5000
sparsity = 0.75
X, y = datasets.make_regression(n, random_state=rng)
X = np.array([[0.0 if rng.uniform(0, 1) < sparsity else x for x in x_row] for x_row in X])
from scipy import sparse
X = sparse.csr_matrix(X)
return X, y
def get_sparse_weights():
return get_weights_regression(1, 10)
def get_small_weights():
return get_weights_regression(1e-6, 1e-5)
def get_weights_regression(min_weight, max_weight):
rng = np.random.RandomState(199)
n = 10000
sparsity = 0.25
X, y = datasets.make_regression(n, random_state=rng)
X = np.array([[np.nan if rng.uniform(0, 1) < sparsity else x
for x in x_row] for x_row in X])
w = np.array([rng.uniform(min_weight, max_weight) for i in range(n)])
return X, y, w
def train_dataset(dataset, param_in, num_rounds=10, scale_features=False):
param = param_in.copy()
param["objective"] = dataset.objective
if dataset.objective == "multi:softmax":
param["num_class"] = int(np.max(dataset.y) + 1)
param["eval_metric"] = dataset.metric
if scale_features:
X = scale(dataset.X, with_mean=isinstance(dataset.X, np.ndarray))
else:
X = dataset.X
if dataset.use_external_memory:
np.savetxt('tmptmp_1234.csv', np.hstack((dataset.y.reshape(len(dataset.y), 1), X)),
delimiter=',')
dtrain = xgb.DMatrix('tmptmp_1234.csv?format=csv&label_column=0#tmptmp_',
weight=dataset.w)
else:
dtrain = xgb.DMatrix(X, dataset.y, weight=dataset.w)
print("Training on dataset: " + dataset.name, file=sys.stderr)
print("Using parameters: " + str(param), file=sys.stderr)
res = {}
bst = xgb.train(param, dtrain, num_rounds, [(dtrain, 'train')],
evals_result=res, verbose_eval=False)
# Free the booster and dmatrix so we can delete temporary files
bst_copy = bst.copy()
del bst
del dtrain
# Cleanup temporary files
if dataset.use_external_memory:
for f in glob.glob("tmptmp_*"):
os.remove(f)
return {"dataset": dataset, "bst": bst_copy, "param": param.copy(),
"eval": res['train'][dataset.metric]}
def parameter_combinations(variable_param):
"""
Enumerate all possible combinations of parameters
"""
result = []
names = sorted(variable_param)
combinations = it.product(*(variable_param[Name] for Name in names))
    for combination in combinations:
        param = {}
        for i, name in enumerate(names):
            param[name] = combination[i]
result.append(param)
return result
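# Quick illustrative check of parameter_combinations (the parameter names and values
# are arbitrary examples): a dict of value lists expands to the full grid of dicts.
def _example_parameter_combinations():
    grid = parameter_combinations({'max_depth': [2, 4], 'eta': [0.1, 0.3]})
    assert len(grid) == 4
    assert {'max_depth': 2, 'eta': 0.1} in grid
    return grid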
def run_suite(param, num_rounds=10, select_datasets=None, scale_features=False):
"""
Run the given parameters on a range of datasets. Objective and eval metric will be automatically set
"""
datasets = [
Dataset("Boston", get_boston, "reg:squarederror", "rmse"),
Dataset("Digits", get_digits, "multi:softmax", "mlogloss"),
Dataset("Cancer", get_cancer, "binary:logistic", "logloss"),
Dataset("Sparse regression", get_sparse, "reg:squarederror", "rmse"),
Dataset("Sparse regression with weights", get_sparse_weights,
"reg:squarederror", "rmse", has_weights=True),
Dataset("Small weights regression", get_small_weights,
"reg:squarederror", "rmse", has_weights=True),
Dataset("Boston External Memory", get_boston,
"reg:squarederror", "rmse",
use_external_memory=True)
]
results = [
]
for d in datasets:
if select_datasets is None or d.name in select_datasets:
results.append(
train_dataset(d, param, num_rounds=num_rounds, scale_features=scale_features))
return results
def non_increasing(L, tolerance):
return all((y - x) < tolerance for x, y in zip(L, L[1:]))
def assert_results_non_increasing(results, tolerance=1e-5):
for r in results:
assert non_increasing(r['eval'], tolerance), r
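# Illustrative sketch of how these helpers are typically combined (the parameter dict
# and dataset selection are example assumptions; running it needs scikit-learn and
# trains a small model on the selected dataset).
def _example_run_suite():
    param = {'tree_method': 'hist', 'max_depth': 3, 'eta': 0.3}
    results = run_suite(param, num_rounds=5, select_datasets=["Boston"])
    assert_results_non_increasing(results, tolerance=1e-3)
    return results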
|
py | 1a3b80be1e11cf6747e62c9b497921a5ca51c54e | """
MIT License
Copyright (c) 2021 Timothy Pidashev
"""
import discord
import asyncio
import json
from discord.ext import commands
from db import db
#loading bot config
with open("config.json") as file:
config = json.load(file)
#checks if invoked command is run by owner
async def is_owner(context):
return context.message.author.id in config["owner_ids"] |
py | 1a3b80c5355c9a0e10ccf143f0c4c6cb2bd4cbd7 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Resource(Model):
"""The Resource model definition.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The resource id.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: The resource tags.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, *, location: str=None, tags=None, **kwargs) -> None:
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
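# Minimal illustrative construction (example values only): id/name/type are populated
# by the server and therefore start as None; only location and tags are settable here.
def _example_resource():
    resource = Resource(location='westus', tags={'env': 'dev'})
    assert resource.id is None and resource.name is None and resource.type is None
    return resource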
|
py | 1a3b8169f9897a5cba9a0df79c2fa973a52a7563 | import base64
import uuid
from .base import DynamicField
from rest_framework.serializers import FileField, ImageField
from rest_framework import exceptions
from django.core.files.base import ContentFile
from django.utils import six
IMAGE_TYPES = {
'jpeg',
'jpg',
'png',
'gif',
'bmp',
'tiff',
'webp',
'ico',
'eps'
}
class DynamicFileFieldBase(
DynamicField
):
def __init__(self, **kwargs):
self.allow_remote = kwargs.pop('allow_remote', True)
self.allow_base64 = kwargs.pop('allow_base64', True)
super(DynamicFileFieldBase, self).__init__(**kwargs)
def get_extension(self, name):
if not name or '.' not in name:
return ''
return name.split('.')[-1].lower()
def to_internal_value_remote(self, name):
if not name:
self.fail('no_name')
field = self.model_field
storage = field.storage
if not storage.exists(name):
self.fail('invalid')
size = storage.size(name)
name_length = len(name)
if not self.allow_empty_file and not size:
self.fail('empty')
if self.max_length and name_length > self.max_length:
self.fail(
'max_length',
max_length=self.max_length,
length=name_length
)
if isinstance(self, ImageField):
ext = self.get_extension(name)
if ext not in IMAGE_TYPES:
return self.fail('invalid_image')
return name
def to_internal_value_base64(self, data):
header, data = data.split(';base64,')
try:
decoded = base64.b64decode(data)
except TypeError:
self.fail('invalid')
file_name = str(uuid.uuid4())[:12]
ext = header.split('/')[-1]
file_name += '.' + ext
data = ContentFile(decoded, name=file_name)
if isinstance(self, ImageField):
if ext not in IMAGE_TYPES:
return self.fail('invalid_image')
return super(
DynamicFileFieldBase,
self
).to_internal_value(data)
def to_internal_value(self, data):
if isinstance(data, six.string_types):
if self.allow_base64 and 'data:' in data and ';base64,' in data:
return self.to_internal_value_base64(data)
elif self.allow_remote:
return self.to_internal_value_remote(data)
else:
raise exceptions.ValidationError()
else:
return super(DynamicFileFieldBase, self).to_internal_value(data)
class DynamicImageField(
DynamicFileFieldBase,
ImageField
):
pass
class DynamicFileField(
DynamicFileFieldBase,
FileField
):
pass
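# Illustrative sketch (not part of the field API): shows the "data:<mime>;base64,<data>"
# convention that to_internal_value_base64 expects, using the same header/payload split
# and decode the method performs. The payload is an arbitrary example string.
def _example_base64_payload():
    payload = base64.b64encode(b'hello world').decode('ascii')
    data_uri = 'data:text/plain;base64,' + payload
    header, data = data_uri.split(';base64,')
    assert base64.b64decode(data) == b'hello world'
    return header, data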
|
py | 1a3b81a69b072f27b1671025154bbaa689ea717e | import logging
import tempfile
from ocs_ci.framework import config
from ocs_ci.framework.pytest_customization.marks import ignore_leftovers
from ocs_ci.ocs.ocp import wait_for_cluster_connectivity, OCP
from ocs_ci.ocs import constants, node, defaults
from ocs_ci.ocs.resources.pod import get_fio_rw_iops
from ocs_ci.ocs.resources.pvc import delete_pvcs
from tests import helpers
from ocs_ci.ocs.bucket_utils import s3_delete_object, s3_get_object, s3_put_object
from tests.manage.z_cluster.pvc_ops import create_pvcs
from ocs_ci.utility.utils import ceph_health_check, run_cmd, TimeoutSampler
from ocs_ci.utility import templating
from ocs_ci.ocs.cluster import CephCluster, CephClusterExternal
logger = logging.getLogger(__name__)
class Sanity:
"""
Class for cluster health and functional validations
"""
def __init__(self):
"""
Initializer for Sanity class - Init CephCluster() in order to
set the cluster status before starting the tests
"""
self.pvc_objs = list()
self.pod_objs = list()
self.obj_data = ""
self.ceph_cluster = CephCluster()
def health_check(self, cluster_check=True, tries=20):
"""
Perform Ceph and cluster health checks
"""
wait_for_cluster_connectivity(tries=400)
logger.info("Checking cluster and Ceph health")
node.wait_for_nodes_status(timeout=300)
ceph_health_check(namespace=config.ENV_DATA['cluster_namespace'], tries=tries)
if cluster_check:
self.ceph_cluster.cluster_health_check(timeout=60)
def create_resources(self, pvc_factory, pod_factory, run_io=True):
"""
Sanity validation - Create resources (FS and RBD) and run IO
Args:
pvc_factory (function): A call to pvc_factory function
pod_factory (function): A call to pod_factory function
run_io (bool): True for run IO, False otherwise
"""
logger.info("Creating resources and running IO as a sanity functional validation")
for interface in [constants.CEPHBLOCKPOOL, constants.CEPHFILESYSTEM]:
pvc_obj = pvc_factory(interface)
self.pvc_objs.append(pvc_obj)
self.pod_objs.append(pod_factory(pvc=pvc_obj, interface=interface))
if run_io:
for pod in self.pod_objs:
pod.run_io('fs', '1G', runtime=30)
for pod in self.pod_objs:
get_fio_rw_iops(pod)
self.create_obc()
self.verify_obc()
def create_obc(self):
"""
OBC creation for RGW and Nooba
"""
if config.ENV_DATA['platform'] in constants.ON_PREM_PLATFORMS:
obc_rgw = templating.load_yaml(
constants.RGW_OBC_YAML
)
obc_rgw_data_yaml = tempfile.NamedTemporaryFile(
mode='w+', prefix='obc_rgw_data', delete=False
)
templating.dump_data_to_temp_yaml(
obc_rgw, obc_rgw_data_yaml.name
)
logger.info("Creating OBC for rgw")
run_cmd(f"oc create -f {obc_rgw_data_yaml.name}", timeout=2400)
self.obc_rgw = obc_rgw['metadata']['name']
obc_nooba = templating.load_yaml(
constants.MCG_OBC_YAML
)
obc_mcg_data_yaml = tempfile.NamedTemporaryFile(
mode='w+', prefix='obc_mcg_data', delete=False
)
templating.dump_data_to_temp_yaml(
obc_nooba, obc_mcg_data_yaml.name
)
logger.info("create OBC for mcg")
run_cmd(f"oc create -f {obc_mcg_data_yaml.name}", timeout=2400)
self.obc_mcg = obc_nooba['metadata']['name']
def delete_obc(self):
"""
        Cleanup OBC resources created above
"""
if config.ENV_DATA['platform'] in constants.ON_PREM_PLATFORMS:
logger.info(f"Deleting rgw obc {self.obc_rgw}")
obcrgw = OCP(
kind='ObjectBucketClaim',
resource_name=f'{self.obc_rgw}'
)
run_cmd(f"oc delete obc/{self.obc_rgw}")
obcrgw.wait_for_delete(
resource_name=f'{self.obc_rgw}',
timeout=300
)
logger.info(f"Deleting mcg obc {self.obc_mcg}")
obcmcg = OCP(kind='ObjectBucketClaim', resource_name=f'{self.obc_mcg}')
run_cmd(
f"oc delete obc/{self.obc_mcg} -n "
f"{defaults.ROOK_CLUSTER_NAMESPACE}"
)
obcmcg.wait_for_delete(resource_name=f'{self.obc_mcg}', timeout=300)
def verify_obc(self):
"""
OBC verification from external cluster perspective,
we will check 2 OBCs
"""
sample = TimeoutSampler(
300,
5,
self.ceph_cluster.noobaa_health_check
)
sample.wait_for_func_status(True)
def delete_resources(self):
"""
Sanity validation - Delete resources (FS and RBD)
"""
logger.info("Deleting resources as a sanity functional validation")
self.delete_obc()
for pod_obj in self.pod_objs:
pod_obj.delete()
for pod_obj in self.pod_objs:
pod_obj.ocp.wait_for_delete(pod_obj.name)
for pvc_obj in self.pvc_objs:
pvc_obj.delete()
for pvc_obj in self.pvc_objs:
pvc_obj.ocp.wait_for_delete(pvc_obj.name)
@ignore_leftovers
def create_pvc_delete(self, multi_pvc_factory, project=None):
"""
Creates and deletes all types of PVCs
"""
# Create rbd pvcs
pvc_objs_rbd = create_pvcs(
multi_pvc_factory=multi_pvc_factory, interface='CephBlockPool',
project=project, status="", storageclass=None
)
# Create cephfs pvcs
pvc_objs_cephfs = create_pvcs(
multi_pvc_factory=multi_pvc_factory, interface='CephFileSystem',
project=project, status="", storageclass=None
)
all_pvc_to_delete = pvc_objs_rbd + pvc_objs_cephfs
# Check pvc status
for pvc_obj in all_pvc_to_delete:
helpers.wait_for_resource_state(
resource=pvc_obj, state=constants.STATUS_BOUND, timeout=300
)
# Start deleting PVC
delete_pvcs(all_pvc_to_delete)
# Check PVCs are deleted
for pvc_obj in all_pvc_to_delete:
pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)
logger.info("All PVCs are deleted as expected")
def obc_put_obj_create_delete(self, mcg_obj, bucket_factory):
"""
Creates bucket then writes, reads and deletes objects
"""
bucket_name = bucket_factory(amount=1, interface='OC')[0].name
self.obj_data = "A string data"
for i in range(0, 30):
key = 'Object-key-' + f"{i}"
logger.info(f"Write, read and delete object with key: {key}")
assert s3_put_object(mcg_obj, bucket_name, key, self.obj_data), f"Failed: Put object, {key}"
assert s3_get_object(mcg_obj, bucket_name, key), f"Failed: Get object, {key}"
assert s3_delete_object(mcg_obj, bucket_name, key), f"Failed: Delete object, {key}"
class SanityExternalCluster(Sanity):
"""
Helpers for health check and functional validation
in External mode
"""
def __init__(self):
"""
Initializer for Sanity class - Init CephCluster() in order to
set the cluster status before starting the tests
"""
self.pvc_objs = list()
self.pod_objs = list()
self.ceph_cluster = CephClusterExternal()
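# Illustrative sketch of the usual flow around Sanity in a test (requires a live
# cluster; pvc_factory and pod_factory are assumed to be pytest fixtures supplied by
# the surrounding test suite, so this is not an actual test of this module).
def _example_sanity_flow(pvc_factory, pod_factory):
    sanity_helpers = Sanity()
    sanity_helpers.health_check()
    sanity_helpers.create_resources(pvc_factory, pod_factory, run_io=True)
    sanity_helpers.delete_resources()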
|
py | 1a3b82f7c8fe4ae973bd93c7232daad8e1fae4c0 | #!/usr/bin/python
# Sample program or step 2 in becoming a DFIR Wizard!
# No license as this code is simple and free!
import sys
import pytsk3
import datetime
imagefile = "Stage2.vhd"
imagehandle = pytsk3.Img_Info(imagefile)
partitionTable = pytsk3.Volume_Info(imagehandle)
for partition in partitionTable:
print partition.addr, partition.desc, "%ss(%s)" % (partition.start, partition.start * 512), partition.len
filesystemObject = pytsk3.FS_Info(imagehandle, offset=65536)
fileobject = filesystemObject.open("/$MFT")
print "File Inode:",fileobject.info.meta.addr
print "File Name:",fileobject.info.name.name
print "File Creation Time:",datetime.datetime.fromtimestamp(fileobject.info.meta.crtime).strftime('%Y-%m-%d %H:%M:%S')
outfile = open('DFIRWizard-output', 'w')
filedata = fileobject.read_random(0,fileobject.info.meta.size)
outfile.write(filedata)
|
py | 1a3b8495fd7979bf291f7426b4429f8342d9f21d | """ Functions for generating Direct-(LGST, MC2GST, MLGST) models """
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
from .. import tools as _tools
from .. import construction as _construction
from .. import objects as _objs
from . import core as _core
def model_with_lgst_circuit_estimates(
circuitsToEstimate, dataset, prepStrs, effectStrs,
targetModel, includeTargetOps=True, opLabelAliases=None,
guessModelForGauge=None, circuitLabels=None, svdTruncateTo=None,
verbosity=0):
"""
Constructs a model that contains LGST estimates for circuitsToEstimate.
For each operation sequence s in circuitsToEstimate, the constructed model
contains the LGST estimate for s as separate gate, labeled either by
the corresponding element of circuitLabels or by the tuple of s itself.
Parameters
----------
circuitsToEstimate : list of Circuits or tuples
The operation sequences to estimate using LGST
dataset : DataSet
The data to use for LGST
prepStrs,effectStrs : list of Circuits
        Fiducial Circuit lists used to construct an informationally complete
preparation and measurement.
targetModel : Model
A model used by LGST to specify which operation labels should be estimated,
a guess for which gauge these estimates should be returned in, and
used to simplify operation sequences.
includeTargetOps : bool, optional
If True, the operation labels in targetModel will be included in the
returned model.
opLabelAliases : dictionary, optional
Dictionary whose keys are operation label "aliases" and whose values are tuples
corresponding to what that operation label should be expanded into before querying
the dataset. Defaults to the empty dictionary (no aliases defined)
e.g. opLabelAliases['Gx^3'] = ('Gx','Gx','Gx')
guessModelForGauge : Model, optional
A model used to compute a gauge transformation that is applied to
the LGST estimates. This gauge transformation is computed such that
if the estimated gates matched the model given, then the gate
matrices would match, i.e. the gauge would be the same as
the model supplied. Defaults to the targetModel.
circuitLabels : list of strings, optional
A list of labels in one-to-one correspondence with the
operation sequence in circuitsToEstimate. These labels are
the keys to access the operation matrices in the returned
Model, i.e. op_matrix = returned_model[op_label]
svdTruncateTo : int, optional
The Hilbert space dimension to truncate the operation matrices to using
        an SVD to keep only the largest svdTruncateTo singular values of
        the I_tilde LGST matrix. Zero means no truncation.
Defaults to dimension of `targetModel`.
verbosity : int, optional
Verbosity value to send to do_lgst(...) call.
Returns
-------
Model
A model containing LGST estimates for all the requested
operation sequences and possibly the gates in targetModel.
"""
opLabels = [] # list of operation labels for LGST to estimate
if opLabelAliases is None: aliases = {}
else: aliases = opLabelAliases.copy()
#Add operation sequences to estimate as aliases
if circuitLabels is not None:
assert(len(circuitLabels) == len(circuitsToEstimate))
for opLabel, opStr in zip(circuitLabels, circuitsToEstimate):
aliases[opLabel] = opStr.replace_layers_with_aliases(opLabelAliases)
opLabels.append(opLabel)
else:
for opStr in circuitsToEstimate:
newLabel = 'G' + '.'.join(map(str, tuple(opStr)))
aliases[newLabel] = opStr.replace_layers_with_aliases(opLabelAliases) # use circuit tuple as label
opLabels.append(newLabel)
#Add target model labels (not aliased) if requested
if includeTargetOps and targetModel is not None:
for targetOpLabel in targetModel.operations:
if targetOpLabel not in opLabels: # very unlikely that this is false
opLabels.append(targetOpLabel)
return _core.do_lgst(dataset, prepStrs, effectStrs, targetModel,
opLabels, aliases, guessModelForGauge,
svdTruncateTo, verbosity)
def direct_lgst_model(circuitToEstimate, circuitLabel, dataset,
prepStrs, effectStrs, targetModel,
opLabelAliases=None, svdTruncateTo=None, verbosity=0):
"""
Constructs a model of LGST estimates for target gates and circuitToEstimate.
Parameters
----------
circuitToEstimate : Circuit or tuple
The single operation sequence to estimate using LGST
circuitLabel : string
The label for the estimate of circuitToEstimate.
i.e. op_matrix = returned_model[op_label]
dataset : DataSet
The data to use for LGST
prepStrs,effectStrs : list of Circuits
        Fiducial Circuit lists used to construct an informationally complete
preparation and measurement.
targetModel : Model
The target model used by LGST to extract operation labels and an initial gauge
opLabelAliases : dictionary, optional
Dictionary whose keys are operation label "aliases" and whose values are tuples
corresponding to what that operation label should be expanded into before querying
the dataset. Defaults to the empty dictionary (no aliases defined)
e.g. opLabelAliases['Gx^3'] = ('Gx','Gx','Gx')
svdTruncateTo : int, optional
The Hilbert space dimension to truncate the operation matrices to using
        an SVD to keep only the largest svdTruncateTo singular values of
        the I_tilde LGST matrix. Zero means no truncation.
Defaults to dimension of `targetModel`.
verbosity : int, optional
Verbosity value to send to do_lgst(...) call.
Returns
-------
Model
A model containing LGST estimates of circuitToEstimate
and the gates of targetModel.
"""
return model_with_lgst_circuit_estimates(
[circuitToEstimate], dataset, prepStrs, effectStrs, targetModel,
True, opLabelAliases, None, [circuitLabel], svdTruncateTo,
verbosity)
def direct_lgst_models(circuits, dataset, prepStrs, effectStrs, targetModel,
opLabelAliases=None, svdTruncateTo=None, verbosity=0):
"""
Constructs a dictionary with keys == operation sequences and values == Direct-LGST Models.
Parameters
----------
circuits : list of Circuit or tuple objects
The operation sequences to estimate using LGST. The elements of this list
are the keys of the returned dictionary.
dataset : DataSet
The data to use for all LGST estimates.
prepStrs,effectStrs : list of Circuits
        Fiducial Circuit lists used to construct an informationally complete
preparation and measurement.
targetModel : Model
The target model used by LGST to extract operation labels and an initial gauge
opLabelAliases : dictionary, optional
Dictionary whose keys are operation label "aliases" and whose values are tuples
corresponding to what that operation label should be expanded into before querying
the dataset. Defaults to the empty dictionary (no aliases defined)
e.g. opLabelAliases['Gx^3'] = ('Gx','Gx','Gx')
svdTruncateTo : int, optional
The Hilbert space dimension to truncate the operation matrices to using
        an SVD to keep only the largest svdTruncateTo singular values of
        the I_tilde LGST matrix. Zero means no truncation.
Defaults to dimension of `targetModel`.
verbosity : int, optional
Verbosity value to send to do_lgst(...) call.
Returns
-------
dict
A dictionary that relates each operation sequence of circuits to a
Model containing the LGST estimate of that operation sequence stored under
the operation label "GsigmaLbl", along with LGST estimates of the gates in
targetModel.
"""
printer = _objs.VerbosityPrinter.build_printer(verbosity)
directLGSTmodels = {}
printer.log("--- Direct LGST precomputation ---")
with printer.progress_logging(1):
for i, sigma in enumerate(circuits):
printer.show_progress(i, len(circuits), prefix="--- Computing model for string -", suffix='---')
directLGSTmodels[sigma] = direct_lgst_model(
sigma, "GsigmaLbl", dataset, prepStrs, effectStrs, targetModel,
opLabelAliases, svdTruncateTo, verbosity)
return directLGSTmodels
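# Illustrative sketch of calling direct_lgst_models (dataset, fiducial lists, target
# model and circuit list are assumed to be supplied by the caller; svdTruncateTo=4 is
# just an example value).
def _example_direct_lgst_models(dataset, prepStrs, effectStrs, targetModel, circuits):
    direct_models = direct_lgst_models(circuits, dataset, prepStrs, effectStrs,
                                       targetModel, svdTruncateTo=4, verbosity=1)
    # Each circuit maps to a Model holding its LGST estimate under the "GsigmaLbl" label.
    return {s: mdl.operations["GsigmaLbl"] for s, mdl in direct_models.items()}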
def direct_mc2gst_model(circuitToEstimate, circuitLabel, dataset,
prepStrs, effectStrs, targetModel,
opLabelAliases=None, svdTruncateTo=None,
minProbClipForWeighting=1e-4,
probClipInterval=(-1e6, 1e6), verbosity=0):
"""
Constructs a model of LSGST estimates for target gates and circuitToEstimate.
Starting with a Direct-LGST estimate for circuitToEstimate, runs LSGST
using the same strings that LGST would have used to estimate circuitToEstimate
and each of the target gates. That is, LSGST is run with strings of the form:
1. prepStr
2. effectStr
3. prepStr + effectStr
4. prepStr + singleGate + effectStr
5. prepStr + circuitToEstimate + effectStr
and the resulting Model estimate is returned.
Parameters
----------
circuitToEstimate : Circuit
The single operation sequence to estimate using LSGST
circuitLabel : string
The label for the estimate of circuitToEstimate.
        i.e. op_matrix = returned_model[op_label]
dataset : DataSet
The data to use for LGST
prepStrs,effectStrs : list of Circuits
        Fiducial Circuit lists used to construct an informationally complete
preparation and measurement.
targetModel : Model
The target model used by LGST to extract operation labels and an initial gauge
opLabelAliases : dictionary, optional
Dictionary whose keys are operation label "aliases" and whose values are tuples
corresponding to what that operation label should be expanded into before querying
the dataset. Defaults to the empty dictionary (no aliases defined)
e.g. opLabelAliases['Gx^3'] = ('Gx','Gx','Gx')
svdTruncateTo : int, optional
The Hilbert space dimension to truncate the operation matrices to using
        an SVD to keep only the largest svdTruncateTo singular values of
        the I_tilde LGST matrix. Zero means no truncation.
Defaults to dimension of `targetModel`.
minProbClipForWeighting : float, optional
defines the clipping interval for the statistical weight used
within the chi^2 function (see chi2fn).
probClipInterval : 2-tuple, optional
(min,max) to clip probabilities to within Model probability
computation routines (see Model.bulk_fill_probs)
verbosity : int, optional
Verbosity value to send to do_lgst(...) and do_mc2gst(...) calls.
Returns
-------
Model
A model containing LSGST estimates of circuitToEstimate
and the gates of targetModel.
"""
direct_lgst = model_with_lgst_circuit_estimates(
[circuitToEstimate], dataset, prepStrs, effectStrs, targetModel,
True, opLabelAliases, None, [circuitLabel], svdTruncateTo, verbosity)
# LEXICOGRAPHICAL VS MATRIX ORDER
circuits = prepStrs + effectStrs + [prepStr + effectStr for prepStr in prepStrs for effectStr in effectStrs]
for opLabel in direct_lgst.operations:
circuits.extend([prepStr + _objs.Circuit((opLabel,)) + effectStr
for prepStr in prepStrs for effectStr in effectStrs])
aliases = {} if (opLabelAliases is None) else opLabelAliases.copy()
aliases[circuitLabel] = circuitToEstimate.replace_layers_with_aliases(opLabelAliases)
_, direct_lsgst = _core.do_mc2gst(
dataset, direct_lgst, circuits,
minProbClipForWeighting=minProbClipForWeighting,
probClipInterval=probClipInterval, verbosity=verbosity,
opLabelAliases=aliases)
return direct_lsgst
def direct_mc2gst_models(circuits, dataset, prepStrs, effectStrs,
targetModel, opLabelAliases=None,
svdTruncateTo=None, minProbClipForWeighting=1e-4,
probClipInterval=(-1e6, 1e6), verbosity=0):
"""
Constructs a dictionary with keys == operation sequences and values == Direct-LSGST Models.
Parameters
----------
circuits : list of Circuit or tuple objects
The operation sequences to estimate using LSGST. The elements of this list
are the keys of the returned dictionary.
dataset : DataSet
The data to use for all LGST and LSGST estimates.
prepStrs,effectStrs : list of Circuits
        Fiducial Circuit lists used to construct an informationally complete
preparation and measurement.
targetModel : Model
The target model used by LGST to extract operation labels and an initial gauge
opLabelAliases : dictionary, optional
Dictionary whose keys are operation label "aliases" and whose values are tuples
corresponding to what that operation label should be expanded into before querying
the dataset. Defaults to the empty dictionary (no aliases defined)
e.g. opLabelAliases['Gx^3'] = ('Gx','Gx','Gx')
svdTruncateTo : int, optional
The Hilbert space dimension to truncate the operation matrices to using
        an SVD to keep only the largest svdTruncateTo singular values of
        the I_tilde LGST matrix. Zero means no truncation.
Defaults to dimension of `targetModel`.
minProbClipForWeighting : float, optional
defines the clipping interval for the statistical weight used
within the chi^2 function (see chi2fn).
probClipInterval : 2-tuple, optional
(min,max) to clip probabilities to within Model probability
computation routines (see Model.bulk_fill_probs)
verbosity : int, optional
Verbosity value to send to do_lgst(...) and do_mc2gst(...) calls.
Returns
-------
dict
A dictionary that relates each operation sequence of circuits to a
Model containing the LSGST estimate of that operation sequence stored under
the operation label "GsigmaLbl", along with LSGST estimates of the gates in
targetModel.
"""
printer = _objs.VerbosityPrinter.build_printer(verbosity)
directLSGSTmodels = {}
printer.log("--- Direct LSGST precomputation ---")
with printer.progress_logging(1):
for i, sigma in enumerate(circuits):
printer.show_progress(i, len(circuits), prefix="--- Computing model for string-", suffix='---')
directLSGSTmodels[sigma] = direct_mc2gst_model(
sigma, "GsigmaLbl", dataset, prepStrs, effectStrs, targetModel,
opLabelAliases, svdTruncateTo, minProbClipForWeighting,
probClipInterval, verbosity)
return directLSGSTmodels
def direct_mlgst_model(circuitToEstimate, circuitLabel, dataset,
prepStrs, effectStrs, targetModel,
opLabelAliases=None, svdTruncateTo=None, minProbClip=1e-6,
probClipInterval=(-1e6, 1e6), verbosity=0):
"""
Constructs a model of MLEGST estimates for target gates and circuitToEstimate.
Starting with a Direct-LGST estimate for circuitToEstimate, runs MLEGST
using the same strings that LGST would have used to estimate circuitToEstimate
and each of the target gates. That is, MLEGST is run with strings of the form:
1. prepStr
2. effectStr
3. prepStr + effectStr
4. prepStr + singleGate + effectStr
5. prepStr + circuitToEstimate + effectStr
and the resulting Model estimate is returned.
Parameters
----------
circuitToEstimate : Circuit or tuple
The single operation sequence to estimate using LSGST
circuitLabel : string
The label for the estimate of circuitToEstimate.
i.e. op_matrix = returned_model[op_label]
dataset : DataSet
The data to use for LGST
prepStrs,effectStrs : list of Circuits
        Fiducial Circuit lists used to construct an informationally complete
preparation and measurement.
targetModel : Model
The target model used by LGST to extract operation labels and an initial gauge
opLabelAliases : dictionary, optional
Dictionary whose keys are operation label "aliases" and whose values are tuples
corresponding to what that operation label should be expanded into before querying
the dataset. Defaults to the empty dictionary (no aliases defined)
e.g. opLabelAliases['Gx^3'] = ('Gx','Gx','Gx')
svdTruncateTo : int, optional
The Hilbert space dimension to truncate the operation matrices to using
        an SVD to keep only the largest svdTruncateTo singular values of
        the I_tilde LGST matrix. Zero means no truncation.
Defaults to dimension of `targetModel`.
minProbClip : float, optional
defines the minimum probability "patch point" used
within the logl function.
probClipInterval : 2-tuple, optional
(min,max) to clip probabilities to within Model probability
computation routines (see Model.bulk_fill_probs)
verbosity : int, optional
Verbosity value to send to do_lgst(...) and do_mlgst(...) calls.
Returns
-------
Model
A model containing MLEGST estimates of circuitToEstimate
and the gates of targetModel.
"""
direct_lgst = model_with_lgst_circuit_estimates(
[circuitToEstimate], dataset, prepStrs, effectStrs, targetModel,
True, opLabelAliases, None, [circuitLabel], svdTruncateTo, verbosity)
# LEXICOGRAPHICAL VS MATRIX ORDER
circuits = prepStrs + effectStrs + [prepStr + effectStr for prepStr in prepStrs for effectStr in effectStrs]
for opLabel in direct_lgst.operations:
circuits.extend([prepStr + _objs.Circuit((opLabel,)) + effectStr
for prepStr in prepStrs for effectStr in effectStrs])
aliases = {} if (opLabelAliases is None) else opLabelAliases.copy()
aliases[circuitLabel] = circuitToEstimate.replace_layers_with_aliases(opLabelAliases)
_, direct_mlegst = _core.do_mlgst(
dataset, direct_lgst, circuits, minProbClip=minProbClip,
probClipInterval=probClipInterval, verbosity=verbosity,
opLabelAliases=aliases)
return direct_mlegst
def direct_mlgst_models(circuits, dataset, prepStrs, effectStrs, targetModel,
opLabelAliases=None, svdTruncateTo=None, minProbClip=1e-6,
probClipInterval=(-1e6, 1e6), verbosity=0):
"""
Constructs a dictionary with keys == operation sequences and values == Direct-MLEGST Models.
Parameters
----------
circuits : list of Circuit or tuple objects
The operation sequences to estimate using MLEGST. The elements of this list
are the keys of the returned dictionary.
dataset : DataSet
The data to use for all LGST and LSGST estimates.
prepStrs,effectStrs : list of Circuits
        Fiducial Circuit lists used to construct an informationally complete
preparation and measurement.
targetModel : Model
The target model used by LGST to extract operation labels and an initial gauge
opLabelAliases : dictionary, optional
Dictionary whose keys are operation label "aliases" and whose values are tuples
corresponding to what that operation label should be expanded into before querying
the dataset. Defaults to the empty dictionary (no aliases defined)
e.g. opLabelAliases['Gx^3'] = ('Gx','Gx','Gx')
svdTruncateTo : int, optional
The Hilbert space dimension to truncate the operation matrices to using
        an SVD to keep only the largest svdTruncateTo singular values of
        the I_tilde LGST matrix. Zero means no truncation.
Defaults to dimension of `targetModel`.
minProbClip : float, optional
defines the minimum probability "patch point" used
within the logl function.
probClipInterval : 2-tuple, optional
(min,max) to clip probabilities to within Model probability
computation routines (see Model.bulk_fill_probs)
verbosity : int, optional
Verbosity value to send to do_lgst(...) and do_mlgst(...) calls.
Returns
-------
dict
A dictionary that relates each operation sequence of circuits to a
Model containing the MLEGST estimate of that operation sequence stored under
the operation label "GsigmaLbl", along with MLEGST estimates of the gates in
targetModel.
"""
printer = _objs.VerbosityPrinter.build_printer(verbosity)
directMLEGSTmodels = {}
printer.log("--- Direct MLEGST precomputation ---")
with printer.progress_logging(1):
for i, sigma in enumerate(circuits):
printer.show_progress(i, len(circuits), prefix="--- Computing model for string ", suffix="---")
directMLEGSTmodels[sigma] = direct_mlgst_model(
sigma, "GsigmaLbl", dataset, prepStrs, effectStrs, targetModel,
opLabelAliases, svdTruncateTo, minProbClip,
probClipInterval, verbosity)
return directMLEGSTmodels
def focused_mc2gst_model(circuitToEstimate, circuitLabel, dataset,
prepStrs, effectStrs, startModel,
opLabelAliases=None, minProbClipForWeighting=1e-4,
probClipInterval=(-1e6, 1e6), verbosity=0):
"""
Constructs a model containing a single LSGST estimate of circuitToEstimate.
Starting with startModel, run LSGST with the same operation sequences that LGST
would use to estimate circuitToEstimate. That is, LSGST is run with
strings of the form: prepStr + circuitToEstimate + effectStr
and return the resulting Model.
Parameters
----------
circuitToEstimate : Circuit or tuple
The single operation sequence to estimate using LSGST
circuitLabel : string
The label for the estimate of circuitToEstimate.
i.e. op_matrix = returned_model[op_label]
dataset : DataSet
The data to use for LGST
prepStrs,effectStrs : list of Circuits
        Fiducial Circuit lists used to construct an informationally complete
preparation and measurement.
startModel : Model
The model to seed LSGST with. Often times obtained via LGST.
opLabelAliases : dictionary, optional
Dictionary whose keys are operation label "aliases" and whose values are tuples
corresponding to what that operation label should be expanded into before querying
the dataset. Defaults to the empty dictionary (no aliases defined)
e.g. opLabelAliases['Gx^3'] = ('Gx','Gx','Gx')
minProbClipForWeighting : float, optional
defines the clipping interval for the statistical weight used
within the chi^2 function (see chi2fn).
probClipInterval : 2-tuple, optional
(min,max) to clip probabilities to within Model probability
computation routines (see Model.bulk_fill_probs)
verbosity : int, optional
Verbosity value to send do_mc2gst(...) call.
Returns
-------
Model
A model containing LSGST estimate of circuitToEstimate.
"""
circuits = [prepStr + circuitToEstimate + effectStr for prepStr in prepStrs for effectStr in effectStrs]
_, focused_lsgst = _core.do_mc2gst(
dataset, startModel, circuits,
minProbClipForWeighting=minProbClipForWeighting,
probClipInterval=probClipInterval,
opLabelAliases=opLabelAliases,
verbosity=verbosity)
focused_lsgst.operations[circuitLabel] = _objs.FullDenseOp(
focused_lsgst.product(circuitToEstimate)) # add desired string as a separate labeled gate
return focused_lsgst
def focused_mc2gst_models(circuits, dataset, prepStrs, effectStrs,
startModel, opLabelAliases=None,
minProbClipForWeighting=1e-4,
probClipInterval=(-1e6, 1e6), verbosity=0):
"""
Constructs a dictionary with keys == operation sequences and values == Focused-LSGST Models.
Parameters
----------
circuits : list of Circuit or tuple objects
The operation sequences to estimate using LSGST. The elements of this list
are the keys of the returned dictionary.
dataset : DataSet
The data to use for all LGST and LSGST estimates.
prepStrs,effectStrs : list of Circuits
        Fiducial Circuit lists used to construct an informationally complete
preparation and measurement.
startModel : Model
The model to seed LSGST with. Often times obtained via LGST.
opLabelAliases : dictionary, optional
Dictionary whose keys are operation label "aliases" and whose values are tuples
corresponding to what that operation label should be expanded into before querying
the dataset. Defaults to the empty dictionary (no aliases defined)
e.g. opLabelAliases['Gx^3'] = ('Gx','Gx','Gx')
minProbClipForWeighting : float, optional
defines the clipping interval for the statistical weight used
within the chi^2 function (see chi2fn).
probClipInterval : 2-tuple, optional
(min,max) to clip probabilities to within Model probability
computation routines (see Model.bulk_fill_probs)
verbosity : int, optional
Verbosity value to send to do_mc2gst(...) call.
Returns
-------
dict
A dictionary that relates each operation sequence of circuits to a
Model containing the LSGST estimate of that operation sequence stored under
the operation label "GsigmaLbl".
"""
printer = _objs.VerbosityPrinter.build_printer(verbosity)
focusedLSGSTmodels = {}
printer.log("--- Focused LSGST precomputation ---")
with printer.progress_logging(1):
for i, sigma in enumerate(circuits):
printer.show_progress(i, len(circuits), prefix="--- Computing model for string", suffix='---')
focusedLSGSTmodels[sigma] = focused_mc2gst_model(
sigma, "GsigmaLbl", dataset, prepStrs, effectStrs, startModel,
opLabelAliases, minProbClipForWeighting, probClipInterval, verbosity)
return focusedLSGSTmodels
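# Illustrative sketch for the focused-LSGST helper above (all inputs are assumed to be
# supplied by the caller; the minProbClipForWeighting value is just an example).
def _example_focused_mc2gst_models(circuits, dataset, prepStrs, effectStrs, startModel):
    focused_models = focused_mc2gst_models(circuits, dataset, prepStrs, effectStrs,
                                           startModel, minProbClipForWeighting=1e-4)
    # Each circuit's estimate is stored as the "GsigmaLbl" gate of its Model.
    return {s: mdl.operations["GsigmaLbl"] for s, mdl in focused_models.items()}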
|