id (stringlengths 3-8) | content (stringlengths 100-981k)
---|---|
40829
|
import glob
import pickle
import sys
import msprime as msp
import numpy as np
import os
import multiprocessing as mp
import shutil
import random
import copy
import argparse
import h5py
import allel
import time
from sklearn.neighbors import NearestNeighbors
from sklearn.utils import resample
import matplotlib as mpl
mpl.use('pdf')
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.models import Model, model_from_json
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TerminateOnNaN
|
40835
|
from functools import wraps
from contextlib import contextmanager
from .core import Hub, gethub, Future, Condition, Lock, TimeoutError
__version__ = '0.2.a0'
__all__ = [
'Hub',
'gethub',
'sleep',
'Future',
'Condition',
'Lock',
]
def sleep(value):
gethub().do_sleep(value)
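# Minimal usage sketch (assumes a Hub event loop is already running in the
# current context; gethub() would fail otherwise):
#   sleep(0.5)  # suspend the current task for half a second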
|
40844
|
import platform
"""
[Note for Windows]
- Use '\\' or '/' in path
Ex) gitStoragePath = "D:\\Source\\gitrepos"
- Install 'Git for Windows'
- Windows version of VUDDY use its own JRE
[Note for POSIX]
- Use '/' for path
Ex) gitStoragePath = "/home/ubuntu/gitrepos/"
- Java binary is only needed in POSIX
"""
gitStoragePath = "/home/ubuntu/gitrepos/"
pf = platform.platform()
if "Windows" in pf: # Windows
gitBinary = "C:\\Program Files\\Git\\bin\\git.exe"
diffBinary = "C:\\Program Files\\Git\\usr\\bin\\diff.exe"
else: # POSIX
gitBinary = "git"
diffBinary = "diff"
javaBinary = "java"
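# Usage sketch (hypothetical): downstream code is expected to shell out to the
# binaries configured above, e.g.
#   subprocess.check_output([gitBinary, "clone", repo_url], cwd=gitStoragePath)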
|
40854
|
import os
from pathlib import Path
def menpo3d_src_dir_path():
r"""The path to the top of the menpo3d Python package.
Useful for locating where the data folder is stored.
Returns
-------
path : pathlib.Path
The full path to the top of the Menpo3d package
"""
return Path(os.path.abspath(__file__)).parent
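# Usage sketch (the 'data' sub-folder name is an illustrative assumption):
#   data_dir = menpo3d_src_dir_path() / 'data'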
|
40858
|
from ursina import *
app = Ursina()
bg = Entity(model='quad', texture='assets\\BG', scale=36, z=1)
window.fullscreen = True
player = Animation('assets\\player', collider='box', y=5)
fly = Entity(model='cube', texture='assets\\fly1', collider='box',
scale=2, x=20, y=-10)
flies = []
def newFly():
new = duplicate(fly,y=-5+(5124*time.dt)%15)
flies.append(new)
invoke(newFly, delay=1)
newFly()
camera.orthographic = True
camera.fov = 20
def update():
player.y += held_keys['w']*6*time.dt
player.y -= held_keys['s'] *6* time.dt
a = held_keys['w']*-20
b = held_keys['s'] *20
if a != 0:
player.rotation_z = a
else:
player.rotation_z = b
for fly in flies[:]:  # iterate over a copy; flies may be removed during the loop
fly.x -= 4*time.dt
touch = fly.intersects()
if touch.hit:
flies.remove(fly)
destroy(fly)
destroy(touch.entity)
t = player.intersects()
if t.hit and t.entity.scale==2:
quit()
def input(key):
if key == 'space':
e = Entity(y=player.y, x=player.x+1, model='cube', scale=1,
texture='assets\\Bullet', collider='box')
e.animate_x(30,duration=2,curve=curve.linear)
invoke(destroy, e, delay=2)
app.run()
|
40978
|
from distutils.core import setup
from Cython.Build import cythonize
setup(
name = "tax",
ext_modules = cythonize('tax.pyx'),
script_name = 'setup.py',
script_args = ['build_ext', '--inplace']
)
import tax
import numpy as np
print(tax.tax(np.ones(10)))
|
40984
|
from rest_framework.permissions import SAFE_METHODS, BasePermission
class ApiPermission(BasePermission):
def _has_permission(self, view, obj, request):
event = getattr(request, "event", None)
if not event: # Only true for root API view
return True
if request.method in SAFE_METHODS:
read_permission = getattr(view, "read_permission_required", None)
if read_permission:
return request.user.has_perm(read_permission, obj)
return True
write_permission = getattr(view, "write_permission_required", None)
if write_permission:
return request.user.has_perm(write_permission, obj)
return False
def has_permission(self, request, view):
return self._has_permission(view, getattr(request, "event", None), request)
def has_object_permission(self, request, view, obj):
return self._has_permission(view, obj, request)
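# Minimal sketch of a view opting in to these checks (the view class, queryset
# and permission codenames are illustrative, not part of the original code):
#   class SubmissionViewSet(viewsets.ModelViewSet):
#       queryset = Submission.objects.all()
#       permission_classes = [ApiPermission]
#       read_permission_required = "submission.view_submission"
#       write_permission_required = "submission.change_submission"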
|
40998
|
import datetime
from django.core.management.base import BaseCommand
from data_import.models import DataFileKey
class Command(BaseCommand):
"""
A management command for expunging expired keys
"""
help = "Expunge expired keys"
def handle(self, *args, **options):
self.stdout.write("Expunging expired keys")
# Note: use an aware UTC timestamp so that django doesn't complain
# about naive datetimes
now = datetime.datetime.now(datetime.timezone.utc)
six_hours_ago = now - datetime.timedelta(hours=6)
keys = DataFileKey.objects.filter(created__lte=six_hours_ago)
num_deletes = keys.delete()[0]
self.stdout.write("Removed {0} keys".format(num_deletes))
|
41002
|
import argparse
import os
argparser = argparse.ArgumentParser()
argparser.add_argument("--dataset_names", default="all", type=str) # "all" or names joined by comma
argparser.add_argument("--dataset_path", default="DATASET/odinw", type=str)
args = argparser.parse_args()
root = "https://vlpdatasets.blob.core.windows.net/odinw/odinw/odinw_35"
all_datasets = ["AerialMaritimeDrone", "AmericanSignLanguageLetters", "Aquarium", "BCCD", "ChessPieces", "CottontailRabbits", "DroneControl", "EgoHands", "HardHatWorkers", "MaskWearing", "MountainDewCommercial", "NorthAmericaMushrooms", "OxfordPets", "PKLot", "Packages", "PascalVOC", "Raccoon", "ShellfishOpenImages", "ThermalCheetah", "UnoCards", "VehiclesOpenImages", "WildfireSmoke", "boggleBoards", "brackishUnderwater", "dice", "openPoetryVision", "pistols", "plantdoc", "pothole", "selfdrivingCar", "thermalDogsAndPeople", "vector", "websiteScreenshots"]
datasets_to_download = []
if args.dataset_names == "all":
datasets_to_download = all_datasets
else:
datasets_to_download = args.dataset_names.split(",")
for dataset in datasets_to_download:
if dataset in all_datasets:
print("Downloading dataset: ", dataset)
os.system("wget " + root + "/" + dataset + ".zip" + " -O " + args.dataset_path + "/" + dataset + ".zip")
os.system("unzip " + args.dataset_path + "/" + dataset + ".zip -d " + args.dataset_path)
os.system("rm " + args.dataset_path + "/" + dataset + ".zip")
else:
print("Dataset not found: ", dataset)
|
41045
|
import tensorrt as trt
import pycuda.driver as cuda
import cv2
import numpy as np
class TrtPacknet(object):
"""TrtPacknet class encapsulates things needed to run TRT Packnet (depth inference)."""
def _load_engine(self):
TRTbin = 'trt_%s.trt' % self.model
with open(TRTbin, 'rb') as f, trt.Runtime(self.trt_logger) as runtime:
return runtime.deserialize_cuda_engine(f.read())
def _allocate_buffers(self):
host_inputs, host_outputs, cuda_inputs, cuda_outputs, bindings = \
[], [], [], [], []
for binding in self.engine:
size = trt.volume(self.engine.get_binding_shape(binding)) * \
self.engine.max_batch_size
host_mem = cuda.pagelocked_empty(size, np.float32)
cuda_mem = cuda.mem_alloc(host_mem.nbytes)
bindings.append(int(cuda_mem))
if self.engine.binding_is_input(binding):
host_inputs.append(host_mem)
cuda_inputs.append(cuda_mem)
else:
host_outputs.append(host_mem)
cuda_outputs.append(cuda_mem)
return host_inputs, host_outputs, cuda_inputs, cuda_outputs, bindings
def __init__(self, model, input_shape=(288, 384), cuda_ctx=None):
"""Initialize TensorRT plugins, engine and conetxt."""
self.model = model
self.input_shape = input_shape
self.cuda_ctx = cuda_ctx
if self.cuda_ctx:
self.cuda_ctx.push()
self.trt_logger = trt.Logger(trt.Logger.INFO)
self.engine = self._load_engine()
try:
self.context = self.engine.create_execution_context()
self.stream = cuda.Stream()
self.host_inputs, self.host_outputs, self.cuda_inputs, self.cuda_outputs, self.bindings = self._allocate_buffers()
except Exception as e:
raise RuntimeError('fail to allocate CUDA resources') from e
finally:
if self.cuda_ctx:
self.cuda_ctx.pop()
def __del__(self):
"""Free CUDA memories and context."""
del self.cuda_outputs
del self.cuda_inputs
del self.stream
if __name__ == "__main__":
import pycuda.autoinit # This is needed for initializing CUDA driver
trt_packnet = TrtPacknet("packnet")
|
41049
|
from random import randint
from typing import Any
from typing import Dict
from retrying import retry
import apysc as ap
from apysc._event.mouse_up_interface import MouseUpInterface
from apysc._expression import expression_data_util
from apysc._type.variable_name_interface import VariableNameInterface
class _TestMouseUp(MouseUpInterface, VariableNameInterface):
def __init__(self) -> None:
"""Test class for mouse up interface.
"""
self.variable_name = 'test_mouse_up'
class TestMouseUpInterface:
def on_mouse_up_1(
self, e: ap.MouseEvent, options: Dict[str, Any]) -> None:
"""
Test handler for mouse up event.
Parameters
----------
e : MouseEvent
Created event instance.
options : dict
Optional arguments dictionary.
"""
def on_mouse_up_2(
self, e: ap.MouseEvent, options: Dict[str, Any]) -> None:
"""
Test handler for mouse up event.
Parameters
----------
e : MouseEvent
Created event instance.
options : dict
Optional arguments dictionary.
"""
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__initialize_mouse_up_handlers_if_not_initialized(self) -> None:
interface_1: MouseUpInterface = MouseUpInterface()
interface_1._initialize_mouse_up_handlers_if_not_initialized()
assert interface_1._mouse_up_handlers == {}
interface_1._initialize_mouse_up_handlers_if_not_initialized()
assert interface_1._mouse_up_handlers == {}
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test_mouseup(self) -> None:
expression_data_util.empty_expression()
interface_1: _TestMouseUp = _TestMouseUp()
name: str = interface_1.mouseup(
handler=self.on_mouse_up_1, options={'msg': 'Hello!'})
assert name in interface_1._mouse_up_handlers
expression: str = \
expression_data_util.get_current_event_handler_scope_expression()
expected: str = f'function {name}('
assert expected in expression
expression = expression_data_util.get_current_expression()
expected = (
f'{interface_1.variable_name}.mouseup({name});'
)
assert expected in expression
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test_unbind_mouseup(self) -> None:
expression_data_util.empty_expression()
interface_1: _TestMouseUp = _TestMouseUp()
name: str = interface_1.mouseup(handler=self.on_mouse_up_1)
interface_1.unbind_mouseup(handler=self.on_mouse_up_1)
assert interface_1._mouse_up_handlers == {}
expression: str = expression_data_util.get_current_expression()
expected: str = (
f'{interface_1.variable_name}.off('
f'"{ap.MouseEventType.MOUSEUP.value}", {name});'
)
assert expected in expression
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test_unbind_mouseup_all(self) -> None:
expression_data_util.empty_expression()
interface_1: _TestMouseUp = _TestMouseUp()
interface_1.mouseup(handler=self.on_mouse_up_1)
interface_1.mouseup(handler=self.on_mouse_up_2)
interface_1.unbind_mouseup_all()
assert interface_1._mouse_up_handlers == {}
expression: str = expression_data_util.get_current_expression()
expected: str = (
f'{interface_1.variable_name}.off('
f'"{ap.MouseEventType.MOUSEUP.value}");'
)
assert expected in expression
|
41071
|
import scrapy
from aroay_cloudscraper import CloudScraperRequest
class JavdbSpider(scrapy.Spider):
name = 'javdb'
allowed_domains = ['javdb.com']
headers = {"Accept-Language": "zh-cn;q=0.8,en-US;q=0.6"}
def start_requests(self):
yield CloudScraperRequest("https://javdb.com/v/BOeQO", callback=self.parse)
def parse(self, response):
print(response.text)
|
41092
|
import sys
import setuptools
sys.path.insert(0, "src")
import pytorch_adapt
with open("README.md", "r") as fh:
long_description = fh.read()
extras_require_ignite = ["pytorch-ignite == 0.5.0.dev20220221"]
extras_require_lightning = ["pytorch-lightning"]
extras_require_record_keeper = ["record-keeper >= 0.9.31"]
extras_require_timm = ["timm"]
extras_require_docs = [
"mkdocs-material",
"mkdocstrings[python]",
"griffe",
"mkdocs-gen-files",
"mkdocs-section-index",
"mkdocs-literate-nav",
]
extras_require_dev = ["black", "isort", "nbqa", "flake8"]
setuptools.setup(
name="pytorch-adapt",
version=pytorch_adapt.__version__,
author="<NAME>",
description="Domain adaptation made easy. Fully featured, modular, and customizable.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/KevinMusgrave/pytorch-adapt",
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.0",
install_requires=[
"numpy",
"torch",
"torchvision",
"torchmetrics",
"pytorch-metric-learning >= 1.3.1.dev0",
],
extras_require={
"ignite": extras_require_ignite,
"lightning": extras_require_lightning,
"record-keeper": extras_require_record_keeper,
"timm": extras_require_timm,
"docs": extras_require_docs,
"dev": extras_require_dev,
},
)
|
41130
|
from urllib import request
def download_from_url(url, filename):
request.urlretrieve(url, filename)
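# Usage sketch (URL and filename are illustrative):
#   download_from_url("https://example.com/archive.zip", "archive.zip")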
|
41205
|
import numpy as np
import queue
import cv2
import os
import datetime
SIZE = 32
SCALE = 0.007874015748031496
def quantized_np(array,scale,data_width=8):
quantized_array= np.round(array/scale)
quantized_array = np.maximum(quantized_array, -2**(data_width-1))
quantized_array = np.minimum(quantized_array, 2**(data_width-1)-1)
return quantized_array
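# Worked sketch: SCALE is 1/127, so an input of 1.0 maps to round(127.0) = 127
# (the int8 maximum), while -2.0 maps to -254 and is clipped to -128:
#   quantized_np(np.array([1.0, -2.0]), SCALE)  # -> array([ 127., -128.])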
def get_x_y_cuts(data, n_lines=1):
w, h = data.shape
visited = set()
q = queue.Queue()
offset = [(-1, -1), (0, -1), (1, -1), (-1, 0),
(1, 0), (-1, 1), (0, 1), (1, 1)]
cuts = []
for y in range(h):
for x in range(w):
x_axis = []
y_axis = []
if data[x][y] < 200 and (x, y) not in visited:
q.put((x, y))
visited.add((x, y))
while not q.empty():
x_p, y_p = q.get()
for x_offset, y_offset in offset:
x_c, y_c = x_p + x_offset, y_p + y_offset
if (x_c, y_c) in visited:
continue
visited.add((x_c, y_c))
try:
if data[x_c][y_c] < 200:
q.put((x_c, y_c))
x_axis.append(x_c)
y_axis.append(y_c)
except IndexError:
pass  # neighbour falls outside the image; skip it
if x_axis:
min_x, max_x = min(x_axis), max(x_axis)
min_y, max_y = min(y_axis), max(y_axis)
if max_x - min_x > 3 and max_y - min_y > 3:
cuts.append([min_x, max_x + 1, min_y, max_y + 1])
if n_lines == 1:
cuts = sorted(cuts, key=lambda x: x[2])
pr_item = cuts[0]
count = 1
len_cuts = len(cuts)
new_cuts = [cuts[0]]
pr_k = 0
for i in range(1, len_cuts):
pr_item = new_cuts[pr_k]
now_item = cuts[i]
if not (now_item[2] > pr_item[3]):
new_cuts[pr_k][0] = min(pr_item[0], now_item[0])
new_cuts[pr_k][1] = max(pr_item[1], now_item[1])
new_cuts[pr_k][2] = min(pr_item[2], now_item[2])
new_cuts[pr_k][3] = max(pr_item[3], now_item[3])
else:
new_cuts.append(now_item)
pr_k += 1
cuts = new_cuts
return cuts
def get_image_cuts(image, dir=None, is_data=False, n_lines=1, data_needed=False, count=0,QUAN = False):
if is_data:
data = image
else:
data = cv2.imread(image, 2)
cuts = get_x_y_cuts(data, n_lines=n_lines)
image_cuts = None
for i, item in enumerate(cuts):
count += 1
max_dim = max(item[1] - item[0], item[3] - item[2])
new_data = np.ones((int(1.4 * max_dim), int(1.4 * max_dim))) * 255
x_min, x_max = (
max_dim - item[1] + item[0]) // 2, (max_dim - item[1] + item[0]) // 2 + item[1] - item[0]
y_min, y_max = (
max_dim - item[3] + item[2]) // 2, (max_dim - item[3] + item[2]) // 2 + item[3] - item[2]
new_data[int(0.2 * max_dim) + x_min:int(0.2 * max_dim) + x_max, int(0.2 * max_dim) +
y_min:int(0.2 * max_dim) + y_max] = data[item[0]:item[1], item[2]:item[3]]
standard_data = cv2.resize(new_data, (SIZE, SIZE))
if not data_needed:
cv2.imwrite(dir + str(count) + ".jpg", standard_data)
if data_needed:
data_flat = np.reshape(standard_data, (1, SIZE*SIZE))
data_flat = (255 - data_flat) / 255
if QUAN:
data_flat = quantized_np(data_flat, SCALE, data_width=8)
if image_cuts is None:
image_cuts = data_flat
else:
image_cuts = np.r_[image_cuts, data_flat]
if data_needed:
return image_cuts
return count
def main(img_dir):
for file in os.listdir(img_dir):
if file.endswith('jpeg'):
path = os.path.join(img_dir, file)
oldtime = datetime.datetime.now()
#count = process.get_image_cuts(path, dir='./dataset/'+file.split('.')[0]+'_cut',count=0)
image_cuts = get_image_cuts(
path, dir = img_dir + file.split('.')[0]+'_cut', count=0, data_needed=True)
newtime = datetime.datetime.now()
Totaltime = (newtime-oldtime).total_seconds()
print("image cut time (s): ", Totaltime)
print(np.size(image_cuts, 0))
if __name__ == '__main__':
img_dir = './dataset'
main(img_dir)
|
41238
|
import os
import secrets
import threading
import tornado.web
import tornado.escape
import logging.config
from app.classes.models import Roles, Users, check_role_permission, Remote, model_to_dict
from app.classes.multiserv import multi
from app.classes.helpers import helper
from app.classes.backupmgr import backupmgr
logger = logging.getLogger(__name__)
class BaseHandler(tornado.web.RequestHandler):
def check_xsrf_cookie(self):
# Disable CSRF protection on API routes
pass
def return_response(self, status, errors, data, messages):
# Define a standardized response
self.write({
"status": status,
"data": data,
"errors": errors,
"messages": messages
})
def access_denied(self, user):
logger.info("User %s was denied access to API route", user)
self.set_status(403)
self.finish(self.return_response(403, {'error':'ACCESS_DENIED'}, {}, {'info':'You were denied access to the requested resource'}))
def authenticate_user(self, token):
try:
logger.debug("Searching for specified token")
user_data = Users.get(api_token=token)
logger.debug("Checking results")
if user_data:
# Login successful! Return the username
logger.info("User {} has authenticated to API".format(user_data.username))
return user_data.username
else:
logger.debug("Auth unsuccessful")
return None
except Exception:
logger.warning("Traceback occurred when authenticating user to API. Most likely wrong token")
return None
class SendCommand(BaseHandler):
def initialize(self, mcserver):
self.mcserver = mcserver
def post(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'svr_control'):
self.access_denied(user)
command = self.get_body_argument('command', default=None, strip=True)
server_id = self.get_argument('id')
if command:
server = multi.get_server_obj(server_id)
if server.check_running():
server.send_command(command)
self.return_response(200, '', {"run": True}, '')
else:
self.return_response(200, {'error':'SER_NOT_RUNNING'}, {}, {})
else:
self.return_response(200, {'error':'NO_COMMAND'}, {}, {})
class GetHostStats(BaseHandler):
def initialize(self, mcserver):
self.mcserver = mcserver
def get(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'logs'):
self.access_denied(user)
stats = multi.get_host_status()
stats.pop('time')  # We don't need the request time
self.return_response(200, {}, stats, {})
class GetServerStats(BaseHandler):
def initialize(self, mcserver):
self.mcserver = mcserver
def get(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'logs'):
self.access_denied(user)
stats = multi.get_stats_for_servers()
data = []
for server in stats:
server = stats[server]
server.pop('time')  # We don't need the request time
data.append(server)
self.return_response(200, {}, data, {})
class SearchMCLogs(BaseHandler):
def initialize(self, mcserver):
self.mcserver = mcserver
def post(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'logs'):
self.access_denied(user)
search_string = self.get_argument('query', default=None, strip=True)
server_id = self.get_argument('id')
server = multi.get_server_obj(server_id)
logfile = os.path.join(server.server_path, 'logs', 'latest.log')
data = helper.search_file(logfile, search_string)
line_list = []
if data:
for line in data:
line_list.append({'line_num': line[0], 'message': line[1]})
self.return_response(200, {}, line_list, {})
class GetMCLogs(BaseHandler):
def initialize(self, mcserver):
self.mcserver = mcserver
def get(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'logs'):
self.access_denied(user)
server_id = self.get_argument('id')
server = multi.get_server_obj(server_id)
logfile = os.path.join(server.server_path, 'logs', 'latest.log')
data = helper.search_file(logfile, '')
line_list = []
if data:
for line in data:
line_list.append({'line_num': line[0], 'message': line[1]})
self.return_response(200, {}, line_list, {})
class GetCraftyLogs(BaseHandler):
def get(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'logs'):
self.access_denied(user)
filename = self.get_argument('name')
logfile = os.path.join('logs', filename + '.log')
data = helper.search_file(logfile, '')
line_list = []
if data:
for line in data:
line_list.append({'line_num': line[0], 'message': line[1]})
self.return_response(200, {}, line_list, {})
class SearchCraftyLogs(BaseHandler):
def post(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'logs'):
self.access_denied(user)
filename = self.get_argument('name')
query = self.get_argument('query')
logfile = os.path.join('logs', filename + '.log')
data = helper.search_file(logfile, query)
line_list = []
if data:
for line in data:
line_list.append({'line_num': line[0], 'message': line[1]})
self.return_response(200, {}, line_list, {})
class ForceServerBackup(BaseHandler):
def initialize(self, mcserver):
self.mcserver = mcserver
def post(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'backups'):
self.access_denied(user)
server_id = self.get_argument('id')
server = multi.get_server_obj(server_id)
backup_thread = threading.Thread(name='backup', target=server.backup_server, daemon=False)
backup_thread.start()
self.return_response(200, {}, {'code':'SER_BAK_CALLED'}, {})
class StartServer(BaseHandler):
def initialize(self, mcserver):
self.mcserver = mcserver
def post(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'svr_control'):
self.access_denied(user)
server_id = self.get_argument('id')
server = multi.get_server_obj(server_id)
if not server.check_running():
Remote.insert({
Remote.command: 'start_mc_server',
Remote.server_id: server_id,
Remote.command_source: "localhost"
}).execute()
self.return_response(200, {}, {'code':'SER_START_CALLED'}, {})
else:
self.return_response(500, {'error':'SER_RUNNING'}, {}, {})
class StopServer(BaseHandler):
def initialize(self, mcserver):
self.mcserver = mcserver
def post(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'svr_control'):
self.access_denied(user)
server_id = self.get_argument('id')
server = multi.get_server_obj(server_id)
if server.check_running():
Remote.insert({
Remote.command: 'stop_mc_server',
Remote.server_id: server_id,
Remote.command_source: "localhost"
}).execute()
self.return_response(200, {}, {'code':'SER_STOP_CALLED'}, {})
else:
self.return_response(500, {'error':'SER_NOT_RUNNING'}, {}, {})
class RestartServer(BaseHandler):
def initialize(self, mcserver):
self.mcserver = mcserver
def post(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'svr_control'):
self.access_denied(user)
server_id = self.get_argument('id')
server = multi.get_server_obj(server_id)
server.restart_threaded_server()
self.return_response(200, {}, {'code':'SER_RESTART_CALLED'}, {})
class CreateUser(BaseHandler):
def post(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'config'):
self.access_denied(user)
new_username = self.get_argument("username")
# TODO: implement role checking
#new_role = self.get_argument("role", 'Mod')
if new_username:
new_pass = helper.random_string_generator()
new_token = secrets.token_urlsafe(32)
result = Users.insert({
Users.username: new_username,
Users.role: 'Mod',
Users.password: <PASSWORD>,
Users.api_token: new_token
}).execute()
self.return_response(200, {}, {'code':'COMPLETE', 'username': new_username, 'password': <PASSWORD>, 'api_token': new_token}, {})
else:
self.return_response(500, {'error':'MISSING_PARAMS'}, {}, {'info':'Some parameters failed validation'})
class DeleteUser(BaseHandler):
def post(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access') and not check_role_permission(user, 'config'):
self.access_denied(user)
username = self.get_argument("username", None, True)
if username == 'Admin':
self.return_response(500, {'error':'NOT_ALLOWED'}, {}, {'info':'You cannot delete the admin user'})
else:
if username:
Users.delete().where(Users.username == username).execute()
self.return_response(200, {}, {'code':'COMPLETED'}, {})
class ListServers(BaseHandler):
def initialize(self, mcserver):
self.mcserver = mcserver
def get(self):
token = self.get_argument('token')
user = self.authenticate_user(token)
if user is None:
self.access_denied('unknown')
if not check_role_permission(user, 'api_access'):
self.access_denied(user)
self.return_response(200, {}, {"code": "COMPLETED", "servers": multi.list_servers()}, {})
|
41277
|
import numpy as np
import tensorflow as tf
import unittest
hungarian_module = tf.load_op_library("hungarian.so")
class HungarianTests(unittest.TestCase):
def test_min_weighted_bp_cover_1(self):
W = np.array([[3, 2, 2], [1, 2, 0], [2, 2, 1]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
c_0 = c_0.eval()
c_1 = c_1.eval()
c_0_t = np.array([2, 1, 1])
c_1_t = np.array([1, 1, 0])
M_t = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
self.assertTrue((c_0.flatten() == c_0_t.flatten()).all())
self.assertTrue((c_1.flatten() == c_1_t.flatten()).all())
self.assertTrue((M == M_t).all())
pass
def test_min_weighted_bp_cover_2(self):
W = np.array([[5, 0, 4, 0], [0, 4, 6, 8], [4, 0, 5, 7]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
c_0 = c_0.eval()
c_1 = c_1.eval()
c_0_t = np.array([5, 6, 5])
c_1_t = np.array([0, 0, 0, 2])
M_t = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
self.assertTrue((c_0.flatten() == c_0_t.flatten()).all())
self.assertTrue((c_1.flatten() == c_1_t.flatten()).all())
self.assertTrue((M == M_t).all())
def test_min_weighted_bp_cover_3(self):
W = np.array([[5, 0, 2], [3, 1, 0], [0, 5, 0]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
c_0 = c_0.eval()
c_1 = c_1.eval()
c_0_t = np.array([2, 0, 4])
c_1_t = np.array([3, 1, 0])
M_t = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
self.assertTrue((c_0.flatten() == c_0_t.flatten()).all())
self.assertTrue((c_1.flatten() == c_1_t.flatten()).all())
self.assertTrue((M == M_t).all())
def test_min_weighted_bp_cover_4(self):
W = np.array([[[5, 0, 2], [3, 1, 0], [0, 5, 0]], [[3, 2, 2], [1, 2, 0],
[2, 2, 1]]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
c_0 = c_0.eval()
c_1 = c_1.eval()
c_0_t = np.array([[2, 0, 4], [2, 1, 1]])
c_1_t = np.array([[3, 1, 0], [1, 1, 0]])
M_t = np.array([[[0, 0, 1], [1, 0, 0], [0, 1, 0]], [[1, 0, 0], [0, 1, 0],
[0, 0, 1]]])
self.assertTrue((c_0.flatten() == c_0_t.flatten()).all())
self.assertTrue((c_1.flatten() == c_1_t.flatten()).all())
self.assertTrue((M == M_t).all())
def test_real_values_1(self):
# Test the while loop terminates with real values.
W = np.array(
[[0.90, 0.70, 0.30, 0.20, 0.40, 0.001, 0.001, 0.001, 0.001, 0.001],
[0.80, 0.75, 0.92, 0.10, 0.15, 0.001, 0.001, 0.001, 0.001, 0.001],
[0.78, 0.85, 0.66, 0.29, 0.21, 0.001, 0.001, 0.001, 0.001, 0.001],
[0.42, 0.55, 0.23, 0.43, 0.33, 0.002, 0.001, 0.001, 0.001, 0.001],
[0.64, 0.44, 0.33, 0.33, 0.34, 0.001, 0.002, 0.001, 0.001, 0.001],
[0.22, 0.55, 0.43, 0.43, 0.14, 0.001, 0.001, 0.002, 0.001, 0.001],
[0.43, 0.33, 0.34, 0.22, 0.14, 0.001, 0.001, 0.001, 0.002, 0.001],
[0.33, 0.42, 0.23, 0.13, 0.43, 0.001, 0.001, 0.001, 0.001, 0.002],
[0.39, 0.24, 0.53, 0.56, 0.89, 0.001, 0.001, 0.001, 0.001, 0.001],
[0.12, 0.34, 0.82, 0.82, 0.77, 0.001, 0.001, 0.001, 0.001, 0.001]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
M_t = np.array(
[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]])
self.assertTrue((M == M_t).all())
def test_real_values_2(self):
W = np.array([[
0.00604139, 0.0126045, 0.0117373, 0.01245, 0.00808836, 0.0162662,
0.0137996, 0.00403898, 0.0123786, 1e-05
], [
0.00604229, 0.0126071, 0.0117400, 0.0124528, 0.00808971, 0.0162703,
0.0138028, 0.00403935, 0.0123812, 1e-05
], [
0.00604234, 0.0126073, 0.0117402, 0.012453, 0.00808980, 0.0162706,
0.0138030, 0.00403937, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
def test_real_values_3(self):
W = np.array([[
0.00302646, 0.00321431, 0.0217552, 0.00836773, 0.0256353, 0.0177026,
0.0289461, 0.0214768, 0.0101898, 1e-05
], [
0.00302875, 0.003217, 0.0217628, 0.00836405, 0.0256229, 0.0177137,
0.0289468, 0.0214719, 0.0101904, 1e-05
], [
0.00302897, 0.00321726, 0.0217636, 0.00836369, 0.0256217, 0.0177148,
0.0289468, 0.0214714, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.0177149,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.0177149,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715,
0.0289468, 0.0214713, 0.0101905, 1e-05
]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
def test_real_values_4(self):
W = np.array([[
1e-05, 0.0634311, 1e-05, 4.76687e-05, 1.00079e-05, 1.00378e-05, 1e-05,
1e-05, 1e-05, 3.9034e-05
], [
1e-05, 3.42696e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1.0122e-05,
3.43236e-05, 1e-05
], [
1e-05, 0.0426792, 0.031155, 1.0008e-05, 0.00483961, 0.0228187, 1e-05,
1e-05, 1e-05, 0.102463
], [
1e-05, 1e-05, 1e-05, 1.07065e-05, 1e-05, 1.00185e-05, 1e-05, 1e-05,
1e-05, 1.00007e-05
], [
1e-05, 4.22947e-05, 0.00062168, 0.623917, 1.03468e-05, 0.00588984,
1.00004e-05, 1.44433e-05, 1.00014e-05, 0.000213425
], [
1e-05, 1.01764e-05, 1e-05, 0.000667249, 1e-05, 0.000485082, 1e-05,
1e-05, 1.00002e-05, 1e-05
], [
1e-05, 1e-05, 1.50331e-05, 1e-05, 0.11269, 1e-05, 1e-05, 1e-05, 1e-05,
1.13251e-05
], [
1.0001e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 0.0246974, 1e-05, 1e-05,
1e-05
], [
1e-05, 2.89144e-05, 1e-05, 1.05147e-05, 1e-05, 0.000894762, 1.03587e-05,
0.150301, 1e-05, 1.00045e-05
], [
1e-05, 3.97901e-05, 1e-05, 1.11641e-05, 1e-05, 2.34249e-05, 1.0007e-05,
2.42828e-05, 1e-05, 1.10529e-05
]])
p = 1e6
W = np.round(W * p) / p
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
def test_real_values_5(self):
W = np.array([[
1.4e-05, 1e-05, 1e-05, 0.053306, 0.044139, 1e-05, 1.2e-05, 1e-05, 1e-05,
1e-05
], [
0.001234, 1e-05, 1e-05, 2.1e-05, 1e-05, 0.001535, 0.019553, 1e-05,
1e-05, 1e-05
], [
0.002148, 1e-05, 1e-05, 1.6e-05, 0.651536, 2e-05, 7.4e-05, 0.002359,
1e-05, 1e-05
], [
3.8e-05, 1e-05, 0.000592, 4.7e-05, 0.09173, 1e-05, 1e-05, 1e-05, 1e-05,
1e-05
], [
1e-05, 1e-05, 1e-05, 0.213736, 1e-05, 4.5e-05, 0.000768, 1e-05, 1e-05,
1e-05
], [
1e-05, 1e-05, 1e-05, 0.317609, 1e-05, 1e-05, 0.002151, 1e-05, 1e-05,
1e-05
], [
0.002802, 1e-05, 1.2e-05, 1e-05, 1e-05, 0.002999, 4.8e-05, 1.1e-05,
0.000919, 1e-05
], [
1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 0.028816, 1e-05
], [
1e-05, 1e-05, 0.047335, 1e-05, 1.2e-05, 1e-05, 1e-05, 1e-05, 1e-05,
1e-05
], [1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05]])
p = 1e6
W = np.round(W * p) / p
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
def test_real_values_6(self):
W = np.array([[
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
]])
p = 1e6
W = np.round(W * p) / p
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(HungarianTests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
41285
|
from cartodb_services.refactor.storage.redis_connection_config import RedisMetadataConnectionConfigBuilder
from cartodb_services.refactor.storage.redis_connection import RedisConnectionBuilder
from cartodb_services.refactor.storage.redis_config import RedisUserConfigStorageBuilder
class UserConfigBackendFactory(object):
"""
This class abstracts the creation of a user configuration backend. It will return
an implementation of the ConfigBackendInterface appropriate to the user, depending
on the environment.
"""
def __init__(self, username, environment, server_config_backend):
self._username = username
self._environment = environment
self._server_config_backend = server_config_backend
def get(self):
if self._environment.is_onpremise:
user_config_backend = self._server_config_backend
else:
redis_metadata_connection_config = RedisMetadataConnectionConfigBuilder(self._server_config_backend).get()
redis_metadata_connection = RedisConnectionBuilder(redis_metadata_connection_config).get()
user_config_backend = RedisUserConfigStorageBuilder(redis_metadata_connection, self._username).get()
return user_config_backend
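# Usage sketch (the username, environment and server_config_backend objects are
# assumed to come from the surrounding request context):
#   backend = UserConfigBackendFactory(username, environment, server_config_backend).get()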
|
41296
|
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.status import HTTP_400_BAD_REQUEST, HTTP_200_OK
from river.models import Function
from river_admin.views import get, post, put, delete
from river_admin.views.serializers import UpdateFunctionDto, CreateFunctionDto, FunctionDto
@get(r'^function/get/(?P<pk>\w+)/$')
def get_it(request, pk):
function = get_object_or_404(Function.objects.all(), pk=pk)
return Response(FunctionDto(function).data, status=HTTP_200_OK)
@get(r'^function/list/$')
def list_it(request):
return Response(FunctionDto(Function.objects.all(), many=True).data, status=HTTP_200_OK)
@post(r'^function/create/$')
def create_it(request):
create_function_request = CreateFunctionDto(data=request.data)
if create_function_request.is_valid():
function = create_function_request.save()
return Response({"id": function.id}, status=HTTP_200_OK)
else:
return Response(create_function_request.errors, status=HTTP_400_BAD_REQUEST)
@put(r'^function/update/(?P<pk>\w+)/$')
def update_it(request, pk):
function = get_object_or_404(Function.objects.all(), pk=pk)
update_function_request = UpdateFunctionDto(data=request.data, instance=function)
if update_function_request.is_valid():
update_function_request.save()
return Response({"message": "Function is updated"}, status=HTTP_200_OK)
else:
return Response(update_function_request.errors, status=HTTP_400_BAD_REQUEST)
@delete(r'^function/delete/(?P<pk>\w+)/$')
def delete_it(request, pk):
function = get_object_or_404(Function.objects.all(), pk=pk)
function.delete()
return Response(status=HTTP_200_OK)
|
41299
|
import logging
from logging.config import dictConfig
import dbnd
from dbnd.testing.helpers import run_dbnd_subprocess__with_home
from dbnd_airflow_contrib.dbnd_airflow_default_logger import DEFAULT_LOGGING_CONFIG
class TestDbndAirflowLogging(object):
def test_dbnd_airflow_logging_config(self):
# we implement it as a separate test, as we don't want to affect the current logging system
dbnd_config = DEFAULT_LOGGING_CONFIG
assert dbnd_config
def test_can_be_loaded(self):
# we can't just load config, it will affect all future tests
output = run_dbnd_subprocess__with_home([__file__.replace(".pyc", ".py")])
assert "test_can_be_loaded OK" in output
logging.error("Done")
if __name__ == "__main__":
print(
dbnd.__version__
) # we need it first to import, before we import any airflow code
dbnd_config = DEFAULT_LOGGING_CONFIG
dictConfig(dbnd_config)
logging.info("test_can_be_loaded OK")
|
41311
|
from base import CQPartsTest
from base import testlabel
# units under test
from cqparts_fasteners.fasteners.nutbolt import NutAndBoltFastener
# ---------- Test Assembly ----------
import cadquery
import cqparts
from partslib.basic import Box
from cqparts import constraint
from cqparts.utils import CoordSystem
class FastenedAssembly(cqparts.Assembly):
def make_components(self):
base = Box(length=20, width=20, height=12)
top = Box(length=18, width=18, height=18)
return {
'base': base,
'top': top,
'fastener': NutAndBoltFastener(parts=[base, top]),
}
def make_constraints(self):
base = self.components['base']
top = self.components['top']
fastener = self.components['fastener']
return [
constraint.Fixed(base.mate_bottom),
constraint.Coincident(top.mate_bottom, base.mate_top),
constraint.Coincident(fastener.mate_origin, top.mate_top + CoordSystem((1, 2, 0))),
]
# ---------- Unit Tests ----------
class ScrewFastenerTest(CQPartsTest):
def test_fastener(self):
obj = FastenedAssembly()
bolt = obj.find('fastener.bolt')
nut = obj.find('fastener.nut')
self.assertEqual(bolt.world_coords.origin, cadquery.Vector((1, 2, 30)))
self.assertGreater(
bolt.bounding_box.zlen,
obj.find('top').height + obj.find('base').height
)
self.assertEqual(nut.world_coords.origin, cadquery.Vector((1, 2, 0)))
|
41317
|
from tests.utilities import is_equivalent
def test_mutate(case_data):
"""
Test :meth:`mutate`.
Parameters
----------
case_data : :class:`.CaseData`
A test case. Holds the mutator to test and the correct mutation
record.
Returns
-------
None : :class:`NoneType`
"""
_test_mutate(
mutator=case_data.mutator,
record=case_data.record,
mutation_record=case_data.mutation_record,
)
def _test_mutate(mutator, record, mutation_record):
"""
Test :meth:`mutate`.
Parameters
----------
mutator : :class:`.MoleculeMutator`
The mutator to test.
record : :class:`.MoleculeRecord`
The molecule to mutate.
mutation_record : :class:`.MutationRecord`
The correct mutation record.
Returns
-------
None : :class:`NoneType`
"""
result = mutator.mutate(record)
assert (
result.get_mutator_name() == mutation_record.get_mutator_name()
)
is_equivalent(
result.get_molecule_record().get_molecule(),
mutation_record.get_molecule_record().get_molecule(),
)
|
41459
|
import FWCore.ParameterSet.Config as cms
pileupVtxDigitizer = cms.PSet(
accumulatorType = cms.string("PileupVertexAccumulator"),
hitsProducer = cms.string('generator'),
vtxTag = cms.InputTag("generatorSmeared"),
vtxFallbackTag = cms.InputTag("generator"),
makeDigiSimLinks = cms.untracked.bool(False),
saveVtxTimes = cms.bool(False))
from Configuration.Eras.Modifier_phase2_timing_cff import phase2_timing
phase2_timing.toModify( pileupVtxDigitizer, saveVtxTimes = cms.bool(True) )
|
41473
|
from dataset.transform import crop, hflip, normalize, resize, blur, cutout
import math
import os
from PIL import Image
import random
from torch.utils.data import Dataset
from torchvision import transforms
class SemiDataset(Dataset):
def __init__(self, name, root, mode, size, labeled_id_path=None, unlabeled_id_path=None, pseudo_mask_path=None):
"""
:param name: dataset name, pascal or cityscapes
:param root: root path of the dataset.
:param mode: train: supervised learning only with labeled images, no unlabeled images are leveraged.
label: pseudo labeling the remaining unlabeled images.
semi_train: semi-supervised learning with both labeled and unlabeled images.
val: validation.
:param size: crop size of training images.
:param labeled_id_path: path of labeled image ids, needed in train or semi_train mode.
:param unlabeled_id_path: path of unlabeled image ids, needed in semi_train or label mode.
:param pseudo_mask_path: path of generated pseudo masks, needed in semi_train mode.
"""
self.name = name
self.root = root
self.mode = mode
self.size = size
self.pseudo_mask_path = pseudo_mask_path
if mode == 'semi_train':
with open(labeled_id_path, 'r') as f:
self.labeled_ids = f.read().splitlines()
with open(unlabeled_id_path, 'r') as f:
self.unlabeled_ids = f.read().splitlines()
self.ids = \
self.labeled_ids * math.ceil(len(self.unlabeled_ids) / len(self.labeled_ids)) + self.unlabeled_ids
else:
if mode == 'val':
id_path = 'dataset/splits/%s/val.txt' % name
elif mode == 'label':
id_path = unlabeled_id_path
elif mode == 'train':
id_path = labeled_id_path
with open(id_path, 'r') as f:
self.ids = f.read().splitlines()
def __getitem__(self, item):
id = self.ids[item]
img = Image.open(os.path.join(self.root, id.split(' ')[0]))
if self.mode == 'val' or self.mode == 'label':
mask = Image.open(os.path.join(self.root, id.split(' ')[1]))
img, mask = normalize(img, mask)
return img, mask, id
if self.mode == 'train' or (self.mode == 'semi_train' and id in self.labeled_ids):
mask = Image.open(os.path.join(self.root, id.split(' ')[1]))
else:
# mode == 'semi_train' and the id corresponds to unlabeled image
fname = os.path.basename(id.split(' ')[1])
mask = Image.open(os.path.join(self.pseudo_mask_path, fname))
# basic augmentation on all training images
base_size = 400 if self.name == 'pascal' else 2048
img, mask = resize(img, mask, base_size, (0.5, 2.0))
img, mask = crop(img, mask, self.size)
img, mask = hflip(img, mask, p=0.5)
# strong augmentation on unlabeled images
if self.mode == 'semi_train' and id in self.unlabeled_ids:
if random.random() < 0.8:
img = transforms.ColorJitter(0.5, 0.5, 0.5, 0.25)(img)
img = transforms.RandomGrayscale(p=0.2)(img)
img = blur(img, p=0.5)
img, mask = cutout(img, mask, p=0.5)
img, mask = normalize(img, mask)
return img, mask
def __len__(self):
return len(self.ids)
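# Usage sketch (paths and crop size are illustrative; DataLoader is
# torch.utils.data.DataLoader):
#   dataset = SemiDataset('pascal', '/path/to/VOC2012', 'semi_train', 321,
#                         labeled_id_path='dataset/splits/pascal/labeled.txt',
#                         unlabeled_id_path='dataset/splits/pascal/unlabeled.txt',
#                         pseudo_mask_path='outdir/pseudo_masks')
#   loader = DataLoader(dataset, batch_size=16, shuffle=True)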
|
41523
|
import os
import trimesh
import unittest
import pocketing
import numpy as np
def get_model(file_name):
"""
Load a model from the models directory by expanding paths out.
Parameters
------------
file_name : str
Name of file in `models`
Returns
------------
mesh : trimesh.Geometry
Trimesh object or similar
"""
pwd = os.path.dirname(os.path.abspath(
os.path.expanduser(__file__)))
return trimesh.load(os.path.abspath(
os.path.join(pwd, '../models', file_name)))
class PocketTest(unittest.TestCase):
def test_contour(self):
path = get_model('wrench.dxf')
poly = path.polygons_full[0]
# generate tool paths
toolpaths = pocketing.contour.contour_parallel(poly, .05)
assert all(trimesh.util.is_shape(i, (-1, 2))
for i in toolpaths)
def test_troch(self):
path = get_model('wrench.dxf')
polygon = path.polygons_full[0]
# set radius arbitrarily
radius = .125
# set step to 10% of tool radius
step = radius * 0.10
# generate our trochoids
toolpath = pocketing.trochoidal.toolpath(
polygon, step=step)
assert trimesh.util.is_shape(toolpath, (-1, 2))
def test_archimedian(self):
# test generating a simple archimedean spiral
spiral = pocketing.spiral.archimedean(0.5, 2.0, 0.125)
assert trimesh.util.is_shape(spiral, (-1, 3, 2))
def test_helix(self):
# check a 3D helix
# set values off a tool radius
tool_radius = 0.25
radius = tool_radius * 1.2
pitch = tool_radius * 0.3
height = 2.0
# create the helix
h = pocketing.spiral.helix(
radius=radius,
height=height,
pitch=pitch,)
# should be 3-point arcs
check_arcs(h)
# heights should start and end correctly
assert np.isclose(h[0][0][2], 0.0)
assert np.isclose(h[-1][-1][2], height)
# check the flattened 2D radius
radii = np.linalg.norm(h.reshape((-1, 3))[:, :2], axis=1)
assert np.allclose(radii, radius)
def check_arcs(arcs):
# arcs should be 2D or 2D 3-point arcs
assert trimesh.util.is_shape(arcs, (-1, 3, (3, 2)))
# make sure arcs start where previous arc begins
for a, b in zip(arcs[:-1], arcs[1:]):
assert np.allclose(a[2], b[0])
if __name__ == '__main__':
unittest.main()
|
41553
|
import unittest2
import json
from datafeeds.resource_library_parser import ResourceLibraryParser
class TestResourceLibraryParser(unittest2.TestCase):
def test_parse_hall_of_fame(self):
with open('test_data/hall_of_fame.html', 'r') as f:
teams, _ = ResourceLibraryParser.parse(f.read())
# Test number of teams
self.assertEqual(len(teams), 14)
# Test team 987
team = teams[0]
self.assertEqual(team["team_id"], "frc987")
self.assertEqual(team["team_number"], 987)
self.assertEqual(team["year"], 2016)
self.assertEqual(team["video"], "wpv-9yd_CJk")
self.assertEqual(team["presentation"], "ILxVggTpXhs")
self.assertEqual(team["essay"], "https://www.firstinspires.org/sites/default/files/uploads/resource_library/frc/game-and-season-info/awards/2016/chairmans/week-five/team-987.pdf")
# Test team 597
team = teams[1]
self.assertEqual(team["team_id"], "frc597")
self.assertEqual(team["team_number"], 597)
self.assertEqual(team["year"], 2015)
self.assertEqual(team["video"], "2FKks-d6LOo")
self.assertEqual(team["presentation"], "RBXj490clow")
self.assertEqual(team["essay"], None)
# Test team 27
team = teams[2]
self.assertEqual(team["team_id"], "frc27")
self.assertEqual(team["team_number"], 27)
self.assertEqual(team["year"], 2014)
self.assertEqual(team["video"], "BCz2yTVPxbM")
self.assertEqual(team["presentation"], "1rE67fTRl98")
self.assertEqual(team["essay"], "https://www.firstinspires.org/sites/default/files/uploads/resource_library/frc/game-and-season-info/awards/2015/2014-67-chairmans-handout.pdf")
# Test team 1538
team = teams[3]
self.assertEqual(team["team_id"], "frc1538")
self.assertEqual(team["team_number"], 1538)
self.assertEqual(team["year"], 2013)
self.assertEqual(team["video"], "p62jRCMkoiw")
self.assertEqual(team["presentation"], None)
self.assertEqual(team["essay"], None)
# Test team 1114
team = teams[4]
self.assertEqual(team["team_id"], "frc1114")
self.assertEqual(team["team_number"], 1114)
self.assertEqual(team["year"], 2012)
self.assertEqual(team["video"], "VqciMgjw-SY")
self.assertEqual(team["presentation"], None)
self.assertEqual(team["essay"], None)
# Test team 359
team = teams[5]
self.assertEqual(team["team_id"], "frc359")
self.assertEqual(team["team_number"], 359)
self.assertEqual(team["year"], 2011)
self.assertEqual(team["video"], "e9IV1chHJtg")
self.assertEqual(team["presentation"], None)
self.assertEqual(team["essay"], None)
# Test team 341
team = teams[6]
self.assertEqual(team["team_id"], "frc341")
self.assertEqual(team["team_number"], 341)
self.assertEqual(team["year"], 2010)
self.assertEqual(team["video"], "-AzvT02ZCNk")
self.assertEqual(team["presentation"], None)
self.assertEqual(team["essay"], None)
# Test team 236
team = teams[7]
self.assertEqual(team["team_id"], "frc236")
self.assertEqual(team["team_number"], 236)
self.assertEqual(team["year"], 2009)
self.assertEqual(team["video"], "NmzCLohIZLg")
self.assertEqual(team["presentation"], None)
self.assertEqual(team["essay"], None)
# Test team 842
team = teams[8]
self.assertEqual(team["team_id"], "frc842")
self.assertEqual(team["team_number"], 842)
self.assertEqual(team["year"], 2008)
self.assertEqual(team["video"], "N0LMLz6LK7U")
self.assertEqual(team["presentation"], None)
self.assertEqual(team["essay"], None)
# Test team 365
team = teams[9]
self.assertEqual(team["team_id"], "frc365")
self.assertEqual(team["team_number"], 365)
self.assertEqual(team["year"], 2007)
self.assertEqual(team["video"], "f8MT7pSRXtg")
self.assertEqual(team["presentation"], None)
self.assertEqual(team["essay"], None)
# Test team 111
team = teams[10]
self.assertEqual(team["team_id"], "frc111")
self.assertEqual(team["team_number"], 111)
self.assertEqual(team["year"], 2006)
self.assertEqual(team["video"], "SfCjZMMIt0k")
self.assertEqual(team["presentation"], None)
self.assertEqual(team["essay"], None)
|
41612
|
from django.db import models
from djangae import patches # noqa
class DeferIterationMarker(models.Model):
"""
Marker to keep track of sharded defer
iteration tasks
"""
# Set to True when all shards have been deferred
is_ready = models.BooleanField(default=False)
shard_count = models.PositiveIntegerField(default=0)
shards_complete = models.PositiveIntegerField(default=0)
delete_on_completion = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
callback_name = models.CharField(max_length=100)
finalize_name = models.CharField(max_length=100)
class Meta:
app_label = "djangae"
@property
def is_finished(self):
return self.is_ready and self.shard_count == self.shards_complete
def __unicode__(self):
return "Background Task (%s -> %s) at %s" % (
self.callback_name,
self.finalize_name,
self.created
)
|
41652
|
import sublime, sublime_plugin
from os.path import join as join_path, isdir
from os import mkdir
IS_WINDOWS = sublime.platform() == 'windows'
PATH_SEPARATOR = '\\' if IS_WINDOWS else '/'
EXTENSION = '.exe' if IS_WINDOWS else ''
class VlangBuilderCommand(sublime_plugin.WindowCommand):
def run(self, **kwargs):
self.flags = kwargs.pop('flags') if 'flags' in kwargs else []
self.project = kwargs.pop('project') if 'project' in kwargs else False
action = kwargs.pop('action') if 'action' in kwargs else 'guess'
kwargs['shell_cmd'] = self.get_shell_cmd(action)
# kwargs['shell_cmd'] = 'echo ' + kwargs.get('shell_cmd')
self.window.run_command('exec', kwargs)
def get_shell_cmd(self, action: str) -> str:
parts = self.window.active_view().file_name().split(PATH_SEPARATOR)
file = '.' if self.project else parts[-1]
root = parts[-2]
is_test = '_test.v' in file
if not action and is_test:
return disabled('file')
settings = sublime.load_settings('V.sublime-settings')
if not action:
bin_name = file.split('.')[0] + EXTENSION
if root in settings.get('magic_dirs') or []:
base = PATH_SEPARATOR.join(parts[:-2])
bin_dir = join_path(base, 'bin')
if not isdir(bin_dir): mkdir(bin_dir)
bin_name = join_path(bin_dir, bin_name)
self.push_flags(False, ['-o', bin_name])
elif action == 'guess':
action = 'test' if is_test else 'run'
extension = get_extension(file)
for preset in settings.get('magic_if') or []:
exts = preset.get('extensions', [])
dirs = preset.get('directories', [])
plat = preset.get('platform', '')
flags = preset.get('flags', [])
done = False
[match_ext, excl_ext] = includes(extension, exts)
[match_dir, excl_dir] = includes(root, dirs)
if match_ext:
if excl_ext:
return disabled('platform')
elif match_dir or match_dir is None:
done = self.push_flags(done, flags)
elif match_dir:
if excl_dir:
return disabled('platform')
if match_ext is None:
self.push_flags(done, flags)
compiler = settings.get('compiler') or 'v'
return ' '.join([compiler, *self.flags, action, file])
def push_flags(self, done: bool, flags: list) -> bool:
if not done:
skip = False
for f in flags:
if skip:
skip = False
elif f in self.flags:
if f == '-o':
skip = True
else:
self.flags.append(f)
return done or len(flags) > 0
def get_extension(file: str) -> str:
"""
:examples:
get_extension('some.win.prod.v') -> 'win.prod' # TODO
get_extension('some.win.v') -> 'win'
get_extension('some.v') -> ''
"""
parts = file.split('.')[1:-1]
return '.'.join(parts)
def includes(base: str, ary: list):
if not ary: return [None, False]
excl = '!' + base in ary
return [base in ary or excl, excl]
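# Worked sketch of includes():
#   includes('win', ['win'])   # -> [True, False]  (matched, not excluded)
#   includes('win', ['!win'])  # -> [True, True]   (matched via the '!' exclusion marker)
#   includes('win', [])        # -> [None, False]  (no constraint given)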
def disabled(kind: str) -> str:
return f'echo Disabled for the current {kind}.'
|
41661
|
import sublime, sublime_plugin
import os
from ...libs import util
from ...libs import JavascriptEnhancementsExecuteOnTerminalCommand
class JavascriptEnhancementsGenerateJsdocCommand(JavascriptEnhancementsExecuteOnTerminalCommand, sublime_plugin.WindowCommand):
is_node = True
is_bin_path = True
def prepare_command(self):
jsdoc_conf_file = os.path.join(self.settings['project_dir_name'], self.settings['project_settings']['jsdoc']['conf_file'])
if os.path.isfile(jsdoc_conf_file) :
self.command = ["jsdoc", "-c", jsdoc_conf_file]
else :
sublime.error_message("JSDOC ERROR: Can't load "+jsdoc_conf_file+" file!\nConfiguration file REQUIRED!")
return
self._run()
def _run(self):
super(JavascriptEnhancementsGenerateJsdocCommand, self)._run()
def is_enabled(self):
return True if util.is_javascript_project() else False
|
41665
|
import torch.nn as nn
from architectures.position_wise_feed_forward_net import PositionWiseFeedForwardNet
from architectures.multi_head_attention import MultiHeadAttention
from architectures.add_and_norm import AddAndNorm
class TransformerEncoderBlock(nn.Module):
def __init__(self, d_model, n_heads, d_ff, dropout_proba):
super(TransformerEncoderBlock, self).__init__()
self.W_q = nn.Linear(d_model, d_model)
self.W_k = nn.Linear(d_model, d_model)
self.W_v = nn.Linear(d_model, d_model)
self.mha_layer=MultiHeadAttention(d_model, n_heads)
self.dropout_layer_1=nn.Dropout(dropout_proba)
self.add_and_norm_layer_1 = AddAndNorm(d_model)
self.ffn_layer = PositionWiseFeedForwardNet(d_model, d_ff)
self.dropout_layer_2=nn.Dropout(dropout_proba)
self.add_and_norm_layer_2 = AddAndNorm(d_model)
def forward(self, x, mask):
# x dims: (batch_size, src_seq_len, d_model)
# mask dim: (batch_size, 1, 1, src_seq_len)
q = self.W_q(x) # (batch_size, src_seq_len, d_model)
k = self.W_k(x) # (batch_size, src_seq_len, d_model)
v = self.W_v(x) # (batch_size, src_seq_len, d_model)
mha_out = self.mha_layer(q, k, v, mask) # (batch_size, src_seq_len, d_model)
mha_out= self.dropout_layer_1(mha_out) # (batch_size, src_seq_len, d_model)
mha_out = self.add_and_norm_layer_1(x, mha_out) # (batch_size, src_seq_len, d_model)
ffn_out = self.ffn_layer(mha_out) # (batch_size, src_seq_len, d_model)
ffn_out= self.dropout_layer_2(ffn_out) # (batch_size, src_seq_len, d_model)
ffn_out = self.add_and_norm_layer_2(mha_out, ffn_out) # (batch_size, src_seq_len, d_model)
return ffn_out
class TransformerEncoder(nn.Module):
def __init__(self, n_blocks, n_heads, d_model, d_ff, dropout_proba=0.1):
super(TransformerEncoder, self).__init__()
self.encoder_blocks=nn.ModuleList([TransformerEncoderBlock(d_model, n_heads, d_ff, dropout_proba) for _ in range(n_blocks)])
def forward(self, x, mask):
for encoder_block in self.encoder_blocks:
x = encoder_block(x, mask)
return x
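# A minimal usage sketch (not part of the original module; the hyperparameters below are
# illustrative assumptions): instantiate the encoder and run a forward pass on random
# embeddings with an all-ones mask, i.e. no padding positions masked out. The mask shape
# follows the comment in TransformerEncoderBlock.forward().
if __name__ == "__main__":
    import torch

    batch_size, src_seq_len, d_model = 2, 10, 512
    encoder = TransformerEncoder(n_blocks=2, n_heads=8, d_model=d_model, d_ff=2048)
    x = torch.randn(batch_size, src_seq_len, d_model)
    mask = torch.ones(batch_size, 1, 1, src_seq_len)
    out = encoder(x, mask)
    print(out.shape)  # expected: torch.Size([2, 10, 512])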
|
41674
|
from collections import Counter, defaultdict
import matplotlib as mpl
import networkx as nx
import numba
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import seaborn as sns
from fa2 import ForceAtlas2
from scipy import sparse
def to_adjacency_matrix(net):
if sparse.issparse(net):
        if isinstance(net, sparse.csr_matrix):
            # already CSR; return it together with node labels for consistency
            return net, np.arange(net.shape[0])
        return sparse.csr_matrix(net, dtype=np.float64), np.arange(net.shape[0])
elif "networkx" in "%s" % type(net):
return (
sparse.csr_matrix(nx.adjacency_matrix(net), dtype=np.float64),
net.nodes(),
)
elif "numpy.ndarray" == type(net):
return sparse.csr_matrix(net, dtype=np.float64), np.arange(net.shape[0])
def to_nxgraph(net):
if sparse.issparse(net):
return nx.from_scipy_sparse_matrix(net)
elif "networkx" in "%s" % type(net):
return net
elif "numpy.ndarray" == type(net):
return nx.from_numpy_array(net)
def set_node_colors(c, x, cmap, colored_nodes):
    node_colors = defaultdict(lambda: "#8d8d8d")
    node_edge_colors = defaultdict(lambda: "#4d4d4d")
cnt = Counter([c[d] for d in colored_nodes])
num_groups = len(cnt)
# Set up the palette
if cmap is None:
if num_groups <= 10:
cmap = sns.color_palette().as_hex()
elif num_groups <= 20:
cmap = sns.color_palette("tab20").as_hex()
else:
cmap = sns.color_palette("hls", num_groups).as_hex()
# Calc size of groups
cmap = dict(
zip(
[d[0] for d in cnt.most_common(num_groups)],
[cmap[i] for i in range(num_groups)],
)
)
bounds = np.linspace(0, 1, 11)
norm = mpl.colors.BoundaryNorm(bounds, ncolors=12, extend="both")
# Calculate the color for each node using the palette
cmap_coreness = {
k: sns.light_palette(v, n_colors=12).as_hex() for k, v in cmap.items()
}
cmap_coreness_dark = {
k: sns.dark_palette(v, n_colors=12).as_hex() for k, v in cmap.items()
}
for d in colored_nodes:
node_colors[d] = cmap_coreness[c[d]][norm(x[d]) - 1]
node_edge_colors[d] = cmap_coreness_dark[c[d]][-norm(x[d])]
return node_colors, node_edge_colors
def classify_nodes(G, c, x, max_num=None):
non_residuals = [d for d in G.nodes() if (c[d] is not None) and (x[d] is not None)]
residuals = [d for d in G.nodes() if (c[d] is None) or (x[d] is None)]
# Count the number of groups
cnt = Counter([c[d] for d in non_residuals])
cvals = np.array([d[0] for d in cnt.most_common(len(cnt))])
if max_num is not None:
cvals = set(cvals[:max_num])
else:
cvals = set(cvals)
#
colored_nodes = [d for d in non_residuals if c[d] in cvals]
muted = [d for d in non_residuals if not c[d] in cvals]
# Bring core nodes to front
order = np.argsort([x[d] for d in colored_nodes])
colored_nodes = [colored_nodes[d] for d in order]
return colored_nodes, muted, residuals
def calc_node_pos(G, iterations=300, **params):
default_params = dict(
# Behavior alternatives
outboundAttractionDistribution=False, # Dissuade hubs
linLogMode=False, # NOT IMPLEMENTED
adjustSizes=False, # Prevent overlap (NOT IMPLEMENTED)
edgeWeightInfluence=1.0,
# Performance
jitterTolerance=1.0, # Tolerance
barnesHutOptimize=True,
barnesHutTheta=1.2,
multiThreaded=False, # NOT IMPLEMENTED
# Tuning
scalingRatio=2.0,
strongGravityMode=False,
gravity=1.0,
verbose=False,
)
if params is not None:
for k, v in params.items():
default_params[k] = v
forceatlas2 = ForceAtlas2(**default_params)
return forceatlas2.forceatlas2_networkx_layout(G, pos=None, iterations=iterations)
def draw(
G,
c,
x,
ax,
draw_edge=True,
font_size=0,
pos=None,
cmap=None,
max_group_num=None,
draw_nodes_kwd={},
draw_edges_kwd={"edge_color": "#adadad"},
draw_labels_kwd={},
layout_kwd={},
):
"""Plot the core-periphery structure in the networks.
:param G: Graph
:type G: networkx.Graph
    :param c: group membership c[i] of node i
    :type c: dict
    :param x: core (x[i]=1) or periphery (x[i]=0)
    :type x: dict
:param ax: axis
:type ax: matplotlib.pyplot.ax
:param draw_edge: whether to draw edges, defaults to True
:type draw_edge: bool, optional
:param font_size: font size for node labels, defaults to 0
:type font_size: int, optional
:param pos: pos[i] is the xy coordinate of node i, defaults to None
:type pos: dict, optional
    :param cmap: colormap, defaults to None
:type cmap: matplotlib.cmap, optional
:param max_group_num: Number of groups to color, defaults to None
:type max_group_num: int, optional
:param draw_nodes_kwd: Parameter for networkx.draw_networkx_nodes, defaults to {}
:type draw_nodes_kwd: dict, optional
:param draw_edges_kwd: Parameter for networkx.draw_networkx_edges, defaults to {"edge_color": "#adadad"}
:type draw_edges_kwd: dict, optional
:param draw_labels_kwd: Parameter for networkx.draw_networkx_labels, defaults to {}
:type draw_labels_kwd: dict, optional
:param layout_kwd: layout keywords, defaults to {}
:type layout_kwd: dict, optional
:return: (ax, pos)
:rtype: matplotlib.pyplot.ax, dict
"""
# Split node into residual and non-residual
colored_nodes, muted_nodes, residuals = classify_nodes(G, c, x, max_group_num)
node_colors, node_edge_colors = set_node_colors(c, x, cmap, colored_nodes)
# Set the position of nodes
if pos is None:
pos = calc_node_pos(G, **layout_kwd)
# Draw
nodes = nx.draw_networkx_nodes(
G,
pos,
node_color=[node_colors[d] for d in colored_nodes],
nodelist=colored_nodes,
ax=ax,
# zorder=3,
**draw_nodes_kwd
)
if nodes is not None:
nodes.set_zorder(3)
nodes.set_edgecolor([node_edge_colors[r] for r in colored_nodes])
draw_nodes_kwd_residual = draw_nodes_kwd.copy()
draw_nodes_kwd_residual["node_size"] = 0.1 * draw_nodes_kwd.get("node_size", 100)
nodes = nx.draw_networkx_nodes(
G,
pos,
node_color="#efefef",
nodelist=residuals,
node_shape="s",
ax=ax,
**draw_nodes_kwd_residual
)
if nodes is not None:
nodes.set_zorder(1)
nodes.set_edgecolor("#4d4d4d")
if draw_edge:
nx.draw_networkx_edges(
G.subgraph(colored_nodes + residuals), pos, ax=ax, **draw_edges_kwd
)
if font_size > 0:
nx.draw_networkx_labels(G, pos, ax=ax, font_size=font_size, **draw_labels_kwd)
ax.axis("off")
return ax, pos
def draw_interactive(G, c, x, hover_text=None, node_size=10.0, pos=None, cmap=None):
    colored_nodes, _, _ = classify_nodes(G, c, x)
    node_colors, node_edge_colors = set_node_colors(c, x, cmap, colored_nodes)
if pos is None:
pos = nx.spring_layout(G)
nodelist = [d for d in G.nodes()]
group_ids = [c[d] if c[d] is not None else "residual" for d in nodelist]
coreness = [x[d] if x[d] is not None else "residual" for d in nodelist]
node_size_list = [(x[d] + 1) if x[d] is not None else 1 / 2 for d in nodelist]
pos_x = [pos[d][0] for d in nodelist]
pos_y = [pos[d][1] for d in nodelist]
df = pd.DataFrame(
{
"x": pos_x,
"y": pos_y,
"name": nodelist,
"group_id": group_ids,
"coreness": coreness,
"node_size": node_size_list,
}
)
df["marker"] = df["group_id"].apply(
lambda s: "circle" if s != "residual" else "square"
)
df["hovertext"] = df.apply(
lambda s: "{ht}<br>Group: {group}<br>Coreness: {coreness}".format(
ht="Node %s" % s["name"]
if hover_text is None
else hover_text.get(s["name"], ""),
group=s["group_id"],
coreness=s["coreness"],
),
axis=1,
)
fig = go.Figure(
data=go.Scatter(
x=df["x"],
y=df["y"],
marker_size=df["node_size"],
marker_symbol=df["marker"],
hovertext=df["hovertext"],
hoverlabel=dict(namelength=0),
hovertemplate="%{hovertext}",
            marker={
                "color": [node_colors[d] for d in nodelist],
                "sizeref": 1.0 / node_size,
                "line": {"color": [node_edge_colors[d] for d in nodelist], "width": 1},
            },
mode="markers",
),
)
fig.update_layout(
autosize=False,
width=800,
height=800,
template="plotly_white",
# layout=go.Layout(xaxis={"showgrid": False}, yaxis={"showgrid": True}),
)
return fig
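# A minimal usage sketch (not part of the original module; the karate-club graph and the
# degree >= 10 threshold for "core" nodes are illustrative assumptions): draw the
# core-periphery structure with a single group, passing positions explicitly so that the
# ForceAtlas2 layout step is skipped.
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    G_demo = nx.karate_club_graph()
    c_demo = {v: 0 for v in G_demo.nodes()}  # one group for all nodes
    x_demo = {v: 1.0 if G_demo.degree(v) >= 10 else 0.0 for v in G_demo.nodes()}  # coreness
    fig, ax_demo = plt.subplots(figsize=(6, 6))
    draw(G_demo, c_demo, x_demo, ax_demo, pos=nx.spring_layout(G_demo, seed=0))
    fig.savefig("core_periphery_demo.png")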
|
41706
|
import json
import logging
from django.urls import reverse
from seahub.test_utils import BaseTestCase
from tests.common.utils import randstring
from seahub.institutions.models import Institution, InstitutionAdmin
from seahub.profile.models import Profile
logger = logging.getLogger(__name__)
class AdminInstitutionUsersTest(BaseTestCase):
def setUp(self):
pass
def _add_institution(self, name=''):
return Institution.objects.create(name=name)
def _delete_institution(self, name=''):
try:
institution = Institution.objects.get(name=name)
institution.delete()
except Exception as e:
logger.error(e)
def test_can_get(self):
self.login_as(self.admin)
inst = self._add_institution('int1')
url = reverse('api-v2.1-admin-institution-users', args=[inst.id])
resp = self.client.get(url)
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert type(json_resp['user_list']) is list
inst.delete()
def test_no_permission(self):
self.logout()
self.login_as(self.admin_no_other_permission)
inst = self._add_institution('int1')
url = reverse('api-v2.1-admin-institution-users', args=[inst.id])
resp = self.client.get(url)
self.assertEqual(403, resp.status_code)
def test_can_create(self):
self.login_as(self.admin)
inst = self._add_institution('int1')
url = reverse('api-v2.1-admin-institution-users', args=[inst.id])
data = {
'email': 'invalid_email_string',
}
resp = self.client.post(url, data)
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert type(json_resp['success']) is list
assert type(json_resp['failed']) is list
class AdminInstitutionUserTest(BaseTestCase):
def setUp(self):
pass
def _add_institution(self, name=''):
return Institution.objects.create(name=name)
def _delete_institution(self, name=''):
try:
institution = Institution.objects.get(name=name)
institution.delete()
except Exception as e:
logger.error(e)
def _add_user_in_institution(self, email, inst_name):
profile = Profile.objects.get_profile_by_user(email)
if not profile:
profile = Profile.objects.add_or_update(username=email, institution=inst_name)
else:
profile.institution = inst_name
profile.save()
def test_can_update(self):
self.login_as(self.admin)
inst = self._add_institution('int1')
self._add_user_in_institution(self.user.email, inst.name)
url = reverse('api-v2.1-admin-institution-user', args=[inst.id, self.user.email])
data = 'is_institution_admin=True'
resp = self.client.put(url, data, 'application/x-www-form-urlencoded')
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp['is_institution_admin'] is True
inst.delete()
def test_can_delete(self):
self.login_as(self.admin)
inst = self._add_institution('int1')
self._add_user_in_institution(self.user.email, inst.name)
url = reverse('api-v2.1-admin-institution-user', args=[inst.id, self.user.email])
resp = self.client.delete(url)
self.assertEqual(200, resp.status_code)
inst.delete()
|
41717
|
import numpy as np
def bowl(vs, v_ref=1.0, scale=.1):
def normal(v, loc, scale):
return 1 / np.sqrt(2 * np.pi * scale**2) * np.exp( - 0.5 * np.square(v - loc) / scale**2 )
def _bowl(v):
if np.abs(v-v_ref) > 0.05:
return 2 * np.abs(v-v_ref) - 0.095
else:
return - 0.01 * normal(v, v_ref, scale) + 0.04
return np.array([_bowl(v) for v in vs])
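# A short usage sketch (illustrative values only): evaluate the bowl-shaped penalty on a
# grid of voltages around the default reference value v_ref = 1.0.
if __name__ == "__main__":
    vs = np.linspace(0.9, 1.1, 5)
    print(bowl(vs))              # default v_ref=1.0, scale=0.1
    print(bowl(vs, v_ref=1.05))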
|
41721
|
load("//bazel/rules/cpp:object.bzl", "cpp_object")
load("//bazel/rules/hcp:hcp.bzl", "hcp")
load("//bazel/rules/hcp:hcp_hdrs_derive.bzl", "hcp_hdrs_derive")
def string_tree_to_static_tree_parser(name):
#the file names to use
target_name = name + "_string_tree_parser_dat"
in_file = name + ".dat"
outfile = name + "_string_tree_parser.hcp"
#converting hcp to hpp/cpp
native.genrule(
name = target_name,
srcs = [in_file],
outs = [outfile],
tools = ["//code/programs/transcompilers/tree_hcp/string_tree_to_static_tree_parser:string_tree_to_static_tree_parser"],
cmd = "$(location //code/programs/transcompilers/tree_hcp/string_tree_to_static_tree_parser:string_tree_to_static_tree_parser) -i $(SRCS) -o $@",
)
#compile hcp file
#unique dep (TODO: dynamically decide)
static_struct_dep = "//code/utilities/code:concept_static_tree_structs"
deps = [
"//code/utilities/data_structures/tree/generic:string_tree",
"//code/utilities/data_structures/tree/generic:string_to_string_tree",
"//code/utilities/types/strings/transformers/appending:lib",
"//code/utilities/data_structures/tree/generic/tokens:tree_token",
"//code/utilities/types/vectors/observers:lib",
static_struct_dep,
]
hcp(name + "_string_tree_parser", deps)
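# Hypothetical usage sketch (the load path and target name below are assumptions, not taken
# from the original repository): invoke the macro from a BUILD file so that "my_tree.dat"
# is transcompiled into "my_tree_string_tree_parser.hcp" and compiled by the hcp rule.
#
#   load("//bazel/rules/hcp:string_tree_to_static_tree_parser.bzl", "string_tree_to_static_tree_parser")
#
#   string_tree_to_static_tree_parser(
#       name = "my_tree",
#   )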
|
41723
|
import datetime
from django.core.cache import cache
from django.test import TestCase, override_settings
from django.utils import timezone
from wagtail.core.models import Page, Site
from wagtail.tests.utils import WagtailTestUtils
from tests.app.models import NewsIndex, NewsItem
def dt(*args):
return datetime.datetime(*args, tzinfo=timezone.get_current_timezone())
def noop(x):
return x
class TestNewsList(TestCase, WagtailTestUtils):
def setUp(self):
super(TestNewsList, self).setUp()
site = Site.objects.get(is_default_site=True)
root_page = site.root_page
self.index = NewsIndex(
title='News', slug='news')
root_page.add_child(instance=self.index)
def test_index(self):
item1 = NewsItem.objects.create(
newsindex=self.index,
title='One post',
date=dt(2015, 8, 24, 0, 0, 0))
item2 = NewsItem.objects.create(
newsindex=self.index,
title='Two post',
date=dt(2015, 8, 24, 0, 0, 0))
response = self.client.get(self.index.url)
self.assertIn('newsitem_list', response.context)
self.assertQuerysetEqual(
response.context['newsitem_list'],
[item1, item2], transform=noop)
def test_archive_year(self):
NewsItem.objects.create(
newsindex=self.index,
title='2015',
date=dt(2015, 8, 24, 0, 0, 0))
item2014 = NewsItem.objects.create(
newsindex=self.index,
title='2014',
date=dt(2014, 8, 24, 0, 0, 0))
NewsItem.objects.create(
newsindex=self.index,
title='2013',
date=dt(2013, 8, 24, 0, 0, 0))
response = self.client.get(self.index.url + self.index.reverse_subpage(
'year', kwargs={'year': '2014'}))
self.assertIn('newsitem_list', response.context)
self.assertQuerysetEqual(
response.context['newsitem_list'],
[item2014], transform=noop)
def test_archive_month(self):
NewsItem.objects.create(
newsindex=self.index,
title='2015-08-24',
date=dt(2015, 8, 24, 0, 0, 0))
item = NewsItem.objects.create(
newsindex=self.index,
title='2015-07-24',
date=dt(2015, 7, 24, 0, 0, 0))
NewsItem.objects.create(
newsindex=self.index,
title='2015-06-24',
date=dt(2015, 6, 24, 0, 0, 0))
NewsItem.objects.create(
newsindex=self.index,
title='2014-07-24',
date=dt(2014, 7, 24, 0, 0, 0))
response = self.client.get(self.index.url + self.index.reverse_subpage(
'month', kwargs={'year': '2015', 'month': '7'}))
self.assertIn('newsitem_list', response.context)
self.assertQuerysetEqual(
response.context['newsitem_list'],
[item], transform=noop)
def test_archive_day(self):
NewsItem.objects.create(
newsindex=self.index,
title='2015-08-24',
date=dt(2015, 8, 24, 12, 0, 0))
item = NewsItem.objects.create(
newsindex=self.index,
title='2015-08-23',
date=dt(2015, 8, 23, 12, 0, 0))
NewsItem.objects.create(
newsindex=self.index,
title='2015-08-22',
date=dt(2015, 8, 22, 12, 0, 0))
NewsItem.objects.create(
newsindex=self.index,
title='2015-07-23',
date=dt(2015, 7, 23, 12, 0, 0))
response = self.client.get(self.index.url + self.index.reverse_subpage(
'day', kwargs={'year': '2015', 'month': '8', 'day': '23'}))
self.assertIn('newsitem_list', response.context)
self.assertQuerysetEqual(
response.context['newsitem_list'],
[item], transform=noop)
@override_settings(ALLOWED_HOSTS=['localhost', 'site-a.com', 'site-b.org'])
class TestMultipleSites(TestCase, WagtailTestUtils):
def setUp(self):
super(TestMultipleSites, self).setUp()
root = Page.objects.get(pk=1)
root_a = Page(
title='Home A', slug='home-a')
root.add_child(instance=root_a)
root_b = Page(
title='Home B', slug='home-b')
root.add_child(instance=root_b)
self.index_a = NewsIndex(title='News A', slug='news-a')
root_a.add_child(instance=self.index_a)
self.index_b = NewsIndex(title='News B', slug='news-b')
root_b.add_child(instance=self.index_b)
self.site_a = Site.objects.create(
hostname='site-a.com',
root_page=root_a)
self.site_b = Site.objects.create(
hostname='site-b.org',
root_page=root_b)
self.item_a = NewsItem.objects.create(
newsindex=self.index_a, title='Post A', date=dt(2015, 8, 1))
self.item_b = NewsItem.objects.create(
newsindex=self.index_b, title='Post B', date=dt(2015, 8, 2))
@classmethod
def tearDownClass(cls):
super(TestMultipleSites, cls).tearDownClass()
# Clear site cache when the tests finish to prevent other tests being
# polluted by a stale cache.
cache.delete('wagtail_site_root_paths')
def test_index(self):
response = self.client.get(self.index_a.url,
HTTP_HOST=self.site_a.hostname)
self.assertIn('newsitem_list', response.context)
self.assertQuerysetEqual(
response.context['newsitem_list'],
[self.item_a], transform=noop)
response = self.client.get(self.index_b.url,
HTTP_HOST=self.site_b.hostname)
self.assertIn('newsitem_list', response.context)
self.assertQuerysetEqual(
response.context['newsitem_list'],
[self.item_b], transform=noop)
def test_item_url(self):
self.assertEqual(
self.item_a.url(), 'http://{}/{}/2015/8/1/{}-{}/'.format(
self.site_a.hostname, self.index_a.slug,
self.item_a.pk, self.item_a.get_nice_url()))
self.assertEqual(
self.item_b.url(), 'http://{}/{}/2015/8/2/{}-{}/'.format(
self.site_b.hostname, self.index_b.slug,
self.item_b.pk, self.item_b.get_nice_url()))
def test_item(self):
response = self.client.get(self.item_a.url(),
HTTP_HOST=self.site_a.hostname)
self.assertEqual(response.status_code, 200)
self.assertIn('newsitem', response.context)
self.assertEqual(response.context['newsitem'], self.item_a)
response = self.client.get(self.item_b.url(),
HTTP_HOST=self.site_b.hostname)
self.assertEqual(response.status_code, 200)
self.assertIn('newsitem', response.context)
self.assertEqual(response.context['newsitem'], self.item_b)
|
41800
|
from _revkit import netlist, gate
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
def _to_qiskit(self, circuit=None, with_classical_register=False):
"""
Convert RevKit quantum circuit into Qiskit quantum circuit
:param qiskit.QuantumCircuit circuit: If not `None`, add gates to this circuit
and also use its quantum registers. If the circuit does not have enough
      qubits, the method fails. If `None` (default), a new circuit is
constructed.
:param bool with_classical_register: Add a classical register, if new circuit
is constructed (i.e., `circuit` is `None`)
    :rtype: qiskit.QuantumCircuit
"""
if circuit is None:
qr = QuantumRegister(self.num_qubits, "qr")
if with_classical_register:
cr = ClassicalRegister(self.num_qubits, "cr")
circuit = QuantumCircuit(qr, cr)
else:
circuit = QuantumCircuit(qr)
# collect all qubits from all quantum registers
qr = [q for reg in circuit.qregs for q in reg]
for g in self.gates:
if g.kind == gate.gate_type.pauli_x:
for t in g.targets:
circuit.x(qr[t])
elif g.kind == gate.gate_type.hadamard:
for t in g.targets:
circuit.h(qr[t])
elif g.kind == gate.gate_type.rotation_z:
for t in g.targets:
circuit.rz(g.angle, qr[t])
elif g.kind == gate.gate_type.cx:
ctrl = g.controls[0]
for t in g.targets:
circuit.cx(qr[int(ctrl)], qr[t])
if not bool(ctrl):
circuit.x(qr[t])
elif g.kind == gate.gate_type.mcx:
ctls = g.controls
# only at most 2 controls and no negative controls
if len(ctls) > 2: raise Exception("X gates cannot have more than 2 controls")
negs = [qr[int(q)] for q in ctls if not bool(q)]
ctls = [qr[int(q)] for q in ctls]
tgts = [qr[q] for q in g.targets]
for t in tgts[1:]:
circuit.cx(tgts[0], t)
for n in negs:
circuit.x(n)
if len(ctls) == 0:
circuit.x(tgts[0])
elif len(ctls) == 1:
circuit.cx(ctls[0], tgts[0])
else:
circuit.ccx(ctls[0], ctls[1], tgts[0])
for n in negs:
circuit.x(n)
for t in tgts[1:]:
circuit.cx(tgts[0], t)
else:
raise Exception(f"Unsupported gate type {g.kind}")
return circuit
netlist.to_qiskit = _to_qiskit
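# Hypothetical usage sketch (how a `netlist` instance is obtained depends on the RevKit
# Python API and is not shown here; `my_netlist` is an assumption): once a netlist exists,
# the method patched in above converts it to a Qiskit circuit.
#
#   qc = my_netlist.to_qiskit(with_classical_register=True)
#   print(qc.draw())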
|
41845
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from config import Config
def cuda_(var):
return var.cuda() if Config.use_gpu else var
class Net(nn.Module):
def __init__(self, state_dim, num_actions):
super(Net, self).__init__()
self.linear1 = nn.Linear(state_dim, 20)
self.linear2 = nn.Linear(20, num_actions)
# self.W1 = nn.Parameter(torch.randn(state_dim, 20))
# self.b1 = nn.Parameter(torch.randn(20))
# self.W2 = nn.Parameter(torch.randn(20, num_actions))
# self.b2 = nn.Parameter(torch.randn(num_actions))
# self.myparameters = nn.ParameterList([nn.Parameter(self.W1), nn.Parameter(self.W2),
# nn.Parameter(self.b1), nn.Parameter(self.b2)])
def forward(self, states, bit_vecs=None):
h1 = torch.tanh(self.linear1(states))
p = self.linear2(h1)
import pdb
# pdb.set_trace()
p = F.log_softmax(p, dim=1)
# if bit_vecs :
# if not isinstance(bit_vecs, torch.Tensor):
# bit_vecs = torch.tensor(bit_vecs, dtype=torch.float32, device=Config.device)
# bit_vecs.detach_()
# p = p * bit_vecs
# h1 = F.tanh((torch.matmul(states, self.W1) + self.b1))
# p = torch.matmul(h1, self.W2) + self.b2
return p
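# A minimal usage sketch (illustrative assumptions: 4-dimensional states and 3 actions;
# Config.use_gpu is left untouched): run a forward pass and check that each output row is
# a log-probability distribution over actions.
if __name__ == "__main__":
    net = Net(state_dim=4, num_actions=3)
    states = torch.randn(8, 4)
    log_probs = net(states)
    print(log_probs.shape)             # torch.Size([8, 3])
    print(log_probs.exp().sum(dim=1))  # each row sums to ~1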
|
41865
|
from wtforms import StringField
from flask_babel import lazy_gettext
from wtforms.validators import DataRequired
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_appbuilder.forms import DynamicForm
class TestForm(DynamicForm):
TestFieldOne = StringField(lazy_gettext('Test Field One'), validators=[DataRequired()], widget=BS3TextFieldWidget())
    TestFieldTwo = StringField(lazy_gettext('Test Field Two'), validators=[DataRequired()], widget=BS3TextFieldWidget())
|
41903
|
import numpy as np
class DOM:
"""
Object representing a discretized observation model. Comprised primarily by the
DOM.edges and DOM.chi vectors, which represent the discrete mask and state-dependent
emission probabilities, respectively.
"""
def __init__(self):
self.k = None
self.n_bins = None
self.edges = None
self.classes = None
self.chi = None
self.type = 'DOM'
self.n_params = None
def set_params(self, config):
"""
Set relevant parameters for DOM object.
Args:
config (dict): Parameters to set.
"""
params = {'n_bins', 'edges', 'classes', 'chi', 'n_params'}
self.__dict__.update((param, np.array(value)) for param, value in config.items() if param in params)
def initialize(self, k, stats):
"""
Initialize DOM parameters according to dataset properties.
Args:
k (int): Number of components to use
stats (dict): Dictionary of dataset sets, generated by Dataset.compute_stats()
"""
k = k + 5
qbin_sizes = 0.5 / k # Quantile sizes
qbin_edges = 0.25 + qbin_sizes*np.arange(0, k+1) # Edge locations (in quantile terms)
bin_edges = np.interp(qbin_edges, stats['quantile_basis'], stats['quantiles'])
self.k = k
self.n_bins = k + 2
self.classes = list(range(1, self.n_bins + 2))
self.edges = [-np.Inf] + [edge for edge in bin_edges] + [np.Inf]
self.chi = np.zeros((2, self.n_bins + 1))
dist = np.linspace(2, 1, self.n_bins) # Bins captured by observations
scaled_dist = 0.9 * dist / dist.sum() # Scaling by 0.9 to allow for 0.1 emission prob of NaN
self.chi[1, :-1] = scaled_dist # Paired emission dist
self.chi[0, :-1] = np.flip(scaled_dist) # Unpaired emission dist
self.chi[1, -1] = 0.1 # NaN observations
self.chi[0, -1] = 0.1 # NaN observations
self.n_params = 2*(self.n_bins-2)
def discretize(self, transcript):
"""
Compute the DOM class for all nucleotides in an RNA and save the resulting vector
to Transcript.obs_dom.
"""
# np.searchsorted is identical to the digitize call here, but marginally faster (especially
# for a large number of bins and/or a large number of RNAs).
# transcript.obs_dom = np.digitize(transcript.obs, bins=self.edges)
transcript.obs_dom = np.searchsorted(self.edges, transcript.obs, side='left')
def compute_emissions(self, transcript, reference=False):
"""
Compute emission probabilities according to the discretized observation model.
This amounts to simply accessing the correct indices of the DOM pdf matrix, chi.
Args:
transcript (src.patteRNA.Transcript.Transcript): Transcript to process
reference (bool): Whether or not it's a reference transcript
"""
if reference:
pass
transcript.B = self.chi[:, transcript.obs_dom-1]
@staticmethod
def post_process(transcript):
pass # No post-processing needed for DOM model
def m_step(self, transcript):
"""
        Compute pseudo-counts en route to updating model parameters according to a maximum-likelihood approach.
Args:
transcript (Transcript): Transcript to process
Returns:
params (dict): Partial pseudo-counts
"""
chi_0 = np.fromiter((transcript.gamma[0, transcript.obs_dom == dom_class].sum()
for dom_class in self.classes), float)
chi_1 = np.fromiter((transcript.gamma[1, transcript.obs_dom == dom_class].sum()
for dom_class in self.classes), float)
params = {'chi': np.vstack((chi_0, chi_1)),
'chi_norm': np.sum(transcript.gamma, axis=1)}
return params
def update_from_pseudocounts(self, pseudocounts, nan=False):
"""
        Update model parameters from transcript-level pseudo-counts.
Args:
pseudocounts (dict): Dictionary of total pseudo-counts
nan (bool): Whether or not to treat NaNs as informative
"""
self.chi = pseudocounts['chi'] / pseudocounts['chi_norm'][:, None]
self.scale_chi(nan=nan)
def scale_chi(self, nan=False):
"""
Scale chi vector to a probability distribution.
Args:
nan (bool): Whether or not to treat NaNs as informative
"""
if nan:
self.chi[:, :] = self.chi[:, :] / np.sum(self.chi[:, :], axis=1)[:, np.newaxis]
else:
self.chi[:, :-1] = 0.9 * self.chi[:, :-1] / np.sum(self.chi[:, :-1], axis=1)[:, np.newaxis]
self.chi[:, -1] = 0.1 # NaN observations
def snapshot(self):
"""
Returns a text summary of model parameters.
"""
text = ""
text += "{}:\n{}\n".format('chi', np.array2string(self.chi))
return text
def serialize(self):
"""
Return a dictionary containing all of the parameters needed to describe the emission model.
"""
return {'type': self.type,
'n_bins': self.n_bins,
'classes': self.classes,
'edges': self.edges,
'chi': self.chi.tolist(),
'n_params': self.n_params}
def reset(self):
"""
Reset DOM object to un-initialized state.
"""
self.edges = None
self.chi = None
self.k = None
self.n_bins = None
self.classes = None
self.n_params = None
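# A minimal usage sketch (hypothetical data): build the dataset statistics that
# DOM.initialize() expects ('quantile_basis' and 'quantiles'), initialize the model with
# k components, and print the resulting emission matrix chi.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    obs = rng.normal(size=1000)
    basis = np.linspace(0, 1, 101)
    stats = {"quantile_basis": basis, "quantiles": np.quantile(obs, basis)}
    dom = DOM()
    dom.initialize(k=2, stats=stats)
    print(dom.snapshot())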
|
41918
|
from __future__ import print_function
from itertools import product
import torch
import torch.nn as nn
import torch_mlu
from torch.nn import Parameter
import torch.nn.functional as F
import numpy as np
import sys
import os
import copy
import random
import time
import unittest
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(cur_dir+"/../../")
from common_utils import testinfo, TestCase
import logging
logging.basicConfig(level=logging.DEBUG)
torch.set_grad_enabled(False)
class TestSizeModel(nn.Module):
def __init__(self, dim):
super(TestSizeModel, self).__init__()
self.dim = dim
def forward(self, x, y):
z = x.size(self.dim)
# TODO(wangyan): test when mm fixed
return z + y
class TestSizeOp(TestCase):
@testinfo()
def test_size(self):
dim_l = [0, 3]
for dim in dim_l:
for element_type in [torch.half, torch.float, torch.int, torch.short, \
torch.long, torch.uint8, torch.int8, torch.bool]:
model = TestSizeModel(dim)
input_x = torch.rand((3,6,8,12)).to(dtype=element_type)
input_y = torch.randn((3,6,8,12))
traced_model = torch.jit.trace(model, (input_x, input_y), check_trace=False)
out_cpu = model(input_x, input_y)
input_x = input_x.to(dtype=element_type)
input_x_mlu = input_x.to('mlu')
input_y_mlu = input_y.to('mlu')
out_mlu = traced_model(input_x_mlu, input_y_mlu)
self.assertTensorsEqual(out_cpu, out_mlu.cpu(), 0.003, use_MSE = True)
if __name__ == '__main__':
unittest.main()
|
41943
|
import torch
import torch.nn as nn
__all__ = [
'ConcatEmbeddings',
'PassThrough',
'MeanOfEmbeddings',
]
class ConcatEmbeddings(nn.Module):
def __init__(self, fields):
super().__init__()
self.output_dim = sum([field.output_dim for field in fields.values()])
self.embedders = nn.ModuleList([field.build_embedder() for field in fields.values()])
def forward(self, x):
res = [embedder(values) for embedder, values in zip(self.embedders, x)]
return torch.cat(res, dim=1)
class PassThrough(nn.Module):
def forward(self, x):
return x
class MeanOfEmbeddings(nn.Module):
def __init__(self, vocab_size, emb_dim):
super().__init__()
self.emb = nn.Embedding(vocab_size, emb_dim, padding_idx=0)
def forward(self, x):
mask = (x != 0).float()[:, :, None]
emb = self.emb(x) * mask.float()
s = mask.squeeze(2).sum(1).clamp_min(1.)[:, None].float()
return emb.sum(dim=1) / s
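# A minimal usage sketch (illustrative vocabulary and embedding sizes): MeanOfEmbeddings
# averages token embeddings while ignoring the padding index 0.
if __name__ == "__main__":
    emb = MeanOfEmbeddings(vocab_size=10, emb_dim=4)
    x = torch.tensor([[1, 2, 3, 0, 0],
                      [4, 5, 0, 0, 0]])  # 0 is padding
    print(emb(x).shape)  # torch.Size([2, 4])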
|
41948
|
import json
import tempfile
from collections import OrderedDict
import os
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from utils import BoxList
from utils.pycocotools_rotation import Rotation_COCOeval
def evaluate(dataset, predictions, result_file, score_threshold=None, epoch=0):
coco_results = {}
coco_results['bbox'] = make_coco_detection(predictions, dataset, score_threshold)
results = COCOResult('bbox')
path = os.path.join(result_file, str(epoch)+'_result.json')
res = evaluate_predictions_on_coco(dataset.coco, coco_results['bbox'], path, 'bbox')
results.update(res)
# with tempfile.NamedTemporaryFile() as f:
# path = f.name
# res = evaluate_predictions_on_coco(
# dataset.coco, coco_results['bbox'], path, 'bbox'
# )
# results.update(res)
print(results)
return results.results
def evaluate_predictions_on_coco(coco_gt, results, result_file, iou_type):
with open(result_file, 'w') as f:
json.dump(results, f)
coco_dt = coco_gt.loadRes(str(result_file)) if results else COCO()
coco_eval = Rotation_COCOeval(coco_gt, coco_dt, iou_type)
coco_eval.params.iouThrs = np.linspace(.25, 0.95, int(np.round((0.95 - .25) / .05)) + 1, endpoint=True)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
score_threshold = compute_thresholds_for_classes(coco_eval)
return coco_eval
def compute_thresholds_for_classes(coco_eval):
precision = coco_eval.eval['precision']
precision = precision[0, :, :, 0, -1]
scores = coco_eval.eval['scores']
scores = scores[0, :, :, 0, -1]
recall = np.linspace(0, 1, num=precision.shape[0])
recall = recall[:, None]
f1 = (2 * precision * recall) / (np.maximum(precision + recall, 1e-6))
max_f1 = f1.max(0)
max_f1_id = f1.argmax(0)
scores = scores[max_f1_id, range(len(max_f1_id))]
print('Maximum f1 for classes:')
print(list(max_f1))
print('Score thresholds for classes')
print(list(scores))
print('')
return scores
def make_coco_detection(predictions, dataset, score_threshold=None):
coco_results = []
for id, pred in enumerate(predictions):
orig_id = dataset.id2img[id]
if len(pred) == 0:
continue
img_meta = dataset.get_image_meta(id)
pred_resize = map_to_origin_image(img_meta, pred, flipmode='no', resize_mode='letterbox')
boxes = pred_resize.bbox.tolist()
scores = pred_resize.get_field('scores').tolist()
labels = pred_resize.get_field('labels').tolist()
labels = [dataset.id2category[i] for i in labels]
if score_threshold is None:
score_threshold = [0]*len(dataset.id2category)
coco_results.extend(
[
{
'image_id': orig_id,
'category_id': labels[k],
'bbox': box,
'score': scores[k],
}
for k, box in enumerate(boxes)
if scores[k] > score_threshold[labels[k] - 1]
]
)
return coco_results
class COCOResult:
METRICS = {
'bbox': ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl'],
'segm': ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl'],
'box_proposal': [
'AR@100',
'ARs@100',
'ARm@100',
'ARl@100',
'AR@1000',
'ARs@1000',
'ARm@1000',
'ARl@1000',
],
'keypoints': ['AP', 'AP50', 'AP75', 'APm', 'APl'],
}
def __init__(self, *iou_types):
allowed_types = ("box_proposal", "bbox", "segm", "keypoints")
assert all(iou_type in allowed_types for iou_type in iou_types)
results = OrderedDict()
for iou_type in iou_types:
results[iou_type] = OrderedDict(
[(metric, -1) for metric in COCOResult.METRICS[iou_type]]
)
self.results = results
def update(self, coco_eval):
if coco_eval is None:
return
assert isinstance(coco_eval, COCOeval)
s = coco_eval.stats
iou_type = coco_eval.params.iouType
res = self.results[iou_type]
metrics = COCOResult.METRICS[iou_type]
for idx, metric in enumerate(metrics):
res[metric] = s[idx]
def __repr__(self):
return repr(self.results)
def map_to_origin_image(img_meta, pred, flipmode='no', resize_mode='letterbox'):
'''
    img_meta: dict with "id": int, "width": int, "height": int, "file_name": str
    pred: BoxList object
    flipmode: 'h' horizontal flip, 'v' vertical flip, 'no' no flip
    resize_mode: 'letterbox' or 'wrap'
'''
assert pred.mode == 'xyxyxyxy'
if flipmode == 'h':
pred = pred.transpose(0)
elif flipmode == 'v':
pred = pred.transpose(1)
elif flipmode == 'no':
pass
else:
        raise Exception("unsupported flip mode, expected 'h', 'v' or 'no'")
width = img_meta['width']
height = img_meta['height']
resized_width, resized_height = pred.size
if resize_mode == 'letterbox':
if width > height:
scale = resized_width / width
size = (resized_width, int(scale * height))
else:
scale = resized_height / height
size = (int(width * scale), resized_height)
pred_resize = BoxList(pred.bbox, size, mode='xyxyxyxy')
pred_resize._copy_extra_fields(pred)
pred_resize = pred_resize.clip_to_image(remove_empty=True)
pred_resize = pred_resize.resize((width, height))
pred_resize = pred_resize.clip_to_image(remove_empty=True)
#pred_resize = pred_resize.convert('xywh')
elif resize_mode == 'wrap':
pred_resize = pred.resize((width, height))
pred_resize = pred_resize.convert('xyxyxyxy')
pred_resize = pred_resize.clip_to_image(remove_empty=True)
else:
        raise Exception("unsupported resize mode, expected 'letterbox' or 'wrap'")
return pred_resize
|
41977
|
import unittest
import update_logs
class UpdateLogsTest(unittest.TestCase):
def test_get_new_logs_with_more_next_logs(self):
self.assertEqual(
"56789",
update_logs.get_new_logs(prev_logs="01234", next_logs="0123456789"))
def test_get_new_logs_with_more_prev_logs(self):
self.assertEqual(
"",
update_logs.get_new_logs(prev_logs="0123456789", next_logs="01234"))
def test_get_new_logs_with_no_common_logs(self):
self.assertEqual(
"56789",
update_logs.get_new_logs(prev_logs="01234", next_logs="56789"))
def test_get_new_logs_with_no_prev_logs(self):
self.assertEqual(
"0123456789",
update_logs.get_new_logs(prev_logs="", next_logs="0123456789"))
def test_get_new_logs_with_no_next_logs(self):
self.assertEqual(
"", update_logs.get_new_logs(prev_logs="01234", next_logs=""))
|
41995
|
import time
import numpy as np
import utils.measurement_subs as measurement_subs
import utils.socket_subs as socket_subs
from .do_fridge_sweep import do_fridge_sweep
from .do_device_sweep import do_device_sweep
def device_fridge_2d(
graph_proc, rpg, data_file,
read_inst, sweep_inst=[], set_inst=[],
set_value=[], pre_value=[], finish_value=[],
fridge_sweep="B", fridge_set=0.0,
device_start=0.0, device_stop=1.0, device_step=0.1, device_finish=0.0,
device_mid=[],
fridge_start=0.0, fridge_stop=1.0, fridge_rate=0.1,
delay=0, sample=1,
timeout=-1, wait=0.0,
comment="No comment!", network_dir="Z:\\DATA",
persist=True, x_custom=[]
):
"""2D data acquisition either by sweeping a device parameter
    or by sweeping a fridge parameter.
    The program decides which of these to do depending on whether
    the variable "sweep_inst" is assigned.
i.e. if "sweep_inst" is assigned the device is swept and the
fridge parameter is stepped.
If the device is being swept the variable "fridge_rate" is the size
of successive steps of either T or B.
If the fridge is being swept the first set_inst is stepped by the
"device_step"
For the case of successive B sweeps the fridge will be swept
forwards and backwards
e.g. Vg = -60 V B = -9 --> +9 T
Vg = -50 V B = +9 --> -9 T
etc ...
Note that in this case the first "set_value" will be overwritten
therefore a dummy e.g. 0.0 should be written in the case that there
are additional set_inst
"""
if sweep_inst:
sweep_device = True
else:
sweep_device = False
if fridge_sweep == "B":
b_sweep = True
else:
b_sweep = False
if not finish_value:
finish_value = list(set_value)
# We step over the x variable and sweep over the y
if sweep_device:
x_vec = np.hstack((np.arange(fridge_start, fridge_stop, fridge_rate), fridge_stop))
y_start = device_start
y_stop = device_stop
y_step = device_step
else:
x_vec = np.hstack((np.arange(device_start, device_stop, device_step), device_stop))
y_start = fridge_start
y_stop = fridge_stop
y_step = fridge_rate
    if x_custom:
x_vec = x_custom
if sweep_device:
y_len = len(measurement_subs.generate_device_sweep(
device_start, device_stop, device_step, mid=device_mid))
else:
        y_len = int(round(abs(y_start - y_stop) / y_step)) + 1  # must be an int for np.zeros below
num_of_inst = len(read_inst)
plot_2d_window = [None] * num_of_inst
view_box = [None] * num_of_inst
image_view = [None] * num_of_inst
z_array = [np.zeros((len(x_vec), y_len)) for i in range(num_of_inst)]
if sweep_device:
for i in range(num_of_inst):
plot_2d_window[i] = rpg.QtGui.QMainWindow()
plot_2d_window[i].resize(500, 500)
view_box[i] = rpg.ViewBox(invertY=True)
image_view[i] = rpg.ImageView(view=rpg.PlotItem(viewBox=view_box[i]))
plot_2d_window[i].setCentralWidget(image_view[i])
plot_2d_window[i].setWindowTitle("read_inst %d" % i)
plot_2d_window[i].show()
view_box[i].setAspectLocked(False)
y_scale = y_step
        x_scale = (x_vec[-2] - x_vec[0]) / float(len(x_vec) - 1)
for j in range(num_of_inst):
image_view[j].setImage(z_array[j], scale=(x_scale, y_scale), pos=(x_vec[0], y_start))
for i, v in enumerate(x_vec):
if sweep_device:
# sweep the device and fix T or B
if b_sweep:
data_list = do_device_sweep(
graph_proc, rpg, data_file,
sweep_inst, read_inst, set_inst=set_inst, set_value=set_value,
finish_value=finish_value, pre_value=pre_value, b_set=v, persist=False,
sweep_start=device_start, sweep_stop=device_stop, sweep_step=device_step,
sweep_finish=device_finish, sweep_mid=device_mid,
delay=delay, sample=sample, t_set=fridge_set,
timeout=timeout, wait=wait, return_data=True, make_plot=False,
comment=comment, network_dir=network_dir
)
else:
data_list = do_device_sweep(
graph_proc, rpg, data_file,
sweep_inst, read_inst, set_inst=set_inst, set_value=set_value,
finish_value=finish_value, pre_value=pre_value, b_set=fridge_set, persist=True,
sweep_start=device_start, sweep_stop=device_stop, sweep_step=device_step,
sweep_mid=device_mid,
delay=delay, sample=sample, t_set=v,
timeout=timeout, wait=wait, return_data=True, make_plot=False,
comment=comment, network_dir=network_dir
)
else:
set_value[0] = v
if i == len(x_vec) - 1:
finish_value[0] = 0.0
else:
finish_value[0] = x_vec[i + 1]
# Fix the device and sweep T or B
if b_sweep:
data_list = do_fridge_sweep(
graph_proc, rpg, data_file,
read_inst, set_inst=set_inst, set_value=set_value,
finish_value=finish_value, pre_value=pre_value,
fridge_sweep="B", fridge_set=fridge_set,
sweep_start=fridge_start, sweep_stop=fridge_stop,
sweep_rate=fridge_rate, sweep_finish=fridge_stop,
persist=False,
delay=delay, sample=sample,
timeout=timeout, wait=wait,
return_data=True,
comment=comment, network_dir=network_dir)
tmp_sweep = [fridge_start, fridge_stop]
fridge_start = tmp_sweep[1]
fridge_stop = tmp_sweep[0]
else:
data_list = do_fridge_sweep(
graph_proc, rpg, data_file,
read_inst, set_inst=set_inst, set_value=set_value,
finish_value=finish_value, pre_value=pre_value,
fridge_sweep="T", fridge_set=fridge_set,
sweep_start=fridge_start, sweep_stop=fridge_stop,
sweep_rate=fridge_rate, sweep_finish=fridge_stop,
persist=True,
delay=delay, sample=sample,
timeout=timeout, wait=wait,
return_data=True,
comment=comment, network_dir=network_dir)
if sweep_device:
for j in range(num_of_inst):
z_array[j][i, :] = data_list[j + 1]
image_view[j].setImage(z_array[j], pos=(x_vec[0], y_start), scale=(x_scale, y_scale))
m_client = socket_subs.SockClient('localhost', 18861)
time.sleep(2)
measurement_subs.socket_write(m_client, "SET 0.0 0")
time.sleep(2)
m_client.close()
time.sleep(2)
return
|
42009
|
import re
import numpy as np
import sympy as sp
import random as rd
from functools import reduce
NORMAL_VECTOR_ID = 'hyperplane_normal_vector_%s_%i'
NUM_NORMAL_VECS_ID = 'num_normal_vectors_%s'
CHAMBER_ID = 'chamber_%s_%s'
FVECTOR_ID = 'feature_vector_%s'
FVEC_ID_EX = re.compile(r'feature_vector_([\S]*)')
class HyperplaneHasher():
def __init__(self, kvstore, name, normal_vectors=None):
"""'name' is a string used for cribbing names of things to be stored
in the KeyValueStore instance 'kvstore'. 'normal_vectors' is
either a list of 1-rankal numpy arrays, all of the same rank,
or else of type None. In the latter case, normal vectors are assumed to
exist in 'kvstore', and are named NORMAL_VECTOR_ID % ('name', i),
where i is an integer."""
self.kvstore = kvstore
self.name = name
if normal_vectors is None:
self.num_normal_vectors = kvstore.get_int(
NUM_NORMAL_VECS_ID % name)
self.normal_vectors = [kvstore.get_vector(NORMAL_VECTOR_ID % (name, i))
for i in range(self.num_normal_vectors)]
else:
self.normal_vectors = normal_vectors
self.num_normal_vectors = len(normal_vectors)
self.rank = len(self.normal_vectors[0])
def _compute_num_chambers(self):
"""Computes the number of chambers defined by the hyperplanes
corresponding to the normal vectors."""
d = self.rank
n = self.num_normal_vectors
raw_cfs = sp.binomial_coefficients_list(n)
cfs = np.array([(-1)**i * raw_cfs[i] for i in range(n + 1)])
powers = np.array([max(entry, 0)
for entry in [d - k for k in range(n + 1)]])
ys = np.array([-1] * len(powers))
return (-1)**d * sum(cfs * (ys**powers))
@classmethod
def _flip_digit(cls, binary_string, i):
"""Given a string 'binary_string' of length n, each letter of
which is either '0' or '1', and an integer 0 <= i <= n-1, returns
the binary_string in which the i-th letter is flipped."""
for letter in binary_string:
if letter not in ['0', '1']:
raise ValueError(
"""Input string contains characters other than '0' and '1'.""")
if i > len(binary_string) - 1 or i < 0:
raise ValueError(
"""Argument 'i' outside range 0 <= i <= len(binary_string) - 1.""")
else:
flip_dict = {'0': '1', '1': '0'}
letters = [letter for letter in binary_string]
letters[i] = flip_dict[binary_string[i]]
return ''.join(letters)
@classmethod
def _hamming_distance(cls, bstring_1, bstring_2):
"""Given two strings of equal length, composed of only 0s and 1s, computes the
Hamming Distance between them: the number of places at which they differ."""
for pair in zip(bstring_1, bstring_2):
if not set(pair).issubset(set(['0', '1'])):
raise ValueError(
"""Input strings contain characters other than '0' and '1'.""")
if len(bstring_1) != len(bstring_2):
raise ValueError("""Lengths of input strings disagree.""")
else:
total = 0
for i in range(len(bstring_1)):
if bstring_1[i] != bstring_2[i]:
total += 1
return total
def _hamming_distance_i(self, chamber_id, i):
"""Given a chamber_id 'chamber_id' and an integer 0 <= i <= self.rank - 1,
returns the alphabetically sorted list of all chamber_ids having Hamming Distance
equal to i from 'chamber_id'."""
for letter in chamber_id:
if letter not in ['0', '1']:
raise ValueError(
"""Input string contains characters other than '0' and '1'.""")
if i < 0 or i > self.num_normal_vectors - 1:
raise ValueError(
"""Argument 'i' outside range 0 <= i <= len(binary_string) - 1.""")
if len(chamber_id) != self.num_normal_vectors:
raise ValueError("""len(chamber_id) != self.num_normal_vectors.""")
else:
result = []
cids = self._all_binary_strings()
for cid in cids:
if self._hamming_distance(chamber_id, cid) == i:
result.append(cid)
return result
def _all_binary_strings(self):
"""Returns a list of all binary strings of length
self.num_normal_vectors."""
n = self.num_normal_vectors
strings = [np.binary_repr(i) for i in range(2**n)]
return ['0' * (n - len(entry)) + entry for entry in strings]
@classmethod
    def _random_vectors(cls, num, rank):
        """This class method returns a list of length 'num' of
vectors (numpy arrays) of rank 'rank'. Both arguments
are assumed to be positive integers."""
vec_list = [
np.array([rd.random() - 0.5 for i in range(rank)]) for j in range(num)]
return vec_list
def label_chamber(self, chamber_id, label):
"""Appends the string 'label' to the set with key
'chamber_id' in self.kvstore, if such exists. If not, then
a new singleton set {'label'} is created in self.kvstore
with key 'chamber_id'. The method is idempotent."""
full_chamber_id = CHAMBER_ID % (self.name, chamber_id)
full_label_id = FVECTOR_ID % label
self.kvstore.add_to_set(full_chamber_id, full_label_id)
def bulk_label_chamber(self, chamber_ids, labels):
"""The arguments 'chamber_ids' and 'labels' must be lists of strings
of equal length, else ValueError is raised. This method produces the same result
as calling self.label_chamber(ch_id, label) for all pairs (ch_id, label) in
chamber_ids x labels, but may be faster if self.kvstore is an instance of
class DynamoDBAdapter."""
chamber_ids = [CHAMBER_ID %
(self.name, chamber_id) for chamber_id in chamber_ids]
labels = [FVECTOR_ID % label for label in labels]
self.kvstore.bulk_add_to_set(chamber_ids, labels)
def unlabel_chamber(self, chamber_id, label):
"""Removes 'label' from the set corresponding to 'chamber_id'.
Raises KeyError if 'label' is not an element of the
corresponding set."""
full_chamber_id = CHAMBER_ID % (self.name, chamber_id)
full_label_id = FVECTOR_ID % label
self.kvstore.remove_from_set(full_chamber_id, full_label_id)
def chamber_labels(self, chamber_id):
"""Returns the set of labels corresponding
to key chamber_id. Returns empty set if
chamber_id is unknown."""
try:
full_chamber_id = CHAMBER_ID % (self.name, chamber_id)
result = set([FVEC_ID_EX.findall(entry)[0] for entry in self.kvstore.get_set(
full_chamber_id) if len(FVEC_ID_EX.findall(entry)) > 0])
return result
except KeyError:
return set()
def get_chamber_id(self, vector):
"""Returns the chamber_id of the chamber to which
vector belongs. Throws a ValueError if rank(vector) differs
from the ranks of the normal vectors. The binary digits
of the chamber_id for vectors are computed in the order
given by the output of the get_normal_vectors() method."""
if len(vector) != self.rank:
raise ValueError("""len(vector) != self.rank""")
else:
PMZO = {1: 1, -1: 0}
signs = [int(np.sign(np.dot(vector, nvec)))
for nvec in self.normal_vectors]
chamber_id = ''.join([str(PMZO[entry]) for entry in signs])
return chamber_id
def get_chamber_ids(self):
"""Returns the set of all chamber ids."""
chamber_id_prefix = 'chamber_%s' % self.name
chamber_id_ex = re.compile(r'%s_([\S]*)' % chamber_id_prefix)
chamber_ids = [''.join(chamber_id_ex.findall(entry))
for entry in self.kvstore.get_set_ids()]
return set([entry for entry in chamber_ids if len(entry) > 0])
def adjacent_chamber_ids(self, chamber_id):
"""Returns the set of ids of all chambers directly adjacent
to the chamber corresponding to 'chamber_id'."""
results = set([chamber_id])
for i in range(len(chamber_id)):
results.add(self._flip_digit(chamber_id, i))
results = sorted(results)
return results
def proximal_chamber_ids(self, chamber_id, num_labels):
"""This method returns the smallest list of chamber ids proximal to
the string 'chamber_id', such that the union of the corresponding chambers
contains at least 'num_labels' labels, assumed to be a positive integer.
The list is sorted by ascending distance.
NOTE: A set S of chambers is _proximal_ to a given chamber C if
(i) C is in S, and (ii) D in S implies all chambers nearer to
        C than D are also in S. Here, the distance between two chambers
        is the Hamming distance between their ids (ties broken alphabetically)."""
total = 0
pids = []
for i in range(self.num_normal_vectors):
if total >= num_labels:
break
hdi = self._hamming_distance_i(chamber_id, i)
for j in range(len(hdi)):
if total >= num_labels:
break
next_id = hdi[j]
total += len(self.chamber_labels(next_id))
pids.append(next_id)
if total >= num_labels:
break
return pids
def proximal_chamber_labels(self, chamber_id, num_labels):
"""Finds the smallest set of proximal chambers containing
at least 'num_labels' labels, assumed to be a positive integer,
and returns the set of all labels from this."""
pcids = self.proximal_chamber_ids(chamber_id, num_labels)
labels_list = [self.chamber_labels(cid) for cid in pcids]
labels = reduce(lambda x, y: x.union(y), labels_list)
return labels
def get_normal_vectors(self):
"""Returns the list of normal vectors."""
return self.normal_vectors
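# A minimal usage sketch (illustrative only): when 'normal_vectors' is supplied directly,
# this call path never touches the key-value store, so a placeholder kvstore (here None)
# is sufficient for computing chamber ids.
if __name__ == "__main__":
    normals = HyperplaneHasher._random_vectors(4, 3)      # 4 hyperplanes in R^3
    hh = HyperplaneHasher(kvstore=None, name='demo', normal_vectors=normals)
    print(hh.get_chamber_id(np.array([0.5, -1.0, 2.0])))  # e.g. '0110' (depends on the random normals)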
|
42018
|
import graph_ltpl.offline_graph.src.gen_edges
import graph_ltpl.offline_graph.src.gen_node_skeleton
import graph_ltpl.offline_graph.src.gen_offline_cost
import graph_ltpl.offline_graph.src.main_offline_callback
import graph_ltpl.offline_graph.src.prune_graph
|
42083
|
import pandas as pd
from autogluon.utils.tabular.utils.savers import save_pd
from autogluon_utils.benchmarking.evaluation.preprocess import preprocess_openml
from autogluon_utils.benchmarking.evaluation.constants import *
def run():
results_dir = 'data/results/'
results_dir_input = results_dir + 'input/raw/original/'
results_dir_output = results_dir + 'input/prepared/openml/'
other_results_large_4h = preprocess_openml.preprocess_openml_input(path=results_dir_input + 'results_large-8c4h.csv', framework_suffix='_4h')
other_results_medium_4h = preprocess_openml.preprocess_openml_input(path=results_dir_input + 'results_medium-8c4h.csv', framework_suffix='_4h')
other_results_small_4h = preprocess_openml.preprocess_openml_input(path=results_dir_input + 'results_small-8c4h.csv', framework_suffix='_4h')
other_results_medium_1h = preprocess_openml.preprocess_openml_input(path=results_dir_input + 'results_medium-8c1h.csv', framework_suffix='_1h')
other_results_small_1h = preprocess_openml.preprocess_openml_input(path=results_dir_input + 'results_small-8c1h.csv', framework_suffix='_1h')
results_list = [other_results_large_4h, other_results_medium_4h, other_results_small_4h, other_results_medium_1h, other_results_small_1h]
results_raw = pd.concat(results_list, ignore_index=True, sort=True)
results_raw[FRAMEWORK] = ['orig_' + name[0] for name in zip(results_raw[FRAMEWORK])]
frameworks_original = [
'orig_H2OAutoML_1h',
'orig_autosklearn_1h',
'orig_TPOT_1h',
'orig_AutoWEKA_1h',
'orig_H2OAutoML_4h',
'orig_autosklearn_4h',
'orig_TPOT_4h',
'orig_AutoWEKA_4h',
]
results_original = results_raw[results_raw[FRAMEWORK].isin(frameworks_original)]
save_pd.save(path=results_dir_output + 'openml_original.csv', df=results_original)
if __name__ == '__main__':
run()
|
42092
|
from tests.functional.services.policy_engine.utils.api.conf import (
policy_engine_api_conf,
)
from tests.functional.services.utils import http_utils
def get_vulnerabilities(
    vulnerability_ids=None,
affected_package=None,
affected_package_version=None,
namespace=None,
):
if not vulnerability_ids:
raise ValueError("Cannot fetch vulnerabilities without ids")
query = {
"id": ",".join(vulnerability_ids),
"affected_package": affected_package,
"affected_package_version": affected_package_version,
"namespace": namespace,
}
vulnerabilities_resp = http_utils.http_get(
["query", "vulnerabilities"], query, config=policy_engine_api_conf
)
if vulnerabilities_resp.code != 200:
raise http_utils.RequestFailedError(
vulnerabilities_resp.url,
vulnerabilities_resp.code,
vulnerabilities_resp.body,
)
return vulnerabilities_resp
|
42095
|
from flake8_aaa.line_markers import LineMarkers
from flake8_aaa.types import LineType
def test():
result = LineMarkers(5 * [''], 7)
assert result.types == [
LineType.unprocessed,
LineType.unprocessed,
LineType.unprocessed,
LineType.unprocessed,
LineType.unprocessed,
]
assert result.lines == ['', '', '', '', '']
assert result.fn_offset == 7
|
42115
|
import platform as platform_module
import pytest
from cibuildwheel.__main__ import get_build_identifiers
from cibuildwheel.environment import parse_environment
from cibuildwheel.options import Options, _get_pinned_docker_images
from .utils import get_default_command_line_arguments
PYPROJECT_1 = """
[tool.cibuildwheel]
build = ["cp38*", "cp37*"]
environment = {FOO="BAR"}
test-command = "pyproject"
manylinux-x86_64-image = "manylinux1"
environment-pass = ["<PASSWORD>"]
[tool.cibuildwheel.macos]
test-requires = "else"
[[tool.cibuildwheel.overrides]]
select = "cp37*"
test-command = "pyproject-override"
manylinux-x86_64-image = "manylinux2014"
"""
def test_options_1(tmp_path, monkeypatch):
with tmp_path.joinpath("pyproject.toml").open("w") as f:
f.write(PYPROJECT_1)
args = get_default_command_line_arguments()
args.package_dir = str(tmp_path)
monkeypatch.setattr(platform_module, "machine", lambda: "x86_64")
options = Options(platform="linux", command_line_arguments=args)
identifiers = get_build_identifiers(
platform="linux",
build_selector=options.globals.build_selector,
architectures=options.globals.architectures,
)
override_display = """\
test_command: 'pyproject'
cp37-manylinux_x86_64: 'pyproject-override'"""
print(options.summary(identifiers))
assert override_display in options.summary(identifiers)
default_build_options = options.build_options(identifier=None)
assert default_build_options.environment == parse_environment('FOO="BAR"')
all_pinned_docker_images = _get_pinned_docker_images()
pinned_x86_64_docker_image = all_pinned_docker_images["x86_64"]
local = options.build_options("cp38-manylinux_x86_64")
assert local.manylinux_images is not None
assert local.test_command == "pyproject"
assert local.manylinux_images["x86_64"] == pinned_x86_64_docker_image["manylinux1"]
local = options.build_options("cp37-manylinux_x86_64")
assert local.manylinux_images is not None
assert local.test_command == "pyproject-override"
assert local.manylinux_images["x86_64"] == pinned_x86_64_docker_image["manylinux2014"]
def test_passthrough(tmp_path, monkeypatch):
with tmp_path.joinpath("pyproject.toml").open("w") as f:
f.write(PYPROJECT_1)
args = get_default_command_line_arguments()
args.package_dir = str(tmp_path)
monkeypatch.setattr(platform_module, "machine", lambda: "x86_64")
monkeypatch.setenv("EXAMPLE_ENV", "ONE")
options = Options(platform="linux", command_line_arguments=args)
default_build_options = options.build_options(identifier=None)
assert default_build_options.environment.as_dictionary(prev_environment={}) == {
"FOO": "BAR",
"EXAMPLE_ENV": "ONE",
}
@pytest.mark.parametrize(
"env_var_value",
[
"normal value",
'"value wrapped in quotes"',
"an unclosed single-quote: '",
'an unclosed double-quote: "',
"string\nwith\ncarriage\nreturns\n",
"a trailing backslash \\",
],
)
def test_passthrough_evil(tmp_path, monkeypatch, env_var_value):
args = get_default_command_line_arguments()
args.package_dir = str(tmp_path)
monkeypatch.setattr(platform_module, "machine", lambda: "x86_64")
monkeypatch.setenv("CIBW_ENVIRONMENT_PASS_LINUX", "ENV_VAR")
options = Options(platform="linux", command_line_arguments=args)
monkeypatch.setenv("ENV_VAR", env_var_value)
parsed_environment = options.build_options(identifier=None).environment
assert parsed_environment.as_dictionary(prev_environment={}) == {"ENV_VAR": env_var_value}
|
42143
|
from __future__ import absolute_import, unicode_literals
from django.core.management.base import BaseCommand
from molo.core.models import ArticlePage
class Command(BaseCommand):
def handle(self, **options):
ArticlePage.objects.all().update(
featured_in_latest=False,
featured_in_latest_start_date=None,
featured_in_latest_end_date=None,
featured_in_homepage=False,
featured_in_homepage_start_date=None,
featured_in_homepage_end_date=None)
|
42149
|
from itertools import chain
from nose.tools import *
from hawkweed.monads.either import Either, Left, Right, is_right,\
is_left, is_either, either, lefts, rights, partition_eithers
from hawkweed.functional.primitives import identity
def test_right():
assert_equal(Right(10).bind(identity), 10)
def test_nothing():
l = Left("failure")
assert_equal(l.bind(lambda _: "lol"), l)
def test_is_right():
assert_true(is_right(Right(10)))
assert_false(is_right(Left("no")))
assert_false(is_right(10))
def test_is_left():
assert_true(is_left(Left("yes")))
assert_false(is_left(Right(10)))
assert_false(is_left(10))
def test_is_either():
assert_true(is_either(Right(10)))
assert_true(is_either(Left("yes")))
assert_false(is_either(10))
def test_either():
v = "val"
either(lambda x: assert_equal(Left(v), x), None, Left(v))
either(None, lambda x: assert_equal(v, x), Right(v))
with assert_raises(ValueError):
either(None, None, 10)
def test_lefts():
l = [Left("failure"), Left("i died"), Left("noes")]
lr = l + [Right(1)]
assert_equal(list(lefts(lr)), l)
def test_rights():
r = [Right(x) for x in range(4)]
rl = [Left("moo")] + r
assert_equal(list(rights(rl)), r)
def test_partition_eithers():
r = [Right(x) for x in range(4)]
l = [Left(x) for x in ["failure"] * 4]
rl = list(chain.from_iterable(zip(r, l)))
assert_equal([list(x) for x in partition_eithers(rl)], [l, r])
|
42177
|
from openpathsampling.high_level.network import FixedLengthTPSNetwork
from openpathsampling.high_level.transition import FixedLengthTPSTransition
import openpathsampling as paths
class PartInBFixedLengthTPSTransition(FixedLengthTPSTransition):
"""Fixed length TPS transition accepting any frame in the final state.
Transition that builds an ensemble used to facilitate the rate
calculation in fixed-length TPS. [1]_ Details in
:class:`.PartInBFixedLengthTPSNetwork`.
See also
--------
PartInBFixedLengthTPSNetwork
References
----------
.. [1] <NAME>, <NAME>, and <NAME>. J. Chem. Phys. 110,
6617 (1999). http://dx.doi.org/10.1063/1.478569
"""
def _tps_ensemble(self, stateA, stateB):
return paths.SequentialEnsemble([
paths.LengthEnsemble(1) & paths.AllInXEnsemble(stateA),
paths.LengthEnsemble(self.length - 1) \
& paths.PartInXEnsemble(stateB)
])
class PartInBFixedLengthTPSNetwork(FixedLengthTPSNetwork):
"""Network for fixed-length TPS accepting any frame in the final state
This network samples a single path ensemble where the paths must begin
in an initial state, run for a fixed total number of frames, and must
have at least one frame in a final state. This was used to assist in
the flux part of the TPS rate calculation. [1]_ This version is
generalized to multiple states.
Parameters
----------
    initial_states : (list of) :class:`.Volume`
acceptable initial states
final_states : (list of) :class:`.Volume`
acceptable final states
length : int
length of paths in the path ensemble, in frames
allow_self_transitions : bool
whether self-transitions (A->A) are allowed; default is False. For
this network, A->B->A transitions are *always* allowed.
References
----------
    .. [1] <NAME>, <NAME>, and <NAME>. J. Chem. Phys. 110,
6617 (1999). http://dx.doi.org/10.1063/1.478569
"""
TransitionType = PartInBFixedLengthTPSTransition
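# A minimal usage sketch (not part of the original module; `state_A`, `state_B`
# and the path length are hypothetical names):
#
#     network = PartInBFixedLengthTPSNetwork(state_A, state_B, length=400,
#                                            allow_self_transitions=False)
#
# mirroring the parameters documented in the class docstring above.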
|
42190
|
from collections import deque
import networkx as nx
import numpy as np
def random_subtree(T, alpha, beta, subtree_mark):
""" Random subtree of T according to Algorithm X in [1].
Args:
alpha (float): probability of continuing to a neighbor
beta (float): probability of non empty subtree
T (NetworkX graph): the tree of which the subtree is taken
Returns:
A subtree of T
References:
[1] <NAME>., <NAME>. Pavlenko Bayesian structure learning in graphical models using sequential Monte Carlo.
"""
    # Take the empty subtree with probability 1 - beta
empty = np.random.multinomial(1, [beta, 1-beta]).argmax()
subtree_edges = []
subtree_nodes = []
if empty == 1:
separators = {}
subtree = nx.Graph()
return (subtree, [], [], {}, separators, 1-beta)
# Take non-empty subtree
n = T.order()
w = 0.0
visited = set() # cliques
q = deque([])
start = np.random.randint(n) # then n means new component
separators = {}
#start_node = T.nodes()[start] # nx < 2.x
start_node = list(T.nodes())[start] # nx > 2.x
q.append(start_node)
subtree_adjlist = {start_node: []}
while len(q) > 0:
node = q.popleft()
visited.add(node)
subtree_nodes.append(node)
#T.node[node]["subnode"] = subtree_mark
for neig in T.neighbors(node):
b = np.random.multinomial(1, [1-alpha, alpha]).argmax()
if neig not in visited:
if b == 1:
subtree_edges.append((node, neig))
subtree_adjlist[node].append(neig)
subtree_adjlist[neig] = [node]
q.append(neig)
# Add separator
sep = neig & node
if not sep in separators:
separators[sep] = []
separators[sep].append((neig, node))
else:
w += 1
# subtree = T.subgraph(subtree_nodes)
# assert subtree_nodes in T.nodes()
subtree = None
v = len(subtree_nodes)
    probtree = beta * v * np.power(alpha, v-1) / float(n)
probtree *= np.power(1-alpha, w)
return (subtree, subtree_nodes, subtree_edges, subtree_adjlist, separators, probtree)
def pdf(subtree, T, alpha, beta):
""" Returns the probability of the subtree subtree generated by
random_subtree(T, alpha, beta).
Args:
T (NetworkX graph): A tree
subtree (NetworkX graph): a subtree of T drawn by the subtree kernel
alpha (float): Subtree kernel parameter
beta (float): Subtree kernel parameter
Returns:
float
"""
p = subtree.order()
if p == 0:
return 1.0 - beta
forest = T.subgraph(set(T.nodes()) - set(subtree.nodes()))
    components = nx.connected_components(forest)
w = float(len(list(components)))
v = float(subtree.order())
alpha = float(alpha)
beta = float(beta)
n = float(T.order())
prob = beta * v * np.power(alpha, v-1) * np.power(1-alpha, w) / n
return prob
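if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module. The separator
    # computation above (`neig & node`) assumes tree nodes that support set
    # intersection, as the clique nodes of a junction tree do, so this toy
    # example uses frozenset nodes.
    T = nx.Graph()
    a, b, c = frozenset({1, 2}), frozenset({2, 3}), frozenset({3, 4})
    T.add_edges_from([(a, b), (b, c)])
    _, nodes, edges, adjlist, seps, prob = random_subtree(T, alpha=0.5, beta=0.9, subtree_mark=1)
    print("subtree nodes:", nodes)
    print("subtree edges:", edges)
    print("sampling probability:", prob)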
|
42194
|
from django.apps import AppConfig
class ClubADMConfig(AppConfig):
name = "clubadm"
verbose_name = "Клуб анонимных Дедов Морозов"
|
42237
|
countries = {
"kabul": "afghanistan",
"tirana": "albania",
"alger": "algeria",
"fagatogo": "american samoa",
"andorra la vella": "andorra",
"luanda": "angola",
"the valley": "anguilla",
"null": "united states minor outlying islands",
"saint john's": "antigua and barbuda",
"buenos aires": "argentina",
"yerevan": "armenia",
"oranjestad": "aruba",
"canberra": "australia",
"wien": "austria",
"baku": "azerbaijan",
"nassau": "bahamas",
"al-manama": "bahrain",
"dhaka": "bangladesh",
"bridgetown": "barbados",
"minsk": "belarus",
"bruxelles [brussel]": "belgium",
"belmopan": "belize",
"porto-novo": "benin",
"hamilton": "bermuda",
"thimphu": "bhutan",
"la paz": "bolivia",
"sarajevo": "bosnia and herzegovina",
"gaborone": "botswana",
"brasília": "brazil",
"<NAME>": "brunei",
"sofia": "bulgaria",
"ouagadougou": "burkina faso",
"bujumbura": "burundi",
"phnom penh": "cambodia",
"yaound": "cameroon",
"ottawa": "canada",
"praia": "cape verde",
"george town": "cayman islands",
"bangui": "central african republic",
"n'djam": "chad",
"santiago de chile": "chile",
"peking": "china",
"flying fish cove": "christmas island",
"west island": "cocos (keeling) islands",
"santaf": "colombia",
"moroni": "comoros",
"brazzaville": "congo",
"avarua": "cook islands",
"san jos": "costa rica",
"zagreb": "croatia",
"la habana": "cuba",
"nicosia": "cyprus",
"praha": "czech republic",
"copenhagen": "denmark",
"djibouti": "djibouti",
"roseau": "dominica",
"santo domingo de guzm": "dominican republic",
"dili": "east timor",
"quito": "ecuador",
"cairo": "egypt",
"san salvador": "el salvador",
"london": "united kingdom",
"malabo": "equatorial guinea",
"asmara": "eritrea",
"tallinn": "estonia",
"addis abeba": "ethiopia",
"stanley": "falkland islands",
"tórshavn": "faroe islands",
"suva": "fiji islands",
"helsinki [helsingfors]": "finland",
"paris": "france",
"cayenne": "french guiana",
"papeete": "french polynesia",
"libreville": "gabon",
"banjul": "gambia",
"tbilisi": "georgia",
"berlin": "germany",
"accra": "ghana",
"gibraltar": "gibraltar",
"athenai": "greece",
"nuuk": "greenland",
"saint george's": "grenada",
"basse-terre": "guadeloupe",
"aga": "guam",
"ciudad de guatemala": "guatemala",
"conakry": "guinea",
"bissau": "guinea-bissau",
"georgetown": "guyana",
"port-au-prince": "haiti",
"citt": "holy see (vatican capital state)",
"tegucigalpa": "honduras",
"victoria": "seychelles",
"budapest": "hungary",
"reykjav": "iceland",
"new delhi": "india",
"jakarta": "indonesia",
"tehran": "iran",
"baghdad": "iraq",
"dublin": "ireland",
"jerusalem": "israel",
"roma": "italy",
"yamoussoukro": "ivory coast",
"kingston": "norfolk island",
"tokyo": "japan",
"amman": "jordan",
"astana": "kazakhstan",
"nairobi": "kenya",
"bairiki": "kiribati",
"kuwait": "kuwait",
"bishkek": "kyrgyzstan",
"vientiane": "laos",
"riga": "latvia",
"beirut": "lebanon",
"maseru": "lesotho",
"monrovia": "liberia",
"tripoli": "libyan arab jamahiriya",
"vaduz": "liechtenstein",
"vilnius": "lithuania",
"luxembourg [luxemburg/l": "luxembourg",
"macao": "macao",
"skopje": "north macedonia",
"antananarivo": "madagascar",
"lilongwe": "malawi",
"kuala lumpur": "malaysia",
"male": "maldives",
"bamako": "mali",
"valletta": "malta",
"dalap-uliga-darrit": "marshall islands",
"fort-de-france": "martinique",
"nouakchott": "mauritania",
"port-louis": "mauritius",
"mamoutzou": "mayotte",
"ciudad de m": "mexico",
"palikir": "micronesia, federated states of",
"chisinau": "moldova",
"monaco-ville": "monaco",
"ulan bator": "mongolia",
"plymouth": "montserrat",
"rabat": "morocco",
"maputo": "mozambique",
"rangoon (yangon)": "myanmar",
"windhoek": "namibia",
"yaren": "nauru",
"kathmandu": "nepal",
"amsterdam": "netherlands",
"willemstad": "netherlands antilles",
"noum": "new caledonia",
"wellington": "new zealand",
"managua": "nicaragua",
"niamey": "niger",
"abuja": "nigeria",
"alofi": "niue",
"pyongyang": "north korea",
"belfast": "northern ireland",
"garapan": "northern mariana islands",
"oslo": "norway",
"masqat": "oman",
"islamabad": "pakistan",
"koror": "palau",
"gaza": "palestine",
"ciudad de panam": "panama",
"port moresby": "papua new guinea",
"asunci": "paraguay",
"lima": "peru",
"manila": "philippines",
"adamstown": "pitcairn",
"warszawa": "poland",
"lisboa": "portugal",
"san juan": "puerto rico",
"doha": "qatar",
"saint-denis": "reunion",
"bucuresti": "romania",
"moscow": "russian federation",
"kigali": "rwanda",
"jamestown": "saint helena",
"basseterre": "saint kitts and nevis",
"castries": "saint lucia",
"saint-pierre": "saint pierre and miquelon",
"kingstown": "saint vincent and the grenadines",
"apia": "samoa",
"san marino": "san marino",
"s": "sao tome and principe",
"riyadh": "saudi arabia",
"edinburgh": "scotland",
"dakar": "senegal",
"freetown": "sierra leone",
"singapore": "singapore",
"bratislava": "slovakia",
"ljubljana": "slovenia",
"honiara": "solomon islands",
"mogadishu": "somalia",
"pretoria": "south africa",
"seoul": "south korea",
"juba": "south sudan",
"madrid": "spain",
"khartum": "sudan",
"paramaribo": "suriname",
"longyearbyen": "svalbard and jan mayen",
"mbabane": "swaziland",
"stockholm": "sweden",
"bern": "switzerland",
"damascus": "syria",
"dushanbe": "tajikistan",
"dodoma": "tanzania",
"bangkok": "thailand",
"lom": "togo",
"fakaofo": "tokelau",
"nuku'alofa": "tonga",
"port-of-spain": "trinidad and tobago",
"tunis": "tunisia",
"ankara": "turkey",
"ashgabat": "turkmenistan",
"cockburn town": "turks and caicos islands",
"funafuti": "tuvalu",
"kampala": "uganda",
"kyiv": "ukraine",
"abu dhabi": "united arab emirates",
"washington": "united states",
"montevideo": "uruguay",
"toskent": "uzbekistan",
"port-vila": "vanuatu",
"caracas": "venezuela",
"hanoi": "vietnam",
"road town": "virgin islands, british",
"charlotte amalie": "virgin islands, u.s.",
"cardiff": "wales",
"mata-utu": "wallis and futuna",
"el-aai": "western sahara",
"sanaa": "yemen",
"beograd": "yugoslavia",
"lusaka": "zambia",
"harare": "zimbabwe"
}
|
42298
|
import os
import shutil
import json
import pandas as pd
import ast
import numpy as np
from utils.read_convergence import plot_convergence, parse, get_cffl_best
fairness_keys = [
'standalone_vs_fedavg_mean',
'standalone_vs_rrdssgd_mean',
'standalone_vs_final_mean',
]
def collect_and_compile_performance(dirname):
fairness_rows = []
performance_rows = []
for folder in os.listdir(dirname):
if os.path.isfile(os.path.join(dirname, folder)) or not 'complete.txt' in os.listdir(os.path.join(dirname, folder)):
continue
setup = parse(dirname, folder)
n_participants = setup['P']
fl_epochs = setup['Communication Rounds']
theta = setup['theta']
try:
with open(os.path.join(dirname, folder, 'aggregate_dict.txt')) as dict_log:
aggregate_dict = json.loads(dict_log.read())
with open(os.path.join(dirname, folder, 'aggregate_dict_pretrain.txt')) as dict_log:
aggregate_dict_pretrain = json.loads(dict_log.read())
f_data_row = ['P' + str(n_participants) + '_' + str(theta)] + [aggregate_dict[f_key][0] for f_key in fairness_keys]
f_data_row.append(aggregate_dict_pretrain['standalone_vs_final_mean'][0])
p_data_row = ['P' + str(n_participants) + '_' + str(theta)] + [aggregate_dict['rr_fedavg_best'][0],
aggregate_dict['rr_dssgd_best'][0],
aggregate_dict['standalone_best_participant'][0],
aggregate_dict['CFFL_best_participant'][0],
aggregate_dict_pretrain['CFFL_best_participant'][0]
]
fairness_rows.append(f_data_row)
performance_rows.append(p_data_row)
except Exception as e:
print("Compiling fairness and accuracy csvs")
print(e)
shorthand_f_keys = ['Fedavg', 'DSSGD', 'CFFL', 'CFFL pretrain']
fair_df = pd.DataFrame(fairness_rows, columns=[' '] + shorthand_f_keys).set_index(' ')
fair_df = fair_df.sort_values(' ')
print(fair_df.to_markdown())
print(os.path.join(dirname, 'fairness.csv'))
fair_df.to_csv( os.path.join(dirname, 'fairness.csv'))
shorthand_p_keys = ['Fedavg', 'DSSGD', 'Standalone', 'CFFL', 'CFFL pretrain']
pd.options.display.float_format = '{:,.2f}'.format
perf_df = pd.DataFrame(performance_rows, columns=[' '] + shorthand_p_keys).set_index(' ').T
perf_df = perf_df[sorted(perf_df.columns)]
print(perf_df.to_markdown())
perf_df.to_csv( os.path.join(dirname, 'performance.csv'))
return fair_df, perf_df
def collate_pngs(dirname):
os.makedirs(os.path.join(dirname, 'figures'), exist_ok=True)
figures_dir = os.path.join(dirname, 'figures')
for directory in os.listdir(dirname):
if os.path.isfile(os.path.join(dirname, directory)) or not 'complete.txt' in os.listdir(os.path.join(dirname, directory)):
continue
setup = parse(dirname, directory)
subdir = os.path.join(dirname, directory)
figure_name = '{}_{}_p{}e{}_cffl_localepoch{}_localbatch{}_lr{}_upload{}_pretrain0.png'.format(
setup['dataset'], setup['model'],
setup['P'], setup['Communication Rounds'],
setup['E'], setup['B'],
str(setup['lr']).replace('.', ''),
str(setup['theta']).replace('.', '').rstrip('0'))
pastfig_name = figure_name.replace('_pretrain0','')
if os.path.exists(os.path.join(figures_dir, pastfig_name)):
os.remove(os.path.join(figures_dir, pastfig_name))
shutil.copy(os.path.join(subdir,'figure.png'), os.path.join(figures_dir, figure_name) )
shutil.copy(os.path.join(subdir,'figure_pretrain.png'), os.path.join(figures_dir, figure_name.replace('pretrain0','pretrain1')) )
standalone_name = '{}_{}_p{}e{}_standalone.png'.format(
setup['dataset'], setup['model'],
setup['P'], setup['Communication Rounds'])
shutil.copy(os.path.join(subdir,'standlone.png'), os.path.join(figures_dir, standalone_name) )
convergence_name = '{}_{}_p{}e{}_upload{}_convergence.png'.format(
setup['dataset'], setup['model'],
setup['P'], setup['Communication Rounds'],
str(setup['theta']).replace('.', '').rstrip('0'))
shutil.copy(os.path.join(subdir,'convergence_for_one.png'), os.path.join(figures_dir, convergence_name) )
return
def examine(dirname):
experiment_results = plot_convergence(dirname)
collate_pngs(dirname)
fair_df, perf_df = collect_and_compile_performance(dirname)
if __name__ == '__main__':
"""
Give the directory to the experiment to dirname
"""
dirname = 'cifar10/Experiments_2020-08-06-01:21'
examine(dirname)
|
42324
|
import os
directory="/home/pi/Desktop/samplepacks/"
sampleList=[["test","test"]]
def main():
for file in os.listdir(directory):
fullPath = directory + file
if os.path.isdir(fullPath):
#print
#print "directory: ",file
#print fullPath
containsAif=0
#each folder in parent directory
for subfile in os.listdir(fullPath):
subfullPath=fullPath+"/"+subfile
#a path within a path
#print "SUBFILE: ",subfile
if os.path.isdir(subfullPath):
                    if subfile == "synth" or subfile == "drum":
#print "nested directories, but it's okay cuz you named them"
readAifDir(subfile,subfullPath)
elif subfile.endswith(".aif") or subfile.endswith(".aiff"):
containsAif=1
elif subfile.endswith(".DS_Store"):
continue
else:
print "what's going on here. name your folders or hold it with the nesting"
print "SUBFILE: ",subfile
if containsAif==1:
readAifDir(file,fullPath)
# else:
# sampleList.append([file,fullPath]) #adds file andfullpath to samplelist
# #if file.endswith(".atm") or file.endswith(".py"):
if ['test', 'test'] in sampleList:
sampleList.remove(['test','test'])
#print sampleList
# for sample in sampleList:
# print
# print sample[1] #fullpath
# atts=readAif(sample[1]) #reads aiff and gets attributes!
# print atts['type']
# #print atts
def readAifDir(name,path):
#should return amount of .aif's found in dir
aifsampleList=[["a","a"]]
print
print "readAif directory: ",name
print path
for file in os.listdir(path):
fullPath=path+"/"+file
        if file.endswith('.aif') or file.endswith(".aiff"):
#print "aif found at file: ",fullPath
atts=readAif(fullPath)
aifsampleList.append([file,fullPath])
#print atts['type']
elif file.endswith(".DS_Store"):
#ignore .DS_Store mac files
continue
else:
            print fullPath, " is not an aif. what gives?"
if ["a","a"] in aifsampleList:
aifsampleList.remove(["a","a"])
for sample in aifsampleList:
print sample[1] #fullpath
atts=readAif(sample[1]) #reads aiff and gets attributes!
print atts['type']
#print atts
def readAif(path):
#print "//READAIFF from file ", path
#print
# SAMPLE DRUM AIFF METADATA
# /home/pi/Desktop/samplepacks/kits1/rz1.aif
# drum_version : 1
# type : drum
# name : user
# octave : 0
# pitch : ['0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0']
# start : ['0', '24035422', '48070845', '86012969', '123955093', '144951088', '175722759', '206494430', '248851638', '268402991', '312444261', '428603973', '474613364', '601936581', '729259799', '860697810', '992135821', '1018188060', '1044240299', '1759004990', '1783040413', '1820982537', '1845017959', '1882960084']
# end : ['24031364', '48066787', '86008911', '123951035', '144947030', '175718701', '206490372', '248847580', '268398933', '312440203', '428599915', '474609306', '601932523', '729255741', '860693752', '992131763', '1018184002', '1044236241', '1759000932', '1783036355', '1820978479', '1845013902', '1882956026', '1906991448']
# playmode : ['8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192']
# reverse : ['8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192']
# volume : ['8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192', '8192']
# dyna_env : ['0', '8192', '0', '8192', '0', '0', '0', '0']
# fx_active : false
# fx_type : delay
# fx_params : ['8000', '8000', '8000', '8000', '8000', '8000', '8000', '8000']
# lfo_active : false
# lfo_type : tremolo
# lfo_params : ['16000', '16000', '16000', '16000', '0', '0', '0', '0']
# SAMPLE SYNTH METADATA
# /home/pi/Desktop/samplepacks/C-MIX/mtrap.aif
# adsr : ['64', '10746', '32767', '14096', '4000', '64', '4000', '4000']
# base_freq : 440.0
# fx_active : true
# fx_params : ['64', '0', '18063', '16000', '0', '0', '0', '0']
# fx_type : nitro
# knobs : ['0', '2193', '2540', '4311', '12000', '12288', '28672', '8192']
# lfo_active : false
# lfo_params : ['16000', '0', '0', '16000', '0', '0', '0', '0']
# lfo_type : tremolo
# name : mtrap
# octave : 0
# synth_version : 2
# type : sampler
attdata={}
with open(path,'rb') as fp:
line=fp.readline()
#print line
if 'op-1' in line:
#print
#print 'op-1 appl chunk found!'
#print subline=line.split("op-1")
# subline=line.split("op-1")[0]
# print subline[1]
data=line.split('{', 1)[1].split('}')[0] #data is everything in brackets
#print
#print "data!"
#print data
data=switchBrack(data,",","|")
attlist=data.split(",")
#print
#print "attlist"
#print attlist
#print
#print "attname: attvalue"
for i,line in enumerate(attlist):
#print line
linesplit=line.split(":")
attname=linesplit[0]
attname=attname[1:-1]
attvalue=linesplit[1]
valtype=""
#print attvalue
if isInt(attvalue):
valtype='int'
if isfloat(attvalue):
valtype='float'
if attvalue=="false" or attvalue=="true":
valtype='bool'
for j,char in enumerate(list(attvalue)):
#print "j,char"
#print j, char
if valtype=="":
if char=='"':
#print "string: ",char
valtype="string"
elif char=="[":
valtype="list"
if valtype=="":
valtype="no type detected"
elif valtype=="string":
attvalue=attvalue[1:-1]
elif valtype=="list":
attvalue=attvalue[1:-1]
attvalue=attvalue.split("|")
#print "list found"
# for k,item in enumerate(attvalue):
# print k,item
#attvalue[k]=
#print attvalue[1]
#print attname,":",attvalue
#print valtype
#print
attdata.update({attname:attvalue})
#print attdata['type']
if 'type' in attdata:
#print "type exists"
True
else:
#print "type doesn't exist"
attdata.update({'type':'not specified'})
#except:
# attdata.update({'type':'not specified'})
return attdata
# attdata[attname]=value
#print attdata
def isInt(s):
try:
int(s)
return True
except ValueError:
return False
def isfloat(s):
try:
float(s)
return True
except ValueError:
return False
def switchBrack(data,fromdelim,todelim):
datalist=list(data)
inbrack=0
for i,char in enumerate(datalist):
#print i, " ",char
if char=="[":
inbrack=1
#print "in brackets"
if char=="]":
inbrack=0
#print "out of brackets"
if inbrack ==1:
if char==fromdelim:
#print "comma found!"
if data[i-1].isdigit():
#print "num preceding comma found"
datalist[i]=todelim
newdata="".join(datalist)
#print newdata
return newdata
main()
|
42356
|
from typing import Optional
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras import Model
from tensorflow.keras.layers import Layer
import numpy as np
import rinokeras as rk
from rinokeras.layers import WeightNormDense as Dense
from rinokeras.layers import LayerNorm, Stack
class RandomReplaceMask(Layer):
""" Copied from rinokeras because we're going to potentially have
different replace masks.
Replaces some percentage of the input with a mask token. Used for
implementing style models. This is actually slightly more complex - it
does one of three things
Based on https://arxiv.org/abs/1810.04805.
Args:
percentage (float): Percentage of input tokens to mask
mask_token (int): Token to replace masked input with
"""
def __init__(self,
percentage: float,
mask_token: int,
n_symbols: Optional[int] = None,
*args, **kwargs) -> None:
super().__init__(*args, **kwargs)
if not 0 <= percentage < 1:
raise ValueError("Masking percentage must be in [0, 1).\
Received {}".format(percentage))
self.percentage = percentage
self.mask_token = mask_token
self.n_symbols = n_symbols
def _generate_bert_mask(self, inputs):
mask_shape = K.shape(inputs)
bert_mask = K.random_uniform(mask_shape) < self.percentage
return bert_mask
def call(self,
inputs: tf.Tensor,
mask: Optional[tf.Tensor] = None):
"""
Args:
inputs (tf.Tensor[ndims=2, int]): Tensor of values to mask
mask (Optional[tf.Tensor[bool]]): Locations in the inputs to that are valid
(i.e. not padding, start tokens, etc.)
Returns:
masked_inputs (tf.Tensor[ndims=2, int]): Tensor of masked values
bert_mask: Locations in the input that were masked
"""
bert_mask = self._generate_bert_mask(inputs)
if mask is not None:
bert_mask &= mask
masked_inputs = inputs * tf.cast(~bert_mask, inputs.dtype)
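        # Split the masked positions following the BERT-style corruption
        # referenced in the docstring: most get the mask token, a small
        # fraction of the remainder get a random symbol drawn from n_symbols,
        # and the rest keep their original value.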
token_bert_mask = K.random_uniform(K.shape(bert_mask)) < 0.8
random_bert_mask = (K.random_uniform(
K.shape(bert_mask)) < 0.1) & ~token_bert_mask
true_bert_mask = ~token_bert_mask & ~random_bert_mask
token_bert_mask = tf.cast(token_bert_mask & bert_mask, inputs.dtype)
random_bert_mask = tf.cast(random_bert_mask & bert_mask, inputs.dtype)
true_bert_mask = tf.cast(true_bert_mask & bert_mask, inputs.dtype)
masked_inputs += self.mask_token * token_bert_mask # type: ignore
masked_inputs += K.random_uniform(
K.shape(bert_mask), 0, self.n_symbols, dtype=inputs.dtype) * random_bert_mask
masked_inputs += inputs * true_bert_mask
return masked_inputs, bert_mask
class ContiguousReplaceMask(Layer):
""" Copied from rinokeras because we're going to potentially have
different replace masks.
Replaces some percentage of the input with a mask token. Used for
implementing style models. This is actually slightly more complex - it
does one of three things
Based on https://arxiv.org/abs/1810.04805.
Args:
percentage (float): Percentage of input tokens to mask
mask_token (int): Token to replace masked input with
"""
def __init__(self,
percentage: float,
mask_token: int,
n_symbols: Optional[int] = None,
avg_seq_len: int = 3,
*args, **kwargs) -> None:
super().__init__(*args, **kwargs)
if not 0 <= percentage < 1:
raise ValueError("Masking percentage must be in [0, 1).\
Received {}".format(percentage))
self.percentage = percentage
self.mask_token = mask_token
self.avg_seq_len = avg_seq_len
self.n_symbols = n_symbols
def _generate_bert_mask(self, inputs):
def _numpy_generate_contiguous_mask(array):
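            # Draw span boundaries with probability 1/avg_seq_len, label each
            # contiguous span via a cumulative sum, then select whole spans at
            # random so masking happens in contiguous chunks rather than at
            # independent positions.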
mask = np.random.random(array.shape) < (1 / self.avg_seq_len)
mask = np.cumsum(mask, 1)
seqvals = np.max(mask)
mask_prob = self.percentage * array.shape[1] / seqvals # increase probability because fewer sequences
vals_to_mask = np.arange(seqvals)[np.random.random((seqvals,)) < mask_prob]
indices_to_mask = np.isin(mask, vals_to_mask)
mask[indices_to_mask] = 1
mask[~indices_to_mask] = 0
            return np.asarray(mask, bool)
bert_mask = tf.py_func(_numpy_generate_contiguous_mask, [inputs], tf.bool)
bert_mask.set_shape(inputs.shape)
return bert_mask
class RandomSequenceMask(Model):
def __init__(self,
n_symbols: int,
mask_token: int,
mask_percentage: float = 0.15,
mask_type: str = 'random'):
super().__init__()
if mask_type == 'random':
self.bert_mask = RandomReplaceMask(mask_percentage, mask_token, n_symbols)
elif mask_type == 'contiguous':
self.bert_mask = ContiguousReplaceMask(mask_percentage, mask_token, n_symbols)
else:
raise ValueError("Unrecognized mask_type: {}".format(mask_type))
def call(self, inputs):
"""
Args:
sequence: tf.Tensor[int32] - Amino acid sequence,
a padded tensor with shape [batch_size, MAX_PROTEIN_LENGTH]
protein_length: tf.Tensor[int32] - Length of each protein in the sequence, a tensor with shape [batch_size]
Output:
amino_acid_probs: tf.Tensor[float32] - Probability of each type of amino acid,
a tensor with shape [batch_size, MAX_PROTEIN_LENGTH, n_symbols]
"""
sequence = inputs['primary']
protein_length = inputs['protein_length']
sequence_mask = rk.utils.convert_sequence_length_to_sequence_mask(
sequence, protein_length)
masked_sequence, bert_mask = self.bert_mask(sequence, sequence_mask)
inputs['original_sequence'] = sequence
inputs['primary'] = masked_sequence
inputs['bert_mask'] = bert_mask
return inputs
|
42364
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="iam-permissions-guardrails", # Replace with your own username
version="0.0.3",
author="<NAME>",
author_email="<EMAIL>",
description="IAM Permissions Guardrails module",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://code.amazon.com/packages/IAM-Permissions-Guardrails/trees/mainline",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
python_requires=">=3.8",
install_requires=["aws-cdk.core>=1.74.0", "aws-cdk.custom-resources>=1.74.0","aws-cdk.aws_lambda>=1.74.0","aws-cdk.aws_iam>=1.74.0"],
)
|
42415
|
import hyperopt
import csv
import json
import traceback
import os.path
from pprint import pprint
import datetime
import time
import numpy.random
import threading
import queue
import copy
import tempfile
import random
import subprocess
import concurrent.futures
import tempfile
import functools
import math
import atexit
import jsonschema
import pkg_resources
from hypermax.execution import Execution
from hypermax.hyperparameter import Hyperparameter
from hypermax.results_analyzer import ResultsAnalyzer
from hypermax.algorithms.atpe_optimizer import ATPEOptimizer
from hypermax.algorithms.human_guided_optimizer_wrapper import HumanGuidedOptimizerWrapper
from hypermax.algorithms.tpe_optimizer import TPEOptimizer
from hypermax.algorithms.random_search_optimizer import RandomSearchOptimizer
from hypermax.algorithms.adaptive_bayesian_hyperband_optimizer import AdaptiveBayesianHyperband
from hypermax.configuration import Configuration
class Optimizer:
resultInformationKeys = [
'trial',
'status',
'loss',
'time',
'log',
'error'
]
def __init__(self, configuration):
self.config = Configuration(configuration)
self.searchConfig = configuration.get('search', {})
# jsonschema.validate(self.searchConfig, self.configurationSchema())
self.space = self.config.createHyperparameterSpace()
self.threadExecutor = concurrent.futures.ThreadPoolExecutor()
self.resultsAnalyzer = ResultsAnalyzer(configuration)
self.results = []
self.resultFutures = []
self.best = None
self.bestLoss = None
self.thread = threading.Thread(target=lambda: self.optimizationThread(), daemon=True if configuration.get("ui", {}).get("enabled", True) else False)
self.totalTrials = self.searchConfig.get("iterations")
self.trialsSinceResultsUpload = None
self.resultsExportFuture = None
self.currentTrials = []
self.allWorkers = set(range(self.config.data['function'].get('parallel', 1)))
self.occupiedWorkers = set()
self.trialNumber = 0
self.lastATPEParameters = None
self.lastLockedParameters = None
self.atpeParamDetails = None
self.tpeOptimizer = TPEOptimizer()
self.atpeOptimizer = ATPEOptimizer()
self.abhOptimizer = AdaptiveBayesianHyperband(self.atpeOptimizer, self.searchConfig.get("min_budget", 1), self.searchConfig.get("max_budget", 100), self.searchConfig.get("eta", 3))
self.humanGuidedATPEOptimizer = HumanGuidedOptimizerWrapper(self.atpeOptimizer)
self.randomSearchOptimizer = RandomSearchOptimizer()
def __del__(self):
if self.threadExecutor:
self.threadExecutor.shutdown(wait=True)
@classmethod
def configurationSchema(self):
""" This method returns the configuration schema for the optimization module. The schema
is a standard JSON-schema object."""
return {
"type": "object",
"properties": {
"method": {"type": "string", "enum": ['atpe', 'tpe', 'random']},
"iterations": {"type": "number"},
"budget": {"type": "number"}
},
"required": ['method', 'iterations']
}
def completed(self):
return len(self.results)
def sampleNext(self):
if self.searchConfig['method'] == 'tpe':
return self.tpeOptimizer.recommendNextParameters(self.config.data['hyperparameters'], self.results, self.currentTrials)
elif self.searchConfig['method'] == 'random':
return self.randomSearchOptimizer.recommendNextParameters(self.config.data['hyperparameters'], self.results, self.currentTrials)
elif self.searchConfig['method'] == 'atpe':
params = self.humanGuidedATPEOptimizer.recommendNextParameters(self.config.data['hyperparameters'], self.results, self.currentTrials)
self.lastATPEParameters = self.atpeOptimizer.lastATPEParameters
self.lastLockedParameters = self.atpeOptimizer.lastLockedParameters
self.atpeParamDetails = self.atpeOptimizer.atpeParamDetails
return params
elif self.searchConfig['method'] == 'abh':
params = self.abhOptimizer.recommendNextParameters(self.config.data['hyperparameters'], self.results, self.currentTrials)
self.lastATPEParameters = self.atpeOptimizer.lastATPEParameters
self.lastLockedParameters = self.atpeOptimizer.lastLockedParameters
self.atpeParamDetails = self.atpeOptimizer.atpeParamDetails
return params
def computeCurrentBest(self):
best = None
bestLoss = None
for result in self.results:
if (best is None and result['loss'] is not None ) or (result['loss'] is not None and result['loss'] < bestLoss):
best = result
bestLoss = result['loss']
self.best = best
self.bestLoss = bestLoss
def startOptmizationJob(self):
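        # Pick a free worker slot, draw the next hyperparameter sample from the
        # configured optimizer (retrying if the optimizer raises), and run the
        # trial asynchronously; onCompletion releases the worker and records
        # the result.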
availableWorkers = list(sorted(self.allWorkers.difference(self.occupiedWorkers)))
sampleWorker = availableWorkers[0]
sample = None
while sample is None:
# Hedge against any exceptions in the atpe optimizer.
try:
sample = self.sampleNext()
except Exception:
traceback.print_exc()
pass
def testSample(params, trial, worker):
currentTrial = {
"start": datetime.datetime.now(),
"trial": trial,
"worker": worker,
"params": copy.deepcopy(params)
}
self.currentTrials.append(currentTrial)
start = datetime.datetime.now()
execution = Execution(self.config.data['function'], parameters=params, worker_n=worker)
modelResult = execution.run()
end = datetime.datetime.now()
result = Hyperparameter(self.config.data['hyperparameters']).convertToFlatValues(params)
for key in params.keys():
if key.startswith("$"):
result[key] = params[key]
result['trial'] = trial
self.resultsAnalyzer.makeDirs(os.path.join(self.resultsAnalyzer.directory, "logs"))
if 'loss' in modelResult:
result['loss'] = modelResult['loss']
elif 'accuracy' in modelResult:
result['loss'] = modelResult['accuracy']
if 'status' in modelResult:
result['status'] = modelResult['status']
else:
result['status'] = 'ok'
if 'log' in modelResult:
fileName = os.path.join(self.resultsAnalyzer.directory, "logs", "trial_" + str(trial) + ".txt")
with open(fileName, "wt") as file:
file.write(modelResult['log'])
result['log'] = fileName
else:
result['log'] = ''
if 'error' in modelResult:
result['error'] = modelResult['error']
else:
result['error'] = ''
if 'time' in modelResult:
result['time'] = modelResult['time']
else:
result['time'] = (end-start).total_seconds()
self.currentTrials.remove(currentTrial)
return result
def onCompletion(worker, future):
self.occupiedWorkers.remove(worker)
self.results.append(future.result())
self.computeCurrentBest()
if not self.config.data.get("ui", {}).get("enabled", True):
pprint(future.result())
if self.resultsExportFuture is None or (self.resultsExportFuture.done() and len(self.results) > 5):
self.resultsExportFuture = self.threadExecutor.submit(
lambda: self.outputResultsWithBackup(self.config.data.get("results", {}).get("graphs", True)))
else:
self.outputResultsWithBackup(False)
if 'hypermax_results' in self.config.data:
if self.trialsSinceResultsUpload is None or self.trialsSinceResultsUpload >= self.config.data['hypermax_results']['upload_frequency']:
self.saveResultsToHypermaxResultsRepository()
self.trialsSinceResultsUpload = 1
else:
self.trialsSinceResultsUpload += 1
self.occupiedWorkers.add(sampleWorker)
sampleFuture = self.threadExecutor.submit(testSample, sample, self.trialNumber, sampleWorker)
sampleFuture.add_done_callback(functools.partial(onCompletion, sampleWorker))
self.trialNumber += 1
return sampleFuture
def runOptimizationThread(self):
self.thread.start()
def outputResultsWithBackup(self, graphs, workers=1):
self.resultsAnalyzer.outputResultsFolder(self, graphs, workers=workers)
directory_head, directory_tail = os.path.split(self.resultsAnalyzer.directory)
backup_directory = os.path.join(directory_head, ".backup_" + directory_tail + "~")
self.resultsAnalyzer.outputResultsFolder(self, graphs, directory=backup_directory, workers=workers)
def optimizationThread(self):
# Make sure we output basic results if the process is killed for some reason.
atexit.register(lambda: self.outputResultsWithBackup(False))
futures = []
for worker in range(min(len(self.allWorkers), self.totalTrials - len(self.results))):
futures.append(self.startOptmizationJob())
time.sleep(1.0)
while (len(self.results) + len(self.currentTrials)) < self.totalTrials:
completedFuture = list(concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_COMPLETED)[0])[0]
futures.remove(completedFuture)
time.sleep(0.05)
futures.append(self.startOptmizationJob())
concurrent.futures.wait(futures)
# We are completed, so we can allocate a full contingent of workers
self.outputResultsWithBackup(True, workers=4)
def exportGuidanceJSON(self, fileName):
with open(fileName, 'wt') as file:
json.dump(self.humanGuidedATPEOptimizer.guidanceOptions, file, indent=4, sort_keys=True)
def importGuidanceJSON(self, fileName):
with open(fileName, 'rt') as file:
self.humanGuidedATPEOptimizer.guidanceOptions = json.load(file)
def exportResultsCSV(self, fileName):
allKeys = set()
for result in self.results:
for key in result:
allKeys.add(key)
fieldNames = self.resultInformationKeys + sorted(allKeys.difference(set(self.resultInformationKeys))) # Make sure we keep the order of the field names consistent when writing the csv
with open(fileName, 'wt') as file:
writer = csv.DictWriter(file, fieldnames=fieldNames if len(self.results) > 0 else [], dialect='unix')
writer.writeheader()
writer.writerows(self.results)
def importResultsCSV(self, fileName):
with open(fileName) as file:
reader = csv.DictReader(file)
results = list(reader)
newResults = []
for result in results:
newResult = {}
for key,value in result.items():
if value is not None and value != "":
try:
if '.' in value or 'e' in value:
newResult[key] = float(value)
else:
newResult[key] = int(value)
except ValueError:
newResult[key] = value
elif key == 'loss':
newResult[key] = None
elif key == 'log':
newResult[key] = ''
else:
newResult[key] = None
newResults.append(newResult)
self.results = newResults
self.computeCurrentBest()
self.trialNumber = len(self.results)
def saveResultsToHypermaxResultsRepository(self):
try:
hypermaxResultsConfig = self.config.data['hypermax_results']
with tempfile.TemporaryDirectory() as directory:
process = subprocess.run(['git', 'clone', '<EMAIL>:electricbrainio/hypermax-results.git'], cwd=directory, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
hypermaxResultsDirectory = os.path.join(directory, 'hypermax-results', hypermaxResultsConfig['name'])
self.resultsAnalyzer.outputResultsFolder(self, detailed=False, directory=hypermaxResultsDirectory)
with open(os.path.join(hypermaxResultsDirectory, "metadata.json"), 'wt') as file:
json.dump(self.config.data['hypermax_results'], file, indent=4)
process = subprocess.run(['git', 'add', hypermaxResultsDirectory], cwd=os.path.join(directory, 'hypermax-results'))
process = subprocess.run(['git', 'commit', '-m', 'Hypermax automatically storing results for model ' + hypermaxResultsConfig['name'] + ' with ' + str(len(self.results)) + " trials."], cwd=os.path.join(directory, 'hypermax-results'), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process = subprocess.run(['git push'], cwd=os.path.join(directory, 'hypermax-results'), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
except Exception as e:
print(e)
|
42435
|
from unittest import TestCase
import sys
sys.path.append("./AerialNavigation/rocket_powered_landing/")
from AerialNavigation.rocket_powered_landing import rocket_powered_landing as m
print(__file__)
class Test(TestCase):
def test1(self):
m.show_animation = False
m.main()
|
42469
|
class SearchModule:
def __init__(self):
pass
def search_for_competition_by_name(self, competitions, query):
m, answer = self.search(competitions, attribute_name="caption", query=query)
if m == 0:
return False
return answer
def search_for_competition_by_code(self, competitions, query):
return self.search_by_code(competitions, attribute_name="league", query=query)
def search_for_team_by_name(self, teams, query):
m, answer = self.search(teams, attribute_name="name", query=query)
if m == 0:
return False
return answer
def search_for_team_by_code(self, teams, query):
return self.search_by_code(teams, attribute_name="code", query=query)
def search_for_player_by_name(self, players, query):
m, answer = self.search(players, attribute_name="name", query=query)
if m == 0:
return False
return answer
def search_for_team_from_standing_by_name(self, teams, query):
m, answer = self.search(teams, attribute_name="team_name", query=query)
if m == 0:
return False
return answer
@staticmethod
def search_by_code(dataset, attribute_name, query):
search = query.lower()
for index, data in enumerate(dataset):
code = getattr(data, attribute_name).lower()
if code == search:
return dataset[index]
return False
@staticmethod
def search(dataset, attribute_name, query):
values = [0 for _ in range(0, len(dataset))]
search = query.lower().split()
upper_threshold = len(search)
for index, data in enumerate(dataset):
data_name = getattr(data, attribute_name).lower()
search_array = data_name.split()
for index2, text in enumerate(search_array):
if index2 >= upper_threshold:
break
threshold = len(search[index2])
for i in range(0, len(text)):
if i >= threshold - 1:
break
if text[i] == search[index2][i]:
values[index] += 1
max_value = max(values)
max_index = values.index(max_value)
return max_value, dataset[max_index]
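if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: the search helpers
    # expect objects exposing the named attribute (e.g. `name`, `code`).
    class _Team:
        def __init__(self, name, code):
            self.name = name
            self.code = code
    teams = [_Team("Manchester United", "MUFC"), _Team("Arsenal", "AFC")]
    searcher = SearchModule()
    print(searcher.search_for_team_by_name(teams, "manchester").name)
    print(searcher.search_for_team_by_code(teams, "afc").code)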
|
42484
|
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
from libs.modules.FuseBlock import MakeFB
from .resnet_dilation import resnet50, resnet101, Bottleneck, conv1x1
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class _ConvBatchNormReLU(nn.Sequential):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
relu=True,
):
super(_ConvBatchNormReLU, self).__init__()
self.add_module(
"conv",
nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=False,
),
)
self.add_module(
"bn",
nn.BatchNorm2d(out_channels),
)
if relu:
self.add_module("relu", nn.ReLU(inplace=True))
def forward(self, x):
return super(_ConvBatchNormReLU, self).forward(x)
class BasicConv2d(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=(0, 0), dilation=1):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_planes, out_planes,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=False)
self.bn = nn.BatchNorm2d(out_planes)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
class _DenseDecoder(nn.Module):
def __init__(self, reduce_channel, n_classes):
super(_DenseDecoder, self).__init__()
# Decoder
self.decoder = nn.Sequential(
OrderedDict(
[
("conv1", _ConvBatchNormReLU(128, 256, 3, 1, 1, 1)), # 换成短连接残差块
("conv2", nn.Conv2d(256, n_classes, kernel_size=1)),
]
)
)
self.refine4_3 = _ConvBatchNormReLU(reduce_channel, reduce_channel, 3, 1, 1, 1)
self.refine4_2 = _ConvBatchNormReLU(reduce_channel, reduce_channel, 3, 1, 1, 1)
self.refine4_1 = _ConvBatchNormReLU(reduce_channel, reduce_channel, 3, 1, 1, 1)
self.refine3_2 = _ConvBatchNormReLU(reduce_channel, reduce_channel, 3, 1, 1, 1)
self.refine3_1 = _ConvBatchNormReLU(reduce_channel, reduce_channel, 3, 1, 1, 1)
self.refine2_1 = _ConvBatchNormReLU(reduce_channel, reduce_channel, 3, 1, 1, 1)
self.conv_cat_block4 = _ConvBatchNormReLU(reduce_channel, reduce_channel, 3, 1, 1, 1)
self.conv_cat_block3 = _ConvBatchNormReLU(reduce_channel, reduce_channel, 3, 1, 1, 1)
self.conv_cat_block2 = _ConvBatchNormReLU(reduce_channel, reduce_channel, 3, 1, 1, 1)
self.conv_cat_block1 = _ConvBatchNormReLU(reduce_channel, reduce_channel, 3, 1, 1, 1)
self.fuse_sal = _ConvBatchNormReLU(reduce_channel * 4, 128, 3, 1, 1, 1)
def seg_conv(self, block3, block4):
bu1 = block3 + self.refine4_3(block4)
# bu1 = F.interpolate(x, size=block2.shape[2:], mode="bilinear", align_corners=False)
return bu1
def seg_conv2(self, block2, block4, bu1):
block4 = F.interpolate(block4, size=block2.shape[2:], mode="bilinear", align_corners=True)
bu1 = F.interpolate(bu1, size=block2.shape[2:], mode="bilinear", align_corners=True)
bu2 = block2 + self.refine3_2(bu1) + self.refine4_2(block4)
# bu2 = F.interpolate(x, size=block1.shape[2:], mode="bilinear", align_corners=False)
return bu2
def seg_conv3(self, block1, block4, bu1, bu2):
# bu1_2 = F.interpolate(bu1, size=block1.shape[2:], mode="bilinear", align_corners=False)
block4_1 = F.interpolate(block4, size=block1.shape[2:], mode="bilinear", align_corners=True)
bu2_1 = F.interpolate(bu2, size=block1.shape[2:], mode="bilinear", align_corners=True)
bu1_1 = F.interpolate(bu1, size=block1.shape[2:], mode="bilinear", align_corners=True)
# x = torch.cat((block1, bu2), dim=1)
bu3 = block1 + self.refine2_1(bu2_1) + self.refine3_1(bu1_1) + self.refine4_1(block4_1)
return bu3, block4_1, bu2_1, bu1_1
def segment(self, bu3, block4_1, bu2_1, bu1_1, shape):
agg = torch.cat((self.conv_cat_block1(bu3), self.conv_cat_block2(bu2_1), self.conv_cat_block3(bu1_1),
self.conv_cat_block4(block4_1)), dim=1)
sal = self.fuse_sal(agg)
sal = self.decoder(sal)
sal = F.interpolate(sal, size=shape, mode="bilinear", align_corners=True)
# sal= self.decoder(sal)
return sal
def forward(self, block1, block2, block3, block4, x):
bu1 = self.seg_conv(block3, block4)
bu2 = self.seg_conv2(block2, block4, bu1)
bu3, block4_1, bu2_1, bu1_1 = self.seg_conv3(block1, block4, bu1, bu2)
seg = self.segment(bu3, block4_1, bu2_1, bu1_1, x.shape[2:])
# return seg, E_sup, E_att, bu1_res
return seg
class _ASPPModule(nn.Module):
"""Atrous Spatial Pyramid Pooling with image pool"""
def __init__(self, in_channels, out_channels, output_stride):
super(_ASPPModule, self).__init__()
if output_stride == 8:
pyramids = [12, 24, 36]
elif output_stride == 16:
pyramids = [6, 12, 18]
self.stages = nn.Module()
self.stages.add_module(
"c0", _ConvBatchNormReLU(in_channels, out_channels, 1, 1, 0, 1)
)
for i, (dilation, padding) in enumerate(zip(pyramids, pyramids)):
self.stages.add_module(
"c{}".format(i + 1),
_ConvBatchNormReLU(in_channels, out_channels, 3, 1, padding, dilation),
)
self.imagepool = nn.Sequential(
OrderedDict(
[
("pool", nn.AdaptiveAvgPool2d((1,1))),
("conv", _ConvBatchNormReLU(in_channels, out_channels, 1, 1, 0, 1)),
]
)
)
self.fire = nn.Sequential(
OrderedDict(
[
("conv", _ConvBatchNormReLU(out_channels * 5, out_channels, 3, 1, 1, 1)),
("dropout", nn.Dropout2d(0.1))
]
)
)
def forward(self, x):
h = self.imagepool(x)
h = [F.interpolate(h, size=x.shape[2:], mode="bilinear", align_corners=False)]
for stage in self.stages.children():
h += [stage(x)]
h = torch.cat(h, dim=1)
h = self.fire(h)
return h
class DCFNet_backbone(nn.Module):
def __init__(self, cfg, output_stride, input_channels=3, pretrained=False):
super(DCFNet_backbone, self).__init__()
self.os = output_stride
self.resnet = resnet101(pretrained=pretrained, output_stride=output_stride, input_channels=input_channels)
self.aspp = _ASPPModule(2048, 256, output_stride)
self.DenseDecoder = _DenseDecoder(reduce_channel=128, n_classes=1)
# stage3----> stage4
self.stage4_cfg = cfg['stage4_cfg']
self.stage4 = self._make_stage(self.stage4_cfg)
if pretrained:
for key in self.state_dict():
if 'resnet' not in key:
self.init_layer(key)
def init_layer(self, key):
if key.split('.')[-1] == 'weight':
if 'conv' in key:
if self.state_dict()[key].ndimension() >= 2:
nn.init.kaiming_normal_(self.state_dict()[key], mode='fan_out', nonlinearity='relu')
elif 'bn' in key:
self.state_dict()[key][...] = 1
elif key.split('.')[-1] == 'bias':
self.state_dict()[key][...] = 0.001
def feat_conv(self, x):
x_list = []
block0 = self.resnet.conv1(x)
block0 = self.resnet.bn1(block0)
block0 = self.resnet.relu(block0)
block0 = self.resnet.maxpool(block0)
block1 = self.resnet.layer1(block0)
x_list.append(block1)
block2 = self.resnet.layer2(block1)
x_list.append(block2)
block3 = self.resnet.layer3(block2)
x_list.append(block3)
block4 = self.resnet.layer4(block3)
# if self.os == 16:
# block4 = F.upsample(block4, scale_factor=2, mode='bilinear', align_corners=False)
block4 = self.aspp(block4)
x_list.append(block4)
return block1, block2, block3, block4, x_list
def _make_stage(self, layer_config, multi_scale_output=True):
num_branches = layer_config['NUM_BRANCHES']
num_blocks = layer_config['NUM_BLOCKS']
num_channels = layer_config['NUM_CHANNELS']
modules = []
modules.append(
MakeFB(
num_branches,
num_blocks,
num_channels,
multi_scale_output
)
)
return nn.Sequential(*modules)
def forward(self, x):
block1, block2, block3, block4, x_list = self.feat_conv(x)
y_list = self.stage4(x_list)
seg = self.DenseDecoder(y_list[0], y_list[1], y_list[2], y_list[3], x)
        return torch.sigmoid(seg)
|
42499
|
import time
from selenium import webdriver
from lxml import etree
driver = webdriver.PhantomJS(executable_path='./phantomjs-2.1.1-macosx/bin/phantomjs')
# Fetch the data of the first page
def get_html():
url = "https://detail.tmall.com/item.htm?id=531993957001&skuId=3609796167425&user_id=268451883&cat_id=2&is_b=1&rn=71b9b0aeb233411c4f59fe8c610bc34b"
driver.get(url)
time.sleep(5)
driver.execute_script('window.scrollBy(0,3000)')
time.sleep(2)
driver.execute_script('window.scrollBy(0,5000)')
time.sleep(2)
    # Open the "cumulative reviews" tab
btnNext = driver.find_element_by_xpath('//*[@id="J_TabBar"]/li[3]/a')
btnNext.click()
html = driver.page_source
return html
def get_comments(html):
source = etree.HTML(html)
commens = source.xpath("//*[@id='J_TabBar']/li[3]/a/em/text()")
    print('Number of reviews:', commens)
    # Convert the review count into a page count (20 reviews per page)
    commens = (int(commens[0]) / 20) + 1
    # Total number of review pages obtained
    print('Number of review pages:', int(commens))
return int(commens)
def parse_html(html):
html = etree.HTML(html)
commentlist = html.xpath("//*[@class='rate-grid']/table/tbody")
for comment in commentlist:
        # Review text
vercomment = comment.xpath(
"./tr/td[@class='tm-col-master']/div[@class='tm-rate-content']/div[@class='tm-rate-fulltxt']/text()")
        # Device type
verphone = comment.xpath("./tr/td[@class='col-meta']/div[@class='rate-sku']/p[@title]/text()")
print(vercomment)
print(verphone)
        # User name (only the first and last characters are shown; the middle is masked with ****)
veruser = comment.xpath("./tr/td[@class='col-author']/div[@class='rate-user-info']/text()")
print(veruser)
def next_button_work(num):
if num != 0:
driver.execute_script('window.scrollBy(0,3000)')
time.sleep(2)
try:
driver.find_element_by_css_selector('#J_Reviews > div > div.rate-page > div > a:last-child').click()
except Exception as e:
print(e)
time.sleep(2)
driver.execute_script('window.scrollBy(0,3000)')
time.sleep(2)
driver.execute_script('window.scrollBy(0,5000)')
time.sleep(2)
html = driver.page_source
parse_html(html)
def selenuim_work(html):
parse_html(html)
next_button_work(1)
pass
def gettotalpagecomments(comments):
html = get_html()
for i in range(0, comments):
selenuim_work(html)
data = get_html()
# Get the review page count
commens = get_comments(data)
# Iterate through the review pages
gettotalpagecomments(commens)
|
42582
|
import urllib.request
from bs4 import BeautifulSoup
print ("Collecting data from IMDb charts....\n\n\n")
print ("The current top 15 IMDB movies are the following: \n\n")
response = urllib.request.urlopen("http://www.imdb.com/chart/top")
html = response.read()
soup = BeautifulSoup(html, 'html.parser')
mytd = soup.findAll("td", {"class":"titleColumn"})
for titles in mytd[:15]:
print (titles.find('a').text)
print ("\n\nThank you for using IMDB script ...")
|
42601
|
from uuid import UUID
from datetime import datetime
def uuid_from_string(string):
return UUID('{s}'.format(s=string))
def format_timestamp(string):
if isinstance(string, str):
return datetime.strptime(string, '%Y-%m-%dT%H:%M:%S.%fZ')
if isinstance(string, datetime):
return string
|
42613
|
import sys
from glob import glob
from serial import Serial, SerialException, SerialTimeoutException
import numpy as np
BAUD_RATE = 9600
PORT = 'COM5'
READ_TIMEOUT = 1
LOWER_BOUND = 0.01
UPPER_BOUND = 0.4
class SerialCommunication():
""" Manages the communication and sends the data to the Arduino """
def __init__(self):
self._serial_channel = Serial()
self._serial_channel.port = PORT
self._serial_channel.baudrate = BAUD_RATE
@property
def baudrate(self):
return self._serial_channel.baudrate
@baudrate.setter
def baudrate(self, new_baudrate):
if not self._serial_channel.is_open:
self._serial_channel.baudrate = new_baudrate
else:
raise Exception("Close connection before changing baudrate")
@property
def port(self):
return self._serial_channel.port
@port.setter
    def port(self, new_port):
if not self._serial_channel.is_open:
self._serial_channel.port = new_port
else:
raise Exception("Close connection before changing port")
def get_available_serial_ports(self):
""" Returns a list of all ports that can be opened """
if self._serial_channel.is_open:
raise Exception("Close connection before")
result = []
for port in self._list_all_possibles_ports():
try:
Serial(port).close()
result.append(port)
except (OSError, SerialException):
pass
return result
def establish_communication(self):
"""
Enables the communication with the arduino with the latest parameters
Throws a SerialException is it cannot connect to port
"""
try:
self._serial_channel.open()
except SerialException as error:
print("Error when connecting to serial %s port" % (self._serial_channel.port))
raise(SerialException)
def send_data(self, data):
""" prints feedback data from the arduino and sends the new data """
if self._is_data_available():
print(("Reading : ", self._read_bytes(len(data))))
data = [x[1] for x in data]
if self._is_data_valid(data):
value_to_send = self._get_clipped_signals(data)
print(('Sending', value_to_send))
try:
self._serial_channel.write(bytearray(value_to_send))
except SerialTimeoutException as e:
print('Error when sending data to microcontroller:' + str(e))
def close_communication(self):
self._serial_channel.close()
def _list_all_possibles_ports(self):
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
return ports
def _read_bytes(self, nb_bytes=1):
bytes_received = []
for _ in range(nb_bytes):
bytes_received.append(self._serial_channel.read(1))
return [ord(byte) for byte in bytes_received if byte]
def _is_data_available(self):
return self._serial_channel is not None and self._serial_channel.is_open and self._serial_channel.in_waiting
def _is_data_valid(self, data):
return self._serial_channel is not None and self._serial_channel.is_open and not np.any(np.isnan(data))
def _get_clipped_signals(self, signals):
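        # Clip each signal into [LOWER_BOUND, UPPER_BOUND] and rescale it
        # linearly onto 0-255 so it fits in the single byte written per value.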
clipped_list = np.clip(signals, LOWER_BOUND, UPPER_BOUND)
return [int(255 * (x - LOWER_BOUND)/(UPPER_BOUND - LOWER_BOUND)) for x in clipped_list]
|
42620
|
import assets
import webbrowser
from PyQt5.Qt import QMessageBox
from PyQt5.QtNetwork import QNetworkDiskCache
from PyQt5.QtWebKitWidgets import QWebPage, QWebInspector
class WebPage(QWebPage):
def __init__(self):
super(WebPage, self).__init__()
self.inspector = QWebInspector()
self.inspector.setPage(self)
self.inspector.resize(1024, 400)
diskCache = QNetworkDiskCache(self)
diskCache.setCacheDirectory(assets.fs.dataPath() + '/Cache')
self.networkAccessManager().setCache(diskCache)
self.networkAccessManager().setCookieJar(assets.dataJar)
def acceptNavigationRequest(self, frame, request, type):
if(type == QWebPage.NavigationTypeLinkClicked):
url = request.url().toString()
if(frame == self.mainFrame()):
self.view().load(url)
return False
            elif frame is None:
# self.createWindow(QWebPage.WebBrowserWindow, url)
webbrowser.open(request.url().toString())
return False
return QWebPage.acceptNavigationRequest(self, frame, request, type)
# def downloadRequested(self, request):
# print(request)
def findText(self, text):
return super(WebPage, self).findText(text, QWebPage.FindBackward)
def showInspector(self):
self.inspector.show()
self.inspector.activateWindow()
def hideInspector(self):
self.inspector.close()
def createWindow(self, type, url = None):
from window import Window
window = Window(self.view().parentWidget(), url, isDialog = (type == QWebPage.WebModalDialog))
return window.webView.page()
def javaScriptAlert(self, frame, msg):
QMessageBox.information(self.view().parentWidget(), None, msg)
def javaScriptConfirm(self, frame, msg):
return QMessageBox.question(self.view().parentWidget(), None, msg) == QMessageBox.Yes
# There is a bug in PyQt
# def javaScriptPrompt(self, frame, msg, defaultValue):
# result = QInputDialog.getText(self.view().parentWidget(), None, msg)
# return (result[1], result[0])
def close(self):
self.hideInspector()
assets.dataJar.save()
|
42624
|
import sys
import time
import threading
import grpc
import numpy
import soundfile as sf
import tensorflow as tf
import _init_paths
import audioset.vggish_input as vggish_input
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
tf.app.flags.DEFINE_integer('concurrency', 1, 'concurrent inference requests limit')
tf.app.flags.DEFINE_integer('num_tests', 100, 'Number of test sample')
tf.app.flags.DEFINE_string('server', '0.0.0.0:8500', 'PredictionService host:port')
tf.app.flags.DEFINE_string('work_dir', '/tmp', 'Working directory')
FLAGS = tf.app.flags.FLAGS
class _ResultCounter(object):
def __init__(self, num_tests, concurrency):
self._num_tests = num_tests
self._concurrency = concurrency
self._error = 0
self._done = 0
self._active = 0
self._condition = threading.Condition()
self._start_time = -1
self._end_time = 0
def inc_done(self):
with self._condition:
self._done += 1
if self._done == self._num_tests:
self.set_end_time(time.time())
self._condition.notify()
def dec_active(self):
with self._condition:
self._active -= 1
self._condition.notify()
def throttle(self):
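        # Block until the number of in-flight requests drops below the
        # concurrency limit, then claim a slot; the start time is recorded on
        # the first call.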
with self._condition:
if self._start_time == -1:
self._start_time = time.time()
while self._active == self._concurrency:
self._condition.wait()
self._active += 1
def set_start_time(self, start_time):
self._start_time = start_time
def set_end_time(self, end_time):
self._end_time = end_time
def get_throughput(self):
if self._end_time == 0:
self.set_end_time(time.time())
print(self._end_time - self._start_time)
return self._num_tests / (self._end_time - self._start_time)
def time_to_sample(t, sr, factor):
return round(sr * t / factor)
def _create_rpc_callback(label, result_counter):
def _callback(result_future):
exception = result_future.exception()
if exception:
# result_counter.inc_error()
print(exception)
else:
print('normal')
sys.stdout.write('.')
sys.stdout.flush()
response = numpy.array(result_future.result().outputs['output'].float_val)
result_counter.inc_done()
result_counter.dec_active()
return _callback
def inference(hostport, work_dir, concurrency, num_tests):
audio_path = 'test_DB/test_airport.wav'
num_secs = 1
sc_start = 0
sc_end = 2000
wav_data, sr = sf.read(audio_path, dtype='int16')
assert wav_data.dtype == numpy.int16, 'Bad sample type: %r' % wav_data.dtype
samples = wav_data / 32768.0 # Convert to [-1.0, +1.0]
sc_center = time_to_sample((sc_start + sc_end) / 2, sr, 1000.0)
# print('Center is {} when sample_rate is {}'.format(sc_center, sr))
data_length = len(samples)
data_width = time_to_sample(num_secs, sr, 1.0)
half_input_width = int(data_width / 2)
if sc_center < half_input_width:
pad_width = half_input_width - sc_center
samples = numpy.pad(samples, [(pad_width, 0), (0, 0)], mode='constant', constant_values=0)
sc_center += pad_width
elif sc_center + half_input_width > data_length:
pad_width = sc_center + half_input_width - data_length
samples = numpy.pad(samples, [(0, pad_width), (0, 0)], mode='constant', constant_values=0)
samples = samples[sc_center - half_input_width: sc_center + half_input_width]
audio_input = vggish_input.waveform_to_examples(samples, sr)
print(audio_input.dtype)
audio_input = audio_input.astype(numpy.float32)
channel = grpc.insecure_channel(hostport)
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
result_counter = _ResultCounter(num_tests, concurrency)
for _ in range(num_tests):
request = predict_pb2.PredictRequest()
request.model_spec.name = 'vgg'
request.model_spec.signature_name = 'prediction'
print(audio_input.shape)
request.inputs['input'].CopyFrom(tf.contrib.util.make_tensor_proto(audio_input, shape=audio_input.shape))
result_counter.throttle()
result_future = stub.Predict.future(request, 5.0)
result_future.add_done_callback(_create_rpc_callback(None, result_counter))
return result_counter.get_throughput()
def main(_):
if FLAGS.num_tests > 10000:
print('num_tests should not be greater than 10k')
return
if not FLAGS.server:
print('please specify server host:port')
return
tfs_throughput = inference(FLAGS.server, FLAGS.work_dir, FLAGS.concurrency, FLAGS.num_tests)
    print('\n TFS Throughput: %s requests/sec' % (tfs_throughput))
if __name__ == '__main__':
tf.app.run()
|
42685
|
from __future__ import annotations
import json
import os
import shutil
import subprocess
import tempfile
import uuid
from abc import ABC, abstractmethod
from typing import Any, Union
from urllib.error import HTTPError
from urllib.request import urlopen, urlretrieve
import warnings
import meerkat as mk
import pandas as pd
import yaml
from meerkat.tools.lazy_loader import LazyLoader
from dcbench.common.modeling import Model
from dcbench.config import config
storage = LazyLoader("google.cloud.storage")
torch = LazyLoader("torch")
def _upload_dir_to_gcs(local_path: str, gcs_path: str, bucket: "storage.Bucket"):
assert os.path.isdir(local_path)
with tempfile.TemporaryDirectory() as tmp_dir:
tarball_path = os.path.join(tmp_dir, "run.tar.gz")
subprocess.call(
[
"tar",
"-czf",
tarball_path,
"-C",
local_path,
".",
]
)
remote_path = gcs_path + ".tar.gz"
blob = bucket.blob(remote_path)
blob.upload_from_filename(tarball_path)
def _url_exists(url: str):
try:
response = urlopen(url)
status_code = response.getcode()
return status_code == 200
except HTTPError:
return False
def urlretrieve_with_retry(url: str, filename: str, max_retries: int=5):
"""
Retry urlretrieve() if it fails.
"""
for idx in range(max_retries):
try:
urlretrieve(url, filename)
return
except Exception as e:
warnings.warn(
f"Failed to download {url}: {e}\n"
f"Retrying {idx}/{max_retries}..."
)
continue
raise RuntimeError(f"Failed to download {url} after {max_retries} retries.")
class Artifact(ABC):
"""A pointer to a unit of data (e.g. a CSV file) that is stored locally on
disk and/or in a remote GCS bucket.
In DCBench, each artifact is identified by a unique artifact ID. The only
state that the :class:`Artifact` object must maintain is this ID (``self.id``).
The object does not hold the actual data in memory, making it
lightweight.
:class:`Artifact` is an abstract base class. Different types of artifacts (e.g. a
CSV file vs. a PyTorch model) have corresponding subclasses of :class:`Artifact`
(e.g. :class:`CSVArtifact`, :class:`ModelArtifact`).
.. Tip::
The vast majority of users should not call the :class:`Artifact`
constructor directly. Instead, they should either create a new artifact by
calling :meth:`from_data` or load an existing artifact from a YAML file.
The class provides utilities for accessing and managing a unit of data:
- Synchronizing the local and remote copies of a unit of data:
:meth:`upload`, :meth:`download`
- Loading the data into memory: :meth:`load`
- Creating new artifacts from in-memory data: :meth:`from_data`
- Serializing the pointer artifact so it can be shared:
:meth:`to_yaml`, :meth:`from_yaml`
Args:
artifact_id (str): The unique artifact ID.
Attributes:
id (str): The unique artifact ID.
"""
@classmethod
def from_data(
cls, data: Union[mk.DataPanel, pd.DataFrame, Model], artifact_id: str = None
) -> Artifact:
"""Create a new artifact object from raw data and save the artifact to
disk in the local directory specified in the config file at
``config.local_dir``.
.. tip::
When called on the abstract base class :class:`Artifact`, this method will
infer which artifact subclass to use. If you know exactly which artifact
class you'd like to use (e.g. :class:`DataPanelArtifact`), you should call
this classmethod on that subclass.
Args:
data (Union[mk.DataPanel, pd.DataFrame, Model]): The raw data that will be
saved to disk.
            artifact_id (str, optional): The ID to assign to the new artifact.
                Defaults to None, in which case a UUID will be generated and used.
Returns:
Artifact: A new artifact pointing to the :arg:`data` that was saved to disk.
"""
if artifact_id is None:
artifact_id = uuid.uuid4().hex
# TODO ():At some point we should probably enforce that ids are unique
if cls is Artifact:
# if called on base class, infer which class to use
if isinstance(data, mk.DataPanel):
cls = DataPanelArtifact
elif isinstance(data, pd.DataFrame):
cls = CSVArtifact
elif isinstance(data, Model):
cls = ModelArtifact
elif isinstance(data, (list, dict)):
cls = YAMLArtifact
else:
raise ValueError(
f"No Artifact in dcbench for object of type {type(data)}"
)
artifact = cls(artifact_id=artifact_id)
artifact.save(data)
return artifact
@property
def local_path(self) -> str:
"""The local path to the artifact in the local directory specified in
the config file at ``config.local_dir``."""
return os.path.join(config.local_dir, self.path)
@property
def remote_url(self) -> str:
"""The URL of the artifact in the remote GCS bucket specified in the
config file at ``config.public_bucket_name``."""
return os.path.join(
config.public_remote_url, self.path + (".tar.gz" if self.isdir else "")
)
@property
def is_downloaded(self) -> bool:
"""Checks if artifact is downloaded to local directory specified in the
config file at ``config.local_dir``.
Returns:
bool: True if artifact is downloaded, False otherwise.
"""
return os.path.exists(self.local_path)
@property
def is_uploaded(self) -> bool:
"""Checks if artifact is uploaded to GCS bucket specified in the config
file at ``config.public_bucket_name``.
Returns:
bool: True if artifact is uploaded, False otherwise.
"""
return _url_exists(self.remote_url)
def upload(self, force: bool = False, bucket: "storage.Bucket" = None) -> bool:
"""Uploads artifact to a GCS bucket at ``self.path``, which by default
is just the artifact ID with the default extension.
Args:
force (bool, optional): Force upload even if artifact is already uploaded.
Defaults to False.
            bucket (storage.Bucket, optional): The GCS bucket to which the artifact is
                uploaded. Defaults to None, in which case the artifact is uploaded to
                the bucket specified in the config file at config.public_bucket_name.
        Returns:
            bool: True if artifact was uploaded, False otherwise.
"""
if not os.path.exists(self.local_path):
raise ValueError(
f"Could not find Artifact to upload at '{self.local_path}'. "
"Are you sure it is stored locally?"
)
if self.is_uploaded and not force:
warnings.warn(
f"Artifact {self.id} is not being re-uploaded."
"Set `force=True` to force upload."
)
return False
if bucket is None:
client = storage.Client()
bucket = client.get_bucket(config.public_bucket_name)
if self.isdir:
_upload_dir_to_gcs(
local_path=self.local_path,
bucket=bucket,
gcs_path=self.path,
)
else:
blob = bucket.blob(self.path)
blob.upload_from_filename(self.local_path)
blob.metadata = {"Cache-Control": "private, max-age=0, no-transform"}
blob.patch()
return True
def download(self, force: bool = False) -> bool:
"""Downloads artifact from GCS bucket to the local directory specified
in the config file at ``config.local_dir``. The relative path to the
artifact within that directory is ``self.path``, which by default is
just the artifact ID with the default extension.
Args:
force (bool, optional): Force download even if artifact is already
downloaded. Defaults to False.
Returns:
bool: True if artifact was downloaded, False otherwise.
.. warning::
By default, the GCS cache on public urls has a max-age up to an hour.
            Therefore, when updating existing artifacts, changes may not be
            immediately reflected in subsequent downloads.
            See `here
            <https://stackoverflow.com/questions/62897641/google-cloud-storage-public-object-url-e-super-slow-updating>`_
for more details.
"""
if self.is_downloaded and not force:
return False
if self.isdir:
if self.is_downloaded:
shutil.rmtree(self.local_path)
os.makedirs(self.local_path, exist_ok=True)
tarball_path = self.local_path + ".tar.gz"
urlretrieve_with_retry(self.remote_url, tarball_path)
subprocess.call(["tar", "-xzf", tarball_path, "-C", self.local_path])
os.remove(tarball_path)
else:
if self.is_downloaded:
os.remove(self.local_path)
os.makedirs(os.path.dirname(self.local_path), exist_ok=True)
urlretrieve_with_retry(self.remote_url, self.local_path)
return True
DEFAULT_EXT: str = ""
isdir: bool = False
@abstractmethod
def load(self) -> Any:
"""Load the artifact into memory from disk at ``self.local_path``."""
raise NotImplementedError()
@abstractmethod
def save(self, data: Any) -> None:
"""Save data to disk at ``self.local_path``."""
raise NotImplementedError()
def __init__(self, artifact_id: str, **kwargs) -> None:
"""
.. warning::
In general, you should not instantiate an Artifact directly. Instead, use
:meth:`Artifact.from_data` to create an Artifact.
"""
self.path = f"{artifact_id}.{self.DEFAULT_EXT}"
self.id = artifact_id
os.makedirs(os.path.dirname(self.local_path), exist_ok=True)
super().__init__()
@staticmethod
def from_yaml(loader: yaml.Loader, node):
"""This function is called by the YAML loader to convert a YAML node
into an Artifact object.
It should not be called directly.
"""
data = loader.construct_mapping(node, deep=True)
return data["class"](artifact_id=data["artifact_id"])
@staticmethod
def to_yaml(dumper: yaml.Dumper, data: Artifact):
"""This function is called by the YAML dumper to convert an Artifact
object into a YAML node.
It should not be called directly.
"""
data = {
"artifact_id": data.id,
"class": type(data),
}
node = dumper.represent_mapping("!Artifact", data)
return node
def _ensure_downloaded(self):
if not self.is_downloaded:
raise ValueError(
"Cannot load `Artifact` that has not been downloaded. "
"First call `artifact.download()`."
)
yaml.add_multi_representer(Artifact, Artifact.to_yaml)
yaml.add_constructor("!Artifact", Artifact.from_yaml)
class CSVArtifact(Artifact):
DEFAULT_EXT: str = "csv"
def load(self) -> pd.DataFrame:
self._ensure_downloaded()
data = pd.read_csv(self.local_path, index_col=0)
def parselists(x):
if isinstance(x, str):
try:
return json.loads(x)
except ValueError:
return x
else:
return x
return data.applymap(parselists)
def save(self, data: pd.DataFrame) -> None:
return data.to_csv(self.local_path)
class YAMLArtifact(Artifact):
DEFAULT_EXT: str = "yaml"
def load(self) -> Any:
self._ensure_downloaded()
return yaml.load(open(self.local_path), Loader=yaml.FullLoader)
def save(self, data: Any) -> None:
return yaml.dump(data, open(self.local_path, "w"))
class DataPanelArtifact(Artifact):
DEFAULT_EXT: str = "mk"
isdir: bool = True
def load(self) -> pd.DataFrame:
self._ensure_downloaded()
return mk.DataPanel.read(self.local_path)
def save(self, data: mk.DataPanel) -> None:
return data.write(self.local_path)
class VisionDatasetArtifact(DataPanelArtifact):
DEFAULT_EXT: str = "mk"
isdir: bool = True
COLUMN_SUBSETS = {
"celeba": ["id", "image", "split"],
"imagenet": ["id", "image", "name", "synset"],
}
@classmethod
def from_name(cls, name: str):
if name == "celeba":
dp = mk.datasets.get(name, dataset_dir=config.celeba_dir)
elif name == "imagenet":
dp = mk.datasets.get(name, dataset_dir=config.imagenet_dir, download=False)
else:
raise ValueError(f"No dataset named '{name}' supported by dcbench.")
dp["id"] = dp["image_id"]
dp.remove_column("image_id")
dp = dp[cls.COLUMN_SUBSETS[name]]
artifact = cls.from_data(data=dp, artifact_id=name)
return artifact
def download(self, force: bool = False):
if self.id == "celeba":
dp = mk.datasets.get(self.id, dataset_dir=config.celeba_dir)
elif self.id == "imagenet":
dp = mk.datasets.get(
self.id, dataset_dir=config.imagenet_dir, download=False
)
else:
raise ValueError(f"No dataset named '{self.id}' supported by dcbench.")
dp["id"] = dp["image_id"]
dp.remove_column("image_id")
dp = dp[self.COLUMN_SUBSETS[self.id]]
self.save(data=dp[self.COLUMN_SUBSETS[self.id]])
class ModelArtifact(Artifact):
DEFAULT_EXT: str = "pt"
def load(self) -> Model:
self._ensure_downloaded()
dct = torch.load(self.local_path, map_location="cpu")
model = dct["class"](dct["config"])
model.load_state_dict(dct["state_dict"])
return model
def save(self, data: Model) -> None:
return torch.save(
{
"state_dict": data.state_dict(),
"config": data.config,
"class": type(data),
},
self.local_path,
)
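# --- Usage sketch (illustrative, not part of dcbench itself) ---
# A minimal example of the workflow described in the Artifact docstring: wrap an
# in-memory object with ``from_data`` (here a made-up DataFrame, so a CSVArtifact
# is inferred) and read it back from disk with ``load``. Assumes that
# ``config.local_dir`` points at a writable directory.
if __name__ == "__main__":
    frame = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
    artifact = Artifact.from_data(frame)  # infers CSVArtifact and writes a CSV to disk
    print(artifact.id, artifact.local_path)
    print(artifact.load().shape)  # the data is read back from config.local_dir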
|
42686
|
import asyncio
import json
import zlib
import aiohttp
import errors
API_BASE = 'https://discordapp.com/api/v6'
CONFIG_FILE = json.load(open('data/config.json'))
TOKEN = CONFIG_FILE['token']
HEADERS = {'Authorization': 'Bot ' + TOKEN,
'User-Agent': 'DiscordBot (https://www.github.com/fourjr/dapi-bot,\
aiohttp and websockets)'}
SESSION = aiohttp.ClientSession(loop=asyncio.get_event_loop())
SESSION_DATA = [None, None]
PREFIX = './'
def parse_data(data):
'''Parses the websocket data into a dictionary'''
if isinstance(data, bytes):
return json.loads(zlib.decompress(data, 15, 10490000).decode('utf-8'))
else:
return json.loads(data)
def find(obj: list, **kwargs):
    '''Finds an element of the given object that satisfies all kwargs'''
for i in obj:
if all(i[k] == kwargs[k] for k in kwargs):
return i
return None
async def request(http, endpoint, obj=None):
'''Used to request to the Discord API'''
if http == 'POST':
resp = await SESSION.post(API_BASE + endpoint, json=obj, headers=HEADERS)
    elif http == 'DELETE':
        resp = await SESSION.delete(API_BASE + endpoint, json=obj, headers=HEADERS)
    else:  # 'GET' (used by get_channel) and anything else falls back to a plain GET
        resp = await SESSION.get(API_BASE + endpoint, headers=HEADERS)
if resp.status == 204:
return
obj = await resp.json()
print(resp)
    if 300 > resp.status >= 200:
        return obj  # success: hand back the parsed JSON body
elif resp.status == 403:
raise errors.Forbidden(resp, obj)
elif resp.status == 404:
raise errors.NotFound(resp, obj)
elif resp.status == 429:
raise errors.RateLimit(resp, obj)
async def get_channel(channel_id):
'''Gets a channel by the ID'''
return await request('GET', f'/channels/{channel_id}')
async def send_message(channel_id, content):
'''Sends a plain text message to the provided channel ID'''
return await request('POST', f'/channels/{channel_id}/messages', {'content':content})
|
42760
|
from tensorflow.keras.layers import (Conv2D, Dense, Flatten, MaxPooling2D,
TimeDistributed)
def VGG16(inputs):
x = Conv2D(64,(3,3),activation = 'relu',padding = 'same',name = 'block1_conv1')(inputs)
x = Conv2D(64,(3,3),activation = 'relu',padding = 'same', name = 'block1_conv2')(x)
x = MaxPooling2D((2,2), strides = (2,2), name = 'block1_pool')(x)
x = Conv2D(128,(3,3),activation = 'relu',padding = 'same',name = 'block2_conv1')(x)
x = Conv2D(128,(3,3),activation = 'relu',padding = 'same',name = 'block2_conv2')(x)
x = MaxPooling2D((2,2),strides = (2,2), name = 'block2_pool')(x)
x = Conv2D(256,(3,3),activation = 'relu',padding = 'same',name = 'block3_conv1')(x)
x = Conv2D(256,(3,3),activation = 'relu',padding = 'same',name = 'block3_conv2')(x)
x = Conv2D(256,(3,3),activation = 'relu',padding = 'same',name = 'block3_conv3')(x)
x = MaxPooling2D((2,2),strides = (2,2), name = 'block3_pool')(x)
    # Fourth convolution block
# 14,14,512
x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block4_conv1')(x)
x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block4_conv2')(x)
x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block4_conv3')(x)
x = MaxPooling2D((2,2),strides = (2,2), name = 'block4_pool')(x)
    # Fifth convolution block
# 7,7,512
x = Conv2D(512,(3,3),activation = 'relu', padding = 'same', name = 'block5_conv1')(x)
x = Conv2D(512,(3,3),activation = 'relu', padding = 'same', name = 'block5_conv2')(x)
x = Conv2D(512,(3,3),activation = 'relu', padding = 'same', name = 'block5_conv3')(x)
return x
def vgg_classifier_layers(x):
    # Flatten each ROI feature map and pass it through two 4096-d fully connected layers
x = TimeDistributed(Flatten(name='flatten'))(x)
x = TimeDistributed(Dense(4096, activation='relu'), name='fc1')(x)
x = TimeDistributed(Dense(4096, activation='relu'), name='fc2')(x)
return x
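# --- Illustrative shape check (an assumption, not part of the original file) ---
# Feeding a 224x224x3 input through the backbone above: the four 2x2 poolings
# reduce the spatial size to 14x14, so the shared feature map is (None, 14, 14, 512).
if __name__ == "__main__":
    from tensorflow.keras.layers import Input
    from tensorflow.keras.models import Model
    inputs = Input(shape=(224, 224, 3))
    feature_map = VGG16(inputs)
    print(Model(inputs, feature_map).output_shape)  # (None, 14, 14, 512)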
|
42761
|
from ...schema_classes import SchemaClasses
ContainerType = SchemaClasses.sysflow.type.ContainerTypeClass
OID = SchemaClasses.sysflow.type.OIDClass
SFObjectState = SchemaClasses.sysflow.type.SFObjectStateClass
|
42774
|
from .seg import seg
class cbs(seg):
'''
cbs file type extends from seg files
'''
_fileType = "cbs"
|
42784
|
from pygments.lexer import RegexLexer
from pygments.token import Comment, Keyword, Name, Number, Operator, Punctuation, String, Text
__all__ = ["GraphQLLexer"]
class GraphQLLexer(RegexLexer):
"""
Pygments GraphQL lexer for mkdocs
"""
name = "GraphQL"
aliases = ["graphql", "gql"]
filenames = ["*.graphql", "*.gql"]
mimetypes = ["application/graphql"]
tokens = {
"root": [
(r"#.*", Comment.Singline),
(r'"""("?"?(\\"""|\\(?!=""")|[^"\\]))*"""', Comment.Multi),
(r"\.\.\.", Operator),
# u
(r'".*"', String.Double),
(r"(-?0|-?[1-9][0-9]*)(\.[0-9]+[eE][+-]?[0-9]+|\.[0-9]+|[eE][+-]?[0-9]+)", Number.Float),
(r"(-?0|-?[1-9][0-9]*)", Number.Integer),
(r"\$+[_A-Za-z][_0-9A-Za-z]*", Name.Variable),
(r"[_A-Za-z][_0-9A-Za-z]+\s?:", Text),
(
r"(type|query|interface|mutation|extend|input|implements|directive|@[a-z]+|on|true|false|null)\b",
Keyword.Type,
),
(r"[!$():=@\[\]{|}]+?", Punctuation),
(r"[_A-Za-z][_0-9A-Za-z]*", Keyword),
(r"(\s|,)", Text),
]
}
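# --- Usage sketch (illustrative) ---
# A quick way to try the lexer outside of mkdocs: run a small, made-up query
# through pygments.highlight with a terminal formatter.
if __name__ == "__main__":
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    sample = """
    # fetch a user and their friends
    query GetUser($id: ID!) {
      user(id: $id) { name friends { name } }
    }
    """
    print(highlight(sample, GraphQLLexer(), TerminalFormatter()))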
|
42790
|
from copy import copy
import sqlite3
import pandas as pd
import pandas_to_sql
from pandas_to_sql.testing.utils.fake_data_creation import create_fake_dataset
from pandas_to_sql.conventions import flatten_grouped_dataframe
# table_name = 'random_data'
# df, _ = create_fake_dataset()
# df_ = pandas_to_sql.wrap_df(df, table_name)
# df2 = df_.groupby('random_int').agg({'random_float':['mean','sum','count'], 'random_str':', '.join})
# df2 = flatten_grouped_dataframe(df2)
# print(df2.get_sql_string())
iris = pd.read_csv('https://raw.githubusercontent.com/mwaskom/seaborn-data/master/iris.csv')
table_name = 'iris'
sql_connection = sqlite3.connect('./iris.db') #create db
iris.to_sql(table_name, sql_connection, if_exists='replace', index=False)
df = pandas_to_sql.wrap_df(iris, table_name)
pd_wrapped = pandas_to_sql.wrap_pd(pd)
df_ = copy(df)
df_['sepal_width_rounded'] = df_.sepal_width.round()
df_1 = df_[df_.species=='setosa'].reset_index(drop=True)
df_2 = df_[df_.species=='versicolor'].reset_index(drop=True)
some_df = pd_wrapped.concat([df_1, df_2]).reset_index(drop=True)
sql_string = some_df.get_sql_string()
df_from_sql_database = pd.read_sql_query(sql_string, sql_connection)
df_pandas = some_df.df_pandas
from pandas_to_sql.testing.utils.asserters import assert_dataframes_equals
assert_dataframes_equals(df_pandas, df_from_sql_database)
|
42793
|
from flask_restful import Api
class ViewInjector:
def __init__(self, app=None):
if app is not None:
self.init_app(app)
def init_app(self, app):
from app.views.blockchain import Node, Chain, Mine, Transaction
api = Api(app)
api.add_resource(Node, '/node')
api.add_resource(Chain, '/chain')
api.add_resource(Mine, '/mine')
api.add_resource(Transaction, '/transaction')
|
42826
|
import time
import mxnet as mx
benchmark_dataiter = mx.io.ImageRecordIter(
path_imgrec="../data/test.rec",
data_shape=(1, 28, 28),
batch_size=64,
mean_r=128,
scale=0.00390625,
)
mod = mx.mod.Module.load('mnist_lenet', 35, context=mx.gpu(2))
mod.bind(
data_shapes=benchmark_dataiter.provide_data,
label_shapes=benchmark_dataiter.provide_label,
for_training=False)
start = time.time()
for i, batch in enumerate(benchmark_dataiter):
mod.forward(batch)
time_elapsed = time.time() - start
msg = '{} batches iterated!\nAverage forward time per batch: {:.6f} ms'
print(msg.format(i+1, 1000*time_elapsed/float(i+1)))
|
42872
|
class Solution:
def removeDuplicateLetters(self, s: str) -> str:
        # Count how many times each character still has left to appear in s.
        dic = {}
        for char in s:
            dic[char] = dic.get(char, 0) + 1
        # Build the answer as a greedy, lexicographically smallest stack.
        res = []
        for char in s:
            dic[char] -= 1
            if char not in res:
                # Pop larger characters that still occur later; they can be re-added.
                while res and char < res[-1] and dic[res[-1]] > 0:
                    res.pop()
                res.append(char)
        return ''.join(res)
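# Quick sanity check (illustrative, not part of the original solution):
# the two classic LeetCode 316 examples.
if __name__ == "__main__":
    solver = Solution()
    print(solver.removeDuplicateLetters("bcabc"))     # expected: "abc"
    print(solver.removeDuplicateLetters("cbacdcbc"))  # expected: "acdb"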
|
42884
|
import argparse
def merge(infiles, outfile):
setReads = set()
for infile in infiles:
with open(infile, "r") as fileIn:
for strLine in fileIn:
if strLine.startswith('@'):
continue
strSplit = strLine.split("\t")
if strSplit[2] != '*':
setReads.add(strSplit[0])
with open(outfile, "w") as fileOut:
for strRead in setReads:
fileOut.write(strRead + "\n")
return(len(setReads))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("infiles", nargs="+",
help="sam files you wish to merge")
parser.add_argument("outfile", help="output file")
args = parser.parse_args()
iNumUniqReads = merge(args.infiles, args.outfile)
print("Number of merged reads: " + str(iNumUniqReads))
return 0
if __name__ == '__main__':
main()
|
42930
|
from ._kaldi_error import *
from ._timer import *
__all__ = [name for name in dir()
if name[0] != '_'
and not name.endswith('Base')]
|
43000
|
import setuptools
setuptools.setup(
name="almond-cloud-cli",
version="0.0.0",
author="<NAME>",
author_email="<EMAIL>",
description="Command Line Interface (CLI) for Almond Cloud development and deployment",
url="https://github.com/stanford-oval/almond-cloud",
packages=setuptools.find_packages(),
python_requires=">=3,<4",
install_requires=[
"clavier==0.1.3a3",
"kubernetes>=19.15.0,<20",
"pyyaml>=6.0,<7",
],
scripts=[
"bin/almond-cloud",
],
)
|
43067
|
import pickle
import numpy as np
import matplotlib.pyplot as plt
with open('./quadratic/eval_record.pickle','rb') as loss:
data = pickle.load(loss)
print('Mat_record',len(data['Mat_record']))
#print('bias',data['inter_gradient_record'])
#print('constant',data['intra_record'])
with open('./quadratic/evaluate_record.pickle','rb') as loss1:
data1 = pickle.load(loss1)
x = np.array(data1['x_record'])
print('x_record',x.shape)
#print('bias',data1['inter_gradient_record'])
#print('constant',data1['intra_record'])
#x = range(10000)
#ax = plt.axes(yscale='log')
#ax.plot(x,data,'b')
#plt.show('loss')
|
43085
|
from django.forms import TextInput
from django.forms.widgets import MultiWidget, RadioSelect
from django.template.loader import render_to_string
class MultiTextWidget(MultiWidget):
def __init__(self, widgets_length, **kwargs):
widgets = [TextInput() for _ in range(widgets_length)]
kwargs.update({"widgets": widgets})
super(MultiTextWidget, self).__init__(**kwargs)
def decompress(self, value):
return value if value is not None else []
def format_output(self, rendered_widgets):
return render_to_string(
"formly/run/_multiple_input.html",
context={
"inputs": rendered_widgets
}
)
class LikertSelect(RadioSelect):
"""
This class differentiates Likert-scale radio selects
from "normal" radio selects for presentation purposes.
"""
pass
class RatingSelect(RadioSelect):
pass
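# --- Usage sketch (illustrative, not part of formly) ---
# How one of these widgets might be wired into a plain Django form inside a
# configured project; the field name and choices below are made up.
from django import forms
class ExampleSatisfactionForm(forms.Form):
    rating = forms.ChoiceField(
        choices=[(i, str(i)) for i in range(1, 6)],
        widget=LikertSelect,  # rendered like a RadioSelect, but styled as a Likert scale
    )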
|
43090
|
import socket
def is_connectable(host, port):
sock = None
try:
sock = socket.create_connection((host, port), 1)
result = True
except socket.error:
result = False
finally:
if sock:
sock.close()
return result
|
43108
|
from singlecellmultiomics.modularDemultiplexer.baseDemultiplexMethods import UmiBarcodeDemuxMethod
class chrom10x_c16_u12(UmiBarcodeDemuxMethod):
def __init__(self, barcodeFileParser, **kwargs):
self.barcodeFileAlias = '10x_3M-february-2018'
UmiBarcodeDemuxMethod.__init__(
self,
umiRead=0,
umiStart=16,
umiLength=12,
barcodeRead=0,
barcodeStart=0,
barcodeLength=16,
random_primer_read=None,
random_primer_length=None,
barcodeFileAlias=self.barcodeFileAlias,
barcodeFileParser=barcodeFileParser,
**kwargs)
self.shortName = 'CHROMC16U12'
self.longName = 'Chromium 10x, CB: 16bp, UMI: 12bp'
self.autoDetectable = False
self.description = 'R1 starts with a 16bp cell barcode followed by a 12bp UMI.'
|
43109
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# read CSV data into a "dataframe" - pandas can parse dates
# this will be familiar to R users (not so much matlab users)
df = pd.read_csv('data/SHA.csv', index_col=0, parse_dates=True)
Q = df.SHA_INFLOW_CFS # a pandas series (daily)
# Q = Q.resample('AS-OCT').sum() # annual values
print(Q.autocorr(lag=1))
# plot a correlogram with confidence bounds
pd.plotting.autocorrelation_plot(Q)
plt.xlim([0,365])
plt.show()
from statsmodels.tsa import stattools
pacf,ci = stattools.pacf(Q, nlags=7, alpha=0.05)
plt.plot(pacf, linewidth=2)
plt.plot(ci, linestyle='dashed', color='0.5')
plt.show()
# we did this with pandas to simplify the resampling operations
# but we can also do it with numpy
# (using annual flow values)
Q = df.SHA_INFLOW_CFS.resample('AS-OCT').sum().values # now a numpy array
def autocorr(x,k):
return np.corrcoef(x[:len(x)-k], x[k:])[0,1]
print(autocorr(Q,k=1))
|
43115
|
import unittest
from calculator import multiply
class TestSomething(unittest.TestCase):
def test_multiply(self):
self.assertEqual(6, multiply(2,3))
if __name__ == '__main__':
unittest.main()
|
43155
|
from util.callbacks import callsback
from util.threads.timeout_thread import Timer
from util.primitives import Storage as S
from traceback import print_exc
import util.primitives.structures as structures
from .fbutil import trim_profiles, extract_profile_ids
import traceback
import simplejson
import facebookapi
import hooks
from social.network import SocialFeed
from util.Events import EventMixin
import util.primitives.mapping as mapping
from logging import getLogger
from util.primitives.mapping import Ostorage
from gui import skin
import graphapi
log = getLogger("Facebook2.0")
POSTS_LIMIT = 100
FORCED_APPS = {'News Feed': 'nf',
'Status Updates': 'app_2915120374',
'Photos': 'app_2305272732',
'Links': 'app_2309869772'}
FORCED_KEYS = {
'__notification__':{'name':'Notifications',
'icon_url':'facebookicons.notifications_icon'}
}
KNOWN_APPS_LOOKUP = mapping.dictreverse(FORCED_APPS)
#COMMENTS_QUERY = "SELECT fromid, text, time, post_id, id FROM comment WHERE post_id IN (SELECT post_id FROM #posts)"
PROFILES_QUERY = """SELECT id, name, pic_square, url FROM profile WHERE id IN (SELECT viewer_id FROM #posts) OR id IN(SELECT actor_id FROM #posts) OR id in (SELECT target_id FROM #posts) OR id in (SELECT source_id FROM #posts) OR id IN (SELECT likes.sample FROM #posts) OR id IN (SELECT likes.friends FROM #posts) OR id IN (SELECT sender_id FROM #notifications)"""
#ALL_POSTS_QUERY = 'SELECT post_id, comments, permalink, created_time, updated_time, viewer_id, actor_id, target_id, source_id, message, attachment, action_links, likes FROM stream where filter_key="nf" and is_hidden=0 LIMIT 100'
BIRTHDAY_QUERY = 'select name, birthday_date, profile_url, uid from user where uid IN (select uid2 from friend where uid1=%d)'
NOW_QUERY = 'select now() from user where uid=%d'
EVENTS_QUERY = 'select eid from event where eid in (select eid from event_member where uid=me() and rsvp_status="not_replied") and start_time > now()'
STATUS_QUERY = 'select message, status_id, time, uid from status where uid=me() limit 1'
NOTIFICATIONS_QUERY = 'select notification_id,sender_id,created_time,updated_time,title_html,title_text,href,is_unread,app_id from notification where recipient_id=me()'
APP_QUERY = 'SELECT app_id,icon_url FROM application WHERE app_id IN (SELECT app_id from #notifications)'
POST_FILTER_KEY_QUERY = "select post_id, filter_key from stream where post_id in (select post_id from #latest_posts) and filter_key in (select filter_key from #filter_keys)"
FILTER_KEY_QUERY = "select filter_key, name, icon_url from stream_filter where uid=me() and ((is_visible=1 and type='application') or filter_key in ('" + "', '".join(FORCED_APPS.values()) + "')) ORDER BY rank ASC"
POST_QUERY = 'SELECT post_id, comments, permalink, created_time, updated_time, viewer_id, actor_id, target_id, source_id, message, attachment, action_links, likes FROM stream where post_id="%s"'
#UPDATED_POSTS_QUERY = 'SELECT post_id, comments, permalink, created_time, updated_time, viewer_id, actor_id, target_id, source_id, message, attachment, action_links, likes FROM stream where filter_key="nf" and is_hidden=0 and updated_time > %s LIMIT 100'
LATEST_POSTS_QUERY = ' '.join(x.strip() for x in '''
SELECT post_id, updated_time
FROM stream
WHERE filter_key="%%s" %%s ORDER BY created_time DESC
LIMIT %d
'''.strip().splitlines()) % POSTS_LIMIT
UPDATED_POSTS_QUERY = ' '.join(x.strip() for x in '''SELECT post_id, comments, permalink, created_time, updated_time, viewer_id, actor_id, target_id, source_id, message, attachment, action_links, likes
FROM stream
WHERE post_id in
(
SELECT post_id
FROM #latest_posts
WHERE updated_time > %s
) ORDER BY created_time DESC'''.strip().splitlines())
UPDATE_STREAM_QUERY = {
#'comments':COMMENTS_QUERY,
'profiles':PROFILES_QUERY}
from facebook.fbacct import FBIB
class FacebookProtocol(EventMixin):
events = EventMixin.events | set([
'stream_requested',
'not_logged_in',
'got_stream',
'status_updated',
'conn_error',
'infobox_dirty',
])
def __init__(self, acct):
self.stream_request_outstanding = True
self.acct = acct
self._init_apis()
self.last_stream = True
self.last_filter_key = self.filter_key
EventMixin.__init__(self)
self.social_feed = SocialFeed('facebook_' + self.acct.username,
'newsfeed',
self.get_post_feed,
self.htmlize_posts,
self.set_infobox_dirty)
def set_infobox_dirty(self):
self.event('infobox_dirty')
def htmlize_posts(self, posts, stream_context):
'''Convert one facebook newsfeed post into infobox HTML.'''
t = FBIB(self.acct)
#CAS: pull out the context stuff, the default FBIB grabs self.last_stream, not the one we have context for!
return t.get_html(None, set_dirty=False,
file='posts.py.xml',
dir=t.get_context()['app'].get_res_dir('base'),
context=S(posts=posts))
def get_post_feed(self):
# TODO bring back feed context.
return iter(self.last_stream.posts)
@property
def filter_key(self):
return ['nf', 'lf', 'h'][self.acct.preferred_filter_key]
@property
def hidden_posts(self):
return "and is_hidden=0" if self.acct.show_hidden_posts else ''
def get_stream(self):
self.stream_request_outstanding = True
self.do_get_stream()
def _init_apis(self):
self._init_digsby()
def _init_digsby(self, session_key='', secret='', uid=None):
access_token = getattr(self.acct, 'access_token', None)
uid = getattr(self.acct, 'uid', None),
self.digsby = graphapi.LegacyRESTAPI(access_token, uid=uid)
def do_get_stream(self, num_tries=0):
from util import default_timer
self.start_get_stream = default_timer()
if not self.digsby.logged_in:
return self.event('not_logged_in')
#refresh full stream if pref has changed
prev_filter_key, self.last_filter_key = self.last_filter_key, self.filter_key
if not isinstance(self.last_stream, dict) or prev_filter_key != self.filter_key:
kw = dict(success=lambda *a: self.get_stream_success(num_tries=num_tries, *a),
error = lambda *a: self.get_stream_error(num_tries, *a))
updated_time = 0
else:
kw = dict(success=self.update_stream_success,
error = lambda *a: self.get_stream_error(num_tries, *a))
updated_time = max(self.last_stream.posts + [S(updated_time=0)], key=lambda v: v.updated_time).updated_time
# query = self.digsby.multiquery(prepare=True,
self.last_run_multi = dict(
# birthdays = BIRTHDAY_QUERY % self.digsby.uid,
latest_posts = LATEST_POSTS_QUERY % (self.filter_key, self.hidden_posts),
posts = UPDATED_POSTS_QUERY % (('%d' % updated_time) + '+0'),
# now = NOW_QUERY % self.digsby.uid,
events = EVENTS_QUERY,
status = STATUS_QUERY,
notifications = NOTIFICATIONS_QUERY,
apps = APP_QUERY,
post_filter_keys = POST_FILTER_KEY_QUERY,
filter_keys = FILTER_KEY_QUERY,
**UPDATE_STREAM_QUERY)
self.digsby.fql.multiquery(queries=self.last_run_multi, **kw)
# alerts = self.digsby.notifications.get(prepare=True)
# self.digsby.batch.run(method_feed=[alerts, query], **kw)
def update_status(self):
self.digsby.query(STATUS_QUERY, success=self.status_updated)
def status_updated(self, status):
status = status[0]
if status is not None:
status['uid'] = self.digsby.uid
self.last_status = status
self.event('status_updated')
def update_stream_success(self, value):
return self.get_stream_success(value, update=True)
def get_stream_success(self, value, update=False, num_tries=0):
from util import default_timer
self.end_get_stream = default_timer()
log.debug('stream get took %f seconds', self.end_get_stream - self.start_get_stream)
stream = value
# v = []
# for val in value:
# v.append(simplejson.loads(val, object_hook=facebookapi.storageify))
# alerts, stream = v[:2]
self.last_alerts = Alerts(self.acct)
from facebookapi import simplify_multiquery
try:
# print stream
new_stream = simplify_multiquery(stream,keys={'posts':None,
# 'comments':None,
'latest_posts':None,
'profiles':'id',
# 'now':None,
'events':list,
'status':None,
'notifications': None,
'apps' : 'app_id',
'post_filter_keys':None,
'filter_keys':'filter_key'})# 'birthdays':'uid',})
import util.primitives.funcs as funcs
# new_stream['comments'] = dict(funcs.groupby(new_stream['comments'], lambda x: x['post_id']))
new_stream['comments'] = {}
new_stream['post_ids'] = post_ids = {}
for k, v in new_stream['filter_keys'].iteritems():
if not v.get('name'):
v['name'] = KNOWN_APPS_LOOKUP.get(k, v.get('name'))
new_stream['filter_keys'].update([(k, dict(name=d['name'],
icon_url=skin.get(d['icon_url']).path.url())) for k,d in FORCED_KEYS.items()])
new_stream['post_filter_keys'] = dict((post_id, structures.oset(p['filter_key'] for p in vals))
for post_id, vals in
funcs.groupby(new_stream['post_filter_keys'], lambda x: x['post_id']))
for post in new_stream['posts']:
post['comments']['count'] = int(post['comments']['count'])
new_stream['apps'], apps_str = {}, new_stream['apps']
for app_id, app_dict in apps_str.items():
new_stream['apps'][int(app_id)] = app_dict
try:
new_stream['now'] = new_stream['now'][0].values()[0]
except (IndexError, KeyError) as _e:
# print_exc()
import time
new_stream['now'] = time.time()
self.last_alerts.event_invites &= set(new_stream['events'])
self.last_status = (new_stream['status'][:1] or [Ostorage([('message', ''), ('status_id', 0), ('time', 0)])])[0]
self.last_status['uid'] = self.digsby.uid
if not isinstance(new_stream['posts'], list):
log.error('stream: %r', stream)
raise ValueError('Facebook returned type=%r of posts' % type(new_stream['posts']))
for post in new_stream['posts']: #get the new ones
post_ids[post['post_id']] = post
if 'notifications' in new_stream:
import lxml
for notification in new_stream['notifications']:
title_html = notification.get('title_html', None)
if title_html is None:
continue
s = lxml.html.fromstring(title_html)
s.make_links_absolute('http://www.facebook.com', resolve_base_href = False)
for a in s.findall('a'):
a.tag = 'span'
# _c = a.attrib.clear()
a.attrib['class'] = 'link notification_link'
[x.attrib.pop("data-hovercard", None) for x in s.findall(".//*[@data-hovercard]")]
notification['title_html'] = lxml.etree.tostring(s)
self.last_alerts.update_notifications(new_stream['notifications'])
if update:
latest_posts = filter(None, (post_ids.get(post_id, self.last_stream.post_ids.get(post_id)) for post_id in
structures.oset([post['post_id'] for post in new_stream['latest_posts']] +
[post['post_id'] for post in self.last_stream.posts])))[:POSTS_LIMIT]
new_stream['posts'] = latest_posts
for post in new_stream['posts']: #update the dict with the combined list
post_ids[post['post_id']] = post
for key in self.last_stream.comments:
if key in post_ids and key not in new_stream.comments:
new_stream.comments[key] = self.last_stream.comments[key]
for key in self.last_stream.profiles:
if key not in new_stream.profiles:
new_stream.profiles[key] = self.last_stream.profiles[key]
trim_profiles(new_stream)
for p in new_stream.posts: p.id = p.post_id # compatability hack for ads
self.last_stream = new_stream
self.social_feed.new_ids([p['post_id'] for p in self.last_stream.posts])
        except Exception as e:
traceback.print_exc()
return self.get_stream_error(num_tries=num_tries, error=e)
self.event('got_stream')
def get_stream_error(self, num_tries, error=None, *a): #*a, **k for other kinds of errors.
if not_logged_in(error): #doesn't matter if it's really a facebook error; should fail this test if not
return self.event('not_logged_in')
elif num_tries < 2:
Timer(2, lambda: self.do_get_stream(num_tries + 1)).start()
else:
self.event('conn_error')
@callsback
def addComment(self, post_id, comment, callback=None):
self.digsby.stream.addComment(post_id=post_id, comment=comment,
success = lambda resp: self.handle_comment_resp(resp, post_id, comment, callback),
error = lambda resp: self.handle_comment_error(resp, post_id, comment, callback))
@callsback
def removeComment(self, comment_id, callback=None):
self.digsby.stream.removeComment(comment_id=comment_id,
success = lambda resp: self.handle_comment_remove_resp(resp, comment_id, callback),
error = lambda resp: self.handle_comment_remove_error(resp, comment_id, callback))
@callsback
def getComments(self, post_id, callback=None, limit=50, **k):
self.digsby.multiquery(
comments = 'SELECT fromid, text, time, post_id, id FROM comment WHERE post_id="%s" ORDER BY time DESC LIMIT %d' % (post_id, limit),
count = 'SELECT comments.count FROM stream where post_id="%s"' % post_id,
profiles = """SELECT id, name, pic_square, url FROM profile WHERE id IN (SELECT fromid FROM #comments)""",
success = lambda resp: self.handle_get_comments_resp(resp, post_id, callback),
error = lambda req, resp = None: self.handle_get_comments_error(resp or req, post_id, callback)
)
def handle_get_comments_resp(self, resp, post_id, callback):
from facebookapi import simplify_multiquery
resp = simplify_multiquery(resp,
{'comments':None,
'count':None,
'profiles':'id'}
)
resp['comments'].sort(key = lambda c: c['time'])
try:
count = resp['count'][0]['comments']['count']
try:
self.last_stream['post_ids'][post_id]['comments']['count'] = int(count)
except Exception:
traceback.print_exc()
except Exception:
num_comments = len(resp['comments'])
if num_comments >= 50:
count = -1
else:
count = num_comments
self.last_stream['comments'][post_id] = resp['comments']
self.last_stream['profiles'].update(resp['profiles'])
callback.success(post_id, count)
def handle_get_comments_error(self, resp, post_id, callback):
callback.error(resp)
def handle_comment_remove_resp(self, resp, comment_id, callback):
if resp:
for post_id, comments in self.last_stream['comments'].items():
for i, comment in enumerate(comments):
if comment['id'] == comment_id:
c = comments.pop(i)
post = self.last_stream['post_ids'][post_id]
post['comments']['count'] -= 1
callback.success(post_id)
hooks.notify('digsby.facebook.comment_removed', c)
return
def handle_comment_remove_error(self, resp, comment_id, callback):
callback.error()
@callsback
def addLike(self, post_id, callback):
self.digsby.stream.addLike(post_id=str(post_id),
success = (lambda resp: self.handle_like_resp(resp, post_id, callback)),
error = (lambda resp: self.handle_like_error(resp, post_id, callback)))
@callsback
def removeLike(self, post_id, callback):
self.digsby.stream.removeLike(post_id=post_id,
success = (lambda resp: self.handle_unlike_resp(resp, post_id, callback)),
error = (lambda resp: self.handle_unlike_error(resp, post_id, callback)))
def handle_like_resp(self, resp, post_id, callback):
post = self.last_stream['post_ids'][post_id]
post['likes'].update(user_likes=True)
post['likes']['count'] += 1
callback.success(post_id)
hooks.notify('digsby.facebook.like_added', post_id)
def handle_unlike_resp(self, resp, post_id, callback):
post = self.last_stream['post_ids'][post_id]
post['likes'].update(user_likes=False)
post['likes']['count'] -= 1
callback.success(post_id)
hooks.notify('digsby.facebook.like_removed', post_id)
#regen likes block, regen likes link block, send to callback
#regen cached post html
def handle_comment_resp(self, response, post_id, comment, callback):
comment_id = response
post = self.last_stream['post_ids'][post_id]
post['comments']['count'] += 1
import time
comment_dict = S({'fromid': post['viewer_id'],
'id': comment_id,
'post_id': post_id,
'text': comment,
'time': time.time()})
self.last_stream['comments'].setdefault(post_id, []).append(comment_dict)
callback.success(post_id, comment_dict)
hooks.notify('digsby.facebook.comment_added', comment_dict)
#regen comment, regen comment link block
#regen cached post html
def handle_comment_error(self, response, post_id, comment, callback):
callback.error(response)
def handle_like_error(self, response, post_id, callback):
callback.error(response)
def handle_unlike_error(self, response, post_id, callback):
callback.error(response)
@callsback
def get_user_name_gender(self, callback=None):
def success(info):
try:
info = info[0]
except Exception:
traceback.print_exc()
callback.error(info)
else:
if isinstance(info, dict):
callback.success(info)
else:
callback.error(info)
self.digsby.query('SELECT first_name, last_name, sex FROM user WHERE uid=' + str(self.digsby.uid), success=success, error=callback.error)
from .objects import Alerts
#not ready to mess with code that's 17000 revisions old.
#minimal subclass to get rid of the reference to a facebook object
#the only reason it is there is to grab the filters; not up to that point yet here.
#class Alerts(Alerts_Super):
# def __init__(self, notifications_get_xml=None):
# super(Alerts, self).__init__(None, notifications_get_xml)
# if hasattr(self, 'fb'):
# del self.fb
#
# def __sub__(self, other):
# ret = Alerts()
# for attr in self.stuff:
# setattr(ret, attr, getattr(self, attr) - getattr(other, attr))
# return ret
#
# def __getitem__(self, key):
# return getattr(self, key)
login_error_codes = frozenset(
[100, #no session key
102, #session invalid
104, #signature invalid (likely the secret is messed up)
] +
range(450, 455 + 1) + #session errors
[612] #permission error
)
def not_logged_in(fb_error):
return getattr(fb_error, 'code', None) in login_error_codes
|
43162
|
import configparser
import os
ApplicationDir = os.path.dirname(os.path.abspath(__file__))
HomeDir = os.path.expanduser('~')
CredentialDir = os.path.join(HomeDir, '.credentials')
if not os.path.exists(CredentialDir):
os.makedirs(CredentialDir)
CredentialFilePath = os.path.join(CredentialDir, 'CalSyncHAB.json')
CalSyncHABSettings = os.path.join(ApplicationDir, 'CalSyncHAB.ini')
Settings = configparser.ConfigParser()
Settings.read(CalSyncHABSettings)
ApplicationName = Settings.get('General', 'ApplicationName')
CalendarScope = Settings.get('Calendar', 'Scope')
CalendarId = Settings.get('Calendar', 'CalendarId')
CalendarMaxEvents = Settings.get('Calendar', 'MaxEvents')
CalendarTimeZone = Settings.get('Calendar', 'TimeZone')
CalendarClientSecretFile = Settings.get('Calendar', 'ClientSecretFile')
OpenHABHostName = Settings.get('OpenHAB', 'HostName')
OpenHABPort = Settings.get('OpenHAB', 'Port')
OpenHABItemPrefix = Settings.get('OpenHAB', 'ItemPrefix')
|
43213
|
from relex.predictors.relation_classification.relation_classifier_predictor import RelationClassifierPredictor
|
43268
|
import datetime
import re
import time
from collections import namedtuple
from django.conf import settings
from django.core.management.base import BaseCommand
from trello import ResourceUnavailable, TrelloClient
from core.models import Event
# Create new command
class Command(BaseCommand):
help = 'Syncs event in trello board. Need a token.'
missing_args_message = (
'You need to add a token! Get one here: '
'https://trello.com/1/authorize?key=01ab0348ca020573e7f728ae7400928a&scope=read%2Cwrite&'
'name=My+Application&expiration=1hour&response_type=token'
)
def add_arguments(self, parser):
parser.add_argument('trello_token', type=str)
def handle(self, *args, **options):
token = options['trello_token']
events = event_list()
sync(events, token)
# Get data
EventTuple = namedtuple('EventTuple', 'name id city date')
def event_list():
event = Event.objects.all()
result = []
for e in event:
name = e.name
_id = str(e.pk)
city = e.city
date = datetime.date(e.date.year, e.date.month, e.date.day or 1)
result.append(EventTuple(name, _id, city, date))
return result
# Sync to trello
ADMIN_BASE_URL = 'https://djangogirls.org/admin/core/event/'
def sync(events, token):
trello = TrelloClient(api_key=settings.TRELLO_API_KEY, token=token)
board = trello.get_board('55f7167c46760fcb5d68b385')
far_away, less_2_months, less_1_month, less_1_week, today, past = board.all_lists()
all_cards = {card_id(c): c for c in board.all_cards()}
date_today = datetime.date.today()
for e in events:
card = all_cards.get(e.id)
if not card:
card = create_card(e, far_away)
create_checklist(card)
# fetch card to get due date
try:
card.fetch()
except ResourceUnavailable:
print("Oopsie: too many requests! Let's wait 10 seconds!")
time.sleep(10)
card.fetch()
if e.date != card.due_date.date():
print('Changing due date of {} to {}'.format(e.city, e.date))
card.set_due(e.date)
distance = (e.date - date_today).days
if distance < 0:
right_list = past
elif distance == 0:
right_list = today
elif distance < 7:
right_list = less_1_week
elif distance < 30:
right_list = less_1_month
elif distance < 60:
right_list = less_2_months
else:
right_list = far_away
ensure_card_in_list(card, right_list)
def card_id(card):
m = re.search(ADMIN_BASE_URL + r'(\d+)',
card.desc)
return m.group(1)
def create_card(event, list):
print('Creating card {} ({})'.format(event.city, event.date.isoformat()))
return list.add_card(name=event.city,
desc=ADMIN_BASE_URL + event.id,
due=event.date.isoformat())
def create_checklist(card):
card.add_checklist("Things to do:", [
"2 month check", "1 month check", "Thank you email and request for stats", "Stats obtained"])
def ensure_checklist_in_card(card):
if not card.checklists:
print("Adding checklist to {} card.".format(card.name))
create_checklist(card)
def ensure_card_in_list(card, list):
if card.list_id != list.id:
print('Moving {} to {}'.format(
card.name, list.name))
card.change_list(list.id)
|
43291
|
from setuptools import setup, find_packages
try:
import s3stat
doc = s3stat.__doc__
except ImportError:
doc = "The docs are only available when the package is already installed. Sorry for this."
setup(
name="s3stat",
version="2.3.1",
description='An extensible Amazon S3 and Cloudfront log parser.',
long_description=doc,
author="<NAME>",
author_email='<EMAIL>',
url='https://github.com/nagyv/s3stat',
include_package_data=True,
zip_safe=False,
install_requires=['boto', 'tempdir'],
py_modules=['s3stat'],
scripts=['s3stat.py'],
keywords="s3stat amazon statistics goaccess"
# tests_require=['pytest'],
# cmdclass = {
# 'test': PyTest,
# }
)
|
43299
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import psoap
from psoap.data import lkca14, redshift, Chunk
from psoap import matrix_functions
from psoap import covariance
from psoap import orbit
# from matplotlib.ticker import FormatStrFormatter as FSF
# from matplotlib.ticker import MaxNLocator
# from matplotlib.ticker import MultipleLocator
# Specify orbital parameters and make a sanity plot
q = 0.2
K = 5.0 # km/s
e = 0.2 #
omega = 10.0 # deg
P = 10.0 # days
T0 = 0.0 # epoch
gamma = 5.0 # km/s
n_epochs = 10
obs_dates = np.array([2.1, 4.9, 8.0, 9.9, 12.2, 16.0, 16.9, 19.1, 22.3, 26.1])
# obs_dates = np.linspace(5, 150, num=n_epochs)
orb = orbit.SB2(q, K, e, omega, P, T0, gamma, obs_dates)
vAs, vBs = orb.get_component_velocities()
dates_fine = np.linspace(0, 30, num=200)
vA_fine, vB_fine = orb.get_component_velocities(dates_fine)
vAs_relative = vAs - vAs[0]
np.save("SB2/vAs_relative.npy", vAs_relative)
vBs_relative = vBs - vBs[0]
np.save("SB2/vBs_relative.npy", vBs_relative)
fig, ax = plt.subplots(nrows=3, figsize=(6,6))
ax[0].plot(dates_fine, vA_fine, "b")
ax[0].plot(orb.obs_dates, vAs, "bo")
ax[0].plot(dates_fine, vB_fine, "g")
ax[0].plot(orb.obs_dates, vBs, "go")
ax[0].axhline(gamma, ls="-.", color="0.5")
ax[-1].set_xlabel(r"$t$ [days]")
ax[0].set_ylabel(r"$v_A$ [km $\mathrm{s}^{-1}$]")
# For subsequent axes, plot velocities of stars relative to first observation.
ax[1].plot(orb.obs_dates, vAs_relative, "bo")
ax[1].set_ylabel(r"$v_A$ relative")
ax[2].plot(orb.obs_dates, vBs_relative, "go")
ax[2].set_ylabel(r"$v_B$ relative")
fig.subplots_adjust(left=0.14, right=0.86, bottom=0.24)
fig.savefig("SB2/orbit.png")
# Load the fake primary spectra we prepared
wl_f, fl_f = np.load("primary_wl_fl.npy")
# Load the fake secondary spectra we prepared
wl_g, fl_g = np.load("secondary_wl_fl.npy")
n_f = len(wl_f)
n_g = len(wl_g)
print("n_f:", n_f, "n_g:", n_g)
# Shorten these to be the same.
if n_f < n_g:
n_pix = n_f
print("Shortening g to f")
else:
    n_pix = n_g
print("Shortening f to g")
wl = wl_f[0:n_pix]
fl_f = fl_f[0:n_pix]
fl_g = fl_g[0:n_pix]
# Just assume that wl_f will be wl_g as well.
# Create fake wavelengths with Doppler shifts by apply these to the master wl
wls_f = np.empty((n_epochs, n_pix))
wls_g = np.empty((n_epochs, n_pix))
for i in range(n_epochs):
wls_f[i] = redshift(wl, vAs[i])
wls_g[i] = redshift(wl, vBs[i])
# Falling plot of all eight epochs of each spectrum, overlaid with the velocities for each
# Show spectra on each plot along with chosen amplitude scaling
fig, ax = plt.subplots(nrows=n_epochs, sharex=True)
for i in range(n_epochs):
ax[i].plot(wls_f[i], fl_f, "b")
ax[i].plot(wls_g[i], fl_g, "g")
ax[i].set_ylabel("epoch {:}".format(i))
ax[-1].set_xlabel(r"$\lambda [\AA]$")
fig.savefig("SB2/dataset_noiseless_full.png", dpi=300)
# Here is where we set up the number of chunks, and choose what region of overlaps we want.
# New chunks [start, stop]
# chunk_wls = [[5240, 5250], [5255, 5265], [5270, 5280]]
chunk_wls = [[5265, 5275]]
# Measure this as S/N per resolution element. That means that there is a sqrt(2.5) effect.
# let alpha be the percentage of the primary as the total flux.
ratio = 0.2
alpha = (1 / (ratio + 1))
print("Ratio: {}, alpha: {}".format(ratio, alpha))
# alpha = 0.90
# Assume a S/N = 40, so N = 1.0 / 40
S_N = 60 # per resolution element
noise_amp = 1.0 / (S_N/np.sqrt(2.5)) # per pixel
# Truncate down to a smaller region to ensure overlap between all orders.
for (wl0, wl1) in chunk_wls:
print("Creating chunk {:.0f} to {:.0f}".format(wl0, wl1))
# Keep everything the same size. These are how many pixels we plan to keep in common between
# epochs
ind = (wls_f[0] > wl0) & (wls_f[0] < wl1)
n_pix_common = np.sum(ind)
print("n_pix_common = {}".format(n_pix_common))
# Now choose a narrower, common wl grid, which will just be f.
# Now we should have a giant array of wavelengths that all share the same flux values, but shifted
wls_comb = np.zeros((n_epochs, n_pix_common))
fls_f = np.empty((n_epochs, n_pix_common))
fls_g = np.empty((n_epochs, n_pix_common))
fls_comb = np.empty((n_epochs, n_pix_common))
fls_noise = np.zeros((n_epochs, n_pix_common))
sigma_comb = noise_amp * np.ones((n_epochs, n_pix_common))
for i in range(n_epochs):
# Select a subset of wl_f that has the appropriate number of pixels
ind_0 = np.searchsorted(wls_f[i], wl0)
print("Inserting at index {}, wavelength {:.2f}".format(ind_0, wls_f[i, ind_0]))
wl_common = wls_f[i, ind_0:(ind_0 + n_pix_common)]
# Interpolate the master spectrum onto this grid
interp = interp1d(wls_f[i], fl_f)
fl_f_common = interp(wl_common)
interp = interp1d(wls_g[i], fl_g)
fl_g_common = interp(wl_common)
fl_common = alpha * fl_f_common + (1 - alpha) * fl_g_common
# Add noise to it
fl_common_noise = fl_common + np.random.normal(scale=noise_amp, size=n_pix_common)
# Store into array
wls_comb[i] = wl_common
fls_f[i] = fl_f_common
fls_g[i] = fl_g_common
fls_comb[i] = fl_common
fls_noise[i] = fl_common_noise
fig, ax = plt.subplots(nrows=4, sharex=True)
ax[0].plot(wl_common, alpha * fl_f_common, "b")
ax[0].set_ylabel(r"$f$")
ax[1].plot(wl_common, (1 - alpha) * fl_g_common, "g")
ax[1].set_ylabel(r"$g$")
ax[2].plot(wl_common, fl_common, "k")
ax[2].set_ylabel(r"$f + g$")
ax[3].plot(wl_common, fl_common_noise, "k")
ax[3].set_ylabel(r"$f + g +$ noise")
ax[-1].set_xlabel(r"$\lambda\;[\AA]$")
fig.savefig("SB2/epoch_{}.png".format(i), dpi=300)
# Save the created spectra into a chunk
date_comb = obs_dates[:,np.newaxis] * np.ones_like(wls_comb)
chunkSpec = Chunk(wls_comb, fls_noise, sigma_comb, date_comb)
wl0 = np.min(wls_comb)
wl1 = np.max(wls_comb)
chunkSpec.save(0, wl0, wl1, prefix="SB2/")
# 2D arrays before we have summed them or added noise.
print("STDEV primary", np.std(alpha * fls_f))
print("STDEV secondary", np.std((1 - alpha) * fls_g))
np.save("SB2/fls_f.npy", alpha * fls_f)
np.save("SB2/fls_g.npy", (1 - alpha) * fls_g)
np.save("SB2/fls_comb.npy", fls_comb)
|
43322
|
from slack_sdk import WebClient
from slack_bolt.app.app import SlackAppDevelopmentServer, App
from tests.mock_web_api_server import (
setup_mock_web_api_server,
cleanup_mock_web_api_server,
)
from tests.utils import remove_os_env_temporarily, restore_os_env
class TestDevServer:
signing_secret = "secret"
valid_token = "<PASSWORD>"
mock_api_server_base_url = "http://localhost:8888"
web_client = WebClient(
token=valid_token,
base_url=mock_api_server_base_url,
)
def setup_method(self):
self.old_os_env = remove_os_env_temporarily()
setup_mock_web_api_server(self)
def teardown_method(self):
cleanup_mock_web_api_server(self)
restore_os_env(self.old_os_env)
def test_instance(self):
server = SlackAppDevelopmentServer(
port=3001,
path="/slack/events",
app=App(signing_secret=self.signing_secret, client=self.web_client),
)
assert server is not None
|
43337
|
import tensorflow as tf
if __name__ == "__main__":
with tf.Session() as sess:
game_dir = "Gobang"
model_dir = "model2_10_10_5"
batch = "11000"
        # Initialize variables
sess.run(tf.global_variables_initializer())
        # Get the latest checkpoint (this really just parses the checkpoint file)
latest_ckpt = tf.train.latest_checkpoint("../" + game_dir + "/" + model_dir + "/" + batch)
        # Load the graph definition
restore_saver = tf.train.import_meta_graph("../" + game_dir + "/" + model_dir + "/" + batch + "/policy_value_net.model.meta")
        # Restore the graph, i.e. load the weights and other parameters into their places in the graph
restore_saver.restore(sess, latest_ckpt)
        # Convert the variables in the graph to constants
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess, sess.graph_def, ["action_fc/LogSoftmax", "evaluation_fc2/Tanh"])
        # Save the new graph to the "/pretrained/graph.bytes" file
tf.train.write_graph(output_graph_def, "../" + game_dir + "/" + model_dir + "/" + batch, "graph.bytes", as_text=False)
|
43338
|
from django.db import connection
from rest_framework.decorators import api_view
from rest_framework.response import Response
@api_view()
def root(request):
return Response({"message": "Hello, from Yappa!",
"next step": "go to the next example: "
"connect you managed Postgresql!"})
|
43391
|
from .assembla import AssemblaPlatform
from .base import BasePlatform
from .bitbucket import BitbucketPlatform
from .friendcode import FriendCodePlatform
from .github import GitHubPlatform
from .gitlab import GitLabPlatform
# Supported platforms
PLATFORMS = [
# name -> Platform object
("github", GitHubPlatform()),
("bitbucket", BitbucketPlatform()),
("friendcode", FriendCodePlatform()),
("assembla", AssemblaPlatform()),
("gitlab", GitLabPlatform()),
# Match url
("base", BasePlatform()),
]
|
43445
|
from pysmt.shortcuts import Symbol
from pysmt.typing import INT
h = Symbol("H", INT)
domain = (1 <= h) & (10 >= h)
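# --- Illustrative continuation (an assumption, not part of the original snippet) ---
# The formula above only constrains H to [1, 10]; a typical next step is to ask
# an SMT solver for a model. Requires a solver to be installed
# (e.g. via `pysmt-install --z3`).
from pysmt.shortcuts import get_model, is_sat
print(is_sat(domain))      # True: 1 <= H <= 10 is satisfiable
model = get_model(domain)
print(model.get_value(h))  # some integer in [1, 10] chosen by the solver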
|