the-stack_0_3848 | # -*- coding: utf-8 -*-
# Copyright 2017 Vector Creations Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.storage.data_stores.main.client_ips import LAST_SEEN_GRANULARITY
from synapse.storage.database import Database
from synapse.util.caches.descriptors import Cache
from ._base import BaseSlavedStore
class SlavedClientIpStore(BaseSlavedStore):
def __init__(self, database: Database, db_conn, hs):
super(SlavedClientIpStore, self).__init__(database, db_conn, hs)
self.client_ip_last_seen = Cache(
name="client_ip_last_seen", keylen=4, max_entries=50000
)
def insert_client_ip(self, user_id, access_token, ip, user_agent, device_id):
now = int(self._clock.time_msec())
key = (user_id, access_token, ip)
try:
last_seen = self.client_ip_last_seen.get(key)
except KeyError:
last_seen = None
# Rate-limited inserts
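# (a repeat hit for the same (user_id, access_token, ip) within LAST_SEEN_GRANULARITY ms
# is dropped; otherwise the cache is primed via prefill() and the update is forwarded
# over TCP replication with send_user_ip instead of being written locally)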
if last_seen is not None and (now - last_seen) < LAST_SEEN_GRANULARITY:
return
self.client_ip_last_seen.prefill(key, now)
self.hs.get_tcp_replication().send_user_ip(
user_id, access_token, ip, user_agent, device_id, now
)
the-stack_0_3849 | import numpy as np
import math
import cv2
import numpy.random as random
class Compose(object):
"""Composes several augmentations together.
Args:
transforms (List[Transform]): list of transforms to compose.
Example:
>>> augmentations.Compose([
>>> transforms.CenterCrop(10),
>>> transforms.ToTensor(),
>>> ])
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img, pts=None):
for t in self.transforms:
img, pts = t(img, pts)
return img, pts
class RandomMirror(object):
def __init__(self):
pass
def __call__(self, image, polygons=None):
if np.random.randint(2):
image = np.ascontiguousarray(image[:, ::-1])
_, width, _ = image.shape
for polygon in polygons:
polygon.points[:, 0] = width - polygon.points[:, 0]
return image, polygons
class AugmentColor(object):
def __init__(self):
self.U = np.array([[-0.56543481, 0.71983482, 0.40240142],
[-0.5989477, -0.02304967, -0.80036049],
[-0.56694071, -0.6935729, 0.44423429]], dtype=np.float32)
self.EV = np.array([1.65513492, 0.48450358, 0.1565086], dtype=np.float32)
self.sigma = 0.1
self.color_vec = None
def __call__(self, img, polygons=None):
color_vec = self.color_vec
if self.color_vec is None:
if not self.sigma > 0.0:
color_vec = np.zeros(3, dtype=np.float32)
else:
color_vec = np.random.normal(0.0, self.sigma, 3)
alpha = color_vec.astype(np.float32) * self.EV
noise = np.dot(self.U, alpha.T) * 255
return np.clip(img + noise[np.newaxis, np.newaxis, :], 0, 255), polygons
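# The matrix U above looks like the eigenvectors and EV like the eigenvalues of an RGB
# covariance matrix, i.e. the AlexNet-style "PCA" colour jitter: a per-image offset
# U @ (N(0, sigma) * EV), scaled to the 0-255 range, is added to every pixel.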
class RandomContrast(object):
def __init__(self, lower=0.5, upper=1.5):
self.lower = lower
self.upper = upper
assert self.upper >= self.lower, "contrast upper must be >= lower."
assert self.lower >= 0, "contrast lower must be non-negative."
# expects float image
def __call__(self, image, polygons=None):
if random.randint(2):
alpha = random.uniform(self.lower, self.upper)
image *= alpha
return np.clip(image, 0, 255), polygons
class RandomBrightness(object):
def __init__(self, delta=32):
assert delta >= 0.0
assert delta <= 255.0
self.delta = delta
def __call__(self, image, polygons=None):
image = image.astype(np.float32)
if random.randint(2):
delta = random.uniform(-self.delta, self.delta)
image += delta
return np.clip(image, 0, 255), polygons
class Rotate(object):
def __init__(self, up=30):
self.up = up
def rotate(self, center, pt, theta):  # 2D rotation of points about a center (graphics convention)
xr, yr = center
yr = -yr
x, y = pt[:, 0], pt[:, 1]
y = -y
theta = theta / 360 * 2 * math.pi
cos = math.cos(theta)
sin = math.sin(theta)
_x = xr + (x - xr) * cos - (y - yr) * sin
_y = yr + (x - xr) * sin + (y - yr) * cos
return _x, -_y
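# Quick sanity check: with center=(0, 0) and theta=90 (degrees), the point (1, 0)
# maps to (0, -1); y is negated on the way in and out because image coordinates grow
# downward, so a mathematically counter-clockwise rotation appears clockwise on screen.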
def __call__(self, img, polygons=None):
if np.random.randint(2):
return img, polygons
angle = np.random.uniform(-self.up, self.up)
rows, cols = img.shape[0:2]
M = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1.0)
img = cv2.warpAffine(img, M, (cols, rows), borderValue=[0, 0, 0])
center = cols / 2.0, rows / 2.0
if polygons is not None:
for polygon in polygons:
x, y = self.rotate(center, polygon.points, angle)
pts = np.vstack([x, y]).T
polygon.points = pts
return img, polygons
class SquarePadding(object):
def __call__(self, image, pts=None):
H, W, _ = image.shape
if H == W:
return image, pts
padding_size = max(H, W)
expand_image = np.zeros((padding_size, padding_size, 3), dtype=image.dtype)
if H > W:
y0, x0 = 0, (H - W) // 2
else:
y0, x0 = (W - H) // 2, 0
if pts is not None:
pts[:, 0] += x0
pts[:, 1] += y0
expand_image[y0:y0+H, x0:x0+W] = image
image = expand_image
return image, pts
class Padding(object):
def __init__(self, fill=0):
self.fill = fill
def __call__(self, image, polygons=None):
if np.random.randint(2):
return image, polygons
try:
height, width, depth = image.shape
except ValueError:  # greyscale image without a channel dimension
height, width = image.shape
depth = 1
ratio = np.random.uniform(1, 2)
left = np.random.uniform(0, width * ratio - width)
top = np.random.uniform(0, height * ratio - height)
expand_image = np.zeros(
(int(height * ratio), int(width * ratio), depth),
dtype=image.dtype)
expand_image[:, :, :] = self.fill
expand_image[int(top):int(top + height),
int(left):int(left + width)] = image
image = expand_image
if polygons is not None:
for polygon in polygons:
polygon.points[:, 0] = polygon.points[:, 0] + left
polygon.points[:, 1] = polygon.points[:, 1] + top
return image, polygons
class RandomResizedCrop(object):
def __init__(self, size, scale=(0.3, 1.0), ratio=(3. / 4., 4. / 3.)):
self.size = (size, size)
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
for attempt in range(10):
area = img.shape[0] * img.shape[1]
target_area = np.random.uniform(*scale) * area
aspect_ratio = np.random.uniform(*ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if np.random.random() < 0.5:
w, h = h, w
if h < img.shape[0] and w < img.shape[1]:
j = np.random.randint(0, img.shape[1] - w)
i = np.random.randint(0, img.shape[0] - h)
return i, j, h, w
# Fallback
w = min(img.shape[0], img.shape[1])
i = (img.shape[0] - w) // 2
j = (img.shape[1] - w) // 2
return i, j, w, w
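# Rough example of the sampling above: for a 480x640 image (area 307200), a scale draw
# of 0.5 and an aspect ratio of 1.0 give target_area = 153600, so
# w = h = round(sqrt(153600)) = 392, comfortably inside both image dimensions.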
def __call__(self, image, pts=None):
i, j, h, w = self.get_params(image, self.scale, self.ratio)
cropped = image[i:i + h, j:j + w, :]
pts = pts.copy()
mask = (pts[:, 1] >= i) * (pts[:, 0] >= j) * (pts[:, 1] < (i+h)) * (pts[:, 0] < (j+w))
pts[~mask, 2] = -1
scales = np.array([self.size[0]/w, self.size[1]/h])
pts[:, :2] -= np.array([j, i])
pts[:, :2] = (pts[:, :2] * scales)
img = cv2.resize(cropped, self.size)
return img, pts
class RandomResizedLimitCrop(object):
def __init__(self, size, scale=(0.3, 1.0), ratio=(3. / 4., 4. / 3.)):
self.size = (size, size)
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
for attempt in range(10):
area = img.shape[0] * img.shape[1]
target_area = np.random.uniform(*scale) * area
aspect_ratio = np.random.uniform(*ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if np.random.random() < 0.5:
w, h = h, w
if h < img.shape[0] and w < img.shape[1]:
j = np.random.randint(0, img.shape[1] - w)
i = np.random.randint(0, img.shape[0] - h)
return i, j, h, w
# Fallback
w = min(img.shape[0], img.shape[1])
i = (img.shape[0] - w) // 2
j = (img.shape[1] - w) // 2
return i, j, w, w
def __call__(self, image, polygons=None):
i, j, h, w = self.get_params(image, self.scale, self.ratio)
cropped = image[i:i + h, j:j + w, :]
scales = np.array([self.size[0] / w, self.size[1] / h])
if polygons is not None:
for polygon in polygons:
polygon.points[:, 0] = (polygon.points[:, 0] - j) * scales[0]
polygon.points[:, 1] = (polygon.points[:, 1] - i) * scales[1]
img = cv2.resize(cropped, self.size)
return img, polygons
class Normalize(object):
def __init__(self, mean, std):
self.mean = np.array(mean)
self.std = np.array(std)
def __call__(self, image, polygons=None):
image = image.astype(np.float32)
image /= 255.0
image -= self.mean
image /= self.std
return image, polygons
class Resize(object):
def __init__(self, size=256):
self.size = size
def __call__(self, image, polygons=None):
h, w, _ = image.shape
image = cv2.resize(image, (self.size,
self.size))
scales = np.array([self.size / w, self.size / h])
if polygons is not None:
for polygon in polygons:
polygon.points = polygon.points * scales
return image, polygons
class Augmentation(object):
def __init__(self, size, mean, std):
self.size = size
self.mean = mean
self.std = std
self.augmentation = Compose([
# Resize(size),
Padding(),
RandomResizedLimitCrop(size=size, scale=(0.24, 1.0), ratio=(0.33, 3)),
# RandomBrightness(),
# RandomContrast(),
RandomMirror(),
Rotate(),
Normalize(mean, std)
])
def __call__(self, image, polygons=None):
return self.augmentation(image, polygons)
class BaseTransform(object):
def __init__(self, size, mean, std):
self.size = size
self.mean = mean
self.std = std
self.augmentation = Compose([
Resize(size),
Normalize(mean, std)
])
def __call__(self, image, polygons=None):
return self.augmentation(image, polygons)
the-stack_0_3851 | description = 'Various devices for logical motors in AMOR'
includes = ['sinq_amor_movable']
devices = dict(
controller = device('nicos_sinq.amor.devices.logical_motor.AmorLogicalMotorHandler',
description = 'Logical Motors Controller',
lowlevel = True,
loglevel = 'debug'
),
m2t = device('nicos_sinq.amor.devices.logical_motor.AmorLogicalMotor',
description = 'Logical motor monochromator two theta',
motortype = 'm2t',
controller = 'controller',
),
s2t = device('nicos_sinq.amor.devices.logical_motor.AmorLogicalMotor',
description = 'Logical motor sample two theta',
motortype = 's2t',
controller = 'controller',
),
ath = device('nicos_sinq.amor.devices.logical_motor.AmorLogicalMotor',
description = 'Logical Motor analyser theta',
motortype = 'ath',
controller = 'controller',
loglevel = 'debug'
),
dimetix = device('nicos_sinq.amor.devices.dimetix.EpicsDimetix',
description = 'Laser distance measurement device',
readpv = 'SQ:AMOR:DIMETIX:DIST',
epicstimeout = 3.0,
),
laser_switch = device('nicos_sinq.amor.devices.sps_switch.SpsSwitch',
description = 'Laser light controlled by SPS',
epicstimeout = 3.0,
readpv = 'SQ:AMOR:SPS1:DigitalInput',
commandpv = 'SQ:AMOR:SPS1:Push',
commandstr = "S0001",
bytelist = [(15, 7)],
mapping = {'OFF': 0, 'ON': 1}
),
xlz = device('nicos_ess.devices.epics.motor.EpicsMotor',
description = 'Counter z position distance laser motor',
epicstimeout = 3.0,
motorpv = 'SQ:AMOR:mota:xlz',
errormsgpv = 'SQ:AMOR:mota:xlz-MsgTxt',
lowlevel = True
),
laser_positioner = device('nicos.devices.generic.Switcher',
description = 'Position laser to read components',
moveable = 'xlz',
mapping = {
'park': -0.1,
'analyser': -24.0,
'detector': 0.0,
'polariser': -88.0,
'sample': -52.0,
'slit2': -73.0,
'slit3': -63.0,
'slit4': -34.0,
'selene': -116.0,
},
fallback = '<undefined>',
precision = 0
),
Distances = device('nicos_sinq.amor.devices.component_handler.DistancesHandler',
description = 'Device to handle distance calculation in AMOR',
components = {
'polariser': (-232, 0),
'slit2': (302, 0),
'slit3': (-22, 0),
'slit4': (306, 0),
'sample': (-310, 0),
'detector': (326, 0),
'analyser': (310, 0),
'filter': (-726, 0),
'slit1': (0, 0)
},
fixedcomponents = {
'chopper': 9906,
},
switch = 'laser_switch',
positioner = 'laser_positioner',
dimetix = 'dimetix'
),
com = device('test.nicos_ess.test_devices.test_epics_motor.FakeEpicsMotor',
epicstimeout = 3.0,
description = 'Counter tilt motor',
motorpv = 'com',
),
coz = device('test.nicos_ess.test_devices.test_epics_motor.FakeEpicsMotor',
epicstimeout = 3.0,
description = 'Counter z translation motor',
motorpv = 'coz',
),
nu = device('nicos_sinq.amor.devices.logical_motor.DetectorAngleMotor',
description = 'Sample omega',
com = 'com',
coz = 'coz',
unit = 'deg',
coz_scale_factor = 10.,
),
)
the-stack_0_3854 |
import os
import subprocess
import platform
from SetupPython import PythonConfiguration as PythonRequirements
# Make sure everything we need for the setup is installed
PythonRequirements.Validate()
from SetupPremake import PremakeConfiguration as PremakeRequirements
# from SetupVulkan import VulkanConfiguration as VulkanRequirements
os.chdir('./../') # Change from devtools/scripts directory to root
premakeInstalled = PremakeRequirements.Validate()
# VulkanRequirements.Validate()
print("\nUpdating submodules...")
subprocess.call(["git", "submodule", "update", "--init", "--recursive"])
if (premakeInstalled):
if platform.system() == "Windows":
print("\nRunning premake...")
subprocess.call([os.path.abspath("./scripts/Win-GenProjects.bat"), "nopause"])
print("\nSetup completed!")
else:
print("Hazel requires Premake to generate project files.") |
the-stack_0_3856 | n1 = int(input('Digite um número inteiro: '))
print('''Escolha uma das bases para conversão:
[ 1 ] converter para BINÁRIO
[ 2 ] converter para OCTAL
[ 3 ] converter para HEXADECIMAL''')
opção = int(input('Escolha uma opção: '))
if opção == 1:
print('{} convertido para BINÁRIO é igual a {}'.format(n1, bin(n1)[2:]))
elif opção == 2:
print('{} convertido para OCTAL é igual a {}'.format(n1, oct(n1)[2:]))
elif opção == 3:
print('{} convertido para HEXADECIMAL é igual a {}'.format(n1, hex(n1)[2:]))
else:
print('Opção inválida! Tente novamente.')
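# Worked example of the slicing used above: bin(10) returns '0b1010', oct(10) returns
# '0o12' and hex(10) returns '0xa'; the [2:] slice strips the '0b'/'0o'/'0x' prefix so
# only the converted digits are printed.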
the-stack_0_3859 | import cv2
from Recognizer import *
# for more details on how to use this code see : https://github.com/Ahmedjellouli/FaceRecognition
Recognizer = Recognizer(Database="Database",
Tolerance=0.55,
detectFrontalFace=False,
detectLandmarks=True)
Image = Image(Recognizer=Recognizer,
filename="Faces\\Malala-Yousafzai.jpg",
Save=True)
Video = Video(Recognizer=Recognizer,
filename="Videos\elon.mp4", # put your image path here e.g : D:\image.jpg
)
Image.RecognizeFaces() # detect faces in the image
Video.RecognizeFaces() # detect faces in the video
Video.AddAudio()
the-stack_0_3860 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Miroslav Bauer, CESNET.
#
# oarepo-references is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Test OARepo references fields."""
import uuid
import pytest
from tests.test_utils import TestSchema
from oarepo_references.mixins import ReferenceFieldMixin
@pytest.mark.usefixtures("db")
class TestOArepoReferencesFields:
"""OARepo references fields test."""
def test_reference_field(self, test_record_data, referenced_records):
"""Test marshmallow schema ReferenceField methods."""
schema = TestSchema()
rf = schema.fields['ref']
assert isinstance(rf, ReferenceFieldMixin)
rec_uuid = referenced_records[0].id
rf.register(test_record_data['taxo1']['links']['self'], rec_uuid, True)
assert len(rf.context['references']) == 1
ref = rf.context['references'][0]
assert ref['reference'] == \
test_record_data['taxo1']['links']['self']
assert ref['reference_uuid'] == rec_uuid
def test_marshmallow_load(self, test_record_data):
"""Test marshmallow schema load."""
schema = TestSchema()
res = schema.load(test_record_data, partial=True)
assert res == test_record_data
the-stack_0_3862 | #!python
from bbfreeze import Freezer
import shutil
destDir = 'dist'
def main():
#includes = ['requests', 'email.utils']
includes = ['requests', 'email.utils']
excludes = ['_gtkagg', '_tkagg', 'bsddb', 'curses', 'email', 'pywin.debugger',
'pywin.debugger.dbgcon', 'pywin.dialogs', 'tcl', 'tk'
'Tkconstants', 'Tkinter',]
frz = Freezer(destDir, includes=includes, excludes=excludes)
#frz.addScript("meteor.py", gui_only=True)
frz.addScript("play_development.py")
frz.addScript("play_fullscreen.py", gui_only=True)
frz.addScript("play_windowed.py", gui_only=True)
#frz.addScript("gameassets.py")
#frz.addScript("geoip.py")
#frz.addScript("shipsprite.py")
#frz.addScript("sprites.py")
#frz.addScript("timevars.py")
#frz.addScript("vector.py")
frz.use_compression = 0
frz.include_py = True
frz()
addFile('config.json')
addFile('avbin.dll')
#addDir('images')
#addDir('fonts')
#addDir('sounds')
addDir('themes')
def addFile(f):
# Add a non-script file to directory.
# Why this isn't part of bbfreeze beats me
# Currently assumes file is in script directory. That's lazy but all
# I need for now.
d = "%s/%s" % (destDir, f)
shutil.copyfile( f, d)
def addDir(d):
dd = "%s/%s" % (destDir, d)
shutil.copytree( d, dd)
main()
the-stack_0_3863 | import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
logging.getLogger("pyrogram").setLevel(logging.WARNING)
import asyncio
import aiohttp
import json
import os
import shutil
import time
from PIL import Image
from datetime import datetime
if bool(os.environ.get("WEBHOOK", False)):
from sample_config import Config
else:
from config import Config
from translation import Translation
from database.database import db
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser
from helper_funcs.display_progress import progress_for_pyrogram, humanbytes, TimeFormatter
async def ddl_call_back(bot, update):
cb_data = update.data
tg_send_type, youtube_dl_format, youtube_dl_ext = cb_data.split("=")
thumb_image_path = Config.DOWNLOAD_LOCATION + \
"/" + str(update.from_user.id) + ".jpg"
youtube_dl_url = update.message.reply_to_message.text
custom_file_name = os.path.basename(youtube_dl_url)
if "|" in youtube_dl_url:
url_parts = youtube_dl_url.split("|")
if len(url_parts) == 2:
youtube_dl_url = url_parts[0]
custom_file_name = url_parts[1]
else:
for entity in update.message.reply_to_message.entities:
if entity.type == "text_link":
youtube_dl_url = entity.url
elif entity.type == "url":
o = entity.offset
l = entity.length
youtube_dl_url = youtube_dl_url[o:o + l]
if youtube_dl_url is not None:
youtube_dl_url = youtube_dl_url.strip()
if custom_file_name is not None:
custom_file_name = custom_file_name.strip()
else:
for entity in update.message.reply_to_message.entities:
if entity.type == "text_link":
youtube_dl_url = entity.url
elif entity.type == "url":
o = entity.offset
l = entity.length
youtube_dl_url = youtube_dl_url[o:o + l]
description = Translation.CUSTOM_CAPTION_UL_FILE
start = datetime.now()
await bot.edit_message_text(
text=Translation.DOWNLOAD_START,
chat_id=update.message.chat.id,
message_id=update.message.message_id
)
tmp_directory_for_each_user = Config.DOWNLOAD_LOCATION + "/" + str(update.from_user.id)
if not os.path.isdir(tmp_directory_for_each_user):
os.makedirs(tmp_directory_for_each_user)
download_directory = tmp_directory_for_each_user + "/" + custom_file_name
command_to_exec = []
async with aiohttp.ClientSession() as session:
c_time = time.time()
try:
await download_coroutine(
bot,
session,
youtube_dl_url,
download_directory,
update.message.chat.id,
update.message.message_id,
c_time
)
except asyncio.TimeoutError:
await bot.edit_message_text(
text=Translation.SLOW_URL_DECED,
chat_id=update.message.chat.id,
message_id=update.message.message_id
)
return False
if os.path.exists(download_directory):
end_one = datetime.now()
await bot.edit_message_text(
text=Translation.UPLOAD_START,
chat_id=update.message.chat.id,
message_id=update.message.message_id
)
file_size = Config.TG_MAX_FILE_SIZE + 1
try:
file_size = os.stat(download_directory).st_size
except FileNotFoundError as exc:
download_directory = os.path.splitext(download_directory)[0] + "." + "mkv"
file_size = os.stat(download_directory).st_size
if file_size > Config.TG_MAX_FILE_SIZE:
await bot.edit_message_text(
chat_id=update.message.chat.id,
text=Translation.RCHD_TG_API_LIMIT,
message_id=update.message.message_id
)
else:
# get the correct width, height, and duration for videos greater than 10MB
width = 0
height = 0
duration = 0
if tg_send_type != "file":
metadata = extractMetadata(createParser(download_directory))
if metadata is not None:
if metadata.has("duration"):
duration = metadata.get('duration').seconds
if os.path.exists(thumb_image_path):
width = 0
height = 0
metadata = extractMetadata(createParser(thumb_image_path))
if metadata.has("width"):
width = metadata.get("width")
if metadata.has("height"):
height = metadata.get("height")
if tg_send_type == "vm":
height = width
Image.open(thumb_image_path).convert(
"RGB").save(thumb_image_path)
img = Image.open(thumb_image_path)
if tg_send_type == "file":
img.resize((320, height))
else:
img.resize((90, height))
img.save(thumb_image_path, "JPEG")
else:
thumb_image_path = None
start_time = time.time()
if (await db.get_upload_as_doc(update.from_user.id)) is False:
thumbnail = await Gthumb01(bot, update)
await bot.send_document(
chat_id=update.message.chat.id,
document=download_directory,
thumb=thumbnail,
caption=description,
reply_to_message_id=update.message.reply_to_message.message_id,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
else:
width, height, duration = await Mdata01(download_directory)
thumb_image_path = await Gthumb02(bot, update, duration, download_directory)
await bot.send_video(
chat_id=update.message.chat.id,
video=download_directory,
caption=description,
duration=duration,
width=width,
height=height,
supports_streaming=True,
thumb=thumb_image_path,
reply_to_message_id=update.message.reply_to_message.message_id,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
if tg_send_type == "audio":
duration = await Mdata03(download_directory)
thumbnail = await Gthumb01(bot, update)
await bot.send_audio(
chat_id=update.message.chat.id,
audio=download_directory,
caption=description,
parse_mode="HTML",
duration=duration,
thumb=thumbnail,
reply_to_message_id=update.message.reply_to_message.message_id,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
elif tg_send_type == "vm":
width, duration = await Mdata02(download_directory)
thumbnail = await Gthumb02(bot, update, duration, download_directory)
await bot.send_video_note(
chat_id=update.message.chat.id,
video_note=download_directory,
duration=duration,
length=width,
thumb=thumbnail,
reply_to_message_id=update.message.reply_to_message.message_id,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
else:
logger.info("Did this happen? :\\")
end_two = datetime.now()
try:
os.remove(download_directory)
os.remove(thumb_image_path)
except:
pass
time_taken_for_download = (end_one - start).seconds
time_taken_for_upload = (end_two - end_one).seconds
await bot.edit_message_text(
text=Translation.AFTER_SUCCESSFUL_UPLOAD_MSG_WITH_TS.format(time_taken_for_download, time_taken_for_upload),
chat_id=update.message.chat.id,
message_id=update.message.message_id,
disable_web_page_preview=True
)
else:
await bot.edit_message_text(
text=Translation.NO_VOID_FORMAT_FOUND.format("Incorrect Link"),
chat_id=update.message.chat.id,
message_id=update.message.message_id,
disable_web_page_preview=True
)
async def download_coroutine(bot, session, url, file_name, chat_id, message_id, start):
downloaded = 0
display_message = ""
async with session.get(url, timeout=Config.PROCESS_MAX_TIMEOUT) as response:
total_length = int(response.headers["Content-Length"])
content_type = response.headers["Content-Type"]
if "text" in content_type and total_length < 500:
return await response.release()
await bot.edit_message_text(
chat_id,
message_id,
text="""Initiating Download
URL: {}
File Size: {}""".format(url, humanbytes(total_length))
)
with open(file_name, "wb") as f_handle:
while True:
chunk = await response.content.read(Config.CHUNK_SIZE)
if not chunk:
break
f_handle.write(chunk)
downloaded += Config.CHUNK_SIZE
now = time.time()
diff = now - start
if round(diff % 5.00) == 0 or downloaded == total_length:
percentage = downloaded * 100 / total_length
speed = downloaded / diff
elapsed_time = round(diff) * 1000
time_to_completion = round(
(total_length - downloaded) / speed) * 1000
estimated_total_time = elapsed_time + time_to_completion
try:
current_message = """**Download Status**
URL: {}
File Size: {}
Downloaded: {}
ETA: {}""".format(
url,
humanbytes(total_length),
humanbytes(downloaded),
TimeFormatter(estimated_total_time)
)
if current_message != display_message:
await bot.edit_message_text(
chat_id,
message_id,
text=current_message
)
display_message = current_message
except Exception as e:
logger.info(str(e))
pass
return await response.release()
the-stack_0_3865 | # Copyright 2015 - Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import wsme
from wsme import types as wtypes
from solum.api.controllers import common_types
from solum.api.controllers.v1.datamodel import types as api_types
class Workflow(wtypes.Base):
"""Representation of a Workflow.
A workflow maintains a living creation and deployment of an App.
"""
# (devkulkarni) Added base_url to get around strict validation
# checking of WSME 0.8.0
# https://bugs.launchpad.net/solum/+bug/1491504
# https://bugs.launchpad.net/solum/+bug/1491499
base_url = common_types.Uri
"URI of the base resource."
uri = common_types.Uri
"URI to the resource."
uuid = wtypes.text
"Unique Identifier of the resource"
type = wtypes.text
"The resource type."
id = wtypes.text
updated_at = datetime.datetime
created_at = datetime.datetime
app_id = wtypes.text
wf_id = int
source = wtypes.DictType(wtypes.text, api_types.MultiType(
wtypes.text,
int,
bool,
float))
config = {wtypes.text: wtypes.text}
actions = [wtypes.text]
du_id = wtypes.text
status = wtypes.text
result = wtypes.text
scale_target = int
def __init__(self, *args, **kwargs):
super(Workflow, self).__init__(*args, **kwargs)
@classmethod
def sample(cls):
return cls(
wf_id=1,
config={},
actions={},
source={},
status=''
)
@classmethod
def from_db_model(cls, m, host_url):
json = m.as_dict()
json['type'] = m.__tablename__
json['uri'] = ''
json['uri'] = ('%s/v1/apps/%s/workflows/%s' %
(host_url, m.app_id, m.wf_id))
return cls(**(json))
def as_dict_from_keys(self, keys):
return dict((k, getattr(self, k))
for k in keys
if hasattr(self, k) and
getattr(self, k) != wsme.Unset)
def as_dict(self, db_model):
valid_keys = [attr for attr in db_model.__dict__.keys()
if attr[:2] != '__' and attr != 'as_dict']
base = self.as_dict_from_keys(valid_keys)
attrs = [
'id',
'app_id',
'wf_id',
'source',
'config',
'actions',
'status',
'result',
'scale_target'
]
for a in attrs:
if getattr(self, a) is wsme.Unset:
continue
if getattr(self, a) is None:
continue
base[a] = getattr(self, a)
return base
the-stack_0_3868 | # future
from __future__ import annotations
# stdlib
import functools
from functools import lru_cache
import operator
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
# third party
from google.protobuf.reflection import GeneratedProtocolMessageType
import numpy as np
import torch
# syft absolute
# absolute
import syft as sy
# relative
from . import utils
from .... import logger
from ....proto.core.tensor.share_tensor_pb2 import ShareTensor as ShareTensor_PB
from ...common.serde.deserialize import _deserialize as deserialize
from ...common.serde.serializable import serializable
from ...common.serde.serialize import _serialize as serialize
from ...smpc.store.crypto_store import CryptoStore
from ..passthrough import PassthroughTensor # type: ignore
from .party import Party
METHODS_FORWARD_ALL_SHARES = {
"repeat",
"copy",
"diagonal",
"flatten",
"transpose",
"partition",
"resize",
"ravel",
"compress",
"reshape",
"squeeze",
"swapaxes",
"__pos__",
"__neg__",
"take",
"choose",
}
INPLACE_OPS = {"resize", "put"}
RING_SIZE_TO_OP = {
2: {
"add": operator.xor,
"sub": operator.xor,
"mul": operator.and_,
"lt": operator.lt,
"gt": operator.gt,
"ge": operator.ge,
"le": operator.le,
"eq": operator.eq,
"ne": operator.ne,
},
2
** 32: {
"add": operator.add,
"sub": operator.sub,
"mul": operator.mul,
"lt": operator.lt,
"gt": operator.gt,
"ge": operator.ge,
"le": operator.le,
"eq": operator.eq,
"ne": operator.ne,
},
}
CACHE_CLIENTS: Dict[Party, Any] = {}
def populate_store(*args: List[Any], **kwargs: Dict[Any, Any]) -> None:
ShareTensor.crypto_store.populate_store(*args, **kwargs) # type: ignore
@serializable()
class ShareTensor(PassthroughTensor):
crypto_store = CryptoStore()
__slots__ = (
"rank",
"ring_size",
"clients", # clients connections
"min_value",
"max_value",
"generator_przs",
# Only ShareTensors with seed_przs could be sent over the wire
"seed_przs",
"parties_info",
"nr_parties",
)
def __init__(
self,
rank: int,
parties_info: List[Party],
ring_size: int,
seed_przs: int = 42,
clients: Optional[List[Any]] = None,
value: Optional[Any] = None,
init_clients: bool = False,
) -> None:
# TODO: Ring size needs to be changed to 2^64 (or other specific sizes)
self.rank = rank
self.ring_size = ring_size
self.nr_parties = len(parties_info)
self.parties_info = parties_info
self.clients = []
if clients is not None:
self.clients = clients
elif init_clients: # type: ignore
self.clients = ShareTensor.login_clients(parties_info)
self.min_value, self.max_value = ShareTensor.compute_min_max_from_ring(
self.ring_size
)
# This should be set only in the deserializer
self.generator_przs = None
self.seed_przs = seed_przs
super().__init__(value)
@staticmethod
def login_clients(parties_info: List[Party]) -> Any:
clients = []
for party_info in parties_info:
party_info.url = party_info.url.replace("localhost", "docker-host")
client = CACHE_CLIENTS.get(party_info, None)
if client is None:
# default cache to true, here to prevent multiple logins
# due to gevent monkey patching, context switch is done during
# during socket connection initialization.
CACHE_CLIENTS[party_info] = True
# TODO: refactor to use a guest account
client = sy.login( # nosec
url=party_info.url,
email="[email protected]",
password="changethis",
port=party_info.port,
verbose=False,
)
base_url = client.routes[0].connection.base_url
client.routes[0].connection.base_url = base_url.replace( # type: ignore
"localhost", "docker-host"
)
CACHE_CLIENTS[party_info] = client
clients.append(client)
return clients
def __getitem__(self, item: Union[str, int, slice]) -> ShareTensor:
return ShareTensor(
rank=self.rank,
parties_info=self.parties_info,
ring_size=self.ring_size,
value=self.child[item],
clients=self.clients,
)
def copy_tensor(self) -> ShareTensor:
return ShareTensor(
value=self.child,
rank=self.rank,
parties_info=self.parties_info,
ring_size=self.ring_size,
seed_przs=self.seed_przs,
clients=self.clients,
)
@staticmethod
@lru_cache(32)
def compute_min_max_from_ring(ring_size: int = 2 ** 32) -> Tuple[int, int]:
if ring_size == 2:
min_value, max_value = 0, 1
else:
min_value = (-ring_size) // 2
max_value = (ring_size) // 2 - 1
return min_value, max_value
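# For the default ring_size of 2 ** 32 this yields the int32 range
# (-2147483648, 2147483647); for ring_size 2 it degenerates to (0, 1).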
@staticmethod
@lru_cache(maxsize=None)
def get_op(ring_size: int, op_str: str) -> Callable[..., Any]:
"""Returns method attribute based on ring_size and op_str.
Args:
ring_size (int): Ring size
op_str (str): Operation string.
Returns:
op (Callable[...,Any]): The operation method for the op_str.
Raises:
ValueError : If invalid ring size or op_str is given as input.
"""
ops = RING_SIZE_TO_OP.get(ring_size, None)
if ops is None:
raise ValueError(f"Do not have operations for ring size {ring_size}")
op = ops.get(op_str, None)
if op is None:
raise ValueError(
f"Operator {op_str} does not exist for ring size {ring_size}"
)
return op
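# Example: ShareTensor.get_op(2, "add") resolves to operator.xor (addition in the
# binary ring), while ShareTensor.get_op(2 ** 32, "add") resolves to operator.add.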
""" TODO: Remove this -- we would use generate_przs since the scenario we are testing is that
the secret is remotly
@staticmethod
def generate_shares(secret, nr_shares, ring_size=2 ** 64):
from .fixed_precision_tensor import FixedPrecisionTensor
if not isinstance(secret, (int, FixedPrecisionTensor)):
secret = FixedPrecisionTensor(value=secret)
shape = secret.shape
min_value, max_value = ShareTensor.compute_min_max_from_ring(ring_size)
generator_shares = np.random.default_rng()
random_shares = []
for i in range(nr_shares):
random_value = generator_shares.integers(
low=min_value, high=max_value, size=shape
)
fpt_value = FixedPrecisionTensor(value=random_value)
random_shares.append(fpt_value)
shares_fpt = []
for i in range(nr_shares):
if i == 0:
share = value = random_shares[i]
elif i < nr_shares - 1:
share = random_shares[i] - random_shares[i - 1]
else:
share = secret - random_shares[i - 1]
shares_fpt.append(share)
# Add the ShareTensor class between them
shares = []
for rank, share_fpt in enumerate(shares_fpt):
share_fpt.child = ShareTensor(rank=rank, value=share_fpt.child)
shares.append(share_fpt)
return shares
"""
@staticmethod
def generate_przs(
value: Any,
shape: Tuple[int, ...],
rank: int,
parties_info: List[Party],
ring_size: int = 2 ** 32,
seed_przs: Optional[int] = None,
generator_przs: Optional[Any] = None,
init_clients: bool = True,
) -> "ShareTensor":
nr_parties = len(parties_info)
# Try:
# 1. First get numpy type if secret is numpy and obtain ring size from there
# 2. If not get the type from the ring size
numpy_type = None
ring_size_final = None
ring_size_from_type = utils.TYPE_TO_RING_SIZE.get(
getattr(value, "dtype", None), None
)
if ring_size_from_type is None:
logger.warning(f"Could not get ring size from {value}")
else:
ring_size_final = ring_size_from_type
numpy_type = value.dtype
if numpy_type is None:
numpy_type = utils.RING_SIZE_TO_TYPE.get(ring_size, None)
ring_size_final = ring_size
if numpy_type is None:
raise ValueError(f"Ring size {ring_size} not known how to be treated")
# relative
from ..tensor import Tensor
if (seed_przs is None) == (generator_przs is None):
raise ValueError("Only seed_przs or generator should be populated")
if value is None:
value = Tensor(np.zeros(shape, dtype=numpy_type))
# TODO: Sending the seed and having each party generate the shares is not safe
# Since the parties would know some of the other parties shares (this might not impose a risk
# when shares are not sent between parties -- like private addition/subtraction, but it might
# impose for multiplication
# The secret holder should generate the shares and send them to the other parties
if generator_przs:
generator_shares = generator_przs
else:
generator_shares = np.random.default_rng(seed_przs)
if isinstance(value.child, ShareTensor):
value = value.child
share = ShareTensor(
value=value.child,
rank=rank,
parties_info=parties_info,
seed_przs=seed_przs, # type: ignore #TODO:Inspect as we could pass none.
init_clients=init_clients,
ring_size=ring_size_final, # type: ignore
)
share.generator_przs = generator_shares
shares = [
generator_shares.integers(
low=share.min_value,
high=share.max_value,
size=shape,
endpoint=True,
dtype=numpy_type,
)
for _ in range(nr_parties)
]
op = ShareTensor.get_op(ring_size_final, "sub")
przs_share = op(shares[rank], shares[(rank + 1) % nr_parties])
share.child = op(share.child, przs_share)
return share
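# Assuming every party is constructed with the same seed_przs, each rank draws the
# identical list of nr_parties random tensors and subtracts
# shares[rank] - shares[(rank + 1) % nr_parties] from its value; summed over all ranks
# these correction terms telescope to zero, so the shares still add up to the original
# secret (the "pseudo-random zero share" trick).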
@staticmethod
def generate_przs_on_dp_tensor(
value: Optional[Any],
shape: Tuple[int],
rank: int,
parties_info: List[Party],
seed_przs: int,
share_wrapper: Any,
ring_size: int = 2 ** 32,
) -> PassthroughTensor:
if value is not None:
share = ShareTensor.generate_przs(
value=value.child,
shape=shape,
rank=rank,
parties_info=parties_info,
seed_przs=seed_przs,
ring_size=ring_size,
)
else:
share = ShareTensor.generate_przs(
value=value,
shape=shape,
rank=rank,
parties_info=parties_info,
seed_przs=seed_przs,
ring_size=ring_size,
)
share_wrapper.child.child = share
return share_wrapper
@staticmethod
def sanity_check(
share: Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]
) -> None:
"""Check type for share
Args:
share (Union[int, float, ShareTensor, np.ndarray, torch.Tensor]): value to check
Raises:
ValueError: if type is not supported
"""
if isinstance(share, float):
raise ValueError("Type float not supported yet!")
if isinstance(share, np.ndarray) and (
not np.issubdtype(share.dtype, np.integer)
and share.dtype != np.dtype("bool")
):
raise ValueError(
f"NPArray should have type int or bool, but found {share.dtype}"
)
if isinstance(share, torch.Tensor) and torch.is_floating_point(share):
raise ValueError("Torch tensor should have type int, but found float")
def apply_function(
self, y: Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"], op_str: str
) -> "ShareTensor":
"""Apply a given operation.
Args:
y (Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]): tensor to apply the operator.
op_str (str): Operator.
Returns:
ShareTensor: Result of the operation.
"""
op = ShareTensor.get_op(self.ring_size, op_str)
numpy_type = utils.RING_SIZE_TO_TYPE.get(self.ring_size, None)
if numpy_type is None:
raise ValueError(f"Do not know numpy type for ring size {self.ring_size}")
print("=====================================================")
print("OP", op, numpy_type, self.ring_size)
print("====================================================")
if isinstance(y, ShareTensor):
utils.get_ring_size(self.ring_size, y.ring_size)
value = op(self.child, y.child)
else:
# TODO: Converting y to numpy because doing "numpy op torch tensor" raises exception
value = op(self.child, np.array(y, numpy_type)) # TODO: change to np.int64
res = self.copy_tensor()
res.child = value
return res
def add(
self, y: Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]
) -> "ShareTensor":
"""Apply the "add" operation between "self" and "y".
Args:
y (Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]): self + y
Returns:
ShareTensor. Result of the operation.
"""
ShareTensor.sanity_check(y)
new_share = self.apply_function(y, "add")
return new_share
def sub(
self, y: Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]
) -> "ShareTensor":
"""Apply the "sub" operation between "self" and "y".
Args:
y (Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]): self - y
Returns:
ShareTensor. Result of the operation.
"""
ShareTensor.sanity_check(y)
new_share = self.apply_function(y, "sub")
return new_share
def rsub(
self, y: Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]
) -> "ShareTensor":
"""Apply the "rsub" operation between "self" and "y"
Args:
y (Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]): y - self
Returns:
ShareTensor. Result of the operation.
"""
ShareTensor.sanity_check(y)
new_self = self.mul(-1)
new_share = new_self.apply_function(y, "add")
return new_share
def mul(
self, y: Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]
) -> "ShareTensor":
"""Apply the "mul" operation between "self" and "y".
Args:
y (Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]): self * y
Returns:
ShareTensor. Result of the operation.
"""
# if isinstance(y, ShareTensor):
# raise ValueError(
# "We should not reach this point for private multiplication. Only public one"
# )
ShareTensor.sanity_check(y)
new_share = self.apply_function(y, "mul")
return new_share
def matmul(
self, y: Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]
) -> "ShareTensor":
"""Apply the "matmul" operation between "self" and "y".
Args:
y (Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]): self @ y.
Returns:
ShareTensor: Result of the operation.
"""
if isinstance(y, ShareTensor):
raise ValueError("Private matmul not supported yet")
ShareTensor.sanity_check(y)
new_share = self.apply_function(y, "matmul")
return new_share
def rmatmul(self, y: torch.Tensor) -> "ShareTensor":
"""Apply the "rmatmul" operation between "y" and "self".
Args:
y (torch.Tensor): y @ self
Returns:
ShareTensor. Result of the operation.
"""
if isinstance(y, ShareTensor):
raise ValueError("Private matmul not supported yet")
ShareTensor.sanity_check(y)
new_share = y.apply_function(self, "matmul")
return new_share
def lt(self, y: Union[ShareTensor, np.ndarray]) -> "ShareTensor":
"""Apply the "lt" operation between "y" and "self".
Args:
y (Union[ShareTensor,np.ndarray]): self < y
Returns:
ShareTensor. Result of the operation.
"""
# raise ValueError(
# "It should not reach this point since we generate SMPCAction for this"
# )
ShareTensor.sanity_check(y)
new_share = self.apply_function(y, "lt")
return new_share
def gt(self, y: Union[ShareTensor, np.ndarray]) -> "ShareTensor":
"""Apply the "gt" operation between "y" and "self".
Args:
y (Union[ShareTensor,np.ndarray]): self > y
Returns:
ShareTensor. Result of the operation.
"""
# raise ValueError(
# "It should not reach this point since we generate SMPCAction for this"
# )
ShareTensor.sanity_check(y)
new_share = self.apply_function(y, "gt")
return new_share
def ge(self, y: Union[ShareTensor, np.ndarray]) -> "ShareTensor":
"""Apply the "ge" operation between "y" and "self".
Args:
y (Union[ShareTensor,np.ndarray]): self >= y
Returns:
ShareTensor. Result of the operation.
"""
# raise ValueError(
# "It should not reach this point since we generate SMPCAction for this"
# )
ShareTensor.sanity_check(y)
new_share = self.apply_function(y, "ge")
return new_share
def le(self, y: Union[ShareTensor, np.ndarray]) -> "ShareTensor":
"""Apply the "le" operation between "y" and "self".
Args:
y (Union[ShareTensor,np.ndarray]): self <= y
Returns:
ShareTensor. Result of the operation.
"""
# raise ValueError(
# "It should not reach this point since we generate SMPCAction for this"
# )
ShareTensor.sanity_check(y)
new_share = self.apply_function(y, "le")
return new_share
def ne(self, y: Union[ShareTensor, np.ndarray]) -> "ShareTensor":
"""Apply the "ne" operation between "y" and "self".
Args:
y (Union[ShareTensor,np.ndarray]): self != y
Returns:
ShareTensor. Result of the operation.
"""
# raise ValueError(
# "It should not reach this point since we generate SMPCAction for this"
# )
ShareTensor.sanity_check(y)
new_share = self.apply_function(y, "ne")
return new_share
def bit_decomposition(self) -> "ShareTensor":
"""Apply the "decomposition" operation on self
Args:
None
Returns:
ShareTensor. Result of the operation.
"""
raise ValueError(
"It should not reach this point since we generate SMPCAction for this"
)
def eq(self, other: Any) -> bool:
"""Equal operator.
Check if "self" is equal with another object given a set of
attributes to compare.
Args:
other (Any): Value to compare.
Returns:
bool: True if equal False if not.
"""
# TODO: Rasswanth: Fix later after the comparison operation
# relative
# from .... import Tensor
# if (
# isinstance(self.child, Tensor)
# and isinstance(other.child, Tensor)
# and (self.child != other.child).child.any() # type: ignore
# ):
# return False
# if (
# isinstance(self.child, np.ndarray)
# and isinstance(other.child, np.ndarray)
# and (self.child != other.child).any()
# ):
# return False
# if self.rank != other.rank:
# return False
# if self.ring_size != other.ring_size:
# return False
# if self.nr_parties != other.nr_parties:
# return False
# return True
return self.child == other.child
# TRASK: commenting out because ShareTEnsor doesn't appear to have .session_uuid or .config
# def div(
# self, y: Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]
# ) -> "ShareTensor":
# """Apply the "div" operation between "self" and "y".
#
# Args:
# y (Union[int, float, torch.Tensor, np.ndarray, "ShareTensor"]): Denominator.
#
# Returns:
# ShareTensor: Result of the operation.
#
# Raises:
# ValueError: If y is not an integer or LongTensor.
# """
# if not isinstance(y, (int, torch.LongTensor)):
# raise ValueError("Div works (for the moment) only with integers!")
#
# res = ShareTensor(session_uuid=self.session_uuid, config=self.config)
# # res = self.apply_function(y, "floordiv")
# res.tensor = self.tensor // y
# return res
def bit_extraction(self, pos: int = 0) -> ShareTensor:
"""Extracts the bit at the specified position.
Args:
pos (int): position to extract bit.
Returns:
ShareTensor : extracted bits at specific position.
Raises:
ValueError: If invalid position is provided.
"""
ring_bits = utils.get_nr_bits(self.ring_size)
if pos < 0 or pos > ring_bits - 1:
raise ValueError(
f"Invalid position for bit_extraction: {pos}, must be in range:[0,{ring_bits-1}]"
)
shape = self.shape
numpy_type = utils.RING_SIZE_TO_TYPE[self.ring_size]
# logical shift
bit_mask = np.ones(shape, dtype=numpy_type) << pos
value = self.child & bit_mask
value = value.astype(np.bool_)
share = self.copy_tensor()
share.child = value
return share
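# Example: for a share holding the value 12 (0b1100) in the 2 ** 32 ring,
# bit_extraction(pos=2) masks with 1 << 2 = 4 and returns a share holding True,
# while bit_extraction(pos=0) masks with 1 and returns a share holding False.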
@staticmethod
def hook_method(__self: ShareTensor, method_name: str) -> Callable[..., Any]:
"""Hook a framework method.
Args:
method_name (str): method to hook
Returns:
A hooked method
"""
def method_all_shares(
_self: ShareTensor, *args: List[Any], **kwargs: Dict[Any, Any]
) -> Any:
share = _self.child
if method_name != "resize":
method = getattr(share, method_name)
else:
# Should be modified to remove copy
# https://stackoverflow.com/questions/23253144/numpy-the-array-doesnt-have-its-own-data
share = share.copy()
method = getattr(share, method_name)
if method_name not in INPLACE_OPS:
new_share = method(*args, **kwargs)
else:
method(*args, **kwargs)
new_share = share
res = _self.copy_tensor()
res.child = new_share
return res
return functools.partial(method_all_shares, __self)
def __getattribute__(self, attr_name: str) -> Any:
if attr_name in METHODS_FORWARD_ALL_SHARES or attr_name in INPLACE_OPS:
return ShareTensor.hook_method(self, attr_name)
return object.__getattribute__(self, attr_name)
def _object2proto(self) -> ShareTensor_PB:
proto_init_kwargs = {
"rank": self.rank,
"parties_info": [serialize(party) for party in self.parties_info],
"seed_przs": self.seed_przs,
"ring_size": sy.serialize(self.ring_size, to_bytes=True),
}
if isinstance(self.child, np.ndarray):
proto_init_kwargs["array"] = serialize(self.child)
elif isinstance(self.child, torch.Tensor):
proto_init_kwargs["array"] = serialize(np.array(self.child))
else:
proto_init_kwargs["tensor"] = serialize(self.child)
return ShareTensor_PB(**proto_init_kwargs)
@staticmethod
def _proto2object(proto: ShareTensor_PB) -> "ShareTensor":
init_kwargs = {
"rank": proto.rank,
"parties_info": [deserialize(party) for party in proto.parties_info],
"seed_przs": proto.seed_przs,
"ring_size": int(sy.deserialize(proto.ring_size, from_bytes=True)),
}
if proto.HasField("tensor"):
init_kwargs["value"] = deserialize(proto.tensor)
else:
init_kwargs["value"] = deserialize(proto.array)
# init_kwargs["init_clients"] = True
res = ShareTensor(**init_kwargs)
generator_przs = np.random.default_rng(proto.seed_przs)
res.generator_przs = generator_przs
return res
@staticmethod
def get_protobuf_schema() -> GeneratedProtocolMessageType:
return ShareTensor_PB
__add__ = add
__radd__ = add
__sub__ = sub
__rsub__ = rsub
__mul__ = mul
__rmul__ = mul
__matmul__ = matmul
__rmatmul__ = rmatmul
__lt__ = lt
__gt__ = gt
__ge__ = ge
__le__ = le
__eq__ = eq
__ne__ = ne
the-stack_0_3870 | # qubit number=4
# total number=35
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += CNOT(0,3) # number=12
prog += X(3) # number=13
prog += H(3) # number=28
prog += CZ(0,3) # number=29
prog += H(3) # number=30
prog += Z(3) # number=10
prog += H(1) # number=2
prog += H(2) # number=3
prog += RX(2.708052867394402,1) # number=11
prog += H(3) # number=4
prog += H(0) # number=5
prog += H(1) # number=6
prog += Y(2) # number=16
prog += CNOT(1,0) # number=19
prog += H(3) # number=25
prog += Z(1) # number=20
prog += CNOT(3,0) # number=32
prog += Z(3) # number=33
prog += CNOT(3,0) # number=34
prog += H(0) # number=22
prog += CZ(1,0) # number=23
prog += H(0) # number=24
prog += Z(2) # number=15
prog += H(2) # number=7
prog += H(3) # number=8
prog += Y(2) # number=18
prog += H(0) # number=9
prog += CNOT(1,0) # number=26
prog += CNOT(1,0) # number=27
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
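# summrise_results is just a frequency count over the measured bitstrings; the same
# mapping could be obtained with dict(collections.Counter(bitstrings)).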
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil2652.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
the-stack_0_3874 | import mimetypes
from datetime import timedelta, datetime
from enum import Enum
from http.cookiejar import CookieJar
from time import sleep
from typing import List, Optional, AnyStr, TypeVar, TextIO, Tuple, Callable, Dict
from urllib.parse import urlparse, urljoin
from bs4 import BeautifulSoup
from loguru import logger
from requests import Response, Session, Request, PreparedRequest, codes
# noinspection PyUnresolvedReferences,PyProtectedMember
from requests._internal_utils import to_native_string
from requests.adapters import BaseAdapter
from requests.cookies import extract_cookies_to_jar, merge_cookies, cookiejar_from_dict
from requests.exceptions import ChunkedEncodingError, ContentDecodingError, \
TooManyRedirects, RequestException
from requests.sessions import merge_setting, merge_hooks
from requests.structures import CaseInsensitiveDict
from requests.utils import requote_uri, rewind_body, get_netrc_auth
from urllib3.util.url import parse_url, Url
from spoofbot.adapter import FileCache
from spoofbot.operating_system import Windows, WindowsVersion, MacOSX, MacOSXVersion, Linux, LinuxDerivatives, \
random_os, OS
from spoofbot.tag import MimeTypeTag, LanguageTag
from spoofbot.util import ReferrerPolicy, are_same_origin, are_same_site, sort_dict, \
TimelessRequestsCookieJar, random_version, get_firefox_versions, get_chrome_versions
from spoofbot.util.log import log_request, log_response
from numpy.random import choice, poisson
class Destination(Enum):
AUDIO = "audio"
AUDIO_WORKLET = "audioworklet"
DOCUMENT = "document"
EMBED = "embed"
EMPTY = "empty"
FONT = "font"
IMAGE = "image"
MANIFEST = "manifest"
OBJECT = "object"
PAINT_WORKLET = "paintworklet"
REPORT = "report"
SCRIPT = "script"
SERVICE_WORKER = "serviceworker"
SHARED_WORKER = "sharedworker"
STYLE = "style"
TRACK = "track"
VIDEO = "video"
WORKER = "worker"
XSLT = "xslt"
NESTED_DOCUMENT = "nested-document"
class Mode(Enum):
CORS = "cors"
NAVIGATE = "navigate"
NESTED_NAVIGATE = "nested-navigate"
NO_CORS = "no-cors"
SAME_ORIGIN = "same-origin"
WEBSOCKET = "websocket"
class Site(Enum):
CROSS_SITE = "cross-site"
SAME_ORIGIN = "same-origin"
SAME_SITE = "same-site"
NONE = "none"
class User(Enum):
USER_ACTIVATED = "?1"
AUTOMATIC = None
DictOrBytes = TypeVar('DictOrBytes', dict, bytes)
DictOrTupleListOrBytesOrFileLike = TypeVar('DictOrTupleListOrBytesOrFileLike', dict,
List[tuple], bytes, TextIO)
DictOrCookieJar = TypeVar('DictOrCookieJar', dict, CookieJar)
StrOrFileLike = TypeVar('StrOrFileLike', str, TextIO)
AuthTupleOrCallable = TypeVar('AuthTupleOrCallable', Tuple[str, str], Callable)
FloatOrTuple = TypeVar('FloatOrTuple', float, Tuple[float, float])
StrOrBool = TypeVar('StrOrBool', str, bool)
StrOrStrTuple = TypeVar('StrOrStrTuple', str, Tuple[str, str])
class Browser(Session):
"""Basic browser session
Specific browsers must inherit from this class and overwrite the abstract methods
"""
_user_agent: str
_accept: List[MimeTypeTag]
_accept_language: List[LanguageTag]
_accept_encoding: List[str]
_dnt: bool
_upgrade_insecure_requests: bool
_te: str
_connection: str
_last_response: Response
_last_navigate: Response
_last_request_timestamp: datetime
_request_timeout: timedelta
_honor_timeout: bool
_waiting_period: timedelta
_did_wait: bool
_header_precedence: list
_referrer_policy: ReferrerPolicy
_adapter: BaseAdapter
def __init__(self):
super(Browser, self).__init__()
self._name = 'Spoofbot'
from spoofbot import __version__
self._version = __version__
self._user_agent = ''
self._accept = []
self._accept_language = []
self._accept_encoding = []
self._dnt = False
self._upgrade_insecure_requests = False
self._te = 'Trailers'
self._connection = 'keep-alive'
# noinspection PyTypeChecker
self._last_response = None
# noinspection PyTypeChecker
self._last_navigate = None
self._last_request_timestamp = datetime(1, 1, 1)
self._request_timeout = timedelta(seconds=1.0)
self._honor_timeout = True
self._waiting_period = timedelta(seconds=0.0)
self._did_wait = False
self._header_precedence = []
self._referrer_policy = ReferrerPolicy.NO_REFERRER_WHEN_DOWNGRADE
# noinspection PyTypeChecker
self._adapter = self.get_adapter('https://')
@property
def name(self) -> str:
"""Name of the browser"""
return self._name
@property
def version(self) -> str:
"""Version of the browser"""
return self._version
@property
def adapter(self) -> BaseAdapter:
"""Gets the adapter for the HTTP/HTTPS requests
:return: The mounted adapter
:rtype: BaseAdapter
"""
return self._adapter
@adapter.setter
def adapter(self, adapter: BaseAdapter):
"""Sets the adapter for the HTTP/HTTPS requests
:param adapter: The adapter to be mounted
:type adapter: BaseAdapter
"""
self._adapter = adapter
self.mount('https://', adapter)
# noinspection HttpUrlsUsage
self.mount('http://', adapter)
@property
def user_agent(self) -> str:
return self._user_agent
@user_agent.setter
def user_agent(self, value: str):
self._user_agent = value
@property
def accept(self) -> List[MimeTypeTag]:
return self._accept
@accept.setter
def accept(self, value: List[MimeTypeTag]):
self._accept = value
@property
def accept_language(self) -> List[LanguageTag]:
return self._accept_language
@accept_language.setter
def accept_language(self, value: List[LanguageTag]):
self._accept_language = value
@property
def accept_encoding(self) -> List[str]:
return self._accept_encoding
@accept_encoding.setter
def accept_encoding(self, value: List[str]):
self._accept_encoding = value
@property
def do_not_track(self) -> bool:
return self._dnt
@do_not_track.setter
def do_not_track(self, value: bool):
self._dnt = value
@property
def upgrade_insecure_requests(self) -> bool:
return self._upgrade_insecure_requests
@upgrade_insecure_requests.setter
def upgrade_insecure_requests(self, value: bool):
self._upgrade_insecure_requests = value
@property
def transfer_encoding(self) -> str:
return self._te
@transfer_encoding.setter
def transfer_encoding(self, value: str):
self._te = value
@property
def connection(self) -> str:
return self._connection
@connection.setter
def connection(self, value: str):
self._connection = value
@property
def origin(self) -> Optional[Url]:
if self._last_response is None:
return None
last_url = parse_url(self._last_response.url)
return Url(last_url.scheme, host=last_url.host)
@property
def last_response(self) -> Optional[Response]:
return self._last_response
@property
def last_navigate(self) -> Optional[Response]:
return self._last_navigate
@property
def last_request_timestamp(self) -> datetime:
return self._last_request_timestamp
@last_request_timestamp.setter
def last_request_timestamp(self, value: datetime):
self._last_request_timestamp = value
@property
def request_timeout(self) -> timedelta:
return self._request_timeout
@request_timeout.setter
def request_timeout(self, value: timedelta):
self._request_timeout = value
@property
def honor_timeout(self) -> bool:
return self._honor_timeout
@honor_timeout.setter
def honor_timeout(self, value: bool):
self._honor_timeout = value
@property
def waiting_period(self) -> timedelta:
return self._waiting_period
@property
def did_wait(self) -> bool:
return self._did_wait
@property
def header_precedence(self) -> list:
return self._header_precedence
@header_precedence.setter
def header_precedence(self, value: list):
self._header_precedence = value
@staticmethod
def create_user_agent(**kwargs) -> str:
"""Creates a user agent string according to the browsers identity.
:param kwargs: Specific arguments to take into account.
:returns: A custom user agent string.
:rtype: str
"""
raise NotImplementedError
@staticmethod
def create_random_user_agent() -> str:
"""Creates seemingly random user agent string
:returns: A random user agent string.
:rtype: str
"""
raise NotImplementedError
# noinspection DuplicatedCode
def _get_referer(self, url: Url) -> Optional[str]:
if self._last_navigate is None:
return None
nav_url = parse_url(self._last_navigate.url)
return self._referrer_policy.get_referrer(nav_url, url)
def _get_origin(self, method: str, url: Url) -> Optional[str]:
if self._last_navigate is None:
return None
nav_url = parse_url(self._last_navigate.url)
if not are_same_origin(nav_url, url) or method not in ['GET', 'HEAD']:
return self._referrer_policy.get_origin(nav_url, url)
@staticmethod
def _get_host(url: Url) -> str:
if url.port:
return f"{url.hostname}:{url.port}"
return url.hostname
def _get_user_agent(self) -> str:
return self._user_agent
def _get_accept(self, url: Url) -> str:
mime_type, _ = mimetypes.guess_type(url.path if url.path is not None else '')
if mime_type is not None:
return mime_type
return ','.join(map(str, self._accept))
def _get_accept_language(self) -> str:
return ','.join(map(str, self._accept_language))
def _get_accept_encoding(self, url: Url) -> str:
_, enc = mimetypes.guess_type(url.path if url.path is not None else '')
if enc is not None:
return enc
encodings = self._accept_encoding.copy()
if url.scheme != 'https' and 'br' in encodings:
encodings.remove('br')
return ', '.join(encodings)
def _get_connection(self) -> Optional[str]:
if self._connection != '':
return self._connection
def _get_dnt(self) -> Optional[str]:
if self._dnt:
return '1'
def _get_upgrade_insecure_requests(self) -> Optional[str]:
if self._upgrade_insecure_requests:
return '1'
def _get_te(self, url: Url) -> Optional[str]:
if url.scheme == 'https' and self._te != '':
return self._te
@staticmethod
def _get_sec_fetch_dest(dest: Destination) -> str:
# https://www.w3.org/TR/fetch-metadata/#sec-fetch-dest-header
# noinspection SpellCheckingInspection
if dest is None:
dest = Destination.EMPTY
# noinspection PyTypeChecker
return dest.value
def _get_sec_fetch_mode(self, method: str, url: Url) -> str:
# https://www.w3.org/TR/fetch-metadata/#sec-fetch-mode-header
mode = Mode.NO_CORS
if self._last_navigate is None:
mode = Mode.NAVIGATE
# noinspection PyTypeChecker
return mode.value
nav_url = parse_url(self._last_navigate.url)
if are_same_origin(url, nav_url):
mode = Mode.SAME_ORIGIN
if self._get_origin(method, url) is not None:
mode = Mode.CORS
# noinspection PyTypeChecker
return mode.value
    def _get_sec_fetch_site(self, url: Url) -> str:
        # https://www.w3.org/TR/fetch-metadata/#sec-fetch-site-header
        site = Site.SAME_ORIGIN
        if self._last_navigate is None:
            site = Site.NONE
            # noinspection PyTypeChecker
            return site.value
        nav_url = parse_url(self._last_navigate.url)
        # Per the Fetch Metadata spec: "same-site" when only the registrable
        # domain matches, "cross-site" when even the site differs.
        if not are_same_origin(url, nav_url):
            site = Site.SAME_SITE
        if not are_same_site(url, nav_url):
            site = Site.CROSS_SITE
        # noinspection PyTypeChecker
        return site.value
    def navigate(self, url: str, **kwargs) -> list[Response]:
        """Sends a GET request to the url and sets it into the Referer header in
        subsequent requests
        :param url: The url the browser is supposed to connect to
        :param kwargs: Additional arguments to forward to the requests module
        :returns: The response to the navigation request, followed by the
            responses for the page's attachments (linked resources, scripts
            and images)
        :rtype: list[Response]
        """
kwargs.setdefault('user_activation', True)
response = self.get(url, **kwargs)
self._last_navigate = response
return self._request_attachments(response)
def _request_attachments(self, response: Response) -> list[Response]:
response.raise_for_status()
responses = [response]
bs = BeautifulSoup(response.content, features='html.parser')
url = parse_url(response.url)
links = self._gather_valid_links(bs, url) + \
self._gather_valid_scripts(bs, url) + \
self._gather_valid_imgs(bs, url)
for link in links:
logger.debug(f"Fetching {link.url}")
try:
resp = self.get(link.url)
responses.append(resp)
except RequestException:
pass
return responses
@staticmethod
def _gather_valid_links(bs: BeautifulSoup, origin: Url) -> list[Url]:
links = []
for link in bs.find_all('link'):
ignore = False
            for rel in link.get('rel', []):  # default to [] so <link> tags without a rel attribute don't raise
# https://developer.mozilla.org/en-US/docs/Web/HTML/Attributes/rel
if rel in {'dns-prefetch', 'preconnect'}:
# DNS resolve and preemptively connecting is useless to us
ignore |= True
elif rel in {'canonical'}:
ignore |= True
elif rel in {'manifest', 'mask-icon'}:
# non-standard and unsupported
ignore |= True
if ignore:
continue
href: str = link.get('href', None)
if href is None:
continue
if href.startswith('/'):
href = f"{origin.scheme}://{origin.hostname}{href}"
links.append(parse_url(href))
return links
@staticmethod
def _gather_valid_scripts(bs: BeautifulSoup, origin: Url) -> list[Url]:
scripts = []
for script in bs.find_all('script'):
if 'src' not in script.attrs:
continue
src: str = script.get('src', None)
if src is None:
continue
if src.startswith('/'):
src = f"{origin.scheme}://{origin.hostname}{src}"
scripts.append(parse_url(src))
return scripts
    @staticmethod
    def _gather_valid_imgs(bs: BeautifulSoup, origin: Url) -> list[Url]:
        imgs = []
        for img in bs.find_all('img'):
            if 'src' not in img.attrs:
                continue
            src: str = img.get('src', None)
            if src is None:
                continue
            if src.startswith('/'):
                src = f"{origin.scheme}://{origin.hostname}{src}"
            imgs.append(parse_url(src))
        return imgs
def request(self, method: AnyStr, url: AnyStr, params: DictOrBytes = None,
data: DictOrTupleListOrBytesOrFileLike = None, headers: dict = None,
cookies: DictOrCookieJar = None,
files: StrOrFileLike = None, auth: AuthTupleOrCallable = None,
timeout: FloatOrTuple = None,
allow_redirects=True, proxies: Dict[str, str] = None,
hooks: Dict[str, Callable] = None,
stream: bool = None, verify: StrOrBool = None,
cert: StrOrStrTuple = None,
json: str = None, user_activation: bool = False) -> Response:
"""Constructs a :class:`Request <Request>`, prepares it and sends it.
Returns :class:`Response <Response>` object.
:param user_activation: (optional) Indicates that the request was user
initiated.
:param hooks: (optional) Dictionary mapping a hook (only 'request' is
possible) to a Callable.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the
:class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the
:class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the
:class:`Request`.
:param files: (optional) Dictionary of ``'filename': file-like-objects``
for multipart encoding upload.
:param auth: (optional) Auth tuple or callable to enable
Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Set to True by default.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol or protocol and
hostname to the URL of the proxy.
:param stream: (optional) whether to immediately download the response
content. Defaults to ``False``.
:param verify: (optional) Either a boolean, in which case it controls whether we
verify the server's TLS certificate, or a string, in which case it must
be a path to a CA bundle to use. Defaults to ``True``.
:param cert: (optional) if String, path to ssl client cert file (.pem).
If Tuple, ('cert', 'key') pair.
:rtype: requests.Response
"""
cookies = cookies if cookies is not None else self.cookies
self.headers = self._get_default_headers(method, parse_url(url),
user_activation)
self.headers.update(headers if headers else {})
# Create the Request.
req = Request(
method=method.upper(),
url=url,
headers=self.headers,
files=files,
data=data or {},
json=json,
params=params or {},
auth=auth,
cookies=cookies,
hooks=hooks,
)
prep = self.prepare_request(req)
log_request(prep)
prep.headers = CaseInsensitiveDict(
sort_dict(dict(prep.headers), self._header_precedence))
proxies = proxies or {}
settings = self.merge_environment_settings(
prep.url, proxies, stream, verify, cert
)
# Await the request timeout
self.await_timeout(parse_url(prep.url))
# Send the request.
send_kwargs = {
'timeout': timeout,
'allow_redirects': allow_redirects,
}
send_kwargs.update(settings)
req_timestamp = datetime.now()
response = self.send(prep, **send_kwargs)
        adapter = self.adapter
        # Only requests that actually went out (i.e. were not served from the
        # file cache) count towards the request timeout.
        if not (isinstance(adapter, FileCache) and adapter.hit):
            self._last_request_timestamp = req_timestamp
self._last_response = response
log_response(response)
return response
def prepare_request(self, request):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for
transmission and returns it. The :class:`PreparedRequest` has settings
merged from the :class:`Request <Request>` instance and those of the
:class:`Session`.
:param request: :class:`Request` instance to prepare with this
session's settings.
:rtype: requests.PreparedRequest
"""
cookies = request.cookies or {}
# Bootstrap CookieJar.
if not isinstance(cookies, CookieJar):
cookies = cookiejar_from_dict(cookies)
# Merge with session cookies
cookie_jar = self.cookies.__new__(self.cookies.__class__)
cookie_jar.__init__()
if isinstance(cookie_jar, TimelessRequestsCookieJar) and isinstance(
self.cookies, TimelessRequestsCookieJar):
cookie_jar.mock_date = self.cookies.mock_date
merged_cookies = merge_cookies(
merge_cookies(cookie_jar, self.cookies), cookies)
# Set environment's basic authentication if not explicitly set.
auth = request.auth
if self.trust_env and not auth and not self.auth:
auth = get_netrc_auth(request.url)
p = PreparedRequest()
p.prepare(
method=request.method.upper(),
url=request.url,
files=request.files,
data=request.data,
json=request.json,
headers=merge_setting(request.headers, self.headers,
dict_class=CaseInsensitiveDict),
params=merge_setting(request.params, self.params),
auth=merge_setting(auth, self.auth),
cookies=merged_cookies,
hooks=merge_hooks(request.hooks, self.hooks),
)
p.headers = CaseInsensitiveDict(
sort_dict(dict(p.headers), self._header_precedence))
return p
# noinspection PyUnresolvedReferences
def resolve_redirects(self, resp, req, stream=False, timeout=None,
verify=True, cert=None, proxies=None, yield_requests=False,
**adapter_kwargs):
"""Receives a Response. Returns a generator of Responses or Requests."""
hist = [] # keep track of history
url = self.get_redirect_target(resp)
previous_fragment = urlparse(req.url).fragment
while url:
log_response(resp)
prepared_request = req.copy()
# Update history and keep track of redirects.
# resp.history must ignore the original request in this loop
hist.append(resp)
resp.history = hist[1:]
try:
# noinspection PyStatementEffect
resp.content # Consume socket so it can be released
except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
resp.raw.read(decode_content=False)
if len(resp.history) >= self.max_redirects:
raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects,
response=resp)
# Release the connection back into the pool.
resp.close()
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
# noinspection SpellCheckingInspection
parsed_rurl = urlparse(resp.url)
url = '%s:%s' % (to_native_string(parsed_rurl.scheme), url)
# Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2)
parsed = urlparse(url)
if parsed.fragment == '' and previous_fragment:
# noinspection PyProtectedMember
parsed = parsed._replace(fragment=previous_fragment)
elif parsed.fragment:
previous_fragment = parsed.fragment
url = parsed.geturl()
# Facilitate relative 'location' headers, as allowed by RFC 7231.
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
# Compliant with RFC3986, we percent encode the url.
if not parsed.netloc:
url = urljoin(resp.url, requote_uri(url))
else:
url = requote_uri(url)
prepared_request.url = to_native_string(url)
self.rebuild_method(prepared_request, resp)
# https://github.com/requests/requests/issues/1084
if resp.status_code not in (
codes.temporary_redirect, codes.permanent_redirect):
# https://github.com/requests/requests/issues/3490
purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding')
for header in purged_headers:
prepared_request.headers.pop(header, None)
prepared_request.body = None
parsed_url = parse_url(url)
headers = dict(prepared_request.headers)
if 'Accept-Encoding' in headers:
headers['Accept-Encoding'] = self._get_accept_encoding(parsed_url)
te = self._get_te(parsed_url)
if 'TE' in headers and te is None:
del headers['TE']
elif te is not None:
headers['TE'] = te
uir = self._get_upgrade_insecure_requests()
if 'Upgrade-Insecure-Requests' in headers and uir is None:
del headers['Upgrade-Insecure-Requests']
elif uir is not None:
headers['Upgrade-Insecure-Requests'] = uir
if 'Host' in headers:
headers['Host'] = parsed_url.hostname
origin = self._get_origin(prepared_request.method, parsed_url)
if 'Origin' in headers and origin is None:
del headers['Origin']
elif origin is not None:
headers['Origin'] = origin
try:
del headers['Cookie']
except KeyError:
pass
prepared_request.headers = headers
self._adapt_redirection(prepared_request)
# Extract any cookies sent on the response to the cookiejar
# in the new request. Because we've mutated our copied prepared
# request, use the old one that we haven't yet touched.
# noinspection PyProtectedMember
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
# noinspection PyProtectedMember
merge_cookies(prepared_request._cookies, self.cookies)
# noinspection PyProtectedMember
prepared_request.prepare_cookies(prepared_request._cookies)
# Rebuild auth and proxy information.
proxies = self.rebuild_proxies(prepared_request, proxies)
self.rebuild_auth(prepared_request, resp)
# A failed tell() sets `_body_position` to `object()`. This non-None
# value ensures `rewindable` will be True, allowing us to raise an
# UnrewindableBodyError, instead of hanging the connection.
# noinspection PyProtectedMember
rewindable = (
prepared_request._body_position is not None and
('Content-Length' in headers or 'Transfer-Encoding' in headers)
)
# Attempt to rewind consumed file-like object.
if rewindable:
rewind_body(prepared_request)
# Override the original request.
prepared_request.headers = dict(
sort_dict(prepared_request.headers, self._header_precedence))
req = prepared_request
log_request(req)
if yield_requests:
yield req
else:
resp = self.send(
req,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
allow_redirects=False,
**adapter_kwargs
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
# extract redirect url, if any, for the next loop
url = self.get_redirect_target(resp)
self._last_navigate = resp
yield resp
def _get_default_headers(self, method: str, url: Url,
user_activation: bool) -> CaseInsensitiveDict:
"""Provides the default headers the browser should send when connecting to an
endpoint
The method tries to guess the mimetype and encoding to fill the Accept and
Accept-Encoding headers
:param method: The method of the HTTP request
:param url: The url the browser is supposed to connect to
:returns: A dictionary form of the default headers.
        :rtype: CaseInsensitiveDict
"""
return CaseInsensitiveDict(dict(filter(lambda kvp: kvp[1] != '', {
'Host': self._get_host(url),
'User-Agent': self._get_user_agent(),
'Accept': self._get_accept(url),
'Accept-Language': self._get_accept_language(),
'Accept-Encoding': self._get_accept_encoding(url),
'Connection': self._get_connection(),
'Origin': self._get_origin(method, url),
'Referer': self._get_referer(url),
'DNT': self._get_dnt(),
'Upgrade-Insecure-Requests': self._get_upgrade_insecure_requests(),
'TE': self._get_te(url),
}.items())))
def await_timeout(self, url: Url = None):
"""Waits until the request timeout expires.
The delay will be omitted if the last request was a hit in the cache.
Gets called automatically on every request.
"""
if not self._honor_timeout:
return
time_passed = datetime.now() - self._last_request_timestamp
if time_passed < self._request_timeout:
adapter = self.adapter
if url is not None and isinstance(adapter, FileCache) and adapter.is_hit(
url) and adapter.is_active:
logger.debug("Request will be a hit in cache. No need to wait.")
return
time_to_wait = self._request_timeout - time_passed
logger.debug(f"Waiting for {time_to_wait.total_seconds()} seconds.")
sleep(time_to_wait.total_seconds())
self._did_wait = True
return
self._did_wait = False
def _adapt_redirection(self, request: PreparedRequest):
pass
FF_NEWEST = (90, 0)
class Firefox(Browser):
def __init__(self,
os=Windows(),
ff_version=FF_NEWEST,
build_id=20100101,
do_not_track=False,
upgrade_insecure_requests=True):
super(Firefox, self).__init__()
self._name = 'Firefox'
self._version = '.'.join(map(str, ff_version))
self._user_agent = self.create_user_agent(os, ff_version, build_id)
self._accept = [
MimeTypeTag("text", "html"),
MimeTypeTag("application", "xhtml+xml"),
MimeTypeTag("application", "xml", q=0.9),
MimeTypeTag("image", "webp"),
MimeTypeTag("*", "*", q=0.8)
]
self._accept_language = [
LanguageTag("en", "US"),
LanguageTag("en", q=0.5)
]
self._accept_encoding = ['gzip', 'deflate', 'br']
self._dnt = do_not_track
self._upgrade_insecure_requests = upgrade_insecure_requests
self._connection = 'keep-alive'
self._header_precedence = [
'Host',
'User-Agent',
'Accept',
'Accept-Language',
'Accept-Encoding',
'DNT',
'Content-Type',
'Content-Length',
'Origin',
'Connection',
'Referer',
'Cookie',
'Upgrade-Insecure-Requests',
'TE',
]
@staticmethod
    def create_user_agent(os: OS = Windows(), version: tuple[int, ...] = FF_NEWEST,
                          build_id: int = 20100101) -> str:
        """Creates a user agent string for Firefox
        :param os: The underlying operating system (default :py:class:`Windows`).
        :param version: The version of Firefox (default :py:data:`FF_NEWEST`).
        :param build_id: The build id of Gecko (default 20100101).
        :returns: A custom user agent string.
        """
ff_version = '.'.join(map(str, version))
return f"Mozilla/5.0 ({os}; rv:{ff_version}) " \
f"Gecko/{build_id} " \
f"Firefox/{ff_version}"
@staticmethod
def create_random_user_agent() -> str:
os = random_os()
ff_version = random_version(get_firefox_versions())
return Firefox.create_user_agent(os, ff_version)
CHROME_NEWEST = (92, 0, 4495, 0)
WEBKIT_NEWEST = (537, 36)
class Chrome(Browser):
def __init__(self,
os=Windows(),
chrome_version=CHROME_NEWEST,
webkit_version=WEBKIT_NEWEST,
do_not_track=False,
upgrade_insecure_requests=True):
super(Chrome, self).__init__()
self._name = 'Chrome'
self._version = '.'.join(map(str, chrome_version))
self._user_agent = self.create_user_agent(os=os, version=chrome_version,
webkit_version=webkit_version)
self._accept = [
MimeTypeTag("text", "html"),
MimeTypeTag("application", "xhtml+xml"),
MimeTypeTag("application", "xml", q=0.9),
MimeTypeTag("image", "webp"),
MimeTypeTag("image", "apng"),
MimeTypeTag(q=0.8),
MimeTypeTag("application", "signed-exchange", v='b3', q=0.9),
]
self._accept_language = [
LanguageTag("en", "US"),
LanguageTag("en", q=0.9)
]
self._accept_encoding = ['gzip', 'deflate', 'br']
self._dnt = do_not_track
self._upgrade_insecure_requests = upgrade_insecure_requests
self._connection = 'keep-alive'
self._header_precedence = [
'Host',
'Connection',
'Content-Type',
# 'Content-Length',
'Upgrade-Insecure-Requests',
'User-Agent',
'Sec-Fetch-User',
'Accept',
'Origin',
'Sec-Fetch-Site',
'Sec-Fetch-Mode',
'Referer',
'Accept-Encoding',
'Accept-Language',
# 'DNT',
# 'Cookie',
# 'TE',
]
@staticmethod
def create_user_agent(os=Windows(), version=CHROME_NEWEST,
webkit_version=WEBKIT_NEWEST) -> str:
"""Creates a user agent string for Firefox
:param os: The underlying operating system (default :py:class:`Windows`).
:param version: The version of the underlying webkit
(default `(79, 0, 3945, 88)).
:param webkit_version: The version of Chrome (default: (537, 36)).
:returns: A custom user agent string.
"""
webkit_ver = '.'.join(map(str, webkit_version))
return f"Mozilla/5.0 ({os}) " \
f"AppleWebKit/{webkit_ver} (KHTML, like Gecko) " \
f"Chrome/{'.'.join(map(str, version))} " \
f"Safari/{webkit_ver}"
@staticmethod
def create_random_user_agent() -> str:
os = random_os()
chrome_version = random_version(get_chrome_versions())
        return Chrome.create_user_agent(os, chrome_version)
    def navigate(self, url: str, **kwargs) -> list[Response]:
if parse_url(url).scheme == 'https':
kwargs.setdefault('headers', {}).setdefault('Sec-Fetch-User', '?1')
kwargs.setdefault('headers', {}).setdefault('Sec-Fetch-Mode', 'navigate')
responses = super(Chrome, self).navigate(url, **kwargs)
self._last_navigate = responses[0]
return responses
def _get_default_headers(self, method: str, url: Url,
user_activation: bool) -> CaseInsensitiveDict:
adjust_accept_encoding = self._last_navigate is None
if adjust_accept_encoding:
self._accept_encoding = ['gzip', 'deflate']
headers = super(Chrome, self)._get_default_headers(method, url, user_activation)
if adjust_accept_encoding:
self._accept_encoding = ['gzip', 'deflate', 'br']
if url.scheme == 'https':
headers['Sec-Fetch-Site'] = self._get_sec_fetch_site(url)
headers['Sec-Fetch-Mode'] = self._get_sec_fetch_mode(method, url)
return headers
def _adapt_redirection(self, request: PreparedRequest):
url = parse_url(request.url)
if 'Host' in request.headers:
del request.headers['Host']
if 'Connection' in request.headers:
del request.headers['Connection']
if 'Accept-Encoding' in request.headers:
request.headers['Accept-Encoding'] = self._get_accept_encoding(url)
if url.scheme == 'https':
request.headers['Sec-Fetch-Site'] = self._get_sec_fetch_site(url)
if self._last_navigate is None:
request.headers['Sec-Fetch-Mode'] = 'navigate'
else:
request.headers['Sec-Fetch-Mode'] = self._get_sec_fetch_mode(
request.method, url)
request.headers = CaseInsensitiveDict(
sort_dict(dict(request.headers), self._header_precedence))
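# Illustrative usage sketch, not part of the original module: drives the
# Firefox and Chrome emulations against a placeholder URL. The URL and the
# two-second timeout are assumptions made for this example; navigate() will
# perform live HTTP requests for the page and its attachments.
if __name__ == '__main__':
    browser = Firefox(do_not_track=True)
    browser.request_timeout = timedelta(seconds=2)
    responses = browser.navigate('https://example.org')
    print(responses[0].status_code, len(responses) - 1, 'attachments fetched')

    chrome = Chrome()
    print(chrome.user_agent)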
|
the-stack_0_3875 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class Affine(nn.Module):
def __init__(self,
min_rot, max_rot,
min_shear_x, max_shear_x,
min_shear_y, max_shear_y,
min_scale_x, max_scale_x,
min_scale_y, max_scale_y
):
super(Affine, self).__init__()
self.min_rot = min_rot
self.max_rot = max_rot
self.min_shear_x = min_shear_x
self.max_shear_x = max_shear_x
self.min_shear_y = min_shear_y
self.max_shear_y = max_shear_y
self.min_scale_x = min_scale_x
self.max_scale_x = max_scale_x
self.min_scale_y = min_scale_y
self.max_scale_y = max_scale_y
    def forward(self, x):
        # Sample a random transform from the configured parameter ranges.
        rot_theta = np.random.uniform(self.min_rot, self.max_rot)
        shear_phi_x = np.random.uniform(self.min_shear_x, self.max_shear_x)
        shear_psi_y = np.random.uniform(self.min_shear_y, self.max_shear_y)
        scale_x = np.random.uniform(self.min_scale_x, self.max_scale_x)
        scale_y = np.random.uniform(self.min_scale_y, self.max_scale_y)
        # Build the individual 3x3 homogeneous transforms.
        rotation_matrix = np.array([[np.cos(rot_theta), np.sin(rot_theta), 0],
                                    [-np.sin(rot_theta), np.cos(rot_theta), 0],
                                    [0, 0, 1]], dtype=np.float32)
        shear_matrix = np.array([[1, np.tan(shear_phi_x), 0],
                                 [np.tan(shear_psi_y), 1, 0],
                                 [0, 0, 1]], dtype=np.float32)
        scale_matrix = np.array([[scale_x, 0, 0],
                                 [0, scale_y, 0],
                                 [0, 0, 1]], dtype=np.float32)
        # Compose rotation, shear and scale, keep the top 2x3 affine part, and
        # repeat the same transform for every sample in the batch.
        transformation_matrix = np.dot(np.dot(rotation_matrix, shear_matrix), scale_matrix)[0:2, :]
        matrix = torch.FloatTensor(np.stack([transformation_matrix for _ in range(x.size(0))])).cuda()
        # Resample the input on the warped sampling grid.
        grid = F.affine_grid(matrix, x.size())
        return F.grid_sample(x, grid)
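# Illustrative usage sketch, not part of the original module: applies a mild
# random affine warp to a small random image batch. The parameter ranges and
# batch shape are assumptions for this example; forward() builds its sampling
# grid on the GPU, so a CUDA device is required.
if __name__ == '__main__':
    if torch.cuda.is_available():
        aug = Affine(min_rot=-0.1, max_rot=0.1,
                     min_shear_x=-0.05, max_shear_x=0.05,
                     min_shear_y=-0.05, max_shear_y=0.05,
                     min_scale_x=0.9, max_scale_x=1.1,
                     min_scale_y=0.9, max_scale_y=1.1)
        images = torch.rand(4, 3, 32, 32).cuda()  # batch of 4 RGB 32x32 images
        warped = aug(images)
        print(warped.shape)  # torch.Size([4, 3, 32, 32])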
|
the-stack_0_3876 | import argparse
import sys
import os
import subprocess
import shlex
import glob
import irc_bot
def process_arguments(args):
parser = argparse.ArgumentParser(description='configure the irc clients')
parser.add_argument('--txts_path', action='store', help='path to folder with txt files')
parser.add_argument('--bot_script_path', action='store', help='path to the irc_bot.py script', type=str, default='irc_bot.py')
parser.add_argument('--server', action='store', help='the server to connect the bots to', type=str, default='localhost')
parser.add_argument('--max_bots', action='store', help='the maximum number of bots to train', type=int, default=10)
params = vars(parser.parse_args(args))
return params
def start_individual_bot_process(script_path, file_path, server):
python_path = os.popen('which python').read().rstrip()
line = python_path + ' ' + script_path + ' --txt_path ' + file_path + ' --server ' + server
subprocess.Popen(shlex.split(line), shell=False)
if __name__ == '__main__':
params = process_arguments(sys.argv[1:])
txts_path = params['txts_path']
max_bots = params['max_bots']
bot_script_path = params['bot_script_path']
server = params['server']
for file in glob.glob(txts_path + '/*.txt')[:max_bots]:
start_individual_bot_process(script_path=bot_script_path, file_path=file, server=server)
statistic_client = irc_bot.EcoStatistics('HumammadSusej')
statistic_client.connect(server, tls=False)
statistic_client.handle_forever() |
the-stack_0_3879 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 14 14:04:34 2021
@author: mesar
"""
import numpy as np
import pandas as pd
from lib import utils
from progressbar import progressbar as pbar
from pathlib import Path
import itertools
from time import time
import csv
if __name__ == "__main__":
Path("../data/model_build_outputs/all_prob_matrices_heading").mkdir(parents=True, exist_ok=True)
Path("../data/model_build_outputs/prob_matrices_heading").mkdir(parents=True, exist_ok=True)
Path("../data/model_build_outputs/prob_matrices_angle").mkdir(parents=True, exist_ok=True)
print("Calculating train_routes")
#train_routes = utils.get_train_routes()
routes = utils.get_routes()
hroutes = np.unique(routes.route_fid)
all_zroutes = utils.get_routes_as_zones()
zroutes = all_zroutes[all_zroutes.route_fid.isin(hroutes)]
print("Done reading routes")
t0 = time()
max_distances = [50, 100, 150, 200, 250, 300]
dwks = [0.01, 0.05, 0.1, 0.15]
r = []
for max_distance, dwk in itertools.product(max_distances, dwks):
tt = time()
#print ("\n----------\n%3d"%max_distance, "%.2f"%dwk, end=" || ", flush=True)
za = utils.ZrouteField(zroutes, max_distance=max_distance).compute_field(dwk=dwk, use_pbar=True)
h = za.get_estimated_headings(use_pbar=True)
rr = za.heading_estimations_cosdistance(h)
rr['max_distance'] = max_distance
rr['dwk'] = dwk
rr['zones_estimated'] = np.mean(h.cos_distance!=0)
rr['time'] = time()-t0
rr['nroutes'] = len(np.unique(za.zroutes.route_fid))
t0 = time()
r.append(rr)
print ("maxd %3d, "%max_distance, "dwk %.2f, "%dwk, f'time {time()-tt:.4f}, cos_sim {rr["cos_distance_mean"]:.4f}', flush=True)
r = pd.DataFrame(r)
r.to_hdf("../data/model_build_outputs/md_dkw_exploration.hdf", "data")
dwks = np.sort(np.unique(r.dwk))
max_distances = np.sort(np.unique(r.max_distance))
csims = np.zeros((len(dwks), len(max_distances)))
zcovered = np.zeros((len(dwks), len(max_distances)))
for i,dwk in enumerate(dwks):
for j,max_distance in enumerate(max_distances):
k = r[(r.max_distance==max_distance)&(r.dwk==dwk)].iloc[0]
csims[i,j] = k.cos_distance_mean
zcovered[i,j] = k.zones_estimated
for distance in max_distances:
k = r[r.max_distance==distance]
print(k)
estimated_zones_value = 1.0
best_options = r[r.zones_estimated >= estimated_zones_value]
if not best_options.empty:
best_options = r[r.zones_estimated >= estimated_zones_value]
best_combination = best_options[best_options.cos_distance_mean == best_options.cos_distance_mean.max()]
selected_max_distance = best_combination.max_distance.values[0]
selected_dwk = best_combination.dwk.values[0]
while best_options.empty:
print("Empty for value: " + str(estimated_zones_value))
estimated_zones_value = estimated_zones_value - 0.1
best_options = r[r.zones_estimated >= estimated_zones_value]
best_combination = best_options[best_options.cos_distance_mean == best_options.cos_distance_mean.max()]
selected_max_distance = best_combination.max_distance.values[0]
selected_dwk = best_combination.dwk.values[0]
print(selected_max_distance)
print(selected_dwk)
output_path = "../data/model_build_outputs/best_max_distance.csv"
with open(output_path, "w") as file:
writer = csv.writer(file, delimiter=',')
writer.writerow([selected_max_distance])
output_path = "../data/model_build_outputs/best_dwk.csv"
with open(output_path, "w") as file:
writer = csv.writer(file, delimiter=',')
writer.writerow([selected_dwk])
print("Max distance: " + str(selected_max_distance))
print("dwk: " + str(selected_dwk))
print("Calculating train_routes")
train_routes = utils.get_routes()
print("Calculating train_zroutes")
train_zroutes = utils.get_routes_as_zones()
print("Calculating z_route_fields")
za = utils.ZrouteField(train_zroutes, max_distance=selected_max_distance).compute_field(dwk=selected_dwk)
print("Calculating heading_matrices")
h = za.get_estimated_headings(zroutes=train_zroutes)
fname = f'../data/model_build_outputs/heading_estimations_md_{selected_max_distance}_dwk_{selected_dwk:.4f}.hdf'
h.to_hdf(fname, "data")
#h = pd.read_hdf("../data/model_apply_outputs/heading_estimations_md_200_dwk_0.1000.hdf")
zroutes = train_zroutes.copy()
print("Calculating prob_matrices")
for route_fid in pbar(np.unique(h.route_fid)):
probs = utils.get_heading_based_probmatrix(h, route_fid)
probs = probs[~probs.index.str.contains("Station")]
#probs.drop(probs.filter(regex='Station').columns, axis=1, inplace=True)
probs.to_csv(f"../data/model_build_outputs/prob_matrices_heading/{route_fid}_probs.csv", sep=',', na_rep='nan')
zones_id = zroutes.zone_id[zroutes.route_fid==route_fid]
zones_id = zones_id[~zones_id.str.contains("Station")]
zones_id.reset_index(inplace=True, drop=True)
cities = zroutes.city[zroutes.route_fid==route_fid]
cities.reset_index(inplace=True, drop=True)
city = cities[0]
city_size = len(city) + 2
        zones_id = [zones_id[i][city_size:] for i in range(0, len(zones_id))]  # strip the "<city>_" prefix from each zone id (Station/depot zones were already filtered out above)
zones_df = pd.Series(zones_id)
zones_df = zones_df.append(pd.Series("nan"))
zones_df.to_csv(f"../data/model_build_outputs/prob_matrices_heading/{route_fid}_zroutes.csv", index=False, header=False, na_rep='nan')
prob_matrix = utils.get_angle_based_probmatrix(h, route_fid)
prob_matrix.to_csv(f"../data/model_build_outputs/prob_matrices_angle/{route_fid}_probs.csv", sep=',', na_rep='nan')
#probs.to_hdf(f"data/prob_matrices_based_on_heading/{route_fid}_probs.hdf", "data")
print("Done")
|
the-stack_0_3880 | import matplotlib
import re
import custom_style
from custom_style import setup_columns,col,remove_chart_junk
import matplotlib.pyplot as plt
import sys
import numpy as np
from matplotlib.ticker import FuncFormatter
import math
from collections import defaultdict
from matplotlib.patches import Patch
import scipy.special
from scipy.special import lambertw
lb_1_name = "micro_balancer_make_batch.dat"
lb_2_name = "micro_balancer_match_resps.dat"
suboram_name = "micro_suboram_batch_sz.dat"
labels = ["Load balancer (make batch)", "SubORAM (process batch)", "Load balancer (match responses)"]
#colors=[custom_style.mix_colors[2], custom_style.hash_colors[4], custom_style.hash_colors[1], custom_style.hash_colors[0]]
colors=["#FFCA3E","#FF6F50","#D03454"]
suborams = 1
data_size = 2**10
def getLoadBalancerData(filename):
results = []
f1 = open(filename, "r")
lines_1 = f1.readlines()
for i in range(len(lines_1)):
elems_1 = lines_1[i].split()
result = {
"suborams": int(elems_1[0]),
"requests": int(elems_1[1]),
"latency": (float(elems_1[2])) / 1000000.0,
}
results.append(result)
f1.close()
return results
def getSuboramData():
results = []
with open(suboram_name, "r") as f:
lines = f.readlines()
for line in lines:
elems = line.split()
result = {
"data_size": int(elems[0]),
"batch": int(elems[1]),
"latency": float(elems[2]) / 1000.0,
}
results.append(result)
return results
def f(N, n_suborams, secparam=128):
mu = N / n_suborams
alpha = math.log(n_suborams * (2 ** secparam))
rhs = alpha / (math.e * mu) - 1 / math.e
branch = 0
epsilon = math.e ** (lambertw(rhs, branch) + 1) - 1
#epsilon = (alpha + math.sqrt(2 * mu * alpha)) / mu # uncomment for looser bound
#print(alpha, rhs, lambertw(rhs, 0), lambertw(rhs, 1))
#print("bound", suborams, secparam, alpha, rhs, lambertw(rhs), epsilon)
return mu * (1 + epsilon)
def getLoadBalancerLatencyForParams(data, suborams, requests):
for elem in data:
if elem["suborams"] == suborams and elem["requests"] == requests:
return elem["latency"]
print(("load balancer out-of-bounds params: no latency for params suborams=%d, requests=%d") % (suborams, requests))
return -1.0
def getSuboramLatencyForParams(data, data_size, batch):
for elem in data:
if elem["data_size"] == data_size and elem["batch"] == batch:
return elem["latency"]
print(("suboram out-of-bounds params: no latency for params data_size=%d, batch=%d") % (data_size, batch))
return -1.0
def roundUpPow2(x):
return 2 ** (math.ceil(math.log(x,2)))
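# Illustrative example, not part of the original script: the padded batch size
# a suboram must handle for 2^10 requests spread over 4 suborams, rounded up
# to a power of two the same way the plotting code below does. The parameter
# values are assumptions for this example.
#   f(2**10, 4)               -> per-suboram batch size including padding
#   roundUpPow2(f(2**10, 4))  -> next power of two, used to look up benchmarks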
def makeBreakdownFig(in_name, out_name, data_size, title, args):
lb_1_data = getLoadBalancerData(lb_1_name)
lb_2_data = getLoadBalancerData(lb_2_name)
suboram_data = getSuboramData()
lb1_plt = []
lb2_plt = []
suboram_plt = []
reqs_plt = [2**i for i in range(6,11)]
for reqs in reqs_plt:
lb1_plt.append(getLoadBalancerLatencyForParams(lb_1_data,suborams,reqs) * 1000)
lb2_plt.append(getLoadBalancerLatencyForParams(lb_2_data,suborams,reqs) * 1000)
batch_size_rounded = roundUpPow2(f(reqs,suborams))
suboram_plt.append(getSuboramLatencyForParams(suboram_data,data_size,reqs) * 1000)
fig = plt.figure(figsize = (8,8))
ax = fig.add_subplot(111)
ax.stackplot(reqs_plt, lb1_plt, suboram_plt, lb2_plt, labels=labels, colors=colors)
#ax.stackplot(np.arange(10, 110, step=10), y[0], y[1], y[2], y[3], labels=labels, colors=colors)
ax.set_xlabel("Requests")
ax.set_ylabel("Process time (ms)")
#ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xticks([2**6, 2**8, 2**10])
ax.set_xticklabels(["$2^6$", "$2^8$", "$2^{10}$"])
#ax.set_title(title, fontsize=8)
print("updated")
#plt.legend()
#ax.spines['left'].set_position("zero")
#ax.spines['bottom'].set_position("zero")
remove_chart_junk(plt,ax,lightGrid=True,below=False)
pgf_with_pdflatex = {
"pgf.texsystem": "pdflatex",
"pgf.preamble": [
r"""
% \input{../fonts}
\usepackage[T1]{fontenc}
\newcommand\hmmax{0}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{mathptmx}
""",
],
"text.usetex": True,
"font.family": "serif",
"font.serif": [],
"font.sans-serif": [],
"font.monospace": [],
"axes.labelsize": 7,
"font.size": 10,
"legend.fontsize": 7,
"xtick.labelsize": 7,
"ytick.labelsize": 7,
"lines.markersize": 3,
"lines.markeredgewidth": 0,
"axes.linewidth": 0.5,
}
matplotlib.rcParams.update(pgf_with_pdflatex)
#ax.yaxis.grid(which='major', color='0.9', linestyle='dotted')
if args.title:
ax.set_title(args.title, y=1.5)
if args.large:
plt.legend(bbox_to_anchor=(0,1.02,1,0.2), loc="lower left",
mode="expand", borderaxespad=0)
custom_style.save_fig(fig, out_name, [2.5, 3], pad=0.3)
else:
custom_style.save_fig(fig, out_name, [1.3, 1.4])
#custom_style.save_fig(fig, out_name, [3.25, 1.8])
#plt.show()
|
the-stack_0_3881 | #!/usr/bin/env python3
#
# Copyright (C) 2018 Bloomberg LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# <http://www.apache.org/licenses/LICENSE-2.0>
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
from buildgrid._version import __version__
if sys.version_info[0] != 3 or sys.version_info[1] < 5:
print("BuildGrid requires Python >= 3.5")
sys.exit(1)
try:
from setuptools import setup, find_packages, Command
except ImportError:
print("BuildGrid requires setuptools in order to build. Install it using"
" your package manager (usually python3-setuptools) or via pip (pip3"
" install setuptools).")
sys.exit(1)
class BuildGRPC(Command):
"""Command to generate project *_pb2.py modules from proto files."""
description = 'build gRPC protobuf modules'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
import grpc_tools.command
except ImportError:
print("BuildGrid requires grpc_tools in order to build gRPC modules.\n"
"Install it via pip (pip3 install grpcio-tools).")
exit(1)
protos_root = 'buildgrid/_protos'
grpc_tools.command.build_package_protos(protos_root)
# Postprocess imports in generated code
for root, _, files in os.walk(protos_root):
for filename in files:
if filename.endswith('.py'):
path = os.path.join(root, filename)
with open(path, 'r') as f:
code = f.read()
# All protos are in buildgrid._protos
code = re.sub(r'^from ', r'from buildgrid._protos.',
code, flags=re.MULTILINE)
# Except for the core google.protobuf protos
code = re.sub(r'^from buildgrid._protos.google.protobuf', r'from google.protobuf',
code, flags=re.MULTILINE)
with open(path, 'w') as f:
f.write(code)
# Load main requirements from file:
with open('requirements.txt') as requirements_file:
install_requirements = requirements_file.read().splitlines()
auth_requirements = []
# Load 'auth' requirements from dedicated file:
if os.path.isfile('requirements.auth.txt'):
with open('requirements.auth.txt') as requirements_file:
auth_requirements = requirements_file.read().splitlines()
docs_requirements = []
# Load 'docs' requirements from dedicated file:
if os.path.isfile('requirements.docs.txt'):
with open('requirements.docs.txt') as requirements_file:
docs_requirements = requirements_file.read().splitlines()
tests_requirements = []
# Load 'tests' requirements from dedicated file:
if os.path.isfile('requirements.tests.txt'):
with open('requirements.tests.txt') as requirements_file:
tests_requirements = requirements_file.read().splitlines()
db_requirements = []
# Load 'db' requirements from dedicated file:
if os.path.isfile('requirements.db.txt'):
with open('requirements.db.txt') as requirements_file:
db_requirements = requirements_file.read().splitlines()
redis_requirements = []
# Load 'redis' requirements from dedicated file:
if os.path.isfile('requirements.redis.txt'):
with open('requirements.redis.txt') as requirements_file:
redis_requirements = requirements_file.read().splitlines()
setup(
name="BuildGrid",
version=__version__,
license="Apache License, Version 2.0",
description="A remote execution service",
cmdclass={
'build_grpc': BuildGRPC, },
packages=find_packages(),
package_data={'buildgrid.server.persistence.sql': ['alembic/*', 'alembic/**/*']},
python_requires='>= 3.5.3', # janus requirement
install_requires=install_requirements,
setup_requires=['pytest-runner'],
tests_require=tests_requirements,
extras_require={
'auth': auth_requirements,
'database': db_requirements,
'redis': redis_requirements,
'docs': docs_requirements,
'tests': tests_requirements, },
entry_points={
'console_scripts': [
'bgd = buildgrid._app:cli',
]
}
)
|
the-stack_0_3882 | # -*- coding: utf-8 -*-
import os
import sys
import argparse
import time
main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.insert(0, main_path)
def call1(dataset, num_chunks, args):
call1_ = "python -m sklearnex fea.1.mmm_bopf_repr_fit.py "
call1_ += "{0} {1} --timestamp={2} --n_jobs={3} --num_chunks={4}".format(
dataset, args.config_file, args.timestamp, args.n_jobs, num_chunks
)
return call1_
def call2(dataset, num_chunks, args):
call2_ = "python -m sklearnex fea.2.mmm_bopf_repr_transform.py "
call2_ += "{0} {1} --timestamp={2} --n_jobs={3} --num_chunks={4}".format(
dataset, args.config_file, args.timestamp, args.n_jobs, num_chunks
)
return call2_
def call3(dataset, num_chunks, args):
call2_ = "python -m sklearnex fea.3.mmm_bopf_compact_fit.py "
call2_ += "{0} {1} --timestamp={2} --num_chunks={3}".format(dataset, args.config_file, args.timestamp, num_chunks)
return call2_
def call4(dataset, num_chunks, args):
call3_ = "python -m sklearnex fea.4.mmm_bopf_compact_transform.py "
call3_ += "{0} --timestamp={1} --num_chunks={2}".format(dataset, args.timestamp, num_chunks)
return call3_
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"config_file",
help="filename for method MMMBOPF configuration"
)
parser.add_argument(
'-fit',
'--fit_dataset',
default="plasticc_train",
help='Name of the dataset to fit.',
)
parser.add_argument(
'-transform',
'--transform_dataset',
nargs="+",
default=["plasticc_test", "plasticc_augment"],
help='List of datasets to Transform.'
)
parser.add_argument(
'-n_chunks',
'--num_chunks',
nargs="+",
default=[1, 100, 20],
help='The number of chunks to divide each dataset in order. '
'First num_chunk for fit_dataset, and the rest for transform_dataset',
)
parser.add_argument(
"-t",
"--timestamp",
type=str,
default=time.strftime("%Y%m%d-%H%M%S"),
help="timestamp for creating unique files"
)
parser.add_argument(
"-c",
"--compact_method",
type=str,
default="LSA",
help="The compact method to use, options are: LSA or MANOVA"
)
parser.add_argument(
'-n_jobs',
"--n_jobs",
type=int,
default=-1,
help="The number of process to run in parallel"
)
# RUNNING EXAMPLE
# python fea.pipeline.py optimal_config_lsa.json -fit plasticc_train -transform plasticc_test plasticc_augment_v3 -n_chunks 1 100 10 -c LSA -n_jobs 6
args = parser.parse_args()
c = args.compact_method # LSA MANOVA
# print("RUNNING 1.mmm_bopf_repr_fit.py for compact_method=%s, dataset=%s" % (c, args.fit_dataset))
# os.system(call1(args.fit_dataset, args.num_chunks[0], args))
# for dataset, num_chunks in zip(args.transform_dataset, args.num_chunks[1:]):
# print("RUNNING 2.mmm_bopf_repr_transform.py for compact_method=%s, dataset=%s" % (c, dataset))
# os.system(call2(dataset, int(num_chunks), args))
print("RUNNING 3.mmm_bopf_compact_fit.py for compact_method=%s, dataset=%s" % (c, args.fit_dataset))
os.system(call3(args.fit_dataset, args.num_chunks[0], args))
for dataset, num_chunks in zip(args.transform_dataset, args.num_chunks[1:]):
print("RUNNING 4.mmm_bopf_compact_transform.py for compact_method=%s, dataset=%s" % (c, dataset))
os.system(call4(dataset, num_chunks, args))
print("DONE!!")
print("TIMESTAMP: ", args.timestamp)
# RUNING EXAMPLE
# python pipeline.py plasticc_train plasticc_test optimal_config_lsa.json --compact_method=LSA --train_num_chunks=1 --test_num_chunks=200
# python pipeline.py plasticc_train plasticc_test optimal_config_lsa.json --compact_method=LSA --train_num_chunks=1 --test_num_chunks=100 --timestamp=20210916-035944 --n_jobs=6
# python pipeline.py plasticc_augment_v3 plasticc_test optimal_config_lsa.json --compact_method=LSA --train_num_chunks=10 --test_num_chunks=100 --n_jobs=6
|
the-stack_0_3888 | from matplotlib.pyplot import figure
from numpy import array, zeros
from scipy.integrate import solve_ivp
from .dynamics import Dynamics
from ..util import default_fig
class SystemDynamics(Dynamics):
"""Abstract dynamics class for simulation.
Override eval_dot.
"""
def __init__(self, n, m):
"""Create a SystemDynamics object.
Inputs:
Number of states, n: int
Number of actions, m: int
"""
self.n = n
self.m = m
def eval(self, x, t):
return x
def step(self, x_0, u_0, t_0, t_f, atol=1e-6, rtol=1e-6):
"""Simulate system from initial state with constant action over a
time interval.
Approximated using Runge-Kutta 4,5 solver.
Inputs:
Initial state, x_0: numpy array
Control action, u_0: numpy array
Initial time, t_0: float
Final time, t_f: float
Absolute tolerance, atol: float
Relative tolerance, rtol: float
Outputs:
State at final time: numpy array
"""
x_dot = lambda t, x: self.eval_dot(x, u_0, t)
t_span = [t_0, t_f]
res = solve_ivp(x_dot, t_span, x_0, atol=atol, rtol=rtol)
return res.y[:, -1]
def simulate(self, x_0, controller, ts, processed=True, atol=1e-6, rtol=1e-6):
"""Simulate system from initial state with specified controller.
Approximated using Runge-Kutta 4,5 solver.
Actions computed at time steps and held constant over sample period.
Inputs:
Initial state, x_0: numpy array
Control policy, controller: Controller
Time steps, ts: numpy array
Flag to process actions, processed: bool
Absolute tolerance, atol: float
Relative tolerance, rtol: float
Outputs:
State history: numpy array
Action history: numpy array
"""
#print("Dimension",self.n)
#print("State",x_0)
assert len(x_0) == self.n
N = len(ts)
xs = zeros((N, self.n))
us = [None] * (N - 1)
controller.reset()
xs[0] = x_0
for j in range(N - 1):
x = xs[j]
t = ts[j]
u = controller.eval(x, t)
us[j] = u
u = controller.process(u)
xs[j + 1] = self.step(x, u, t, ts[j + 1])
if processed:
us = array([controller.process(u) for u in us])
return xs, us
def plot_timeseries(self, ts, data, fig=None, ax=None, title=None, labels=None):
fig, ax = default_fig(fig, ax)
if title is not None:
ax.set_title(title, fontsize=16)
ax.set_xlabel('$t$ (sec)', fontsize=16)
ax.plot(ts, data, linewidth=3)
if labels is not None:
ax.legend(labels, fontsize=16)
return fig, ax
def plot_states(self, ts, xs, fig=None, ax=None, labels=None):
if labels is None:
labels = [f'$x_{i}$' for i in range(self.n)]
return self.plot_timeseries(ts, xs, fig, ax, 'States', labels)
def plot_actions(self, ts, us, fig=None, ax=None, labels=None):
if labels is None:
labels = [f'$u_{j}$' for j in range(self.m)]
return self.plot_timeseries(ts[:-1], us, fig, ax, 'Actions', labels)
def plot(self, xs, us, ts, fig=None, state_labels=None, action_labels=None):
if fig is None:
fig = figure(figsize=(12, 6), tight_layout=True)
state_ax = fig.add_subplot(1, 2, 1)
fig, state_ax = self.plot_states(ts, xs, fig, state_ax, state_labels)
action_ax = fig.add_subplot(1, 2, 2)
fig, action_ax = self.plot_actions(ts, us, fig, action_ax, action_labels)
return fig, (state_ax, action_ax)
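# Illustrative usage sketch, not part of the original module (run it via
# ``python -m`` because of the relative imports above). SingleIntegrator and
# ConstantController are hypothetical stand-ins: the controller mimics the
# reset/eval/process interface that simulate() expects.
if __name__ == '__main__':
    from numpy import linspace

    class SingleIntegrator(SystemDynamics):
        def __init__(self):
            super().__init__(n=1, m=1)

        def eval_dot(self, x, u, t):
            return u  # x_dot = u

    class ConstantController:
        def __init__(self, u):
            self.u = u

        def reset(self):
            pass

        def eval(self, x, t):
            return self.u

        def process(self, u):
            return u

    system = SingleIntegrator()
    ts = linspace(0, 1, 11)
    xs, us = system.simulate(array([0.0]), ConstantController(array([1.0])), ts)
    print(xs[-1])  # approximately [1.0]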
|
the-stack_0_3889 | import os
from fnmatch import fnmatch
import pickle
# General Processing
import numpy as np
import pandas as pd
import collections
# DECOMPOSITION
from sklearn.decomposition import NMF
from scipy.linalg import svd
# NLU
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import NaturalLanguageUnderstandingV1 as NLUV1
# from ibm_watson.natural_language_understanding_v1 import \
# Features, ConceptsOptions, EntitiesOptions, KeywordsOptions
# Presentation / apps
import seaborn as sns
# GENERAL FUNCTIONS
# SELECTION
def random_split(lst, split=0.5):
shuffled = np.array(lst)
np.random.shuffle(shuffled)
split = int(split * len(shuffled))
return shuffled[-split:], shuffled[:-split]
# NORMALIZATION
def norm_stat(vec, weights=False):
'''
    Normalizes a vector: (v - v.mean()) / v.std()
'''
if weights:
return np.mean(abs(vec - vec.mean()))
return (vec-vec.mean())/vec.std()
# Algebraic normalization - dot product
def norm_dot(vec, weights=False):
'''
Normalizes a vector - dot product: v @ v = 1
'''
if weights:
return np.sqrt(vec @ vec)
return vec / np.sqrt(vec @ vec)
# Algebraic normalization - dot product
def norm_sum(vec, weights=False):
'''
Normalizes a vector - sum: v.sum = 1
'''
if weights:
return vec.sum()
return vec / vec.sum()
# Scaled Normalization -
def scale(vec, weights=False):
'''
Normalizes a vector: v.min = 0, v.max = 1
'''
stop_divide_by_zero = 0.00000001
if weights:
return (vec.max()-vec.min() + stop_divide_by_zero)
return (vec-vec.min())/(vec.max()-vec.min() + stop_divide_by_zero)
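# Illustrative example, not part of the original module: the three
# normalizations applied to the same vector (values made up for the example).
#   v = np.array([1.0, 2.0, 3.0])
#   norm_dot(v)   # unit length under the dot product: v @ v == 1
#   norm_sum(v)   # entries sum to 1
#   scale(v)      # min maps to (nearly) 0, max to (nearly) 1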
def cleanup_chars(string, char_list=('\n', ' ')):
result = string
for char in char_list:
result = result.replace(char, '')
return result
# Matrix dot product
def dotdf(df1, df2):
'''
performs df1 @ df2 without exceptions, when df1.columns and df2.index
are not identical
'''
c = set(df1.columns)
i = set(df2.index)
var = list(c - (c - i))
return df1[var] @ df2.loc[var]
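# Illustrative example, not part of the original module: df1 has a column 'c'
# with no matching row in df2, so dotdf multiplies over the shared labels only
# (labels and values are made up for the example).
#   df1 = pd.DataFrame([[1, 2, 3]], columns=['a', 'b', 'c'])
#   df2 = pd.DataFrame([[10], [20]], index=['a', 'b'], columns=['x'])
#   dotdf(df1, df2)   # 1*10 + 2*20 = 50; a plain df1 @ df2 would raise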
# OS system commands
def ls(search, name_only=False, cos=None):
'''
emulates unix ls (without flags). Accepts wildcard/'*'
'''
search_split = search.replace('/', '/ ').split()
pattern = search_split[-1]
path = ''.join(search_split[:-1])
if cos is None:
# look in filesystem
# numpy array enables Boolean Mask
all_names = np.array(os.listdir(path))
else:
# look in cloud object store
all_names = np.array(cos.get_bucket_contents())
if not name_only and cos is None:
# add path to each name
all_names = np.array([path+name for name in all_names])
mask = [fnmatch(name, pattern) for name in all_names]
result = all_names[mask]
return result
# MATRIX-FACTORIZATION: DIMENSIONALITY REDUCTION & ARCHETYPING
# CLUSTER FEATURES INTO OCCUPATION CATEGORIES
# Use non-zero matrix factorization for clustering
# Use singular value decomposition first state for determining overall
# similarity
class Archetypes:
'''
Archetypes: Performs NMF of order n on X and stores the result as
attributes.
Archetypes are normalized: cosine similarity a(i) @ a(i) = 1.
Atributes:
my_archetypes.n - order / number of archetypes
my_archetypes.X - input matrix
my_archetypes.model - NMF model
my_archetypes.w - NMF w-matrix
my_archetypes.h - NMF h-matrix
my_archetypes.f - features x archetypes matrix (from h-matrix)
my_archetypes.fn - Dot-Normalized archetypes
my_archetypes.o - documents x archetypes matrix (from w-matrix)
my_archetypes.on - Sum-Normalized documents
'''
def __init__(self, X, n,
norm=norm_dot,
bootstrap=False, bootstrap_frac=0.5,
random_state=None):
self.n = n
self.X = X
self.norm = norm
self.random_state = random_state
if bootstrap:
self.bootstrap_n = bootstrap
self.bootstrap_frac = bootstrap_frac
else:
self.bootstrap_n = 1
self.bootstrap_frac = 1
self.model = NMF(
n_components=n,
init='random',
random_state=self.random_state,
max_iter=1000,
tol=0.0000001
)
self.w_dic = {}
self.o_dic = {}
self.h_dic = {}
self.f_dic = {}
for j in range(self.bootstrap_n):
XX = self.X.sample(int(len(self.X) * self.bootstrap_frac))
self.w_dic[j] = self.model.fit_transform(XX)
self.o_dic[j] = pd.DataFrame(self.w_dic[j], index=XX.index)
self.h_dic[j] = self.model.components_
self.f_dic[j] = pd.DataFrame(self.h_dic[j], columns=XX.columns)
self.w = self.w_dic[0] # TEMPORARY
self.o = self.o_dic[0] # TEMPORARY
self.h = self.h_dic[0] # TEMPORARY
self.f = self.f_dic[0] # TEMPORARY
self.fn = self.f.T.apply(norm_dot).T
self.on = self.o.T.apply(norm_sum).T
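# Illustrative usage sketch, not part of the original module: fits three
# archetypes to a small random non-negative matrix (row/column labels are
# made up for the example).
#   X = pd.DataFrame(np.random.rand(20, 8),
#                    index=[f'doc_{i}' for i in range(20)],
#                    columns=[f'term_{j}' for j in range(8)])
#   arch = Archetypes(X, n=3, random_state=0)
#   arch.fn         # archetypes x features, cosine (dot) normalized
#   arch.on.head()  # documents x archetypes, sum normalized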
class Svd:
'''
Singular value decomposition-as-an-object
my_svd = Svd(X) returns
my_svd.u/.s/.vt – U S and VT from the Singular Value Decomposition
(see manual)
my_svd.f – Pandas.DataFrame: f=original features x svd_features
my_svd.o - Pandas.DataFrame: o=occupations x svd_features
my_svd.volume(keep_volume)
- collections.namedtuple ('dotted dicionary'):
Dimensionality reduction. keeps 'keep_volume' of
total variance
'''
def __init__(self, X):
self.u, self.s, self.vt = svd(np.array(X))
self.f = pd.DataFrame(self.vt, columns=X.columns)
self.o = pd.DataFrame(self.u, columns=X.index)
def volume(self, keep_volume):
'''
Dimensionality reduction, keeps 'keep_volume' proportion of
original variance
Type: collections.namedtuple ('dotted dictionary')
Examples of usage:
my_svd.volume(0.9).s - np.array: eigenvalues for 90% variance
my_svd.volume(0.8).f - dataframe: features for 80% variance
my_svd.volume(0.5).o - dataframe: occupations for 50% variance
'''
dotted_dic = collections.namedtuple('dotted_dic', 's f o')
a1 = self.s.cumsum()
a2 = a1/a1[-1]
n_max = np.argmin(np.square(a2 - keep_volume))
cut_dic = dotted_dic(
s=self.s[:n_max],
f=self.f.iloc[:n_max],
o=self.o.iloc[:n_max]
)
return cut_dic
class WatsonDocumentArchetypes:
'''
WatsonDocumentArchetypes performs Archetypal Analysis on a corpus
consisting of a set of documents, for example a set
of articles, books, news stories or medical dictations.
Input parameters:
PATH - Dictionary with paths to I/O
PATH['data'] - Directory for input text files.
Example: './data/input_texts/'
PATH['results'] - Directory for output.
Example: './data/output_nlu/'
NLU - Dictionary with information for running Watson NLU
NLU['apikey'] - apikey for running Watson NLU
NLU['apiurl'] - URL for Watson NLU API
NLU['version'] - Watson NLU version, e.g. '2019-07-12'
NLU['features'] - Features requested from Watson NLU for each
document in the set, e.g.
Features(
categories= CategoriesOptions(),
concepts = ConceptsOptions(),
entities = EntitiesOptions(),
keywords = KeywordsOptions(),
relations = RelationsOptions(),
syntax = SyntaxOptions()
)
Attributes:
self.PATH
'''
def __init__(self, PATH, NLU,
train_test=False,
random_state=None,
use_cloud_store=False):
from cloud_object_store import CloudObjectStore
self.PATH = PATH
self.NLU = NLU
self.random_state = random_state
# To random partition documents into train/test-sets,
# choose relative size of test-set, train_test (1 = 100%)
self.train_test = train_test
self.use_cloud_store = use_cloud_store
# Create clients to interface Watson and Cloud services
authenticator = IAMAuthenticator(NLU['apikey'])
self.nlu_model = NLUV1(
version=NLU['version'], authenticator=authenticator
)
self.nlu_model.set_service_url(NLU['apiurl'])
if self.use_cloud_store:
self.cos_dictations = CloudObjectStore(
PATH['dictation_bucket'],
PATH['cos_dictation_apikey'],
PATH['cos_dictation_crn'],
PATH['cos_dictation_endpoint']
)
self.cos_nlu = CloudObjectStore(
PATH['nlu_bucket'],
PATH['cos_nlu_apikey'],
PATH['cos_nlu_crn'],
PATH['cos_nlu_endpoint']
)
# Initiate X_matrix dictionaries
self.X_matrix_dic = {}
self.X_matrix_train_dic = {}
self.X_matrix_test_dic = {}
self.archetypes_dic = {}
self.svd_dic = {}
# PREPARE DATA
if self.use_cloud_store:
# load from cloud storage bucket
self.filenames = ls(
'*.txt', name_only=True, cos=self.cos_dictations
)
else:
# load from local file system
# all filenames ending with '.txt'
self.filenames = ls(self.PATH['data']+'*.txt', name_only=True)
self.names = [name.replace('.txt', '') for name in self.filenames]
# if train_test - self.names will be set to self.names_train
self.all_names = self.names * 1
# dictionary for dictation files
self.dictation_dic = {}
for name in self.filenames:
if (self.use_cloud_store):
self.dictation_dic[name.replace('.txt', '')] = \
self.cos_dictations.get_item(name).decode('utf-8')
else:
self.dictation_dic[name.replace('.txt', '')] = \
open(self.PATH['data']+name, encoding="utf-8").read()
self.dictation_df = pd.Series(self.dictation_dic)
# TRAIN-TEST SPLIT
if self.train_test:
# 0<train_test<1 - the proportion of names to save as 'test'
# (rounded downwards)
self.names_test, self.names_train = random_split(
self.all_names, self.train_test
)
self.names = self.names_train
# PERFORM WATSON NLU ANALYSIS
# IF DICTATION ALREADY HAS PKL WITH Watson NLU:
# READ EXISTING PKL. SKIP NEW WATSON CALC.
# Dictionary with Watson-NLU results for each dictation
self.watson = {}
if self.use_cloud_store:
# Check in Cloud storage bucket
self.watson_pkl = 'all_dictations_nlu.pkl'
pkl_exists = self.watson_pkl in self.cos_nlu.get_bucket_contents()
else:
# Check in local filesystem
self.watson_pkl = PATH['results']+'all_dictations_nlu.pkl'
pkl_exists = os.path.exists(self.watson_pkl)
if pkl_exists:
if self.use_cloud_store:
# load previous result from Cloud storage
self.watson = pickle.loads(
self.cos_nlu.get_item(self.watson_pkl)
)
else:
# load previous result from local filesystem
self.watson = pickle.load(open(self.watson_pkl, "rb"))
else:
# perform nlu-analysis on dictations
for item in list(self.dictation_dic.items()):
lbl = item[0]
text = item[1]
self.watson[lbl] = self.nlu_model.analyze(
text=text, features=NLU['features']
)
if self.use_cloud_store:
# save result to Cloud storage
self.cos_nlu.create_item(
str(lbl)+'_nlu.pkl',
pickle.dumps(self.watson[lbl])
)
else:
# save result to local filesystem
f = open(PATH['results']+str(lbl)+'_nlu.pkl', 'wb')
pickle.dump(self.watson[lbl], f)
f.close()
if self.use_cloud_store:
# save result to Cloud storage
self.cos_nlu.create_item(
self.watson_pkl, pickle.dumps(self.watson)
)
else:
f = open(self.watson_pkl, 'wb')
pickle.dump(self.watson, f)
f.close()
# Copy Watson NLU results to Pandas Dataframes
self.watson_nlu = {}
for dctn in self.watson.items():
self.watson_nlu[dctn[0]] = {}
for item in list(dctn[1].result.items()):
self.watson_nlu[dctn[0]][item[0]] = \
pd.DataFrame(list(item[1]))
# ARCHETYPAL ANALYSIS
# CONSTRUCT X- MATRIX
def X_matrix(self, typ='entities'):
'''
Construct the archetypal analysis X-matrix by pivoting the dataframe
in the dictionary my_wda.watson_nlu that contains the Watson NLU
analysis in question.
X_matrix(typ)
rows : Dictations
columns: Variables; keywords/entities/concepts, from Watson NLU
analysis
values : Weights, from Watson NLU analysis
The constructed X_matrix(typ) is saved as X_matrix_dic[typ]
If my_wda.train_test has a value (not False), X_matrix_train_dic[typ]
and X_matrix_test[typ] are added computed and added to their
respective dicionaries.
'''
if typ not in self.X_matrix_dic.keys():
df = pd.DataFrame()
for key in self.names:
dfx = self.watson_nlu[key][typ].copy()
dfx['dictation'] = key
df = df.append(dfx, sort=True)
if typ == 'entities':
df = df[df['type'] == 'HealthCondition']
df.rename({'relevance': 'rel0'}, axis=1, inplace=True)
df['relevance'] = df['rel0'] * df['confidence']
self.X_matrix_dic[typ] = df.pivot_table(
index='dictation', columns='text', values='relevance'
).fillna(0)
if self.train_test:
self.X_matrix_train_dic[typ] = self.X_matrix_dic[typ]
df = pd.DataFrame()
for key in self.names_test:
dfx = self.watson_nlu[key][typ].copy()
dfx['dictation'] = key
df = df.append(dfx, sort=True)
if typ == 'entities':
df = df[df['type'] == 'HealthCondition']
df.rename({'relevance': 'rel0'}, axis=1, inplace=True)
df['relevance'] = df['rel0'] * df['confidence']
self.X_matrix_test_dic[typ] = df.pivot_table(
index='dictation', columns='text', values='relevance'
).fillna(0)
return self.X_matrix_dic[typ]
# CALCULATE ARCHETYPES
def archetypes(self, typ='entities',
n_archs=6, bootstrap=False,
bootstrap_frac=0.5,
random_state=False,
norm=norm_sum):
if random_state is False:
random_state = self.random_state
if typ not in self.archetypes_dic.keys():
self.archetypes_dic[typ] = {}
hyperparam = (n_archs, bootstrap, bootstrap_frac, random_state, norm)
self.X_matrix(typ)
self.archetypes_dic[typ][hyperparam] = Archetypes(
self.X_matrix(typ), n_archs, bootstrap=bootstrap,
bootstrap_frac=bootstrap_frac, random_state=random_state,
norm=norm
)
return self.archetypes_dic[typ][hyperparam]
def display_archetype(self, arch_nr=-1, typ='entities',
n_archs=6, var='variables',
threshold=0.10, norm=scale):
fun = {
'variables': 'self.archetypes(typ = typ,n_archs = n_archs).f.T ',
'dictations': 'self.archetypes(typ = typ,n_archs = n_archs).o'
}
f = eval(fun[var])
fn = f.apply(norm)
if arch_nr == -1:
return sns.clustermap(f).data2d
else:
arc = fn.sort_values(by=arch_nr, ascending=False)
            # normalized over sum: threshold is the tail fraction of total weight that is ignored
if norm is norm_sum:
arc_cs = arc[arch_nr].cumsum()
thresh_idx = abs(arc_cs - (1 - threshold)).values.argmin()
result = arc.iloc[:thresh_idx]
if norm is scale:
result = arc[
arc[arch_nr] >= (threshold * arc[arch_nr][0])
]
return result
# CALCULATE SVD
def svd(self, typ='entities'):
self.X_matrix(typ)
self.svd_dic[typ] = Svd(self.X_matrix(typ))
return
# ANALYZE A TEXT
def analyze(self, text, typ='entities'):
pass
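    # Hedged usage sketch (illustrative only; the class name and constructor
    # arguments below are assumptions inferred from the attributes used above,
    # not part of this file). It shows the intended flow: build the X-matrix,
    # fit archetypes, then inspect one archetype.
    #
    #   my_wda = WatsonDocumentArchetypes(PATH, NLU, train_test=0.2)   # hypothetical name/signature
    #   X = my_wda.X_matrix(typ='entities')                  # dictations x entities weight matrix
    #   arch = my_wda.archetypes(typ='entities', n_archs=6)
    #   top_vars = my_wda.display_archetype(arch_nr=0, typ='entities', n_archs=6)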
|
the-stack_0_3890 | # case where generator doesn't intercept the thrown/injected exception
def gen():
yield 123
yield 456
g = gen()
print(next(g))
try:
g.throw(KeyError)
except KeyError:
print('got KeyError from downstream!')
# case where a thrown exception is caught and stops the generator
def gen():
try:
yield 1
yield 2
except:
pass
g = gen()
print(next(g))
try:
g.throw(ValueError)
except StopIteration:
print('got StopIteration')
# generator ignores a thrown GeneratorExit (this is allowed)
def gen():
try:
yield 123
except GeneratorExit:
print('GeneratorExit')
yield 456
# throwing a class
g = gen()
print(next(g))
print(g.throw(GeneratorExit))
# throwing an instance
g = gen()
print(next(g))
print(g.throw(GeneratorExit()))
|
the-stack_0_3891 | # pyOCD debugger
# Copyright (c) 2018-2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .constants import (Commands, Status, SWD_FREQ_MAP, JTAG_FREQ_MAP)
from ...core import exceptions
from ...coresight import dap
from ...utility import conversion
from ...utility.mask import bfx
import logging
import struct
import six
import threading
from enum import Enum
LOG = logging.getLogger(__name__)
class STLink(object):
"""!
@brief STLink V2 and V3 command-level interface.
"""
class Protocol(Enum):
"""!
@brief Protocol options to pass to STLink.enter_debug() method.
"""
SWD = 1
JTAG = 2
## Maximum number of bytes to send or receive for 32- and 16- bit transfers.
#
# 8-bit transfers have a maximum size of the maximum USB packet size (64 bytes for full speed).
MAXIMUM_TRANSFER_SIZE = 1024
## Minimum required STLink firmware version (hw version 2).
MIN_JTAG_VERSION = 24
## Firmware version that adds 16-bit transfers (hw version 2).
MIN_JTAG_VERSION_16BIT_XFER = 26
## Firmware version that adds multiple AP support (hw version 2).
MIN_JTAG_VERSION_MULTI_AP = 28
## Firmware version that adds DP bank support.
#
# Keys are the hardware version, value is the minimum JTAG version.
MIN_JTAG_VERSION_DPBANKSEL = {2: 32, 3: 2}
## Port number to use to indicate DP registers.
DP_PORT = 0xffff
## Map to convert from STLink error response codes to exception classes.
_ERROR_CLASSES = {
# AP protocol errors
Status.SWD_AP_WAIT: exceptions.TransferTimeoutError,
Status.SWD_AP_FAULT: exceptions.TransferFaultError,
Status.SWD_AP_ERROR: exceptions.TransferError,
Status.SWD_AP_PARITY_ERROR: exceptions.TransferError,
# DP protocol errors
Status.SWD_DP_WAIT: exceptions.TransferTimeoutError,
Status.SWD_DP_FAULT: exceptions.TransferFaultError,
Status.SWD_DP_ERROR: exceptions.TransferError,
Status.SWD_DP_PARITY_ERROR: exceptions.TransferError,
# High level transaction errors
Status.SWD_AP_WDATA_ERROR: exceptions.TransferFaultError,
Status.SWD_AP_STICKY_ERROR: exceptions.TransferError,
Status.SWD_AP_STICKYORUN_ERROR: exceptions.TransferError,
}
## These errors indicate a memory fault.
_MEM_FAULT_ERRORS = (
Status.JTAG_UNKNOWN_ERROR, # Returned in some cases by older STLink firmware.
Status.SWD_AP_FAULT,
Status.SWD_DP_FAULT,
Status.SWD_AP_WDATA_ERROR,
Status.SWD_AP_STICKY_ERROR,
)
def __init__(self, device):
self._device = device
self._hw_version = 0
self._jtag_version = 0
self._version_str = None
self._target_voltage = 0
self._protocol = None
self._lock = threading.RLock()
def open(self):
with self._lock:
self._device.open()
self.enter_idle()
self.get_version()
self.get_target_voltage()
def close(self):
with self._lock:
self.enter_idle()
self._device.close()
def get_version(self):
# GET_VERSION response structure:
# Byte 0-1:
# [15:12] Major/HW version
# [11:6] JTAG/SWD version
# [5:0] SWIM or MSC version
# Byte 2-3: ST_VID
# Byte 4-5: STLINK_PID
response = self._device.transfer([Commands.GET_VERSION], readSize=6)
ver, = struct.unpack('>H', response[:2])
# TODO create version bitfield constants
self._hw_version = bfx(ver, 15, 12)
self._jtag_version = bfx(ver, 11, 6)
self._msc_version = bfx(ver, 5, 0)
# For STLinkV3 we must use the extended get version command.
if self._hw_version >= 3:
# GET_VERSION_EXT response structure (byte offsets):
# 0: HW version
# 1: SWIM version
# 2: JTAG/SWD version
# 3: MSC/VCP version
# 4: Bridge version
# 5-7: reserved
# 8-9: ST_VID
# 10-11: STLINK_PID
response = self._device.transfer([Commands.GET_VERSION_EXT], readSize=12)
hw_vers, _, self._jtag_version, self._msc_version = struct.unpack('<4B', response[0:4])
self._version_str = "V%dJ%dM%d" % (self._hw_version, self._jtag_version, self._msc_version)
LOG.debug("STLink probe %s firmware version: %s", self.serial_number, self._version_str)
# Check versions.
if self._jtag_version == 0:
raise exceptions.ProbeError("%s firmware does not support JTAG/SWD. Please update"
"to a firmware version that supports JTAG/SWD" % (self._version_str))
if not self._check_version(self.MIN_JTAG_VERSION):
raise exceptions.ProbeError("STLink %s is using an unsupported, older firmware version. "
"Please update to the latest STLink firmware. Current version is %s, must be at least version v2J%d.)"
% (self.serial_number, self._version_str, self.MIN_JTAG_VERSION))
def _check_version(self, min_version):
return (self._hw_version >= 3) or (self._jtag_version >= min_version)
@property
def vendor_name(self):
return self._device.vendor_name
@property
def product_name(self):
return self._device.product_name
@property
def serial_number(self):
return self._device.serial_number
@property
def hw_version(self):
return self._hw_version
@property
def jtag_version(self):
return self._jtag_version
@property
def version_str(self):
return self._version_str
@property
def target_voltage(self):
return self._target_voltage
@property
def supports_banked_dp(self):
"""! @brief Whether the firmware version supports accessing banked DP registers.
This property is not valid until the connection is opened.
"""
return self._jtag_version >= self.MIN_JTAG_VERSION_DPBANKSEL[self._hw_version]
def get_target_voltage(self):
response = self._device.transfer([Commands.GET_TARGET_VOLTAGE], readSize=8)
a0, a1 = struct.unpack('<II', response[:8])
self._target_voltage = 2 * a1 * 1.2 / a0 if a0 != 0 else None
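    # Worked example (illustrative values, assuming a0 is the internal-reference ADC
    # reading and a1 the target-voltage ADC reading): with a0 = 1216 and a1 = 1680,
    # the formula above gives 2 * 1680 * 1.2 / 1216 ~= 3.32 V, i.e. a 3.3 V target.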
def enter_idle(self):
with self._lock:
response = self._device.transfer([Commands.GET_CURRENT_MODE], readSize=2)
if response[0] == Commands.DEV_DFU_MODE:
self._device.transfer([Commands.DFU_COMMAND, Commands.DFU_EXIT])
elif response[0] == Commands.DEV_JTAG_MODE:
self._device.transfer([Commands.JTAG_COMMAND, Commands.JTAG_EXIT])
elif response[0] == Commands.DEV_SWIM_MODE:
self._device.transfer([Commands.SWIM_COMMAND, Commands.SWIM_EXIT])
self._protocol = None
def set_swd_frequency(self, freq=1800000):
with self._lock:
if self._hw_version >= 3:
self.set_com_frequency(self.Protocol.JTAG, freq)
else:
for f, d in SWD_FREQ_MAP.items():
if freq >= f:
response = self._device.transfer([Commands.JTAG_COMMAND, Commands.SWD_SET_FREQ, d], readSize=2)
self._check_status(response)
return
raise exceptions.ProbeError("Selected SWD frequency is too low")
def set_jtag_frequency(self, freq=1120000):
with self._lock:
if self._hw_version >= 3:
self.set_com_frequency(self.Protocol.JTAG, freq)
else:
for f, d in JTAG_FREQ_MAP.items():
if freq >= f:
response = self._device.transfer([Commands.JTAG_COMMAND, Commands.JTAG_SET_FREQ, d], readSize=2)
self._check_status(response)
return
raise exceptions.ProbeError("Selected JTAG frequency is too low")
def get_com_frequencies(self, protocol):
assert self._hw_version >= 3
with self._lock:
cmd = [Commands.JTAG_COMMAND, Commands.GET_COM_FREQ, protocol.value - 1]
response = self._device.transfer(cmd, readSize=52)
self._check_status(response[0:2])
freqs = conversion.byte_list_to_u32le_list(response[4:52])
currentFreq = freqs.pop(0)
freqCount = freqs.pop(0)
return currentFreq, freqs[:freqCount]
def set_com_frequency(self, protocol, freq):
assert self._hw_version >= 3
with self._lock:
cmd = [Commands.JTAG_COMMAND, Commands.SET_COM_FREQ, protocol.value - 1, 0]
cmd.extend(conversion.u32le_list_to_byte_list([freq // 1000]))
response = self._device.transfer(cmd, readSize=8)
self._check_status(response[0:2])
freqs = conversion.byte_list_to_u32le_list(response[4:8])
return freqs[0]
def enter_debug(self, protocol):
with self._lock:
self.enter_idle()
if protocol == self.Protocol.SWD:
protocolParam = Commands.JTAG_ENTER_SWD
elif protocol == self.Protocol.JTAG:
protocolParam = Commands.JTAG_ENTER_JTAG_NO_CORE_RESET
response = self._device.transfer([Commands.JTAG_COMMAND, Commands.JTAG_ENTER2, protocolParam, 0], readSize=2)
self._check_status(response)
self._protocol = protocol
def open_ap(self, apsel):
with self._lock:
if not self._check_version(self.MIN_JTAG_VERSION_MULTI_AP):
return
cmd = [Commands.JTAG_COMMAND, Commands.JTAG_INIT_AP, apsel, Commands.JTAG_AP_NO_CORE]
response = self._device.transfer(cmd, readSize=2)
self._check_status(response)
def close_ap(self, apsel):
with self._lock:
if not self._check_version(self.MIN_JTAG_VERSION_MULTI_AP):
return
cmd = [Commands.JTAG_COMMAND, Commands.JTAG_CLOSE_AP_DBG, apsel]
response = self._device.transfer(cmd, readSize=2)
self._check_status(response)
def target_reset(self):
with self._lock:
response = self._device.transfer([Commands.JTAG_COMMAND, Commands.JTAG_DRIVE_NRST, Commands.JTAG_DRIVE_NRST_PULSE], readSize=2)
self._check_status(response)
def drive_nreset(self, isAsserted):
with self._lock:
value = Commands.JTAG_DRIVE_NRST_LOW if isAsserted else Commands.JTAG_DRIVE_NRST_HIGH
response = self._device.transfer([Commands.JTAG_COMMAND, Commands.JTAG_DRIVE_NRST, value], readSize=2)
self._check_status(response)
def _check_status(self, response):
status, = struct.unpack('<H', response)
if status != Status.JTAG_OK:
error_message = Status.get_error_message(status)
if status in self._ERROR_CLASSES:
raise self._ERROR_CLASSES[status](error_message)
else:
raise exceptions.ProbeError(error_message)
def _clear_sticky_error(self):
with self._lock:
if self._protocol == self.Protocol.SWD:
self.write_dap_register(self.DP_PORT, dap.DP_ABORT,
dap.ABORT_ORUNERRCLR | dap.ABORT_WDERRCLR | dap.ABORT_STKERRCLR | dap.ABORT_STKCMPCLR)
elif self._protocol == self.Protocol.JTAG:
self.write_dap_register(self.DP_PORT, dap.DP_CTRL_STAT,
dap.CTRLSTAT_STICKYERR | dap.CTRLSTAT_STICKYCMP | dap.CTRLSTAT_STICKYORUN)
def _read_mem(self, addr, size, memcmd, max, apsel):
with self._lock:
result = []
while size:
thisTransferSize = min(size, max)
cmd = [Commands.JTAG_COMMAND, memcmd]
cmd.extend(six.iterbytes(struct.pack('<IHB', addr, thisTransferSize, apsel)))
result += self._device.transfer(cmd, readSize=thisTransferSize)
addr += thisTransferSize
size -= thisTransferSize
# Check status of this read.
response = self._device.transfer([Commands.JTAG_COMMAND, Commands.JTAG_GETLASTRWSTATUS2], readSize=12)
status, _, faultAddr = struct.unpack('<HHI', response[0:8])
# Handle transfer faults specially so we can assign the address info.
if status != Status.JTAG_OK:
error_message = Status.get_error_message(status)
if status in self._MEM_FAULT_ERRORS:
# Clear sticky errors.
self._clear_sticky_error()
exc = exceptions.TransferFaultError("read")
exc.fault_address = faultAddr
exc.fault_length = thisTransferSize - (faultAddr - addr)
raise exc
elif status in self._ERROR_CLASSES:
raise self._ERROR_CLASSES[status](error_message)
elif status != Status.JTAG_OK:
raise exceptions.ProbeError(error_message)
return result
def _write_mem(self, addr, data, memcmd, max, apsel):
with self._lock:
while len(data):
thisTransferSize = min(len(data), max)
thisTransferData = data[:thisTransferSize]
cmd = [Commands.JTAG_COMMAND, memcmd]
cmd.extend(six.iterbytes(struct.pack('<IHB', addr, thisTransferSize, apsel)))
self._device.transfer(cmd, writeData=thisTransferData)
addr += thisTransferSize
data = data[thisTransferSize:]
# Check status of this write.
response = self._device.transfer([Commands.JTAG_COMMAND, Commands.JTAG_GETLASTRWSTATUS2], readSize=12)
status, _, faultAddr = struct.unpack('<HHI', response[0:8])
# Handle transfer faults specially so we can assign the address info.
if status != Status.JTAG_OK:
error_message = Status.get_error_message(status)
if status in self._MEM_FAULT_ERRORS:
# Clear sticky errors.
self._clear_sticky_error()
exc = exceptions.TransferFaultError("write")
exc.fault_address = faultAddr
exc.fault_length = thisTransferSize - (faultAddr - addr)
raise exc
elif status in self._ERROR_CLASSES:
raise self._ERROR_CLASSES[status](error_message)
elif status != Status.JTAG_OK:
raise exceptions.ProbeError(error_message)
def read_mem32(self, addr, size, apsel):
assert (addr & 0x3) == 0 and (size & 0x3) == 0, "address and size must be word aligned"
return self._read_mem(addr, size, Commands.JTAG_READMEM_32BIT, self.MAXIMUM_TRANSFER_SIZE, apsel)
def write_mem32(self, addr, data, apsel):
assert (addr & 0x3) == 0 and (len(data) & 3) == 0, "address and size must be word aligned"
self._write_mem(addr, data, Commands.JTAG_WRITEMEM_32BIT, self.MAXIMUM_TRANSFER_SIZE, apsel)
def read_mem16(self, addr, size, apsel):
assert (addr & 0x1) == 0 and (size & 0x1) == 0, "address and size must be half-word aligned"
if not self._check_version(self.MIN_JTAG_VERSION_16BIT_XFER):
# 16-bit r/w is only available from J26, so revert to 8-bit accesses.
return self.read_mem8(addr, size, apsel)
return self._read_mem(addr, size, Commands.JTAG_READMEM_16BIT, self.MAXIMUM_TRANSFER_SIZE, apsel)
def write_mem16(self, addr, data, apsel):
assert (addr & 0x1) == 0 and (len(data) & 1) == 0, "address and size must be half-word aligned"
if not self._check_version(self.MIN_JTAG_VERSION_16BIT_XFER):
# 16-bit r/w is only available from J26, so revert to 8-bit accesses.
self.write_mem8(addr, data, apsel)
return
self._write_mem(addr, data, Commands.JTAG_WRITEMEM_16BIT, self.MAXIMUM_TRANSFER_SIZE, apsel)
def read_mem8(self, addr, size, apsel):
return self._read_mem(addr, size, Commands.JTAG_READMEM_8BIT, self._device.max_packet_size, apsel)
def write_mem8(self, addr, data, apsel):
self._write_mem(addr, data, Commands.JTAG_WRITEMEM_8BIT, self._device.max_packet_size, apsel)
def _check_dp_bank(self, port, addr):
"""! @brief Check if attempting to access a banked DP register with a firmware version that
doesn't support that.
"""
if ((port == self.DP_PORT) and ((addr & 0xf0) != 0) and not self.supports_banked_dp):
raise exceptions.ProbeError("this STLinkV%d firmware version does not support accessing"
" banked DP registers; please upgrade to the latest STLinkV%d firmware release",
self._hw_version, self._hw_version)
def read_dap_register(self, port, addr):
assert (addr >> 16) == 0, "register address must be 16-bit"
self._check_dp_bank(port, addr)
with self._lock:
cmd = [Commands.JTAG_COMMAND, Commands.JTAG_READ_DAP_REG]
cmd.extend(six.iterbytes(struct.pack('<HH', port, addr)))
response = self._device.transfer(cmd, readSize=8)
self._check_status(response[:2])
value, = struct.unpack('<I', response[4:8])
return value
def write_dap_register(self, port, addr, value):
assert (addr >> 16) == 0, "register address must be 16-bit"
self._check_dp_bank(port, addr)
with self._lock:
cmd = [Commands.JTAG_COMMAND, Commands.JTAG_WRITE_DAP_REG]
cmd.extend(six.iterbytes(struct.pack('<HHI', port, addr, value)))
response = self._device.transfer(cmd, readSize=2)
self._check_status(response)
def swo_start(self, baudrate):
with self._lock:
bufferSize = 4096
cmd = [Commands.JTAG_COMMAND, Commands.SWV_START_TRACE_RECEPTION]
cmd.extend(six.iterbytes(struct.pack('<HI', bufferSize, baudrate)))
response = self._device.transfer(cmd, readSize=2)
self._check_status(response)
def swo_stop(self):
with self._lock:
cmd = [Commands.JTAG_COMMAND, Commands.SWV_STOP_TRACE_RECEPTION]
response = self._device.transfer(cmd, readSize=2)
self._check_status(response)
def swo_read(self):
with self._lock:
response = None
bytesAvailable = None
try:
cmd = [Commands.JTAG_COMMAND, Commands.SWV_GET_TRACE_NEW_RECORD_NB]
response = self._device.transfer(cmd, readSize=2)
bytesAvailable, = struct.unpack('<H', response)
if bytesAvailable:
return self._device.read_swv(bytesAvailable)
else:
return bytearray()
except KeyboardInterrupt:
# If we're interrupted after sending the SWV_GET_TRACE_NEW_RECORD_NB command,
# we have to read the queued SWV data before any other commands can be sent.
if response is not None:
if bytesAvailable is None:
bytesAvailable, = struct.unpack('<H', response)
if bytesAvailable:
self._device.read_swv(bytesAvailable)
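# Hedged usage sketch (not part of pyOCD itself; `usb_device` stands for the USB
# backend object pyOCD normally constructs for an attached probe):
#
#   link = STLink(usb_device)
#   link.open()
#   link.enter_debug(STLink.Protocol.SWD)
#   dpidr = link.read_dap_register(STLink.DP_PORT, 0x0)   # DP register 0 (DPIDR)
#   link.close()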
|
the-stack_0_3892 | import torch
import torch.nn as nn
import torch.nn.functional as F
# from CAPS.effiUnet_v3 import EfficientUNet
from loguru import logger
# from CAPS.effiUnet_v3_1 import EfficientUNet
from CAPS.effiUnet_v4 import EfficientUNet
class CAPSNet(nn.Module):
def __init__(self, args, device):
super(CAPSNet, self).__init__()
self.args = args
self.device = device
self.net = EfficientUNet()
if args.phase == "train":
if not args.magic_pretrain:
raise Exception("args.magic_pretrain should not be none in traing mode")
magic_net_model_dict = torch.load(args.magic_pretrain)
self.net.magic_net.load_state_dict(magic_net_model_dict)
self.net.to(device)
for param in self.net.magic_net.parameters():
param.requires_grad = False
@staticmethod
def normalize(coord, h, w):
'''
turn the coordinates from pixel indices to the range of [-1, 1]
:param coord: [..., 2]
:param h: the image height
:param w: the image width
:return: the normalized coordinates [..., 2]
'''
c = torch.Tensor([(w - 1) / 2., (h - 1) / 2.]).to(coord.device).float()
coord_norm = (coord - c) / c
return coord_norm
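        # Illustrative example (not from the original source): for w=640, h=480 the
        # centre is c=(319.5, 239.5), so pixel (0, 0) maps to (-1, -1), pixel
        # (639, 479) maps to (1, 1), and (319.5, 239.5) maps to (0, 0).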
@staticmethod
def denormalize(coord_norm, h, w):
'''
turn the coordinates from normalized value ([-1, 1]) to actual pixel indices
:param coord_norm: [..., 2]
:param h: the image height
:param w: the image width
:return: actual pixel coordinates
'''
c = torch.Tensor([(w - 1) / 2., (h - 1) / 2.]).to(coord_norm.device)
coord = coord_norm * c + c
return coord
def ind2coord(self, ind, width):
ind = ind.unsqueeze(-1)
x = ind % width
# y = ind // width
y = torch.div(ind, width, rounding_mode='floor')
coord = torch.cat((x, y), -1).float()
return coord
def gen_grid(self, h_min, h_max, w_min, w_max, len_h, len_w):
x, y = torch.meshgrid([torch.linspace(w_min, w_max, len_w), torch.linspace(h_min, h_max, len_h)])
grid = torch.stack((x, y), -1).transpose(0, 1).reshape(-1, 2).float().to(self.device)
return grid
def sample_feat_by_coord(self, x, coord_n, norm=False):
'''
sample from normalized coordinates
:param x: feature map [batch_size, n_dim, h, w]
:param coord_n: normalized coordinates, [batch_size, n_pts, 2]
:param norm: if l2 normalize features
:return: the extracted features, [batch_size, n_pts, n_dim]
'''
feat = F.grid_sample(x, coord_n.unsqueeze(2), align_corners=True).squeeze(-1)
if norm:
feat = F.normalize(feat)
feat = feat.transpose(1, 2)
return feat
def compute_prob(self, feat1, feat2):
'''
compute probability
:param feat1: query features, [batch_size, m, n_dim]
:param feat2: reference features, [batch_size, n, n_dim]
:return: probability, [batch_size, m, n]
'''
assert self.args.prob_from in ['correlation', 'distance']
if self.args.prob_from == 'correlation':
sim = feat1.bmm(feat2.transpose(1, 2))
prob = F.softmax(sim, dim=-1) # Bxmxn
else:
dist = torch.sum(feat1**2, dim=-1, keepdim=True) + \
torch.sum(feat2**2, dim=-1, keepdim=True).transpose(1, 2) - \
2 * feat1.bmm(feat2.transpose(1, 2))
prob = F.softmax(-dist, dim=-1) # Bxmxn
return prob
def get_1nn_coord(self, feat1, featmap2):
'''
find the coordinates of nearest neighbor match
:param feat1: query features, [batch_size, n_pts, n_dim]
:param featmap2: the feature maps of the other image
:return: normalized correspondence locations [batch_size, n_pts, 2]
'''
batch_size, d, h, w = featmap2.shape
feat2_flatten = featmap2.reshape(batch_size, d, h*w).transpose(1, 2) # Bx(hw)xd
assert self.args.prob_from in ['correlation', 'distance']
if self.args.prob_from == 'correlation':
sim = feat1.bmm(feat2_flatten.transpose(1, 2))
ind2_1nn = torch.max(sim, dim=-1)[1]
else:
dist = torch.sum(feat1**2, dim=-1, keepdim=True) + \
torch.sum(feat2_flatten**2, dim=-1, keepdim=True).transpose(1, 2) - \
2 * feat1.bmm(feat2_flatten.transpose(1, 2))
ind2_1nn = torch.min(dist, dim=-1)[1]
coord2 = self.ind2coord(ind2_1nn, w)
coord2_n = self.normalize(coord2, h, w)
return coord2_n
def get_expected_correspondence_locs(self, feat1, featmap2, with_std=False):
'''
compute the expected correspondence locations
:param feat1: the feature vectors of query points [batch_size, n_pts, n_dim]
:param featmap2: the feature maps of the reference image [batch_size, n_dim, h, w]
:param with_std: if return the standard deviation
:return: the normalized expected correspondence locations [batch_size, n_pts, 2]
'''
B, d, h2, w2 = featmap2.size()
grid_n = self.gen_grid(-1, 1, -1, 1, h2, w2)
featmap2_flatten = featmap2.reshape(B, d, h2*w2).transpose(1, 2) # BX(hw)xd
prob = self.compute_prob(feat1, featmap2_flatten) # Bxnx(hw)
grid_n = grid_n.unsqueeze(0).unsqueeze(0) # 1x1x(hw)x2
expected_coord_n = torch.sum(grid_n * prob.unsqueeze(-1), dim=2) # Bxnx2
if with_std:
# convert to normalized scale [-1, 1]
var = torch.sum(grid_n**2 * prob.unsqueeze(-1), dim=2) - expected_coord_n**2 # Bxnx2
std = torch.sum(torch.sqrt(torch.clamp(var, min=1e-10)), -1) # Bxn
return expected_coord_n, std
else:
return expected_coord_n
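    # Illustrative 1-D analogue of the soft-argmax computed above (made-up numbers):
    # with grid positions (-1, 0, 1) and softmax probabilities (0.1, 0.2, 0.7), the
    # expected location is 0.1*(-1) + 0.2*0 + 0.7*1 = 0.6; the optional std comes from
    # the probability-weighted variance around that expectation.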
def get_expected_correspondence_within_window(self, feat1, featmap2, coord2_n, with_std=False):
'''
:param feat1: the feature vectors of query points [batch_size, n_pts, n_dim]
:param featmap2: the feature maps of the reference image [batch_size, n_dim, h, w]
:param coord2_n: normalized center locations [batch_size, n_pts, 2]
:param with_std: if return the standard deviation
:return: the normalized expected correspondence locations, [batch_size, n_pts, 2], optionally with std
'''
batch_size, n_dim, h2, w2 = featmap2.shape
n_pts = coord2_n.shape[1]
grid_n = self.gen_grid(h_min=-self.args.window_size, h_max=self.args.window_size,
w_min=-self.args.window_size, w_max=self.args.window_size,
len_h=int(self.args.window_size*h2), len_w=int(self.args.window_size*w2))
grid_n_ = grid_n.repeat(batch_size, 1, 1, 1) # Bx1xhwx2
coord2_n_grid = coord2_n.unsqueeze(-2) + grid_n_ # Bxnxhwx2
feat2_win = F.grid_sample(featmap2, coord2_n_grid, padding_mode='zeros', align_corners=True).permute(0, 2, 3, 1) # Bxnxhwxd
feat1 = feat1.unsqueeze(-2)
prob = self.compute_prob(feat1.reshape(batch_size*n_pts, -1, n_dim),
feat2_win.reshape(batch_size*n_pts, -1, n_dim)).reshape(batch_size, n_pts, -1)
expected_coord2_n = torch.sum(coord2_n_grid * prob.unsqueeze(-1), dim=2) # Bxnx2
if with_std:
var = torch.sum(coord2_n_grid**2 * prob.unsqueeze(-1), dim=2) - expected_coord2_n**2 # Bxnx2
std = torch.sum(torch.sqrt(torch.clamp(var, min=1e-10)), -1) # Bxn
return expected_coord2_n, std
else:
return expected_coord2_n
def forward(self, im1, im2, coord1):
# extract features for both images
# modify the output
# xf1 = self.net(im1)
# xf2 = self.net(im2)
prob_nms1, xf1 = self.net(im1)
prob_nms2, xf2 = self.net(im2)
# image width and height
h1i, w1i = im1.size()[2:]
h2i, w2i = im2.size()[2:]
# normalize coordination
coord1_n = self.normalize(coord1, h1i, w1i)
# the center locations of the local window for fine level computation
feat1_fine = self.sample_feat_by_coord(xf1, coord1_n) # Bxnxd
coord2_ef_n, std_2f = self.get_expected_correspondence_locs(feat1_fine, xf2, with_std=True)
feat2_fine = self.sample_feat_by_coord(xf2, coord2_ef_n) # Bxnxd
coord1_ef_n, std_1f = self.get_expected_correspondence_locs(feat2_fine, xf1, with_std=True)
coord2_ef = self.denormalize(coord2_ef_n, h2i, w2i)
coord1_ef = self.denormalize(coord1_ef_n, h1i, w1i)
return {'coord2_ef': coord2_ef, 'coord1_ef': coord1_ef,
'std_1f': std_1f, 'std_2f': std_2f}
def extract_features(self, im, coord):
'''
        extract fine-level features given the input image and 2d locations
        :param im: [batch_size, 3, h, w]
        :param coord: [batch_size, n_pts, 2]
        :return: fine features [batch_size, n_pts, fine_feat_dim]
'''
        _, xf = self.net(im)  # the backbone returns (prob_nms, feature_map); only the feature map is sampled here
hi, wi = im.size()[2:]
coord_n = self.normalize(coord, hi, wi)
feat_f = self.sample_feat_by_coord(xf, coord_n)
return feat_f
def exetrct_det_and_des(self, im, src_shape):
prob_nms, xf = self.net(im)
# logger.info("im shape: {}".format(im.shape))
# logger.info("prob_nms.shape: {}".format(prob_nms.shape))
# logger.info("xf shape: {}".format(xf.shape))
prob_nms = prob_nms.squeeze(dim=1)
edge_size = 30
prob_nms[:, :edge_size, :] = -1
prob_nms[:, :, :edge_size] = -1
prob_nms[:, src_shape[0] - edge_size:, :] = -1
prob_nms[:, :, src_shape[1] - edge_size:] = -1
# preds = [pred > 0.015 for pred in prob_nms]
points = [torch.stack(torch.where(pred > 0.015)).T for pred in prob_nms]
points = [torch.flip(element, dims=[1]) for element in points]
# logger.info("prob_nms.shape: {}".format(prob_nms.shape))
# logger.info("the first pred shape is : {}".format(preds[0].shape))
# logger.info("len preds is: {}".format(len(preds)))
# logger.info("points len: {}".format(len(points[0])))
# print(points[0])
# print(points[0])
hi, wi = im.size()[2:]
batch_size = im.size()[0]
discriptor = list()
for i in range(batch_size):
coord_n = self.normalize(points[i], hi, wi)
feat_f = self.sample_feat_by_coord(xf[i: i+1], coord_n.unsqueeze(dim=0))
discriptor.append(feat_f)
return points, discriptor
def test(self, im1, im2, coord1):
'''
given a pair of images im1, im2, compute the coorrespondences for query points coord1.
We performa full image search at coarse level and local search at fine level
:param im1: [batch_size, 3, h, w]
:param im2: [batch_size, 3, h, w]
:param coord1: [batch_size, n_pts, 2]
:return: the fine level correspondence location [batch_size, n_pts, 2]
'''
xc1, xf1 = self.net(im1)
xc2, xf2 = self.net(im2)
h1i, w1i = im1.shape[2:]
h2i, w2i = im2.shape[2:]
coord1_n = self.normalize(coord1, h1i, w1i)
feat1_c = self.sample_feat_by_coord(xc1, coord1_n)
_, std_c = self.get_expected_correspondence_locs(feat1_c, xc2, with_std=True)
coord2_ec_n = self.get_1nn_coord(feat1_c, xc2)
feat1_f = self.sample_feat_by_coord(xf1, coord1_n)
_, std_f = self.get_expected_correspondence_within_window(feat1_f, xf2, coord2_ec_n, with_std=True)
coord2_ef_n = self.get_1nn_coord(feat1_f, xf2)
coord2_ef = self.denormalize(coord2_ef_n, h2i, w2i)
std = (std_c + std_f)/2
return coord2_ef, std
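# Hedged usage sketch (illustrative only; `args` stands for the argparse namespace this
# repository builds elsewhere, with at least phase, magic_pretrain, prob_from and
# window_size set):
#
#   device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#   model = CAPSNet(args, device)
#   coord2, std = model.test(im1, im2, coord1)   # im*: Bx3xHxW tensors, coord1: BxNx2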
|
the-stack_0_3893 | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.9 Python SDK
    Pure Storage FlashBlade REST 1.9 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentation can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.9
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class CertificateGroupUse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'use': 'FixedReferenceWithRemote'
}
attribute_map = {
'id': 'id',
'name': 'name',
'use': 'use'
}
def __init__(self, id=None, name=None, use=None):
"""
CertificateGroupUse - a model defined in Swagger
"""
self._id = None
self._name = None
self._use = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if use is not None:
self.use = use
@property
def id(self):
"""
Gets the id of this CertificateGroupUse.
A non-modifiable, globally unique ID chosen by the system.
:return: The id of this CertificateGroupUse.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this CertificateGroupUse.
A non-modifiable, globally unique ID chosen by the system.
:param id: The id of this CertificateGroupUse.
:type: str
"""
self._id = id
@property
def name(self):
"""
Gets the name of this CertificateGroupUse.
The name of the object (e.g., a file system or snapshot).
:return: The name of this CertificateGroupUse.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this CertificateGroupUse.
The name of the object (e.g., a file system or snapshot).
:param name: The name of this CertificateGroupUse.
:type: str
"""
self._name = name
@property
def use(self):
"""
Gets the use of this CertificateGroupUse.
A reference to an object using this certificate group.
:return: The use of this CertificateGroupUse.
:rtype: FixedReferenceWithRemote
"""
return self._use
@use.setter
def use(self, use):
"""
Sets the use of this CertificateGroupUse.
A reference to an object using this certificate group.
:param use: The use of this CertificateGroupUse.
:type: FixedReferenceWithRemote
"""
self._use = use
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, CertificateGroupUse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
the-stack_0_3896 | import warnings
from collections import defaultdict
from itertools import chain
from typing import Dict, Iterable, Iterator, List, Optional, Set
from math import ceil
from maggma.core import Builder, Store
from maggma.utils import grouper
from monty.json import MontyDecoder
from pymatgen.analysis.phase_diagram import PhaseDiagramError
from pymatgen.entries.compatibility import MaterialsProject2020Compatibility
from pymatgen.entries.computed_entries import ComputedStructureEntry
from emmet.builders.utils import chemsys_permutations
from emmet.core.thermo import ThermoDoc, PhaseDiagramDoc
from emmet.core.utils import jsanitize
class ThermoBuilder(Builder):
def __init__(
self,
materials: Store,
thermo: Store,
phase_diagram: Optional[Store] = None,
oxidation_states: Optional[Store] = None,
query: Optional[Dict] = None,
compatibility=None,
num_phase_diagram_eles: Optional[int] = None,
**kwargs,
):
"""
Calculates thermodynamic quantities for materials from phase
diagram constructions
Args:
materials (Store): Store of materials documents
thermo (Store): Store of thermodynamic data such as formation
energy and decomposition pathway
phase_diagram (Store): Store of phase diagram data for each unique chemical system
oxidation_states (Store): Store of oxidation state data to use in correction scheme application
query (dict): dictionary to limit materials to be analyzed
            compatibility (PymatgenCompatibility): Compatibility module
to ensure energies are compatible
num_phase_diagram_eles (int): Maximum number of elements to use in phase diagram construction
for data within the separate phase_diagram collection
"""
self.materials = materials
self.thermo = thermo
self.query = query if query else {}
self.compatibility = (
compatibility
if compatibility
else MaterialsProject2020Compatibility("Advanced")
)
self.oxidation_states = oxidation_states
self.phase_diagram = phase_diagram
self.num_phase_diagram_eles = num_phase_diagram_eles
self._completed_tasks: Set[str] = set()
self._entries_cache: Dict[str, List[ComputedStructureEntry]] = defaultdict(list)
sources = [materials]
if oxidation_states is not None:
sources.append(oxidation_states)
targets = [thermo]
if phase_diagram is not None:
targets.append(phase_diagram)
super().__init__(sources=sources, targets=targets, **kwargs)
def ensure_indexes(self):
"""
        Ensures indices on the tasks and materials collections
"""
# Search index for materials
self.materials.ensure_index("material_id")
self.materials.ensure_index("chemsys")
self.materials.ensure_index("last_updated")
# Search index for thermo
self.thermo.ensure_index("material_id")
self.thermo.ensure_index("last_updated")
# Search index for phase_diagram
if self.phase_diagram:
self.phase_diagram.ensure_index("chemsys")
def prechunk(self, number_splits: int) -> Iterable[Dict]: # pragma: no cover
updated_chemsys = self.get_updated_chemsys()
new_chemsys = self.get_new_chemsys()
affected_chemsys = self.get_affected_chemsys(updated_chemsys | new_chemsys)
# Remove overlapping chemical systems
to_process_chemsys = set()
for chemsys in updated_chemsys | new_chemsys | affected_chemsys:
if chemsys not in to_process_chemsys:
to_process_chemsys |= chemsys_permutations(chemsys)
N = ceil(len(to_process_chemsys) / number_splits)
for chemsys_chunk in grouper(to_process_chemsys, N):
yield {"query": {"chemsys": {"$in": list(chemsys_chunk)}}}
def get_items(self) -> Iterator[List[Dict]]:
"""
Gets whole chemical systems of entries to process
"""
self.logger.info("Thermo Builder Started")
self.logger.info("Setting indexes")
self.ensure_indexes()
updated_chemsys = self.get_updated_chemsys()
new_chemsys = self.get_new_chemsys()
affected_chemsys = self.get_affected_chemsys(updated_chemsys | new_chemsys)
# Remove overlapping chemical systems
processed = set()
to_process_chemsys = []
for chemsys in sorted(
updated_chemsys | new_chemsys | affected_chemsys,
key=lambda x: len(x),
reverse=True,
):
if chemsys not in processed:
processed |= chemsys_permutations(chemsys)
to_process_chemsys.append(chemsys)
self.logger.info(
f"Found {len(to_process_chemsys)} chemical systems with new/updated materials to process"
)
self.total = len(to_process_chemsys)
# Yield the chemical systems in order of increasing size
# Will build them in a similar manner to fast Pourbaix
for chemsys in sorted(
to_process_chemsys, key=lambda x: len(x.split("-")), reverse=False
):
entries = self.get_entries(chemsys)
yield entries
def process_item(self, item: List[Dict]):
if len(item) == 0:
return []
entries = [ComputedStructureEntry.from_dict(entry) for entry in item]
# determine chemsys
elements = sorted(
set([el.symbol for e in entries for el in e.composition.elements])
)
chemsys = "-".join(elements)
self.logger.debug(f"Processing {len(entries)} entries for {chemsys}")
material_entries: Dict[str, Dict[str, ComputedStructureEntry]] = defaultdict(
dict
)
pd_entries = []
for entry in entries:
material_entries[entry.entry_id][entry.data["run_type"]] = entry
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message="Failed to guess oxidation states.*"
)
pd_entries = self.compatibility.process_entries(entries)
self.logger.debug(f"{len(pd_entries)} remain in {chemsys} after filtering")
try:
docs, pd = ThermoDoc.from_entries(pd_entries, deprecated=False)
for doc in docs:
doc.entries = material_entries[doc.material_id]
doc.entry_types = list(material_entries[doc.material_id].keys())
pd_data = None
if self.phase_diagram:
if (
self.num_phase_diagram_eles is None
or len(elements) <= self.num_phase_diagram_eles
):
pd_doc = PhaseDiagramDoc(chemsys=chemsys, phase_diagram=pd)
pd_data = jsanitize(pd_doc.dict(), allow_bson=True)
docs_pd_pair = (
jsanitize([d.dict() for d in docs], allow_bson=True),
[pd_data],
)
except PhaseDiagramError as p:
elsyms = []
for e in entries:
elsyms.extend([el.symbol for el in e.composition.elements])
self.logger.warning(
f"Phase diagram error in chemsys {'-'.join(sorted(set(elsyms)))}: {p}"
)
return []
except Exception as e:
self.logger.error(
f"Got unexpected error while processing {[ent_.entry_id for ent_ in entries]}: {e}"
)
return []
return docs_pd_pair
def update_targets(self, items):
"""
Inserts the thermo and phase diagram docs into the thermo collection
Args:
items ([[tuple(List[dict],List[dict])]]): a list of list of thermo dictionaries to update
"""
# print(len(items))
thermo_docs = [item[0] for item in items]
phase_diagram_docs = [item[1] for item in items]
# flatten out lists
thermo_docs = list(filter(None, chain.from_iterable(thermo_docs)))
phase_diagram_docs = list(filter(None, chain.from_iterable(phase_diagram_docs)))
# Check if already updated this run
thermo_docs = [
i for i in thermo_docs if i["material_id"] not in self._completed_tasks
]
self._completed_tasks |= {i["material_id"] for i in thermo_docs}
for item in thermo_docs:
if isinstance(item["last_updated"], dict):
item["last_updated"] = MontyDecoder().process_decoded(
item["last_updated"]
)
if self.phase_diagram:
self.phase_diagram.update(phase_diagram_docs)
if len(thermo_docs) > 0:
self.logger.info(f"Updating {len(thermo_docs)} thermo documents")
self.thermo.update(docs=thermo_docs, key=["material_id"])
else:
self.logger.info("No thermo items to update")
def get_entries(self, chemsys: str) -> List[Dict]:
"""
        Gets entries from the materials collection for the corresponding chemical system
        Args:
            chemsys(str): a chemical system represented by string elements separated by a dash (-)
        Returns:
            list(dict): a list of entry dictionaries for this system
"""
self.logger.info(f"Getting entries for: {chemsys}")
# First check the cache
all_chemsys = chemsys_permutations(chemsys)
cached_chemsys = all_chemsys & set(self._entries_cache.keys())
query_chemsys = all_chemsys - cached_chemsys
all_entries = list(
chain.from_iterable(self._entries_cache[c] for c in cached_chemsys)
)
self.logger.debug(
f"Getting {len(cached_chemsys)} sub-chemsys from cache for {chemsys}"
)
self.logger.debug(
f"Getting {len(query_chemsys)} sub-chemsys from DB for {chemsys}"
)
# Second grab the materials docs
new_q = dict(self.query)
new_q["chemsys"] = {"$in": list(query_chemsys)}
new_q["deprecated"] = False
materials_docs = list(
self.materials.query(
criteria=new_q, properties=["material_id", "entries", "deprecated"]
)
)
# Get Oxidation state data for each material
oxi_states_data = {}
if self.oxidation_states:
material_ids = [t["material_id"] for t in materials_docs]
oxi_states_data = {
d["material_id"]: d.get("average_oxidation_states", {})
for d in self.oxidation_states.query(
properties=["material_id", "average_oxidation_states"],
criteria={
"material_id": {"$in": material_ids},
"state": "successful",
},
)
}
self.logger.debug(
f"Got {len(materials_docs)} entries from DB for {len(query_chemsys)} sub-chemsys for {chemsys}"
)
# Convert the entries into ComputedEntries and store
for doc in materials_docs:
for r_type, entry_dict in doc.get("entries", {}).items():
entry_dict["data"]["oxidation_states"] = oxi_states_data.get(
entry_dict["entry_id"], {}
)
entry_dict["data"]["run_type"] = r_type
elsyms = sorted(set([el for el in entry_dict["composition"]]))
self._entries_cache["-".join(elsyms)].append(entry_dict)
all_entries.append(entry_dict)
self.logger.info(f"Total entries in {chemsys} : {len(all_entries)}")
return all_entries
def get_updated_chemsys(self,) -> Set:
"""Gets updated chemical system as defined by the updating of an existing material"""
updated_mats = self.thermo.newer_in(self.materials, criteria=self.query)
updated_chemsys = set(
self.materials.distinct(
"chemsys", {"material_id": {"$in": list(updated_mats)}, **self.query}
)
)
self.logger.debug(f"Found {len(updated_chemsys)} updated chemical systems")
return updated_chemsys
def get_new_chemsys(self) -> Set:
"""Gets newer chemical system as defined by introduction of a new material"""
# All materials that are not present in the thermo collection
thermo_mat_ids = self.thermo.distinct("material_id")
mat_ids = self.materials.distinct("material_id", self.query)
dif_task_ids = list(set(mat_ids) - set(thermo_mat_ids))
q = {"material_id": {"$in": dif_task_ids}}
new_mat_chemsys = set(self.materials.distinct("chemsys", q))
self.logger.debug(f"Found {len(new_mat_chemsys)} new chemical systems")
return new_mat_chemsys
def get_affected_chemsys(self, chemical_systems: Set) -> Set:
"""Gets chemical systems affected by changes in the supplied chemical systems"""
# First get all chemsys with any of the elements we've marked
affected_chemsys = set()
affected_els = list({el for c in chemical_systems for el in c.split("-")})
possible_affected_chemsys = self.materials.distinct(
"chemsys", {"elements": {"$in": affected_els}}
)
sub_chemsys = defaultdict(list)
# Build a dictionary mapping sub_chemsys to all super_chemsys
for chemsys in possible_affected_chemsys:
for permutation in chemsys_permutations(chemsys):
sub_chemsys[permutation].append(chemsys)
# Select and merge distinct super chemsys from sub_chemsys
for chemsys in chemical_systems:
affected_chemsys |= set(sub_chemsys[chemsys])
self.logger.debug(
f"Found {len(affected_chemsys)} chemical systems affected by this build"
)
return affected_chemsys
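# Hedged usage sketch (illustrative; the store setup is an assumption, not part of
# this module -- any maggma Store with the expected schema works):
#
#   from maggma.stores import MongoStore
#   materials = MongoStore("emmet", "materials", key="material_id")
#   thermo = MongoStore("emmet", "thermo", key="material_id")
#   builder = ThermoBuilder(materials=materials, thermo=thermo)
#   builder.run()   # maggma drives get_items -> process_item -> update_targets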
|
the-stack_0_3899 | import csv
def save_minimal_pairs(output_filename, to_output, write_header=True):
if isinstance(output_filename, str):
outf = open(output_filename, mode='w', encoding='utf-8-sig', newline='')
needs_closed = True
else:
outf = output_filename
needs_closed = False
writer = csv.writer(outf, delimiter='\t')
if write_header:
writer.writerow(['FIRST_SEGMENT', 'SECOND_SEGMENT',
'FIRST_WORD', 'FIRST_WORD_TRANSCRIPTION',
'SECOND_WORD', 'SECOND_WORD_TRANSCRIPTION'])
for _, _, ret_dict in to_output:
for seg_pair, word_pair_set in ret_dict.items():
for word_pair in word_pair_set:
writer.writerow([seg_pair[0], seg_pair[1],
word_pair[0][0], word_pair[0][1],
word_pair[1][0], word_pair[1][1]])
if needs_closed:
outf.close()
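# Hedged usage example (illustrative data; the nesting mirrors what the loop above
# expects: (_, _, ret_dict) triples, where ret_dict maps a segment pair to a set of
# ((word, transcription), (word, transcription)) pairs):
#
#   to_output = [
#       (None, None, {('p', 'b'): {(('pat', 'p ae t'), ('bat', 'b ae t'))}}),
#   ]
#   save_minimal_pairs('minimal_pairs.txt', to_output)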
|
the-stack_0_3900 | import numpy as np
import random
from collections import defaultdict
from environment import Env
class SARSAgent:
def __init__(self, actions):
self.actions = actions
self.learning_rate = 0.01
self.discount_factor = 0.9
self.epsilon = 0.1
self.q_table = defaultdict(lambda: [0.0, 0.0, 0.0, 0.0])
# <s, a, r, s', a'>의 샘플로부터 큐함수를 업데이트
def learn(self, state, action, reward, next_state, next_action):
print(self.q_table)
current_q = self.q_table[state][action]
next_state_q = self.q_table[next_state][next_action]
new_q = (current_q + self.learning_rate *
(reward + self.discount_factor * next_state_q - current_q))
self.q_table[state][action] = new_q
# 입실론 탐욕 정책에 따라서 행동을 반환
def get_action(self, state):
if np.random.rand() < self.epsilon:
# 무작위 행동 반환
action = np.random.choice(self.actions)
else:
# 큐함수에 따른 행동 반환
state_action = self.q_table[state]
action = self.arg_max(state_action)
return action
@staticmethod
def arg_max(state_action):
max_index_list = []
max_value = state_action[0]
for index, value in enumerate(state_action):
if value > max_value:
max_index_list.clear()
max_value = value
max_index_list.append(index)
elif value == max_value:
max_index_list.append(index)
return random.choice(max_index_list)
if __name__ == "__main__":
env = Env()
agent = SARSAgent(actions=list(range(env.n_actions)))
for episode in range(1000):
        # Initialize the game environment and the state
state = env.reset()
        # Select an action for the current state
action = agent.get_action(str(state))
while True:
env.render()
            # Take the action, then receive the next state, the reward and whether the episode is done
next_state, reward, done = env.step(action)
            # Select the next action in the next state
next_action = agent.get_action(str(next_state))
            # Update the Q-function with <s,a,r,s',a'>
agent.learn(str(state), action, reward, str(next_state), next_action)
state = next_state
action = next_action
            # Display all Q-function values on the screen
env.print_value_all(agent.q_table)
if done:
break
|
the-stack_0_3905 | # Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from zope.interface import implementer
from OpenSSL import SSL, crypto
from twisted.internet._sslverify import _defaultCurveName
from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
from twisted.internet.ssl import CertificateOptions, ContextFactory
from twisted.python.failure import Failure
logger = logging.getLogger(__name__)
class ServerContextFactory(ContextFactory):
"""Factory for PyOpenSSL SSL contexts that are used to handle incoming
connections."""
def __init__(self, config):
self._context = SSL.Context(SSL.SSLv23_METHOD)
self.configure_context(self._context, config)
@staticmethod
def configure_context(context, config):
try:
_ecCurve = crypto.get_elliptic_curve(_defaultCurveName)
context.set_tmp_ecdh(_ecCurve)
except Exception:
logger.exception("Failed to enable elliptic curve for TLS")
context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
context.use_certificate_chain_file(config.tls_certificate_file)
if not config.no_tls:
context.use_privatekey(config.tls_private_key)
context.load_tmp_dh(config.tls_dh_params_path)
context.set_cipher_list("!ADH:HIGH+kEDH:!AECDH:HIGH+kEECDH")
def getContext(self):
return self._context
def _idnaBytes(text):
"""
Convert some text typed by a human into some ASCII bytes. This is a
copy of twisted.internet._idna._idnaBytes. For documentation, see the
twisted documentation.
"""
try:
import idna
except ImportError:
return text.encode("idna")
else:
return idna.encode(text)
def _tolerateErrors(wrapped):
"""
Wrap up an info_callback for pyOpenSSL so that if something goes wrong
the error is immediately logged and the connection is dropped if possible.
This is a copy of twisted.internet._sslverify._tolerateErrors. For
documentation, see the twisted documentation.
"""
def infoCallback(connection, where, ret):
try:
return wrapped(connection, where, ret)
except: # noqa: E722, taken from the twisted implementation
f = Failure()
logger.exception("Error during info_callback")
connection.get_app_data().failVerification(f)
return infoCallback
@implementer(IOpenSSLClientConnectionCreator)
class ClientTLSOptions(object):
"""
Client creator for TLS without certificate identity verification. This is a
copy of twisted.internet._sslverify.ClientTLSOptions with the identity
verification left out. For documentation, see the twisted documentation.
"""
def __init__(self, hostname, ctx):
self._ctx = ctx
self._hostname = hostname
self._hostnameBytes = _idnaBytes(hostname)
ctx.set_info_callback(
_tolerateErrors(self._identityVerifyingInfoCallback)
)
def clientConnectionForTLS(self, tlsProtocol):
context = self._ctx
connection = SSL.Connection(context, None)
connection.set_app_data(tlsProtocol)
return connection
def _identityVerifyingInfoCallback(self, connection, where, ret):
if where & SSL.SSL_CB_HANDSHAKE_START:
connection.set_tlsext_host_name(self._hostnameBytes)
class ClientTLSOptionsFactory(object):
"""Factory for Twisted ClientTLSOptions that are used to make connections
to remote servers for federation."""
def __init__(self, config):
# We don't use config options yet
pass
def get_options(self, host):
return ClientTLSOptions(
host.decode('utf-8'),
CertificateOptions(verify=False).getContext()
)
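# Hedged usage sketch (illustrative; `config` stands for the homeserver TLS config
# object with tls_certificate_file, tls_private_key and tls_dh_params_path set):
#
#   server_factory = ServerContextFactory(config)     # for incoming TLS connections
#   client_factory = ClientTLSOptionsFactory(config)  # for outgoing federation TLS
#   options = client_factory.get_options(b"matrix.example.org")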
|
the-stack_0_3907 | import unittest
from nose.plugins.attrib import attr
from jnpr.healthbot import HealthBotClient
from jnpr.healthbot import PlaybookSchema
from jnpr.healthbot import PlayBookInstanceBuilder
from mock import patch
@attr('unit')
class TestPlaybooks(unittest.TestCase):
@patch('jnpr.healthbot.healthbot.requests.Session')
def setUp(self, mock_request):
self.mock_request = mock_request
self.conn = HealthBotClient(
server='1.1.1.1',
user='test',
password='password123')
def test_add_playbook_using_schema_check_existance(self):
self.mock_request().get.side_effect = self._mock_manager
pbs = PlaybookSchema(playbook_name="automation-coredump-pb")
pbs.description = "HbEZ Demo Examples"
pbs.synopsis = 'fpc status'
pbs.rules = ['hbez/hbez-fpc-heap-utilization']
self.assertTrue(self.conn.playbook.add(pbs))
def test_add_playbook_using_schema(self):
self.mock_request().get.side_effect = self._mock_manager
pbs = PlaybookSchema(playbook_name="testing")
pbs.description = "HbEZ Demo Examples"
pbs.rules = ['hbez/hbez-fpc-heap-utilization']
self.assertTrue(self.conn.playbook.add(pbs))
def test_delete_playbook(self):
self.assertTrue(
self.conn.playbook.delete(
playbook_name="testing"))
def test_get_playbook(self):
self.mock_request().get.side_effect = self._mock_manager
obj = self.conn.playbook.get(
playbook_name="automation-coredump-pb")
self.assertEqual(obj.rules, [
"protocol-automation-coredumps/check-coredumps"
])
def test_update_playbook(self):
self.mock_request().get.side_effect = self._mock_manager
obj = self.conn.playbook.get(
playbook_name="automation-coredump-pb")
obj.description = "testing"
self.conn.playbook.update(obj)
self.assertEqual(
self.mock_request().mock_calls[4][2]['json']['description'],
"testing")
def test_get_playbooks(self):
self.mock_request().get.side_effect = self._mock_manager
obj = self.conn.playbook.get()
self.assertGreaterEqual(len(obj), 1)
def test_playbook_instance_builder_with_no_variable(self):
self.mock_request().get.side_effect = self._mock_manager
pbb = PlayBookInstanceBuilder(
self.conn, 'automation-coredump-pb', 'HbEZ-instance',
'Core')
pbb.apply()
self.assertEqual(self.mock_request().mock_calls[6][0], 'put')
self.assertEqual(
self.mock_request().mock_calls[6][1][0],
'https://1.1.1.1:8080/api/v1/device-group/Core')
def test_playbook_instance_builder_delete(self):
self.mock_request().get.side_effect = self._mock_manager
pbb = PlayBookInstanceBuilder(
self.conn, 'automation-coredump-pb', 'HbEZ-instance',
'Core')
pbb.delete()
self.assertEqual(self.mock_request().mock_calls[7][0], 'put')
self.assertEqual(
self.mock_request().mock_calls[7][1][0],
'https://1.1.1.1:8080/api/v1/device/vmx')
self.assertEqual(
self.mock_request().mock_calls[11][1][0],
'https://1.1.1.1:8080/api/v1/device-group/Core')
def test_playbook_apply_commit(self):
self.mock_request().get.side_effect = self._mock_manager
pbb = PlayBookInstanceBuilder(
self.conn, 'automation-coredump-pb', 'HbEZ-instance',
'Core')
pbb.apply(commit=True)
self.assertEqual(self.mock_request().mock_calls[10][0], 'post')
self.assertEqual(
self.mock_request().mock_calls[10][1][0],
'https://1.1.1.1:8080/api/v1/configuration')
def test_playbook_instance_builder_with_no_device_group(self):
from jnpr.healthbot.exception import NotFoundError
self.mock_request().get.side_effect = self._mock_manager
pbb = PlayBookInstanceBuilder(
self.conn, 'automation-coredump-pb', 'xyz', 'real')
self.assertRaises(NotFoundError, pbb.apply)
def test_playbook_instance_builder_with_variable(self):
self.mock_request().get.side_effect = self._mock_manager
pbb = PlayBookInstanceBuilder(
self.conn,
'forwarding-table-summary',
'HbEZ-instance',
'Core')
routesummary_fib_summary = pbb.rule_variables["protocol.routesummary/check-fib-summary"]
routesummary_fib_summary.route_count_threshold = 200
routesummary_fib_summary.route_address_family = 'abc'
pbb.apply()
self.assertEqual(self.mock_request().mock_calls[7][0], 'put')
self.assertEqual(
self.mock_request().mock_calls[7][1][0],
'https://1.1.1.1:8080/api/v1/device-group/Core')
def test_playbook_instance_builder_with_variable_per_device(self):
self.mock_request().get.side_effect = self._mock_manager
pbb = PlayBookInstanceBuilder(
self.conn,
'forwarding-table-summary',
'HbEZ-instance',
'Core')
routesummary_fib_summary = pbb.rule_variables["protocol.routesummary/check-fib-summary"]
routesummary_fib_summary.route_count_threshold = 200
routesummary_fib_summary.route_address_family = 'abc'
pbb.apply(device_ids=['vmx'])
self.assertEqual(self.mock_request().mock_calls[8][0], 'put')
self.assertEqual(
self.mock_request().mock_calls[8][1][0],
'https://1.1.1.1:8080/api/v1/device/vmx')
def test_playbook_instance_builder_with_non_existing_device(self):
self.mock_request().get.side_effect = self._mock_manager
pbb = PlayBookInstanceBuilder(
self.conn,
'forwarding-table-summary',
'HbEZ-instance',
'Core')
routesummary_fib_summary = pbb.rule_variables["protocol.routesummary/check-fib-summary"]
routesummary_fib_summary.route_count_threshold = 200
routesummary_fib_summary.route_address_family = 'abc'
self.assertRaises(RuntimeError, pbb.apply, device_ids=['fake'])
def test_clear(self):
self.mock_request().get.side_effect = self._mock_manager
pbb = PlayBookInstanceBuilder(
self.conn,
'forwarding-table-summary',
'HbEZ-instance',
'Core')
routesummary_fib_summary = pbb.rule_variables["protocol.routesummary/check-fib-summary"]
routesummary_fib_summary.route_count_threshold = 200
routesummary_fib_summary.route_address_family = 'abc'
pbb.clear()
routesummary_fib_summary = pbb.rule_variables["protocol.routesummary/check-fib-summary"]
self.assertEqual(
routesummary_fib_summary.route_count_threshold,
'10000')
def test_playbook_schema_setter(self):
self.mock_request().get.side_effect = self._mock_manager
pbb = PlayBookInstanceBuilder(
self.conn,
'forwarding-table-summary',
'HbEZ-instance',
'Core')
with self.assertRaises(RuntimeError):
pbb.playbook_schema = 30
def test_get_playbook_schema_error(self):
self.mock_request().get.side_effect = self._mock_manager
self.assertRaises(AttributeError, PlayBookInstanceBuilder, self.conn,
'dummy', 'HbEZ-instance', 'Core')
def _mock_manager(self, *args):
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
self.ok = True
def json(self):
return self.json_data
def raise_for_status(self):
return None
if args[0] == 'https://1.1.1.1:8080/api/v1/playbook/automation-coredump-pb/?working=true':
obj = MockResponse({
"playbook-name": "automation-coredump-pb",
"rules": [
"protocol-automation-coredumps/check-coredumps"
]
}, 200)
obj.ok = True
return obj
if args[0] == 'https://1.1.1.1:8080/api/v1/topic/protocol-automation-coredumps/rule/check-coredumps/?working=true':
obj = MockResponse({"description": "This rule will monitor for the automation coredumps",
"field": [{"description": "Actual coredump filename",
"field-name": "coredump-filename",
"formula": {"user-defined-function": {"argument": [{"argument": "message",
"value": "$coredump-message"}],
"function-name": "get-core-filename-from-message"}},
"type": "string"},
{"description": "The actual syslog that appears when a coredump happens",
"field-name": "coredump-message",
"sensor": [{"path": "/junos/events/event/message",
"sensor-name": "coredump-detectors",
"where": [{"query": "/junos/events/event/message =~ /.*Core and context for (eventd|cscript).*/"}]}],
"type": "string"},
{"description": "Timestamp of the coredump as registered by the telemetry sensor",
"field-name": "coredump-timestamp",
"sensor": [{"path": "/junos/events/event/timestamp/seconds",
"sensor-name": "coredump-detectors"}],
"type": "string"}],
"keys": ["coredump-message"],
"rule-name": "check-coredumps",
"sensor": [{"open-config": {"frequency": "0s",
"sensor-name": "/junos/events/event[id='SYSTEM']"},
"sensor-name": "coredump-detectors"}],
"synopsis": "To monitor automation coredumps",
"trigger": [{"frequency": "15s",
"term": [{"term-name": "core-generated",
"then": {"status": {"color": "red",
"message": "Coredump was seen: $coredump-message"},
"user-defined-action": [{"argument": [{"argument": "local_dir_name",
"value": "coredumps"}],
"function-name": "get-automation-traces"},
{"argument": [{"argument": "local_dir_name",
"value": "coredump"},
{"argument": "remote_dir_name",
"value": "$coredump-filename"}],
"function-name": "get-file-from-device"},
{"argument": [{"argument": "local_dir_name",
"value": "coredump"},
{"argument": "remote_dir_name",
"value": "/var/log/* /var/tmp/*"}],
"function-name": "get-log-file-from-device"}]},
"when": {"matches-with": [{"left-operand": "$coredump-message",
"right-operand": ".*Core and context.*",
"time-range": "30s"}]}},
{"term-name": "Core-not-generated",
"then": {"status": {"color": "green",
"message": "No core found"}}}],
"trigger-name": "core-generated"}]},
200)
obj.ok = True
return obj
if args[0] == 'https://1.1.1.1:8080/api/v1/playbooks/?working=true':
obj = MockResponse({"playbook": [{"playbook-name": "netsvc-playbook",
"rules": ["chassis.networkservices/netsvc-rule"]},
{"playbook-name": "phyport",
"rules": ["external/interface-info"]},
{"playbook-name": "automation-coredump-pb",
"rules": ["protocol-automation-coredumps/check-coredumps"]},
{"description": "This playbook help to collect eventd debug logs",
"playbook-name": "eventd-debug-collection",
"rules": ["protocol-eventd-debug/collect-debugs"],
"synopsis": "Collect eventd logs"}]},
200)
obj.ok = True
return obj
if args[0] == 'https://1.1.1.1:8080/api/v1/topic/protocol.routesummary/rule/check-fib-summary/?working=true':
obj = MockResponse({
"description": "Collects forwarding-table's total-route-count of each protocol and sets dynamic thresholds and notify anomaly when route count is abnormal",
"field": [
{
"description": "Address family name to be monitored",
"field-name": "address-family",
"sensor": [
{
"path": "address-family",
"sensor-name": "fib-sensor",
"where": [
{
"query": "address-family =~ /^{{route-address-family}}$/"
}
]
}
],
"type": "string"
},
{
"description": "Detects anamoly dynamically using kmeans algorithm",
"field-name": "dt-route-count",
"formula": {
"dynamic-threshold": {
"algorithm": "3sigma",
"field-name": "$route-count",
"learning-period": "7d",
"pattern-periodicity": "1h"
}
},
"type": "integer"
},
{
"description": "Route table type to be monitored",
"field-name": "route-table-type",
"sensor": [
{
"path": "route-table-type",
"sensor-name": "fib-sensor",
"where": [
{
"query": "route-table-type =~ /^{{table-type}}$/"
}
]
}
],
"type": "string"
},
{
"description": "Route table name to be monitored",
"field-name": "table-name",
"sensor": [
{
"path": "table-name",
"sensor-name": "fib-sensor",
"where": [
{
"query": "table-name =~ /^{{route-table-name}}$/"
}
]
}
],
"type": "string"
},
{
"constant": {
"value": "{{route-count-threshold}}"
},
"description": "Route count static threshold",
"field-name": "threshold",
"type": "integer"
}
],
"keys": [
"address-family",
"route-table-type",
"table-name"
],
"rule-name": "check-fib-summary",
"sensor": [
{
"description": "iAgent sensor collect forwarding-table route-count stats from network device",
"iAgent": {
"file": "fib.yml",
"frequency": "10m",
"table": "FibSummaryTable"
},
"sensor-name": "fib-sensor",
"synopsis": "FIB iAgent sensor definition"
}
],
"synopsis": "Forwarding-table protocols routes statistics analyzer",
"trigger": [
{
"frequency": "10m",
"term": [
{
"term-name": "is-route-count-abnormal",
"then": {
"status": {
"color": "red",
"message": "Route count of $table-name of $address-family of $route-table-type is ($route-count) abnormal"
}
},
"when": {
"greater-than-or-equal-to": [
{
"left-operand": "$route-count",
"right-operand": "$threshold",
"time-range": "30m"
}
]
}
},
{
"term-name": "is-route-count-above-dt",
"then": {
"status": {
"color": "yellow",
"message": "Route count of $table-name of $address-family of $route-table-type is ($route-count) is above dynamic threshold"
}
},
"when": {
"equal-to": [
{
"left-operand": "$dt-route-count",
"right-operand": "1",
"time-range": "30m"
}
]
}
},
{
"term-name": "route-count-normal",
"then": {
"status": {
"color": "green",
"message": "Route count of $table-name of $address-family of $route-table-type is ($route-count) normal"
}
}
}
],
"trigger-name": "fib-route-count"
}
],
"variable": [
{
"description": "address-family names to monitor, regular expression, eg 'Internet|Internet6|MPLS|VPLS'",
"name": "route-address-family",
"type": "string",
"value": ".+"
},
{
"description": "Forwarding table's each protocol's route count threshold",
"name": "route-count-threshold",
"type": "int",
"value": "10000"
},
{
"description": "route table names to monitor, regular expression, eg 'default.inet|default.inet6|vpn_0.inet'",
"name": "route-table-name",
"type": "string",
"value": ".+"
},
{
"description": "route table types to monitor, regular expression, eg 'perm|intf|user'",
"name": "table-type",
"type": "string",
"value": ".+"
}
]
},
200)
obj.ok = True
return obj
if args[0] == 'https://1.1.1.1:8080/api/v1/playbook/forwarding-table-summary/?working=true':
obj = MockResponse({
"description": "Playbook monitors forwarding-table's each protocol's route count and notifies anomaly when route count is above static or dynamic threshold",
"playbook-name": "forwarding-table-summary",
"rules": [
"protocol.routesummary/check-fib-summary"
],
"synopsis": "Forwarding table and protocol routes key performance indicators"
}, 200)
obj.ok = False
return obj
elif args[0] == 'https://1.1.1.1:8080/api/v1/device-group/Core/?working=true':
return MockResponse({"description": "testing",
"device-group-name": "Core",
"devices": ["vmx"],
"native-gpb": {"ports": [22000]},
"notification": {},
"playbooks": ["eventd-debug-collection",
"eventd-kpis-playbook",
'automation-coredump-pb'],
"reports": [],
"variable": [{"@": {"changed-seconds": 1564722219},
"instance-id": "HbEZ-instance",
"playbook": "automation-coredump-pb",
"rule": "x/y",
"variable-value": []}]},
200)
elif args[0] == 'https://1.1.1.1:8080/api/v1/device/vmx/?working=true':
return MockResponse({
"authentication": {
"password": {
"password": "xxxx",
"username": "xxxx"
}
},
"device-id": "vmx",
"host": "10.221.136.140",
"open-config": {
"port": 32767
},
"system-id": "testing",
"variable": [{"@": {"changed-seconds": 1564722219},
"instance-id": "HbEZ-instance",
"playbook": "automation-coredump-pb",
"rule": "x/y",
"variable-value": []}],
"vendor": {
"juniper": {
"operating-system": "junos"
}
}
}, 200)
elif args[0] == 'https://1.1.1.1:8080/api/v1/playbook/testing/?working=true':
obj = MockResponse({
"detail": "Playbook not found",
"status": 404
}, 404)
obj.ok = False
return obj
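        # NOTE: the next branch repeats the device/vmx URL already matched by an
        # earlier elif in this chain, so it can never be reached.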
elif args[0] == 'https://1.1.1.1:8080/api/v1/device/vmx/?working=true':
return MockResponse({
"authentication": {
"password": {
"password": "xxxx",
"username": "xxxx"
}
},
"device-id": "vmx",
"host": "10.221.xxx.xxx",
"open-config": {
"port": 32767
},
"system-id": "testing",
"variable": [],
"vendor": {
"juniper": {
"operating-system": "junos"
}
}
}, 200)
return MockResponse(None, 404)
|
the-stack_0_3908 | import typing
from discord_bot_eternal_dice.model.discord_command import DiscordCommand
from discord_bot_eternal_dice.model.discord_event import CommandType, DiscordEvent
class DiscordRoute:
def __init__(self, handler, command_type: CommandType, command: str, subcommand: str = None,
options: typing.Dict = None):
self.handler = handler
self.command_type = command_type
self.subcommand = subcommand
self.command = command
self.options = options
def matches(self, event: DiscordEvent) -> bool:
if event.command.command_name != self.command:
return False
if event.command.subcommand_name is not None and event.command.subcommand_name != self.subcommand:
return False
return True
def validate(self, command: DiscordCommand) -> bool:
unconsumed_options = list(command.options.keys())
for option, option_type in self.options.items():
if option not in command.options:
return False
if type(command.options[option]) is not option_type:
return False
unconsumed_options.remove(option)
if len(unconsumed_options) > 0:
return False
return True
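# Illustrative usage sketch (the handler, the CommandType member, and the option
# names below are assumptions, not part of this module):
#
#   route = DiscordRoute(handler=roll_dice, command_type=CommandType.COMMAND,
#                        command="roll", subcommand="dice",
#                        options={"sides": int, "count": int})
#   if route.matches(event) and route.validate(event.command):
#       route.handler(event)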
|
the-stack_0_3912 | #!/usr/bin/env python
###############################################################################
#
# superimposessemap.py - Superimpose structures according to SSE mapping
#
# File: superimposessemap.py
# Author: Alex Stivala
# Created: August 2008
#
#
# Supermipose in 3D the residues in corresponding SSEs by orthogonal
# transformations (using SVD) using the Bio.PDB.Superimposer module.
#
# $Id: superimposessemap.py 1821 2008-08-18 00:54:56Z astivala $
#
###############################################################################
"""
Using the SSE mapping from soln2ssemap.py, which shows pairs of SSE
sequential (from 1) numbers that correspond to each other, use orthogonal
transformation to superimpose the residues in corresponding SSEs,
calculating the RMSD and producing a superimposition in a PDB file for visualization.
Requires the ptsecstruct.py module to get secondary structures using
DSSP (or STRIDE) (add the directory containing it to PYTHONPATH).
Note that these must be the same definitions used
to produce the mapping, i.e. the ones the tableaux database and query
were built with; otherwise the superimposition won't really make sense.
"""
import warnings # so we can suppress the annoying tempnam 'security' warning
import sys,os
import getopt
from time import strftime,localtime
import Bio.PDB
import ptsecstruct
from ptutils import biopdbresid_to_pdbresseq,get_int_icode
from parsessemap import parse_ssemap,SearchMap,QuerySSEMap
from pathdefs import ASTRAL_ROOT
#-----------------------------------------------------------------------------
#
# Function definitions
#
#-----------------------------------------------------------------------------
def get_structure(scopsid, thepdbfile=None):
"""
Get Bio.PDB parsed structure for specified identifier or PDB file.
Parameters:
scopsid - SCOP identifier to get SSEs for; used to locate file
under ASTRAL SCOP hierarchy.
thepdbfile - (default None) if not None, PDB file to get SSEs for,
overriding scopsid.
Return value:
Bio.PDB parsed structure.
"""
if thepdbfile:
pdbfile = thepdbfile
else:
pdbfilename = os.path.join(scopsid[2:4].lower(),
scopsid.lower() + '.ent')
pdbfile = os.path.join(ASTRAL_ROOT, pdbfilename)
parser = Bio.PDB.PDBParser()
structure = parser.get_structure(scopsid, pdbfile)
return structure
def get_sse_nodes(scopsid, thepdbfile=None):
"""
Get SSE definitions in form of PTNode objects
from the supplied SCOP sid using
DSSP. Uses the ptsecstruct.py module, note comments at top of this
module also regarding ensuring the same definitions are used here
as for the actual search.
Parameters:
scopsid - SCOP identifier to get SSEs for; used to locate file
under ASTRAL SCOP hierarchy.
thepdbfile - (default None) if not None, PDB file to get SSEs for,
overriding scopsid.
Return value:
    list of PTNode objects representing the SSEs.
"""
if thepdbfile:
pdbfile = thepdbfile
else:
pdbfilename = os.path.join(scopsid[2:4].lower(),
scopsid.lower() + '.ent')
pdbfile = os.path.join(ASTRAL_ROOT, pdbfilename)
secstruct = ptsecstruct.read_secstruct_from_dssp(pdbfile)
return secstruct.get_sse_tuple_list()
def get_residue_list(model):
"""
Get list of Bio.PDB.Residue objects in supplied Bio.PDB.Model
    Parameters:
model - Bio.PDB.Model object
Return value:
List of Bio.PDB.Residue objects in the model
"""
residue_list = []
for chain in model:
# id of a residue in Bio.PDB is tuple (hetatm, resseqnum, icode)
residue_list += [ residue for residue in chain.get_unpacked_list()
if Bio.PDB.is_aa(residue) ]
return residue_list
def build_resid_dict(residue_list):
"""
Build dictionary mapping (chainid, pdb_resid) to index in residue_list
for all residues, not just those in this domain.
Parameters:
residue_list - list of Bio.PDB.Residue objects
Return value:
dict of { {chainid,pdb_resseq) : seqindx }
where chainid and pdb_resseq make up
the PDB residue identifier, the pdb_resseq
being string resnum+icode if any e.g.
'60' or '60A', seqindx is the indiex
into sequential list of all residues
residue_list.
"""
pdb_resid_dict = {}
seq_indx = 0
while seq_indx < len(residue_list):
residue = residue_list[seq_indx]
pdb_resid_dict[( ptsecstruct.pdb_chainid_to_stride_chainid(
residue.get_full_id()[2]),
biopdbresid_to_pdbresseq(residue.get_id()) )] = seq_indx
seq_indx += 1
return pdb_resid_dict
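# For example, residue 60 of chain A would typically appear in the dictionary as
# ('A', '60') -> its index in residue_list, and an insertion-code residue as
# ('A', '60A') -> index.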
def get_matched_residues(matched_sses, query_struct, db_struct):
"""
    Given the list of corresponding SSEs in the two structures, return
list of corresponding Bio.PDB.Residue objects.
Parameters:
matched_sses - list of (A,B) tuples where A and B are
tuples (chain, start_resi, end_resi, type) in
query_struct and db_struct respectively.
query_struct - Bio.PDB.Structure
db_struct - Bio.PDB.Structure
Return value:
tuple (match_query_residues, match_db_residues) of equal length lists of
corresponding Bio.PDB.Residue objects in query and db structs resp.
"""
query_model = query_struct[0] # always using model 0 (TODO)
db_model = db_struct[0] # always using model 0 (TODO)
query_residue_list = get_residue_list(query_model)
query_resid_dict = build_resid_dict(query_residue_list)
db_residue_list = get_residue_list(db_model)
db_resid_dict = build_resid_dict(db_residue_list)
match_query_residues = []
match_db_residues = []
for ((qchain, qstart_resi, qend_resi, qtype),
(dchain, dstart_resi, dend_resi, dtype)) in matched_sses:
try:
start_indx = query_resid_dict[(qchain, qstart_resi)]
except KeyError:
# May be HETATM
while not query_resid_dict.has_key((qchain, qstart_resi)):
qstart_resi = str(get_int_icode(qstart_resi)[0] + 1)
start_indx = query_resid_dict[(qchain, qstart_resi)]
try:
end_indx = query_resid_dict[(qchain, qend_resi)]
except KeyError:
# May be HETATM
while not query_resid_dict.has_key((qchain, qend_resi)):
qend_resi = str(get_int_icode(qend_resi)[0] - 1)
end_indx = query_resid_dict[(qchain, qend_resi)]
query_residues = query_residue_list[start_indx : end_indx + 1]
try:
start_indx = db_resid_dict[(dchain, dstart_resi)]
except KeyError:
# May be HETATM
while not db_resid_dict.has_key((dchain, dstart_resi)):
dstart_resi = str(get_int_icode(dstart_resi)[0] + 1)
start_indx = db_resid_dict[(dchain, dstart_resi)]
try:
end_indx = db_resid_dict[(dchain, dend_resi)]
except KeyError:
# May be HETATM
while not db_resid_dict.has_key((dchain, dend_resi)):
dend_resi = str(get_int_icode(dend_resi)[0] - 1)
end_indx = db_resid_dict[(dchain, dend_resi)]
db_residues = db_residue_list[start_indx : end_indx + 1]
# # if the SSEs are of unequal length, just truncate the longer
# # FIXME: should do something better here, e.g. use residues
# # in middle of SSEs since definitions at ends probably less certain
# if len(db_residues) > len(query_residues):
# db_residues = db_residues[:len(query_residues)]
# elif len(query_residues) > len(db_residues):
# query_residues = query_residues[:len(db_residues)]
# match_query_residues += query_residues
# match_db_residues += db_residues
# # use the first and last residues in each SSE
        # FIXME: should really use projected endpoints on vector
# # to represent the vector actually used to construct tableau
# # as per fit_axis in ptnode.py
# match_query_residues += [query_residues[0], query_residues[-1]]
# match_db_residues += [db_residues[0], db_residues[-1]]
        # another dodgy way: just the 'most central' residue (FIXME)
match_query_residues.append(query_residues[len(query_residues)/2])
match_db_residues.append(db_residues[len(db_residues)/2])
assert(len(match_query_residues) == len(match_db_residues))
return (match_query_residues, match_db_residues)
#-----------------------------------------------------------------------------
#
# Main
#
#-----------------------------------------------------------------------------
def usage(progname):
"""
Print usage message and exit
"""
sys.stderr.write("Usage: " +progname + " [-d domainid] [-u query_pdbfile] [-b db_pdbfile] [-o outputdir] \n")
sys.stderr.write(
"-d domainid: use this structure, if more than one in input\n"
"-u query_pdbfile: filename of query PDB file. If not specified then\n"
" identifier is used to find in ASTRAL SCOP hierarchy.\n"
"-b db_pdbfile: filename of database PDB file. If not specfied then\n"
" identifier is used to find in ASTRAL SCOP hierarchy.\n"
" Only valid if there is only one domain (either becuase -d is\n"
" specified or there is only one in the input).\n"
"-o outputdir: directory to write PDB of superimposed structures in.\n"
)
sys.exit(1)
def main():
"""
main for superimposessemap.py
Usage: superimposessemap.py [-d domainid] [-s] [-u query_pdbfile] [-b db_pdbfile] [-o outputdir]
-d domainid: only output for this domain, not all
-u query_pdbfile: filename of query PDB file. If not specified then
identifier is used to find in ASTRAL SCOP hierarchy.
    -b db_pdbfile: filename of database PDB file. If not specified then
identifier is used to find in ASTRAL SCOP hierarchy.
       Only valid if there is only one domain (either because -d is
specified or there is only one in the input).
-o outputdir: directory to write PDB files of superimposed structures in.
Input is on stdin, the output of soln2ssemap.py,
identifier and score (as per input), then
    for each matched SSE pair a line containing
i and j separated by a space,
one per line (with blank line before next id) e.g.:
d1wiua_ -23.0000
1 1
3 2
8 4
9 5
11 6
14 9
The first SSE number on each line is in the query structure
(specified in header information), the second
is in the db structure (d1wiua_ in example).
Output is RMSD value on stdout, and PDB file(s) in specified directory if -o
    specified.
stdout output format is one result per line, fields whitespace delimited:
identifier score num_sses_matched num_aligned_points rmsd
e.g.
d1t10a_ -40.9999 8 16 16.93
num_aligned_points is number of points used in the superposition,
RMSD is the RMS deviation of those points (in Angstroms).
"""
global verbose
verbose = False
dbdomid = None
query_pdbfile = None
db_pdbfile = None
outputdir = None
try:
opts,args = getopt.getopt(sys.argv[1:], "d:u:b:o:")
except:
usage(os.path.basename(sys.argv[0]))
for opt,arg in opts:
if opt == "-d": # domain id specified, only get this one
dbdomid = arg
elif opt == "-u": # query PDB filename
query_pdbfile = arg
elif opt == "-b": # db PDB filename
db_pdbfile = arg
elif opt == "-o": # output directory
outputdir = arg
else:
usage(os.path.basename(sys.argv[0]))
if len(args) != 0:
usage(os.path.basename(sys.argv[0]))
search_maps = parse_ssemap(sys.stdin)
if (db_pdbfile and not dbdomid and len(search_maps.query_ssemap_list) > 1):
sys.stderr.write("ERROR: -b specified without -d and more than one "
"structure on input\n")
sys.exit(1)
query_sse_nodes = get_sse_nodes(search_maps.queryid, query_pdbfile)
query_structure = get_structure(search_maps.queryid, query_pdbfile)
for query_ssemap in search_maps.query_ssemap_list:
if ((not dbdomid) or (query_ssemap.domid == dbdomid)):
db_sse_nodes = get_sse_nodes(query_ssemap.domid, db_pdbfile)
db_structure = get_structure(query_ssemap.domid, db_pdbfile)
sse_map = query_ssemap.sse_map
if len(sse_map) == 0:
sys.stderr.write('no SSEs matched for ' + query_ssemap.domid +
': skipping\n')
continue
matched_sse_nodes = [(query_sse_nodes[i-1],db_sse_nodes[j-1]) for (i,j) in sse_map]
matched_residues = get_matched_residues(matched_sse_nodes,
query_structure,
db_structure)
# get Carbon alpha atoms for matched residues
query_atoms = [residue['CA'] for residue in matched_residues[0]]
db_atoms = [residue['CA'] for residue in matched_residues[1]]
# get orthogonal transformation to superimpose query and db atoms
superimposer = Bio.PDB.Superimposer()
superimposer.set_atoms(query_atoms, db_atoms)
# get the RMSD for the atoms used to calculate transformation
rmsd = superimposer.rms
sys.stdout.write('%s %8.4f %4d %4d %6.2f\n' %
(query_ssemap.domid,query_ssemap.score,
len(sse_map),
len(matched_residues[0]), rmsd))
if outputdir:
if not os.path.isdir(outputdir):
sys.stderr.write("'" + outputdir + "' is not an existing "
"directory, no output written\n")
else:
# apply the transformation to all db atoms
superimposer.apply(db_structure.get_atoms())
# save aligned structure as PDB file
io = Bio.PDB.PDBIO()
io.set_structure(db_structure)
outpdbfilename = search_maps.queryid.lstrip().rstrip() + \
'_' + \
query_ssemap.domid.lstrip().rstrip() + \
'.pdb'
outpdbfh = open(os.path.join(outputdir,outpdbfilename), 'w')
outpdbfh.write('REMARK generated by ' +
os.path.basename(sys.argv[0]) + '\n')
timestamp = strftime("%d%b%Y %H:%M:%S", localtime())
outpdbfh.write('REMARK on ' + timestamp + '\n')
outpdbfh.write('REMARK \n')
outpdbfh.write('REMARK ' + query_ssemap.domid +
' superimposed on ' + search_maps.queryid +
'\n')
outpdbfh.write('REMARK SCORE = %8.4f\n' % query_ssemap.score)
outpdbfh.write('REMARK NSSES = %4d\n' % len(sse_map))
outpdbfh.write('REMARK NRES = %4d\n' % len(matched_residues[0]))
outpdbfh.write('REMARK RMSD = %6.2f\n' % rmsd)
outpdbfh.write('REMARK \n')
outpdbfh.write('REMARK from:\n')
for cline in search_maps.comment_lines:
outline = cline[:65]
outpdbfh.write('REMARK ' + outline)
if outline[-1] != '\n':
outpdbfh.write('\n')
io.save(outpdbfh)
outpdbfh.close()
if __name__ == "__main__":
warnings.filterwarnings('ignore', 'tempnam', RuntimeWarning)
main()
|
the-stack_0_3914 | # coding: utf-8
# ----------------------------------------------------------------------------
# <copyright company="Aspose" file="djvu_properties.py">
# Copyright (c) 2018-2020 Aspose Pty Ltd. All rights reserved.
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# </summary>
# ----------------------------------------------------------------------------
import pprint
import re
import six
class DjvuProperties(object):
"""Represents properties of djvu file.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'background_color': 'str',
'has_background_color': 'bool',
'pages_count': 'int'
}
attribute_map = {
'background_color': 'BackgroundColor',
'has_background_color': 'HasBackgroundColor',
'pages_count': 'PagesCount'
}
def __init__(self, background_color=None, has_background_color=None, pages_count=None):
"""DjvuProperties - a model defined in Swagger"""
super(DjvuProperties, self).__init__()
self._background_color = None
self._has_background_color = None
self._pages_count = None
if background_color is not None:
self.background_color = background_color
if has_background_color is not None:
self.has_background_color = has_background_color
if pages_count is not None:
self.pages_count = pages_count
@property
def background_color(self):
"""Gets the background_color of this DjvuProperties.
Gets or sets background color.
:return: The background_color of this DjvuProperties.
:rtype: str
"""
return self._background_color
@background_color.setter
def background_color(self, background_color):
"""Sets the background_color of this DjvuProperties.
Gets or sets background color.
:param background_color: The background_color of this DjvuProperties.
:type: str
"""
self._background_color = background_color
@property
def has_background_color(self):
"""Gets the has_background_color of this DjvuProperties.
Gets or sets a value indicating whether background color is used.
:return: The has_background_color of this DjvuProperties.
:rtype: bool
"""
return self._has_background_color
@has_background_color.setter
def has_background_color(self, has_background_color):
"""Sets the has_background_color of this DjvuProperties.
Gets or sets a value indicating whether background color is used.
:param has_background_color: The has_background_color of this DjvuProperties.
:type: bool
"""
if has_background_color is None:
raise ValueError("Invalid value for `has_background_color`, must not be `None`")
self._has_background_color = has_background_color
@property
def pages_count(self):
"""Gets the pages_count of this DjvuProperties.
Gets or sets pages count.
:return: The pages_count of this DjvuProperties.
:rtype: int
"""
return self._pages_count
@pages_count.setter
def pages_count(self, pages_count):
"""Sets the pages_count of this DjvuProperties.
Gets or sets pages count.
:param pages_count: The pages_count of this DjvuProperties.
:type: int
"""
if pages_count is None:
raise ValueError("Invalid value for `pages_count`, must not be `None`")
self._pages_count = pages_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DjvuProperties):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
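# Illustrative usage (values are made up):
#
#   props = DjvuProperties(background_color="white", has_background_color=True,
#                          pages_count=3)
#   props.to_dict()
#   # -> {'background_color': 'white', 'has_background_color': True, 'pages_count': 3}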
|
the-stack_0_3915 | import os
import glob
import sys
import shutil
import pysam
from bcbio.pipeline import config_utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.utils import (safe_makedir, file_exists)
from bcbio.provenance import do
from bcbio import utils
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
from bcbio import bam
from bcbio import broad
from bcbio.wgbsseq import kits
def align(fastq_file, pair_file, ref_file, names, align_dir, data):
assert data["analysis"].lower().startswith("wgbs-seq"), "No comparible alignment."
config = data["config"]
sample = dd.get_sample_name(data)
out_prefix = os.path.join(align_dir, dd.get_lane(data))
out_dir = os.path.join(align_dir, "%s_bismark" % dd.get_lane(data))
if not ref_file:
logger.error("bismark index not found. You can install "
"the index for your genome with: bcbio_nextgen.py upgrade "
"--aligners bismark --genomes genome-build-name --data")
sys.exit(1)
final_out = os.path.join(align_dir, "{0}.bam".format(sample))
if file_exists(final_out):
data = dd.set_work_bam(data, final_out)
data["bam_report"] = glob.glob(os.path.join(out_dir, "*report.txt"))[0]
data = dd.update_summary_qc(data, "bismark", base=data["bam_report"])
return data
bismark = config_utils.get_program("bismark", config)
# bismark uses 5 threads/sample and ~12GB RAM/sample (hg38)
resources = config_utils.get_resources("bismark", data["config"])
max_cores = dd.get_num_cores(data)
max_mem = config_utils.convert_to_bytes(resources.get("memory", "1G")) / (1024.0 * 1024.0)
instances = calculate_bismark_instances(max_cores, max_mem * max_cores)
# override instances if specified in the config
if resources and resources.get("bismark_threads"):
instances = resources.get("bismark_threads")
logger.info(f"Using {instances} bismark instances - overriden by resources")
bowtie_threads = 1
if resources and resources.get("bowtie_threads"):
bowtie_threads = resources.get("bowtie_threads")
logger.info(f"Using {bowtie_threads} bowtie threads per bismark instance")
kit = kits.KITS.get(dd.get_kit(data), None)
directional = "--non_directional" if kit and not kit.is_directional else ""
other_opts = resources.get("options", [])
other_opts = " ".join([str(x) for x in other_opts]).strip()
fastq_files = " ".join([fastq_file, pair_file]) if pair_file else fastq_file
safe_makedir(align_dir)
cmd = "{bismark} {other_opts} {directional} --bowtie2 --temp_dir {tx_out_dir} --gzip --parallel {instances} -p {bowtie_threads} -o {tx_out_dir} --unmapped {ref_file} {fastq_file} "
if pair_file:
fastq_file = "-1 %s -2 %s" % (fastq_file, pair_file)
raw_bam = glob.glob(out_dir + "/*bismark*bt2*bam")
if not raw_bam:
with tx_tmpdir() as tx_out_dir:
run_message = "Running Bismark aligner on %s and %s" % (fastq_file, ref_file)
do.run(cmd.format(**locals()), run_message, None)
shutil.move(tx_out_dir, out_dir)
raw_bam = glob.glob(out_dir + "/*bismark*bt2*bam")
# don't process bam in the bismark pipeline!
utils.symlink_plus(raw_bam[0], final_out)
data = dd.set_work_bam(data, final_out)
data["bam_report"] = glob.glob(os.path.join(out_dir, "*report.txt"))[0]
data = dd.update_summary_qc(data, "bismark", base=data["bam_report"])
return data
def _process_bam(bam_file, in_fastq, sample, reference, config):
broad_runner = broad.runner_from_config(config)
names = {'rg': in_fastq, 'library': 'WGBS_LIB', 'pl': 'Illumina', 'pu': 'R1', 'sm': in_fastq, 'sample': sample}
out_fix_bam = broad_runner.run_fn("picard_fix_rgs", bam_file, names)
order_bam = utils.append_stem(out_fix_bam, "_order")
broad_runner.run_fn("picard_reorder", out_fix_bam, reference, order_bam)
bam.index(order_bam, config)
# order_bam = _set_quality(order_bam)
# bam.index(order_bam, config)
return order_bam
def remap_index_fn(ref_file):
"""Map sequence references to equivalent bismark indexes
"""
return os.path.join(os.path.dirname(os.path.dirname(ref_file)), "bismark")
def _set_quality(in_bam):
"""
change all quality to 255
"""
bam = pysam.AlignmentFile(in_bam, "rb")
out_file = utils.append_stem(in_bam, "_normqual")
if file_exists(out_file):
return out_file
with file_transaction(out_file) as tx_out:
with pysam.AlignmentFile(tx_out, "wb", template=bam) as out_handle:
for read in bam.fetch():
read.mapping_quality = 255
out_handle.write(read)
return out_file
def index(ref_file, out_dir, data):
"""Create a bismark index in the defined reference directory.
"""
(ref_dir, local_file) = os.path.split(ref_file)
gtf_file = dd.get_transcriptome_gtf(data, default=dd.get_gtf_file(data))
bismark = config_utils.find_program("bismark", data["config"])
if not utils.file_exists(gtf_file):
raise ValueError("%s not found, could not create a bismark index." % (gtf_file))
if not utils.file_exists(out_dir):
with tx_tmpdir(data, os.path.dirname(out_dir)) as tx_out_dir:
num_cores = dd.get_cores(data)
other_opts = config_utils.get_resources("bismark", data["config"]).get("options", [])
other_opts = " ".join([str(x) for x in other_opts]).strip()
cmd = "{bismark} {other_opts} --bowtie2 -p {num_cores} -n 1 -o {tx_out_dir} --basename {sample} --unmapped {ref_file} {in_fastq}"
            do.run(cmd.format(**locals()), "Index bismark")
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
shutil.move(tx_out_dir, out_dir)
return out_dir
def calculate_bismark_instances(cores, memory):
"""
    calculate number of parallel bismark instances to run, based on the discussion here
https://github.com/FelixKrueger/Bismark/issues/96
cores and memory here are the maximum amounts available for us to use
"""
BISMARK_CORES = 1
BOWTIE_CORES_PER_INSTANCE = 2
SAMTOOLS_CORES_PER_INSTANCE = 1
CORES_PER_INSTANCE = BOWTIE_CORES_PER_INSTANCE + SAMTOOLS_CORES_PER_INSTANCE
GENOME_MEMORY_GB = 12
INSTANCE_MEMORY_GB = 10
available_instance_memory = memory - GENOME_MEMORY_GB
instances_in_memory = max(available_instance_memory / INSTANCE_MEMORY_GB, 1)
available_instance_cores = cores - BISMARK_CORES
instances_in_cores = max(available_instance_cores / CORES_PER_INSTANCE, 1)
instances = int(min(instances_in_memory, instances_in_cores))
logger.info(f"{cores} cores and {memory} memory are available. Spinning up {instances} instances of bismark.")
return instances
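# Worked example (numbers assumed): with cores=16 and memory=64 (GB), memory
# allows (64 - 12) / 10 = 5.2 instances and cores allow (16 - 1) / 3 = 5, so
# min(5.2, 5) -> 5 parallel bismark instances are launched.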
|
the-stack_0_3916 | # -*- coding: utf-8 -*-
"""
@Time:2020/8/20 22:25
@Auth"JunLin615
@File:example1.py
@IDE:PyCharm
@Motto:With the wind light cloud light mentality, do insatiable things
@email:[email protected]
"""
from shiningspectrum import pretreatment
from shiningspectrum import database
import os
import matplotlib.pyplot as plt
import numpy as np
from shiningspectrum import shiningnoodles
import time
def main():
time_start = time.time()
data_path=os.getcwd()+"\\Prepare incoming data"
    file_data1 = database.read_file(data_path, "氯仿.txt")  # CAS 67-66-3 (chloroform)
    file_data2 = database.read_file(data_path, "甲苯.txt")  # CAS 108-88-3 (toluene)
list_spectrum1 = database.data_extraction(file_data1)
list_spectrum2 = database.data_extraction(file_data2)
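    # Build a synthetic mixture spectrum by summing the two pure-compound
    # intensities point by point (the two files are assumed to share the same
    # wavenumber axis).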
list_spectrum_compound = [list_spectrum1[0],list(np.array(list_spectrum1[1])+np.array(list_spectrum2[1]))]
plt.figure(figsize=(10,10))
plt.plot(list_spectrum_compound[0],list_spectrum_compound[1],"k-",label="list_spectrum_compound")
plt.plot(list_spectrum1[0],list_spectrum1[1],"r-",label="list_spectrum1")
plt.plot(list_spectrum2[0],list_spectrum2[1],"b-",label="list_spectrum2")
plt.legend()
#plt.show()
all_spectrum = database.read_all("raman_database")
list_of_compounds = shiningnoodles.shining2noodles(all_spectrum)
#1.1
    #unknown_x, unknown_y = shiningnoodles.combine_spectra(list_of_compounds[1], list_of_compounds[3])  # noodles interpolates the data, which slows down execution.
#__
#1.2
unknown_x = np.asarray(list_spectrum_compound[0])
unknown_y = np.asarray(list_spectrum_compound[1])
#__
    x_data, y_data, y_base = pretreatment.autbaseline(unknown_x, unknown_y, deg=4, max_it=200, tol=None)  # reimplemented in shining
unknow_compound = {"title":"unkonw","x":x_data,"y":y_data}
#2.1
    #A=shiningnoodles.component_testing(peak_algorithm = "noodles")  # "shining" or "noodles": the former is faster, the latter more accurate.
#2.2
A = shiningnoodles.component_testing(peak_algorithm="shining")
print("准备启动多进程")
unkonw_peak_center, unknown_peak_assignments, percentages = A.peak_assignment(unknow_compound, list_of_compounds)
cnames = {
'aqua': '#00FFFF',
'aquamarine': '#7FFFD4',
'azure': '#F0FFFF',
'blue': '#0000FF',
'blueviolet': '#8A2BE2',
'brown': '#A52A2A',
'burlywood': '#DEB887',
'cadetblue': '#5F9EA0',
'chartreuse': '#7FFF00',
'chocolate': '#D2691E',
'coral': '#FF7F50',
'cornflowerblue': '#6495ED',
'cornsilk': '#FFF8DC',
'crimson': '#DC143C',
'cyan': '#00FFFF',
'darkblue': '#00008B',
'darkcyan': '#008B8B',
'darkgoldenrod': '#B8860B',
'darkgray': '#A9A9A9',
'darkgreen': '#006400',
'darkkhaki': '#BDB76B',
'darkmagenta': '#8B008B',
'darkolivegreen': '#556B2F',
'darkorange': '#FF8C00',
'darkorchid': '#9932CC',
'darkred': '#8B0000',
'darksalmon': '#E9967A',
'darkseagreen': '#8FBC8F',
'darkslateblue': '#483D8B',
'darkslategray': '#2F4F4F',
'darkturquoise': '#00CED1',
'darkviolet': '#9400D3',
'deeppink': '#FF1493',
'deepskyblue': '#00BFFF',
'dimgray': '#696969',
'dodgerblue': '#1E90FF',
'firebrick': '#B22222',
'floralwhite': '#FFFAF0',
'forestgreen': '#228B22',
'fuchsia': '#FF00FF',
'gainsboro': '#DCDCDC',
'ghostwhite': '#F8F8FF',
'gold': '#FFD700',
'goldenrod': '#DAA520',
'gray': '#808080',
'green': '#008000',
'greenyellow': '#ADFF2F',
'honeydew': '#F0FFF0',
'hotpink': '#FF69B4',
'indianred': '#CD5C5C',
'indigo': '#4B0082',
'ivory': '#FFFFF0',
'khaki': '#F0E68C',
'lavender': '#E6E6FA',
'lavenderblush': '#FFF0F5',
'lawngreen': '#7CFC00',
'lemonchiffon': '#FFFACD',
'lightblue': '#ADD8E6',
'lightcoral': '#F08080',
'lightcyan': '#E0FFFF',
'lightgoldenrodyellow': '#FAFAD2',
'lightgreen': '#90EE90',
'lightgray': '#D3D3D3',
'lightpink': '#FFB6C1',
'lightsalmon': '#FFA07A',
'lightseagreen': '#20B2AA',
'lightskyblue': '#87CEFA',
'lightslategray': '#778899',
'lightsteelblue': '#B0C4DE',
'lightyellow': '#FFFFE0',
'lime': '#00FF00',
'limegreen': '#32CD32',
'linen': '#FAF0E6',
'magenta': '#FF00FF',
'maroon': '#800000',
'mediumaquamarine': '#66CDAA',
'mediumblue': '#0000CD',
'mediumorchid': '#BA55D3',
'mediumpurple': '#9370DB',
'mediumseagreen': '#3CB371',
'mediumslateblue': '#7B68EE',
'mediumspringgreen': '#00FA9A',
'mediumturquoise': '#48D1CC',
'mediumvioletred': '#C71585',
'midnightblue': '#191970',
'mintcream': '#F5FFFA',
'mistyrose': '#FFE4E1',
'moccasin': '#FFE4B5',
'navajowhite': '#FFDEAD',
'navy': '#000080',
'oldlace': '#FDF5E6',
'olive': '#808000',
'olivedrab': '#6B8E23',
'orange': '#FFA500',
'orangered': '#FF4500',
'orchid': '#DA70D6',
'palegoldenrod': '#EEE8AA',
'palegreen': '#98FB98',
'paleturquoise': '#AFEEEE',
'palevioletred': '#DB7093',
'papayawhip': '#FFEFD5',
'peachpuff': '#FFDAB9',
'peru': '#CD853F',
'pink': '#FFC0CB',
'plum': '#DDA0DD',
'powderblue': '#B0E0E6',
'purple': '#800080',
'red': '#FF0000',
'rosybrown': '#BC8F8F',
'royalblue': '#4169E1',
'saddlebrown': '#8B4513',
'salmon': '#FA8072',
'sandybrown': '#FAA460',
'seagreen': '#2E8B57',
'seashell': '#FFF5EE',
'sienna': '#A0522D',
'silver': '#C0C0C0',
'skyblue': '#87CEEB',
'slateblue': '#6A5ACD',
'slategray': '#708090',
'snow': '#FFFAFA',
'springgreen': '#00FF7F',
'steelblue': '#4682B4',
'tan': '#D2B48C',
'teal': '#008080',
'thistle': '#D8BFD8',
'tomato': '#FF6347',
'turquoise': '#40E0D0',
'violet': '#EE82EE',
'wheat': '#F5DEB3',
'white': '#FFFFFF',
'whitesmoke': '#F5F5F5',
'yellow': '#FFFF00',
'yellowgreen': '#9ACD32'}
# colors = ['b', 'r', 'g', 'c', 'm', 'y', 'b']
colors = list(cnames.keys())
# fig = plt.figure(figsize=(10, 4), dpi=300)
time_end = time.time()
time_time = time_end - time_start
    title_s = 'Elapsed time {}. The sample to be tested contains: '.format(time_time)
    lower_confidence_limit = 50  # values greater than this threshold are considered present; values less than or equal to it are not.
for key in percentages:
if percentages[key] > lower_confidence_limit:
title_s = title_s + key + ';'
plt.figure(figsize=(10, 10))
plt.plot(unknown_x, unknown_y, color='black', label='Unknown Spectrum')
for i, _ in enumerate(unkonw_peak_center):
plt.axvline(x=unkonw_peak_center[i], color=colors[i],
label=unknown_peak_assignments[i],
linestyle='--')
plt.legend(loc=0, framealpha=1)
plt.xlabel('Wavenumber (cm$^{-1}$)', fontsize=12)
plt.ylabel('Counts', fontsize=12)
plt.ylim(unknown_y.min(), unknown_y.max())
plt.xlim(unknown_x.min(), unknown_x.max())
plt.title(title_s)
plt.show()
print(percentages)
if __name__ == '__main__':
main() |
the-stack_0_3918 | # Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training loops for DP-FTRL."""
import os.path
import pprint
import random
import time
from typing import Any, Callable, Dict, List, Optional, Tuple
from absl import logging
import tensorflow as tf
import tensorflow_federated as tff
from dp_ftrl import dp_fedavg
from utils import utils_impl
from tensorboard.plugins.hparams import api as hp
def _setup_outputs(root_output_dir: str, experiment_name: str,
hparam_dict: Dict[str, Any]):
"""Set up directories for experiment loops, write hyperparameters to disk."""
if not experiment_name:
raise ValueError('experiment_name must be specified.')
program_state_dir = os.path.join(root_output_dir, 'checkpoints',
experiment_name)
program_state_mngr = tff.program.FileProgramStateManager(program_state_dir)
logging_mngr = tff.program.LoggingReleaseManager()
results_dir = os.path.join(root_output_dir, 'results', experiment_name)
csv_file = os.path.join(results_dir, 'experiment.metrics.csv')
metrics_mngr = tff.program.CSVFileReleaseManager(
file_path=csv_file, key_fieldname='round_num')
summary_logdir = os.path.join(root_output_dir, 'logdir', experiment_name)
tensorboard_mngr = tff.program.TensorBoardReleaseManager(summary_logdir)
if hparam_dict:
summary_writer = tf.summary.create_file_writer(summary_logdir)
hparam_dict['metrics_file'] = csv_file
hparams_file = os.path.join(results_dir, 'hparams.csv')
utils_impl.atomic_write_series_to_csv(hparam_dict, hparams_file)
with summary_writer.as_default():
hp.hparams({k: v for k, v in hparam_dict.items() if v is not None})
logging.info('Writing...')
logging.info(' program state to: %s', program_state_dir)
logging.info(' metrics csv to: %s', csv_file)
logging.info(' summaries to: %s', summary_logdir)
return program_state_mngr, [logging_mngr, metrics_mngr, tensorboard_mngr]
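# Resulting layout under `root_output_dir`, as derived from the paths above:
#   checkpoints/<experiment_name>/   program state used for restarts
#   results/<experiment_name>/       experiment.metrics.csv (and hparams.csv when
#                                    hparam_dict is given)
#   logdir/<experiment_name>/        TensorBoard summaries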
def _write_metrics(metrics_mngrs, metrics, round_num):
"""Atomic metrics writer which inlines logic from MetricsHook class."""
if not isinstance(metrics, dict):
raise TypeError('metrics should be type `dict`.')
if not isinstance(round_num, int):
raise TypeError('round_num should be type `int`.')
logging.info('Metrics at round {:d}:\n{!s}'.format(round_num,
pprint.pformat(metrics)))
for metrics_mngr in metrics_mngrs:
metrics_mngr.release(metrics, round_num)
def run(
iterative_process: tff.templates.IterativeProcess,
client_datasets_fn: Callable[[int, int], Tuple[List, int]], # pylint: disable=g-bare-generic
validation_fn: Callable[[Any], Dict[str, float]],
total_epochs: int,
total_rounds: int,
experiment_name: str,
train_eval_fn: Optional[Callable[[Any], Dict[str, float]]] = None,
test_fn: Optional[Callable[[Any], Dict[str, float]]] = None,
root_output_dir: Optional[str] = '/tmp/fed_opt',
hparam_dict: Optional[Dict[str, Any]] = None,
rounds_per_eval: Optional[int] = 1,
rounds_per_checkpoint: Optional[int] = 50,
rounds_per_train_eval: Optional[int] = 100,
server_state_epoch_update_fn: Optional[Callable[
[dp_fedavg.ServerState], dp_fedavg.ServerState]] = None):
"""Runs federated training for a given `tff.templates.IterativeProcess`.
We assume that the iterative process has the following functional type
signatures:
* `initialize`: `( -> S@SERVER)` where `S` represents the server state.
* `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
represents the server state, `{B*}` represents the client datasets,
and `T` represents a python `Mapping` object.
Args:
iterative_process: A `tff.templates.IterativeProcess` instance to run.
client_datasets_fn: Function accepts integer arguments (the round number and
      the epoch) and returns a tuple of a list of client datasets to use as data
      for that round, and the updated epoch index.
validation_fn: A callable accepting the `model` attribute of the iterative
process state and returning a dict of evaluation metrics. Used to compute
validation metrics throughout the training process.
    total_epochs: Number of total epochs if using `ClientIDShuffler` to shuffle
clients. Use 0 when sampling clients and control by `total_rounds`.
total_rounds: The number of federated training rounds to perform. If
`ClientIDShuffler` is used for `client_datasets_fn`, the total rounds will
take the minimum of `total_rounds` and rounds_per_epoch*`total_epochs`.
experiment_name: The name of the experiment being run. This will be appended
to the `root_output_dir` for purposes of writing outputs.
train_eval_fn: An optional callable accepting the `model` attribute of the
iterative process state and returning a dict of evaluation metrics. Used
to compute training metrics over the entire training dataset throughout
the course of the iterative process. If set to `None`, no such evaluation
is done.
test_fn: An optional callable accepting the `model` attribute of the
iterative process state and returning a dict of test metrics. Used to
compute test metrics at the end of the training process.
root_output_dir: The name of the root output directory for writing
experiment outputs.
hparam_dict: An optional dictionary specifying hyperparameters of the
experiment. If provided, the hyperparameters will be written to CSV.
rounds_per_eval: How often to compute validation metrics.
rounds_per_checkpoint: How often to checkpoint the iterative process state.
If you expect the job to restart frequently, this should be small. If no
interruptions are expected, this can be made larger.
rounds_per_train_eval: How often to compute metrics over the entire training
dataset. Note that this is only done if a `train_eval_fn` argument is
supplied.
    server_state_epoch_update_fn: A function to update the `ServerState` outside
of TFF iterative process. It is called at the beginning of each epoch
traversing all the clients. Used to restart tree for FTRL algorithm.
Returns:
The final `state` of the iterative process after training.
"""
if not isinstance(iterative_process, tff.templates.IterativeProcess):
raise TypeError('iterative_process should be type '
'`tff.templates.IterativeProcess`.')
if not callable(client_datasets_fn):
raise TypeError('client_datasets_fn should be callable.')
if not callable(validation_fn):
raise TypeError('validation_fn should be callable.')
if train_eval_fn is not None and not callable(train_eval_fn):
raise TypeError('train_eval_fn should be callable.')
if test_fn is not None and not callable(test_fn):
raise TypeError('test_fn should be callable.')
logging.info('Starting iterative_process training loop...')
initial_state = iterative_process.initialize()
program_state_mngr, metrics_mngrs = _setup_outputs(root_output_dir,
experiment_name,
hparam_dict)
logging.info('Asking checkpoint manager to load checkpoint.')
state, round_num = program_state_mngr.load_latest(initial_state)
# TODO(b/172867399): we disable restarting from checkpoint when shuffling
# client IDs by epochs. Non-trivial amount of change has to be made to make
  # sure disjoint clients are used across rounds after a restart. A better design
  # of the client dataset generator, using a random seed instead of having
  # `client_datasets_fn` accept `epoch` as an argument, could help.
epoch = 0 if total_epochs > 0 else -1
if state is None or total_epochs > 0:
state = initial_state
round_num = 0
logging.info('Initializing experiment from scratch at round %d.', round_num)
else:
logging.info('Restarted from checkpoint round %d', round_num)
round_num += 1 # Increment to avoid overwriting current checkpoint
loop_start_time = time.time()
while epoch < total_epochs and round_num < total_rounds:
data_prep_start_time = time.time()
prev_epoch = epoch
federated_train_data, epoch = client_datasets_fn(round_num, epoch)
# Server state is updated outside of TFF iterative process, which is used
# to restart the tree in DP-FTRL.
if server_state_epoch_update_fn is not None and epoch == prev_epoch + 1:
logging.info('External server state update at epoch %d', epoch)
state = server_state_epoch_update_fn(state)
train_metrics = {
'prepare_datasets_secs': time.time() - data_prep_start_time
}
training_start_time = time.time()
state, _ = iterative_process.next(state, federated_train_data)
train_metrics['training_secs'] = time.time() - training_start_time
logging.info('Round {:2d}, {:.2f}s per round in average.'.format(
round_num, (time.time() - loop_start_time) / (round_num + 1)))
if (round_num % rounds_per_checkpoint == 0 or
round_num == total_rounds - 1):
save_checkpoint_start_time = time.time()
try:
program_state_mngr.save(state, round_num)
except Exception: # pylint: disable=broad-except
logging.info('Checkpoint saving exception: %s', Exception)
train_metrics['save_checkpoint_secs'] = (
time.time() - save_checkpoint_start_time)
metrics = {'train': train_metrics}
if train_eval_fn and round_num % rounds_per_train_eval == 0:
# Compute metrics over the entire training dataset
train_eval_start = time.time()
train_eval_metrics = train_eval_fn(state.model)
train_eval_metrics['evaluate_secs'] = time.time() - train_eval_start
metrics['train_eval'] = train_eval_metrics
if round_num % rounds_per_eval == 0:
# Compute validation metrics
evaluate_start_time = time.time()
validation_metrics = validation_fn(state.model)
validation_metrics['evaluate_secs'] = time.time() - evaluate_start_time
metrics['eval'] = validation_metrics
_write_metrics(metrics_mngrs, metrics, round_num)
round_num += 1
# Final metrics evaluation once the training has completed
metrics = {}
# Validation metrics
evaluate_start_time = time.time()
validation_metrics = validation_fn(state.model)
validation_metrics['evaluate_secs'] = time.time() - evaluate_start_time
metrics['eval'] = validation_metrics
# Training set metrics
if train_eval_fn:
train_eval_start = time.time()
train_eval_metrics = train_eval_fn(state.model)
train_eval_metrics['evaluate_secs'] = time.time() - train_eval_start
metrics['train_eval'] = train_eval_metrics
# Test set metrics
if test_fn:
test_start_time = time.time()
test_metrics = test_fn(state.model)
test_metrics['evaluate_secs'] = time.time() - test_start_time
metrics['test'] = test_metrics
_write_metrics(metrics_mngrs, metrics, round_num)
return state
class ClientIDShuffler(object):
"""Shuffling clients in federated learning for DP-FTRL."""
def __init__(self,
clients_per_round: int,
client_data: tff.simulation.datasets.ClientData,
drop_remainder: bool = True):
self._client_ids = list(client_data.client_ids)
self._clients_per_round = clients_per_round
self._drop_remainder = drop_remainder
self._epoch = 0
self._start_index = 0
def _shuffle_client_ids(self):
random.shuffle(self._client_ids)
self._start_index = 0
self._epoch += 1
def sample_client_ids(self, round_num: int, epoch: int) -> Tuple[List, int]: # pylint: disable=g-bare-generic
"""Returns sampled client IDs and the updated epoch index.
This function can be used as `client_datasets_fn` in `training_loop.run`.
Args:
round_num: the current round index.
epoch: the current epoch index.
"""
if epoch != self._epoch:
raise ValueError(
'Epoch index for client shuffling does not match: {} vs {}'.format(
epoch, self._epoch))
end_index = min(self._start_index + self._clients_per_round,
len(self._client_ids))
sampled_ids = self._client_ids[self._start_index:end_index]
skip_remainder_flag = (
self._drop_remainder and
(end_index + self._clients_per_round) > len(self._client_ids))
if skip_remainder_flag or end_index >= len(self._client_ids):
logging.info(
'shuffling clients at epoch %d, round %d, client start index %d',
epoch, round_num, self._start_index)
self._shuffle_client_ids()
else:
self._start_index = end_index
return sampled_ids, self._epoch
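# Sketch of how the pieces fit together (the process and evaluation builders are
# placeholders, not defined in this module):
#
#   shuffler = ClientIDShuffler(clients_per_round=100, client_data=train_client_data)
#   state = run(iterative_process=my_dp_ftrl_process,
#               client_datasets_fn=shuffler.sample_client_ids,
#               validation_fn=my_validation_fn,
#               total_epochs=1, total_rounds=100,
#               experiment_name='dp_ftrl_experiment',
#               server_state_epoch_update_fn=my_tree_restart_fn)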
|
the-stack_0_3919 | # -*- coding: utf-8 -*-
#
# Copyright 2018-2022 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Creating external connectors
"""
import json
import sys
from .world import world, setup_module, teardown_module, show_doc, show_method
from . import create_source_steps as source_create
from . import create_external_steps as connector_create
class TestExternalConnector(object):
def setup(self):
"""
Debug information
"""
print("\n-------------------\nTests in: %s\n" % __name__)
def teardown(self):
"""
Debug information
"""
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
def test_scenario1(self):
"""
Scenario: Successfully creating an external connector:
Given I create an external connector from environment vars
And I wait until the external connector is ready less than <conn_wait> secs
And I update the external connector with args <args>
And the external connector has arguments <args>
# And I create a source from the external connector id
# Then the source has arguments "<source_args>"
"""
show_doc(self.test_scenario1)
headers = ["conn_wait", "args"]
examples = [
['20', '{"name": "my connector name"}']]
for example in examples:
example = dict(zip(headers, example))
show_method(self, sys._getframe().f_code.co_name, example)
connector_create.i_create_external_connector(self)
connector_create.the_external_connector_is_finished(
self, example["conn_wait"])
connector_create.i_update_external_connector_with(
self, example["args"])
connector_create.the_external_connector_is_finished(
self, example["conn_wait"])
connector_create.external_connector_has_args(
example["args"])
"""
args = {"source": "postgresql",
"externalconnector_id": world.external_connector["resource"][18:],
"query": "SELECT * FROM public.iris"}
source_create.i_create_using_connector(self, \
{"source": "postgresql",
"externalconnector_id": world.external_connector["resource"][18:],
"query": "SELECT * FROM public.iris"})
source_create.the_source_is_finished(self, example[3])
source_create.source_has_args(self, json.dumps({"external_data": args}))
"""
|
the-stack_0_3921 | # -*- coding: utf-8 -*-
"""
@date: 2021/7/20 下午10:27
@file: operation.py
@author: zj
@description:
"""
import os
import torch
from zcls.config.key_word import KEY_OUTPUT
from slim.config import cfg
from slim.model.build import build_model
from slim.prune.build import build_prune
from slim.util.profile import computer_flops_and_params, compute_model_time
def load_model(config_file, data_shape=(1, 3, 224, 224), device=torch.device('cpu')):
cfg.merge_from_file(config_file)
model = build_model(cfg).to(device)
# print(model)
computer_flops_and_params(model)
compute_model_time(data_shape, model, device)
return model, cfg.MODEL.RECOGNIZER.NAME
def prune_model(arch_name, model, ratio=0.2, minimum_channels=8, divisor=8):
pruned_ratio, threshold = build_prune(model,
model_name=arch_name,
ratio=ratio,
minimum_channels=minimum_channels,
divisor=divisor,
)
computer_flops_and_params(model)
compute_model_time((1, 3, 224, 224), model, torch.device('cpu'))
return model, pruned_ratio, threshold
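# Illustrative end-to-end sketch (config path, ratio, and output path are
# assumptions):
#
#   model, arch_name = load_model('configs/resnet18.yaml')
#   model, pruned_ratio, threshold = prune_model(arch_name, model, ratio=0.2)
#   save_model(model, 'outputs/resnet18_pruned.pth')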
def save_model(model, model_name):
data = torch.randn(1, 3, 224, 224)
res = model(data)[KEY_OUTPUT]
print(res.shape)
output_dir = os.path.split(os.path.abspath(model_name))[0]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
torch.save(model, model_name) |
the-stack_0_3923 | from __future__ import division
import numpy as np
from warnings import warn
__all__ = ['img_as_float', 'img_as_int', 'img_as_uint', 'img_as_ubyte',
'img_as_bool', 'dtype_limits']
dtype_range = {np.bool_: (False, True),
np.bool8: (False, True),
np.uint8: (0, 255),
np.uint16: (0, 65535),
np.int8: (-128, 127),
np.int16: (-32768, 32767),
np.float32: (-1, 1),
np.float64: (-1, 1)}
integer_types = (np.uint8, np.uint16, np.int8, np.int16)
_supported_types = (np.bool_, np.bool8,
np.uint8, np.uint16, np.uint32,
np.int8, np.int16, np.int32,
np.float32, np.float64)
if np.__version__ >= "1.6.0":
dtype_range[np.float16] = (-1, 1)
_supported_types += (np.float16, )
def dtype_limits(image, clip_negative=True):
"""Return intensity limits, i.e. (min, max) tuple, of the image's dtype.
Parameters
----------
image : ndarray
Input image.
clip_negative : bool
If True, clip the negative range (i.e. return 0 for min intensity)
even if the image dtype allows negative values.
"""
imin, imax = dtype_range[image.dtype.type]
if clip_negative:
imin = 0
return imin, imax
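# For example, dtype_limits(np.zeros((2, 2), dtype=np.uint8)) returns (0, 255),
# while an int8 image with clip_negative=False yields (-128, 127).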
def convert(image, dtype, force_copy=False, uniform=False):
"""
Convert an image to the requested data-type.
Warnings are issued in case of precision loss, or when negative values
are clipped during conversion to unsigned integer types (sign loss).
Floating point values are expected to be normalized and will be clipped
to the range [0.0, 1.0] or [-1.0, 1.0] when converting to unsigned or
signed integers respectively.
Numbers are not shifted to the negative side when converting from
unsigned to signed integer types. Negative values will be clipped when
converting to unsigned integers.
Parameters
----------
image : ndarray
Input image.
dtype : dtype
Target data-type.
force_copy : bool
Force a copy of the data, irrespective of its current dtype.
uniform : bool
Uniformly quantize the floating point range to the integer range.
By default (uniform=False) floating point values are scaled and
rounded to the nearest integers, which minimizes back and forth
conversion errors.
References
----------
(1) DirectX data conversion rules.
http://msdn.microsoft.com/en-us/library/windows/desktop/dd607323%28v=vs.85%29.aspx
(2) Data Conversions.
In "OpenGL ES 2.0 Specification v2.0.25", pp 7-8. Khronos Group, 2010.
(3) Proper treatment of pixels as integers. A.W. Paeth.
In "Graphics Gems I", pp 249-256. Morgan Kaufmann, 1990.
(4) Dirty Pixels. J. Blinn.
In "Jim Blinn's corner: Dirty Pixels", pp 47-57. Morgan Kaufmann, 1998.
"""
image = np.asarray(image)
dtypeobj = np.dtype(dtype)
dtypeobj_in = image.dtype
dtype = dtypeobj.type
dtype_in = dtypeobj_in.type
if dtype_in == dtype:
if force_copy:
image = image.copy()
return image
if not (dtype_in in _supported_types and dtype in _supported_types):
raise ValueError("can not convert %s to %s." % (dtypeobj_in, dtypeobj))
def sign_loss():
warn("Possible sign loss when converting negative image of type "
"%s to positive image of type %s." % (dtypeobj_in, dtypeobj))
def prec_loss():
warn("Possible precision loss when converting from "
"%s to %s" % (dtypeobj_in, dtypeobj))
def _dtype(itemsize, *dtypes):
# Return first of `dtypes` with itemsize greater than `itemsize`
return next(dt for dt in dtypes if itemsize < np.dtype(dt).itemsize)
def _dtype2(kind, bits, itemsize=1):
# Return dtype of `kind` that can store a `bits` wide unsigned int
c = lambda x, y: x <= y if kind == 'u' else x < y
s = next(i for i in (itemsize, ) + (2, 4, 8) if c(bits, i * 8))
return np.dtype(kind + str(s))
def _scale(a, n, m, copy=True):
# Scale unsigned/positive integers from n to m bits
# Numbers can be represented exactly only if m is a multiple of n
# Output array is of same kind as input.
kind = a.dtype.kind
if n == m:
return a.copy() if copy else a
elif n > m:
# downscale with precision loss
prec_loss()
if copy:
b = np.empty(a.shape, _dtype2(kind, m))
np.floor_divide(a, 2**(n - m), out=b, dtype=a.dtype,
casting='unsafe')
return b
else:
a //= 2**(n - m)
return a
elif m % n == 0:
# exact upscale to a multiple of n bits
if copy:
b = np.empty(a.shape, _dtype2(kind, m))
np.multiply(a, (2**m - 1) // (2**n - 1), out=b, dtype=b.dtype)
return b
else:
a = np.array(a, _dtype2(kind, m, a.dtype.itemsize), copy=False)
a *= (2**m - 1) // (2**n - 1)
return a
else:
# upscale to a multiple of n bits,
# then downscale with precision loss
prec_loss()
o = (m // n + 1) * n
if copy:
b = np.empty(a.shape, _dtype2(kind, o))
np.multiply(a, (2**o - 1) // (2**n - 1), out=b, dtype=b.dtype)
b //= 2**(o - m)
return b
else:
a = np.array(a, _dtype2(kind, o, a.dtype.itemsize), copy=False)
a *= (2**o - 1) // (2**n - 1)
a //= 2**(o - m)
return a
kind = dtypeobj.kind
kind_in = dtypeobj_in.kind
itemsize = dtypeobj.itemsize
itemsize_in = dtypeobj_in.itemsize
if kind == 'b':
# to binary image
if kind_in in "fi":
sign_loss()
prec_loss()
return image > dtype_in(dtype_range[dtype_in][1] / 2)
if kind_in == 'b':
# from binary image, to float and to integer
result = image.astype(dtype)
if kind != 'f':
result *= dtype(dtype_range[dtype][1])
return result
if kind in 'ui':
imin = np.iinfo(dtype).min
imax = np.iinfo(dtype).max
if kind_in in 'ui':
imin_in = np.iinfo(dtype_in).min
imax_in = np.iinfo(dtype_in).max
if kind_in == 'f':
if np.min(image) < -1.0 or np.max(image) > 1.0:
raise ValueError("Images of type float must be between -1 and 1.")
if kind == 'f':
# floating point -> floating point
if itemsize_in > itemsize:
prec_loss()
return image.astype(dtype)
# floating point -> integer
prec_loss()
# use float type that can represent output integer type
image = np.array(image, _dtype(itemsize, dtype_in,
np.float32, np.float64))
if not uniform:
if kind == 'u':
image *= imax
else:
image *= imax - imin
image -= 1.0
image /= 2.0
np.rint(image, out=image)
np.clip(image, imin, imax, out=image)
elif kind == 'u':
image *= imax + 1
np.clip(image, 0, imax, out=image)
else:
image *= (imax - imin + 1.0) / 2.0
np.floor(image, out=image)
np.clip(image, imin, imax, out=image)
return image.astype(dtype)
if kind == 'f':
# integer -> floating point
if itemsize_in >= itemsize:
prec_loss()
# use float type that can exactly represent input integers
image = np.array(image, _dtype(itemsize_in, dtype,
np.float32, np.float64))
if kind_in == 'u':
image /= imax_in
# DirectX uses this conversion also for signed ints
#if imin_in:
# np.maximum(image, -1.0, out=image)
else:
image *= 2.0
image += 1.0
image /= imax_in - imin_in
return image.astype(dtype)
if kind_in == 'u':
if kind == 'i':
# unsigned integer -> signed integer
image = _scale(image, 8 * itemsize_in, 8 * itemsize - 1)
return image.view(dtype)
else:
# unsigned integer -> unsigned integer
return _scale(image, 8 * itemsize_in, 8 * itemsize)
if kind == 'u':
# signed integer -> unsigned integer
sign_loss()
image = _scale(image, 8 * itemsize_in - 1, 8 * itemsize)
result = np.empty(image.shape, dtype)
np.maximum(image, 0, out=result, dtype=image.dtype, casting='unsafe')
return result
# signed integer -> signed integer
if itemsize_in > itemsize:
return _scale(image, 8 * itemsize_in - 1, 8 * itemsize - 1)
image = image.astype(_dtype2('i', itemsize * 8))
image -= imin_in
image = _scale(image, 8 * itemsize_in, 8 * itemsize, copy=False)
image += imin
return image.astype(dtype)
def img_as_float(image, force_copy=False):
"""Convert an image to double-precision floating point format.
Parameters
----------
image : ndarray
Input image.
force_copy : bool
Force a copy of the data, irrespective of its current dtype.
Returns
-------
out : ndarray of float64
Output image.
Notes
-----
The range of a floating point image is [0.0, 1.0] or [-1.0, 1.0] when
converting from unsigned or signed datatypes, respectively.
"""
return convert(image, np.float64, force_copy)
def img_as_uint(image, force_copy=False):
"""Convert an image to 16-bit unsigned integer format.
Parameters
----------
image : ndarray
Input image.
force_copy : bool
Force a copy of the data, irrespective of its current dtype.
Returns
-------
out : ndarray of uint16
Output image.
Notes
-----
Negative input values will be clipped.
Positive values are scaled between 0 and 65535.
"""
return convert(image, np.uint16, force_copy)
def img_as_int(image, force_copy=False):
"""Convert an image to 16-bit signed integer format.
Parameters
----------
image : ndarray
Input image.
force_copy : bool
Force a copy of the data, irrespective of its current dtype.
Returns
-------
    out : ndarray of int16
Output image.
Notes
-----
The values are scaled between -32768 and 32767.
If the input data-type is positive-only (e.g., uint8), then
the output image will still only have positive values.
"""
return convert(image, np.int16, force_copy)
def img_as_ubyte(image, force_copy=False):
"""Convert an image to 8-bit unsigned integer format.
Parameters
----------
image : ndarray
Input image.
force_copy : bool
Force a copy of the data, irrespective of its current dtype.
Returns
-------
out : ndarray of ubyte (uint8)
Output image.
Notes
-----
Negative input values will be clipped.
Positive values are scaled between 0 and 255.
"""
return convert(image, np.uint8, force_copy)
def img_as_bool(image, force_copy=False):
"""Convert an image to boolean format.
Parameters
----------
image : ndarray
Input image.
force_copy : bool
Force a copy of the data, irrespective of its current dtype.
Returns
-------
out : ndarray of bool (`bool_`)
Output image.
Notes
-----
The upper half of the input dtype's positive range is True, and the lower
half is False. All negative values (if present) are False.
"""
return convert(image, np.bool_, force_copy)
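# Minimal usage sketch (assumes an 8-bit input array): round-tripping through
# img_as_float and img_as_ubyte preserves the original [0, 255] values.
#   >>> arr = np.array([[0, 128, 255]], dtype=np.uint8)
#   >>> img_as_float(arr)                # values scaled into [0.0, 1.0]
#   >>> img_as_ubyte(img_as_float(arr))  # back to the same uint8 values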
|
the-stack_0_3925 | import atexit
import configparser
import telegram
import time
from binance.client import Client
from datetime import datetime
#Bot and config instances
config = None
telegram_bot = None
binance_bot = None
#General variables
refresh_rate = -1
chat_id = -1
time_started = -1
#Order-related variables
orders = []
send_open = False
send_closed = False
def start():
init()
process()
def init():
#Print starting string, and save the current time
print("Starting your bot...\n")
global time_started
time_started = str(datetime.now())
#Initialize the config parser and bots
initConfig()
initTelegram()
initBinance()
#Print and send success messages
print("\nBot started successfully... Beginning processing...\n")
telegram_bot.send_message(chat_id=chat_id, text=("Your bot instance (" + time_started + ") has started. Monitoring has begun."))
def initConfig():
#Initialize the config file
global config
config = configparser.ConfigParser()
config.read("config.ini")
#Raise an error if it cannot import the configuration datasets
if('GENERAL' in config):
global refresh_rate, send_open, send_closed
refresh_rate = config['GENERAL']['refresh_rate']
send_open = config['GENERAL'].getboolean('update_open')
send_closed = config['GENERAL'].getboolean('update_closed')
else:
raise ValueError("Cannot find the 'General' dataset in your config file.")
def initTelegram():
#Telegram
if('TELEGRAM' in config):
#Initialize the Telegram bot
global telegram_bot, chat_id
telegram_bot = telegram.Bot(token=config['TELEGRAM']['token'])
chat_id = config['TELEGRAM']['chat_id']
#Fetches and prints bot ID to ensure valid token
try:
print("Your Telegram API information is valid (Bot ID: {})!".format(telegram_bot.get_me().id))
except:
print("Your Telegram API information is invalid.")
else:
raise ValueError("Cannot find the 'Telegram' dataset in your config file.")
def initBinance():
#Binance
if('BINANCE' in config):
#Initialize the Binance bot
global binance_bot
binance_bot = Client(config['BINANCE']['key'], config['BINANCE']['secret'])
#Fetches your BTC address to test successful API information
btc_address = binance_bot.get_deposit_address(asset='BTC')
if(btc_address.get("success") == True):
print("Your Binance API information is valid!")
else:
print("Your Binance API information is invalid.")
else:
raise ValueError("Cannot find the 'Binance' dataset in your config file.")
def process():
while(1):
#Fetches all open orders
open_orders = binance_bot.get_open_orders()
#Iterate through all orders fetched from Binance and append any new orders
for order in open_orders:
if not order in orders:
addOrder(order)
#Iterate through all orders in our own list and remove any orders not on Binance anymore
for order in orders:
if not order in open_orders:
closeOrder(order)
        #Sleep for refresh_rate minutes (converted to seconds) before the next poll
time.sleep((int(refresh_rate)*60))
def addOrder(order):
#Add the order to the global list
global orders
orders.append(order)
#Send a message to Telegram if enabled in the config
if(send_open):
msg = "*{} Order Created*\n\n*Symbol*: {}\n*Price*: {}\n*Quantity*: {}".format(order.get("side").capitalize(), order.get("symbol"), order.get("price"), order.get("origQty"))
telegram_bot.send_message(chat_id=chat_id, text=msg, parse_mode=telegram.ParseMode.MARKDOWN)
def closeOrder(order):
#Remove the order from the global list
global orders
orders.remove(order)
#Send a message to Telegram if enabled in the config
if(send_closed):
msg = "*{} Order Closed*\n\n*Symbol*: {}\n*Price*: {}\n*Quantity*: {}".format(order.get("side").capitalize(), order.get("symbol"), order.get("price"), order.get("origQty"))
telegram_bot.send_message(chat_id=chat_id, text=msg, parse_mode=telegram.ParseMode.MARKDOWN)
@atexit.register
def exit():
#Send an "exiting bot" message before exiting script
telegram_bot.send_message(chat_id=chat_id, text=("Your bot instance (" + time_started + ") has exited. Monitoring has stopped."))
print("Bot has exited successfully...")
if(__name__ == "__main__"):
start() |
the-stack_0_3928 | import datetime
import logging
import requests
import auditUtils
import constants
import dbUtil
import rds_config
import restUtils
import tables
logger = logging.getLogger()
logger.setLevel(logging.INFO)
SUCCESS_RESPONSE = {"statusCode": 200}
def prepUserSettings(conn, chatId, userId, app):
setting = getUserSettings(conn, chatId, app)
# logger.info(setting)
if setting is None:
# logger.info("creating setting")
createSetting(conn, chatId, userId, app)
setting = getUserSettings(conn, chatId, app)
return setting
def getUserSettings(conn, chatId, app):
sql = "select * "
sql += " from " + tables.tgsetting
sql += " where chatId = %s and app=%s "
sql += " limit 1"
# logger.info(sql)
setting = dbUtil.getSingleRecordNoJsonWithConn(sql, conn, (chatId, app))
# logger.info(setting)
return setting
def createSetting(conn, chatId, userId, app):
sql = "insert into " + tables.tgsetting
sql += " (chatId, userId, app, lastUpdated) "
sql += " values (%s, %s, %s, %s) "
conn.cursor().execute(sql, (chatId, userId, app, datetime.datetime.now()))
conn.commit()
def reset(conn, chatId, app):
# conn = dbUtil.getConnection()
sql = "update " + tables.tgsetting
sql += " set address=null, favPools=null, addressAlias=null, "
sql += " lastUpdated=%s where chatId=%s and app=%s "
conn.cursor().execute(sql, (datetime.datetime.now(), chatId, app))
conn.commit()
# conn.close()
def updateSetting(conn, chatId, attribute, value, app):
logger.info("in update setting for attributes")
logger.info((chatId, attribute, value))
sql = "update " + tables.tgsetting
sql += " set " + attribute + "=%s, "
sql += " lastUpdated=%s where chatId=%s and app=%s "
logger.info(sql)
params = (value, datetime.datetime.now(), chatId, app)
# logger.info(params)
conn.cursor().execute(sql, params)
conn.commit()
def getParam(message, key, error):
    msg = message.strip()
    if not msg.startswith(key):
        return error
    poolMsg = msg[len(key):]
# logger.info(poolMsg)
return poolMsg.strip()
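# Example (hypothetical command): getParam("/alias my-node", "/alias", "error")
# strips the "/alias" prefix and returns "my-node".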
def getStakingNetworkInfo():
url = "https://hmny-t.co/networks/mainnet/staking_network_info"
return restUtils.callRestJson(url)
def getBidRanks(current):
response = getStakingNetworkInfo()
# logger.info(response)
if current:
table = response["table"]
return table
else:
table = response["live_table"]
return table
def getValByAddress(validators, address):
# logger.info("in getValByAddress: address: {})".format(address))
for val in validators:
if val["address"] == address:
return val
return None
def validateAndGetText(chatId, message, startTime):
if "text" not in message:
return None, missingTextError(chatId, message, startTime)
textCmd = str(message["text"])
# if not textCmd.startswith("/"):
# return None, SUCCESS_RESPONSE
if "@" in textCmd:
textCmd = textCmd.replace(constants.TG_BOT_NAME_1, "")
textCmd = textCmd.replace(constants.TG_BOT_NAME_2, "")
return textCmd, None
def missingTextError(chatId, message, startTime):
logger.info("processing missingTextError for message: ")
logger.info(message)
conn = dbUtil.getConnection()
auditUtils.auditBot(conn, chatId, "Missing Text", startTime)
conn.close()
response = "The bot appears to have hit a bug. Please contact the bot operator @bigb4ever \n"
respData = {"text": response.encode("utf8"), "chat_id": chatId}
url = rds_config.TG_BASE_URL + "/sendMessage?parse_mode=html"
requests.post(url, respData)
return SUCCESS_RESPONSE
def getAllAddressesForTxAlert(conn, app):
sql = "select address from " + tables.tgsetting
sql += " where app = %s and "
sql += " address is not null and notifyAddress='True'"
addresses = dbUtil.listResultsWithConn(sql, conn, app)
uniqueAddresses = set()
for addressDetails in addresses:
logger.info("processing address: {}".format(addressDetails))
address = addressDetails["address"]
addressList = address.split(",")
for value in addressList:
uniqueAddresses.add(value)
logger.info("after adding list ({}), unique addresses are: {}".format(addressList, uniqueAddresses))
logger.info("list of unique addresses is: {}".format(uniqueAddresses))
return uniqueAddresses
|
the-stack_0_3933 | import csv
from difflib import SequenceMatcher
import re
import string
import pandas as pd
import urllib.request
from bs4 import BeautifulSoup
from googlesearch import search
from collections import OrderedDict
#import unidecode
'''
['Twenty20 Internationals', 'One-Day Internationals', 'Twenty20','Tests 1st Innings','Tests 2nd Innings' ,
'minor tour','tour','Youth One-Day Internationals','Other Twenty20 matches','Other one-day/limited-overs matches',
'Women\'s Twenty20 Internationals','Women\'s One-Day Internationals','List A','First-class','Other matches']:
'''
def handleSubs(commentry,playerDict_n,teamKeys):
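    # Builds a stub scorecard row for a substitute fielder who only appears inside a
    # dismissal commentary string ("c (sub Name) b ..." / "run out [sub Name]"): the
    # bracketed name is extracted, the player's ESPNcricinfo profile is located via a
    # Google search, the bio paragraphs are scraped, and a dict shaped like the other
    # player rows is returned with the batting/bowling figures left as "-".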
name = ''
if commentry.strip().startswith("run out "):
name = ''.join(x for x in commentry if x in string.printable)
name = name.split('[')[1].split(']')[0]
else:
name = ''.join(x for x in commentry if x in string.printable)
try:
name = name.split('(')[1].split(')')[0]
except:
print('web error',name)
return None
## print('-',name)
playerInfo = {}
search_url = ''
url = ''
## page = ''
## soup = ''
## pees = ''
## try:
## for url in search(name + ' ESPN cricket',tld = 'co.in',lang = 'en', num = 1,stop = 1):
## search_url = url
## break
## except Exception as e:
## print(e)
## exit()
for url in search(name + ' ESPN cricket',tld = 'com', num = 1,stop = 1):
search_url = url
break
page = urllib.request.urlopen(search_url,timeout = 60)
## opener = urllib.build_opener()
## opener.addheaders = [('User-agent','Mozilla/5.0')]
## response = opener.open(search_url)
## page = response.read()
soup = BeautifulSoup(page,'html.parser')
pees = soup.find_all('p',class_='ciPlayerinformationtxt')
val = []
key = []
for pee in pees:
key.append(pee.find('b').get_text())
val.append(pee.find('span').get_text())
# print('url : '+search_url+'name : '+name)
# print(key,val)
playerInfo['short_name'] = name
playerInfo['player_cric_info_link'] = search_url
playerInfo['team'] = teamKeys
cricInfoBatsmanId = str(search_url).split('/')[-1].replace('.html', '')
playerInfo['_id'] = cricInfoBatsmanId + '-' + playerDict_n['match_id']
playerInfo['TeamID'] = playerDict_n['OpponentID']
playerInfo['OpponentID'] = playerDict_n['TeamID']
playerInfo['run_scored'] = '-'
playerInfo['balls_faced'] = '-'
playerInfo['M'] = '-'
playerInfo['4s'] = '-'
playerInfo['6s'] = '-'
playerInfo['strike_rate'] = '-'
playerInfo['MatchURL'] = playerDict_n['MatchURL']
playerInfo['match_id'] = playerDict_n['match_id']
playerInfo['Match_start_Date'] = playerDict_n['Match_start_Date']
playerInfo['Venue'] = playerDict_n['Venue']
playerInfo['innings'] = playerDict_n['innings']
playerInfo['commentry'] = '-'
playerInfo['match_type_text'] = playerDict_n['match_type_text']
if "Full name" in key:
playerInfo['Player_Full_Name'] = val[key.index("Full name")]
else:
playerInfo['Player_Full_Name'] = '-'
if 'Born' in key:
playerInfo['date,place_of_birth'] = val[key.index('Born')].replace('\n','').strip()
else:
playerInfo['date,place_of_birth'] = '-'
if 'Nickname' in key:
playerInfo['Player_Nickname'] = val[key.index('Nickname')]
else:
playerInfo['Player_Nickname'] = '-'
if not 'run_scored' in playerInfo:
playerInfo['run_scored'] = "-"
if not 'balls_faced' in playerInfo:
playerInfo['balls_faced'] = "-"
if not 'strike_rate' in playerInfo:
playerInfo['strike_rate'] = "-"
if not 'balls_bowled' in playerInfo:
playerInfo['balls_bowled'] = "-"
if not 'maiden_overs' in playerInfo:
playerInfo['maiden_overs'] = "-"
if not 'runs_given' in playerInfo:
playerInfo['runs_given'] = "-"
if not 'wicket' in playerInfo:
playerInfo['wicket'] = "-"
if not 'econ' in playerInfo:
playerInfo['econ'] = "-"
if not 'wide_balls' in playerInfo:
playerInfo['wide_balls'] = "-"
if not 'no_balls' in playerInfo:
playerInfo['no_balls'] = "-"
return playerInfo
#csv_file = "D:\\temp\\player_match_stats_29Nov2018.csv"
def Process_CSV(year):
csv_file = year+".csv"
df = pd.read_csv(csv_file)
types = set(x.strip() for x in df['match_type_text'])
matchId_playersDict = {}
def addCommentryField(playerName, shortName, playersDict_1, field):
if " " in playerName:
if SequenceMatcher(None, playerName, shortName).ratio() >= 0.7:
if field in playersDict_1:
catches = playersDict_1[field]
catches += 1
playersDict_1[field] = catches
else:
playersDict_1[field] = 1
else:
shortNameArr = shortName.split(" ")
for sName in shortNameArr:
sName = sName.strip()
if len(sName) > 2:
if SequenceMatcher(None, playerName, sName).ratio() >= 0.9:
if field in playersDict_1:
catches = playersDict_1[field]
catches += 1
playersDict_1[field] = catches
else:
playersDict_1[field] = 1
# below creating a match wise players dict
with open(csv_file, 'r') as csvfile:
csvReader = csv.DictReader(csvfile)
for row in csvReader:
matchType = row['match_type_text']
if matchType.strip() in types:
matchId = row['match_id']
if matchId in matchId_playersDict:
playersDict = matchId_playersDict[matchId]
playerId = row['cric_info_id']
playersDict[playerId] = row
else:
playersDict = {}
playerId = row['cric_info_id']
playersDict[playerId] = row
matchId_playersDict[matchId] = playersDict
print("matchId_playersDict length: ", len(matchId_playersDict))
#print("870881 length: ", len(matchId_playersDict['870881']))
#print("870881 values: ", matchId_playersDict['870881'])
with open(year+'_1.csv', 'w', newline='') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(("PlayerID", "TeamID", "MatchID", "OpponentID", "PlayerProfileURL",
"MatchURL", "MatchFormat", "MatchStartDate", "MatchVenue",'innings', "TeamName", "PlayerName", "PlayerFullName",
"Player Date,Place of Birth", "PlayerNickName", "B_Bat", "R_Bat", "4s", "6s",
"SR_Bat", "BallsBowled","RunsGiven", "MaidenOvers", "W_Bow", "ER_Bow", "Wide_Bow",
"No_Bow",'commentry' ,"catches", "stumped","run_out"))
for matchId, players in matchId_playersDict.items():
teamDict = {}
for playerId, playerDict in players.items():
team = playerDict['team']
if team in teamDict:
teamPlayers = teamDict[team]
teamPlayers.append(playerDict)
else:
teamPlayers = []
teamPlayers.append(playerDict)
teamDict[team] = teamPlayers
print("teamDict length: ", len(teamDict))
teamKeys = list(teamDict.keys())
print("teamKeys: ", teamKeys)
teamPlayers_0 = teamDict[teamKeys[0]]
teamPlayers_1 = teamDict[teamKeys[1]]
for playerDict_0 in teamPlayers_0:
commentry = str(playerDict_0['commentry'])
if ' sub ' in commentry or '(sub ' in commentry:
handle = handleSubs(commentry,playerDict_0,teamKeys[1])
if handle == None:
continue
Flag = True
for i in range(0,len(teamPlayers_1)):
if handle['short_name'] == teamPlayers_1[i]['short_name']:
print(handle['short_name'],teamPlayers_1[i]['short_name'])
Flag = False
break
if Flag:
teamPlayers_1.append(handle)
#commentry = commentry.replace('sub','').replace('(','').replace(')','').replace('[',' ').replace(']',' ')
#print("commentry: ", commentry)
if commentry.strip().startswith("c "):
# catch
#commentry = unidecode.unidecode(commentry)
if ' sub ' in commentry:
commentry = commentry.replace('(','').replace(')','').replace(' sub ',' ')
commentry = ''.join(x for x in commentry if x in string.printable)
playerNameMatch = re.match(r"c ([\w\s'-]+) b ", commentry.strip())
## print(playerNameMatch)
try:
playerName = playerNameMatch.group(1)
except:
print("commentry 0c: ", commentry)
playerNameMatch = re.match(r"c & b ([\w\s'-]+)", commentry.strip())
playerName = playerNameMatch.group(1)
#playerName = unidecode.unidecode(playerName)
for playersDict_1 in teamPlayers_1:
shortName = playersDict_1['short_name']
#shortName = unidecode.unidecode(shortName)
shortName = ''.join(x for x in shortName if x in string.printable)
playerName = playerName.strip()
addCommentryField(playerName, shortName, playersDict_1, 'catches')
elif commentry.strip().startswith("st "):
# stump
#commentry = unidecode.unidecode(commentry)
if ' sub ' in commentry:
commentry = commentry.replace('(','').replace(')','').replace(' sub ',' ')
commentry = ''.join(x for x in commentry if x in string.printable)
playerNameMatch = re.match(r"st ([\w\s'-]+) b ", commentry.strip())
try:
playerName = playerNameMatch.group(1)
except:
print("commentry 0st: ", commentry)
#playerName = unidecode.unidecode(playerName)
for playersDict_1 in teamPlayers_1:
shortName = playersDict_1['short_name']
#shortName = unidecode.unidecode(shortName)
shortName = ''.join(x for x in shortName if x in string.printable)
playerName = playerName.strip()
addCommentryField(playerName, shortName, playersDict_1, 'stumped')
elif commentry.strip().startswith("run out "):
if 'sub' in commentry:
## # if substitute in commentry, then ignore, as he will not be found in players list
## continue
commentry = commentry.replace('[','').replace(']','').replace(' sub ','')
#commentry = unidecode.unidecode(commentry)
commentry = ''.join(x for x in commentry if x in string.printable)
try:
playerNameMatch = re.match(r"run out \(([\w\s'/-]+)", commentry.strip())
playerName = playerNameMatch.group(1)
except:
print('commentry 0ro: ',commentry)
playerNames = []
if '/' in playerName:
playerNames = playerName.split('/')
for playersDict_1 in teamPlayers_1:
shortName = playersDict_1['short_name']
#shortName = unidecode.unidecode(shortName)
shortName = ''.join(x for x in shortName if x in string.printable)
playerName = playerName.strip()
if len(playerNames) > 0:
for player in playerNames:
player = player.strip()
addCommentryField(player, shortName, playersDict_1, 'run_out')
else:
addCommentryField(playerName, shortName, playersDict_1, 'run_out')
for playerDict_1 in teamPlayers_1:
commentry = str(playerDict_1['commentry'])
if ' sub ' in commentry or '(sub ' in commentry:
handle = handleSubs(commentry,playerDict_1,teamKeys[0])
if handle == None:
continue
Flag = True
for i in range(0,len(teamPlayers_0)):
if handle['short_name'] == teamPlayers_0[i]['short_name']:
print(handle['short_name'],teamPlayers_0[i]['short_name'])
Flag = False
break
if Flag:
teamPlayers_0.append(handle)
#commentry = commentry.replace('sub','').replace('(','').replace(')','').replace('[','').replace(']','')
if commentry.strip().startswith("c "):
# catch
#commentry = unidecode.unidecode(commentry)
commentry = ''.join(x for x in commentry if x in string.printable)
#print("commentry: ", commentry)
if ' sub ' in commentry:
commentry = commentry.replace('(','').replace(')','').replace(' sub ',' ')
playerNameMatch = re.match(r"c ([\w\s'-]+) b ", commentry.strip())
try:
playerName = playerNameMatch.group(1)
except:
playerNameMatch = re.match(r"c & b ([\w\s'-]+)", commentry.strip())
playerName = playerNameMatch.group(1)
#playerName = unidecode.unidecode(playerName)
for playersDict_0 in teamPlayers_0:
shortName = playersDict_0['short_name']
#shortName = unidecode.unidecode(shortName)
shortName = ''.join(x for x in shortName if x in string.printable)
playerName = playerName.strip()
addCommentryField(playerName, shortName, playersDict_0, 'catches')
elif commentry.strip().startswith("st "):
# stump
#commentry = unidecode.unidecode(commentry)
if ' sub ' in commentry:
commentry = commentry.replace('(','').replace(')','').replace(' sub ',' ')
commentry = ''.join(x for x in commentry if x in string.printable)
playerNameMatch = re.match(r"st ([\w\s'-]+) b ", commentry.strip())
try:
playerName = playerNameMatch.group(1)
except:
print("commentry 1st: ", commentry)
#playerName = unidecode.unidecode(playerName)
for playersDict_0 in teamPlayers_0:
shortName = playersDict_0['short_name']
#shortName = unidecode.unidecode(shortName)
shortName = ''.join(x for x in shortName if x in string.printable)
playerName = playerName.strip()
addCommentryField(playerName, shortName, playersDict_0, 'stumped')
elif commentry.strip().startswith("run out "):
if 'sub' in commentry:
## # if substitute in commentry, then ignore, as he will not be found in players list
## continue
commentry = commentry.replace('[','').replace(']','').replace(' sub ','')
#commentry = unidecode.unidecode(commentry)
commentry = ''.join(x for x in commentry if x in string.printable)
playerNameMatch = re.match(r"run out \(([\w\s'/-]+)", commentry.strip())
try:
playerName = playerNameMatch.group(1)
except:
print("commentry 1rt: ", commentry)
playerNames = []
if '/' in playerName:
playerNames = playerName.split('/')
for playersDict_0 in teamPlayers_0:
shortName = playersDict_0['short_name']
#shortName = unidecode.unidecode(shortName)
shortName = ''.join(x for x in shortName if x in string.printable)
playerName = playerName.strip()
if len(playerNames) > 0:
for player in playerNames:
player = player.strip()
addCommentryField(player, shortName, playersDict_0, 'run_out')
else:
addCommentryField(playerName, shortName, playersDict_0, 'run_out')
for playerDict_0 in teamPlayers_0:
if not 'catches' in playerDict_0:
playerDict_0['catches'] = "0"
if not 'stumped' in playerDict_0:
playerDict_0['stumped'] = "0"
if not 'run_out' in playerDict_0:
playerDict_0['run_out'] = "0"
writer.writerow((playerDict_0["_id"],playerDict_0["TeamID"],playerDict_0["match_id"], playerDict_0["OpponentID"], playerDict_0["player_cric_info_link"],
playerDict_0["MatchURL"], playerDict_0["match_type_text"], playerDict_0["Match_start_Date"],
playerDict_0["Venue"], playerDict_0["innings"],playerDict_0["team"], playerDict_0["short_name"], playerDict_0["Player_Full_Name"],
playerDict_0["date,place_of_birth"], playerDict_0["Player_Nickname"], playerDict_0["balls_faced"],
playerDict_0["run_scored"], playerDict_0["4s"], playerDict_0["6s"], playerDict_0["strike_rate"],
playerDict_0["balls_bowled"], playerDict_0["runs_given"], playerDict_0["maiden_overs"],
playerDict_0["wicket"], playerDict_0["econ"], playerDict_0["wide_balls"], playerDict_0["no_balls"],playerDict_0["commentry"],
playerDict_0["catches"], playerDict_0["stumped"], playerDict_0["run_out"]))
for playerDict_1 in teamPlayers_1:
if not 'catches' in playerDict_1:
playerDict_1['catches'] = "0"
if not 'stumped' in playerDict_1:
playerDict_1['stumped'] = "0"
if not 'run_out' in playerDict_1:
playerDict_1['run_out'] = "0"
writer.writerow((playerDict_1["_id"],playerDict_1["TeamID"],playerDict_1["match_id"], playerDict_1["OpponentID"], playerDict_1["player_cric_info_link"],
playerDict_1["MatchURL"], playerDict_1["match_type_text"], playerDict_1["Match_start_Date"],
playerDict_1["Venue"],playerDict_1["innings"] ,playerDict_1["team"], playerDict_1["short_name"], playerDict_1["Player_Full_Name"],
playerDict_1["date,place_of_birth"], playerDict_1["Player_Nickname"], playerDict_1["balls_faced"],
playerDict_1["run_scored"], playerDict_1["4s"], playerDict_1["6s"], playerDict_1["strike_rate"],
playerDict_1["balls_bowled"], playerDict_1["runs_given"], playerDict_1["maiden_overs"],
playerDict_1["wicket"], playerDict_1["econ"], playerDict_1["wide_balls"], playerDict_1["no_balls"],playerDict_1["commentry"],
playerDict_1["catches"], playerDict_1["stumped"], playerDict_1["run_out"]))
## writer.writerow((playerDict_1["_id"], playerDict_1["short_name"], playerDict_1["player_cric_info_link"],
## playerDict_1["team"], playerDict_1["commentry"], playerDict_1["run_scored"],
## playerDict_1["balls_faced"], playerDict_1["M"], playerDict_1["4s"], playerDict_1["6s"],
## playerDict_1["balls_bowled"], playerDict_1["maiden_overs"], playerDict_1["runs_given"],
## playerDict_1["wicket"], playerDict_1["econ"], playerDict_1["dot_delivery"],
## playerDict_1["four_delivery"],
## playerDict_1["six_delivery"], playerDict_1["wide_balls"], playerDict_1["no_balls"],
## playerDict_1["Position"], playerDict_1["match_id"], playerDict_1["match_desc"],
## playerDict_1["url_match_type"],
## playerDict_1["match_type_text"], playerDict_1["season"], playerDict_1["strike_rate"],
## playerDict_1["cric_info_id"], playerDict_1["catches"], playerDict_1["stumped"], playerDict_1["run_out"]))
####for year in ['2011','2012','2013','2014','2015','2016','2017','2018']:
##Process_CSV('2018')
|
the-stack_0_3934 | import pickle
import time
import requests
from bs4 import BeautifulSoup
from scrapers import Talk
from datetime import date, datetime
from dateutil.parser import parse as dateParser_
from scrapers import dateParse
from scrapers import removeParentheses
from scrapers import cleanSpeaker
from selenium import webdriver
import re
from selenium.webdriver.common.keys import Keys
def scrape(start_date=date(1980, 1, 1), process=None): # process should be Talk -> None
hostname = "https://www.youtube.com"
fireFoxOptions = webdriver.FirefoxOptions()
fireFoxOptions.set_headless()
driver = webdriver.Firefox(firefox_options=fireFoxOptions)
# driver = webdriver.Firefox()
URL = "https://www.youtube.com/c/IhesFr/videos?view=0&sort=dd&flow=grid"
try:
driver.get(URL)
# time.sleep(60) # during this minute, I manually scroll to the bottom
# of the page
time.sleep(4)
html = driver.find_element_by_tag_name('html')
html.send_keys(Keys.END)
time.sleep(2)
html.send_keys(Keys.END)
time.sleep(2)
contentsDiv = driver.find_elements_by_id('contents')[1]
contentsHTML = contentsDiv.get_attribute('outerHTML')
soup = BeautifulSoup(contentsHTML,
'html.parser')
videoDivs = soup.find_all('ytd-grid-video-renderer')
for videoDiv in videoDivs:
infoDiv = videoDiv.find('a', id="video-title")
link = hostname + infoDiv['href']
youtubeTitle = infoDiv['title']
if speakerAndTitle := youtubeTitleToMaybeSpeakerAndTitle(
youtubeTitle):
talk = Talk(link)
# date = urlToMaybeDate(link, driver)
# if date:
# if date < start_date:
# break
talk.firstName, talk.lastName = cleanSpeaker(
speakerAndTitle
[0])
talk.title = speakerAndTitle[1]
print(talk)
if process:
process(talk)
except BaseException:
pass
driver.quit()
return None
def youtubeTitleToMaybeSpeakerAndTitle(ytTitle):
pieces = ytTitle.split(' - ')
if len(pieces) == 1:
return None
name = pieces[0]
title = pieces[1]
if len(name.split(' ')) > 3:
return None
return (name, title)
def urlToMaybeDate(url, driver):
try:
driver.get(url)
time.sleep(3)
date = driver.find_element_by_id('date').text[1:]
date = dateParse(date)
return date
except BaseException:
return None
|
the-stack_0_3936 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
### <summary>
### Basic template framework algorithm uses framework components to define the algorithm.
### Shows EqualWeightingPortfolioConstructionModel.LongOnly() application
### </summary>
### <meta name="tag" content="alpha streams" />
### <meta name="tag" content="using quantconnect" />
### <meta name="tag" content="algorithm framework" />
class LongOnlyAlphaStreamAlgorithm(QCAlgorithm):
'''Basic template framework algorithm uses framework components to define the algorithm.
Shows EqualWeightingPortfolioConstructionModel.LongOnly() application'''
def Initialize(self):
# 1. Required:
self.SetStartDate(2013, 10, 7)
self.SetEndDate(2013, 10, 11)
# 2. Required: Alpha Streams Models:
self.SetBrokerageModel(BrokerageName.AlphaStreams)
# 3. Required: Significant AUM Capacity
self.SetCash(1000000)
# Only SPY will be traded
self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel(Resolution.Daily, PortfolioBias.Long))
self.SetExecution(ImmediateExecutionModel())
# Set algorithm framework models
self.SetUniverseSelection(ManualUniverseSelectionModel(
[Symbol.Create(x, SecurityType.Equity, Market.USA) for x in ["SPY", "IBM"]]))
def OnData(self, slice):
if self.Portfolio.Invested: return
self.EmitInsights(
[
Insight.Price("SPY", timedelta(1), InsightDirection.Up),
Insight.Price("IBM", timedelta(1), InsightDirection.Down)
])
def OnOrderEvent(self, orderEvent):
if orderEvent.Status == OrderStatus.Filled:
if self.Securities[orderEvent.Symbol].Holdings.IsShort:
raise ValueError("Invalid position, should not be short");
self.Debug(orderEvent)
|
the-stack_0_3938 | import os
import sys
sys.path.append("../..")
from PyCLUE.tasks.run_classifier import my_clue_tasks, configs
import tensorflow as tf
# assign GPU devices or CPU devices
# os.environ["CUDA_VISIBLE_DEVICES"] = "4"
flags = tf.flags
FLAGS = flags.FLAGS
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# tf.logging.set_verbosity(tf.logging.ERROR)
flags.DEFINE_string("task_name", "", "oss buckets")
flags.DEFINE_string("gpu_id", "4", "oss buckets")
os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu_id
# default configs: see PyCLUE.utils.classifier_utils.core
# below are some necessary paramters required in running this task
# task_name:
# Support:
# chineseGLUE: bq, xnli, lcqmc, inews, thucnews,
# CLUE: afqmc, cmnli, copa, csl, iflytek, tnews, wsc
for task_name in FLAGS.task_name.split(","):
from PyCLUE.tasks.run_classifier import configs
if task_name == 'afqmc':
configs["task_name"] = task_name
# train parameters
configs["max_seq_length"] = 128
configs["train_batch_size"] = 32
configs["learning_rate"] = 2e-5
configs["warmup_proportion"] = 0.1
configs["num_train_epochs"] = 10.0
elif task_name == 'cmnli':
configs["task_name"] = task_name
# train parameters
configs["max_seq_length"] = 128
configs["train_batch_size"] = 32
configs["learning_rate"] = 1e-4
configs["warmup_proportion"] = 0.1
configs["num_train_epochs"] = 10.0
elif task_name == 'csl':
configs["task_name"] = task_name
# train parameters
configs["max_seq_length"] = 256
configs["train_batch_size"] = 32
configs["learning_rate"] = 5e-5
configs["warmup_proportion"] = 0.1
configs["num_train_epochs"] = 10.0
elif task_name == 'iflytek':
configs["task_name"] = task_name
# train parameters
configs["max_seq_length"] = 256
configs["train_batch_size"] = 16
configs["learning_rate"] = 1e-4
configs["warmup_proportion"] = 0.1
configs["num_train_epochs"] = 20.0
elif task_name == 'tnews':
configs["task_name"] = task_name
# train parameters
configs["max_seq_length"] = 256
configs["train_batch_size"] = 32
configs["learning_rate"] = 1e-4
configs["warmup_proportion"] = 0.1
configs["num_train_epochs"] = 20.0
elif task_name == 'wsc':
configs["task_name"] = task_name
# train parameters
configs["max_seq_length"] = 256
configs["train_batch_size"] = 32
configs["learning_rate"] = 1e-4
configs["warmup_proportion"] = 0.1
configs["num_train_epochs"] = 10.0
# pretrained_lm_name:
# If None, should assign `vocab_file`, `bert_config_file`, `init_checkpoint`.
# Or you can choose the following models:
# bert, bert_wwm_ext, albert_xlarge, albert_large, albert_base, albert_base_ext,
# albert_small, albert_tiny, roberta, roberta_wwm_ext, roberta_wwm_ext_large
configs["pretrained_lm_name"] = "bert_electra_tiny_grl_generator"
configs["vocab_file"] = "/data/grl/electra_bert_tiny_gen_bert_tiny_dis_joint_gumbel_no_sharing_pretrained_embedding/generator/vocab.txt"
configs["bert_config_file"] = "/data/grl/electra_bert_tiny_gen_bert_tiny_dis_joint_gumbel_no_sharing_pretrained_embedding/generator/bert_config_tiny_large_embed.json"
configs["init_checkpoint"] = "/data/grl/electra_bert_tiny_gen_bert_tiny_dis_joint_gumbel_no_sharing_pretrained_embedding/generator/generator.ckpt-1070000"
configs["verbose"] = 1
configs["do_train"] = True
configs["do_eval"] = True
configs["do_predict"] = True
my_clue_tasks(configs)
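# Illustrative launch command (the script name is assumed; --task_name and --gpu_id
# are the flags defined above, and task names follow the CLUE list in the comments):
#   python run_clue_classifier.py --task_name=afqmc,tnews --gpu_id=4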
|
the-stack_0_3939 | # coding:utf-8
import datetime
from functools import lru_cache
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
from QUANTAXIS.QAAnalysis.QAAnalysis_dataframe import QAAnalysis_stock
from QUANTAXIS.QAData.data_marketvalue import QA_data_marketvalue
from QUANTAXIS.QAFetch.Fetcher import QA_quotation
from QUANTAXIS.QAFetch.QAQuery import QA_fetch_stock_info
from QUANTAXIS.QAFetch.QAQuery_Advance import (QA_fetch_stock_block_adv,
QA_fetch_stock_day_adv,
QA_fetch_stock_min_adv)
from QUANTAXIS.QAFetch.QATdx import QA_fetch_get_stock_info
from QUANTAXIS.QAFetch.QATdx_adv import QA_Tdx_Executor
from QUANTAXIS.QAUtil.QADate_trade import QA_util_get_real_datelist
from QUANTAXIS.QAUtil.QAParameter import (DATASOURCE, FREQUENCE, MARKET_TYPE,
OUTPUT_FORMAT)
def get_gap_trade(gap):
return QA_util_get_real_datelist(datetime.date.today() + datetime.timedelta(days=-int(gap)), datetime.date.today())
#from QUANTAXIS.QAAnalysis.QAAnalysis_dataframe import QAAnalysis_stock
class QAAnalysis_block():
def __init__(self, code=[], name=None, start=None, end=None, frequence=FREQUENCE.DAY, *args, **kwargs):
self.code = code
self.start = start
self.end = end
self.frequence = frequence
self.name = name
def __repr__(self):
return '< QAAnalysis_Block {} with {} code >'.format(self.name, len(self.code))
@property
@lru_cache()
def market_data(self):
return QA_quotation(self.code, self.start, self.end, self.frequence,
market=MARKET_TYPE.STOCK_CN, source=DATASOURCE.MONGO, output=OUTPUT_FORMAT.DATASTRUCT).to_qfq()
@property
@lru_cache()
def market_value(self):
return self.market_data.add_func(QA_data_marketvalue)
@property
def week_data(self):
'this weekly data'
'return a QUANTAXIS DATASTRUCT'
return self.market_data.to_week()
@property
def month_data(self):
'this monthly data'
'return a QUANTAXIS DATASTRUCT'
return self.market_data.to_month()
def block_index(self, methods='mv'):
if methods == 'mv':
res = self.market_value.groupby(level=0).apply(
lambda x: np.average(x.close, weights=x.shares))
elif methods == 'lv':
res = self.market_value.groupby(level=0).apply(
lambda x: np.average(x.close, weights=x.lshares))
elif methods == 'close':
res = self.market_value.groupby(level=0).apply(
lambda x: np.average(x.close))
elif methods == 'volume':
res = self.market_value.groupby(level=0).apply(
lambda x: np.average(x.close, weights=x.volume))
        else:
            print(
                'wrong methods: only support [mv,lv,close,volume] methods \n use default mv methods')
            res = self.market_value.groupby(level=0).apply(
                lambda x: np.average(x.close, weights=x.shares))
return res/res.iloc[0]*1000
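    # The series is rebased so the first bar equals 1000, which keeps the different
    # weighting schemes (mv/lv/close/volume) directly comparable when plotted together.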
def stock_turnover(self):
return self.market_value.volume/self.market_value.lshares
def block_turnover(self):
return self.stock_turnover().groupby(level=0).mean()
def plot_index(self, methods='mv'):
block_index=self.block_index('close')
def format_date(x, pos=None):
            # Keep the index within bounds; this is important, otherwise the final plot's axis labels will not render
thisind = np.clip(int(x+0.5), 0, N-1)
# print(thisind)
return block_index.index[thisind].strftime('%Y-%m-%d %H:%M')
fig = plt.figure(figsize=(14, 12))
ax = fig.add_subplot(1, 1, 1)
plt.style.use('ggplot')
plt.title('QUANTAXIS BLOCK ANA {}'.format(
self.name), fontproperties="SimHei")
N = len(block_index)
block_index.reset_index()[0].plot()
self.block_index('lv').reset_index()[0].plot()
self.block_index('close').reset_index()[0].plot()
self.block_index('volume').reset_index()[0].plot()
ax.xaxis.set_major_formatter(ticker.FuncFormatter(format_date))
plt.legend(['market_value', 'liquidity_value', 'close', 'volume'])
plt.show()
if __name__ == "__main__":
import QUANTAXIS as QA
ana = QAAnalysis_block(
QA.QA_fetch_stock_block_adv().get_block('国产软件').code, '国产软件', '2018-01-01', '2018-08-21')
ana.plot_index()
ana = QAAnalysis_block(['000001', '000002', '600356'],
'自定义', '2018-01-01', '2018-08-21')
ana.plot_index()
ana = QAAnalysis_block(['000001', '000002', '600356'],
'自定义15分钟级别指数', '2018-08-01', '2018-08-21', FREQUENCE.FIFTEEN_MIN)
ana.plot_index()
|
the-stack_0_3940 | """Provide support to Lexicon for DNS changes for Gransy sites subreg.cz, regtons.com and \
regnames.eu."""
from __future__ import absolute_import
import collections
import logging
from builtins import staticmethod
try:
import zeep # Optional dependency
except BaseException:
pass
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
NAMESERVER_DOMAINS = ["gransy.com"]
def gransy_provider_parser(subparser):
"""Gransy provider parser"""
subparser.add_argument(
"--auth-username", help="specify username for authentication"
)
subparser.add_argument(
"--auth-password", help="specify password for authentication"
)
def provider_parser(subparser):
"""Configure provider parser"""
gransy_provider_parser(subparser)
subparser.description = (
"DNS manipulation provider for Gransy sites "
+ "subreg.cz, regtons.com and regnames.eu."
)
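# Illustrative CLI invocation (domain and record values are placeholders; the exact
# argument layout depends on the installed lexicon version):
#   lexicon gransy --auth-username USER --auth-password PASS \
#       create example.com A --name www --content 192.0.2.1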
class Provider(BaseProvider):
"""Provider class for Gransy"""
def __init__(self, config):
super(Provider, self).__init__(config)
self.domain_id = None
self.ssid = None
client = zeep.Client("https://subreg.cz/wsdl")
self.api = client.service
# Authenticate against provider,
# Make any requests required to get the domain's id for
# this provider, so it can be used in subsequent calls.
# Should throw an error if authentication fails for any reason,
# of if the domain does not exist.
def _authenticate(self):
"""Logs-in the user and checks the domain name"""
if not self._get_provider_option(
"auth_username"
) or not self._get_provider_option("auth_password"):
raise Exception(
"No valid authentication data passed, expected: auth-username and auth-password"
)
response = self._request_login(
self._get_provider_option("auth_username"),
self._get_provider_option("auth_password"),
)
if "ssid" in response:
self.ssid = response["ssid"]
domains = self.domains_list()
if any((domain["name"] == self.domain for domain in domains)):
self.domain_id = self.domain
else:
raise Exception("Unknown domain {}".format(self.domain))
else:
raise Exception("No SSID provided by server")
# Create record. If record already exists with the same content, do nothing.
def _create_record(self, rtype, name, content):
"""Creates a new unique record"""
found = self._list_records(rtype=rtype, name=name, content=content)
if found:
return True
record = self._create_request_record(
None,
rtype,
name,
content,
self._get_lexicon_option("ttl"),
self._get_lexicon_option("priority"),
)
self._request_add_dns_record(record)
return True
# Update a record. Identifier must be specified.
def _update_record(self, identifier, rtype=None, name=None, content=None):
"""Updates a record. Name changes are allowed, but the record identifier will change"""
if identifier is not None:
if name is not None:
records = self._list_records_internal(identifier=identifier)
if len(records) == 1 and records[0]["name"] != self._full_name(name):
# API does not allow us to update name directly
self._update_record_with_name(records[0], rtype, name, content)
else:
self._update_record_with_id(identifier, rtype, content)
else:
self._update_record_with_id(identifier, rtype, content)
else:
guessed_record = self._guess_record(rtype, name)
self._update_record_with_id(guessed_record["id"], rtype, content)
return True
def _update_record_with_id(self, identifier, rtype, content):
"""Updates existing record with no sub-domain name changes"""
record = self._create_request_record(
identifier,
rtype,
None,
content,
self._get_lexicon_option("ttl"),
self._get_lexicon_option("priority"),
)
self._request_modify_dns_record(record)
def _update_record_with_name(self, old_record, rtype, new_name, content):
"""Updates existing record and changes it's sub-domain name"""
new_type = rtype if rtype else old_record["type"]
new_ttl = self._get_lexicon_option("ttl")
if new_ttl is None and "ttl" in old_record:
new_ttl = old_record["ttl"]
new_priority = self._get_lexicon_option("priority")
if new_priority is None and "priority" in old_record:
new_priority = old_record["priority"]
new_content = content
if new_content is None and "content" in old_record:
new_content = old_record["content"]
record = self._create_request_record(
None, new_type, new_name, new_content, new_ttl, new_priority
)
# This will be a different domain name, so no name collision should
# happen. First create a new entry and when it succeeds, delete the old
# one.
self._request_add_dns_record(record)
self._request_delete_dns_record_by_id(old_record["id"])
# Delete an existing record.
# If record does not exist, do nothing.
# If an identifier is specified, use it, otherwise do a lookup using type, name and content.
def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
"""Deletes an existing record"""
to_delete_ids = list()
if identifier:
to_delete_ids.append(identifier)
else:
for record in self._list_records(rtype=rtype, name=name, content=content):
to_delete_ids.append(record["id"])
for to_delete_id in to_delete_ids:
self._request_delete_dns_record_by_id(to_delete_id)
return True
def domains_list(self):
"""Get list of registered domains"""
response = self._request_domains_list()
return response["domains"] if "domains" in response else list()
def _create_request_record(self, identifier, rtype, name, content, ttl, priority):
"""Creates record for Subreg API calls"""
record = collections.OrderedDict()
# Mandatory content
# Just for update - not for creation
if identifier is not None:
record["id"] = identifier
record["type"] = rtype
# Just for creation - not for update
if name is not None:
record["name"] = self._relative_name(name)
# Optional content
if content is not None:
record["content"] = content
if ttl is not None:
record["ttl"] = ttl
if priority is not None:
record["prio"] = priority
return record
def _create_response_record(self, response):
"""Creates record for lexicon API calls"""
record = dict()
record["id"] = response["id"]
record["type"] = response["type"]
record["name"] = self._full_name(response["name"])
if "content" in response:
record["content"] = response["content"] or ""
if "ttl" in response:
record["ttl"] = response["ttl"]
if "prio" in response:
record["priority"] = response["prio"]
return record
def _full_name(self, record_name):
"""Returns full domain name of a sub-domain name"""
# Handle None and empty strings
if not record_name:
return self.domain
return super(Provider, self)._full_name(record_name)
def _relative_name(self, record_name):
"""Returns sub-domain of a domain name"""
# Handle None and empty strings as None
if not record_name:
return None
subdomain = super(Provider, self)._relative_name(record_name)
return subdomain if subdomain else None
# List all records. Return an empty list if no records found
# identifier, type, name and content are used to filter records.
def _list_records(self, rtype=None, name=None, content=None):
return self._list_records_internal(rtype=rtype, name=name, content=content)
def _list_records_internal(
self, identifier=None, rtype=None, name=None, content=None
):
"""Lists all records by the specified criteria"""
response = self._request_get_dns_zone()
if "records" in response:
# Interpret empty string as None because zeep does so too
content_check = content if content != "" else None
name_check = self._relative_name(name)
# Stringize the identifier to prevent any rtype differences
identifier_check = str(identifier) if identifier is not None else None
filtered_records = [
record
for record in response["records"]
if (identifier is None or str(record["id"]) == identifier_check)
and (rtype is None or record["type"] == rtype)
and (name is None or record["name"] == name_check)
and (
content is None
or ("content" in record and record["content"] == content_check)
)
]
records = [
self._create_response_record(filtered_record)
for filtered_record in filtered_records
]
else:
records = []
return records
def _guess_record(self, rtype, name=None, content=None):
"""Tries to find existing unique record by type, name and content"""
records = self._list_records_internal(
identifier=None, rtype=rtype, name=name, content=content
)
if len(records) == 1:
return records[0]
if len(records) > 1:
raise Exception(
"Identifier was not provided and several existing "
"records match the request for {0}/{1}".format(rtype, name)
)
raise Exception(
"Identifier was not provided and no existing records match "
"the request for {0}/{1}".format(rtype, name)
)
def _request_login(self, login, password):
"""Sends Login request"""
return self._request_internal("Login", login=login, password=password)
def _request_domains_list(self):
"""Sends Domains_List request"""
return self._request_internal("Domains_List")
def _request_get_dns_zone(self):
"""Sends Get_DNS_Zone request"""
return self._request_internal("Get_DNS_Zone", domain=self.domain)
def _request_add_dns_record(self, record):
"""Sends Add_DNS_Record request"""
return self._request_internal(
"Add_DNS_Record", domain=self.domain, record=record
)
def _request_modify_dns_record(self, record):
"""Sends Modify_DNS_Record request"""
return self._request_internal(
"Modify_DNS_Record", domain=self.domain, record=record
)
def _request_delete_dns_record_by_id(self, identifier):
"""Sends Delete_DNS_Record request"""
return self._request_internal(
"Delete_DNS_Record", domain=self.domain, record={"id": identifier}
)
def _request_internal(self, command, **kwargs):
"""Make request parse response"""
args = dict(kwargs)
if self.ssid:
args["ssid"] = self.ssid
method = getattr(self.api, command)
response = method(**args)
if response and "status" in response:
if response["status"] == "error":
self._raise_error(
message=response["error"]["errormsg"],
major=response["error"]["errorcode"]["major"],
minor=response["error"]["errorcode"]["minor"],
)
if response["status"] == "ok":
return response["data"] if "data" in response else dict()
raise Exception("Invalid status found in SOAP response")
raise Exception("Invalid response")
def _request(self, action="GET", url="/", data=None, query_params=None):
# Default helper _request is not used in Subreg provider
pass
@staticmethod
def _raise_error(major, minor, message):
raise GransyError(major, minor, message)
class GransyError(Exception):
"""Specific error for Gransy provider"""
def __init__(self, major, minor, message):
self.major = int(major)
self.minor = int(minor)
self.message = message
super(GransyError, self).__init__()
def __str__(self):
return "Major: {} Minor: {} Message: {}".format(
self.major, self.minor, self.message
)
|
the-stack_0_3942 | from .package import Package
from .package import Version
def test_version_comparison():
assert Version(0, "1.2.4") < Version(10, "1.2.4")
assert Version(0, "1.2.5") < Version(10, "1.2.4")
assert Version(0, "1.2.3") == Version(0, "1.2.4")
assert Version(0, "1.2.3") == Version(0, "1.2.3")
assert Version(10, "1.2.4") > Version(0, "1.2.4")
assert Version(10, "1.2.4") > Version(0, "1.2.5")
assert Version(0, "1.2.4") == Version(0, "1.2.3")
def test_package_from_json():
package_json = {"name": "package", "version": "1.2.3", "releaseVersion": 10}
p = Package.from_json(package_json)
assert p.get_name() == package_json["name"]
assert p.get_version() == Version(10, "1.2.3")
def test_package_starts_with_beta_is_beta():
p = Package("beta-package", None)
assert p.is_beta()
def test_normal_package_is_not_beta():
p = Package("package", None)
assert not p.is_beta()
def test_non_beta_backage_beta_name_is_name():
p = Package("package", None)
assert p.get_name() == p.get_non_beta_name()
def test_beta_package_beta_name():
p = Package("beta-package", None)
assert p.get_non_beta_name() == "package"
def test_elastic_ordering():
p7 = Package.from_json(
{"name": "beta-elastic", "version": "1.0.16-5.5.1-beta", "releaseVersion": 7}
)
p0 = Package.from_json(
{"name": "beta-elastic", "version": "1.0.9-5.3.0-beta", "releaseVersion": 0}
)
p1 = Package.from_json(
{"name": "beta-elastic", "version": "1.0.10-5.3.0-beta", "releaseVersion": 1}
)
assert p0 < p1
assert p7 > p0
|
the-stack_0_3944 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConnectionMonitorQueryResult(Model):
"""List of connection states snapshots.
:param source_status: Status of connection monitor source. Possible values
include: 'Uknown', 'Active', 'Inactive'
:type source_status: str or
~azure.mgmt.network.v2018_08_01.models.ConnectionMonitorSourceStatus
:param states: Information about connection states.
:type states:
list[~azure.mgmt.network.v2018_08_01.models.ConnectionStateSnapshot]
"""
_attribute_map = {
'source_status': {'key': 'sourceStatus', 'type': 'str'},
'states': {'key': 'states', 'type': '[ConnectionStateSnapshot]'},
}
def __init__(self, **kwargs):
super(ConnectionMonitorQueryResult, self).__init__(**kwargs)
self.source_status = kwargs.get('source_status', None)
self.states = kwargs.get('states', None)
|
the-stack_0_3945 | # coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras LSTM Encoding Network.
Implements a network that will generate the following layers:
[optional]: preprocessing_layers # preprocessing_layers
[optional]: (Add | Concat(axis=-1) | ...) # preprocessing_combiner
[optional]: Conv2D # input_conv_layer_params
Flatten
[optional]: Dense # input_fc_layer_params
[optional]: LSTM cell
[optional]: Dense # output_fc_layer_params
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import tensorflow as tf
from tf_agents.networks import dynamic_unroll_layer
from tf_agents.networks import encoding_network
from tf_agents.networks import network
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step
from tf_agents.utils import nest_utils
KERAS_LSTM_FUSED_IMPLEMENTATION = 2
@gin.configurable
class LSTMEncodingNetwork(network.Network):
"""Recurrent network."""
def __init__(
self,
input_tensor_spec,
preprocessing_layers=None,
preprocessing_combiner=None,
conv_layer_params=None,
input_fc_layer_params=(75, 40),
lstm_size=(40,),
output_fc_layer_params=(75, 40),
activation_fn=tf.keras.activations.relu,
dtype=tf.float32,
name='LSTMEncodingNetwork',
):
"""Creates an instance of `LSTMEncodingNetwork`.
Input preprocessing is possible via `preprocessing_layers` and
`preprocessing_combiner` Layers. If the `preprocessing_layers` nest is
shallower than `input_tensor_spec`, then the layers will get the subnests.
For example, if:
```python
input_tensor_spec = ([TensorSpec(3)] * 2, [TensorSpec(3)] * 5)
preprocessing_layers = (Layer1(), Layer2())
```
then preprocessing will call:
```python
preprocessed = [preprocessing_layers[0](observations[0]),
preprocessing_layers[1](observations[1])]
```
However if
```python
preprocessing_layers = ([Layer1() for _ in range(2)],
[Layer2() for _ in range(5)])
```
then preprocessing will call:
```python
preprocessed = [
layer(obs) for layer, obs in zip(flatten(preprocessing_layers),
flatten(observations))
]
```
Args:
input_tensor_spec: A nest of `tensor_spec.TensorSpec` representing the
observations.
preprocessing_layers: (Optional.) A nest of `tf.keras.layers.Layer`
representing preprocessing for the different observations.
All of these layers must not be already built.
preprocessing_combiner: (Optional.) A keras layer that takes a flat list
of tensors and combines them. Good options include
`tf.keras.layers.Add` and `tf.keras.layers.Concatenate(axis=-1)`.
This layer must not be already built.
conv_layer_params: Optional list of convolution layers parameters, where
each item is a length-three tuple indicating (filters, kernel_size,
stride).
input_fc_layer_params: Optional list of fully connected parameters, where
each item is the number of units in the layer. These feed into the
recurrent layer.
lstm_size: An iterable of ints specifying the LSTM cell sizes to use.
output_fc_layer_params: Optional list of fully connected parameters, where
each item is the number of units in the layer. These are applied on top
of the recurrent layer.
activation_fn: Activation function, e.g. tf.keras.activations.relu.
dtype: The dtype to use by the convolution, LSTM, and fully connected
layers.
name: A string representing name of the network.
Raises:
ValueError: If any of `preprocessing_layers` is already built.
ValueError: If `preprocessing_combiner` is already built.
"""
kernel_initializer = tf.compat.v1.variance_scaling_initializer(
scale=2.0, mode='fan_in', distribution='truncated_normal')
input_encoder = encoding_network.EncodingNetwork(
input_tensor_spec,
preprocessing_layers=preprocessing_layers,
preprocessing_combiner=preprocessing_combiner,
conv_layer_params=conv_layer_params,
fc_layer_params=input_fc_layer_params,
activation_fn=activation_fn,
kernel_initializer=kernel_initializer,
dtype=dtype)
# Create RNN cell
if len(lstm_size) == 1:
cell = tf.keras.layers.LSTMCell(
lstm_size[0],
dtype=dtype,
implementation=KERAS_LSTM_FUSED_IMPLEMENTATION)
else:
cell = tf.keras.layers.StackedRNNCells([
tf.keras.layers.LSTMCell( # pylint: disable=g-complex-comprehension
size,
dtype=dtype,
implementation=KERAS_LSTM_FUSED_IMPLEMENTATION)
for size in lstm_size
])
output_encoder = ([
tf.keras.layers.Dense(
num_units,
activation=activation_fn,
kernel_initializer=kernel_initializer,
dtype=dtype,
name='/'.join([name, 'dense']))
for num_units in output_fc_layer_params
])
counter = [-1]
def create_spec(size):
counter[0] += 1
return tensor_spec.TensorSpec(
size, dtype=dtype, name='network_state_%d' % counter[0])
state_spec = tf.nest.map_structure(create_spec, cell.state_size)
super(LSTMEncodingNetwork, self).__init__(
input_tensor_spec=input_tensor_spec,
state_spec=state_spec,
name=name)
self._conv_layer_params = conv_layer_params
self._input_encoder = input_encoder
self._dynamic_unroll = dynamic_unroll_layer.DynamicUnroll(cell)
self._output_encoder = output_encoder
def call(self, observation, step_type, network_state=None):
"""Apply the network.
Args:
observation: A tuple of tensors matching `input_tensor_spec`.
step_type: A tensor of `StepType.
network_state: (optional.) The network state.
Returns:
`(outputs, network_state)` - the network output and next network state.
Raises:
ValueError: If observation tensors lack outer `(batch,)` or
`(batch, time)` axes.
"""
num_outer_dims = nest_utils.get_outer_rank(observation,
self.input_tensor_spec)
if num_outer_dims not in (1, 2):
raise ValueError(
'Input observation must have a batch or batch x time outer shape.')
has_time_dim = num_outer_dims == 2
if not has_time_dim:
# Add a time dimension to the inputs.
observation = tf.nest.map_structure(lambda t: tf.expand_dims(t, 1),
observation)
step_type = tf.nest.map_structure(lambda t: tf.expand_dims(t, 1),
step_type)
state, network_state = self._input_encoder(
observation, step_type, network_state)
with tf.name_scope('reset_mask'):
reset_mask = tf.equal(step_type, time_step.StepType.FIRST)
# Unroll over the time sequence.
state, network_state = self._dynamic_unroll(
state,
reset_mask,
initial_state=network_state)
for layer in self._output_encoder:
state = layer(state)
if not has_time_dim:
# Remove time dimension from the state.
state = tf.squeeze(state, [1])
return state, network_state
|
the-stack_0_3946 | """
Flask server that exposes the leveldb through a REST API, with automatic serialization
to bytes and deserialization from bytes provided by the serialization module
"""
import http
import os
import pickle
import sys
from multiprocessing import Process
from pathlib import Path
from typing import Iterable, Tuple, Union
import plyvel
from flask import Flask, Response, g, request
from openleveldb.backend import serializer
from openleveldb.backend.connectorcommon import get_prefixed_db
from openleveldb.backend.serializer import DecodeType
app = Flask(__name__)
def get_db(dbpath: Union[str, Path]) -> plyvel.DB:
dbpath: Path = Path(dbpath)
if not hasattr(g, "dbs"):
g.dbs = {}
if dbpath not in g.dbs:
g.dbs[dbpath] = plyvel.DB(
dbpath.expanduser().absolute().as_posix(), create_if_missing=True
)
return g.dbs[dbpath]
def _parse_and_get_prefixed_db() -> plyvel.DB:
dbpath = request.args.get("dbpath")
prefixes = request.args.getlist("prefixes")
prefixes = (
serializer.normalize_strings(DecodeType.STR.pure_encode_fun, prefixes)
if prefixes is not None
else ()
)
return get_prefixed_db(get_db(dbpath), prefixes)
@app.teardown_appcontext
def close_db(error) -> None:
"""Closes the database again at the end of the request."""
if hasattr(g, "dbs"):
for x, y in g.dbs.items():
if hasattr(y, "close"):
y.close()
@app.route("/iterator", methods=["GET"])
def iterator() -> Iterable[Union[bytes, Tuple[bytes, bytes]]]:
db = _parse_and_get_prefixed_db()
starting_by = request.args.get("starting_by")
starting_by = b"".join(
serializer.normalize_strings(DecodeType.STR.pure_encode_fun, starting_by)
if starting_by is not None
else ()
)
include_key = request.args.get("include_key") == "True"
include_value = request.args.get("include_value") == "True"
out = pickle.dumps(
list(
db.iterator(
prefix=starting_by, include_key=include_key, include_value=include_value
)
)
)
return Response(out, content_type="application/octet-stream")
@app.route("/dblen", methods=["GET"])
def dblen() -> str:
db = _parse_and_get_prefixed_db()
starting_by = request.args.get("starting_by")
starting_by = b"".join(
serializer.normalize_strings(DecodeType.STR.pure_encode_fun, starting_by)
if starting_by is not None
else ()
)
out = serializer.encode(
sum(
1
for _ in db.iterator(
include_key=True, include_value=False, prefix=starting_by
)
)
)
return Response(out, content_type="application/octet-stream")
@app.route("/setitem", methods=["POST"])
def setitem() -> Response:
db = _parse_and_get_prefixed_db()
key = request.args.get("key")
value = request.get_data()
keybytes = DecodeType.STR.pure_encode_fun(key)
db.put(keybytes, value)
return Response(key, content_type="text")
@app.route("/getitem", methods=["GET"])
def getitem() -> Response:
db = _parse_and_get_prefixed_db()
key = request.args.get("key")
keybytes = DecodeType.STR.pure_encode_fun(key)
out = db.get(keybytes, default=b"")
return Response(out, content_type="application/octet-stream")
@app.route("/delitem", methods=["DELETE"])
def delitem() -> (str, http.HTTPStatus):
db = _parse_and_get_prefixed_db()
key = request.args.get("key")
keybytes = DecodeType.STR.pure_encode_fun(key)
db.delete(keybytes)
return Response(key, content_type="text")
@app.route("/repr", methods=["GET"])
def repr() -> str:
db = _parse_and_get_prefixed_db()
dbpath = request.args.get("dbpath")
classname = request.args.get("classname")
innerdb = f"{db}"
dbrepr = f"{classname}(path='{dbpath}', db={innerdb})"
return Response(dbrepr, content_type="text")
def dummy_server(port: Union[int, str]) -> Process:
port = int(port)
def runflask() -> None:
sys.stdout = open(os.devnull, "w")
sys.stderr = open(os.devnull, "w")
app.run(port=port)
dummy_server = Process(target=runflask)
dummy_server.start()
return dummy_server
if __name__ == "__main__":
pass
|
the-stack_0_3948 | """Single slice vgg with normalised scale.
"""
import functools
import lasagne as nn
import numpy as np
import theano
import theano.tensor as T
import data_loader
import deep_learning_layers
import image_transform
import layers
import preprocess
import postprocess
import objectives
import theano_printer
import updates
import utils
# Random params
rng = np.random
take_a_dump = False # dump a lot of data in a pkl-dump file. (for debugging)
dump_network_loaded_data = False # dump the outputs from the dataloader (for debugging)
# Memory usage scheme
caching = None
# Save and validation frequency
validate_every = 10
validate_train_set = True
save_every = 10
restart_from_save = False
# Training (schedule) parameters
# - batch sizes
batch_size = 32
sunny_batch_size = 4
batches_per_chunk = 16
AV_SLICE_PER_PAT = 11
num_epochs_train = 80 * AV_SLICE_PER_PAT
# - learning rate and method
base_lr = .0001
learning_rate_schedule = {
0: base_lr,
num_epochs_train*9/10: base_lr/10,
}
momentum = 0.9
build_updates = updates.build_adam_updates
# Preprocessing stuff
cleaning_processes = [
preprocess.set_upside_up,]
cleaning_processes_post = [
functools.partial(preprocess.normalize_contrast_zmuv, z=2)]
augmentation_params = {
"rotate": (-180, 180),
"shear": (0, 0),
"zoom_x": (-0.5, 1.5),
"zoom_y": (-0.5, 1.5),
"skew_x": (-10, 10),
"skew_y": (-10, 10),
"translate": (-8, 8),
"flip_vert": (0, 1),
"roll_time": (0, 0),
"flip_time": (0, 0)
}
use_hough_roi = True # use roi to center patches
preprocess_train = functools.partial( # normscale_resize_and_augment has a bug
preprocess.preprocess_normscale,
normscale_resize_and_augment_function=functools.partial(
image_transform.normscale_resize_and_augment_2,
normalised_patch_size=(128,128)))
preprocess_validation = functools.partial(preprocess_train, augment=False)
preprocess_test = preprocess_train
sunny_preprocess_train = preprocess.sunny_preprocess_with_augmentation
sunny_preprocess_validation = preprocess.sunny_preprocess_validation
sunny_preprocess_test = preprocess.sunny_preprocess_validation
# Data generators
create_train_gen = data_loader.generate_train_batch
create_eval_valid_gen = functools.partial(data_loader.generate_validation_batch, set="validation")
create_eval_train_gen = functools.partial(data_loader.generate_validation_batch, set="train")
create_test_gen = functools.partial(data_loader.generate_test_batch, set=["validation", "test"])
# Input sizes
image_size = 64
data_sizes = {
"sliced:data:singleslice:difference:middle": (batch_size, 29, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice:difference": (batch_size, 29, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:ax": (batch_size, 30, 15, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:shape": (batch_size, 2,),
"sunny": (sunny_batch_size, 1, image_size, image_size)
# TBC with the metadata
}
# Objective
l2_weight = 0.000
l2_weight_out = 0.000
def build_objective(interface_layers):
# l2 regu on certain layers
l2_penalty = nn.regularization.regularize_layer_params_weighted(
interface_layers["regularizable"], nn.regularization.l2)
# build objective
return objectives.KaggleObjective(interface_layers["outputs"], penalty=l2_penalty)
# Testing
postprocess = postprocess.postprocess
test_time_augmentations = 20 * AV_SLICE_PER_PAT # More augmentations since a we only use single slices
tta_average_method = lambda x: np.cumsum(utils.norm_geometric_average(utils.cdf_to_pdf(x)))
# Architecture
def build_model(input_layer = None):
#################
# Regular model #
#################
input_size = data_sizes["sliced:data:singleslice"]
if input_layer:
l0 = input_layer
else:
l0 = nn.layers.InputLayer(input_size)
l1a = nn.layers.dnn.Conv2DDNNLayer(l0, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l1b = nn.layers.dnn.Conv2DDNNLayer(l1a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l1 = nn.layers.dnn.MaxPool2DDNNLayer(l1b, pool_size=(2,2), stride=(2,2))
l2a = nn.layers.dnn.Conv2DDNNLayer(l1, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l2b = nn.layers.dnn.Conv2DDNNLayer(l2a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l2 = nn.layers.dnn.MaxPool2DDNNLayer(l2b, pool_size=(2,2), stride=(2,2))
l3a = nn.layers.dnn.Conv2DDNNLayer(l2, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l3b = nn.layers.dnn.Conv2DDNNLayer(l3a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l3c = nn.layers.dnn.Conv2DDNNLayer(l3b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l3 = nn.layers.dnn.MaxPool2DDNNLayer(l3c, pool_size=(2,2), stride=(2,2))
l4a = nn.layers.dnn.Conv2DDNNLayer(l3, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l4b = nn.layers.dnn.Conv2DDNNLayer(l4a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l4c = nn.layers.dnn.Conv2DDNNLayer(l4b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l4 = nn.layers.dnn.MaxPool2DDNNLayer(l4c, pool_size=(2,2), stride=(2,2))
l5a = nn.layers.dnn.Conv2DDNNLayer(l4, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l5b = nn.layers.dnn.Conv2DDNNLayer(l5a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l5c = nn.layers.dnn.Conv2DDNNLayer(l5b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l5 = nn.layers.dnn.MaxPool2DDNNLayer(l5c, pool_size=(2,2), stride=(2,2))
# Systole Dense layers
ldsys1 = nn.layers.DenseLayer(l5, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
ldsys1drop = nn.layers.dropout(ldsys1, p=0.5)
ldsys2 = nn.layers.DenseLayer(ldsys1drop, num_units=512, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
ldsys2drop = nn.layers.dropout(ldsys2, p=0.5)
ldsys3 = nn.layers.DenseLayer(ldsys2drop, num_units=600, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.softmax)
ldsys3drop = nn.layers.dropout(ldsys3, p=0.5) # dropout at the output might encourage adjacent neurons to correlate
ldsys3dropnorm = layers.NormalisationLayer(ldsys3drop)
l_systole = layers.CumSumLayer(ldsys3dropnorm)
# Diastole Dense layers
lddia1 = nn.layers.DenseLayer(l5, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
lddia1drop = nn.layers.dropout(lddia1, p=0.5)
lddia2 = nn.layers.DenseLayer(lddia1drop, num_units=512, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
lddia2drop = nn.layers.dropout(lddia2, p=0.5)
lddia3 = nn.layers.DenseLayer(lddia2drop, num_units=600, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.softmax)
lddia3drop = nn.layers.dropout(lddia3, p=0.5) # dropout at the output might encourage adjacent neurons to correlate
lddia3dropnorm = layers.NormalisationLayer(lddia3drop)
l_diastole = layers.CumSumLayer(lddia3dropnorm)
return {
"inputs":{
"sliced:data:singleslice": l0
},
"outputs": {
"systole": l_systole,
"diastole": l_diastole,
},
"regularizable": {
ldsys1: l2_weight,
ldsys2: l2_weight,
ldsys3: l2_weight_out,
lddia1: l2_weight,
lddia2: l2_weight,
lddia3: l2_weight_out,
},
"meta_outputs": {
"systole": ldsys2,
"diastole": lddia2,
}
}
|
the-stack_0_3949 | import re, requests, Tools
class Utils:
@staticmethod
def current_interface_version():
return 50400
@staticmethod
def remove_colors(string):
if string is None:
return None
string = re.sub(r"\|\c........", "", string)
return string.replace("|r", "")
@staticmethod
def find_in_toc(what, toc):
return Tools.run_regex_and_return_string(bytes(what + ": (.*)\n", 'utf-8'), toc)
@staticmethod
def are_we_online():
try:
r = requests.get('http://google.com')
# True only when the request succeeds with HTTP 200
return r.status_code == 200
except requests.RequestException:
# any network error means we are offline
return False
|
the-stack_0_3950 | # coding=utf-8
# Non-parametric Density Peak clustering:
# Automatic topography of high-dimensional data sets
#
# Author: Maria d'Errico <[email protected]>
#
# Licence: BSD 3 clause
import warnings
import numpy as np
from sklearn.base import BaseEstimator, ClusterMixin, DensityMixin, ClassifierMixin, TransformerMixin
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import euclidean_distances
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors import kneighbors_graph
from math import log, sqrt, exp, lgamma, pi, pow
from Pipeline import _DPA
from Pipeline.twoNN import twoNearestNeighbors
from Pipeline.PAk import PointAdaptive_kNN
VALID_METRIC = ['precomputed', 'euclidean','cosine']
VALID_DIM = ['auto', 'twoNN']
VALID_DENSITY = ['PAk', 'kNN']
def _DensityPeakAdvanced(densities, err_densities, k_hat, distances, indices, Z):
"""Main function implementing the Density Peak Advanced clustering algorithm:
* Automatic detection of cluster centers
* Point assignament to clusters in order of decreasing `g`
* Topography reconstruction: search of saddle points and cluster merging
Parameters
----------
densities : array [n_samples]
The logarithm of the density at each point.
err_densities : array [n_samples]
The uncertainty in the density estimation, obtained by computing
the inverse of the Fisher information matrix.
k_hat : array [n_samples]
The optimal number of neighbors for which the condition of constant density holds.
distances: array [n_samples, k_max+1]
Distances to the k_max neighbors of each points. The point itself is included in the array.
indices : array [n_samples, k_max+1]
Indices of the k_max neighbors of each points. The point itself is included in the array.
Z : float, default = 1
The number of standard deviations, which fixes the level of statistical confidence at which
one decides to consider a cluster meaningful.
Attributes
----------
labels : array [Nclus]
The clustering labels assigned to each point in the data set.
halos : array [Nclus]
The clustering labels assigned to each point in the data set. Points identified as halos have
clustering label equal to ``-1``.
topography : array [Nclus, Nclus]
Let Nclus be the number of clusters; the topography consists of a Nclus × Nclus symmetric matrix,
in which the diagonal entries are the heights of the peaks and the off-diagonal entries are the
heights of the saddle points.
centers : array [Nclus]
The list of points identified as the centers of the Nclus statistically significant clusters.
"""
# We define as cluster centers the local maxima of g, where g is defined as density-err_density.
g = [densities[i]-err_densities[i] for i in range(0,len(densities))]
# Automatic detection of cluster centers
#---------------------------------------
N = len(densities)
centers = _DPA.get_centers(N, indices, k_hat, g)
Nclus = len(centers)
# Assign points to clusters
#--------------------------
# Assign all the points that are not centers to the same cluster as the nearest point with higher g.
# This assignment is performed in order of decreasing g
clu_labels = _DPA.initial_assignment(g, N, indices, centers)
# Topography reconstruction
#--------------------------
# Finding saddle points between pair of clusters c and c'.
# Halo points are also identified as the points whose density is lower than
# the density of the lowest saddle point, namely the set of points
# whose assignment is not reliable. The clustering label for halo points is set to -1.
Rho_bord, Rho_bord_err, clu_labels, clu_halos, Nclus, centers_m = _DPA.get_borders(N, k_hat, indices,
clu_labels, Nclus,
g, densities, err_densities,
Z, centers)
topography = []
for i in range(0, Nclus-1):
for j in range(i+1, Nclus):
topography.append([i,j, Rho_bord[i][j], Rho_bord_err[i][j]])
labels = clu_labels
halos = clu_halos
return labels, halos, topography, g, centers_m
class DensityPeakAdvanced(ClusterMixin, BaseEstimator):
"""Class definition for the non-parametric Density Peak clustering.
The default pipeline makes use of the `PAk` density estimator and of the `TWO-NN` intristic dimension estimator.
The densities and the corresponding errors can also be provided as precomputed arrays.
Parameters
----------
Z : float, default = 1
The number of standard deviations, which fixes the level of statistical confidence at which
one decides to consider a cluster meaningful.
metric : string, or callable
The distance metric to use.
If metric is a string, it must be one of the options allowed by
scipy.spatial.distance.pdist for its metric parameter, or a metric listed in
:obj:`VALID_METRIC = [precomputed, euclidean,cosine]`. If metric is ``precomputed``, X is assumed to
be a distance matrix. Alternatively, if metric is a callable function, it is
called on each pair of instances (rows) and the resulting value recorded. The
callable should take two arrays from X as input and return a value indicating
the distance between them. Default is ``euclidean``.
densities : array [n_samples], default = None
The logarithm of the density at each point. If provided, the following parameters are ignored:
``density_algo``, ``k_max``, ``D_thr``.
err_densities : array [n_samples], default = None
The uncertainty in the density estimation, obtained by computing
the inverse of the Fisher information matrix.
k_hat : array [n_samples], default = None
The optimal number of neighbors for which the condition of constant density holds.
nn_distances : array [n_samples, k_max+1]
Distances to the k_max neighbors of each points.
nn_indices : array [n_samples, k_max+1]
Indices of the k_max neighbors of each points.
affinity : string or callable, default 'precomputed'
How to construct the affinity matrix.
- ``nearest_neighbors`` : construct the affinity matrix by computing a
graph of nearest neighbors.
- ``rbf`` : construct the affinity matrix using a radial basis function
(RBF) kernel.
- ``precomputed`` : interpret ``X`` as a precomputed affinity matrix.
- ``precomputed_nearest_neighbors`` : interpret ``X`` as a sparse graph
of precomputed nearest neighbors, and constructs the affinity matrix
by selecting the ``n_neighbors`` nearest neighbors.
- one of the kernels supported by
:func:`~sklearn.metrics.pairwise_kernels`.
density_algo : string, default = "PAk"
Define the algorithm to use as density estimator. It must be one of the options allowed by
:obj:`VALID_DENSITY = [PAk, kNN]`.
k_max : int, default=1000
This parameter is considered if density_algo is ``PAk`` or ``kNN``, it is ignored otherwise.
k_max set the maximum number of nearest-neighbors considered by the density estimator.
If ``density_algo=PAk``, k_max is used by the algorithm in the search for the
largest number of neighbors ``k_hat`` for which the condition of constant density
holds, within a given level of confidence.
If ``density_algo=kNN``, k_max set the number of neighbors to be used by the standard
k-Nearest Neighbor algorithm.
If the number of points in the sample N is
less than the default value, k_max will be set automatically to the value ``N/2``.
D_thr : float, default=23.92812698
This parameter is considered if density_algo is ``PAk``, it is ignored otherwise.
Set the level of confidence in the PAk density estimator. The default value corresponds to a p-value of
:math:`10^{-6}` for a :math:`\chi^2` distribution with one degree of freedom.
dim : int, default = None
Intrinsic dimensionality of the sample. If dim is provided, the following parameters are ignored:
``dim_algo``, ``blockAn``, ``block_ratio``, ``frac``.
dim_algo : string, or callable, default="twoNN"
Method for intrinsic dimensionality calculation. If dim_algo is ``auto``, dim is assumed to be
equal to the number of features. If dim_algo is a string, it must be one of the options allowed by :obj:`VALID_DIM = [auto, twoNN]`.
blockAn : bool, default=True
This parameter is considered if dim_algo is ``twoNN``, it is ignored otherwise.
If blockAn is True the algorithm perform a block analysis that allows discriminating the relevant dimensions
as a function of the block size. This allows to study the stability of the estimation with respect to
changes in the neighborhood size, which is crucial for ID estimations when the data lie on a
manifold perturbed by a high-dimensional noise.
block_ratio : int, default=5
This parameter is considered if dim_algo is ``twoNN``, it is ignored otherwise.
Set the minimum size of the blocks as `n_samples/block_ratio`. If ``blockAn=False``, ``block_ratio`` is ignored.
frac : float, default=1
This parameter is considered if dim_algo is ``twoNN``, it is ignored otherwise.
Define the fraction of points in the data set used for ID calculation. By default the full data set is used.
n_jobs : int or None, optional (default=None)
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
labels_ : array [Nclus]
The clustering labels assigned to each point in the data set.
halos_ : array [Nclus]
The clustering labels assigned to each point in the data set. Points identified as halos have
label equal to zero.
topography_ : array [Nclus, Nclus]
Let Nclus be the number of clusters; the topography consists of a Nclus × Nclus symmetric matrix,
in which the diagonal entries are the heights of the peaks and the off-diagonal entries are the
heights of the saddle points.
distances_ : array [n_samples, k_max+1]
Distances to the k_max neighbors of each points. The point itself is included in the array.
indices_ : array [n_samples, k_max+1]
Indices of the k_max neighbors of each points. The point itself is included in the array.
k_hat_ : array [n_samples]
The optimal number of neighbors for which the condition of constant density holds.
centers_ : array [Nclus]
The list of points identified as the centers of the statistically significant clusters.
dim_ : int,
Intrinsic dimensionality of the sample. If ``dim`` is not provided, ``dim_`` is set
to the number of features in the input file.
k_max_ : int
The maximum number of nearest-neighbors considered by the procedure that returns the
largest number of neighbors ``k_hat`` for which the condition of constant density
holds, within a given level of confidence. If the number of points in the sample `N` is
less than the default value, k_max_ will be set automatically to the value ``N/2``.
densities_ : array [n_samples]
If not provided by the parameter ``densities``, it is computed by using the `PAk` density estimator.
err_densities_ : array [n_samples]
The uncertainty in the density estimation. If not provided by the parameter ``densities``, it is
computed by using the `PAk` density estimator.
Example
-------
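A minimal usage sketch (the import path and the data matrix ``X`` are
assumptions, not fixed by this file)::
from Pipeline.DPA import DensityPeakAdvanced
est = DensityPeakAdvanced(Z=1.5)
est.fit(X)
est.labels_       # cluster label assigned to each point
est.topography_   # peak heights and saddle-point heights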
References
----------
M. d’Errico, E. Facco, A. Laio and A. Rodriguez, Automatic topography of high-dimensional data sets by non-parametric Density Peak clustering (2018) https://arxiv.org/abs/1802.10549
"""
def __init__(self, Z=1, metric="euclidean", densities=None, err_densities=None, k_hat=None,
nn_distances=None, nn_indices=None, affinity='precomputed',
density_algo="PAk", k_max=1000, D_thr=23.92812698, dim=None, dim_algo="twoNN",
blockAn=True, block_ratio=5, frac=1, n_jobs=None):
self.Z = Z
self.metric = metric
self.densities = densities
self.err_densities = err_densities
self.k_hat = k_hat
self.nn_distances = nn_distances
self.nn_indices = nn_indices
self.affinity = affinity
self.density_algo = density_algo
self.k_max = k_max
self.D_thr = D_thr
self.dim = dim
self.dim_algo = dim_algo
self.blockAn = blockAn
self.block_ratio = block_ratio
self.frac = frac
self.n_jobs = n_jobs
if metric not in VALID_METRIC:
raise ValueError("invalid metric: '{0}'".format(metric))
if dim_algo not in VALID_DIM:
raise ValueError("invalid dim_algo: '{0}'".format(dim_algo))
if density_algo not in VALID_DENSITY:
raise ValueError("invalid dim_algo: '{0}'".format(density_algo))
#if not (self.densities and self.err_densities and self.k_hat):
# # TODO: decide whether to raise a warning instead and automatically run PAk.
# raise ValueError("DPA requires the error estimation and optimal neighborhood along \
# with the densities. If not available, use the default PAk estimator")
if self.dim_algo == "twoNN" and self.frac > 1:
raise ValueError("frac should be between 0 and 1.")
if self.nn_distances is not None and self.nn_indices is not None:
if self.nn_distances.shape[1] != self.nn_indices.shape[1]:
raise ValueError("check nn_distances and nn_indices. Mismatch in array dimension.")
def fit(self, X, y=None):
"""Fit the DPA clustering on the data.
Parameters
----------
X : array [n_samples, n_samples] if metric == “precomputed”, or,
[n_samples, n_features] otherwise
The input samples. Similarities / affinities between
instances if ``affinity='precomputed'``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Returns self.
"""
# Input validation
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64, ensure_min_samples=2)
allow_squared = self.affinity in ["precomputed",
"precomputed_nearest_neighbors"]
if X.shape[0] == X.shape[1] and not allow_squared:
warnings.warn("The DPA clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
self.k_max_ = self.k_max
self.dim_ = self.dim
if not self.dim:
if self.dim_algo == "auto":
self.dim_ = X.shape[1]
elif self.dim_algo == "twoNN":
if self.block_ratio >= X.shape[0]:
raise ValueError("block_ratio is larger than the sample size, the minimum size for \
block analysis would be zero. Please set a value lower than "+str(X.shape[0]))
self.dim_ = twoNearestNeighbors(blockAn=self.blockAn, block_ratio=self.block_ratio, metric=self.metric,
frac=self.frac, n_jobs=self.n_jobs).fit(X).dim_
else:
pass
# If densities, uncertainties and k_hat are provided as input, compute only the
# matrix of nearest neighbor:
self.densities_ = self.densities
self.err_densities_ = self.err_densities
self.k_hat_ = self.k_hat
if self.densities_ is not None and self.err_densities_ is not None and self.k_hat_ is not None:
# If the nearest neighbors matrix is precomputed:
if self.nn_distances is not None and self.nn_indices is not None:
self.k_max_ = max(self.k_hat_)
self.distances_ = self.nn_distances
self.indices_ = self.nn_indices
else:
self.k_max_ = max(self.k_hat_)
if self.metric == "precomputed":
nbrs = NearestNeighbors(n_neighbors=self.k_max_+1, # The point i is counted in its neighborhood
algorithm="brute",
metric=self.metric,
n_jobs=self.n_jobs).fit(X)
else:
nbrs = NearestNeighbors(n_neighbors=self.k_max_+1, # The point i is counted in its neighborhood
algorithm="auto",
metric=self.metric,
n_jobs=self.n_jobs).fit(X)
self.distances_, self.indices_ = nbrs.kneighbors(X)
elif self.density_algo == "PAk":
# If the nearest neighbors matrix is precomputed:
if self.nn_distances is not None and self.nn_indices is not None:
self.k_max_ = self.nn_distances.shape[1]-1
PAk = PointAdaptive_kNN(k_max=self.k_max_, D_thr=self.D_thr, metric=self.metric,
nn_distances=self.nn_distances, nn_indices=self.nn_indices,
dim_algo=self.dim_algo, blockAn=self.blockAn,
block_ratio=self.block_ratio,
frac=self.frac, dim=self.dim_, n_jobs=self.n_jobs).fit(X)
else:
PAk = PointAdaptive_kNN(k_max=self.k_max_, D_thr=self.D_thr, metric=self.metric,
dim_algo=self.dim_algo, blockAn=self.blockAn,
block_ratio=self.block_ratio,
frac=self.frac, dim=self.dim_, n_jobs=self.n_jobs).fit(X)
self.distances_ = PAk.distances_
self.indices_ = PAk.indices_
self.densities_ = PAk.densities_
self.err_densities_ = PAk.err_densities_
self.k_hat_ = PAk.k_hat_
self.k_max_ = max(self.k_hat_)
else:
# TODO: implement option for kNN
pass
self.labels_, self.halos_, self.topography_, self.g_, self.centers_ = _DensityPeakAdvanced(self.densities_,
self.err_densities_, self.k_hat_,
self.distances_, self.indices_, self.Z)
self.is_fitted_ = True
return self
def fit_predict(self, X, y=None):
"""Perform DPA clustering from features or distance matrix,
and return cluster labels.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features), or \
(n_samples, n_samples)
Training instances to cluster, or distances between instances if
``metric='precomputed'``. If a sparse matrix is provided, it will
be converted into a sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
labels : ndarray, shape (n_samples,)
Cluster labels. Noisy samples are given the label -1.
"""
self.fit(X)
return self.labels_
"""
def get_params(self, deep=True):
return {"Z": self.Z, "metric": self.metric, "densities": self.densities,
"err_densities": self.err_densities, "k_hat": self.k_hat, "nn_distances": self.nn_distances,
"nn_indices": self.nn_indices, "affinity": self.affinity, "density_algo": self.density_algo,
"k_max":self.k_max, "D_thr": self.D_thr,
"dim": self.dim, "dim_algo": self.dim_algo, "blockAn": self.blockAn, "block_ratio": self.block_ratio,
"frac": self.frac, "n_jobs": self.n_jobs}
def set_params(self, **parameters):
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
"""
|
the-stack_0_3953 | from math import log, sin, cos
import numpy as np
alt_20_ft = 6.096
alt_1000_ft = 304.8
m_to_ft = 3.28084
# Reference: MIL-F-8785C, MILITARY SPECIFICATION: FLYING QUALITIES OF PILOTED AIRPLANES (05 NOV 1980)
def wind_log(alt, speed, heading, z0=2.0, degree=True):
if degree:
heading = heading * 0.017453292519943295
if alt <= 0:
v = 0
elif (alt > 0) and (alt < alt_20_ft):
v = speed / alt_20_ft * alt
elif (alt >= alt_20_ft) and (alt <= alt_1000_ft):
v = speed * log(alt * m_to_ft / z0) / log(20 / z0)
else:
v = speed * log(1000 / z0) / log(20 / z0)
_VN = v * cos(heading)
_VE = v * sin(heading)
return _VN, _VE
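# Example (sketch): north/east components of a 5 m/s wind with a 90 degree
# heading at 50 m altitude, using the default roughness length z0:
#   vn, ve = wind_log(alt=50.0, speed=5.0, heading=90.0)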
def wind_log_table(speed, heading, z0=2.0):
alts = (-100, 0, *np.logspace(np.log10(alt_20_ft), np.log10(alt_1000_ft), 20), 10e9)
vs = [None] * len(alts)
for i, alt in enumerate(alts):
if alt <= 0:
vs[i] = 0
elif (alt > 0) and (alt < alt_20_ft):
vs[i] = speed / alt_20_ft * alt
elif (alt >= alt_20_ft) and (alt <= alt_1000_ft):
vs[i] = speed * log(alt * m_to_ft / z0) / log(20 / z0)
else:
vs[i] = speed * log(1000 / z0) / log(20 / z0)
return np.array([alts, vs, [heading] * len(alts)]).T
if __name__ == "__main__":
import matplotlib.pyplot as plt
t = wind_log_table(5, 180)
plt.plot(t[:-1, 0], t[:-1, 1])
plt.show()
|
the-stack_0_3954 | # -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import unittest
from parameterized import parameterized
from qiskit_aqua.components.oracles import BernsteinVaziraniOracle
from qiskit_aqua.algorithms import BernsteinVazirani
from qiskit_aqua import get_aer_backend
from test.common import QiskitAquaTestCase
class TestBernsteinVazirani(QiskitAquaTestCase):
@parameterized.expand([
[{'000': '0', '001': '0', '010': '1', '011': '1',
'100': '1', '101': '1', '110': '0', '111': '0'}],
[{'000': '0', '001': '1', '010': '0', '011': '1',
'100': '1', '101': '0', '110': '1', '111': '0'}]
])
def test_bernsteinvazirani(self, bv_input):
backend = get_aer_backend('qasm_simulator')
oracle = BernsteinVaziraniOracle(bv_input)
algorithm = BernsteinVazirani(oracle)
result = algorithm.run(backend)
self.assertTrue(result['oracle_evaluation'])
if __name__ == '__main__':
unittest.main()
|
the-stack_0_3958 | import unittest
from os import path
from ga4stpg.binary.coder import Coder
from ga4stpg.graph import ReaderORLibrary
from ga4stpg.graph.algorithms import prim
from ga4stpg.graph.graph import UndirectedGraph as UGraph
from ga4stpg.graph.graph import UndirectedWeightedGraph as UWGraph
from ga4stpg.binary import random_binary
from ga4stpg.graph.steiner import (prunning_mst, shortest_path,
shortest_path_origin_prim,
shortest_path_with_origin)
class TestBinaryCoder(unittest.TestCase):
def setUp(self):
filename = path.join('datasets', 'ORLibrary', 'steinb15.txt')
self.stpg = ReaderORLibrary().parser(filename)
def test_EncoderMST(self):
stpg = self.stpg
graph = self.stpg.graph
mst, cost = prim(graph, 1)
tree = UGraph()
for u, v in mst.items():
tree.add_edge(v,u)
coder = Coder(STPG=stpg)
chromosome = coder.treegraph2binary(tree)
self.assertIsInstance(chromosome, str)
expected_lenght = stpg.nro_nodes - stpg.nro_terminals
self.assertEqual(len(chromosome), expected_lenght)
self.assertEqual(chromosome, '1' * expected_lenght)
def test_DecoderRandomChromosome(self):
stpg = self.stpg
graph = self.stpg.graph
stpg_vertices = set(graph.vertices)
expected_lenght = stpg.nro_nodes - stpg.nro_terminals
random_chromosome = random_binary(expected_lenght)
self.assertEqual(len(random_chromosome), expected_lenght)
coder = Coder(STPG=stpg)
subgraph = coder.binary2treegraph(random_chromosome)
self.assertIsInstance(subgraph, UGraph)
terminals = self.stpg.terminals
sub_vertices = coder.vertices_from_chromosome(random_chromosome)
self.assertTrue(terminals.issubset(sub_vertices))
self.assertTrue(sub_vertices.issubset(stpg_vertices))
@unittest.skip
def test_DecoderHeuristic(self):
stpg = self.stpg
graph = self.stpg.graph
steiner_tree, cost = shortest_path_origin_prim(graph,1,stpg.terminals)
coder = Coder(STPG=stpg)
chromosome = coder.treegraph2binary(steiner_tree)
self.assertIsInstance(chromosome, str)
vertices_st = set(steiner_tree.vertices)
vertices_cr = coder.vertices_from_chromosome(chromosome)
self.assertEqual(vertices_cr, vertices_st)
subtree = coder.binary2treegraph(chromosome)
st_edges = set((min(edge), max(edge)) for edge in steiner_tree.gen_undirect_edges())
sb_edges = set((min(edge), max(edge)) for edge in subtree.gen_undirect_edges())
self.assertEqual(st_edges, sb_edges)
if __name__ == "__main__" :
unittest.main() |
the-stack_0_3960 | """
Performs a two-sided Kolmogorov-Smirnov test that the provided
sample comes from the given probability distribution function.
"""
from __future__ import print_function
import numpy
import pyferret
import pyferret.stats
import scipy.stats
def ferret_init(id):
"""
Initialization for the stats_kstest1 PyEF
"""
axes_values = [ pyferret.AXIS_DOES_NOT_EXIST ] * pyferret.MAX_FERRET_NDIM
axes_values[0] = pyferret.AXIS_CUSTOM
false_influences = [ False ] * pyferret.MAX_FERRET_NDIM
retdict = { "numargs": 3,
"descript": "Returns two-sided Kolmogorov-Smirnov test stat. and prob. " \
"that sample comes from a pop. with given prob. distrib.",
"axes": axes_values,
"argnames": ( "SAMPLE", "PDNAME", "PDPARAMS", ),
"argdescripts": ( "Sample data array",
"Name of a continuous probability distribution",
"Parameters for this continuous probability distribution"),
"argtypes": ( pyferret.FLOAT_ARRAY, pyferret.STRING_ONEVAL, pyferret.FLOAT_ARRAY, ),
"influences": ( false_influences, false_influences, false_influences, ),
}
return retdict
def ferret_custom_axes(id):
"""
Define custom axis of the stats_kstest1 Ferret PyEF
"""
axis_defs = [ None ] * pyferret.MAX_FERRET_NDIM
axis_defs[0] = ( 1, 2, 1, "KS,P", False )
return axis_defs
def ferret_compute(id, result, resbdf, inputs, inpbdfs):
"""
Performs a two-sided Kolmogorov-Smirnov test that the provided sample
comes from a population with the given probability distribution function.
The sample is given in inputs[0], the probability distribution function
name is given in inputs[1] (a string), and the "standard" parameters for
this probability distribution function are given in inputs[2]. The test
statistic value and two-tailed probability are returned in result.
Undefined data given in inputs[0] are removed before performing the test.
"""
# get the scipy.stats distribution name from the given distribution name
if inputs[1] is None:
raise ValueError("The name of a probability distribution function not given")
distscipyname = pyferret.stats.getdistname(inputs[1])
if distscipyname is None:
raise ValueError("Unknown or unsupported probability distribution function %s" % inputs[1])
# get the scipy.stats distribution parameters from the given "standard" parameters
if inputs[2] is None:
raise ValueError("Paramaters for the probability distribution function not given")
distscipyparams = pyferret.stats.getdistparams(distscipyname, inputs[2].reshape(-1))
if distscipyparams is None:
raise ValueError("Unknown or unsupported (for params) probability distribution function %s" % inputs[1])
# get the valid sample values
badmask = ( numpy.fabs(inputs[0] - inpbdfs[0]) < 1.0E-5 )
badmask = numpy.logical_or(badmask, numpy.isnan(inputs[0]))
goodmask = numpy.logical_not(badmask)
values = inputs[0][goodmask]
# perform the test and assign the results
fitparams = scipy.stats.kstest(values, distscipyname, distscipyparams)
result[:] = resbdf
# Kolmogorov-Smirnov test statistic
result[0] = fitparams[0]
# probability
result[1] = fitparams[1]
#
# The rest of this is just for testing this module at the command line
#
if __name__ == "__main__":
# make sure ferret_init and ferret_custom_axes do not have problems
info = ferret_init(0)
info = ferret_custom_axes(0)
# Set the seed to reproduce a problematic distribution
# import numpy.random
# numpy.random.seed(3333333)
# Get a random sample from the compared distribution and from another distribution
ydim = 200
zdim = 150
mu = 5.0
sigma = 0.5
rvsc = scipy.stats.norm(mu, sigma).rvs(ydim * zdim)
rvsu = scipy.stats.uniform(loc=(mu + 3.0 * sigma), scale=(3.0 * sigma)).rvs(ydim * zdim)
# setup for the call to ferret_compute
distname = "norm"
distparams = numpy.array([mu, sigma], dtype=numpy.float64)
inpbdfs = numpy.array([-9999.0, -1.0, -2.0], dtype=numpy.float64)
resbdf = numpy.array([-8888.0], dtype=numpy.float64)
sampc = numpy.empty((1, ydim, zdim, 1, 1, 1), dtype=numpy.float64, order='F')
sampu = numpy.empty((1, ydim, zdim, 1, 1, 1), dtype=numpy.float64, order='F')
index = 0
for j in range(ydim):
for k in range(zdim):
if (index % 71) == 3:
sampc[0, j, k, 0, 0, 0] = inpbdfs[0]
sampu[0, j, k, 0, 0, 0] = inpbdfs[0]
else:
sampc[0, j, k, 0, 0, 0] = rvsc[index]
sampu[0, j, k, 0, 0, 0] = rvsu[index]
index += 1
resultc = -7777.0 * numpy.ones((2, 1, 1, 1, 1, 1), dtype=numpy.float64, order='F')
resultu = -7777.0 * numpy.ones((2, 1, 1, 1, 1, 1), dtype=numpy.float64, order='F')
# call ferret_compute with data from the distribution and check the results
ferret_compute(0, resultc, resbdf, (sampc, distname, distparams), inpbdfs)
resultc = resultc.reshape(-1)
print("from same dist result: %s" % str(resultc))
if (resultc[0] < 0.00) or (resultc[0] > 0.01) or \
(resultc[1] < 0.10) or (resultc[1] > 1.00):
raise ValueError("Unexpected result")
# call ferret_compute with data from a different distribution and check the results
ferret_compute(0, resultu, resbdf, (sampu, distname, distparams), inpbdfs)
resultu = resultu.reshape(-1)
print("from diff dist result: %s" % str(resultu))
if (resultu[0] < 0.99) or (resultu[0] > 1.00) or \
(resultu[1] < 0.00) or (resultu[1] > 0.01):
raise ValueError("Unexpected result")
# All successful
print("Success")
|
the-stack_0_3961 | import unittest
import os, shutil
import subprocess
import sys
import rpy2.robjects as robjects
import glob
import pyreadr
import torch
import pickle
import re
import matplotlib.pyplot as plt
import random
import numpy as np
import warnings
import torch.optim as optim
prepath=os.getcwd()
test_input=prepath+"/test_data/"
test_output=prepath+"/output/"
sourcodedir=prepath.replace('tests','')+"src/"
test_temprun=test_output+"temprun/"##folder for the run of neuralnet
rundatadir=test_temprun+"data/"
runcodedir=test_temprun+"code/"
projdir=test_output+"project/"##the folder supposed to be on local computer and do plotting/analyzing related script
sys.path.insert(0,prepath+'/output/project/')##use absolute path
# print(os.getcwd())
# print(sys.path)
projresdir=projdir+"result/"
projresdir_1=projresdir+"1/"
projdatadir=projdir+"data/"
codefilelist=['nnt_struc.py','plot_model_small.py','plot.mse.epoch.small.r','train_mlp_full_modified.py']
runinputlist='sparselinearode_new.small.stepwiseadd.mat'
runoutputlist=['pickle_traindata.dat','pickle_testdata.dat','pickle_inputwrap.dat','pickle_dimdata.dat','model_best.resnetode.tar','model_best_train.resnetode.tar','checkpoint.resnetode.tar','testmodel.1.out']
runcodelist=['train_mlp_full_modified.py','nnt_struc.py']
runcodetest='test.sh'
# plotdata_py='plotsave.dat'
plotdata_r='Rplot_store.RData'
tempdata_py='datatemp.dat'
plotsourctab='submitlist.tab'
rnncheckfold=test_output+'rnn_test/'
rnncheckfold_data=rnncheckfold+'data/'
rnncheckfold_run=rnncheckfold+'run/'
rnn_comp_data=test_input+'rnn_res/'
smalval=0.001##for comparing values in such as mse
class NNTODETest(unittest.TestCase):
def test_pre(self):
'''
directory preparation for the test
'''
try:
os.makedirs(test_temprun,exist_ok=True)
os.makedirs(rundatadir,exist_ok=True)
os.makedirs(runcodedir,exist_ok=True)
os.makedirs(projdir,exist_ok=True)
os.makedirs(projresdir,exist_ok=True)
os.makedirs(projresdir_1,exist_ok=True)
os.makedirs(projdatadir,exist_ok=True)
shutil.copyfile(test_input+runinputlist,rundatadir+runinputlist)
for codefile in runcodelist:
shutil.copyfile(sourcodedir+codefile,runcodedir+codefile)
shutil.copyfile(runcodetest,runcodedir+runcodetest)
for codefile in codefilelist:
shutil.copyfile(sourcodedir+codefile,projdir+codefile)
shutil.copyfile(test_input+plotsourctab,projdir+plotsourctab)
shutil.copyfile(test_input+runinputlist,projdatadir+runinputlist)
##for add rnn structure test folder
os.makedirs(rnncheckfold,exist_ok=True)
os.makedirs(rnncheckfold_data,exist_ok=True)
os.makedirs(rnncheckfold_run,exist_ok=True)
shutil.copyfile(test_input+runinputlist,rnncheckfold_data+runinputlist)
for codefile in runcodelist:
shutil.copyfile(sourcodedir+codefile,rnncheckfold_run+codefile)
except:
self.assertTrue(False)
self.assertTrue(True)
def test_run_train(self):
'''
test run of the training process
'''
try:
os.chdir(runcodedir)
with open (runcodetest,"r") as myfile:
command=myfile.readlines()
os.system(command[0])
os.chdir(prepath)
for outputfile in runoutputlist:
shutil.copyfile(runcodedir+outputfile,projresdir_1+outputfile)
except:
self.assertTrue(False)
self.assertTrue(True)
def test_run_plotting(self):
'''
run the plotting related python and R script
'''
try:
os.chdir(projdir)
import plot_model_small
subprocess.call("Rscript --vanilla plot.mse.epoch.small.r", shell=True)
os.chdir(prepath)
except:
self.assertTrue(False)
self.assertTrue(True)
def test_file_exist(self):
'''
test all output file are in the folder
'''
try:
#
currlist=set([f for f in os.listdir(projdir) if re.search(r'.*\.(pdf|tar|dat|out)$',f)])
currlist=currlist|set([f for f in os.listdir(projresdir) if re.search(r'.*\.(pdf|tar|dat|out)$',f)])
currlist=currlist|set([f for f in os.listdir(projresdir_1) if re.search(r'.*\.(pdf|tar|dat|out)$',f)])
storelist=set([f for f in os.listdir(test_input) if re.search(r'.*\.(pdf|tar|dat|out)$',f)])
# os.chdir(prepath)
if currlist==storelist:
self.assertTrue(True)
else:
self.assertTrue(False)
except:
print('*****'+os.getcwd())
self.assertTrue(False)
def test_plot_model_small(self):
'''
test dimension&value of output files in all script
'''
try:
# ##figure the same
# with open(projdir+plotdata_py,"rb") as f1:
# newfig=pickle.load(f1)
# with open(test_input+plotdata_py,"rb") as f1:
# oldfig=pickle.load(f1)
# figequal=newfig.__eq__(oldfig)
figequal=True
##data size the same {temporary data at one time point stored}
with open(projdir+tempdata_py,"rb") as f1:
newdata=pickle.load(f1)
with open(test_input+tempdata_py,"rb") as f1:
olddata=pickle.load(f1)
dataequal=newdata['data'].shape==olddata['data'].shape
outputequal=newdata['output'].shape==olddata['output'].shape
targetequal=newdata['target'].shape==olddata['target'].shape
ninnersize_val_equal=newdata['ninnersize']==olddata['ninnersize']
# target_val_equal=torch.all(torch.eq(newdata['target'],olddata['target']))
target_val_equal=True
print("data_size %s output_size %s target_size %s ninnersize %s\n" % (newdata['data'].shape,newdata['output'].shape,newdata['target'].shape,newdata['ninnersize'],))
if figequal and dataequal and outputequal and targetequal and ninnersize_val_equal and target_val_equal:
self.assertTrue(True)
else:
self.assertTrue(False)
except:
self.assertTrue(False)
def test_plot_mse_epoch_small(self):
'''
test plot&dimension of data
'''
try:
newres=pyreadr.read_r(projresdir+plotdata_r)
oldres=pyreadr.read_r(test_input+plotdata_r)
# figequal=newres['p']==oldres['p']
figequal=True
tabdimequal=(newres['summtab'].shape[0]==oldres['summtab'].shape[0] and newres['msetablong'].shape==oldres['msetablong'].shape)
print("summtab_size %s msetablong_size %s\n" % (newres['summtab'].shape,newres['msetablong'].shape,))
if figequal and tabdimequal:
self.assertTrue(True)
else:
self.assertTrue(False)
except:
self.assertTrue(False)
def test_train_mlp_full_modified(self):
'''
test value and dimension of the training script on a small run
'''
try:
#dimension of output and input
with open(projresdir_1+runoutputlist[2],"rb") as f1:
currstore=pickle.load(f1)
with open(test_input+runoutputlist[2],"rb") as f1:
prestore=pickle.load(f1)
print("Xvarnorm_size %s ResponseVar_size %s\n" % (currstore['Xvarnorm'].shape,currstore['ResponseVar'].shape,))
if currstore['Xvarnorm'].shape==prestore['Xvarnorm'].shape and currstore['ResponseVar'].shape==prestore['ResponseVar'].shape:
dimequal=True
else:
dimequal=False
#value of stored data
inputwrap_true=True
for key in currstore.keys():
boolarray=currstore[key]==prestore[key]
if type(boolarray)!=bool:
boolarray=boolarray.all()
inputwrap_true=inputwrap_true and boolarray
with open(projresdir_1+runoutputlist[3],"rb") as f1:
currstore=pickle.load(f1)
with open(test_input+runoutputlist[3],"rb") as f1:
prestore=pickle.load(f1)
dimdict_true=currstore==prestore
device=torch.device('cpu')
currstore=torch.load(projresdir_1+runoutputlist[6],map_location=device)
prestore=torch.load(test_input+runoutputlist[6],map_location=device)
## as new keys will be added to args in future version
currarg=currstore['args_input'].__dict__
prearg=prestore['args_input'].__dict__
currkeys=set(currarg.keys())
prekeys=set(prearg.keys())
overkeys=currkeys.intersection(prekeys)
arg_equal=True
for key in overkeys:
arg_equal=arg_equal and (currarg[key]==prearg[key])
arch_equal=currstore['arch']==prestore['arch']
epoch_equal=currstore['epoch']==prestore['epoch']
print('val: '+str(inputwrap_true)+' '+str(dimdict_true)+' '+str(arch_equal)+' '+str(epoch_equal)+'\n')
if inputwrap_true and dimdict_true and arg_equal and arch_equal and epoch_equal:
valequal=True
else:
valequal=False
print('perf: '+str(currstore['best_acc1']-prestore['best_acc1'])+' '+str(currstore['best_acctr']-prestore['best_acctr'])+'\n')
if (currstore['best_acc1']-prestore['best_acc1'])<smalval and (currstore['best_acctr']-prestore['best_acctr'])<smalval:
perf_equal=True
else:
perf_equal=False
curr_state_dict=currstore['state_dict']
pre_state_dict=prestore['state_dict']
layer_size_equal=True
for layer in curr_state_dict.keys():
layer_size_equal=layer_size_equal and (curr_state_dict[layer].shape==pre_state_dict[layer].shape)
print('final: '+str(dimequal)+' '+str(layer_size_equal)+' '+str(valequal)+' '+str(perf_equal)+'\n')
if dimequal and layer_size_equal and valequal: #perf_equal
self.assertTrue(True)
else:
self.assertTrue(False)
except:
self.assertTrue(False)
def test_sampler_function(self):
try:
from train_mlp_full_modified import batch_sampler_block
torch.manual_seed(1)
datasource=np.array([0,1,2,3,4,5,6,7,8,9])
blocks=np.array([0,0,1,1,2,2,3,3,4,4])
nblocks=2
exp_res=[[0,1,8,9],[4,5,6,7],[2,3]]
test_res=list(batch_sampler_block(datasource,blocks,nblock=nblocks))
if test_res==exp_res:
self.assertTrue(True)
else:
self.assertTrue(False)
except:
self.assertTrue(False)
def test_resnet2x(self):
try:
import nnt_struc as models
##resnet 18
model_resnet18=models.__dict__['resnet18_mlp'](ninput=10,num_response=10,p=0,ncellscale=1)
model_resnet18_x=models.__dict__['resnet2x_mlp'](ninput=10,num_response=10,p=0,ncellscale=1,x=9)
model_resnet18_dic=model_resnet18.state_dict()
model_resnet18_x_dic=model_resnet18_x.state_dict()
layer_size_equal_18=True
for layer in model_resnet18_dic.keys():
layer_size_equal_18=layer_size_equal_18 and (model_resnet18_dic[layer].shape==model_resnet18_x_dic[layer].shape)
##resnet 34
model_resnet34=models.__dict__['resnet34_mlp'](ninput=10,num_response=10,p=0,ncellscale=1)
model_resnet34_x=models.__dict__['resnet2x_mlp'](ninput=10,num_response=10,p=0,ncellscale=1,x=17)
model_resnet34_dic=model_resnet34.state_dict()
model_resnet34_x_dic=model_resnet34_x.state_dict()
layer_size_equal_34=True
for layer in model_resnet34_dic.keys():
layer_size_equal_34=layer_size_equal_34 and (model_resnet34_dic[layer].shape==model_resnet34_x_dic[layer].shape)
if layer_size_equal_18 and layer_size_equal_34:
self.assertTrue(True)
else:
self.assertTrue(False)
except:
self.assertTrue(False)
def test_get_lr(self):
try:
import nnt_struc as models
from train_mlp_full_modified import get_lr
model_resnet18=models.__dict__['resnet18_mlp'](ninput=10,num_response=10,p=0,ncellscale=1)
optimizer=optim.SGD(model_resnet18.parameters(),lr=0.1,momentum=0.1)
if get_lr(optimizer)==0.1:
self.assertTrue(True)
else:
self.assertTrue(False)
except:
self.assertTrue(False)
def test_rnn_run(self):#just test the shape of the nnt layers
try:
os.chdir(rnncheckfold_run)
structlist=['gru_mlp_rnn','gru_rnn','diffaddcell_rnn']
datalist=['checkpoint.gru_mlp.tar','checkpoint.gru.tar','checkpoint.diffaddcell.tar']
commands=['time python3 train_mlp_full_modified.py --batch-size 42 --test-batch-size 42 --epochs 5 --learning-rate 0.04 --seed 2 --net-struct ',' --layersize-ratio 0.5 --optimizer adam --num-layer 1 --inputfile sparselinearode_new.small.stepwiseadd.mat --p 0 --scheduler step --gpu-use 0 --rnn-struct 1 --timetrainlen 21 &> output']
shapeequal=True
for struc_i in range(0,len(structlist)):
struc=structlist[struc_i]
predata=datalist[struc_i]
os.system(commands[0]+struc+commands[1])
device=torch.device('cpu')
currstore=torch.load(rnncheckfold_run+runoutputlist[6],map_location=device)
prestore=torch.load(rnn_comp_data+predata,map_location=device)
curr_state_dict=currstore['state_dict']
pre_state_dict=prestore['state_dict']
for layer in curr_state_dict.keys():
shapeequal=shapeequal and (curr_state_dict[layer].shape==pre_state_dict[layer].shape)
if shapeequal:
self.assertTrue(True)
else:
self.assertTrue(False)
except:
self.assertTrue(False)
def test_clean(self):
try:
for filename in os.listdir(test_output):
file_path=os.path.join(test_output,filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path,e))
self.assertTrue(False)
self.assertTrue(True)
except:
self.assertTrue(False)
def cmp(a,b):
return (a>b)-(a<b)
if __name__ == '__main__':
ln=lambda f: getattr(NNTODETest,f).__code__.co_firstlineno
lncmp=lambda _, a, b: cmp(ln(a),ln(b))
unittest.TestLoader.sortTestMethodsUsing=lncmp
suite=unittest.TestLoader().loadTestsFromTestCase(NNTODETest)
unittest.TextTestRunner().run(suite)
os.chdir(prepath)
|
the-stack_0_3962 | import json
class MorseCode:
def __init__(self):
# load morse dictionary into module
with open("./morse_code.json", mode='r', encoding="utf-8") as f:
self.morse_code = json.load(f)
# convert input sentence to morse code sentence
def encrypt(self, sentence: str) -> str:
converted_sentence = ""
if sentence.strip() == '':
raise ValueError("Invalid sentence detected!")
for letter in sentence.strip():
if letter == " ":
converted_sentence = converted_sentence + "/"
else:
detection_sentence = converted_sentence
for morse in self.morse_code:
# add in morse code to object sentence
if letter.lower() == morse.lower():
converted_sentence = converted_sentence + self.morse_code[morse] + "_"
# pickup fallout and alert
if len(converted_sentence) == len(detection_sentence):
raise ValueError(f"Missing morse code: {letter}")
return converted_sentence
# revert morse code sentence
def decrypt(self, converted_sentence: str) -> str:
reverted_sentence = ""
if converted_sentence.strip() == '':
raise ValueError("Invalid sentence detected!")
morse_words = converted_sentence.split("/") # break words into list of words
for morse_word in morse_words:
morse_letters = morse_word.strip("_") # clean letters with trailing _
morse_letters = morse_letters.split("_") # break word into letters
for morse_letter in morse_letters:
for morse in self.morse_code:
if morse_letter == self.morse_code[morse]:
reverted_sentence = reverted_sentence + morse
reverted_sentence = reverted_sentence + " "
return reverted_sentence.strip()
# view morse code source
def view_source(self):
for morse in self.morse_code:
print(f"{morse}: {self.morse_code[morse]}")
return None
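# Illustrative usage sketch (added; the exact codes depend on the contents of morse_code.json,
# assumed here to map letters to standard dot/dash strings, e.g. "s" -> "..." and "o" -> "---"):
#   mc = MorseCode()
#   mc.encrypt("sos")           # -> "..._---_..._"  (letters joined by "_", words by "/")
#   mc.decrypt("..._---_..._")  # -> "sos" (letter case follows the keys in the JSON file)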
if __name__ == '__main__': # launch as terminal
my_morse = MorseCode()
kernel_activate = True
ascii_activation = """
`. ___
__,' __`. _..----....____
__...--.'``;. ,. ;``--..__ .' ,-._ _.-'
_..-''-------' `' `' `' O ``-''._ (,;') _,'
,'________________ \`-._`-','
`._ ```````````------...___ '-.._'-:
```--.._ ,. ````--...__\-.
`.--. `-` ____ | |`
`. `. ,'`````. ; ;`
`._`. __________ `. \\'__/`
`-:._____/______/___/____`. \ `
| `._ `. \\
`._________`-. `. `.___
`------'`
_ __ _ ___ _ _ _ _
| | / / | | / _ \ | | (_) | | | |
| |/ / ___ _ __ _ __ ___| | / /_\ \ ___| |_ ___ ____ _| |_ ___ __| |
| \ / _ \ '__| '_ \ / _ \ | | _ |/ __| __| \ \ / / _` | __/ _ \/ _` |
| |\ \ __/ | | | | | __/ | | | | | (__| |_| |\ V / (_| | || __/ (_| |
\_| \_/\___|_| |_| |_|\___|_| \_| |_/\___|\__|_| \_/ \__,_|\__\___|\__,_|
"""
count = 0
while kernel_activate:
if count == 0:
print(ascii_activation)
print("Welcome to Morse Code Center!")
print("Input 'help' to view available commands")
command = input("Please input your command ~\n$").strip().lower()
if command == "exit":
print("See you next time!")
kernel_activate = False
elif command == "encrypt":
my_sentence = input("Encrypting below to morse code ~\n$")
print(my_morse.encrypt(my_sentence))
elif command == "decrypt":
my_morse_code = input("Decrypting below to text message ~\n$")
print(my_morse.decrypt(my_morse_code))
elif command == "view":
my_morse.view_source()
elif command == "help":
print("""
exit: Exit terminal
encrypt: Convert input text into morse code
decrypt: Convert input morse code into text
view: View the corresponding text: morse code
""")
else:
print("You have entered invalid command, please try again.")
count = 1
|
the-stack_0_3963 | # -*- coding: utf-8 -*-
import json
import random
import time
import uuid
from datetime import datetime
import boto3
aws_profile = "eq_sanhe"
stream_name = "kinesis-practice-web-event"
n_records_per_second = 10
n_records_per_send = 10
boto_ses = boto3.session.Session(profile_name=aws_profile)
kn_client = boto_ses.client("kinesis")
event_name_pool = ["sign_in", ] * 9 + ["sign_up", ] * 1
while True:
record_data_list = list()
for _ in range(n_records_per_send):
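        # throttle: jitter each record's delay by +/-10% around 1/n_records_per_second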
sleep_time_base = 1.0 / n_records_per_second
sleep_time = sleep_time_base * (random.randint(90, 110) / 100.0)
time.sleep(sleep_time)
record_data = {
"event_id": str(uuid.uuid4()),
"event_name": random.choice(event_name_pool),
"event_time": str(datetime.utcnow())
}
record_data_list.append(record_data)
records = [
{
"Data": (json.dumps(record_data) + "\n").encode("utf-8"),
"PartitionKey": record_data["event_id"]
}
for record_data in record_data_list
]
res = kn_client.put_records(
Records=records,
StreamName=stream_name,
)
print(records)
# break
|
the-stack_0_3964 | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX Pusher component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
import absl
from tfx import types
from tfx.components.base import base_component
from tfx.components.base import executor_spec
from tfx.components.pusher import executor
from tfx.proto import pusher_pb2
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import PusherSpec
from tfx.utils import json_utils
# TODO(b/133845381): Investigate other ways to keep push destination converged.
class Pusher(base_component.BaseComponent):
"""A TFX component to push validated TensorFlow models to a model serving platform.
  The `Pusher` component can be used to push a validated SavedModel from the output
of the [Trainer component](https://www.tensorflow.org/tfx/guide/trainer) to
[TensorFlow Serving](https://www.tensorflow.org/tfx/serving). The Pusher
will check the validation results from the [Evaluator
component](https://www.tensorflow.org/tfx/guide/evaluator) and [InfraValidator
component](https://www.tensorflow.org/tfx/guide/infra_validator)
before deploying the model. If the model has not been blessed, then the model
will not be pushed.
  *Note:* The executor for this component can be overridden to enable the model
  to be pushed to serving platforms other than tf.serving. The [Cloud AI
Platform custom
executor](https://github.com/tensorflow/tfx/tree/master/tfx/extensions/google_cloud_ai_platform/pusher)
  provides an example of how to implement this.
## Example
```
# Checks whether the model passed the validation steps and pushes the model
# to a file destination if check passed.
pusher = Pusher(
model=trainer.outputs['model'],
model_blessing=evaluator.outputs['blessing'],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=serving_model_dir)))
```
"""
SPEC_CLASS = PusherSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
def __init__(
self,
model: types.Channel = None,
model_blessing: Optional[types.Channel] = None,
infra_blessing: Optional[types.Channel] = None,
push_destination: Optional[Union[pusher_pb2.PushDestination,
Dict[Text, Any]]] = None,
custom_config: Optional[Dict[Text, Any]] = None,
custom_executor_spec: Optional[executor_spec.ExecutorSpec] = None,
output: Optional[types.Channel] = None,
model_export: Optional[types.Channel] = None,
instance_name: Optional[Text] = None):
"""Construct a Pusher component.
Args:
model: A Channel of type `standard_artifacts.Model`, usually produced by
a Trainer component.
model_blessing: An optional Channel of type
`standard_artifacts.ModelBlessing`, usually produced from an Evaluator
component.
infra_blessing: An optional Channel of type
`standard_artifacts.InfraBlessing`, usually produced from an
InfraValidator component.
push_destination: A pusher_pb2.PushDestination instance, providing info
for tensorflow serving to load models. Optional if executor_class
doesn't require push_destination. If any field is provided as a
RuntimeParameter, push_destination should be constructed as a dict with
the same field names as PushDestination proto message.
custom_config: A dict which contains the deployment job parameters to be
passed to cloud-based training platforms. The [Kubeflow example](
https://github.com/tensorflow/tfx/blob/6ff57e36a7b65818d4598d41e584a42584d361e6/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_kubeflow_gcp.py#L278-L285)
contains an example how this can be used by custom executors.
custom_executor_spec: Optional custom executor spec.
output: Optional output `standard_artifacts.PushedModel` channel with
result of push.
model_export: Backwards compatibility alias for the 'model' argument.
instance_name: Optional unique instance name. Necessary if multiple Pusher
components are declared in the same pipeline.
"""
if model_export:
absl.logging.warning(
'The "model_export" argument to the Pusher component has '
'been renamed to "model" and is deprecated. Please update your '
'usage as support for this argument will be removed soon.')
model = model_export
output = output or types.Channel(type=standard_artifacts.PushedModel)
if push_destination is None and not custom_executor_spec:
raise ValueError('push_destination is required unless a '
'custom_executor_spec is supplied that does not require '
'it.')
spec = PusherSpec(
model=model,
model_blessing=model_blessing,
infra_blessing=infra_blessing,
push_destination=push_destination,
custom_config=json_utils.dumps(custom_config),
pushed_model=output)
super(Pusher, self).__init__(
spec=spec,
custom_executor_spec=custom_executor_spec,
instance_name=instance_name)
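# Illustrative sketch (not part of the original module): per the docstring above, when any
# PushDestination field is given as a RuntimeParameter the destination may be passed as a
# plain dict mirroring the proto field names. The channel names and `serving_model_dir`
# are assumptions carried over from the docstring example.
#
#   pusher = Pusher(
#       model=trainer.outputs['model'],
#       model_blessing=evaluator.outputs['blessing'],
#       push_destination={'filesystem': {'base_directory': serving_model_dir}})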
|
the-stack_0_3966 | #!/usr/bin/env python
"""
Copyright 2009 Richard Quirk
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
"""
import contextlib
import os
import sys
import unittest
import mock
import cmakelint.__version__
import cmakelint.main
# stderr suppression from https://stackoverflow.com/a/1810086
@contextlib.contextmanager
def nostderr():
savestderr = sys.stderr
class Devnull(object):
def write(self, _): pass
def flush(self): pass
sys.stderr = Devnull()
try:
yield
finally:
sys.stderr = savestderr
class ErrorCollector(object):
def __init__(self):
self._errors = []
def __call__(self, unused_filename, unused_line, category, message):
if cmakelint.main.ShouldPrintError(category):
self._errors.append(message)
def Results(self):
if len(self._errors) < 2:
return ''.join(self._errors)
return self._errors
class CMakeLintTestBase(unittest.TestCase):
def doTestLint(self, code, expected_message):
errors = ErrorCollector()
clean_lines = cmakelint.main.CleansedLines([code])
cmakelint.main.ProcessLine('foo.cmake', 0, clean_lines, errors)
self.assertEqual(expected_message, errors.Results())
def doTestMultiLineLint(self, code, expected_message):
errors = ErrorCollector()
clean_lines = cmakelint.main.CleansedLines(code.split('\n'))
for i in clean_lines.LineNumbers():
cmakelint.main.ProcessLine('foo.cmake', i, clean_lines, errors)
self.assertEqual(expected_message, errors.Results())
def doTestCheckRepeatLogic(self, code, expected_message):
errors = ErrorCollector()
clean_lines = cmakelint.main.CleansedLines(code.split('\n'))
for i in clean_lines.LineNumbers():
cmakelint.main.CheckRepeatLogic(
'foo.cmake', i, clean_lines, errors)
self.assertEqual(expected_message, errors.Results())
def doTestCheckFileName(self, filename, expected_message):
errors = ErrorCollector()
cmakelint.main.CheckFileName(filename, errors)
self.assertEqual(expected_message, errors.Results())
def doTestCheckFindPackage(self, filename, code, expected_message):
errors = ErrorCollector()
clean_lines = cmakelint.main.CleansedLines(code.split('\n'))
for i in clean_lines.LineNumbers():
cmakelint.main.CheckFindPackage(filename, i, clean_lines, errors)
cmakelint.main._package_state.Done(filename, errors)
self.assertEqual(expected_message, errors.Results())
def doTestGetArgument(self, expected_arg, code):
clean_lines = cmakelint.main.CleansedLines(code.split('\n'))
self.assertEqual(
expected_arg, cmakelint.main.GetCommandArgument(0, clean_lines))
class CMakeLintTest(CMakeLintTestBase):
def setUp(self):
cmakelint.main._lint_state.filters = []
def testLineLength(self):
self.doTestLint(
'# '+('o'*80),
'Lines should be <= 80 characters long')
def testUpperAndLowerCase(self):
self.doTestMultiLineLint(
'''project()\nCMAKE_MINIMUM_REQUIRED()\n''',
'Do not mix upper and lower case commands')
def testContainsCommand(self):
self.assertTrue(cmakelint.main.ContainsCommand('project()'))
self.assertTrue(cmakelint.main.ContainsCommand('project('))
self.assertTrue(cmakelint.main.ContainsCommand('project ( '))
self.assertFalse(cmakelint.main.ContainsCommand('VERSION'))
def testGetCommand(self):
self.assertEqual('project', cmakelint.main.GetCommand('project()'))
self.assertEqual('project', cmakelint.main.GetCommand('project('))
self.assertEqual('project', cmakelint.main.GetCommand('project ( '))
self.assertEqual('', cmakelint.main.GetCommand('VERSION'))
def testIsCommandUpperCase(self):
self.assertTrue(cmakelint.main.IsCommandUpperCase('PROJECT'))
self.assertTrue(cmakelint.main.IsCommandUpperCase(
'CMAKE_MINIMUM_REQUIRED'))
self.assertFalse(cmakelint.main.IsCommandUpperCase(
'cmake_minimum_required'))
self.assertFalse(cmakelint.main.IsCommandUpperCase('project'))
self.assertFalse(cmakelint.main.IsCommandUpperCase('PrOjEct'))
def testIsCommandMixedCase(self):
self.assertTrue(cmakelint.main.IsCommandMixedCase('PrOjEct'))
self.assertFalse(cmakelint.main.IsCommandMixedCase('project'))
self.assertFalse(cmakelint.main.IsCommandMixedCase(
'CMAKE_MINIMUM_REQUIRED'))
self.assertTrue(cmakelint.main.IsCommandMixedCase(
'CMAKE_MINIMUM_required'))
def testCleanComment(self):
self.assertEqual(
('', False), cmakelint.main.CleanComments('# Comment to zap'))
self.assertEqual(
('project()', False),
cmakelint.main.CleanComments('project() # Comment to zap'))
def testCleanCommentQuotes(self):
self.assertEqual(
('CHECK_C_SOURCE_COMPILES("', True),
cmakelint.main.CleanComments('CHECK_C_SOURCE_COMPILES("'))
self.assertEqual(
('', True),
cmakelint.main.CleanComments(' some line in a comment ', True))
self.assertEqual(
('")', False),
cmakelint.main.CleanComments(' end of comment") ', True))
def testCommandSpaces(self):
self.doTestMultiLineLint(
"""project ()""",
"Extra spaces between 'project' and its ()")
def testTabs(self):
self.doTestLint('\tfoo()', 'Tab found; please use spaces')
def testTrailingSpaces(self):
self.doTestLint('# test ', 'Line ends in whitespace')
self.doTestMultiLineLint(
' foo() \n foo()\n', 'Line ends in whitespace')
self.doTestLint(' set(var value)', '')
def testCommandSpaceBalance(self):
self.doTestMultiLineLint(
"""project( Foo)""",
'Mismatching spaces inside () after command')
self.doTestMultiLineLint(
"""project(Foo )""",
'Mismatching spaces inside () after command')
def testCommandNotEnded(self):
self.doTestMultiLineLint(
"""project(
Foo
#
#""",
'Unable to find the end of this command')
def testRepeatLogicExpression(self):
self.doTestCheckRepeatLogic('else(foo)',
'Expression repeated inside else; '
'better to use only else()')
self.doTestCheckRepeatLogic('ELSEIF(NOT ${VAR})', '')
self.doTestCheckRepeatLogic('ENDMACRO( my_macro foo bar baz)',
'Expression repeated inside endmacro; '
'better to use only ENDMACRO()')
def testFindTool(self):
self.doTestCheckFileName('path/to/FindFooBar.cmake',
'Find modules should use uppercase names; '
'consider using FindFOOBAR.cmake')
self.doTestCheckFileName('CMakeLists.txt', '')
self.doTestCheckFileName('cmakeLists.txt',
'File should be called CMakeLists.txt')
def testIsFindPackage(self):
self.assertTrue(cmakelint.main.IsFindPackage('path/to/FindFOO.cmake'))
self.assertFalse(cmakelint.main.IsFindPackage(
'path/to/FeatureFOO.cmake'))
def testCheckFindPackage(self):
self.doTestCheckFindPackage(
'FindFoo.cmake',
'',
['Package should include FindPackageHandleStandardArgs',
'Package should use FIND_PACKAGE_HANDLE_STANDARD_ARGS'])
self.doTestCheckFindPackage(
'FindFoo.cmake',
'''INCLUDE(FindPackageHandleStandardArgs)''',
'Package should use FIND_PACKAGE_HANDLE_STANDARD_ARGS')
self.doTestCheckFindPackage(
'FindFoo.cmake',
'''FIND_PACKAGE_HANDLE_STANDARD_ARGS(FOO DEFAULT_MSG)''',
'Package should include FindPackageHandleStandardArgs')
self.doTestCheckFindPackage(
'FindFoo.cmake',
'''INCLUDE(FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(KK DEFAULT_MSG)''',
'Weird variable passed to std args, should be FOO not KK')
self.doTestCheckFindPackage(
'FindFoo.cmake',
'''INCLUDE(FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(FOO DEFAULT_MSG)''',
'')
def testGetCommandArgument(self):
self.doTestGetArgument('KK',
'''SET(
KK)''')
self.doTestGetArgument('KK', 'Set( KK)')
self.doTestGetArgument(
'KK', 'FIND_PACKAGE_HANDLE_STANDARD_ARGS(KK BLEUGH)')
def testIsValidFile(self):
self.assertTrue(cmakelint.main.IsValidFile('CMakeLists.txt'))
self.assertTrue(cmakelint.main.IsValidFile('cmakelists.txt'))
self.assertTrue(cmakelint.main.IsValidFile(
'/foo/bar/baz/CMakeLists.txt'))
self.assertTrue(cmakelint.main.IsValidFile('Findkk.cmake'))
self.assertFalse(cmakelint.main.IsValidFile('foobar.h.in'))
def testFilterControl(self):
self.doTestMultiLineLint(('# lint_cmake: -whitespace/eol\n'
' foo() \n'
' foo()\n'), '')
def testBadPragma(self):
self.doTestMultiLineLint(('# lint_cmake: I am badly formed\n'
'if(TRUE)\n'
'endif()\n'),
'Filter should start with - or +')
def testBadPragma2(self):
self.doTestMultiLineLint(('# lint_cmake: -unknown thing\n'
'if(TRUE)\n'
'endif()\n'),
'Filter not allowed: -unknown thing')
def testWhitespaceIssue16(self):
self.doTestMultiLineLint(('if(${CONDITION})\n'
' set(VAR\n'
' foo\n'
' bar\n'
' )\n'
'endif()\n'),
'')
def testWhitespaceIssue16NonRegression(self):
self.doTestMultiLineLint(('if(${CONDITION})\n'
' set(VAR\n'
' foo\n'
' bar)\n'
'endif()\n'),
'')
def testWhitespaceIssue16FalseNegative(self):
self.doTestMultiLineLint(('if(${CONDITION})\n'
' set(VAR\n'
' foo\n'
' bar )\n'
'endif()\n'),
'Mismatching spaces inside () after command')
def testNoEnd(self):
self.doTestMultiLineLint('file(APPEND ${OUT} "#endif${nl}")\n', '')
def testBackslashComment(self):
self.doTestMultiLineLint(r'file(APPEND ${OUT} " \"") # comment\n', '')
def testFalsePositiveSourceCompiles(self):
self.doTestMultiLineLint((
'CHECK_C_SOURCE_COMPILES("\n'
            '#include <pthread.h>\n'
'void foo(void) {}\n'
'int main()\n'
'{\n'
'pthread_once_t once_control = PTHREAD_ONCE_INIT;\n'
'pthread_once(&once_control, foo);\n'
'return 0;\n'
'}"\n'
'HAVE_PTHREAD_ONCE_INIT\n'
')\n'), '')
def testIndent(self):
try:
cmakelint.main._lint_state.spaces = 2
self.doTestLint('no_indent(test)', '')
self.doTestLint(' two_indent(test)', '')
self.doTestLint(' four_indent(test)', '')
self.doTestLint(' one_indent(test)',
'Weird indentation; use 2 spaces')
self.doTestLint(' three_indent(test)',
'Weird indentation; use 2 spaces')
cmakelint.main._lint_state.spaces = 3
self.doTestLint('no_indent(test)', '')
self.doTestLint(' two_indent(test)',
'Weird indentation; use 3 spaces')
self.doTestLint(' four_indent(test)',
'Weird indentation; use 3 spaces')
self.doTestLint(' one_indent(test)',
'Weird indentation; use 3 spaces')
self.doTestLint(' three_indent(test)', '')
finally:
cmakelint.main._lint_state.spaces = 2
def testParseArgs(self):
old_usage = cmakelint.main._USAGE
old_version = cmakelint.__version__.VERSION
old_cats = cmakelint.main._ERROR_CATEGORIES
old_spaces = cmakelint.main._lint_state.spaces
try:
cmakelint.main._USAGE = ""
cmakelint.main._ERROR_CATEGORIES = ""
cmakelint.main._VERSION = ""
with nostderr():
self.assertRaises(SystemExit, cmakelint.main.ParseArgs, [])
self.assertRaises(
SystemExit, cmakelint.main.ParseArgs, ['--help'])
self.assertRaises(SystemExit, cmakelint.main.ParseArgs, [
'--bogus-option'])
self.assertRaises(
SystemExit, cmakelint.main.ParseArgs, ['--filter='])
self.assertRaises(
SystemExit, cmakelint.main.ParseArgs, ['--filter=foo'])
self.assertRaises(SystemExit, cmakelint.main.ParseArgs, [
'--filter=+x,b,-c', 'foo.cmake'])
self.assertRaises(SystemExit, cmakelint.main.ParseArgs, [
'--spaces=c', 'foo.cmake'])
self.assertRaises(
SystemExit, cmakelint.main.ParseArgs, ['--version'])
cmakelint.main._lint_state.filters = []
self.assertEqual(['foo.cmake'], cmakelint.main.ParseArgs(
['--filter=-whitespace', 'foo.cmake']))
cmakelint.main._lint_state.filters = []
self.assertEqual(
['foo.cmake'], cmakelint.main.ParseArgs(['foo.cmake']))
filt = '-,+whitespace'
cmakelint.main._lint_state.filters = []
self.assertEqual(['foo.cmake'], cmakelint.main.ParseArgs(
['--config=None', '--spaces=3', '--filter='+filt, 'foo.cmake']))
self.assertEqual(['-', '+whitespace'],
cmakelint.main._lint_state.filters)
self.assertEqual(3, cmakelint.main._lint_state.spaces)
cmakelint.main._lint_state.filters = []
filt = '-,+whitespace/eol, +whitespace/tabs'
self.assertEqual(['foo.cmake'], cmakelint.main.ParseArgs(
['--config=None', '--spaces=3', '--filter='+filt, 'foo.cmake']))
self.assertEqual(
['-', '+whitespace/eol', '+whitespace/tabs'], cmakelint.main._lint_state.filters)
cmakelint.main._lint_state.filters = []
cmakelint.main.ParseArgs(['--config=./foo/bar', 'foo.cmake'])
self.assertEqual('./foo/bar', cmakelint.main._lint_state.config)
cmakelint.main.ParseArgs(['--config=None', 'foo.cmake'])
self.assertEqual(None, cmakelint.main._lint_state.config)
cmakelint.main.ParseArgs(['foo.cmake'])
self.assertEqual(os.path.expanduser('~') + os.path.sep +
'.cmakelintrc', cmakelint.main._lint_state.config)
config = {'return_value': True}
patcher = mock.patch('os.path.isfile', **config)
patcher.start()
self.assertEqual(['CMakeLists.txt'], cmakelint.main.ParseArgs([]))
self.assertEqual(os.path.expanduser('~')+os.path.sep +
'.cmakelintrc', cmakelint.main._lint_state.config)
finally:
cmakelint.main._USAGE = old_usage
cmakelint.main._ERROR_CATEGORIES = old_cats
cmakelint.main._VERSION = old_version
cmakelint.main._lint_state.filters = []
cmakelint.main._lint_state.spaces = old_spaces
def testParseOptionsFile(self):
old_usage = cmakelint.main._USAGE
old_cats = cmakelint.main._ERROR_CATEGORIES
old_spaces = cmakelint.main._lint_state.spaces
try:
cmakelint.main._USAGE = ""
cmakelint.main._ERROR_CATEGORIES = ""
cmakelint.main.ParseOptionFile("""
# skip comment
filter=-,+whitespace
spaces= 3
""".split('\n'), ignore_space=False)
self.assertEqual(['-', '+whitespace'],
cmakelint.main._lint_state.filters)
cmakelint.main.ParseArgs(['--filter=+syntax', 'foo.cmake'])
self.assertEqual(['-', '+whitespace', '+syntax'],
cmakelint.main._lint_state.filters)
self.assertEqual(3, cmakelint.main._lint_state.spaces)
cmakelint.main._lint_state.spaces = 2
cmakelint.main.ParseOptionFile("""
# skip comment
spaces= 4
""".split('\n'), ignore_space=True)
self.assertEqual(2, cmakelint.main._lint_state.spaces)
cmakelint.main.ParseOptionFile("""
# skip comment
linelength= 90
""".split('\n'), ignore_space=True)
self.assertEqual(90, cmakelint.main._lint_state.linelength)
cmakelint.main.ParseOptionFile("""
# skip comment
""".split('\n'), ignore_space=False)
self.assertEqual(2, cmakelint.main._lint_state.spaces)
cmakelint.main.ParseOptionFile("""
quiet
""".split('\n'), ignore_space=False)
self.assertTrue(cmakelint.main._lint_state.quiet)
cmakelint.main._lint_state.quiet = True
cmakelint.main.ParseOptionFile("""
# quiet
""".split('\n'), ignore_space=False)
self.assertTrue(cmakelint.main._lint_state.quiet)
finally:
cmakelint.main._USAGE = old_usage
cmakelint.main._ERROR_CATEGORIES = old_cats
cmakelint.main._lint_state.spaces = old_spaces
if __name__ == '__main__':
unittest.main()
|
the-stack_0_3967 | # -*- coding:utf-8 -*-
from src.binary_search_tree import BST, PARENT, LEFT, RIGHT, KEY, SIZE
class SplayTree(BST):
""" Just like a simple Binary Search Tree but with the added operation of
splaying.
    Amortized complexity: O(log n) per operation; a single operation can take O(n) in the worst case.
TODO test and make sure this works!
See: http://en.wikipedia.org/wiki/Splay_tree
"""
def splay(self, node):
""" The splay operation hoists the indicated node to the root.
There are three cases where rotations are employed to hoist a node
        (let x be the input node, p be x's parent and g be x's grandparent).
ZIG (p is root, g is None)
(p) (x)
/ => \
(x) (p)
        ZIG-ZIG (x is p's LEFT child and p is g's LEFT child, or the mirrored RIGHT/RIGHT case)
(g) (x)
/ \
(p) => (p)
/ \
(x) (g)
        ZIG-ZAG (x is p's LEFT child and p is g's RIGHT child, or the mirrored case)
(g)
/ (x)
(p) => / \
\ (p) (g)
(x)
Args:
node: list, a representation of a node, format [PARENT, KEY, LEFT, RIGHT, SIZE]
Returns:
            node: the splayed node with updated references, which is now the root.
"""
while node[PARENT] != None:
# ZIG.
if node[PARENT][PARENT] == None:
if node[PARENT][LEFT] == node:
self.rotate(node[PARENT], LEFT)
else:
self.rotate(node[PARENT], RIGHT)
# ZIG-ZIG.
elif node[PARENT][LEFT] == node and node[PARENT][PARENT][LEFT] == node[PARENT]:
self.rotate(node[PARENT][PARENT], LEFT)
self.rotate(node[PARENT], LEFT)
# ZIG-ZIG.
elif node[PARENT][RIGHT] == node and node[PARENT][PARENT][RIGHT] == node[PARENT]:
self.rotate(node[PARENT][PARENT], RIGHT)
self.rotate(node[PARENT], RIGHT)
# ZIG-ZAG.
elif node[PARENT][LEFT] == node and node[PARENT][PARENT][RIGHT] == node[PARENT]:
self.rotate(node[PARENT], LEFT)
self.rotate(node[PARENT], RIGHT)
# ZIG-ZAG.
else:
self.rotate(node[PARENT], RIGHT)
self.rotate(node[PARENT], LEFT)
return node
def insert(self, key):
""" After regular BST insert, the new node is hoisted to the root. """
node = BST.insert(self, key)
return self.splay(node)
def delete_and_splay(self, key):
""" After regular BST delete, the former node's parent is hoisted to
the root.
"""
removed = BST.delete(self, key)
self.splay(removed[PARENT])
return removed
def search_and_splay(self, key):
""" After a successful search operation the returned node is hoisted
to the root before being returned.
"""
node = BST.search(self, key)
return self.splay(node)
def join(self, other_tree):
""" Join other_tree with the current tree. The conditions is that any
element in other_tree is larger than any element in current tree.
Args:
            other_tree: object, instance of src.binary_search_tree.BST
"""
current_max = self.get_max()
if current_max > other_tree.get_min():
raise Exception('The tree to join must have strictly larger items '
                            "than the current tree's items")
        root = self.splay(current_max)
        # after splaying the max key to the root it has no right child, so the
        # strictly larger tree is attached as the right subtree
        root[RIGHT] = other_tree.root
def split(self, key):
""" Splits the current tree into two subtrees, the left one containing
all elements smaller than key, the right one containing all elements
larger than key.
Args:
key: int
        Returns:
list, format [left, right]
left: instance of SplayTree
right: instance of SplayTree
"""
root = self.search_and_splay(key)
left = SplayTree()
left.root = [None, root[LEFT][KEY], root[LEFT][LEFT], root[LEFT][RIGHT], root[LEFT][SIZE]]
right = SplayTree()
right.root = [None, root[RIGHT][KEY], root[RIGHT][LEFT], root[RIGHT][RIGHT], root[RIGHT][SIZE]]
return [left, right]
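# Illustrative usage sketch (added; the class's own TODO notes it is untested, so this only
# shows the intended API, assuming the BST base class provides insert/search as used above):
#
#   tree = SplayTree()
#   for key in (5, 1, 9):
#       tree.insert(key)          # each insert splays the new node to the root
#   tree.search_and_splay(1)      # afterwards the node with key 1 is the root
#   left, right = tree.split(5)   # keys < 5 end up in `left`, keys > 5 in `right`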
|
the-stack_0_3969 | import logging
import os
import torch as t
logger = logging.getLogger()
def save_checkpoint(epoch, arch, model, extras=None, is_best=None, name=None, output_dir='.', serialized=False):
"""Save a pyTorch training checkpoint
Args:
epoch: current epoch number
arch: name of the network architecture/topology
model: a pyTorch model
extras: optional dict with additional user-defined data to be saved in the checkpoint.
Will be saved under the key 'extras'
is_best: If true, will save a copy of the checkpoint with the suffix 'best'
name: the name of the checkpoint file
output_dir: directory in which to save the checkpoint
"""
if not os.path.isdir(output_dir):
        raise IOError('Checkpoint directory does not exist at', os.path.abspath(output_dir))
if extras is None:
extras = {}
if not isinstance(extras, dict):
raise TypeError('extras must be either a dict or None')
filename = 'checkpoint.pth.tar' if name is None else name + '_checkpoint.pth.tar'
filepath = os.path.join(output_dir, filename)
filename_best = 'best.pth.tar' if name is None else name + '_best.pth.tar'
filepath_best = os.path.join(output_dir, filename_best)
    # a model wrapped in DataParallel keeps its parameters under .module, so unwrap it unless 'serialized' saving was requested
if serialized is True:
checkpoint = {
'epoch': epoch,
'state_dict': model.state_dict(),
'arch': arch,
'extras': extras,
}
else:
checkpoint = {
'epoch': epoch,
'state_dict': model.module.state_dict(),
'arch': arch,
'extras': extras,
}
msg = 'Saving checkpoint to:\n'
msg += ' Current: %s\n' % filepath
t.save(checkpoint, filepath)
if is_best:
msg += ' Best: %s\n' % filepath_best
t.save(checkpoint, filepath_best)
# model.to('cpu')
# t.save(model, filepath_best)
# model.to('cuda')
logger.info(msg)
logname = '_loggs.log' if name is None else name + '_loggs.log'
# logger.FileHandler(logname)
def load_checkpoint(model, chkp_file, model_device=None, strict=False, lean=False):
"""Load a pyTorch training checkpoint.
Args:
model: the pyTorch model to which we will load the parameters. You can
specify model=None if the checkpoint contains enough metadata to infer
the model. The order of the arguments is misleading and clunky, and is
kept this way for backward compatibility.
chkp_file: the checkpoint file
lean: if set, read into model only 'state_dict' field
model_device [str]: if set, call model.to($model_device)
This should be set to either 'cpu' or 'cuda'.
:returns: updated model, optimizer, start_epoch
"""
if not os.path.isfile(chkp_file):
raise IOError('Cannot find a checkpoint at', chkp_file)
checkpoint = t.load(chkp_file, map_location=lambda storage, loc: storage)
if 'state_dict' not in checkpoint:
raise ValueError('Checkpoint must contain model parameters')
extras = checkpoint.get('extras', None)
arch = checkpoint.get('arch', '_nameless_')
checkpoint_epoch = checkpoint.get('epoch', None)
start_epoch = checkpoint_epoch + 1 if checkpoint_epoch is not None else 0
anomalous_keys = model.load_state_dict(checkpoint['state_dict'], strict)
if anomalous_keys:
missing_keys, unexpected_keys = anomalous_keys
if unexpected_keys:
logger.warning("The loaded checkpoint (%s) contains %d unexpected state keys" %
(chkp_file, len(unexpected_keys)))
if missing_keys:
raise ValueError("The loaded checkpoint (%s) is missing %d state keys" %
(chkp_file, len(missing_keys)))
if model_device is not None:
model.to(model_device)
if lean:
logger.info("Loaded checkpoint %s model (next epoch %d) from %s", arch, 0, chkp_file)
return model, 0, None
else:
logger.info("Loaded checkpoint %s model (next epoch %d) from %s", arch, start_epoch, chkp_file)
return model, start_epoch, extras
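# Minimal round-trip sketch (added for illustration; the tiny Linear model and the 'demo'
# name are assumptions, not part of the original project). serialized=True saves the model
# as-is, so no DataParallel wrapper is needed.
if __name__ == '__main__':
    demo_model = t.nn.Linear(4, 2)
    save_checkpoint(epoch=0, arch='demo_linear', model=demo_model,
                    extras={'note': 'smoke test'}, is_best=False,
                    name='demo', output_dir='.', serialized=True)
    demo_model, next_epoch, extras = load_checkpoint(
        demo_model, './demo_checkpoint.pth.tar', model_device='cpu')
    print(next_epoch, extras)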
|
the-stack_0_3970 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
from fairseq import optim, utils
class DynamicLossScaler(object):
def __init__(
self, init_scale=2.**15, scale_factor=2., scale_window=2000,
tolerance=0.05, threshold=None,
):
self.loss_scale = init_scale
self.scale_factor = scale_factor
self.scale_window = scale_window
self.tolerance = tolerance
self.threshold = threshold
self._iter = 0
self._last_overflow_iter = -1
self._last_rescale_iter = -1
self._overflows_since_rescale = 0
def update_scale(self, overflow):
iter_since_rescale = self._iter - self._last_rescale_iter
if overflow:
self._last_overflow_iter = self._iter
self._overflows_since_rescale += 1
pct_overflow = self._overflows_since_rescale / float(iter_since_rescale)
if pct_overflow >= self.tolerance:
self._decrease_loss_scale()
self._last_rescale_iter = self._iter
self._overflows_since_rescale = 0
elif (self._iter - self._last_overflow_iter) % self.scale_window == 0:
self.loss_scale *= self.scale_factor
self._last_rescale_iter = self._iter
self._iter += 1
def _decrease_loss_scale(self):
self.loss_scale /= self.scale_factor
if self.threshold is not None:
self.loss_scale = max(self.loss_scale, self.threshold)
@staticmethod
def has_overflow(grad_norm):
# detect inf and nan
if grad_norm == float('inf') or grad_norm != grad_norm:
return True
return False
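# Illustrative trace (added comment, not in the original file): each detected overflow cuts
# loss_scale by scale_factor once the overflow ratio passes `tolerance`, and after
# `scale_window` overflow-free iterations the scale is grown again, e.g.
#
#   scaler = DynamicLossScaler(init_scale=4.0, scale_factor=2.0, scale_window=2, tolerance=0.0)
#   for norm in (1.0, float('inf'), 1.0, 1.0):
#       scaler.update_scale(DynamicLossScaler.has_overflow(norm))
#   # loss_scale after each step: 4.0, 2.0, 2.0, 4.0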
class FP16Optimizer(optim.FairseqOptimizer):
"""
Wrap an *optimizer* to support FP16 (mixed precision) training.
"""
def __init__(self, args, params, fp32_optimizer, fp32_params):
super().__init__(args, params)
self.fp32_optimizer = fp32_optimizer
self.fp32_params = fp32_params
if getattr(args, 'fp16_scale_window', None) is None:
if len(args.update_freq) > 1:
raise ValueError(
'--fp16-scale-window must be given explicitly when using a '
'custom --update-freq schedule'
)
scale_window = 2**14 / args.distributed_world_size / args.update_freq[0]
else:
scale_window = args.fp16_scale_window
self.scaler = DynamicLossScaler(
init_scale=args.fp16_init_scale,
scale_window=scale_window,
tolerance=args.fp16_scale_tolerance,
threshold=args.threshold_loss_scale,
)
@classmethod
def build_optimizer(cls, args, params):
"""
Args:
args (argparse.Namespace): fairseq args
params (iterable): iterable of parameters to optimize
"""
# create FP32 copy of parameters and grads
total_param_size = sum(p.data.numel() for p in params)
fp32_params = params[0].new(0).float().new(total_param_size)
offset = 0
for p in params:
numel = p.data.numel()
fp32_params[offset:offset+numel].copy_(p.data.contiguous().view(-1))
offset += numel
fp32_params = torch.nn.Parameter(fp32_params)
fp32_params.grad = fp32_params.data.new(total_param_size)
fp32_optimizer = optim.build_optimizer(args, [fp32_params])
return cls(args, params, fp32_optimizer, fp32_params)
@property
def optimizer(self):
return self.fp32_optimizer.optimizer
@property
def optimizer_config(self):
return self.fp32_optimizer.optimizer_config
def get_lr(self):
return self.fp32_optimizer.get_lr()
def set_lr(self, lr):
self.fp32_optimizer.set_lr(lr)
def state_dict(self):
"""Return the optimizer's state dict."""
state_dict = self.fp32_optimizer.state_dict()
state_dict['loss_scale'] = self.scaler.loss_scale
return state_dict
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
if 'loss_scale' in state_dict:
self.scaler.loss_scale = state_dict['loss_scale']
self.fp32_optimizer.load_state_dict(state_dict, optimizer_overrides)
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves.
Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
function additionally dynamically scales the loss to avoid gradient
underflow.
"""
loss = loss * self.scaler.loss_scale
loss.backward()
self._needs_sync = True
def _sync_fp16_grads_to_fp32(self, multiply_grads=1.):
if self._needs_sync:
# copy FP16 grads to FP32
offset = 0
for p in self.params:
if not p.requires_grad:
continue
grad_data = p.grad.data if p.grad is not None else p.data.new_zeros(p.data.shape)
numel = grad_data.numel()
self.fp32_params.grad.data[offset:offset+numel].copy_(grad_data.view(-1))
offset += numel
# correct for dynamic loss scaler
self.fp32_params.grad.data.mul_(multiply_grads / self.scaler.loss_scale)
self._needs_sync = False
def multiply_grads(self, c):
"""Multiplies grads by a constant ``c``."""
if self._needs_sync:
self._sync_fp16_grads_to_fp32(c)
else:
self.fp32_params.grad.data.mul_(c)
def clip_grad_norm(self, max_norm):
"""Clips gradient norm and updates dynamic loss scaler."""
self._sync_fp16_grads_to_fp32()
grad_norm = utils.clip_grad_norm_(self.fp32_params.grad.data, max_norm)
# detect overflow and adjust loss scale
overflow = DynamicLossScaler.has_overflow(grad_norm)
self.scaler.update_scale(overflow)
if overflow:
if self.scaler.loss_scale <= self.args.min_loss_scale:
# Use FloatingPointError as an uncommon error that parent
# functions can safely catch to stop training.
raise FloatingPointError((
'Minimum loss scale reached ({}). Your loss is probably exploding. '
'Try lowering the learning rate, using gradient clipping or '
'increasing the batch size.'
).format(self.args.min_loss_scale))
raise OverflowError('setting loss scale to: ' + str(self.scaler.loss_scale))
return grad_norm
def step(self, closure=None):
"""Performs a single optimization step."""
self._sync_fp16_grads_to_fp32()
self.fp32_optimizer.step(closure)
# copy FP32 params back into FP16 model
offset = 0
for p in self.params:
if not p.requires_grad:
continue
numel = p.data.numel()
p.data.copy_(self.fp32_params.data[offset:offset+numel].view_as(p.data))
offset += numel
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
for p in self.params:
p.grad = None
self._needs_sync = False
class MemoryEfficientFP16Optimizer(optim.FairseqOptimizer):
"""
Wrap an *optimizer* to support FP16 (mixed precision) training.
Compared to :class:`fairseq.optim.FP16Optimizer`, this version does not
maintain an FP32 copy of the model. We instead expect the optimizer to
convert the gradients to FP32 internally and sync the results back to the
FP16 model params. This significantly reduces memory usage but slightly
increases the time spent in the optimizer.
Since this wrapper depends on specific functionality in the wrapped
optimizer (i.e., on-the-fly conversion of grads to FP32), only certain
optimizers can be wrapped. This is determined by the
*supports_memory_efficient_fp16* property.
"""
def __init__(self, args, params, optimizer):
if not optimizer.supports_memory_efficient_fp16:
raise ValueError(
'Unsupported optimizer: {}'.format(optimizer.__class__.__name__)
)
super().__init__(args, params)
self.wrapped_optimizer = optimizer
if getattr(args, 'fp16_scale_window', None) is None:
if len(args.update_freq) > 1:
raise ValueError(
'--fp16-scale-window must be given explicitly when using a '
'custom --update-freq schedule'
)
scale_window = 2**14 / args.distributed_world_size / args.update_freq[0]
else:
scale_window = args.fp16_scale_window
self.scaler = DynamicLossScaler(
init_scale=args.fp16_init_scale,
scale_window=scale_window,
tolerance=args.fp16_scale_tolerance,
threshold=args.threshold_loss_scale,
)
@classmethod
def build_optimizer(cls, args, params):
"""
Args:
args (argparse.Namespace): fairseq args
params (iterable): iterable of parameters to optimize
"""
fp16_optimizer = optim.build_optimizer(args, params)
return cls(args, params, fp16_optimizer)
@property
def optimizer(self):
return self.wrapped_optimizer.optimizer
@property
def optimizer_config(self):
return self.wrapped_optimizer.optimizer_config
def get_lr(self):
return self.wrapped_optimizer.get_lr()
def set_lr(self, lr):
self.wrapped_optimizer.set_lr(lr)
def state_dict(self):
"""Return the optimizer's state dict."""
state_dict = self.wrapped_optimizer.state_dict()
state_dict['loss_scale'] = self.scaler.loss_scale
return state_dict
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
if 'loss_scale' in state_dict:
self.scaler.loss_scale = state_dict['loss_scale']
self.wrapped_optimizer.load_state_dict(state_dict, optimizer_overrides)
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves.
Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
function additionally dynamically scales the loss to avoid gradient
underflow.
"""
loss = loss * self.scaler.loss_scale
loss.backward()
self._grads_are_scaled = True
def _unscale_grads(self, multiply_grads=1.):
if self._grads_are_scaled:
self._grads_are_scaled = False
# correct for dynamic loss scaler
self.wrapped_optimizer.multiply_grads(multiply_grads / self.scaler.loss_scale)
else:
assert multiply_grads == 1.
def multiply_grads(self, c):
"""Multiplies grads by a constant *c*."""
if self._grads_are_scaled:
self._unscale_grads(c)
else:
self.wrapped_optimizer.multiply_grads(c)
def clip_grad_norm(self, max_norm):
"""Clips gradient norm and updates dynamic loss scaler."""
self._unscale_grads()
grad_norm = self.wrapped_optimizer.clip_grad_norm(max_norm)
# detect overflow and adjust loss scale
overflow = DynamicLossScaler.has_overflow(grad_norm)
self.scaler.update_scale(overflow)
if overflow:
if self.scaler.loss_scale <= self.args.min_loss_scale:
# Use FloatingPointError as an uncommon error that parent
# functions can safely catch to stop training.
raise FloatingPointError((
'Minimum loss scale reached ({}). Your loss is probably exploding. '
'Try lowering the learning rate, using gradient clipping or '
'increasing the batch size.'
).format(self.args.min_loss_scale))
raise OverflowError('setting loss scale to: ' + str(self.scaler.loss_scale))
return grad_norm
def step(self, closure=None):
"""Performs a single optimization step."""
self._unscale_grads()
self.wrapped_optimizer.step(closure)
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
self.wrapped_optimizer.zero_grad()
self._grads_are_scaled = False
|
the-stack_0_3972 | from mopidy.models import Album, Track
from tests.mpd import protocol
class StatusHandlerTest(protocol.BaseTestCase):
def test_clearerror(self):
self.send_request("clearerror")
self.assertEqualResponse("ACK [0@0] {clearerror} Not implemented")
def test_currentsong(self):
track = Track(uri="dummy:/a")
self.backend.library.dummy_library = [track]
self.core.tracklist.add(uris=[track.uri]).get()
self.core.playback.play().get()
self.send_request("currentsong")
self.assertInResponse("file: dummy:/a")
self.assertInResponse("Time: 0")
self.assertNotInResponse("Artist: ")
self.assertNotInResponse("Title: ")
self.assertNotInResponse("Album: ")
self.assertNotInResponse("Track: 0")
self.assertNotInResponse("Date: ")
self.assertInResponse("Pos: 0")
self.assertInResponse("Id: 1")
self.assertInResponse("OK")
def test_currentsong_unicode(self):
track = Track(
uri="dummy:/à",
name="a nàme",
album=Album(uri="something:àlbum:12345"),
)
self.backend.library.dummy_library = [track]
self.core.tracklist.add(uris=[track.uri]).get()
self.core.playback.play().get()
self.send_request("currentsong")
self.assertInResponse("file: dummy:/à")
self.assertInResponse("Title: a nàme")
self.assertInResponse("X-AlbumUri: something:àlbum:12345")
def test_currentsong_without_song(self):
self.send_request("currentsong")
self.assertInResponse("OK")
def test_stats_command(self):
self.send_request("stats")
self.assertInResponse("OK")
def test_status_command(self):
self.send_request("status")
self.assertInResponse("OK")
|
the-stack_0_3977 | import numpy as np
import time
import sys
from ServoMotor import *
from fns import *
import pandas as pd
import math as m
# Initialize motor control library & USB Port
filename = "/dev/ttyUSB0"
motor = ServoMotor(filename)
IO = motor.IO_Init()
if IO < 0:
print('IO exit')
sys.exit()
version= "0.2.0"
# Read v values from those saved from simulation
df = pd.read_csv('V/Hardcoded_{}.csv'.format(version))
v = np.array(df['Best Values'])
# Call corresponding function to convert sim2real/real2sim
def convFns(pos, convType):
conv = [left_armpit, left_elbow, left_shoulder, right_armpit, right_elbow, right_shoulder,
left_armpit, left_elbow, left_shoulder, right_armpit, right_elbow, right_shoulder]
targ = np.zeros(12)
for i in range(len(pos)):
if i==0:
targ[i] = conv[i](pos[i], convType, "front")
elif i==6:
targ[i] = conv[i](pos[i], convType, "back")
else:
targ[i] = conv[i](pos[i], convType)
return targ
# Return position to take
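# Each non-armpit joint follows offset + amplitude*sin(i*frequency + phase), where the
# offsets, amplitudes and phases come from the evolved vector v and v[36] is the shared
# gait frequency; armpit joints are held at 0.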
def get_action(i):
nextPos = [ 0, (v[6] + v[7]*m.sin(i*v[36] + v[8])), (v[3] + v[4]*m.sin(i*v[36] + v[5])),
0, (v[15] + v[16]*m.sin(i*v[36] + v[17])), (v[12] + v[13]*m.sin(i*v[36] + v[14])),
0, (v[24] + v[25]*m.sin(i*v[36] + v[26])), (v[21] + v[22]*m.sin(i*v[36] + v[23])),
0, (v[33] + v[34]*m.sin(i*v[36] + v[35])), (v[30] + v[31]*m.sin(i*v[36] + v[32]))]
return convFns(nextPos, 'sim2real')
# MOVE MOTOR TO GIVEN POSITION
def walk(pos):
h = 0
for j in range(1,5):
u = 10*j
r = range(u, u+3)
for i in r:
motor.move(i, int(pos[h]), 0)
h+=1
#time.sleep(0.01)
# Read motor positions
def get_state():
state = []
for j in range(1,5):
u = 10*j
r = range(u, u+3)
for i in r:
state.append(motor.readPosition(i))
return state
# Initialize motors as servos and set offset
offsets = [30, 0, 64, 0, 70, 50, 26, 0, 55, 80, 90, 35]
h = 0
# Set servo mode to all servos with their offset
for j in range(1,5):
u = 10*j
r = range(u, u+3)
for i in r:
motor.setServoMode(i)
if offsets[h]!=0:
motor.setPositionOffset(i,offsets[h])
h+=1
# RESET position and stand down & up before walking
pos = [500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500]
h = 0
for j in range(1,5):
u = 10*j
r = range(u, u+3)
for i in r:
motor.move(i, int(pos[h]), 1000)
h+=1
time.sleep(3)
pos = [500, 750, 583, 500, 250, 417, 500, 750, 583, 500, 250, 417]
h = 0
for j in range(1,5):
u = 10*j
r = range(u, u+3)
for i in r:
if h>5:
motor.move(i, int(pos[h]), 700)
else:
motor.move(i, int(pos[h]), 1000)
h+=1
time.sleep(3)
pos = get_action(0)
h = 0
for j in range(1,5):
u = 10*j
r = range(u, u+3)
for i in r:
motor.move(i, int(pos[h]), 1000)
h+=1
time.sleep(3)
error = []
j=0
# WALK
while j < 400:
# Get current position of motors
state = get_state()
'''
if j>1:
diff = []
for i in range(len(state)):
diff.append(abs(state[i]-pos[i]))
error.append(diff)
'''
# Get target position
pos = get_action(j)
# Move robot to target position
walk(pos)
j += 1
#error_df = pd.DataFrame(error, columns=[10, 11, 12, 20, 21, 22, 30, 31, 32, 40, 41, 42])
#error_df.to_csv('Errors.csv')
print('donio')
|
the-stack_0_3981 | from bika.lims.jsonapi.read import read
from plone.jsonapi.core import router
from plone.jsonapi.core.interfaces import IRouteProvider
from Products.CMFCore.utils import getToolByName
from zExceptions import BadRequest
from zope import interface
import json
import transaction
class doActionFor(object):
interface.implements(IRouteProvider)
def initialize(self, context, request):
pass
@property
def routes(self):
return (
("/doActionFor", "doActionFor", self.do_action_for, dict(methods=['GET', 'POST'])),
("/doActionFor_many", "doActionFor_many", self.do_action_for_many, dict(methods=['GET', 'POST'])),
)
def do_action_for(self, context, request):
"""/@@API/doActionFor: Perform workflow transition on values returned
by jsonapi "read" function.
Required parameters:
- action: The workflow transition to apply to found objects.
Parameters used to locate objects are the same as used for the "read"
method.
"""
savepoint = transaction.savepoint()
workflow = getToolByName(context, 'portal_workflow')
uc = getToolByName(context, 'uid_catalog')
action = request.get('action', '')
if not action:
raise BadRequest("No action specified in request")
ret = {
"url": router.url_for("doActionFor", force_external=True),
"success": True,
"error": False,
}
data = read(context, request)
objects = data.get('objects', [])
if len(objects) == 0:
raise BadRequest("No matching objects found")
for obj_dict in objects:
try:
obj = uc(UID=obj_dict['UID'])[0].getObject()
workflow.doActionFor(obj, action)
obj.reindexObject()
except Exception as e:
savepoint.rollback()
msg = "Cannot execute '{0}' on {1} ({2})".format(
action, obj, e.message)
msg = msg.replace("${action_id}", action)
raise BadRequest(msg)
return ret
def do_action_for_many(self, context, request):
"""/@@API/doActionFor: Perform workflow transition on a list of objects.
required parameters:
- obj_paths: a json encoded list of objects to transition.
- action: the id of the transition
"""
savepoint = transaction.savepoint()
workflow = getToolByName(context, 'portal_workflow')
site_path = request['PATH_INFO'].replace("/@@API/doActionFor_many", "")
obj_paths = json.loads(request.get('f', '[]'))
action = request.get('action', '')
if not action:
raise BadRequest("No action specified in request")
ret = {
"url": router.url_for("doActionFor_many", force_external=True),
"success": True,
"error": False,
}
for obj_path in obj_paths:
if not obj_path.startswith("/"):
obj_path = "/" + obj_path
obj = context.restrictedTraverse(str(site_path + obj_path))
if obj_path.startswith(site_path):
obj_path = obj_path[len(site_path):]
try:
workflow.doActionFor(obj, action)
obj.reindexObject()
except Exception as e:
savepoint.rollback()
msg = "Cannot execute '{0}' on {1} ({2})".format(
action, obj, e.message)
msg = msg.replace("${action_id}", action)
raise BadRequest(msg)
return ret
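# Hypothetical client call (added for illustration; the site name, object path and
# transition id are invented, and authentication is omitted). do_action_for_many reads the
# JSON-encoded path list from the 'f' parameter and the transition id from 'action':
#
#   POST http://localhost:8080/Plone/@@API/doActionFor_many?action=receive&f=["/clients/client-1/AR-001"]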
|
the-stack_0_3982 | import os
import sys
from setuptools import setup
if sys.platform == 'darwin':
import py2app
elif sys.platform == 'win32':
import py2exe
sys.setrecursionlimit(100000)
VERSION = os.environ['DEEPN_VERSION']
BUNDLE_VERSION = VERSION.replace(".", "")
APP = ['gc_jm.py']
INCLUDES = ['sys', 'subprocess']
OPTIONS = {'argv_emulation': True,
'iconfile' : 'icon/Icon1.icns',
'plist': {'CFBundleGetInfoString': 'GCJM',
'CFBundleIdentifier': 'edu.uiowa.robertpiper.deepn.gcjm',
'CFBundleShortVersionString': VERSION,
'CFBundleName': 'GCJM',
'CFBundleVersion': BUNDLE_VERSION,
'NSHumanReadableCopyright': '(c) 2016 Venkatramanan Krishnamani, Robert C. Piper, Mark Stammnes'},
'includes': INCLUDES,
'excludes': ['PyQt4.QtDesigner', 'PyQt4.QtNetwork', 'PyQt4.QtOpenGL', 'PyQt4.QtScript', 'PyQt4.QtSql', 'PyQt4.QtTest', 'PyQt4.QtWebKit', 'PyQt4.QtXml', 'PyQt4.phonon'],
}
if sys.platform == 'darwin':
setup(
app=APP,
name='GCJM',
options={'py2app': OPTIONS},
setup_requires=['py2app'],
author='Venkatramanan Krishnamani, Robert C. Piper, Mark Stammnes',
data_files=[],
)
elif sys.platform == 'win32':
origIsSystemDLL = py2exe.build_exe.isSystemDLL
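    # force py2exe to bundle these MSVC runtime / desktop-composition DLLs, which it would
    # otherwise classify as system DLLs and leave out of the build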
    def isSystemDLL(pathname):
        if os.path.basename(pathname).lower() in ("msvcp71.dll", "dwmapi.dll", "msvcp90.dll"):
            return 0
        return origIsSystemDLL(pathname)
py2exe.build_exe.isSystemDLL = isSystemDLL
setup(
console=APP,
version=VERSION,
description='GCJM',
author='Venkatramanan Krishnamani, Robert C. Piper, Mark Stammnes',
windows=[{"script":'gc_jm.py',
"icon_resources": [(1, "icon/Icon1.ico")],
"dest_base":"GCJM"
}],
data_files=[],
options={"py2exe": {'includes': INCLUDES,
"optimize": 2,
"compressed": 2,
"bundle_files": 1,
"dist_dir": "dist\GCJM"
}}
)
|
the-stack_0_3986 | """
Generalized vs Standard Lomb-Scargle
------------------------------------
Figure 10.16
A comparison of the standard and generalized Lomb-Scargle periodograms for a
signal y(t) = 10 + sin(2pi t/P) with P = 0.3, corresponding to omega_0 ~ 21.
This example is, in some sense, a worst-case scenario for the standard
Lomb-Scargle algorithm because there are no sampled points during the times
when ytrue < 10, which leads to a gross overestimation of the mean. The bottom
panel shows the Lomb-Scargle and generalized Lomb-Scargle periodograms for
these data; the generalized method recovers the expected peak as the highest
peak, while the standard method incorrectly chooses the peak at omega ~ 17.6
(because it is higher than the true peak at omega_0 ~ 21). The dotted lines
show the 1% and 5% significance levels for the highest peak in the generalized
periodogram, determined by 1000 bootstrap resamplings (see Section 10.3.2).
Note: This Plot Contains an Error
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
After the book was in press, a reader pointed out that this plot contains a
typo. Instead of passing the noisy data to the Lomb-Scargle routine, we
had passed the underlying, non-noisy data. This caused an over-estimate
of the Lomb-Scargle power.
Because of this, we add two extra plots to this script: the first reproduces
the current plot without the typo. In it, we see that for the noisy data,
the period is not detected for just ~30 points within ten periods.
In the second additional plot, we increase the baseline and the number of
points by a factor of ten. With this configuration, the peak is detected,
and the qualitative aspects of the above discussion hold true.
We regret the error!
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.time_series import \
lomb_scargle, lomb_scargle_BIC, lomb_scargle_bootstrap
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
if "setup_text_plots" not in globals():
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Generate data where y is positive
np.random.seed(0)
N = 30
P = 0.3
t = P / 2 * np.random.random(N) + P * np.random.randint(100, size=N)
y = 10 + np.sin(2 * np.pi * t / P)
dy = 0.5 + 0.5 * np.random.random(N)
y_obs = y + np.random.normal(dy)
omega_0 = 2 * np.pi / P
#######################################################################
# Generate the plot with and without the original typo
for typo in [True, False]:
#------------------------------------------------------------
# Compute the Lomb-Scargle Periodogram
sig = np.array([0.1, 0.01, 0.001])
omega = np.linspace(17, 22, 1000)
# Notice the typo: we used y rather than y_obs
if typo is True:
P_S = lomb_scargle(t, y, dy, omega, generalized=False)
P_G = lomb_scargle(t, y, dy, omega, generalized=True)
else:
P_S = lomb_scargle(t, y_obs, dy, omega, generalized=False)
P_G = lomb_scargle(t, y_obs, dy, omega, generalized=True)
#------------------------------------------------------------
# Get significance via bootstrap
D = lomb_scargle_bootstrap(t, y_obs, dy, omega, generalized=True,
N_bootstraps=1000, random_state=0)
sig1, sig5 = np.percentile(D, [99, 95])
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 3.75))
# First panel: input data
ax = fig.add_subplot(211)
ax.errorbar(t, y_obs, dy, fmt='.k', lw=1, ecolor='gray')
ax.plot([-2, 32], [10, 10], ':k', lw=1)
ax.set_xlim(-2, 32)
ax.set_xlabel('$t$')
ax.set_ylabel('$y(t)$')
if typo is False:
ax.set_title('Corrected version')
# Second panel: periodogram
ax = fig.add_subplot(212)
ax.plot(omega, P_S, '--k', lw=1, label='standard')
ax.plot(omega, P_G, '-k', lw=1, label='generalized')
ax.legend(loc=2)
# plot the significance lines.
xlim = (omega[0], omega[-1])
ax.plot(xlim, [sig1, sig1], ':', c='black')
ax.plot(xlim, [sig5, sig5], ':', c='black')
# label BIC on the right side
ax2 = ax.twinx()
ax2.set_ylim(tuple(lomb_scargle_BIC(ax.get_ylim(), y_obs, dy)))
ax2.set_ylabel(r'$\Delta BIC$')
    ax.set_xlabel(r'$\omega$')
ax.set_ylabel(r'$P_{\rm LS}(\omega)$')
ax.set_xlim(xlim)
ax.set_ylim(0, 1.1)
#######################################################################
# Redo the plot without the typo
# We need a larger data range to actually get significant power
# with actual noisy data
#------------------------------------------------------------
# Generate data where y is positive
np.random.seed(0)
N = 300
P = 0.3
t = P / 2 * np.random.random(N) + P * np.random.randint(1000, size=N)
y = 10 + np.sin(2 * np.pi * t / P)
dy = 0.5 + 0.5 * np.random.random(N)
y_obs = y + np.random.normal(0, dy)
omega_0 = 2 * np.pi / P
#------------------------------------------------------------
# Compute the Lomb-Scargle Periodogram
sig = np.array([0.1, 0.01, 0.001])
omega = np.linspace(20.5, 21.1, 1000)
P_S = lomb_scargle(t, y_obs, dy, omega, generalized=False)
P_G = lomb_scargle(t, y_obs, dy, omega, generalized=True)
#------------------------------------------------------------
# Get significance via bootstrap
D = lomb_scargle_bootstrap(t, y_obs, dy, omega, generalized=True,
N_bootstraps=1000, random_state=0)
sig1, sig5 = np.percentile(D, [99, 95])
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 3.75))
# First panel: input data
ax = fig.add_subplot(211)
ax.errorbar(t, y_obs, dy, fmt='.k', lw=1, ecolor='gray')
ax.plot([-20, 320], [10, 10], ':k', lw=1)
ax.set_xlim(-20, 320)
ax.set_xlabel('$t$')
ax.set_ylabel('$y(t)$')
# Second panel: periodogram
ax = fig.add_subplot(212)
ax.plot(omega, P_S, '--k', lw=1, label='standard')
ax.plot(omega, P_S, '-', c='gray', lw=1)
ax.plot(omega, P_G, '-k', lw=1, label='generalized')
ax.legend(loc=2)
# plot the significance lines.
xlim = (omega[0], omega[-1])
ax.plot(xlim, [sig1, sig1], ':', c='black')
ax.plot(xlim, [sig5, sig5], ':', c='black')
# label BIC on the right side
ax2 = ax.twinx()
ax2.set_ylim(tuple(lomb_scargle_BIC(ax.get_ylim(), y_obs, dy)))
ax2.set_ylabel(r'$\Delta BIC$')
ax.set_xlabel(r'$\omega$')
ax.set_ylabel(r'$P_{\rm LS}(\omega)$')
ax.set_xlim(xlim)
ax.set_ylim(0, 0.12)
plt.show()
|
the-stack_0_3987 | import math
def mirror_numbers_graphing(n, factor, mid, width):
# https://stackoverflow.com/questions/38130895/find-middle-of-a-list/38131003
middle = math.floor(float(n) / 2)
if n % 2 != 0:
fudge_array_minus = []
fudge_array_plus = []
if middle < 1:
adj = 1
else:
adj = 2
for i in range(1, middle + 1):
fudge_array_minus.append(-i * adj * width * factor + mid)
fudge_array_plus.append(i * adj * factor * width + mid)
x = fudge_array_minus[::-1] + [0.0 + mid] + fudge_array_plus
# x = [-factor * i + mid for i in reversed(range(1, middle + 1))] + [0.0 + mid] + [factor * i + mid for i in range(1, middle + 1)]
return x
else:
acc = 0
fudge_array_minus = []
fudge_array_plus = []
for i in range(1, middle + 1):
fudge_array_minus.append((-i - acc) * width * factor + mid)
fudge_array_plus.append((i + acc) * width * factor + mid)
acc += 1
x = fudge_array_minus[::-1] + fudge_array_plus
# x = [-factor * i + mid for i in reversed(range(1, middle + 1))] + [factor * i + mid for i in range(1, middle + 1)]
return x
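# Illustrative examples (doctest-style, not part of the original module):
# >>> mirror_numbers_graphing(3, 1, 0, 1)   # odd n keeps a centre value at `mid`
# [-2, 0.0, 2]
# >>> mirror_numbers_graphing(4, 1, 0, 1)   # even n mirrors around `mid` with no centre value
# [-3, -1, 1, 3]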
|
the-stack_0_3988 | """
Defines the class used for server_utils sync
"""
from time import sleep
from colorama import Back, Fore, Style
from .serverprocess import ServerProcess
from .sharedvars import SharedVariables
class ServerSync(ServerProcess):
"""
Class used for server_utils synchronization
    Two modes are supported: auto, which advances on a fixed time interval,
    and manual, which waits for the user to advance time by hand
"""
def __init__(self, shared_variables: SharedVariables, time_interval: int):
super().__init__(shared_variables)
self.time_interval = time_interval
self.turn = 0
def update(self):
"""
        Synchronizes every other subprocess by waiting at the barrier
        when the timer expires OR when instructed to do so
"""
print(
f"\n\n{Back.LIGHTBLUE_EX}{Fore.BLACK}***** Turn {self.turn} ended, "
f"begin turn {self.turn + 1} *****{Style.RESET_ALL}"
)
def write(self):
"""
Used to begin the next turn once all houses have finished their exchanges
"""
self.turn += 1
sleep(self.time_interval)
print("Timer expired, begin next turn")
def kill(self) -> None:
"""
Kills softly the process
"""
print(f"{Fore.RED}Stopping sync{Style.RESET_ALL}")
super().kill()
|
the-stack_0_3991 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from lgbserver import LightGBMModelRepository
model_dir = os.path.join(os.path.dirname(__file__), "example_model")
invalid_model_dir = os.path.join(os.path.dirname(__file__), "model_not_exist", "model")
@pytest.mark.asyncio
async def test_load():
repo = LightGBMModelRepository(model_dir=model_dir, nthread=1)
model_name = "model"
await repo.load(model_name)
assert repo.get_model(model_name) is not None
assert repo.is_model_ready(model_name)
@pytest.mark.asyncio
async def test_load_fail():
    repo = LightGBMModelRepository(model_dir=invalid_model_dir, nthread=1)
model_name = "model"
with pytest.raises(Exception):
await repo.load(model_name)
assert repo.get_model(model_name) is None
assert not repo.is_model_ready(model_name)
|
the-stack_0_3995 | from flask import session, redirect, url_for, render_template, request
from flask import make_response
from . import main
from .forms import LoginForm, RequestKeyForm
import secrets
import sys
from Crypto.Cipher import AES
from Crypto import Random
from Crypto.Protocol.KDF import PBKDF2
import binascii
import uuid
from .serverm import ServerM
from .cryptographicHash import HashOperation
#import base64
from base64 import b64decode
sessionKey = secrets.token_hex(32)
secret = 'bil548'
roomCounter = 5
peopleCounter = 0
allRooms = {} #dictionary of all rooms, room name - room key pairs
for i in range(roomCounter):
roomName = 'room' + str(i)
allRooms[roomName] = secrets.token_hex(16)
print( allRooms)
peoplePerRoom = {} #dictionary of all people in each room
allPeople = [] #array of all people
allSessionKeys = {} #dictionary of all session keys, person - session key pairs
@main.route('/', methods=['GET', 'POST'])
def index():
"""Login form to enter a room."""
form = LoginForm()
if form.validate_on_submit():
session['name'] = form.name.data
session['room'] = form.room.data
hashedPassword = HashOperation.hashBasedOnPassword(form.room.data)
userName = form.name.data
challangeFromServer = ServerM.insertUser(userName, hashedPassword)
# Encrpyt challenge with the hash of the password
iv = b'Sixteen byte key'
cipher = AES.new(hashedPassword, AES.MODE_CFB, iv)
msg = cipher.encrypt(challangeFromServer)
status = ServerM.getEncrytedMessage(msg, userName)
print(status)
if status == True:
sessionKeyTrue = uuid.uuid1()
            ################################################# SESSION KEY COMES INTO PLAY HERE #################
session['key'] = form.key.data
if( session['key'] != allRooms[session['room']]):
session['error'] = 'invalid_key'
return redirect(url_for('.error'))
if( session['name'] not in allPeople):
allPeople.append(session['name'])
allSessionKeys[session['name']] = '123456'
return render_template('chat.html', username=session['name'], room=session['room'], key=session['key'])
elif request.method == 'GET':
form.name.data = session.get('name', '')
form.room.data = session.get('room', '')
form.key.data = session.get('key', '')
return render_template('index.html', form=form)
@main.route('/chat')
def chat():
"""Chat room. The user's name and room must be stored in
the session."""
name = session.get('name', '')
room = session.get('room', '')
key = session.get('key', '')
if name == '' or room == '':
return redirect(url_for('.index'))
return render_template('chat.html', username=name, room=room, key=key)
@main.route('/initiateSession')
def initiateSession():
form = LoginForm()
if form.validate_on_submit():
session['name'] = form.name.data
session['room'] = form.room.data
session['key'] = form.key.data
return redirect(url_for('.chat'))
elif request.method == 'GET':
form.name.data = session.get('name', '')
form.room.data = session.get('room', '')
form.key.data = session.get('key', '')
return render_template('initiateSession.html', form = form)
@main.route('/requestKey', methods=['GET', 'POST'])
def requestKey():
##THESE VALUES ARE USED IN HTML FILES##
name = session.get('name', '')
room = session.get('room', '')
key = allRooms[room]
form = RequestKeyForm()
if form.validate_on_submit():
session['name'] = form.name.data
session['room'] = form.room.data
return redirect(url_for('.requestKey'))
elif request.method == 'GET':
form.name.data = session.get('name', '')
form.room.data = session.get('room', '')
return render_template('requestKey.html', form = form, key=key, room=room, username=name)
@main.route('/error')
def error():
error = session.get('error', '')
if error == 'invalid_key':
return render_template('errorPage.html', note='Room key is incorrect.')
return render_template('errorPage.html')
@main.route('/aes', methods=['GET', 'POST'])
def aes():
message = None
if request.method == 'POST':
key = request.form['_key']
iv = request.form['_iv']
text = request.form['_text']
result = decrypt(key, iv, text)
resp = make_response(result)
resp.headers['Content-Type'] = "application/json"
return resp
@main.route('/decrypt2', methods=['GET', 'POST'])
def decrypt2():
message = None
if request.method == 'POST':
data = request.form['data']
roomkey = request.form['key']
        result = decrypt_b64(data, roomkey)
resp = make_response(result)
resp.headers['Content-Type'] = "application/json"
return resp
def decrypt(key, iv, encrypted_text):
aes = AES.new(key, AES.MODE_CBC, iv, segment_size=128)
encrypted_text_bytes = binascii.a2b_hex(encrypted_text)
decrypted_text = aes.decrypt(encrypted_text_bytes)
return decrypted_text.decode('ascii')
def decrypt_b64(data, roomkey):
data = b64decode(data)
byte = PBKDF2( roomkey.encode("utf-8"), "1234salt".encode("utf-8"), 48, 128)
iv = byte[0:16]
key = byte[16:48]
cipher = AES.new(key, AES.MODE_CBC, iv)
text = cipher.decrypt(data)
text = text[:-text[-1]].decode("utf-8")
return text
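

# --- Illustrative counterpart (not part of the original app) -----------------
# A minimal sketch of the encryption side that decrypt_b64() above would
# reverse. It assumes PKCS7 padding and the same key-derivation parameters
# (salt "1234salt", 48 derived bytes split into a 16-byte IV and a 32-byte key).
def encrypt_b64_example(text, roomkey):
    from base64 import b64encode
    byte = PBKDF2(roomkey.encode("utf-8"), "1234salt".encode("utf-8"), 48, 128)
    iv = byte[0:16]
    key = byte[16:48]
    cipher = AES.new(key, AES.MODE_CBC, iv)
    raw = text.encode("utf-8")
    pad = 16 - len(raw) % 16          # PKCS7 padding up to the AES block size
    raw += bytes([pad]) * pad
    return b64encode(cipher.encrypt(raw)).decode("utf-8")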
|
the-stack_0_3997 | ''' Utils module '''
import os
import re
import json
import shutil
import subprocess
from colorama import Fore, Style, init
from lxml import etree
import __main__
from modules.utils.exceptions import NotCreatedDescribeLog
from modules.utils.models import MetadataType, MetadataTypeFromJSON
init(autoreset=True)
INFO_TAG = f'{Fore.YELLOW}[INFO]{Fore.RESET}'
ERROR_TAG = f'{Fore.RED}[ERROR]{Fore.RESET}'
WARNING_TAG = f'{Fore.MAGENTA}[WARNING]{Fore.RESET}'
FATAL_LINE = f'{Fore.RED}[FATAL]'
SUCCESS_LINE = f'{Fore.GREEN}[SUCCESS]'
WARNING_LINE = f'{Fore.MAGENTA}[WARNING]'
API_VERSION = '44.0'
DELTA_FOLDER = 'srcToDeploy'
SOURCE_FOLDER = 'src'
TEMPLATE_FILE = "expansionPanels.html"
PWD = os.path.dirname(os.path.realpath(__main__.__file__))
FOLDER_PATTERN = ['│ ', ' ']
FILE_PATTERN = ['├─ ', '└─ ']
ENV_PROJECT_ID = 'gitMergeRequestTargetProjectId'
ENV_GITLAB_ACCESS_TOKEN = 'GITLAB_ACCESS_TOKEN'
def write_file(folder, filename, content, print_log=False):
''' Writes into a file, creating the folders if not exists '''
if print_log:
        print(f'\t- Writing \'{filename}\' in \'{folder}\''
f'{Style.NORMAL}')
if not os.path.exists(folder):
os.makedirs(folder)
with open(f'{folder}/{filename}', 'w', encoding='utf-8') as output_file:
output_file.write(content)
def call_subprocess(command, verbose=True):
''' Calls subprocess, returns output and return code,
if verbose flag is active it will print the output '''
try:
stdout = subprocess.check_output(command, stderr=subprocess.STDOUT,
shell=True).decode('utf-8')
if verbose:
print_output(f'{Style.DIM}{stdout}{Style.NORMAL}')
return stdout, 0
except subprocess.CalledProcessError as exc:
output = exc.output.decode('utf-8')
returncode = exc.returncode
if verbose:
print(f'{ERROR_TAG} Subprocess returned non-zero exit '
f'status {returncode}')
print_output(output, color=Fore.RED)
return output, returncode
def pprint_xml(xml, declaration=True):
''' Pretty print the passed xml '''
return etree.tostring(xml, pretty_print=True,
encoding='utf-8',
xml_declaration=declaration).decode('utf-8')
def print_apiname(apiname, package_name):
''' Print a warning message '''
indent = ' ' * 3
print(f'{Style.DIM}{indent}▶︎ {Fore.GREEN}[{package_name}] {Fore.MAGENTA}'
f'{apiname} {Fore.RESET}')
def print_differences(child_xml_name, added, modified, erased):
''' Pretty print differences '''
if added or modified or erased:
added_string = __difference_line(f'Added ({len(added)})',
sorted(added))
modified_string = __difference_line(f'Modified ({len(modified)})',
sorted(modified))
erased_string = __difference_line(f'Erased ({len(erased)})',
sorted(erased))
indent = ' ' * 6
print(f'{Style.DIM}{indent}► {Fore.MAGENTA}{child_xml_name}'
f'{Fore.RESET}:\n{added_string}{modified_string}{erased_string}',
end='')
def print_warning(message):
''' Print a warning message '''
indent = ' ' * 6
print(f'{Style.DIM}{indent}⚠ {Fore.MAGENTA}{message}{Fore.RESET}')
def __difference_line(name, values):
''' Returns a formated string with the Difference type and values '''
if values:
indent = ' ' * 9
return f'{indent}▹ {Fore.YELLOW}{name}{Fore.RESET}: {values}\n'
return ''
def print_output(output, color='', tab_level=1):
''' Prints output in the color passed '''
formated = '\t' * tab_level + output.replace('\n', '\n' + '\t' * tab_level)
print(f'{color}{formated}{Fore.RESET}')
def truncate_string(value, size, fill=False):
    ''' Truncates a string to the passed size '''
string_value = str(value)
if len(string_value) > size:
return string_value[:size].strip() + (string_value[size:] and '...')
if fill:
return string_value.ljust(size, ' ')
return string_value
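# Illustrative examples (doctest-style, not part of the original module):
# >>> truncate_string('hello world', 5)
# 'hello...'
# >>> truncate_string('hi', 5, fill=True)
# 'hi   '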
def copy_parents(src, dest_folder, dir_offset=0):
''' Copies src tree into dest, offset (optional) omits n
folders of the src path'''
if src.endswith('-meta.xml'): # if its meta file, erase meta part
src = src[:-len('-meta.xml')]
prev_offset = (0 if dir_offset == 0 else
src.replace('/', '%', dir_offset - 1).find('/') + 1)
post_offset = src.rfind('/')
src_dirs = '' if post_offset == -1 else src[prev_offset:post_offset]
src_filename = src[post_offset + 1:]
os.makedirs(f'{dest_folder}/{src_dirs}', exist_ok=True)
dest_file_path = f'{dest_folder}/{src_dirs}/{src_filename}'
copy_file(src, dest_file_path, True)
copy_file(f'{src}-meta.xml', f'{dest_file_path}-meta.xml', True)
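# Illustrative example (hypothetical paths, not part of the original module):
# copy_parents('src/classes/Foo.cls', 'srcToDeploy', dir_offset=1) copies the file
# (and its -meta.xml, if present) to 'srcToDeploy/classes/Foo.cls', dropping the
# first folder of the source path.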
def copy_file(src, dest, handle_errors):
    ''' Copy a file from source to dest; if handle_errors is set, an exception
    raised while copying is swallowed instead of failing '''
try:
shutil.copy(src, dest)
except Exception as exception: # noqa # pylint: disable=W0703,W0612
if not handle_errors:
raise exception
else:
pass # TODO log verbose level
def get_xml_names(filepath):
''' Extracts the xml names from a describe '''
if not os.path.isfile(filepath):
raise NotCreatedDescribeLog(filepath)
with open(filepath, 'r') as file:
        try:
            data = json.load(file)
            isJSON = True
        except ValueError:
            file.seek(0)  # json.load reads the stream before failing; rewind so the raw text can be re-read
            data = file.read()
            isJSON = False
if isJSON:
dictionary = getXmlNamesFromJSON( data )
else:
dictionary = getXmlNamesFromLog( data )
return dictionary
def getXmlNamesFromJSON(data):
dictionary = {}
for metadataInfo in data[ 'metadataObjects' ]:
inFolder = metadataInfo[ 'inFolder' ]
hasMetadata = metadataInfo[ 'metaFile' ]
childObjects = metadataInfo[ 'childXmlNames' ] if 'childXmlNames' in metadataInfo else []
suffix = metadataInfo[ 'suffix' ] if 'suffix' in metadataInfo else ''
xmlName = metadataInfo[ 'xmlName' ]
dirName = metadataInfo[ 'directoryName' ]
dictKey = dirName
if 'territory2Models' == dirName and 'territory2Model' != suffix:
dictKey = suffix
dictionary[ dictKey ] = MetadataTypeFromJSON( xmlName, dirName, suffix, hasMetadata, inFolder, childObjects )
return dictionary
def getXmlNamesFromLog( data ):
regex_string = (r'\*+\nXMLName: ([a-zA-Z0-9]+)\nDirName: ([a-zA-Z0-9]+)\nSuffix:'
r' ([a-zA-Z0-9]+)\nHasMetaFile: ([a-zA-Z]+)\nInFolder:'
r' ([a-zA-Z]+)\nChildObjects: (?:([a-zA-Z,]+),|)\*+')
xml_names = re.findall(regex_string, data, re.MULTILINE)
dictionary = dict()
for (xml_name, dir_name, suffix, has_metadata,
in_folder, child_objects) in xml_names:
in_folder = castToBoolean( in_folder )
has_metadata = castToBoolean( has_metadata )
dict_key = dir_name
if 'territory2Models' == dir_name and 'territory2Model' != suffix:
dict_key = suffix
dictionary[dict_key] = MetadataType(xml_name, dir_name, suffix,
has_metadata, in_folder,
child_objects)
return dictionary
def castToBoolean( value ):
boolValue = False
if 'true' == value:
boolValue = True
return boolValue
def tree(path, do_output=True, print_hidden=False, max_depth=100, margin=1):
"""Print file and directory tree starting at path.
    By default, it prints up to a depth of 100 and doesn't print hidden files,
ie. files whose name begin with a '.'. It can be modified to only return
the count of files and directories, and not print anything.
Returns the tuple of number of files and number of directories
"""
def _tree(path, depth, margin, output):
file_count, directory_count = 0, 0
files = sorted((os.path.join(path, filename)
for filename in os.listdir(path)
if print_hidden or not filename.startswith('.')),
key=lambda s: s.lower())
files_count = len(files)
for i, filepath in enumerate(files, start=1):
# Print current file, based on previously gathered info
if do_output:
folder_lines = ''.join(FOLDER_PATTERN[folder]
for folder in parent_folders)
corner = FILE_PATTERN[i == files_count]
file_name = os.path.basename(filepath)
margin_str = '\t' * margin
output += f'{margin_str}{folder_lines}{corner}{file_name}\n'
# Recurse if we find a new subdirectory
if os.path.isdir(filepath) and depth < max_depth:
# Append whether current directory is last in current list
parent_folders.append(i == files_count)
# Print subdirectory and get numbers
output, subdir_file_count, subdir_directory_count = \
_tree(os.path.join(filepath), depth + 1, margin, output)
# Back in current directory, remove the newly added directory
parent_folders.pop()
# Update counters
file_count += subdir_file_count
directory_count += subdir_directory_count + 1
elif os.path.isdir(filepath):
directory_count += 1
else:
file_count += 1
return output, file_count, directory_count
parent_folders = []
output, file_count, directory_count = _tree(path, 1, margin, '')
output += f'\n\t{file_count} files, {directory_count} directories\n'
print(f'{Style.DIM}{output}{Style.NORMAL}')
def remove_file(file_path):
''' Removes file if exists '''
if os.path.exists(file_path):
os.remove(file_path)
def check_exist(path):
''' Detects if a file exists '''
if not os.path.exists(path):
print(f"{INFO_TAG} The path {path} didn't exists.")
return False
return True
def print_key_value_list(top_message, items):
''' Prints a key value list '''
message = f'{top_message}\n'
for key, value in items:
message += f'{key_value_list(key, value)}\n'
print(message)
def key_value_list(key, value):
''' Returns a pretty formated list, with key in cyan '''
return f'\t- {Fore.CYAN}{key}{Fore.RESET}: {value}'
def get_first_set_value(values):
''' Extracts the first value of the passed set '''
value = values.pop()
values.add(value)
return value
|
the-stack_0_3998 | # Copyright 2019 U.C. Berkeley RISE Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modifications copyright (C) 2021 Taras Lykhenko, Rafael Soares
from anna.anna_pb2 import (
# Anna's lattice types as an enum
LWW, SET, ORDERED_SET, SINGLE_CAUSAL, MULTI_CAUSAL, PRIORITY, WREN,
# Serialized representations of Anna's lattices
LWWValue, SetValue, SingleKeyCausalValue, MultiKeyCausalValue, PriorityValue, MultiKeyWrenValue
)
class Lattice:
def __init__(self):
raise NotImplementedError
def __str__(self):
return str(self.reveal())
def __eq__(self, other):
if other is None:
return False
if type(other) != type(self):
return False
return self.reveal() == other.reveal()
def reveal(self):
'''
        The reveal method returns an unwrapped version of the underlying
        data structure stored by the lattice.
'''
raise NotImplementedError
def assign(self, value):
'''
Assigns a new value to the lattice -- this must be the same as the type
expected when creating an instance of a particular lattice.
'''
raise NotImplementedError
def merge(self, other):
'''
        Merge two lattices into one. How the merge function works is contingent
        on what the underlying data structure is.
'''
raise NotImplementedError
def serialize(self):
'''
Serializes the underlying data structure, including metadata relevant
to the lattice, into a protobuf and returns a protobuf object along
with an enum tag indicating the type of this lattice.
'''
raise NotImplementedError
class LWWPairLattice(Lattice):
def __init__(self, timestamp, value, promise = 0):
if type(timestamp) != int or type(value) != bytes or type(promise) != int:
raise ValueError('LWWPairLattice must be a timestamp-bytes pair.')
self.ts = timestamp
self.val = value
self.promise = promise
def reveal(self):
return self.val
def assign(self, value):
if type(value) == str:
value = bytes(value, 'utf-8')
if type(value) != tuple or type(value[0]) != int \
or type(value[2]) != bytes or type(value[1]) != int:
raise ValueError('LWWPairLattice must be a timestamp-bytes pair.')
self.ts = value[0]
self.promise = value[1]
self.val = value[2]
def merge(self, other):
if other.ts > self.ts:
return other
else:
return self
def serialize(self):
res = LWWValue()
res.timestamp = self.ts
res.value = self.val
res.promise = self.promise
return res, LWW
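# Illustrative usage (not part of the original module): last-writer-wins keeps
# the value carrying the larger timestamp.
# >>> a = LWWPairLattice(1, b'old')
# >>> b = LWWPairLattice(2, b'new')
# >>> a.merge(b).reveal()
# b'new'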
class WrenLattice(Lattice):
def __init__(self, timestamp, value, promise = 0):
if type(timestamp) != int or type(value) != bytes or type(promise) != int:
            raise ValueError('WrenLattice must be a timestamp-bytes pair.')
self.ts = timestamp
self.val = value
self.promise = promise
def reveal(self):
return self.val
def assign(self, value):
if type(value) == str:
value = bytes(value, 'utf-8')
if type(value) != tuple or type(value[0]) != int \
or type(value[2]) != bytes or type(value[1]) != int:
            raise ValueError('WrenLattice must be a timestamp-bytes pair.')
self.ts = value[0]
self.promise = value[1]
self.val = value[2]
def merge(self, other):
if other.ts > self.ts:
return other
else:
return self
def serialize(self):
res = LWWValue()
res.timestamp = self.ts
res.value = self.val
res.promise = self.promise
return res, WREN
class SetLattice(Lattice):
def __init__(self, value=set()):
if type(value) != set:
raise ValueError('SetLattice can only be formed from a set.')
self.val = value
def reveal(self):
return self.val
def assign(self, value):
if type(value) != set:
raise ValueError('SetLattice can only be formed from a set.')
self.val = value
def merge(self, other):
if type(other) != SetLattice:
raise ValueError('Cannot merge SetLattice with invalid type ' +
str(type(other)) + '.')
new_set = set()
        for v in other.val:
            new_set.add(v)
        for v in self.val:
            new_set.add(v)
return SetLattice(new_set)
def serialize(self):
res = SetValue()
for v in self.val:
if type(v) != bytes:
raise ValueError('Unsupported type %s in SetLattice!' %
(str(type(v))))
res.values.append(v)
return res, SET
# A wrapper class that implements some convenience OrderedSet operations on
# top of a list. # We use this because it is way cheaper to deserialize into,
# at the cost of having expensive reordering operations (e.g. random insert),
# which we expect to be rare for our use cases (we will almost always be
# inserting at the end).
class ListBasedOrderedSet:
# Preconditions: iterable's elements are unique and sorted ascending.
# Behaviour is undefined if it is not.
def __init__(self, iterable=[]):
self.lst = []
for val in iterable:
self.insert(val)
# Inserts a value, maintaining sorted order.
def insert(self, value):
# Microoptimization for the common case.
if len(self.lst) == 0:
self.lst.append(value)
elif value > self.lst[-1]:
self.lst.append(value)
else:
idx, present = self._index_of(value)
if not present:
self.lst.insert(idx, value)
# Finds the index of an element, or where to insert it if you want to
# maintain sorted order.
# Returns (int index, bool present).
# E.g. _index_of(lst, 'my-value') -> (42, true)
# => lst[42] = 'my-value'
# _index_of(lst, 'my-value') -> (42, false)
# => lst[41] < 'my-value' < lst[42]
def _index_of(self, value):
low = 0
high = len(self.lst)
while low < high:
middle = low + int((high - low) / 2)
pivot = self.lst[middle]
if value == pivot:
return (middle, True)
elif value < pivot:
high = middle
elif pivot < value:
low = middle + 1
return (low, False)
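# Illustrative usage (not part of the original module): insertion keeps the
# backing list sorted and silently skips duplicates.
# >>> s = ListBasedOrderedSet([1, 3, 5])
# >>> s.insert(4); s.insert(3)
# >>> s.lst
# [1, 3, 4, 5]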
class OrderedSetLattice(Lattice):
def __init__(self, value=ListBasedOrderedSet()):
if type(value) != ListBasedOrderedSet:
raise ValueError('OrderedSetLattice can only be formed from a '
+ 'ListBasedOrderedSet.')
self.val = value
def reveal(self):
return self.val.lst
def assign(self, value):
if type(value) != ListBasedOrderedSet:
raise ValueError('OrderedSetLattice can only be formed from a' +
' ListBasedOrderedSet.')
self.val = value
def merge(self, other):
if type(other) != OrderedSetLattice:
raise ValueError('Cannot merge OrderedSetLattice with type ' +
str(type(other)) + '.')
# Merge the two sorted lists by lockstep merge.
# Note that reconstruction is faster than in-place merge.
new_lst = []
other = other.reveal().lst
us = self.val.lst
i, j = 0, 0 # Earliest unmerged indices.
while i < len(us) or j < len(other):
if i == len(us):
new_lst.extend(other[j:])
break
elif j == len(other):
new_lst.extend(us[i:])
break
else:
a = us[i]
b = other[j]
if a == b:
new_lst.append(a)
i += 1
j += 1
elif a < b:
new_lst.append(a)
i += 1
elif b < a:
new_lst.append(b)
j += 1
return OrderedSetLattice(ListBasedOrderedSet(new_lst))
def serialize(self):
res = SetValue()
res.values.extend(self.val.lst)
return res, ORDERED_SET
class MaxIntLattice(Lattice):
def __init__(self, value):
if type(value) != int:
raise ValueError('MaxIntLattice only accepts integers.')
self.value = value
def reveal(self):
return self.value
def assign(self, value):
if type(value) != int:
raise ValueError('MaxIntLattice only accepts integers.')
self.value = value
def merge(self, other):
if type(other) != MaxIntLattice:
raise ValueError('Cannot merge MaxIntLattice with type ' +
str(type(other)) + '.')
if other.value > self.value:
self.value = other.value
class MapLattice(Lattice):
def __init__(self, mp):
if type(mp) != dict:
raise ValueError('MapLattice only accepts dict data structures.')
self.mp = mp
def reveal(self):
return self.mp
def assign(self, mp):
if type(mp) != dict:
raise ValueError('MapLattice only accepts dict data structures.')
self.mp = mp
def merge(self, other):
if type(other) != MapLattice:
raise ValueError('Cannot merge MapLattice with type ' +
str(type(other)) + '.')
        for key in other.mp.keys():
if key in self.mp:
if (not isinstance(self.mp[key], Lattice) or not
isinstance(other.mp[key], Lattice)):
raise ValueError('Cannot merge a MapLattice with values' +
' that are not lattice types.')
self.mp[key].merge(other.mp[key])
else:
self.mp[key] = other.mp[key]
def copy(self):
return MapLattice(self.mp.copy())
class VectorClock(MapLattice):
def __init__(self, mp, deserialize=False):
if type(mp) != dict:
            raise ValueError(f'VectorClock must be a dict, not {type(mp)}.')
if deserialize:
self.mp = VectorClock._deserialize(mp)
else:
VectorClock._validate_vc(mp)
self.mp = mp
def _deserialize(mp):
result = {}
for key in mp:
if type(mp[key]) != int:
raise ValueError('Cannot deserialize VectorClock from'
+ ' non-integer values.')
result[key] = MaxIntLattice(mp[key])
return result
def _validate_vc(mp):
for val in mp.values():
if type(val) != MaxIntLattice:
raise ValueError(('VectorClock values must be MaxIntLattices,'
+ ' not %s.') % str(type(val)))
def assign(self, mp):
if type(mp) != dict:
raise ValueError('VectorClock must be a dict.')
VectorClock._validate_vc(mp)
self.mp = mp
def update(self, key, count):
if key in self.mp:
lattice = MaxIntLattice(count)
self.mp[key].merge(lattice)
def serialize(self, pobj):
for key in self.mp:
pobj[key] = self.mp[key].reveal()
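# Illustrative usage (not part of the original module):
# >>> vc = VectorClock({'n1': 1, 'n2': 2}, deserialize=True)
# >>> vc.update('n1', 5)          # merges a MaxIntLattice(5) into the 'n1' entry
# >>> out = {}
# >>> vc.serialize(out)
# >>> out
# {'n1': 5, 'n2': 2}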
class SingleKeyCausalLattice(Lattice):
def __init__(self, vector_clock, value):
if type(vector_clock) != VectorClock:
raise ValueError('Vector clock of SingleKeyCausalLattice must be a'
+ ' VectorClock.')
if type(value) != SetLattice:
raise ValueError('Value of SingleKeyCausalLattice must be a'
+ ' SetLattice.')
self.vector_clock = vector_clock
self.value = value
def reveal(self):
return list(self.value.reveal())
def assign(self, value):
if type(value) != SetLattice:
raise ValueError('Value of SingleKeyCausalLattice must be a'
+ ' SetLattice.')
self.value = value
def merge(self, other):
if type(other) != SingleKeyCausalLattice:
raise ValueError('Cannot merge SingleKeyCausalLattice with type ' +
str(type(other)) + '.')
previous = self.vector_clock.copy()
self.vector_clock.merge(other.vector_clock)
if self.vector_clock == other.vector_clock:
# The other version dominates this version.
self.value = other.value
elif self.vector_clock != previous:
# The versions are concurrent.
self.value.merge(other.value)
else:
# This version dominates, so we do nothing.
pass
def serialize(self):
skcv = SingleKeyCausalValue()
# Serialize the vector clock for this particular lattice by adding each
# key-counter pair.
self.vector_clock.serialize(skcv.vector_clock)
# Add the value(s) stored by this lattice.
        for v in self.value.reveal():
            skcv.values.append(v)
return skcv, SINGLE_CAUSAL
class MultiKeyCausalLattice(Lattice):
def __init__(self, vector_clock, dependencies, value):
if type(vector_clock) != VectorClock:
raise ValueError('Vector clock of MultiKeyCausalLattice must be a'
+ ' VectorClock.')
if type(dependencies) != MapLattice:
raise ValueError('Dependency set of MultiKeyCausalLattice must be'
+ ' a MapLattice.')
if type(value) != SetLattice:
raise ValueError('Value of MultiKeyCausalLattice must be a'
+ ' SetLattice.')
self.vector_clock = vector_clock
self.dependencies = dependencies
self.value = value
def reveal(self):
return list(self.value.reveal())
def assign(self, value):
if type(value) != SetLattice:
raise ValueError('Value of MultiKeyCausalLattice must be a'
+ ' SetLattice.')
self.value = value
def merge(self, other):
if type(other) != MultiKeyCausalLattice:
raise ValueError('Cannot merge MultiKeyCausalLattice with type ' +
str(type(other)) + '.')
previous = self.vector_clock.copy()
self.vector_clock.merge(other.vector_clock)
if self.vector_clock == other.vector_clock:
# other version dominates this version
self.dependencies = other.dependencies
self.value = other.value
elif self.vector_clock != previous:
# versions are concurrent
self.dependencies.merge(other.dependencies)
self.value.merge(other.value)
else:
# this version dominates, so we do nothing
pass
def serialize(self):
mkcv = MultiKeyCausalValue()
# Serialize the vector clock for this particular lattice by adding each
# key-counter pair.
self.vector_clock.serialize(mkcv.vector_clock)
# Serialize the vector clocks for each of the keys this lattice depends
# on.
        for key in self.dependencies.reveal():
            # assumes the proto field for dependencies is a repeated message named `dependencies`
            kv = mkcv.dependencies.add()
            kv.key = key
            self.dependencies.reveal()[key].serialize(kv.vector_clock)
# Add the value(s) stored by this lattice.
        for v in self.value.reveal():
            mkcv.values.append(v)
return mkcv, MULTI_CAUSAL
class PriorityLattice(Lattice):
def __init__(self, priority, value):
if type(priority) != float or type(value) != bytes:
raise ValueError('PriorityLattice must be a double-bytes pair.')
self.priority = priority
self.value = value
def reveal(self):
return self.value
def assign(self, value):
        if type(value) == str:
            value = bytes(value, 'utf-8')
if type(value) != tuple or type(value[0]) != float or type(value[1]) != bytes:
raise ValueError('PriorityLattice must be a double-bytes pair.')
self.priority = value[0]
self.value = value[1]
def merge(self, other):
if other.priority < self.priority:
return other
else:
return self
def serialize(self):
res = PriorityValue()
res.priority = self.priority
res.value = self.value
return res, PRIORITY
|
the-stack_0_4001 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014, Lars Asplund [email protected]
from __future__ import print_function
from os.path import join, exists
from os import makedirs
from shutil import rmtree
import traceback
import vunit.ostools as ostools
from vunit.test_report import TestResult, PASSED, FAILED
import sys
class TestRunner:
def __init__(self, report, output_path, verbose=False):
self._report = report
self._output_path = output_path
self._verbose = verbose
def _run_test_suite(self, test_suite, num_tests):
def add_and_print_results(results, runtime):
time = runtime/len(test_suite.test_cases)
for test_name in test_suite.test_cases:
self._report.add_result(test_name,
results[test_name],
time,
output_file_name)
self._report.print_latest_status(total_tests=num_tests)
print()
for test_name in test_suite.test_cases:
self._print_test_case_banner(test_name)
start = ostools.get_time()
old_stdout = sys.stdout
old_stderr = sys.stderr
output_path = join(self._output_path, self._encode_path(test_suite.name))
output_file_name = join(output_path, "output.txt")
try:
# If we could not clean output path, fail all tests
if exists(output_path):
rmtree(output_path)
makedirs(output_path)
output_file = open(output_file_name, "w")
except:
traceback.print_exc()
results = self._fail_suite(test_suite)
add_and_print_results(results, 0.0)
return
try:
if self._verbose:
                sys.stdout = TeeToFile([old_stdout, output_file])
                sys.stderr = TeeToFile([old_stderr, output_file])
else:
sys.stdout = TeeToFile([output_file])
sys.stderr = TeeToFile([output_file])
results = test_suite.run(output_path)
except:
traceback.print_exc()
results = self._fail_suite(test_suite)
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
output_file.close()
any_not_passed = any(value != PASSED for value in results.values())
if (not self._verbose) and any_not_passed:
with open(output_file_name, "r") as fread:
for line in fread:
print(line, end="")
runtime = ostools.get_time() - start
add_and_print_results(results, runtime)
def _fail_suite(self, test_suite):
" Return failure for all tests in suite "
results = {}
for test_name in test_suite.test_cases:
results[test_name] = FAILED
return results
def _print_test_case_banner(self, test_case_name):
" Print a banner before running each testcase "
print("running %s" % test_case_name)
def _encode_path(self, path):
" @TODO what if two tests named 'Test 1' and 'Test_1' ? "
return path.replace(" ", "_")
def run(self, test_suites):
num_tests = 0
for test_suite in test_suites:
for test_name in test_suite.test_cases:
num_tests += 1
if self._verbose:
print("Running test: " + test_name)
if self._verbose:
print("Running %i tests" % num_tests)
for test_suite in test_suites:
self._run_test_suite(test_suite, num_tests)
class TeeToFile:
def __init__(self, files):
self._files = files
def write(self, txt):
for ofile in self._files:
ofile.write(txt)
def flush(self):
for ofile in self._files:
ofile.flush()
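# Illustrative usage (not part of the original module): tee stdout to the
# console and to a log file for the duration of a block.
# with open('run.log', 'w') as log:
#     sys.stdout = TeeToFile([sys.__stdout__, log])
#     try:
#         print('hello')        # written to both the console and run.log
#     finally:
#         sys.stdout = sys.__stdout__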
|
the-stack_0_4002 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import math
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytext.config import ConfigBase
from pytext.config.module_config import ModuleConfig
from pytext.models.module import create_module
from pytext.models.representations.transformer.positional_embedding import (
PositionalEmbedding,
)
from pytext.models.seq_models.base import (
PlaceholderAttentionIdentity,
PlaceholderIdentity,
)
from pytext.models.seq_models.positional import (
PostionalEmbedCombine,
PostionalEmbedType,
SinusoidalPositionalEmbedding,
)
from pytext.models.seq_models.utils import Linear
from torch import Tensor
from torch.nn import LayerNorm
from .attention import MultiheadAttention
from .base import (
PyTextIncrementalDecoderComponent,
PyTextSeq2SeqModule,
PlaceholderIdentity,
)
from .light_conv import LightweightConv
from .projection_layers import (
DecoderWithLinearOutputProjection,
DecoupledDecoderHead,
)
from .utils import extract_ontology_vocab
class LightConvDecoderLayer(PyTextSeq2SeqModule):
class Config(ConfigBase):
attention_dropout: float = 0.0
decoder_attention_heads: int = 1
self_attention_heads: int = 1
decoder_conv_dim: int = 128
decoder_conv_type: Union[
LightweightConv.Config, PlaceholderIdentity.Config
] = LightweightConv.Config()
attention_type: Union[
MultiheadAttention.Config, None
] = MultiheadAttention.Config()
self_attention_type: Optional[MultiheadAttention.Config] = None
decoder_embed_dim: int = 128
decoder_ffn_embed_dim: int = 512
decoder_glu: bool = True
decoder_normalize_before: bool = False
dropout: float = 0.1
input_dropout: float = 0.1
relu_dropout: float = 0.0
need_attention: bool = True
convolution_type: str = "causal"
@classmethod
def from_config(cls, config, kernel_size):
conv = create_module(
config.decoder_conv_type,
input_size=config.decoder_conv_dim,
kernel_size=kernel_size,
convolution_type=config.convolution_type,
)
if config.attention_type is not None:
attention = create_module(
config.attention_type,
config.decoder_embed_dim,
config.decoder_attention_heads,
)
else:
attention = None
if config.self_attention_type is not None:
self_attention = create_module(
config.self_attention_type,
config.decoder_embed_dim,
config.self_attention_heads,
)
else:
self_attention = None
return cls(
**config._asdict(),
conv=conv,
self_attention=self_attention,
attention=attention
)
def __init__(
self,
attention_dropout,
decoder_attention_heads,
self_attention_heads,
decoder_conv_dim,
# ARBABU: need to remove these two type parameters
decoder_conv_type,
attention_type,
self_attention_type,
decoder_embed_dim,
decoder_ffn_embed_dim,
decoder_glu,
decoder_normalize_before,
dropout,
input_dropout,
relu_dropout,
need_attention,
convolution_type,
conv=None,
self_attention=None,
attention=None,
):
super().__init__()
self.embed_dim = decoder_embed_dim
self.conv_dim = decoder_conv_dim
if decoder_glu:
self.linear1 = Linear(self.embed_dim, 2 * self.conv_dim)
self.act = nn.GLU()
else:
self.linear1 = Linear(self.embed_dim, self.conv_dim)
self.act = PlaceholderIdentity()
self.conv = conv
self.linear2 = Linear(self.conv_dim, self.embed_dim)
self.dropout = dropout
self.relu_dropout = relu_dropout
self.input_dropout = input_dropout
self.normalize_before = decoder_normalize_before
self.conv_layer_norm = LayerNorm(self.embed_dim)
if attention is None:
self.no_encoder_attn = True
self.encoder_attn = PlaceholderAttentionIdentity()
self.encoder_attn_layer_norm = PlaceholderIdentity()
else:
self.no_encoder_attn = False
self.encoder_attn = attention
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
if self_attention is None:
self.has_self_attn = False
self.self_attn = PlaceholderAttentionIdentity()
else:
self.has_self_attn = True
self.self_attn = self_attention
self.fc1 = Linear(self.embed_dim, decoder_ffn_embed_dim)
self.fc2 = Linear(decoder_ffn_embed_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
self.need_attn = need_attention
def forward(
self,
x,
encoder_out: Tensor,
encoder_padding_mask: Optional[Tensor],
decoder_padding_mask: Optional[Tensor],
incremental_state: Optional[Dict[str, Tensor]],
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, src_len)` where padding elements are indicated by ``1``.
        Returns:
            a tuple ``(x, attn)``: decoded output `x` of shape
            `(seq_len, batch, embed_dim)` and optional attention weights
"""
residual = x
normalize = self.maybe_layer_norm(before=True)
if normalize:
x = self.conv_layer_norm(x)
if self.has_self_attn:
x, _ = self.self_attn(
x,
key=x,
value=x,
key_padding_mask=decoder_padding_mask,
need_weights=False,
incremental_state=incremental_state,
)
x = residual + x
residual = x
x = F.dropout(x, p=self.input_dropout, training=self.training)
x = self.linear1(x)
x = self.act(x)
if decoder_padding_mask is not None:
x = x.masked_fill(decoder_padding_mask.transpose(0, 1).unsqueeze(2), 0)
x = self.conv(x, incremental_state=incremental_state)
x = self.linear2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
normalize = self.maybe_layer_norm(after=True)
if normalize:
x = self.conv_layer_norm(x)
attn: Optional[Tensor] = None
if not self.no_encoder_attn:
residual = x
normalize = self.maybe_layer_norm(before=True)
if normalize:
x = self.encoder_attn_layer_norm(x)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
need_weights=(not self.training and self.need_attn),
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
normalize = self.maybe_layer_norm(after=True)
if normalize:
x = self.encoder_attn_layer_norm(x)
residual = x
normalize = self.maybe_layer_norm(before=True)
if normalize:
x = self.final_layer_norm(x)
x = F.relu(self.fc1(x))
x = F.dropout(x, p=self.relu_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
normalize = self.maybe_layer_norm(after=True)
if normalize:
x = self.final_layer_norm(x)
return x, attn
def maybe_layer_norm(self, before: bool = False, after: bool = False):
"""This a utility function which helps to control the layer norm behavior
`before` and `after` specific components using one variable in config.
If self.normalize_before is set to True, output is true only when `before`
is True
"""
assert before ^ after, "Incorrect usage"
return after ^ self.normalize_before
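    # Example (illustrative): with normalize_before=True (pre-norm),
    # maybe_layer_norm(before=True) is True and maybe_layer_norm(after=True) is False;
    # with normalize_before=False (post-norm) the two results are swapped.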
def reorder_incremental_state(
self, incremental_state: Dict[str, Tensor], new_order: Tensor
):
self.self_attn.reorder_incremental_state(incremental_state, new_order)
self.encoder_attn.reorder_incremental_state(incremental_state, new_order)
self.conv.reorder_incremental_state(incremental_state, new_order)
def extra_repr(self):
return (
"dropout={}, relu_dropout={}, input_dropout={}, normalize_before={}".format(
self.dropout,
self.relu_dropout,
self.input_dropout,
self.normalize_before,
)
)
class ConvDecoderConfig(ConfigBase):
dropout: float = 0.1
decoder_embed_dim: int = 128
decoder_input_dim: int = 128
decoder_output_dim: int = 128
max_target_positions: int = 128
decoder_learned_pos: bool = False
no_token_positional_embeddings: bool = False
positional_embedding_type: PostionalEmbedType = PostionalEmbedType.LEARNED
combine_pos_embed: PostionalEmbedCombine = PostionalEmbedCombine.CONCAT
decoder_normalize_before: bool = False
class LightConvDecoderBase(PyTextIncrementalDecoderComponent):
class Config(ModuleConfig):
decoder_config: ConvDecoderConfig = ConvDecoderConfig()
layer_config: LightConvDecoderLayer.Config = LightConvDecoderLayer.Config()
decoder_kernel_size_list: List[int] = [3, 7, 15]
@classmethod
def from_config(cls, config, tgt_dict, tgt_embedding):
kernel_size_list = config.decoder_kernel_size_list
layers = []
for size in kernel_size_list:
assert (
config.decoder_config.decoder_embed_dim
== config.layer_config.decoder_embed_dim
)
layers.append(create_module(config.layer_config, kernel_size=size))
return cls(tgt_dict, tgt_embedding, layers, config.decoder_config)
def __init__(self, target_dict, embed_tokens, layers, decoder_config):
super().__init__()
self.dropout = decoder_config.dropout
input_embed_dim = embed_tokens.embedding_dim
embed_dim = decoder_config.decoder_embed_dim
output_embed_dim = decoder_config.decoder_output_dim
padding_idx = target_dict.get_pad_index()
self.max_target_positions = decoder_config.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim
self.padding_idx = padding_idx
self.no_token_positional_embeddings = (
decoder_config.no_token_positional_embeddings
)
# creating this is also conditional
self.project_in_dim = (
Linear(input_embed_dim, embed_dim)
if embed_dim != input_embed_dim
else PlaceholderIdentity()
)
self.embed_layer_norm = LayerNorm(embed_dim)
self.combine_pos_embed = decoder_config.combine_pos_embed.value
if decoder_config.combine_pos_embed == PostionalEmbedCombine.SUM:
pos_embed_dim = embed_dim
elif decoder_config.combine_pos_embed == PostionalEmbedCombine.CONCAT:
pos_embed_dim = embed_dim - input_embed_dim
else:
raise NotImplementedError
if not decoder_config.no_token_positional_embeddings:
if decoder_config.positional_embedding_type == PostionalEmbedType.LEARNED:
self.embed_positions = PositionalEmbedding(
decoder_config.max_target_positions,
pos_embed_dim,
padding_idx,
)
elif (
decoder_config.positional_embedding_type
== PostionalEmbedType.SINUSOIDAL
or decoder_config.positional_embedding_type == PostionalEmbedType.HYBRID
):
self.embed_positions = SinusoidalPositionalEmbedding(
pos_embed_dim,
padding_idx,
init_size=decoder_config.max_target_positions,
learned_embed=decoder_config.positional_embedding_type
== PostionalEmbedType.HYBRID,
)
else:
raise NotImplementedError("Positional embedding type not supported")
else:
self.embed_positions = PlaceholderIdentity()
self.layers = nn.ModuleList(layers)
self.project_out_dim = (
Linear(embed_dim, output_embed_dim, bias=False)
if embed_dim != output_embed_dim
else PlaceholderIdentity()
)
self.normalize = decoder_config.decoder_normalize_before
if self.normalize:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = PlaceholderIdentity()
def forward_unprojected(
self,
prev_output_tokens: Tensor,
encoder_out: Dict[str, Tensor],
incremental_state: Optional[Dict[str, Tensor]] = None,
timestep: Optional[int] = None,
) -> Tuple[Tensor, Dict[str, Tensor]]:
output_dict: Dict[str, Tensor] = {}
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens([[prev_output_tokens]])
if not self.no_token_positional_embeddings:
# TODO : Verify incremental generation for AR mode
x = self.pos_embed(x, prev_output_tokens)
else:
x = self.project_in_dim(x)
x = self.embed_layer_norm(x)
x = F.dropout(x, p=self.dropout, training=self.training)
output_dict["decoder_layer_0"] = x.clone()
# B x T x C -> T x B x C
x = x.transpose(0, 1)
last_layer_attn: Optional[Tensor] = None
decoder_padding_mask = prev_output_tokens.eq(self.padding_idx)
target_lengths = (~decoder_padding_mask).sum(dim=1)
if not decoder_padding_mask.any():
decoder_mask = None
else:
decoder_mask = decoder_padding_mask
encoder = encoder_out["encoder_out"]
encoder_mask: Optional[Tensor] = None
if "encoder_mask" in encoder_out:
encoder_mask = encoder_out["encoder_mask"]
# decoder layers
for idx, layer in enumerate(self.layers):
encoder = encoder_out["encoder_out"]
encoder_mask: Optional[Tensor] = None
if "encoder_mask" in encoder_out:
encoder_mask = encoder_out["encoder_mask"]
x, last_layer_attn = layer(
x, encoder, encoder_mask, decoder_mask, incremental_state
)
output_dict["decoder_layer_" + str(idx + 1)] = x.transpose(0, 1).clone()
if self.normalize:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
x = self.project_out_dim(x)
if last_layer_attn is not None:
output_dict["attn_scores"] = last_layer_attn
output_dict["target_lengths"] = target_lengths
output_dict["decoder_mask"] = decoder_padding_mask
for key in encoder_out.keys():
output_dict[key] = encoder_out[key]
return x, output_dict
def pos_embed(self, x, src_tokens):
# TODO : Positional embeddings needs to be tested in AR mode
if self.combine_pos_embed == PostionalEmbedCombine.SUM.value:
x = self.project_in_dim(x)
return self._vanilla_transformer(x, src_tokens)
elif self.combine_pos_embed == PostionalEmbedCombine.CONCAT.value:
return self._concat_pos_embed(x, src_tokens)
else:
raise NotImplementedError("Method not supported")
def _vanilla_transformer(self, x, src_tokens):
x += self.embed_positions(src_tokens)
return x
def _concat_pos_embed(self, x, src_tokens):
pos_embed = self.embed_positions(src_tokens)
return torch.cat([x, pos_embed], dim=2)
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.no_token_positional_embeddings:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions())
def reorder_incremental_state(
self, incremental_state: Dict[str, Tensor], new_order: Tensor
):
for layer in self.layers:
layer.reorder_incremental_state(incremental_state, new_order)
def get_probs(
self, decoder_out: Tuple[Tensor, Dict[str, Tensor]]
) -> Tuple[Tensor, Tensor, Tensor]:
return self.projection_layer.get_probs(decoder_out)
class LightConvDecoder(LightConvDecoderBase):
def __init__(self, target_dict, embed_tokens, layers, decoder_config):
super().__init__(target_dict, embed_tokens, layers, decoder_config)
self.projection_layer = DecoderWithLinearOutputProjection(
target_dict, target_dict, decoder_config.decoder_output_dim
)
def forward(
self,
prev_output_tokens: Tensor,
encoder_out: Dict[str, Tensor],
incremental_state: Optional[Dict[str, Tensor]] = None,
timestep: Optional[int] = None,
) -> Tuple[Tensor, Dict[str, Tensor]]:
hidden_decoder_output = self.forward_unprojected(
prev_output_tokens, encoder_out, incremental_state, timestep
)
return self.projection_layer(
encoder_out=encoder_out,
decoder_out=hidden_decoder_output,
incremental_state=incremental_state,
)
def get_probs(
self, decoder_out: Tuple[Tensor, Dict[str, Tensor]]
) -> Tuple[Tensor, Tensor, Tensor]:
return self.projection_layer.get_probs(decoder_out)
class LightConvDecoupledDecoder(LightConvDecoderBase):
class Config(ModuleConfig):
decoder_config: ConvDecoderConfig = ConvDecoderConfig()
layer_config: LightConvDecoderLayer.Config = LightConvDecoderLayer.Config()
decoder_kernel_size_list: List[int] = [3, 7, 15]
decoder_layers: int = 3
decoupled_attention_heads: int = 1
ontology_generation_only: bool = False
model_output_logprob: bool = True
def __init__(
self,
target_dict,
embed_tokens,
layers,
decoder_config,
ontology_generation_only,
decoupled_attention_heads,
model_output_logprob,
):
super().__init__(target_dict, embed_tokens, layers, decoder_config)
fixed_generation_vocab = None
if ontology_generation_only:
fixed_generation_vocab = extract_ontology_vocab(target_dict)
self.projection_layer = DecoupledDecoderHead(
target_dict,
target_dict,
out_embed_dim=decoder_config.decoder_output_dim,
encoder_hidden_dim=decoder_config.decoder_input_dim,
pointer_attention_heads=decoupled_attention_heads,
fixed_generation_vocab=fixed_generation_vocab,
model_output_logprob=model_output_logprob,
)
def forward(
self,
prev_output_tokens: Tensor,
encoder_out: Dict[str, Tensor],
incremental_state: Optional[Dict[str, Tensor]] = None,
timestep: Optional[int] = None,
) -> Tuple[Tensor, Dict[str, Tensor]]:
hidden_decoder_output = self.forward_unprojected(
prev_output_tokens, encoder_out, incremental_state, timestep
)
return self.projection_layer(
encoder_out=encoder_out,
decoder_out=hidden_decoder_output,
incremental_state=incremental_state,
)
@classmethod
def from_config(cls, config, tgt_dict, tgt_embedding):
kernel_size_list = config.decoder_kernel_size_list
layers = []
for size in kernel_size_list:
assert (
config.decoder_config.decoder_embed_dim
== config.layer_config.decoder_embed_dim
)
layers.append(create_module(config.layer_config, kernel_size=size))
return cls(
tgt_dict,
tgt_embedding,
layers,
config.decoder_config,
config.ontology_generation_only,
config.decoupled_attention_heads,
config.model_output_logprob,
)
|
the-stack_0_4004 | # original: https://gist.github.com/karpitsky/29b49c3ae759a606b7db39ad3c3315ca
# This code was taken from karpitsky's gist.
# Modifications:
# --------------
# Takes a collection id obtained from a public collection
# on 'translate.yandex.ru'. Writes the collection to `dict/newdict.txt`
# under the current folder.
import sys
import string
import random
import requests
from pathlib import Path
collection_id = sys.argv[1]
uid = ''.join(random.choices(string.digits, k=18))
cookies = {
'first_visit_src': 'collection_share_desktop',
'yandexuid': uid
}
url = f'https://translate.yandex.ru/props/api/collections/{collection_id}?srv=tr-text&uid'
response = requests.get(url, cookies=cookies).json()
Path('dict').mkdir(exist_ok=True)
with open('dict/newdict.txt', 'w') as fp:
for pair in response['collection']['records']:
fp.write(f'{pair["text"]} - {pair["translation"]}\n')
|
the-stack_0_4005 | import pandas as pd
from ggindex.IndexViz.IndexReport import IndexReport
from ggindex.IndexViz.IndexComparator import IndexComparator
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import numpy as np
from ggindex.GreenGrowthStuff import GreenGrowthStuff
class IndexCrossReport(GreenGrowthStuff):
'''
A class to compare two GreenGrowthIndex AND the data used to compute them.
It is more general than the IndexComparator which only compare the GreenGrowthIndex with looking at the data.
Attributes
----------
Report_1: IndexReport
The IndexReport of the first GreenGrowthIndex using first data
name_1: str
Name of the first Report
Report_2: IndexReport
        The IndexReport of the second GreenGrowthIndex using second data
name_2: str
Name of the second Report
IndexComparator: IndexComparator
The comparison of first and second GreenGrowthIndex
data: pd.DataFrame
The full data of the first and second Index
'''
def __init__(self, data_1, ST_1, name_1, data_2, ST_2, name_2):
'''
Initialization
Parameters
----------
data_1: pd.DataFrame
The data to compute the first GreenGrowthIndex
ST_1: DataFrame
The sustainable targets to compute the first GreenGrowthIndex
name_1: str
Name of the first GreenGrowthIndex and Data
data_2: pd.DataFrame
The data to compute the second GreenGrowthIndex
ST_2: DataFrame
The sustainable targets to compute the second GreenGrowthIndex
name_2: str
Name of the second GreenGrowthIndex and Data
'''
super(IndexCrossReport, self).__init__()
self.Report_1 = IndexReport(data_1, ST_1)
self.Report_2 = IndexReport(data_2, ST_2)
self.name_1 = name_1
self.name_2 = name_2
self.IndexComparator = IndexComparator(self.Report_1.GGI,
self.Report_2.GGI,
name_GGI_1=name_1,
name_GGI_2=name_2)
self.data = self.merge_data(data_1, data_2, name_1, name_2)
def add_normalized_to_data(self, data, GGI):
'''
Add the normalized value to the dataframe
'''
data = data.copy().set_index(['ISO', 'Indicator'])
value_normed = GGI.to_long()
value_normed = value_normed[value_normed.Aggregation == 'Indicator_normed'].drop(columns=['Aggregation']).dropna().set_index('Variable', append=True)
data['Value_normalized'] = value_normed['Value']
return data.reset_index()
def merge_data(self, data_1, data_2, name_1, name_2):
'''
TO DO
'''
data_1['name'] = name_1
data_2['name'] = name_2
data_1 = self.add_normalized_to_data(data_1, self.Report_1.GGI)
data_2 = self.add_normalized_to_data(data_2, self.Report_2.GGI)
df = pd.concat([data_1, data_2], axis=0)
return df
def cross_indicators_dimension_continent(self, dimension, continent, normalized=True, save=None):
'''
        Plot, for every country of the given continent, horizontal bar charts of the
        indicators in the given dimension, one subplot per indicator, comparing the two
        datasets (red: first, blue: second). If save is a folder path, the figure is
        also written to HTML.
'''
if normalized:
value = 'Value_normalized'
title = f"{dimension} indicators normalized {continent}: {self.name_1} and {self.name_2}"
save_name = f'CrossReport_indicators_normalized_{dimension}_{continent}'
else:
value = 'Value'
title = f"{dimension} indicators {continent}: {self.name_1} and {self.name_2}"
save_name = f'CrossReport_indicators_{dimension}_{continent}'
indicator_names = self.IND_CAT_DIM[self.IND_CAT_DIM.Dimension ==
dimension]['Indicator'].values
df = self.data[(self.data.Indicator.isin(indicator_names))
& (self.data.Continent == continent)]
hover_text = "%{text} <br>%{x}"
fig = make_subplots(rows=1,
cols=len(indicator_names),
subplot_titles=indicator_names,
y_title='ISO')
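        # One subplot per indicator; red bars show the first dataset, blue bars the second.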
for k, ind in enumerate(indicator_names):
tmp_df = df[df.Indicator == ind].set_index('name')
fig.add_trace(go.Bar(y=tmp_df.loc[self.name_1]['ISO'],
x=tmp_df.loc[self.name_1][value],
orientation='h',
marker=dict(opacity=0.5),
marker_color='red',
hovertemplate=hover_text,
text=tmp_df.loc[self.name_1]['Text'],
name=self.name_1,
width=0.4,
),
row=1,
col=k + 1)
fig.add_trace(go.Bar(y=tmp_df.loc[self.name_2]['ISO'],
x=tmp_df.loc[self.name_2][value],
orientation='h',
marker=dict(opacity=0.5),
hovertemplate=hover_text,
text=tmp_df.loc[self.name_2]['Text'],
marker_color='blue',
width=0.4,
name=self.name_2,
),
row=1,
col=k + 1)
fig.update_layout(height=1000, width=len(indicator_names) * 200,
title_text=title,
hoverlabel_align='right',
showlegend=False,
barmode='group')
if save:
fig.write_html(f"{save}/{save_name}.html")
return fig
def cross_indicators_ISO(self, ISO, normalized=True, save=None):
'''
        Plot every indicator for a single country (ISO) in a grid of small panels,
        comparing the two datasets (red: first, blue: second). If save is a folder
        path, the figure is also written to HTML.
'''
if normalized:
value = 'Value_normalized'
else:
value = 'Value'
df = self.data[(self.data.ISO == ISO)]
country = df['Country'].unique()[0]
indicator_names = self.IND_CAT_DIM['Indicator'].to_numpy().reshape(18, 2)
hover_text = "%{text} <br>%{x}"
fig = make_subplots(rows=18, cols=2,
subplot_titles=indicator_names.flatten())
for (x, y), ind in np.ndenumerate(indicator_names):
row = x + 1
col = y + 1
tmp_df = df[df.Indicator == ind]
fig.add_trace(go.Bar(x=tmp_df[value],
y=tmp_df['name'],
width=0.1,
marker=dict(opacity=0.5),
orientation='h',
marker_color=['red', 'blue'],
),
row=row,
col=col)
fig.add_trace(go.Scatter(x=tmp_df[value],
y=tmp_df['name'],
marker=dict(opacity=0.99, size=10),
marker_color=['red', 'blue'],
mode='markers',
text=tmp_df['Text'],
hovertemplate=hover_text,
),
row=row,
col=col)
            # hover text is supplied through the Scatter trace's hovertemplate above
fig.update_layout(height=100 * 18, width=2 * 400,
title_text=f"indicators {country} {ISO}",
showlegend=False,
hoverlabel_align='right', barmode='group')
if save:
fig.write_html(f"{save}/CrossReport_indicators_{ISO}.html")
return fig
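# Minimal usage sketch (hypothetical inputs; data_a/ST_a and data_b/ST_b must follow the
# column layout expected by IndexReport):
#     cross = IndexCrossReport(data_a, ST_a, '2019', data_b, ST_b, '2020')
#     fig = cross.cross_indicators_ISO('KOR', normalized=True, save='reports')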
|
the-stack_0_4008 | # coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import os.path
import random
import re
import time
import traceback
from .common import InfoExtractor, SearchInfoExtractor
from ..jsinterp import JSInterpreter
from ..swfinterp import SWFInterpreter
from ..compat import (
compat_chr,
compat_parse_qs,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_str,
)
from ..utils import (
bool_or_none,
clean_html,
error_to_compat_str,
ExtractorError,
float_or_none,
get_element_by_id,
int_or_none,
mimetype2ext,
orderedSet,
parse_codecs,
parse_duration,
remove_quotes,
remove_start,
smuggle_url,
str_or_none,
str_to_int,
try_get,
unescapeHTML,
unified_strdate,
unsmuggle_url,
uppercase_escape,
url_or_none,
urlencode_postdata,
urljoin,
)
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
_LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup'
_CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge'
_TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'
_NETRC_MACHINE = 'youtube'
# If True it will raise an error if no login info is provided
_LOGIN_REQUIRED = False
_PLAYLIST_ID_RE = r'(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}'
_YOUTUBE_CLIENT_HEADERS = {
'x-youtube-client-name': '1',
'x-youtube-client-version': '1.20200609.04.02',
}
def _set_language(self):
self._set_cookie(
'.youtube.com', 'PREF', 'f1=50000000&f6=8&hl=en',
# YouTube sets the expire time to about two months
expire_time=time.time() + 2 * 30 * 24 * 3600)
def _ids_to_results(self, ids):
return [
self.url_result(vid_id, 'Youtube', video_id=vid_id)
for vid_id in ids]
def _login(self):
"""
Attempt to log in to YouTube.
True is returned if successful or skipped.
False is returned if login failed.
If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
"""
username, password = self._get_login_info()
# No authentication to be performed
if username is None:
if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return True
login_page = self._download_webpage(
self._LOGIN_URL, None,
note='Downloading login page',
errnote='unable to fetch login page', fatal=False)
if login_page is False:
return
login_form = self._hidden_inputs(login_page)
def req(url, f_req, note, errnote):
data = login_form.copy()
data.update({
'pstMsg': 1,
'checkConnection': 'youtube',
'checkedDomains': 'youtube',
'hl': 'en',
'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
'f.req': json.dumps(f_req),
'flowName': 'GlifWebSignIn',
'flowEntry': 'ServiceLogin',
# TODO: reverse actual botguard identifier generation algo
'bgRequest': '["identifier",""]',
})
return self._download_json(
url, None, note=note, errnote=errnote,
transform_source=lambda s: re.sub(r'^[^[]*', '', s),
fatal=False,
data=urlencode_postdata(data), headers={
'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
'Google-Accounts-XSRF': 1,
})
def warn(message):
self._downloader.report_warning(message)
lookup_req = [
username,
None, [], None, 'US', None, None, 2, False, True,
[
None, None,
[2, 1, None, 1,
'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
None, [], 4],
1, [None, None, []], None, None, None, True
],
username,
]
lookup_results = req(
self._LOOKUP_URL, lookup_req,
'Looking up account info', 'Unable to look up account info')
if lookup_results is False:
return False
user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str)
if not user_hash:
warn('Unable to extract user hash')
return False
challenge_req = [
user_hash,
None, 1, None, [1, None, None, None, [password, None, True]],
[
None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
1, [None, None, []], None, None, None, True
]]
challenge_results = req(
self._CHALLENGE_URL, challenge_req,
'Logging in', 'Unable to log in')
if challenge_results is False:
return
login_res = try_get(challenge_results, lambda x: x[0][5], list)
if login_res:
login_msg = try_get(login_res, lambda x: x[5], compat_str)
warn(
'Unable to login: %s' % 'Invalid password'
if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg)
return False
res = try_get(challenge_results, lambda x: x[0][-1], list)
if not res:
warn('Unable to extract result entry')
return False
login_challenge = try_get(res, lambda x: x[0][0], list)
if login_challenge:
challenge_str = try_get(login_challenge, lambda x: x[2], compat_str)
if challenge_str == 'TWO_STEP_VERIFICATION':
# SEND_SUCCESS - TFA code has been successfully sent to phone
# QUOTA_EXCEEDED - reached the limit of TFA codes
status = try_get(login_challenge, lambda x: x[5], compat_str)
if status == 'QUOTA_EXCEEDED':
warn('Exceeded the limit of TFA codes, try later')
return False
tl = try_get(challenge_results, lambda x: x[1][2], compat_str)
if not tl:
warn('Unable to extract TL')
return False
tfa_code = self._get_tfa_info('2-step verification code')
if not tfa_code:
warn(
'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
'(Note that only TOTP (Google Authenticator App) codes work at this time.)')
return False
tfa_code = remove_start(tfa_code, 'G-')
tfa_req = [
user_hash, None, 2, None,
[
9, None, None, None, None, None, None, None,
[None, tfa_code, True, 2]
]]
tfa_results = req(
self._TFA_URL.format(tl), tfa_req,
'Submitting TFA code', 'Unable to submit TFA code')
if tfa_results is False:
return False
tfa_res = try_get(tfa_results, lambda x: x[0][5], list)
if tfa_res:
tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str)
warn(
'Unable to finish TFA: %s' % 'Invalid TFA code'
if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg)
return False
check_cookie_url = try_get(
tfa_results, lambda x: x[0][-1][2], compat_str)
else:
CHALLENGES = {
'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.",
'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.',
'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.",
}
challenge = CHALLENGES.get(
challenge_str,
'%s returned error %s.' % (self.IE_NAME, challenge_str))
warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' % challenge)
return False
else:
check_cookie_url = try_get(res, lambda x: x[2], compat_str)
if not check_cookie_url:
warn('Unable to extract CheckCookie URL')
return False
check_cookie_results = self._download_webpage(
check_cookie_url, None, 'Checking cookie', fatal=False)
if check_cookie_results is False:
return False
if 'https://myaccount.google.com/' not in check_cookie_results:
warn('Unable to log in')
return False
return True
def _real_initialize(self):
if self._downloader is None:
return
self._set_language()
if not self._login():
return
_DEFAULT_API_DATA = {
'context': {
'client': {
'clientName': 'WEB',
'clientVersion': '2.20201021.03.00',
}
},
}
def _call_api(self, ep, query, video_id):
data = self._DEFAULT_API_DATA.copy()
data.update(query)
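        # The hard-coded key below is the public Innertube API key bundled with the
        # regular web client, not a user secret.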
response = self._download_json(
'https://www.youtube.com/youtubei/v1/%s' % ep, video_id=video_id,
note='Downloading API JSON', errnote='Unable to download API page',
data=json.dumps(data).encode('utf8'),
headers={'content-type': 'application/json'},
query={'key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'})
return response
def _extract_yt_initial_data(self, video_id, webpage):
return self._parse_json(
self._search_regex(
r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;',
webpage, 'yt initial data'),
video_id)
class YoutubeIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com'
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com/|
(?:www\.)?deturl\.com/www\.youtube\.com/|
(?:www\.)?pwnyoutube\.com/|
(?:www\.)?hooktube\.com/|
(?:www\.)?yourepeat\.com/|
tube\.majestyc\.net/|
# Invidious instances taken from https://github.com/omarroth/invidious/wiki/Invidious-Instances
(?:(?:www|dev)\.)?invidio\.us/|
(?:(?:www|no)\.)?invidiou\.sh/|
(?:(?:www|fi|de)\.)?invidious\.snopyta\.org/|
(?:www\.)?invidious\.kabi\.tk/|
(?:www\.)?invidious\.13ad\.de/|
(?:www\.)?invidious\.mastodon\.host/|
(?:www\.)?invidious\.nixnet\.xyz/|
(?:www\.)?invidious\.drycat\.fr/|
(?:www\.)?tube\.poal\.co/|
(?:www\.)?vid\.wxzm\.sx/|
(?:www\.)?yewtu\.be/|
(?:www\.)?yt\.elukerio\.org/|
(?:www\.)?yt\.lelux\.fi/|
(?:www\.)?invidious\.ggc-project\.de/|
(?:www\.)?yt\.maisputain\.ovh/|
(?:www\.)?invidious\.13ad\.de/|
(?:www\.)?invidious\.toot\.koeln/|
(?:www\.)?invidious\.fdn\.fr/|
(?:www\.)?watch\.nettohikari\.com/|
(?:www\.)?kgg2m7yk5aybusll\.onion/|
(?:www\.)?qklhadlycap4cnod\.onion/|
(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion/|
(?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion/|
(?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion/|
(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion/|
(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p/|
(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion/|
youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&v=V36LpHqtcDY)
v=
)
))
|(?:
youtu\.be| # just youtu.be/xxxx
vid\.plus| # or vid.plus/xxxx
zwearz\.com/watch| # or zwearz.com/watch/xxxx
)/
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
(?P<id>[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?!.*?\blist=
(?:
%(playlist_id)s| # combined list/video URLs are handled by the playlist IE
WL # WL are handled by the watch later IE
)
)
(?(1).+)? # if we found the ID, everything can follow
$""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
_PLAYER_INFO_RE = (
r'/(?P<id>[a-zA-Z0-9_-]{8,})/player_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?/base\.(?P<ext>[a-z]+)$',
r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.(?P<ext>[a-z]+)$',
)
_formats = {
'5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
'17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
'18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
'22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
'36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
'37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# 3D videos
'82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
'101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
'102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
# Apple HTTP Live Streaming
'91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
# DASH mp4 video
'133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
'134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
'135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
'137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
'138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
'160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
'212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
'298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
# Dash mp4 audio
'139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
'140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
'256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
'328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
# Dash webm
'167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
# itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
# Dash webm audio
'171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
'172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
# Dash webm audio with opus inside
'249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
'250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
'251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
# av01 video only formats sometimes served with "unknown" codecs
'394': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'395': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'396': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'397': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
}
_SUBTITLE_FORMATS = ('srv1', 'srv2', 'srv3', 'ttml', 'vtt')
_GEO_BYPASS = False
IE_NAME = 'youtube'
_TESTS = [
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact [email protected] .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'dislike_count': int,
'start_time': 1,
'end_time': 9,
}
},
{
'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
'note': 'Embed-only video (#1746)',
'info_dict': {
'id': 'yZIXLfi8CZQ',
'ext': 'mp4',
'upload_date': '20120608',
'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
'uploader': 'SET India',
'uploader_id': 'setindia',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
'age_limit': 18,
}
},
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=yZIXLfi8CZQ',
'note': 'Use the first video ID in the URL',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact [email protected] .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
'id': 'a9LDPn-MO4I',
'ext': 'm4a',
'upload_date': '20121002',
'uploader_id': '8KVIDEO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
'description': '',
'uploader': '8KVIDEO',
'title': 'UHDTV TEST 8K VIDEO.mp4'
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
'skip': 'format 141 not served anymore',
},
# DASH manifest with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
'info_dict': {
'id': 'IB3lcPjvWLA',
'ext': 'm4a',
'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
'duration': 244,
'uploader': 'AfrojackVEVO',
'uploader_id': 'AfrojackVEVO',
'upload_date': '20131011',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# Controversy video
{
'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
'info_dict': {
'id': 'T4XJQO3qol8',
'ext': 'mp4',
'duration': 219,
'upload_date': '20100909',
'uploader': 'Amazing Atheist',
'uploader_id': 'TheAmazingAtheist',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
'title': 'Burning Everyone\'s Koran',
'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
}
},
# Normal age-gate video (No vevo, embed allowed)
{
'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
'info_dict': {
'id': 'HtVdAasjOgU',
'ext': 'mp4',
'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
'duration': 142,
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
'upload_date': '20140605',
'age_limit': 18,
},
},
# video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
# YouTube Red ad is not captured for creator
{
'url': '__2ABJjxzNo',
'info_dict': {
'id': '__2ABJjxzNo',
'ext': 'mp4',
'duration': 266,
'upload_date': '20100430',
'uploader_id': 'deadmau5',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
'creator': 'Dada Life, deadmau5',
'description': 'md5:12c56784b8032162bb936a5f76d55360',
'uploader': 'deadmau5',
'title': 'Deadmau5 - Some Chords (HD)',
'alt_title': 'This Machine Kills Some Chords',
},
'expected_warnings': [
'DASH manifest missing',
]
},
# Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
{
'url': 'lqQg6PlCWgI',
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
'duration': 6085,
'upload_date': '20150827',
'uploader_id': 'olympic',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympic',
'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
},
'params': {
'skip_download': 'requires avconv',
}
},
# Non-square pixels
{
'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
'info_dict': {
'id': '_b-2C3KPAM0',
'ext': 'mp4',
'stretched_ratio': 16 / 9.,
'duration': 85,
'upload_date': '20110310',
'uploader_id': 'AllenMeow',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
'uploader': '孫ᄋᄅ',
'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
},
},
# url_encoded_fmt_stream_map is empty string
{
'url': 'qEJwOuvDf7I',
'info_dict': {
'id': 'qEJwOuvDf7I',
'ext': 'webm',
'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
'description': '',
'upload_date': '20150404',
'uploader_id': 'spbelect',
'uploader': 'Наблюдатели Петербурга',
},
'params': {
'skip_download': 'requires avconv',
},
'skip': 'This live event has ended.',
},
# Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
{
'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
'info_dict': {
'id': 'FIl7x6_3R5Y',
'ext': 'webm',
'title': 'md5:7b81415841e02ecd4313668cde88737a',
'description': 'md5:116377fd2963b81ec4ce64b542173306',
'duration': 220,
'upload_date': '20150625',
'uploader_id': 'dorappi2000',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
'uploader': 'dorappi2000',
'formats': 'mincount:31',
},
'skip': 'not actual anymore',
},
# DASH manifest with segment_list
{
'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
'md5': '8ce563a1d667b599d21064e982ab9e31',
'info_dict': {
'id': 'CsmdDsKjzN8',
'ext': 'mp4',
'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
'uploader': 'Airtek',
'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '135', # bestvideo
},
'skip': 'This live event has ended.',
},
{
# Multifeed videos (multiple cameras), URL is for Main Camera
'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
'info_dict': {
'id': 'jqWvoWXjCVs',
'title': 'teamPGP: Rocket League Noob Stream',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
},
'playlist': [{
'info_dict': {
'id': 'jqWvoWXjCVs',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7335,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': '6h8e8xoXJzg',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7337,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': 'PUOgX5z9xZw',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7337,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': 'teuwxikvS5k',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (zim)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7334,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}],
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
'info_dict': {
'id': 'gVfLd0zydlo',
'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
},
'playlist_count': 2,
'skip': 'Not multifeed anymore',
},
{
'url': 'https://vid.plus/FlRa-iH7PGw',
'only_matching': True,
},
{
'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
'only_matching': True,
},
{
# Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
# Also tests cut-off URL expansion in video description (see
# https://github.com/ytdl-org/youtube-dl/issues/1892,
# https://github.com/ytdl-org/youtube-dl/issues/8164)
'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
'info_dict': {
'id': 'lsguqyKfVQg',
'ext': 'mp4',
'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
'alt_title': 'Dark Walk - Position Music',
'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
'duration': 133,
'upload_date': '20151119',
'uploader_id': 'IronSoulElf',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
'uploader': 'IronSoulElf',
'creator': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
'track': 'Dark Walk - Position Music',
'artist': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
},
'params': {
'skip_download': True,
},
},
{
# Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
'only_matching': True,
},
{
# Video with yt:stretch=17:0
'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
'info_dict': {
'id': 'Q39EVAstoRM',
'ext': 'mp4',
'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
'description': 'md5:ee18a25c350637c8faff806845bddee9',
'upload_date': '20151107',
'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
'uploader': 'CH GAMER DROID',
},
'params': {
'skip_download': True,
},
'skip': 'This video does not exist.',
},
{
# Video licensed under Creative Commons
'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
'info_dict': {
'id': 'M4gD1WSo5mA',
'ext': 'mp4',
'title': 'md5:e41008789470fc2533a3252216f1c1d1',
'description': 'md5:a677553cf0840649b731a3024aeff4cc',
'duration': 721,
'upload_date': '20150127',
'uploader_id': 'BerkmanCenter',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
'uploader': 'The Berkman Klein Center for Internet & Society',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
# Channel-like uploader_url
'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
'info_dict': {
'id': 'eQcmzGIKrzg',
'ext': 'mp4',
'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
'description': 'md5:dda0d780d5a6e120758d1711d062a867',
'duration': 4060,
'upload_date': '20151119',
'uploader': 'Bernie Sanders',
'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY',
'only_matching': True,
},
{
# YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
'only_matching': True,
},
{
# Rental video preview
'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
'info_dict': {
'id': 'uGpuVWrhIzE',
'ext': 'mp4',
'title': 'Piku - Trailer',
'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
'upload_date': '20150811',
'uploader': 'FlixMatrix',
'uploader_id': 'FlixMatrixKaravan',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
'license': 'Standard YouTube License',
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# YouTube Red video with episode data
'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
'info_dict': {
'id': 'iqKdEhx-dD4',
'ext': 'mp4',
'title': 'Isolation - Mind Field (Ep 1)',
'description': 'md5:46a29be4ceffa65b92d277b93f463c0f',
'duration': 2085,
'upload_date': '20170118',
'uploader': 'Vsauce',
'uploader_id': 'Vsauce',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
'series': 'Mind Field',
'season_number': 1,
'episode_number': 1,
},
'params': {
'skip_download': True,
},
'expected_warnings': [
'Skipping DASH manifest',
],
},
{
# The following content has been identified by the YouTube community
# as inappropriate or offensive to some audiences.
'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
'info_dict': {
'id': '6SJNVb0GnPI',
'ext': 'mp4',
'title': 'Race Differences in Intelligence',
'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
'duration': 965,
'upload_date': '20140124',
'uploader': 'New Century Foundation',
'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
},
'params': {
'skip_download': True,
},
},
{
# itag 212
'url': '1t24XAntNCY',
'only_matching': True,
},
{
# geo restricted to JP
'url': 'sJL6WA-aGkQ',
'only_matching': True,
},
{
'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
'only_matching': True,
},
{
'url': 'https://invidio.us/watch?v=BaW_jenozKc',
'only_matching': True,
},
{
# DRM protected
'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
'only_matching': True,
},
{
# Video with unsupported adaptive stream type formats
'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
'info_dict': {
'id': 'Z4Vy8R84T1U',
'ext': 'mp4',
'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'duration': 433,
'upload_date': '20130923',
'uploader': 'Amelia Putri Harwita',
'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
'formats': 'maxcount:10',
},
'params': {
'skip_download': True,
'youtube_include_dash_manifest': False,
},
'skip': 'not actual anymore',
},
{
# Youtube Music Auto-generated description
'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
'info_dict': {
'id': 'MgNrAu2pzNs',
'ext': 'mp4',
'title': 'Voyeur Girl',
'description': 'md5:7ae382a65843d6df2685993e90a8628f',
'upload_date': '20190312',
'uploader': 'Stephen - Topic',
'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
'artist': 'Stephen',
'track': 'Voyeur Girl',
'album': 'it\'s too much love to know my dear',
'release_date': '20190313',
'release_year': 2019,
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
'only_matching': True,
},
{
# invalid -> valid video id redirection
'url': 'DJztXj2GPfl',
'info_dict': {
'id': 'DJztXj2GPfk',
'ext': 'mp4',
'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
'description': 'md5:bf577a41da97918e94fa9798d9228825',
'upload_date': '20090125',
'uploader': 'Prochorowka',
'uploader_id': 'Prochorowka',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
'artist': 'Panjabi MC',
'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
'album': 'Beware of the Boys (Mundian To Bach Ke)',
},
'params': {
'skip_download': True,
},
},
{
# empty description results in an empty string
'url': 'https://www.youtube.com/watch?v=x41yOUIvK2k',
'info_dict': {
'id': 'x41yOUIvK2k',
'ext': 'mp4',
'title': 'IMG 3456',
'description': '',
'upload_date': '20170613',
'uploader_id': 'ElevageOrVert',
'uploader': 'ElevageOrVert',
},
'params': {
'skip_download': True,
},
},
]
def __init__(self, *args, **kwargs):
super(YoutubeIE, self).__init__(*args, **kwargs)
self._player_cache = {}
def report_video_info_webpage_download(self, video_id):
"""Report attempt to download video info webpage."""
self.to_screen('%s: Downloading video info webpage' % video_id)
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
self.to_screen('%s: Extracting video information' % video_id)
def report_unavailable_format(self, video_id, format):
"""Report extracted video URL."""
self.to_screen('%s: Format %s not available' % (video_id, format))
def report_rtmp_download(self):
"""Indicate the download will use the RTMP protocol."""
self.to_screen('RTMP download detected')
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
@classmethod
def _extract_player_info(cls, player_url):
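        # Try each known player URL pattern in turn; a match yields the player's
        # file extension and its build id.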
for player_re in cls._PLAYER_INFO_RE:
id_m = re.search(player_re, player_url)
if id_m:
break
else:
raise ExtractorError('Cannot identify player %r' % player_url)
return id_m.group('ext'), id_m.group('id')
def _extract_signature_function(self, video_id, player_url, example_sig):
player_type, player_id = self._extract_player_info(player_url)
# Read from filesystem cache
func_id = '%s_%s_%s' % (
player_type, player_id, self._signature_cache_id(example_sig))
assert os.path.basename(func_id) == func_id
cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
if cache_spec is not None:
return lambda s: ''.join(s[i] for i in cache_spec)
download_note = (
'Downloading player %s' % player_url
if self._downloader.params.get('verbose') else
'Downloading %s player %s' % (player_type, player_id)
)
if player_type == 'js':
code = self._download_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
res = self._parse_sig_js(code)
elif player_type == 'swf':
urlh = self._request_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
code = urlh.read()
res = self._parse_sig_swf(code)
else:
assert False, 'Invalid player type %r' % player_type
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = res(test_string)
cache_spec = [ord(c) for c in cache_res]
self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
return res
def _print_sig_code(self, func, example_sig):
def gen_sig_code(idxs):
def _genslice(start, end, step):
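                # Render a contiguous arithmetic run of indices as a slice expression over s.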
starts = '' if start == 0 else str(start)
ends = (':%d' % (end + step)) if end + step >= 0 else ':'
steps = '' if step == 1 else (':%d' % step)
return 's[%s%s%s]' % (starts, ends, steps)
step = None
            # Quell pyflakes warnings - start will be set when step is set
start = '(Never used)'
for i, prev in zip(idxs[1:], idxs[:-1]):
if step is not None:
if i - prev == step:
continue
yield _genslice(start, prev, step)
step = None
continue
if i - prev in [-1, 1]:
step = i - prev
start = prev
continue
else:
yield 's[%d]' % prev
if step is None:
yield 's[%d]' % i
else:
yield _genslice(start, i, step)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = func(test_string)
cache_spec = [ord(c) for c in cache_res]
expr_code = ' + '.join(gen_sig_code(cache_spec))
signature_id_tuple = '(%s)' % (
', '.join(compat_str(len(p)) for p in example_sig.split('.')))
code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
' return %s\n') % (signature_id_tuple, expr_code)
self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
funcname = self._search_regex(
(r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
# Obsolete patterns
r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
jscode, 'Initial JS player signature function name', group='sig')
jsi = JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
return lambda s: initial_function([s])
def _parse_sig_swf(self, file_contents):
swfi = SWFInterpreter(file_contents)
TARGET_CLASSNAME = 'SignatureDecipher'
searched_class = swfi.extract_class(TARGET_CLASSNAME)
initial_function = swfi.extract_function(searched_class, 'decipher')
return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
"""Turn the encrypted s field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt signature without player_url')
if player_url.startswith('//'):
player_url = 'https:' + player_url
elif not re.match(r'https?://', player_url):
player_url = compat_urlparse.urljoin(
'https://www.youtube.com', player_url)
try:
player_id = (player_url, self._signature_cache_id(s))
if player_id not in self._player_cache:
func = self._extract_signature_function(
video_id, player_url, s
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
if self._downloader.params.get('youtube_print_sig_code'):
self._print_sig_code(func, s)
return func(s)
except Exception as e:
tb = traceback.format_exc()
raise ExtractorError(
'Signature extraction failed: ' + tb, cause=e)
def _get_subtitles(self, video_id, webpage):
try:
subs_doc = self._download_xml(
'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
video_id, note=False)
except ExtractorError as err:
self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
return {}
sub_lang_list = {}
for track in subs_doc.findall('track'):
lang = track.attrib['lang_code']
if lang in sub_lang_list:
continue
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
params = compat_urllib_parse_urlencode({
'lang': lang,
'v': video_id,
'fmt': ext,
'name': track.attrib['name'].encode('utf-8'),
})
sub_formats.append({
'url': 'https://www.youtube.com/api/timedtext?' + params,
'ext': ext,
})
sub_lang_list[lang] = sub_formats
if not sub_lang_list:
self._downloader.report_warning('video doesn\'t have subtitles')
return {}
return sub_lang_list
def _get_ytplayer_config(self, video_id, webpage):
patterns = (
# User data may contain arbitrary character sequences that may affect
            # JSON extraction with regex, e.g. when '};' is contained, the second
            # regex won't capture the whole JSON. We work around this by trying the more
            # concrete regex first, keeping in mind that proper quoted string handling,
            # to be implemented in the future, will replace this workaround (see
# https://github.com/ytdl-org/youtube-dl/issues/7468,
# https://github.com/ytdl-org/youtube-dl/pull/7599)
r';ytplayer\.config\s*=\s*({.+?});ytplayer',
r';ytplayer\.config\s*=\s*({.+?});',
)
config = self._search_regex(
patterns, webpage, 'ytplayer.config', default=None)
if config:
return self._parse_json(
uppercase_escape(config), video_id, fatal=False)
def _get_automatic_captions(self, video_id, webpage):
"""We need the webpage for getting the captions url, pass it as an
argument to speed up the process."""
self.to_screen('%s: Looking for automatic captions' % video_id)
player_config = self._get_ytplayer_config(video_id, webpage)
err_msg = 'Couldn\'t find automatic captions for %s' % video_id
if not player_config:
self._downloader.report_warning(err_msg)
return {}
try:
args = player_config['args']
caption_url = args.get('ttsurl')
if caption_url:
timestamp = args['timestamp']
# We get the available subtitles
list_params = compat_urllib_parse_urlencode({
'type': 'list',
'tlangs': 1,
'asrs': 1,
})
list_url = caption_url + '&' + list_params
caption_list = self._download_xml(list_url, video_id)
original_lang_node = caption_list.find('track')
if original_lang_node is None:
self._downloader.report_warning('Video doesn\'t have automatic captions')
return {}
original_lang = original_lang_node.attrib['lang_code']
caption_kind = original_lang_node.attrib.get('kind', '')
sub_lang_list = {}
for lang_node in caption_list.findall('target'):
sub_lang = lang_node.attrib['lang_code']
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
params = compat_urllib_parse_urlencode({
'lang': original_lang,
'tlang': sub_lang,
'fmt': ext,
'ts': timestamp,
'kind': caption_kind,
})
sub_formats.append({
'url': caption_url + '&' + params,
'ext': ext,
})
sub_lang_list[sub_lang] = sub_formats
return sub_lang_list
def make_captions(sub_url, sub_langs):
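                # Rebuild the caption URL's query string once per target language and
                # subtitle format instead of downloading a separate track list.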
parsed_sub_url = compat_urllib_parse_urlparse(sub_url)
caption_qs = compat_parse_qs(parsed_sub_url.query)
captions = {}
for sub_lang in sub_langs:
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
caption_qs.update({
'tlang': [sub_lang],
'fmt': [ext],
})
sub_url = compat_urlparse.urlunparse(parsed_sub_url._replace(
query=compat_urllib_parse_urlencode(caption_qs, True)))
sub_formats.append({
'url': sub_url,
'ext': ext,
})
captions[sub_lang] = sub_formats
return captions
# New captions format as of 22.06.2017
player_response = args.get('player_response')
if player_response and isinstance(player_response, compat_str):
player_response = self._parse_json(
player_response, video_id, fatal=False)
if player_response:
renderer = player_response['captions']['playerCaptionsTracklistRenderer']
base_url = renderer['captionTracks'][0]['baseUrl']
sub_lang_list = []
for lang in renderer['translationLanguages']:
lang_code = lang.get('languageCode')
if lang_code:
sub_lang_list.append(lang_code)
return make_captions(base_url, sub_lang_list)
# Some videos don't provide ttsurl but rather caption_tracks and
# caption_translation_languages (e.g. 20LmZk1hakA)
            # Not used anymore as of 22.06.2017
caption_tracks = args['caption_tracks']
caption_translation_languages = args['caption_translation_languages']
caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
sub_lang_list = []
for lang in caption_translation_languages.split(','):
lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
sub_lang = lang_qs.get('lc', [None])[0]
if sub_lang:
sub_lang_list.append(sub_lang)
return make_captions(caption_url, sub_lang_list)
        # An extractor error can be raised by the download process if there are
# no automatic captions but there are subtitles
except (KeyError, IndexError, ExtractorError):
self._downloader.report_warning(err_msg)
return {}
def _mark_watched(self, video_id, video_info, player_response):
playback_url = url_or_none(try_get(
player_response,
lambda x: x['playbackTracking']['videostatsPlaybackUrl']['baseUrl']) or try_get(
video_info, lambda x: x['videostats_playback_base_url'][0]))
if not playback_url:
return
parsed_playback_url = compat_urlparse.urlparse(playback_url)
qs = compat_urlparse.parse_qs(parsed_playback_url.query)
# cpn generation algorithm is reverse engineered from base.js.
# In fact it works even with dummy cpn.
CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
qs.update({
'ver': ['2'],
'cpn': [cpn],
})
playback_url = compat_urlparse.urlunparse(
parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
self._download_webpage(
playback_url, video_id, 'Marking watched',
'Unable to mark watched', fatal=False)
@staticmethod
def _extract_urls(webpage):
# Embedded YouTube player
entries = [
unescapeHTML(mobj.group('url'))
for mobj in re.finditer(r'''(?x)
(?:
<iframe[^>]+?src=|
data-video-url=|
<embed[^>]+?src=|
embedSWF\(?:\s*|
<object[^>]+data=|
new\s+SWFObject\(
)
(["\'])
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
(?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
\1''', webpage)]
# lazyYT YouTube embed
entries.extend(list(map(
unescapeHTML,
re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
# Wordpress "YouTube Video Importer" plugin
matches = re.findall(r'''(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
entries.extend(m[-1] for m in matches)
return entries
@staticmethod
def _extract_url(webpage):
urls = YoutubeIE._extract_urls(webpage)
return urls[0] if urls else None
@classmethod
def extract_id(cls, url):
mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
video_id = mobj.group(2)
return video_id
def _extract_chapters_from_json(self, webpage, video_id, duration):
if not webpage:
return
data = self._extract_yt_initial_data(video_id, webpage)
if not data or not isinstance(data, dict):
return
chapters_list = try_get(
data,
lambda x: x['playerOverlays']
['playerOverlayRenderer']
['decoratedPlayerBarRenderer']
['decoratedPlayerBarRenderer']
['playerBar']
['chapteredPlayerBarRenderer']
['chapters'],
list)
if not chapters_list:
return
def chapter_time(chapter):
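            # 'timeRangeStartMillis' is given in milliseconds; scale=1000 converts it to seconds.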
return float_or_none(
try_get(
chapter,
lambda x: x['chapterRenderer']['timeRangeStartMillis'],
int),
scale=1000)
chapters = []
for next_num, chapter in enumerate(chapters_list, start=1):
start_time = chapter_time(chapter)
if start_time is None:
continue
end_time = (chapter_time(chapters_list[next_num])
if next_num < len(chapters_list) else duration)
if end_time is None:
continue
title = try_get(
chapter, lambda x: x['chapterRenderer']['title']['simpleText'],
compat_str)
chapters.append({
'start_time': start_time,
'end_time': end_time,
'title': title,
})
return chapters
@staticmethod
def _extract_chapters_from_description(description, duration):
if not description:
return None
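        # In the legacy description markup each chapter is an anchor that calls
        # yt.www.watch.player.seekTo with a mm:ss (or h:mm:ss) timestamp.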
chapter_lines = re.findall(
r'(?:^|<br\s*/>)([^<]*<a[^>]+onclick=["\']yt\.www\.watch\.player\.seekTo[^>]+>(\d{1,2}:\d{1,2}(?::\d{1,2})?)</a>[^>]*)(?=$|<br\s*/>)',
description)
if not chapter_lines:
return None
chapters = []
for next_num, (chapter_line, time_point) in enumerate(
chapter_lines, start=1):
start_time = parse_duration(time_point)
if start_time is None:
continue
if start_time > duration:
break
end_time = (duration if next_num == len(chapter_lines)
else parse_duration(chapter_lines[next_num][1]))
if end_time is None:
continue
if end_time > duration:
end_time = duration
if start_time > end_time:
break
chapter_title = re.sub(
r'<a[^>]+>[^<]+</a>', '', chapter_line).strip(' \t-')
chapter_title = re.sub(r'\s+', ' ', chapter_title)
chapters.append({
'start_time': start_time,
'end_time': end_time,
'title': chapter_title,
})
return chapters
def _extract_chapters(self, webpage, description, video_id, duration):
return (self._extract_chapters_from_json(webpage, video_id, duration)
or self._extract_chapters_from_description(description, duration))
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
proto = (
'http' if self._downloader.params.get('prefer_insecure', False)
else 'https')
start_time = None
end_time = None
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
query = compat_parse_qs(component)
if start_time is None and 't' in query:
start_time = parse_duration(query['t'][0])
if start_time is None and 'start' in query:
start_time = parse_duration(query['start'][0])
if end_time is None and 'end' in query:
end_time = parse_duration(query['end'][0])
# Extract original video URL from URL with redirection, like age verification, using next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
if mobj:
url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
video_id = self.extract_id(url)
# Get video webpage
url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
video_webpage, urlh = self._download_webpage_handle(url, video_id)
qs = compat_parse_qs(compat_urllib_parse_urlparse(urlh.geturl()).query)
video_id = qs.get('v', [None])[0] or video_id
# Attempt to extract SWF player URL
mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
if mobj is not None:
player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
else:
player_url = None
dash_mpds = []
def add_dash_mpd(video_info):
dash_mpd = video_info.get('dashmpd')
if dash_mpd and dash_mpd[0] not in dash_mpds:
dash_mpds.append(dash_mpd[0])
def add_dash_mpd_pr(pl_response):
dash_mpd = url_or_none(try_get(
pl_response, lambda x: x['streamingData']['dashManifestUrl'],
compat_str))
if dash_mpd and dash_mpd not in dash_mpds:
dash_mpds.append(dash_mpd)
is_live = None
view_count = None
def extract_view_count(v_info):
return int_or_none(try_get(v_info, lambda x: x['view_count'][0]))
def extract_player_response(player_response, video_id):
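            # player_response arrives as a JSON string; parse it and pick up any DASH
            # manifest URL it advertises.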
pl_response = str_or_none(player_response)
if not pl_response:
return
pl_response = self._parse_json(pl_response, video_id, fatal=False)
if isinstance(pl_response, dict):
add_dash_mpd_pr(pl_response)
return pl_response
player_response = {}
# Get video info
video_info = {}
embed_webpage = None
if (self._og_search_property('restrictions:age', video_webpage, default=None) == '18+'
or re.search(r'player-age-gate-content">', video_webpage) is not None):
age_gate = True
# We simulate the access to the video from www.youtube.com/v/{video_id}
# this can be viewed without login into Youtube
url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
data = compat_urllib_parse_urlencode({
'video_id': video_id,
'eurl': 'https://youtube.googleapis.com/v/' + video_id,
'sts': self._search_regex(
r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
})
video_info_url = proto + '://www.youtube.com/get_video_info?' + data
try:
video_info_webpage = self._download_webpage(
video_info_url, video_id,
note='Refetching age-gated info webpage',
errnote='unable to download video info webpage')
except ExtractorError:
video_info_webpage = None
if video_info_webpage:
video_info = compat_parse_qs(video_info_webpage)
pl_response = video_info.get('player_response', [None])[0]
player_response = extract_player_response(pl_response, video_id)
add_dash_mpd(video_info)
view_count = extract_view_count(video_info)
else:
age_gate = False
# Try looking directly into the video webpage
ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
if ytplayer_config:
args = ytplayer_config['args']
if args.get('url_encoded_fmt_stream_map') or args.get('hlsvp'):
# Convert to the same format returned by compat_parse_qs
video_info = dict((k, [v]) for k, v in args.items())
add_dash_mpd(video_info)
# Rental video is not rented but preview is available (e.g.
# https://www.youtube.com/watch?v=yYr8q0y5Jfg,
# https://github.com/ytdl-org/youtube-dl/issues/10532)
if not video_info and args.get('ypc_vid'):
return self.url_result(
args['ypc_vid'], YoutubeIE.ie_key(), video_id=args['ypc_vid'])
if args.get('livestream') == '1' or args.get('live_playback') == 1:
is_live = True
if not player_response:
player_response = extract_player_response(args.get('player_response'), video_id)
if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
add_dash_mpd_pr(player_response)
if not video_info and not player_response:
player_response = extract_player_response(
self._search_regex(
r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;', video_webpage,
'initial player response', default='{}'),
video_id)
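        # Any human-readable error shown on the watch page is kept so it can be
        # reported if extraction ultimately fails.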
def extract_unavailable_message():
messages = []
for tag, kind in (('h1', 'message'), ('div', 'submessage')):
msg = self._html_search_regex(
r'(?s)<{tag}[^>]+id=["\']unavailable-{kind}["\'][^>]*>(.+?)</{tag}>'.format(tag=tag, kind=kind),
video_webpage, 'unavailable %s' % kind, default=None)
if msg:
messages.append(msg)
if messages:
return '\n'.join(messages)
if not video_info and not player_response:
unavailable_message = extract_unavailable_message()
if not unavailable_message:
unavailable_message = 'Unable to extract video data'
raise ExtractorError(
'YouTube said: %s' % unavailable_message, expected=True, video_id=video_id)
if not isinstance(video_info, dict):
video_info = {}
video_details = try_get(
player_response, lambda x: x['videoDetails'], dict) or {}
microformat = try_get(
player_response, lambda x: x['microformat']['playerMicroformatRenderer'], dict) or {}
video_title = video_info.get('title', [None])[0] or video_details.get('title')
if not video_title:
self._downloader.report_warning('Unable to extract video title')
video_title = '_'
description_original = video_description = get_element_by_id("eow-description", video_webpage)
if video_description:
def replace_url(m):
redir_url = compat_urlparse.urljoin(url, m.group(1))
parsed_redir_url = compat_urllib_parse_urlparse(redir_url)
if re.search(r'^(?:www\.)?(?:youtube(?:-nocookie)?\.com|youtu\.be)$', parsed_redir_url.netloc) and parsed_redir_url.path == '/redirect':
qs = compat_parse_qs(parsed_redir_url.query)
q = qs.get('q')
if q and q[0]:
return q[0]
return redir_url
description_original = video_description = re.sub(r'''(?x)
<a\s+
(?:[a-zA-Z-]+="[^"]*"\s+)*?
(?:title|href)="([^"]+)"\s+
(?:[a-zA-Z-]+="[^"]*"\s+)*?
class="[^"]*"[^>]*>
[^<]+\.{3}\s*
</a>
''', replace_url, video_description)
video_description = clean_html(video_description)
else:
video_description = video_details.get('shortDescription')
if video_description is None:
video_description = self._html_search_meta('description', video_webpage)
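        # Multi-camera events expose several feeds; unless --no-playlist is given
        # they are returned as a playlist of separate video entries.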
if not smuggled_data.get('force_singlefeed', False):
if not self._downloader.params.get('noplaylist'):
multifeed_metadata_list = try_get(
player_response,
lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'],
compat_str) or try_get(
video_info, lambda x: x['multifeed_metadata_list'][0], compat_str)
if multifeed_metadata_list:
entries = []
feed_ids = []
for feed in multifeed_metadata_list.split(','):
# Unquote should take place before split on comma (,) since textual
# fields may contain comma as well (see
# https://github.com/ytdl-org/youtube-dl/issues/8536)
feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
def feed_entry(name):
return try_get(feed_data, lambda x: x[name][0], compat_str)
feed_id = feed_entry('id')
if not feed_id:
continue
feed_title = feed_entry('title')
title = video_title
if feed_title:
title += ' (%s)' % feed_title
entries.append({
'_type': 'url_transparent',
'ie_key': 'Youtube',
'url': smuggle_url(
'%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
{'force_singlefeed': True}),
'title': title,
})
feed_ids.append(feed_id)
self.to_screen(
'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
% (', '.join(feed_ids), video_id))
return self.playlist_result(entries, video_id, video_title, video_description)
else:
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
if view_count is None:
view_count = extract_view_count(video_info)
if view_count is None and video_details:
view_count = int_or_none(video_details.get('viewCount'))
if view_count is None and microformat:
view_count = int_or_none(microformat.get('viewCount'))
if is_live is None:
is_live = bool_or_none(video_details.get('isLive'))
# Check for "rental" videos
if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
raise ExtractorError('"rental" videos not supported. See https://github.com/ytdl-org/youtube-dl/issues/359 for more information.', expected=True)
def _extract_filesize(media_url):
return int_or_none(self._search_regex(
r'\bclen[=/](\d+)', media_url, 'filesize', default=None))
streaming_formats = try_get(player_response, lambda x: x['streamingData']['formats'], list) or []
streaming_formats.extend(try_get(player_response, lambda x: x['streamingData']['adaptiveFormats'], list) or [])
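        # Build the formats list: legacy RTMP, progressive/adaptive HTTP formats,
        # or an HLS manifest for live streams.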
if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
self.report_rtmp_download()
formats = [{
'format_id': '_rtmp',
'protocol': 'rtmp',
'url': video_info['conn'][0],
'player_url': player_url,
}]
elif not is_live and (streaming_formats or len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1):
encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
if 'rtmpe%3Dyes' in encoded_url_map:
raise ExtractorError('rtmpe downloads are not supported, see https://github.com/ytdl-org/youtube-dl/issues/343 for more information.', expected=True)
formats = []
formats_spec = {}
fmt_list = video_info.get('fmt_list', [''])[0]
if fmt_list:
for fmt in fmt_list.split(','):
spec = fmt.split('/')
if len(spec) > 1:
width_height = spec[1].split('x')
if len(width_height) == 2:
formats_spec[spec[0]] = {
'resolution': spec[1],
'width': int_or_none(width_height[0]),
'height': int_or_none(width_height[1]),
}
for fmt in streaming_formats:
itag = str_or_none(fmt.get('itag'))
if not itag:
continue
quality = fmt.get('quality')
quality_label = fmt.get('qualityLabel') or quality
formats_spec[itag] = {
'asr': int_or_none(fmt.get('audioSampleRate')),
'filesize': int_or_none(fmt.get('contentLength')),
'format_note': quality_label,
'fps': int_or_none(fmt.get('fps')),
'height': int_or_none(fmt.get('height')),
# bitrate for itag 43 is always 2147483647
'tbr': float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000) if itag != '43' else None,
'width': int_or_none(fmt.get('width')),
}
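            # Resolve each format's final URL, decrypting the signature cipher via
            # the JS player whenever a plain URL is not provided.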
for fmt in streaming_formats:
if fmt.get('drmFamilies') or fmt.get('drm_families'):
continue
url = url_or_none(fmt.get('url'))
if not url:
cipher = fmt.get('cipher') or fmt.get('signatureCipher')
if not cipher:
continue
url_data = compat_parse_qs(cipher)
url = url_or_none(try_get(url_data, lambda x: x['url'][0], compat_str))
if not url:
continue
else:
cipher = None
url_data = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
stream_type = int_or_none(try_get(url_data, lambda x: x['stream_type'][0]))
# Unsupported FORMAT_STREAM_TYPE_OTF
if stream_type == 3:
continue
format_id = fmt.get('itag') or url_data['itag'][0]
if not format_id:
continue
format_id = compat_str(format_id)
if cipher:
if 's' in url_data or self._downloader.params.get('youtube_include_dash_manifest', True):
ASSETS_RE = (
r'<script[^>]+\bsrc=("[^"]+")[^>]+\bname=["\']player_ias/base',
r'"jsUrl"\s*:\s*("[^"]+")',
r'"assets":.+?"js":\s*("[^"]+")')
jsplayer_url_json = self._search_regex(
ASSETS_RE,
embed_webpage if age_gate else video_webpage,
'JS player URL (1)', default=None)
if not jsplayer_url_json and not age_gate:
# We need the embed website after all
if embed_webpage is None:
embed_url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(
embed_url, video_id, 'Downloading embed webpage')
jsplayer_url_json = self._search_regex(
ASSETS_RE, embed_webpage, 'JS player URL')
player_url = json.loads(jsplayer_url_json)
if player_url is None:
player_url_json = self._search_regex(
r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
video_webpage, 'age gate player URL')
player_url = json.loads(player_url_json)
if 'sig' in url_data:
url += '&signature=' + url_data['sig'][0]
elif 's' in url_data:
encrypted_sig = url_data['s'][0]
if self._downloader.params.get('verbose'):
if player_url is None:
player_desc = 'unknown'
else:
player_type, player_version = self._extract_player_info(player_url)
player_desc = '%s player %s' % ('flash' if player_type == 'swf' else 'html5', player_version)
parts_sizes = self._signature_cache_id(encrypted_sig)
self.to_screen('{%s} signature length %s, %s' %
(format_id, parts_sizes, player_desc))
signature = self._decrypt_signature(
encrypted_sig, video_id, player_url, age_gate)
sp = try_get(url_data, lambda x: x['sp'][0], compat_str) or 'signature'
url += '&%s=%s' % (sp, signature)
if 'ratebypass' not in url:
url += '&ratebypass=yes'
dct = {
'format_id': format_id,
'url': url,
'player_url': player_url,
}
if format_id in self._formats:
dct.update(self._formats[format_id])
if format_id in formats_spec:
dct.update(formats_spec[format_id])
# Some itags are not included in DASH manifest thus corresponding formats will
# lack metadata (see https://github.com/ytdl-org/youtube-dl/pull/5993).
# Trying to extract metadata from url_encoded_fmt_stream_map entry.
mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
if width is None:
width = int_or_none(fmt.get('width'))
if height is None:
height = int_or_none(fmt.get('height'))
filesize = int_or_none(url_data.get(
'clen', [None])[0]) or _extract_filesize(url)
quality = url_data.get('quality', [None])[0] or fmt.get('quality')
quality_label = url_data.get('quality_label', [None])[0] or fmt.get('qualityLabel')
tbr = (float_or_none(url_data.get('bitrate', [None])[0], 1000)
or float_or_none(fmt.get('bitrate'), 1000)) if format_id != '43' else None
fps = int_or_none(url_data.get('fps', [None])[0]) or int_or_none(fmt.get('fps'))
more_fields = {
'filesize': filesize,
'tbr': tbr,
'width': width,
'height': height,
'fps': fps,
'format_note': quality_label or quality,
}
for key, value in more_fields.items():
if value:
dct[key] = value
type_ = url_data.get('type', [None])[0] or fmt.get('mimeType')
if type_:
type_split = type_.split(';')
kind_ext = type_split[0].split('/')
if len(kind_ext) == 2:
kind, _ = kind_ext
dct['ext'] = mimetype2ext(type_split[0])
if kind in ('audio', 'video'):
codecs = None
for mobj in re.finditer(
r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
if mobj.group('key') == 'codecs':
codecs = mobj.group('val')
break
if codecs:
dct.update(parse_codecs(codecs))
if dct.get('acodec') == 'none' or dct.get('vcodec') == 'none':
dct['downloader_options'] = {
# Youtube throttles chunks >~10M
'http_chunk_size': 10485760,
}
formats.append(dct)
else:
manifest_url = (
url_or_none(try_get(
player_response,
lambda x: x['streamingData']['hlsManifestUrl'],
compat_str))
or url_or_none(try_get(
video_info, lambda x: x['hlsvp'][0], compat_str)))
if manifest_url:
formats = []
m3u8_formats = self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', fatal=False)
for a_format in m3u8_formats:
itag = self._search_regex(
r'/itag/(\d+)/', a_format['url'], 'itag', default=None)
if itag:
a_format['format_id'] = itag
if itag in self._formats:
dct = self._formats[itag].copy()
dct.update(a_format)
a_format = dct
a_format['player_url'] = player_url
# Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
formats.append(a_format)
else:
error_message = extract_unavailable_message()
if not error_message:
error_message = clean_html(try_get(
player_response, lambda x: x['playabilityStatus']['reason'],
compat_str))
if not error_message:
error_message = clean_html(
try_get(video_info, lambda x: x['reason'][0], compat_str))
if error_message:
raise ExtractorError(error_message, expected=True)
raise ExtractorError('no conn, hlsvp, hlsManifestUrl or url_encoded_fmt_stream_map information found in video info')
# uploader
video_uploader = try_get(
video_info, lambda x: x['author'][0],
compat_str) or str_or_none(video_details.get('author'))
if video_uploader:
video_uploader = compat_urllib_parse_unquote_plus(video_uploader)
else:
self._downloader.report_warning('unable to extract uploader name')
# uploader_id
video_uploader_id = None
video_uploader_url = None
mobj = re.search(
r'<link itemprop="url" href="(?P<uploader_url>https?://www\.youtube\.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
video_webpage)
if mobj is not None:
video_uploader_id = mobj.group('uploader_id')
video_uploader_url = mobj.group('uploader_url')
else:
owner_profile_url = url_or_none(microformat.get('ownerProfileUrl'))
if owner_profile_url:
video_uploader_id = self._search_regex(
r'(?:user|channel)/([^/]+)', owner_profile_url, 'uploader id',
default=None)
video_uploader_url = owner_profile_url
channel_id = (
str_or_none(video_details.get('channelId'))
or self._html_search_meta(
'channelId', video_webpage, 'channel id', default=None)
or self._search_regex(
r'data-channel-external-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
video_webpage, 'channel id', default=None, group='id'))
channel_url = 'http://www.youtube.com/channel/%s' % channel_id if channel_id else None
thumbnails = []
thumbnails_list = try_get(
video_details, lambda x: x['thumbnail']['thumbnails'], list) or []
for t in thumbnails_list:
if not isinstance(t, dict):
continue
thumbnail_url = url_or_none(t.get('url'))
if not thumbnail_url:
continue
thumbnails.append({
'url': thumbnail_url,
'width': int_or_none(t.get('width')),
'height': int_or_none(t.get('height')),
})
if not thumbnails:
video_thumbnail = None
# We try first to get a high quality image:
m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
video_webpage, re.DOTALL)
if m_thumb is not None:
video_thumbnail = m_thumb.group(1)
thumbnail_url = try_get(video_info, lambda x: x['thumbnail_url'][0], compat_str)
if thumbnail_url:
video_thumbnail = compat_urllib_parse_unquote_plus(thumbnail_url)
if video_thumbnail:
thumbnails.append({'url': video_thumbnail})
# upload date
upload_date = self._html_search_meta(
'datePublished', video_webpage, 'upload date', default=None)
if not upload_date:
upload_date = self._search_regex(
[r'(?s)id="eow-date.*?>(.*?)</span>',
r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'],
video_webpage, 'upload date', default=None)
if not upload_date:
upload_date = microformat.get('publishDate') or microformat.get('uploadDate')
upload_date = unified_strdate(upload_date)
video_license = self._html_search_regex(
r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
video_webpage, 'license', default=None)
m_music = re.search(
r'''(?x)
<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*
<ul[^>]*>\s*
<li>(?P<title>.+?)
by (?P<creator>.+?)
(?:
\(.+?\)|
<a[^>]*
(?:
\bhref=["\']/red[^>]*>| # drop possible
>\s*Listen ad-free with YouTube Red # YouTube Red ad
)
.*?
)?</li
''',
video_webpage)
if m_music:
video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
video_creator = clean_html(m_music.group('creator'))
else:
video_alt_title = video_creator = None
def extract_meta(field):
return self._html_search_regex(
r'<h4[^>]+class="title"[^>]*>\s*%s\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li>\s*' % field,
video_webpage, field, default=None)
track = extract_meta('Song')
artist = extract_meta('Artist')
album = extract_meta('Album')
# Youtube Music Auto-generated description
release_date = release_year = None
if video_description:
mobj = re.search(r'(?s)Provided to YouTube by [^\n]+\n+(?P<track>[^·]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?', video_description)
if mobj:
if not track:
track = mobj.group('track').strip()
if not artist:
artist = mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·'))
if not album:
                    album = mobj.group('album').strip()
release_year = mobj.group('release_year')
release_date = mobj.group('release_date')
if release_date:
release_date = release_date.replace('-', '')
if not release_year:
release_year = int(release_date[:4])
if release_year:
release_year = int(release_year)
m_episode = re.search(
r'<div[^>]+id="watch7-headline"[^>]*>\s*<span[^>]*>.*?>(?P<series>[^<]+)</a></b>\s*S(?P<season>\d+)\s*•\s*E(?P<episode>\d+)</span>',
video_webpage)
if m_episode:
series = unescapeHTML(m_episode.group('series'))
season_number = int(m_episode.group('season'))
episode_number = int(m_episode.group('episode'))
else:
series = season_number = episode_number = None
m_cat_container = self._search_regex(
r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
video_webpage, 'categories', default=None)
category = None
if m_cat_container:
category = self._html_search_regex(
r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
default=None)
if not category:
category = try_get(
microformat, lambda x: x['category'], compat_str)
video_categories = None if category is None else [category]
video_tags = [
unescapeHTML(m.group('content'))
for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
if not video_tags:
video_tags = try_get(video_details, lambda x: x['keywords'], list)
def _extract_count(count_name):
return str_to_int(self._search_regex(
r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
% re.escape(count_name),
video_webpage, count_name, default=None))
like_count = _extract_count('like')
dislike_count = _extract_count('dislike')
if view_count is None:
view_count = str_to_int(self._search_regex(
r'<[^>]+class=["\']watch-view-count[^>]+>\s*([\d,\s]+)', video_webpage,
'view count', default=None))
average_rating = (
float_or_none(video_details.get('averageRating'))
or try_get(video_info, lambda x: float_or_none(x['avg_rating'][0])))
# subtitles
video_subtitles = self.extract_subtitles(video_id, video_webpage)
automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
video_duration = try_get(
video_info, lambda x: int_or_none(x['length_seconds'][0]))
if not video_duration:
video_duration = int_or_none(video_details.get('lengthSeconds'))
if not video_duration:
video_duration = parse_duration(self._html_search_meta(
'duration', video_webpage, 'video duration'))
# annotations
video_annotations = None
if self._downloader.params.get('writeannotations', False):
xsrf_token = self._search_regex(
r'([\'"])XSRF_TOKEN\1\s*:\s*([\'"])(?P<xsrf_token>[A-Za-z0-9+/=]+)\2',
video_webpage, 'xsrf token', group='xsrf_token', fatal=False)
invideo_url = try_get(
player_response, lambda x: x['annotations'][0]['playerAnnotationsUrlsRenderer']['invideoUrl'], compat_str)
if xsrf_token and invideo_url:
xsrf_field_name = self._search_regex(
r'([\'"])XSRF_FIELD_NAME\1\s*:\s*([\'"])(?P<xsrf_field_name>\w+)\2',
video_webpage, 'xsrf field name',
group='xsrf_field_name', default='session_token')
video_annotations = self._download_webpage(
self._proto_relative_url(invideo_url),
video_id, note='Downloading annotations',
errnote='Unable to download video annotations', fatal=False,
data=urlencode_postdata({xsrf_field_name: xsrf_token}))
chapters = self._extract_chapters(video_webpage, description_original, video_id, video_duration)
# Look for the DASH manifest
if self._downloader.params.get('youtube_include_dash_manifest', True):
dash_mpd_fatal = True
for mpd_url in dash_mpds:
dash_formats = {}
try:
def decrypt_sig(mobj):
s = mobj.group(1)
dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
return '/signature/%s' % dec_s
mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
for df in self._extract_mpd_formats(
mpd_url, video_id, fatal=dash_mpd_fatal,
formats_dict=self._formats):
if not df.get('filesize'):
df['filesize'] = _extract_filesize(df['url'])
# Do not overwrite DASH format found in some previous DASH manifest
if df['format_id'] not in dash_formats:
dash_formats[df['format_id']] = df
# Additional DASH manifests may end up in HTTP Error 403 therefore
# allow them to fail without bug report message if we already have
# some DASH manifest succeeded. This is temporary workaround to reduce
# burst of bug reports until we figure out the reason and whether it
# can be fixed at all.
dash_mpd_fatal = False
except (ExtractorError, KeyError) as e:
self.report_warning(
'Skipping DASH manifest: %r' % e, video_id)
if dash_formats:
# Remove the formats we found through non-DASH, they
# contain less info and it can be wrong, because we use
# fixed values (for example the resolution). See
# https://github.com/ytdl-org/youtube-dl/issues/5774 for an
# example.
formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
formats.extend(dash_formats.values())
# Check for malformed aspect ratio
stretched_m = re.search(
r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
video_webpage)
if stretched_m:
w = float(stretched_m.group('w'))
h = float(stretched_m.group('h'))
# yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
# We will only process correct ratios.
if w > 0 and h > 0:
ratio = w / h
for f in formats:
if f.get('vcodec') != 'none':
f['stretched_ratio'] = ratio
if not formats:
if 'reason' in video_info:
if 'The uploader has not made this video available in your country.' in video_info['reason']:
regions_allowed = self._html_search_meta(
'regionsAllowed', video_webpage, default=None)
countries = regions_allowed.split(',') if regions_allowed else None
self.raise_geo_restricted(
msg=video_info['reason'][0], countries=countries)
reason = video_info['reason'][0]
if 'Invalid parameters' in reason:
unavailable_message = extract_unavailable_message()
if unavailable_message:
reason = unavailable_message
raise ExtractorError(
'YouTube said: %s' % reason,
expected=True, video_id=video_id)
if video_info.get('license_info') or try_get(player_response, lambda x: x['streamingData']['licenseInfos']):
raise ExtractorError('This video is DRM protected.', expected=True)
self._sort_formats(formats)
self.mark_watched(video_id, video_info, player_response)
return {
'id': video_id,
'uploader': video_uploader,
'uploader_id': video_uploader_id,
'uploader_url': video_uploader_url,
'channel_id': channel_id,
'channel_url': channel_url,
'upload_date': upload_date,
'license': video_license,
'creator': video_creator or artist,
'title': video_title,
'alt_title': video_alt_title or track,
'thumbnails': thumbnails,
'description': video_description,
'categories': video_categories,
'tags': video_tags,
'subtitles': video_subtitles,
'automatic_captions': automatic_captions,
'duration': video_duration,
'age_limit': 18 if age_gate else 0,
'annotations': video_annotations,
'chapters': chapters,
'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'average_rating': average_rating,
'formats': formats,
'is_live': is_live,
'start_time': start_time,
'end_time': end_time,
'series': series,
'season_number': season_number,
'episode_number': episode_number,
'track': track,
'artist': artist,
'album': album,
'release_date': release_date,
'release_year': release_year,
}
class YoutubeTabIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com tab'
_VALID_URL = r'https?://(?:\w+\.)?(?:youtube(?:kids)?\.com|invidio\.us)/(?:(?:channel|c|user)/|playlist\?.*?\blist=)(?P<id>[^/?#&]+)'
IE_NAME = 'youtube:tab'
_TESTS = [{
# playlists, multipage
'url': 'https://www.youtube.com/c/ИгорьКлейнер/playlists?view=1&flow=grid',
'playlist_mincount': 94,
'info_dict': {
'id': 'UCqj7Cz7revf5maW9g5pgNcg',
'title': 'Игорь Клейнер - Playlists',
},
}, {
# playlists, multipage, different order
'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
'playlist_mincount': 94,
'info_dict': {
'id': 'UCqj7Cz7revf5maW9g5pgNcg',
'title': 'Игорь Клейнер - Playlists',
},
}, {
# playlists, singlepage
'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
'playlist_mincount': 4,
'info_dict': {
'id': 'ThirstForScience',
'title': 'ThirstForScience',
}
}, {
'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
'only_matching': True,
}, {
# basic, single video playlist
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'title': 'youtube-dl public playlist',
},
'playlist_count': 1,
}, {
# empty playlist
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'title': 'youtube-dl empty playlist',
},
'playlist_count': 0,
}, {
# Home tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/featured',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Home',
},
'playlist_mincount': 2,
}, {
# Videos tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Videos',
},
'playlist_mincount': 975,
}, {
# Videos tab, sorted by popular
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos?view=0&sort=p&flow=grid',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Videos',
},
'playlist_mincount': 199,
}, {
# Playlists tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/playlists',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Playlists',
},
'playlist_mincount': 17,
}, {
# Community tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/community',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Community',
},
'playlist_mincount': 18,
}, {
# Channels tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Channels',
},
'playlist_mincount': 138,
}, {
'url': 'https://invidio.us/channel/UC23qupoDRn9YOAVzeoxjOQA',
'only_matching': True,
}, {
'url': 'https://www.youtubekids.com/channel/UCyu8StPfZWapR6rfW_JgqcA',
'only_matching': True,
}, {
'url': 'https://music.youtube.com/channel/UCT-K0qO8z6NzWrywqefBPBQ',
'only_matching': True,
}, {
'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'info_dict': {
'title': '29C3: Not my department',
'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'uploader': 'Christiaan008',
'uploader_id': 'ChRiStIaAn008',
},
'playlist_count': 96,
}, {
'note': 'Large playlist',
'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
'info_dict': {
'title': 'Uploads from Cauchemar',
'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
'uploader': 'Cauchemar',
'uploader_id': 'Cauchemar89',
},
'playlist_mincount': 1123,
}, {
# even larger playlist, 8832 videos
'url': 'http://www.youtube.com/user/NASAgovVideo/videos',
'only_matching': True,
}, {
'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
'info_dict': {
'title': 'Uploads from Interstellar Movie',
'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
'uploader': 'Interstellar Movie',
'uploader_id': 'InterstellarMovie1',
},
'playlist_mincount': 21,
}, {
# https://github.com/ytdl-org/youtube-dl/issues/21844
'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'info_dict': {
'title': 'Data Analysis with Dr Mike Pound',
'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'uploader_id': 'Computerphile',
'uploader': 'Computerphile',
},
'playlist_mincount': 11,
}, {
'url': 'https://invidio.us/playlist?list=PLDIoUOhQQPlXr63I_vwF9GD8sAKh77dWU',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return False if YoutubeLiveIE.suitable(url) else super(
YoutubeTabIE, cls).suitable(url)
def _extract_channel_id(self, webpage):
channel_id = self._html_search_meta(
'channelId', webpage, 'channel id', default=None)
if channel_id:
return channel_id
channel_url = self._html_search_meta(
('og:url', 'al:ios:url', 'al:android:url', 'al:web:url',
'twitter:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad',
'twitter:app:url:googleplay'), webpage, 'channel url')
return self._search_regex(
            r'https?://(?:www\.)?youtube\.com/channel/([^/?#&]+)',
channel_url, 'channel id')
@staticmethod
def _extract_grid_item_renderer(item):
for item_kind in ('Playlist', 'Video', 'Channel'):
renderer = item.get('grid%sRenderer' % item_kind)
if renderer:
return renderer
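    # Build a url_transparent entry from a videoRenderer so the heavy lifting is
    # deferred to YoutubeIE when the entry is actually downloaded.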
def _extract_video(self, renderer):
video_id = renderer.get('videoId')
title = try_get(
renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
description = try_get(
renderer, lambda x: x['descriptionSnippet']['runs'][0]['text'],
compat_str)
duration = parse_duration(try_get(
renderer, lambda x: x['lengthText']['simpleText'], compat_str))
view_count_text = try_get(
renderer, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
        view_count = int_or_none(self._search_regex(
            r'^(\d+)', re.sub(r'[\s,]', '', view_count_text),
            'view count', default=None))
uploader = try_get(
renderer, lambda x: x['ownerText']['runs'][0]['text'], compat_str)
return {
'_type': 'url_transparent',
'ie_key': YoutubeIE.ie_key(),
'id': video_id,
'url': video_id,
'title': title,
'description': description,
'duration': duration,
'view_count': view_count,
'uploader': uploader,
}
def _grid_entries(self, grid_renderer):
for item in grid_renderer['items']:
if not isinstance(item, dict):
continue
renderer = self._extract_grid_item_renderer(item)
if not isinstance(renderer, dict):
continue
title = try_get(
renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
# playlist
playlist_id = renderer.get('playlistId')
if playlist_id:
yield self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id,
ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
video_title=title)
# video
video_id = renderer.get('videoId')
if video_id:
yield self._extract_video(renderer)
# channel
channel_id = renderer.get('channelId')
if channel_id:
title = try_get(
renderer, lambda x: x['title']['simpleText'], compat_str)
yield self.url_result(
'https://www.youtube.com/channel/%s' % channel_id,
ie=YoutubeTabIE.ie_key(), video_title=title)
def _shelf_entries_trimmed(self, shelf_renderer):
renderer = try_get(
shelf_renderer, lambda x: x['content']['horizontalListRenderer'], dict)
if not renderer:
return
# TODO: add support for nested playlists so each shelf is processed
# as separate playlist
# TODO: this includes only first N items
for entry in self._grid_entries(renderer):
yield entry
def _shelf_entries(self, shelf_renderer):
ep = try_get(
shelf_renderer, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
compat_str)
shelf_url = urljoin('https://www.youtube.com', ep)
if not shelf_url:
return
title = try_get(
shelf_renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
yield self.url_result(shelf_url, video_title=title)
def _playlist_entries(self, video_list_renderer):
for content in video_list_renderer['contents']:
if not isinstance(content, dict):
continue
renderer = content.get('playlistVideoRenderer')
if not isinstance(renderer, dict):
continue
video_id = renderer.get('videoId')
if not video_id:
continue
yield self._extract_video(renderer)
def _video_entry(self, video_renderer):
video_id = video_renderer.get('videoId')
if video_id:
return self._extract_video(video_renderer)
def _post_thread_entries(self, post_thread_renderer):
post_renderer = try_get(
post_thread_renderer, lambda x: x['post']['backstagePostRenderer'], dict)
if not post_renderer:
return
# video attachment
video_renderer = try_get(
post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict)
        video_id = video_renderer.get('videoId') if video_renderer else None
if video_renderer:
entry = self._video_entry(video_renderer)
if entry:
yield entry
# inline video links
runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
for run in runs:
if not isinstance(run, dict):
continue
ep_url = try_get(
run, lambda x: x['navigationEndpoint']['urlEndpoint']['url'], compat_str)
if not ep_url:
continue
if not YoutubeIE.suitable(ep_url):
continue
ep_video_id = YoutubeIE._match_id(ep_url)
if video_id == ep_video_id:
continue
            yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=ep_video_id)
def _post_thread_continuation_entries(self, post_thread_continuation):
contents = post_thread_continuation.get('contents')
if not isinstance(contents, list):
return
for content in contents:
renderer = content.get('backstagePostThreadRenderer')
if not isinstance(renderer, dict):
continue
for entry in self._post_thread_entries(renderer):
yield entry
@staticmethod
def _extract_next_continuation_data(renderer):
next_continuation = try_get(
renderer, lambda x: x['continuations'][0]['nextContinuationData'], dict)
if not next_continuation:
return
continuation = next_continuation.get('continuation')
if not continuation:
return
ctp = next_continuation.get('clickTrackingParams')
return {
'ctoken': continuation,
'continuation': continuation,
'itct': ctp,
}
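    # Newer page layouts carry the continuation token in a continuationItemRenderer
    # rather than nextContinuationData; check both variants.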
@classmethod
def _extract_continuation(cls, renderer):
next_continuation = cls._extract_next_continuation_data(renderer)
if next_continuation:
return next_continuation
contents = renderer.get('contents')
if not isinstance(contents, list):
return
for content in contents:
if not isinstance(content, dict):
continue
continuation_ep = try_get(
content, lambda x: x['continuationItemRenderer']['continuationEndpoint'],
dict)
if not continuation_ep:
continue
continuation = try_get(
continuation_ep, lambda x: x['continuationCommand']['token'], compat_str)
if not continuation:
continue
ctp = continuation_ep.get('clickTrackingParams')
if not ctp:
continue
return {
'ctoken': continuation,
'continuation': continuation,
'itct': ctp,
}
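    # Walk the selected tab's sectionListRenderer, yield entries for each supported
    # renderer type, then follow continuation tokens via browse_ajax until exhausted.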
def _entries(self, tab, identity_token):
continuation = None
slr_contents = tab['sectionListRenderer']['contents']
for slr_content in slr_contents:
if not isinstance(slr_content, dict):
continue
is_renderer = try_get(slr_content, lambda x: x['itemSectionRenderer'], dict)
if not is_renderer:
continue
isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or []
for isr_content in isr_contents:
if not isinstance(isr_content, dict):
continue
renderer = isr_content.get('playlistVideoListRenderer')
if renderer:
for entry in self._playlist_entries(renderer):
yield entry
continuation = self._extract_continuation(renderer)
continue
renderer = isr_content.get('gridRenderer')
if renderer:
for entry in self._grid_entries(renderer):
yield entry
continuation = self._extract_continuation(renderer)
continue
renderer = isr_content.get('shelfRenderer')
if renderer:
for entry in self._shelf_entries(renderer):
yield entry
continue
renderer = isr_content.get('backstagePostThreadRenderer')
if renderer:
for entry in self._post_thread_entries(renderer):
yield entry
continuation = self._extract_continuation(renderer)
continue
renderer = isr_content.get('videoRenderer')
if renderer:
entry = self._video_entry(renderer)
if entry:
yield entry
if not continuation:
continuation = self._extract_continuation(is_renderer)
headers = {
'x-youtube-client-name': '1',
'x-youtube-client-version': '2.20201112.04.01',
}
if identity_token:
headers['x-youtube-identity-token'] = identity_token
for page_num in itertools.count(1):
if not continuation:
break
browse = self._download_json(
'https://www.youtube.com/browse_ajax', None,
'Downloading page %d' % page_num,
headers=headers, query=continuation, fatal=False)
if not browse:
break
response = try_get(browse, lambda x: x[1]['response'], dict)
if not response:
break
continuation_contents = try_get(
response, lambda x: x['continuationContents'], dict)
if continuation_contents:
continuation_renderer = continuation_contents.get('playlistVideoListContinuation')
if continuation_renderer:
for entry in self._playlist_entries(continuation_renderer):
yield entry
continuation = self._extract_continuation(continuation_renderer)
continue
continuation_renderer = continuation_contents.get('gridContinuation')
if continuation_renderer:
for entry in self._grid_entries(continuation_renderer):
yield entry
continuation = self._extract_continuation(continuation_renderer)
continue
continuation_renderer = continuation_contents.get('itemSectionContinuation')
if continuation_renderer:
for entry in self._post_thread_continuation_entries(continuation_renderer):
yield entry
continuation = self._extract_continuation(continuation_renderer)
continue
continuation_items = try_get(
response, lambda x: x['onResponseReceivedActions'][0]['appendContinuationItemsAction']['continuationItems'], list)
if continuation_items:
continuation_item = continuation_items[0]
if not isinstance(continuation_item, dict):
continue
renderer = continuation_item.get('playlistVideoRenderer')
if renderer:
video_list_renderer = {'contents': continuation_items}
for entry in self._playlist_entries(video_list_renderer):
yield entry
continuation = self._extract_continuation(video_list_renderer)
continue
break
@staticmethod
def _extract_selected_tab(tabs):
for tab in tabs:
if try_get(tab, lambda x: x['tabRenderer']['selected'], bool):
return tab['tabRenderer']
else:
raise ExtractorError('Unable to find selected tab')
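    # Resolve the channel/playlist page to its selected tab and return a playlist
    # built from that tab's contents.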
def _real_extract(self, url):
channel_id = self._match_id(url)
url = compat_urlparse.urlunparse(
compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
webpage = self._download_webpage(url, channel_id)
data = self._extract_yt_initial_data(channel_id, webpage)
tabs = data['contents']['twoColumnBrowseResultsRenderer']['tabs']
selected_tab = self._extract_selected_tab(tabs)
channel_title = try_get(
data, lambda x: x['metadata']['channelMetadataRenderer']['title'],
compat_str)
channel_external_id = try_get(
data, lambda x: x['metadata']['channelMetadataRenderer']['externalId'],
compat_str)
tab_title = selected_tab.get('title')
title = channel_title or channel_id
if tab_title:
title += ' - %s' % tab_title
identity_token = self._search_regex(
r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
'identity token', default=None)
return self.playlist_result(
self._entries(selected_tab['content'], identity_token),
playlist_id=channel_external_id or channel_id,
playlist_title=title)
class YoutubePlaylistIE(InfoExtractor):
IE_DESC = 'YouTube.com playlists'
_VALID_URL = r"""(?x)(?:
(?:https?://)?
(?:\w+\.)?
(?:
(?:
youtube(?:kids)?\.com|
invidio\.us
)
/
(?:
(?:course|view_play_list|my_playlists|artist|playlist|watch|embed/(?:videoseries|[0-9A-Za-z_-]{11}))
\? (?:.*?[&;])*? (?:p|a|list)=
| p/
)|
youtu\.be/[0-9A-Za-z_-]{11}\?.*?\blist=
)
(
(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)?[0-9A-Za-z-_]{10,}
# Top tracks, they can also include dots
|(?:MC)[\w\.]*
)
.*
|
(%(playlist_id)s)
)""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
IE_NAME = 'youtube:playlist'
_TESTS = [{
'note': 'issue #673',
'url': 'PLBB231211A4F62143',
'info_dict': {
'title': '[OLD]Team Fortress 2 (Class-based LP)',
'id': 'PLBB231211A4F62143',
'uploader': 'Wickydoo',
'uploader_id': 'Wickydoo',
},
'playlist_mincount': 29,
}, {
'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
'info_dict': {
'title': 'YDL_safe_search',
'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
},
'playlist_count': 2,
'skip': 'This playlist is private',
}, {
'note': 'embedded',
'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'playlist_count': 4,
'info_dict': {
'title': 'JODA15',
'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'uploader': 'milan',
'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
}
}, {
'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'playlist_mincount': 982,
'info_dict': {
'title': '2018 Chinese New Singles (11/6 updated)',
'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'uploader': 'LBK',
'uploader_id': 'sdragonfang',
}
}, {
'note': 'Embedded SWF player',
'url': 'https://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
'playlist_count': 4,
'info_dict': {
'title': 'JODA7',
'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
},
'skip': 'This playlist does not exist',
}, {
# Playlist URL that does not actually serve a playlist
'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
'info_dict': {
'id': 'FqZTN594JQw',
'ext': 'webm',
'title': "Smiley's People 01 detective, Adventure Series, Action",
'uploader': 'STREEM',
'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
'upload_date': '20150526',
'license': 'Standard YouTube License',
'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
'categories': ['People & Blogs'],
'tags': list,
'view_count': int,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
'add_ie': [YoutubeIE.ie_key()],
}, {
'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
'info_dict': {
'id': 'yeWKywCrFtk',
'ext': 'mp4',
'title': 'Small Scale Baler and Braiding Rugs',
'uploader': 'Backus-Page House Museum',
'uploader_id': 'backuspagemuseum',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
'upload_date': '20161008',
'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
'categories': ['Nonprofits & Activism'],
'tags': list,
'like_count': int,
'dislike_count': int,
},
'params': {
'noplaylist': True,
'skip_download': True,
},
}, {
'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
'only_matching': True,
}, {
'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
'only_matching': True,
}, {
# music album playlist
'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
'only_matching': True,
}, {
'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return False if YoutubeTabIE.suitable(url) else super(
YoutubePlaylistIE, cls).suitable(url)
def _real_extract(self, url):
# Extract playlist id
mobj = re.match(self._VALID_URL, url)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
playlist_id = mobj.group(1) or mobj.group(2)
return self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id,
ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
class YoutubeYtUserIE(InfoExtractor):
_VALID_URL = r'ytuser:(?P<id>.+)'
_TESTS = [{
'url': 'ytuser:phihag',
'only_matching': True,
}]
def _real_extract(self, url):
user_id = self._match_id(url)
return self.url_result(
'https://www.youtube.com/user/%s' % user_id,
ie=YoutubeTabIE.ie_key(), video_id=user_id)
class YoutubeLiveIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com live streams'
_VALID_URL = r'(?P<base_url>https?://(?:\w+\.)?youtube\.com/(?:(?:user|channel|c)/)?(?P<id>[^/]+))/live'
IE_NAME = 'youtube:live'
_TESTS = [{
'url': 'https://www.youtube.com/user/TheYoungTurks/live',
'info_dict': {
'id': 'a48o2S1cPoo',
'ext': 'mp4',
'title': 'The Young Turks - Live Main Show',
'uploader': 'The Young Turks',
'uploader_id': 'TheYoungTurks',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
'upload_date': '20150715',
'license': 'Standard YouTube License',
'description': 'md5:438179573adcdff3c97ebb1ee632b891',
'categories': ['News & Politics'],
'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/TheYoungTurks/live',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
channel_id = mobj.group('id')
base_url = mobj.group('base_url')
webpage = self._download_webpage(url, channel_id, fatal=False)
if webpage:
page_type = self._og_search_property(
'type', webpage, 'page type', default='')
video_id = self._html_search_meta(
'videoId', webpage, 'video id', default=None)
if page_type.startswith('video') and video_id and re.match(
r'^[0-9A-Za-z_-]{11}$', video_id):
return self.url_result(video_id, YoutubeIE.ie_key())
return self.url_result(base_url)
class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com searches'
# there doesn't appear to be a real limit, for example if you search for
# 'python' you get more than 8.000.000 results
_MAX_RESULTS = float('inf')
IE_NAME = 'youtube:search'
_SEARCH_KEY = 'ytsearch'
_SEARCH_PARAMS = None
_TESTS = []
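    # Page through the InnerTube search API, yielding url_transparent entries until
    # the requested number of results is reached or no continuation token remains.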
def _entries(self, query, n):
data = {
'context': {
'client': {
'clientName': 'WEB',
'clientVersion': '2.20201021.03.00',
}
},
'query': query,
}
if self._SEARCH_PARAMS:
data['params'] = self._SEARCH_PARAMS
total = 0
for page_num in itertools.count(1):
search = self._download_json(
'https://www.youtube.com/youtubei/v1/search?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
video_id='query "%s"' % query,
note='Downloading page %s' % page_num,
errnote='Unable to download API page', fatal=False,
data=json.dumps(data).encode('utf8'),
headers={'content-type': 'application/json'})
if not search:
break
slr_contents = try_get(
search,
(lambda x: x['contents']['twoColumnSearchResultsRenderer']['primaryContents']['sectionListRenderer']['contents'],
lambda x: x['onResponseReceivedCommands'][0]['appendContinuationItemsAction']['continuationItems']),
list)
if not slr_contents:
break
isr_contents = try_get(
slr_contents,
lambda x: x[0]['itemSectionRenderer']['contents'],
list)
if not isr_contents:
break
for content in isr_contents:
if not isinstance(content, dict):
continue
video = content.get('videoRenderer')
if not isinstance(video, dict):
continue
video_id = video.get('videoId')
if not video_id:
continue
title = try_get(video, lambda x: x['title']['runs'][0]['text'], compat_str)
description = try_get(video, lambda x: x['descriptionSnippet']['runs'][0]['text'], compat_str)
duration = parse_duration(try_get(video, lambda x: x['lengthText']['simpleText'], compat_str))
view_count_text = try_get(video, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
                view_count = int_or_none(self._search_regex(
                    r'^(\d+)', re.sub(r'[\s,]', '', view_count_text),
                    'view count', default=None))
uploader = try_get(video, lambda x: x['ownerText']['runs'][0]['text'], compat_str)
total += 1
yield {
'_type': 'url_transparent',
'ie_key': YoutubeIE.ie_key(),
'id': video_id,
'url': video_id,
'title': title,
'description': description,
'duration': duration,
'view_count': view_count,
'uploader': uploader,
}
if total == n:
return
token = try_get(
slr_contents,
lambda x: x[1]['continuationItemRenderer']['continuationEndpoint']['continuationCommand']['token'],
compat_str)
if not token:
break
data['continuation'] = token
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
return self.playlist_result(self._entries(query, n), query)
class YoutubeSearchDateIE(YoutubeSearchIE):
IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
_SEARCH_KEY = 'ytsearchdate'
IE_DESC = 'YouTube.com searches, newest videos first'
_SEARCH_PARAMS = 'CAI%3D'
r"""
class YoutubeSearchURLIE(YoutubeSearchIE):
IE_DESC = 'YouTube.com search URLs'
IE_NAME = 'youtube:search_url'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
_TESTS = [{
'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
'playlist_mincount': 5,
'info_dict': {
'title': 'youtube-dl test video',
}
}, {
'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
query = compat_urllib_parse_unquote_plus(mobj.group('query'))
webpage = self._download_webpage(url, query)
return self.playlist_result(self._process_page(webpage), playlist_title=query)
"""
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
"""
Base class for feed extractors
Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
"""
_LOGIN_REQUIRED = True
@property
def IE_NAME(self):
return 'youtube:%s' % self._FEED_NAME
def _real_initialize(self):
self._login()
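    # Feed pages are scraped like playlists: collect video ids from the HTML and
    # keep following the "load more" widget until no new ids show up.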
def _entries(self, page):
# The extraction process is the same as for playlists, but the regex
# for the video ids doesn't contain an index
ids = []
more_widget_html = content_html = page
for page_num in itertools.count(1):
matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
# 'recommended' feed has infinite 'load more' and each new portion spins
# the same videos in (sometimes) slightly different order, so we'll check
        # for uniqueness and break when a portion has no new videos
new_ids = list(filter(lambda video_id: video_id not in ids, orderedSet(matches)))
if not new_ids:
break
ids.extend(new_ids)
for entry in self._ids_to_results(new_ids):
yield entry
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://www.youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape,
headers=self._YOUTUBE_CLIENT_HEADERS)
content_html = more['content_html']
more_widget_html = more['load_more_widget_html']
def _real_extract(self, url):
page = self._download_webpage(
'https://www.youtube.com/feed/%s' % self._FEED_NAME,
self._PLAYLIST_TITLE)
return self.playlist_result(
self._entries(page), playlist_title=self._PLAYLIST_TITLE)
class YoutubeWatchLaterIE(InfoExtractor):
IE_NAME = 'youtube:watchlater'
IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
_TESTS = [{
'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/watch_later',
'only_matching': True,
}]
def _real_extract(self, url):
return self.url_result(
'https://www.youtube.com/playlist?list=WL', ie=YoutubeTabIE.ie_key())
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/recommended|:ytrec(?:ommended)?'
_FEED_NAME = 'recommended'
_PLAYLIST_TITLE = 'Youtube Recommended videos'
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
_FEED_NAME = 'subscriptions'
_PLAYLIST_TITLE = 'Youtube Subscriptions'
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/history|:ythistory'
_FEED_NAME = 'history'
_PLAYLIST_TITLE = 'Youtube History'
class YoutubeTruncatedURLIE(InfoExtractor):
IE_NAME = 'youtube:truncated_url'
IE_DESC = False # Do not list
_VALID_URL = r'''(?x)
(?:https?://)?
(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
(?:watch\?(?:
feature=[a-z_]+|
annotation_id=annotation_[^&]+|
x-yt-cl=[0-9]+|
hl=[^&]*|
t=[0-9]+
)?
|
attribution_link\?a=[^&]+
)
$
'''
_TESTS = [{
'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?feature=foo',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?hl=en-GB',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?t=2372',
'only_matching': True,
}]
def _real_extract(self, url):
raise ExtractorError(
'Did you forget to quote the URL? Remember that & is a meta '
'character in most shells, so you want to put the URL in quotes, '
'like youtube-dl '
'"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
' or simply youtube-dl BaW_jenozKc .',
expected=True)
class YoutubeTruncatedIDIE(InfoExtractor):
IE_NAME = 'youtube:truncated_id'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
_TESTS = [{
'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
raise ExtractorError(
'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
expected=True)
|
the-stack_0_4009 | from . import encode
import numpy
def pygame_play(data, rate=44100):
''' Send audio array to pygame for playback
'''
import pygame
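    # Mono, signed 16-bit output at the requested rate; playback blocks until the
    # sound has finished.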
pygame.mixer.init(rate, -16, 1, 1024)
    sound = pygame.sndarray.make_sound(encode.as_int16(data))
length = sound.get_length()
sound.play()
pygame.time.wait(int(length * 1000))
pygame.mixer.quit()
def pygame_supported():
''' Return True is pygame playback is supported
'''
try:
import pygame
except:
return False
return True
def oss_play(data, rate=44100):
''' Send audio array to oss for playback
'''
import ossaudiodev
audio = ossaudiodev.open('/dev/audio','w')
formats = audio.getfmts()
if ossaudiodev.AFMT_S16_LE & formats:
# Use 16 bit if available
audio.setfmt(ossaudiodev.AFMT_S16_LE)
data = encode.as_int16(data)
elif ossaudiodev.AFMT_U8 & formats:
# Otherwise use 8 bit
audio.setfmt(ossaudiodev.AFMT_U8)
data = encode.as_uint8(data)
audio.speed(rate)
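    # Feed the device in small chunks, then flush and wait for playback to finish.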
while len(data):
audio.write(data[:1024])
data = data[1024:]
audio.flush()
audio.sync()
audio.close()
def oss_supported():
''' Return True is oss playback is supported
'''
try:
import ossaudiodev
except:
return False
return True
def pyaudio_play(data, rate=44100):
''' Send audio array to pyaudio for playback
'''
import pyaudio
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paFloat32, channels=1, rate=rate, output=1)
    stream.write(data.astype(numpy.float32).tobytes())
stream.close()
p.terminate()
def pyaudio_supported():
''' Return True is pyaudio playback is supported
'''
try:
import pyaudio
except:
return False
return True
def play(data, rate=44100):
''' Send audio to first available playback method
'''
if pygame_supported():
return pygame_play(data, rate)
elif oss_supported():
return oss_play(data, rate)
elif pyaudio_supported():
return pyaudio_play(data, rate)
else:
raise Exception("No supported playback method found")
|
the-stack_0_4010 | """Functions to manipulate season data"""
import os
import logging
import pandas as pd
from pynba.config import config
from pynba.constants import WNBA, LOCAL, S3
from pynba import load_pbpstats
from pynba.parquet import load_pq_to_df, save_df_to_pq
from pynba.aws_s3 import list_objects
__all__ = [
"season_from_file",
"season_from_pbpstats",
"seasons_on_file",
"save_season",
]
logger = logging.getLogger(__name__)
def save_season(season):
"""Save season data as a csv"""
league = season["league"].iloc[0]
year = season["year"].iloc[0]
season_type = season["season_type"].iloc[0]
save_df_to_pq(season, _season_filepath(league, year, season_type))
def _season_filename(league, year, season_type):
return f"{league}_{year}_{season_type}_games.parquet"
def _seasons_dir():
return os.path.join(config.local_data_directory, config.seasons_directory)
def _season_filepath(league, year, season_type):
return os.path.join(_seasons_dir(), _season_filename(league, year, season_type))
def seasons_on_file():
"""Produces a Pandas DataFrame with info on the seasons on file"""
if config.seasons_source == LOCAL:
filenames = os.listdir(_seasons_dir())
elif config.seasons_source == S3:
prefix = f"{config.aws_s3_key_prefix}/{config.seasons_directory}/"
objects = list_objects(config.aws_s3_bucket, Prefix=prefix)
filenames = [obj["Key"][len(prefix) :] for obj in objects]
else:
raise ValueError(
f"Incompatible config for season source data: {config.seasons_source}"
)
leagues, years, season_types = zip(
*[fn.split(".")[0].split("_")[:3] for fn in filenames]
)
return pd.DataFrame(
{
"league": leagues,
"year": [int(year) for year in years],
"season_type": season_types,
}
).sort_values(by=["league", "year", "season_type"], ascending=False)
def season_from_file(league, year, season_type):
"""
Loads season data from file
Parameters
----------
league : str
e.g. "nba", "wnba"
year : int
e.g. 2018
season_type : str
e.g. "Regular Season", "Playoffs"
Returns
-------
pd.DataFrame
"""
if config.seasons_source == LOCAL:
source = _season_filepath(league, year, season_type)
elif config.seasons_source == S3:
filename = _season_filename(league, year, season_type)
source = (
f"s3://{config.aws_s3_bucket}/{config.aws_s3_key_prefix}/"
f"{config.seasons_directory}/{filename}"
)
else:
raise ValueError(
f"Incompatible config for season source data: {config.seasons_source}"
)
return load_pq_to_df(source)
def season_from_pbpstats(league, year, season_type):
"""
Loads season data from pbpstats data
Parameters
----------
league : str
e.g. "nba", "wnba"
year : int
e.g. 2018
season_type : str
e.g. "Regular Season", "Playoffs"
Returns
-------
pd.DataFrame
"""
pbpstats_year = _parse_year(year, league)
pbpstats_season = load_pbpstats.load_season_from_web(
league, pbpstats_year, season_type
)
season = pd.DataFrame(pbpstats_season.games.final_games)
season["league"] = league
season["year"] = year
season["season_type"] = season_type
if "visitor_team_id" in season:
season.rename(
columns={"visitor_team_id": "away_team_id"}, copy=False, inplace=True
)
return season
def _parse_year(year, league):
"""
Parses a year integer into a pbpstats
compatible year string. The year int represents
the end of the season.
"""
if league == WNBA:
return str(year)
return f"{year - 1}-{(year) % 100:02}"
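# Hedged usage sketch of the helpers above; the pbpstats/S3 calls would touch
# the network or filesystem, so they are shown as comments only:
#
#   _parse_year(2018, "nba")   # -> "2017-18"  (pbpstats-style season string)
#   _parse_year(2020, WNBA)    # -> "2020"     (WNBA seasons keep the plain year)
#
#   season = season_from_pbpstats("nba", 2018, "Regular Season")
#   save_season(season)        # -> nba_2018_Regular Season_games.parquet
#   season = season_from_file("nba", 2018, "Regular Season")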
|
the-stack_0_4012 | from unittest import TestCase
from scheduler.jobs_board import JobsBoard
from scheduler.match_maker import MatchMaker
from scheduler.partial_slot_scheduler import PartialSlotScheduler
from scheduler.tests.helpers import *
class TestPartialSlotScheduler(TestCase):
def setUp(self):
self.volunteers = {
'Jack': create_chef('Jack'),
'John': create_chef('John'),
'Jill': create_delivery_driver('Jill'),
'Sue': create_food_critic('Sue')
}
self.constraints_by_role = {
'chef': [can_cook],
'taster': [can_critique],
'delivery': [can_deliver]
}
def test_merges_two_schedules(self):
matchmaker = MatchMaker(self.volunteers, self.constraints_by_role)
jobs = JobsBoard(
['chef'],
['delivery'],
['taster']
)
partial_schedule = {
'Jack': 'manager'
}
scheduler = PartialSlotScheduler(jobs, matchmaker)
assert scheduler.generate_schedule(partial_schedule) == {
'Jack': 'manager',
'John': 'chef',
'Sue': 'taster',
'Jill': 'delivery'
}
|
the-stack_0_4013 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_version(*file_paths):
"""Retrieves the version from exo_changelog/__init__.py"""
filename = os.path.join(os.path.dirname(__file__), *file_paths)
version_file = open(filename).read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError('Unable to find version string.')
version = get_version('exo_changelog', '__init__.py')
if sys.argv[-1] == 'publish':
try:
import wheel
print('Wheel version: ', wheel.__version__)
except ImportError:
print('Wheel library missing. Please run "pip install wheel"')
sys.exit()
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
if sys.argv[-1] == 'tag':
print('Tagging the version on git:')
os.system("git tag -a %s -m 'version %s'" % (version, version))
os.system('git push --tags')
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='exo-changelog',
version=version,
description="""Manage changelog as migrations""",
long_description=readme + '\n\n' + history,
author='Tomas Garzon',
author_email='[email protected]',
url='https://github.com/tomasgarzon/exo-changelog',
packages=[
'exo_changelog',
],
include_package_data=True,
install_requires=['django-model-utils>=2.0', ],
license='BSD',
zip_safe=False,
keywords='exo-changelog',
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
|
the-stack_0_4014 | import os
import platform
import sys
def is_active():
return True
def get_name():
return "LinuxBSD"
def can_build():
if (os.name != "posix" or sys.platform == "darwin"):
return False
# Check the minimal dependencies
x11_error = os.system("pkg-config --version > /dev/null")
if (x11_error):
return False
x11_error = os.system("pkg-config x11 --modversion > /dev/null ")
if (x11_error):
return False
x11_error = os.system("pkg-config xcursor --modversion > /dev/null ")
if (x11_error):
print("xcursor not found.. x11 disabled.")
return False
x11_error = os.system("pkg-config xinerama --modversion > /dev/null ")
if (x11_error):
print("xinerama not found.. x11 disabled.")
return False
x11_error = os.system("pkg-config xrandr --modversion > /dev/null ")
if (x11_error):
print("xrandr not found.. x11 disabled.")
return False
x11_error = os.system("pkg-config xrender --modversion > /dev/null ")
if (x11_error):
print("xrender not found.. x11 disabled.")
return False
x11_error = os.system("pkg-config xi --modversion > /dev/null ")
if (x11_error):
print("xi not found.. Aborting.")
return False
return True
def get_opts():
from SCons.Variables import BoolVariable, EnumVariable
return [
BoolVariable('use_llvm', 'Use the LLVM compiler', False),
BoolVariable('use_lld', 'Use the LLD linker', False),
BoolVariable('use_thinlto', 'Use ThinLTO', False),
BoolVariable('use_static_cpp', 'Link libgcc and libstdc++ statically for better portability', False),
BoolVariable('use_coverage', 'Test Godot coverage', False),
BoolVariable('use_ubsan', 'Use LLVM/GCC compiler undefined behavior sanitizer (UBSAN)', False),
BoolVariable('use_asan', 'Use LLVM/GCC compiler address sanitizer (ASAN))', False),
BoolVariable('use_lsan', 'Use LLVM/GCC compiler leak sanitizer (LSAN))', False),
BoolVariable('use_tsan', 'Use LLVM/GCC compiler thread sanitizer (TSAN))', False),
BoolVariable('pulseaudio', 'Detect and use PulseAudio', True),
BoolVariable('udev', 'Use udev for gamepad connection callbacks', False),
EnumVariable('debug_symbols', 'Add debugging symbols to release builds', 'yes', ('yes', 'no', 'full')),
BoolVariable('separate_debug_symbols', 'Create a separate file containing debugging symbols', False),
BoolVariable('touch', 'Enable touch events', True),
BoolVariable('execinfo', 'Use libexecinfo on systems where glibc is not available', False),
]
def get_flags():
return []
def configure(env):
## Build type
if (env["target"] == "release"):
if (env["optimize"] == "speed"): #optimize for speed (default)
env.Prepend(CCFLAGS=['-O3'])
else: #optimize for size
env.Prepend(CCFLAGS=['-Os'])
if (env["debug_symbols"] == "yes"):
env.Prepend(CCFLAGS=['-g1'])
if (env["debug_symbols"] == "full"):
env.Prepend(CCFLAGS=['-g2'])
elif (env["target"] == "release_debug"):
if (env["optimize"] == "speed"): #optimize for speed (default)
env.Prepend(CCFLAGS=['-O2'])
else: #optimize for size
env.Prepend(CCFLAGS=['-Os'])
env.Prepend(CPPDEFINES=['DEBUG_ENABLED'])
if (env["debug_symbols"] == "yes"):
env.Prepend(CCFLAGS=['-g1'])
if (env["debug_symbols"] == "full"):
env.Prepend(CCFLAGS=['-g2'])
elif (env["target"] == "debug"):
env.Prepend(CCFLAGS=['-g3'])
env.Prepend(CPPDEFINES=['DEBUG_ENABLED', 'DEBUG_MEMORY_ENABLED'])
env.Append(LINKFLAGS=['-rdynamic'])
## Architecture
is64 = sys.maxsize > 2**32
if (env["bits"] == "default"):
env["bits"] = "64" if is64 else "32"
## Compiler configuration
if 'CXX' in env and 'clang' in os.path.basename(env['CXX']):
# Convenience check to enforce the use_llvm overrides when CXX is clang(++)
env['use_llvm'] = True
if env['use_llvm']:
if ('clang++' not in os.path.basename(env['CXX'])):
env["CC"] = "clang"
env["CXX"] = "clang++"
env["LINK"] = "clang++"
env.Append(CPPDEFINES=['TYPED_METHOD_BIND'])
env.extra_suffix = ".llvm" + env.extra_suffix
if env['use_lld']:
if env['use_llvm']:
env.Append(LINKFLAGS=['-fuse-ld=lld'])
if env['use_thinlto']:
# A convenience so you don't need to write use_lto too when using SCons
env['use_lto'] = True
else:
print("Using LLD with GCC is not supported yet, try compiling with 'use_llvm=yes'.")
sys.exit(255)
if env['use_coverage']:
env.Append(CCFLAGS=['-ftest-coverage', '-fprofile-arcs'])
env.Append(LINKFLAGS=['-ftest-coverage', '-fprofile-arcs'])
if env['use_ubsan'] or env['use_asan'] or env['use_lsan'] or env['use_tsan']:
env.extra_suffix += "s"
if env['use_ubsan']:
env.Append(CCFLAGS=['-fsanitize=undefined'])
env.Append(LINKFLAGS=['-fsanitize=undefined'])
if env['use_asan']:
env.Append(CCFLAGS=['-fsanitize=address'])
env.Append(LINKFLAGS=['-fsanitize=address'])
if env['use_lsan']:
env.Append(CCFLAGS=['-fsanitize=leak'])
env.Append(LINKFLAGS=['-fsanitize=leak'])
if env['use_tsan']:
env.Append(CCFLAGS=['-fsanitize=thread'])
env.Append(LINKFLAGS=['-fsanitize=thread'])
if env['use_lto']:
if not env['use_llvm'] and env.GetOption("num_jobs") > 1:
env.Append(CCFLAGS=['-flto'])
env.Append(LINKFLAGS=['-flto=' + str(env.GetOption("num_jobs"))])
else:
if env['use_lld'] and env['use_thinlto']:
env.Append(CCFLAGS=['-flto=thin'])
env.Append(LINKFLAGS=['-flto=thin'])
else:
env.Append(CCFLAGS=['-flto'])
env.Append(LINKFLAGS=['-flto'])
if not env['use_llvm']:
env['RANLIB'] = 'gcc-ranlib'
env['AR'] = 'gcc-ar'
env.Append(CCFLAGS=['-pipe'])
env.Append(LINKFLAGS=['-pipe'])
# -fpie and -no-pie is supported on GCC 6+ and Clang 4+, both below our
# minimal requirements.
env.Append(CCFLAGS=['-fpie'])
env.Append(LINKFLAGS=['-no-pie'])
## Dependencies
env.ParseConfig('pkg-config x11 --cflags --libs')
env.ParseConfig('pkg-config xcursor --cflags --libs')
env.ParseConfig('pkg-config xinerama --cflags --libs')
env.ParseConfig('pkg-config xrandr --cflags --libs')
env.ParseConfig('pkg-config xrender --cflags --libs')
env.ParseConfig('pkg-config xi --cflags --libs')
if (env['touch']):
env.Append(CPPDEFINES=['TOUCH_ENABLED'])
# FIXME: Check for existence of the libs before parsing their flags with pkg-config
# freetype depends on libpng and zlib, so bundling one of them while keeping others
# as shared libraries leads to weird issues
if env['builtin_freetype'] or env['builtin_libpng'] or env['builtin_zlib']:
env['builtin_freetype'] = True
env['builtin_libpng'] = True
env['builtin_zlib'] = True
if not env['builtin_freetype']:
env.ParseConfig('pkg-config freetype2 --cflags --libs')
if not env['builtin_libpng']:
env.ParseConfig('pkg-config libpng16 --cflags --libs')
if not env['builtin_bullet']:
# We need at least version 2.89
import subprocess
        bullet_version = subprocess.check_output(['pkg-config', 'bullet', '--modversion']).strip().decode('utf-8')
        if bullet_version < "2.89":
# Abort as system bullet was requested but too old
print("Bullet: System version {0} does not match minimal requirements ({1}). Aborting.".format(bullet_version, "2.89"))
sys.exit(255)
env.ParseConfig('pkg-config bullet --cflags --libs')
if False: # not env['builtin_assimp']:
# FIXME: Add min version check
env.ParseConfig('pkg-config assimp --cflags --libs')
if not env['builtin_enet']:
env.ParseConfig('pkg-config libenet --cflags --libs')
if not env['builtin_squish']:
env.ParseConfig('pkg-config libsquish --cflags --libs')
if not env['builtin_zstd']:
env.ParseConfig('pkg-config libzstd --cflags --libs')
# Sound and video libraries
# Keep the order as it triggers chained dependencies (ogg needed by others, etc.)
if not env['builtin_libtheora']:
env['builtin_libogg'] = False # Needed to link against system libtheora
env['builtin_libvorbis'] = False # Needed to link against system libtheora
env.ParseConfig('pkg-config theora theoradec --cflags --libs')
else:
list_of_x86 = ['x86_64', 'x86', 'i386', 'i586']
if any(platform.machine() in s for s in list_of_x86):
env["x86_libtheora_opt_gcc"] = True
if not env['builtin_libvpx']:
env.ParseConfig('pkg-config vpx --cflags --libs')
if not env['builtin_libvorbis']:
env['builtin_libogg'] = False # Needed to link against system libvorbis
env.ParseConfig('pkg-config vorbis vorbisfile --cflags --libs')
if not env['builtin_opus']:
env['builtin_libogg'] = False # Needed to link against system opus
env.ParseConfig('pkg-config opus opusfile --cflags --libs')
if not env['builtin_libogg']:
env.ParseConfig('pkg-config ogg --cflags --libs')
if not env['builtin_libwebp']:
env.ParseConfig('pkg-config libwebp --cflags --libs')
if not env['builtin_mbedtls']:
# mbedTLS does not provide a pkgconfig config yet. See https://github.com/ARMmbed/mbedtls/issues/228
env.Append(LIBS=['mbedtls', 'mbedcrypto', 'mbedx509'])
if not env['builtin_wslay']:
env.ParseConfig('pkg-config libwslay --cflags --libs')
if not env['builtin_miniupnpc']:
# No pkgconfig file so far, hardcode default paths.
env.Prepend(CPPPATH=["/usr/include/miniupnpc"])
env.Append(LIBS=["miniupnpc"])
# On Linux wchar_t should be 32-bits
# 16-bit library shouldn't be required due to compiler optimisations
if not env['builtin_pcre2']:
env.ParseConfig('pkg-config libpcre2-32 --cflags --libs')
## Flags
if (os.system("pkg-config --exists alsa") == 0): # 0 means found
print("Enabling ALSA")
env.Append(CPPDEFINES=["ALSA_ENABLED", "ALSAMIDI_ENABLED"])
# Don't parse --cflags, we don't need to add /usr/include/alsa to include path
env.ParseConfig('pkg-config alsa --libs')
else:
print("ALSA libraries not found, disabling driver")
if env['pulseaudio']:
if (os.system("pkg-config --exists libpulse") == 0): # 0 means found
print("Enabling PulseAudio")
env.Append(CPPDEFINES=["PULSEAUDIO_ENABLED"])
env.ParseConfig('pkg-config --cflags --libs libpulse')
else:
print("PulseAudio development libraries not found, disabling driver")
if (platform.system() == "Linux"):
env.Append(CPPDEFINES=["JOYDEV_ENABLED"])
if env['udev']:
if (os.system("pkg-config --exists libudev") == 0): # 0 means found
print("Enabling udev support")
env.Append(CPPDEFINES=["UDEV_ENABLED"])
env.ParseConfig('pkg-config libudev --cflags --libs')
else:
print("libudev development libraries not found, disabling udev support")
# Linkflags below this line should typically stay the last ones
if not env['builtin_zlib']:
env.ParseConfig('pkg-config zlib --cflags --libs')
env.Prepend(CPPPATH=['#platform/linuxbsd'])
env.Append(CPPDEFINES=['X11_ENABLED', 'UNIX_ENABLED'])
env.Append(CPPDEFINES=['VULKAN_ENABLED'])
if not env['builtin_vulkan']:
env.ParseConfig('pkg-config vulkan --cflags --libs')
if not env['builtin_glslang']:
# No pkgconfig file for glslang so far
env.Append(LIBS=['glslang', 'SPIRV'])
#env.Append(CPPDEFINES=['OPENGL_ENABLED'])
env.Append(LIBS=['GL'])
env.Append(LIBS=['pthread'])
if (platform.system() == "Linux"):
env.Append(LIBS=['dl'])
if (platform.system().find("BSD") >= 0):
env["execinfo"] = True
if env["execinfo"]:
env.Append(LIBS=['execinfo'])
if not env['tools']:
import subprocess
import re
linker_version_str = subprocess.check_output([env.subst(env["LINK"]), '-Wl,--version']).decode("utf-8")
        gnu_ld_version = re.search(r'^GNU ld [^$]*(\d+\.\d+)$', linker_version_str, re.MULTILINE)
if not gnu_ld_version:
print("Warning: Creating template binaries enabled for PCK embedding is currently only supported with GNU ld")
else:
if float(gnu_ld_version.group(1)) >= 2.30:
env.Append(LINKFLAGS=['-T', 'platform/linuxbsd/pck_embed.ld'])
else:
env.Append(LINKFLAGS=['-T', 'platform/linuxbsd/pck_embed.legacy.ld'])
## Cross-compilation
if (is64 and env["bits"] == "32"):
env.Append(CCFLAGS=['-m32'])
env.Append(LINKFLAGS=['-m32', '-L/usr/lib/i386-linux-gnu'])
elif (not is64 and env["bits"] == "64"):
env.Append(CCFLAGS=['-m64'])
env.Append(LINKFLAGS=['-m64', '-L/usr/lib/i686-linux-gnu'])
# Link those statically for portability
if env['use_static_cpp']:
env.Append(LINKFLAGS=['-static-libgcc', '-static-libstdc++'])
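# Hedged usage note: the options declared in get_opts() are passed on the SCons
# command line; the platform string below is an assumption based on get_name(),
# e.g.
#
#   scons platform=linuxbsd target=release_debug use_llvm=yes use_lld=yes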
|
the-stack_0_4017 | # Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SeqGAN for language modeling
"""
import os
import argparse
import importlib
import tensorflow as tf
import texar as tx
parser = argparse.ArgumentParser(description='prepare data')
parser.add_argument('--dataset', type=str, default='ptb',
help='dataset to prepare')
parser.add_argument('--data_path', type=str, default='./',
                    help="Directory containing the dataset. If it does not "
                         "exist, the directory will be created and the data "
                         "will be downloaded.")
parser.add_argument('--config', type=str, default='config_ptb_small',
help='The config to use.')
args = parser.parse_args()
config = importlib.import_module(args.config)
def prepare_data(args, config, train_path):
"""Downloads the PTB or COCO dataset
"""
if not os.path.exists(config.log_dir):
os.mkdir(config.log_dir)
ptb_url = 'https://jxhe.github.io/download/ptb_data.tgz'
coco_url = 'https://VegB.github.io/downloads/coco_data.tgz'
data_path = args.data_path
if not tf.gfile.Exists(train_path):
url = ptb_url if args.dataset == 'ptb' else coco_url
tx.data.maybe_download(url, data_path, extract=True)
os.remove('%s_data.tgz' % args.dataset)
if __name__ == '__main__':
prepare_data(args, config, config.train_data_hparams['dataset']['files'])
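# Hedged usage sketch (assuming this script is saved as prepare_data.py; the
# coco config module name below is an assumption):
#
#   python prepare_data.py --dataset ptb --config config_ptb_small
#   python prepare_data.py --dataset coco --config config_coco --data_path ./data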
|
the-stack_0_4018 | # Copyright (C) 2015 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import forms
from openstack_dashboard.dashboards.identity.identity_providers.protocols \
import forms as protocol_forms
class AddProtocolView(forms.ModalFormView):
template_name = 'identity/identity_providers/protocols/create.html'
form_id = "create_protocol_form"
form_class = protocol_forms.AddProtocolForm
submit_label = _("Create Protocol")
success_url = "horizon:identity:identity_providers:protocols_tab"
page_title = _("Create Protocol")
def get_success_url(self):
return reverse(self.success_url,
args=(self.kwargs['identity_provider_id'],))
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["submit_url"] = reverse(
"horizon:identity:identity_providers:protocols:create",
args=(self.kwargs['identity_provider_id'],))
return context
def get_initial(self):
return {"idp_id": self.kwargs['identity_provider_id']}
|
the-stack_0_4019 | # Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configparser
import logging
import math
import os
from datetime import datetime
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.plugins_manager import AirflowPlugin
from airflow.utils.decorators import apply_defaults
import sqlalchemy
try:
from deckhand_client_factory import DeckhandClientFactory
import service_endpoint
from get_k8s_logs import get_pod_logs
from get_k8s_logs import K8sLoggingException
from service_token import shipyard_service_token
from xcom_puller import XcomPuller
except ImportError:
from shipyard_airflow.plugins.deckhand_client_factory import \
DeckhandClientFactory
from shipyard_airflow.plugins import service_endpoint
from shipyard_airflow.plugins.get_k8s_logs import get_pod_logs
from shipyard_airflow.plugins.get_k8s_logs import K8sLoggingException
from shipyard_airflow.plugins.service_token import shipyard_service_token
from shipyard_airflow.plugins.xcom_puller import XcomPuller
from shipyard_airflow.common.document_validators.document_validation_utils \
import DocumentValidationUtils
from shipyard_airflow.common.notes.notes import NotesManager
from shipyard_airflow.common.notes.notes_helper import NotesHelper
from shipyard_airflow.common.notes.storage_impl_db import \
ShipyardSQLNotesStorage
# Configuration sections
BASE = 'base'
K8S_LOGS = 'k8s_logs'
REQUESTS_CONFIG = 'requests_config'
LOG = logging.getLogger(__name__)
class UcpBaseOperator(BaseOperator):
"""Airship Base Operator
All Airship related workflow operators will use the Airship base
operator as the parent and inherit attributes and methods
from this class
"""
@apply_defaults
def __init__(self,
main_dag_name=None,
pod_selector_pattern=None,
shipyard_conf=None,
start_time=None,
xcom_push=True,
*args, **kwargs):
"""Initialization of UcpBaseOperator object.
:param continue_processing: A boolean value on whether to continue
with the workflow. Defaults to True.
:param main_dag_name: Parent Dag
:param pod_selector_pattern: A list containing the information on
the patterns of the Pod name and name
of the associated container for log
queries. This will allow us to query
multiple components, e.g. MAAS and
Drydock at the same time. It also allows
us to query the logs of specific container
in Pods with multiple containers. For
instance the Airflow worker pod contains
both the airflow-worker container and the
log-rotate container.
:param shipyard_conf: Location of shipyard.conf
:param start_time: Time when Operator gets executed
:param xcom_push: xcom usage
"""
super(UcpBaseOperator, self).__init__(*args, **kwargs)
self.continue_processing = True
self.main_dag_name = main_dag_name
self.pod_selector_pattern = pod_selector_pattern or []
self.shipyard_conf = shipyard_conf
self.start_time = datetime.now()
self.xcom_push_flag = xcom_push
# lazy init field to hold a shipyard_db_engine
self._shipyard_db_engine = None
def execute(self, context):
# Setup values that depend on the shipyard configuration
self.doc_utils = _get_document_util(self.shipyard_conf)
self.endpoints = service_endpoint.ServiceEndpoints(self.shipyard_conf)
# Read and parse shiyard.conf
self.config = configparser.ConfigParser()
self.config.read(self.shipyard_conf)
# Execute Airship base function
self.ucp_base(context)
# Execute base function for child operator
self.run_base(context)
if self.continue_processing:
# Execute child function
try:
self.do_execute()
except Exception:
LOG.exception(
'Exception happened during %s execution, '
'will try to log additional details',
self.__class__.__name__)
self.get_k8s_logs()
if hasattr(self, 'fetch_failure_details'):
self.fetch_failure_details()
raise
def ucp_base(self, context):
LOG.info("Running Airship Base Operator...")
# Configure the notes helper for this run of an operator
# establishes self.notes_helper
self._setup_notes_helper()
# Initialize variable that indicates the kubernetes namespace for the
# Airship components
self.ucp_namespace = self.config.get(K8S_LOGS, 'ucp_namespace')
# Define task_instance
self.task_instance = context['task_instance']
# Set up and retrieve values from xcom
self.xcom_puller = XcomPuller(self.main_dag_name, self.task_instance)
self.action_info = self.xcom_puller.get_action_info()
self.action_type = self.xcom_puller.get_action_type()
self.dc = self.xcom_puller.get_deployment_configuration()
# Set up other common-use values
self.action_id = self.action_info['id']
# extract the `task` or `step` name for easy access
self.task_id = self.task_instance.task_id
self.revision_id = self.action_info['committed_rev_id']
self.action_params = self.action_info.get('parameters', {})
self.design_ref = self._deckhand_design_ref()
self._setup_target_nodes()
def get_k8s_logs(self):
"""Retrieve Kubernetes pod/container logs specified by an operator
This method is "best effort" and should not prevent the progress of
the workflow processing
"""
if self.pod_selector_pattern:
for selector in self.pod_selector_pattern:
# Get difference in current time and time when the
# operator was first executed (in seconds)
t_diff = (datetime.now() - self.start_time).total_seconds()
# Note that we will end up with a floating number for
# 't_diff' and will need to round it up to the nearest
# integer
t_diff_int = int(math.ceil(t_diff))
try:
get_pod_logs(selector['pod_pattern'],
self.ucp_namespace,
selector['container'],
t_diff_int)
except K8sLoggingException as e:
LOG.error(e)
else:
LOG.debug("There are no pod logs specified to retrieve")
def _setup_target_nodes(self):
"""Sets up the target nodes field for this action
When managing a targeted action, this step needs to resolve the
target node. If there are no targets found (should be caught before
invocation of the DAG), then raise an exception so that it does not
try to take action on more nodes than targeted.
Later, when creating the deployment group, if this value
(self.target_nodes) is set, it will be used in lieu of the design
based deployment strategy.
target_nodes will be a comma separated string provided as part of the
parameters to an action on input to Shipyard.
"""
if self.action_type == 'targeted':
t_nodes = self.action_params.get('target_nodes', '')
self.target_nodes = [n.strip() for n in t_nodes.split(',')]
if not self.target_nodes:
raise AirflowException(
'{} ({}) requires targeted nodes, but was unable to '
'resolve any targets in {}'.format(
self.main_dag_name, self.action_id,
self.__class__.__name__
)
)
LOG.info("Target Nodes for action: [%s]",
', '.join(self.target_nodes))
else:
self.target_nodes = None
def _deckhand_design_ref(self):
"""Assemble a deckhand design_ref"""
# Retrieve DeckHand Endpoint Information
LOG.info("Assembling a design ref using revision: %s",
self.revision_id)
deckhand_svc_endpoint = self.endpoints.endpoint_by_name(
service_endpoint.DECKHAND
)
# This URL will be used to retrieve the Site Design YAMLs
deckhand_path = "deckhand+{}".format(deckhand_svc_endpoint)
design_ref = os.path.join(deckhand_path,
"revisions",
str(self.revision_id),
"rendered-documents")
LOG.info("Design Reference is %s", design_ref)
return design_ref
def get_unique_doc(self, schema, name, revision_id=None):
"""Retrieve a specific document from Deckhand
:param schema: the schema of the document
:param name: the metadata.name of the document
:param revision_id: the deckhand revision, or defaults to
self.revision_id
Wraps the document_validation_utils call to get the same.
        Returns the specified document or raises an Airflow exception.
"""
if revision_id is None:
revision_id = self.revision_id
        LOG.info(
            "Retrieving document %s of schema %s from Deckhand revision %s",
            name, schema, revision_id
        )
try:
return self.doc_utils.get_unique_doc(revision_id=revision_id,
name=name,
schema=schema)
except Exception as ex:
            LOG.error("A document was expected to be available: Name: %s, "
                      "Schema: %s, Deckhand revision: %s, but there was an "
                      "error attempting to retrieve it. Since this document's "
                      "contents may be critical to the proper operation of "
                      "the workflow, this is fatal.", name, schema,
                      revision_id)
LOG.exception(ex)
# if the document is not found for ANY reason, the workflow is
# broken. Raise an Airflow Exception.
raise AirflowException(ex)
def _get_shipyard_db_engine(self):
"""Lazy initialize an engine for the Shipyard database.
:returns: a SQLAlchemy engine for the Shipyard database.
Developer's Note: Initially the idea was to use the PostgresHook and
retrieve an engine from there as is done with the concurrency check,
but since we have easy access to a configuration file, this does
direct SQLAlchemy to get the engine. By using the config, the database
connection is not exposed as environment variables -- which is one way
that Airflow registers database connections for use by the dbApiHook
"""
if self._shipyard_db_engine is None:
connection_string = self.config.get(BASE, 'postgresql_db')
pool_size = self.config.getint(BASE, 'pool_size')
max_overflow = self.config.getint(BASE, 'pool_overflow')
pool_pre_ping = self.config.getboolean(BASE, 'pool_pre_ping')
pool_recycle = self.config.getint(BASE, 'connection_recycle')
pool_timeout = self.config.getint(BASE, 'pool_timeout')
self._shipyard_db_engine = sqlalchemy.create_engine(
connection_string, pool_size=pool_size,
max_overflow=max_overflow,
pool_pre_ping=pool_pre_ping,
pool_recycle=pool_recycle,
pool_timeout=pool_timeout
)
LOG.info("Initialized Shipyard database connection with pool "
"size: %d, max overflow: %d, pool pre ping: %s, pool "
"recycle: %d, and pool timeout: %d",
pool_size, max_overflow,
pool_pre_ping, pool_recycle,
pool_timeout)
return self._shipyard_db_engine
@shipyard_service_token
def _token_getter(self):
# Generator method to get a shipyard service token
return self.svc_token
def _setup_notes_helper(self):
"""Setup a notes helper for use by all descendent operators"""
connect_timeout = self.config.get(REQUESTS_CONFIG,
'notes_connect_timeout')
read_timeout = self.config.get(REQUESTS_CONFIG, 'notes_read_timeout')
self.notes_helper = NotesHelper(
NotesManager(
storage=ShipyardSQLNotesStorage(self._get_shipyard_db_engine),
get_token=self._token_getter,
connect_timeout=connect_timeout,
read_timeout=read_timeout))
def _get_document_util(shipyard_conf):
"""Retrieve an instance of the DocumentValidationUtils"""
dh_client = DeckhandClientFactory(shipyard_conf).get_client()
return DocumentValidationUtils(dh_client)
class UcpBaseOperatorPlugin(AirflowPlugin):
"""Creates UcpBaseOperator in Airflow."""
name = 'ucp_base_operator_plugin'
operators = [UcpBaseOperator]
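# A minimal sketch of how a concrete operator builds on UcpBaseOperator:
# execute() runs ucp_base(), then run_base(), then do_execute(), so a child
# typically supplies the latter two. The class below is illustrative only and
# is not registered with the plugin above.
class _ExampleCheckOperator(UcpBaseOperator):
    """Illustrative child operator; logs the step it is running."""
    def run_base(self, context):
        # decide whether this workflow step should proceed
        self.continue_processing = True
    def do_execute(self):
        LOG.info("action %s running step %s against revision %s",
                 self.action_id, self.task_id, self.revision_id)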
|
the-stack_0_4020 | # ======================================================================= #
# Copyright (C) 2021 Hoverset Group. #
# ======================================================================= #
import abc
from collections import defaultdict
from formation.formats import Node
from hoverset.data.keymap import KeyMap, CharKey
from hoverset.ui.icons import get_icon_image as icon
from hoverset.ui.widgets import EventMask
from hoverset.util.execution import Action
from hoverset.data.actions import Routine
from hoverset.ui.menu import MenuUtils, EnableIf
from studio.tools._base import BaseTool
from studio.feature.components import ComponentPane, SelectToDrawGroup
from studio.feature.stylepane import StyleGroup
from studio.ui.tree import NestedTreeView
from studio.lib import generate_id
from studio.lib.canvas import *
from studio.lib.legacy import Canvas
from studio.parsers.loader import BaseStudioAdapter, DesignBuilder
class Coordinate:
pool = defaultdict(list)
active = set()
min_radius = 3
max_radius = 5
def __init__(self, canvas, controller, x, y):
self.radius = self.min_radius
self.canvas = canvas
self.controller = controller
self.x = x
self.y = y
self._id = canvas.create_oval(
x - self.radius, y - self.radius, x + self.radius, y + self.radius,
fill=self.controller.tool.studio.style.colors["accent"],
tags=("coordinate", "controller")
)
canvas.tag_bind(self._id, "<ButtonRelease-1>", self._end_drag)
canvas.tag_bind(self._id, "<Motion>", self._drag)
canvas.tag_bind(self._id, "<Enter>", lambda _: self.grow_effect())
canvas.tag_bind(self._id, "<Leave>", lambda _: self.grow_effect(True))
MenuUtils.bind_canvas_context(self.canvas, self._id, self._context_menu)
self.active.add(self)
self._listeners = []
def grow_effect(self, shrink=False):
self.radius = self.min_radius if shrink else self.max_radius
self.place()
def add_listener(self, func, *args, **kwargs):
def callback():
func(*args, **kwargs)
self._listeners.append(callback)
return callback
def remove_listener(self, callback):
if callback in self._listeners:
self._listeners.remove(callback)
def retire(self):
# remove from view without deleting
self.canvas.itemconfigure(self._id, state='hidden')
        self.pool[self.canvas].append(self)
self._listeners = []
def place(self, x=None, y=None):
x = self.x if x is None else x
y = self.y if y is None else y
self.canvas.coords(
self._id,
x - self.radius, y - self.radius, x + self.radius, y + self.radius
)
self.x = x
self.y = y
for listener in self._listeners:
listener()
def shift(self, delta_x, delta_y):
self.place(self.x + delta_x, self.y + delta_y)
def revive(self, controller, x, y):
self.controller = controller
self.canvas.itemconfigure(self._id, state='normal')
self.place(x, y)
self.active.add(self)
def _context_menu(self, event):
self.controller.on_coord_context(self, event)
def _drag(self, event):
if not event.state & EventMask.MOUSE_BUTTON_1:
return
self.x = self.canvas.canvasx(event.x)
self.y = self.canvas.canvasy(event.y)
self.place()
self.controller.on_coord_change(self)
def _end_drag(self, _):
self.controller.on_release()
@classmethod
def acquire(cls, canvas, controller, x, y):
if len(cls.pool[canvas]):
coord = cls.pool[canvas][0]
cls.pool[canvas].remove(coord)
coord.revive(controller, x, y)
return coord
else:
return cls(canvas, controller, x, y)
class Link:
pool = defaultdict(list)
active = set()
def __init__(self, canvas, controller, coord1, coord2):
self.canvas = canvas
self.controller = controller
self._id = canvas.create_line(
coord1.x, coord1.y, coord2.x, coord2.y,
fill=self.controller.tool.studio.style.colors["accent"],
tag=("link", "controller"), dash=(5, 4), width=2
)
self.link_coord(coord1, coord2)
canvas.tag_bind(self._id, "<ButtonRelease-1>", self._end_drag)
MenuUtils.bind_canvas_context(self.canvas, self._id, self._context_menu)
canvas.tag_bind(self._id, "<Motion>", self._drag)
self.active.add(self)
self._coord_latch = None
def _to_canvas_coord(self, x, y):
return self.canvas.canvasx(x), self.canvas.canvasy(y)
def _context_menu(self, event):
self.controller.on_link_context(self, event)
def _drag(self, event):
if not event.state & EventMask.MOUSE_BUTTON_1:
return
if self._coord_latch:
x, y = self._to_canvas_coord(event.x, event.y)
xl, yl = self._coord_latch
self.controller.on_move(x - xl, y - yl)
self._coord_latch = x, y
else:
self._coord_latch = self._to_canvas_coord(event.x, event.y)
def _end_drag(self, _):
self.controller.on_release()
self._coord_latch = None
def place(self, coord1, coord2):
self.canvas.coords(self._id, coord1.x, coord1.y, coord2.x, coord2.y)
self.canvas.tag_lower(self._id, "coordinate")
def link_coord(self, coord1, coord2):
coord1.add_listener(self.coord_changed)
coord2.add_listener(self.coord_changed)
self.coord1 = coord1
self.coord2 = coord2
self.place(coord1, coord2)
def unlink_coord(self):
self.coord1 = self.coord2 = None
self._listeners = []
def revive(self, controller, coord1, coord2):
self.controller = controller
self.canvas.itemconfigure(self._id, state='normal')
self.link_coord(coord1, coord2)
self.active.add(self)
def retire(self):
# remove from view without deleting
self.canvas.itemconfigure(self._id, state='hidden')
        self.pool[self.canvas].append(self)
self.unlink_coord()
def coord_changed(self):
self.place(self.coord1, self.coord2)
@classmethod
def acquire(cls, canvas, controller, coord1, coord2):
if len(cls.pool[canvas]):
coord = cls.pool[canvas][0]
cls.pool[canvas].remove(coord)
coord.revive(controller, coord1, coord2)
return coord
else:
return cls(canvas, controller, coord1, coord2)
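# Hedged note on the pooling pattern shared by Coordinate and Link: acquire()
# reuses a retired instance for the same canvas when one is available and only
# constructs a new canvas item otherwise, e.g.
#
#   coord = Coordinate.acquire(canvas, controller, 10, 10)   # reuse or create
#   coord.retire()                                           # hide and return to the pool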
class Controller(abc.ABC):
def __init__(self, canvas, tool, item=None, **kw):
self.canvas = canvas
self.tool = tool
self.item = item
self._on_change = None
self.coords = []
self.links = []
def update(self):
pass
def on_change(self, func, *args, **kwargs):
self._on_change = lambda item: func(item, *args, **kwargs)
def _change(self):
if self._on_change:
self._on_change(self.item)
def highlight(self, item):
self.item = item
# raise controller elements to the top
self.canvas.tag_raise("controller")
@abc.abstractmethod
def get_coords(self):
pass
def on_coord_change(self, coord):
pass
def on_coord_context(self, coord, event):
pass
def on_link_context(self, link, event):
pass
def on_move(self, delta_x, delta_y, propagated=False):
for coord in self.coords:
coord.shift(delta_x, delta_y)
self.item.move(delta_x, delta_y)
if not propagated:
self.tool.propagate_move(delta_x, delta_y, self.item)
def on_release(self):
self.tool.on_layout_change()
def release(self):
for coord in self.coords:
coord.retire()
for link in self.links:
link.retire()
self.coords.clear()
self.links.clear()
class SquareController(Controller):
def __init__(self, canvas, tool, item=None, **kw):
super(SquareController, self).__init__(canvas, tool, item, **kw)
self.nw = Coordinate.acquire(canvas, self, 20, 20)
self.ne = Coordinate.acquire(canvas, self, 20, 20)
self.se = Coordinate.acquire(canvas, self, 20, 20)
self.sw = Coordinate.acquire(canvas, self, 20, 20)
self.n = Link.acquire(canvas, self, self.nw, self.ne)
self.s = Link.acquire(canvas, self, self.sw, self.se)
self.e = Link.acquire(canvas, self, self.ne, self.se)
self.w = Link.acquire(canvas, self, self.nw, self.sw)
self.coords = [self.ne, self.nw, self.se, self.sw]
self.links = [self.n, self.w, self.e, self.s]
if item:
self.highlight(item)
def highlight(self, item):
super(SquareController, self).highlight(item)
x1, y1, x2, y2 = item.coords()
self.nw.place(x1, y1)
self.ne.place(x2, y1)
self.se.place(x2, y2)
self.sw.place(x1, y2)
def update(self):
self.highlight(self.item)
def on_coord_change(self, coord):
x, y = coord.x, coord.y
if coord == self.nw:
self.ne.place(y=y)
self.sw.place(x=x)
elif coord == self.ne:
self.nw.place(y=y)
self.se.place(x=x)
elif coord == self.sw:
self.nw.place(x=x)
self.se.place(y=y)
elif coord == self.se:
self.ne.place(x=x)
self.sw.place(y=y)
else:
return
self.item.coords(self.get_coords())
self._change()
def get_coords(self):
return (
self.nw.x, self.nw.y,
self.se.x, self.se.y
)
class LinearController(Controller):
_closed = False
def __init__(self, canvas, tool, item=None, **kw):
super(LinearController, self).__init__(canvas, tool, item, **kw)
if item:
self.highlight(item)
self._link_context = MenuUtils.make_dynamic((
("command", "add point", icon("add", 14, 14), self._add_point, {}),
), tool.studio, tool.studio.style)
self._coord_context = MenuUtils.make_dynamic((
("command", "remove", icon("close", 14, 14), self._remove_point, {}),
), tool.studio, tool.studio.style)
self._active_link = None
self._active_coord = None
self._active_point = None
def on_link_context(self, link, event):
MenuUtils.popup(event, self._link_context)
self._active_link = link
self._active_point = self.canvas.canvasx(event.x), self.canvas.canvasy(event.y)
def on_coord_context(self, coord, event):
MenuUtils.popup(event, self._coord_context)
self._active_coord = coord
self._active_point = self.canvas.canvasx(event.x), self.canvas.canvasy(event.y)
def _add_point(self):
if not self._active_link:
return
index = self.coords.index(self._active_link.coord1) + 1
new_coord = Coordinate.acquire(self.canvas, self, *self._active_point)
self.coords.insert(index, new_coord)
self.item.coords(self.get_coords())
self.update()
self.tool.on_layout_change()
def _remove_point(self):
if not self._active_coord:
return
self.coords.remove(self._active_coord)
self._active_coord.retire()
self.item.coords(self.get_coords())
self.update()
self.tool.on_layout_change()
def on_coord_change(self, coord):
self.item.coords(self.get_coords())
self._change()
def get_coords(self):
return [coord for c in self.coords for coord in (c.x, c.y)]
def update(self):
# there is no smarter way to adjust links and coordinates
# clear them and reapply
self.release()
self.highlight(self.item)
def highlight(self, item):
coords = item.coords()
self.release()
prev = Coordinate.acquire(self.canvas, self, *coords[:2])
self.coords.append(prev)
for i in range(2, len(coords), 2):
# just in case the length of coordinates is odd
if i + 1 >= len(coords):
break
cd = Coordinate.acquire(self.canvas, self, coords[i], coords[i + 1])
self.coords.append(cd)
self.links.append(Link.acquire(self.canvas, self, prev, cd))
prev = cd
if self._closed:
self.links.append(Link.acquire(self.canvas, self, prev, self.coords[0]))
# ensure you have at least one item with "controller" tag before calling super
super(LinearController, self).highlight(item)
class ClosedLinearController(LinearController):
_closed = True
class PointController(Controller):
def __init__(self, canvas, tool, item=None, **kw):
super(PointController, self).__init__(canvas, tool, item, **kw)
self._border = None
if item:
self.highlight(item)
def get_coords(self):
return [self.coords[0].x, self.coords[0].y]
def on_coord_change(self, coord):
self.item.coords(self.get_coords())
self._change()
def on_move(self, delta_x, delta_y, propagated=False):
super(PointController, self).on_move(delta_x, delta_y, propagated)
self.highlight(self.item)
def _get_border_coords(self, item):
bbox = item.bbox() or (*item.coords(), *item.coords())
x1, y1, x2, y2 = bbox
x1, y1, x2, y2 = x1 - 2, y1 - 2, x2 + 2, y2 + 2
return x1, y1, x2, y1, x2, y2, x1, y2, x1, y1
def update(self):
self.canvas.coords(self._border, *self._get_border_coords(self.item))
def highlight(self, item):
coords = self._get_border_coords(item)
if self._border:
self.canvas.coords(self._border, *coords)
else:
self._border = self.canvas.create_line(
*coords, fill=self.tool.studio.style.colors["accent"],
tag="controller", dash=(5, 4), width=2
)
super(PointController, self).highlight(item)
def __del__(self):
if self._border:
self.canvas.delete(self._border)
class Draw(abc.ABC):
def __init__(self, tool):
self.tool = tool
self.active_item = None
@property
def canvas(self):
return self.tool.canvas
def canvas_coord(self, x, y):
return self.canvas.canvasx(x), self.canvas.canvasy(y)
@abc.abstractmethod
def on_button_press(self, event):
pass
@abc.abstractmethod
def on_button_release(self, event):
pass
@abc.abstractmethod
def on_double_press(self, event):
pass
@abc.abstractmethod
def on_motion(self, event):
pass
class SquareDraw(Draw):
def __init__(self, tool):
super(SquareDraw, self).__init__(tool)
self.coords = (0, 0, 0, 0)
self.item = None
self.draw_start = False
def on_button_press(self, event):
x, y = self.canvas_coord(event.x, event.y)
self.coords = (x, y, x, y)
self.draw_start = True
def on_button_release(self, event):
self.draw_start = False
if self.item:
self.tool.on_item_added(self.item)
self.item = None
def on_double_press(self, event):
pass
def on_motion(self, event):
if not self.draw_start:
return
x, y = self.canvas_coord(event.x, event.y)
if self.item is None:
self.item = self.tool.create_item(self.tool.current_draw, self.coords)
self.coords = (*self.coords[:2], x, y)
self.item.coords(*self.coords)
class LinearDraw(Draw):
def __init__(self, tool):
super(LinearDraw, self).__init__(tool)
self.coords = [0, 0, 0, 0]
self.item = None
self.draw_start = False
def on_button_press(self, event):
x, y = self.canvas_coord(event.x, event.y)
if not self.draw_start:
self.coords = [x, y, x, y]
else:
self.coords.extend([x, y])
self.item.coords(*self.coords)
self.draw_start = True
def on_button_release(self, event):
pass
def on_double_press(self, event):
self.draw_start = False
if self.item:
# remove last point which is usually a duplicate
self.item.coords(*self.coords[:-2])
self.tool.on_item_added(self.item)
self.item = None
def on_motion(self, event):
if not self.draw_start:
return
if self.item is None:
self.item = self.tool.create_item(self.tool.current_draw, self.coords)
x, y = self.canvas_coord(event.x, event.y)
# set the last two coordinates
self.coords[-2:] = [x, y]
self.item.coords(*self.coords)
class PointDraw(Draw):
def __init__(self, tool, **default_opts):
super(PointDraw, self).__init__(tool)
self.default_opts = default_opts
def on_button_press(self, event):
if event.state & EventMask.CONTROL:
return
x, y = self.canvas_coord(event.x, event.y)
self.item = self.tool.create_item(
self.tool.current_draw, (x, y), **self.default_opts
)
def on_button_release(self, event):
self.tool.on_item_added(self.item)
self.item = None
def on_double_press(self, event):
pass
def on_motion(self, event):
pass
class TextDraw(PointDraw):
def on_button_press(self, event):
super(TextDraw, self).on_button_press(event)
self.item.configure(text=self.item.name)
class CanvasStyleGroup(StyleGroup):
def __init__(self, master, pane, **cnf):
self.tool = cnf.pop('tool', None)
super().__init__(master, pane, **cnf)
self.label = "Canvas Item"
self.prop_keys = None
self._prev_prop_keys = set()
self._empty_message = "Select canvas item to see styles"
@property
def cv_items(self):
# selected canvas items
return self.tool.selected_items
def supports_widget(self, widget):
return isinstance(widget, Canvas)
def can_optimize(self):
# probably needs a rethink if we consider definition overrides
# in canvas items but there isn't much of that so this will do
return self.prop_keys == self._prev_prop_keys
def compute_prop_keys(self):
items = self.cv_items
if not items:
self.prop_keys = set()
else:
self.prop_keys = None
# determine common configs for multi-selected items
for item in self.cv_items:
if self.prop_keys is None:
self.prop_keys = set(item.configure())
else:
self.prop_keys &= set(item.configure())
if len(items) > 1:
# id cannot be set for multi-selected items
self.prop_keys.remove('id')
def on_widget_change(self, widget):
self._prev_prop_keys = self.prop_keys
self.compute_prop_keys()
super(CanvasStyleGroup, self).on_widget_change(widget)
def _get_prop(self, prop, widget):
# not very useful to us
return None
def _get_key(self, widget, prop):
# generate a key identifying the multi-selection state and prop modified
return f"{','.join(map(lambda x: str(x._id), self.cv_items))}:{prop}"
def _get_action_data(self, widget, prop):
return {item: {prop: item.cget(prop)} for item in self.cv_items}
def _apply_action(self, prop, value, widget, data):
for item in data:
item.configure(data[item])
if item._controller:
item._controller.update()
if self.tool.canvas == widget:
self.on_widget_change(widget)
self.tool.on_items_modified(data.keys())
def _set_prop(self, prop, value, widget):
for item in self.cv_items:
item.configure({prop: value})
if item._controller:
item._controller.update()
self.tool.on_items_modified(self.cv_items)
def get_definition(self):
if not self.cv_items:
return {}
else:
rough_definition = self.cv_items[0].properties
if len(self.cv_items) == 1:
# for single item no need to refine definitions any further
return rough_definition
resolved = {}
for prop in self.prop_keys:
if prop not in rough_definition:
continue
definition = resolved[prop] = rough_definition[prop]
# use default for value
definition.update(value=definition['default'])
return resolved
class CanvasTreeView(NestedTreeView):
class Node(NestedTreeView.Node):
def __init__(self, master=None, **config):
super().__init__(master, **config)
self.item: CanvasItem = config.get("item")
self.item.node = self
self._color = self.style.colors["secondary1"]
self.name_pad.configure(text=self.item.name)
self.icon_pad.configure(
image=icon(self.item.icon, 15, 15, color=self._color)
)
self.editable = True
self.strict_mode = True
def widget_modified(self, widget):
self.item = widget
self.name_pad.configure(text=self.item.name)
self.icon_pad.configure(
image=icon(self.item.icon, 15, 15, color=self._color)
)
def select(self, event=None, silently=False):
super(CanvasTreeView.Node, self).select(event, silently)
if event:
self.item.canvas.focus_set()
def __init__(self, canvas, **kw):
super(CanvasTreeView, self).__init__(canvas.node, **kw)
self._cv_node = canvas.node
self.canvas = canvas
self._is_mapped = False
self.allow_multi_select(True)
def add(self, node):
super(CanvasTreeView, self).add(node)
# if we have a node we make ourselves visible
if self not in self._cv_node.nodes:
self._cv_node.add(self)
def insert(self, index=None, *nodes):
super(CanvasTreeView, self).insert(index, *nodes)
# also make sure nodes is not empty
if self not in self._cv_node.nodes and nodes:
self._cv_node.add(self)
def remove(self, node):
super(CanvasTreeView, self).remove(node)
# if no nodes are left we hide ourselves
if not len(self.nodes):
self._cv_node.remove(self)
def reorder(self, reorder_data):
# rearrange nodes based on data containing {item: index, ...}
for item in reorder_data:
self.insert(reorder_data[item], item.node)
class CanvasStudioAdapter(BaseStudioAdapter):
_tool = None
@classmethod
def assert_tool(cls):
# make sure tool is initialized
if cls._tool is None:
raise RuntimeError("Canvas tool not initialized. Could not load canvas.")
@classmethod
def generate(cls, widget, parent=None):
cls.assert_tool()
# if canvas is selected there is a chance its cursor has been modified by tool
# below lies a hack to set the right cursor and restore it after loading is complete
cursor = None
if widget == cls._tool.canvas:
cursor = widget["cursor"]
widget.config(cursor=cls._tool._cursor)
node = BaseStudioAdapter.generate(widget, parent)
if cursor:
widget.config(cursor=cursor)
if getattr(widget, "_cv_initialized", False):
for item in widget._cv_items:
opts = {
"name": item.name,
"coords": ",".join(map(lambda c: str(round(c)), item.coords())),
"attr": item.altered_options()
}
if not item.name:
opts.pop("name", None)
Node(node, item.__class__.__name__, opts)
return node
@classmethod
def load(cls, node, designer, parent, bounds=None):
widget = BaseStudioAdapter.load(node, designer, parent, bounds=None)
cls.assert_tool()
if len(node):
cls._tool.initialize_canvas(widget)
for sub_node in node:
if sub_node.type not in CANVAS_ITEM_MAP:
raise NotImplementedError(f"Tag <{sub_node.type}> not implemented by canvas tool")
# use a copy just in case something gets popped down the line
config = dict(sub_node.attrib.get("attr", {}))
# add name to config as id so the intercepts can set it for us
config["id"] = sub_node.attrib.get("name", "")
coords = sub_node.attrib.get("coords", "").split(",")
if len(coords) < 2:
raise ValueError("Not enough coordinates provided.")
component = CANVAS_ITEM_MAP[sub_node.type]
item = component(widget, *coords)
item.configure(config)
cls._tool.create_item(
component, item=item, canvas=widget, silently=True
)
return widget
class CanvasTool(BaseTool):
name = "Canvas"
icon = "paint"
def __init__(self, studio, manager):
super(CanvasTool, self).__init__(studio, manager)
self._component_pane: ComponentPane = self.studio.get_feature(ComponentPane)
self.item_select = self._component_pane.register_group(
"Canvas", CANVAS_ITEMS, SelectToDrawGroup, self._evaluator
)
self.style_group = studio.style_pane.add_group(
CanvasStyleGroup, tool=self
)
CanvasStudioAdapter._tool = self
# connect the canvas adapter to load canvas objects to the studio
DesignBuilder.add_adapter(CanvasStudioAdapter, Canvas)
self.items = []
self.item_select.on_select(self.set_draw)
self.canvas = None
self._cursor = "arrow"
self.current_draw = None
self.selected_items = []
self._clipboard = None
self._latch_pos = 0, 0
self._image_placeholder = icon("image_dark", 60, 60)
self.square_draw = SquareDraw(self)
self.line_draw = LinearDraw(self)
self.text_draw = TextDraw(self)
self.bitmap_draw = PointDraw(self, bitmap="gray25")
self.image_draw = PointDraw(self, image=self._image_placeholder)
self.draw_map = {
Oval: self.square_draw,
Rectangle: self.square_draw,
Arc: self.square_draw,
Line: self.line_draw,
Polygon: self.line_draw,
Text: self.text_draw,
Bitmap: self.bitmap_draw,
Image: self.image_draw,
}
self.controller_map = {
Oval: SquareController,
Rectangle: SquareController,
Arc: SquareController,
Line: LinearController,
Polygon: ClosedLinearController,
Text: PointController,
Bitmap: PointController,
Image: PointController,
}
self.keymap = KeyMap(None)
CTRL = KeyMap.CTRL
self.routines = (
Routine(self.cut_items, 'CV_CUT', 'Cut selected items', 'canvas', CTRL + CharKey('x')),
Routine(self.copy_items, 'CV_COPY', 'Copy selected items', 'canvas', CTRL + CharKey('c')),
Routine(self.paste_items, 'CV_PASTE', 'Paste selected items', 'canvas', CTRL + CharKey('v')),
Routine(self.delete_items, 'CV_DELETE', 'Delete selected items', 'canvas', KeyMap.DELETE),
Routine(self.duplicate_items, 'CV_DUPLICATE', 'Duplicate selected items', 'canvas', CTRL + CharKey('d')),
Routine(self._send_back, 'CV_BACK', 'Send item to back', 'canvas', CharKey(']')),
Routine(self._bring_front, 'CV_FRONT', 'Bring item to front', 'canvas', CharKey('[')),
Routine(lambda: self._send_back(1), 'CV_BACK_1', 'send item back one step', 'canvas', CTRL + CharKey(']')),
Routine(lambda: self._bring_front(1), 'CV_FRONT_1', 'bring item forward one step', 'canvas',
CTRL + CharKey('[')),
)
self.keymap.add_routines(*self.routines)
self._item_context_menu = MenuUtils.make_dynamic((
EnableIf(
lambda: self.selected_items,
("separator",),
("command", "copy", icon("copy", 14, 14), self._get_routine('CV_COPY'), {}),
("command", "duplicate", icon("copy", 14, 14), self._get_routine('CV_DUPLICATE'), {}),
EnableIf(
lambda: self._clipboard is not None,
("command", "paste", icon("clipboard", 14, 14), self._get_routine('CV_PASTE'), {})
),
("command", "cut", icon("cut", 14, 14), self._get_routine('CV_CUT'), {}),
("separator",),
("command", "delete", icon("delete", 14, 14), self._get_routine('CV_DELETE'), {}),
("separator",),
("command", "send to back", icon("send_to_back", 14, 14), self._get_routine('CV_BACK'), {}),
("command", "bring to front", icon("bring_to_front", 14, 14), self._get_routine('CV_FRONT'), {}),
("command", "back one step", icon("send_to_back", 14, 14), self._get_routine('CV_BACK_1'), {}),
("command", "forward one step", icon("bring_to_front", 14, 14), self._get_routine('CV_FRONT_1'), {}),
),
), self.studio, self.studio.style)
self._canvas_menu = MenuUtils.make_dynamic((
EnableIf(
lambda: self._clipboard is not None,
("command", "paste", icon("clipboard", 14, 14),
self._get_routine('CV_PASTE'), {})
),
), self.studio, self.studio.style)
@property
def _ids(self):
return [item.name for item_set in self.items for item in item_set._cv_items]
def initialize_canvas(self, canvas=None):
canvas = canvas or self.canvas
if canvas and not getattr(canvas, "_cv_initialized", False):
canvas.bind(
"<ButtonPress-1>", self._draw_dispatch("on_button_press"), True)
canvas.bind(
"<ButtonRelease>", self._draw_dispatch("on_button_release"), True)
canvas.bind(
"<Double-Button-1>", self._draw_dispatch("on_double_press"), True)
canvas.bind(
"<Motion>", self._draw_dispatch("on_motion"), True)
canvas.bind("<Control-Button-1>", self._enter_pointer_mode)
canvas.bind("<Button-1>", self._latch_and_focus(canvas), True)
self.keymap._bind(canvas)
canvas.on_context_menu(self._show_canvas_menu(canvas))
canvas._cv_tree = CanvasTreeView(canvas)
canvas._cv_tree.on_structure_change(self._update_stacking, canvas)
canvas._cv_tree.on_select(self._update_selection, canvas)
canvas._cv_items = []
canvas._cv_initialized = True
@property
def sorted_selected_items(self):
return sorted(self.selected_items, key=lambda x: self.canvas._cv_items.index(x))
def _latch_and_focus(self, canvas):
def func(event):
canvas.focus_set()
self._latch_pos = canvas.canvasx(event.x), canvas.canvasy(event.y)
return func
def _enter_pointer_mode(self, *_):
if self.item_select._selected is None:
return
self.item_select._selected.deselect()
def _show_item_menu(self, item):
def show(event):
if item in self.selected_items:
MenuUtils.popup(event, self._item_context_menu)
return show
def _show_canvas_menu(self, canvas):
def show(event):
x, y = canvas.canvasx(event.x), canvas.canvasy(event.y)
self._latch_pos = x, y
if not canvas.find_overlapping(x, y, x, y):
MenuUtils.popup(event, self._canvas_menu)
return 'break'
return show
def _send_back(self, steps=None):
if not self.selected_items:
return
items = self.sorted_selected_items
if steps is None:
self._update_stacking(
self.canvas,
# arrange starting from zero
{item: index for index, item in enumerate(items)}
)
else:
self._update_stacking(
self.canvas,
# clamp to ensure non-negative index
{item: max(0, self.canvas._cv_items.index(item) - steps) for item in items}
)
def _bring_front(self, steps=None):
if not self.selected_items:
return
# work with items in stacking order
items = self.sorted_selected_items
cv_items = self.canvas._cv_items
if steps is None:
end = len(cv_items) - 1
self._update_stacking(
self.canvas,
# insert each item to the end of the list, will be done in stacking order
{item: end for item in items}
)
else:
self._update_stacking(
self.canvas,
# clamp the new index to within length of items
{item: min(len(cv_items) - 1, cv_items.index(item) + steps) for item in items}
)
def _update_stacking(self, canvas, data=None, silently=False):
if data:
canvas._cv_tree.reorder(data)
else:
data = {}
canvas._cv_items.sort(key=lambda x: canvas._cv_tree.nodes.index(x.node))
prev_data = {}
for index, item in enumerate(canvas._cv_items):
if item._prev_index != index:
# old data
prev_data[item] = item._prev_index
# new data
data[item] = index
item._prev_index = index
if index > 0:
item.lift(canvas._cv_items[index - 1]._id)
if not silently and prev_data != data:
self.studio.new_action(Action(
lambda _: self._update_stacking(canvas, prev_data, True),
lambda _: self._update_stacking(canvas, data, True)
))
def _get_routine(self, key):
for routine in self.routines:
if routine.key == key:
return routine
def create_item(self, component, coords=(), item=None, canvas=None, silently=False, **kwargs):
canvas = canvas or self.canvas
if item is None:
opts = dict(**component.defaults)
opts.update(kwargs)
item = component(canvas, *coords, **opts)
# generate a unique id
item.name = generate_id(component, self._ids)
canvas._cv_items.append(item)
item._prev_index = canvas._cv_items.index(item)
node = canvas._cv_tree.add_as_node(item=item)
item.bind("<ButtonRelease-1>", lambda e: self._handle_select(item, e), True)
item.bind("<ButtonRelease-1>", lambda e: self._handle_end(item, e), True)
item.bind("<Motion>", lambda e: self._handle_move(item, e), True)
MenuUtils.bind_context(item, self._show_item_menu(item))
MenuUtils.bind_all_context(node, self._show_item_menu(item))
if not silently:
self.studio.new_action(Action(
lambda _: self.remove_items([item], silently=True),
lambda _: self.restore_items([item])
))
return item
def remove_items(self, items, silently=False):
items = sorted(items, key=lambda x: x.canvas._cv_items.index(x))
self.deselect_items(items)
for item in items:
item.hide()
item.canvas._cv_items.remove(item)
item.node.remove()
if not silently:
self.studio.new_action(Action(
lambda _: self.restore_items(items),
lambda _: self.remove_items(items, silently=True)
))
def restore_items(self, items):
for item in items:
item.show()
canvas = item.canvas
if item._prev_index is not None:
canvas._cv_items.insert(item._prev_index, item)
canvas._cv_tree.insert(item._prev_index, item.node)
def _get_copy_data(self):
if not self.selected_items:
return []
items = self.sorted_selected_items
for item in items:
item.addtag('bound_check')
bbox = self.canvas.bbox('bound_check') or items[0].coords()
ref_x, ref_y = bbox[:2]
self.canvas.dtag('bound_check', 'bound_check')
return [item.serialize(ref_x, ref_y) for item in items]
def copy_items(self):
if self.selected_items:
self._clipboard = self._get_copy_data()
def cut_items(self):
if self.selected_items:
self.copy_items()
self.delete_items()
def duplicate_items(self):
if self.selected_items:
self.paste_items(self._get_copy_data())
def paste_items(self, _clipboard=None):
_clipboard = self._clipboard if _clipboard is None else _clipboard
if _clipboard:
items = []
for item_data in _clipboard:
item = CanvasItem.from_data(self.canvas, item_data, self._latch_pos)
self.create_item(item.__class__, item=item, silently=True)
items.append(item)
# slightly displace latch position for next paste
self._latch_pos = tuple(map(lambda x: x + 5, self._latch_pos))
self.studio.new_action(Action(
lambda _: self.remove_items(items, silently=True),
lambda _: self.restore_items(items)
))
def delete_items(self):
self.remove_items(list(self.selected_items))
def _handle_move(self, item, event):
if not event.state & EventMask.MOUSE_BUTTON_1:
# we need mouse button 1 to be down to qualify as a drag
return
if getattr(item, '_controller', None) and self.current_draw is None:
if getattr(item, '_coord_latch', None):
x0, y0 = item._coord_latch
                x, y = item.canvas.canvasx(event.x), item.canvas.canvasy(event.y)
item._controller.on_move(x - x0, y - y0)
item._coord_latch = x, y
else:
                item._coord_latch = item.canvas.canvasx(event.x), item.canvas.canvasy(event.y)
def _handle_end(self, item, event):
if getattr(item, '_coord_latch', None) and self.current_draw is None:
self.on_layout_change()
item._coord_latch = None
def _handle_select(self, item, event):
if self.current_draw is not None or getattr(item, '_coord_latch', None):
# if coord_latch has a value then it means we have been dragging
# an item and the button release means end of drag and not selection
return
if event.state & EventMask.CONTROL:
self.select_item(item, True)
else:
self.select_item(item)
def _draw_dispatch(self, event_type):
def handler(event):
drawer = self.draw_map.get(self.current_draw)
if drawer:
getattr(drawer, event_type)(event)
return handler
def set_draw(self, component):
self._set_cursor()
self.current_draw = component
def _reset_cursor(self):
self.canvas.configure(cursor=self._cursor)
def _set_cursor(self):
if self.item_select.selected:
self.canvas.configure(cursor="crosshair")
else:
self._reset_cursor()
def _evaluator(self, widget):
return isinstance(widget, Canvas)
def set_controller(self, item):
controller_class = self.controller_map.get(item.__class__)
if controller_class:
item._controller = controller_class(item.canvas, self, item)
return item._controller
def remove_controller(self, item):
controller = getattr(item, "_controller", None)
if controller:
controller.release()
item._controller = None
def selection_changed(self):
# called when canvas item selection changes
self.style_group.on_widget_change(self.canvas)
def _update_selection(self, canvas):
# update selections from the canvas tree
if canvas != self.canvas:
self.studio.select(canvas)
# call to studio should cause canvas to be selected
assert self.canvas == canvas
selected = set(self.selected_items)
to_select = {node.item for node in canvas._cv_tree.get()}
# deselect items currently selected that shouldn't be
for item in selected - to_select:
self.remove_controller(item)
self.selected_items.remove(item)
# select items to be selected that are not yet selected
for item in to_select - selected:
controller = self.set_controller(item)
if not controller:
return
self.selected_items.append(item)
self.selection_changed()
def _clear_selection(self):
if self.selected_items:
for item in self.selected_items:
self.remove_controller(item)
item.canvas._cv_tree.deselect(item.node)
self.selected_items.clear()
self.selection_changed()
def _deselect(self, item):
self.remove_controller(item)
self.selected_items.remove(item)
item.canvas._cv_tree.deselect(item.node)
def deselect_items(self, items):
# only consider selected items
items = set(items) & set(self.selected_items)
if items:
for item in items:
if item in self.selected_items:
self._deselect(item)
self.selection_changed()
def select_item(self, item, multi=False):
if multi:
if item in self.selected_items:
self._deselect(item)
else:
controller = self.set_controller(item)
if not controller:
return
self.selected_items.append(item)
item.node.select(silently=True)
else:
for i in self.selected_items:
if i == item:
continue
self.remove_controller(i)
i.canvas._cv_tree.deselect(i.node)
if item in self.selected_items:
self.selected_items = [item]
elif self.set_controller(item):
self.selected_items = [item]
item.node.select(silently=True)
self.selection_changed()
def on_select(self, widget):
if self.canvas == widget:
return
if self.canvas is not None:
self._reset_cursor()
self.release(self.canvas)
if isinstance(widget, Canvas):
self.canvas = widget
self._cursor = widget["cursor"]
self._set_cursor()
self.initialize_canvas()
else:
if self.canvas is None:
return
self.release(self.canvas)
self.canvas = None
def release(self, canvas):
if canvas is None or not getattr(canvas, "_cv_initialized", False):
return
self._clear_selection()
def on_layout_change(self):
prev_data = {item: item._coord_restore for item in self.selected_items}
data = {item: item.coords() for item in self.selected_items}
for item in self.selected_items:
item._coord_restore = item.coords()
self.studio.new_action(Action(
lambda _: self.restore_layouts(prev_data),
lambda _: self.restore_layouts(data)
))
def restore_layouts(self, data):
for item in data:
item.coords(*data[item])
if item._controller:
item._controller.update()
def on_item_added(self, item):
item._coord_restore = item.coords()
def on_items_modified(self, items):
for item in items:
item.node.widget_modified(item)
def on_widget_delete(self, widget):
if isinstance(widget, Canvas):
if widget in self.items:
self.items.remove(widget)
def propagate_move(self, delta_x, delta_y, source=None):
for item in self.selected_items:
if item != source:
item._controller.on_move(delta_x, delta_y, True)
|
the-stack_0_4021 | import numpy
from ..tractography import Tractography
from . import tract_operations
from ..tensor import scalar_measures
try:
from collections import OrderedDict
except ImportError: # Python 2.6 fix
from ordereddict import OrderedDict
def compute_all_measures(tractography, desired_keys_list, scalars=None, resolution=None):
unordered_results = dict()
if ('number of tracts' in desired_keys_list):
unordered_results['number of tracts'] = tract_operations.tract_count(
tractography.tracts())
if ('length mean (mm)' in desired_keys_list) or ('length std (mm^2)' in desired_keys_list):
lengths = numpy.empty(len(tractography.tracts()))
for i, one_tract in enumerate(tractography.tracts()):
lengths[i] = tract_operations.tract_length(one_tract)
unordered_results['length mean (mm)'] = lengths.mean()
unordered_results['length std (mm^2)'] = lengths.std()
if ('tract volume' in desired_keys_list) and (resolution is not None):
resolution = float(resolution)
voxels = tract_operations.voxelized_tract(tractography, resolution)
neighbors = numpy.array([
[0, 1, 0],
[0, -1, 0],
[1, 0, 0],
[-1, 0, 0],
[0, 0, 1],
[0, 0, -1]
])
dilated_voxels = set()
dilated_voxels.update(voxels)
eroded_voxels = set()
for voxel in voxels:
            neighbors_list = list(zip(*(neighbors + voxel).T))  # materialized: reused for the intersection below
dilated_voxels.update(neighbors_list)
if len(voxels.intersection(neighbors_list)) == len(neighbors):
eroded_voxels.add(voxel)
# print len(dilated_voxels), len(voxels), len(eroded_voxels)
approx_voxels = (len(dilated_voxels) - len(eroded_voxels)) / 2.
approx_volume = approx_voxels * (resolution ** 3)
unordered_results['tract volume'] = approx_volume
if ('per tract distance weighted mean %s' in desired_keys_list ) or \
('per tract distance weighted std %s' in desired_keys_list):
mean_keys_list = list()
std_keys_list = list()
for scalar in scalars:
mean_key = 'per tract distance weighted mean %s' % scalar
std_key = 'per tract distance weighted std %s' % scalar
mean_keys_list.append(mean_key)
std_keys_list.append(std_key)
            scalar_data = tractography.tracts_data()[scalar]  # per-tract values for this scalar ('scalars' is the list being iterated)
weighted_scalars = numpy.empty((len(tractography.tracts()), 2))
for line_index, t_data in enumerate(tractography.tracts()):
tdiff = numpy.sqrt((numpy.diff(t_data, axis=0) ** 2).sum(-1))
length = tdiff.sum()
                values = scalar_data[line_index][1:].squeeze()
average = numpy.average(values, weights=tdiff)
weighted_scalars[line_index, 0] = average
weighted_scalars[line_index, 1] = length
mean = numpy.average(
weighted_scalars[:, 0], weights=weighted_scalars[:, 1])
std = numpy.average(
(weighted_scalars[:, 0] - mean) ** 2, weights=weighted_scalars[:, 1])
unordered_results[mean_key] = mean
unordered_results[std_key] = std
mii = desired_keys_list.index('per tract distance weighted mean %s')
desired_keys_list[mii:mii + 1] = mean_keys_list
sii = desired_keys_list.index('per tract distance weighted std %s')
desired_keys_list[sii:sii + 1] = std_keys_list
# Make Ordered Dictionary
ordered_dict = OrderedDict()
for key in desired_keys_list:
ordered_dict[key] = unordered_results[key]
return ordered_dict
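# Usage sketch (hypothetical variable names; `tract` is assumed to be a loaded
# Tractography instance): the measure keys above are requested by name, e.g.
#
#   measures = compute_all_measures(
#       tract,
#       ['number of tracts', 'length mean (mm)', 'length std (mm^2)', 'tract volume'],
#       resolution=1.0,
#   )
#
# 'tract volume' is approximated from the voxelized tract as
# (len(dilated_voxels) - len(eroded_voxels)) / 2 * resolution**3, i.e. a
# mid-surface count of the 6-connected dilation/erosion computed above.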
def tract_expand_tensor_metrics(tractography, new_scalar_data_flat):
    # new_scalar_data_flat: per-point scalar values (e.g. FA) for all original
    # tracts, concatenated into a single flat array in tract order
from os import path
from scipy import ndimage
from numpy import linalg
quantity_name = "tensor1_FA"
start = 0
new_scalar_data = []
for tract in tractography.original_tracts():
new_scalar_data.append(
new_scalar_data_flat[start: start + len(tract)].copy()
)
start += len(tract)
tractography.original_tracts_data()[quantity_name] = new_scalar_data
return Tractography(
tractography.original_tracts(), tractography.original_tracts_data(),
**tractography.extra_args
)
def decorate_tract_with_measures(tractography, tensor_name):
ot = tractography.original_tracts_data()
all_tensors = ot[tensor_name]
fa_fiber_list = list()
md_fiber_list = list()
ax_fiber_list = list()
rd_fiber_list = list()
ga_fiber_list = list()
for one_fiber in all_tensors:
fa_by_point = numpy.ndarray((len(one_fiber), 1), dtype=numpy.float32)
md_by_point = numpy.ndarray((len(one_fiber), 1), dtype=numpy.float32)
ax_by_point = numpy.ndarray((len(one_fiber), 1), dtype=numpy.float32)
rd_by_point = numpy.ndarray((len(one_fiber), 1), dtype=numpy.float32)
ga_by_point = numpy.ndarray((len(one_fiber), 1), dtype=numpy.float32)
index = 0
for one_tensor_values in one_fiber:
one_tensor = numpy.reshape(one_tensor_values, (3, 3))
_, eigenvals, _ = numpy.linalg.svd(one_tensor)
fa_by_point[index] = scalar_measures.fractional_anisotropy_from_eigenvalues(eigenvals)
md_by_point[index] = scalar_measures.mean_diffusivity(eigenvals)
ax_by_point[index] = scalar_measures.axial_diffusivity(eigenvals)
rd_by_point[index] = scalar_measures.radial_diffusivity(eigenvals)
ga_by_point[index] = scalar_measures.geodesic_anisotropy(eigenvals)
index = index + 1
fa_fiber_list.append(fa_by_point)
md_fiber_list.append(md_by_point)
ax_fiber_list.append(ax_by_point)
rd_fiber_list.append(rd_by_point)
ga_fiber_list.append(ga_by_point)
tractography.original_tracts_data()['FA_' + tensor_name] = fa_fiber_list
tractography.original_tracts_data()['MD_' + tensor_name] = md_fiber_list
tractography.original_tracts_data()['AX_' + tensor_name] = ax_fiber_list
tractography.original_tracts_data()['RD_' + tensor_name] = rd_fiber_list
tractography.original_tracts_data()['GA_' + tensor_name] = ga_fiber_list
return Tractography(
tractography.original_tracts(), tractography.original_tracts_data(),
**tractography.extra_args)
|
the-stack_0_4023 | import datetime
import os
from pyhmy.logging import ControlledLogger
_config = {
"AMT_PER_TXN": [1e-9, 1e-9], # The random range for each transaction in the transaction-generation
"NUM_SRC_ACC": 32, # The number of possible source accounts for all transactions, higher = more tps
"NUM_SNK_ACC": 1, # The number of possible destination / sink accounts for all transaction
"MAX_TXN_GEN_COUNT": None, # The upper bound of the number generated transaction, regardless of if `stop` is called
"ONLY_CROSS_SHARD": False, # If true, forces source and destination shards to be different
"ENFORCE_NONCE": False, # If true, will only generate transactions with a valid nonce
"ESTIMATED_GAS_PER_TXN": 1e-3, # The estimated gas, hardcoded
"INIT_SRC_ACC_BAL_PER_SHARD": 1, # The initial balance for EVERY source account
"TXN_WAIT_TO_CONFIRM": 60, # The timeout when a transaction is sent (only used in setup related functions)
"MAX_THREAD_COUNT": os.cpu_count()//2, # Max thread is recommended to be less than your v-core count
"ENDPOINTS": [ # Endpoints for all transaction, index i = shard i
"https://api.s0.pga.hmny.io/",
"https://api.s1.pga.hmny.io/",
"https://api.s2.pga.hmny.io/"
],
"SRC_SHARD_WEIGHTS": [ # Adjust the likelihood that shard i (i = index) gets chosen to be the source shard
        1, # Bigger number = higher likelihood of shard i being chosen
1, # 0 = 0% chance of being chosen
1
],
"SNK_SHARD_WEIGHTS": [ # Adjust the likelihood that shard i (i = index) gets chosen to be the source shard
1,
1,
1
],
"CHAIN_ID": "devnet", # The chain id for all transaction, should be devnet if not localnet.
"REFUND_ACCOUNT": "one1j9hwh7vqz94dsk06q4h9hznr4wlr3x5zup6wz3", # All refunds will be sent to this address
}
import_account_name_prefix = "_tx_gen_"
class Loggers:
"""
A collection of loggers for the transaction generator.
"""
general = ControlledLogger(f"general_log_{datetime.datetime.utcnow()}", "./logs/general")
transaction = ControlledLogger(f"transaction_log_{datetime.datetime.utcnow()}", "./logs/transaction")
balance = ControlledLogger(f"balance_log_{datetime.datetime.utcnow()}", "./logs/balance")
report = ControlledLogger(f"report_log_{datetime.datetime.utcnow()}", "./logs/report")
def start_new_loggers():
"""
    This reinitializes all loggers in `pdoc.Loggers`.
Note that new files will be generated in the process.
"""
start_new_general_logger()
start_new_transaction_logger()
start_new_balance_logger()
start_new_report_logger()
def start_new_general_logger():
"""
    This reinitializes the general logger in `pdoc.Loggers`.
Note that new files will be generated in the process.
"""
Loggers.general = ControlledLogger(f"general_log_{datetime.datetime.utcnow()}", "./logs/general")
def start_new_transaction_logger():
"""
    This reinitializes the transaction logger in `pdoc.Loggers`.
Note that new files will be generated in the process.
"""
Loggers.transaction = ControlledLogger(f"transaction_log_{datetime.datetime.utcnow()}", "./logs/transaction")
def start_new_balance_logger():
"""
    This reinitializes the balance logger in `pdoc.Loggers`.
Note that new files will be generated in the process.
"""
Loggers.balance = ControlledLogger(f"balance_log_{datetime.datetime.utcnow()}", "./logs/balance")
def start_new_report_logger():
"""
    This reinitializes the report logger in `pdoc.Loggers`.
Note that new files will be generated in the process.
"""
Loggers.report = ControlledLogger(f"report_log_{datetime.datetime.utcnow()}", "./logs/report")
def write_all_logs():
"""
Write all the logs in `pdoc.Loggers`
"""
Loggers.general.write()
Loggers.transaction.write()
Loggers.balance.write()
Loggers.report.write()
def _validate_config():
assert isinstance(_config, dict)
if not isinstance(_config["AMT_PER_TXN"], list) or len(_config["AMT_PER_TXN"]) != 2 \
or _config["AMT_PER_TXN"][0] < 0:
raise ValueError("Amount per transaction must be a range from 0")
if not isinstance(_config["NUM_SRC_ACC"], int) or _config["NUM_SRC_ACC"] < 0:
raise ValueError("Number of Source Accounts cannot be negative")
if not isinstance(_config["NUM_SNK_ACC"], int) or _config["NUM_SNK_ACC"] < 0:
raise ValueError("Number of Sink Accounts cannot be negative")
# TODO: check max generation count: input_config["MAX_TXN_GEN_COUNT"]
if not isinstance(_config["ONLY_CROSS_SHARD"], bool):
raise ValueError("Only Cross Shard must be a boolean")
if not isinstance(_config["ESTIMATED_GAS_PER_TXN"], (int, float)) or _config["ESTIMATED_GAS_PER_TXN"] < 0:
raise ValueError("Estimated gas per transaction cannot be negative")
if not isinstance(_config["INIT_SRC_ACC_BAL_PER_SHARD"], (int, float)) \
or _config["INIT_SRC_ACC_BAL_PER_SHARD"] < 0:
raise ValueError("Initial Source Account Balance per shard cannot be negative")
if not isinstance(_config["TXN_WAIT_TO_CONFIRM"], (int, float)) or _config["TXN_WAIT_TO_CONFIRM"] < 0:
raise ValueError("Transaction wait to confirm time cannot be negative")
if _config["MAX_THREAD_COUNT"] is not None and not (isinstance(_config["MAX_THREAD_COUNT"], int)
and _config["MAX_THREAD_COUNT"] > 0):
raise ValueError("Max Threads cannot be negative")
num_shards = len(_config["ENDPOINTS"])
# TODO: check endpoints are valid: input_config["ENDPOINTS"]
if not isinstance(_config["SRC_SHARD_WEIGHTS"], list) or len(_config["SRC_SHARD_WEIGHTS"]) != num_shards:
raise ValueError("Source Shard Weights must be list of len shards")
if not isinstance(_config["SNK_SHARD_WEIGHTS"], list) or len(_config["SNK_SHARD_WEIGHTS"]) != num_shards:
raise ValueError("Sink Shard Weights must be list of len shards")
# TODO: check chain_ID: input_config["CHAIN_ID"]
if not _config["REFUND_ACCOUNT"].startswith("one1"):
raise ValueError("Refund account must be valid account")
def set_config(input_config):
"""
Validate a config, `input_config`, and set the config for the transaction generator.
"""
input_keys = input_config.keys()
assert "ENDPOINTS" in input_keys, "Must specify endpoints"
assert isinstance(input_config["ENDPOINTS"], list)
if "SRC_SHARD_WEIGHTS" not in input_keys:
input_config["SRC_SHARD_WEIGHTS"] = [1] * len(input_config["ENDPOINTS"])
if "SNK_SHARD_WEIGHTS" not in input_keys:
input_config["SNK_SHARD_WEIGHTS"] = [1] * len(input_config["ENDPOINTS"])
_config.update(input_config)
_validate_config()
def get_config():
"""
:returns a COPY of the current config (to prevent accidental modification of config)
"""
return _config.copy()
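# Usage sketch (values are illustrative, not recommendations): a caller is
# expected to override the defaults above before generating transactions, e.g.
#
#   set_config({
#       "ENDPOINTS": ["https://api.s0.pga.hmny.io/", "https://api.s1.pga.hmny.io/"],
#       "NUM_SRC_ACC": 8,
#       "CHAIN_ID": "devnet",
#   })
#   start_new_loggers()
#   cfg = get_config()  # a copy; mutating it does not change the module config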
|
the-stack_0_4024 | import sys, os
import pandas as pd
from tqdm import tqdm
from Bio import SeqIO
import argparse
from multiprocessing import Pool
import pickle
import numpy as np
class QuadruplexFinder(object):
def __init__(self, fasta_file, output_path = '',
GC='G', L=7, q=4, nquadruplets=4, mdef=1, tetdef=1, len_bulge=1, max_bulge = 1,
bulge_priority=False, repeats=False, verbose=False, nthreads=1):
# parse arg
self.fasta_file = fasta_file
self.output_path = output_path
self.GC = GC
self.L = L
self.q = q
self.nquadruplets = nquadruplets
self.mdef = mdef
self.tetdef = tetdef
self.repeats = repeats
self.verbose = verbose
self.len_bulge = len_bulge
self.max_bulge = max_bulge
self.bulge_priority = bulge_priority
self.nthreads = nthreads
def load_fasta(self):
sequences = []
for record in SeqIO.parse(self.fasta_file, "fasta"):
sequences.append((record.seq, record.id))
return sequences
def find_quadruplets_without_bulges(self, fasta):
quadruplets = []
stack = [self.QuadrupletDetector(nuc) for nuc in fasta[:self.q]]
current_state = sum(stack)
if current_state >= self.q - self.tetdef:
quadruplets.append((0, self.q - current_state, self.q))
        for i in tqdm(range(self.q, len(fasta)), desc='Quadrupleting', disable = self.verbose):
stack.append(self.QuadrupletDetector(fasta[i]))
current_state = current_state + stack[-1] - stack.pop(0)
if current_state >= self.q - self.tetdef:
quadruplets.append((i-self.q+1, self.q - current_state, self.q))
return quadruplets
def QuadrupletDetector(self, quadr):
if self.repeats:
quadr = quadr.upper()
return 1 if quadr == self.GC.upper() else 0
def find_quadruplets_wrapper(self, data):
return self.find_quadruplets(**data)
def find_quadruplets(self, fasta, shift=0, tqdm_keep_silence=None):
'''
        bulge_stack - a list of numbers: for each G, how many non-G nucleotides came directly before it, plus 1
'''
tqdm_keep_silence = self.verbose if tqdm_keep_silence is None else tqdm_keep_silence
quadruplets = []
quadruplets_sequences = []
open_bulge = 0
bulge_stack = []
sequence_stack = ''
bulge_current_state = 0
bulge_current_num_state = 0
bulge_num_state = 0
n_bulges = 0
def add_bulge(nuc):
nonlocal open_bulge, bulge_current_num_state, bulge_current_state, bulge_num_state, bulge_stack, n_bulges
if self.QuadrupletDetector(nuc):
bulge_stack.append(open_bulge+1)
if len(bulge_stack) == 1:
bulge_stack[0] = 1
open_bulge = 0
if bulge_current_num_state < self.q:
bulge_current_num_state += 1
bulge_current_state += bulge_stack[-1]
if bulge_stack[-1] != 1:
n_bulges += 1
else:
bulge_num_state += 1
else:
open_bulge += 1
def remove_bulge(nuc):
nonlocal bulge_num_state, bulge_current_state, bulge_current_num_state, bulge_stack, n_bulges
if self.QuadrupletDetector(nuc):
if bulge_num_state > 0:
bulge_current_state += bulge_stack[bulge_current_num_state]
bulge_num_state -= 1
if bulge_stack[bulge_current_num_state] != 1:
n_bulges += 1
else:
bulge_current_num_state -= 1
bulge_current_state -= bulge_stack.pop(0)
if len(bulge_stack) > 0:
pop = bulge_stack.pop(0)
if pop != 1:
n_bulges -= 1
bulge_current_state -= pop - 1
bulge_stack.insert(0, 1)
for i, nuc in enumerate(fasta[:(self.q+self.len_bulge)]):
add_bulge(nuc)
sequence_stack = sequence_stack+nuc
if ((bulge_current_num_state == self.q) & (n_bulges <= self.max_bulge) &
(self.QuadrupletDetector(fasta[0])) & (self.QuadrupletDetector(fasta[bulge_current_state-1]))):
quadruplets.append((0+shift, n_bulges, bulge_current_state))
quadruplets_sequences.append(sequence_stack[:bulge_current_state])
stack = [self.QuadrupletDetector(nuc) for nuc in fasta[:self.q]]
current_state = sum(stack)
if ((current_state >= self.q - self.tetdef) & (current_state < self.q) &
(self.QuadrupletDetector(fasta[0])) & (self.QuadrupletDetector(fasta[self.q-1]))):
quadruplets.append((0+shift, self.q - current_state, self.q))
quadruplets_sequences.append(sequence_stack[:self.q])
for i in tqdm(range(self.q, len(fasta)), desc='Quadrupleting', disable = tqdm_keep_silence):
remove_bulge(fasta[i-self.q])
i_bulge = i + self.len_bulge
if i_bulge < len(fasta):
add_bulge(fasta[i_bulge])
sequence_stack = sequence_stack+fasta[i_bulge]
stack.append(self.QuadrupletDetector(fasta[i]))
current_state = current_state + stack[-1] - stack.pop(0)
sequence_stack = sequence_stack[1:]
if self.QuadrupletDetector(fasta[i-self.q+1]):
if ((bulge_current_num_state == self.q) & (n_bulges <= self.max_bulge) &
(self.QuadrupletDetector(fasta[i-self.q+bulge_current_state]))):
quadruplets.append((i-self.q+1+shift, n_bulges, bulge_current_state))
quadruplets_sequences.append(sequence_stack[:bulge_current_state])
if ((current_state >= self.q - self.tetdef) & (current_state < self.q) &
(self.QuadrupletDetector(fasta[i]))):
quadruplets.append((i-self.q+1+shift, self.q - current_state, self.q))
quadruplets_sequences.append(sequence_stack[:self.q])
return quadruplets, quadruplets_sequences
def find_quadruplets_in_parallel(self, fasta):
pool = Pool(processes=self.nthreads)
minimal_chunk_length = self.q + self.len_bulge
base_chunk_length = len(fasta) // self.nthreads
if base_chunk_length < minimal_chunk_length:
base_chunk_length = minimal_chunk_length
fasta_chunks_starts = list(range(0, len(fasta), base_chunk_length))
if len(fasta) % base_chunk_length != 0:
fasta_chunks_starts = fasta_chunks_starts[:-1]
fasta_chunks_ends = fasta_chunks_starts[1:] + [len(fasta)-minimal_chunk_length]
quadruplets_list = pool.map(self.find_quadruplets_wrapper, ({'fasta':fasta[start:(end+minimal_chunk_length)],
'shift':start,
'tqdm_keep_silence':None if silence_ind==len(fasta_chunks_starts)-1 else True}
for silence_ind, (start, end) in enumerate(zip(fasta_chunks_starts, fasta_chunks_ends))))
pool.close()
pool.join()
quadruplets = []
quadruplets_sequences = []
quadruplets_list_ = []
quadruplets_seq_list_ = []
for quad, quad_seq in quadruplets_list:
if len(quad) != 0:
quadruplets_list_.append(quad)
quadruplets_seq_list_.append(quad_seq)
del quadruplets_list
for quadruplet_now, quadruplet_next, quadruplet_seq_now, quadruplet_seq_next in zip(
quadruplets_list_[:-1], quadruplets_list_[1:],
quadruplets_seq_list_[:-1], quadruplets_seq_list_[1:]):
first_next_quad = quadruplet_next[0]
num_quad_now = -1
while (first_next_quad == quadruplet_now[num_quad_now]) or (first_next_quad[0] <= quadruplet_now[num_quad_now][0]):
num_quad_now -= 1
num_quad_now += 1
if num_quad_now != 0:
quadruplet_now = quadruplet_now[:num_quad_now]
quadruplet_seq_now = quadruplet_seq_now[:num_quad_now]
quadruplets.extend(quadruplet_now)
quadruplets_sequences.extend(quadruplet_seq_now)
quadruplets_sequences.extend(quadruplet_seq_next)
quadruplets.extend(quadruplet_next)
del quadruplets_list_
del quadruplets_seq_list_
return quadruplets, quadruplets_sequences
def find_quadruplexes_wrapper(self, data):
return self.find_quadruplexes(**data)
def find_quadruplexes(self, quadruplets, tqdm_keep_silence=None):
'''
quadruplex: [[Q1-Start, Q1-Defects, Q1-Length]]*self.nquadruplets
'''
tqdm_keep_silence = self.verbose if tqdm_keep_silence is None else tqdm_keep_silence
total_wrongs = 0 #number of the quadruplets with defect
wrongNum = 0
def check_conditions():
nonlocal total_wrongs, wrongNum
if i == 0:
total_wrongs = 0
wrongNum = 0
elif (quadruplets[k][0] - quadruplets[quadruplex_set[i-1]][0] <= quadruplets[quadruplex_set[i-1]][2]):
return 'too close'
elif (quadruplets[k][0] - (quadruplets[quadruplex_set[i-1]][0] + quadruplets[quadruplex_set[i-1]][2]) > self.L):
return 'too far'
if quadruplets[k][1] != 0:
wrongNum = i+1
total_wrongs += 1
if total_wrongs > self.mdef:
total_wrongs -= 1
return False
else:
return True
        def revert_wrongs():
            nonlocal total_wrongs, wrongNum
            if (i >= 0):
                if (quadruplets[quadruplex_set[i]][1] != 0):
                    total_wrongs -= 1
                    if wrongNum == i+1:
                        for j in range(i):
                            if quadruplets[quadruplex_set[j]][1] != 0:
                                wrongNum = j+1
                                break
                        if wrongNum == i+1:
                            wrongNum = 0
quadruplexes = []
quadruplex_set = list(range(-1, self.nquadruplets))
i = 0
k = quadruplex_set[i]
        with tqdm(desc='Quadruplexing', total=len(quadruplets), disable = tqdm_keep_silence) as pbar:
while i >= 0:
k = quadruplex_set[i]+1
if i == 0:
pbar.update(1)
if i == self.nquadruplets:
quadruplex = tuple([quadruplets[qu] for qu in quadruplex_set[:-1]] + [total_wrongs])
quadruplexes.append(list(quadruplex))
i -= 1
revert_wrongs()
elif k >= len(quadruplets) - self.nquadruplets + 1 + i:
i -= 1
revert_wrongs()
else:
status = check_conditions()
if status == True:
quadruplex_set[i] = k
i += 1
quadruplex_set[i] = quadruplex_set[i-1]
elif status == 'too far':
i -= 1
revert_wrongs()
else:
quadruplex_set[i] = k
pbar.update(len(quadruplets) - pbar.n)
return quadruplexes
def group_quadruplexes(self, quadruplexes):
groups = []
q1 = 0
q2 = 1
with tqdm(desc='Grouping', total=len(quadruplexes)-1, disable = self.verbose) as pbar:
while q1 < len(quadruplexes)-1:
while q2 < len(quadruplexes):
pbar.update(1)
                    tetrads_length_q1 = sum([quadruplexes[q1][i][2]+quadruplexes[q1][i][1] for i in range(self.nquadruplets)])
                    tetrads_length_q2 = sum([quadruplexes[q2][i][2]+quadruplexes[q2][i][1] for i in range(self.nquadruplets)])
                    general_length_q1 = quadruplexes[q1][self.nquadruplets - 1][0] + quadruplexes[q1][self.nquadruplets - 1][2] - 1 - quadruplexes[q1][0][0]
                    general_length_q2 = quadruplexes[q2][self.nquadruplets - 1][0] + quadruplexes[q2][self.nquadruplets - 1][2] - 1 - quadruplexes[q2][0][0]
                    if (quadruplexes[q2][0][0] > quadruplexes[q1][self.nquadruplets - 1][0] + quadruplexes[q1][self.nquadruplets - 1][2] - 1):
                        groups.append(quadruplexes[q1])
                        q1 = q2
                        if (q2 == len(quadruplexes)-1):
                            groups.append(quadruplexes[q2])
                            q1 = len(quadruplexes)
                    elif ((tetrads_length_q2 < tetrads_length_q1) & (not self.bulge_priority) or
                          (tetrads_length_q2 >= tetrads_length_q1) & (self.bulge_priority) or
                          (general_length_q2 < general_length_q1) & (not self.bulge_priority) or
                          (general_length_q2 < general_length_q1) & (self.bulge_priority)):
q1 = q2
if (q2 == len(quadruplexes)-1):
groups.append(quadruplexes[q2])
q1 = len(quadruplexes)
elif (q2 == len(quadruplexes)-1):
groups.append(quadruplexes[q1])
q1 = len(quadruplexes)
q2 += 1
return groups
def find_quadruplexes_in_parallel(self, quadruplets):
pool = Pool(processes=self.nthreads)
minimal_chunk_length = (self.q + self.len_bulge + self.L)*(self.nquadruplets)-self.L
if len(quadruplets) > self.nthreads:
base_chunk_length = len(quadruplets) // self.nthreads
else:
base_chunk_length = 1
quadruplets_chunks_starts = list(range(0, len(quadruplets), base_chunk_length))
if len(quadruplets) % base_chunk_length != 0:
quadruplets_chunks_starts = quadruplets_chunks_starts[:-1]
quadruplets_chunks_ends = []
for start_tmp in quadruplets_chunks_starts[1:]:
end_ind = start_tmp
end_val = quadruplets[start_tmp][0]
tmp_end_val = quadruplets[end_ind][0]
while (end_ind < len(quadruplets)) and (tmp_end_val - end_val <= minimal_chunk_length):
end_ind += 1
tmp_end_val = quadruplets[end_ind][0]
quadruplets_chunks_ends.append(end_ind-1)
quadruplets_chunks_ends.append(len(quadruplets))
quadruplexes_list = pool.map(self.find_quadruplexes_wrapper, ({'quadruplets':quadruplets[start:end],
'tqdm_keep_silence':None if silence_ind==len(quadruplets_chunks_starts)-1 else True}
for silence_ind, (start, end) in enumerate(zip(quadruplets_chunks_starts, quadruplets_chunks_ends))))
pool.close()
pool.join()
quadruplexes_list_ = []
for quad in quadruplexes_list:
if len(quad) != 0:
quadruplexes_list_.append(quad)
del quadruplexes_list
quadruplexes = []
for quadruplex_now, quadruplex_next in zip(quadruplexes_list_[:-1], quadruplexes_list_[1:]):
first_next_quad = quadruplex_next[0]
num_quad_now = -1
while first_next_quad[0][0] <= quadruplex_now[num_quad_now][0][0]:
if (first_next_quad == quadruplex_now[num_quad_now]) or (first_next_quad[0][0] <= quadruplex_now[num_quad_now][0][0]):
num_quad_now -= 1
num_quad_now += 1
if num_quad_now != 0:
quadruplex_now = quadruplex_now[:num_quad_now]
quadruplexes.extend(quadruplex_now)
try:
quadruplexes.extend(quadruplex_next)
except:
pass
del quadruplexes_list_
return quadruplexes
def group_to_ranges(self, groups, fasta_id):
ranges = []
for group in tqdm(groups, desc='Converting to ranges', disable = self.verbose):
start = group[0][0]
end = group[self.nquadruplets-1][0]+group[self.nquadruplets-1][2]-1
ranges.append((fasta_id, start, end))
return ranges
def prepare_quadruplets_toprint(self, quadruplets, quadruplets_sequences, tqdm_keep_silence=None):
tqdm_keep_silence = self.verbose if tqdm_keep_silence is None else tqdm_keep_silence
quadruplets_toprint = []
for quad, seq in tqdm(list(zip(quadruplets, quadruplets_sequences)),
desc='Postprocessing quadruplets', disable=tqdm_keep_silence):
quad = list(quad)
quad.append(seq)
quadruplets_toprint.append(quad)
return quadruplets_toprint
def prepare_quadruplexes_toprint(self, quadruplexes, fasta_di, tqdm_keep_silence=None):
tqdm_keep_silence = self.verbose if tqdm_keep_silence is None else tqdm_keep_silence
quadruplexes_toprint = []
[(shift, fasta)] = fasta_di.items()
for quadruplex in tqdm(quadruplexes, desc='Postprocessing quadruplexes', disable=tqdm_keep_silence):
seq = ''
quadruplex_toprint = []
for qu1, qu2 in zip(quadruplex[:-2], quadruplex[1:-1]):
seq = seq + fasta[(qu1[0]-shift):(qu1[0]+qu1[2]-shift)].upper()+\
fasta[(qu1[0]+qu1[2]-shift):(qu2[0]-shift)].lower()
quadruplex_toprint.extend(list(qu1))
quadruplex_toprint.extend(list(qu2))
quadruplex_toprint.append(quadruplex[-1])
seq = seq+fasta[(qu2[0]-shift):(qu2[0]+qu2[2]-shift)].upper()
quadruplex_toprint.append(seq)
quadruplexes_toprint.append(tuple(quadruplex_toprint))
return quadruplexes_toprint
def prepare_groups_toprint(self, groups, fasta, tqdm_keep_silence=None):
tqdm_keep_silence = self.verbose if tqdm_keep_silence is None else tqdm_keep_silence
groups_toprint = []
for group in tqdm(groups, desc='Postprocessing groups', disable=tqdm_keep_silence):
seq = ''
group_toprint = []
for qu1, qu2 in zip(group[:-2], group[1:-1]):
seq = seq + fasta[qu1[0]:(qu1[0]+qu1[2])].upper()+fasta[(qu1[0]+qu1[2]):qu2[0]].lower()
group_toprint.extend(qu1)
group_toprint.extend(qu2)
group_toprint.append(group[-1])
seq = seq+fasta[qu2[0]:(qu2[0]+qu2[2])].upper()
group_toprint.append(seq)
groups_toprint.append(tuple(group_toprint))
return groups_toprint
def split_args_for_prepare_quadruplexes_toprint(self, quadruplexes, fasta, n):
quad_len = len(quadruplexes) // n
minimal_chunk_length = (self.q + self.len_bulge + self.L)*(self.nquadruplets)
parts = list(range(0, len(quadruplexes), quad_len))[1:]
if len(quadruplexes) % n != 0:
parts = parts[:-1]
quadruplexes_parts = [quadruplexes[start:end] for start, end in zip(
[0]+parts, parts+[len(quadruplexes)])]
fasta_parts_coordinates = [(quadruplex_set[0][0][0], quadruplex_set[-1][-2][0]+minimal_chunk_length)
for quadruplex_set in quadruplexes_parts]
fasta_parts = [{start:fasta[start:end]} for start, end in fasta_parts_coordinates]
show_status = [True]*(len(quadruplexes_parts)-1)+[None]
return list(zip(quadruplexes_parts, fasta_parts, show_status))
def postprocess_wrapper(self, kwargs):
'''
args: {'args': args, 'func':function}
'''
return kwargs['func'](*kwargs['args'])
def save_tables(self, df, columns, fasta_id, which_table):
n = len(columns)
with open('{}/{}_{}'.format(self.output_path, fasta_id, which_table), 'w') as f:
if n == 4:
f.write('{}\t{}\t{}\t{}\n'.format(columns[0], columns[1], columns[2], columns[3]))
for row in df:
f.write('{}\t{}\t{}\t{}\n'.format(row[0], row[1], row[2], row[3]))
elif n == 14:
f.write('{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n'.format(
columns[0], columns[1], columns[2], columns[3],
columns[4], columns[5], columns[6], columns[7],
columns[8], columns[9], columns[10], columns[11],
columns[12], columns[13]))
for row in df:
f.write('{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n'.format(
row[0], row[1], row[2], row[3],
row[4], row[5], row[6], row[7],
row[8], row[9], row[10], row[11],
row[12], row[13]))
elif n==3:
for row in df:
f.write('{}\t{}\t{}\n'.format(row[0], row[1], row[2]))
def save_tables_wrapper(self, args):
return self.save_tables(*args)
def description_file(self):
all_members = self.__dict__.keys()
        columns_description = '''\n\nColumns Description
    Quadruplets File:
        Start: an index of a quadruplet beginning
        Number of Defects: a total number of the mismatches or a number of bulges
        Length: a length of a quadruplet
        Sequence: quadruplet sequences if not suppressed
    Quadruplex & Group Files:
        Qi-Start: an index of a quadruplet beginning
        Qi-Defects: a total number of the mismatches or a number of bulges
        Qi-Length: a length of a quadruplet
        Defective: a number of quadruplets with defects (mismatches or bulges)
        Sequence: a sequence of a quadruplex with loops if not suppressed, quadruplets are uppercase
    Ranges File: bed-format
        Fasta ID: fasta id
        Start: an index of a quadruplex beginning
        End: an index of the end of the quadruplex
    '''
        description_file = 'Parameters\n'+'\n'.join(['\t%s = %s'%(item, self.__dict__[item]) for item in all_members if (not item.startswith("_")) & ('nquadruplets' not in item)]) + columns_description
with open('%s/description.txt'%(self.output_path), 'w') as f:
f.write(description_file)
def run(self, print_sequences=True, output_type = [4]):
print('Loading %s'%self.fasta_file)
sequences = self.load_fasta()
print('This fasta file contains %d sequences.'%len(sequences))
for fasta, fasta_id in sequences:
quadruplets, quadruplets_sequences, quadruplexes, groups, ranges, quadruplets_toprint, quadruplexes_toprint, groups_toprint = [[]]*8
print('Processing %s:'%fasta_id)
if (self.nthreads == 1) or (self.len_bulge == 0) or (self.max_bulge == 0):
quadruplets, quadruplets_sequences = self.find_quadruplets(fasta) if ((self.len_bulge > 0) or (self.max_bulge != 0)) else self.find_quadruplets_without_bulges(fasta)
else:
quadruplets, quadruplets_sequences = self.find_quadruplets_in_parallel(fasta)
if output_type[-1] > 0:
if (self.nthreads == 1):
quadruplexes = self.find_quadruplexes(quadruplets)
else:
quadruplexes = self.find_quadruplexes_in_parallel(quadruplets)
if output_type[-1] > 1:
groups = self.group_quadruplexes(quadruplexes)
if output_type[-1] > 2:
ranges = self.group_to_ranges(groups, fasta_id)
columns_set1 = ['Start', 'Number of Defects', 'Length']
columns_set2 = []
[columns_set2.extend(['Q%d-Start'%i, 'Q%d-Defects'%i, 'Q%d-Length'%i]) for i in range(1, self.nquadruplets+1)]
columns_set2.extend(['Defective'])
if output_type[0] < 3:
k = sum([0 if ind_num in output_type else 1 for ind_num in [0, 2]])
if print_sequences:
if self.nthreads > 1:
pool = Pool(processes=self.nthreads)
args_quadruplexes = []
n = 1
if 1 in output_type:
if self.nthreads - k > 2:
n = self.nthreads
args_quadruplexes = self.split_args_for_prepare_quadruplexes_toprint(quadruplexes, fasta, n)
elif self.nthreads - k > 1:
n = self.nthreads - k
args_quadruplexes = self.split_args_for_prepare_quadruplexes_toprint(quadruplexes, fasta, n)
else:
n = 1
args_quadruplexes = {0:fasta}
args_dict = {0: [(quadruplets, quadruplets_sequences)],
1: args_quadruplexes,
2: [(groups, fasta)]}
func_dict = {0: [self.prepare_quadruplets_toprint],
1: [self.prepare_quadruplexes_toprint]*n,
2: [self.prepare_groups_toprint]}
results_inds_dict = {0: [0],
1: [1]*n,
2: [2]
}
args_all = []
functions = []
results_inds = []
for output_ind in output_type:
if output_ind < 3:
functions.extend(func_dict[output_ind])
args_all.extend(args_dict[output_ind])
results_inds.extend(results_inds_dict[output_ind])
uni, inds, counts = np.unique(results_inds, return_index=True, return_counts=True)
slice_dict = {}
for un, ind, count in zip(uni, inds, counts):
slice_dict[un] = (ind,(ind+count))
results = pool.map(self.postprocess_wrapper, ({'func':func, 'args': args}
for func, args in zip(functions, args_all)))
if 0 in slice_dict.keys():
[quadruplets_toprint] = results[slice_dict[0][0]:slice_dict[0][1]]
if 1 in slice_dict.keys():
quadruplexes_toprint_all = results[slice_dict[1][0]:slice_dict[1][1]]
quadruplexes_toprint = []
[quadruplexes_toprint.extend(quad) for quad in quadruplexes_toprint_all];
if 2 in slice_dict.keys():
[groups_toprint] = results[slice_dict[2][0]:slice_dict[2][1]]
pool.close()
pool.join()
else:
if 0 in output_type:
quadruplets_toprint = self.prepare_quadruplets_toprint(quadruplets, quadruplets_sequences)
if 1 in output_type:
quadruplexes_toprint = self.prepare_quadruplexes_toprint(quadruplexes, {0:fasta})
if 2 in output_type:
groups_toprint = self.prepare_groups_toprint(groups, fasta)
columns_set1.extend(['Sequence'])
columns_set2.extend(['Sequence'])
else:
if 0 in output_type:
quadruplets_toprint = quadruplets
if 1 in output_type:
quadruplexes_toprint = []
for quadruplex in quadruplexes:
seq = ''
quadruplex_toprint = []
for qu1 in quadruplex[:-1]:
quadruplex_toprint.extend(qu1)
quadruplex_toprint.append(quadruplex[-1])
quadruplexes_toprint.append(tuple(quadruplex_toprint))
if 2 in output_type:
groups_toprint = []
for group in groups:
seq = ''
group_toprint = []
for qu1 in group[:-1]:
group_toprint.extend(qu1)
group_toprint.append(group[-1])
groups_toprint.append(tuple(group_toprint))
for i in tqdm(range(1), desc='Saving tables', disable=self.verbose):
pool = Pool(processes=self.nthreads)
data = np.array([(quadruplets_toprint, columns_set1, fasta_id, 'quadruplets.csv'),
(quadruplexes_toprint, columns_set2, fasta_id, 'quadruplexes.csv'),
(groups_toprint, columns_set2, fasta_id, 'groups.csv'),
(ranges, ['Fasta ID', 'Start', 'End'], fasta_id, 'ranges.bed')])[output_type]
pool.map(self.save_tables_wrapper, data)
pool.close()
pool.join()
self.description_file()
print('Finished')
# Disable prints
def blockPrint():
sys.stdout = open(os.devnull, 'w')
def main():
parser = argparse.ArgumentParser(prog='ImGQFinder', description='The tool for finding G-, C- quadruplexes. The output positions are represented in a zero based counting.')
parser.add_argument('-i', '--input', help='Assembly scaffolds/contigs or full genomes, required.', required=True)
parser.add_argument('-o', '--output', default='', help='Name/path of a folder for output files. Saves to the current folder if not provided.')
parser.add_argument('-GC', default='G', help='Quad type, G- or C-. By default, G.')
parser.add_argument('-L', default=7, help='Maximum loop length. By default, 7.')
parser.add_argument('-q', default=4, help="The length of a quadruplet.") # the length of a tetrad
parser.add_argument('-nq', '--nquadruplets', default=4, help=argparse.SUPPRESS) # 'Number of quadruplets. By default, 4.'
parser.add_argument('-mdef', default=1, help='Allowed number of defective tetrads. By default, 1.')
parser.add_argument('-bulgelen', default=1, help='Total length of bulges in one quadruplet. By default, 1.')
parser.add_argument('-maxbulge', default=1, help='Maximum number of bulges per quadruplet. By default, 1.')
parser.add_argument('-bp', '--bulge_priority', action='store_true', help='By default, quadrouplexes with shorter bulge or without them are preferable while grouping. This behaviour can be changed with this parameter.')
parser.add_argument('-tetdef', default=1, help='Allowed number of defective nucleotides in tetrads. By default, 1.')
parser.add_argument('-ns', '--no-sequences', action='store_true', help='Not to include sequences to the output.')
parser.add_argument('-r', '--repeats', action='store_true', help='To include soft-masked genome areas. By default, not included.')
    parser.add_argument('-v', '--verbose', action='store_true', help='Show the status of processing or not. By default, prints stage info.')
parser.add_argument('--nthreads', default=1, help='Number of kernels to use.')
parser.add_argument('--output_type', default=['all'], nargs='+', help='List the numbers of file types you need the tool to generate or write all if you want all files. All - is the default. 0 - quadruplets, 1 - quadruplexes, 2 - groups, 3 - ranges. For example, --output_type 1 2 will generate only 2 files: quadruplexes and groups.')
args = parser.parse_args()
if not os.path.isdir(args.output):
os.mkdir(args.output)
#args.output = os.path.dirname(args.output)
if args.verbose:
blockPrint()
args.output_type = [atype.lower() for atype in args.output_type]
output_type_dict = {'all':4, '0':0, '1':1, '2':2, '3':3, '4':4}
output_type_dict_report = {'all':'all', '0':'quadruplets', '1':'quadruplexes', '2':'groups', '3':'ranges', '4':'all'}
output_type = sorted([output_type_dict[user_type] for user_type in args.output_type if user_type in list(output_type_dict.keys())])
output_type_report = [output_type_dict_report[user_type] for user_type in args.output_type if user_type in list(output_type_dict.keys())]
if output_type[-1] == 4:
output_type = [0, 1, 2, 3]
output_type_report = [output_type_dict_report[user_type] for user_type in ['0', '1', '2', '3']]
if 'all' in output_type:
output_type = ['quadruplets', 'quadruplexes', 'groups', 'ranges']
if len(output_type) == 1:
print('The ImGQfinder will generate %s file.'%(output_type_report[0]))
else:
print('The ImGQfinder will generate %s and %s files.'%(', '.join(output_type_report[:-1]), output_type_report[-1]))
if int(args.mdef) < int(args.tetdef):
        print('Warning: The allowed number of defective nucleotides per tetrad (-tetdef) is greater than the allowed number of defective tetrads (-mdef).', end='\n\n')
finder = QuadruplexFinder(args.input, output_path = args.output, verbose = args.verbose, repeats=args.repeats,
GC=args.GC, L=int(args.L) , q=int(args.q), nquadruplets=int(args.nquadruplets), mdef=int(args.mdef), tetdef=int(args.tetdef),
len_bulge = int(args.bulgelen), max_bulge = int(args.maxbulge), bulge_priority = args.bulge_priority, nthreads = int(args.nthreads))
finder.run(print_sequences= not args.no_sequences, output_type = output_type)
if __name__ == '__main__':
main()
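# Example invocation (sketch; the script filename is whatever this module is
# saved as, and the flags are those defined in main() above):
#
#   python imgqfinder.py -i genome.fasta -o results --nthreads 4 --output_type 1 2
#
# would write <fasta_id>_quadruplexes.csv and <fasta_id>_groups.csv for each
# record in genome.fasta into ./results, plus a description.txt listing the
# parameters used.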
|
the-stack_0_4025 | import time
import wiotp.sdk.device
def myCommandCallback(cmd):
print("Command received: %s" % cmd.data)
# Configure
myConfig = {
"identity": {
"orgId": "j4fntv",
"typeId": "Cambien",
"deviceId": "cambien001"
},
"auth": {
"token": "12345678"
},
"options": {
"domain": "internetofthings.ibmcloud.com",
# "logLevel": "error|warning|info|debug",
"mqtt": {
"port": 8883,
"transport": "websockets",
"cleanStart": True,
"sessionExpiry": 3600,
"keepAlive": 60,
# "caFile": "/path/to/certificateAuthorityFile.pem"
}
}
}
client = wiotp.sdk.device.DeviceClient(config=myConfig, logHandlers=None)
client.commandCallback = myCommandCallback
# Connect
client.connect()
# # Send Data
# for x in range(2, 30, 3):
# myData={'name' : 'foo', 'cpu' : x, 'mem' : 50}
# client.publishEvent(eventId="status", msgFormat="json", data=myData, qos=2, onPublish=None)
# Connect and send datapoint(s) into the cloud
# deviceCli.connect()
for x in range(0, 1000):
data = {"simpledev": "ok", "x": x}
def myOnPublishCallback():
print("Confirmed event %s received by IoTF\n" % x)
success = client.publishEvent("test", "json", data, qos=0, onPublish=myOnPublishCallback)
if not success:
print("Not connected to IoTF")
time.sleep(500)
# Disconnect
client.disconnect()
the-stack_0_4026 | from django.db import models
from django.urls import reverse
from django.utils import timezone
from django_countries import fields
from core.dj_import import get_user_model
from core.models import IntegerRangeField
from core.utils import getReverseWithUpdatedQuery
from models.models import Model
from brands.models import Brand
from categories.models import Category
from searching.models import Search
from ebayinfo import EBAY_SHIPPING_CHOICES
from ebayinfo.models import CategoryHierarchy, Market, EbayCategory
User = get_user_model()
# Item IDs are unique across all eBay sites
# http://developer.ebay.com/devzone/shopping/docs/callref/getsingleitem.html
class ItemFound(models.Model):
EBAY_SHIPPING_CHOICES = EBAY_SHIPPING_CHOICES
iItemNumb = models.BigIntegerField( 'item number',
primary_key = True )
cTitle = models.CharField( 'item title', max_length = 80 )
cSubTitle = models.CharField( 'item sub title', max_length = 80,
null = True, blank = True )
cLocation = models.CharField( 'location',
max_length = 58 )
cCountry = fields.CountryField( "country" )
cMarket = models.CharField( 'market Global ID',
max_length = 14 )
iEbaySiteID = models.ForeignKey( Market, on_delete=models.CASCADE,
verbose_name = 'ebay site ID (PK)', db_index=True )
cGalleryURL = models.CharField( 'gallery pic URL',
max_length = 88, null = True, blank = True )
cEbayItemURL = models.CharField( 'ebay item URL',
max_length =188 )
tTimeBeg = models.DateTimeField( 'beginning date/time',null=True )
tTimeEnd = models.DateTimeField( 'ending date/time', null=True )
bBestOfferable = models.BooleanField(
'best offer enabled?', default = False )
bBuyItNowable = models.BooleanField(
'buy it now enabled?',default = False )
cListingType = models.CharField(
'listing type', max_length = 15 )
lLocalCurrency = models.CharField(
'local currency', max_length = 3, default = 'USD' )
lCurrentPrice = models.DecimalField( 'current price',
max_digits = 10, decimal_places = 2,
null = True, blank = True ) # use DecimalField not MoneyField
dCurrentPrice = models.DecimalField( # form was throwing nonsense error
'current price (converted to USD)', # for MoneyField
max_digits=10, decimal_places=2, # but not for
db_index = False ) # DecimalField
lBuyItNowPrice = models.DecimalField( 'buy it now price',
max_digits = 10, decimal_places = 2,
null = True, blank = True )
dBuyItNowPrice = models.DecimalField(
'buy it now price (converted to USD)',
max_digits=10, decimal_places=2,
null = True, blank = True )
iShippingType = models.PositiveSmallIntegerField(
'shipping type',
choices = EBAY_SHIPPING_CHOICES,
null = True ) # data prior to Feb 2019 d/n have
iHandlingTime = models.PositiveSmallIntegerField(
                        'handling time',
null = True, blank = True ) # optional
iCategoryID = models.ForeignKey( EbayCategory,
on_delete=models.DO_NOTHING,
verbose_name = 'primary category ID',
related_name = 'ebay_primary_category',
null = True, blank = True ) # ebay sends but
# EbayCategory table is extremely slow
# CategoryHierarchy has relevant info & is much faster
# but need to get this ebay category ID from API
# to look up CategoryHierarchy
cCategory = models.CharField( 'primary category',
max_length = 48 )
iCatHeirarchy = models.ForeignKey( CategoryHierarchy,
on_delete=models.DO_NOTHING,
verbose_name = 'category hierarchy (primary)',
related_name = 'primary_category',
null = True, blank = True )
i2ndCategoryID = models.ForeignKey( EbayCategory, # optional
on_delete=models.CASCADE,
verbose_name = 'secondary category ID (optional)',
related_name = 'ebay_secondary_category',
null = True, blank = True ) # ebay sends but
# EbayCategory table is extremely slow
# CategoryHierarchy has relevant info & is much faster
# but need to get this ebay category ID from API
# to look up CategoryHierarchy
c2ndCategory = models.CharField( 'secondary category (optional)',
max_length = 48, null = True, blank = True )
i2ndCatHeirarchy= models.ForeignKey( CategoryHierarchy,
on_delete=models.DO_NOTHING,
verbose_name = 'category hierarchy (secondary)',
related_name = 'secondary_category',
null = True, blank = True )
# condition is optional but may become required in the future
# https://developer.ebay.com/DevZone/guides/ebayfeatures/Development/Desc-ItemCondition.html
iConditionID = models.IntegerField( 'condition ID',
null = True, blank = True )
cCondition = models.CharField( 'condition display name',
max_length = 28, null = True, blank = True )
cSellingState = models.CharField( 'selling state',
max_length = 18 )
bCancelledItem = models.BooleanField(
'Invalid Or Non-Existent Item Number',default = False )
tCreate = models.DateTimeField( 'created on',
db_index = True, auto_now_add= True )
tRetrieved = models.DateTimeField( 'retrieved info',
null = True, blank = True )
tRetrieveFinal = models.DateTimeField( 'retrieved info after end',
null = True, blank = True )
def __str__(self):
return self.cTitle
def get_absolute_url(self):
#
return reverse(
'finders:detail', kwargs = { 'pk': self.pk } )
class Meta:
verbose_name_plural = 'itemsfound'
db_table = verbose_name_plural
class UserItemFound(models.Model):
iItemNumb = models.ForeignKey( ItemFound, on_delete=models.CASCADE )
iHitStars = IntegerRangeField(
'hit stars', null = True, db_index = True,
min_value = 0, max_value = 1000, default = 0 )
bGetResult = models.BooleanField( 'get results?',
default = False )
tLook4Hits = models.DateTimeField(
'assessed interest date/time', null = True )
iSearch = models.ForeignKey( Search,
on_delete=models.CASCADE,
verbose_name = 'Search that found this item' )
iModel = models.ForeignKey( Model, on_delete=models.CASCADE,
null = True, blank = True,
verbose_name = 'Model Name/Number',
help_text = 'You can display models for a particular '
'brand by changing to that brand (just below), '
'hit save, then edit again' )
iBrand = models.ForeignKey( Brand, on_delete=models.CASCADE,
null = True, blank = True, verbose_name = 'Brand' )
iCategory = models.ForeignKey( Category, on_delete=models.CASCADE,
null = True, blank = True,
verbose_name = 'Category' )
cWhereCategory = models.CharField( 'where category was found',
default = 'title',
max_length = 10 ) # title heirarchy1 heirarchy2
bListExclude = models.BooleanField( 'exclude from listing?',
default = False )
# tGotPics = models.DateTimeField( 'got pictures',
# null = True, blank = True )
bAuction = models.BooleanField(
'Auction or Auction with Buy It Now',default = False )
iUser = models.ForeignKey( User, on_delete=models.CASCADE,
verbose_name = 'Owner')
#
# yes the col below repeats the col in ItemFound, the normalized place
# but after writing the query to get the open auctions for a user, and
# after considering that query's load if this project is a success,
# it is clear that de-normalization is the way to go!!!
# besides, time end is fixed when a seller puts up an item for auction
# this is not a variable that will ever be maintained, once set, it is
# absolutely fixed - seller's only option is to cancel and resubmit
# 2019-08-27
#
tTimeEnd = models.DateTimeField( 'ending date/time',
null=True, db_index = True )
#
tCreate = models.DateTimeField( 'created on',
default=timezone.now, db_index = True )
tModify = models.DateTimeField( 'updated on', auto_now = True )
tRetrieved = models.DateTimeField( 'retrieved info',
null = True, blank = True )
tRetrieveFinal = models.DateTimeField( 'retrieved info after end',
null = True, blank = True )
tPutInKeepers = models.DateTimeField( 'User has Keeper row',
null = True, blank = True )
def __str__(self):
return self.iItemNumb.cTitle
class Meta:
verbose_name_plural = 'useritemsfound'
db_table = verbose_name_plural
unique_together = (
'iItemNumb', 'iUser', 'iModel', 'iBrand', 'iCategory' )
def get_absolute_url(self):
#
oUserFinder = UserFinder.objects.get(
iItemNumb_id = self.iItemNumb_id,
iUser = self.iUser )
#
return getReverseWithUpdatedQuery(
'finders:detail',
kwargs = { 'pk': oUserFinder.pk, 'tModify': timezone.now() } )
def get_edit_url(self):
#
return reverse(
'finders:edit', kwargs = { 'pk': self.pk } )
class UserFinder(models.Model):
#
    # Not normalized, but this allows fast selection of finders for a user:
    # one row per item, so this table can drive the finder listing for a user
    # all by itself (see the illustrative helper sketched after this class).
#
iItemNumb = models.ForeignKey( ItemFound, on_delete=models.CASCADE,
verbose_name = 'eBay Item Number' )
iHitStars = IntegerRangeField(
'hit stars (max for item)', null = True,
min_value = 0, max_value = 1000, default = 0 )
cTitle = models.CharField( 'item title',
max_length = 80, null=True )
cMarket = models.CharField( 'market Global ID',
max_length = 14, null=True )
cListingType = models.CharField( 'listing type',
max_length = 15, null=True )
tTimeEnd = models.DateTimeField( 'ending date/time', null=True )
iUser = models.ForeignKey( User, on_delete=models.CASCADE,
verbose_name = 'Owner' )
bGetResult = models.BooleanField( 'get results?',
null = True, default = False )
bListExclude = models.BooleanField( 'exclude from listing?',
null = True, default = False )
iMaxModel = models.IntegerField( 'model hit with most stars',
null = True, default = False )
cLookFor = models.TextField( 'dummy for search compatibility',
null=True, blank = True )
#
def __str__(self):
# return '%s - %s' % ( self.iItemNumb, self.iUser )
return self.cTitle
class Meta:
verbose_name_plural = 'userfinders'
db_table = verbose_name_plural
unique_together = ('iItemNumb', 'iUser' )
def get_absolute_url(self):
#
return reverse(
'finders:detail', kwargs = { 'pk': self.pk } )
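
# Hedged illustration, not part of the original source: a minimal sketch of
# how the denormalized UserFinder table above can drive the per-user finder
# listing in a single query, with no join against UserItemFound.  The helper
# name and the ordering chosen here are assumptions made for this example only.
def getOpenFindersForUser( oUser ):
    return ( UserFinder.objects
                .filter( iUser = oUser )
                .exclude( bListExclude = True )
                .order_by( 'tTimeEnd' ) )
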
'''
truncate table userfinders ;
insert into userfinders ( "iItemNumb_id", "iUser_id" )
select distinct "iItemNumb_id", "iUser_id" from useritemsfound uif
where exists
( select 1 from itemsfound if
where
if."iItemNumb" = uif."iItemNumb_id" and
if."tRetrieved" is null ) ;
update userfinders uf
set "iHitStars" =
( select max( uif."iHitStars" )
from useritemsfound uif
where
uif."iItemNumb_id" = uf."iItemNumb_id" and
uif."iUser_id" = uf."iUser_id" ) ;
delete from userfinders where "iHitStars" = 0 ;
update userfinders uf
set "iMaxModel" =
( select distinct on (uif."iHitStars") uif."iModel_id"
from useritemsfound uif
where
uif."iItemNumb_id" = uf."iItemNumb_id" and
uif."iUser_id" = uf."iUser_id" and
uif."iHitStars" = uf."iHitStars" ) ;
update userfinders uf
set "bGetResult" = true where exists
( select 1 from useritemsfound uif
where
uif."iItemNumb_id" = uf."iItemNumb_id" and
uif."iUser_id" = uf."iUser_id" and
uif."bGetResult" = true ) ;
temporary 2019-12-26
update useritemsfound uif
set "bGetResult" = true where exists
( select 1 from userfinders uf
where
uf."iItemNumb_id" = uif."iItemNumb_id" and
uf."iUser_id" = uif."iUser_id" and
uf."bGetResult" = true ) ;
update userfinders uf
set "bListExclude" = false ;
update userfinders uf
set "bListExclude" = true where exists
( select 1 from useritemsfound uif
where
uif."iItemNumb_id" = uf."iItemNumb_id" and
uif."iUser_id" = uf."iUser_id" and
uif."bListExclude" = true ) ;
temporary 2019-12-26:
update useritemsfound uif
set "bListExclude" = true where exists
( select 1 from userfinders uf
where
uf."iItemNumb_id" = uif."iItemNumb_id" and
uf."iUser_id" = uif."iUser_id" and
uf."bListExclude" = true ) ;
update userfinders uf
set "cTitle" = if."cTitle",
"cMarket" = if."cMarket",
"cListingType" = if."cListingType",
"tTimeEnd" = if."tTimeEnd"
from itemsfound if
where if."iItemNumb" = uf."iItemNumb_id" ;
double check for strays:
select count(*) from userfinders where "tTimeEnd" is null ;
class ItemFoundTemp(models.Model):
iItemNumb = models.ForeignKey( ItemFound, on_delete=models.CASCADE )
iHitStars = IntegerRangeField(
'hit stars', null = True,
min_value = 0, max_value = 1000, default = 0 )
iSearch = models.ForeignKey( Search, on_delete=models.CASCADE,
verbose_name = 'Search that first found this item' )
iModel = models.ForeignKey( Model, on_delete=models.CASCADE,
null = True )
iBrand = models.ForeignKey( Brand, on_delete=models.CASCADE,
null = True )
iCategory = models.ForeignKey( Category, on_delete=models.CASCADE,
null = True )
iStarsModel = IntegerRangeField( null = True,
min_value = 0, max_value = 10, default = 1 )
iStarsBrand = IntegerRangeField( null = True,
min_value = 0, max_value = 10, default = 1 )
iStarsCategory = IntegerRangeField( null = True,
min_value = 0, max_value = 10, default = 1 )
cFoundModel = models.CharField(
'model name/number found in auction title',
max_length = 48, null = True )
iFoundModelLen = models.PositiveSmallIntegerField( default = 0 )
bModelKeyWords = models.BooleanField(
'model has key words and they are in auction title?',
null = True, default = False )
cModelAlphaNum = models.CharField(
'model name/number alpha num only',
max_length = 48, null = True )
cTitleLeftOver = models.CharField( 'item title less model match',
max_length = 80, null = True )
cWhereCategory = models.CharField( 'where category was found',
default = 'title',
max_length = 10 ) # title heirarchy1 heirarchy2
bIncludeThis = models.BooleanField(
'include this hit when populating table?',
default = True )
def __str__(self):
#
lOut = [ 'ItemFound - %s' % self.iItemNumb ]
#
for s in vars( self ):
if s.startswith( '_' ): continue
lOut.append( ' %s: %s' % ( s, self.__dict__[s] ) )
#
return '\n'.join( lOut )
class Meta:
verbose_name_plural = 'itemsfoundtemp'
db_table = verbose_name_plural
'''
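
# Hedged sketch, not part of the original models: the final sanity query in
# the maintenance notes above looks for stray rows whose auction end time was
# never filled in.  The same check expressed through the ORM; the helper name
# is an assumption made for this example only.
def countStrayUserFinders():
    return UserFinder.objects.filter( tTimeEnd__isnull = True ).count()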
|
the-stack_0_4027 | import argparse
import os
import shutil
import sys
from pathlib import Path
from typing import Any, Callable, Dict, Optional, Union
import pkg_resources
import yaml
from apple.util.path import mkdir
def initial_config_file(filename: Union[str, Path]) -> str:
return pkg_resources.resource_string(__name__, f"initial-{filename}").decode()
def create_default_apple_config(root_path: Path, filenames=["config.yaml"]) -> None:
for filename in filenames:
default_config_file_data = initial_config_file(filename)
path = config_path_for_filename(root_path, filename)
mkdir(path.parent)
with open(path, "w") as f:
f.write(default_config_file_data)
def config_path_for_filename(root_path: Path, filename: Union[str, Path]) -> Path:
path_filename = Path(filename)
if path_filename.is_absolute():
return path_filename
return root_path / "config" / filename
def save_config(root_path: Path, filename: Union[str, Path], config_data: Any):
path = config_path_for_filename(root_path, filename)
with open(path.with_suffix("." + str(os.getpid())), "w") as f:
yaml.safe_dump(config_data, f)
shutil.move(str(path.with_suffix("." + str(os.getpid()))), path)
def load_config(
root_path: Path,
filename: Union[str, Path],
sub_config: Optional[str] = None,
exit_on_error=True,
) -> Dict:
path = config_path_for_filename(root_path, filename)
if not path.is_file():
if not exit_on_error:
raise ValueError("Config not found")
print(f"can't find {path}")
print("** please run `apple init` to migrate or create new config files **")
# TODO: fix this hack
sys.exit(-1)
    with open(path, "r") as f:
        r = yaml.safe_load(f)
if sub_config is not None:
r = r.get(sub_config)
return r
def load_config_cli(root_path: Path, filename: str, sub_config: Optional[str] = None) -> Dict:
"""
Loads configuration from the specified filename, in the config directory,
and then overrides any properties using the passed in command line arguments.
Nested properties in the config file can be used in the command line with ".",
for example --farmer_peer.host. Does not support lists.
"""
config = load_config(root_path, filename, sub_config)
flattened_props = flatten_properties(config)
parser = argparse.ArgumentParser()
for prop_name, value in flattened_props.items():
if type(value) is list:
continue
prop_type: Callable = str2bool if type(value) is bool else type(value) # type: ignore
parser.add_argument(f"--{prop_name}", type=prop_type, dest=prop_name)
for key, value in vars(parser.parse_args()).items():
if value is not None:
flattened_props[key] = value
return unflatten_properties(flattened_props)
def flatten_properties(config: Dict) -> Dict:
properties = {}
for key, value in config.items():
if type(value) is dict:
for key_2, value_2 in flatten_properties(value).items():
properties[key + "." + key_2] = value_2
else:
properties[key] = value
return properties
def unflatten_properties(config: Dict) -> Dict:
properties: Dict = {}
for key, value in config.items():
if "." in key:
add_property(properties, key, value)
else:
properties[key] = value
return properties
def add_property(d: Dict, partial_key: str, value: Any):
key_1, key_2 = partial_key.split(".", maxsplit=1)
if key_1 not in d:
d[key_1] = {}
if "." in key_2:
add_property(d[key_1], key_2, value)
else:
d[key_1][key_2] = value
def str2bool(v: Union[str, bool]) -> bool:
# Source from https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "True", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "False", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
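

# --- Hedged usage sketch, not part of the original module -------------------
# A minimal round trip through the dotted-property helpers above, mirroring
# what load_config_cli does with a command-line override.  The sample config
# dict and the simulated --farmer_peer.port value are made up for illustration.
if __name__ == "__main__":
    sample = {"farmer_peer": {"host": "localhost", "port": 8447}, "log_level": "INFO"}
    flat = flatten_properties(sample)
    assert flat["farmer_peer.host"] == "localhost"
    flat["farmer_peer.port"] = 9000  # what a `--farmer_peer.port 9000` override would do
    nested = unflatten_properties(flat)
    assert nested == {"farmer_peer": {"host": "localhost", "port": 9000}, "log_level": "INFO"}
    assert str2bool("yes") and not str2bool("0")
    print("dotted-property helpers round-trip OK")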
|
the-stack_0_4033 | # -*- test-case-name: twisted.test.test_adbapi -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An asynchronous mapping to U{DB-API
2.0<http://www.python.org/topics/database/DatabaseAPI-2.0.html>}.
"""
import sys
from twisted.internet import threads
from twisted.python import reflect, log, compat
class ConnectionLost(Exception):
"""
This exception means that a db connection has been lost. Client code may
try again.
"""
class Connection:
"""
A wrapper for a DB-API connection instance.
The wrapper passes almost everything to the wrapped connection and so has
the same API. However, the L{Connection} knows about its pool and also
    handles reconnecting should the real connection die.
"""
def __init__(self, pool):
self._pool = pool
self._connection = None
self.reconnect()
def close(self):
# The way adbapi works right now means that closing a connection is
# a really bad thing as it leaves a dead connection associated with
# a thread in the thread pool.
# Really, I think closing a pooled connection should return it to the
# pool but that's handled by the runWithConnection method already so,
# rather than upsetting anyone by raising an exception, let's ignore
# the request
pass
def rollback(self):
if not self._pool.reconnect:
self._connection.rollback()
return
try:
self._connection.rollback()
curs = self._connection.cursor()
curs.execute(self._pool.good_sql)
curs.close()
self._connection.commit()
return
except:
log.err(None, "Rollback failed")
self._pool.disconnect(self._connection)
if self._pool.noisy:
log.msg("Connection lost.")
raise ConnectionLost()
def reconnect(self):
if self._connection is not None:
self._pool.disconnect(self._connection)
self._connection = self._pool.connect()
def __getattr__(self, name):
return getattr(self._connection, name)
class Transaction:
"""
A lightweight wrapper for a DB-API 'cursor' object.
Relays attribute access to the DB cursor. That is, you can call
C{execute()}, C{fetchall()}, etc., and they will be called on the
underlying DB-API cursor object. Attributes will also be retrieved from
there.
"""
_cursor = None
def __init__(self, pool, connection):
self._pool = pool
self._connection = connection
self.reopen()
def close(self):
_cursor = self._cursor
self._cursor = None
_cursor.close()
def reopen(self):
if self._cursor is not None:
self.close()
try:
self._cursor = self._connection.cursor()
return
except:
if not self._pool.reconnect:
raise
else:
log.err(None, "Cursor creation failed")
if self._pool.noisy:
log.msg('Connection lost, reconnecting')
self.reconnect()
self._cursor = self._connection.cursor()
def reconnect(self):
self._connection.reconnect()
self._cursor = None
def __getattr__(self, name):
return getattr(self._cursor, name)
class ConnectionPool:
"""
Represent a pool of connections to a DB-API 2.0 compliant database.
@ivar connectionFactory: factory for connections, default to L{Connection}.
@type connectionFactory: any callable.
@ivar transactionFactory: factory for transactions, default to
L{Transaction}.
@type transactionFactory: any callable
@ivar shutdownID: L{None} or a handle on the shutdown event trigger which
will be used to stop the connection pool workers when the reactor
stops.
@ivar _reactor: The reactor which will be used to schedule startup and
shutdown events.
@type _reactor: L{IReactorCore} provider
"""
CP_ARGS = "min max name noisy openfun reconnect good_sql".split()
noisy = False # If true, generate informational log messages
min = 3 # Minimum number of connections in pool
max = 5 # Maximum number of connections in pool
name = None # Name to assign to thread pool for debugging
openfun = None # A function to call on new connections
reconnect = False # Reconnect when connections fail
good_sql = 'select 1' # A query which should always succeed
running = False # True when the pool is operating
connectionFactory = Connection
transactionFactory = Transaction
# Initialize this to None so it's available in close() even if start()
# never runs.
shutdownID = None
def __init__(self, dbapiName, *connargs, **connkw):
"""
Create a new L{ConnectionPool}.
Any positional or keyword arguments other than those documented here
are passed to the DB-API object when connecting. Use these arguments to
pass database names, usernames, passwords, etc.
@param dbapiName: an import string to use to obtain a DB-API compatible
module (e.g. C{'pyPgSQL.PgSQL'})
@param cp_min: the minimum number of connections in pool (default 3)
@param cp_max: the maximum number of connections in pool (default 5)
@param cp_noisy: generate informational log messages during operation
(default C{False})
@param cp_openfun: a callback invoked after every C{connect()} on the
underlying DB-API object. The callback is passed a new DB-API
connection object. This callback can setup per-connection state
such as charset, timezone, etc.
@param cp_reconnect: detect connections which have failed and reconnect
(default C{False}). Failed connections may result in
L{ConnectionLost} exceptions, which indicate the query may need to
be re-sent.
@param cp_good_sql: an sql query which should always succeed and change
no state (default C{'select 1'})
@param cp_reactor: use this reactor instead of the global reactor
(added in Twisted 10.2).
@type cp_reactor: L{IReactorCore} provider
"""
self.dbapiName = dbapiName
self.dbapi = reflect.namedModule(dbapiName)
if getattr(self.dbapi, 'apilevel', None) != '2.0':
log.msg('DB API module not DB API 2.0 compliant.')
if getattr(self.dbapi, 'threadsafety', 0) < 1:
log.msg('DB API module not sufficiently thread-safe.')
reactor = connkw.pop('cp_reactor', None)
if reactor is None:
from twisted.internet import reactor
self._reactor = reactor
self.connargs = connargs
self.connkw = connkw
for arg in self.CP_ARGS:
cpArg = 'cp_%s' % (arg,)
if cpArg in connkw:
setattr(self, arg, connkw[cpArg])
del connkw[cpArg]
self.min = min(self.min, self.max)
self.max = max(self.min, self.max)
# All connections, hashed on thread id
self.connections = {}
# These are optional so import them here
from twisted.python import threadpool
from twisted.python import threadable
self.threadID = threadable.getThreadID
self.threadpool = threadpool.ThreadPool(self.min, self.max)
self.startID = self._reactor.callWhenRunning(self._start)
def _start(self):
self.startID = None
return self.start()
def start(self):
"""
Start the connection pool.
If you are using the reactor normally, this function does *not*
need to be called.
"""
if not self.running:
self.threadpool.start()
self.shutdownID = self._reactor.addSystemEventTrigger(
'during', 'shutdown', self.finalClose)
self.running = True
def runWithConnection(self, func, *args, **kw):
"""
Execute a function with a database connection and return the result.
@param func: A callable object of one argument which will be executed
in a thread with a connection from the pool. It will be passed as
its first argument a L{Connection} instance (whose interface is
mostly identical to that of a connection object for your DB-API
module of choice), and its results will be returned as a
L{Deferred}. If the method raises an exception the transaction will
be rolled back. Otherwise, the transaction will be committed.
B{Note} that this function is B{not} run in the main thread: it
must be threadsafe.
@param *args: positional arguments to be passed to func
@param **kw: keyword arguments to be passed to func
@return: a L{Deferred} which will fire the return value of
C{func(Transaction(...), *args, **kw)}, or a
L{twisted.python.failure.Failure}.
"""
return threads.deferToThreadPool(self._reactor, self.threadpool,
self._runWithConnection,
func, *args, **kw)
def _runWithConnection(self, func, *args, **kw):
conn = self.connectionFactory(self)
try:
result = func(conn, *args, **kw)
conn.commit()
return result
except:
excType, excValue, excTraceback = sys.exc_info()
try:
conn.rollback()
except:
log.err(None, "Rollback failed")
compat.reraise(excValue, excTraceback)
def runInteraction(self, interaction, *args, **kw):
"""
Interact with the database and return the result.
The 'interaction' is a callable object which will be executed in a
thread using a pooled connection. It will be passed an L{Transaction}
object as an argument (whose interface is identical to that of the
database cursor for your DB-API module of choice), and its results will
be returned as a L{Deferred}. If running the method raises an
exception, the transaction will be rolled back. If the method returns a
value, the transaction will be committed.
NOTE that the function you pass is *not* run in the main thread: you
may have to worry about thread-safety in the function you pass to this
if it tries to use non-local objects.
@param interaction: a callable object whose first argument is an
L{adbapi.Transaction}.
@param *args: additional positional arguments to be passed to
interaction
@param **kw: keyword arguments to be passed to interaction
@return: a Deferred which will fire the return value of
C{interaction(Transaction(...), *args, **kw)}, or a
L{twisted.python.failure.Failure}.
"""
return threads.deferToThreadPool(self._reactor, self.threadpool,
self._runInteraction,
interaction, *args, **kw)
def runQuery(self, *args, **kw):
"""
Execute an SQL query and return the result.
A DB-API cursor which will be invoked with C{cursor.execute(*args,
**kw)}. The exact nature of the arguments will depend on the specific
        flavor of DB-API being used, but the first argument in C{*args} will be
        an SQL statement. The result of a subsequent C{cursor.fetchall()} will be
fired to the L{Deferred} which is returned. If either the 'execute' or
'fetchall' methods raise an exception, the transaction will be rolled
back and a L{twisted.python.failure.Failure} returned.
The C{*args} and C{**kw} arguments will be passed to the DB-API
cursor's 'execute' method.
@return: a L{Deferred} which will fire the return value of a DB-API
cursor's 'fetchall' method, or a L{twisted.python.failure.Failure}.
"""
return self.runInteraction(self._runQuery, *args, **kw)
def runOperation(self, *args, **kw):
"""
Execute an SQL query and return L{None}.
A DB-API cursor which will be invoked with C{cursor.execute(*args,
**kw)}. The exact nature of the arguments will depend on the specific
flavor of DB-API being used, but the first argument in C{*args} will be
an SQL statement. This method will not attempt to fetch any results
from the query and is thus suitable for C{INSERT}, C{DELETE}, and other
SQL statements which do not return values. If the 'execute' method
raises an exception, the transaction will be rolled back and a
L{Failure} returned.
        The C{*args} and C{**kw} arguments will be passed to the DB-API cursor's
'execute' method.
@return: a L{Deferred} which will fire with L{None} or a
L{twisted.python.failure.Failure}.
"""
return self.runInteraction(self._runOperation, *args, **kw)
def close(self):
"""
Close all pool connections and shutdown the pool.
"""
if self.shutdownID:
self._reactor.removeSystemEventTrigger(self.shutdownID)
self.shutdownID = None
if self.startID:
self._reactor.removeSystemEventTrigger(self.startID)
self.startID = None
self.finalClose()
def finalClose(self):
"""
This should only be called by the shutdown trigger.
"""
self.shutdownID = None
self.threadpool.stop()
self.running = False
for conn in self.connections.values():
self._close(conn)
self.connections.clear()
def connect(self):
"""
Return a database connection when one becomes available.
This method blocks and should be run in a thread from the internal
threadpool. Don't call this method directly from non-threaded code.
Using this method outside the external threadpool may exceed the
maximum number of connections in the pool.
@return: a database connection from the pool.
"""
tid = self.threadID()
conn = self.connections.get(tid)
if conn is None:
if self.noisy:
log.msg('adbapi connecting: %s' % (self.dbapiName,))
conn = self.dbapi.connect(*self.connargs, **self.connkw)
if self.openfun is not None:
self.openfun(conn)
self.connections[tid] = conn
return conn
def disconnect(self, conn):
"""
Disconnect a database connection associated with this pool.
Note: This function should only be used by the same thread which called
L{ConnectionPool.connect}. As with C{connect}, this function is not
used in normal non-threaded Twisted code.
"""
tid = self.threadID()
if conn is not self.connections.get(tid):
raise Exception("wrong connection for thread")
if conn is not None:
self._close(conn)
del self.connections[tid]
def _close(self, conn):
if self.noisy:
log.msg('adbapi closing: %s' % (self.dbapiName,))
try:
conn.close()
except:
log.err(None, "Connection close failed")
def _runInteraction(self, interaction, *args, **kw):
conn = self.connectionFactory(self)
trans = self.transactionFactory(self, conn)
try:
result = interaction(trans, *args, **kw)
trans.close()
conn.commit()
return result
except:
excType, excValue, excTraceback = sys.exc_info()
try:
conn.rollback()
except:
log.err(None, "Rollback failed")
compat.reraise(excValue, excTraceback)
def _runQuery(self, trans, *args, **kw):
trans.execute(*args, **kw)
return trans.fetchall()
def _runOperation(self, trans, *args, **kw):
trans.execute(*args, **kw)
def __getstate__(self):
return {'dbapiName': self.dbapiName,
'min': self.min,
'max': self.max,
'noisy': self.noisy,
'reconnect': self.reconnect,
'good_sql': self.good_sql,
'connargs': self.connargs,
'connkw': self.connkw}
def __setstate__(self, state):
self.__dict__ = state
self.__init__(self.dbapiName, *self.connargs, **self.connkw)
__all__ = ['Transaction', 'ConnectionPool']
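

# --- Hedged usage sketch, not part of the original module -------------------
# A minimal, self-contained illustration of the ConnectionPool API defined
# above, using the standard library sqlite3 driver.  A single worker thread
# (cp_min=cp_max=1) keeps the in-memory database on one connection; the table
# and column names are made up for demonstration only.
if __name__ == '__main__':
    from twisted.internet import reactor

    log.startLogging(sys.stdout)
    dbpool = ConnectionPool('sqlite3', ':memory:', cp_min=1, cp_max=1,
                            check_same_thread=False)

    def populate(txn):
        # Runs in the pool's worker thread with a Transaction (cursor wrapper).
        txn.execute('CREATE TABLE demo (n INTEGER)')
        txn.execute('INSERT INTO demo VALUES (1), (2), (3)')

    d = dbpool.runInteraction(populate)
    d.addCallback(lambda _: dbpool.runQuery('SELECT n FROM demo'))
    d.addCallback(lambda rows: log.msg('fetched %r' % (rows,)))
    d.addErrback(log.err)
    d.addBoth(lambda _: reactor.stop())
    reactor.run()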
|
the-stack_0_4034 | # coding:utf-8
rarity_id = {1: 549,
2: 551,
3: 553,
4: 555,
5: 557,
6: 559,
7: 561,
8: 563,
'1': 549,
'2': 551,
'3': 553,
'4': 555,
'5': 557,
'6': 559,
'7': 561}
rarity_w = {1: '白',
2: '绿',
3: '蓝',
4: '紫',
7: '以太化',
'1': '白',
'2': '绿',
'3': '蓝',
'4': '紫',
'7': '以太化'}
color_id = {'bg': 6,
'bg2': 7,
'fg': 3,
'fg_d': 2,
'fg_dd': 1,
'fg_l': 4,
'fg_ll': 5,
'shadow': 8,
'canUse': 43}
pic_font = {'default': 'HarmonyOS_Sans_SC_Regular.ttf',
'FFXIV': 'FFXIV_Lodestone_SSF.ttf',
'Eorzea': 'Eorzea.ttf',
'mono': 'JetBrainsMono-Regular.ttf'}
pic_res = {'Item_Cover': 'Item_icon_cover_128.png',
'hq': 'Hq.png',
'meld': 'Melding.png',
'meld+': 'MeldingEX.png'}
food_param = {"BaseParam[0]": ("IsRelative[0]", "Value[0]", "Max[0]", "Value{HQ}[0]", "Max{HQ}[0]"),
"BaseParam[1]": ("IsRelative[1]", "Value[1]", "Max[1]", "Value{HQ}[1]", "Max{HQ}[1]"),
"BaseParam[2]": ("IsRelative[2]", "Value[2]", "Max[2]", "Value{HQ}[2]", "Max{HQ}[2]"),
}
trans_true_false = ['IsUnique', 'IsUntradable', 'IsIndisposable', 'IsDyeable', 'IsCrestWorthy', 'IsCollectable',
'IsAdvancedMeldingPermitted', 'IsPvP', 'IsGlamourous', 'CanBeHq', 'Lot', 'AlwaysCollectable',
]
trans_base_param = {"BaseParam[0]": "BaseParamValue[0]",
"BaseParam[1]": "BaseParamValue[1]",
"BaseParam[2]": "BaseParamValue[2]",
"BaseParam[3]": "BaseParamValue[3]",
"BaseParam[4]": "BaseParamValue[4]",
"BaseParam[5]": "BaseParamValue[5]",
"BaseParam{Special}[0]": "BaseParamValue{Special}[0]",
"BaseParam{Special}[1]": "BaseParamValue{Special}[1]",
"BaseParam{Special}[2]": "BaseParamValue{Special}[2]",
"BaseParam{Special}[3]": "BaseParamValue{Special}[3]",
"BaseParam{Special}[4]": "BaseParamValue{Special}[4]",
"BaseParam{Special}[5]": "BaseParamValue{Special}[5]"}
trans_base_special = {"ItemSpecialBonus": "ItemSpecialBonus{Param}", }
trans_base_param_key = ["BaseParam[0]",
"BaseParam[1]",
"BaseParam[2]",
"BaseParam[3]",
"BaseParam[4]",
"BaseParam[5]"]
trans_base_hq = ["BaseParam{Special}[0]",
"BaseParam{Special}[1]",
"BaseParam{Special}[2]",
"BaseParam{Special}[3]",
"BaseParam{Special}[4]",
"BaseParam{Special}[5]"]
Class_Job = {0: {"Name": "冒险者", "Abbreviation": "ADV", },
1: {"Name": "剑术师", "Abbreviation": "GLA", },
2: {"Name": "格斗家", "Abbreviation": "PGL", },
3: {"Name": "斧术师", "Abbreviation": "MRD", },
4: {"Name": "枪术师", "Abbreviation": "LNC", },
5: {"Name": "弓箭手", "Abbreviation": "ARC", },
6: {"Name": "幻术师", "Abbreviation": "CNJ", },
7: {"Name": "咒术师", "Abbreviation": "THM", },
8: {"Name": "刻木匠", "Abbreviation": "CRP", },
9: {"Name": "锻铁匠", "Abbreviation": "BSM", },
10: {"Name": "铸甲匠", "Abbreviation": "ARM", },
11: {"Name": "雕金匠", "Abbreviation": "GSM", },
12: {"Name": "制革匠", "Abbreviation": "LTW", },
13: {"Name": "裁衣匠", "Abbreviation": "WVR", },
14: {"Name": "炼金术士", "Abbreviation": "ALC", },
15: {"Name": "烹调师", "Abbreviation": "CUL", },
16: {"Name": "采矿工", "Abbreviation": "MIN", },
17: {"Name": "园艺工", "Abbreviation": "BTN", },
18: {"Name": "捕鱼人", "Abbreviation": "FSH", },
19: {"Name": "骑士", "Abbreviation": "PLD", },
20: {"Name": "武僧", "Abbreviation": "MNK", },
21: {"Name": "战士", "Abbreviation": "WAR", },
22: {"Name": "龙骑士", "Abbreviation": "DRG", },
23: {"Name": "吟游诗人", "Abbreviation": "BRD", },
24: {"Name": "白魔法师", "Abbreviation": "WHM", },
25: {"Name": "黑魔法师", "Abbreviation": "BLM", },
26: {"Name": "秘术师", "Abbreviation": "ACN", },
27: {"Name": "召唤师", "Abbreviation": "SMN", },
28: {"Name": "学者", "Abbreviation": "SCH", },
29: {"Name": "双剑师", "Abbreviation": "ROG", },
30: {"Name": "忍者", "Abbreviation": "NIN", },
31: {"Name": "机工士", "Abbreviation": "MCH", },
32: {"Name": "暗黑骑士", "Abbreviation": "DRK", },
33: {"Name": "占星术士", "Abbreviation": "AST", },
34: {"Name": "武士", "Abbreviation": "SAM", },
35: {"Name": "赤魔法师", "Abbreviation": "RDM", },
36: {"Name": "青魔法师", "Abbreviation": "BLU", },
37: {"Name": "绝枪战士", "Abbreviation": "GNB", },
38: {"Name": "舞者", "Abbreviation": "DNC", },
39: {"Name": "", "Abbreviation": "None", },
40: {"Name": "", "Abbreviation": "None", }}
Class_Job_Category = {0: {"Name": "", },
1: {"Name": "所有职业", },
2: {"Name": "剑术师", },
3: {"Name": "格斗家", },
4: {"Name": "斧术师", },
5: {"Name": "枪术师", },
6: {"Name": "弓箭手", },
7: {"Name": "幻术师", },
8: {"Name": "咒术师", },
9: {"Name": "刻木匠", },
10: {"Name": "锻铁匠", },
11: {"Name": "铸甲匠", },
12: {"Name": "雕金匠", },
13: {"Name": "制革匠", },
14: {"Name": "裁衣匠", },
15: {"Name": "炼金术士", },
16: {"Name": "烹调师", },
17: {"Name": "采矿工", },
18: {"Name": "园艺工", },
19: {"Name": "捕鱼人", },
20: {"Name": "骑士", },
21: {"Name": "武僧", },
22: {"Name": "战士", },
23: {"Name": "龙骑士", },
24: {"Name": "吟游诗人", },
25: {"Name": "白魔法师", },
26: {"Name": "黑魔法师", },
27: {"Name": "秘术师", },
28: {"Name": "召唤师", },
29: {"Name": "学者", },
30: {"Name": "战斗精英", },
31: {"Name": "魔法导师", },
32: {"Name": "大地使者", },
33: {"Name": "能工巧匠", },
34: {"Name": "战斗精英 魔法导师", },
35: {"Name": "能工巧匠 大地使者", },
36: {"Name": "剑术师之外的战斗精英", },
37: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 幻术师 咒术师 秘术师 骑士 战士 暗黑骑士", },
38: {"Name": "剑术师 骑士", },
39: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 骑士 战士 暗黑骑士", },
40: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 幻术师 咒术师 秘术师 武僧 战士 龙骑士 吟游诗人 忍者", },
41: {"Name": "格斗家 武僧", },
42: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 武僧 战士 龙骑士 吟游诗人 忍者", },
43: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 幻术师 咒术师 秘术师 骑士 武僧 战士 龙骑士 暗黑骑士", },
44: {"Name": "斧术师 战士", },
45: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 骑士 武僧 战士 龙骑士 暗黑骑士", },
46: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 幻术师 咒术师 秘术师 武僧 龙骑士 吟游诗人 忍者 机工士", },
47: {"Name": "枪术师 龙骑士", },
48: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 武僧 龙骑士 吟游诗人 忍者 机工士", },
49: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 幻术师 咒术师 秘术师 吟游诗人 黑魔法师 召唤师 机工士", },
50: {"Name": "弓箭手 吟游诗人", },
51: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 吟游诗人 机工士", },
52: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 幻术师 咒术师 秘术师 骑士 白魔法师 学者 占星术士", },
53: {"Name": "幻术师 白魔法师", },
54: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 幻术师 咒术师 秘术师 白魔法师 黑魔法师 召唤师 学者 占星术士", },
55: {"Name": "咒术师 黑魔法师", },
56: {"Name": "剑术师 幻术师 咒术师 骑士 白魔法师 黑魔法师", },
57: {"Name": "剑术师 咒术师 骑士 黑魔法师", },
58: {"Name": "剑术师 幻术师 骑士 白魔法师", },
59: {"Name": "剑术师 斧术师 骑士 战士 暗黑骑士 绝枪战士", },
60: {"Name": "剑术师 斧术师 枪术师 骑士 战士 龙骑士 暗黑骑士 绝枪战士", },
61: {"Name": "幻术师 咒术师 秘术师 白魔法师 学者 占星术士", },
62: {"Name": "幻术师 咒术师 秘术师 白魔法师 黑魔法师 召唤师 学者 占星术士", },
63: {"Name": "咒术师 秘术师 黑魔法师 召唤师 赤魔法师 青魔法师", },
64: {"Name": "幻术师 白魔法师 学者 占星术士", },
65: {"Name": "格斗家 武僧 武士", },
66: {"Name": "弓箭手 吟游诗人 机工士 舞者", },
67: {"Name": "剑术师 格斗家 斧术师 枪术师 双剑师 武僧 龙骑士 忍者", },
68: {"Name": "秘术师 召唤师 学者", },
69: {"Name": "秘术师 召唤师", },
70: {"Name": "烹调师之外的能工巧匠", },
71: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 幻术师 咒术师 秘术师 白魔法师 黑魔法师 召唤师 学者", },
72: {"Name": "幻术师 咒术师 秘术师 白魔法师 黑魔法师 召唤师 学者", },
73: {"Name": "幻术师 白魔法师 学者 占星术士", },
74: {"Name": "", },
75: {"Name": "", },
76: {"Name": "", },
77: {"Name": "", },
78: {"Name": "", },
79: {"Name": "", },
80: {"Name": "", },
81: {"Name": "", },
82: {"Name": "", },
83: {"Name": "", },
84: {"Name": "格斗家 枪术师 武僧 龙骑士 武士", },
85: {"Name": "战斗精英 魔法导师 特职专用", },
86: {"Name": "骑士 战士 暗黑骑士 绝枪战士 武僧 龙骑士 忍者 武士", },
87: {"Name": "吟游诗人 机工士 舞者 黑魔法师 召唤师 赤魔法师 白魔法师 学者 占星术士", },
88: {"Name": "剑术师 斧术师 格斗家 枪术师 弓箭手 双剑师 骑士 武僧 战士 龙骑士 吟游诗人 忍者 暗黑骑士 机工士", },
89: {"Name": "黑魔法师 召唤师 赤魔法师", },
90: {"Name": "弓箭手 幻术师 咒术师 秘术师 白魔法师 吟游诗人 黑魔法师 召唤师 学者 机工士 占星术士", },
91: {"Name": "双剑师", },
92: {"Name": "忍者", },
93: {"Name": "双剑师 忍者", },
94: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 忍者", },
95: {"Name": "剑术师 格斗家 斧术师 枪术师 双剑师 忍者", },
96: {"Name": "机工士", },
97: {"Name": "格斗家 枪术师 弓箭手 双剑师 武僧 龙骑士 吟游诗人 忍者 机工士", },
98: {"Name": "暗黑骑士", },
99: {"Name": "占星术士", },
100: {"Name": "弓箭手 双剑师 吟游诗人 忍者 机工士", },
101: {"Name": "格斗家 枪术师 双剑师 武僧 龙骑士 忍者", },
102: {"Name": "格斗家 双剑师 武僧 忍者 武士", },
103: {"Name": "双剑师 忍者", },
104: {"Name": "", },
105: {"Name": "弓箭手 双剑师 吟游诗人 忍者 机工士 舞者", },
106: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 双剑师 吟游诗人", },
107: {"Name": "骑士 武僧 战士 龙骑士 吟游诗人 白魔法师 黑魔法师 召唤师 学者 忍者 机工士 暗黑骑士 占星术士", },
108: {"Name": "战斗精英 魔法导师", },
109: {"Name": "", },
110: {"Name": "战斗精英 魔法导师 特职专用", },
111: {"Name": "武士", },
112: {"Name": "赤魔法师", },
113: {"Name": "剑术师 斧术师 骑士 战士 暗黑骑士 绝枪战士", },
114: {"Name": "格斗家 枪术师 武僧 龙骑士 双剑师 忍者 武士", },
115: {"Name": "弓箭手 吟游诗人 机工士 舞者", },
116: {"Name": "咒术师 黑魔法师 秘术师 召唤师 赤魔法师 青魔法师", },
117: {"Name": "幻术师 白魔法师 学者 占星术士", },
118: {"Name": "格斗家 枪术师 弓箭手 武僧 龙骑士 吟游诗人 双剑师 忍者 机工士 武士 舞者", },
119: {"Name": "格斗家 枪术师 咒术师 武僧 龙骑士 黑魔法师 秘术师 召唤师 双剑师 忍者 武士 赤魔法师 青魔法师", },
120: {"Name": "幻术师 咒术师 白魔法师 黑魔法师 秘术师 召唤师 学者 占星术士 赤魔法师 青魔法师", },
121: {"Name": "骑士 战士 暗黑骑士 绝枪战士", },
122: {"Name": "武僧 龙骑士 忍者 武士", },
123: {"Name": "吟游诗人 机工士 舞者", },
124: {"Name": "黑魔法师 召唤师 赤魔法师 青魔法师", },
125: {"Name": "白魔法师 学者 占星术士", },
126: {"Name": "武僧 龙骑士 吟游诗人 忍者 机工士 武士 舞者", },
127: {"Name": "武僧 龙骑士 黑魔法师 召唤师 忍者 武士 赤魔法师 青魔法师", },
128: {"Name": "白魔法师 黑魔法师 召唤师 学者 占星术士 赤魔法师 青魔法师", },
129: {"Name": "青魔法师", },
130: {"Name": "所有(除设限特职)", },
131: {"Name": "武僧 龙骑士 吟游诗人 黑魔法师 召唤师 忍者 机工士 武士 赤魔法师 舞者", },
132: {"Name": "武僧 龙骑士 吟游诗人 白魔法师 黑魔法师 召唤师 学者 忍者 机工士 占星术士 武士 赤魔法师 舞者", },
133: {"Name": "白魔法师 学者 占星术士", },
134: {"Name": "骑士 战士 暗黑骑士 绝枪战士", },
135: {"Name": "骑士 武僧 战士 龙骑士 吟游诗人 黑魔法师 召唤师 忍者 机工士 暗黑骑士 武士 赤魔法师 绝枪战士 舞者", },
136: {"Name": "骑士 战士 白魔法师 学者 暗黑骑士 占星术士 绝枪战士", },
137: {"Name": "骑士 武僧 战士 龙骑士 吟游诗人 白魔法师 黑魔法师 召唤师 学者 忍者 机工士 暗黑骑士 占星术士 武士 赤魔法师 绝枪战士 舞者", },
138: {"Name": "骑士 武僧 战士 龙骑士 忍者 暗黑骑士 武士 绝枪战士", },
139: {"Name": "吟游诗人 机工士 舞者", },
140: {"Name": "白魔法师 黑魔法师 召唤师 学者 占星术士 赤魔法师", },
141: {"Name": "骑士 武僧 战士 龙骑士 吟游诗人 白魔法师 黑魔法师 召唤师 学者 机工士 暗黑骑士 占星术士 武士 赤魔法师 绝枪战士 舞者", },
142: {"Name": "战斗精英和魔法导师(除设限特职)", },
143: {"Name": "战斗精英(除设限特职)", },
144: {"Name": "魔法导师(除设限特职)", },
145: {"Name": "骑士 武僧 战士 龙骑士 吟游诗人 忍者 机工士 暗黑骑士 武士 绝枪战士 舞者", },
146: {"Name": "战斗精英 魔法导师 特职专用(除设限特职)", },
147: {"Name": "黑魔法师 召唤师 赤魔法师", },
148: {"Name": "武僧 龙骑士 忍者 武士", },
149: {"Name": "绝枪战士", },
150: {"Name": "舞者", },
151: {"Name": "金属工艺(锻铁匠、铸甲匠、雕金匠)", },
152: {"Name": "手工工艺(刻木匠、制革匠、裁衣匠)", },
153: {"Name": "食药工艺(炼金术士、烹调师)", },
154: {"Name": "大地资源(采矿工、园艺工)", },
155: {"Name": "水生资源(捕鱼人)", },
156: {"Name": "防护职业(设限特职除外)", },
157: {"Name": "治疗职业(设限特职除外)", },
158: {"Name": "物理进攻职业(设限特职除外)", },
159: {"Name": "魔法进攻职业(设限特职除外)", },
160: {"Name": "秘术师 学者", },
161: {"Name": "剑术师 格斗家 斧术师 枪术师 弓箭手 骑士 武僧 战士 龙骑士 吟游诗人 双剑师 忍者 机工士 暗黑骑士 武士 绝枪战士 舞者", },
162: {"Name": "骑士 武僧 战士 龙骑士 吟游诗人 白魔法师 黑魔法师 召唤师 学者 忍者 机工士 暗黑骑士 占星术士 武士 赤魔法师 绝枪战士 舞者", },
163: {"Name": "武僧 龙骑士 吟游诗人 黑魔法师 召唤师 忍者 机工士 武士 赤魔法师 舞者", },
164: {"Name": "武僧 龙骑士 吟游诗人 白魔法师 黑魔法师 召唤师 学者 忍者 机工士 占星术士 武士 赤魔法师 舞者", },
165: {"Name": "白魔法师 学者 占星术士", },
166: {"Name": "骑士 战士 暗黑骑士 绝枪战士", },
167: {"Name": "骑士 武僧 战士 龙骑士 吟游诗人 黑魔法师 召唤师 忍者 机工士 暗黑骑士 武士 赤魔法师 绝枪战士 舞者", },
168: {"Name": "骑士 战士 白魔法师 学者 暗黑骑士 占星术士 绝枪战士", },
169: {"Name": "骑士 武僧 战士 龙骑士 吟游诗人 白魔法师 黑魔法师 召唤师 学者 忍者 机工士 暗黑骑士 占星术士 武士 赤魔法师 绝枪战士 舞者", },
170: {"Name": "ナイト モンク 戦士 竜騎士 忍者 暗黒騎士 侍 ガンブレイカー", },
171: {"Name": "吟游诗人 机工士 舞者", },
172: {"Name": "白魔法师 黑魔法师 召唤师 学者 占星术士 赤魔法师", },
173: {"Name": "骑士 武僧 战士 龙骑士 吟游诗人 白魔法师 黑魔法师 召唤师 学者 机工士 暗黑骑士 占星术士 武士 赤魔法师 绝枪战士 舞者", },
174: {"Name": "骑士 武僧 战士 龙骑士 吟游诗人 忍者 机工士 暗黑骑士 武士 绝枪战士 舞者", },
175: {"Name": "黑魔法师 召唤师 赤魔法师", },
176: {"Name": "武僧 龙骑士 忍者 武士", },
177: {"Name": "武僧 龙骑士 吟游诗人 忍者 机工士 武士 舞者", },
178: {"Name": "ナイト 戦士 黒魔道士 召喚士 暗黒騎士 赤魔道士 ガンブレイカー", },
179: {"Name": "白魔法师 召唤师 学者 占星术士 赤魔法师", },
180: {"Name": "", },
181: {"Name": "", }}
tf = {'TRUE': True,
'FALSE': False,
'0': False,
'1': True,
0: False,
1: True}
reject_words = ["ABORT",
"ACTION",
"ADD",
"AFTER",
"ALL",
"ALTER",
"ANALYZE",
"AND",
"AS",
"ASC",
"ATTACH",
"AUTOINCREMENT",
"BEFORE",
"BEGIN",
"BETWEEN",
"BY",
"CASCADE",
"CASE",
"CAST",
"CHECK",
"COLLATE",
"COLUMN",
"COMMIT",
"CONFLICT",
"CONSTRAINT",
"CREATE",
"CROSS",
"CURRENT_DATE",
"CURRENT_TIME",
"CURRENT_TIMESTAMP",
"DATABASE",
"DEFAULT",
"DEFERRABLE",
"DEFERRED",
"DELETE",
"DESC",
"DETACH",
"DISTINCT",
"DROP",
"EACH",
"ELSE",
"END",
"ESCAPE",
"EXCEPT",
"EXCLUSIVE",
"EXISTS",
"EXPLAIN",
"FAIL",
"FOR",
"FOREIGN",
"FROM",
"FULL",
"GLOB",
"GROUP",
"HAVING",
"IF",
"IGNORE",
"IMMEDIATE",
"IN",
"INDEX",
"INDEXED",
"INITIALLY",
"INNER",
"INSERT",
"INSTEAD",
"INTERSECT",
"INTO",
"IS",
"ISNULL",
"JOIN",
"KEY",
"LEFT",
"LIKE",
"LIMIT",
"MATCH",
"NATURAL",
"NO",
"NOT",
"NOTNULL",
"NULL",
"OF",
"OFFSET",
"ON",
"OR",
"ORDER",
"OUTER",
"PLAN",
"PRAGMA",
"PRIMARY",
"QUERY",
"RAISE",
"REFERENCES",
"REGEXP",
"REINDEX",
"RELEASE",
"RENAME",
"REPLACE",
"RESTRICT",
"RIGHT",
"ROLLBACK",
"ROW",
"SAVEPOINT",
"SELECT",
"SET",
"TABLE",
"TEMP",
"TEMPORARY",
"THEN",
"TO",
"TRANSACTION",
"TRIGGER",
"UNION",
"UNIQUE",
"UPDATE",
"USING",
"VACUUM",
"VALUES",
"VIEW",
"VIRTUAL",
"WHEN",
"WHERE",
"NULL",
"INTEGER",
"REAL",
"TEXT",
"BLOB",
"\"",
"%"]
Base_Param = {
0: (-1, ""),
1: (0, "力量"),
2: (1, "灵巧"),
3: (2, "耐力"),
4: (3, "智力"),
5: (4, "精神"),
6: (5, "信仰"),
7: (6, "体力"),
8: (7, "魔力"),
9: (8, "技力"),
10: (9, "采集力"),
11: (10, "制作力"),
12: (-1, "物理基本性能"),
13: (-1, "魔法基本性能"),
14: (11, "攻击间隔"),
15: (-1, "附加效果"),
16: (-1, "攻击次数"),
17: (-1, "格挡发动力"),
18: (-1, "格挡性能"),
19: (12, "坚韧"),
20: (13, "物理攻击力"),
21: (14, "物理防御力"),
22: (15, "直击"),
23: (16, "回避力"),
24: (17, "魔法防御力"),
25: (-1, "暴击攻击力"),
26: (-1, "暴击防御力"),
27: (18, "暴击"),
28: (-1, "暴击回避力"),
29: (-1, "斩击耐性"),
30: (-1, "突刺耐性"),
31: (-1, "打击耐性"),
32: (-1, "射击耐性"),
33: (19, "攻击魔法威力"),
34: (20, "治疗魔法威力"),
35: (-1, "强化魔法威力"),
36: (21, "元素加持"),
37: (-1, "火"),
38: (-1, "冰"),
39: (-1, "风"),
40: (-1, "土"),
41: (-1, "雷"),
42: (-1, "水"),
43: (-1, "全魔法耐性"),
44: (22, "信念"),
45: (23, "技能速度"),
46: (24, "咏唱速度"),
47: (25, "加速"),
48: (-1, "斗志"),
49: (-1, "仇恨"),
50: (-1, "降低仇恨"),
51: (-1, "分解技能提升率"),
52: (-1, "经验值获得量"),
53: (-1, "生命再生"),
54: (-1, "魔力再生"),
55: (-1, "主状态修正"),
56: (-1, "副状态修正"),
57: (-1, "减速耐性"),
58: (-1, "石化耐性"),
59: (-1, "麻痹耐性"),
60: (-1, "静寂耐性"),
61: (-1, "失明耐性"),
62: (-1, "中毒耐性"),
63: (-1, "眩晕耐性"),
64: (-1, "睡眠耐性"),
65: (-1, "止步耐性"),
66: (-1, "加重耐性"),
67: (-1, "死亡宣告耐性"),
68: (-1, "装备损耗耐性"),
69: (-1, "精炼度提升量"),
70: (26, "作业精度"),
71: (27, "加工精度"),
72: (28, "获得力"),
73: (29, "鉴别力")}
Item_Special_Bonus = {
0: "",
1: "",
2: "套装效果:",
3: "",
4: "神应效果:",
5: "",
6: "套装效果(等级限制):",
7: "优雷卡专用效果:",
8: "天佑女王专用效果:",
9: "",
10: ""}
Item_Series = {
0: "",
1: "黑涡团制式装备",
2: "双蛇党制式装备",
3: "恒辉队制式装备",
4: "东方公子装束",
5: "东方秀女装束",
6: "东方警卫装束",
7: "东方女官装束",
8: "特制上仙装备",
9: "特制女仆装备",
10: "特制管家装备",
11: "哪吒赤莲装备",
12: "哪吒白莲装备",
13: "东方贵人装束",
14: "风雅装束",
15: "东方雅人装束",
16: "东方丽人装束",
17: "绿宝石兽装备",
18: "黄宝石兽装备",
19: "天使装备",
20: "恶魔装备",
21: "王子装备",
22: "公主装备",
23: "东方书生装备",
24: "东方女生装备",
25: "艾普装备",
26: "大召唤士装备",
27: "红宝石兽装备",
28: "圣手饰品装备",
29: "圣地饰品装备"}
Grand_Company = {
0: "平民",
1: "黑涡团",
2: "双蛇党",
3: "恒辉队"}
Item_Sort_Category = {
0: 0,
1: 0,
2: 0,
3: 0,
4: 1,
5: 5,
6: 6,
7: 11,
8: 22,
9: 25,
10: 26,
11: 27,
12: 30,
13: 32,
14: 33,
15: 35,
16: 40,
17: 41,
18: 42,
19: 43,
20: 45,
21: 46,
22: 47,
23: 48,
24: 49,
25: 50,
26: 51,
27: 52,
28: 53,
29: 54,
30: 55,
31: 75,
32: 76,
33: 77,
34: 78,
35: 80,
36: 81,
37: 79,
38: 95,
39: 100,
40: 101,
41: 105,
42: 106,
43: 107,
44: 108,
45: 109,
46: 110,
47: 111,
48: 112,
49: 113,
50: 125,
51: 130,
52: 135,
53: 140,
54: 150,
55: 155,
56: 160,
57: 165,
58: 170,
59: 175,
60: 180,
61: 185,
62: 250,
63: 254,
64: 96,
65: 102,
66: 114,
67: 82,
68: 115,
69: 83,
70: 103}
Equip_Slot_Category = {
0: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
1: {"MainHand": 1, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
2: {"MainHand": 0, "OffHand": 1, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
3: {"MainHand": 0, "OffHand": 0, "Head": 1, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
4: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 1, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
5: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 1, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
6: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 1, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
7: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 1, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
8: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 1, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
9: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 1,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
10: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 1, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
11: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 1, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
12: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 1, "FingerR": 1, "SoulCrystal": 0, },
13: {"MainHand": 1, "OffHand": -1, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
14: {"MainHand": 1, "OffHand": 1, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
15: {"MainHand": 0, "OffHand": 0, "Head": -1, "Body": 1, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
16: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 1, "Gloves": -1, "Waist": 0, "Legs": -1, "Feet": -1, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
17: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 1, },
18: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 1, "Feet": -1, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
19: {"MainHand": 0, "OffHand": 0, "Head": -1, "Body": 1, "Gloves": -1, "Waist": 0, "Legs": -1, "Feet": -1,
"Ears": 0, "Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
20: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 1, "Gloves": -1, "Waist": 0, "Legs": -1, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
21: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 1, "Gloves": 0, "Waist": 0, "Legs": -1, "Feet": -1, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, },
22: {"MainHand": 0, "OffHand": 0, "Head": 0, "Body": 0, "Gloves": 0, "Waist": 0, "Legs": 0, "Feet": 0, "Ears": 0,
"Neck": 0, "Wrists": 0, "FingerL": 0, "FingerR": 0, "SoulCrystal": 0, }
}
Item_Search_Category = {
0: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
1: {"Name": "武器", "Icon": 60102, "Category": 0, "Order": 0, "ClassJob": 0, },
2: {"Name": "制作工具", "Icon": 60113, "Category": 0, "Order": 0, "ClassJob": 0, },
3: {"Name": "采集工具", "Icon": 60120, "Category": 0, "Order": 0, "ClassJob": 0, },
4: {"Name": "防具", "Icon": 60126, "Category": 0, "Order": 0, "ClassJob": 0, },
5: {"Name": "饰品", "Icon": 60135, "Category": 0, "Order": 0, "ClassJob": 0, },
6: {"Name": "药品食品", "Icon": 60136, "Category": 0, "Order": 0, "ClassJob": 0, },
7: {"Name": "素材", "Icon": 60137, "Category": 0, "Order": 0, "ClassJob": 0, },
8: {"Name": "其他", "Icon": 60159, "Category": 0, "Order": 0, "ClassJob": 0, },
9: {"Name": "格斗武器", "Icon": 60101, "Category": 1, "Order": 5, "ClassJob": 2, },
10: {"Name": "剑", "Icon": 60102, "Category": 1, "Order": 0, "ClassJob": 1, },
11: {"Name": "斧", "Icon": 60103, "Category": 1, "Order": 1, "ClassJob": 3, },
12: {"Name": "弓", "Icon": 60105, "Category": 1, "Order": 8, "ClassJob": 5, },
13: {"Name": "长枪", "Icon": 60104, "Category": 1, "Order": 4, "ClassJob": 4, },
14: {"Name": "咒杖", "Icon": 60108, "Category": 1, "Order": 11, "ClassJob": 7, },
15: {"Name": "幻杖", "Icon": 60107, "Category": 1, "Order": 14, "ClassJob": 6, },
16: {"Name": "魔导书", "Icon": 60109, "Category": 1, "Order": 12, "ClassJob": 26, },
17: {"Name": "盾", "Icon": 60110, "Category": 2, "Order": 0, "ClassJob": 0, },
18: {"Name": "投掷武器", "Icon": 60111, "Category": 0, "Order": 0, "ClassJob": 0, },
19: {"Name": "刻木工具", "Icon": 60112, "Category": 1, "Order": 17, "ClassJob": 8, },
20: {"Name": "锻铁工具", "Icon": 60113, "Category": 1, "Order": 18, "ClassJob": 9, },
21: {"Name": "铸甲工具", "Icon": 60114, "Category": 1, "Order": 19, "ClassJob": 10, },
22: {"Name": "雕金工具", "Icon": 60115, "Category": 1, "Order": 20, "ClassJob": 11, },
23: {"Name": "制革工具", "Icon": 60116, "Category": 1, "Order": 21, "ClassJob": 12, },
24: {"Name": "裁衣工具", "Icon": 60117, "Category": 1, "Order": 22, "ClassJob": 13, },
25: {"Name": "炼金工具", "Icon": 60118, "Category": 1, "Order": 23, "ClassJob": 14, },
26: {"Name": "烹调工具", "Icon": 60119, "Category": 1, "Order": 24, "ClassJob": 15, },
27: {"Name": "采矿工具", "Icon": 60120, "Category": 1, "Order": 25, "ClassJob": 16, },
28: {"Name": "园艺工具", "Icon": 60121, "Category": 1, "Order": 26, "ClassJob": 17, },
29: {"Name": "捕鱼用具", "Icon": 60122, "Category": 1, "Order": 27, "ClassJob": 18, },
30: {"Name": "钓饵", "Icon": 60123, "Category": 1, "Order": 28, "ClassJob": 18, },
31: {"Name": "头部防具", "Icon": 60124, "Category": 2, "Order": 1, "ClassJob": 0, },
32: {"Name": "内衣", "Icon": 60125, "Category": 0, "Order": 0, "ClassJob": 0, },
33: {"Name": "身体防具", "Icon": 60126, "Category": 2, "Order": 2, "ClassJob": 0, },
34: {"Name": "内裤", "Icon": 60127, "Category": 0, "Order": 0, "ClassJob": 0, },
35: {"Name": "腿部防具", "Icon": 60128, "Category": 2, "Order": 5, "ClassJob": 0, },
36: {"Name": "手部防具", "Icon": 60129, "Category": 2, "Order": 3, "ClassJob": 0, },
37: {"Name": "脚部防具", "Icon": 60130, "Category": 2, "Order": 6, "ClassJob": 0, },
38: {"Name": "腰部防具", "Icon": 60131, "Category": 2, "Order": 4, "ClassJob": 0, },
39: {"Name": "项链", "Icon": 60132, "Category": 2, "Order": 9, "ClassJob": 0, },
40: {"Name": "耳饰", "Icon": 60133, "Category": 2, "Order": 8, "ClassJob": 0, },
41: {"Name": "手镯", "Icon": 60134, "Category": 2, "Order": 10, "ClassJob": 0, },
42: {"Name": "戒指", "Icon": 60135, "Category": 2, "Order": 11, "ClassJob": 0, },
43: {"Name": "药品", "Icon": 60136, "Category": 3, "Order": 0, "ClassJob": 0, },
44: {"Name": "食材", "Icon": 60137, "Category": 3, "Order": 1, "ClassJob": 0, },
45: {"Name": "食品", "Icon": 60146, "Category": 3, "Order": 2, "ClassJob": 0, },
46: {"Name": "水产品", "Icon": 60138, "Category": 3, "Order": 3, "ClassJob": 0, },
47: {"Name": "石材", "Icon": 60139, "Category": 3, "Order": 4, "ClassJob": 0, },
48: {"Name": "金属", "Icon": 60140, "Category": 3, "Order": 5, "ClassJob": 0, },
49: {"Name": "木材", "Icon": 60141, "Category": 3, "Order": 6, "ClassJob": 0, },
50: {"Name": "布料", "Icon": 60142, "Category": 3, "Order": 7, "ClassJob": 0, },
51: {"Name": "皮革", "Icon": 60143, "Category": 3, "Order": 8, "ClassJob": 0, },
52: {"Name": "骨材", "Icon": 60144, "Category": 3, "Order": 9, "ClassJob": 0, },
53: {"Name": "炼金原料", "Icon": 60145, "Category": 3, "Order": 10, "ClassJob": 0, },
54: {"Name": "染料", "Icon": 60147, "Category": 3, "Order": 11, "ClassJob": 0, },
55: {"Name": "部件", "Icon": 60148, "Category": 3, "Order": 12, "ClassJob": 0, },
56: {"Name": "一般家具", "Icon": 60164, "Category": 4, "Order": 3, "ClassJob": 0, },
57: {"Name": "魔晶石", "Icon": 60150, "Category": 3, "Order": 13, "ClassJob": 0, },
58: {"Name": "水晶", "Icon": 60151, "Category": 3, "Order": 14, "ClassJob": 0, },
59: {"Name": "触媒", "Icon": 60152, "Category": 3, "Order": 15, "ClassJob": 0, },
60: {"Name": "杂货", "Icon": 60153, "Category": 3, "Order": 16, "ClassJob": 0, },
61: {"Name": "灵魂水晶", "Icon": 60157, "Category": 0, "Order": 0, "ClassJob": 0, },
62: {"Name": "箭", "Icon": 60153, "Category": 0, "Order": 0, "ClassJob": 0, },
63: {"Name": "任务道具", "Icon": 60158, "Category": 0, "Order": 0, "ClassJob": 0, },
64: {"Name": "其他", "Icon": 60159, "Category": 0, "Order": 0, "ClassJob": 0, },
65: {"Name": "室外建材", "Icon": 60160, "Category": 4, "Order": 0, "ClassJob": 0, },
66: {"Name": "室内建材", "Icon": 60161, "Category": 4, "Order": 1, "ClassJob": 0, },
67: {"Name": "庭具", "Icon": 60168, "Category": 4, "Order": 2, "ClassJob": 0, },
68: {"Name": "椅子睡床", "Icon": 60165, "Category": 4, "Order": 4, "ClassJob": 0, },
69: {"Name": "桌台", "Icon": 60162, "Category": 4, "Order": 5, "ClassJob": 0, },
70: {"Name": "桌上", "Icon": 60163, "Category": 4, "Order": 6, "ClassJob": 0, },
71: {"Name": "壁挂", "Icon": 60166, "Category": 4, "Order": 7, "ClassJob": 0, },
72: {"Name": "地毯", "Icon": 60167, "Category": 4, "Order": 8, "ClassJob": 0, },
73: {"Name": "双剑", "Icon": 60106, "Category": 1, "Order": 7, "ClassJob": 29, },
74: {"Name": "杂货(季节活动)", "Icon": 60154, "Category": 3, "Order": 17, "ClassJob": 0, },
75: {"Name": "宠物", "Icon": 60155, "Category": 3, "Order": 18, "ClassJob": 0, },
76: {"Name": "双手剑", "Icon": 60170, "Category": 1, "Order": 2, "ClassJob": 32, },
77: {"Name": "火枪", "Icon": 60172, "Category": 1, "Order": 9, "ClassJob": 31, },
78: {"Name": "天球仪", "Icon": 60171, "Category": 1, "Order": 16, "ClassJob": 33, },
79: {"Name": "飞空艇/潜水艇部件", "Icon": 60169, "Category": 3, "Order": 19, "ClassJob": 0, },
80: {"Name": "管弦乐琴关联物品", "Icon": 60173, "Category": 3, "Order": 20, "ClassJob": 0, },
81: {"Name": "栽培用品", "Icon": 60174, "Category": 4, "Order": 9, "ClassJob": 0, },
82: {"Name": "绘画作品", "Icon": 60175, "Category": 4, "Order": 10, "ClassJob": 0, },
83: {"Name": "武士刀", "Icon": 60177, "Category": 1, "Order": 6, "ClassJob": 34, },
84: {"Name": "刺剑", "Icon": 60176, "Category": 1, "Order": 13, "ClassJob": 35, },
85: {"Name": "魔导书(学者专用)", "Icon": 60178, "Category": 1, "Order": 15, "ClassJob": 28, },
86: {"Name": "枪刃", "Icon": 60181, "Category": 1, "Order": 3, "ClassJob": 37, },
87: {"Name": "投掷武器", "Icon": 60182, "Category": 1, "Order": 10, "ClassJob": 38, },
88: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
89: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
90: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
91: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
92: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
93: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
94: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
95: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
96: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
97: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
98: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
99: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, },
100: {"Name": "", "Icon": 0, "Category": 0, "Order": 0, "ClassJob": 0, }
}
Item_UI_Category = {
0: {"Name": "", "Icon": 0, "Order{Minor}": 0, "Order{Major}": 0, },
1: {"Name": "格斗武器", "Icon": 60101, "Order{Minor}": 5, "Order{Major}": 1, },
2: {"Name": "单手剑", "Icon": 60102, "Order{Minor}": 0, "Order{Major}": 1, },
3: {"Name": "大斧", "Icon": 60103, "Order{Minor}": 1, "Order{Major}": 1, },
4: {"Name": "弓", "Icon": 60105, "Order{Minor}": 8, "Order{Major}": 1, },
5: {"Name": "长枪", "Icon": 60104, "Order{Minor}": 4, "Order{Major}": 1, },
6: {"Name": "单手咒杖", "Icon": 60108, "Order{Minor}": 11, "Order{Major}": 1, },
7: {"Name": "双手咒杖", "Icon": 60108, "Order{Minor}": 12, "Order{Major}": 1, },
8: {"Name": "单手幻杖", "Icon": 60107, "Order{Minor}": 16, "Order{Major}": 1, },
9: {"Name": "双手幻杖", "Icon": 60107, "Order{Minor}": 17, "Order{Major}": 1, },
10: {"Name": "魔导书", "Icon": 60109, "Order{Minor}": 13, "Order{Major}": 1, },
11: {"Name": "盾", "Icon": 60110, "Order{Minor}": 0, "Order{Major}": 3, },
12: {"Name": "刻木工具(主工具)", "Icon": 60112, "Order{Minor}": 0, "Order{Major}": 2, },
13: {"Name": "刻木工具(副工具)", "Icon": 60112, "Order{Minor}": 1, "Order{Major}": 2, },
14: {"Name": "锻铁工具(主工具)", "Icon": 60113, "Order{Minor}": 2, "Order{Major}": 2, },
15: {"Name": "锻铁工具(副工具)", "Icon": 60113, "Order{Minor}": 3, "Order{Major}": 2, },
16: {"Name": "铸甲工具(主工具)", "Icon": 60114, "Order{Minor}": 4, "Order{Major}": 2, },
17: {"Name": "铸甲工具(副工具)", "Icon": 60114, "Order{Minor}": 5, "Order{Major}": 2, },
18: {"Name": "雕金工具(主工具)", "Icon": 60115, "Order{Minor}": 6, "Order{Major}": 2, },
19: {"Name": "雕金工具(副工具)", "Icon": 60115, "Order{Minor}": 7, "Order{Major}": 2, },
20: {"Name": "制革工具(主工具)", "Icon": 60116, "Order{Minor}": 8, "Order{Major}": 2, },
21: {"Name": "制革工具(副工具)", "Icon": 60116, "Order{Minor}": 9, "Order{Major}": 2, },
22: {"Name": "裁衣工具(主工具)", "Icon": 60117, "Order{Minor}": 10, "Order{Major}": 2, },
23: {"Name": "裁衣工具(副工具)", "Icon": 60117, "Order{Minor}": 11, "Order{Major}": 2, },
24: {"Name": "炼金工具(主工具)", "Icon": 60118, "Order{Minor}": 12, "Order{Major}": 2, },
25: {"Name": "炼金工具(副工具)", "Icon": 60118, "Order{Minor}": 13, "Order{Major}": 2, },
26: {"Name": "烹调工具(主工具)", "Icon": 60119, "Order{Minor}": 14, "Order{Major}": 2, },
27: {"Name": "烹调工具(副工具)", "Icon": 60119, "Order{Minor}": 15, "Order{Major}": 2, },
28: {"Name": "采矿工具(主工具)", "Icon": 60120, "Order{Minor}": 16, "Order{Major}": 2, },
29: {"Name": "采矿工具(副工具)", "Icon": 60120, "Order{Minor}": 17, "Order{Major}": 2, },
30: {"Name": "园艺工具(主工具)", "Icon": 60121, "Order{Minor}": 18, "Order{Major}": 2, },
31: {"Name": "园艺工具(副工具)", "Icon": 60121, "Order{Minor}": 19, "Order{Major}": 2, },
32: {"Name": "捕鱼用具(主工具)", "Icon": 60122, "Order{Minor}": 20, "Order{Major}": 2, },
33: {"Name": "钓饵", "Icon": 60123, "Order{Minor}": 29, "Order{Major}": 7, },
34: {"Name": "头部防具", "Icon": 60124, "Order{Minor}": 1, "Order{Major}": 3, },
35: {"Name": "身体防具", "Icon": 60126, "Order{Minor}": 2, "Order{Major}": 3, },
36: {"Name": "腿部防具", "Icon": 60128, "Order{Minor}": 5, "Order{Major}": 3, },
37: {"Name": "手部防具", "Icon": 60129, "Order{Minor}": 3, "Order{Major}": 3, },
38: {"Name": "脚部防具", "Icon": 60130, "Order{Minor}": 6, "Order{Major}": 3, },
39: {"Name": "腰部防具", "Icon": 60131, "Order{Minor}": 4, "Order{Major}": 3, },
40: {"Name": "项链", "Icon": 60132, "Order{Minor}": 1, "Order{Major}": 4, },
41: {"Name": "耳饰", "Icon": 60133, "Order{Minor}": 0, "Order{Major}": 4, },
42: {"Name": "手镯", "Icon": 60134, "Order{Minor}": 2, "Order{Major}": 4, },
43: {"Name": "戒指", "Icon": 60135, "Order{Minor}": 3, "Order{Major}": 4, },
44: {"Name": "药品", "Icon": 60136, "Order{Minor}": 0, "Order{Major}": 5, },
45: {"Name": "食材", "Icon": 60137, "Order{Minor}": 0, "Order{Major}": 6, },
46: {"Name": "食品", "Icon": 60146, "Order{Minor}": 1, "Order{Major}": 5, },
47: {"Name": "水产品", "Icon": 60138, "Order{Minor}": 1, "Order{Major}": 6, },
48: {"Name": "石材", "Icon": 60139, "Order{Minor}": 2, "Order{Major}": 6, },
49: {"Name": "金属", "Icon": 60140, "Order{Minor}": 3, "Order{Major}": 6, },
50: {"Name": "木材", "Icon": 60141, "Order{Minor}": 4, "Order{Major}": 6, },
51: {"Name": "布料", "Icon": 60142, "Order{Minor}": 5, "Order{Major}": 6, },
52: {"Name": "皮革", "Icon": 60143, "Order{Minor}": 6, "Order{Major}": 6, },
53: {"Name": "骨材", "Icon": 60144, "Order{Minor}": 7, "Order{Major}": 6, },
54: {"Name": "炼金原料", "Icon": 60145, "Order{Minor}": 8, "Order{Major}": 6, },
55: {"Name": "染料", "Icon": 60147, "Order{Minor}": 10, "Order{Major}": 6, },
56: {"Name": "部件", "Icon": 60148, "Order{Minor}": 9, "Order{Major}": 6, },
57: {"Name": "家具", "Icon": 60164, "Order{Minor}": 20, "Order{Major}": 7, },
58: {"Name": "魔晶石", "Icon": 60150, "Order{Minor}": 0, "Order{Major}": 7, },
59: {"Name": "水晶", "Icon": 60151, "Order{Minor}": 1, "Order{Major}": 7, },
60: {"Name": "触媒", "Icon": 60152, "Order{Minor}": 2, "Order{Major}": 7, },
61: {"Name": "杂货", "Icon": 60153, "Order{Minor}": 3, "Order{Major}": 7, },
62: {"Name": "灵魂水晶", "Icon": 60157, "Order{Minor}": 4, "Order{Major}": 4, },
63: {"Name": "其他", "Icon": 60159, "Order{Minor}": 6, "Order{Major}": 7, },
64: {"Name": "房产证书", "Icon": 60160, "Order{Minor}": 7, "Order{Major}": 7, },
65: {"Name": "房顶", "Icon": 60160, "Order{Minor}": 8, "Order{Major}": 7, },
66: {"Name": "外墙", "Icon": 60160, "Order{Minor}": 9, "Order{Major}": 7, },
67: {"Name": "窗户", "Icon": 60160, "Order{Minor}": 10, "Order{Major}": 7, },
68: {"Name": "房门", "Icon": 60160, "Order{Minor}": 11, "Order{Major}": 7, },
69: {"Name": "房顶装饰", "Icon": 60160, "Order{Minor}": 12, "Order{Major}": 7, },
70: {"Name": "外墙装饰", "Icon": 60160, "Order{Minor}": 13, "Order{Major}": 7, },
71: {"Name": "门牌", "Icon": 60160, "Order{Minor}": 14, "Order{Major}": 7, },
72: {"Name": "院墙", "Icon": 60160, "Order{Minor}": 15, "Order{Major}": 7, },
73: {"Name": "内墙", "Icon": 60161, "Order{Minor}": 16, "Order{Major}": 7, },
74: {"Name": "地板", "Icon": 60161, "Order{Minor}": 17, "Order{Major}": 7, },
75: {"Name": "屋顶照明", "Icon": 60161, "Order{Minor}": 18, "Order{Major}": 7, },
76: {"Name": "庭具", "Icon": 60168, "Order{Minor}": 19, "Order{Major}": 7, },
77: {"Name": "桌台", "Icon": 60162, "Order{Minor}": 21, "Order{Major}": 7, },
78: {"Name": "桌上", "Icon": 60163, "Order{Minor}": 22, "Order{Major}": 7, },
79: {"Name": "壁挂", "Icon": 60166, "Order{Minor}": 23, "Order{Major}": 7, },
80: {"Name": "地毯", "Icon": 60167, "Order{Minor}": 24, "Order{Major}": 7, },
81: {"Name": "宠物", "Icon": 60155, "Order{Minor}": 5, "Order{Major}": 7, },
82: {"Name": "栽培用品", "Icon": 60153, "Order{Minor}": 25, "Order{Major}": 7, },
83: {"Name": "半魔晶石", "Icon": 60150, "Order{Minor}": 26, "Order{Major}": 7, },
84: {"Name": "双剑", "Icon": 60106, "Order{Minor}": 7, "Order{Major}": 1, },
85: {"Name": "杂货(季节活动)", "Icon": 60154, "Order{Minor}": 4, "Order{Major}": 7, },
86: {"Name": "九宫幻卡", "Icon": 60156, "Order{Minor}": 27, "Order{Major}": 7, },
87: {"Name": "双手剑", "Icon": 60170, "Order{Minor}": 2, "Order{Major}": 1, },
88: {"Name": "火枪", "Icon": 60172, "Order{Minor}": 9, "Order{Major}": 1, },
89: {"Name": "天球仪", "Icon": 60171, "Order{Minor}": 19, "Order{Major}": 1, },
90: {"Name": "飞空艇部件(船体)", "Icon": 60169, "Order{Minor}": 11, "Order{Major}": 6, },
91: {"Name": "飞空艇部件(舾装)", "Icon": 60169, "Order{Minor}": 12, "Order{Major}": 6, },
92: {"Name": "飞空艇部件(船尾)", "Icon": 60169, "Order{Minor}": 14, "Order{Major}": 6, },
93: {"Name": "飞空艇部件(船首)", "Icon": 60169, "Order{Minor}": 13, "Order{Major}": 6, },
94: {"Name": "管弦乐琴乐谱", "Icon": 60173, "Order{Minor}": 28, "Order{Major}": 7, },
95: {"Name": "绘画作品", "Icon": 60175, "Order{Minor}": 29, "Order{Major}": 7, },
96: {"Name": "武士刀", "Icon": 60177, "Order{Minor}": 6, "Order{Major}": 1, },
97: {"Name": "刺剑", "Icon": 60176, "Order{Minor}": 14, "Order{Major}": 1, },
98: {"Name": "魔导书(学者专用)", "Icon": 60178, "Order{Minor}": 18, "Order{Major}": 1, },
99: {"Name": "捕鱼用具(副工具)", "Icon": 60122, "Order{Minor}": 21, "Order{Major}": 2, },
100: {"Name": "货币", "Icon": 60179, "Order{Minor}": 255, "Order{Major}": 7, },
101: {"Name": "潜水艇部件(船体)", "Icon": 60169, "Order{Minor}": 15, "Order{Major}": 6, },
102: {"Name": "潜水艇部件(船尾)", "Icon": 60169, "Order{Minor}": 16, "Order{Major}": 6, },
103: {"Name": "潜水艇部件(船首)", "Icon": 60169, "Order{Minor}": 17, "Order{Major}": 6, },
104: {"Name": "潜水艇部件(舰桥)", "Icon": 60169, "Order{Minor}": 18, "Order{Major}": 6, },
105: {"Name": "青魔杖", "Icon": 60180, "Order{Minor}": 15, "Order{Major}": 1, },
106: {"Name": "枪刃", "Icon": 60181, "Order{Minor}": 3, "Order{Major}": 1, },
107: {"Name": "投掷武器", "Icon": 60182, "Order{Minor}": 10, "Order{Major}": 1, }
}
Item_Repair = {
0: '',
5594: '1级暗物质',
5595: '2级暗物质',
5596: '3级暗物质',
5597: '4级暗物质',
5598: '5级暗物质',
10386: '6级暗物质',
17837: '7级暗物质',
}
Item_Glamour = {
0: ''
}
|
the-stack_0_4035 | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Channelflow(CMakePackage):
"""Channelflow is a software system for numerical analysis of the
incompressible fluid flow in channel geometries, written in C++.
"""
homepage = 'https://github.com/epfl-ecps/channelflow'
url = 'https://github.com/epfl-ecps/channelflow.git'
version(
'develop',
git='https://github.com/epfl-ecps/channelflow.git',
branch='master'
)
variant('shared', default=True, description='Build shared libs')
variant('mpi', default=True, description='Enable MPI parallelism')
variant('hdf5', default=True, description='Enable support for HDF5 I/O')
variant(
'netcdf', default='serial', values=('none', 'serial', 'parallel'),
multi=False, description='Level of support for NetCDF I/O'
)
variant('python', default=False, description='Build python bindings')
depends_on('eigen')
depends_on('fftw')
# MPI related constraints
depends_on('mpi', when='+mpi')
depends_on('fftw+mpi', when='+mpi')
# Support for different I/O formats
depends_on('hdf5+cxx', when='+hdf5')
depends_on('netcdf', when='netcdf=serial')
depends_on('netcdf+mpi', when='netcdf=parallel')
# Python bindings
depends_on('boost+python', when='+python')
conflicts('~mpi', when='netcdf=parallel', msg='Parallel NetCDF requires MPI')
conflicts(
'+mpi', when='+python',
msg='Building python bindings is possible only for the serial code'
)
conflicts('~mpi', when='^mpi',
msg='There should be no MPI in the DAG when ~mpi is active')
def cmake_args(self):
spec = self.spec
on_or_off = lambda predicate: 'ON' if predicate else 'OFF'
args = [
'-DBUILD_SHARED_LIBS:BOOL={0}'.format(
on_or_off('+shared' in spec)
),
'-DUSE_MPI:BOOL={0}'.format(on_or_off('+mpi' in spec)),
'-DWITH_HDF5CXX:BOOL={0}'.format(on_or_off('+hdf5' in spec)),
'-DWITH_PYTHON:BOOL={0}'.format(on_or_off('+python' in spec))
]
netcdf_str = {
'none': 'OFF',
'serial': 'Serial',
'parallel': 'Parallel'
}
args.append('-DWITH_NETCDF:STRING={0}'.format(
netcdf_str[spec.variants['netcdf'].value]
))
# Set an MPI compiler for parallel builds
if '+mpi' in spec:
args.append(
'-DCMAKE_CXX_COMPILER:PATH={0}'.format(spec['mpi'].mpicxx)
)
return args
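
# --- Hedged usage note (not part of the original package file) ---
# For a hypothetical spec such as `spack install channelflow +mpi +hdf5 netcdf=parallel`
# (with the default +shared and ~python), cmake_args above would be expected to
# produce roughly:
#   ['-DBUILD_SHARED_LIBS:BOOL=ON', '-DUSE_MPI:BOOL=ON',
#    '-DWITH_HDF5CXX:BOOL=ON', '-DWITH_PYTHON:BOOL=OFF',
#    '-DWITH_NETCDF:STRING=Parallel',
#    '-DCMAKE_CXX_COMPILER:PATH=<path to the MPI C++ wrapper>']
# The exact values depend on the concretized spec; this is an illustration of
# the mapping implemented in cmake_args, not output captured from a real build.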
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.