ext | sha | content
---|---|---
py | 1a4db810311430482f1753b50480bde9b3981d13 | from pygame import *
from random import *
from time import time as timer
win_widh = 1000
win_hight = 400
win = display.set_mode((win_widh, win_hight))
display.set_caption('Plants')
ImegHero = 'Woodman.png'
ImeBack = 'Forest.png'
ImeAnemi = 'BigCliz.png'
img_bullet = 'Ball.png'
TimeNow = timer()
TimeHit = timer()
# clock = time.Clock()
class GameSprite(sprite.Sprite):
def __init__(self, PLimage, playX, playY, sizeX, sizeY, speed):
sprite.Sprite.__init__(self)
self.image = transform.scale(image.load(PLimage), (sizeX, sizeY))
self.speed = speed
self.rect = self.image.get_rect()
self.rect.x = playX
self.rect.y = playY
def reset(self):
win.blit(self.image, (self.rect.x, self.rect.y))
class Plaer(GameSprite):
def Went(self):
global LastWent
global TimeNow
went = key.get_pressed()
if went[K_UP]:
self.rect.y -= self.speed
if went[K_DOWN]:
self.rect.y += self.speed
if went[K_LEFT]:
self.rect.x -= self.speed
LastWent = 'Left'
if went[K_RIGHT]:
self.rect.x += self.speed
LastWent = 'Right'
if (timer() - TimeNow) > 2:
if went[K_SPACE]:
TimeNow = timer()
if LastWent == 'Left':
self.rect.x -= 10*self.speed
else:
self.rect.x += 10*self.speed
def fire(self):
bullet = Bullet(img_bullet, self.rect.centerx, self.rect.centery, 15, 20, 15)
bullets.add(bullet)
class PlantsAnami(GameSprite):
def update(self):
self.rect.x += self.speed
if self.rect.x < 0:
self.rect.x = win_widh
self.rect.y = randint(150, 340)
class Bullet(GameSprite):
def update(self):
self.rect.x += self.speed
if self.rect.x > win_widh:
self.kill()
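# Overview of the sprite classes above: Plaer.Went handles arrow-key movement
# plus a SPACE dash (10x speed in the last horizontal direction, at most once
# every 2 seconds, throttled via TimeNow); PlantsAnami enemies move left and
# respawn at the right edge at a random height once they leave the screen;
# Bullet sprites are removed once they pass the right edge of the window.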
fps = 30
LastWent = 'Left'
HeroGad = Plaer(ImegHero, 50, 50, 60, 60, 10)
GameRun = True
bacgraund = transform.scale(image.load(ImeBack), (win_widh, win_hight))
monsters = sprite.Group()
ds = [50, win_widh-100]
for i in range(1,7):
monster = PlantsAnami(ImeAnemi, win_widh, randint(150, 400-90), 72, 72, randint(-5,-1))
monsters.add(monster)
bullets = sprite.Group()
def DrawAll():
win.blit(bacgraund, (0,0))
# draw.rect(win, (0,255,0), (0, 350, 1000, 50))
HeroGad.reset()
HeroGad.Went()
monsters.update()
monsters.draw(win)
bullets.update()
bullets.draw(win)
display.update()
while GameRun:
for e in event.get():
if e.type == QUIT:
GameRun = False
elif e.type == KEYDOWN:
if e.key == K_q:
HeroGad.fire()
collide = sprite.groupcollide(bullets, monsters, True, True)
for c in collide:
while len(monsters) < 6:
monster = PlantsAnami(ImeAnemi, win_widh, randint(150, 340), 72, 72, randint(-5,-1))
monsters.add(monster)
# tap = key.get_pressed()
# if sprite.spritecollide(HeroGad, monsters, True):
# print('AAAAAAAAAAAAAA')
# # monster = PlantsAnami(ImeAnemi, win_widh, randint(150, 340), 72, 72, randint(-5,-1))
# # monsters.add(monster)
# while len(monsters)<6:
# monster = PlantsAnami(ImeAnemi, win_widh, randint(150, 340), 72, 72, randint(-5,-1))
# monsters.add(monster)
DrawAll()
time.delay(fps) |
py | 1a4dba2822d1bb1e546d09723f9ba9264bb81267 | # pylint: disable=g-direct-third-party-import
# pylint: disable=g-bad-file-header
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates the embedded_tools.zip that is part of the Bazel binary."""
import contextlib
import fnmatch
import os
import os.path
import re
import sys
import zipfile
from src.create_embedded_tools_lib import copy_tar_to_zip
from src.create_embedded_tools_lib import copy_zip_to_zip
from src.create_embedded_tools_lib import is_executable
output_paths = [
('*tools/jdk/BUILD*', lambda x: 'tools/jdk/BUILD'),
('*tools/platforms/platforms.BUILD', lambda x: 'platforms/BUILD'),
('*tools/platforms/*', lambda x: 'platforms/' + os.path.basename(x)),
('*tools/cpp/runfiles/generated_*',
lambda x: 'tools/cpp/runfiles/' + os.path.basename(x)[len('generated_'):]),
('*jarjar_command_deploy.jar',
lambda x: 'tools/jdk/jarjar_command_deploy.jar'),
('*BUILD-new.pkg', lambda x: 'tools/jdk/BUILD.pkg'),
('*BUILD.javalangtools', lambda x: 'third_party/java/jdk/langtools/BUILD'),
('*singlejar_local.exe', lambda x: 'tools/jdk/singlejar/singlejar.exe'),
('*singlejar_local', lambda x: 'tools/jdk/singlejar/singlejar'),
('*launcher.exe', lambda x: 'tools/launcher/launcher.exe'),
('*def_parser.exe', lambda x: 'tools/def_parser/def_parser.exe'),
('*ijar.exe', lambda x: 'tools/jdk/ijar/ijar.exe'),
('*ijar', lambda x: 'tools/jdk/ijar/ijar'),
('*zipper.exe', lambda x: 'tools/zip/zipper/zipper.exe'),
('*zipper', lambda x: 'tools/zip/zipper/zipper'),
('*src/objc_tools/*',
lambda x: 'tools/objc/precomp_' + os.path.basename(x)),
('*xcode*StdRedirect.dylib', lambda x: 'tools/objc/StdRedirect.dylib'),
('*xcode*make_hashed_objlist.py',
lambda x: 'tools/objc/make_hashed_objlist.py'),
('*xcode*realpath', lambda x: 'tools/objc/realpath'),
('*xcode*xcode-locator', lambda x: 'tools/objc/xcode-locator'),
('*src/tools/xcode/*.sh', lambda x: 'tools/objc/' + os.path.basename(x)),
('*src/tools/xcode/*',
lambda x: 'tools/objc/' + os.path.basename(x) + '.sh'),
('*external/openjdk_*/file/*.tar.gz', lambda x: 'jdk.tar.gz'),
('*external/openjdk_*/file/*.zip', lambda x: 'jdk.zip'),
('*src/minimal_jdk.tar.gz', lambda x: 'jdk.tar.gz'),
('*src/minimal_jdk.zip', lambda x: 'jdk.zip'),
('*', lambda x: re.sub(r'^.*bazel-out/[^/]*/bin/', '', x, count=1)),
]
def get_output_path(path):
for pattern, transformer in output_paths:
if fnmatch.fnmatch(path.replace('\\', '/'), pattern):
# BUILD.tools are stored as BUILD files.
return transformer(path).replace('/BUILD.tools', '/BUILD')
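# Illustrative mappings (hypothetical input paths, not from a real build):
#   get_output_path('bazel-out/k8-fastbuild/bin/tools/zip/zipper/zipper')
#     -> 'tools/zip/zipper/zipper'   (matched by the '*zipper' pattern)
#   get_output_path('bazel-out/k8-fastbuild/bin/some/pkg/data.txt')
#     -> 'some/pkg/data.txt'         (falls through to the catch-all '*' rule)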
def get_input_files(argsfile):
"""Returns a sorted list of tuples (archive_file, input_file).
This describes the files that should be put into the generated archive.
Args:
argsfile: The file containing the list of input files.
"""
with open(argsfile, 'r') as f:
input_files = set(x.strip() for x in f.readlines())
result = {}
for input_file in input_files:
# If we have both a BUILD and a BUILD.tools file, take the latter only.
if (os.path.basename(input_file) == 'BUILD' and
input_file + '.tools' in input_files):
continue
# This gives us the same behavior as the older bash version of this
# tool: If two input files map to the same output files, the one that
# comes last in the list of input files overrides all earlier ones.
result[get_output_path(input_file)] = input_file
# By sorting the file list, the resulting ZIP file will be deterministic
# and reproducible.
return sorted(result.items())
def copy_jdk_into_archive(output_zip, archive_file, input_file):
"""Extract the JDK and adds it to the archive under jdk/*."""
def _replace_dirname(filename):
# Rename the first folder to 'jdk', because Bazel looks for a
# bundled JDK in the embedded tools using that folder name.
return 'jdk/' + '/'.join(filename.split('/')[1:])
# The JDK is special - it's extracted instead of copied.
if archive_file.endswith('.tar.gz'):
copy_tar_to_zip(output_zip, input_file, _replace_dirname)
elif archive_file.endswith('.zip'):
copy_zip_to_zip(output_zip, input_file, _replace_dirname)
def main():
output_zip = os.path.join(os.getcwd(), sys.argv[1])
input_files = get_input_files(sys.argv[2])
# Copy all the input_files into output_zip.
# Adding contextlib.closing to be python 2.6 (for centos 6.7) compatible
with contextlib.closing(
zipfile.ZipFile(output_zip, 'w', zipfile.ZIP_DEFLATED)) as output_zip:
zipinfo = zipfile.ZipInfo('WORKSPACE', (1980, 1, 1, 0, 0, 0))
zipinfo.external_attr = 0o644 << 16
output_zip.writestr(zipinfo, 'workspace(name = "bazel_tools")\n')
for archive_file, input_file in input_files:
if os.path.basename(archive_file) in ('jdk.tar.gz', 'jdk.zip'):
copy_jdk_into_archive(output_zip, archive_file, input_file)
else:
zipinfo = zipfile.ZipInfo(archive_file, (1980, 1, 1, 0, 0, 0))
zipinfo.external_attr = 0o755 << 16 if is_executable(
input_file) else 0o644 << 16
zipinfo.compress_type = zipfile.ZIP_DEFLATED
with open(input_file, 'rb') as f:
output_zip.writestr(zipinfo, f.read())
if __name__ == '__main__':
main()
|
py | 1a4dbb4b87052f7aa70c84fe83f561f7aa0fb52c | import paths
import os
#os.environ['COPY_EDIT_DATA']=paths.data_dir
os.environ['CUDA_VISIBLE_DEVICES']='0'
from gtd.utils import Config
from editor_code.copy_editor.retrieve_edit_run import RetrieveEditTrainingRuns
print os.environ['COPY_EDIT_DATA']
import sys
#no-profile
profile=False
runs = RetrieveEditTrainingRuns()
config_file = sys.argv[1]
config = Config.from_file('editor_code/configs/editor/'+config_file)
run = runs.new(config)
if profile:
from gtd.chrono import Profiling, Profiler
profiler = Profiler.default()
import editor_code.copy_editor.retriever
import editor_code.copy_editor.editor
profiler.add_module(editor_code.copy_editor.editor)
profiler.add_module(editor_code.copy_editor.retriever)
Profiling.start()
run.train()
Profiler.report(profiler) # prints out report
else:
run.train()
|
py | 1a4dbb630d0fde19193624b3fe8c6ad588674262 | # Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for checking that required Python packages are installed."""
from collections import deque
import os
import pkg_resources
from perfkitbenchmarker import errors
# Path of the root of the current git branch.
_BRANCH_ROOT_DIR = os.path.dirname(os.path.dirname(__file__))
def _CheckRequirements(requirements_file_path):
"""Checks that all package requirements specified in a file are met.
Args:
requirements_file_path: string. Path to a pip requirements file.
"""
with open(requirements_file_path, 'rb') as fp:
requirements_to_check = [(requirements_file_path, deque(fp.readlines()))]
try:
while requirements_to_check:
file_path, lines = requirements_to_check.pop()
while lines:
line = lines.popleft().strip()
if line.startswith('-r'):
requirements_to_check.append((file_path, lines))
file_path = os.path.join(os.path.dirname(file_path), line[2:])
with open(file_path, 'rb') as fp:
lines = deque(fp.readlines())
elif line:
pkg_resources.require(line)
except (pkg_resources.DistributionNotFound,
pkg_resources.VersionConflict) as e:
# In newer versions of setuptools, these exception classes have a report
# method that provides a readable description of the error.
report = getattr(e, 'report', None)
err_msg = report() if report else str(e)
raise errors.Setup.PythonPackageRequirementUnfulfilled(
'A Python package requirement was not met while checking "{path}": '
'{msg}{linesep}To install required packages, execute the following '
'command:{linesep}pip install -r "{path}"{linesep}To bypass package '
'requirement checks, run PerfKit Benchmarker with the '
'--ignore_package_requirements flag.'.format(
linesep=os.linesep, msg=err_msg, path=requirements_file_path))
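# Sketch of the traversal above with hypothetical file contents (package names
# are examples only):
#   requirements.txt:        '-rrequirements-inner.txt' followed by 'six>=1.10'
#   requirements-inner.txt:  'setuptools'
# The '-r' line parks the rest of requirements.txt on requirements_to_check,
# switches to requirements-inner.txt, checks 'setuptools', then resumes with
# 'six>=1.10'; the first unmet requirement raises
# errors.Setup.PythonPackageRequirementUnfulfilled with the pip command to run.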
def CheckBasicRequirements():
"""Checks that all basic package requirements are met.
The basic requirements include packages used by modules that are imported
regardless of the specified cloud providers. The list of required packages
and versions is found in the requirements.txt file in the git branch's root
directory. If such a file does not exist, then the requirements check is
skipped.
"""
requirements_file_path = os.path.join(_BRANCH_ROOT_DIR, 'requirements.txt')
if os.path.isfile(requirements_file_path):
_CheckRequirements(requirements_file_path)
def CheckProviderRequirements(provider):
"""Checks that all provider-specific requirements are met.
The provider-specific requirements include packages used by modules that are
imported when using a particular cloud provider. The list of required packages
is found in the requirements-<provider>.txt file in the git branch's root
directory. If such a file does not exist, then no additional requirements are
necessary.
Args:
provider: string. Lowercase name of the cloud provider (e.g. 'gcp').
"""
requirements_file_path = os.path.join(
_BRANCH_ROOT_DIR, 'perfkitbenchmarker', 'providers', provider,
'requirements.txt')
if os.path.isfile(requirements_file_path):
_CheckRequirements(requirements_file_path)
|
py | 1a4dbbb5cb2767b882cc6367a72e8e1fae0c4571 | """Load a layout in Blender."""
from pathlib import Path
from pprint import pformat
from typing import Dict, Optional
import bpy
import json
from avalon import api
from avalon.blender.pipeline import AVALON_CONTAINERS
from avalon.blender.pipeline import AVALON_CONTAINER_ID
from avalon.blender.pipeline import AVALON_PROPERTY
from avalon.blender.pipeline import AVALON_INSTANCES
from openpype.hosts.blender.api import plugin
class JsonLayoutLoader(plugin.AssetLoader):
"""Load layout published from Unreal."""
families = ["layout"]
representations = ["json"]
label = "Load Layout"
icon = "code-fork"
color = "orange"
animation_creator_name = "CreateAnimation"
def _remove(self, asset_group):
objects = list(asset_group.children)
for obj in objects:
api.remove(obj.get(AVALON_PROPERTY))
def _remove_animation_instances(self, asset_group):
instances = bpy.data.collections.get(AVALON_INSTANCES)
if instances:
for obj in list(asset_group.children):
anim_collection = instances.children.get(
obj.name + "_animation")
if anim_collection:
bpy.data.collections.remove(anim_collection)
def _get_loader(self, loaders, family):
name = ""
if family == 'rig':
name = "BlendRigLoader"
elif family == 'model':
name = "BlendModelLoader"
if name == "":
return None
for loader in loaders:
if loader.__name__ == name:
return loader
return None
def _process(self, libpath, asset, asset_group, actions):
bpy.ops.object.select_all(action='DESELECT')
with open(libpath, "r") as fp:
data = json.load(fp)
all_loaders = api.discover(api.Loader)
for element in data:
reference = element.get('reference')
family = element.get('family')
loaders = api.loaders_from_representation(all_loaders, reference)
loader = self._get_loader(loaders, family)
if not loader:
continue
instance_name = element.get('instance_name')
action = None
if actions:
action = actions.get(instance_name, None)
options = {
'parent': asset_group,
'transform': element.get('transform'),
'action': action,
'create_animation': True if family == 'rig' else False,
'animation_asset': asset
}
# This should return the loaded asset, but the load call will be
# added to the queue to run in the Blender main thread, so
# at this time it will not return anything. The assets will be
# loaded in the next Blender cycle, so we use the options to
# set the transform, parent and assign the action, if there is one.
api.load(
loader,
reference,
namespace=instance_name,
options=options
)
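# A hypothetical example of one element in the layout JSON consumed above
# (keys mirror the .get() calls; the values shown are illustrative only):
# {
#     "reference": "<representation id>",
#     "family": "rig",
#     "instance_name": "character_01",
#     "transform": {...}
# }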
def process_asset(self,
context: dict,
name: str,
namespace: Optional[str] = None,
options: Optional[Dict] = None):
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
asset_name = plugin.asset_name(asset, subset)
unique_number = plugin.get_unique_number(asset, subset)
group_name = plugin.asset_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
if not avalon_container:
avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
bpy.context.scene.collection.children.link(avalon_container)
asset_group = bpy.data.objects.new(group_name, object_data=None)
asset_group.empty_display_type = 'SINGLE_ARROW'
avalon_container.objects.link(asset_group)
self._process(libpath, asset, asset_group, None)
bpy.context.scene.collection.objects.link(asset_group)
asset_group[AVALON_PROPERTY] = {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": name,
"namespace": namespace or '',
"loader": str(self.__class__.__name__),
"representation": str(context["representation"]["_id"]),
"libpath": libpath,
"asset_name": asset_name,
"parent": str(context["representation"]["parent"]),
"family": context["representation"]["context"]["family"],
"objectName": group_name
}
self[:] = asset_group.children
return asset_group.children
def exec_update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all objects of the current collection, load the new
ones and add them to the collection.
If the objects of the collection are used in another collection they
will not be removed, only unlinked. Normally this should not be the
case though.
"""
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
libpath = Path(api.get_representation_path(representation))
extension = libpath.suffix.lower()
self.log.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert asset_group, (
f"The asset is not loaded: {container['objectName']}"
)
assert libpath, (
"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
metadata = asset_group.get(AVALON_PROPERTY)
group_libpath = metadata["libpath"]
normalized_group_libpath = (
str(Path(bpy.path.abspath(group_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
self.log.debug(
"normalized_group_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_group_libpath,
normalized_libpath,
)
if normalized_group_libpath == normalized_libpath:
self.log.info("Library already loaded, not updating...")
return
actions = {}
for obj in asset_group.children:
obj_meta = obj.get(AVALON_PROPERTY)
if obj_meta.get('family') == 'rig':
rig = None
for child in obj.children:
if child.type == 'ARMATURE':
rig = child
break
if not rig:
raise Exception("No armature in the rig asset group.")
if rig.animation_data and rig.animation_data.action:
namespace = obj_meta.get('namespace')
actions[namespace] = rig.animation_data.action
mat = asset_group.matrix_basis.copy()
self._remove_animation_instances(asset_group)
self._remove(asset_group)
self._process(str(libpath), asset_group, actions)
asset_group.matrix_basis = mat
metadata["libpath"] = str(libpath)
metadata["representation"] = str(representation["_id"])
def exec_remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.
Arguments:
container (openpype:container-1.0): Container to remove,
from `host.ls()`.
Returns:
bool: Whether the container was deleted.
"""
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
if not asset_group:
return False
self._remove_animation_instances(asset_group)
self._remove(asset_group)
bpy.data.objects.remove(asset_group)
return True
|
py | 1a4dbc5992d475613307047a90d2769698339b76 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-04-15 07:06
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('goods', '0002_auto_20190410_1511'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('users', '0004_user_default_address'),
]
operations = [
migrations.CreateModel(
name='OrderGoods',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('count', models.IntegerField(default=1, verbose_name='数量')),
('price', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='单价')),
('comment', models.TextField(default='', verbose_name='评价信息')),
('score', models.SmallIntegerField(choices=[(0, '0分'), (1, '20分'), (2, '40分'), (3, '60分'), (4, '80分'), (5, '100分')], default=5, verbose_name='满意度评分')),
('is_anonymous', models.BooleanField(default=False, verbose_name='是否匿名评价')),
('is_commented', models.BooleanField(default=False, verbose_name='是否评价完成')),
],
options={
'verbose_name': '订单商品',
'verbose_name_plural': '订单商品',
'db_table': 'tb_order_goods',
},
),
migrations.CreateModel(
name='OrderInfo',
fields=[
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('order_id', models.CharField(max_length=64, primary_key=True, serialize=False, verbose_name='订单编号')),
('total_count', models.IntegerField(default=1, verbose_name='商品总数')),
('total_amount', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='实付款')),
('freight', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='运费')),
('pay_method', models.SmallIntegerField(choices=[(1, '货到付款'), (2, '支付宝')], default=1, verbose_name='支付方式')),
('status', models.SmallIntegerField(choices=[(1, '待支付'), (2, '待发货'), (3, '待收货'), (4, '待评价'), (5, '已完成'), (6, '已取消')], default=1, verbose_name='订单状态')),
('address', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='users.Address', verbose_name='收货地址')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, verbose_name='下单用户')),
],
options={
'verbose_name': '订单基本信息',
'verbose_name_plural': '订单基本信息',
'db_table': 'tb_order_info',
},
),
migrations.AddField(
model_name='ordergoods',
name='order',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='skus', to='orders.OrderInfo', verbose_name='订单'),
),
migrations.AddField(
model_name='ordergoods',
name='sku',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='goods.SKU', verbose_name='订单商品'),
),
]
|
py | 1a4dbc65880cde202f7a2e2b66470df24d1b7676 | # Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Numpy implementations of `tf.signal` functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow_probability.python.internal.backend.numpy import _utils as utils
__all__ = [
'fft',
'fft2d',
'fft3d',
'ifft',
'ifft2d',
'ifft3d',
'irfft',
'irfft2d',
'irfft3d',
'rfft',
'rfft2d',
'rfft3d',
]
fft = utils.copy_docstring(
'tf.signal.fft',
lambda input, name=None: np.fft.fftn(input, axes=[-1]))
fft2d = utils.copy_docstring(
'tf.signal.fft2d',
lambda input, name=None: np.fft.fftn(input, axes=[-2, -1]))
fft3d = utils.copy_docstring(
'tf.signal.fft3d',
lambda input, name=None: np.fft.fftn(input, axes=[-3, -2, -1]))
ifft = utils.copy_docstring(
'tf.signal.ifft',
lambda input, name=None: np.fft.ifftn(input, axes=[-1]))
ifft2d = utils.copy_docstring(
'tf.signal.ifft2d',
lambda input, name=None: np.fft.ifftn(input, axes=[-2, -1]))
ifft3d = utils.copy_docstring(
'tf.signal.ifft3d',
lambda input, name=None: np.fft.ifftn(input, axes=[-3, -2, -1]))
rfft = utils.copy_docstring(
'tf.signal.rfft',
lambda input, fft_length=None, name=None: np.fft.rfftn( # pylint:disable=g-long-lambda
input, s=fft_length, axes=[-1]))
rfft2d = utils.copy_docstring(
'tf.signal.rfft2d',
lambda input, fft_length=None, name=None: np.fft.rfftn( # pylint:disable=g-long-lambda
input, s=fft_length, axes=[-2, -1]))
rfft3d = utils.copy_docstring(
'tf.signal.rfft3d',
lambda input, fft_length=None, name=None: np.fft.rfftn( # pylint:disable=g-long-lambda
input, s=fft_length, axes=[-3, -2, -1]))
irfft = utils.copy_docstring(
'tf.signal.irfft',
lambda input, fft_length=None, name=None: np.fft.irfftn( # pylint:disable=g-long-lambda
input, s=fft_length, axes=[-1]))
irfft2d = utils.copy_docstring(
'tf.signal.irfft2d',
lambda input, fft_length=None, name=None: np.fft.irfftn( # pylint:disable=g-long-lambda
input, s=fft_length, axes=[-2, -1]))
irfft3d = utils.copy_docstring(
'tf.signal.irfft3d',
lambda input, fft_length=None, name=None: np.fft.irfftn( # pylint:disable=g-long-lambda
input, s=fft_length, axes=[-3, -2, -1]))
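# Usage sketch (illustrative; NumPy only, no TensorFlow required). The wrappers
# follow the tf.signal calling convention and act on the innermost axes:
#   x = np.random.randn(4, 8) + 1j * np.random.randn(4, 8)
#   np.allclose(ifft(fft(x)), x)     # round trip along the last axis
#   rfft2d(np.ones((4, 8))).shape    # (4, 5), matching np.fft.rfftn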
|
py | 1a4dbe705a1d91ab1bbc4498b2c37047e6b1bbce | import re
import django.forms
def get_cleaned_text_file_content(uploaded_file):
"""Read uploaded file, try to fix up encoding to UTF-8 and
transform line endings into Unix style, then return the content as
a UTF-8 string. Errors are reported as
django.forms.ValidationError exceptions."""
if not uploaded_file:
return u""
if uploaded_file.size and uploaded_file.size > 10 * 1000 * 1000:
raise django.forms.ValidationError("Text file too large (size %s)." % uploaded_file.size)
content = "".join(uploaded_file.chunks())
# try to fixup encoding
import magic
if hasattr(magic, "open"):
m = magic.open(magic.MAGIC_MIME)
m.load()
filetype = m.buffer(content)
else:
m = magic.Magic()
m.cookie = magic.magic_open(magic.MAGIC_NONE | magic.MAGIC_MIME | magic.MAGIC_MIME_ENCODING)
magic.magic_load(m.cookie, None)
filetype = m.from_buffer(content)
if not filetype.startswith("text"):
raise django.forms.ValidationError("Uploaded file does not appear to be a text file.")
match = re.search(r"charset=([\w-]+)", filetype)
if not match:
raise django.forms.ValidationError("File has unknown encoding.")
encoding = match.group(1)
if "ascii" not in encoding:
try:
content = content.decode(encoding)
except Exception as e:
raise django.forms.ValidationError("Error decoding file (%s). Try submitting with UTF-8 encoding or remove non-ASCII characters." % str(e))
# turn line-endings into Unix style
content = content.replace("\r\n", "\n").replace("\r", "\n")
return content.encode("utf-8")
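# Usage sketch (assumes Django's SimpleUploadedFile test helper; the file name
# and contents are made up):
#   from django.core.files.uploadedfile import SimpleUploadedFile
#   f = SimpleUploadedFile("notes.txt", "line one\r\nline two\r\n")
#   get_cleaned_text_file_content(f)
#   # -> the same text with Unix line endings, UTF-8 encoded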
|
py | 1a4dbe7d338e63b0cbbd27bc05d2862e9bab27eb | from numpy.random import shuffle
class Card:
def __init__(self, value):
if value not in ['Fascist', 'Liberal']:
raise ValueError('Must be either a Liberal or Fascist card.')
self.value = value
self.public = False
def play(self):
self.public = True
def is_liberal(self):
return self.value == 'Liberal'
def is_fascist(self):
return self.value == 'Fascist'
def __eq__(self, other):
try:
return self.value == other.value
except AttributeError:
return False
class LiberalCard(Card):
def __init__(self):
super().__init__('Liberal')
class FascistCard(Card):
def __init__(self):
super().__init__('Fascist')
class CardDeck:
def __init__(self, cards):
self.cards = cards
def add_card(self, card):
self.cards.append(card)
def remove_card(self):
return self.cards.pop()
def shuffle_cards(self):
shuffle(self.cards)
class DrawDeck(CardDeck):
def __init__(self, number_of_fascists=11, number_of_liberals=6):
libs = [LiberalCard()] * number_of_liberals
fascists = [FascistCard()] * number_of_fascists
cards = libs + fascists
super().__init__(cards)
self.shuffle_cards()
def top_deck(self):
return super().remove_card()
def top(self, n):
return self.cards[-n:]
def remove_card(self):
if len(self.cards) <= 2:
return 'Reshuffle Condition'
else:
return super().remove_card()
def __add__(self, other):
if isinstance(other, DiscardDeck):
other.shuffle_cards()
self.cards += other.cards
class DiscardDeck(CardDeck):
pass
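# Sketch of how a caller might react to the 'Reshuffle Condition' sentinel
# returned by DrawDeck.remove_card (this helper is illustrative, not part of
# the original module):
def reshuffle_discards(draw_deck, discard_deck):
    draw_deck + discard_deck    # DrawDeck.__add__ shuffles the discards and appends them
    discard_deck.cards = []     # the discard pile starts over empty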
if __name__ == '__main__':
dd = DrawDeck(20, 10)
print(dd.top(3))
print(dd.remove_card())
print(dd.top(3))
print(dd.add_card(dd.remove_card()))
|
py | 1a4dbf02a96ff4bc00187fa6e75a13c809b77e17 | #!/usr/bin/python3
import spidev
import time
tx_array = [0]*512
# Split an integer input into a two byte array to send via SPI
def write_pot(input):
print(input)
msb = input >> 8
lsb = input & 0xFF
print(spi.xfer([msb,lsb,msb,lsb]))
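# Worked example (illustrative): write_pot(0x555) splits the value into
# msb = 0x05 and lsb = 0x55, so spi.xfer sends [0x05, 0x55, 0x05, 0x55],
# i.e. the same 16-bit word transmitted twice, big-endian.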
if __name__ == '__main__':
spi = spidev.SpiDev()
ret = spi.open(0, 0)
print("Spi.open = ", ret)
spi.max_speed_hz = 30000
spi.mode = 0
print("Started SPIDEV = ", spi)
data = 0x555
while True:
# print("Hello, I'm MMRPi-Hardware Energomera Library")
time.sleep(0.5)
data = data + 1
write_pot(data)
# break
|
py | 1a4dc0c863f29e9a2e2bf88b841e7a30b452e5f3 |
safety_hotline_meta = {
'attributes': {
'primary': {
'field': 'description',
'name': 'Description',
},
'secondary': {
'field': None,
'name': None,
},
},
'dates': {
'date_attribute': 'date_created',
'date_granularity': 'year',
'default_date_filter': '2017',
'min_date': '2008',
'max_date': '2018'
},
}
crash_meta = {
'attributes': {
'primary': {
'field': 'crash_dt',
'name': 'Crash Date',
},
'secondary': {
'field': None,
'name': None,
},
},
'dates': {
'date_attribute': 'crash_dt',
'date_granularity': 'year',
'default_date_filter': '2014',
'min_date': '2004',
'max_date': '2014'
},
}
block_change_meta = {
'attributes': {
'primary': {
'field': 'stops_pct_change',
'name': 'Ridership Change from 2009 to 2017',
'visualization': {
'type': 'Text',
'comparison_value': None,
'comparison_name': None,
},
},
'secondary': {
'field': None,
'name': None,
},
},
'dates': {
'date_attribute': None,
'date_granularity': None,
'default_date_filter': '2017',
'min_date': None,
'max_date': None
},
}
route_change_meta = {
'attributes': {
'primary': {
'field': 'pct_change',
'name': 'Ridership Change from 2009 to 2017',
},
'secondary': {
'field': None,
'name': None,
},
},
'dates': {
'date_attribute': None,
'date_granularity': None,
'default_date_filter': '2017',
'min_date': None,
'max_date': None
},
}
sensors_meta = {
'attributes': {
'primary': {
'field': None,
'name': None,
},
'secondary': {
'field': None,
'name': None,
},
},
'dates': {
'date_attribute': None,
'date_granularity': None,
'default_date_filter': '2018',
'min_date': None,
'max_date': None
},
}
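# Minimal consumer sketch (illustrative; no consumer code ships in this file):
def primary_field_and_default_date(meta):
    """Return the primary attribute field and the default date filter."""
    return (meta['attributes']['primary']['field'],
            meta['dates']['default_date_filter'])
# e.g. primary_field_and_default_date(crash_meta) -> ('crash_dt', '2014')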
|
py | 1a4dc0ed40e0f9a9a400f39481d67c44613f9b9c | import src.sudoku_solver as sudoku_solver
from src.sudoku import Sudoku
correct_sudoku = Sudoku([[9, 5, 7, 6, 1, 3, 2, 8, 4], [4, 8, 3, 2, 5, 7, 1, 9, 6], [6, 1, 2, 8, 4, 9, 5, 3, 7],
[1, 7, 8, 3, 6, 4, 9, 5, 2], [5, 2, 4, 9, 7, 1, 3, 6, 8], [3, 6, 9, 5, 2, 8, 7, 4, 1],
[8, 4, 5, 7, 9, 2, 6, 1, 3], [2, 9, 1, 4, 3, 6, 8, 7, 5], [7, 3, 6, 1, 8, 5, 4, 2, 9]])
starting_sudoku = Sudoku([[0, 0, 0, 0, 0, 0, 2, 0, 0], [0, 8, 0, 0, 0, 7, 0, 9, 0], [6, 0, 2, 0, 0, 0, 5, 0, 0],
[0, 7, 0, 0, 6, 0, 0, 0, 0], [0, 0, 0, 9, 0, 1, 0, 0, 0], [0, 0, 0, 0, 2, 0, 0, 4, 0],
[0, 0, 5, 0, 0, 0, 6, 0, 3], [0, 9, 0, 4, 0, 0, 0, 7, 0], [0, 0, 6, 0, 0, 0, 0, 0, 0]])
starting_sudoku_current_cell_test = Sudoku([[1, 3, 4, 5, 6, 7, 2, 0, 0], [0, 8, 0, 0, 0, 7, 0, 9, 0],
[6, 0, 2, 0, 0, 0, 5, 0, 0],
[0, 7, 0, 0, 6, 0, 0, 0, 0], [0, 0, 0, 9, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 2, 0, 0, 4, 0],
[0, 0, 5, 0, 0, 0, 6, 0, 3], [0, 9, 0, 4, 0, 0, 0, 7, 0],
[0, 0, 6, 0, 0, 0, 0, 0, 0]])
starting_sudoku_current_cell_test2 = Sudoku([[1, 1, 1, 1, 1, 1, 2, 1, 1], [1, 8, 1, 1, 1, 7, 1, 9, 1],
[6, 0, 2, 0, 0, 0, 5, 0, 0], [0, 7, 0, 0, 6, 0, 0, 0, 0],
[0, 0, 0, 9, 0, 1, 0, 0, 0], [0, 0, 0, 0, 2, 0, 0, 4, 0],
[0, 0, 5, 0, 0, 0, 6, 0, 3], [0, 9, 0, 4, 0, 0, 0, 7, 0],
[0, 0, 6, 0, 0, 0, 0, 0, 0]])
|
py | 1a4dc141fbbc30e5b846ecb096f89e5a96de61fb | #cfgfactory.py
import utility
import numpy.random
import cfg
import logging
class CFGFactory:
def __init__(self):
self.number_terminals = 100
self.number_nonterminals = 5
self.binary_rules = 40
self.lexical_rules = 100
self.strict_cnf = True
def generate_nonterminals(self):
nonterminals = [ 'S']
for i in range(1,self.number_nonterminals):
nonterminals.append("NT" + str(i))
return nonterminals
def sample_uniform(self, lp=0.5,bp = 0.5):
"""
Sample each production independently: keep a lexical production with probability lp and a binary production with probability bp (both default to 0.5).
"""
lexicon = list(utility.generate_lexicon(self.number_terminals))
#print("Lexicon",lexicon,self.number_terminals)
nonterminals = self.generate_nonterminals()
productions = []
for a in nonterminals:
for b in lexicon:
if numpy.random.random() < lp:
productions.append((a,b))
for a in nonterminals:
for b in nonterminals[1:]:
for c in nonterminals[1:]:
if numpy.random.random() < bp:
productions.append((a,b,c))
my_cfg = cfg.CFG()
my_cfg.start = nonterminals[0]
my_cfg.nonterminals = set(nonterminals)
my_cfg.terminals = set(lexicon)
my_cfg.productions = productions
return my_cfg
def sample_full(self):
lexicon = list(utility.generate_lexicon(self.number_terminals))
#print("Lexicon",lexicon,self.number_terminals)
nonterminals = self.generate_nonterminals()
lprods = set()
bprods= set()
for a in nonterminals:
for b in lexicon:
lprods.add((a,b))
for a in nonterminals:
for b in nonterminals[1:]:
for c in nonterminals[1:]:
bprods.add((a,b,c))
my_cfg = cfg.CFG()
my_cfg.start = nonterminals[0]
my_cfg.nonterminals = set(nonterminals)
my_cfg.terminals = set(lexicon)
my_cfg.productions = lprods | bprods
#print(my_cfg.terminals)
return my_cfg
def sample_trim(self):
"""
Sample a raw grammar and then trim it.
Return the trimmed grammar.
If the trimmed language is empty, raise a ValueError.
"""
my_cfg = self.sample_raw()
#print([ prod for prod in my_cfg.productions if len(prod) == 2 and prod[0] == 'S'])
logging.info("CFG nominally has %d nonterminals, %d terminals, %d binary_rules and %d lexical rules", self.number_nonterminals,self.number_terminals,self.binary_rules,self.lexical_rules)
ts = my_cfg.compute_trim_set()
if len(ts) == 0:
# empty language
raise ValueError("Empty language")
prods = my_cfg.compute_usable_productions(ts)
terminals = set()
for prod in prods:
if len(prod) == 2:
terminals.add(prod[1])
tcfg = cfg.CFG()
tcfg.start = my_cfg.start
tcfg.terminals = terminals
tcfg.nonterminals = ts
tcfg.productions = set(prods)
logging.info("Final CFG has %d nonterminals, %d terminals, %d binary_rules and %d lexical rules",
len(tcfg.nonterminals), len(tcfg.terminals),
len([prod for prod in tcfg.productions if len(prod) == 3]),
len([prod for prod in tcfg.productions if len(prod) == 2]))
return tcfg
def sample_raw(self):
"""
return a CFG
"""
lexicon = list(utility.generate_lexicon(self.number_terminals))
#DEBUGGING
lexicon.sort()
print(lexicon[0],lexicon[-1])
nonterminals = self.generate_nonterminals()
lprods = set()
bprods= set()
lexicon_size = len(lexicon)
while len(lprods) < self.lexical_rules:
lhs = numpy.random.choice(nonterminals)
rhs = lexicon[numpy.random.choice(range(lexicon_size))]
lprods.add( (lhs,rhs))
print(lhs,rhs)
while len(bprods) < self.binary_rules:
if self.strict_cnf:
a = numpy.random.choice(nonterminals)
b,c = numpy.random.choice(nonterminals[1:],size=2)
else:
a,b,c = numpy.random.choice(nonterminals,size=3)
bprods.add( (a,b,c))
print(a,b,c)
my_cfg = cfg.CFG()
my_cfg.start = nonterminals[0]
my_cfg.nonterminals = set(nonterminals)
my_cfg.terminals = set(lexicon)
my_cfg.productions = lprods | bprods
return my_cfg
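# Typical usage (sketch; the attribute values are examples):
#   factory = CFGFactory()
#   factory.number_nonterminals = 10
#   factory.binary_rules = 60
#   grammar = factory.sample_trim()  # raises ValueError if the language is empty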
|
py | 1a4dc15c55ac124709bf4a028720cbb4a186984a | import unittest
from unittest import mock
from flumine.markets.blotter import Blotter
from flumine.order.order import OrderStatus
from flumine.order.ordertype import MarketOnCloseOrder, LimitOrder, LimitOnCloseOrder
class BlotterTest(unittest.TestCase):
def setUp(self) -> None:
self.blotter = Blotter("1.23")
def test_init(self):
self.assertEqual(self.blotter.market_id, "1.23")
self.assertFalse(self.blotter.active)
self.assertEqual(self.blotter._orders, {})
self.assertEqual(self.blotter._live_orders, [])
self.assertEqual(self.blotter._trades, {})
self.assertEqual(self.blotter._strategy_orders, {})
self.assertEqual(self.blotter._strategy_selection_orders, {})
def test_strategy_orders(self):
mock_order_one = mock.Mock(
lookup=(1, 2, 3), status=OrderStatus.EXECUTABLE, size_matched=1
)
mock_order_one.trade.strategy = 69
self.blotter["12345"] = mock_order_one
mock_order_two = mock.Mock(
lookup=(1, 2, 3), status=OrderStatus.EXECUTION_COMPLETE, size_matched=0
)
mock_order_two.trade.strategy = 69
self.blotter["12345"] = mock_order_two
mock_order_three = mock.Mock(
lookup=(1, 2, 3), status=OrderStatus.EXECUTION_COMPLETE, size_matched=1
)
mock_order_three.trade.strategy = 69
self.blotter["12345"] = mock_order_three
self.assertEqual(self.blotter.strategy_orders(12), [])
self.assertEqual(
self.blotter.strategy_orders(69),
[mock_order_one, mock_order_two, mock_order_three],
)
self.assertEqual(
self.blotter.strategy_orders(
69, order_status=OrderStatus.EXECUTION_COMPLETE
),
[mock_order_two, mock_order_three],
)
self.assertEqual(
self.blotter.strategy_orders(
69, order_status=OrderStatus.EXECUTION_COMPLETE, matched_only=True
),
[mock_order_three],
)
def test_strategy_selection_orders(self):
mock_order_one = mock.Mock(
lookup=(1, 2, 3), status=OrderStatus.EXECUTABLE, size_matched=1
)
mock_order_one.trade.strategy = 69
self.blotter["12345"] = mock_order_one
mock_order_two = mock.Mock(
lookup=(1, 2, 3), status=OrderStatus.EXECUTION_COMPLETE, size_matched=0
)
mock_order_two.trade.strategy = 69
self.blotter["12345"] = mock_order_two
mock_order_three = mock.Mock(
lookup=(1, 2, 3), status=OrderStatus.EXECUTION_COMPLETE, size_matched=1
)
mock_order_three.trade.strategy = 69
self.blotter["12345"] = mock_order_three
self.assertEqual(self.blotter.strategy_selection_orders(12, 2, 3), [])
self.assertEqual(
self.blotter.strategy_selection_orders(69, 2, 3),
[mock_order_one, mock_order_two, mock_order_three],
)
self.assertEqual(
self.blotter.strategy_selection_orders(
69, 2, 3, order_status=OrderStatus.EXECUTION_COMPLETE
),
[mock_order_two, mock_order_three],
)
self.assertEqual(
self.blotter.strategy_selection_orders(
69, 2, 3, order_status=OrderStatus.EXECUTION_COMPLETE, matched_only=True
),
[mock_order_three],
)
def test_live_orders(self):
self.assertEqual(list(self.blotter.live_orders), [])
mock_order = mock.Mock(complete=False)
self.blotter._live_orders = [mock_order]
self.assertEqual(list(self.blotter.live_orders), [mock_order])
def test_has_live_orders(self):
self.assertFalse(self.blotter.has_live_orders)
self.blotter._live_orders = [mock.Mock()]
self.assertTrue(self.blotter.has_live_orders)
def test_process_closed_market(self):
mock_market_book = mock.Mock(number_of_winners=1)
mock_runner = mock.Mock(selection_id=123, handicap=0.0)
mock_market_book.runners = [mock_runner]
mock_order = mock.Mock(selection_id=123, handicap=0.0)
self.blotter._orders = {"12345": mock_order}
self.blotter.process_closed_market(mock_market_book)
self.assertEqual(mock_order.runner_status, mock_runner.status)
def test_process_cleared_orders(self):
mock_cleared_orders = mock.Mock()
mock_cleared_orders.orders = []
self.assertEqual(self.blotter.process_cleared_orders(mock_cleared_orders), [])
def test_selection_exposure(self):
"""
Check that selection_exposure returns the absolute worse loss
"""
def get_exposures(strategy, lookup):
if strategy == "strategy" and lookup == (1, 2, 3):
return {
"worst_possible_profit_on_win": -1.0,
"worst_possible_profit_on_lose": -2.0,
}
self.blotter.get_exposures = mock.Mock(side_effect=get_exposures)
result = self.blotter.selection_exposure("strategy", (1, 2, 3))
self.assertEqual(2.0, result)
def test_selection_exposure2(self):
"""
Check that selection_exposure returns zero if there is no risk of loss.
"""
def get_exposures(strategy, lookup):
if strategy == "strategy" and lookup == (1, 2, 3):
return {
"worst_possible_profit_on_win": 0.0,
"worst_possible_profit_on_lose": 1.0,
}
self.blotter.get_exposures = mock.Mock(side_effect=get_exposures)
result = self.blotter.selection_exposure("strategy", (1, 2, 3))
self.assertEqual(0.0, result)
def test_get_exposures(self):
mock_strategy = mock.Mock()
mock_trade = mock.Mock(strategy=mock_strategy)
mock_order = mock.Mock(
trade=mock_trade,
lookup=(self.blotter.market_id, 123, 0),
side="BACK",
average_price_matched=5.6,
size_matched=2.0,
size_remaining=0.0,
order_type=LimitOrder(price=5.6, size=2.0),
)
self.blotter["12345"] = mock_order
self.assertEqual(
self.blotter.get_exposures(mock_strategy, mock_order.lookup),
{
"matched_profit_if_lose": -2.0,
"matched_profit_if_win": 9.2,
"worst_possible_profit_on_lose": -2.0,
"worst_possible_profit_on_win": 9.2,
"worst_potential_unmatched_profit_if_lose": 0.0,
"worst_potential_unmatched_profit_if_win": 0.0,
},
)
def test_get_exposures_with_exclusion(self):
mock_strategy = mock.Mock()
mock_trade = mock.Mock(strategy=mock_strategy)
mock_order = mock.Mock(
trade=mock_trade,
lookup=(self.blotter.market_id, 123, 0),
side="BACK",
average_price_matched=5.6,
size_matched=2.0,
size_remaining=0.0,
order_type=LimitOrder(price=5.6, size=2.0),
)
mock_order_excluded = mock.Mock(
trade=mock_trade,
lookup=(self.blotter.market_id, 123, 0),
side="BACK",
average_price_matched=5.6,
size_matched=2.0,
size_remaining=0.0,
order_type=LimitOrder(price=5.6, size=2.0),
)
self.blotter["12345"] = mock_order
self.blotter["67890"] = mock_order_excluded
self.assertEqual(
self.blotter.get_exposures(
mock_strategy, mock_order.lookup, exclusion=mock_order_excluded
),
{
"matched_profit_if_lose": -2.0,
"matched_profit_if_win": 9.2,
"worst_possible_profit_on_lose": -2.0,
"worst_possible_profit_on_win": 9.2,
"worst_potential_unmatched_profit_if_lose": 0.0,
"worst_potential_unmatched_profit_if_win": 0.0,
},
)
def test_get_exposures_value_error(self):
mock_strategy = mock.Mock()
mock_trade = mock.Mock(strategy=mock_strategy)
mock_order = mock.Mock(
trade=mock_trade,
lookup=(self.blotter.market_id, 123, 0),
side="BACK",
average_price_matched=5.6,
size_matched=2.0,
size_remaining=0.0,
order_type=mock.Mock(ORDER_TYPE="INVALID"),
)
self.blotter["12345"] = mock_order
with self.assertRaises(ValueError) as e:
self.blotter.get_exposures(mock_strategy, mock_order.lookup)
self.assertEqual("Unexpected order type: INVALID", e.exception.args[0])
def test_get_exposures_with_price_none(self):
"""
Check that get_exposures works if order.order_type.price is None.
If order.order_type.price is None, the controls will flag the order as a violation
and it won't be sent to the exchange, so there won't be any exposure and we can ignore it.
"""
mock_strategy = mock.Mock()
mock_trade = mock.Mock(strategy=mock_strategy)
lookup = (self.blotter.market_id, 123, 0)
mock_order1 = mock.Mock(
trade=mock_trade,
lookup=lookup,
side="BACK",
average_price_matched=5.6,
size_matched=2.0,
size_remaining=0.0,
order_type=LimitOrder(price=5.6, size=2.0),
)
mock_order2 = mock.Mock(
trade=mock_trade,
lookup=lookup,
side="LAY",
average_price_matched=5.6,
size_matched=0.0,
size_remaining=2.0,
order_type=LimitOrder(price=None, size=2.0),
)
self.blotter["12345"] = mock_order1
self.blotter["23456"] = mock_order2
self.assertEqual(
self.blotter.get_exposures(mock_strategy, lookup),
{
"matched_profit_if_lose": -2.0,
"matched_profit_if_win": 9.2,
"worst_possible_profit_on_lose": -2.0,
"worst_possible_profit_on_win": 9.2,
"worst_potential_unmatched_profit_if_lose": 0.0,
"worst_potential_unmatched_profit_if_win": 0.0,
},
)
def test_get_exposures_no_match(self):
mock_strategy = mock.Mock()
mock_trade = mock.Mock(strategy=mock_strategy)
mock_order = mock.Mock(
trade=mock_trade,
lookup=(self.blotter.market_id, 123, 0),
side="BACK",
average_price_matched=5.6,
size_matched=0.0,
size_remaining=0.0,
order_type=LimitOrder(price=5.6, size=2.0),
)
self.blotter["12345"] = mock_order
self.assertEqual(
self.blotter.get_exposures(mock_strategy, mock_order.lookup),
{
"matched_profit_if_lose": 0.0,
"matched_profit_if_win": 0.0,
"worst_possible_profit_on_lose": 0.0,
"worst_possible_profit_on_win": 0.0,
"worst_potential_unmatched_profit_if_lose": 0.0,
"worst_potential_unmatched_profit_if_win": 0.0,
},
)
def test_get_exposures_from_unmatched_back(self):
mock_strategy = mock.Mock()
mock_trade = mock.Mock(strategy=mock_strategy)
mock_order = mock.Mock(
trade=mock_trade,
lookup=(self.blotter.market_id, 123, 0),
side="BACK",
average_price_matched=5.6,
size_matched=2.0,
size_remaining=2.0,
order_type=LimitOrder(price=6, size=4.0),
)
self.blotter["12345"] = mock_order
# On the win side, we have 2.0 * (5.6-1.0) = 9.2
# On the lose side, we have -2.0-2.0=-4.0
self.assertEqual(
self.blotter.get_exposures(mock_strategy, mock_order.lookup),
{
"matched_profit_if_lose": -2.0,
"matched_profit_if_win": 9.2,
"worst_possible_profit_on_lose": -4.0,
"worst_possible_profit_on_win": 9.2,
"worst_potential_unmatched_profit_if_lose": -2.0,
"worst_potential_unmatched_profit_if_win": 0,
},
)
def test_get_exposures_from_unmatched_lay(self):
mock_strategy = mock.Mock()
mock_trade = mock.Mock(strategy=mock_strategy)
mock_order = mock.Mock(
trade=mock_trade,
lookup=(self.blotter.market_id, 123, 0),
side="LAY",
average_price_matched=5.6,
size_matched=2.0,
size_remaining=2.0,
order_type=LimitOrder(price=6, size=4.0),
)
self.blotter["12345"] = mock_order
# On the win side, we have -2.0 * (5.6-1.0) -2.0 * (6.0-1.0) = -19.2
# On the lose side, we have 2.0 from size_matched
self.assertEqual(
self.blotter.get_exposures(mock_strategy, mock_order.lookup),
{
"matched_profit_if_lose": 2.0,
"matched_profit_if_win": -9.2,
"worst_possible_profit_on_lose": 2.0,
"worst_possible_profit_on_win": -19.2,
"worst_potential_unmatched_profit_if_lose": 0,
"worst_potential_unmatched_profit_if_win": -10.0,
},
)
def test_get_exposures_from_market_on_close_back(self):
mock_strategy = mock.Mock()
mock_trade = mock.Mock(strategy=mock_strategy)
mock_order = mock.Mock(
trade=mock_trade,
lookup=(self.blotter.market_id, 123, 0),
side="BACK",
order_type=MarketOnCloseOrder(liability=10.0),
)
self.blotter["12345"] = mock_order
self.assertEqual(
self.blotter.get_exposures(mock_strategy, mock_order.lookup),
{
"matched_profit_if_lose": 0.0,
"matched_profit_if_win": 0.0,
"worst_possible_profit_on_lose": -10.0,
"worst_possible_profit_on_win": 0.0,
"worst_potential_unmatched_profit_if_lose": 0.0,
"worst_potential_unmatched_profit_if_win": 0.0,
},
)
def test_get_exposures_from_market_on_close_lay(self):
mock_strategy = mock.Mock()
mock_trade = mock.Mock(strategy=mock_strategy)
mock_order = mock.Mock(
trade=mock_trade,
lookup=(self.blotter.market_id, 123, 0),
side="LAY",
order_type=MarketOnCloseOrder(liability=10.0),
)
self.blotter["12345"] = mock_order
self.assertEqual(
self.blotter.get_exposures(mock_strategy, mock_order.lookup),
{
"matched_profit_if_lose": 0.0,
"matched_profit_if_win": 0.0,
"worst_possible_profit_on_lose": 0.0,
"worst_possible_profit_on_win": -10.0,
"worst_potential_unmatched_profit_if_lose": 0.0,
"worst_potential_unmatched_profit_if_win": 0.0,
},
)
def test_get_exposures_from_limit_on_close_lay(self):
mock_strategy = mock.Mock()
mock_trade = mock.Mock(strategy=mock_strategy)
mock_order = mock.Mock(
trade=mock_trade,
lookup=(self.blotter.market_id, 123, 0),
side="LAY",
order_type=LimitOnCloseOrder(price=1.01, liability=10.0),
)
self.blotter["12345"] = mock_order
self.assertEqual(
self.blotter.get_exposures(mock_strategy, mock_order.lookup),
{
"matched_profit_if_lose": 0.0,
"matched_profit_if_win": 0.0,
"worst_possible_profit_on_lose": 0.0,
"worst_possible_profit_on_win": -10.0,
"worst_potential_unmatched_profit_if_lose": 0.0,
"worst_potential_unmatched_profit_if_win": 0.0,
},
)
def test_get_exposures_voided(self):
mock_strategy = mock.Mock()
mock_trade = mock.Mock(strategy=mock_strategy)
mock_order = mock.Mock(
trade=mock_trade,
lookup=(self.blotter.market_id, 123, 0),
side="BACK",
order_type=LimitOrder(price=5, size=10.0),
status=OrderStatus.VIOLATION,
)
self.blotter["12345"] = mock_order
self.assertEqual(
self.blotter.get_exposures(mock_strategy, mock_order.lookup),
{
"matched_profit_if_lose": 0.0,
"matched_profit_if_win": 0.0,
"worst_possible_profit_on_lose": 0.0,
"worst_possible_profit_on_win": 0.0,
"worst_potential_unmatched_profit_if_lose": 0.0,
"worst_potential_unmatched_profit_if_win": 0.0,
},
)
def test_complete_order(self):
self.blotter._live_orders = ["test"]
self.blotter.complete_order("test")
def test_has_trade(self):
self.assertFalse(self.blotter.has_trade("123"))
self.blotter._trades["123"].append(1)
self.assertTrue(self.blotter.has_trade("123"))
def test__contains(self):
self.blotter._orders = {"123": "test"}
self.assertIn("123", self.blotter)
self.assertNotIn("321", self.blotter)
def test__setitem(self):
mock_order = mock.Mock(lookup=(1, 2, 3))
self.blotter["123"] = mock_order
self.assertTrue(self.blotter.active)
self.assertEqual(self.blotter._orders, {"123": mock_order})
self.assertEqual(self.blotter._live_orders, [mock_order])
self.assertEqual(self.blotter._trades, {mock_order.trade.id: [mock_order]})
self.assertEqual(
self.blotter._strategy_orders, {mock_order.trade.strategy: [mock_order]}
)
self.assertEqual(
self.blotter._strategy_selection_orders,
{(mock_order.trade.strategy, 2, 3): [mock_order]},
)
def test__getitem(self):
self.blotter._orders = {"12345": "test", "54321": "test2"}
self.assertEqual(self.blotter["12345"], "test")
self.assertEqual(self.blotter["54321"], "test2")
def test__len(self):
self.blotter._orders = {"12345": "test", "54321": "test"}
self.assertEqual(len(self.blotter), 2)
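# Allows running this test module directly; tests are normally picked up by a
# test runner such as pytest or unittest discovery.
if __name__ == "__main__":
    unittest.main()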
|
py | 1a4dc3ecee9d04248d3c8c1f2332acac45eac76f | from PIL import Image
class Painter:
def __init__(self, k, palette_name, color):
self.k = k
self.palette_name = palette_name
self.color = color
self.ctr = 0 # for frames
def format_frame(self,n):
return f"frames/{self.palette_name}-{self.k}-{n}.png"
def current_frame_name(self):
self.ctr += 1
return self.format_frame(self.ctr-1)
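# Frame naming example (illustrative): with k=8 and palette_name='viridis',
# successive calls to current_frame_name() return 'frames/viridis-8-0.png',
# 'frames/viridis-8-1.png', ..., since self.ctr starts at 0 and is incremented
# after each call.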
def save_to_image(self, grid, filename):
k = self.k
color = self.color
n = len(grid) # assume grid is square, too lazy to generalize
wall = k//2
with Image.new('RGB',(k*n+2*wall,k*n+2*wall)) as painting:
for i in range(k*n+2*wall):
for j in range(wall):
painting.putpixel((i,j),(0,0,0))
painting.putpixel((i,k*n+2*wall-j-1),(0,0,0))
painting.putpixel((j,i),(0,0,0))
painting.putpixel((k*n+2*wall-j-1,i),(0,0,0))
for i in range(k*n):
for j in range(k*n):
painting.putpixel((i+wall,j+wall),color[grid[i//k][j//k]])
painting.save(filename,"PNG")
print(f"Created {filename}") |
py | 1a4dc4902db9ae654e5725af9f3f2c7c82b10102 | # This file is a part of OpenCV project.
# It is a subject to the license terms in the LICENSE file found in the top-level directory
# of this distribution and at http://opencv.org/license.html.
#
# Copyright (C) 2018, Intel Corporation, all rights reserved.
# Third party copyrights are property of their respective owners.
#
# Use this script to get the text graph representation (.pbtxt) of SSD-based
# deep learning network trained in TensorFlow Object Detection API.
# Then you can import it with a binary frozen graph (.pb) using readNetFromTensorflow() function.
# See details and examples on the following wiki page: https://github.com/opencv/opencv/wiki/TensorFlow-Object-Detection-API
import argparse
import re
from math import sqrt
from tf_text_graph_common import *
class SSDAnchorGenerator:
def __init__(self, min_scale, max_scale, num_layers, aspect_ratios,
reduce_boxes_in_lowest_layer, image_width, image_height):
self.min_scale = min_scale
self.aspect_ratios = aspect_ratios
self.reduce_boxes_in_lowest_layer = reduce_boxes_in_lowest_layer
self.image_width = image_width
self.image_height = image_height
self.scales = [min_scale + (max_scale - min_scale) * i / (num_layers - 1)
for i in range(num_layers)] + [1.0]
def get(self, layer_id):
if layer_id == 0 and self.reduce_boxes_in_lowest_layer:
widths = [0.1, self.min_scale * sqrt(2.0), self.min_scale * sqrt(0.5)]
heights = [0.1, self.min_scale / sqrt(2.0), self.min_scale / sqrt(0.5)]
else:
widths = [self.scales[layer_id] * sqrt(ar) for ar in self.aspect_ratios]
heights = [self.scales[layer_id] / sqrt(ar) for ar in self.aspect_ratios]
widths += [sqrt(self.scales[layer_id] * self.scales[layer_id + 1])]
heights += [sqrt(self.scales[layer_id] * self.scales[layer_id + 1])]
min_size = min(self.image_width, self.image_height)
widths = [w * min_size for w in widths]
heights = [h * min_size for h in heights]
return widths, heights
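# Worked example (illustrative values, not from a specific pipeline config):
# with min_scale=0.2, max_scale=0.95 and num_layers=6, self.scales is
# [0.2, 0.35, 0.5, 0.65, 0.8, 0.95, 1.0]. Layer 0 uses the fixed widths/heights
# above when reduce_boxes_in_lowest_layer is true; later layers get one box per
# aspect ratio plus an extra box of scale sqrt(scales[i] * scales[i + 1]).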
class MultiscaleAnchorGenerator:
def __init__(self, min_level, aspect_ratios, scales_per_octave, anchor_scale):
self.min_level = min_level
self.aspect_ratios = aspect_ratios
self.anchor_scale = anchor_scale
self.scales = [2**(float(s) / scales_per_octave) for s in range(scales_per_octave)]
def get(self, layer_id):
widths = []
heights = []
for a in self.aspect_ratios:
for s in self.scales:
base_anchor_size = 2**(self.min_level + layer_id) * self.anchor_scale
ar = sqrt(a)
heights.append(base_anchor_size * s / ar)
widths.append(base_anchor_size * s * ar)
return widths, heights
def createSSDGraph(modelPath, configPath, outputPath):
# Nodes that should be kept.
keepOps = ['Conv2D', 'BiasAdd', 'Add', 'AddV2', 'Relu', 'Relu6', 'Placeholder', 'FusedBatchNorm',
'DepthwiseConv2dNative', 'ConcatV2', 'Mul', 'MaxPool', 'AvgPool', 'Identity',
'Sub', 'ResizeNearestNeighbor', 'Pad', 'FusedBatchNormV3', 'Mean']
# Node with which prefixes should be removed
prefixesToRemove = ('MultipleGridAnchorGenerator/', 'Concatenate/', 'Postprocessor/', 'Preprocessor/map')
# Load a config file.
config = readTextMessage(configPath)
config = config['model'][0]['ssd'][0]
num_classes = int(config['num_classes'][0])
fixed_shape_resizer = config['image_resizer'][0]['fixed_shape_resizer'][0]
image_width = int(fixed_shape_resizer['width'][0])
image_height = int(fixed_shape_resizer['height'][0])
box_predictor = 'convolutional' if 'convolutional_box_predictor' in config['box_predictor'][0] else 'weight_shared_convolutional'
anchor_generator = config['anchor_generator'][0]
if 'ssd_anchor_generator' in anchor_generator:
ssd_anchor_generator = anchor_generator['ssd_anchor_generator'][0]
min_scale = float(ssd_anchor_generator['min_scale'][0])
max_scale = float(ssd_anchor_generator['max_scale'][0])
num_layers = int(ssd_anchor_generator['num_layers'][0])
aspect_ratios = [float(ar) for ar in ssd_anchor_generator['aspect_ratios']]
reduce_boxes_in_lowest_layer = True
if 'reduce_boxes_in_lowest_layer' in ssd_anchor_generator:
reduce_boxes_in_lowest_layer = ssd_anchor_generator['reduce_boxes_in_lowest_layer'][0] == 'true'
priors_generator = SSDAnchorGenerator(min_scale, max_scale, num_layers,
aspect_ratios, reduce_boxes_in_lowest_layer,
image_width, image_height)
print('Scale: [%f-%f]' % (min_scale, max_scale))
print('Aspect ratios: %s' % str(aspect_ratios))
print('Reduce boxes in the lowest layer: %s' % str(reduce_boxes_in_lowest_layer))
elif 'multiscale_anchor_generator' in anchor_generator:
multiscale_anchor_generator = anchor_generator['multiscale_anchor_generator'][0]
min_level = int(multiscale_anchor_generator['min_level'][0])
max_level = int(multiscale_anchor_generator['max_level'][0])
anchor_scale = float(multiscale_anchor_generator['anchor_scale'][0])
aspect_ratios = [float(ar) for ar in multiscale_anchor_generator['aspect_ratios']]
scales_per_octave = int(multiscale_anchor_generator['scales_per_octave'][0])
num_layers = max_level - min_level + 1
priors_generator = MultiscaleAnchorGenerator(min_level, aspect_ratios,
scales_per_octave, anchor_scale)
print('Levels: [%d-%d]' % (min_level, max_level))
print('Anchor scale: %f' % anchor_scale)
print('Scales per octave: %d' % scales_per_octave)
print('Aspect ratios: %s' % str(aspect_ratios))
else:
print('Unknown anchor_generator')
exit(0)
print('Number of classes: %d' % num_classes)
print('Number of layers: %d' % num_layers)
print('box predictor: %s' % box_predictor)
print('Input image size: %dx%d' % (image_width, image_height))
# Read the graph.
outNames = ['num_detections', 'detection_scores', 'detection_boxes', 'detection_classes']
writeTextGraph(modelPath, outputPath, outNames)
graph_def = parseTextGraph(outputPath)
def getUnconnectedNodes():
unconnected = []
for node in graph_def.node:
unconnected.append(node.name)
for inp in node.input:
if inp in unconnected:
unconnected.remove(inp)
return unconnected
def fuse_nodes(nodesToKeep):
# Detect unfused batch normalization nodes and fuse them.
# Add_0 <-- moving_variance, add_y
# Rsqrt <-- Add_0
# Mul_0 <-- Rsqrt, gamma
# Mul_1 <-- input, Mul_0
# Mul_2 <-- moving_mean, Mul_0
# Sub_0 <-- beta, Mul_2
# Add_1 <-- Mul_1, Sub_0
nodesMap = {node.name: node for node in graph_def.node}
subgraphBatchNorm = ['Add',
['Mul', 'input', ['Mul', ['Rsqrt', ['Add', 'moving_variance', 'add_y']], 'gamma']],
['Sub', 'beta', ['Mul', 'moving_mean', 'Mul_0']]]
subgraphBatchNormV2 = ['AddV2',
['Mul', 'input', ['Mul', ['Rsqrt', ['AddV2', 'moving_variance', 'add_y']], 'gamma']],
['Sub', 'beta', ['Mul', 'moving_mean', 'Mul_0']]]
# Detect unfused nearest neighbor resize.
subgraphResizeNN = ['Reshape',
['Mul', ['Reshape', 'input', ['Pack', 'shape_1', 'shape_2', 'shape_3', 'shape_4', 'shape_5']],
'ones'],
['Pack', ['StridedSlice', ['Shape', 'input'], 'stack', 'stack_1', 'stack_2'],
'out_height', 'out_width', 'out_channels']]
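        # Recursively match a nested [op, input, ...] pattern starting at `node`,
        # collecting named inputs in `inputs` and the matched nodes in `fusedNodes`.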
def checkSubgraph(node, targetNode, inputs, fusedNodes):
op = targetNode[0]
if node.op == op and (len(node.input) >= len(targetNode) - 1):
fusedNodes.append(node)
for i, inpOp in enumerate(targetNode[1:]):
if isinstance(inpOp, list):
if not node.input[i] in nodesMap or \
not checkSubgraph(nodesMap[node.input[i]], inpOp, inputs, fusedNodes):
return False
else:
inputs[inpOp] = node.input[i]
return True
else:
return False
nodesToRemove = []
for node in graph_def.node:
inputs = {}
fusedNodes = []
if checkSubgraph(node, subgraphBatchNorm, inputs, fusedNodes) or \
checkSubgraph(node, subgraphBatchNormV2, inputs, fusedNodes):
name = node.name
node.Clear()
node.name = name
node.op = 'FusedBatchNorm'
node.input.append(inputs['input'])
node.input.append(inputs['gamma'])
node.input.append(inputs['beta'])
node.input.append(inputs['moving_mean'])
node.input.append(inputs['moving_variance'])
node.addAttr('epsilon', 0.001)
nodesToRemove += fusedNodes[1:]
inputs = {}
fusedNodes = []
if checkSubgraph(node, subgraphResizeNN, inputs, fusedNodes):
name = node.name
node.Clear()
node.name = name
node.op = 'ResizeNearestNeighbor'
node.input.append(inputs['input'])
node.input.append(name + '/output_shape')
out_height_node = nodesMap[inputs['out_height']]
out_width_node = nodesMap[inputs['out_width']]
out_height = int(out_height_node.attr['value']['tensor'][0]['int_val'][0])
out_width = int(out_width_node.attr['value']['tensor'][0]['int_val'][0])
shapeNode = NodeDef()
shapeNode.name = name + '/output_shape'
shapeNode.op = 'Const'
shapeNode.addAttr('value', [out_height, out_width])
graph_def.node.insert(graph_def.node.index(node), shapeNode)
nodesToKeep.append(shapeNode.name)
nodesToRemove += fusedNodes[1:]
for node in nodesToRemove:
graph_def.node.remove(node)
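    # Fuse the detected subgraphs, drop Identity nodes and prune constants and
    # nodes that belong to removed prefixes or unsupported ops.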
nodesToKeep = []
fuse_nodes(nodesToKeep)
removeIdentity(graph_def)
def to_remove(name, op):
        return (name not in nodesToKeep) and \
               (op == 'Const' or (op not in keepOps) or name.startswith(prefixesToRemove))
removeUnusedNodesAndAttrs(to_remove, graph_def)
# Connect input node to the first layer
assert(graph_def.node[0].op == 'Placeholder')
try:
input_shape = graph_def.node[0].attr['shape']['shape'][0]['dim']
input_shape[1]['size'] = image_height
input_shape[2]['size'] = image_width
    except Exception:
print("Input shapes are undefined")
# assert(graph_def.node[1].op == 'Conv2D')
weights = graph_def.node[1].input[-1]
for i in range(len(graph_def.node[1].input)):
graph_def.node[1].input.pop()
graph_def.node[1].input.append(graph_def.node[0].name)
graph_def.node[1].input.append(weights)
    # Check and correct the case when the preprocessing block directly follows the input.
preproc_id = "Preprocessor/"
if graph_def.node[2].name.startswith(preproc_id) and \
graph_def.node[2].input[0].startswith(preproc_id):
if not any(preproc_id in inp for inp in graph_def.node[3].input):
graph_def.node[3].input.insert(0, graph_def.node[2].name)
# Create SSD postprocessing head ###############################################
# Concatenate predictions of classes, predictions of bounding boxes and proposals.
def addConcatNode(name, inputs, axisNodeName):
concat = NodeDef()
concat.name = name
concat.op = 'ConcatV2'
for inp in inputs:
concat.input.append(inp)
concat.input.append(axisNodeName)
graph_def.node.extend([concat])
addConstNode('concat/axis_flatten', [-1], graph_def)
addConstNode('PriorBox/concat/axis', [-2], graph_def)
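    # Flatten the per-layer class and box predictions and concatenate them
    # across all feature map layers along the last axis.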
    for label in ['ClassPredictor', 'BoxEncodingPredictor' if box_predictor == 'convolutional' else 'BoxPredictor']:
concatInputs = []
for i in range(num_layers):
# Flatten predictions
flatten = NodeDef()
            if box_predictor == 'convolutional':
inpName = 'BoxPredictor_%d/%s/BiasAdd' % (i, label)
else:
if i == 0:
inpName = 'WeightSharedConvolutionalBoxPredictor/%s/BiasAdd' % label
else:
inpName = 'WeightSharedConvolutionalBoxPredictor_%d/%s/BiasAdd' % (i, label)
flatten.input.append(inpName)
flatten.name = inpName + '/Flatten'
flatten.op = 'Flatten'
concatInputs.append(flatten.name)
graph_def.node.extend([flatten])
addConcatNode('%s/concat' % label, concatInputs, 'concat/axis_flatten')
num_matched_layers = 0
for node in graph_def.node:
        if re.match(r'BoxPredictor_\d/BoxEncodingPredictor/convolution', node.name) or \
           re.match(r'BoxPredictor_\d/BoxEncodingPredictor/Conv2D', node.name) or \
           re.match(r'WeightSharedConvolutionalBoxPredictor(_\d)*/BoxPredictor/Conv2D', node.name):
node.addAttr('loc_pred_transposed', True)
num_matched_layers += 1
assert(num_matched_layers == num_layers)
# Add layers that generate anchors (bounding boxes proposals).
priorBoxes = []
boxCoder = config['box_coder'][0]
fasterRcnnBoxCoder = boxCoder['faster_rcnn_box_coder'][0]
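    # PriorBox variances are the reciprocals of the box coder scales
    # (x, y, width, height order).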
    boxCoderVariance = [1.0 / float(fasterRcnnBoxCoder['x_scale'][0]),
                        1.0 / float(fasterRcnnBoxCoder['y_scale'][0]),
                        1.0 / float(fasterRcnnBoxCoder['width_scale'][0]),
                        1.0 / float(fasterRcnnBoxCoder['height_scale'][0])]
for i in range(num_layers):
priorBox = NodeDef()
priorBox.name = 'PriorBox_%d' % i
priorBox.op = 'PriorBox'
        if box_predictor == 'convolutional':
priorBox.input.append('BoxPredictor_%d/BoxEncodingPredictor/BiasAdd' % i)
else:
if i == 0:
priorBox.input.append('WeightSharedConvolutionalBoxPredictor/BoxPredictor/Conv2D')
else:
priorBox.input.append('WeightSharedConvolutionalBoxPredictor_%d/BoxPredictor/BiasAdd' % i)
priorBox.input.append(graph_def.node[0].name) # image_tensor
priorBox.addAttr('flip', False)
priorBox.addAttr('clip', False)
widths, heights = priors_generator.get(i)
priorBox.addAttr('width', widths)
priorBox.addAttr('height', heights)
priorBox.addAttr('variance', boxCoderVariance)
graph_def.node.extend([priorBox])
priorBoxes.append(priorBox.name)
# Compare this layer's output with Postprocessor/Reshape
addConcatNode('PriorBox/concat', priorBoxes, 'concat/axis_flatten')
# Sigmoid for classes predictions and DetectionOutput layer
addReshape('ClassPredictor/concat', 'ClassPredictor/concat3d', [0, -1, num_classes + 1], graph_def)
sigmoid = NodeDef()
sigmoid.name = 'ClassPredictor/concat/sigmoid'
sigmoid.op = 'Sigmoid'
sigmoid.input.append('ClassPredictor/concat3d')
graph_def.node.extend([sigmoid])
addFlatten(sigmoid.name, sigmoid.name + '/Flatten', graph_def)
detectionOut = NodeDef()
detectionOut.name = 'detection_out'
detectionOut.op = 'DetectionOutput'
if box_predictor == 'convolutional':
detectionOut.input.append('BoxEncodingPredictor/concat')
else:
detectionOut.input.append('BoxPredictor/concat')
detectionOut.input.append(sigmoid.name + '/Flatten')
detectionOut.input.append('PriorBox/concat')
detectionOut.addAttr('num_classes', num_classes + 1)
detectionOut.addAttr('share_location', True)
detectionOut.addAttr('background_label_id', 0)
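    # NMS and score thresholds come from the post_processing section of the
    # config, with fallbacks when a field is missing.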
postProcessing = config['post_processing'][0]
batchNMS = postProcessing['batch_non_max_suppression'][0]
if 'iou_threshold' in batchNMS:
detectionOut.addAttr('nms_threshold', float(batchNMS['iou_threshold'][0]))
else:
detectionOut.addAttr('nms_threshold', 0.6)
if 'score_threshold' in batchNMS:
detectionOut.addAttr('confidence_threshold', float(batchNMS['score_threshold'][0]))
else:
detectionOut.addAttr('confidence_threshold', 0.01)
if 'max_detections_per_class' in batchNMS:
detectionOut.addAttr('top_k', int(batchNMS['max_detections_per_class'][0]))
else:
detectionOut.addAttr('top_k', 100)
if 'max_total_detections' in batchNMS:
detectionOut.addAttr('keep_top_k', int(batchNMS['max_total_detections'][0]))
else:
detectionOut.addAttr('keep_top_k', 100)
detectionOut.addAttr('code_type', "CENTER_SIZE")
graph_def.node.extend([detectionOut])
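    # Repeatedly drop nodes that nothing consumes; only the DetectionOutput
    # node is allowed to remain without consumers.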
while True:
unconnectedNodes = getUnconnectedNodes()
unconnectedNodes.remove(detectionOut.name)
if not unconnectedNodes:
break
for name in unconnectedNodes:
for i in range(len(graph_def.node)):
if graph_def.node[i].name == name:
del graph_def.node[i]
break
# Save as text.
graph_def.save(outputPath)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run this script to get a text graph of '
'SSD model from TensorFlow Object Detection API. '
'Then pass it with .pb file to cv::dnn::readNetFromTensorflow function.')
parser.add_argument('--input', required=True, help='Path to frozen TensorFlow graph.')
parser.add_argument('--output', required=True, help='Path to output text graph.')
parser.add_argument('--config', required=True, help='Path to a *.config file is used for training.')
args = parser.parse_args()
createSSDGraph(args.input, args.config, args.output)
|
py | 1a4dc53afb7093b167a2d57c1da3c8f3e56308cb | import datetime
import os
import time
from uuid import UUID, uuid4
from django import forms as django_forms, http
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.core.files.storage import default_storage as storage
from django.db import transaction
from django.db.models import Count
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, redirect
from django.template import loader
from django.utils.http import is_safe_url
from django.utils.translation import ugettext, ugettext_lazy as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
import waffle
from django_statsd.clients import statsd
import olympia.core.logger
from olympia import amo
from olympia.access import acl
from olympia.accounts.utils import redirect_for_login
from olympia.accounts.views import API_TOKEN_COOKIE, logout_user
from olympia.activity.models import ActivityLog, VersionLog
from olympia.activity.utils import log_and_notify
from olympia.addons.models import (
Addon, AddonReviewerFlags, AddonUser, AddonUserPendingConfirmation)
from olympia.addons.views import BaseFilter
from olympia.amo import messages, utils as amo_utils
from olympia.amo.decorators import json_view, login_required, post_required
from olympia.amo.templatetags.jinja_helpers import absolutify, urlparams
from olympia.amo.urlresolvers import get_url_prefix, reverse
from olympia.amo.utils import MenuItem, escape_all, render, send_mail
from olympia.api.models import APIKey, APIKeyConfirmation
from olympia.devhub.decorators import dev_required, no_admin_disabled
from olympia.devhub.models import BlogPost, RssKey
from olympia.devhub.utils import (
add_dynamic_theme_tag, extract_theme_properties,
fetch_existing_translations_from_addon, get_addon_akismet_reports,
UploadRestrictionChecker, wizard_unsupported_properties)
from olympia.files.models import File, FileUpload, FileValidation
from olympia.files.utils import parse_addon
from olympia.lib.crypto.signing import sign_file
from olympia.reviewers.forms import PublicWhiteboardForm
from olympia.reviewers.models import Whiteboard
from olympia.reviewers.templatetags.jinja_helpers import get_position
from olympia.reviewers.utils import ReviewHelper
from olympia.users.models import DeveloperAgreementRestriction
from olympia.versions.models import Version
from olympia.versions.tasks import extract_version_source_to_git
from olympia.versions.utils import get_next_version_number
from olympia.zadmin.models import get_config
from . import feeds, forms, signals, tasks
log = olympia.core.logger.getLogger('z.devhub')
# We use a session cookie to make sure people see the dev agreement.
MDN_BASE = 'https://developer.mozilla.org/en-US/Add-ons'
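# Helper shared by the upload status views below: treat malformed UUIDs as a
# 404 instead of raising a ValueError.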
def get_fileupload_by_uuid_or_404(value):
try:
UUID(value)
except ValueError:
raise http.Http404()
return get_object_or_404(FileUpload, uuid=value)
class AddonFilter(BaseFilter):
opts = (('updated', _(u'Updated')),
('name', _(u'Name')),
('created', _(u'Created')),
('popular', _(u'Downloads')),
('rating', _(u'Rating')))
class ThemeFilter(BaseFilter):
opts = (('created', _(u'Created')),
('name', _(u'Name')),
('popular', _(u'Downloads')),
('rating', _(u'Rating')))
def addon_listing(request, theme=False):
"""Set up the queryset and filtering for addon listing for Dashboard."""
if theme:
qs = Addon.objects.filter(
authors=request.user, type=amo.ADDON_STATICTHEME)
filter_cls = ThemeFilter
default = 'created'
else:
qs = Addon.objects.filter(authors=request.user).exclude(
type=amo.ADDON_STATICTHEME)
filter_cls = AddonFilter
default = 'updated'
filter_ = filter_cls(request, qs, 'sort', default)
return filter_.qs, filter_
def index(request):
ctx = {'blog_posts': _get_posts()}
if request.user.is_authenticated:
user_addons = Addon.objects.filter(authors=request.user)
recent_addons = user_addons.order_by('-modified')[:3]
ctx['recent_addons'] = []
for addon in recent_addons:
ctx['recent_addons'].append({'addon': addon,
'position': get_position(addon)})
return render(request, 'devhub/index.html', ctx)
@login_required
def dashboard(request, theme=False):
addon_items = _get_items(
None, Addon.objects.filter(authors=request.user))[:4]
data = dict(rss=_get_rss_feed(request), blog_posts=_get_posts(),
timestamp=int(time.time()), addon_tab=not theme,
theme=theme, addon_items=addon_items)
if data['addon_tab']:
addons, data['filter'] = addon_listing(request)
data['addons'] = amo_utils.paginate(request, addons, per_page=10)
if theme:
themes, data['filter'] = addon_listing(request, theme=True)
data['themes'] = amo_utils.paginate(request, themes, per_page=10)
if 'filter' in data:
data['sorting'] = data['filter'].field
data['sort_opts'] = data['filter'].opts
return render(request, 'devhub/addons/dashboard.html', data)
@dev_required
def ajax_compat_status(request, addon_id, addon):
if not (addon.accepts_compatible_apps() and addon.current_version):
raise http.Http404()
return render(request, 'devhub/addons/ajax_compat_status.html',
dict(addon=addon))
@dev_required
def ajax_compat_error(request, addon_id, addon):
if not (addon.accepts_compatible_apps() and addon.current_version):
raise http.Http404()
return render(request, 'devhub/addons/ajax_compat_error.html',
dict(addon=addon))
@dev_required
def ajax_compat_update(request, addon_id, addon, version_id):
if not addon.accepts_compatible_apps():
raise http.Http404()
version = get_object_or_404(addon.versions.all(), pk=version_id)
compat_form = forms.CompatFormSet(
request.POST or None,
queryset=version.apps.all().select_related('min', 'max'),
form_kwargs={'version': version})
if request.method == 'POST' and compat_form.is_valid():
for compat in compat_form.save(commit=False):
compat.version = version
compat.save()
for compat in compat_form.deleted_objects:
compat.delete()
for form in compat_form.forms:
if (isinstance(form, forms.CompatForm) and
'max' in form.changed_data):
_log_max_version_change(addon, version, form.instance)
return render(request, 'devhub/addons/ajax_compat_update.html',
dict(addon=addon, version=version, compat_form=compat_form))
def _get_addons(request, addons, addon_id, action):
"""Create a list of ``MenuItem``s for the activity feed."""
items = []
a = MenuItem()
a.selected = (not addon_id)
(a.text, a.url) = (ugettext('All My Add-ons'), reverse('devhub.feed_all'))
if action:
a.url += '?action=' + action
items.append(a)
for addon in addons:
item = MenuItem()
try:
item.selected = (addon_id and addon.id == int(addon_id))
except ValueError:
pass # We won't get here... EVER
url = reverse('devhub.feed', args=[addon.slug])
if action:
url += '?action=' + action
item.text, item.url = addon.name, url
items.append(item)
return items
def _get_posts(limit=5):
return BlogPost.objects.order_by('-date_posted')[0:limit]
def _get_activities(request, action):
url = request.get_full_path()
choices = (None, 'updates', 'status', 'collections', 'reviews')
text = {None: ugettext('All Activity'),
'updates': ugettext('Add-on Updates'),
'status': ugettext('Add-on Status'),
'collections': ugettext('User Collections'),
'reviews': ugettext('User Reviews'),
}
items = []
for c in choices:
i = MenuItem()
i.text = text[c]
i.url, i.selected = urlparams(url, page=None, action=c), (action == c)
items.append(i)
return items
def _get_items(action, addons):
filters = {
'updates': (amo.LOG.ADD_VERSION, amo.LOG.ADD_FILE_TO_VERSION),
'status': (amo.LOG.USER_DISABLE, amo.LOG.USER_ENABLE,
amo.LOG.CHANGE_STATUS, amo.LOG.APPROVE_VERSION,),
'collections': (amo.LOG.ADD_TO_COLLECTION,
amo.LOG.REMOVE_FROM_COLLECTION,),
'reviews': (amo.LOG.ADD_RATING,)
}
filter_ = filters.get(action)
items = (ActivityLog.objects.for_addons(addons)
.exclude(action__in=amo.LOG_HIDE_DEVELOPER))
if filter_:
items = items.filter(action__in=[i.id for i in filter_])
return items
def _get_rss_feed(request):
key, _ = RssKey.objects.get_or_create(user=request.user)
return urlparams(reverse('devhub.feed_all'), privaterss=key.key.hex)
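# Activity feed for a single add-on (when addon_id is given) or for all of the
# developer's add-ons; also exposed as a private RSS feed via ?privaterss=<key>.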
def feed(request, addon_id=None):
if request.GET.get('privaterss'):
return feeds.ActivityFeedRSS()(request)
addon_selected = None
if not request.user.is_authenticated:
return redirect_for_login(request)
else:
addons_all = Addon.objects.filter(authors=request.user)
if addon_id:
addon = get_object_or_404(Addon.objects.id_or_slug(addon_id))
addons = addon # common query set
try:
key = RssKey.objects.get(addon=addons)
except RssKey.DoesNotExist:
key = RssKey.objects.create(addon=addons)
addon_selected = addon.id
rssurl = urlparams(reverse('devhub.feed', args=[addon_id]),
privaterss=key.key.hex)
if not acl.check_addon_ownership(request, addons, dev=True,
ignore_disabled=True):
raise PermissionDenied
else:
rssurl = _get_rss_feed(request)
addon = None
addons = addons_all
action = request.GET.get('action')
items = _get_items(action, addons)
activities = _get_activities(request, action)
addon_items = _get_addons(request, addons_all, addon_selected, action)
pager = amo_utils.paginate(request, items, 20)
data = dict(addons=addon_items, pager=pager, activities=activities,
rss=rssurl, addon=addon)
return render(request, 'devhub/addons/activity.html', data)
@dev_required
def edit(request, addon_id, addon):
try:
whiteboard = Whiteboard.objects.get(pk=addon.pk)
except Whiteboard.DoesNotExist:
whiteboard = Whiteboard(pk=addon.pk)
previews = (
addon.current_version.previews.all()
if addon.current_version and addon.has_per_version_previews
else addon.previews.all())
header_preview = (
previews.first() if addon.type == amo.ADDON_STATICTHEME else None)
data = {
'page': 'edit',
'addon': addon,
'whiteboard': whiteboard,
'editable': False,
'show_listed_fields': addon.has_listed_versions(),
'valid_slug': addon.slug,
'tags': addon.tags.not_denied().values_list('tag_text', flat=True),
'previews': previews,
'header_preview': header_preview,
'supported_image_types': amo.SUPPORTED_IMAGE_TYPES,
}
return render(request, 'devhub/addons/edit.html', data)
@dev_required(owner_for_post=True)
@post_required
def delete(request, addon_id, addon):
# Database deletes only allowed for free or incomplete addons.
if not addon.can_be_deleted():
msg = ugettext(
'Add-on cannot be deleted. Disable this add-on instead.')
messages.error(request, msg)
return redirect(addon.get_dev_url('versions'))
any_theme = addon.type == amo.ADDON_STATICTHEME
form = forms.DeleteForm(request.POST, addon=addon)
if form.is_valid():
reason = form.cleaned_data.get('reason', '')
addon.delete(msg='Removed via devhub', reason=reason)
messages.success(
request,
ugettext('Theme deleted.')
if any_theme else ugettext('Add-on deleted.'))
return redirect('devhub.%s' % ('themes' if any_theme else 'addons'))
else:
messages.error(
request,
ugettext('URL name was incorrect. Theme was not deleted.')
if any_theme else
ugettext('URL name was incorrect. Add-on was not deleted.'))
return redirect(addon.get_dev_url('versions'))
@dev_required
@post_required
def enable(request, addon_id, addon):
addon.update(disabled_by_user=False)
ActivityLog.create(amo.LOG.USER_ENABLE, addon)
return redirect(addon.get_dev_url('versions'))
@dev_required(owner_for_post=True)
@post_required
def cancel(request, addon_id, addon):
if addon.status == amo.STATUS_NOMINATED:
addon.update(status=amo.STATUS_NULL)
ActivityLog.create(amo.LOG.CHANGE_STATUS, addon, addon.status)
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
if latest_version:
for file_ in latest_version.files.filter(
status=amo.STATUS_AWAITING_REVIEW):
file_.update(status=amo.STATUS_DISABLED)
return redirect(addon.get_dev_url('versions'))
@dev_required
@post_required
def disable(request, addon_id, addon):
# Also set the latest listed version to STATUS_DISABLED if it was
    # AWAITING_REVIEW, to not waste reviewers' time.
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
if latest_version:
latest_version.files.filter(
status=amo.STATUS_AWAITING_REVIEW).update(
status=amo.STATUS_DISABLED)
addon.update_version()
addon.update_status()
addon.update(disabled_by_user=True)
ActivityLog.create(amo.LOG.USER_DISABLE, addon)
return redirect(addon.get_dev_url('versions'))
# Can't use @dev_required, as the user is not a developer yet. Can't use
# @addon_view_factory either, because it requires a developer for unlisted
# add-ons. So we just @login_required and retrieve the addon ourselves in the
# function.
@login_required
def invitation(request, addon_id):
addon = get_object_or_404(Addon.objects.id_or_slug(addon_id))
try:
invitation = AddonUserPendingConfirmation.objects.get(
addon=addon, user=request.user)
except AddonUserPendingConfirmation.DoesNotExist:
# To be nice in case the user accidentally visited this page after
# having accepted an invite, redirect to the add-on base edit page.
# If they are an author, they will have access, otherwise will get the
# appropriate error.
return redirect(addon.get_dev_url())
if request.method == 'POST':
value = request.POST.get('accept')
if value == 'yes':
# There is a potential race condition on the position, but it's
# difficult to find a sensible value anyway. Should a position
# conflict happen, owners can easily fix it themselves.
last_position = AddonUser.objects.filter(
addon=invitation.addon).order_by('position').values_list(
'position', flat=True).last() or 0
AddonUser.objects.create(
addon=invitation.addon, user=invitation.user,
role=invitation.role, listed=invitation.listed,
position=last_position + 1)
messages.success(request, ugettext('Invitation accepted.'))
redirect_url = addon.get_dev_url()
else:
messages.success(request, ugettext('Invitation declined.'))
redirect_url = reverse('devhub.addons')
        # Regardless of whether the invitation was accepted or declined,
        # it's now obsolete.
invitation.delete()
return redirect(redirect_url)
ctx = {
'addon': addon,
'invitation': invitation,
}
return render(request, 'devhub/addons/invitation.html', ctx)
@dev_required(owner_for_post=True)
def ownership(request, addon_id, addon):
fs = []
ctx = {'addon': addon}
post_data = request.POST if request.method == 'POST' else None
# Authors.
user_form = forms.AuthorFormSet(
post_data,
prefix='user_form',
queryset=AddonUser.objects.filter(addon=addon).order_by('position'),
form_kwargs={'addon': addon})
fs.append(user_form)
ctx['user_form'] = user_form
# Authors pending confirmation (owner can still remove them before they
# accept).
authors_pending_confirmation_form = forms.AuthorWaitingConfirmationFormSet(
post_data,
prefix='authors_pending_confirmation',
queryset=AddonUserPendingConfirmation.objects.filter(
addon=addon).order_by('id'),
form_kwargs={'addon': addon})
fs.append(authors_pending_confirmation_form)
ctx['authors_pending_confirmation_form'] = (
authors_pending_confirmation_form)
# Versions.
license_form = forms.LicenseForm(post_data, version=addon.current_version)
ctx.update(license_form.get_context())
if ctx['license_form']: # if addon has a version
fs.append(ctx['license_form'])
# Policy.
if addon.type != amo.ADDON_STATICTHEME:
policy_form = forms.PolicyForm(post_data, addon=addon)
ctx['policy_form'] = policy_form
fs.append(policy_form)
else:
policy_form = None
def mail_user_changes(author, title, template_part, recipients,
extra_context=None):
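        # Render one of the users/email/*.ltxt templates and send it to the
        # given recipients.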
from olympia.amo.utils import send_mail
context_data = {
'author': author,
'addon': addon,
'DOMAIN': settings.DOMAIN,
}
if extra_context:
context_data.update(extra_context)
template = loader.get_template(
'users/email/{part}.ltxt'.format(part=template_part))
send_mail(title, template.render(context_data),
None, recipients, use_deny_list=False)
def process_author_changes(source_form, existing_authors_emails):
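        # Apply additions, role changes and removals coming from one of the
        # author formsets, logging each change and notifying affected users.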
addon_users_to_process = source_form.save(commit=False)
for addon_user in addon_users_to_process:
action = None
addon_user.addon = addon
if not addon_user.pk:
action = amo.LOG.ADD_USER_WITH_ROLE
mail_user_changes(
author=addon_user,
title=ugettext('An author has been added to your add-on'),
template_part='author_added',
recipients=existing_authors_emails)
mail_user_changes(
author=addon_user,
title=ugettext(
'Author invitation for {addon_name}').format(
addon_name=str(addon.name)),
template_part='author_added_confirmation',
recipients=[addon_user.user.email],
extra_context={'author_confirmation_link': absolutify(
reverse('devhub.addons.invitation', args=(addon.slug,))
)})
messages.success(request, ugettext(
'A confirmation email has been sent to {email}').format(
email=addon_user.user.email))
elif addon_user.role != addon_user._original_role:
action = amo.LOG.CHANGE_USER_WITH_ROLE
title = ugettext(
'An author role has been changed on your add-on')
recipients = list(
set(existing_authors_emails + [addon_user.user.email])
)
mail_user_changes(
author=addon_user,
title=title,
template_part='author_changed',
recipients=recipients)
addon_user.save()
if action:
ActivityLog.create(
action, addon_user.user,
str(addon_user.get_role_display()), addon)
for addon_user in source_form.deleted_objects:
recipients = list(
set(existing_authors_emails + [addon_user.user.email])
)
ActivityLog.create(
amo.LOG.REMOVE_USER_WITH_ROLE, addon_user.user,
str(addon_user.get_role_display()), addon)
mail_user_changes(
author=addon_user,
title=ugettext('An author has been removed from your add-on'),
template_part='author_removed',
recipients=recipients)
addon_user.delete()
if request.method == 'POST' and all([form.is_valid() for form in fs]):
if license_form in fs:
license_form.save()
if policy_form and policy_form in fs:
policy_form.save()
messages.success(request, ugettext('Changes successfully saved.'))
existing_authors_emails = list(
addon.authors.values_list('email', flat=True))
process_author_changes(
authors_pending_confirmation_form, existing_authors_emails)
process_author_changes(
user_form, existing_authors_emails)
return redirect(addon.get_dev_url('owner'))
return render(request, 'devhub/addons/owner.html', ctx)
@login_required
def validate_addon(request):
return render(request, 'devhub/validate_addon.html',
{'title': ugettext('Validate Add-on'),
'new_addon_form': forms.DistributionChoiceForm()})
def handle_upload(filedata, request, channel, addon=None, is_standalone=False,
submit=False):
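    # Create a FileUpload from the POSTed file, optionally run the Akismet
    # check for listed uploads, then queue the validation (and submission)
    # tasks for the chosen channel.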
automated_signing = channel == amo.RELEASE_CHANNEL_UNLISTED
user = request.user if request.user.is_authenticated else None
upload = FileUpload.from_post(
filedata, filedata.name, filedata.size,
automated_signing=automated_signing, addon=addon, user=user)
log.info('FileUpload created: %s' % upload.uuid.hex)
    from olympia.lib.akismet.tasks import akismet_comment_check  # circular import
if (channel == amo.RELEASE_CHANNEL_LISTED):
existing_data = (
fetch_existing_translations_from_addon(
upload.addon, ('name', 'summary', 'description'))
if addon and addon.has_listed_versions() else ())
akismet_reports = get_addon_akismet_reports(
user=user,
user_agent=request.META.get('HTTP_USER_AGENT'),
referrer=request.META.get('HTTP_REFERER'),
upload=upload,
existing_data=existing_data)
else:
akismet_reports = []
if akismet_reports:
pretask = akismet_comment_check.si(
[report.id for _, report in akismet_reports])
else:
pretask = None
if submit:
tasks.validate_and_submit(
addon, upload, channel=channel, pretask=pretask)
else:
tasks.validate(
upload, listed=(channel == amo.RELEASE_CHANNEL_LISTED),
pretask=pretask)
return upload
@login_required
@post_required
def upload(request, channel='listed', addon=None, is_standalone=False):
channel = amo.CHANNEL_CHOICES_LOOKUP[channel]
filedata = request.FILES['upload']
upload = handle_upload(
filedata=filedata, request=request, addon=addon,
is_standalone=is_standalone, channel=channel)
if addon:
return redirect('devhub.upload_detail_for_version',
addon.slug, upload.uuid.hex)
elif is_standalone:
return redirect('devhub.standalone_upload_detail', upload.uuid.hex)
else:
return redirect('devhub.upload_detail', upload.uuid.hex, 'json')
@post_required
@dev_required
def upload_for_version(request, addon_id, addon, channel):
return upload(request, channel=channel, addon=addon)
@login_required
@json_view
def standalone_upload_detail(request, uuid):
upload = get_fileupload_by_uuid_or_404(uuid)
url = reverse('devhub.standalone_upload_detail', args=[uuid])
return upload_validation_context(request, upload, url=url)
@dev_required(submitting=True)
@json_view
def upload_detail_for_version(request, addon_id, addon, uuid):
try:
upload = get_fileupload_by_uuid_or_404(uuid)
response = json_upload_detail(request, upload, addon_slug=addon.slug)
statsd.incr('devhub.upload_detail_for_addon.success')
return response
except Exception as exc:
statsd.incr('devhub.upload_detail_for_addon.error')
log.error('Error checking upload status: {} {}'.format(type(exc), exc))
raise
@dev_required(allow_reviewers=True)
def file_validation(request, addon_id, addon, file_id):
file_ = get_object_or_404(File, version__addon=addon, id=file_id)
validate_url = reverse('devhub.json_file_validation',
args=[addon.slug, file_.id])
file_url = reverse('files.list', args=[file_.id, 'file', ''])
context = {'validate_url': validate_url, 'file_url': file_url,
'file': file_, 'filename': file_.filename,
'timestamp': file_.created, 'addon': addon,
'automated_signing': file_.automated_signing}
if file_.has_been_validated:
context['validation_data'] = file_.validation.processed_validation
return render(request, 'devhub/validation.html', context)
@csrf_exempt
@dev_required(allow_reviewers=True)
def json_file_validation(request, addon_id, addon, file_id):
file = get_object_or_404(File, version__addon=addon, id=file_id)
try:
result = file.validation
except FileValidation.DoesNotExist:
if request.method != 'POST':
return http.HttpResponseNotAllowed(['POST'])
# This API is, unfortunately, synchronous, so wait for the
# task to complete and return the result directly.
pk = tasks.validate(file, synchronous=True).get()
result = FileValidation.objects.get(pk=pk)
response = JsonResponse({
'validation': result.processed_validation,
'error': None,
})
# See: https://github.com/mozilla/addons-server/issues/11048
response['Access-Control-Allow-Origin'] = settings.CODE_MANAGER_URL
response['Access-Control-Allow-Methods'] = 'GET, OPTIONS'
response['Access-Control-Allow-Headers'] = 'Content-Type'
response['Access-Control-Allow-Credentials'] = 'true'
return response
@json_view
def json_upload_detail(request, upload, addon_slug=None):
addon = None
if addon_slug:
addon = get_object_or_404(Addon.objects, slug=addon_slug)
result = upload_validation_context(request, upload, addon=addon)
if result['validation']:
try:
pkg = parse_addon(upload, addon=addon, user=request.user)
except django_forms.ValidationError as exc:
# Don't add custom validation errors if we already
# failed validation (This can happen because validation does
# call `parse_addon` too.)
if result['validation'].get('errors', 0):
return result
# This doesn't guard against client-side tinkering, and is purely
# to display those non-linter errors nicely in the frontend. What
# does prevent clients from bypassing those is the fact that we
# always call parse_addon() before calling from_upload(), so
# ValidationError would be raised before proceeding.
for i, msg in enumerate(exc.messages):
# Simulate a validation error so the UI displays
# it as such
result['validation']['messages'].insert(
i, {'type': 'error',
'message': escape_all(msg), 'tier': 1,
'fatal': True})
if result['validation']['ending_tier'] < 1:
result['validation']['ending_tier'] = 1
result['validation']['errors'] += 1
return json_view.error(result)
else:
result['addon_type'] = pkg.get('type', '')
return result
def upload_validation_context(request, upload, addon=None, url=None):
if not url:
if addon:
url = reverse('devhub.upload_detail_for_version',
args=[addon.slug, upload.uuid.hex])
else:
url = reverse(
'devhub.upload_detail',
args=[upload.uuid.hex, 'json'])
full_report_url = reverse('devhub.upload_detail', args=[upload.uuid.hex])
validation = upload.processed_validation or ''
return {'upload': upload.uuid.hex,
'validation': validation,
'error': None,
'url': url,
'full_report_url': full_report_url}
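# Validation status for an upload: JSON for AJAX requests or the 'json' format,
# an HTML validation page otherwise.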
def upload_detail(request, uuid, format='html'):
upload = get_fileupload_by_uuid_or_404(uuid)
if upload.user_id and not request.user.is_authenticated:
return redirect_for_login(request)
if format == 'json' or request.is_ajax():
try:
response = json_upload_detail(request, upload)
statsd.incr('devhub.upload_detail.success')
return response
except Exception as exc:
statsd.incr('devhub.upload_detail.error')
log.error('Error checking upload status: {} {}'.format(
type(exc), exc))
raise
validate_url = reverse('devhub.standalone_upload_detail',
args=[upload.uuid.hex])
context = {'validate_url': validate_url, 'filename': upload.pretty_name,
'automated_signing': upload.automated_signing,
'timestamp': upload.created}
if upload.validation:
context['validation_data'] = upload.processed_validation
return render(request, 'devhub/validation.html', context)
@dev_required
def addons_section(request, addon_id, addon, section, editable=False):
show_listed = addon.has_listed_versions()
static_theme = addon.type == amo.ADDON_STATICTHEME
models = {}
content_waffle = waffle.switch_is_active('content-optimization')
if show_listed:
models.update({
'describe': (forms.DescribeForm if not content_waffle
else forms.DescribeFormContentOptimization),
'additional_details': forms.AdditionalDetailsForm,
'technical': forms.AddonFormTechnical
})
if not static_theme:
models.update({'media': forms.AddonFormMedia})
else:
models.update({
'describe': (forms.DescribeFormUnlisted if not content_waffle
else forms.DescribeFormUnlistedContentOptimization),
'additional_details': forms.AdditionalDetailsFormUnlisted,
'technical': forms.AddonFormTechnicalUnlisted
})
if section not in models:
raise http.Http404()
tags, previews, restricted_tags = [], [], []
cat_form = dependency_form = whiteboard_form = None
whiteboard = None
if section == 'describe' and show_listed:
category_form_class = (forms.SingleCategoryForm if static_theme else
forms.CategoryFormSet)
cat_form = category_form_class(
request.POST or None, addon=addon, request=request)
elif section == 'additional_details' and show_listed:
tags = addon.tags.not_denied().values_list('tag_text', flat=True)
restricted_tags = addon.tags.filter(restricted=True)
elif section == 'media':
previews = forms.PreviewFormSet(
request.POST or None,
prefix='files', queryset=addon.previews.all())
if section == 'technical':
try:
whiteboard = Whiteboard.objects.get(pk=addon.pk)
except Whiteboard.DoesNotExist:
whiteboard = Whiteboard(pk=addon.pk)
whiteboard_form = PublicWhiteboardForm(request.POST or None,
instance=whiteboard,
prefix='whiteboard')
    # Capture the current slug before the form replaces it with the submitted data.
valid_slug = addon.slug
if editable:
if request.method == 'POST':
form = models[section](request.POST, request.FILES,
instance=addon, request=request)
if form.is_valid() and (not previews or previews.is_valid()):
addon = form.save(addon)
if previews:
for preview in previews.forms:
preview.save(addon)
editable = False
if section == 'media':
ActivityLog.create(amo.LOG.CHANGE_ICON, addon)
else:
ActivityLog.create(amo.LOG.EDIT_PROPERTIES, addon)
valid_slug = addon.slug
if cat_form:
if cat_form.is_valid():
cat_form.save()
else:
editable = True
if dependency_form:
if dependency_form.is_valid():
dependency_form.save()
else:
editable = True
if whiteboard_form:
if whiteboard_form.is_valid():
whiteboard_form.save()
else:
editable = True
else:
form = models[section](instance=addon, request=request)
else:
form = False
data = {
'addon': addon,
'whiteboard': whiteboard,
'show_listed_fields': show_listed,
'form': form,
'editable': editable,
'tags': tags,
'restricted_tags': restricted_tags,
'cat_form': cat_form,
'preview_form': previews,
'dependency_form': dependency_form,
'whiteboard_form': whiteboard_form,
'valid_slug': valid_slug,
'supported_image_types': amo.SUPPORTED_IMAGE_TYPES,
}
return render(request, 'devhub/addons/edit/%s.html' % section, data)
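# Reports whether the add-on icon and every preview thumbnail already exist on
# storage, presumably so the frontend can poll for resize completion.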
@never_cache
@dev_required
@json_view
def image_status(request, addon_id, addon):
# Default icon needs no checking.
if not addon.icon_type or addon.icon_type.split('/')[0] == 'icon':
icons = True
else:
icons = storage.exists(os.path.join(addon.get_icon_dir(),
'%s-32.png' % addon.id))
previews = all(storage.exists(p.thumbnail_path)
for p in addon.previews.all())
return {'overall': icons and previews,
'icons': icons,
'previews': previews}
@dev_required
@json_view
def upload_image(request, addon_id, addon, upload_type):
errors = []
upload_hash = ''
if 'upload_image' in request.FILES:
upload_preview = request.FILES['upload_image']
upload_preview.seek(0)
upload_hash = uuid4().hex
loc = os.path.join(settings.TMP_PATH, upload_type, upload_hash)
with storage.open(loc, 'wb') as fd:
for chunk in upload_preview:
fd.write(chunk)
is_icon = upload_type == 'icon'
is_preview = upload_type == 'preview'
image_check = amo_utils.ImageCheck(upload_preview)
is_animated = image_check.is_animated() # will also cache .is_image()
if (upload_preview.content_type not in amo.IMG_TYPES or
not image_check.is_image()):
if is_icon:
errors.append(ugettext('Icons must be either PNG or JPG.'))
else:
errors.append(ugettext('Images must be either PNG or JPG.'))
if is_animated:
if is_icon:
errors.append(ugettext('Icons cannot be animated.'))
else:
errors.append(ugettext('Images cannot be animated.'))
if is_icon:
max_size = settings.MAX_ICON_UPLOAD_SIZE
else:
max_size = None
if max_size and upload_preview.size > max_size:
if is_icon:
errors.append(
ugettext('Please use images smaller than %dMB.')
% (max_size // 1024 // 1024))
content_waffle = waffle.switch_is_active('content-optimization')
if image_check.is_image() and content_waffle and is_preview:
min_size = amo.ADDON_PREVIEW_SIZES.get('min')
# * 100 to get a nice integer to compare against rather than 1.3333
required_ratio = min_size[0] * 100 // min_size[1]
actual_size = image_check.size
actual_ratio = actual_size[0] * 100 // actual_size[1]
if actual_size[0] < min_size[0] or actual_size[1] < min_size[1]:
# L10n: {0} is an image width (in pixels), {1} is a height.
errors.append(
ugettext('Image must be at least {0} pixels wide and {1} '
'pixels tall.').format(min_size[0], min_size[1]))
if actual_ratio != required_ratio:
errors.append(
ugettext('Image dimensions must be in the ratio 4:3.'))
if image_check.is_image() and content_waffle and is_icon:
standard_size = amo.ADDON_ICON_SIZES[-1]
icon_size = image_check.size
if icon_size[0] < standard_size or icon_size[1] < standard_size:
# L10n: {0} is an image width/height (in pixels).
errors.append(
ugettext(u'Icon must be at least {0} pixels wide and '
u'tall.').format(standard_size))
if icon_size[0] != icon_size[1]:
errors.append(
ugettext(u'Icon must be square (same width and height).'))
if errors and is_preview and os.path.exists(loc):
# Delete the temporary preview file in case of error.
os.unlink(loc)
else:
errors.append(ugettext('There was an error uploading your preview.'))
if errors:
upload_hash = ''
return {'upload_hash': upload_hash, 'errors': errors}
@dev_required
def version_edit(request, addon_id, addon, version_id):
version = get_object_or_404(addon.versions.all(), pk=version_id)
static_theme = addon.type == amo.ADDON_STATICTHEME
version_form = forms.VersionForm(
request.POST or None,
request.FILES or None,
instance=version,
request=request,
) if not static_theme else None
data = {}
if version_form:
data['version_form'] = version_form
is_admin = acl.action_allowed(request,
amo.permissions.REVIEWS_ADMIN)
if not static_theme and addon.accepts_compatible_apps():
qs = version.apps.all().select_related('min', 'max')
compat_form = forms.CompatFormSet(
request.POST or None, queryset=qs,
form_kwargs={'version': version})
data['compat_form'] = compat_form
if (request.method == 'POST' and
all([form.is_valid() for form in data.values()])):
if 'compat_form' in data:
for compat in data['compat_form'].save(commit=False):
compat.version = version
compat.save()
for compat in data['compat_form'].deleted_objects:
compat.delete()
for form in data['compat_form'].forms:
if (isinstance(form, forms.CompatForm) and
'max' in form.changed_data):
_log_max_version_change(addon, version, form.instance)
if 'version_form' in data:
            # VersionForm.save() clears the pending info request if the
# developer specifically asked for it, but we've got additional
# things to do here that depend on it.
had_pending_info_request = bool(addon.pending_info_request)
data['version_form'].save()
if 'approval_notes' in version_form.changed_data:
if had_pending_info_request:
log_and_notify(amo.LOG.APPROVAL_NOTES_CHANGED, None,
request.user, version)
else:
ActivityLog.create(amo.LOG.APPROVAL_NOTES_CHANGED,
addon, version, request.user)
if ('source' in version_form.changed_data and
version_form.cleaned_data['source']):
AddonReviewerFlags.objects.update_or_create(
addon=addon, defaults={'needs_admin_code_review': True})
commit_to_git = waffle.switch_is_active(
'enable-uploads-commit-to-git-storage')
if commit_to_git:
# Extract into git repository
extract_version_source_to_git.delay(
version_id=data['version_form'].instance.pk,
author_id=request.user.pk)
if had_pending_info_request:
log_and_notify(amo.LOG.SOURCE_CODE_UPLOADED, None,
request.user, version)
else:
ActivityLog.create(amo.LOG.SOURCE_CODE_UPLOADED,
addon, version, request.user)
messages.success(request, ugettext('Changes successfully saved.'))
return redirect('devhub.versions.edit', addon.slug, version_id)
data.update({
'addon': addon,
'version': version,
'is_admin': is_admin,
'choices': File.STATUS_CHOICES,
'files': version.files.all()})
return render(request, 'devhub/versions/edit.html', data)
def _log_max_version_change(addon, version, appversion):
details = {'version': version.version,
'target': appversion.version.version,
'application': appversion.application}
ActivityLog.create(amo.LOG.MAX_APPVERSION_UPDATED,
addon, version, details=details)
@dev_required
@post_required
@transaction.atomic
def version_delete(request, addon_id, addon):
version_id = request.POST.get('version_id')
version = get_object_or_404(addon.versions.all(), pk=version_id)
if (addon.is_recommended and
version.recommendation_approved and
version == addon.current_version):
# Developers shouldn't be able to delete/disable the current version
# of an approved add-on.
msg = ugettext('The latest approved version of a Recommended extension'
' cannot be deleted or disabled. Please contact AMO '
'Admins if you need help with this.')
messages.error(request, msg)
elif 'disable_version' in request.POST:
messages.success(
request,
ugettext('Version %s disabled.') % version.version)
version.is_user_disabled = True # Will update the files/activity log.
version.addon.update_status()
else:
messages.success(
request,
ugettext('Version %s deleted.') % version.version)
version.delete() # Will also activity log.
return redirect(addon.get_dev_url('versions'))
@dev_required
@post_required
@transaction.atomic
def version_reenable(request, addon_id, addon):
version_id = request.POST.get('version_id')
version = get_object_or_404(addon.versions.all(), pk=version_id)
messages.success(
request,
ugettext('Version %s re-enabled.') % version.version)
version.is_user_disabled = False # Will update the files/activity log.
version.addon.update_status()
return redirect(addon.get_dev_url('versions'))
def check_validation_override(request, form, addon, version):
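    # When an admin checks the override flag on a failed validation, flag the
    # version for admin/super review with an explanatory comment for reviewers.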
if version and form.cleaned_data.get('admin_override_validation'):
helper = ReviewHelper(request=request, addon=addon, version=version)
helper.set_data({
'operating_systems': '',
'applications': '',
'comments': ugettext(
u'This upload has failed validation, and may '
u'lack complete validation results. Please '
u'take due care when reviewing it.')})
helper.actions['super']['method']()
def auto_sign_file(file_):
"""If the file should be automatically reviewed and signed, do it."""
addon = file_.version.addon
if file_.is_experiment: # See bug 1220097.
ActivityLog.create(amo.LOG.EXPERIMENT_SIGNED, file_)
sign_file(file_)
elif file_.version.channel == amo.RELEASE_CHANNEL_UNLISTED:
# Sign automatically without manual review.
helper = ReviewHelper(request=None, addon=addon,
version=file_.version)
# Provide the file to review/sign to the helper.
helper.set_data({'addon_files': [file_],
'comments': 'automatic validation'})
helper.handler.process_public()
ActivityLog.create(amo.LOG.UNLISTED_SIGNED, file_)
def auto_sign_version(version, **kwargs):
# Sign all the unapproved files submitted, one for each platform.
for file_ in version.files.exclude(status=amo.STATUS_APPROVED):
auto_sign_file(file_, **kwargs)
@dev_required
def version_list(request, addon_id, addon):
qs = addon.versions.order_by('-created')
versions = amo_utils.paginate(request, qs)
is_admin = acl.action_allowed(request,
amo.permissions.REVIEWS_ADMIN)
token = request.COOKIES.get(API_TOKEN_COOKIE, None)
data = {'addon': addon,
'versions': versions,
'token': token,
'is_admin': is_admin}
return render(request, 'devhub/versions/list.html', data)
@dev_required
def version_bounce(request, addon_id, addon, version):
# Use filter since there could be dupes.
vs = addon.versions.filter(version=version).order_by('-created').first()
if vs:
return redirect('devhub.versions.edit', addon.slug, vs.id)
else:
raise http.Http404()
@json_view
@dev_required
def version_stats(request, addon_id, addon):
qs = addon.versions.all()
reviews = (qs.annotate(review_count=Count('ratings'))
.values('id', 'version', 'review_count'))
data = {v['id']: v for v in reviews}
files = (
qs.annotate(file_count=Count('files')).values_list('id', 'file_count'))
for id_, file_count in files:
# For backwards compatibility
data[id_]['files'] = file_count
data[id_]['reviews'] = data[id_].pop('review_count')
return data
@login_required
def submit_addon(request):
return render_agreement(
request=request,
template='devhub/addons/submit/start.html',
next_step='devhub.submit.distribution',
)
@dev_required
def submit_version_agreement(request, addon_id, addon):
return render_agreement(
request=request,
template='devhub/addons/submit/start.html',
next_step=reverse('devhub.submit.version', args=(addon.slug,)),
submit_page='version',
)
@transaction.atomic
def _submit_distribution(request, addon, next_view):
# Accept GET for the first load so we can preselect the channel.
form = forms.DistributionChoiceForm(
request.POST if request.method == 'POST' else
request.GET if request.GET.get('channel') else None)
if request.method == 'POST' and form.is_valid():
data = form.cleaned_data
args = [addon.slug] if addon else []
args.append(data['channel'])
return redirect(next_view, *args)
return render(request, 'devhub/addons/submit/distribute.html',
{'distribution_form': form,
'submit_notification_warning':
get_config('submit_notification_warning'),
'submit_page': 'version' if addon else 'addon'})
@login_required
def submit_addon_distribution(request):
if not UploadRestrictionChecker(request).is_submission_allowed():
return redirect('devhub.submit.agreement')
return _submit_distribution(request, None, 'devhub.submit.upload')
@dev_required(submitting=True)
def submit_version_distribution(request, addon_id, addon):
if not UploadRestrictionChecker(request).is_submission_allowed():
return redirect('devhub.submit.version.agreement', addon.slug)
return _submit_distribution(request, addon, 'devhub.submit.version.upload')
WIZARD_COLOR_FIELDS = [
('frame',
_(u'Header area background'),
_(u'The color of the header area background, displayed in the part of '
u'the header not covered or visible through the header image. Manifest '
u'field: frame.'),
'rgba(229,230,232,1)'),
('tab_background_text',
_(u'Header area text and icons'),
_(u'The color of the text and icons in the header area, except the '
u'active tab. Manifest field: tab_background_text.'),
     'rgba(0,0,0,1)'),
('toolbar',
_(u'Toolbar area background'),
_(u'The background color for the navigation bar, the bookmarks bar, and '
u'the selected tab. Manifest field: toolbar.'),
False),
('bookmark_text',
_(u'Toolbar area text and icons'),
_(u'The color of the text and icons in the toolbar and the active tab. '
u'Manifest field: bookmark_text.'),
False),
('toolbar_field',
_(u'Toolbar field area background'),
_(u'The background color for fields in the toolbar, such as the URL bar. '
u'Manifest field: toolbar_field.'),
False),
('toolbar_field_text',
_(u'Toolbar field area text'),
_(u'The color of text in fields in the toolbar, such as the URL bar. '
u'Manifest field: toolbar_field_text.'),
False)
]
@transaction.atomic
def _submit_upload(request, addon, channel, next_view, wizard=False):
""" If this is a new addon upload `addon` will be None.
next_view is the view that will be redirected to.
"""
form = forms.NewUploadForm(
request.POST or None,
request.FILES or None,
addon=addon,
request=request
)
if request.method == 'POST' and form.is_valid():
data = form.cleaned_data
if addon:
version = Version.from_upload(
upload=data['upload'],
addon=addon,
selected_apps=data['compatible_apps'],
channel=channel,
parsed_data=data['parsed_data'])
url_args = [addon.slug, version.id]
else:
addon = Addon.from_upload(
upload=data['upload'],
channel=channel,
selected_apps=data['compatible_apps'],
parsed_data=data['parsed_data'],
user=request.user)
version = addon.find_latest_version(channel=channel)
url_args = [addon.slug]
check_validation_override(request, form, addon, version)
if (addon.status == amo.STATUS_NULL and
addon.has_complete_metadata() and
channel == amo.RELEASE_CHANNEL_LISTED):
addon.update(status=amo.STATUS_NOMINATED)
# auto-sign versions (the method checks eligibility)
auto_sign_version(version)
add_dynamic_theme_tag(version)
return redirect(next_view, *url_args)
is_admin = acl.action_allowed(request,
amo.permissions.REVIEWS_ADMIN)
if addon:
channel_choice_text = (forms.DistributionChoiceForm().LISTED_LABEL
if channel == amo.RELEASE_CHANNEL_LISTED else
forms.DistributionChoiceForm().UNLISTED_LABEL)
else:
channel_choice_text = '' # We only need this for Version upload.
submit_page = 'version' if addon else 'addon'
template = ('devhub/addons/submit/upload.html' if not wizard else
'devhub/addons/submit/wizard.html')
existing_properties = (
extract_theme_properties(addon, channel)
if wizard and addon else {})
unsupported_properties = (
wizard_unsupported_properties(
existing_properties,
[field for field, _, _, _ in WIZARD_COLOR_FIELDS])
if existing_properties else [])
return render(request, template,
{'new_addon_form': form,
'is_admin': is_admin,
'addon': addon,
'submit_notification_warning':
get_config('submit_notification_warning'),
'submit_page': submit_page,
'channel': channel,
'channel_choice_text': channel_choice_text,
'existing_properties': existing_properties,
'colors': WIZARD_COLOR_FIELDS,
'unsupported_properties': unsupported_properties,
'version_number':
get_next_version_number(addon) if wizard else None})
@login_required
def submit_addon_upload(request, channel):
if not UploadRestrictionChecker(request).is_submission_allowed():
return redirect('devhub.submit.agreement')
channel_id = amo.CHANNEL_CHOICES_LOOKUP[channel]
return _submit_upload(
request, None, channel_id, 'devhub.submit.source')
@dev_required(submitting=True)
@no_admin_disabled
def submit_version_upload(request, addon_id, addon, channel):
if not UploadRestrictionChecker(request).is_submission_allowed():
return redirect('devhub.submit.version.agreement', addon.slug)
channel_id = amo.CHANNEL_CHOICES_LOOKUP[channel]
return _submit_upload(
request, addon, channel_id, 'devhub.submit.version.source')
@dev_required
@no_admin_disabled
def submit_version_auto(request, addon_id, addon):
if not UploadRestrictionChecker(request).is_submission_allowed():
return redirect('devhub.submit.version.agreement', addon.slug)
# choose the channel we need from the last upload
last_version = addon.find_latest_version(None, exclude=())
if not last_version:
return redirect('devhub.submit.version.distribution', addon.slug)
channel = last_version.channel
return _submit_upload(
request, addon, channel, 'devhub.submit.version.source')
@login_required
def submit_addon_theme_wizard(request, channel):
if not UploadRestrictionChecker(request).is_submission_allowed():
return redirect('devhub.submit.agreement')
channel_id = amo.CHANNEL_CHOICES_LOOKUP[channel]
return _submit_upload(
request, None, channel_id, 'devhub.submit.source', wizard=True)
@dev_required
@no_admin_disabled
def submit_version_theme_wizard(request, addon_id, addon, channel):
if not UploadRestrictionChecker(request).is_submission_allowed():
return redirect('devhub.submit.version.agreement', addon.slug)
channel_id = amo.CHANNEL_CHOICES_LOOKUP[channel]
return _submit_upload(
request, addon, channel_id, 'devhub.submit.version.source',
wizard=True)
def _submit_source(request, addon, version, next_view):
redirect_args = [addon.slug, version.pk] if version else [addon.slug]
if addon.type != amo.ADDON_EXTENSION:
return redirect(next_view, *redirect_args)
latest_version = version or addon.find_latest_version(channel=None)
form = forms.SourceForm(
request.POST or None,
request.FILES or None,
instance=latest_version,
request=request)
if request.method == 'POST' and form.is_valid():
if form.cleaned_data.get('source'):
AddonReviewerFlags.objects.update_or_create(
addon=addon, defaults={'needs_admin_code_review': True})
activity_log = ActivityLog.objects.create(
action=amo.LOG.SOURCE_CODE_UPLOADED.id,
user=request.user,
details={
'comments': (u'This version has been automatically '
u'flagged for admin review, as it had source '
u'files attached when submitted.')})
VersionLog.objects.create(
version_id=latest_version.id, activity_log=activity_log)
form.save()
# We can extract the actual source file only after the form
# has been saved because the file behind it may not have been
            # written to disk yet (e.g. for in-memory uploads).
if waffle.switch_is_active('enable-uploads-commit-to-git-storage'):
extract_version_source_to_git.delay(
version_id=form.instance.pk,
author_id=request.user.pk)
return redirect(next_view, *redirect_args)
context = {
'form': form,
'addon': addon,
'version': version,
'submit_page': 'version' if version else 'addon',
}
return render(request, 'devhub/addons/submit/source.html', context)
@dev_required(submitting=True)
def submit_addon_source(request, addon_id, addon):
return _submit_source(request, addon, None, 'devhub.submit.details')
@dev_required(submitting=True)
def submit_version_source(request, addon_id, addon, version_id):
version = get_object_or_404(addon.versions.all(), id=version_id)
return _submit_source(
request, addon, version, 'devhub.submit.version.details')
def _submit_details(request, addon, version):
static_theme = addon.type == amo.ADDON_STATICTHEME
if version:
skip_details_step = (version.channel == amo.RELEASE_CHANNEL_UNLISTED or
(static_theme and addon.has_complete_metadata()))
if skip_details_step:
# Nothing to do here.
return redirect(
'devhub.submit.version.finish', addon.slug, version.pk)
latest_version = version
else:
# Figure out the latest version early in order to pass the same
# instance to each form that needs it (otherwise they might overwrite
# each other).
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
if not latest_version:
            # No listed version? Then there is nothing to do in the listed
            # submission flow.
return redirect('devhub.submit.finish', addon.slug)
forms_list = []
context = {
'addon': addon,
'version': version,
'sources_provided': latest_version.sources_provided,
'submit_page': 'version' if version else 'addon',
}
post_data = request.POST if request.method == 'POST' else None
show_all_fields = not version or not addon.has_complete_metadata()
if show_all_fields:
if waffle.switch_is_active('content-optimization'):
describe_form = forms.DescribeFormContentOptimization(
post_data, instance=addon, request=request, version=version,
should_auto_crop=True)
else:
describe_form = forms.DescribeForm(
post_data, instance=addon, request=request, version=version)
cat_form_class = (forms.CategoryFormSet if not static_theme
else forms.SingleCategoryForm)
cat_form = cat_form_class(post_data, addon=addon, request=request)
policy_form = forms.PolicyForm(post_data, addon=addon)
license_form = forms.LicenseForm(
post_data, version=latest_version, prefix='license')
context.update(license_form.get_context())
context.update(
form=describe_form,
cat_form=cat_form,
policy_form=policy_form)
forms_list.extend([
describe_form,
cat_form,
policy_form,
context['license_form']
])
if not static_theme:
# Static themes don't need this form
reviewer_form = forms.VersionForm(
post_data, instance=latest_version, request=request)
context.update(reviewer_form=reviewer_form)
forms_list.append(reviewer_form)
if request.method == 'POST' and all(
form.is_valid() for form in forms_list):
if show_all_fields:
addon = describe_form.save()
cat_form.save()
policy_form.save()
license_form.save(log=False)
if not static_theme:
reviewer_form.save()
if addon.status == amo.STATUS_NULL:
addon.update(status=amo.STATUS_NOMINATED)
signals.submission_done.send(sender=addon)
elif not static_theme:
reviewer_form.save()
if not version:
return redirect('devhub.submit.finish', addon.slug)
else:
return redirect('devhub.submit.version.finish',
addon.slug, version.id)
template = 'devhub/addons/submit/%s' % (
'describe.html' if show_all_fields else 'describe_minimal.html')
return render(request, template, context)
@dev_required(submitting=True)
def submit_addon_details(request, addon_id, addon):
return _submit_details(request, addon, None)
@dev_required(submitting=True)
def submit_version_details(request, addon_id, addon, version_id):
version = get_object_or_404(addon.versions.all(), id=version_id)
return _submit_details(request, addon, version)
def _submit_finish(request, addon, version):
uploaded_version = version or addon.versions.latest()
try:
author = addon.authors.all()[0]
except IndexError:
# This should never happen.
author = None
if (not version and author and
uploaded_version.channel == amo.RELEASE_CHANNEL_LISTED and
not Version.objects.exclude(pk=uploaded_version.pk)
.filter(addon__authors=author,
channel=amo.RELEASE_CHANNEL_LISTED)
.exclude(addon__status=amo.STATUS_NULL)
.exists()):
        # If that's the first time this developer has submitted a listed addon
# (no other listed Version by this author exists) send them a welcome
# email.
# We can use locale-prefixed URLs because the submitter probably
# speaks the same language by the time he/she reads the email.
context = {
'addon_name': str(addon.name),
'app': str(request.APP.pretty),
'detail_url': absolutify(addon.get_url_path()),
'version_url': absolutify(addon.get_dev_url('versions')),
'edit_url': absolutify(addon.get_dev_url('edit')),
}
tasks.send_welcome_email.delay(addon.id, [author.email], context)
submit_page = 'version' if version else 'addon'
return render(request, 'devhub/addons/submit/done.html',
{'addon': addon,
'uploaded_version': uploaded_version,
'submit_page': submit_page,
'preview': uploaded_version.previews.first()})
@dev_required(submitting=True)
def submit_addon_finish(request, addon_id, addon):
# Bounce to the details step if incomplete
if (not addon.has_complete_metadata() and
addon.find_latest_version(channel=amo.RELEASE_CHANNEL_LISTED)):
return redirect('devhub.submit.details', addon.slug)
# Bounce to the versions page if they don't have any versions.
if not addon.versions.exists():
return redirect('devhub.submit.version', addon.slug)
return _submit_finish(request, addon, None)
@dev_required
def submit_version_finish(request, addon_id, addon, version_id):
version = get_object_or_404(addon.versions.all(), id=version_id)
return _submit_finish(request, addon, version)
@dev_required
@post_required
def remove_locale(request, addon_id, addon):
POST = request.POST
if 'locale' in POST and POST['locale'] != addon.default_locale:
addon.remove_locale(POST['locale'])
return http.HttpResponse()
return http.HttpResponseBadRequest()
@dev_required
@post_required
def request_review(request, addon_id, addon):
if not addon.can_request_review():
return http.HttpResponseBadRequest()
latest_version = addon.find_latest_version(amo.RELEASE_CHANNEL_LISTED,
exclude=())
if latest_version:
for f in latest_version.files.filter(status=amo.STATUS_DISABLED):
f.update(status=amo.STATUS_AWAITING_REVIEW)
# Clear the nomination date so it gets set again in Addon.watch_status.
latest_version.update(nomination=None)
if addon.has_complete_metadata():
addon.update(status=amo.STATUS_NOMINATED)
messages.success(request, ugettext('Review requested.'))
else:
messages.success(request, _(
'You must provide further details to proceed.'))
ActivityLog.create(amo.LOG.CHANGE_STATUS, addon, addon.status)
return redirect(addon.get_dev_url('versions'))
def docs(request, doc_name=None):
mdn_docs = {
None: '',
'getting-started': '',
'reference': '',
'how-to': '',
'how-to/getting-started': '',
'how-to/extension-development': '#Extensions',
'how-to/other-addons': '#Other_types_of_add-ons',
'how-to/thunderbird-mobile': '#Application-specific',
'how-to/theme-development': '#Themes',
'themes': '/Themes/Background',
'themes/faq': '/Themes/Background/FAQ',
'policies': '/AMO/Policy',
'policies/reviews': '/AMO/Policy/Reviews',
'policies/contact': '/AMO/Policy/Contact',
'policies/agreement': '/AMO/Policy/Agreement',
}
if doc_name in mdn_docs:
return redirect(MDN_BASE + mdn_docs[doc_name],
permanent=True)
raise http.Http404()
@login_required
def api_key_agreement(request):
return render_agreement(
request=request,
template='devhub/api/agreement.html',
next_step='devhub.api_key',
)
def render_agreement(request, template, next_step, **extra_context):
form = forms.AgreementForm(
request.POST if request.method == 'POST' else None,
request=request
)
if request.method == 'POST' and form.is_valid():
# Developer has validated the form: let's update its profile and
# redirect to next step. Note that the form is supposed to always be
# invalid if submission is not allowed for this request.
data = {
'read_dev_agreement': datetime.datetime.now(),
}
if 'display_name' in form.cleaned_data:
data['display_name'] = form.cleaned_data['display_name']
request.user.update(**data)
return redirect(next_step)
elif not UploadRestrictionChecker(request).is_submission_allowed():
# Developer has either posted an invalid form or just landed on the
        # page but hasn't read the agreement yet, or isn't allowed to submit
# for some other reason (denied ip/email): show the form (with
# potential errors highlighted)
context = {
'agreement_form': form,
'agreement_message': str(
DeveloperAgreementRestriction.error_message
),
}
context.update(extra_context)
return render(request, template, context)
else:
        # The developer has already read the agreement, so we should just
        # redirect to the next step.
response = redirect(next_step)
return response
@login_required
@transaction.atomic
def api_key(request):
if not UploadRestrictionChecker(request).is_submission_allowed():
return redirect(reverse('devhub.api_key_agreement'))
try:
credentials = APIKey.get_jwt_key(user=request.user)
except APIKey.DoesNotExist:
credentials = None
try:
confirmation = APIKeyConfirmation.objects.get(
user=request.user)
except APIKeyConfirmation.DoesNotExist:
confirmation = None
if request.method == 'POST':
has_confirmed_or_is_confirming = confirmation and (
confirmation.confirmed_once or confirmation.is_token_valid(
request.POST.get('confirmation_token'))
)
# Revoking credentials happens regardless of action, if there were
# credentials in the first place.
if (credentials and
request.POST.get('action') in ('revoke', 'generate')):
credentials.update(is_active=None)
log.info('revoking JWT key for user: {}, {}'
.format(request.user.id, credentials))
send_key_revoked_email(request.user.email, credentials.key)
msg = ugettext(
'Your old credentials were revoked and are no longer valid.')
messages.success(request, msg)
# If trying to generate with no confirmation instance, we don't
# generate the keys immediately but instead send you an email to
# confirm the generation of the key. This should only happen once per
# user, unless the instance is deleted by admins to reset the process
# for that user.
if confirmation is None and request.POST.get('action') == 'generate':
confirmation = APIKeyConfirmation.objects.create(
user=request.user, token=APIKeyConfirmation.generate_token())
confirmation.send_confirmation_email()
# If you have a confirmation instance, you need to either have it
# confirmed once already or have the valid token proving you received
# the email.
elif (has_confirmed_or_is_confirming and
request.POST.get('action') == 'generate'):
confirmation.update(confirmed_once=True)
new_credentials = APIKey.new_jwt_credentials(request.user)
log.info('new JWT key created: {}'.format(new_credentials))
send_key_change_email(request.user.email, new_credentials.key)
else:
# If we land here, either confirmation token is invalid, or action
# is invalid, or state is outdated (like user trying to revoke but
# there are already no credentials).
# We can just pass and let the redirect happen.
pass
# In any case, redirect after POST.
return redirect(reverse('devhub.api_key'))
context_data = {
'title': ugettext('Manage API Keys'),
'credentials': credentials,
'confirmation': confirmation,
'token': request.GET.get('token') # For confirmation step.
}
return render(request, 'devhub/api/key.html', context_data)
def send_key_change_email(to_email, key):
template = loader.get_template('devhub/email/new-key-email.ltxt')
url = absolutify(reverse('devhub.api_key'))
send_mail(
ugettext('New API key created'),
template.render({'key': key, 'url': url}),
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[to_email],
)
def send_key_revoked_email(to_email, key):
template = loader.get_template('devhub/email/revoked-key-email.ltxt')
url = absolutify(reverse('devhub.api_key'))
send_mail(
ugettext('API key revoked'),
template.render({'key': key, 'url': url}),
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[to_email],
)
@dev_required
@json_view
def theme_background_image(request, addon_id, addon, channel):
channel_id = amo.CHANNEL_CHOICES_LOOKUP[channel]
version = addon.find_latest_version(channel_id)
return (version.get_background_images_encoded(header_only=True) if version
else {})
def _clean_next_url(request):
gets = request.GET.copy()
url = gets.get('to', settings.LOGIN_REDIRECT_URL)
if not is_safe_url(url, allowed_hosts=(settings.DOMAIN,)):
log.info(u'Unsafe redirect to %s' % url)
url = settings.LOGIN_REDIRECT_URL
domain = gets.get('domain', None)
if domain in settings.VALID_LOGIN_REDIRECTS.keys():
url = settings.VALID_LOGIN_REDIRECTS[domain] + url
gets['to'] = url
request.GET = gets
return request
def logout(request):
user = request.user
if not user.is_anonymous:
log.debug(u"User (%s) logged out" % user)
if 'to' in request.GET:
request = _clean_next_url(request)
next_url = request.GET.get('to')
if not next_url:
next_url = settings.LOGOUT_REDIRECT_URL
prefixer = get_url_prefix()
if prefixer:
next_url = prefixer.fix(next_url)
response = http.HttpResponseRedirect(next_url)
logout_user(request, response)
return response
|
py | 1a4dc561ebb13a356bc029d532ae445826bc7bc1 | import random
BOARD_WIDTH = 3
BOARD_HEIGHT = 3
def new_board():
board = []
for x in range(0, BOARD_WIDTH):
column = []
for y in range(0, BOARD_HEIGHT):
column.append(None)
board.append(column)
return board
def get_winner(board):
all_line_co_ords = get_all_line_co_ords()
for line in all_line_co_ords:
line_values = [board[x][y] for (x, y) in line]
if len(set(line_values)) == 1 and line_values[0] is not None:
return line_values[0]
return None
def get_all_line_co_ords():
cols = []
for x in range(0, BOARD_WIDTH):
col = []
for y in range(0, BOARD_HEIGHT):
col.append((x, y))
cols.append(col)
rows = []
for y in range(0, BOARD_HEIGHT):
row = []
for x in range(0, BOARD_WIDTH):
row.append((x, y))
rows.append(row)
diagonals = [
[(0, 0), (1, 1), (2, 2)],
[(0, 2), (1, 1), (2, 0)]
]
return cols + rows + diagonals
def render(board):
rows = []
for y in range(0, BOARD_HEIGHT):
row = []
for x in range(0, BOARD_WIDTH):
row.append(board[x][y])
rows.append(row)
row_num = 0
print ' 0 1 2 '
print ' ------'
for row in rows:
output_row = ''
for sq in row:
if sq is None:
output_row += ' '
else:
output_row += sq
print "%d|%s|" % (row_num, ' '.join(output_row))
row_num += 1
print ' ------'
def make_move(player, board, move_co_ords):
if board[move_co_ords[0]][move_co_ords[1]] is not None:
raise Exception("Illegal move!")
board[move_co_ords[0]][move_co_ords[1]] = player
def is_board_full(board):
for col in board:
for sq in col:
if sq is None:
return False
return True
def play(player1_f, player2_f):
players = [
('X', player1_f),
('O', player2_f),
]
turn_number = 0
board = new_board()
while True:
current_player_id, current_player_f = players[turn_number % 2]
render(board)
move_co_ords = current_player_f(board, current_player_id)
make_move(current_player_id, board, move_co_ords)
winner = get_winner(board)
if winner is not None:
render(board)
print "THE WINNER IS %s!" % winner
break
if is_board_full(board):
render(board)
print "IT'S A DRAW!"
break
turn_number += 1
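# Hedged usage sketch (not part of the original module): `play` expects two
# callables taking (board, player_id) and returning (x, y) move co-ordinates.
# Both players below are assumptions added purely for illustration.
def random_player(board, player_id):
    empty_squares = [(x, y) for x in range(0, BOARD_WIDTH)
                     for y in range(0, BOARD_HEIGHT) if board[x][y] is None]
    return random.choice(empty_squares)
def human_player(board, player_id):
    raw = raw_input("%s move as x,y: " % player_id)
    x, y = raw.split(',')
    return (int(x), int(y))
if __name__ == '__main__':
    play(human_player, random_player)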
|
py | 1a4dc66f907ea3084cd9e52b2ad191abc7ff8d39 | """
https://pyvis.readthedocs.io/en/latest/index.html#
https://deepgraph.readthedocs.io/en/latest/what_is_deepgraph.html
https://towardsdatascience.com/pyviz-simplifying-the-data-visualisation-process-in-python-1b6d2cb728f1
https://graphviz.org/
"""
|
py | 1a4dc7c96fe1e01d32e7f895ae8a4013db881af7 | """LSTM Controller."""
import torch
from torch import nn
from torch.nn import Parameter
import numpy as np
# torch.set_default_tensor_type('torch.cuda.FloatTensor')
class LSTMController(nn.Module):
"""An NTM controller based on LSTM."""
def __init__(self, num_inputs, num_outputs, num_layers):
super(LSTMController, self).__init__()
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.num_layers = num_layers
self.lstm = nn.LSTM(input_size=num_inputs,
hidden_size=num_outputs,
num_layers=num_layers)
# The hidden state is a learned parameter
self.lstm_h_bias = Parameter(torch.randn(self.num_layers, 1, self.num_outputs) * 0.05)
self.lstm_c_bias = Parameter(torch.randn(self.num_layers, 1, self.num_outputs) * 0.05)
self.reset_parameters()
def create_new_state(self, batch_size):
# Dimension: (num_layers * num_directions, batch, hidden_size)
lstm_h = self.lstm_h_bias.clone().repeat(1, batch_size, 1)
lstm_c = self.lstm_c_bias.clone().repeat(1, batch_size, 1)
return lstm_h, lstm_c
def reset_parameters(self):
for p in self.lstm.parameters():
if p.dim() == 1:
nn.init.constant_(p, 0)
else:
stdev = 5 / (np.sqrt(self.num_inputs + self.num_outputs))
nn.init.uniform_(p, -stdev, stdev)
def size(self):
return self.num_inputs, self.num_outputs
def forward(self, x, prev_state):
x = x.unsqueeze(0)
outp, state = self.lstm(x, prev_state)
return outp.squeeze(0), state
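# Hedged usage sketch (not part of the original module): the sizes, batch size
# and random input below are assumptions chosen only to show the tensor shapes
# produced by a single controller step.
if __name__ == '__main__':
    controller = LSTMController(num_inputs=10, num_outputs=20, num_layers=1)
    batch_size = 4
    prev_state = controller.create_new_state(batch_size)
    x = torch.randn(batch_size, 10)    # one time step of input
    output, new_state = controller(x, prev_state)
    print(output.shape)                # torch.Size([4, 20])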
|
py | 1a4dc83f3ee84458da3431140fca16d7c5e8c978 | import requests
import os
import zipfile
def unzipper(file_path, dirname):
with zipfile.ZipFile(file_path) as zf:
files = zf.namelist()
zf.extractall(dirname)
def download_http(url, file_path):
r = requests.get(url)
with open(file_path, "wb") as f:
f.write(r.content)
def download(url, file_path):
folder = os.path.dirname(file_path)
if not os.path.exists(folder):
os.makedirs(folder)
print("Starting request for file: " + file_path)
# start http request
if url.startswith('http://'):
return download_http(url, file_path)
    # download file from ftp (no FTP helper is defined in this module)
    elif url.startswith('ftp://'):
        raise NotImplementedError("FTP downloads are not implemented here")
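# Hedged usage sketch (URL and paths are illustrative only): fetch an archive
# over HTTP and unpack it next to the download location.
if __name__ == '__main__':
    archive_path = os.path.join("downloads", "archive.zip")
    download("http://example.com/archive.zip", archive_path)
    unzipper(archive_path, os.path.join("downloads", "extracted"))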
|
py | 1a4dc8419809e4701418e09d23fe1cde28bc952c | from unittest import TestCase
from unittest.mock import patch, Mock
from modules.classes import (
ImaginaryServiceConnection,
ServiceClient,
ServiceError,
)
class TestImaginaryServiceConnection(TestCase):
"""
Tests for the `modules.classes.ImaginaryServiceConnection` class.
"""
@patch('modules.classes.SERVICE_URL', 'something-else')
def test_instantiation(self):
"""
Instantiation of ImaginaryServiceConnection works as expected.
"""
isc = ImaginaryServiceConnection('username', 'password')
# Instantiating the class should assign the expected class attributes
self.assertEqual('username', isc.username)
self.assertEqual('password', isc.password)
self.assertEqual('something-else', isc.service_url)
def test_call(self):
"""
`ImaginaryServiceConnection.call` works as expected.
"""
isc = ImaginaryServiceConnection('username', 'password')
# Calling the `.call` function should return an integer
result = isc.call('function', 'a', 'b', {'c': 1, 'd': 2})
self.assertIsInstance(result, int)
def test_call_with_known_exception(self):
"""
`ImaginaryServiceConnection.call` raises `ServiceError` as expected.
"""
isc = ImaginaryServiceConnection('username', 'password')
# Calling the `.call` function with 'fail' should raise ServiceError
with self.assertRaises(ServiceError):
isc.call('fail', 'a', 'b', {'c': 1, 'd': 2})
@patch('modules.classes.ImaginaryServiceConnection.call')
def test_call_with_simulated_exception(self, call):
"""
`ImaginaryServiceConnection.call` raises `ServiceError` as expected
with simulated error.
"""
call.side_effect = ServiceError('Simulated error')
isc = ImaginaryServiceConnection('username', 'password')
# Calling the `.call` function should return the simulated error
with self.assertRaises(ServiceError):
isc.call('fail', 'a', 'b', {'c': 1, 'd': 2})
class TestServiceClient(TestCase):
"""
Tests for the `modules.classes.ServiceClient` class.
"""
def test_instantiation(self):
"""
Instantiation of `ServiceClient` works as expected.
"""
sc = ServiceClient('username', 'password')
# Instantiating the class should create the `connection` attribute on
# the instance using ImaginaryServiceConnection
self.assertIsInstance(sc.connection, ImaginaryServiceConnection)
def test_call_remote_function(self):
"""
ServiceClient.call_remote_function works as expected.
"""
sc = ServiceClient('username', 'password')
result = sc.call_remote_function(
'some_function',
'a', 'b',
{'c': 1, 'd': 2},
)
# Should receive an integer back
self.assertIsInstance(result, int)
@patch('modules.classes.ImaginaryServiceConnection.call')
def test_call_remote_function_with_mocks(self, call):
"""
ServiceClient.call_remote_function works as expected while mocking
`ImaginaryServiceConnection.call`.
"""
# Set up the mock `.call` response for
# `ImaginaryServiceConnection.call`
call.return_value = 'result'
sc = ServiceClient('username', 'password')
result = sc.call_remote_function(
'some_function',
'a', 'b',
{'c': 1, 'd': 2},
)
# Should receive 'result' back
self.assertEqual(call.return_value, result)
@patch('modules.classes.ImaginaryServiceConnection.call')
def test_call_remote_function_with_error(self, call):
"""
ServiceClient.call_remote_function raises `ServiceError` as expected
when an error is encountered via `ImaginaryServiceConnection.call`
"""
# Set up the error encountered during call to
# `ImaginaryServiceConnection.call`
call.side_effect = ServiceError('Something bad!')
sc = ServiceClient('username', 'password')
# `ServiceError` is raised when calling
# `ServiceClient.call_remote_function`
with self.assertRaises(ServiceError):
sc.call_remote_function(
'some_function',
'a', 'b',
{'c': 1, 'd': 2},
)
class TestConstructorMocks(TestCase):
"""
Tests for `modules.classes` using mocked class constructors to limit test
scope and assert calls to nested objects.
"""
@patch('modules.classes.ImaginaryServiceConnection')
def test_service_client_with_mocked_connection_class(
self,
ImaginaryServiceConnection,
):
"""
Mock the `modules.classes.ImaginaryServiceConnection` class and assert
that it was used as expected by the ServiceClient.
"""
# Establish a mock class instance to be returned by the constructor
isc_instance_mock = Mock()
# Set a return value for the instance's `call` function
isc_instance_mock.call.return_value = 'result'
# Set the instance mock as the return value of the class itself
ImaginaryServiceConnection.return_value = isc_instance_mock
# Instantiate `ServiceClient` and call `call_remote_function`
sc = ServiceClient('username', 'password')
result = sc.call_remote_function(
'some_function',
'a', 'b',
{'c': 1, 'd': 2},
)
# Verify the expected calls to the mocked class
# The base class should have been instantiated with the expected args
ImaginaryServiceConnection.assert_called_once_with(
'username',
'password',
)
# The mocked `ImaginaryServiceConnection` instance should have had the
# `.call` function called with the expected args
isc_instance_mock.call.assert_called_once_with(
'some_function',
'a', 'b',
{'c': 1, 'd': 2},
)
# The `ServiceClient.call_remote_function` call should have returned
# the expected result
self.assertEqual('result', result)
|
py | 1a4dc92e84218659a5be29b8641d65ab1407e97c | #coding:utf-8
n = int(raw_input())
m = int(raw_input())
matrix = []
for _ in xrange(n):
matrix.append(raw_input().split())
maxmum = 0
region = 0
seen_set = set()
def is_valid(x, y):
return x > -1 and x < n and y > -1 and y < m
def dfs(cell):
global region
seen_set.add(cell)
region += 1
a, b = cell
    # compact form: scan the 3x3 neighbourhood around the cell
for x in [a-1, a, a+1]:
for y in [b-1, b, b+1]:
if is_valid(x, y) and matrix[x][y] == '1' and (x, y) not in seen_set:
dfs((x, y))
return region
for i in xrange(n):
for j in xrange(m):
if matrix[i][j] == '1' and (i, j) not in seen_set:
maxmum = max(maxmum, dfs((i, j)))
region = 0
print maxmum
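# Illustrative run (assumed input format: n, then m, then n rows of m cells):
#   4
#   5
#   1 1 0 0 0
#   0 1 1 0 0
#   0 0 1 0 1
#   1 0 0 0 1
# The largest 8-connected region of 1s has 5 cells, so the script prints 5.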
|
py | 1a4dc93085326eb2ae715e816c62ff6fd29dda2b | import functools
import string
from django.shortcuts import render
# Create your views here.
def validate_headers(*args_, **kwargs_):
def inner_function(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
            m = args_[0]
            # validate the configured headers with the wrapped function itself;
            # the module-level name now refers to this wrapper and would recurse
            return (func(str(m['version'])) and
                    func(str(m['device-id'])) and
                    func(str(m['device-type'])))
return wrapper
return inner_function
@validate_headers({'version': 1, 'device-id': "asdasdasd", 'device-type': "android"})
def validate(header: str) -> bool:
return not header
def validate_auth_token(*args_, **kwargs_):
def inner_function(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
for _ in range(args_[0]):
func(*args, **kwargs)
return wrapper
return inner_function
|
py | 1a4dc9342b6c50f1fc4206fa9e7b01c20cfd45c9 | '''
OpenIMU SPI package version 0.2.0.
-pip install spidev3.4,
-read package through SPI interface, OpenIMU330BI test on Pi3 board(Raspbian OS,Raspberry 3B+).
-Spi slave: OpenIMU 330 EVK
-Pins connection:
Pi3 330/300 evk
miso <==> miso
mosi <==> mosi
sck <==> sck
gpio(bcm4) <==> cs black line
gpio(bcm17) <==> drdy red line
gnd <==> gnd
bcm 27 nRST
@cek from Aceinna 2019.11.4
'''
import os
import sys
import spidev
import time, math
import struct
try:
import RPi.GPIO as GPIO
except RuntimeError:
print("Error import RPi.GPIO!")
import traceback
from gpio import *
class SpiOpenIMU:
def __init__(self, target_module = "300", fw='0.0', cs_pin = 4, interrupt_pin = 17, drdy_status = False):
'''
pin number use the BCM code
'''
self.spi = spidev.SpiDev()
self.cs_channel = cs_pin
self.interrupt_channel = interrupt_pin
self.drdy = drdy_status
self.speed = 1000000 # 1M
self.delay = 0 #ns
        self.word = 8  # hardware limit: 8 bits per word
self.fw_version = fw
self.power = aceinna_gpio(use_gpio=True)# bcm gpio rst EVK power
self.gpio_setting()
self.spidev_setting()
self.check_settings()
time.sleep(0.1)
self.module = target_module
print("initialize based on: %s, with DRDY_usage: %s" % (self.module, self.drdy))
def gpio_setting(self):
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.cs_channel,GPIO.OUT)
GPIO.output(self.cs_channel,GPIO.HIGH) # used as CS line replace CS0 default in Pi3 board
if self.drdy:
GPIO.setup(self.interrupt_channel,GPIO.IN) # channel used as IMU data ready detection
time.sleep(0.4)
GPIO.add_event_detect(self.interrupt_channel,GPIO.FALLING)
return True
def single_read(self, target_register):
# if self.drdy and self.module != "300":
# while not GPIO.event_detected(self.interrupt_channel):
# pass
if self.module == "381":
time.sleep(0.000010)
GPIO.output(self.cs_channel,GPIO.LOW)
self.spi.xfer2([target_register,0x00],self.speed,self.speed) #return data of 0000
GPIO.output(self.cs_channel,GPIO.HIGH)
time.sleep(0.000010)
GPIO.output(self.cs_channel,GPIO.LOW)
resp_single = self.spi.xfer2([0x00,0x00],self.speed,self.speed) #receive the back target data
GPIO.output(self.cs_channel,GPIO.HIGH)
return self.combine_reg('>h', resp_single[0],resp_single[1])
else:
time.sleep(0.000010)
GPIO.output(self.cs_channel,GPIO.LOW)
resp_single = self.spi.xfer2([target_register,0x00,0x00,0x00],self.speed,self.speed)
GPIO.output(self.cs_channel,GPIO.HIGH)
print("SPI raw read:", hex(resp_single[2]), hex(resp_single[3]))
return self.combine_reg('>h', resp_single[2],resp_single[3])
def single_write(self, target_register, target_data):
# if self.drdy and self.module != "300":
# while not GPIO.event_detected(self.interrupt_channel):
# pass
GPIO.output(self.cs_channel,GPIO.LOW)
temp_reg = target_register | 0x80
self.spi.xfer2([temp_reg, target_data],self.speed,self.speed) #write data, such as 0xF010, target address is 0x70, and data input is 0x10
GPIO.output(self.cs_channel,GPIO.HIGH)
return True
def burst_read(self, first_register, subregister_num, sratm_fac):
'''
sratm_fac={"rate":[0.005, 0], "accel":[0.25, 0]}
status, rate, accel, temp, mag factors dict
'''
sts, rate, acc, deg, tmstp, temp, mag = [], [], [], [], [], [], []
# if self.drdy and self.module != "300": # 300 no drdy now, so only not 300 will go next
# while not GPIO.event_detected(self.interrupt_channel):
# pass
while (not GPIO.event_detected(self.interrupt_channel)) and self.drdy:
pass
if "381" in self.module:
GPIO.output(self.cs_channel,GPIO.LOW)
resp = self.spi.xfer2([first_register,0x00],self.speed,self.speed)
GPIO.output(self.cs_channel,GPIO.HIGH)
for i_381 in range(subregister_num):
time.sleep(0.000010)
GPIO.output(self.cs_channel,GPIO.LOW)
resp += self.spi.xfer2([0x00,0x00],self.speed,self.speed)[:]
GPIO.output(self.cs_channel,GPIO.HIGH)
#unit:degree per second
rate.append(self.combine_reg('>h', resp[4],resp[5]) * (sratm_fac.get("rate")[0]) + (sratm_fac.get("rate")[1]))
rate.append(self.combine_reg('>h', resp[6],resp[7]) * (sratm_fac.get("rate")[0]) + (sratm_fac.get("rate")[1]))
rate.append(self.combine_reg('>h', resp[8],resp[9]) * (sratm_fac.get("rate")[0]) + (sratm_fac.get("rate")[1]))
#unit:mg
acc.append(self.combine_reg('>h', resp[10],resp[11]) * (sratm_fac.get("accel")[0]) + (sratm_fac.get("accel")[1]))
acc.append(self.combine_reg('>h', resp[12],resp[13]) * (sratm_fac.get("accel")[0]) + (sratm_fac.get("accel")[1]))
acc.append(self.combine_reg('>h', resp[14],resp[15]) * (sratm_fac.get("accel")[0]) + (sratm_fac.get("accel")[1]))
else: #300,330 is here
GPIO.output(self.cs_channel,GPIO.LOW)
            # xfer2([values], speed_hz, delay_usec_cs): bi-directional SPI transfer.
            # Defaults to 8-bit mode; a speed_hz of zero means the maximum supported SPI clock.
# delay_usec_cs is the cs hold delay
first_register_send = [first_register,0x00]
if '330BA' in self.module and first_register == 0x3D:
subregister_num += 6
for i_else in range(2*subregister_num):
first_register_send.append(0x00)
resp = self.spi.xfer2(first_register_send,self.speed,2*self.delay)
GPIO.output(self.cs_channel,GPIO.HIGH)
sts.append(self.combine_reg('>H', resp[2], resp[3]) * (sratm_fac.get("status")[0]) + (sratm_fac.get("status")[1]))
#unit:degree per second
if '330BA' in self.module and first_register == 0x3D:
rate.append(self.combine_reg('>i', resp[4],resp[5],resp[6],resp[7]) * (sratm_fac.get("rate")[0]) + (sratm_fac.get("rate")[1]))
rate.append(self.combine_reg('>i', resp[8],resp[9], resp[10],resp[11]) * (sratm_fac.get("rate")[0]) + (sratm_fac.get("rate")[1]))
rate.append(self.combine_reg('>i', resp[12],resp[13], resp[14],resp[15]) * (sratm_fac.get("rate")[0]) + (sratm_fac.get("rate")[1]))
else:
rate.append(self.combine_reg('>h', resp[4],resp[5]) * (sratm_fac.get("rate")[0]) + (sratm_fac.get("rate")[1]))
rate.append(self.combine_reg('>h', resp[6],resp[7]) * (sratm_fac.get("rate")[0]) + (sratm_fac.get("rate")[1]))
rate.append(self.combine_reg('>h', resp[8],resp[9]) * (sratm_fac.get("rate")[0]) + (sratm_fac.get("rate")[1]))
#unit:g
if '330BA' in self.module and first_register == 0x3D:
acc.append(self.combine_reg('>i', resp[16],resp[17], resp[18],resp[19]) * (sratm_fac.get("accel")[0]) + (sratm_fac.get("accel")[1]))
acc.append(self.combine_reg('>i', resp[20],resp[21], resp[22],resp[23]) * (sratm_fac.get("accel")[0]) + (sratm_fac.get("accel")[1]))
acc.append(self.combine_reg('>i', resp[24],resp[25], resp[26],resp[27]) * (sratm_fac.get("accel")[0]) + (sratm_fac.get("accel")[1]))
else:
acc.append(self.combine_reg('>h', resp[10],resp[11]) * (sratm_fac.get("accel")[0]) + (sratm_fac.get("accel")[1]))
acc.append(self.combine_reg('>h', resp[12],resp[13]) * (sratm_fac.get("accel")[0]) + (sratm_fac.get("accel")[1]))
acc.append(self.combine_reg('>h', resp[14],resp[15]) * (sratm_fac.get("accel")[0]) + (sratm_fac.get("accel")[1]))
#unit:deg
if '330BI' in self.module and first_register == 0x3F:
deg.append(self.combine_reg('>h', resp[18],resp[19]) * 360/65536)
deg.append(self.combine_reg('>h', resp[20],resp[21]) * 360/65536)
deg.append(self.combine_reg('>h', resp[22],resp[23]) * 360/65536)
# return rate, acc, deg
if '330BA' in self.module and first_register == 0x3D:
temp.append(self.combine_reg('>h', resp[28],resp[29]) * (sratm_fac.get("temp")[0]) + (sratm_fac.get("temp")[1]))
else:
temp.append(self.combine_reg('>h', resp[16],resp[17]) * (sratm_fac.get("temp")[0]) + (sratm_fac.get("temp")[1]))
if ("330BA" in self.module or '331BI' in self.module) and (first_register == 0x3F or first_register == 0x3D):
if first_register == 0x3F:
tmstp.append(self.combine_reg('>H', resp[18],resp[19]) * (sratm_fac.get("time")[0]) + (sratm_fac.get("time")[1]))
tmstp.append(self.combine_reg('>H', resp[20],resp[21]) * (sratm_fac.get("time")[0]) + (sratm_fac.get("time")[1]))
else:
tmstp.append(self.combine_reg('>H', resp[30],resp[31]) * (sratm_fac.get("time")[0]) + (sratm_fac.get("time")[1]))
tmstp.append(self.combine_reg('>H', resp[32],resp[33]) * (sratm_fac.get("time")[0]) + (sratm_fac.get("time")[1]))
# return rate, acc, tmstp
if '300ZI' in self.module and first_register == 0x3F:
mag.append(self.combine_reg('>h', resp[18],resp[19]) * (sratm_fac.get("mag")[0]) + (sratm_fac.get("mag")[1]))
mag.append(self.combine_reg('>h', resp[20],resp[21]) * (sratm_fac.get("mag")[0]) + (sratm_fac.get("mag")[1]))
mag.append(self.combine_reg('>h', resp[22],resp[23]) * (sratm_fac.get("mag")[0]) + (sratm_fac.get("mag")[1]))
if '300ZI' in self.module and first_register == 0x3D:
# deg.append(self.combine_reg('>h', resp[18],resp[19]) * 57.3 * (2*math.pi)/65536) #65536/(2*math.pi)=10430.378350470453 65536/360=0.0054931640625
# deg.append(self.combine_reg('>h', resp[20],resp[21]) * 57.3 * (2*math.pi)/65536)
# deg.append(self.combine_reg('>h', resp[22],resp[23]) * 57.3 * (2*math.pi)/65536)
deg.append(self.combine_reg('>h', resp[18],resp[19]) * (sratm_fac.get("vg_angle")[0]) + (sratm_fac.get("vg_angle")[1]))
deg.append(self.combine_reg('>h', resp[20],resp[21]) * (sratm_fac.get("vg_angle")[0]) + (sratm_fac.get("vg_angle")[1]))
deg.append(self.combine_reg('>h', resp[22],resp[23]) * (sratm_fac.get("vg_angle")[0]) + (sratm_fac.get("vg_angle")[1]))
return sts, rate, acc, temp, mag, deg, tmstp
def spidev_setting(self):
bus = 0 #supporyed values:0,1
device = 1 #supported values:0,1 default: 0
self.spi.open(bus,device) #connect to the device. /dev/spidev<bus>.<device>
        self.spi.bits_per_word = self.word  # defaults to 8 on this system
self.spi.max_speed_hz = self.speed
self.spi.mode = 0b11
#spi.bits_per_word = 0
#spi.cshigh #default CS0 in pi3 board
#spi.lsbfirst = False
#spi.threewire = 0
return True
def check_settings(self):
print(self.spi.mode)
print(self.spi.threewire)
print(self.spi.cshigh)
print(self.spi.bits_per_word)
print(self.spi.lsbfirst)
return True
def combine_reg(self,fmt='>h',*msb_lsb):
temp_bytes = b''
for i in msb_lsb:
temp_bytes += struct.pack('B',i)
return struct.unpack(fmt,temp_bytes)[0] #MSB firstly
def power_reset(self, delay=2):
'''
#special for IMU331, WAIT 1.25S at least
'''
self.power.power_off()
time.sleep(delay)
self.power.power_on()
time.sleep(delay)
def __del__(self):
GPIO.cleanup()
self.spi.close()
if __name__ == "__main__":
    openimu_spi = SpiOpenIMU(target_module="330BI", drdy_status=True, fw='1.2.1')  # set the module name and drdy status (enable or not) -------------step: 1
burst_read, single_read, single_write = True, True, False # set the read style, burst or single------------step:2
f = open("data_" + str(openimu_spi.module) + ".txt", "w")
str_config = "module style:{0}; drdy:{1}; burst read:{2}; single read:{3} \n".format(openimu_spi.module, openimu_spi.drdy, burst_read, single_read)
print(str_config)
f.write(str_config)
input("Power on IMU !!!!!!!!")
time.sleep(2)
try:
if openimu_spi.drdy == False: # when no drdy, default SPI ODR is 100HZ
time.sleep(0.01)
# for i_wd in range(9,12):
# for i_wd in [0x00, 0x03,0x04,0x05,0x06,0x30,0x40,0x50,0x60,0x0B, 0x0B]:
# ori_list = [0x0000, 0x0009, 0x0023, 0x002A, 0x0041, 0x0048, 0x0062, 0x006B, 0x0085, 0x008C, 0x0092, 0x009B, 0x00C4, 0x00CD, 0x00D3,
# 0x00DA, 0x0111, 0x0118, 0x0124, 0x012D, 0x0150, 0x0159, 0x0165, 0x016C
# ]
# ori_list = [0x0009, 0x016C]
# for i_wd in ori_list:
if single_read:
read_name = [
"X_Rate", "Y_Rate", "Z_Rate", "X_Accel", "Y_Accel", "Z_Accel","X_Mag", "Y_Mag", "Z_Mag", "BOARD_TEMP", "RATE_TEMP", "DRDY_RATE", "ACCEL_LPF", "ACCEL_SCALE_FACTOR", "RATE_SCALE_FACTOR",
"SN_1", "SN_2", "SN_3", "PRODUCT_ID", "MASTER_STATUS", "HW_STATUS", "SW_STATUS", "ACCEL_RANGE/RATE_RANGE",
"ORIENTATION_MSB/ORIENTATION_LSB", "SAVE_CONFIG", "RATE_LPF", "HW_VERSION/SW_VERSION"
]
read_reg = [
0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, 0x10, 0x12, 0x14, 0x16, 0x18, 0x37, 0x38, 0x46, 0x47,
0x52, 0x54, 0x58, 0x56, 0x5A, 0x5C, 0x5E, 0x70, 0x74, 0x76, 0x78, 0x7E
]
# read_name = ["ORIENTATION_MSB"]
# read_reg = [0x74]
for i in zip(read_name, read_reg):
read_value = openimu_spi.single_read(i[1])
hex_value = hex(read_value)
prt_list = [i[0], hex(i[1]), hex_value]
print(prt_list)
if 'Rate' in i[0]:
read_value /= 64
elif 'Accel' in i[0]:
read_value /= 4000
elif 'Mag' in i[0]:
read_value /= 16354
elif 'TEMP' in i[0]:
read_value = read_value*0.073111172849435 + 31.0
str_temp = "{0:_<40s}0x{1:<5X} read value: 0d {2:<10} hex value: {3:<10s}\n".format(i[0], i[1], read_value, hex_value)
print(str_temp)
f.write(str(prt_list) + '\n' + str_temp)
if single_write:
while input('need write? y/n?') != 'y':
pass
write_name = ["packet rate", "Accel LPF", "orimsb", "orilsb", "Rate LPF", "save config"]
write_reg = [0x37, 0x38, 0x74, 0x75, 0x78, 0x76]
write_data = [0x01, 0x40, 0x00, 0x6B, 0x40, 0x00]
# write_name = ["ORIENTATION_LSB"]
# write_reg = [0x75]
# write_data = [0x02]
# write_data = [i_wd, i_wd]
# write_data = [i_wd & 0xFF]
for j in zip(write_name, write_reg, write_data): #start to write registers
print("write_name:{0:<40s}, write address:0x{1:<5X}, wirte data:0x{2:<5X}".format(j[0], j[1], j[2]))
openimu_spi.single_write(j[1], j[2])
time.sleep(0.5)
# if single_read or single_write:
# break
while input('need burst read? y/n?') != 'y':
pass
while burst_read: # not seting the ODR, if you use burst read, it will same with frequency of DRDY
if ('330BI' in openimu_spi.module) or ('330BA' in openimu_spi.module):
# list_rate, list_acc = openimu_spi.burst_read(first_register=0x3E,subregister_num=8) #input the read register and numbers of subregisters want to read together
# str_burst = "time:{0:>10f}; gyro:{1:>25s}; accel:{2:>25s} \n".format(
# time.clock(), ", ".join([str(x) for x in list_rate]), ", ".join([str(x) for x in list_acc])
# )
list_sts, list_rate, list_acc, list_temp, list_mag, list_deg, tmstamp = openimu_spi.burst_read(first_register=0x3E,subregister_num=8) #input the read register and numbers of subregisters want to read together
str_burst = "time:{0:>10f}; gyro:{1:>50s}; accel:{2:>50s}; timestamp:{3:>25s} \n".format(
time.clock(), ", ".join([str(x) for x in list_rate]), ", ".join([str(x) for x in list_acc]), ", ".join([str(x) for x in tmstamp])
)
else:
list_sts, list_rate, list_acc, list_temp, list_mag, list_deg, tmstamp= openimu_spi.burst_read(first_register=0x3D,subregister_num=11)
str_burst = "time:{0:>20f}; status:{3:>20s} ; gyro:{1:>50s}; accel:{2:>40s}; temp:{4:>10s}; mag:{5:>20s}; deg:{6:>20s}\n".format(
time.clock(), ", ".join([str(x) for x in list_rate]), ", ".join([str(x) for x in list_acc]), ", ".join([str(x) for x in list_sts]),
", ".join([str(x) for x in list_temp]), ", ".join([str(x) for x in list_mag]), ", ".join([str(x) for x in list_deg])
)
print(str_burst)
f.write(str_burst)
# input('next cycle')
except KeyboardInterrupt:
f.close()
print("stoped by customer!")
# except Exception as e:
# print(e)
# traceback.print_exc()
# polling mode reading spi interface, with drdy pin detection
# try:
# while True:
# # GPIO.output(cs_channel,GPIO.LOW)
# # product_id = spi.xfer2([0x56,0x00,0x00,0x00],0,10)
# # GPIO.output(cs_channel,GPIO.HIGH)
# # print('id',product_id)
# time.sleep(0.1)
# # if GPIO.event_detected(interrupt_channel):
# if True:
# time.sleep(0.5)
# GPIO.output(cs_channel,GPIO.LOW)
# # xfer2([value],speed_hz,delay_usec_cs), SPI bi-direction data transfer.
# # default 8 bits mode, if speed_hz set to zero means the maximun supported SPI clock.
# # delay_usec_cs is the cs hold delay
# resp = spi.xfer2([openimu_spi.burst_cmd_std,0x00,0x00,0x00,0x00,0x00,0x00,
# 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],0,10)
# GPIO.output(cs_channel,GPIO.HIGH)
# #unit:degree per second
# x_rate = openimu_spi.combine_reg(resp[4],resp[5])/200
# y_rate = openimu_spi.combine_reg(resp[6],resp[7])/200
# z_rate = openimu_spi.combine_reg(resp[8],resp[8])/200
# #unit:mg
# x_acc = openimu_spi.combine_reg(resp[10],resp[11])/4
# y_acc = openimu_spi.combine_reg(resp[12],resp[13])/4
# z_acc = openimu_spi.combine_reg(resp[14],resp[15])/4
# print('g/a',x_rate,y_rate,z_rate,x_acc,y_acc,z_acc)
# #write to register
# time.sleep(0.5)
# GPIO.output(cs_channel,GPIO.LOW)
# resp1 = spi.xfer2([0x80|0x50,0x23],0,10)
# time.sleep(0.5)
# GPIO.output(cs_channel,GPIO.HIGH)
# 0x56 OPEN300 ID: 0x30(48) 0x00(0)
# 0x56 OPEN330 ID: 0x33(48) 0x00(0)
# 0x56 IMU381 ID: 0X38(56) 0x10(16)
|
py | 1a4dc9aa2388af9bf423a889f2f62435a75be1b1 | import struct
from django.forms import ValidationError
from .const import (
BANDTYPE_FLAG_HASNODATA, GDAL_TO_POSTGIS, GDAL_TO_STRUCT,
POSTGIS_HEADER_STRUCTURE, POSTGIS_TO_GDAL, STRUCT_SIZE,
)
def pack(structure, data):
"""
Pack data into hex string with little endian format.
"""
return struct.pack('<' + structure, *data)
def unpack(structure, data):
"""
Unpack little endian hexlified binary string into a list.
"""
return struct.unpack('<' + structure, bytes.fromhex(data))
def chunk(data, index):
"""
Split a string into two parts at the input index.
"""
return data[:index], data[index:]
def from_pgraster(data):
"""
Convert a PostGIS HEX String into a dictionary.
"""
if data is None:
return
# Split raster header from data
header, data = chunk(data, 122)
header = unpack(POSTGIS_HEADER_STRUCTURE, header)
# Parse band data
bands = []
pixeltypes = []
while data:
# Get pixel type for this band
pixeltype, data = chunk(data, 2)
pixeltype = unpack('B', pixeltype)[0]
# Remove nodata byte from band nodata value if it exists.
has_nodata = pixeltype & BANDTYPE_FLAG_HASNODATA
if has_nodata:
pixeltype &= ~BANDTYPE_FLAG_HASNODATA
# Convert datatype from PostGIS to GDAL & get pack type and size
pixeltype = POSTGIS_TO_GDAL[pixeltype]
pack_type = GDAL_TO_STRUCT[pixeltype]
pack_size = 2 * STRUCT_SIZE[pack_type]
# Parse band nodata value. The nodata value is part of the
# PGRaster string even if the nodata flag is True, so it always
# has to be chunked off the data string.
nodata, data = chunk(data, pack_size)
nodata = unpack(pack_type, nodata)[0]
# Chunk and unpack band data (pack size times nr of pixels)
band, data = chunk(data, pack_size * header[10] * header[11])
band_result = {'data': bytes.fromhex(band)}
# If the nodata flag is True, set the nodata value.
if has_nodata:
band_result['nodata_value'] = nodata
# Append band data to band list
bands.append(band_result)
# Store pixeltype of this band in pixeltypes array
pixeltypes.append(pixeltype)
# Check that all bands have the same pixeltype.
# This is required by GDAL. PostGIS rasters could have different pixeltypes
# for bands of the same raster.
if len(set(pixeltypes)) != 1:
raise ValidationError("Band pixeltypes are not all equal.")
return {
'srid': int(header[9]),
'width': header[10], 'height': header[11],
'datatype': pixeltypes[0],
'origin': (header[5], header[6]),
'scale': (header[3], header[4]),
'skew': (header[7], header[8]),
'bands': bands,
}
def to_pgraster(rast):
"""
Convert a GDALRaster into PostGIS Raster format.
"""
# Prepare the raster header data as a tuple. The first two numbers are
# the endianness and the PostGIS Raster Version, both are fixed by
# PostGIS at the moment.
rasterheader = (
1, 0, len(rast.bands), rast.scale.x, rast.scale.y,
rast.origin.x, rast.origin.y, rast.skew.x, rast.skew.y,
rast.srs.srid, rast.width, rast.height,
)
# Pack raster header.
result = pack(POSTGIS_HEADER_STRUCTURE, rasterheader)
for band in rast.bands:
# The PostGIS raster band header has exactly two elements, a 8BUI byte
# and the nodata value.
#
# The 8BUI stores both the PostGIS pixel data type and a nodata flag.
# It is composed as the datatype with BANDTYPE_FLAG_HASNODATA (1 << 6)
# for existing nodata values:
# 8BUI_VALUE = PG_PIXEL_TYPE (0-11) | BANDTYPE_FLAG_HASNODATA
#
# For example, if the byte value is 71, then the datatype is
# 71 & ~BANDTYPE_FLAG_HASNODATA = 7 (32BSI)
# and the nodata value is True.
structure = 'B' + GDAL_TO_STRUCT[band.datatype()]
# Get band pixel type in PostGIS notation
pixeltype = GDAL_TO_POSTGIS[band.datatype()]
# Set the nodata flag
if band.nodata_value is not None:
pixeltype |= BANDTYPE_FLAG_HASNODATA
# Pack band header
bandheader = pack(structure, (pixeltype, band.nodata_value or 0))
# Add packed header and band data to result
result += bandheader + band.data(as_memoryview=True)
# Convert raster to hex string before passing it to the DB.
return result.hex()
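# Hedged sketch (not part of the module API): demonstrates the band-header
# arithmetic described in to_pgraster() above, assuming
# BANDTYPE_FLAG_HASNODATA == 1 << 6 as stated there.
def _pixeltype_flag_example():
    byte_value = 7 | BANDTYPE_FLAG_HASNODATA                  # 32BSI + nodata flag -> 71
    has_nodata = bool(byte_value & BANDTYPE_FLAG_HASNODATA)   # True
    pg_pixel_type = byte_value & ~BANDTYPE_FLAG_HASNODATA     # back to 7 (32BSI)
    return byte_value, has_nodata, pg_pixel_type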
|
py | 1a4dc9d9111ee6442d657a660be62c2ba7332043 | # -*- coding: utf-8 -*-
from django.db import migrations
import organizations.fields
class Migration(migrations.Migration):
dependencies = [("organizations", "0001_initial")]
operations = [
migrations.AlterField(
model_name="organization",
name="slug",
field=organizations.fields.SlugField(
blank=True,
editable=False,
help_text="The name in all lowercase, suitable for URL identification",
max_length=200,
populate_from=("name",),
unique=True,
),
)
]
|
py | 1a4dca7c6ad50a94610d072e4b6c46c74e56253c | # (C) Copyright [2020] Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""HPE Container Platform CLI."""
from __future__ import print_function
import sys
from hpecp.k8s_worker import WorkerK8sStatus, WorkerK8s
from hpecp.cli import base
from textwrap import dedent
class K8sWorkerProxy(base.BaseProxy):
"""Proxy object to :py:attr:`<hpecp.client.k8s_worker>`."""
def __dir__(self):
"""Return the CLI method names."""
return [
"create_with_ssh_key",
"delete",
"examples",
"get",
"list",
"set_storage",
"statuses",
"wait_for_status",
]
def __init__(self):
"""Create instance of proxy class with the client module name."""
super(K8sWorkerProxy, self).new_instance("k8s_worker", WorkerK8s)
@base.intercept_exception
def create_with_ssh_key(
self,
ip=None,
ssh_key=None,
ssh_key_file=None,
ssh_passphrase=None,
tags=None,
ephemeral_disks=None,
persistent_disks=None,
wait_for_operation_secs=0,
):
"""Create a K8s Worker using SSH key authentication.
Parameters
----------
ip : str, optional
The IP address of the host, this is used for internal
communication, by default None.
ssh_key : str, optional
The SSH key data as a string, instead of this location to a key
file may also be provided, by default None.
ssh_key_file : str, optional
The SSH key file path, by default None
ssh_passphrase: str, optional
The SSH passphrase
tags : list, optional
Tags to use, e.g. /api/v2/tag/1:foo,/api/v2/tag/1:bar,
by default None
ephemeral_disks : str
Comma separated string containing ephemeral disks.
e.g: "/dev/nvme2n1,/dev/nvme2n2"
persistent_disks : str, optional
Comma separated string containing persistent disks, by default
None.
e.g: "/dev/nvme1n1,/dev/nvme1n2"
wait_for_operation_secs: int
wait for operations to complete. 0 = don't wait
"""
if ssh_key is None and ssh_key_file is None:
print(
"At least one of ssh_key or ssh_key_file must be provided",
file=sys.stderr,
)
sys.exit(1)
if ssh_key is not None and ssh_key_file is not None:
print(
(
"Either ssh_key or ssh_key_file must be provided,"
" but not both."
),
file=sys.stderr,
)
sys.exit(1)
if ssh_key_file:
try:
with open(ssh_key_file) as f:
ssh_key = f.read()
except OSError:
print(
"Could not open/read ssh-key-file: {}".format(
ssh_key_file
),
file=sys.stderr,
)
sys.exit(1)
if (
ephemeral_disks is not None or persistent_disks is not None
) and wait_for_operation_secs == 0:
print(
(
"If setting disks, 'wait-for-operation-secs' parameter"
" must be greater than zero (recommended 600 seconds)"
),
file=sys.stderr,
)
sys.exit(1)
tags_parsed = []
if tags is not None:
for tag in tags.split(","):
k, v = tag.split(":")
tags_parsed.append({"tag_id": k, "tag_value": v})
worker_id = base.get_client().k8s_worker.create_with_ssh_key(
ip=ip,
ssh_key_data=ssh_key,
ssh_passphrase=ssh_passphrase,
tags=tags_parsed,
)
if wait_for_operation_secs > 0:
self.wait_for_status(
id=worker_id,
status=["storage_pending", "error"],
timeout_secs=wait_for_operation_secs,
)
if base.get_client().k8s_worker.get(id=worker_id).status == "error":
print(
(
"Create request has errored. "
"Check status message with `hpecp k8sworker get {}".format(
id
)
),
file=sys.stderr,
)
sys.exit(1)
if ephemeral_disks is not None or persistent_disks is not None:
self.set_storage(
id=worker_id,
ephemeral_disks=ephemeral_disks,
persistent_disks=persistent_disks,
)
if wait_for_operation_secs > 0:
self.wait_for_status(
id=worker_id,
status=["ready"],
timeout_secs=wait_for_operation_secs,
)
print(worker_id)
def examples(self):
"""Show examples for working with k8sclusters."""
print(
dedent(
"""\
# Find id of k8s workers by ip address
$ hpecp k8sworker list --query "[*] | @[?contains('10.0.1.10 10.0.1.210', ipaddr)] | [*][_links.self.href]" --output text
/api/v2/worker/k8shost/5
/api/v2/worker/k8shost/7
# Retrieve the first master node of a K8S Cluster
$ hpecp k8scluster list --query "[?_links.self.href == '/api/v2/k8scluster/1'] | [0] | [k8shosts_config] | [0] | [?role == 'master'] | [0] | [node]" -o text
/api/v2/worker/k8shost/7
""" # noqa: E501
)
)
# TODO: verify with engineering if setup_log is a valid parameter
# def get(self, id, setup_log=False):
# """Get a K8SWorker."""
# if setup_log is True:
# params = {"setup_log": "true"}
# else:
# params = {}
# return super(K8sWorkerProxy, self).get(id=id, params=params)
@base.intercept_exception
def set_storage(
self,
id,
ephemeral_disks,
persistent_disks=None,
):
"""Set storage for a k8s worker.
Parameters
----------
id : str
The k8s worker ID
ephemeral_disks : str
Comma separated string containing ephemeral disks.
e.g: "/dev/nvme2n1,/dev/nvme2n2"
persistent_disks : str, optional
Comma separated string containing persistent disks, by default
None.
e.g: "/dev/nvme1n1,/dev/nvme1n2"
"""
if not ephemeral_disks:
print("'ephemeral_disks' must be provided", file=sys.stderr)
sys.exit(1)
p_disks = (
persistent_disks.split(",") if persistent_disks is not None else []
)
e_disks = ephemeral_disks.split(",")
base.get_client().k8s_worker.set_storage(
worker_id=id,
persistent_disks=p_disks,
ephemeral_disks=e_disks,
)
def statuses(
self,
):
"""Return a list of valid statuses."""
print([s.name for s in WorkerK8sStatus])
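# Hedged usage sketch (IP address, key path, disks and timeout are
# illustrative only). The proxy is normally driven through the `hpecp
# k8sworker` CLI, but the same call can be made directly from Python:
#
#   proxy = K8sWorkerProxy()
#   proxy.create_with_ssh_key(
#       ip="10.1.0.105",
#       ssh_key_file="/home/user/.ssh/id_rsa",
#       ephemeral_disks="/dev/nvme2n1,/dev/nvme2n2",
#       wait_for_operation_secs=600,
#   )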
|
py | 1a4dca9757b86a132c098bcdce210c8f3c5f7808 | from twython import Twython
def read_strings_from_file(file_path, how_many):
with open(file_path, 'r') as file:
data = file.read()
return data.split()[:how_many]
def read_key_and_secret(file_path):
return read_strings_from_file(file_path, 2)
def read_token_secret_pin(file_path):
return read_strings_from_file(file_path, 2)
def write_token_secret(file_path, token, secret):
with open(file_path, 'w') as file:
file.write("{}\n{}".format(token, secret))
def auth_app(key_file, auth_file):
app_key, app_secret = read_key_and_secret(key_file)
# obtaining URL for authentication
twitter = Twython(app_key, app_secret)
auth = twitter.get_authentication_tokens()
oauth_token = auth['oauth_token']
oauth_token_secret = auth['oauth_token_secret']
# request pin
print('Go here: {}'.format(auth['auth_url']))
pin = input('PIN? ')
# complete authorization with PIN
twitter = Twython(app_key, app_secret, oauth_token, oauth_token_secret)
auth = twitter.get_authorized_tokens(pin)
oauth_token = auth['oauth_token']
oauth_token_secret = auth['oauth_token_secret']
# write token and secret to file
write_token_secret(auth_file, oauth_token, oauth_token_secret)
print('auth credentials written to: {}'.format(auth_file))
def twython_from_key_and_auth(key_file, auth_file):
app_key, app_secret = read_key_and_secret(key_file)
oauth_token, oauth_token_secret = read_token_secret_pin(auth_file)
return Twython(app_key, app_secret, oauth_token, oauth_token_secret)
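# Hedged usage sketch (file names are illustrative only): run auth_app() once
# to obtain user tokens via a PIN, then build an authenticated client.
if __name__ == '__main__':
    # auth_app('app_keys.txt', 'auth_tokens.txt')  # first run only
    twitter = twython_from_key_and_auth('app_keys.txt', 'auth_tokens.txt')
    print(twitter.verify_credentials()['screen_name'])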
|
py | 1a4dcaa7cc72ca058a95e5421bf6afc065f7564e | import json
from masonite.helpers import config
def mix(filename):
manifest = config("resources.public_path") + "/mix-manifest.json"
with open(manifest) as file:
data = json.load(file)
return "/static" + data[filename]
|
py | 1a4dcd6e191a9b88ad2ad6b9b089bdce191a6e85 |
t1=("only even prime","tallest animal","best person","best sportsperson")
list1=(2,'giraffe','chirag','lionel messi')
print(list(zip(t1,list1)))
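# Prints: [('only even prime', 2), ('tallest animal', 'giraffe'),
#          ('best person', 'chirag'), ('best sportsperson', 'lionel messi')]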
|
py | 1a4dcd876a2acb8ed98222753d9260ad10c9074e | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.15.9
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1KeyToPath(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'key': 'str',
'mode': 'int',
'path': 'str'
}
attribute_map = {
'key': 'key',
'mode': 'mode',
'path': 'path'
}
def __init__(self, key=None, mode=None, path=None, local_vars_configuration=None): # noqa: E501
"""V1KeyToPath - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._key = None
self._mode = None
self._path = None
self.discriminator = None
self.key = key
if mode is not None:
self.mode = mode
self.path = path
@property
def key(self):
"""Gets the key of this V1KeyToPath. # noqa: E501
The key to project. # noqa: E501
:return: The key of this V1KeyToPath. # noqa: E501
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""Sets the key of this V1KeyToPath.
The key to project. # noqa: E501
:param key: The key of this V1KeyToPath. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and key is None: # noqa: E501
raise ValueError("Invalid value for `key`, must not be `None`") # noqa: E501
self._key = key
@property
def mode(self):
"""Gets the mode of this V1KeyToPath. # noqa: E501
Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. # noqa: E501
:return: The mode of this V1KeyToPath. # noqa: E501
:rtype: int
"""
return self._mode
@mode.setter
def mode(self, mode):
"""Sets the mode of this V1KeyToPath.
Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. # noqa: E501
:param mode: The mode of this V1KeyToPath. # noqa: E501
:type: int
"""
self._mode = mode
@property
def path(self):
"""Gets the path of this V1KeyToPath. # noqa: E501
The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. # noqa: E501
:return: The path of this V1KeyToPath. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this V1KeyToPath.
The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. # noqa: E501
:param path: The path of this V1KeyToPath. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and path is None: # noqa: E501
raise ValueError("Invalid value for `path`, must not be `None`") # noqa: E501
self._path = path
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1KeyToPath):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1KeyToPath):
return True
return self.to_dict() != other.to_dict()
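# Hedged usage sketch (values are illustrative only): map the "config.yaml"
# key of a ConfigMap/Secret volume to a relative path with 0644 permissions.
#
#   item = V1KeyToPath(key="config.yaml", path="app/config.yaml", mode=0o644)
#   item.to_dict()  # {'key': 'config.yaml', 'mode': 420, 'path': 'app/config.yaml'}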
|
py | 1a4dcddd2c0fdbf04c855a725a20614c5a22d5a0 | #
# Copyright (c) 2017, Massachusetts Institute of Technology All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from MDSplus import DATA,Float64Array,tdi
import sys
example = '/frame?tree=expt&shot=123&y=SIGNAL:NODE&x=0.0&frame_idx=0'
def doFrame(self):
def getStringExp(self,name,response_headers,_tdi):
if name in self.args:
try:
response_headers.append((name,str(_tdi(self.args[name][-1]).data())))
except Exception as e:
response_headers.append((name,"ERROR: %s"%(e,)))
response_headers = list()
response_headers.append(('Cache-Control','no-store, no-cache, must-revalidate'))
response_headers.append(('Pragma','no-cache'))
response_headers.append(('Content-Type','application/octet-stream'))
if 'tree' in self.args:
tree = self.openTree(self.args['tree'][-1],self.args['shot'][-1].split(',')[0])
_tdi = tree.tdiExecute
else:
tree = None
_tdi = tdi
for name in ('title','xlabel','ylabel'):
getStringExp(self,name,response_headers,_tdi)
if 'frame_idx' in self.args:
frame_idx = self.args['frame_idx'][-1]
else:
frame_idx = '0'
expr = self.args['y'][-1]
sig = _tdi('GetSegment(' + expr + ',' + frame_idx + ')')
frame_data = DATA(sig).evaluate()
response_headers.append(('FRAME_WIDTH',str(sig.getShape()[0])))
response_headers.append(('FRAME_HEIGHT',str(sig.getShape()[1])))
response_headers.append(('FRAME_BYTES_PER_PIXEL',str(frame_data.data().itemsize)))
response_headers.append(('FRAME_LENGTH',str(len(frame_data))))
output = str(frame_data.data().data)
if 'init' in self.args:
if 'x' in self.args:
expr = self.args['x'][-1]
            times = DATA(_tdi(expr)).evaluate()
else:
times = list()
numSegments = _tdi('GetNumSegments(' + expr + ')').data()
for i in range(0, numSegments):
times.append(_tdi('GetSegmentLimits(' + expr + ',' + str(i) + ')').data()[0])
times = Float64Array(times)
response_headers.append(('TIMES_DATATYPE',times.__class__.__name__))
response_headers.append(('TIMES_LENGTH',str(len(times))))
output = output + str(times.data().data)
status = '200 OK'
return (status, response_headers, output)
|
py | 1a4dce83aa8be583e9804fe01cca31c87e9f657f | """
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/item"
# docs_base_url = "https://[org_name].github.io/item"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "Item"
|
py | 1a4dce9d86a2830fb3726e6cca6792f0a76a275b | wrong_data = input("잘못된 데이터 : ")
data = ""
wrong_data = wrong_data.split(" ")
for i in wrong_data:
if i[:len(i) // 2] == i[len(i) // 2 :]:
data += i[:len(i) // 2] + " "
else:
data += i + " "
result = ""
for i in data:
if i.isupper() == 1:
i = i.lower()
else:
i = i.upper()
result += i
result = result.rstrip(" ")
print(result)
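# Illustrative trace (hypothetical input): "HiHi gOgO there"
# -> duplicated halves collapsed: "Hi gO there"
# -> case swapped and printed:    "hI Go THERE"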
|
py | 1a4dd082d2b3847c15c6a19fbeeefe916e3791ec | 0
from pymeasure.instruments import Instrument  # pymeasure base class and validators used by the controls below
from pymeasure.instruments.validators import strict_discrete_set, truncated_range
class MDO3000(Instrument):
"""Represents the Tektronix mixed domain oscilloscopes md3000/md4000 and provides a
high-level interface for interacting with the instrument.
Depending on the
"""
print('nothing here')
# ToDo: Implement basic OSZI functions to measure and get data
# ToDo: Make a PyQt GUI that displays both, waveform to send to AFG and measurement DATA
##################
# AFG Properties #
##################
AFG_function = Instrument.control(
"AFG:FUNCtion?", "AFG:FUNCtion %s",
"""A string property that controls the function of the AFG""",
validator=strict_discrete_set,
values={'sine': 'SINE', 'square': 'SQUare', 'pulse': 'PULSe', 'ramp': 'RAMP', 'nois': 'NOISe', 'dc': 'DC',
'sinc': 'SINC', 'gauss': 'GAUSsian', 'lorentz': 'LORENtz', 'erise': 'ERISe', 'edecay': 'EDECAy',
                'haversine': 'HAVERSINe', 'cardiac': 'CARDIac', 'arbitrary': 'ARBitrary'}
)
AFG_offset = Instrument.control(
"AFG:OFFSet?", "AFG:OFFSet %g",
"""A floating point property that represents the AFG offset, in volts. Minimum and Maximum Values depend on the function. NOTE: Load Impedance has to be set before using this function"""
)
AFG_loadimpedance = Instrument.control(
"AFG:OUTPut:LOAd:IMPEDance?", "AFG:OUTPut:LOAd:IMPEDance %s",
"""String property, that sets the AFG load impedance. 'high' or 'fifty' are accepted values.""",
validator=strict_discrete_set,
values={'high': 'HIGHZ', 'fifty': 'FIFty'}
)
AFG_output = Instrument.control(
"AFG:OUTPUT:STATE?", "AFG:OUTPUT:STATE %s",
"""String property, that represents the output state ON or OFF""",
validator=strict_discrete_set,
        values={'ON': 'ON', 'OFF': 'OFF'}
)
AFG_amplitude = Instrument.control(
"AFG:AMPLitude?", "AFG:AMPLitude %g",
"""A floating point property that represents the AFG amplitude. Maximum and Minimum Values depend on the selected function. NOTE: Load Impedance has to be set before setting this property""",
)
AFG_frequency = Instrument.control(
"AFG:FREQency?", "AFG:FREQency %g",
"""Floating point property that represents the AFG frequency. Maximum and Minimum Values depend on the selected function. NOTE: Load Impedance has to be set before setting this property"""
)
AFG_highlevel = Instrument.control(
"AFG:HIGHLevel?", "AFG:HIGHLevel %g",
"""Floating point property that represents the AFG low level. Maximum and Minimum Values depend on the selected function. NOTE: Load Impedance has to be set before setting this property"""
)
AFG_lowlevel = Instrument.control(
"AFG:LOWLevel?", "AFG:LOWLevel %g",
"""Floating point property that represents the AFG high level. Maximum and Minimum Values depend on the selected function. NOTE: Load Impedance has to be set before setting this property"""
)
AFG_period = Instrument.control(
"AFG:PERIod?", "AFG:PERIod %g",
"""Floating point property that represents the AFG period. Maximum and Minimum Values depend on the selected function. NOTE: Load Impedance has to be set before setting this property"""
)
AFG_phase = Instrument.control(
"AFG:PHASe?", "AFG:PHASe %g",
"""Floating point property that represents the AFG phase. Maximum and Minimum Values depend on the selected function. NOTE: Load Impedance has to be set before setting this property"""
)
AFG_pulsewidth = Instrument.control(
"AFG:PULse:WIDth?", "AFG:PULse:WIDth %g",
"""Floating Point Property. that represents the AFG pulse width. It has an absolute minimum of 10ns and has a relative range of 10%-90% of the current period setting. Resolution is 0.1ns"""
)
AFG_additiveNoise = Instrument.control(
"AFG:NOISEAdd:PERCent?", "AFG:NOISEAdd:PERCent %g",
"""Floating point property that represents the AFG additive noise level, as a percentage.""",
validator=truncated_range,
values=[0, 100]
)
AFG_additiveNoiseState = Instrument.control(
"AFG:NOISEAdd:STATE?", "AFG:NOISEAdd:STATE %s",
"""String property that represents the AFG additive noise state""",
        validator=strict_discrete_set,
values={'ON': 'ON', '1': 'ON', 'OFF': 'OFF', '2': 'OFF'}
)
AFG_rampSymmetry = Instrument.control(
"AFG:RAMP:SYMmetry?", "AFG:RAMP:SYMmetry %g",
"""Floating point promerty that represents the AFG ramp symmetry, as a percentage.""",
validator=truncated_range,
values=[0, 100]
)
AFG_dutyCycle = Instrument.control(
"AFG:SQUare:DUty?", "AFG:SQUare:DUty %g",
"""Floating point property that represents the AFG duty cycle, as a percentage""",
validator=truncated_range,
values=[0, 100]
)
    # todo: refactor AFG_Arb_Label to a property - how does this work with multiple arguments?
def AFG_set_arb_label(self, slot, label):
self.write(f'AFG:ARBitrary:ARB{slot}:LABel {label}')
def AFG_get_arb_label(self, slot):
"""Queries the waveform label for arbitrary waveform slot 1-4 """
        label = self.ask(f'AFG:ARBitrary:ARB{slot}:LABel?')
        return label
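    # Illustrative usage sketch, not part of the original file: the VISA resource
    # string is a placeholder and the constructor call assumes the usual
    # pymeasure `Instrument` signature.
    #   scope = MDO3000("TCPIP0::192.168.1.10::INSTR")
    #   scope.AFG_loadimpedance = 'fifty'   # set load impedance before amplitude/frequency
    #   scope.AFG_function = 'sine'
    #   scope.AFG_frequency = 1e3
    #   scope.AFG_output = 'ON'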
|
py | 1a4dd0c70737d2950b7564dd5e2fd86c3c571398 | from django.http import HttpResponse
from django.shortcuts import render
from django.contrib import messages
from django.http import HttpResponseRedirect
from cfbets.forms import SignUpForm, UserProfileForm
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from bets.models import ProposedBet, AcceptedBet, UserProfile
from common.stats import *
from django.contrib.auth.models import User
def welcome(request):
if request.user.is_authenticated():
return HttpResponseRedirect('/bets/my_bets')
else:
return render(request, 'base_welcome.html')
def sign_up(request):
if request.method == 'POST':
form = SignUpForm(request.POST)
# check the group id
group_id = request.POST.get('group_id')
if group_id != '' and group_id != 'cl3ms0n':
form.add_error('group_id', 'Not a valid group id.')
elif form.is_valid():
form.save()
new_user = authenticate(
username=form.cleaned_data['username'],
password=form.cleaned_data['password1'])
if new_user is not None:
login(request, new_user)
return HttpResponseRedirect("/")
else:
return HttpResponseRedirect("/login")
else:
form = SignUpForm()
return render(request, 'base_sign_up.html', {'form': form})
@login_required(login_url='/login/')
def profile(request):
# get the current user
current_user = User.objects.get(id=request.user.id)
# get the current user profile
current_user_profile = UserProfile.objects.get(user=current_user)
# all the form stuff
if request.method == 'POST':
user_profile_form = UserProfileForm(request.POST)
if user_profile_form.is_valid():
# save data for current user / user profile
current_user.first_name = user_profile_form.cleaned_data['first_name']
current_user.last_name = user_profile_form.cleaned_data['last_name']
current_user_profile.get_prop_bet_emails = user_profile_form.cleaned_data[
'get_prop_bet_emails']
current_user_profile.get_accepted_bet_emails = user_profile_form.cleaned_data[
'get_accepted_bet_emails']
current_user.save(update_fields=['first_name', 'last_name'])
current_user_profile.save(
update_fields=[
'get_prop_bet_emails',
'get_accepted_bet_emails'])
messages.success(request, 'Profile saved successfully.')
return HttpResponseRedirect("/profile")
else:
user_profile_form = UserProfileForm(
initial={
'first_name': current_user.first_name,
'last_name': current_user.last_name,
'email': current_user.email,
'get_prop_bet_emails': current_user_profile.get_prop_bet_emails,
'get_accepted_bet_emails': current_user_profile.get_accepted_bet_emails})
total_won_bets = get_total_wins(current_user)
total_loss_bets = get_total_losses(current_user)
total_tie_bets = get_total_ties(current_user)
return render(request,
'base_profile.html',
{'user_profile_form': user_profile_form,
'total_won_bets': total_won_bets,
'total_tie_bets': total_tie_bets,
'total_loss_bets': total_loss_bets})
|
py | 1a4dd1fce478a009a3d3a50e7cf22da550de1b35 | # Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from .base_vfi_dataset import BaseVFIDataset
from .registry import DATASETS
@DATASETS.register_module()
class VFIVimeo90K7FramesDataset(BaseVFIDataset):
"""Utilize Vimeo90K dataset (7 frames) for video frame interpolation.
Load 7 GT (Ground-Truth) frames from the dataset, predict several frame(s)
from other frames.
Then it applies specified transforms and finally returns a dict
containing paired data and other information.
It reads Vimeo90K keys from the txt file. Each line contains:
1. video frame folder
2. number of frames
3. image shape
Examples:
::
00001/0266 7 (256,448,3)
00001/0268 7 (256,448,3)
Note: Only `video frame folder` is required information.
Args:
folder (str | :obj:`Path`): Path to image folder.
ann_file (str | :obj:`Path`): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transformations.
input_frames (list[int]): Index of input frames.
target_frames (list[int]): Index of target frames.
test_mode (bool): Store `True` when building test dataset.
Default: `False`.
"""
def __init__(self,
folder,
ann_file,
pipeline,
input_frames,
target_frames,
test_mode=False):
super().__init__(
pipeline=pipeline,
folder=folder,
ann_file=ann_file,
test_mode=test_mode)
self.input_frames = input_frames
self.target_frames = target_frames
self.data_infos = self.load_annotations()
def load_annotations(self):
"""Load annoations for Vimeo-90K dataset.
Returns:
list[dict]: A list of dicts for paired paths and other information.
"""
# get keys
with open(self.ann_file, 'r') as fin:
keys = [line.strip().split(' ')[0] for line in fin]
data_infos = []
for key in keys:
key = key.replace('/', os.sep)
inputs_path = [
osp.join(self.folder, key, f'im{i}.png')
for i in self.input_frames
]
target_path = [
osp.join(self.folder, key, f'im{i}.png')
for i in self.target_frames
]
data_infos.append(
dict(
inputs_path=inputs_path, target_path=target_path, key=key))
return data_infos
|
py | 1a4dd3ae6acae271c4212f694b538c00bfd2919a | import unittest
import io
from ppci.common import CompilerError
from ppci.lang.c import CBuilder, render_ast, print_ast, COptions
from ppci.arch.example import ExampleArch
from ppci import ir
from ppci.irutils import verify_module
class CFrontendTestCase(unittest.TestCase):
""" Test if various C-snippets build correctly """
def setUp(self):
arch = ExampleArch()
self.builder = CBuilder(arch.info, COptions())
def do(self, src):
self._do_compile(src)
self._print_ast(src)
def _do_compile(self, src):
f = io.StringIO(src)
try:
ir_module = self.builder.build(f, None)
except CompilerError as compiler_error:
lines = src.split("\n")
compiler_error.render(lines)
raise
assert isinstance(ir_module, ir.Module)
verify_module(ir_module)
def _print_ast(self, src):
# Try to parse ast as well:
tree = self.builder._create_ast(src, None)
print(tree)
print("C-AST:")
print_ast(tree)
# Print rendered c:
print("re-rendered C:")
render_ast(tree)
def expect_errors(self, src, errors):
with self.assertRaises(CompilerError) as cm:
self.do(src)
for row, message in errors:
self.assertEqual(row, cm.exception.loc.row)
self.assertRegex(cm.exception.msg, message)
def test_hello_world(self):
src = r"""
void printf(char*, ...);
void main(int b) {
printf("Hello \x81 world %i\n", 42);
}
"""
self.do(src)
def test_adjecent_strings(self):
src = r"""
void printf(char*);
void main(int b) {
printf("Hello" "world\n");
static unsigned char msg[]= "Woooot\n";
printf(msg);
}
"""
self.do(src)
def test_1(self):
src = """
int a;
void main(int b) {
a = 10 + b;
}
"""
self.do(src)
def test_2(self):
src = """
static int c, d, e;
static float x;
char f, g;
int main() {
int d;
d = 20 + c * 10 + c >> 2 - 123;
return d;
}
"""
self.do(src)
def test_control_structures(self):
src = """
int main() {
int d,i,c;
c = 2;
d = 20 + c * 10 + c >> 2 - 123;
if (d < 10)
{
while (d < 20)
{
d = d + c * 4;
}
}
if (d > 20)
{
do {
d += c;
} while (d < 100);
}
else
{
for (i=i;i<10;i++) { }
for (i=0;;) { }
for (;;) { }
}
return d;
}
"""
self.do(src)
def test_for_statement(self):
src = """
int main() {
int i;
for (i=i;i<10;i++) { }
for (i=0;;) { }
for (;;) { }
for (int x=0;x<10;x++) { }
}
"""
self.do(src)
def test_for_statement_scope(self):
""" Test the scope of declarations inside a for loop. """
src = """
void print(int);
int main() {
for (int i=0;i<10;i++) print(i);
for (int i=0;i<10;i++) print(i);
}
"""
self.do(src)
def test_conditionals(self):
src = """
int main() {
int d, i, c;
c = (( (d < 10) || (i != c) ) | 22) != 0;
return c;
}
"""
self.do(src)
def test_expressions(self):
""" Test various expression constructions """
src = """
void main() {
int a,b,c,d;
c = 2;
d = a + b - c / a * b;
d = !a;
d = a ? b : c + 2;
}
"""
self.do(src)
def test_ternary_operator(self):
""" Test ternary operator with functions. """
src = """
int foo(int x) { return x + 1; }
int bar(int x) { return x - 1; }
void p1(int x);
void p2(int x);
void main(int b) {
int a;
a = (b ? foo : bar)(22); // ternary usage with function pointers
(b ? p1 : p2)(33); // ternary usage with void type
}
"""
self.do(src)
def test_comma_operator(self):
""" Test comma operator """
src = """
void main() {
int a,b,c,d;
a = 2, b=3;
}
"""
self.do(src)
def test_4(self):
""" Test expressions """
src = """
int main(int, int c) {
int stack[2];
struct { int ptr;} *s;
int d;
d = 20 + c * 10 + c >> 2 - 123;
d = stack[--s->ptr];
--d;
d--;
return d;
}
"""
self.do(src)
def test_5(self):
src = """
static int G;
void initialize(int g)
{
G = g;
}
int main(int, int c) {
int d = 2;
initialize(d);
return d;
}
"""
self.do(src)
def test_type_modifiers(self):
""" Test the various constructs of type names """
src = """
void main() {
int n;
n = sizeof(int);
int *a[3];
n = sizeof(int *[3]);
int (*p)[3];
n = sizeof(int (*)[3]);
n = sizeof(int *(void));
volatile const int * volatile vc;
}
int *f(void);
"""
self.do(src)
def test_struct(self):
""" Test structure usage """
src = """
typedef struct {int quot, rem; } div_t;
struct {} empty_unit;
struct z { int foo; };
struct s;
struct s* p;
struct s {
struct s *next;
int b:2+5, c:9, d;
struct z Z;
int *g;
};
struct s AllocS;
void main() {
volatile div_t x, *y;
x.rem = 2;
y = &x;
y->quot = x.rem = sizeof *AllocS.g;
struct s S;
S.next->next->b = 1;
}
"""
self.do(src)
def test_tag_scoping(self):
src = """
void f(int n) {
struct S { int a; } s;
union U { int a; } u;
enum E { E1, E2 } e;
if (n == 10) {
struct S { int b; } s;
s.b = 1;
union U { int b; } u;
u.b = 1;
enum E { E3, E4 } e;
e = E3;
}
s.a = 2;
u.a = 2;
e = E1;
}
"""
self.do(src)
def test_struct_copy(self):
""" Test struct behavior when copied around. """
src = """
typedef struct {int a,b,c,d,e,f; } data_t;
data_t my_f(data_t y) {
data_t z;
z.a = y.a;
z.b = 42;
return z;
}
void main() {
data_t *ptr;
data_t x;
x = *ptr++;
x = my_f(x);
x = my_f(*ptr--);
}
"""
self.do(src)
def test_bad_bitfield_type(self):
""" Test bad bitfield type """
src = """
struct z { float foo : 3; };
"""
self.expect_errors(src, [(2, r"Invalid type \(float\) for bit-field")])
def test_offsetof(self):
""" Test offsetof """
src = """
struct z { int foo; };
void main() {
__builtin_offsetof(struct z, foo);
}
"""
self.do(src)
def test_offsetof_after_bitfield(self):
""" Test offsetof after bitfields works """
src = """
struct z { char foo : 1; int fu : 2; int bar; };
void do_x(struct z g) {
}
void main() {
__builtin_offsetof(struct z, bar);
struct z y;
do_x(y);
}
"""
self.do(src)
def test_offsetof_bitfield(self):
""" Test offsetof on bitfields returns an error """
src = """
struct z { int foo : 23; };
void main() {
__builtin_offsetof(struct z, foo);
}
"""
self.expect_errors(src, [(4, 'address of bit-field "foo"')])
def test_union(self):
""" Test union usage """
src = """
union z { int foo; struct { int b, a, r; } bar;};
union z myZ[2] = {1, 2};
void main() {
union z localZ[2] = {1, 2};
}
"""
self.do(src)
def test_anonymous_union_member(self):
""" Test anonymous union member access. """
src = """
union z { int foo; struct { int b; }; };
void main() {
union z my_z;
my_z.b = 34;
}
"""
self.do(src)
def test_array(self):
""" Test array types """
src = """
int a[10];
int b[] = {1, 2};
int bbb[] = {1, 2,}; // Trailing comma
void main() {
int c[sizeof(long int)/sizeof(char)];
unsigned long long d[] = {1ULL, 2ULL};
a[2] = b[10] + c[2] + d[1];
int* p = a + 2;
int A[][3] = {1,2,3,4,5,6,7,8,9};
}
"""
self.do(src)
def test_array_index_pointer(self):
""" Test array indexing of a pointer type """
src = """
void main() {
int* a, b;
b = a[100];
}
"""
self.do(src)
def test_pointer_arithmatics(self):
""" Test dark pointer voodoo """
src = """
void main() {
int *a, b, *c;
a = &b;
c = a + 10; // pointer + numeric
c = 20 + a; // numeric + pointer
a = a - 10; // pointer - numeric
b = c - a; // pointer - pointer
a += 2;
a -= 4;
}
"""
self.do(src)
def test_size_outside_struct(self):
""" Assert error when using bitsize indicator outside struct """
src = """
int b:2+5, c:9, d;
"""
self.expect_errors(src, [(2, 'Expected ";"')])
def test_wrong_tag_kind(self):
""" Assert error when using wrong tag kind """
src = """
union S { int x;};
int B = sizeof(struct S);
"""
self.expect_errors(src, [(3, "Wrong tag kind")])
def test_enum(self):
""" Test enum usage """
src = """
void main() {
enum E { A, B, C=A+10 };
enum E e = A;
e = B;
e = 2;
}
"""
self.do(src)
def test_enum_implicit_cast(self):
""" Test enum casting """
src = """
void main() {
enum E { A, B, C };
enum D { X, Y, Z };
enum E e = Z;
}
"""
self.do(src)
def test_literal_data(self):
""" Test various formats of literal data """
src = """
void main() {
int i;
char *s, c;
i = 10l;
s = "Hello!" "World!";
c = ' ';
s = &"bla"[2]; // This is fine!
}
"""
self.do(src)
def test_compound_literal(self):
""" Test compund literal """
src = """
typedef struct { int x; } X_t;
X_t main() {
return (X_t){2};
}
"""
self.do(src)
def test_global_compound_literal(self):
""" Test pointer to global compund literals.
Points of interest:
- compound literals can empty initializer lists.
"""
src = """
int *pa1 = (int[]){1,2,3,4};
int *pa2 = (int[4]){1,2,3,4};
struct S2 { int a; };
struct S2* ps1 = &((struct S2){.a=2});
struct S2* ps2 = &((struct S2){});
"""
self.do(src)
def test_assignment_operators(self):
""" Test assignment operators """
src = """
void main() {
int a, b, c;
a += b - c;
a -= b - c;
a /= b - c;
a %= b - c;
a |= b - c;
a &= b - c;
}
"""
self.do(src)
def test_sizeof(self):
""" Test sizeof usage """
src = """
void main() {
int x, *y;
union U;
union U { int x; };
union U u;
x = sizeof(float*);
x = sizeof *y;
x = sizeof(*y);
x = sizeof(union U);
int w = sizeof w; // Sizeof works on the expression before the '='
}
"""
self.do(src)
def test_goto(self):
""" Test goto statements """
src = """
void main() {
goto part2;
part2: goto part2;
switch(0) {
case 34: break;
default: break;
}
}
"""
self.do(src)
def test_continue(self):
""" Test continue statement """
src = """
void main() {
while (1) {
continue;
}
}
"""
self.do(src)
def test_break(self):
""" Test break statement """
src = """
void main() {
while (1) {
break;
}
}
"""
self.do(src)
def test_switch(self):
""" Test switch statement """
src = """
void main() {
int a;
short b = 23L;
switch (b) {
case 34:
a -= 5;
break;
case 342LL:
break;
default:
a += 2;
break;
}
}
"""
self.do(src)
def test_switch_gnu(self):
""" Test switch statement with gnu extension. """
src = """
void main() {
int b = 23;
switch (b) {
case 34 ... 40:
b = 1;
break;
case 342:
b = 2;
break;
}
}
"""
self.do(src)
def test_loose_case(self):
""" Test loose case statement """
src = """
void main() {
case 34: break;
}
"""
self.expect_errors(src, [(3, "Case statement outside")])
def test_loose_default(self):
""" Test loose default statement """
src = """
void main() {
default: break;
}
"""
self.expect_errors(src, [(3, "Default statement outside")])
def test_void_function(self):
""" Test calling of a void function """
src = """
void main(void) {
main();
}
"""
self.do(src)
def test_function_arguments(self):
""" Test calling of functions """
src = """
void add(int a, int b, int c);
void main() {
add((int)22, 2, 3);
}
"""
self.do(src)
def test_function_argument_name(self):
""" Test an argument name with the same name as a typedef """
src = """
typedef double a;
void add(a a) {
a: return;
}
void mul(int a) {
unsigned int a;
}
"""
self.do(src)
def test_forward_declaration(self):
""" Test forward declarations """
src = """
extern char a;
char a = 2;
"""
self.do(src)
def test_afterwards_declaration(self):
""" Test redeclaration """
src = """
char a = 2;
extern char a; // this is fine too!
char a; // this is fine
int add(int a, int b);
int add(int a, int b); // fine!
int add(int a, int b) {
return a + b;
}
int add(int a, int b); // fine!
"""
self.do(src)
def test_variable_double_definition(self):
""" Test double definition raises an error. """
src = """
char a = 2;
char a = 3; // Not cool!
"""
self.expect_errors(src, [(3, "Invalid redefinition")])
def test_function_double_definition(self):
""" Test double definition raises an error. """
src = """
int add(int a, int b) {
return a + b;
}
int add(int a, int b) { // Not cool!
return a + b;
}
"""
self.expect_errors(src, [(5, "invalid redefinition")])
def test_softfloat_bug(self):
""" Bug encountered in softfloat library """
src = """
#define INLINE
typedef short int16;
typedef unsigned int bits32;
typedef char int8;
INLINE void
shift64ExtraRightJamming(
bits32 a0,
bits32 a1,
bits32 a2,
int16 count,
bits32 *z0Ptr,
bits32 *z1Ptr,
bits32 *z2Ptr
)
{
bits32 z0, z1, z2;
int8 negCount = ( - count ) & 31;
if ( count == 0 ) {
z2 = a2;
z1 = a1;
z0 = a0;
}
else {
if ( count < 32 ) {
z2 = a1<<negCount;
z1 = ( a0<<negCount ) | ( a1>>count );
z0 = a0>>count;
}
else {
if ( count == 32 ) {
z2 = a1;
z1 = a0;
}
else {
a2 |= a1;
if ( count < 64 ) {
z2 = a0<<negCount;
z1 = a0>>( count & 31 );
}
else {
z2 = ( count == 64 ) ? a0 : ( a0 != 0 );
z1 = 0;
}
}
z0 = 0;
}
z2 |= ( a2 != 0 );
}
*z2Ptr = z2;
*z1Ptr = z1;
*z0Ptr = z0;
}
"""
self.do(src)
def test_initialization(self):
""" Test initialization of complex data structures. """
src = """
struct rec {
int a, b;
char c[5];
struct {
int x, y;
} d;
};
char x = '\2';
int* ptr = (int*)0x1000;
int data;
int* ptr2 = &data;
struct rec d = {.b = 2, .c = {[2] = 3}, .d.x=100};
int e[] = {1, [2]=3, [0] = 2, [6]=2.2};
int f[] = {1,2,[5]=6};
void main() {
char x = '\2';
int* ptr = (int*)0x1000;
struct rec d = {.b = 2, .c = {[2] = 3}, .d.x=100};
int e[] = {1, [2]=3, [0] = 2, [6]=2.2};
int f[] = {1,2,[5]=6};
}
"""
self.do(src)
def test_anonymous_struct_field_initialization(self):
""" Test designated initialization into an anonymous struct. """
src = """
struct rec {
struct {
int x;
};
};
struct rec d = {.x = 2};
void main() {
struct rec d = {.x = 2};
}
"""
self.do(src)
def test_function_pointer_passing(self):
""" Test passing of function pointers """
src = """
void callback(void)
{
}
static void (*cb)(void);
void register_callback(void (*f)())
{
cb = f;
}
void main() {
register_callback(callback);
callback(); // direct call
cb(); // via function pointer
// TODO: (*cb)(); // again via function pointer
}
"""
self.do(src)
def test_not_all_paths_return_value(self):
""" Test what happens when not all code paths return a value """
src = """
int f(int a)
{
if(a == 0) return(1);
}
"""
self.do(src)
def test_array_of_strings(self):
""" Test array's of strings """
src = """
char *msg[] = {
"Hi",
"Bonjour"
};
"""
self.do(src)
def test_inline_asm(self):
""" Test inline assembly code. """
src = """
void main(int a) {
// This is example arch asm code:
int res;
asm (
"add r0, r1, r2"
: // TODO: "=r" (res)
: "r" (a)
);
}
"""
self.do(src)
if __name__ == "__main__":
unittest.main()
|
py | 1a4dd4046805a0d142e0f6c5f00c122c4516f4aa | """ This process performs a restore of all the application entities from a
given restore.
"""
import argparse
import logging
import os
from appscale.common import appscale_info
from ..backup.datastore_restore import DatastoreRestore
from ..dbconstants import APP_ENTITY_SCHEMA
from ..dbconstants import APP_ENTITY_TABLE
from ..dbconstants import APP_KIND_SCHEMA
from ..dbconstants import APP_KIND_TABLE
from ..dbconstants import ASC_PROPERTY_TABLE
from ..dbconstants import COMPOSITE_SCHEMA
from ..dbconstants import COMPOSITE_TABLE
from ..dbconstants import DSC_PROPERTY_TABLE
from ..dbconstants import PROPERTY_SCHEMA
from ..utils import fetch_and_delete_entities
from ..zkappscale import zktransaction as zk
# Where to look to verify the app is deployed.
_APPS_LOCATION = '/var/apps/'
logger = logging.getLogger(__name__)
def init_parser():
""" Initializes the command line argument parser.
Returns:
A parser object.
"""
parser = argparse.ArgumentParser(
description='Restore application code and data.')
main_args = parser.add_argument_group('main args')
main_args.add_argument('-a', '--app-id', required=True,
help='The application ID to restore data under.')
main_args.add_argument('-b', '--backup-dir', required=True,
help='The backup directory to restore data from.')
main_args.add_argument('-c', '--clear-datastore', required=False,
action="store_true", default=False, help='Start with a clean datastore.')
main_args.add_argument('-d', '--debug', required=False, action="store_true",
default=False, help='Display debug messages.')
# TODO
# Read in source code location and owner and deploy the app
# before restoring data.
return parser
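    # Illustrative command-line arguments (the app ID and backup directory are
    # placeholders, not from the original file):
    #   --app-id guestbook --backup-dir /opt/appscale/backups/guestbook --clear-datastore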
def app_is_deployed(app_id, zk_client):
""" Looks for the app directory in the deployed apps location.
Args:
app_id: A str, the application ID.
Returns:
True on success, False otherwise.
"""
if not zk_client.exists('/appscale/projects/{}'.format(app_id)):
logger.error("Seems that \"{0}\" is not deployed.".format(app_id))
logger.info("Please deploy \"{0}\" and try again.".\
format(app_id))
return False
return True
def backup_dir_exists(backup_dir):
""" Checks it the given backup directory exists.
Args:
backup_dir: A str, the location of the backup directory containing all
backup files.
Returns:
True on success, False otherwise.
"""
if not os.path.exists(backup_dir):
logger.error("Error while accessing backup files.")
logger.info("Please provide a valid backup directory.")
return False
return True
def main():
""" This main function allows you to run the restore manually. """
# Parse CLI arguments.
parser = init_parser()
args = parser.parse_args()
# Set up logging.
level = logging.INFO
if args.debug:
level = logging.DEBUG
logging.basicConfig(format='%(asctime)s %(levelname)s %(filename)s:' \
'%(lineno)s %(message)s ', level=level)
logger.info("Logging started")
logger.info(args)
zk_connection_locations = appscale_info.get_zk_locations_string()
zookeeper = zk.ZKTransaction(host=zk_connection_locations)
# Verify app is deployed.
if not app_is_deployed(args.app_id, zookeeper.handle):
return
# Verify backup dir exists.
if not backup_dir_exists(args.backup_dir):
return
if args.clear_datastore:
message = "Deleting \"{0}\" data...".\
format(args.app_id, args.backup_dir)
logger.info(message)
try:
tables_to_clear = {
APP_ENTITY_TABLE: APP_ENTITY_SCHEMA,
ASC_PROPERTY_TABLE: PROPERTY_SCHEMA,
DSC_PROPERTY_TABLE: PROPERTY_SCHEMA,
COMPOSITE_TABLE: COMPOSITE_SCHEMA,
APP_KIND_TABLE: APP_KIND_SCHEMA
}
for table, schema in tables_to_clear.items():
fetch_and_delete_entities('cassandra', table, schema, args.app_id, False)
except Exception as exception:
logger.error("Unhandled exception while deleting \"{0}\" data: {1} " \
"Exiting...".format(args.app_id, exception.message))
return
# Initialize connection to Zookeeper and database related variables.
db_info = appscale_info.get_db_info()
table = db_info[':table']
# Start restore process.
ds_restore = DatastoreRestore(args.app_id.strip('/'), args.backup_dir,
zookeeper, table)
try:
ds_restore.run()
finally:
zookeeper.close()
|
py | 1a4dd4b7e41aeded1dff66c6b7df1a42af19df3e | """Helper sensor for calculating utility costs."""
from __future__ import annotations
from dataclasses import dataclass
from functools import partial
from typing import Any, Final, Literal, TypeVar, cast
from homeassistant.components.sensor import (
ATTR_LAST_RESET,
DEVICE_CLASS_MONETARY,
STATE_CLASS_MEASUREMENT,
SensorEntity,
)
from homeassistant.core import HomeAssistant, State, callback, split_entity_id
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
import homeassistant.util.dt as dt_util
from .const import DOMAIN
from .data import EnergyManager, async_get_manager
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the energy sensors."""
manager = await async_get_manager(hass)
process_now = partial(_process_manager_data, hass, manager, async_add_entities, {})
manager.async_listen_updates(process_now)
if manager.data:
await process_now()
T = TypeVar("T")
@dataclass
class FlowAdapter:
"""Adapter to allow flows to be used as sensors."""
flow_type: Literal["flow_from", "flow_to"]
stat_energy_key: Literal["stat_energy_from", "stat_energy_to"]
entity_energy_key: Literal["entity_energy_from", "entity_energy_to"]
total_money_key: Literal["stat_cost", "stat_compensation"]
name_suffix: str
entity_id_suffix: str
FLOW_ADAPTERS: Final = (
FlowAdapter(
"flow_from",
"stat_energy_from",
"entity_energy_from",
"stat_cost",
"Cost",
"cost",
),
FlowAdapter(
"flow_to",
"stat_energy_to",
"entity_energy_to",
"stat_compensation",
"Compensation",
"compensation",
),
)
async def _process_manager_data(
hass: HomeAssistant,
manager: EnergyManager,
async_add_entities: AddEntitiesCallback,
current_entities: dict[tuple[str, str], EnergyCostSensor],
) -> None:
"""Process updated data."""
to_add: list[SensorEntity] = []
to_remove = dict(current_entities)
async def finish() -> None:
if to_add:
async_add_entities(to_add)
for key, entity in to_remove.items():
current_entities.pop(key)
await entity.async_remove()
if not manager.data:
await finish()
return
for energy_source in manager.data["energy_sources"]:
if energy_source["type"] != "grid":
continue
for adapter in FLOW_ADAPTERS:
for flow in energy_source[adapter.flow_type]:
# Opting out of the type complexity because can't get it to work
untyped_flow = cast(dict, flow)
# No need to create an entity if we already have a cost stat
if untyped_flow.get(adapter.total_money_key) is not None:
continue
# This is unique among all flow_from's
key = (adapter.flow_type, untyped_flow[adapter.stat_energy_key])
# Make sure the right data is there
# If the entity existed, we don't pop it from to_remove so it's removed
if untyped_flow.get(adapter.entity_energy_key) is None or (
untyped_flow.get("entity_energy_price") is None
and untyped_flow.get("number_energy_price") is None
):
continue
current_entity = to_remove.pop(key, None)
if current_entity:
current_entity.update_config(untyped_flow)
continue
current_entities[key] = EnergyCostSensor(
adapter,
manager.data["currency"],
untyped_flow,
)
to_add.append(current_entities[key])
await finish()
class EnergyCostSensor(SensorEntity):
"""Calculate costs incurred by consuming energy.
This is intended as a fallback for when no specific cost sensor is available for the
utility.
"""
def __init__(
self,
adapter: FlowAdapter,
currency: str,
flow: dict,
) -> None:
"""Initialize the sensor."""
super().__init__()
self._adapter = adapter
self.entity_id = f"{flow[adapter.entity_energy_key]}_{adapter.entity_id_suffix}"
self._attr_device_class = DEVICE_CLASS_MONETARY
self._attr_state_class = STATE_CLASS_MEASUREMENT
self._attr_unit_of_measurement = currency
self._flow = flow
self._last_energy_sensor_state: State | None = None
def _reset(self, energy_state: State) -> None:
"""Reset the cost sensor."""
self._attr_state = 0.0
self._attr_last_reset = dt_util.utcnow()
self._last_energy_sensor_state = energy_state
self.async_write_ha_state()
@callback
def _update_cost(self) -> None:
"""Update incurred costs."""
energy_state = self.hass.states.get(
cast(str, self._flow[self._adapter.entity_energy_key])
)
if energy_state is None or ATTR_LAST_RESET not in energy_state.attributes:
return
try:
energy = float(energy_state.state)
except ValueError:
return
# Determine energy price
if self._flow["entity_energy_price"] is not None:
energy_price_state = self.hass.states.get(self._flow["entity_energy_price"])
if energy_price_state is None:
return
try:
energy_price = float(energy_price_state.state)
except ValueError:
return
else:
energy_price_state = None
energy_price = cast(float, self._flow["number_energy_price"])
if self._last_energy_sensor_state is None:
# Initialize as it's the first time all required entities are in place.
self._reset(energy_state)
return
cur_value = cast(float, self._attr_state)
if (
energy_state.attributes[ATTR_LAST_RESET]
!= self._last_energy_sensor_state.attributes[ATTR_LAST_RESET]
):
# Energy meter was reset, reset cost sensor too
self._reset(energy_state)
else:
# Update with newly incurred cost
old_energy_value = float(self._last_energy_sensor_state.state)
self._attr_state = cur_value + (energy - old_energy_value) * energy_price
self._last_energy_sensor_state = energy_state
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
energy_state = self.hass.states.get(self._flow[self._adapter.entity_energy_key])
if energy_state:
name = energy_state.name
else:
name = split_entity_id(self._flow[self._adapter.entity_energy_key])[
0
].replace("_", " ")
self._attr_name = f"{name} {self._adapter.name_suffix}"
self._update_cost()
# Store stat ID in hass.data so frontend can look it up
self.hass.data[DOMAIN]["cost_sensors"][
self._flow[self._adapter.entity_energy_key]
] = self.entity_id
@callback
def async_state_changed_listener(*_: Any) -> None:
"""Handle child updates."""
self._update_cost()
self.async_write_ha_state()
self.async_on_remove(
async_track_state_change_event(
self.hass,
cast(str, self._flow[self._adapter.entity_energy_key]),
async_state_changed_listener,
)
)
async def async_will_remove_from_hass(self) -> None:
"""Handle removing from hass."""
self.hass.data[DOMAIN]["cost_sensors"].pop(
self._flow[self._adapter.entity_energy_key]
)
await super().async_will_remove_from_hass()
@callback
def update_config(self, flow: dict) -> None:
"""Update the config."""
self._flow = flow
|
py | 1a4dd60489beb62128771714ee35d76ffbde7445 | from .combinators import Mix2
from .exponential import Exponential
from .normal import Normal
from .poisson import Poisson
from .polynomial import Polynomial
from .uniform import Uniform, UniformInt
|
py | 1a4dd7740a45db5768576910743107c52daa2d78 | class BaseControlError(Exception):
"""inherited by all exceptions in this module"""
class NoReplyError(BaseControlError):
"""Used when retrying for a result"""
class CommandNotAcceptedError(BaseControlError):
"""Command was not accepted by remote"""
class CommandConfirmationTimeoutError(BaseControlError):
"""Reply from remote host did not arrive in time"""
|
py | 1a4dd8e5ef8d0093df7bcb340964a0c6072c1fa6 | #!/usr/bin/env python3
from common import does_file_exist, authentication_help
from base64 import standard_b64decode
import wave
LOCAL_FILE_PATH = "bin.html"
def switch_endian():
# TODO: implement this for real
# The Python WAV libraries don't support endian order.
# Kludge by "merging" the channels.
# Switching endian order probably can be done via pack.
wf = wave.open("c19.wav", "rb")
frames = wf.readframes(wf.getnframes())
result = wave.open("result.wav", "wb")
result.setnchannels(wf.getnchannels())
result.setsampwidth(wf.getsampwidth() // 2)
result.setframerate(wf.getframerate() * 2)
result.writeframes(frames)
def main():
if not does_file_exist(LOCAL_FILE_PATH, "Challenge 1"):
exit()
line_num = 0
data = ""
with open(LOCAL_FILE_PATH) as in_file, open(f"c19.wav", "wb") as out_file:
for line in in_file:
# TODO - don't hardcode extracting out the attachment
line_num += 1
if line_num < 28:
continue
data += line.strip()
wav_data = standard_b64decode(data)
out_file.write(wav_data)
switch_endian()
print("As you can see, this does not print the result in a traditional format.")
print("In fact, it's not graphical at all. Listen to result.wav. Listen to what you are called.")
print("Use the standard format of: http://www.pythonchallenge.com/pc/hex/{answer}.html")
authentication_help("hex")
if __name__ == "__main__":
main()
|
py | 1a4ddac962638ab60c52cbce834ab0ebda7f7eee | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Updates a flagfile containing a resign threshold flag
Reads the bigtables defined by the flags --cbt_{project, instance, table} to
compute the 95 percentile of the bleakest-evaluations found in calibration
games, then updates the flagfile on the default bucket path, resetting that
value.
Recommended usage is via common flagfile (e.g. rl_loop/distributed_flags)
"""
import sys
import re
import os
import time
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
sys.path.insert(0, '.')
import mask_flags
import bigtable_input
import rl_loop.fsdb as fsdb
# Fun fact, this only helps for --helpshort. It's not a validator.
flags.adopt_module_key_flags(bigtable_input)
flags.adopt_module_key_flags(fsdb)
FLAGS = flags.FLAGS
RESIGN_FLAG_REGEX = re.compile(r'--resign_threshold=([-\d.]+)')
def get_95_percentile_bleak(games_nr, n_back=500):
"""Gets the 95th percentile of bleakest_eval from bigtable"""
end_game = int(games_nr.latest_game_number)
start_game = end_game - n_back if end_game >= n_back else 0
moves = games_nr.bleakest_moves(start_game, end_game)
evals = np.array([m[2] for m in moves])
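    # The bleakest evaluations are the most negative, so the 5th percentile of
    # the signed values corresponds to the 95th percentile of "bleakness".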
return np.percentile(evals, 5)
def update_flagfile(flags_path, new_threshold):
"""Updates the flagfile at `flags_path`, changing the value for
`resign_threshold` to `new_threshold`
"""
if abs(new_threshold) > 1:
raise ValueError("Invalid new percentile for resign threshold")
with tf.gfile.GFile(flags_path) as f:
lines = f.read()
if new_threshold > 0:
new_threshold *= -1
if not RESIGN_FLAG_REGEX.search(lines):
print("Resign threshold flag not found in flagfile {}! Aborting.".format(flags_path))
sys.exit(1)
old_threshold = RESIGN_FLAG_REGEX.search(lines).groups(1)
lines = re.sub(RESIGN_FLAG_REGEX, "--resign_threshold={:.3f}".format(new_threshold), lines)
if abs(float(old_threshold[0]) - new_threshold) < 0.001:
print("Not updating percentiles; {} ~= {:.3f}".format(
old_threshold[0], new_threshold), flush=True)
else:
print("Updated percentile from {} to {:.3f}".format(
old_threshold[0], new_threshold), flush=True)
with tf.gfile.GFile(flags_path, 'w') as f:
f.write(lines)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
games_nr = bigtable_input.GameQueue(
FLAGS.cbt_project, FLAGS.cbt_instance, FLAGS.cbt_table + '-nr')
while True:
new_pct = get_95_percentile_bleak(games_nr)
update_flagfile(fsdb.flags_path(), new_pct)
time.sleep(60 * 3)
if __name__ == '__main__':
valid_flags = list(map(lambda f: '--' + f, FLAGS.flag_values_dict().keys()))
valid_flags += ['--helpshort', '--helpfull', '--help']
parsed_flags = flags.FlagValues().read_flags_from_files(sys.argv[1:])
filtered_flags = mask_flags.filter_flags(parsed_flags, valid_flags)
print(filtered_flags, flush=True)
app.run(main, argv=sys.argv[:1] + filtered_flags)
|
py | 1a4ddcbc6a001d3195ed5e88dc0c4a7e1738bbc7 | #!/usr/local/bin/python
# coding: utf-8
# Copyright (C) 2011-2012 FeatureSpaceTree Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# FeatureSpaceTree:
#
# Author: Adrian Pastor Lopez-Monroy <[email protected]>
# URL: <https://github.com/beiceman/FeatureSpaceTree>
#
# Language Technologies Lab,
# Department of Computer Science,
# Instituto Nacional de Astrofísica, Óptica y Electrónica
#
# For license information, see:
# * The header of this file
# * The LICENSE.TXT included in the project dir
# ==============================================================================
from postfilter import DecoratorByTokenNormalizer
class NeighboringBigramsFilterDecoratorByTokenNormalizer(DecoratorByTokenNormalizer):
def __init__(self, by_token_normalizer):
super(NeighboringBigramsFilterDecoratorByTokenNormalizer, self).__init__(by_token_normalizer)
def get_list_of_tokens(self):
old_list_of_tokens = self._by_token_normalizer.get_list_of_tokens()
# print old_list_of_tokens
new_list_of_tokens = []
rows = int(old_list_of_tokens[0])
cols = int(old_list_of_tokens[1])
tokens = old_list_of_tokens[2:]
base_mat = []
for i in range(rows):
a = i*cols
b = i*cols + cols
base_mat += [tokens[a:b]]
# print base_mat
if (len(base_mat) != rows) or (len(base_mat[0]) != cols) or (len(base_mat[-1]) != cols):
print "THE MATRIX HAS A STRANGE SIZE!!!, YOU SHOUL CHECK THIS CASE."
for i in range(rows):
for j in range(cols):
v = ["NOT_A_VISUAL_FEATURE" for e in range(8)]
actual = base_mat[i][j]
if ((i-1) >= 0) and ((j-1) >= 0) :
v[0] = base_mat[i-1][j-1]
if (j-1) >= 0:
v[1] = base_mat[i][j-1]
if ((i+1) < rows) and ((j-1) >= 0):
v[2] = base_mat[i+1][j-1]
if ((i+1) < rows):
v[3] = base_mat[i+1][j]
if ((i+1) < rows) and ((j+1) < cols):
v[4] = base_mat[i+1][j+1]
if (j+1) < cols:
v[5] = base_mat[i][j+1]
if ((i-1) >= 0) and ((j+1) < cols):
v[6] = base_mat[i-1][j+1]
if (i-1) >= 0:
v[7] = base_mat[i-1][j]
for v_e in v:
if v_e != "NOT_A_VISUAL_FEATURE":
new_list_of_tokens += [actual + "~" + v_e]
# print new_list_of_tokens
return new_list_of_tokens
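# Illustrative trace for the filter above (hypothetical input): the token stream
# ["2", "2", "a", "b", "c", "d"] encodes a 2x2 grid [["a", "b"], ["c", "d"]];
# cell "a" then contributes the neighbour bigrams "a~c", "a~d" and "a~b", and
# the remaining cells are handled the same way.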
class OrientationNeighboringBigramsFilterDecoratorByTokenNormalizer(DecoratorByTokenNormalizer):
def __init__(self, by_token_normalizer):
super(OrientationNeighboringBigramsFilterDecoratorByTokenNormalizer, self).__init__(by_token_normalizer)
def get_list_of_tokens(self):
old_list_of_tokens = self._by_token_normalizer.get_list_of_tokens()
# print old_list_of_tokens
new_list_of_tokens = []
rows = int(old_list_of_tokens[0])
cols = int(old_list_of_tokens[1])
tokens = old_list_of_tokens[2:]
base_mat = []
for i in range(rows):
a = i*cols
b = i*cols + cols
base_mat += [tokens[a:b]]
# print base_mat
if (len(base_mat) != rows) or (len(base_mat[0]) != cols) or (len(base_mat[-1]) != cols):
print "THE MATRIX HAS A STRANGE SIZE!!!, YOU SHOUL CHECK THIS CASE."
for i in range(rows):
for j in range(cols):
v = ["NOT_A_VISUAL_FEATURE" for e in range(8)]
actual = "c" + base_mat[i][j]
if ((i-1) >= 0) and ((j-1) >= 0) :
v[0] = "no" + base_mat[i-1][j-1]
if (j-1) >= 0:
v[1] = "o" + base_mat[i][j-1]
if ((i+1) < rows) and ((j-1) >= 0):
v[2] = "so" + base_mat[i+1][j-1]
if ((i+1) < rows):
v[3] = "s" + base_mat[i+1][j]
if ((i+1) < rows) and ((j+1) < cols):
v[4] = "se" + base_mat[i+1][j+1]
if (j+1) < cols:
v[5] = "e" + base_mat[i][j+1]
if ((i-1) >= 0) and ((j+1) < cols):
v[6] = "ne" + base_mat[i-1][j+1]
if (i-1) >= 0:
v[7] = "n" + base_mat[i-1][j]
for v_e in v:
if v_e != "NOT_A_VISUAL_FEATURE":
new_list_of_tokens += [actual + "~" + v_e]
# print new_list_of_tokens
return new_list_of_tokens
class NeighboringNoOrderBigramsFilterDecoratorByTokenNormalizer(DecoratorByTokenNormalizer):
def __init__(self, by_token_normalizer):
super(NeighboringNoOrderBigramsFilterDecoratorByTokenNormalizer, self).__init__(by_token_normalizer)
def get_list_of_tokens(self):
old_list_of_tokens = self._by_token_normalizer.get_list_of_tokens()
# print old_list_of_tokens
new_list_of_tokens = []
rows = int(old_list_of_tokens[0])
cols = int(old_list_of_tokens[1])
tokens = old_list_of_tokens[2:]
base_mat = []
for i in range(rows):
a = i*cols
b = i*cols + cols
base_mat += [tokens[a:b]]
# print base_mat
if (len(base_mat) != rows) or (len(base_mat[0]) != cols) or (len(base_mat[-1]) != cols):
print "THE MATRIX HAS A STRANGE SIZE!!!, YOU SHOUL CHECK THIS CASE."
for i in range(rows):
for j in range(cols):
v = ["NOT_A_VISUAL_FEATURE" for e in range(8)]
actual = base_mat[i][j]
if ((i-1) >= 0) and ((j-1) >= 0) :
v[0] = base_mat[i-1][j-1]
if (j-1) >= 0:
v[1] = base_mat[i][j-1]
if ((i+1) < rows) and ((j-1) >= 0):
v[2] = base_mat[i+1][j-1]
if ((i+1) < rows):
v[3] = base_mat[i+1][j]
if ((i+1) < rows) and ((j+1) < cols):
v[4] = base_mat[i+1][j+1]
if (j+1) < cols:
v[5] = base_mat[i][j+1]
if ((i-1) >= 0) and ((j+1) < cols):
v[6] = base_mat[i-1][j+1]
if (i-1) >= 0:
v[7] = base_mat[i-1][j]
for v_e in v:
if v_e != "NOT_A_VISUAL_FEATURE":
generated_bigram = ""
if actual <= v_e:
generated_bigram = actual + "~" + v_e
else:
generated_bigram = v_e + "~" + actual
new_list_of_tokens += [generated_bigram]
# print new_list_of_tokens
return new_list_of_tokens
class W2VNeighboringNoOrderBigramsFilterDecoratorByTokenNormalizer(DecoratorByTokenNormalizer):
def __init__(self, by_token_normalizer):
super(W2VNeighboringNoOrderBigramsFilterDecoratorByTokenNormalizer, self).__init__(by_token_normalizer)
def get_list_of_tokens(self):
#print "UUUUUUUUUUUUUUUUUUY"
old_list_of_tokens = self._by_token_normalizer.get_list_of_tokens()
# print old_list_of_tokens
new_list_of_tokens = []
rows = int(old_list_of_tokens[0])
cols = int(old_list_of_tokens[1])
tokens = old_list_of_tokens[2:]
base_mat = []
for i in range(rows):
a = i*cols
b = i*cols + cols
base_mat += [tokens[a:b]]
# print base_mat
if (len(base_mat) != rows) or (len(base_mat[0]) != cols) or (len(base_mat[-1]) != cols):
print "THE MATRIX HAS A STRANGE SIZE!!!, YOU SHOUL CHECK THIS CASE."
for i in range(rows):
for j in range(cols):
v = ["NOT_A_VISUAL_FEATURE" for e in range(8)]
actual = base_mat[i][j]
if ((i-1) >= 0) and ((j-1) >= 0) :
v[0] = base_mat[i-1][j-1]
if (j-1) >= 0:
v[1] = base_mat[i][j-1]
if ((i+1) < rows) and ((j-1) >= 0):
v[2] = base_mat[i+1][j-1]
if ((i+1) < rows):
v[3] = base_mat[i+1][j]
if ((i+1) < rows) and ((j+1) < cols):
v[4] = base_mat[i+1][j+1]
if (j+1) < cols:
v[5] = base_mat[i][j+1]
if ((i-1) >= 0) and ((j+1) < cols):
v[6] = base_mat[i-1][j+1]
if (i-1) >= 0:
v[7] = base_mat[i-1][j]
if "NOT_A_VISUAL_FEATURE" in set(v):
continue
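                # Emit the 8 neighbours as a 9-token window with the centre
                # token spliced in after the 4th neighbour (a word2vec-style
                # context window rather than literal bigrams).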
generated_bigram = []
cw = 0
for v_e in v:
generated_bigram += [v_e]
if cw == 3:
generated_bigram += [actual]
cw += 1
new_list_of_tokens += generated_bigram
#print new_list_of_tokens
#print "HEEEEEEEEEEEEEEEEEEEEEEY"
#print new_list_of_tokens
return new_list_of_tokens
class NeighboringNoOrderTrigramsFilterDecoratorByTokenNormalizer(DecoratorByTokenNormalizer):
def __init__(self, by_token_normalizer):
super(NeighboringNoOrderTrigramsFilterDecoratorByTokenNormalizer, self).__init__(by_token_normalizer)
def get_list_of_tokens(self):
old_list_of_tokens = self._by_token_normalizer.get_list_of_tokens()
# print old_list_of_tokens
new_list_of_tokens = []
rows = int(old_list_of_tokens[0])
cols = int(old_list_of_tokens[1])
tokens = old_list_of_tokens[2:]
base_mat = []
for i in range(rows):
a = i*cols
b = i*cols + cols
base_mat += [tokens[a:b]]
# print base_mat
if (len(base_mat) != rows) or (len(base_mat[0]) != cols) or (len(base_mat[-1]) != cols):
print "THE MATRIX HAS A STRANGE SIZE!!!, YOU SHOUL CHECK THIS CASE."
for i in range(rows):
for j in range(cols):
v = ["NOT_A_VISUAL_FEATURE" for e in range(8)]
v_ext = ["NOT_A_VISUAL_FEATURE" for e in range(8)]
actual = base_mat[i][j]
if ((i-1) >= 0) and ((j-1) >= 0) and ((i-2) >= 0) and ((j-2) >= 0):
v[0] = base_mat[i-1][j-1]
v_ext[0] = base_mat[i-2][j-2]
if ((j-1) >= 0) and ((j-2) >= 0):
v[1] = base_mat[i][j-1]
v_ext[1] = base_mat[i][j-2]
if (((i+1) < rows) and ((j-1) >= 0)) and (((i+2) < rows) and ((j-2) >= 0)):
v[2] = base_mat[i+1][j-1]
v_ext[2] = base_mat[i+2][j-2]
if ((i+1) < rows) and ((i+2) < rows):
v[3] = base_mat[i+1][j]
v_ext[3] = base_mat[i+2][j]
if (((i+1) < rows) and ((j+1) < cols)) and (((i+2) < rows) and ((j+2) < cols)):
v[4] = base_mat[i+1][j+1]
v_ext[4] = base_mat[i+2][j+2]
if ((j+1) < cols) and ((j+2) < cols):
v[5] = base_mat[i][j+1]
v_ext[5] = base_mat[i][j+2]
if (((i-1) >= 0) and ((j+1) < cols)) and (((i-2) >= 0) and ((j+2) < cols)):
v[6] = base_mat[i-1][j+1]
v_ext[6] = base_mat[i-2][j+2]
if ((i-1) >= 0) and ((i-2) >= 0):
v[7] = base_mat[i-1][j]
v_ext[7] = base_mat[i-2][j]
for v_e, v_e_ext in zip(v, v_ext):
if (v_e != "NOT_A_VISUAL_FEATURE") and (v_e_ext != "NOT_A_VISUAL_FEATURE"):
my_seq = [actual, v_e, v_e_ext]
if my_seq[0] <= my_seq[-1]:
my_seq.reverse()
generated_trigram = my_seq[0] + "~" + my_seq[1] + "~" + my_seq[2]
new_list_of_tokens += [generated_trigram]
# print new_list_of_tokens
return new_list_of_tokens
#####
class NeighboringNoOrderTetragramsFilterDecoratorByTokenNormalizer(DecoratorByTokenNormalizer):
def __init__(self, by_token_normalizer):
super(NeighboringNoOrderTetragramsFilterDecoratorByTokenNormalizer, self).__init__(by_token_normalizer)
def get_list_of_tokens(self):
old_list_of_tokens = self._by_token_normalizer.get_list_of_tokens()
# print old_list_of_tokens
new_list_of_tokens = []
rows = int(old_list_of_tokens[0])
cols = int(old_list_of_tokens[1])
tokens = old_list_of_tokens[2:]
base_mat = []
for i in range(rows):
a = i*cols
b = i*cols + cols
base_mat += [tokens[a:b]]
# print base_mat
if (len(base_mat) != rows) or (len(base_mat[0]) != cols) or (len(base_mat[-1]) != cols):
print "THE MATRIX HAS A STRANGE SIZE!!!, YOU SHOUL CHECK THIS CASE."
for i in range(rows):
for j in range(cols):
v = ["NOT_A_VISUAL_FEATURE" for e in range(8)]
v_ext = ["NOT_A_VISUAL_FEATURE" for e in range(8)]
v_ext_2 = ["NOT_A_VISUAL_FEATURE" for e in range(8)]
actual = base_mat[i][j]
if ((i-1) >= 0) and ((j-1) >= 0) and ((i-2) >= 0) and ((j-2) >= 0) and ((i-3) >= 0) and ((j-3) >= 0):
v[0] = base_mat[i-1][j-1]
v_ext[0] = base_mat[i-2][j-2]
v_ext_2[0] = base_mat[i-3][j-3]
if ((j-1) >= 0) and ((j-2) >= 0) and ((j-3) >= 0):
v[1] = base_mat[i][j-1]
v_ext[1] = base_mat[i][j-2]
v_ext_2[1] = base_mat[i][j-3]
if (((i+1) < rows) and ((j-1) >= 0)) and (((i+2) < rows) and ((j-2) >= 0)) and (((i+3) < rows) and ((j-3) >= 0)):
v[2] = base_mat[i+1][j-1]
v_ext[2] = base_mat[i+2][j-2]
v_ext_2[2] = base_mat[i+3][j-3]
if ((i+1) < rows) and ((i+2) < rows) and ((i+3) < rows):
v[3] = base_mat[i+1][j]
v_ext[3] = base_mat[i+2][j]
v_ext_2[3] = base_mat[i+3][j]
if (((i+1) < rows) and ((j+1) < cols)) and (((i+2) < rows) and ((j+2) < cols)) and (((i+3) < rows) and ((j+3) < cols)):
v[4] = base_mat[i+1][j+1]
v_ext[4] = base_mat[i+2][j+2]
v_ext_2[4] = base_mat[i+3][j+3]
if ((j+1) < cols) and ((j+2) < cols) and ((j+3) < cols):
v[5] = base_mat[i][j+1]
v_ext[5] = base_mat[i][j+2]
v_ext_2[5] = base_mat[i][j+3]
if (((i-1) >= 0) and ((j+1) < cols)) and (((i-2) >= 0) and ((j+2) < cols)) and (((i-3) >= 0) and ((j+3) < cols)):
v[6] = base_mat[i-1][j+1]
v_ext[6] = base_mat[i-2][j+2]
v_ext_2[6] = base_mat[i-3][j+3]
if ((i-1) >= 0) and ((i-2) >= 0) and ((i-3) >= 0):
v[7] = base_mat[i-1][j]
v_ext[7] = base_mat[i-2][j]
v_ext_2[7] = base_mat[i-3][j]
for v_e, v_e_ext, v_e_ext_2 in zip(v, v_ext, v_ext_2):
if (v_e != "NOT_A_VISUAL_FEATURE") and (v_e_ext != "NOT_A_VISUAL_FEATURE") and (v_e_ext_2 != "NOT_A_VISUAL_FEATURE"):
my_seq = [actual, v_e, v_e_ext, v_e_ext_2]
if my_seq[0] <= my_seq[-1]:
my_seq.reverse()
generated_tetragram = my_seq[0] + "~" + my_seq[1] + "~" + my_seq[2] + "~" + my_seq[3]
new_list_of_tokens += [generated_tetragram]
# print new_list_of_tokens
return new_list_of_tokens
#####
class SkipSizeInfoFilterDecoratorByTokenNormalizer(DecoratorByTokenNormalizer):
def __init__(self, by_token_normalizer):
super(SkipSizeInfoFilterDecoratorByTokenNormalizer, self).__init__(by_token_normalizer)
def get_list_of_tokens(self):
old_list_of_tokens = self._by_token_normalizer.get_list_of_tokens()
# print old_list_of_tokens
new_list_of_tokens = []
rows = int(old_list_of_tokens[0])
cols = int(old_list_of_tokens[1])
new_list_of_tokens = old_list_of_tokens[2:]
return new_list_of_tokens
class MaxPatternsByRowFilterDecoratorByTokenNormalizer(DecoratorByTokenNormalizer):
max_patterns = None
def __init__(self, by_token_normalizer, path_to_max_patterns, a, b):
super(MaxPatternsByRowFilterDecoratorByTokenNormalizer, self).__init__(by_token_normalizer)
self.path_to_max_patterns = path_to_max_patterns
self.a = a
self.b = b
def count_max_patterns_in_row(self, max_pattern, row):
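        # Counts how many times `max_pattern` occurs in `row` as an ordered,
        # not necessarily contiguous, subsequence, scanning left to right.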
count = 0
if len(set(max_pattern) & set(row)) == len(set(max_pattern)):
index_max_pattern = 0
for elem_row in row:
if elem_row == max_pattern[index_max_pattern]:
index_max_pattern += 1
# if you have found the last element of the pattern, then restart the index and count it.
if index_max_pattern == len(max_pattern):
index_max_pattern = 0
count += 1
return count
def get_list_of_tokens(self):
old_list_of_tokens = self._by_token_normalizer.get_list_of_tokens()
# print old_list_of_tokens
# Read the list of max patterns ----------------------------------------
max_patterns = []
if MaxPatternsByRowFilterDecoratorByTokenNormalizer.max_patterns == None:
f_of_max_patterns = open(self.path_to_max_patterns, "r")
for line in f_of_max_patterns:
elements = line.split()
if int(elements[1]) >= self.a and int(elements[1]) <= self.b:
max_patterns += [elements[2:]]
print "MAX_PATTERNS: ", max_patterns
MaxPatternsByRowFilterDecoratorByTokenNormalizer.max_patterns = max_patterns
f_of_max_patterns.close()
else:
max_patterns = MaxPatternsByRowFilterDecoratorByTokenNormalizer.max_patterns
# ----------------------------------------------------------------------
new_list_of_tokens = []
rows = int(old_list_of_tokens[0])
cols = int(old_list_of_tokens[1])
tokens = old_list_of_tokens[2:]
base_mat = []
for i in range(rows):
a = i*cols
b = i*cols + cols
base_mat += [tokens[a:b]]
# print base_mat
if (len(base_mat) != rows) or (len(base_mat[0]) != cols) or (len(base_mat[-1]) != cols):
print "THE MATRIX HAS A STRANGE SIZE!!!, YOU SHOULD CHECK THIS CASE."
for i in range(rows):
for max_pattern in max_patterns:
freq = 0
freq = self.count_max_patterns_in_row(max_pattern, base_mat[i])
tag_pattern = "~".join(max_pattern)
new_list_of_tokens += [tag_pattern] * freq
# print new_list_of_tokens
return new_list_of_tokens
class GapBigramsFilterDecoratorByTokenNormalizer(DecoratorByTokenNormalizer):
def __init__(self, by_token_normalizer, gap_size):
super(GapBigramsFilterDecoratorByTokenNormalizer, self).__init__(by_token_normalizer)
self.gap_size = gap_size
    def get_list_of_tokens(self):
        old_list_of_tokens = self._by_token_normalizer.get_list_of_tokens()
        new_list_of_tokens = []
        # Build gap bigrams: pair each token with the token located
        # self.gap_size positions further ahead in the sequence (assumed
        # intent of this filter, given its name and the gap_size parameter).
        for i in range(len(old_list_of_tokens) - self.gap_size):
            first_token = old_list_of_tokens[i]
            second_token = old_list_of_tokens[i + self.gap_size]
            new_list_of_tokens += [first_token + "~" + second_token]
        return new_list_of_tokens
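# Usage sketch (not part of the original module; base_normalizer is a
# hypothetical object exposing get_list_of_tokens()): the decorators are meant
# to be stacked around such a normalizer, e.g.
#
#   normalizer = SkipSizeInfoFilterDecoratorByTokenNormalizer(base_normalizer)
#   normalizer = GapBigramsFilterDecoratorByTokenNormalizer(normalizer, gap_size=2)
#   tokens = normalizer.get_list_of_tokens()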
|
py | 1a4ddf51e29c268d8df51b306b91b7b5cfa3885a | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-07-24 21:32:13
# @Author : ZiQiangWang
# Email : [email protected]
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
SQLALCHEMY_TRACK_MODIFICATIONS = True
ARTICLES_PER_PAGE = 10
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
TESTING = True
class ProductionConfig(Config):
pass
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
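# Usage sketch (not part of the original file): an application factory would
# typically select one of these configurations by name, e.g.
#
#   app.config.from_object(config['development'])
#   config['development'].init_app(app)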
|
py | 1a4de0604022461ca75dfa4e1da5e8fb55027e2c | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import copy
import logging
import math
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from reagent import types as rlt
from reagent.core.configuration import param_hash
from reagent.core.dataclasses import dataclass
from reagent.model_utils.seq2slate_utils import (
DECODER_START_SYMBOL,
PADDING_SYMBOL,
Seq2SlateMode,
Seq2SlateOutputArch,
attention,
clones,
mask_logits_by_idx,
per_symbol_to_per_seq_log_probs,
per_symbol_to_per_seq_probs,
subsequent_mask,
)
from reagent.models.base import ModelBase
from reagent.torch_utils import gather
from torch.nn.parallel.distributed import DistributedDataParallel
logger = logging.getLogger(__name__)
class Generator(nn.Module):
""" Define standard linear + softmax generation step. """
def __init__(self, dim_model, candidate_size, temperature):
super(Generator, self).__init__()
self.dim_model = dim_model
self.candidate_size = candidate_size
self.temperature = temperature
def forward(self, mode, logits=None, tgt_in_idx=None, greedy=None):
if mode in (
Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE,
Seq2SlateMode.PER_SEQ_LOG_PROB_MODE,
):
return self._log_probs(logits, tgt_in_idx, mode)
elif mode == Seq2SlateMode.DECODE_ONE_STEP_MODE:
assert greedy is not None
return self._decode_one_step(logits, tgt_in_idx, greedy)
else:
raise NotImplementedError()
def _log_probs(self, logits, tgt_in_idx, mode):
"""
Return the log probability distribution at each decoding step
:param logits: logits of decoder outputs. Shape: batch_size, seq_len, candidate_size
:param tgt_idx: the indices of candidates in decoder input sequences.
The first symbol is always DECODER_START_SYMBOL.
Shape: batch_size, seq_len
"""
assert mode in (
Seq2SlateMode.PER_SEQ_LOG_PROB_MODE,
Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE,
)
logits = mask_logits_by_idx(logits, tgt_in_idx)
# log_probs shape: batch_size, seq_len, candidate_size
log_probs = F.log_softmax(logits / self.temperature, dim=2)
return log_probs
def _decode_one_step(self, logits, tgt_in_idx, greedy):
"""
Decode one-step
:param logits: logits of decoder outputs. Shape: batch_size, seq_len, candidate_size
:param tgt_in_idx: input to the decoder, the first symbol is always the
starting symbol. Shape: batch_size, seq_len
:param greedy: whether to greedily pick or sample the next symbol
"""
batch_size = logits.shape[0]
# get the last step logits shape: batch_size, candidate_size
logits = logits[:, -1, :]
# invalidate the padding symbol and decoder-starting symbol
logits[:, :2] = float("-inf")
# invalidate symbols already appeared in decoded sequences
logits = logits.scatter(1, tgt_in_idx, float("-inf"))
prob = F.softmax(logits / self.temperature, dim=1)
if greedy:
_, next_candidate = torch.max(prob, dim=1)
else:
next_candidate = torch.multinomial(prob, num_samples=1, replacement=False)
next_candidate = next_candidate.reshape(batch_size, 1)
# next_candidate: the decoded symbols for the latest step
# shape: batch_size x 1
# prob: generative probabilities of the latest step
# shape: batch_size x candidate_size
return next_candidate, prob
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
"""
def __init__(self, dim_model):
super(SublayerConnection, self).__init__()
self.norm = nn.LayerNorm(dim_model)
def forward(self, x, sublayer):
return x + sublayer(self.norm(x))
class Encoder(nn.Module):
"Core encoder is a stack of num_layers layers"
def __init__(self, layer, num_layers):
super(Encoder, self).__init__()
self.layers = clones(layer, num_layers)
self.norm = nn.LayerNorm(layer.dim_model)
def forward(self, x, mask):
"Pass the input (and mask) through each layer in turn."
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class EncoderLayer(nn.Module):
""" Encoder is made up of self-attn and feed forward """
def __init__(self, dim_model, self_attn, feed_forward):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(dim_model), 2)
self.dim_model = dim_model
def forward(self, src_embed, src_mask):
# src_embed shape: batch_size, seq_len, dim_model
# src_src_mask shape: batch_size, seq_len, seq_len
def self_attn_layer(x):
return self.self_attn(x, x, x, src_mask)
# attn_output shape: batch_size, seq_len, dim_model
attn_output = self.sublayer[0](src_embed, self_attn_layer)
# return shape: batch_size, seq_len, dim_model
return self.sublayer[1](attn_output, self.feed_forward)
class Decoder(nn.Module):
""" Generic num_layers layer decoder with masking."""
def __init__(self, layer, num_layers):
super(Decoder, self).__init__()
self.layers = clones(layer, num_layers)
self.norm = nn.LayerNorm(layer.size)
def forward(self, x, memory, tgt_src_mask, tgt_tgt_mask):
# each layer is one DecoderLayer
for layer in self.layers:
x = layer(x, memory, tgt_src_mask, tgt_tgt_mask)
return self.norm(x)
class DecoderLayer(nn.Module):
""" Decoder is made of self-attn, src-attn, and feed forward """
def __init__(self, size, self_attn, src_attn, feed_forward):
super(DecoderLayer, self).__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size), 3)
def forward(self, x, m, tgt_src_mask, tgt_tgt_mask):
# x is target embedding or the output of previous decoder layer
# x shape: batch_size, seq_len, dim_model
# m is the output of the last encoder layer
# m shape: batch_size, seq_len, dim_model
# tgt_src_mask shape: batch_size, tgt_seq_len, src_seq_len
# tgt_tgt_mask shape: batch_size, tgt_seq_len, tgt_seq_len
def self_attn_layer_tgt(x):
return self.self_attn(query=x, key=x, value=x, mask=tgt_tgt_mask)
def self_attn_layer_src(x):
return self.src_attn(query=x, key=m, value=m, mask=tgt_src_mask)
x = self.sublayer[0](x, self_attn_layer_tgt)
x = self.sublayer[1](x, self_attn_layer_src)
# return shape: batch_size, seq_len, dim_model
return self.sublayer[2](x, self.feed_forward)
class MultiHeadedAttention(nn.Module):
def __init__(self, num_heads, dim_model):
""" Take in model size and number of heads """
super(MultiHeadedAttention, self).__init__()
assert dim_model % num_heads == 0
# We assume d_v always equals d_k
self.d_k = dim_model // num_heads
self.num_heads = num_heads
self.linears = clones(nn.Linear(dim_model, dim_model), 4)
def forward(self, query, key, value, mask=None):
if mask is not None:
# Same mask applied to all num_heads heads.
# mask shape: batch_size, 1, seq_len, seq_len
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from dim_model => num_heads x d_k
# self.linear[0, 1, 2] is query weight matrix, key weight matrix, and
# value weight matrix, respectively.
# l(x) represents the transformed query matrix, key matrix and value matrix
# l(x) has shape (batch_size, seq_len, dim_model). You can think l(x) as
# the matrices from a one-head attention; or you can think
# l(x).view(...).transpose(...) as the matrices of num_heads attentions,
# each attention has d_k dimension.
query, key, value = [
l(x).view(nbatches, -1, self.num_heads, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))
]
# 2) Apply attention on all the projected vectors in batch.
# x shape: batch_size, num_heads, seq_len, d_k
x, _ = attention(query, key, value, mask, self.d_k)
# 3) "Concat" using a view and apply a final linear.
# each attention's output is d_k dimension. Concat num_heads attention's outputs
# x shape: batch_size, seq_len, dim_model
x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.num_heads * self.d_k)
return self.linears[-1](x)
class PositionwiseFeedForward(nn.Module):
def __init__(self, dim_model, dim_feedforward):
super(PositionwiseFeedForward, self).__init__()
self.net = torch.nn.Sequential(
torch.nn.Linear(dim_model, dim_feedforward),
torch.nn.ReLU(),
torch.nn.Linear(dim_feedforward, dim_model),
)
def forward(self, x):
return self.net(x)
class Embedder(nn.Module):
def __init__(self, dim_in, dim_out):
super(Embedder, self).__init__()
self.dim_in = dim_in
self.dim_out = dim_out
self.linear = nn.Linear(self.dim_in, self.dim_out)
def forward(self, x):
# x: raw input features. Shape: batch_size, seq_len, dim_in
output = self.linear(x) * math.sqrt(self.dim_out)
# output shape: batch_size, seq_len, dim_out
return output
class PositionalEncoding(nn.Module):
def __init__(self, dim_model, max_len):
super(PositionalEncoding, self).__init__()
self.pos_embed = nn.Embedding(max_len, dim_model)
def forward(self, x):
device = x.device
batch_size, seq_len, _ = x.shape
position_idx = (
torch.arange(0, seq_len).unsqueeze(0).repeat(batch_size, 1).to(device)
)
x = x + self.pos_embed(position_idx)
return x
class BaselineNet(nn.Module):
def __init__(self, state_dim, dim_feedforward, num_stacked_layers):
super(BaselineNet, self).__init__()
nn_blocks = [nn.Linear(state_dim, dim_feedforward), nn.ReLU()]
assert num_stacked_layers >= 1
for _ in range(num_stacked_layers - 1):
nn_blocks.extend([nn.Linear(dim_feedforward, dim_feedforward), nn.ReLU()])
nn_blocks.append(nn.Linear(dim_feedforward, 1))
self.mlp = nn.Sequential(*nn_blocks)
def forward(self, input: rlt.PreprocessedRankingInput):
x = input.state.float_features
return self.mlp(x)
class Seq2SlateTransformerModel(nn.Module):
"""
A Seq2Slate network with Transformer. The network is essentially an
encoder-decoder structure. The encoder inputs a sequence of candidate feature
vectors and a state feature vector, and the decoder outputs an ordered
list of candidate indices. The output order is learned through REINFORCE
algorithm to optimize sequence-wise reward.
One application example is to rank candidate feeds to a specific user such
that the final list of feeds as a whole optimizes the user's engagement.
Seq2Slate paper: https://arxiv.org/abs/1810.02019
Transformer paper: https://arxiv.org/abs/1706.03762
The model archtecture can also adapt to some variations.
(1) The decoder can be autoregressive
(2) The decoder can take encoder scores and perform iterative softmax (aka frechet sort)
(3) No decoder and the output order is solely based on encoder scores
"""
def __init__(
self,
state_dim: int,
candidate_dim: int,
num_stacked_layers: int,
num_heads: int,
dim_model: int,
dim_feedforward: int,
max_src_seq_len: int,
max_tgt_seq_len: int,
output_arch: Seq2SlateOutputArch,
temperature: float = 1.0,
):
"""
:param state_dim: state feature dimension
:param candidate_dim: candidate feature dimension
:param num_stacked_layers: number of stacked layers in Transformer
:param num_heads: number of attention heads used in Transformer
:param dim_model: number of attention dimensions in Transformer
:param dim_feedforward: number of hidden units in FeedForward layers
in Transformer
:param max_src_seq_len: the maximum length of input sequences
:param max_tgt_seq_len: the maximum length of output sequences
:param output_arch: determines seq2slate output architecture
:param temperature: temperature used in decoder sampling
"""
super().__init__()
self.state_dim = state_dim
self.candidate_dim = candidate_dim
self.num_stacked_layers = num_stacked_layers
self.num_heads = num_heads
self.dim_model = dim_model
self.dim_feedforward = dim_feedforward
self.max_src_seq_len = max_src_seq_len
self.max_tgt_seq_len = max_tgt_seq_len
self.output_arch = output_arch
self._DECODER_START_SYMBOL = DECODER_START_SYMBOL
self._PADDING_SYMBOL = PADDING_SYMBOL
self._RANK_MODE = Seq2SlateMode.RANK_MODE
self._PER_SYMBOL_LOG_PROB_DIST_MODE = (
Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE
)
self._PER_SEQ_LOG_PROB_MODE = Seq2SlateMode.PER_SEQ_LOG_PROB_MODE
self._DECODE_ONE_STEP_MODE = Seq2SlateMode.DECODE_ONE_STEP_MODE
self._ENCODER_SCORE_MODE = Seq2SlateMode.ENCODER_SCORE_MODE
c = copy.deepcopy
attn = MultiHeadedAttention(num_heads, dim_model)
ff = PositionwiseFeedForward(dim_model, dim_feedforward)
self.encoder = Encoder(
EncoderLayer(dim_model, c(attn), c(ff)), num_stacked_layers
)
if self.output_arch == Seq2SlateOutputArch.FRECHET_SORT:
# Compute score at each encoder step
self.encoder_scorer = nn.Linear(dim_model, 1)
# Generator needs to know the output symbol size,
# Possible output symbols include candidate indices, decoder-start symbol
# and padding symbol
self.generator = Generator(dim_model, max_src_seq_len + 2, temperature)
elif self.output_arch == Seq2SlateOutputArch.AUTOREGRESSIVE:
self.decoder = Decoder(
DecoderLayer(dim_model, c(attn), c(attn), c(ff)), num_stacked_layers
)
self.decoder_logit_proj = nn.Linear(dim_model, max_src_seq_len + 2)
self.generator = Generator(dim_model, max_src_seq_len + 2, temperature)
elif self.output_arch == Seq2SlateOutputArch.ENCODER_SCORE:
# Compute score at each encoder step
self.encoder_scorer = nn.Linear(dim_model, 1)
self.candidate_embedder = Embedder(candidate_dim, dim_model // 2)
self.state_embedder = Embedder(state_dim, dim_model // 2)
self.positional_encoding_encoder = PositionalEncoding(
dim_model, max_len=max_src_seq_len
)
self.positional_encoding_decoder = PositionalEncoding(
dim_model, max_len=max_tgt_seq_len
)
# Initialize parameters with Glorot / fan_avg.
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
self._print_model_info()
    __constants__ = [
        "state_dim",
        "candidate_dim",
        "num_stacked_layers",
        "num_heads",
        "dim_model",
        "dim_feedforward",
        "max_src_seq_len",
        "max_tgt_seq_len",
        "output_arch",
        "_DECODER_START_SYMBOL",
        "_PADDING_SYMBOL",
        "_RANK_MODE",
        "_PER_SYMBOL_LOG_PROB_DIST_MODE",
        "_PER_SEQ_LOG_PROB_MODE",
        "_DECODE_ONE_STEP_MODE",
        "_ENCODER_SCORE_MODE",
    ]
def _print_model_info(self):
def _num_of_params(model):
return len(torch.cat([p.flatten() for p in model.parameters()]))
logger.info(f"Num of total params: {_num_of_params(self)}")
logger.info(f"Num of Encoder params: {_num_of_params(self.encoder)}")
logger.info(
f"Num of Candidate Embedder params: {_num_of_params(self.candidate_embedder)}"
)
logger.info(
f"Num of State Embedder params: {_num_of_params(self.state_embedder)}"
)
if self.output_arch == Seq2SlateOutputArch.FRECHET_SORT:
logger.info(
f"Num of Encoder_Scorer params: {_num_of_params(self.encoder_scorer)}"
)
elif self.output_arch == Seq2SlateOutputArch.AUTOREGRESSIVE:
logger.info(f"Num of Decoder params: {_num_of_params(self.decoder)}")
logger.info(
f"Num of Decoder Projection params: {_num_of_params(self.decoder_logit_proj)}"
)
elif self.output_arch == Seq2SlateOutputArch.ENCODER_SCORE:
logger.info(
f"Num of Encoder_Scorer params: {_num_of_params(self.encoder_scorer)}"
)
def forward(
self,
input: rlt.PreprocessedRankingInput,
mode: str,
tgt_seq_len: Optional[int] = None,
greedy: Optional[bool] = None,
):
"""
:param input: model input
:param mode: a string indicating which mode to perform.
"rank": return ranked actions and their generative probabilities.
"per_seq_log_probs": return generative log probabilities of given
tgt sequences (used for REINFORCE training)
"per_symbol_log_probs": return generative log probabilties of each
symbol in given tgt sequences (used in TEACHER FORCING and
DIFFERENTIABLE_REWARD training)
:param tgt_seq_len: the length of output sequence to be decoded. Only used
in rank mode
:param greedy: whether to sample based on softmax distribution or greedily
when decoding. Only used in rank mode
"""
if mode == self._RANK_MODE:
if tgt_seq_len is None:
tgt_seq_len = self.max_tgt_seq_len
return self._rank(
state=input.state.float_features,
src_seq=input.src_seq.float_features,
src_src_mask=input.src_src_mask,
tgt_seq_len=tgt_seq_len,
greedy=greedy,
)
elif mode in (self._PER_SEQ_LOG_PROB_MODE, self._PER_SYMBOL_LOG_PROB_DIST_MODE):
assert input.tgt_in_seq is not None
return self._log_probs(
state=input.state.float_features,
src_seq=input.src_seq.float_features,
# pyre-fixme[16]: `Optional` has no attribute `float_features`.
tgt_in_seq=input.tgt_in_seq.float_features,
src_src_mask=input.src_src_mask,
tgt_tgt_mask=input.tgt_tgt_mask,
tgt_in_idx=input.tgt_in_idx,
tgt_out_idx=input.tgt_out_idx,
mode=mode,
)
elif mode == self._ENCODER_SCORE_MODE:
assert self.output_arch == Seq2SlateOutputArch.ENCODER_SCORE
return self.encoder_output_to_scores(
state=input.state.float_features,
src_seq=input.src_seq.float_features,
src_src_mask=input.src_src_mask,
tgt_out_idx=input.tgt_out_idx,
)
def _rank(self, state, src_seq, src_src_mask, tgt_seq_len, greedy):
""" Decode sequences based on given inputs """
device = src_seq.device
batch_size, src_seq_len, candidate_dim = src_seq.shape
candidate_size = src_seq_len + 2
# candidate_features is used as look-up table for candidate features.
# the second dim is src_seq_len + 2 because we also want to include
# features of start symbol and padding symbol
candidate_features = torch.zeros(
batch_size, src_seq_len + 2, candidate_dim, device=device
)
# TODO: T62502977 create learnable feature vectors for start symbol
# and padding symbol
candidate_features[:, 2:, :] = src_seq
# memory shape: batch_size, src_seq_len, dim_model
memory = self.encode(state, src_seq, src_src_mask)
ranked_per_symbol_probs = torch.zeros(
batch_size, tgt_seq_len, candidate_size, device=device
)
ranked_per_seq_probs = torch.zeros(batch_size, 1)
if self.output_arch == Seq2SlateOutputArch.ENCODER_SCORE:
# encoder_scores shape: batch_size, src_seq_len
encoder_scores = self.encoder_scorer(memory).squeeze(dim=2)
tgt_out_idx = torch.argsort(encoder_scores, dim=1, descending=True)[
:, :tgt_seq_len
]
# +2 to account for start symbol and padding symbol
tgt_out_idx += 2
# every position has propensity of 1 because we are just using argsort
ranked_per_symbol_probs = ranked_per_symbol_probs.scatter(
2, tgt_out_idx.unsqueeze(2), 1.0
)
ranked_per_seq_probs[:, :] = 1.0
return ranked_per_symbol_probs, ranked_per_seq_probs, tgt_out_idx
tgt_in_idx = (
torch.ones(batch_size, 1, device=device)
.fill_(self._DECODER_START_SYMBOL)
.type(torch.long)
)
assert greedy is not None
for l in range(tgt_seq_len):
tgt_in_seq = gather(candidate_features, tgt_in_idx)
tgt_src_mask = src_src_mask[:, : l + 1, :]
# shape batch_size, l + 1, candidate_size
logits = self.decode(
memory=memory,
state=state,
tgt_src_mask=tgt_src_mask,
tgt_in_seq=tgt_in_seq,
tgt_tgt_mask=subsequent_mask(l + 1, device),
tgt_seq_len=l + 1,
)
# next candidate shape: batch_size, 1
# prob shape: batch_size, candidate_size
next_candidate, prob = self.generator(
mode=self._DECODE_ONE_STEP_MODE,
logits=logits,
tgt_in_idx=tgt_in_idx,
greedy=greedy,
)
ranked_per_symbol_probs[:, l, :] = prob
tgt_in_idx = torch.cat([tgt_in_idx, next_candidate], dim=1)
# remove the decoder start symbol
# tgt_out_idx shape: batch_size, tgt_seq_len
tgt_out_idx = tgt_in_idx[:, 1:]
ranked_per_seq_probs = per_symbol_to_per_seq_probs(
ranked_per_symbol_probs, tgt_out_idx
)
# ranked_per_symbol_probs shape: batch_size, tgt_seq_len, candidate_size
# ranked_per_seq_probs shape: batch_size, 1
# tgt_out_idx shape: batch_size, tgt_seq_len
return ranked_per_symbol_probs, ranked_per_seq_probs, tgt_out_idx
def _log_probs(
self,
state,
src_seq,
tgt_in_seq,
src_src_mask,
tgt_tgt_mask,
tgt_in_idx,
tgt_out_idx,
mode,
):
"""
Compute log of generative probabilities of given tgt sequences
(used for REINFORCE training)
"""
# encoder_output shape: batch_size, src_seq_len, dim_model
encoder_output = self.encode(state, src_seq, src_src_mask)
tgt_seq_len = tgt_in_seq.shape[1]
src_seq_len = src_seq.shape[1]
assert tgt_seq_len <= src_seq_len
# tgt_src_mask shape: batch_size, tgt_seq_len, src_seq_len
tgt_src_mask = src_src_mask[:, :tgt_seq_len, :]
# decoder_logits shape: batch_size, tgt_seq_len, candidate_size
decoder_logits = self.decode(
memory=encoder_output,
state=state,
tgt_src_mask=tgt_src_mask,
tgt_in_seq=tgt_in_seq,
tgt_tgt_mask=tgt_tgt_mask,
tgt_seq_len=tgt_seq_len,
)
# log_probs shape:
# if mode == PER_SEQ_LOG_PROB_MODE: batch_size, 1
# if mode == PER_SYMBOL_LOG_PROB_DIST_MODE: batch_size, tgt_seq_len, candidate_size
log_probs = self._decoder_logits_to_log_probs(
decoder_logits, tgt_in_idx, tgt_out_idx, mode
)
return log_probs
def _decoder_logits_to_log_probs(self, logits, tgt_in_idx, tgt_out_idx, mode):
"""
:param logits: the logits from the decoder, with shape:
(batch_size, seq_len, candidate_size)
:param tgt_in_idx: input idx to the decoder, the first symbol is
always the DECODER_START_SYMBOL. Shape: batch_size x seq_len
:param tgt_out_idx: output idx of the decoder. Shape: batch_size x seq_len
:param mode: return log prob distribution per symbol or reduce them per sequence
"""
assert mode in (
self._PER_SEQ_LOG_PROB_MODE,
self._PER_SYMBOL_LOG_PROB_DIST_MODE,
)
# per_symbol_log_probs: log probability distribution of each symbol
# shape: batch_size, seq_len, candidate_size
per_symbol_log_probs = self.generator(
mode=mode, logits=logits, tgt_in_idx=tgt_in_idx
)
if mode == self._PER_SYMBOL_LOG_PROB_DIST_MODE:
return per_symbol_log_probs
# shape: batch_size, 1
return per_symbol_to_per_seq_log_probs(per_symbol_log_probs, tgt_out_idx)
def encoder_output_to_scores(self, state, src_seq, src_src_mask, tgt_out_idx):
# encoder_output shape: batch_size, src_seq_len, dim_model
encoder_output = self.encode(state, src_seq, src_src_mask)
# encoder_output shape: batch_size, src_seq_len, dim_model
# tgt_out_idx shape: batch_size, tgt_seq_len
batch_size, tgt_seq_len = tgt_out_idx.shape
# order encoder_output by tgt_out_idx
# slate_encoder_output shape: batch_size, tgt_seq_len, dim_model
slate_encoder_output = gather(encoder_output, tgt_out_idx - 2)
# encoder_scores shape: batch_size, tgt_seq_len
return self.encoder_scorer(slate_encoder_output).squeeze()
def encode(self, state, src_seq, src_mask):
# state: batch_size, state_dim
# src_seq: batch_size, src_seq_len, dim_candidate
# src_src_mask shape: batch_size, src_seq_len, src_seq_len
batch_size = src_seq.shape[0]
# candidate_embed: batch_size, src_seq_len, dim_model/2
candidate_embed = self.candidate_embedder(src_seq)
# state_embed: batch_size, dim_model/2
state_embed = self.state_embedder(state)
# transform state_embed into shape: batch_size, src_seq_len, dim_model/2
state_embed = state_embed.repeat(1, self.max_src_seq_len).reshape(
batch_size, self.max_src_seq_len, -1
)
# Input at each encoder step is actually concatenation of state_embed
# and candidate embed. state_embed is replicated at each encoding step.
# src_embed shape: batch_size, src_seq_len, dim_model
src_embed = self.positional_encoding_encoder(
torch.cat((state_embed, candidate_embed), dim=2)
)
# encoder_output shape: batch_size, src_seq_len, dim_model
return self.encoder(src_embed, src_mask)
def decode(
self, memory, state, tgt_src_mask, tgt_in_seq, tgt_tgt_mask, tgt_seq_len
):
# memory is the output of the encoder, the attention of each input symbol
# memory shape: batch_size, src_seq_len, dim_model
# tgt_src_mask shape: batch_size, tgt_seq_len, src_seq_len
# tgt_seq shape: batch_size, tgt_seq_len, dim_candidate
# tgt_tgt_mask shape: batch_size, tgt_seq_len, tgt_seq_len
batch_size, src_seq_len, _ = memory.shape
candidate_size = src_seq_len + 2
if self.output_arch == Seq2SlateOutputArch.FRECHET_SORT:
# encoder_scores shape: batch_size, src_seq_len
encoder_scores = self.encoder_scorer(memory).squeeze(dim=2)
logits = torch.zeros(batch_size, tgt_seq_len, candidate_size).to(
encoder_scores.device
)
logits[:, :, :2] = float("-inf")
logits[:, :, 2:] = encoder_scores.repeat(1, tgt_seq_len).reshape(
batch_size, tgt_seq_len, src_seq_len
)
elif self.output_arch == Seq2SlateOutputArch.AUTOREGRESSIVE:
# candidate_embed shape: batch_size, tgt_seq_len, dim_model/2
candidate_embed = self.candidate_embedder(tgt_in_seq)
# state_embed: batch_size, dim_model/2
state_embed = self.state_embedder(state)
# state_embed: batch_size, tgt_seq_len, dim_model/2
state_embed = state_embed.repeat(1, tgt_seq_len).reshape(
batch_size, tgt_seq_len, -1
)
# tgt_embed: batch_size, tgt_seq_len, dim_model
tgt_embed = self.positional_encoding_decoder(
torch.cat((state_embed, candidate_embed), dim=2)
)
# output of decoder will be later transformed into probabilities over symbols.
# shape: batch_size, tgt_seq_len, dim_model
decoder_output = self.decoder(tgt_embed, memory, tgt_src_mask, tgt_tgt_mask)
# logits shape: batch_size, seq_len, candidate_size
logits = self.decoder_logit_proj(decoder_output)
return logits
@dataclass
class Seq2SlateNet(ModelBase):
__hash__ = param_hash
state_dim: int
candidate_dim: int
num_stacked_layers: int
dim_model: int
max_src_seq_len: int
max_tgt_seq_len: int
output_arch: Seq2SlateOutputArch
temperature: float
def __post_init_post_parse__(self) -> None:
super(Seq2SlateNet, self).__init__()
# pyre-fixme[16]: `Seq2SlateNet` has no attribute `seq2slate`.
self.seq2slate = self._build_model()
def _build_model(self):
return None
def input_prototype(self):
return rlt.PreprocessedRankingInput.from_tensors(
state=torch.randn(1, self.state_dim),
src_seq=torch.randn(1, self.max_src_seq_len, self.candidate_dim),
tgt_in_seq=torch.randn(1, self.max_tgt_seq_len, self.candidate_dim),
tgt_out_seq=torch.randn(1, self.max_tgt_seq_len, self.candidate_dim),
src_src_mask=torch.ones(1, self.max_src_seq_len, self.max_src_seq_len),
tgt_tgt_mask=torch.ones(1, self.max_tgt_seq_len, self.max_tgt_seq_len),
slate_reward=torch.randn(1),
)
def forward(
self,
input: rlt.PreprocessedRankingInput,
mode: str,
tgt_seq_len: Optional[int] = None,
greedy: Optional[bool] = None,
):
# pyre-fixme[16]: `Seq2SlateNet` has no attribute `seq2slate`.
res = self.seq2slate(input, mode=mode, tgt_seq_len=tgt_seq_len, greedy=greedy)
if mode == Seq2SlateMode.RANK_MODE:
return rlt.RankingOutput(
ranked_per_symbol_probs=res[0],
ranked_per_seq_probs=res[1],
ranked_tgt_out_idx=res[2],
)
elif mode in (
Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE,
Seq2SlateMode.PER_SEQ_LOG_PROB_MODE,
):
return rlt.RankingOutput(log_probs=res)
elif mode == Seq2SlateMode.ENCODER_SCORE_MODE:
return rlt.RankingOutput(encoder_scores=res)
else:
raise NotImplementedError()
def get_distributed_data_parallel_model(self):
return _DistributedSeq2SlateNet(self)
@dataclass
class Seq2SlateTransformerNet(Seq2SlateNet):
__hash__ = param_hash
num_heads: int
dim_feedforward: int
def _build_model(self):
return Seq2SlateTransformerModel(
state_dim=self.state_dim,
candidate_dim=self.candidate_dim,
num_stacked_layers=self.num_stacked_layers,
num_heads=self.num_heads,
dim_model=self.dim_model,
dim_feedforward=self.dim_feedforward,
max_src_seq_len=self.max_src_seq_len,
max_tgt_seq_len=self.max_tgt_seq_len,
output_arch=self.output_arch,
temperature=self.temperature,
)
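# Usage sketch (not from the original source; all parameter values below are
# arbitrary illustrations): build the transformer-based ranker and decode a
# slate greedily.
#
#   net = Seq2SlateTransformerNet(
#       state_dim=8, candidate_dim=16, num_stacked_layers=2, num_heads=4,
#       dim_model=64, dim_feedforward=128, max_src_seq_len=10,
#       max_tgt_seq_len=10, output_arch=Seq2SlateOutputArch.AUTOREGRESSIVE,
#       temperature=1.0,
#   )
#   ranking_input = net.input_prototype()
#   output = net(ranking_input, mode=Seq2SlateMode.RANK_MODE, greedy=True)
#   # output.ranked_tgt_out_idx holds the decoded slate; indices are offset by
#   # 2 to account for the decoder-start and padding symbols.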
class _DistributedSeq2SlateNet(ModelBase):
def __init__(self, seq2slate_net: Seq2SlateNet):
super().__init__()
current_device = torch.cuda.current_device()
self.data_parallel = DistributedDataParallel(
# pyre-fixme[16]: `Seq2SlateNet` has no attribute `seq2slate`.
seq2slate_net.seq2slate,
device_ids=[current_device],
output_device=current_device,
)
self.seq2slate_net = seq2slate_net
def input_prototype(self):
return self.seq2slate_net.input_prototype()
def cpu_model(self):
return self.seq2slate_net.cpu_model()
def forward(
self,
input: rlt.PreprocessedRankingInput,
mode: str,
tgt_seq_len: Optional[int] = None,
greedy: Optional[bool] = None,
):
res = self.data_parallel(
input, mode=mode, tgt_seq_len=tgt_seq_len, greedy=greedy
)
if mode == Seq2SlateMode.RANK_MODE:
return rlt.RankingOutput(
ranked_per_symbol_probs=res[0],
ranked_per_seq_probs=res[1],
ranked_tgt_out_idx=res[2],
)
elif mode in (
Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE,
Seq2SlateMode.PER_SEQ_LOG_PROB_MODE,
):
return rlt.RankingOutput(log_probs=res)
elif mode == Seq2SlateMode.ENCODER_SCORE_MODE:
return rlt.RankingOutput(encoder_scores=res)
else:
raise NotImplementedError()
|
py | 1a4de080d158b5b57e60a93f77aa74c06a253837 | # -*- coding: utf-8 -*-
"""Vertical structure functions for ROMS
:func:`sdepth`
Depth of s-levels
:func:`zslice`
Slice a 3D field in s-coordinates to fixed depth
:func:`multi_zslice`
Slice a 3D field to several depth levels
:func:`z_average`
Vertical average of a 3D field
:func:`s_stretch`
Compute vertical stretching arrays Cs_r or Cs_w
"""
# -----------------------------------
# Bjørn Ådlandsvik <[email protected]>
# Institute of Marine Research
# Bergen, Norway
# 2010-09-30
# -----------------------------------
from __future__ import absolute_import, division
import numpy as np
def sdepth(H, Hc, C, stagger="rho", Vtransform=1):
"""Depth of s-levels
*H* : arraylike
Bottom depths [meter, positive]
*Hc* : scalar
Critical depth
    *C* : 1D array
s-level stretching curve
*stagger* : [ 'rho' | 'w' ]
*Vtransform* : [ 1 | 2 ]
defines the transform used, defaults 1 = Song-Haidvogel
Returns an array with ndim = H.ndim + 1 and
    shape = C.shape + H.shape with the depths of the
mid-points in the s-levels.
Typical usage::
>>> fid = Dataset(roms_file)
>>> H = fid.variables['h'][:, :]
>>> C = fid.variables['Cs_r'][:]
>>> Hc = fid.variables['hc'].getValue()
>>> z_rho = sdepth(H, Hc, C)
"""
H = np.asarray(H)
Hshape = H.shape # Save the shape of H
H = H.ravel() # and make H 1D for easy shape maniplation
C = np.asarray(C)
N = len(C)
outshape = (N,) + Hshape # Shape of output
if stagger == "rho":
S = -1.0 + (0.5 + np.arange(N)) / N # Unstretched coordinates
elif stagger == "w":
S = np.linspace(-1.0, 0.0, N)
else:
raise ValueError("stagger must be 'rho' or 'w'")
if Vtransform == 1: # Default transform by Song and Haidvogel
A = Hc * (S - C)[:, None]
B = np.outer(C, H)
return (A + B).reshape(outshape)
elif Vtransform == 2: # New transform by Shchepetkin
N = Hc * S[:, None] + np.outer(C, H)
D = 1.0 + Hc / H
return (N / D).reshape(outshape)
else:
raise ValueError("Unknown Vtransform")
# ------------------------------------
def sdepth_w(H, Hc, cs_w):
"""Return depth of w-points in s-levels
Kept for backwards compatibility
use *sdepth(H, Hc, cs_w, stagger='w')* instead
"""
return sdepth(H, Hc, cs_w, stagger="w")
# ------------------------------------------
# Vertical slicing e.t.c.
# ------------------------------------------
def zslice(F, S, z):
"""Vertical slice of a 3D ROMS field
Vertical interpolation of a field in s-coordinates to
(possibly varying) depth level
*F* : array with vertical profiles, first dimension is vertical
*S* : array with depths of the F-values,
*z* : Depth level(s) for output, scalar or ``shape = F.shape[1:]``
The z values should be negative
Return value : array, `shape = F.shape[1:]`, the vertical slice
Example:
H is an array of depths (positive values)
Hc is the critical depth
C is 1D containing the s-coordinate stretching at rho-points
returns F50, interpolated values at 50 meter with F50.shape = H.shape
>>> z_rho = sdepth(H, Hc, C)
>>> F50 = zslice(F, z_rho, -50.0)
"""
# TODO:
# Option to Save A, D, Dm
# => faster interpolate more fields to same depth
F = np.asarray(F)
S = np.asarray(S)
z = np.asarray(z, dtype="float")
Fshape = F.shape # Save original shape
if S.shape != Fshape:
        raise ValueError("F and S must have same shape")
if z.shape and z.shape != Fshape[1:]:
raise ValueError("z must be scalar or have shape = F.shape[1:]")
# Flatten all non-vertical dimensions
N = F.shape[0] # Length of vertical dimension
M = F.size // N # Combined length of horizontal dimension(s)
F = F.reshape((N, M))
S = S.reshape((N, M))
if z.shape:
z = z.reshape((M,))
# Find integer array C with shape (M,)
# with S[C[i]-1, i] < z <= S[C[i], i]
# C = np.apply_along_axis(np.searchsorted, 0, S, z)
# but the following is much faster
C = np.sum(S < z, axis=0)
C = C.clip(1, N - 1)
# For vectorisation
# construct index array tuples D and Dm such that
# F[D][i] = F[C[i], i]
# F[Dm][i] = F[C[i]-1, i]
I = np.arange(M, dtype="int")
D = (C, I)
Dm = (C - 1, I)
# Compute interpolation weights
A = (z - S[Dm]) / (S[D] - S[Dm])
A = A.clip(0.0, 1.0) # Control the extrapolation
# Do the linear interpolation
R = (1 - A) * F[Dm] + A * F[D]
    # Give the result the correct shape
R = R.reshape(Fshape[1:])
return R
# -----------------------------------------------
def multi_zslice(F, S, Z):
"""Slice a 3D ROMS field to fixed depth
Vertical interpolation of a field in s-coordinates to
fixed vertical level
*F* : array of with vertical profiles, first dimension is vertical
*S* : array with depth of s-levels (at rho-points)
1D (constant depth) or S.shape = F.shape
*Z* : single depth value, negative
Returns : array, ``shape = F.shape[1:]`` the vertical slice
"""
# TODO:
# Option to Save A, D, Dm
# => faster interpolate more fields to same depth
F = np.asarray(F)
S = np.asarray(S)
Fshape = F.shape # Save original shape
    # Flatten all dimensions after first
N = F.shape[0]
M = F.size // N
F = F.reshape((N, M))
S = S.reshape((N, M))
# Make z.shape = (M,)
Z = np.asarray(Z, dtype="float")
# Valid possibilities
# 1) Z = single scalar (shape = ()), one constant value
# 2) Z = 1D array, shape=(kmax), a set of constant depths
# 3) Z = 2D or more, reshapeable to (kmax, M)
if Z.ndim == 0:
Z = Z + np.zeros((1, M))
kmax = 1
elif Z.ndim == 1:
kmax = Z.size
Z = Z[:, np.newaxis] + np.zeros((kmax, M))
else:
kmax = Z.size // M
Z = Z.reshape((kmax, M))
# Find C, C.shape = (kmax, M) such that
# z_r[C[k,i]-1, i] < Z[k] <= z_r[C[k,i], i]
# shape: kmax, N, M => kmax, M
C = np.sum(S[np.newaxis, :, :] < Z[:, np.newaxis, :], axis=1)
C = C.clip(1, N - 1)
# Horizontal index
I = np.arange(M, dtype=int)
# Compute interpolation weights
A = (Z - S[(C - 1, I)]) / (S[(C, I)] - S[(C - 1, I)])
A = A.clip(0.0, 1.0) # Control the extrapolation
# Do the interpolation
R = (1 - A) * F[(C - 1, I)] + A * F[(C, I)]
# Give the result the correct shape
R = R.reshape((kmax,) + Fshape[1:])
return R
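# Usage sketch (not part of the original module): interpolate a 3D field to a
# few fixed depths in one call, e.g. 10 m, 50 m and 100 m below the surface.
#
#   Z = [-10.0, -50.0, -100.0]
#   F_z = multi_zslice(F, z_rho, Z)    # F_z.shape == (3,) + F.shape[1:]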
# ------------------------------------------------------
def z_average(F, z_r, z0, z1):
"""Slice a 3D ROMS field to fixed depth
Vertical interpolation of a field in s-coordinates to
fixed vertical level
*F* : array
Vertical profiles, first dimension is vertical
*z_r* : array
Depth of s-levels (at rho-points), requires `z_r.shape = F.shape`
*z0*, *z1* : floats
Single depth values with z0 <= z1 <= 0
return value : array
`shape = F.shape[1:]`, the vertical average
"""
F = np.asarray(F)
z_r = np.asarray(z_r)
Fshape = F.shape # Save original shape
# Flatten all dimensions after first
N = F.shape[0]
M = F.size // N
F = F.reshape((N, M))
z_r = z_r.reshape((N, M))
# z0, z1 are scalars or horizontal arrays
z0 = np.asarray(z0)
if z0.shape: # Array, must be 2D
z0 = z0.reshape((M,))
z1 = np.asarray(z1)
if z1.shape:
z1 = z1.reshape((M,))
# Bracket z0, i.e.
# Find integer array C0 with shape (M,)
# with z_r[C0[i]-1, i] < z0 <= z_r[C0[i], i]
# Can be done with:
# C0 = np.apply_along_axis(np.searchsorted, 0, z_r, z0)
# but the following is much faster
C0 = np.sum(z_r < z0, axis=0)
C0 = C0.clip(1, N - 1) # Clip to avoid illegal indices
# Bracket z1
C1 = np.sum(z_r < z1, axis=0)
C1 = C1.clip(1, N - 1)
# Use advanced indexing for vectorisation
# F[(C0,I)][i] = F[C0[i], i]
I = np.arange(M, dtype="int")
# Interpolate F to the two levels
A0 = (z0 - z_r[(C0 - 1, I)]) / (z_r[(C0, I)] - z_r[(C0 - 1, I)])
A0 = A0.clip(0.0, 1.0) # Control the extrapolation
F0 = (1 - A0) * F[(C0 - 1, I)] + A0 * F[(C0, I)]
A1 = (z1 - z_r[(C1 - 1, I)]) / (z_r[(C1, I)] - z_r[(C1 - 1, I)])
A1 = A1.clip(0.0, 1.0)
F1 = (1 - A1) * F[(C1 - 1, I)] + A1 * F[(C1, I)]
# Find indices again (unclipped)
C0 = np.sum(z_r < z0, axis=0)
C1 = np.sum(z_r < z1, axis=0)
R = np.zeros(M, dtype=np.float64)
X = np.zeros(N + 2, dtype=np.float64)
Y = np.zeros(N + 2, dtype=np.float64)
z0 = z0 + R # Make sure they are spatial arrays
z1 = z1 + R # For indexing below
for i in I:
X[:] = 0.0
Y[:] = 0.0
nz = C1[i] - C0[i] # Number of rho-points between z0 and z1
# Set up arrays for trapezoidal integration
X[0] = z0[i]
X[1 : nz + 1] = z_r[C0[i] : C1[i], i]
X[nz + 1] = z1[i]
Y[0] = F0[i]
Y[1 : nz + 1] = F[C0[i] : C1[i], i]
Y[nz + 1] = F1[i]
# Perform the integration
R[i] = 0.5 * np.dot(
X[1 : nz + 2] - X[0 : nz + 1], Y[1 : nz + 2] + Y[0 : nz + 1]
)
# Compute average and revert to correct shape
R = R / (z1 - z0)
R = R.reshape(Fshape[1:])
return R
# ----------------------------------
def s_stretch(N, theta_s, theta_b, stagger="rho", Vstretching=1):
"""Compute a s-level stretching array
*N* : Number of vertical levels
*theta_s* : Surface stretching factor
*theta_b* : Bottom stretching factor
*stagger* : "rho"|"w"
*Vstretching* : 1|2|3|4|5
"""
# if stagger == "rho":
# S = -1.0 + (0.5 + np.arange(N)) / N
# elif stagger == "w":
# S = np.linspace(-1.0, 0.0, N + 1)
if stagger == "rho":
K = np.arange(0.5, N)
elif stagger == "w":
K = np.arange(N + 1)
else:
raise ValueError("stagger must be 'rho' or 'w'")
S = -1 + K / N
if Vstretching == 1:
cff1 = 1.0 / np.sinh(theta_s)
cff2 = 0.5 / np.tanh(0.5 * theta_s)
return (1.0 - theta_b) * cff1 * np.sinh(theta_s * S) + theta_b * (
cff2 * np.tanh(theta_s * (S + 0.5)) - 0.5
)
elif Vstretching == 2:
a, b = 1.0, 1.0
Csur = (1 - np.cosh(theta_s * S)) / (np.cosh(theta_s) - 1)
Cbot = np.sinh(theta_b * (S + 1)) / np.sinh(theta_b) - 1
mu = (S + 1) ** a * (1 + (a / b) * (1 - (S + 1) ** b))
return mu * Csur + (1 - mu) * Cbot
elif Vstretching == 3:
gamma_ = 3.0
Csur = -np.log(np.cosh(gamma_ * (-S) ** theta_s)) / np.log(np.cosh(gamma_))
# Csur = -np.log(np.cosh(gamma_ * np.abs(S) ** theta_s)) / np.log(np.cosh(gamma_))
Cbot = (
np.log(np.cosh(gamma_ * (S + 1) ** theta_b)) / np.log(np.cosh(gamma_)) - 1
)
mu = 0.5 * (1 - np.tanh(gamma_ * (S + 0.5)))
return mu * Csur + (1 - mu) * Cbot
elif Vstretching == 4:
C = (1 - np.cosh(theta_s * S)) / (np.cosh(theta_s) - 1)
C = (np.exp(theta_b * C) - 1) / (1 - np.exp(-theta_b))
return C
elif Vstretching == 5:
S1 = (K * K - 2 * K * N + K + N * N - N) / (N * N - N)
S2 = (K * K - K * N) / (1 - N)
S = -S1 - 0.01 * S2
C = (1 - np.cosh(theta_s * S)) / (np.cosh(theta_s) - 1)
C = (np.exp(theta_b * C) - 1) / (1 - np.exp(-theta_b))
return C
else:
raise ValueError("Unknown Vstretching")
# wrapper for backwards compatibility
def s_stretch_w(N, theta_s, theta_b, Vstretching=1):
"""Obsolete use *s_stretch* instead"""
return s_stretch(N, theta_s, theta_b, stagger="w", Vstretching=Vstretching)
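# Minimal self-check / usage sketch (not part of the original module): build a
# Vstretching=4 / Vtransform=2 vertical grid for a small idealized topography
# and verify that all layer mid-point depths fall between the bottom and the
# surface.  The parameter values are arbitrary illustrations.
if __name__ == "__main__":
    H = np.array([[100.0, 500.0], [1000.0, 3000.0]])  # bottom depth [m, positive]
    Hc = 20.0                                          # critical depth [m]
    C = s_stretch(30, theta_s=5.0, theta_b=0.4, Vstretching=4)
    z_rho = sdepth(H, Hc, C, stagger="rho", Vtransform=2)
    assert z_rho.shape == (30,) + H.shape
    assert (z_rho < 0.0).all() and (z_rho > -H).all()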
|
py | 1a4de0c57d8c114e92f9ea1abefe481ed367c284 | from errors import ArchMinerError
class StaticMinerError(ArchMinerError):
def __init__(self, message):
self.message = message
class DynamicMinerError(ArchMinerError):
def __init__(self, message):
self.message = message
|
py | 1a4de12e4e8379421cfd4198127f0d98e94c2406 | word = input('>').split()
emojis = {
':)' : '😊',
':(' : '😥',
':D' : '😄'
}
for i in word:
print(emojis.get(i, i), end = ' ')
|
py | 1a4de152823b45361759740383933a723c593497 | {%- if cookiecutter.copyright != "None" -%}
# Copyright (c) {% now "utc", '%Y' %}, {{ cookiecutter.copyright }}. Unauthorised use, distribution or duplication is prohibited
{% endif %}
"""
{{ cookiecutter.project_name }}.
{{ cookiecutter.library_name }}
"""
import argparse
from flask import Flask
from flask_cors import CORS
from flask_swagger_ui import get_swaggerui_blueprint
from blueprints.health import blueprint as health_endpoint
from blueprints.version import blueprint as version_endpoint
from observability.logger import Logger
SWAGGER_URL = "/api/docs"
API_URL = "/static/swagger.json"
SWAGGER_UI_BLUEPRINT = get_swaggerui_blueprint(
SWAGGER_URL,
API_URL,
config={"app_name": "{{ cookiecutter.project_name }} {{ cookiecutter.library_name }} Api"},
)
logger = Logger.init("{{ cookiecutter.__project_name_slug }}")
def create_app():
"""Initialize the core application."""
app = Flask(__name__)
CORS(app, resources=r"/api/*")
app.config["CORS_HEADERS"] = "Content-Type"
app.register_blueprint(version_endpoint)
app.register_blueprint(health_endpoint)
app.register_blueprint(SWAGGER_UI_BLUEPRINT, url_prefix=SWAGGER_URL)
return app
def serve(app, address, port, middleware=None):
"""Serve the application."""
if middleware is not None:
middleware(app)
logger.info("{{ cookiecutter.project_name }} {{ cookiecutter.library_name }} Server starting...")
app.run(host=address, port=port)
if __name__ == "__main__":
app = create_app()
logger.info("server.py main : parsing arguments")
parser = argparse.ArgumentParser()
parser.add_argument("--address", help="Set server address", default="0.0.0.0")
parser.add_argument("-p", "--port", type=int, help="Set server port", default=5000)
args = parser.parse_args()
serve(app=app, address=args.address, port=args.port)
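# Example invocation (illustrative, not part of the original template):
#
#   python server.py --address 0.0.0.0 --port 5000
#
# When embedding the app elsewhere, call serve(app, address, port, middleware)
# with an optional callable that is applied to the Flask app before it runs.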
|
py | 1a4de3f22b0711a3086ef33d10468c4c3d8a8459 | import subprocess
import os
import urllib.request
import sys
from typing import Optional
from conapp.file_paths import get_snapshot_filename
from conapp.validate import validate_subprocess
from conapp.definitions import USER_HOME_DIR, DEFAULT_STRIP_COMPONENTS
def apply_config(file_name: str) -> None:
"""
A wrapper around apply snapshot but for stripping the top level
:param file_name:
:return:
"""
return apply_snapshot(file_name, True)
def apply_snapshot(file_name: str, strip_top_level=False) -> None:
"""Given file_name use tar to apply it to the users home directory"""
    if not os.path.isfile(file_name):
        print(f"Error! attempted to apply nonexistent snapshot {file_name}")
        return
print(f"Applying snapshot {file_name}")
validate_subprocess(
subprocess.run([
'tar',
'-C',
USER_HOME_DIR,
DEFAULT_STRIP_COMPONENTS if strip_top_level else '',
'--show-transformed-names',
'-zvxf',
file_name,
])
)
def create_snapshot(file_name: str) -> Optional[str]:
file_names_result = get_files_from_tar(file_name, True)
files = list(
filter(
lambda file_path: os.path.isfile(os.path.expanduser(f"~/{file_path}")),
file_names_result.stdout.split()
)
)
if len(files) > 0:
snapshot_name = get_snapshot_filename()
backup_command = [
'tar',
'-C',
USER_HOME_DIR,
'-czvf',
snapshot_name,
] + files
print(f"Local files would get overridden, creating backup of: {' '.join(files)}")
validate_subprocess(subprocess.run(
backup_command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
))
print(f"Successfully backed up files to {snapshot_name}")
return snapshot_name
else:
print("No files will be overridden, not creating backup")
return None
def get_files_from_tar(file_name: str, strip_top_level=False) -> subprocess.CompletedProcess:
get_file_names_command = [
"tar",
DEFAULT_STRIP_COMPONENTS if strip_top_level else '',
'--show-transformed-names',
'-tf',
file_name
]
result = subprocess.run(
get_file_names_command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
validate_subprocess(result)
return result
def download_file(file_name: str, url: str) -> None:
"""Attempt to download a file or exit"""
try:
print(f"Attempting to download {url}")
urllib.request.urlretrieve(url, file_name)
print(f"Success, downloaded to {file_name}")
except urllib.request.HTTPError as ex:
print(f"Error occurred, does {url} exist?\n{ex}")
sys.exit(-1)
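# Usage sketch (not part of the original module; the URL and file name are
# hypothetical): fetch a config archive, back up any local files it would
# overwrite, then apply it.
#
#   download_file("config.tar.gz", "https://example.com/config.tar.gz")
#   snapshot = create_snapshot("config.tar.gz")   # None if nothing would be overwritten
#   apply_config("config.tar.gz")
#   # A previous state can later be restored with apply_snapshot(snapshot).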
|
py | 1a4de4325c17085aef09084eb7bbe59bda40e6fa | from django.conf.urls import url
from . import views
app_name = 'polls'
urlpatterns = [
# ex: /polls/
url(r'^$', views.IndexView.as_view(), name='index'),
# ex: /polls/5/
url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),
# ex: /polls/5/results/
url(r'^(?P<pk>[0-9]+)/results/$', views.ResultsView.as_view(), name='results'),
# ex: /polls/5/vote/
url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'),
]
|
py | 1a4de55feda76c0205e62e04cd7aa25cefb6ab05 | # Generated by Django 3.0.6 on 2020-06-03 19:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('content', models.TextField()),
('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
py | 1a4de59f5dd9e4ab818d3c023e5aa8f40ae3df04 | from typing import Tuple
import PIL
import torch
import torchvision.transforms as transforms
from .datasets import DATASET_STATS, SUPPORTED_DATASETS
from .gaussian_blur import GaussianBlur
class SimCLRDataTransform:
"""Applies augmentations to sample two times, as described in SimCLR paper"""
def __init__(self, transform: transforms.Compose):
self.transform = transform
def __call__(self, sample: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
xi = self.transform(sample)
xj = self.transform(sample)
return xi, xj
class ContrastiveAugmentor:
"""Applies augmentation for contrastive learning, as in SimCLR paper"""
def __init__(self, dataset: str, input_size: Tuple[int, int, int]):
"""
Args:
dataset: dataset to apply augmentations to
input_size: input image size
Raises:
ValueError: if specified dataset is unsupported
"""
if dataset not in SUPPORTED_DATASETS:
raise ValueError('Unsupported dataset')
stats = DATASET_STATS[dataset]
h, w = input_size[:2]
size = (h, w)
blur_kernel_size = 2 * int(.05 * h) + 1
color = transforms.ColorJitter(0.8, 0.8, 0.8, 0.2)
augmentations = transforms.Compose([
transforms.Resize(size, interpolation=PIL.Image.LANCZOS),
transforms.RandomResizedCrop(size=size, interpolation=PIL.Image.LANCZOS),
transforms.RandomHorizontalFlip(),
transforms.RandomApply([color], p=0.8),
transforms.RandomGrayscale(p=0.2),
GaussianBlur(kernel_size=blur_kernel_size),
transforms.ToTensor(),
transforms.Normalize(mean=stats['mean'], std=stats['std'])
])
self._augmentations = SimCLRDataTransform(augmentations)
def __call__(self, sample: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return self._augmentations(sample)
class ValidAugmentor:
"""Applies augmentation for validation and testing"""
def __init__(self, dataset: str, input_size: Tuple[int, int, int]):
"""
Args:
dataset: dataset to apply augmentations to
input_size: input image size
Raises:
ValueError: if specified dataset is unsupported
"""
if dataset not in SUPPORTED_DATASETS:
raise ValueError('Unsupported dataset')
stats = DATASET_STATS[dataset]
h, w = input_size[:2]
size = (h, w)
self._augmentations = transforms.Compose([
transforms.Resize(size=size),
transforms.ToTensor(),
transforms.Normalize(mean=stats['mean'], std=stats['std'])
])
def __call__(self, sample: torch.Tensor) -> torch.Tensor:
return self._augmentations(sample)
class PatchAugmentor:
"""Applies augmentations to patch"""
def __init__(self, input_size: Tuple[int, int, int]):
"""
Args:
input_size: input image size
"""
h, w = input_size[:2]
size = (h, w)
self._augmentations = transforms.Compose([
transforms.Resize(size=size),
transforms.ToTensor()
])
def __call__(self, sample: torch.Tensor) -> torch.Tensor:
return self._augmentations(sample)
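# Usage sketch (not part of the original module; 'cifar10' is assumed to be an
# entry of SUPPORTED_DATASETS): wrap a torchvision dataset so every sample
# yields the two augmented views required by the SimCLR contrastive loss.
#
#   augmentor = ContrastiveAugmentor('cifar10', input_size=(32, 32, 3))
#   dataset = torchvision.datasets.CIFAR10(root='.', download=True, transform=augmentor)
#   (xi, xj), label = dataset[0]   # two differently augmented views of one image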
|
py | 1a4de5bfd25348d210660ea307cdfd440a52e9a1 | import reveallib
import reveallib64
from utils import *
from multiprocessing.pool import Pool
import signal
import os
import math
import argparse
import logging
import intervaltree
import matplotlib
import sortedcontainers
import time
def plot(plt,anchors,sep,wait=True,nc='r',rc='g',color=None,edges=False,lines=False,alpha=1,args=None):
if len(anchors)==0:
return
if len(anchors[0])==2: #unaligned blocks
for start,stop in anchors:
ax = plt.axes()
if start<sep: #ref
ax.add_patch(
matplotlib.patches.Rectangle(
(start, 0), #bottom left
stop-start, #width
sep, #height #should be qry length!
alpha=.25,
color="blue"
)
)
else:
ax.add_patch(
matplotlib.patches.Rectangle(
(0, start-sep), #bottom left
sep, #width
stop-start, #height
alpha=.25,
color="grey"
)
)
elif len(anchors[0])==3: #mums
for l,sps,revcomp in anchors:
if revcomp:
plt.plot( (sps[0],sps[0]+l), ((sps[1]-sep)+l, (sps[1]-sep)),'%s-'%rc,alpha=alpha)
else:
plt.plot( (sps[0],sps[0]+l), ((sps[1]-sep), (sps[1]-sep)+l),'%s-'%nc,alpha=alpha)
elif len(anchors[0])==4: #synteny blocks, without orientation
for anchor in anchors:
s1,e1,s2,e2=anchor
ax = plt.axes()
ax.add_patch(
matplotlib.patches.Rectangle(
(s1, s2-sep), #bottom left
e1-s1, #width
e2-s2, #height
alpha=.5,
color=color
)
)
elif len(anchors[0])==5: #synteny blocks with orientation
for anchor in anchors:
s1,e1,s2,e2,revcomp=anchor
ax = plt.axes()
ax.add_patch(
matplotlib.patches.Rectangle(
(s1, s2-sep), #bottom left
e1-s1, #width
e2-s2, #height
alpha=.25,
color="green" if revcomp else "red"
)
)
elif len(anchors[0])==8: #synteny blocks with score and ctg
if edges:
for c in [0,2]:
anchors.sort(key=lambda a:a[c])
xedges,yedges=[],[]
panchor=None
for anchor in anchors:
s1,e1,s2,e2,revcomp,score,ref,ctg=anchor
if panchor!=None:
ps1,pe1,ps2,pe2,prevcomp,pscore,pref,pctg=panchor
if pctg!=ctg and pref!=ref:
panchor=anchor
continue
if c==0:
xedges.append(pe1)
xedges.append(s1)
xedges.append(None)
if prevcomp:
yedges.append(ps2-sep)
else:
yedges.append(pe2-sep)
if revcomp:
yedges.append(e2-sep)
else:
yedges.append(s2-sep)
yedges.append(None)
else:
if prevcomp:
xedges.append(ps1)
else:
xedges.append(pe1)
if revcomp:
xedges.append(e1)
else:
xedges.append(s1)
xedges.append(None)
yedges.append(pe2-sep)
yedges.append(s2-sep)
yedges.append(None)
panchor=anchor
if c==0:
plt.plot(xedges,yedges,'b--',alpha=alpha)
else:
plt.plot(xedges,yedges,'y--',alpha=alpha)
if lines:
rcxpoints,xpoints=[],[]
rcypoints,ypoints=[],[]
for anchor in anchors:
s1,e1,s2,e2,revcomp,score,ref,ctg=anchor
# plt.text(s1+((e1-s1)/2),(s2-sep)+(((e2-sep)-(s2-sep))/2) ,str(anchor),fontsize=6)
if revcomp:
# plt.plot((s1,e1), (e2-sep,s2-sep),'g-')
rcxpoints.append(s1)
rcxpoints.append(e1)
rcxpoints.append(None)
rcypoints.append(e2-sep)
rcypoints.append(s2-sep)
rcypoints.append(None)
else:
# plt.plot((s1,e1), (s2-sep,e2-sep),'r-')
xpoints.append(s1)
xpoints.append(e1)
xpoints.append(None)
ypoints.append(s2-sep)
ypoints.append(e2-sep)
ypoints.append(None)
plt.plot(xpoints,ypoints,'r-' if color==None else '%s-'%color,alpha=alpha)
plt.plot(rcxpoints,rcypoints,'g-' if color==None else '%s-'%color,alpha=alpha)
else: #plot squares
for anchor in anchors:
s1,e1,s2,e2,revcomp,score,ref,ctg=anchor
ax = plt.axes()
ax.add_patch(
matplotlib.patches.Rectangle(
(s1, s2-sep), #bottom left
e1-s1, #width
e2-s2, #height
alpha=.25,
color="green" if revcomp else "red"
)
)
if wait:
plt.show()
else:
plt.draw()
def addctginfo(mums,ctg2range):
logging.debug("Augment contig information.")
#add ref information to mums
mums.sort(key=lambda m: m[1][0]) #sort mums by ref domain
intvidx=0
for i in range(len(mums)):
while mums[i][1][0]>ctg2range[intvidx][1]:
intvidx+=1
mums[i]=mums[i]+(intvidx,)
#add contig information to mums
mums.sort(key=lambda m: m[1][1]) #sort mums by query domain
intvidx=0
for i in range(len(mums)):
while mums[i][1][1]>ctg2range[intvidx][1]:
intvidx+=1
mums[i]=mums[i]+(intvidx,)
logging.debug("Done.")
return mums
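# Note on the anchor layout used below (inferred from the code, not from
# original documentation): after addctginfo() every MUM is a tuple
#   (length, (ref_start, qry_start), is_reverse_complement, ref_ctg_idx, qry_ctg_idx)
# and each clustered/unclustered block is an 8-tuple
#   (ref_start, ref_end, qry_start, qry_end, is_rc, score, ref_ctg_idx, qry_ctg_idx),
# which is the format expected by plot() and the chaining routines.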
def transform_cmd(args):
for qry in args.contigs:
logging.info("Running transform for %s"%qry)
transform(args,qry)
logging.info("Done")
def transform(args,qry):
if not args.interactive:
matplotlib.use("agg")
if args.plot:
from matplotlib import pyplot as plt
if args.output==None:
prefix=os.path.splitext(os.path.basename(qry))[0]
else:
prefix=args.output
refnames=[]
ctgnames=[]
if args.sa64:
idx=reveallib64.index()
else:
idx=reveallib.index()
ctg2range=[]
for sample in [args.reference[0],qry]:
idx.addsample(os.path.basename(sample))
for name,seq in fasta_reader(sample, cutN=args.cutn):
if len(seq)<args.minctglength:
logging.debug("Skip transform for contig: %s"%name)
continue
intv=idx.addsequence(seq)
ctg2range.append(intv)
if sample==args.reference[0]:
refnames.append(name)
else:
ctgnames.append(name)
T=idx.T
logging.info("Compute mums.")
idx.construct(rc=False)
mums=addctginfo(idx.getmums(args.minlength),ctg2range)
logging.info("Done, %d mums."%len(mums))
if args.cluster:
logging.info("Cluster mums by diagonals.")
blocks=clustermumsbydiagonal(mums,maxdist=args.maxdist,minclustsize=args.mincluster,rcmums=False)
logging.info("Done, %d clusters."%len(blocks))
else:
blocks=[(mum[1][0], mum[1][0]+mum[0], mum[1][1], mum[1][1]+mum[0], mum[2], mum[0], mum[3], mum[4]) for mum in mums]
# rcidx=idx.copy()
# rcidx.construct(rc=True)
# mums+=rcidx.getmums(args.minlength)
logging.info("Compute RC mums.")
idx.construct(rc=True)
rcmums=addctginfo(idx.getmums(args.minlength),ctg2range)
logging.info("Done, %d rc mums."%len(rcmums))
sep=idx.nsep[0]
idxn=idx.n
rlength=idx.nsep[0]
qlength=idxn-idx.nsep[0]
del idx
if args.cluster:
logging.info("Cluster rc mums by anti-diagonals.")
if len(rcmums)==0:
rcblocks = [(mum[1][0], mum[1][0] + mum[0], mum[1][1], mum[1][1] + mum[0], mum[2], mum[0], mum[3], mum[4]) for mum in rcmums]
else:
rcblocks=clustermumsbydiagonal(rcmums,maxdist=args.maxdist,minclustsize=args.mincluster,rcmums=True)
logging.info("Done, %d rc clusters."%len(rcblocks))
else:
rcblocks=[(mum[1][0], mum[1][0]+mum[0], mum[1][1], mum[1][1]+mum[0], mum[2], mum[0], mum[3], mum[4]) for mum in rcmums]
blocks+=rcblocks
if args.plot:
plot(plt,blocks,sep,wait=False,lines=True,alpha=0.2,args=args)
# if args.plot:
# plot(blocks,sep,wait=False,lines=True)
logging.info("Start glocal chaining for filtering anchors (reference).")
# blocks.sort(key=lambda b: b[1]-b[0])
# logging.info("Largest ref block: %s"%str(blocks[-1]))
# minbacktrack=blocks[-1][1]-blocks[-1][0]
nbefore=len(blocks)
syntenyblocks=blocks
nafter=None
refiteration=0
# maxiter=1
while nbefore!=nafter:# or refiteration==maxiter:
logging.info("Glocal chain iteration %d"%refiteration)
nbefore=len(syntenyblocks)
syntenyblocks=glocalchain(syntenyblocks,rlength,qlength,ctg2range,rearrangecost=args.rearrangecost,
inversioncost=args.inversioncost,
_lambda=args._lambda,
eps=args.eps,
useheap=args.useheap,
lastn=args.lastn,
lastbp=args.lastbp,
alfa=args.alfa,
gapopen=args.gapopen,
axis=0)
nafter=len(syntenyblocks)
logging.info("Anchor before chaining: %s"%nbefore)
logging.info("Anchor after chaining: %s"%nafter)
refiteration+=1
if args.plot:
plot(plt,syntenyblocks,sep,wait=False,lines=True,color='k',alpha=.7)
logging.info("%d anchors remain after glocal chaining (reference)."%len(syntenyblocks))
logging.info("Start glocal chaining for filtering anchors (query).")
nbefore=len(syntenyblocks)
nafter=None
qryiteration=0
while nbefore!=nafter:# or qryiteration==maxiter:
logging.info("Glocal chain iteration %d"%qryiteration)
nbefore=len(syntenyblocks)
syntenyblocks=glocalchain(syntenyblocks,rlength,qlength,ctg2range,rearrangecost=args.rearrangecost,
inversioncost=args.inversioncost,
_lambda=args._lambda,
eps=args.eps,
useheap=args.useheap,
lastn=args.lastn,
lastbp=args.lastbp,
alfa=args.alfa,
gapopen=args.gapopen,
axis=1)
nafter=len(syntenyblocks)
logging.info("Anchor before chaining: %s"%nbefore)
logging.info("Anchor after chaining: %s"%nafter)
qryiteration+=1
# G=localcolinearchains(syntenyblocks,rlength,qlength,rearrangecost=rearrangecost,inversioncost=inversioncost)
# chain,rcchain=colinearchains(syntenyblocks,rlength,qlength)
logging.info("%d anchors remain after glocal chaining (query)."%len(syntenyblocks))
if args.plot:
plot(plt,syntenyblocks,sep,wait=False,lines=True,color='b',alpha=.7)
#take the intersection of both the chains
# logging.info("Determine intersection between the chains...")
# syntenyblocks=list(set(rsyntenyblocks) & set(qsyntenyblocks))
# logging.info("Done. %d chains remain."%len(qsyntenyblocks))
# logging.info("Remove anchors that are contained in other clusters."
# syntenyblocks=remove_contained_blocks(blocks)
# logging.info("Done, %d anchors remain."%len(syntenyblocks))
# logging.info("Done.")
logging.info("Merge consecutive blocks.")
syntenyblocks=merge_consecutive(syntenyblocks)
logging.info("%d blocks after merging consecutive blocks."%len(syntenyblocks))
# if args.plot:
# plot(syntenyblocks,sep,wait=True,lines=True,color='b')
# logging.info("Merge consecutive blocks.")
# syntenyblocks=merge_consecutive(syntenyblocks)
# logging.info("%d blocks after merging consecutive blocks."%len(syntenyblocks))
if args.greedy:
logging.info("Assign overlap between MUMs in a greedy manner.")
syntenyblocks=remove_overlap_greedy_blocks(syntenyblocks)
logging.info("Done.")
else:
logging.info("Assign overlap between MUMs in a conservative manner.")
syntenyblocks=remove_overlap_conservative_blocks(syntenyblocks)
logging.info("Done.")
logging.info("Remove all blocks that are shorter than minchainsum (%d)."%args.minchainsum)
syntenyblocks=[b for b in syntenyblocks if b[5] >= args.minchainsum]
logging.info("%d blocks after filtering for minchainsum."%len(syntenyblocks))
logging.info("Merge consecutive blocks.")
syntenyblocks=merge_consecutive(syntenyblocks)
logging.info("%d blocks after merging consecutive blocks."%len(syntenyblocks))
if args.optimise and len(syntenyblocks)>1:
weight,cost,edgecosts=chainscore(syntenyblocks, rlength, qlength, ctg2range,rearrangecost=args.rearrangecost,inversioncost=args.inversioncost,_lambda=args._lambda,eps=args.eps,alfa=args.alfa,gapopen=args.gapopen) #determine the actual cost of the glocal chain
score=weight-cost
assert(len(edgecosts) == len(syntenyblocks)+1)
iteration=0
while True:
iteration+=1
logging.info("Optimise chain, iteration %d."%iteration)
tsyntenyblocks,tweight,tcost,tedgecosts=optimise(syntenyblocks,rlength, qlength, ctg2range,rearrangecost=args.rearrangecost,inversioncost=args.inversioncost,_lambda=args._lambda,eps=args.eps,alfa=args.alfa,gapopen=args.gapopen)
nscore=tweight-tcost
if nscore<=score:
break
else:
score=nscore
syntenyblocks=tsyntenyblocks
weight=tweight
cost=tcost
edgecosts=tedgecosts
syntenyblocks=merge_consecutive(syntenyblocks)
logging.info("Done. %d blocks after optimisation."%len(syntenyblocks))
syntenyblocks=merge_consecutive(syntenyblocks)
weight,cost,edgecosts=chainscore(syntenyblocks, rlength, qlength, ctg2range,rearrangecost=args.rearrangecost,inversioncost=args.inversioncost,_lambda=args._lambda,eps=args.eps,alfa=args.alfa,gapopen=args.gapopen) #determine the actual cost of the glocal chain
score=weight-cost
assert(len(edgecosts) == len(syntenyblocks)+1)
if args.outputbed: #before extending to the edges of the contig, output the breakpoint regions
logging.info("Write bedfile with contig mappings on reference to: %s.bed"%prefix)
with open(prefix+".bed",'w') as bedout:
block2ctgidx=dict()
pctgid=None
ctgid2lastblock=dict()
ci=0
syntenyblocks.sort(key=lambda b: b[2]) #sort by query
for i,block in enumerate(syntenyblocks): #sorted by query
s1,e1,s2,e2,o,score,refid,ctgid=block
if ctgid!=pctgid:
if pctgid!=None:
ctgid2lastblock[pctgid]=ci
ci=0
else:
ci+=1
block2ctgidx[block]=ci
pctgid=ctgid
ctgid2lastblock[pctgid]=ci
syntenyblocks.sort(key=lambda b: b[0]) #sort by reference
bedout.write("#reference\trefbegin\trefend\tcontig:segmentidx:lastsegmentidx:begin:end\tscore:cost\torientation\taln-start\taln-end\n")
pblock=None
for i,block in enumerate(syntenyblocks): #sorted by reference
s1,e1,s2,e2,o,score,refid,ctgid=block
if i>0:
ps1,pe1,ps2,pe2,po,pscore,prefid,pctgid=pblock
else:
pblock=None
cost=edgecosts[i] #cost to connect to pblock to block
if i<len(syntenyblocks)-1: #there is a next block
nblock=syntenyblocks[i+1]
ns1,ne1,ns2,ne2,no,nscore,nrefid,nctgid=nblock
else:
nblock=None
ctgoffsets=ctg2range[ctgid]
refoffsets=ctg2range[refid]
if pblock!=None and prefid==refid:
start=(s1-refoffsets[0])-((s1-pe1)/2)
else:
start=s1-refoffsets[0]
if nblock!=None and nrefid==refid:
end=(e1-refoffsets[0])+((ns1-e1)/2)
else:
end=e1-refoffsets[0]
qstart=s2-ctgoffsets[0]
qend=e2-ctgoffsets[0]
chromname=refnames[refid].split()[0]
qi=block2ctgidx[block]
bedout.write("%s\t%d\t%d\t%s:%d:%d:%d:%d\t%d:%d\t%s\t%d\t%d\n"%(chromname, #chrom
start, #start
end, #end
ctgnames[ctgid-len(refnames)].split()[0], #name, make sure there's no whitespace to comply with bed 'format'
qi,
ctgid2lastblock[ctgid],
qstart,
qend,
score,
cost,
'+' if o==False else '-', #strand
s1-refoffsets[0], #thick start
e1-refoffsets[0]) #thick end
#itemRgb
#blockCount
#blockSizes
#blockStarts
)
#bedout.write("%s\t%d\t%d\t%s\t%s\t%s\t%s\n"%(refnames[refid], pe1-refoffsets[0], s1-refoffsets[0], ctgnames[ctgid-len(refnames)], ctgnames[pctgid-len(refnames)], 'n' if po==False else 'r', 'n' if o==False else 'r'))
pblock=block
if args.plot:
plot(plt,syntenyblocks,sep,wait=False,args=args)
logging.debug("Extend %d blocks to query borders."%len(syntenyblocks))
extendblocks(syntenyblocks,ctg2range)
logging.debug("Done.")
if args.plot:
for start,end in ctg2range:
if start<sep:
plt.axvline(x=start, ymin=0, ymax=idxn-sep, linewidth=.1, linestyle='solid')
else:
plt.axhline(y=start-sep, xmin=0, xmax=sep, linewidth=.1, linestyle='solid')
plot(plt,syntenyblocks,sep,wait=False,edges=False,args=args)
plt.xlim(0,rlength)
plt.ylim(0,qlength)
if args.interactive:
plt.show()
else:
plt.savefig("%s.png"%(prefix))
plt.clf()
#determine the subset of mappable contigs from ref and qry
mappablectgs=set()
for s1,e1,s2,e2,o,score,refid,ctgid in syntenyblocks:
mappablectgs.add(ctgid)
mappablectgs.add(refid)
if len(mappablectgs)!=0:
logging.info("Write breakpoint graph to: %s.gfa"%prefix)
write_breakpointgraph(syntenyblocks,T,refnames,ctgnames,mappablectgs,prefix)
else:
logging.info("No mappable contigs.")
def clustermumsbydiagonal(mums,maxdist=90,minclustsize=65,rcmums=True):
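# Greedily cluster MUMs that lie on the same diagonal (anti-diagonal for reverse-complement
# matches), come from the same contig/reference pair and start within maxdist of the end of
# the previous cluster; clusters scoring less than minclustsize are discarded. Returned
# tuples follow the block layout used elsewhere: (s1, e1, s2, e2, orientation, score, ...).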
logging.debug("Sorting anchors by diagonals...")
if rcmums:
mums.sort(key=lambda m: (m[1][0]+(m[1][1]+m[0]), m[1][0]-(m[1][1]+m[0])) ) #sort mums by anti-diagonal, then diagonal
else:
mums.sort(key=lambda m: (m[1][0]-m[1][1], m[1][0]+m[1][1])) #sort mums by diagonal, then anti-diagonal
logging.debug("Done.")
l,sps,rc,ctg,ref=mums[0]
clusters=[(sps[0],sps[0]+l,sps[1],sps[1]+l,rc,l,ctg,ref)]
update_progress(0,len(mums))
for i in xrange(1,len(mums)):
update_progress(i,len(mums))
l,sps,rc,ctg,ref=mums[i]
s1,e1,s2,e2,prc,score,pctg,pref=clusters[-1]
if rcmums:
d=mums[i][1][0]+(mums[i][1][1]+mums[i][0])
pd=e1+s2
else:
d=mums[i][1][0]-mums[i][1][1]
pd=s1-s2
if d==pd and pctg==ctg and pref==ref: #same diagonal and same contigs
dist=mums[i][1][0]-e1
assert(dist>=0)
if dist < maxdist:
if rc==0:
clusters[-1]=(s1,sps[0]+l,s2,sps[1]+l,rc,score+l,ctg,ref)
else:
clusters[-1]=(s1,sps[0]+l,sps[1],e2,rc,score+l,ctg,ref)
else:
clusters.append((sps[0],sps[0]+l,sps[1],sps[1]+l,rc,l,ctg,ref))
else:
clusters.append((sps[0],sps[0]+l,sps[1],sps[1]+l,rc,l,ctg,ref))
return [c for c in clusters if c[5]>=minclustsize]
def write_breakpointgraph(syntenyblocks,T,refnames,ctgnames,mappablectgs,outputprefix):
#build a breakpoint graph, that we can write to GFA
G=nx.MultiDiGraph()
start=uuid.uuid4().hex
end=uuid.uuid4().hex
G.graph['startnodes']=[start]
G.graph['endnodes']=[end]
G.graph['paths']=[]
G.graph['path2id']={}
G.graph['id2path']={}
G.add_node(start,offsets=dict())
G.add_node(end,offsets=dict())
pid=0
for name in refnames:
if pid in mappablectgs:
# name=os.path.splitext(os.path.basename(reference))[0]+"_"+name
name=os.path.basename(outputprefix+"_"+name)
G.graph['paths'].append(name)
G.graph['path2id'][name]=pid
G.graph['id2path'][pid]=name
G.node[start]['offsets'][pid]=0
else:
logging.info("No contigs were mapped to: %s"%name)
pid+=1
for name in ctgnames:
if pid in mappablectgs:
name="*"+name #prefix so we can recognise the two paths afterwards
G.graph['paths'].append(name)
G.graph['path2id'][name]=pid
G.graph['id2path'][pid]=name
G.node[start]['offsets'][pid]=0
else:
logging.info("Contig: %s could not be uniquely placed on the reference"%name)
pid+=1
#write the reference layout of the query sequences
syntenyblocks.sort(key=lambda b: b[0]) #TODO: check if not already the case..
prefid=None
pnid=None
l=0
mapping=dict()
nid=0
for i,block in enumerate(syntenyblocks):
s1,e1,s2,e2,o,score,refid,ctgid=block
mapping[(s2,e2)]=nid
if refid!=prefid:
if prefid!=None:
G.add_edge(pnid,end,paths=set([prefid]),ofrom="+", oto="+")
pnid=start
l=0
if o==0:
G.add_node(nid,seq=T[s2:e2],offsets={refid:l})
else:
G.add_node(nid,seq=rc(T[s2:e2]),offsets={refid:l})
G.add_edge(pnid,nid,paths=set([refid]),ofrom="+", oto="+")
prefid=refid
pnid=nid
nid+=1
l+=e2-s2
if i!=len(syntenyblocks)-1: #add gap node, so we later know which bubbles are caused by gaps in the assembly
gapsize=1 #TODO: if specified use reference to add a gap
G.add_node(nid,seq="N"*gapsize,offsets={refid:l})
l+=gapsize
G.add_edge(pnid,nid,paths=set([refid]),ofrom="+", oto="+")
pnid=nid
nid+=1
G.add_edge(pnid,end,paths=set([refid]),ofrom="+", oto="+")
writeorg=True
if writeorg: #write the original layout of the query sequences, so we can reconstruct the input afterwards
syntenyblocks.sort(key=lambda b: b[2])
pctgid=None
pnid=None
l=0
for nid,block in enumerate(syntenyblocks):
s1,e1,s2,e2,o,score,refid,ctgid=block
nid=mapping[(s2,e2)]
if ctgid!=pctgid:
if pctgid!=None:
G.add_edge(pnid,end,paths=set([pctgid]),ofrom="+" if o==0 else "-", oto="+")
pnid=start
l=0
po=0
G.node[nid]['offsets'][ctgid]=l
l+=e2-s2
G.add_edge(pnid,nid,paths=set([ctgid]),ofrom="+" if po==0 else "-", oto="+" if o==0 else "-")
po=o
pctgid=ctgid
pnid=nid
G.add_edge(pnid,end,paths=set([ctgid]),ofrom="+" if o==0 else "-", oto="+")
write_gfa(G,None,outputfile=outputprefix if outputprefix.endswith(".gfa") else outputprefix+".gfa")
def merge_consecutive(syntenyblocks):
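# Merge blocks that are consecutive in both reference and query order, originate from the
# same contig and share the same orientation; the list is compacted in place and a merged
# block accumulates the scores of its members.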
if len(syntenyblocks)<2:
return syntenyblocks
#first merge consecutive blocks in the chain
syntenyblocks.sort(key=lambda s: s[0]) #order by ref position
qryorder = sorted(xrange(len(syntenyblocks)), key= lambda i: syntenyblocks[i][2]) #qry order
qryorder_inv = sorted(xrange(len(syntenyblocks)), key=qryorder.__getitem__) #inverse qry order
head=0
for ri in xrange(1,len(syntenyblocks)):
pblock=syntenyblocks[ri-1]
block=syntenyblocks[ri]
pqi=qryorder_inv[ri-1] #index within the qryorder of pblock
qi=qryorder_inv[ri] #index within the qryorder of block
ps1,pe1,ps2,pe2,po,pscore,prefid,pctgid=pblock #previous block on reference
s1,e1,s2,e2,o,score,refid,ctgid=block
es1,ee1,es2,ee2,eo,escore,erefid,ectgid=syntenyblocks[head]
if ctgid==pctgid:
if pqi+1==qi and o==po==0:
syntenyblocks[head]=(es1,e1,es2,e2,eo,escore+score,erefid,ectgid)
elif pqi-1==qi and o==po==1:
syntenyblocks[head]=(es1,e1,s2,ee2,eo,escore+score,erefid,ectgid)
else:
head+=1
syntenyblocks[head]=block
else:
head+=1
syntenyblocks[head]=block
while head!=ri:#len(syntenyblocks)-1:
syntenyblocks.pop()
head+=1
return syntenyblocks
def extendblocks(syntenyblocks,ctg2range):
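# Stretch blocks so they tile their sequences: the gap between two neighbouring blocks on
# the same sequence is split at the midpoint, while the first and last block on a sequence
# are extended to the sequence boundaries (reference axis first, then query axis).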
syntenyblocks.sort(key=lambda s: s[0]) #order by reference position
for i in xrange(len(syntenyblocks)):
s1,e1,s2,e2,o,score,ref,ctg=syntenyblocks[i]
if i==0: #first
s1=ctg2range[ref][0]
else:
ps1,pe1,ps2,pe2,po,pscore,pref,pctg=syntenyblocks[i-1]
if pref==ref:
s1=pe1
else:
s1=ctg2range[ref][0]
if i==len(syntenyblocks)-1: #last
e1=ctg2range[ref][1]
else:
ns1,ne1,ns2,ne2,no,nscore,nref,nctg=syntenyblocks[i+1]
if nref==ref:
e1+=((ns1-e1)/2)
else:
e1=ctg2range[ref][1]
assert(s1<e1)
syntenyblocks[i]=(s1,e1,s2,e2,o,score,ref,ctg)
syntenyblocks.sort(key=lambda s: s[2]) #order by qry position
for i in xrange(len(syntenyblocks)):
s1,e1,s2,e2,o,score,ref,ctg=syntenyblocks[i]
if i==0: #first
s2=ctg2range[ctg][0]
else:
ps1,pe1,ps2,pe2,po,pscore,pref,pctg=syntenyblocks[i-1]
if pctg==ctg:
s2=pe2
else:
s2=ctg2range[ctg][0]
if i==len(syntenyblocks)-1: #last
e2=ctg2range[ctg][1]
else:
ns1,ne1,ns2,ne2,no,nscore,nref,nctg=syntenyblocks[i+1]
if nctg==ctg:
e2+=((ns2-e2)/2)
else:
e2=ctg2range[ctg][1]
assert(s2<e2)
syntenyblocks[i]=(s1,e1,s2,e2,o,score,ref,ctg)
def optimise(syntenyblocks,rlength, qlength, ctg2range,rearrangecost=1000,inversioncost=1,_lambda=5,eps=1,alfa=1,gapopen=10):
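# Leave-one-out optimisation: starting from the lowest-scoring anchors, tentatively drop
# each block and keep the removal whenever the chain score (weight - cost) does not decrease.
# Returns the best chain together with its weight, cost and per-edge costs.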
orgchain=sorted(syntenyblocks,key=lambda c: c[5])
maxchain=syntenyblocks
maxchain_weight,maxchain_cost,maxchain_edgecosts=chainscore(maxchain, rlength, qlength, ctg2range, rearrangecost=rearrangecost,inversioncost=inversioncost,_lambda=_lambda,eps=eps,alfa=alfa,gapopen=gapopen)
maxchainscore=maxchain_weight-maxchain_cost
stack=[]
loglevel=logging.getLogger().getEffectiveLevel()
if loglevel>logging.DEBUG:
update_progress(0,len(orgchain))
for i in xrange(len(orgchain)):
if loglevel>logging.DEBUG:
update_progress(i,len(orgchain))
tmp=list(stack+orgchain[i+1:])
weight,cost,edgecosts=chainscore(tmp, rlength, qlength, ctg2range, rearrangecost=rearrangecost,inversioncost=inversioncost,_lambda=_lambda,eps=eps,alfa=alfa,gapopen=gapopen)
tmpchainscore=weight-cost
if tmpchainscore<maxchainscore:
stack.append(orgchain[i]) #keep it
else:
logging.debug("Dropped block %s, gain: %d"%(orgchain[i],tmpchainscore-maxchainscore))
maxchainscore=tmpchainscore
maxchain=tmp
maxchain_cost=cost
maxchain_weight=weight
maxchain_edgecosts=edgecosts
logging.debug("Optimal chain has %d blocks and scores: %d"%(len(maxchain),maxchainscore))
return maxchain,maxchain_weight,maxchain_cost,maxchain_edgecosts
def chainscore(chain, rlength, qlength, ctg2range, rearrangecost=1000, inversioncost=1, _lambda=5, eps=1, alfa=1, gapopen=10):
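# Score a chain: weight is alfa times the summed block scores; cost adds gap costs between
# colinear blocks on the same contig/reference pair, gapopen+rearrangecost for out-of-order
# traversals, a plain gapopen edge when simply moving on to another contig, and the cost of
# connecting the two chain ends to the sequence boundaries. Returns (weight, cost, edgecosts).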
# logging.debug("rearrangecost=%d, inversioncost=%d, _lambda=%d, eps=%d, alfa=%d, gapopen=%d"%(rearrangecost, inversioncost, _lambda, eps, alfa, gapopen))
if len(chain)==0:
start=(0,0,rlength,rlength,0,0,0,0)
end=(rlength,rlength,rlength+qlength,rlength+qlength,0,0,0,0)
cost=gapcost(start,end,rearrangecost=rearrangecost,inversioncost=inversioncost,_lambda=_lambda,eps=eps,gapopen=gapopen,axis=0)
return 0,cost,[cost]
chain.sort(key=lambda s: s[0]) #order by reference position
qryorder = sorted(xrange(len(chain)), key= lambda i: chain[i][2]) #qry order
qryorder_inv = sorted(xrange(len(chain)), key=qryorder.__getitem__) #inverse qry order
lastqstart,lastqend=ctg2range[chain[-1][7]]
if chain[0][4]==0:
end=(rlength,rlength,lastqend,lastqend,chain[0][4])
else:
end=(rlength,rlength,lastqstart,lastqstart,chain[0][4])
firstqstart,firstqend=ctg2range[chain[0][7]]
if chain[0][4]==0:
start=(0,0,firstqstart,firstqstart,chain[0][4])
else:
start=(0,0,firstqend,firstqend,chain[0][4])
#count out of order traversals
rearrangements=0
inversions=0
startcost=gapcost(start,chain[0],rearrangecost=rearrangecost,inversioncost=inversioncost,_lambda=_lambda,eps=eps,gapopen=gapopen,axis=0)
cost=startcost
edgecosts=[startcost]
weight=alfa*chain[0][5]
for ri in xrange(1,len(chain)):
pblock=chain[ri-1]
block=chain[ri]
ps1,pe1,ps2,pe2,po,pscore,pref,pctg=pblock
s1,e1,s2,e2,o,score,ref2,ctg=block
weight+=(alfa*score)
# xgap=0#s1-pe1
pqi=qryorder_inv[ri-1] #index within the qryorder of pblock
qi=qryorder_inv[ri] #index within the qryorder of block
if pctg==ctg and pref==ref2:
if (pqi==qi-1) or (pqi==qi+1): #check if the two blocks are colinear
gc=gapcost(pblock,block,rearrangecost=rearrangecost,inversioncost=inversioncost,_lambda=_lambda,eps=eps,gapopen=gapopen,axis=0)
cost+=gc
edgecosts.append(gc)
else: #all other options use rearrangement penalty
rearrangements+=1
cost+=(gapopen+rearrangecost)
edgecosts.append(gapopen+rearrangecost)
else: #cross contigs
if o==0:
if qi>0:
pqs1,pqe1,pqs2,pqe2,pqo,pqscore,pq_ref,pq_ctg=chain[qryorder[qi-1]]
else:
pq_ctg='start'
else:
if qi<len(qryorder)-1:
pqs1,pqe1,pqs2,pqe2,pqo,pqscore,pq_ref,pq_ctg=chain[qryorder[qi+1]]
else:
pq_ctg='end'
if po==0:
if pqi<len(qryorder)-1:
nqs1,nqe1,nqs2,nqe2,nqo,nqscore,nq_ref,nq_ctg=chain[qryorder[pqi+1]]
else:
nq_ctg='end'
else:
if pqi>0:
nqs1,nqe1,nqs2,nqe2,nqo,nqscore,nq_ref,nq_ctg=chain[qryorder[pqi-1]]
else:
nq_ctg='start'
if pq_ctg==ctg or nq_ctg==pctg: #there exists another block on this query contig before changing contigs, so has to be rearranged
rearrangements+=1
cost+=(gapopen+rearrangecost)
edgecosts.append((gapopen+rearrangecost))
else:
edgecosts.append(gapopen) #simple traversal between two contigs
endcost=gapcost(chain[-1],end,rearrangecost=rearrangecost,inversioncost=inversioncost,_lambda=_lambda,eps=eps,gapopen=gapopen,axis=0)
cost+=endcost
edgecosts.append(endcost)
return weight,cost,edgecosts
def update_progress(i,n):
fullbar=100
if (i+1) % (n/fullbar if n>fullbar else 1)==0 or i+1==n:
done=int(fullbar*((i+1)/float(n)))
todo=fullbar-done
sys.stdout.write('\r[%s%s]'%("#"*done," "*todo))
if i+1==n:
sys.stdout.write('\n')
sys.stdout.flush()
def glocalchain(syntenyblocks, rlength, qlength, ctg2range, rearrangecost=1000, inversioncost=1, lastn=50, lastbp=10000, useheap=False, axis=0, _lambda=5, eps=1, alfa=1, gapopen=10):
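# Glocal chaining by sparse dynamic programming: dummy zero-length blocks mark contig
# boundaries, anchors are sorted along one axis (axis=0 reference, axis=1 query) and each
# anchor is connected to the predecessor that maximises score minus connection cost. The
# backtrack is bounded by lastn anchors / lastbp bases, or scans a score-ordered heap when
# useheap is set. Returns the anchors on the optimal chain, obtained by tracing back from
# the end dummy block.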
sep=rlength
#add some dummy blocks for the contig start/ends
if axis==0:
for refid,(refstart,refend) in enumerate(ctg2range):
if refstart>=sep:
break
if refid==0:
start=(refstart,refstart,None,None,0,0,None,None)
syntenyblocks.append((refend,refend,None,None,0,0,None,None))
end=syntenyblocks[-1]
if axis==1:
first=True
for ctgid,(ctgstart,ctgend) in enumerate(ctg2range):
if ctgstart<sep:
continue
if first:
start=(None,None,ctgstart,ctgstart,0,0,None,None)
first=False
syntenyblocks.append((None,None,ctgend,ctgend,0,0,None,None))
end=syntenyblocks[-1]
if axis==0: #sort by ref
c1,c2=0,2
else: #sort by qry
c1,c2=2,0
syntenyblocks.sort(key=lambda s: (s[c1],-s[5]) ) #order by reference position, then score
if useheap:
heap=sortedcontainers.SortedList()
heap.add((0,start))
else:
heap=[(0,start)]+[None]*(len(syntenyblocks))
G={b:None for b in syntenyblocks}
maxscore=None
n=len(syntenyblocks)
bt=range(n+1)
update_progress(0,n)
pri=0
t0=time.time()
deepest=0
# best=None
for ri in xrange(n):
block=syntenyblocks[ri]
while syntenyblocks[deepest][c1+1]<block[c1]:
deepest+=1
if ri%1000==0:
t1=time.time()
sec=t1-t0
bd=ri-pri
logging.debug("Blocks per sec: %d"%(bd/sec))
t0=t1
pri=ri
update_progress(ri,n)
s1,e1,s2,e2,o,score,refid,ctgid=block
trace=False
# starttrace=105637436
# endtrace=starttrace+10
# if s1>=starttrace and s1<endtrace: # and refid==ctgtrace:
# # # if block==(4499237, 4502780, 9008394, 9011937, 0, 3543, 0, 1) or block==end:
# logging.info("BLOCK: %s"%str(block))
# print "deepest",syntenyblocks[deepest], syntenyblocks[deepest][c1+1]
# trace=True
bestscore=None
bestblock=None
bestcost=0
# checkedbest=False
l=0
for j in bt: #back track on the heap
if useheap:
if j>=len(heap):
break
cscore,pblock=heap[-j]
else:
i=(ri+1)-j-1
if i<0:
break
cscore,pblock=heap[i]
# if best==None or cscore==best:
# checkedbest=True
ps1,pe1,ps2,pe2,po,pscore,prefid,pctgid=pblock
if (pblock[c1]==block[c1] and prefid!=None and refid!=None) or (pblock[c1+1]>=block[c1+1] and prefid!=None and refid!=None):
continue
if (pblock[c2]>=block[c2] and prefid!=None and refid!=None) and (pblock[c2+1]<=block[c2+1] and prefid!=None and refid!=None):
continue
l+=1
if bestscore!=None:
if cscore<=bestscore:
if useheap:
break
else:
if block[c1]-pblock[c1]>lastbp and l>=lastn and pblock[c1]<syntenyblocks[deepest][c1]:
break
else:
continue
#if block is a dummy block, make it relative to pblock, if possible
if block[6]==None and pblock[6]!=None: #update current block to be relative to pblock
if axis==0:
_block=(s1, e1, pe2 if po==0 else ps2, pe2 if po==0 else ps2, po, 0, prefid, pctgid)
else:
_block=(pe1 if po==0 else ps1, pe1 if po==0 else ps1, s2, e2, po, 0, prefid, pctgid)
else:
_block=block
#if pblock is a dummy block, make it relative to block, if possible
if pblock[6]==None and block[6]!=None:
if axis==0:
_pblock=(ps1, pe1, s2 if o==0 else e2, s2 if o==0 else e2, o, 0, refid, ctgid)
else:
_pblock=(s1 if o==0 else e1, s1 if o==0 else e1, ps2, pe2, o, 0, refid, ctgid)
else:
_pblock=pblock
#if blocks come from same query contig and reference contig, compute gapcost, else introduce rearrangement cost
if _pblock[6]==_block[6]!=None and _pblock[7]==_block[7]!=None:
c=gapcost(_pblock,_block,rearrangecost=rearrangecost,inversioncost=inversioncost,eps=eps,_lambda=_lambda,gapopen=gapopen,axis=axis)
elif _pblock[6]==_block[6]==None and _pblock[7]==_block[7]==None: #connect two dummy blocks
c=gapopen+(abs(block[c1]-(pblock[c1+1]))*eps)
else: #blocks cross contigs or ref without passing a dummy block, introduce rearrangement cost
pblockctgstart,pblockctgend=ctg2range[_pblock[7]]
blockctgstart,blockctgend=ctg2range[_block[7]]
pblockrefstart,pblockrefend=ctg2range[_pblock[6]]
blockrefstart,blockrefend=ctg2range[_block[6]]
if _pblock[6]==_block[6] and axis==0:
if _pblock[4]==0:
cp=abs( pblockctgend-_pblock[3])
else:
cp=abs( _pblock[2]-pblockctgstart)
if _block[4]==0:
cb=abs( blockctgend-_block[3] )
else:
cb=abs( _block[2]-blockctgstart )
c=gapopen+min((rearrangecost,((cp+cb)*eps)))
elif _pblock[7]==_block[7] and axis==1:
if _pblock[4]==0:
cp=abs( pblockrefend-_pblock[1])
else:
cp=abs( _pblock[0]-pblockrefstart)
if _block[4]==0:
cb=abs( _block[0]-blockrefstart )
else:
cb=abs( blockrefend-_block[1])
c=gapopen+min((rearrangecost,((cp+cb)*eps)))
else:
c=rearrangecost+gapopen+(abs(block[c1]-(pblock[c1+1]))*eps)
assert(c>=0)
if trace:
logging.info("Connect to PBLOCK: %s costs %s, depth=%s, lastbp=%d, cscore,%s, cscore-c=%d, bestscore=%s"%(pblock,c,l,block[c1]-pblock[c1],cscore,cscore-c,bestscore))
if bestscore==None or cscore-c > bestscore:
bestscore=cscore-c
bestblock=pblock
bestcost=c
if not useheap:
if block[c1]-pblock[c1]>lastbp and l>=lastn and pblock[c1]<syntenyblocks[deepest][c1]:
break
# if l>lastn:
# logging.info("Forced deeper %d backtrack for block: %s"%(l,block))
cscore=bestscore+(alfa*score)
# if best==None or cscore>best:
# best=cscore
if useheap:
heap.add((cscore,block))
else:
heap[ri+1]=(cscore,block)
if maxscore==None or maxscore<cscore:
maxscore=cscore
maxnode=block
if trace:
logging.info("CONNECT TO BLOCK: %s, score=%s, cost=%s, depth=%s"%(bestblock,bestscore,bestcost,l))
G[block]=(bestblock,bestscore)
node,cscore=G[end]
chain=[]
while node!=start:# and node!=startrc:
if node[6]!=None: #only add to the chain if it is an actual anchor, exclude contig endpoints
chain.append(node)
s1,e1,s2,e2,o,score,refid,ctgid=node
nnode,score=G[node]
if node==nnode:
logging.fatal("Loop in chain!")
sys.exit(1)
node=nnode
logging.info("Optimal glocal chain contains: %d anchors and scores %d"%(len(chain),cscore))
return chain[::-1]
def gapcost(block1,block2,rearrangecost=10000,inversioncost=0,eps=0,_lambda=0.5,gapopen=10,axis=0):
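# Cost of connecting block1 to block2 (block1 upstream on the sorting axis): gapopen plus an
# indel term min(rearrangecost, _lambda*|d1-d2|) plus a substitution term eps*min(d1,d2),
# where d1/d2 are the gaps on the two axes and negative gaps (overlaps) contribute no
# substitution cost; a full rearrangecost is charged when the query order contradicts the
# orientation, and inversioncost is added when the two blocks have opposite orientations.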
if axis==0: #sorted by ref
c1,c2=0,2
else: #sorted by qry
c1,c2=2,0
assert(block1[c1]<=block2[c1])
d1=block2[c1]-block1[c1+1]
if block1[4]==block2[4]==0: #both normal orientation
if block2[c2]<block1[c2]:#always has to be rearranged!
indelcost=rearrangecost
substitutioncost=eps*(d1 if d1>0 else 0) #do not penalize if overlap
return gapopen+indelcost+substitutioncost
else:
d2=block2[c2]-block1[c2+1]
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*max(((d1 if d1<d2 else d2),0))
return gapopen+indelcost+substitutioncost
elif block1[4]==block2[4]==1: #both reverse comp orientation
if block2[c2]>block1[c2]: #always has to be rearranged!
indelcost=rearrangecost
substitutioncost=eps*(d1 if d1>0 else 0)
return gapopen+indelcost+substitutioncost
else:
d2=block1[c2]-block2[c2+1]
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*max(((d1 if d1<d2 else d2),0))
return gapopen+indelcost+substitutioncost
elif block1[4]==1 and block2[4]==0:
d1=max((0,d1))
if block2[c2]>block1[c2]:
d2=block2[c2]-block1[c2+1]
d2=max((0,d2))
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*max(((d1 if d1<d2 else d2),0))
return gapopen+indelcost+substitutioncost+inversioncost
else:
d2=block1[c2]-block2[c2+1]
d2=max((0,d2))
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*max(((d1 if d1<d2 else d2),0))
return gapopen+indelcost+substitutioncost+inversioncost
else:
# assert(block1[4]==0 and block2[4]==1)
d1=max((0,d1))
if block2[c2]>block1[c2]:
d2=block2[c2]-block1[c2+1]
d2=max((0,d2))
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*max(((d1 if d1<d2 else d2),0))
return gapopen+indelcost+substitutioncost+inversioncost
else:
d2=block1[c2]-block2[c2+1]
d2=max((0,d2))
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*max(((d1 if d1<d2 else d2),0))
return gapopen+indelcost+substitutioncost+inversioncost
def _gapcost(block1,block2,rearrangecost=10000,inversioncost=0,eps=0,_lambda=0.5,gapopen=10,axis=0):
if axis==0: #sorted by ref
c1,c2=0,2
else: #sorted by qry
c1,c2=2,0
assert(block1[c1]<=block2[c1])
d1=abs(block2[c1]-block1[c1+1])
if block1[4]==block2[4]==0: #both normal orientation
if block2[c2]<block1[c2]:#always has to be rearranged!
indelcost=rearrangecost
substitutioncost=eps*d1
return gapopen+indelcost+substitutioncost
else:
d2=abs(block2[c2]-block1[c2+1])
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*(d1 if d1<d2 else d2)
return gapopen+indelcost+substitutioncost
elif block1[4]==block2[4]==1: #both reverse comp orientation
if block2[c2]>block1[c2]: #always has to be rearranged!
indelcost=rearrangecost
substitutioncost=eps*d1
return gapopen+indelcost+substitutioncost
else:
d2=abs(block1[c2]-block2[c2+1])
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*(d1 if d1<d2 else d2)
return gapopen+indelcost+substitutioncost
elif block1[4]==1 and block2[4]==0:
if block2[c2]>block1[c2]:
d2=abs(block2[c2]-block1[c2+1])
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*(d1 if d1<d2 else d2)
return gapopen+indelcost+substitutioncost+inversioncost
else:
d2=abs(block1[c2]-block2[c2+1])
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*(d1 if d1<d2 else d2)
return gapopen+indelcost+substitutioncost+inversioncost
else:
# assert(block1[4]==0 and block2[4]==1)
if block2[c2]>block1[c2]:
d2=abs(block2[c2]-block1[c2+1])
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*(d1 if d1<d2 else d2)
return gapopen+indelcost+substitutioncost+inversioncost
else:
d2=abs(block1[c2]-block2[c2+1])
indelcost=min((rearrangecost,_lambda*abs(d1-d2)))
substitutioncost=eps*(d1 if d1<d2 else d2)
return gapopen+indelcost+substitutioncost+inversioncost
def printSA(index,maxline=100,start=0,end=None,fn="sa.txt"):
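# Debug helper: dump the suffix array and LCP array of the index to stdout, one line per
# suffix with its position, LCP value and bounded slices of the suffix text.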
sa=index.SA
lcp=index.LCP
t=index.T
#so=index.SO
if end==None:
end=len(sa)
# with open(fn,'w') as f:
sys.stdout.write("%d\t%d\n"%(len(sa), len(lcp)))
assert(len(sa)==len(lcp))
for i in xrange(len(sa)):
s=sa[i]
lcpi=lcp[i]
if i>0 and i<len(sa)-1:
l1=lcp[i]
l2=lcp[i+1]
elif i==len(sa)-1:
l1=max([lcp[i-1],lcp[i]])
l2=0
else:
l1=0
l2=lcp[i+1]
if i>=start and i<=end:
#f.write("%s\t%s\t%s\n"%(str(s).zfill(8), str(lcpi).zfill(6), t[s:s+maxline].ljust(maxline) if l1<=maxline else t[s:s+maxline]+"..."+t[s+l1-40:s+l1].ljust(maxline) ) )
sys.stdout.write("%s\t%s\t%s\t%s\t%s\n"%(str(s).zfill(8), str(lcpi).zfill(6), t[s:s+maxline] ,t[s+l1-maxline:s+l1], t[s+l2-maxline:s+l2] ) )
def remove_overlap_conservative_blocks(anchors):
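# Resolve overlap between blocks on both axes conservatively: contained blocks are dropped
# and, where two blocks overlap, both are trimmed so neither keeps the disputed bases.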
for coord in [0,2]:
if len(anchors)<=1: #by definition no containment
return anchors
anchors.sort(key=lambda m: (m[coord], (m[coord+1]-m[coord])*-1)) #sort by start position, then -1*size
_anchors=[anchors[0]]
last=anchors[0]
for anchor in anchors[1:]:
if anchor[coord] < last[coord+1]: #overlap
if anchor[coord+1]<=last[coord+1]: #contained
continue
_anchors.append(anchor)
last=anchor
anchors=_anchors
_anchors=[anchors[0]]
for anchor in anchors[1:]:
s1,e1,s2,e2,o,score,refid,ctgid=anchor
ps1,pe1,ps2,pe2,po,pscore,prefid,pctgid=_anchors[-1]
overlap=(_anchors[-1][coord+1]) - anchor[coord]
pl=pe1-ps1
if overlap > 0: #overlap
if score<=overlap:
continue
assert(score-overlap >= 0)
if o==0:
anchor=(s1+overlap,e1,s2+overlap,e2,o,score-overlap if overlap<score else 0,refid,ctgid)
else:
if coord==0:
anchor=(s1+overlap,e1,s2,e2-overlap,o,score-overlap if overlap<score else 0,refid,ctgid)
else:
anchor=(s1,e1-overlap,s2+overlap,e2,o,score-overlap if overlap<score else 0,refid,ctgid)
assert(anchor[coord+1]>_anchors[-1][coord+1])
while pl<=overlap or pscore<=overlap:
_anchors.pop()
ps1,pe1,ps2,pe2,po,pscore,prefid,pctgid=_anchors[-1]
overlap=(_anchors[-1][coord+1]) - anchor[coord]
if overlap<0:
break
pl=pe1-ps1
if overlap>0:
assert(pscore-overlap >= 0)
if po==0:
_anchors[-1]=(ps1,pe1-overlap,ps2,pe2-overlap,po,pscore-overlap if overlap<pscore else 0,prefid,pctgid)
else:
if coord==0:
_anchors[-1]=(ps1,pe1-overlap, ps2+overlap,pe2, po,pscore-overlap if overlap<pscore else 0, prefid,pctgid)
else:
_anchors[-1]=(ps1+overlap,pe1,ps2,pe2-overlap,po,pscore-overlap if overlap<pscore else 0, prefid,pctgid)
_anchors.append(anchor)
anchors=_anchors
return anchors
def remove_overlap_greedy_blocks(anchors):
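# Resolve overlap between blocks on both axes greedily: contained blocks are dropped and,
# where two blocks overlap, the lower-scoring block is trimmed in favour of the higher-scoring one.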
#TODO: remove duplicates!
for coord in [0,2]:
if len(anchors)<=1: #by definition no containment
return anchors
update_progress(0,len(anchors))
anchors.sort(key=lambda m: (m[coord], (m[coord+1]-m[coord])*-1)) #sort by start position, then -1*size
_anchors=[anchors[0]]
last=anchors[0]
for anchor in anchors[1:]:
if anchor[coord] < last[coord+1]: #overlap
if anchor[coord+1]<=last[coord+1]: #contained
continue
_anchors.append(anchor)
last=anchor
anchors=_anchors
_anchors=[anchors[0]]
# for anchor in anchors[1:]:
for i in xrange(1,len(anchors)):
anchor=anchors[i]
update_progress(i,len(anchors))
s1,e1,s2,e2,o,score,refid,ctgid=anchor
ps1,pe1,ps2,pe2,po,pscore,prefid,pctgid=_anchors[-1]
pl=pe1-ps1
overlap=(_anchors[-1][coord+1]) - anchor[coord]
if overlap > 0: #overlap
if pscore > score: #update current anchor
if score<=overlap:
continue
assert(score-overlap >= 0)
if o==0:
anchor=(s1+overlap,e1,s2+overlap,e2,o,score-overlap if overlap<score else 0,refid,ctgid)
else:
if coord==0:
anchor=(s1+overlap,e1,s2,e2-overlap,o,score-overlap if overlap<score else 0,refid,ctgid)
else:
anchor=(s1,e1-overlap,s2+overlap,e2,o,score-overlap if overlap<score else 0,refid,ctgid)
_anchors.append(anchor)
else:
while pl<=overlap or pscore<=overlap:
_anchors.pop()
ps1,pe1,ps2,pe2,po,pscore,prefid,pctgid=_anchors[-1]
overlap=(_anchors[-1][coord+1]) - anchor[coord]
if overlap<0:
break
pl=pe1-ps1
if overlap>0:
assert(pl>overlap)
assert(pscore>overlap)
assert(pscore-overlap >= 0)
if po==0:
_anchors[-1]=(ps1,pe1-overlap,ps2,pe2-overlap,po,pscore-overlap if overlap<pscore else 0,prefid,pctgid)
else:
if coord==0:
_anchors[-1]=(ps1,pe1-overlap, ps2+overlap,pe2, po,pscore-overlap if overlap<pscore else 0,prefid,pctgid)
else:
_anchors[-1]=(ps1+overlap,pe1,ps2,pe2-overlap,po,pscore-overlap if overlap<pscore else 0,prefid,pctgid)
_anchors.append(anchor)
else:
_anchors.append(anchor)
anchors=_anchors
return anchors
def remove_contained_blocks(anchors):
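# Drop blocks that are fully contained within another block on either axis.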
#remove duplicates!
for coord in [0,2]:
logging.info("Remove overlap in %s dimension."%("first" if coord==0 else "second"))
if len(anchors)<=1: #by definition no containment
return anchors
anchors.sort(key=lambda m: (m[coord], (m[coord+1]-m[coord])*-1) ) #sort by start position, then -1*size
_anchors=[anchors[0]]
last=anchors[0]
update_progress(0,len(anchors))
# for anchor in anchors[1:]:
for i in xrange(1,len(anchors)):
anchor=anchors[i]
update_progress(i,len(anchors))
if anchor[coord] < last[coord+1]: #overlap
if anchor[coord+1]<=last[coord+1]: #contained
continue
_anchors.append(anchor)
last=anchor
anchors=_anchors
return anchors
#unused
def remove_overlap_greedy_mums(anchors):
#remove duplicates!
n=2
for coord in range(n):
if len(anchors)<=1: #by definition no containment
return anchors
anchors.sort(key=lambda m: (m[1][coord], m[0]*-1)) #sort by start position, then -1*size
_anchors=[anchors[0]]
last=anchors[0]
for anchor in anchors[1:]:
if anchor[1][coord] < last[1][coord]+last[0]: #overlap
if anchor[1][coord]+anchor[0]<=last[1][coord]+last[0]: #contained
continue
_anchors.append(anchor)
last=anchor
anchors=_anchors
_anchors=[anchors[0]]
for anchor in anchors[1:]:
overlap=(_anchors[-1][1][coord]+_anchors[-1][0]) - anchor[1][coord]
if overlap > 0: #overlap
if _anchors[-1][0] > anchor[0]:
if anchor[2]==0:
anchor=(anchor[0]-overlap, (anchor[1][0]+overlap, anchor[1][1]+overlap), anchor[2])
else:
if coord==0:
anchor=(anchor[0]-overlap, (anchor[1][0]+overlap, anchor[1][1]), anchor[2])
else:
anchor=(anchor[0]-overlap, (anchor[1][0], anchor[1][1]+overlap), anchor[2])
_anchors.append(anchor)
else:
while _anchors[-1][0]<=overlap and overlap>0:
_anchors.pop()
overlap=(_anchors[-1][1][coord]+_anchors[-1][0]) - anchor[1][coord]
if overlap>0:
if _anchors[-1][2]==0:
_anchors[-1]=(_anchors[-1][0]-overlap,_anchors[-1][1],_anchors[-1][2]) #update stack
else:
if coord==0:
_anchors[-1]=(_anchors[-1][0]-overlap,_anchors[-1][1],_anchors[-1][2])
else:
_anchors[-1]=(_anchors[-1][0]-overlap,(_anchors[-1][1][0]+overlap, _anchors[-1][1][1]),_anchors[-1][2])
_anchors.append(anchor)
else:
_anchors.append(anchor)
anchors=_anchors
return anchors
#unused
def remove_contained_mums(anchors):
#remove duplicates!
for coord in range(2):
if len(anchors)<=1: #by definition no containment
return anchors
anchors.sort(key=lambda m: (m[1][coord], m[0]*-1)) #sort by start position, then -1*size
_anchors=[anchors[0]]
last=anchors[0]
for anchor in anchors[1:]:
if anchor[1][coord] < last[1][coord]+last[0]: #overlap
if anchor[1][coord]+anchor[0]<=last[1][coord]+last[0]: #contained
continue
_anchors.append(anchor)
last=anchor
anchors=_anchors
return anchors
#unused
def remove_overlap_conservative_mums(anchors):
#remove duplicates!
n=2
for coord in range(n):
if len(anchors)<=1: #by definition no containment
return anchors
anchors.sort(key=lambda m: (m[1][coord], m[0]*-1)) #sort by start position, then -1*size
_anchors=[anchors[0]]
last=anchors[0]
for anchor in anchors[1:]:
if anchor[1][coord] < last[1][coord]+last[0]: #overlap
if anchor[1][coord]+anchor[0]<=last[1][coord]+last[0]: #contained
continue
_anchors.append(anchor)
last=anchor
anchors=_anchors
_anchors=[anchors[0]]
last=anchors[0]
for anchor in anchors[1:]:
if anchor[1][coord] < last[1][coord]+last[0]: #overlap
assert(anchor[1][coord]+anchor[0] > last[1][coord]+last[0]) #may not be contained, as we filtered these out already
overlap=(last[1][coord]+last[0])-anchor[1][coord]
assert(overlap>=0)
assert(anchor[0]>overlap)
if anchor[2]==0:
anchor=(anchor[0]-overlap, (anchor[1][0]+overlap, anchor[1][1]+overlap), anchor[2])
else:
if coord==0:
anchor=(anchor[0]-overlap, (anchor[1][0]+overlap, anchor[1][1]), anchor[2])
else:
anchor=(anchor[0]-overlap, (anchor[1][0], anchor[1][1]+overlap), anchor[2])
# assert(last[0]>overlap)
if last[2]==0:
_anchors[-1]=(last[0]-overlap,last[1],last[2]) #update last
else:
if coord==0:
_anchors[-1]=(last[0]-overlap,(last[1][0], last[1][1]+overlap),last[2])
else:
_anchors[-1]=(last[0]-overlap,(last[1][0]+overlap, last[1][1]),last[2])
if _anchors[-1][0]<=0:
_anchors[-1]=anchor
else:
_anchors.append(anchor)
last=anchor
anchors=_anchors
return anchors
|
py | 1a4de5de3d5a2b2a79966e4828471ae2420d5ee1 | from .a_star import a_star
from .graph import Path, Node, Graph
|
py | 1a4de661e9bfe46cc29e248dbcdce5f9275307fb | # ------------------------------------------------------------------------------
# Training code.
# Example command:
# python -m torch.distributed.launch --nproc_per_node=4 tools/train_net.py --cfg PATH_TO_CONFIG_FILE
# Written by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
import argparse
import os
import pprint
import logging
import time
import torch
from torch import nn
import torch.backends.cudnn as cudnn
from torch.nn.parallel import DistributedDataParallel
import tools._init_paths
from fvcore.common.file_io import PathManager
from segmentation.config import config, update_config
from segmentation.utils.logger import setup_logger
from segmentation.model import build_segmentation_model_from_cfg
from segmentation.utils import comm
from segmentation.solver import build_optimizer, build_lr_scheduler
from segmentation.data import build_train_loader_from_cfg, build_test_loader_from_cfg
from segmentation.solver import get_lr_group_id
from segmentation.utils import save_debug_images
from segmentation.utils import AverageMeter
from segmentation.utils.utils import get_loss_info_str, to_cuda, get_module
def parse_args():
parser = argparse.ArgumentParser(description='Train segmentation network')
parser.add_argument('--cfg',
help='experiment configure file name',
required=True,
type=str)
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument('opts',
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER)
args = parser.parse_args()
update_config(config, args)
return args
def main():
args = parse_args()
logger = logging.getLogger('segmentation')
if not logger.isEnabledFor(logging.INFO): # setup_logger is not called
setup_logger(output=config.OUTPUT_DIR, distributed_rank=args.local_rank)
# logger.info(pprint.pformat(args))
# logger.info(config)
# cudnn related setting
cudnn.benchmark = config.CUDNN.BENCHMARK
cudnn.deterministic = config.CUDNN.DETERMINISTIC
cudnn.enabled = config.CUDNN.ENABLED
gpus = list(config.GPUS)
distributed = len(gpus) > 1
device = torch.device('cuda:{}'.format(args.local_rank))
if distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(
backend="nccl", init_method="env://",
)
# build model
model = build_segmentation_model_from_cfg(config)
# logger.info("Model:\n{}".format(model))
logger.info("Rank of current process: {}. World size: {}".format(comm.get_rank(), comm.get_world_size()))
if distributed:
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = model.to(device)
if comm.get_world_size() > 1:
model = DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank
)
data_loader = build_train_loader_from_cfg(config)
optimizer = build_optimizer(config, model)
lr_scheduler = build_lr_scheduler(config, optimizer)
data_loader_iter = iter(data_loader)
start_iter = 0
max_iter = config.TRAIN.MAX_ITER
best_param_group_id = get_lr_group_id(optimizer)
# initialize model
if os.path.isfile(config.MODEL.WEIGHTS):
model_weights = torch.load(config.MODEL.WEIGHTS)
get_module(model, distributed).load_state_dict(model_weights, strict=False)
logger.info('Pre-trained model from {}'.format(config.MODEL.WEIGHTS))
elif config.MODEL.BACKBONE.PRETRAINED:
if os.path.isfile(config.MODEL.BACKBONE.WEIGHTS):
pretrained_weights = torch.load(config.MODEL.BACKBONE.WEIGHTS)
get_module(model, distributed).backbone.load_state_dict(pretrained_weights, strict=False)
logger.info('Pre-trained backbone from {}'.format(config.MODEL.BACKBONE.WEIGHTS))
else:
logger.info('No pre-trained weights for backbone, training from scratch.')
# load model
if config.TRAIN.RESUME:
model_state_file = os.path.join(config.OUTPUT_DIR, 'checkpoint.pth.tar')
if os.path.isfile(model_state_file):
checkpoint = torch.load(model_state_file)
start_iter = checkpoint['start_iter']
get_module(model, distributed).load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
logger.info('Loaded checkpoint (starting from iter {})'.format(checkpoint['start_iter']))
data_time = AverageMeter()
batch_time = AverageMeter()
loss_meter = AverageMeter()
# Report the model's parameter counts (in millions)
def get_parameter_number(net):
total_num = sum(p.numel() for p in net.parameters())
trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad)
# return {'Total': total_num/1000000, 'Trainable': trainable_num/1000000}
logger.info('Total:{}M, Trainable:{}M'.format(total_num/1000000, trainable_num/1000000))
get_parameter_number(model) # the helper logs the counts itself and returns None
# Debug output.
if config.DEBUG.DEBUG:
debug_out_dir = os.path.join(config.OUTPUT_DIR, 'debug_train')
PathManager.mkdirs(debug_out_dir)
# Train loop.
try:
for i in range(start_iter, max_iter):
# data
start_time = time.time()
data = next(data_loader_iter)
if not distributed:
data = to_cuda(data, device)
data_time.update(time.time() - start_time)
# pull the mini-batch images and labels
image = data.pop('image')
label = data.pop('label')
# import imageio
# import numpy as np
# print(label.shape)
# label_image = np.array(label.cpu()[0])
# print(label_image.shape)
# imageio.imwrite('%s/%d_%s.png' % ('./', 1, 'debug_batch_label'), label_image.transpose(1, 2, 0))
# forward pass
out_dict = model(image, data)
# compute the loss
loss = out_dict['loss']
# zero the gradients before the backward pass
optimizer.zero_grad()
# backward pass
loss.backward()
# update the trainable parameters
optimizer.step()
# Get lr.
lr = optimizer.param_groups[best_param_group_id]["lr"]
lr_scheduler.step()
batch_time.update(time.time() - start_time)
loss_meter.update(loss.detach().cpu().item(), image.size(0))
if i == 0 or (i + 1) % config.PRINT_FREQ == 0:
msg = '[{0}/{1}] LR: {2:.7f}\t' \
'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
'Data: {data_time.val:.3f}s ({data_time.avg:.3f}s)\t'.format(
i + 1, max_iter, lr, batch_time=batch_time, data_time=data_time)
msg += get_loss_info_str(get_module(model, distributed).loss_meter_dict)
logger.info(msg)
if i == 0 or (i + 1) % config.DEBUG.DEBUG_FREQ == 0:
if comm.is_main_process() and config.DEBUG.DEBUG:
save_debug_images(
dataset=data_loader.dataset,
label=label,
batch_images=image,
batch_targets=data,
batch_outputs=out_dict,
out_dir=debug_out_dir,
iteration=i,
target_keys=config.DEBUG.TARGET_KEYS,
output_keys=config.DEBUG.OUTPUT_KEYS,
iteration_to_remove=i - config.DEBUG.KEEP_INTERVAL
)
if i == 0 or (i + 1) % config.CKPT_FREQ == 0:
if comm.is_main_process():
torch.save({
'start_iter': i + 1,
'state_dict': get_module(model, distributed).state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
}, os.path.join(config.OUTPUT_DIR, 'checkpoint.pth.tar'))
except Exception:
logger.exception("Exception during training:")
raise
finally:
if comm.is_main_process():
torch.save(get_module(model, distributed).state_dict(),
os.path.join(config.OUTPUT_DIR, 'final_state.pth'))
logger.info("Training finished.")
if __name__ == '__main__':
main()
|
py | 1a4de67a4eadf6e9fa8054abf93a2a913d56009b | # 利用鸢尾花数据集,实现前向传播、反向传播,可视化loss曲线
# 导入所需模块
import tensorflow as tf
from sklearn import datasets
from matplotlib import pyplot as plt
import numpy as np
import time ##1##
# Load the data: input features and labels
x_data = datasets.load_iris().data
y_data = datasets.load_iris().target
# Shuffle the data (the raw dataset is ordered by class; leaving it ordered would hurt accuracy)
# seed: random-number seed, an integer; once set, the same random numbers are produced every run (kept fixed here so results are reproducible)
np.random.seed(116) # use the same seed so input features and labels stay paired after shuffling
np.random.shuffle(x_data)
np.random.seed(116)
np.random.shuffle(y_data)
tf.random.set_seed(116)
# Split the shuffled data into a training set (first 120 rows) and a test set (last 30 rows)
x_train = x_data[:-30]
y_train = y_data[:-30]
x_test = x_data[-30:]
y_test = y_data[-30:]
# Cast x to float32, otherwise the later matrix multiplication fails on mismatched dtypes
x_train = tf.cast(x_train, tf.float32)
x_test = tf.cast(x_test, tf.float32)
# from_tensor_slices pairs each input feature row with its label (the dataset is then served in batches of 32)
train_db = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(32)
test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)
# Create the network parameters: 4 input features, so the input layer has 4 nodes; 3 classes, so the output layer has 3 neurons
# tf.Variable() marks the parameters as trainable
# the seed keeps the random initialisation identical between runs (for reproducibility; omit it in real use)
w1 = tf.Variable(tf.random.truncated_normal([4, 3], stddev=0.1, seed=1))
b1 = tf.Variable(tf.random.truncated_normal([3], stddev=0.1, seed=1))
lr = 0.1 # learning rate
train_loss_results = [] # record each epoch's loss here, used later to plot the loss curve
test_acc = [] # record each epoch's accuracy here, used later to plot the accuracy curve
epoch = 500 # train for 500 epochs
loss_all = 0 # each epoch has 4 steps; loss_all accumulates the 4 step losses
##########################################################################
m_w, m_b = 0, 0
beta = 0.9
##########################################################################
# Training loop
now_time = time.time() ##2##
for epoch in range(epoch): # dataset-level loop: one pass over the dataset per epoch
for step, (x_train, y_train) in enumerate(train_db): # batch-level loop: one batch per step
with tf.GradientTape() as tape: # record operations for gradient computation
y = tf.matmul(x_train, w1) + b1 # forward pass: matrix multiply and add
y = tf.nn.softmax(y) # turn the output into a probability distribution, comparable with the one-hot labels
y_ = tf.one_hot(y_train, depth=3) # convert the labels to one-hot to compute loss and accuracy
loss = tf.reduce_mean(tf.square(y_ - y)) # mean squared error loss: mse = mean(sum(y-out)^2)
loss_all += loss.numpy() # accumulate each step's loss so the epoch average can be reported
# compute the gradients of the loss with respect to each parameter
grads = tape.gradient(loss, [w1, b1])
##########################################################################
# SGD with momentum
m_w = beta * m_w + (1 - beta) * grads[0]
m_b = beta * m_b + (1 - beta) * grads[1]
w1.assign_sub(lr * m_w)
b1.assign_sub(lr * m_b)
##########################################################################
# print the loss once per epoch
print("Epoch {}, loss: {}".format(epoch, loss_all / 4))
train_loss_results.append(loss_all / 4) # record the average loss over the 4 steps
loss_all = 0 # reset loss_all for the next epoch
# Evaluation
# total_correct: number of correctly predicted samples; total_number: total number of test samples; both start at 0
total_correct, total_number = 0, 0
for x_test, y_test in test_db:
# predict with the updated parameters
y = tf.matmul(x_test, w1) + b1
y = tf.nn.softmax(y)
pred = tf.argmax(y, axis=1) # index of the largest output value, i.e. the predicted class
# cast pred to the dtype of y_test
pred = tf.cast(pred, dtype=y_test.dtype)
# correct is 1 when the prediction is right, 0 otherwise; the bool result is cast to int
correct = tf.cast(tf.equal(pred, y_test), dtype=tf.int32)
# sum the correct predictions within this batch
correct = tf.reduce_sum(correct)
# accumulate the correct counts over all batches
total_correct += int(correct)
# total_number is the total number of test samples, i.e. the number of rows of x_test (shape[0])
total_number += x_test.shape[0]
# overall accuracy = total_correct / total_number
acc = total_correct / total_number
test_acc.append(acc)
print("Test_acc:", acc)
print("--------------------------")
total_time = time.time() - now_time ##3##
print("total_time", total_time) ##4##
# Plot the loss curve
plt.title('Loss Function Curve') # figure title
plt.xlabel('Epoch') # x-axis label
plt.ylabel('Loss') # y-axis label
plt.plot(train_loss_results, label="$Loss$") # plot train_loss_results point by point and connect them; legend label is Loss
plt.legend() # draw the legend
plt.show() # show the figure
# Plot the accuracy curve
plt.title('Acc Curve') # figure title
plt.xlabel('Epoch') # x-axis label
plt.ylabel('Acc') # y-axis label
plt.plot(test_acc, label="$Accuracy$") # plot test_acc point by point and connect them; legend label is Accuracy
plt.legend()
plt.show()
# Record the loss curve, accuracy curve and total_time in class2\优化器对比.docx to compare how the optimizers converge
|
py | 1a4de68769a3ec4f0dffcb9f36ee9c6a724ae981 | import cgi
import re
import urllib.parse
import warnings
from collections import defaultdict
from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import Iterator
from typing import List
from typing import Optional
import requests
import requests.auth
from cachecontrol import CacheControl
from cachecontrol.caches.file_cache import FileCache
from cachy import CacheManager
from poetry.core.packages.package import Package
from poetry.core.packages.utils.link import Link
from poetry.core.semver.helpers import parse_constraint
from poetry.core.semver.version import Version
from poetry.core.semver.version_constraint import VersionConstraint
from poetry.core.semver.version_range import VersionRange
from poetry.locations import REPOSITORY_CACHE_DIR
from poetry.utils.helpers import canonicalize_name
from poetry.utils.patterns import wheel_file_re
from ..config.config import Config
from ..inspection.info import PackageInfo
from ..installation.authenticator import Authenticator
from .exceptions import PackageNotFound
from .exceptions import RepositoryError
from .pypi_repository import PyPiRepository
if TYPE_CHECKING:
from poetry.core.packages.dependency import Dependency
try:
from html import unescape
except ImportError:
try:
from html.parser import HTMLParser
except ImportError:
from HTMLParser import HTMLParser
unescape = HTMLParser().unescape
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import html5lib
class Page:
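# Represents one page of a PEP 503 style "simple" package index: parses the HTML, exposes
# the distribution links it contains (with any data-requires-python markers) and derives
# candidate versions from the file names (wheel tags or VERSION_REGEX for sdists).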
VERSION_REGEX = re.compile(r"(?i)([a-z0-9_\-.]+?)-(?=\d)([a-z0-9_.!+-]+)")
SUPPORTED_FORMATS = [
".tar.gz",
".whl",
".zip",
".tar.bz2",
".tar.xz",
".tar.Z",
".tar",
]
def __init__(self, url: str, content: str, headers: Dict[str, Any]) -> None:
if not url.endswith("/"):
url += "/"
self._url = url
encoding = None
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
encoding = params["charset"]
self._content = content
if encoding is None:
self._parsed = html5lib.parse(content, namespaceHTMLElements=False)
else:
self._parsed = html5lib.parse(
content, transport_encoding=encoding, namespaceHTMLElements=False
)
@property
def versions(self) -> Iterator[Version]:
seen = set()
for link in self.links:
version = self.link_version(link)
if not version:
continue
if version in seen:
continue
seen.add(version)
yield version
@property
def links(self) -> Iterator[Link]:
for anchor in self._parsed.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = self.clean_link(urllib.parse.urljoin(self._url, href))
pyrequire = anchor.get("data-requires-python")
pyrequire = unescape(pyrequire) if pyrequire else None
link = Link(url, self, requires_python=pyrequire)
if link.ext not in self.SUPPORTED_FORMATS:
continue
yield link
def links_for_version(self, version: Version) -> Iterator[Link]:
for link in self.links:
if self.link_version(link) == version:
yield link
def link_version(self, link: Link) -> Optional[Version]:
m = wheel_file_re.match(link.filename)
if m:
version = m.group("ver")
else:
info, ext = link.splitext()
match = self.VERSION_REGEX.match(info)
if not match:
return
version = match.group(2)
try:
version = Version.parse(version)
except ValueError:
return
return version
_clean_re = re.compile(r"[^a-z0-9$&+,/:;=?@.#%_\\|-]", re.I)
def clean_link(self, url: str) -> str:
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self._clean_re.sub(lambda match: "%%%2x" % ord(match.group(0)), url)
class LegacyRepository(PyPiRepository):
def __init__(
self,
name: str,
url: str,
config: Optional[Config] = None,
disable_cache: bool = False,
cert: Optional[Path] = None,
client_cert: Optional[Path] = None,
) -> None:
if name == "pypi":
raise ValueError("The name [pypi] is reserved for repositories")
self._packages = []
self._name = name
self._url = url.rstrip("/")
self._client_cert = client_cert
self._cert = cert
self._cache_dir = REPOSITORY_CACHE_DIR / name
self._cache = CacheManager(
{
"default": "releases",
"serializer": "json",
"stores": {
"releases": {"driver": "file", "path": str(self._cache_dir)},
"packages": {"driver": "dict"},
"matches": {"driver": "dict"},
},
}
)
self._authenticator = Authenticator(
config=config or Config(use_environment=True)
)
self._session = CacheControl(
self._authenticator.session, cache=FileCache(str(self._cache_dir / "_http"))
)
username, password = self._authenticator.get_credentials_for_url(self._url)
if username is not None and password is not None:
self._authenticator.session.auth = requests.auth.HTTPBasicAuth(
username, password
)
if self._cert:
self._authenticator.session.verify = str(self._cert)
if self._client_cert:
self._authenticator.session.cert = str(self._client_cert)
self._disable_cache = disable_cache
@property
def cert(self) -> Optional[Path]:
return self._cert
@property
def client_cert(self) -> Optional[Path]:
return self._client_cert
@property
def authenticated_url(self) -> str:
if not self._session.auth:
return self.url
parsed = urllib.parse.urlparse(self.url)
return "{scheme}://{username}:{password}@{netloc}{path}".format(
scheme=parsed.scheme,
username=quote(self._session.auth.username, safe=""),
password=quote(self._session.auth.password, safe=""),
netloc=parsed.netloc,
path=parsed.path,
)
def find_packages(self, dependency: "Dependency") -> List[Package]:
packages = []
constraint = dependency.constraint
if constraint is None:
constraint = "*"
if not isinstance(constraint, VersionConstraint):
constraint = parse_constraint(constraint)
allow_prereleases = dependency.allows_prereleases()
if isinstance(constraint, VersionRange):
if (
constraint.max is not None
and constraint.max.is_prerelease()
or constraint.min is not None
and constraint.min.is_prerelease()
):
allow_prereleases = True
key = dependency.name
if not constraint.is_any():
key = "{}:{}".format(key, str(constraint))
ignored_pre_release_versions = []
if self._cache.store("matches").has(key):
versions = self._cache.store("matches").get(key)
else:
page = self._get("/{}/".format(dependency.name.replace(".", "-")))
if page is None:
return []
versions = []
for version in page.versions:
if version.is_prerelease() and not allow_prereleases:
if constraint.is_any():
# we need this when all versions of the package are pre-releases
ignored_pre_release_versions.append(version)
continue
if constraint.allows(version):
versions.append(version)
self._cache.store("matches").put(key, versions, 5)
for package_versions in (versions, ignored_pre_release_versions):
for version in package_versions:
package = Package(
dependency.name,
version,
source_type="legacy",
source_reference=self.name,
source_url=self._url,
)
packages.append(package)
self._log(
"{} packages found for {} {}".format(
len(packages), dependency.name, str(constraint)
),
level="debug",
)
if packages or not constraint.is_any():
# we have matching packages, or constraint is not (*)
break
return packages
def package(
self, name: str, version: str, extras: Optional[List[str]] = None
) -> Package:
"""
Retrieve the release information.
This is a heavy task which takes time.
We have to download a package to get the dependencies.
We also need to download every file matching this release
to get the various hashes.
Note that this will be cached so the subsequent operations
should be much faster.
"""
try:
index = self._packages.index(Package(name, version, version))
return self._packages[index]
except ValueError:
package = super(LegacyRepository, self).package(name, version, extras)
package._source_type = "legacy"
package._source_url = self._url
package._source_reference = self.name
return package
def find_links_for_package(self, package: Package) -> List[Link]:
page = self._get("/{}/".format(package.name.replace(".", "-")))
if page is None:
return []
return list(page.links_for_version(package.version))
def _get_release_info(self, name: str, version: str) -> dict:
page = self._get("/{}/".format(canonicalize_name(name).replace(".", "-")))
if page is None:
raise PackageNotFound('No package named "{}"'.format(name))
data = PackageInfo(
name=name,
version=version,
summary="",
platform=None,
requires_dist=[],
requires_python=None,
files=[],
cache_version=str(self.CACHE_VERSION),
)
links = list(page.links_for_version(Version.parse(version)))
if not links:
raise PackageNotFound(
'No valid distribution links found for package: "{}" version: "{}"'.format(
name, version
)
)
urls = defaultdict(list)
files = []
for link in links:
if link.is_wheel:
urls["bdist_wheel"].append(link.url)
elif link.filename.endswith(
(".tar.gz", ".zip", ".bz2", ".xz", ".Z", ".tar")
):
urls["sdist"].append(link.url)
h = link.hash
if h:
h = link.hash_name + ":" + link.hash
files.append({"file": link.filename, "hash": h})
data.files = files
info = self._get_info_from_urls(urls)
data.summary = info.summary
data.requires_dist = info.requires_dist
data.requires_python = info.requires_python
return data.asdict()
def _get(self, endpoint: str) -> Optional[Page]:
url = self._url + endpoint
try:
response = self.session.get(url)
if response.status_code == 404:
return
response.raise_for_status()
except requests.HTTPError as e:
raise RepositoryError(e)
if response.status_code in (401, 403):
self._log(
"Authorization error accessing {url}".format(url=response.url),
level="warn",
)
return
if response.url != url:
self._log(
"Response URL {response_url} differs from request URL {url}".format(
response_url=response.url, url=url
),
level="debug",
)
return Page(response.url, response.content, response.headers)
|
py | 1a4de6ab051143561b5441cc2f92d4714fba7efd | '''
Client.Processor.* tests.
'''
from tests.integration.util import (
create_client
)
import pytest
import plaid
import json
from plaid.model.processor_token_create_request import ProcessorTokenCreateRequest
from plaid.model.processor_stripe_bank_account_token_create_request import ProcessorStripeBankAccountTokenCreateRequest
def test_stripe_processor_token():
client = create_client()
# Just test the failure case - behavior here depends on the API keys used
with pytest.raises(plaid.ApiException) as e:
request = ProcessorStripeBankAccountTokenCreateRequest(
access_token='fakeAccessToken',
account_id='fakeAccountId',
)
client.processor_stripe_bank_account_token_create(request)
response = json.loads(e.body)
assert response['error_code'] == 'INVALID_INPUT'
def test_dwolla_processor_token():
client = create_client()
# Just test the failure case - behavior here depends on the API keys used
with pytest.raises(plaid.ApiException) as e:
request = ProcessorTokenCreateRequest(
access_token='fakeAccessToken',
account_id='fakeAccountId',
processor='dwolla'
)
client.processor_token_create(request)
response = json.loads(e.body)
assert response['error_code'] == 'INVALID_INPUT'
|
py | 1a4de709b22c210df2f7a64714fa848616179aa4 | # Generated by Django 3.2.7 on 2021-10-05 10:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fck', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'verbose_name_plural': 'categories'},
),
migrations.AddField(
model_name='category',
name='slug',
field=models.CharField(default='-', max_length=200),
preserve_default=False,
),
migrations.AlterField(
model_name='post',
name='status',
field=models.CharField(choices=[('DRAFT', 'Draft'), ('PUBLISHED', 'Published post')], default='DRAFT', max_length=32),
),
]
|
py | 1a4de728c7938f7dd5582fdc5a0353ddde8a1157 | from time import time
TIMEOUT = 30
class Users:
def __init__(self):
self.users_logged_in = {}
def add_user_activity(self, username):
self.users_logged_in[username] = time()
def remove_user(self, username):
self.users_logged_in.pop(username, None)
def get_num_logged_users(self):
t = time()
rm = []
for a in self.users_logged_in.keys():
if self.users_logged_in[a] + TIMEOUT < t:
rm.append(a)
for b in rm:
self.users_logged_in.pop(b, None)
return len(self.users_logged_in)
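# Illustrative usage sketch (not part of the original module); "alice" and "bob"
# are made-up user names showing how idle sessions expire after TIMEOUT seconds.
if __name__ == "__main__":
    users = Users()
    users.add_user_activity("alice")
    users.add_user_activity("bob")
    print(users.get_num_logged_users())  # 2: both active within TIMEOUT
    users.remove_user("bob")             # explicit logout
    print(users.get_num_logged_users())  # 1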
|
py | 1a4de986a276e42187cbaf2ad3d83475db61b9a9 | import pathlib
from setuptools import setup, find_packages
BASE_DIR = pathlib.Path(__file__).parent
PACKAGE_NAME = 'nlp_api'
VERSION = '0.0.01'
AUTHOR = 'Aivin V. Solatorio'
URL = 'https://github.com/avsolatorio/wb_nlp/app/nlp_api'
LICENSE = 'MIT'
DESCRIPTION = 'Python API'
INSTALL_REQUIRES = ['fastapi']
# Setting up
setup(
name=PACKAGE_NAME,
version=VERSION,
author=AUTHOR,
url=URL,
description=DESCRIPTION,
install_requires=INSTALL_REQUIRES,
packages=find_packages(include=['wb_nlp'])
)
|
py | 1a4de9fbd9a8a4bcaf481f6f337db310b4f640e7 | import re
# The rules for whether a file name is valid are based on
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
def is_valid_windows_file_name(file_name):
if is_reserved_word(file_name):
raise Exception(f'You entered {file_name}, but you cannot use any of these reserved '
f'words: {get_reserved_words()}')
elif contains_reserved_char(file_name):
raise Exception(f'You entered {file_name}, but you cannot use any of these characters: '
f'{get_reserved_char_list()}')
elif ends_in_space_or_period(file_name):
raise Exception(f'You entered {file_name}, but you cannot have a period or space at the end.')
return True
def get_reserved_char_list():
return ['<', '>', ':', '"', '/', '\\ (single backslash)', '|', '?', '*']
def contains_reserved_char(word: str):
"""
The following reserved chars are not permitted at all:
< (less than)
> (greater than)
: (colon)
" (double quote)
/ (forward slash)
\\ (single backslash (escaped here))
| (vertical bar or pipe)
? (question mark)
* (asterisk)
"""
    # single backslash, vertical bar, question mark and asterisk are escaped;
    # re.search (rather than re.match with a \S* prefix) is used so a reserved
    # character is caught anywhere in the name, even after a space
    return re.search(r'(<+|>+|:+|"+|/+|\\+|\|+|\?+|\*+)', word) is not None
def get_reserved_words():
return {
'CON', 'PRN', 'AUX', 'NUL', 'COM1', 'COM2', 'COM3', 'COM4', 'COM5',
'COM6', 'COM7', 'COM8', 'COM9', 'LPT1', 'LPT2', 'LPT3', 'LPT4', 'LPT5',
'LPT6', 'LPT7', 'LPT8', 'LPT9'
}
def is_reserved_word(word: str):
return word in get_reserved_words()
def ends_in_space_or_period(word: str):
return word.endswith(' ') or word.endswith('.')
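# Illustrative usage sketch (not part of the original module); the file names
# below are hypothetical and only show how the validator accepts or rejects input.
if __name__ == "__main__":
    print(is_valid_windows_file_name("report_2021.txt"))  # True: nothing reserved
    for bad_name in ("CON", "notes?.txt", "draft."):
        try:
            is_valid_windows_file_name(bad_name)
        except Exception as err:
            print(err)  # reserved word, reserved character, or trailing period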
|
py | 1a4deb0a945718790fb0e02a12399aa8a3ba6ba4 | """
This is a sample simulation that does not represent any particular biological system. It is just a showcase
of how create a Simulation object, add forces, and initialize the reporter.
In this simulation, a simple polymer chain of 10,000 monomers is
"""
import time
import numpy as np
import os, sys
import polychrom
from polychrom import simulation, starting_conformations, forces, forcekits
from polychrom.integrators import ActiveBrownianIntegrator, CorrelatedNoiseIntegrator
import openmm
from polychrom.hdf5_format import HDF5Reporter
from simtk import unit
from pathlib import Path
total_runs = 2500
runs_per_gpu = total_runs // 2
def run_sim(i, gpuid=None, timestep=170, ntimesteps=10000, blocksize=100):
""" Run a single simulation on GPU i."""
N=100
density = 0.224
r = (3 * N / (4 * 3.141592 * density)) ** (1/3)
print(f"Radius of confinement: {r}")
D = np.ones((N, 3))
rhos = 0.5*np.ones((1, N))
rhos[0, 0:20] = -0.5
rhos[0, 20:40] = 0.0
rhos[0, 60:80] = 0.0
timestep = timestep
collision_rate = 2.0
friction = collision_rate * (1.0/unit.picosecond)
conlen = 1.0 * unit.nanometer
mass = 100 * unit.amu
temperature = 300
kB = unit.BOLTZMANN_CONSTANT_kB * unit.AVOGADRO_CONSTANT_NA
kT = kB * temperature * unit.kelvin
particleD = unit.Quantity(D, kT/(friction * mass))
integrator = CorrelatedNoiseIntegrator(timestep, collision_rate, particleD, rhos)
if gpuid is None:
gpuid = f"{i % 4}"
traj = f"/net/dau/home/dkannan/simulations/corr_sameT/ensemble10000_100/run{i}"
Path(traj).mkdir(parents=True, exist_ok=True)
reporter = HDF5Reporter(folder=traj, max_data_length=100, overwrite=True)
sim = simulation.Simulation(
platform="CUDA",
integrator=integrator,
timestep=timestep,
temperature=temperature,
GPU=gpuid,
collision_rate=collision_rate,
N=N,
save_decimals=2,
PBCbox=False,
reporters=[reporter],
)
polymer = starting_conformations.grow_cubic(N, 5)
sim.set_data(polymer, center=True) # loads a polymer, puts a center of mass at zero
sim.set_velocities(v=np.zeros((N,3)))
sim.add_force(forces.spherical_confinement(sim, density=density, k=5.0))
sim.add_force(
forcekits.polymer_chains(
sim,
chains=[(0, None, False)],
# By default the library assumes you have one polymer chain
# If you want to make it a ring, or more than one chain, use self.setChains
# self.setChains([(0,50,True),(50,None,False)]) will set a 50-monomer ring and a chain from monomer 50 to the end
bond_force_func=forces.harmonic_bonds,
bond_force_kwargs={
"bondLength": 1.0,
"bondWiggleDistance": 0.3, # Bond distance will fluctuate +- 0.05 on average
},
angle_force_func=None,
angle_force_kwargs={},
nonbonded_force_func=forces.polynomial_repulsive,
nonbonded_force_kwargs={
"trunc": 3.0, # this will let chains cross sometimes
#'trunc':10.0, # this will resolve chain crossings and will not let chain cross anymore
},
except_bonds=True,
)
)
tic = time.perf_counter()
    for _ in range(ntimesteps):  # do ntimesteps blocks
        sim.do_block(blocksize)  # of blocksize timesteps each. Data is saved automatically.
toc = time.perf_counter()
print(f'Ran simulation in {(toc - tic):0.4f}s')
sim.print_stats() # In the end, print very simple statistics
reporter.dump_data() # always need to run in the end to dump the block cache to the disk
if __name__ == '__main__':
    # run the simulations with the same parameters (here all pinned to GPU 3)
#run_sim(1)
for i in range(1, 1 + 2*runs_per_gpu, 2):
run_sim(i, gpuid="3")
|
py | 1a4dec17b0ee5f363321e79ba4068cd3b544fbb6 | """
Command line tool to copy experiment metadata from one NeXus file to the other.
"""
import sys
import logging
import argparse
import freephil
from pathlib import Path
from . import (
version_parser,
full_copy_parser,
tristan_copy_parser,
)
from ..nxs_copy import CopyNexus, CopyTristanNexus
# Define a logger object and a formatter
logger = logging.getLogger("CopyNeXus")
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(levelname)s %(message)s")
# Phil scopes
general_scope = freephil.parse(
"""
input{
original_nexus = None
.type = path
.help = "NeXus file to be copied."
data_filename = None
.multiple = True
.type = path
.help = "HDF5 data file."
data_type = *images events
.type = choice
.help = "Type of data in the HDF5 file, can be either images or events."
simple_copy = False
.type = bool
.help = "If True, the full NeXus tree is copied."
skip = NXdata
.multiple = True
.optional = True
.type = str
.help = "NX_class object, or list of, to be skipped when copying metadata.
If called, it will always first skip NXdata."
}
"""
)
tristan_scope = freephil.parse(
"""
input {
tristan_nexus = None
.type = path
.help = "NeXus file associated with Tristan detector"
data_filename = None
.multiple = True
.type = path
.help = "HDF5 file with binned images"
experiment_type = stationary *rotation
.type = choice
.help = "Specify whether an experiment is stationary or a rotation scan. Defaults to rotation."
write_mode = r+ w *x a
.type = choice
.help = "Specify write mode for new NeXus file."
}
"""
)
# Parse command line arguments
parser = argparse.ArgumentParser(
description="Copy metadata from input NeXus file.",
parents=[version_parser],
)
parser.add_argument("--debug", action="store_const", const=True)
parser.add_argument(
"-c",
"--show-config",
action="store_true",
default=False,
dest="show_config",
help="Show the configuration parameters.",
)
# CLIs
def copy_nexus(args):
clai = general_scope.command_line_argument_interpreter()
working_phil = general_scope.fetch(clai.process_and_fetch(args.phil_args))
params = working_phil.extract()
working_phil.show()
logger.info("Copy metadata from one NeXus file to another.")
# Path to data file and original nexus file
data_file = [Path(d).expanduser().resolve() for d in params.input.data_filename]
nexus_file = Path(params.input.original_nexus).expanduser().resolve()
logger.info(f"NeXus file to be copied: {nexus_file}")
logger.info(f"Input data to be saved in NeXus file: {data_file}")
logger.info(f"Data type: {params.input.data_type}")
if params.input.simple_copy is True:
logger.info(f"{nexus_file} will be copied in its entirety.")
else:
logger.info(
f"The following groups will not be copied from NXentry of {nexus_file}: {params.input.skip}"
)
try:
if params.input.data_type == "images":
new_nxs = CopyNexus.images_nexus(
data_file,
nexus_file,
simple_copy=params.input.simple_copy,
skip_group=params.input.skip,
)
elif params.input.data_type == "events":
new_nxs = CopyNexus.pseudo_events_nexus(
data_file,
nexus_file,
)
logger.info(f"File {nexus_file} correctly copied to {new_nxs}.")
except Exception as err:
logger.info(f"File {nexus_file} could not be copied.")
logger.exception(err)
def copy_tristan_nexus(args):
clai = tristan_scope.command_line_argument_interpreter()
working_phil = tristan_scope.fetch(clai.process_and_fetch(args.phil_args))
params = working_phil.extract()
working_phil.show()
logger.info("Copy metadata from Tristan NeXus file.")
# Path to data and original nexus file
data_file = [Path(d).expanduser().resolve() for d in params.input.data_filename]
nexus_file = Path(params.input.tristan_nexus).expanduser().resolve()
logger.info(f"Working directory: {data_file[0].parent}")
logger.info(f"NeXus file to be copied: {nexus_file}")
logger.info(f"Input data to be saved in NeXus file: {data_file}")
try:
if params.input.experiment_type == "stationary":
logger.info(
"Copying metadata for a stationary dataset. \n"
"This means either a single image or a pump-probe experiment.\n"
"The 'scan_axis' will be a single scalar."
)
nxs_img = CopyTristanNexus.single_image_nexus(
data_file[0],
nexus_file,
params.input.write_mode,
)
elif params.input.experiment_type == "rotation":
logger.info(
"Copying metadata for a roation dataset. \n"
"This means either a multiple images or a multi sequences pump-probe experiment.\n"
)
if args.osc_angle:
logger.info(
f"Scan_axis will be a list of values defined by an oscillation angle of {args.osc_angle}."
)
elif args.num_bins:
logger.info(f"Scan_ axis will be a list of {args.num_bins} values.")
for filename in data_file:
nxs_img = CopyTristanNexus.multiple_images_nexus(
filename,
nexus_file,
params.input.write_mode,
args.osc_angle,
args.num_bins,
)
logger.info(
f"Experiment metadata correctly copied from {nexus_file} to {nxs_img}."
)
except Exception as err:
logger.info(f"File {nexus_file} could not be copied.")
logger.exception(err)
# Define subparsers
subparsers = parser.add_subparsers(
help="Choose copy methods.",
required=True,
dest="sub-command",
)
parser_general = subparsers.add_parser(
"gen",
aliases=["copy-file"],
description=("Copy experiment metadata to a new NeXus file."),
parents=[full_copy_parser],
)
parser_general.set_defaults(func=copy_nexus)
parser_tristan = subparsers.add_parser(
"tristan",
aliases=["copy-tristan"],
description=(
"Create a new NeXus file for binned images by copying the metadata from the original experiment NeXus file."
),
parents=[tristan_copy_parser],
)
parser_tristan.set_defaults(func=copy_tristan_nexus)
def main():
# Define a stream handler
CH = logging.StreamHandler(sys.stdout)
CH.setLevel(logging.DEBUG)
CH.setFormatter(formatter)
# Add handler to logger
logger.addHandler(CH)
args = parser.parse_args()
args.func(args)
main()
|
py | 1a4dec2724438e1df66aa8fb286e74979847df02 | def main(request, response):
try:
count = int(request.server.stash.take(request.GET[b"id"]))
except:
count = 0
if b"count" in request.GET:
return str(count)
request.server.stash.put(request.GET[b"id"], str(count + 1))
return u'body { color: red }'
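# The handler above implements a simple fetch counter: a request carrying a
# ?count parameter returns how many times the stylesheet with that id has been
# served so far, while any other request increments the counter in the stash and
# returns a minimal red-body stylesheet (presumably so a test can assert on the
# number of network fetches, e.g. for cache revalidation behaviour).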
|
py | 1a4dec6d13ceb244e45e7154cc07c869329a528f | #
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""test for str and repr
Make sure things can print, and in a nice form. Put all the print tests together so that running this test file alone
can inspect all the print messages in the project
"""
import torch
from torch import nn
from pytorch_quantization import calib
from pytorch_quantization import tensor_quant
from pytorch_quantization import nn as quant_nn
from pytorch_quantization.nn.modules.tensor_quantizer import TensorQuantizer
# pylint:disable=missing-docstring, no-self-use
class TestPrint():
def test_print_descriptor(self):
test_desc = tensor_quant.QUANT_DESC_8BIT_CONV2D_WEIGHT_PER_CHANNEL
print(test_desc)
def test_print_tensor_quantizer(self):
test_quantizer = TensorQuantizer()
print(test_quantizer)
def test_print_module(self):
class _TestModule(nn.Module):
def __init__(self):
super(_TestModule, self).__init__()
self.conv = nn.Conv2d(33, 65, 3)
self.quant_conv = quant_nn.Conv2d(33, 65, 3)
self.linear = nn.Linear(33, 65)
self.quant_linear = quant_nn.Linear(33, 65)
test_module = _TestModule()
print(test_module)
def test_print_calibrator(self):
print(calib.MaxCalibrator(7, 1, False))
hist_calibrator = calib.HistogramCalibrator(8, None, True)
hist_calibrator.collect(torch.rand(10))
print(hist_calibrator)
|
py | 1a4decf341966cee54d2db5c27d03c3b31491665 |
if __name__ != "__main__":
import csv
import time
import pandas as pd
class ObrasBot():
def __init__(self, browser, portal_url, categorias, veiculo, nome_csv, *colunas):
self.browser = browser
self.categorias = [c.upper() for c in categorias]
self.veiculo = [v.upper() for v in veiculo]
self.portal_url = portal_url
self.nome_csv = nome_csv
self.colunas = list(colunas)
        # open the site
self.browser.get(self.portal_url)
self.browser.maximize_window()
        # advanced search button
self.browser.find_element_by_xpath(
"/html/body/table/tbody/tr/td/table[6]/tbody/tr/td/form[1]/table[1]/tbody/tr[3]/td/a"
).click()
time.sleep(2)
def criaBase(self):
with open(self.nome_csv, 'w') as base:
writer = csv.DictWriter(base, self.colunas)
writer.writeheader()
def alimentaBase(self, obra, diretores, veiculo, distribuidor, classificacao):
with open(self.nome_csv, 'a') as base:
writer = csv.DictWriter(base, self.colunas)
writer.writerow({self.colunas[0]: obra,
self.colunas[1]: diretores,
self.colunas[2]: veiculo,
self.colunas[3]: distribuidor,
self.colunas[4]: classificacao})
def limpaBase(self):
        base = pd.read_csv(self.nome_csv, encoding='ISO-8859-1')  # or latin-1
base.drop_duplicates(inplace=True)
base.to_csv(self.nome_csv, index=False, encoding='ISO-8859-1')
self.browser.quit()
def portalObras(self, obra):
obra = obra.upper().replace('"', '')
        # clear the field
self.browser.find_element_by_xpath(
"/html/body/table/tbody/tr/td/table[6]/tbody/tr/td/form[1]/table[1]/tbody/tr[1]/td[2]/input").clear()
        # type in the title
titulo_br_input = self.browser.find_element_by_xpath(
"/html/body/table/tbody/tr/td/table[6]/tbody/tr/td/form[1]/table[1]/tbody/tr[1]/td[2]/input")
for o in obra:
time.sleep(0.1)
titulo_br_input.send_keys(o)
        # search ("consultar") button
self.browser.find_element_by_xpath(
"/html/body/table/tbody/tr/td/table[6]/tbody/tr/td/form[1]/table[2]/tbody/tr/td/a"
).click()
time.sleep(2)
linha = 1
while True:
try:
                # get the BR title and category shown in table 1
titulo_br_tabela = self.browser.find_element_by_xpath(
'//*[@id="lista"]/tbody/tr[' + str(linha) + ']/td[1]'
).text
categoria_tabela = self.browser.find_element_by_xpath(
'//*[@id="lista"]/tbody/tr[' + str(linha) + ']/td[4]'
).text
if titulo_br_tabela.strip().upper() == obra and categoria_tabela.strip().upper() in self.categorias:
                    # open the page of interest by clicking the button present in each row of table 1
self.browser.find_element_by_xpath('//*[@id="lista"]/tbody/tr[' + str(linha) + ']/td[5]/a').click()
time.sleep(2)
aux = 1
while True:
try:
                            # access table 2, filter by vehicle and again by category
categoria_tabela2 = self.browser.find_element_by_xpath(
'//*[@id="TRbl_report_ClassificacaoProcessoObraView'+str(aux)+'"]/td[4]'
).text
veiculo_tabela2 = self.browser.find_element_by_xpath(
'//*[@id="TRbl_report_ClassificacaoProcessoObraView'+str(aux)+'"]/td[3]'
).text
if categoria_tabela2.strip().upper() in self.categorias and veiculo_tabela2.strip().upper() in self.veiculo:
diretores_tabela = self.browser.find_element_by_xpath(
'//*[@id="TRbl_report_TbObra"]/tbody/tr[6]/td'
).text
                                # when there is more than one director
                                # it usually comes like this: Jurandir Muller/Roberto Tibiriçá/Claudia Priscilla
diretores_tabela = diretores_tabela.replace('Diretores:', '').replace('/', ' - ').replace('"', '').strip().upper()
veiculo = self.browser.find_element_by_xpath('//*[@id="TRbl_report_ClassificacaoProcessoObraView'+str(aux)+'"]/td[3]').text
distribuidor = self.browser.find_element_by_xpath('//*[@id="TRbl_report_ClassificacaoProcessoObraView'+str(aux)+'"]/td[5]').text
distribuidor = distribuidor.replace('"', "")
classificacao = self.browser.find_element_by_xpath('//*[@id="TRbl_report_ClassificacaoProcessoObraView'+str(aux)+'"]/td[6]').text
self.alimentaBase(obra, diretores_tabela, veiculo, distribuidor, classificacao)
aux += 1
except:
break
                    # go back to the previous page
self.browser.find_element_by_xpath('/html/body/table/tbody/tr/td/table[6]/tbody/tr/td/a').click()
linha += 1
except:
break
|
py | 1a4dedcbad3b9a80d2c4ca42fed5489de9233bca | # -*- coding: utf-8 -*-
# Copyright 2018-2019 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import streamlit as st
import pandas as pd
import time
st.title("Empty charts")
st.write(
"""
This file tests what happens when you pass an empty dataframe or `None` into
a chart.
In some cases, we handle it nicely. In others, we show an error. The reason
for the latter is because some chart types derive their configuration from
the dataframe you pass in at the start. So when there's no dataframe we
cannot detect that configuration.
"""
)
data = pd.DataFrame({"a": [1, 2, 3, 4], "b": [1, 3, 2, 4]})
spec = {
"mark": "line",
"encoding": {
"x": {"field": "a", "type": "quantitative"},
"y": {"field": "b", "type": "quantitative"},
},
}
st.subheader("Here are 4 empty charts")
st.vega_lite_chart(spec)
st.line_chart()
st.area_chart()
st.bar_chart()
st.write("Below is an empty pyplot chart (i.e. just a blank image)")
st.pyplot()
st.write("...and that was it.")
st.subheader("Here are 2 filled charts")
x = st.vega_lite_chart(spec)
x.vega_lite_chart(data, spec)
x = st.vega_lite_chart(spec)
time.sleep(0.2) # Sleep a little so the add_rows gets sent separately.
x.add_rows(data)
x = st.line_chart()
x.add_rows(data)
x = st.area_chart()
x.add_rows(data)
x = st.bar_chart()
x.add_rows(data)
st.subheader("Here is 1 empty map")
st.deck_gl_chart()
# TODO: Implement add_rows on DeckGL
# st.subheader('1 filled map')
# x = st.deck_gl_chart()
# x.add_rows({'lat': 0, 'lon': 0})
# TODO: write Python tests for these:
# (This manual test doesn't work anymore since errors break execution now)
# st.subheader('Here are 10 errors')
# st.write(1)
# st.vega_lite_chart({})
# st.write(2)
# st.vega_lite_chart(data, {})
# st.write(3)
# st.vega_lite_chart(data)
# st.write(4)
# st.vega_lite_chart()
# st.write(5)
# st.altair_chart()
# st.write(6)
# st.line_chart()
# st.write(7)
# st.area_chart()
# st.write(8)
# st.bar_chart()
# st.write(9)
# st._native_chart()
# st.write(10)
# st.map()
|
py | 1a4dee32d155e29dbd33a89d02a48998a725efac |
from torchvision.transforms import ToPILImage
from datasets.data_utils import DatasetOutput, default_transform
from typing import Callable
from PIL import Image
from .data_utils import slide_windows_over_img, DatasetOutput
import torch
import torch.nn as nn
from torch.utils.data import Dataset
class GenericImageDataset(Dataset):
"""Generic dataset which defines all basic operations for the images."""
def __init__(
self,
path_to_images: str,
get_sub_images: bool = False,
sub_images_nr_windows: int = 10,
sub_images_batch_size: int = 10,
sub_images_min_size: int = 30,
sub_images_max_size: int = 64,
sub_images_stride: float = 0.2,
classification_label: int = 0,
transform: Callable = default_transform,
**kwargs
):
self.path_to_images = path_to_images
self.transform = transform
self.classification_label = classification_label
# Sub images properties
self.get_sub_images = get_sub_images
self.sub_images_min_size = sub_images_min_size
self.sub_images_max_size = sub_images_max_size
self.sub_images_nr_windows = sub_images_nr_windows
self.sub_images_batch_size = sub_images_batch_size
self.sub_images_stride = sub_images_stride
self.pil_transformer = ToPILImage()
# Create store for data
self.store = None
def __getitem__(self, idx: int):
        # Read the image at the given index using the dataset-defined `.read_image`
img: Image = self.read_image(idx)
# Apply transformation to the image
tensor_img: torch.Tensor = self.transform(img)
sub_images: torch.Tensor = torch.tensor(0)
# Extract sub images if applicable
if self.get_sub_images:
sub_images = slide_windows_over_img(
tensor_img,
min_win_size=self.sub_images_min_size,
max_win_size=self.sub_images_max_size,
nr_windows=self.sub_images_nr_windows,
stride=self.sub_images_stride
)
return DatasetOutput(
image=tensor_img,
label=torch.tensor(self.classification_label),
idx=torch.tensor(idx).long(),
sub_images=sub_images
)
def read_image(self, idx: int):
"""Interface, returns an PIL Image using the index."""
pass
def __len__(self):
return len(self.store)
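# Minimal subclass sketch (an illustration, not part of the original file): it
# shows the contract a concrete dataset fulfils -- populate `self.store` and
# implement `read_image` so it returns a PIL Image for a given index. The
# folder-reading logic and class name are assumptions for the example only.
import os
class FolderImageDataset(GenericImageDataset):
    """Hypothetical dataset that reads images from a flat folder."""
    def __init__(self, path_to_images: str, **kwargs):
        super().__init__(path_to_images, **kwargs)
        # `store` is simply the sorted list of image paths found in the folder
        self.store = sorted(
            os.path.join(path_to_images, f) for f in os.listdir(path_to_images)
        )
    def read_image(self, idx: int):
        # Return a PIL Image; `transform` in __getitem__ turns it into a tensor
        return Image.open(self.store[idx]).convert("RGB")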
|
py | 1a4deeee564dfd4fc1c9a0ec798b65292d848f52 | # -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
__version__ = "0.0.1"
"""validation test package initialisation."""
|
py | 1a4def92cfc636f46285555db7bac4fd1e5aaee0 | #!/usr/bin/env python3
# dinkum_mas_unittest/test_data/multiple_testcases.py
'''
This is test program input to unittests for coding_standards.py.
It has several TestCase's defined.
It has NO violations of the coding standard.
'''
def foo() :
pass
import unittest
class Test_1 (unittest.TestCase) :
def test_1_first_thing(self) :
pass
class Test_2 (unittest.TestCase) :
def test_2_first_thing(self) :
pass
if __name__ == "__main__" :
# Run the unittests
unittest.main()
|
py | 1a4defbb028eaac072cea81c6dff83d4a2758763 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import timeit
import itertools
import argparse
import os
class OpArgMngr(object):
"""Operator argument manager for storing operator workloads."""
args = {}
@staticmethod
def add_workload(funcname, *args, **kwargs):
if "_specifier" not in kwargs:
_specifier = funcname
else:
_specifier = kwargs["_specififer"]
del kwargs["_specififer"]
if _specifier in OpArgMngr.args:
raise ValueError("duplicate {}".format(_specifier))
OpArgMngr.args[_specifier] = {'args': args, 'kwargs': kwargs, 'funcname': funcname}
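# Each add_workload call stores {'args': ..., 'kwargs': ..., 'funcname': ...}
# under a unique specifier; run_benchmark later resolves the (possibly dotted)
# funcname on each package module via get_op and times it with benchmark_helper.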
def generate_workloads():
array_pool = {}
shapes = []
for ndim in range(4):
shapes.extend(list(itertools.product(range(4), repeat=ndim)))
for shape in shapes:
name = 'x'.join(str(i) for i in shape)
if name in array_pool:
raise ValueError("duplicate array {}".format(name))
array_pool[name] = dnp.ones(shape)
return array_pool
def prepare_workloads():
pool = generate_workloads()
OpArgMngr.add_workload("zeros", (2, 2))
OpArgMngr.add_workload("einsum", "ii", pool['2x2'], optimize=False)
OpArgMngr.add_workload("unique", pool['1'], return_index=True, return_inverse=True, return_counts=True, axis=-1)
OpArgMngr.add_workload("dstack", (pool['2x1'], pool['2x1'], pool['2x1'], pool['2x1']))
OpArgMngr.add_workload("polyval", dnp.arange(10), pool['2x2'])
OpArgMngr.add_workload("ediff1d", pool['2x2'], pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("nan_to_num", pool['2x2'])
OpArgMngr.add_workload("tensordot", pool['2x2'], pool['2x2'], ((1, 0), (0, 1)))
OpArgMngr.add_workload("kron", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("cumsum", pool['3x2'], axis=0, out=pool['3x2'])
OpArgMngr.add_workload("random.shuffle", pool['3'])
OpArgMngr.add_workload("equal", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("not_equal", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("less", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("greater_equal", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("less_equal", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("maximum", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("minimum", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("sum", pool['2x2'], axis=0, keepdims=True, out=pool['1x2'])
OpArgMngr.add_workload("std", pool['2x2'], axis=0, ddof=0, keepdims=True, out=pool['1x2'])
OpArgMngr.add_workload("var", pool['2x2'], axis=0, ddof=1, keepdims=True, out=pool['1x2'])
OpArgMngr.add_workload("average", pool['2x2'], weights=pool['2'], axis=1, returned=True)
OpArgMngr.add_workload("histogram", pool['2x2'], bins=10, range=(0.0, 10.0))
OpArgMngr.add_workload("add", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("cross", pool['2'], pool['2'])
OpArgMngr.add_workload("linalg.eig", pool['3x3'])
OpArgMngr.add_workload("linalg.eigh", pool['3x3'])
OpArgMngr.add_workload("linalg.det", pool['3x3'])
OpArgMngr.add_workload("linalg.slogdet", pool['3x3'])
OpArgMngr.add_workload("linalg.matrix_rank", pool['3x3'], pool['1'], hermitian=False)
OpArgMngr.add_workload("linalg.svd", pool['3x3'])
OpArgMngr.add_workload("linalg.cholesky", pool['1x1'])
OpArgMngr.add_workload("linalg.qr", pool['3x3'])
OpArgMngr.add_workload("linalg.lstsq", pool['2x1'], pool['2'], rcond=None)
OpArgMngr.add_workload("linalg.eigvals", pool['1x1'])
OpArgMngr.add_workload("linalg.eigvalsh", pool['1x1'], UPLO='L')
OpArgMngr.add_workload("linalg.inv", pool['1x1'])
OpArgMngr.add_workload("linalg.pinv", pool['2x3x3'], pool['1'], hermitian=False)
OpArgMngr.add_workload("linalg.solve", pool['1x1'], pool['1'])
OpArgMngr.add_workload("linalg.tensorinv", pool['1x1'], ind=2)
OpArgMngr.add_workload("linalg.norm", pool['3x3'])
OpArgMngr.add_workload("linalg.tensorsolve", pool['1x1x1'], pool['1x1x1'], (2, 0, 1))
OpArgMngr.add_workload("tile", pool['2x2'], 1)
OpArgMngr.add_workload("trace", pool['2x2'])
OpArgMngr.add_workload("transpose", pool['2x2'])
OpArgMngr.add_workload("split", pool['3x3'], (0, 1, 2), axis=1)
OpArgMngr.add_workload("argmax", pool['3x2'], axis=-1)
OpArgMngr.add_workload("argmin", pool['3x2'], axis=-1)
OpArgMngr.add_workload("atleast_1d", pool['2'], pool['2x2'])
OpArgMngr.add_workload("atleast_2d", pool['2'], pool['2x2'])
OpArgMngr.add_workload("atleast_3d", pool['2'], pool['2x2'])
OpArgMngr.add_workload("indices", dimensions=(1, 2, 3))
OpArgMngr.add_workload("subtract", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("multiply", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("mod", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("remainder", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("divide", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("true_divide", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("power", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("lcm", pool['2x2'].astype('int32'), pool['2x2'].astype('int32'))
OpArgMngr.add_workload("diff", pool['2x2'], n=1, axis=-1)
OpArgMngr.add_workload("nonzero", pool['2x2'])
OpArgMngr.add_workload("tril", pool['2x2'], k=0)
OpArgMngr.add_workload("random.choice", pool['2'], size=(2, 2))
OpArgMngr.add_workload("take", pool['2'], dnp.array([1,0], dtype='int64'))
OpArgMngr.add_workload("clip", pool['2x2'], 0, 1)
OpArgMngr.add_workload("expand_dims", pool['2x2'], axis=0)
OpArgMngr.add_workload("broadcast_to", pool['2x2'], (2, 2, 2))
OpArgMngr.add_workload("full_like", pool['2x2'], 2)
OpArgMngr.add_workload("zeros_like", pool['2x2'])
OpArgMngr.add_workload("ones_like", pool['2x2'])
OpArgMngr.add_workload("bitwise_and", pool['2x2'].astype(int), pool['2x2'].astype(int))
OpArgMngr.add_workload("bitwise_xor", pool['2x2'].astype(int), pool['2x2'].astype(int))
OpArgMngr.add_workload("bitwise_or", pool['2x2'].astype(int), pool['2x2'].astype(int))
OpArgMngr.add_workload("copysign", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("arctan2", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("hypot", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("ldexp", pool['2x2'].astype(int), pool['2x2'].astype(int))
OpArgMngr.add_workload("logical_and", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("logical_or", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("logical_xor", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("random.uniform", low=0, high=1, size=1)
OpArgMngr.add_workload("random.exponential", scale=2, size=(2,2))
OpArgMngr.add_workload("random.rayleigh", scale=2, size=(2,2))
OpArgMngr.add_workload("random.weibull", a=2, size=(2,2))
OpArgMngr.add_workload("random.pareto", a=2, size=(2,2))
OpArgMngr.add_workload("random.power", a=2, size=(2,2))
OpArgMngr.add_workload("random.logistic", loc=2, scale=2, size=(2,2))
OpArgMngr.add_workload("random.gumbel", loc=2, scale=2, size=(2,2))
OpArgMngr.add_workload("where", pool['2x3'], pool['2x3'], pool['2x1'])
OpArgMngr.add_workload("fmax", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("fmin", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("fmod", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("may_share_memory", pool['2x3'][:0], pool['2x3'][:1])
OpArgMngr.add_workload("pad", pool['2x2'], pad_width=((1,2),(1,2)), mode="constant")
OpArgMngr.add_workload("prod", pool['2x2'], axis=1, dtype="float64", keepdims=False)
OpArgMngr.add_workload("diag", pool['2x2'], k=1)
OpArgMngr.add_workload("diagonal", pool['2x2x2'], offset=-1, axis1=0, axis2=1)
OpArgMngr.add_workload("diag_indices_from", pool['2x2'])
OpArgMngr.add_workload("bincount", dnp.arange(3, dtype=int), pool['3'], minlength=4)
OpArgMngr.add_workload("percentile", pool['2x2x2'], 80, axis=0, out=pool['2x2'],\
interpolation='midpoint')
OpArgMngr.add_workload("quantile", pool['2x2x2'], 0.8, axis=0, out=pool['2x2'],\
interpolation='midpoint')
OpArgMngr.add_workload("all", pool['2x2x2'], axis=(0, 1),\
out=dnp.array([False, False], dtype=bool), keepdims=False)
OpArgMngr.add_workload("any", pool['2x2x2'], axis=(0, 1),\
out=dnp.array([False, False], dtype=bool), keepdims=False)
OpArgMngr.add_workload("roll", pool["2x2"], 1, axis=0)
OpArgMngr.add_workload("rot90", pool["2x2"], 2)
OpArgMngr.add_workload("column_stack", (pool['3x3'], pool['3x3'], pool['3x3']))
OpArgMngr.add_workload("hstack", (pool['3x3'], pool['3x3'], pool['3x3']))
OpArgMngr.add_workload("triu", pool['3x3'])
OpArgMngr.add_workload("array_split", pool['2x2'], 2, axis=1)
OpArgMngr.add_workload("vsplit", pool['2x2'], 2)
OpArgMngr.add_workload("hsplit", pool['2x2'], 2)
OpArgMngr.add_workload("dsplit", pool['2x2x2'], 2)
OpArgMngr.add_workload("arange", 10)
OpArgMngr.add_workload("concatenate", (pool['1x2'], pool['1x2'], pool['1x2']), axis=0)
OpArgMngr.add_workload("append", pool['2x2'], pool['1x2'], axis=0)
OpArgMngr.add_workload("insert", pool['3x2'], 1, pool['1x1'], axis=0)
OpArgMngr.add_workload("delete", pool['3x2'], 1, axis=0)
OpArgMngr.add_workload("blackman", 12)
OpArgMngr.add_workload("eye", 5)
OpArgMngr.add_workload("hamming", 12)
OpArgMngr.add_workload("hanning", 12)
OpArgMngr.add_workload("linspace", 0, 10, 8, endpoint=False)
OpArgMngr.add_workload("logspace", 2.0, 3.0, num=4, base=2.0, dtype=onp.float32)
OpArgMngr.add_workload("matmul", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("mean", pool['2x2'], axis=0, keepdims=True)
OpArgMngr.add_workload("random.gamma", 1, size=(2, 3))
OpArgMngr.add_workload("random.normal", 1, size=(2, 3))
unary_ops = ['negative', 'reciprocal', 'abs', 'sign', 'rint', 'ceil', 'floor',
'bitwise_not', 'trunc', 'fix', 'square', 'sqrt', 'cbrt', 'exp',
'log', 'log10', 'log2', 'log1p', 'expm1', 'logical_not', 'isnan',
'isinf', 'isposinf', 'isneginf', 'isfinite', 'sin', 'cos', 'tan',
'arcsin', 'arccos', 'arctan', 'degrees', 'radians', 'sinh', 'cosh',
'tanh', 'arcsinh', 'arccosh', 'arctanh'] # 'rad2deg', 'deg2rad' cannot run without tvm
for unary_op in unary_ops:
if unary_op == "bitwise_not":
OpArgMngr.add_workload(unary_op, dnp.ones((2, 2), dtype=int))
else:
OpArgMngr.add_workload(unary_op, pool['2x2'])
def benchmark_helper(f, *args, **kwargs):
number = 10000
return timeit.timeit(lambda: f(*args, **kwargs), number=number) / number
def get_op(module, funcname):
funcname = funcname.split(".")
for fname in funcname:
module = getattr(module, fname)
return module
def run_benchmark(packages):
results = {}
for (k, v) in OpArgMngr.args.items():
result = {}
for (name, package) in packages.items():
print('{}.{} running...'.format(name, k))
op = get_op(package["module"], v["funcname"])
args = [package["data"](arg) for arg in v["args"]]
kwargs = {k: package["data"](v) for (k, v) in v["kwargs"].items()}
benchmark = benchmark_helper(op, *args, **kwargs)
result[name] = benchmark
results[k] = result
return results
def show_results(results):
print("{:>24}{:>24}{:>24}".format("name", "package", "time(us)"))
for (specifier, d) in results.items():
for (k, v) in d.items():
print("{:>24}{:>24}{:>24}".format(specifier, k, v * 10 ** 6))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('ffi_type')
parsed = parser.parse_args()
if parsed.ffi_type == "cython":
os.environ['MXNET_ENABLE_CYTHON'] = '1'
os.environ['MXNET_ENFORCE_CYTHON'] = '1'
elif parsed.ffi_type == "ctypes":
os.environ['MXNET_ENABLE_CYTHON'] = '0'
else:
raise ValueError("unknown ffi_type {}",format(parsed.ffi_type))
os.environ["MXNET_ENGINE_TYPE"] = "NaiveEngine"
import mxnet as mx
import numpy as onp
from mxnet import np as dnp
mx.npx.set_np()
packages = {
"onp": {
"module": onp,
"data": lambda arr: arr.asnumpy() if isinstance(arr, dnp.ndarray) else arr
},
"dnp": {
"module": dnp,
"data": lambda arr: arr
}
}
prepare_workloads()
results = run_benchmark(packages)
show_results(results)
|
py | 1a4deffa47c48cc5e66ae59fe9c7aaaf75defea7 | import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('img/entrada/folha-de-mamao-menor.jpg',0)
edges = cv.Canny(img,100,200)
plt.subplot(121),plt.imshow(img,cmap = 'gray')
plt.title('Original Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(edges,cmap = 'gray')
plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
plt.show() |
py | 1a4df0775c0ccb513c265f2096e08b5dc9b7325d | # coding=utf-8
# Copyright 2021 TF-Transformers Authors and The TensorFlow Authors.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tf_transformers.core import LegacyLayer
from tf_transformers.layers import dense_einsum
from tf_transformers.layers.attention import BartAttention
from tf_transformers.utils import tf_utils
class TransformerBART(LegacyLayer):
"""Transformer
This layer implements the Transformer from "Attention Is All You Need".
(https://arxiv.org/abs/1706.03762).
"""
def __init__(
self,
hidden_size,
num_attention_heads,
intermediate_size,
intermediate_activation,
use_auto_regressive,
attention_head_size=None,
dropout_rate=0.0,
attention_dropout_rate=0.0,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_bias=True,
use_decoder=False,
share_attention_layers=True,
layer_norm_epsilon=None,
is_training=False,
use_dropout=False,
name="transformer",
**kwargs,
):
"""
Args:
num_attention_heads: int, Number of attention heads.
intermediate_size: int, Size of the intermediate layer.
intermediate_activation: keras object, Activation for the intermediate layer.
attention_cfg: The config with which to instantiate `attention_cls`. Ignored
if attention_cls is a layer instance.
dropout_rate: float (between 0 and 1), Dropout probability
for the post-attention and output dropout.
attention_dropout_rate: float (between 0 and 1), Dropout probability
for within the attention layer.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
bias_constraint: Constraint for dense layer kernels.
share_attention_layers: To share same attention layers in decoder cross attentions
cross_attention_inside_encoder: Whether we want to use cross attention \
inside encoder.
is_decoder: bool
"""
super(TransformerBART, self).__init__(name=name, is_training=is_training, use_dropout=use_dropout, **kwargs)
# mostly embedding_size is same as projecting after attention
self._hidden_size = hidden_size
self._num_heads = num_attention_heads
self._intermediate_size = intermediate_size
self._intermediate_activation = intermediate_activation
self._attention_head_size = attention_head_size
self._dropout_rate = dropout_rate
self._attention_dropout_rate = attention_dropout_rate
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
self._use_bias = use_bias
self._use_decoder = use_decoder
self._layer_norm_epsilon = layer_norm_epsilon
self._is_training = is_training
self._use_dropout = use_dropout
self._use_auto_regressive = use_auto_regressive
def build(self, input_shape):
"""Build variables based on shape at run time.
Args:
input_shape ([input_word_embeddings 3D, attention_mask 3D]): input_word_embeddings
(b x s x h) and attention_mask (b x 1 x s)
Raises:
            ValueError: If the embedding size is not a multiple of the number
                of attention heads.
"""
input_tensor = input_shape[0]
input_tensor_shape = tf.TensorShape(input_tensor)
batch_size, sequence_length, embedding_size = input_tensor_shape
if not self._attention_head_size:
# If attention_head is None, then make sure
# it can be inferred from (embedding_size // self._num_heads)
if embedding_size % self._num_heads != 0:
raise ValueError(
"The input size (%d) is not a multiple of the number of attention "
"heads (%d)" % (embedding_size, self._num_heads)
)
self._attention_head_size = int(embedding_size // self._num_heads)
# Common kwargs
common_kwargs = dict(
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint,
)
# Self Attention Layer
self._attention_layer = BartAttention(
num_heads=self._num_heads,
head_size=self._attention_head_size,
dropout_rate=self._attention_dropout_rate,
name="self_attention",
is_training=self._is_training,
use_decoder=self._use_decoder,
use_auto_regressive=self._use_auto_regressive,
use_dropout=self._use_dropout,
**common_kwargs,
)
# Dense layer
self._attention_output_dense = dense_einsum.DenseEinsum(
output_shape=self._hidden_size, name="self_attention_output", **common_kwargs
)
# Attention Dropout
self._attention_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
# Self Attention Norm
self._attention_layer_norm = tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=self._layer_norm_epsilon,
dtype=tf.float32,
)
# Cross Attention for Decoder
if self._use_decoder:
# Cross Attention layer
self._cross_attention_layer = BartAttention(
num_heads=self._num_heads,
head_size=self._attention_head_size,
dropout_rate=self._attention_dropout_rate,
name="cross_attention",
is_training=self._is_training,
use_auto_regressive=self._use_auto_regressive,
use_decoder=self._use_decoder,
use_dropout=self._use_dropout,
**common_kwargs,
)
# Dense
self._cross_attention_output_dense = dense_einsum.DenseEinsum(
output_shape=self._hidden_size, name="cross_attention_output", **common_kwargs
)
# Norm
self._cross_attention_layer_norm = tf.keras.layers.LayerNormalization(
name="cross_attention_layer_norm",
axis=-1,
epsilon=self._layer_norm_epsilon,
dtype=tf.float32,
)
# Main Dense Layer after Attention, with activation
self._intermediate_dense = dense_einsum.DenseEinsum(
output_shape=self._intermediate_size,
activation=self._intermediate_activation,
# This layer is always float32 for numeric stability.
dtype=tf.float32,
name="intermediate",
**common_kwargs,
)
# intermediate Dense
self._output_dense = dense_einsum.DenseEinsum(output_shape=self._hidden_size, name="output", **common_kwargs)
self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
# Use float32 in layernorm for numeric stability.
self._output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm", axis=-1, epsilon=self._layer_norm_epsilon, dtype=tf.float32
)
super(TransformerBART, self).build(input_shape)
def get_config(self):
config = {
"hidden_size": self._hidden_size,
"num_attention_heads": self._num_heads,
"intermediate_size": self._intermediate_size,
"intermediate_activation": self._intermediate_activation,
"dropout_rate": self._dropout_rate,
"attention_dropout_rate": self._attention_dropout_rate,
"kernel_initializer": tf.keras.initializers.serialize(self._kernel_initializer),
"bias_initializer": tf.keras.initializers.serialize(self._bias_initializer),
"kernel_regularizer": tf.keras.regularizers.serialize(self._kernel_regularizer),
"bias_regularizer": tf.keras.regularizers.serialize(self._bias_regularizer),
"activity_regularizer": tf.keras.regularizers.serialize(self._activity_regularizer),
"kernel_constraint": tf.keras.constraints.serialize(self._kernel_constraint),
"bias_constraint": tf.keras.constraints.serialize(self._bias_constraint),
"is_training": self.is_training,
"use_auto_regressive": self._use_auto_regressive,
}
base_config = super(TransformerBART, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call_encoder(self, inputs, cache_key=None, cache_value=None):
"""
Training pipeline
"""
# b x s x h # b x s x s
input_tensor, attention_mask = inputs
# [from_tensor, to_tensor]
attention_inputs = [input_tensor, input_tensor]
if attention_mask is not None:
attention_inputs.append(attention_mask)
# attention_inputs = [from_tensor, to_tensor, attention_mask] ((b x s x 768))
attention_output, key, value = self._attention_layer(
attention_inputs, cache_key=cache_key, cache_value=cache_value
)
attention_output = self._attention_output_dense(attention_output)
attention_output = self._attention_dropout(attention_output, training=self._use_dropout)
attention_output = self._attention_layer_norm(input_tensor + attention_output)
        # mixed precision stability requires Normalization to be in tf.float32
attention_output = tf.cast(attention_output, dtype=tf_utils.get_dtype())
intermediate_output = self._intermediate_dense(attention_output)
layer_output = self._output_dense(intermediate_output)
layer_output = self._output_dropout(layer_output)
layer_output = self._output_layer_norm(layer_output + attention_output)
return layer_output, key, value
def call_decoder(self, inputs, cache_key=None, cache_value=None):
"""
Training pipeline
"""
input_tensor, attention_mask, encoder_output, decoder_encoder_mask = inputs
# Decoder Self Attention (Call goes to bart_attention.py call_training)
attention_inputs = [input_tensor, input_tensor]
if attention_mask is not None:
attention_inputs.append(attention_mask)
attention_output, key, value = self._attention_layer(
attention_inputs, cache_key=cache_key, cache_value=cache_value
)
# Self Attention Dense + Norm
attention_output = self._attention_output_dense(attention_output)
attention_output = self._attention_dropout(attention_output, training=self.use_dropout)
attention_output = self._attention_layer_norm(attention_output + input_tensor)
if self._use_decoder:
# Cross Attention
attention_output_copy = tf.identity(attention_output, name="attention_output_copy")
attention_inputs_for_decoder = [
attention_output_copy,
encoder_output,
decoder_encoder_mask,
]
# For auto-regressive we need this
            # cache_key has to be zeros, because there is nothing
            # to cache in cross_attention
cache_key_cross = None
cache_value_cross = None
if cache_key is not None and self._use_auto_regressive:
cache_key_cross = tf.zeros_like(cache_key)
cache_value_cross = tf.zeros_like(cache_value)
attention_output, _, _ = self._cross_attention_layer(
attention_inputs_for_decoder, cache_key=cache_key_cross, cache_value=cache_value_cross
)
attention_output = self._cross_attention_output_dense(attention_output)
attention_output = self._attention_dropout(attention_output, training=self.use_dropout)
attention_output_copy = tf.cast(attention_output_copy, dtype=tf_utils.get_dtype())
attention_output = self._cross_attention_layer_norm(attention_output_copy + attention_output)
attention_output = tf.cast(attention_output, dtype=tf_utils.get_dtype())
# Last Projection
intermediate_output = self._intermediate_dense(attention_output)
layer_output = self._output_dense(intermediate_output)
layer_output = self._output_dropout(layer_output)
layer_output = self._output_layer_norm(layer_output + attention_output)
layer_output = tf.cast(layer_output, dtype=tf_utils.get_dtype())
return layer_output, key, value
def call(self, inputs, mode="encoder", cache_key=None, cache_value=None):
"""Call
Args:
inputs ([embeddings 3D, attention_mask 3D]): List of [embeddings,
attention_mask]
mode (str, optional): [description]. Defaults to "encoder".
cache_key ([type], optional): [description]. Defaults to None.
cache_value ([type], optional): [description]. Defaults to None.
Returns:
[type]: [description]
"""
if self._use_decoder:
outputs = self.call_decoder(inputs, cache_key=cache_key, cache_value=cache_value)
else:
outputs = self.call_encoder(inputs, cache_key=cache_key, cache_value=cache_value)
return outputs
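# Hedged usage sketch (not from the original file): it shows how this layer is
# expected to be called in encoder mode with dummy tensors. The constructor
# values, the "gelu" activation string and the attention-mask shape are
# assumptions and should be checked against the tf_transformers version in use.
def _example_transformer_bart_usage():
    batch, seq, hidden = 2, 8, 64
    layer = TransformerBART(
        hidden_size=hidden,
        num_attention_heads=4,
        intermediate_size=4 * hidden,
        intermediate_activation="gelu",
        use_auto_regressive=False,
        layer_norm_epsilon=1e-12,
    )
    embeddings = tf.random.uniform((batch, seq, hidden))
    attention_mask = tf.ones((batch, 1, seq))  # (b x 1 x s), as described in build()
    # Returns (layer_output, cached key, cached value)
    layer_output, key, value = layer([embeddings, attention_mask], mode="encoder")
    return layer_output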
|
py | 1a4df0804a4f7839afb525e324804e9a29fde223 | #!/usr/bin/python
"""
Convert polarised CST element files to OSKAR scalar element pattern format.
"""
from __future__ import print_function
import sys
import numpy
def load_cst_file(filename):
""""
Loads a CST element pattern file into a numpy matrix.
Parameters
----------
filename : string
Path of the CST element pattern file to load.
Returns
-------
Matrix of values from the CST file.
"""
f = open(filename, 'r')
lines = f.readlines()
f.close()
X = []
for line in lines:
values = line.split()
if not len(values) == 8:
continue
else:
x_all = numpy.array(values, dtype=numpy.dtype('f8'))
X.append(x_all)
return numpy.array(X, dtype=numpy.dtype('f8'))
def convert(cst_file_in, scalar_file_out):
"""
Calculates a scalar element pattern file from a CST element pattern file
Parameters
----------
cst_file_in : string
Input CST format element pattern file
scalar_file_out : string
Output scalar format element pattern file
Notes
-----
This function is designed to be used to create scalar element input files
for the oskar_fit_element_data application.
"""
# Load the CST element pattern data.
X = load_cst_file(cst_file_in)
# Only require columns for:
# Theta, Phi, Abs(Theta), Phase(Theta), Abs(Phi), Phase(Phi)
X = numpy.copy(X[:, [0, 1, 3, 4, 5, 6]])
# Discard any data at values of phi >= 360 degrees,
# as any duplicated entries will cause this method to fail.
X = X[X[:, 1] < 360.0, :]
# Generate the rotated data for Y from X by adding 90 degrees to the phi
# values
Y = numpy.copy(X)
Y[:, 1] += 90.0
Y[Y[:, 1] >= 360.0, 1] -= 360.0
# Linked column sort by phi and then theta for both X and Y.
X = X[numpy.lexsort((X[:, 0], X[:, 1])), :]
Y = Y[numpy.lexsort((Y[:, 0], Y[:, 1])), :]
# Check that the coordinate columns in X and Y now match.
assert numpy.sum(numpy.abs(X[:, 0] - Y[:, 0])) < 1e-6
assert numpy.sum(numpy.abs(X[:, 1] - Y[:, 1])) < 1e-6
# Generate scalar values from sorted data.
X_theta = X[:, 2] * numpy.exp(1j * numpy.radians(X[:, 3]))
X_phi = X[:, 4] * numpy.exp(1j * numpy.radians(X[:, 5]))
Y_theta = Y[:, 2] * numpy.exp(1j * numpy.radians(Y[:, 3]))
Y_phi = Y[:, 4] * numpy.exp(1j * numpy.radians(Y[:, 5]))
s = X_theta * numpy.conj(X_theta) + X_phi * numpy.conj(X_phi) + \
Y_theta * numpy.conj(Y_theta) + Y_phi * numpy.conj(Y_phi)
# Take the sqrt to convert to a 'voltage'
s = numpy.sqrt(0.5 * s)
s_amp = numpy.absolute(s)
s_phase = numpy.angle(s, deg=True)
# Write scalar values to file Columns = (theta, phi, amp, phase).
o = numpy.column_stack((X[:, 0], X[:, 1], s_amp, s_phase))
numpy.savetxt(scalar_file_out, o,
fmt=['%12.4f', '%12.4f', '%20.6e', '%12.4f'])
if __name__ == "__main__":
if len(sys.argv) < 3:
print("Usage: oskar_convert_cst_to_scalar.py "
"<input CST file> <output scalar file>")
sys.exit(1)
convert(sys.argv[1], sys.argv[2])
|
py | 1a4df0dcc16cc7b0a25634c6058dfdca8e6c2dba | import asyncio
from datetime import datetime
import io
import os
from pathlib import Path
from telethon import events, functions, types
from telethon.tl.types import InputMessagesFilterDocument
from . import *
@bot.on(phoenix_cmd(pattern=r"cmds"))
@bot.on(sudo_cmd(pattern=r"cmds", allow_sudo=True))
async def kk(event):
if event.fwd_from:
return
reply_to_id = event.message.id
if event.reply_to_msg_id:
reply_to_id = event.reply_to_msg_id
cmd = "ls phoenix/plugins"
thumb = phoenix_logo
process = await asyncio.create_subprocess_shell(
cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await process.communicate()
o = stdout.decode()
_o = o.split("\n")
o = "\n".join(_o)
OUTPUT = f"List of Plugins in bot :- \n\n{o}\n\n<><><><><><><><><><><><><><><><><><><><><><><><>\nHELP:- If you want to know the commands for a plugin, do :- \n.plinfo <plugin name> without the < > brackets. \nJoin {hell_grp} for help."
if len(OUTPUT) > 69:
with io.BytesIO(str.encode(OUTPUT)) as out_file:
out_file.name = "cmd_list.text"
phoenix_file = await bot.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
thumb=thumb,
reply_to=reply_to_id,
)
await edit_or_reply(phoenix_file, f"Output Too Large. This is the file for the list of plugins in bot.\n\n**BY :-** {PHOENIX_USER}")
await event.delete()
@bot.on(phoenix_cmd(pattern=r"send (?P<shortname>\w+)", outgoing=True))
@bot.on(sudo_cmd(pattern=r"send (?P<shortname>\w+)", allow_sudo=True))
async def send(event):
if event.fwd_from:
return
message_id = event.message.id
thumb = phoenix_logo
input_str = event.pattern_match.group(1)
omk = f"**• Plugin name ≈** `{input_str}`\n**• Uploaded by ≈** {phoenix_mention}\n\n⚡ **[ρнσєηιχ ]({chnl_link})** ⚡"
the_plugin_file = "./hellbot/plugins/{}.py".format(input_str)
if os.path.exists(the_plugin_file):
lauda = await event.client.send_file(
event.chat_id,
the_plugin_file,
thumb=thumb,
caption=omk,
force_document=True,
allow_cache=False,
reply_to=message_id,
)
await event.delete()
else:
await eod(event, "File not found..... Kk vaii !!!")
@bot.on(phoenix_cmd(pattern="install$", outgoing=True))
@bot.on(sudo_cmd(pattern="install$", allow_sudo=True))
async def install(event):
if event.fwd_from:
return
a = "__Installing.__"
b = 1
await event.edit(a)
if event.fwd_from:
return
if event.reply_to_msg_id:
try:
downloaded_file_name = await event.client.download_media( # pylint:disable=E0602
await event.get_reply_message(),
"./phoenix/plugins/" # pylint:disable=E0602
)
if "(" not in downloaded_file_name:
path1 = Path(downloaded_file_name)
shortname = path1.stem
load_module(shortname.replace(".py", ""))
if shortname in CMD_LIST:
string = "**Commands found in** `{}`\n".format((os.path.basename(downloaded_file_name)))
for i in CMD_LIST[shortname]:
string += " • `" + i
string += "`\n"
if b == 1:
a = "__Installing..__"
b = 2
else:
a = "__Installing...__"
b = 1
await eor(event, a)
return await eor(event, f"✅ **Installed module** :- `{shortname}` \n✨ BY :- {phoenix_mention}\n\n{string}\n\n ⚡ **[ʟɛɢɛռɖaʀʏ ᴀғ ρнσєηιχ]({chnl_link})** ⚡", link_preview=False)
return await eor(event, f"Installed module `{os.path.basename(downloaded_file_name)}`")
else:
os.remove(downloaded_file_name)
return await eod(event, f"**Failed to Install** \n`Error`\nModule already installed or unknown format")
except Exception as e:
await eod(event, f"**Failed to Install** \n`Error`\n{str(e)}")
return os.remove(downloaded_file_name)
@bot.on(phoenix_cmd(pattern=r"uninstall (?P<shortname>\w+)", outgoing=True))
@bot.on(sudo_cmd(pattern=r"uninstall (?P<shortname>\w+)", allow_sudo=True))
async def uninstall(kraken):
if kraken.fwd_from:
return
shortname = kraken.pattern_match["shortname"]
dir_path =f"./phoenix/plugins/{shortname}.py"
try:
remove_plugin(shortname)
os.remove(dir_path)
await eod(kraken, f"Uninstalled `{shortname}` successfully")
except OSError as e:
await kraken.edit("Error: %s : %s" % (dir_path, e.strerror))
@bot.on(phoenix_cmd(pattern=r"unload (?P<shortname>\w+)$"))
@bot.on(sudo_cmd(pattern=r"unload (?P<shortname>\w+)$", allow_sudo=True))
async def unload(event):
if event.fwd_from:
return
shortname = event.pattern_match["shortname"]
try:
remove_plugin(shortname)
await event.edit(f"Successfully unloaded `{shortname}`")
except Exception as e:
await event.edit(
"Successfully unloaded {shortname}\n{}".format(
shortname, str(e)
)
)
@bot.on(phoenix_cmd(pattern=r"load (?P<shortname>\w+)$"))
@bot.on(sudo_cmd(pattern=r"load (?P<shortname>\w+)$", allow_sudo=True))
async def load(event):
if event.fwd_from:
return
shortname = event.pattern_match["shortname"]
try:
try:
remove_plugin(shortname)
except BaseException:
pass
load_module(shortname)
await event.edit(f"Successfully loaded `{shortname}`")
except Exception as e:
await event.edit(
f"Sorry, could not load {shortname} because of the following error.\n{str(e)}"
)
CmdHelp("core").add_command(
"install", "<reply to a .py file>", "Installs the replied python file if suitable to Hêllẞø†'s codes."
).add_command(
"uninstall", "<plugin name>", "Uninstalls the given plugin from ρнσєηιχ. To get that again do .restart", "uninstall alive"
).add_command(
"load", "<plugin name>", "Loades the unloaded plugin to your userbot", "load alive"
).add_command(
"unload", "<plugin name>", "Unloads the plugin from your userbot", "unload alive"
).add_command(
"send", "<file name>", "Sends the given file from your userbot server, if any.", "send alive"
).add_command(
"cmds", None, "Gives out the list of modules in HellBot."
).add_warning(
"❌ Install External Plugin On Your Own Risk. We won't help if anything goes wrong after installing a plugin."
).add()
# hellbot
|
py | 1a4df0dedc1ec1a2c49ba6736a95eeaa019a6e5f | import datetime
import django
from django.test import TestCase
from SteamProphet.apps.SteamProphet.models import VotingPeriod, Week
from .. import services
class TestServices(TestCase):
def test_getCurrentVotingPeriodReturnsNoneIfThereIsNoTimePeriod(self):
self.assertIsNone(services.getCurrentVotingPeriod())
def test_getCurrentVotingPeriodReturnsNoneIfAllVotingPeriodsAreInTheFuture(self):
now = django.utils.timezone.now()
week1 = Week.objects.create(week=1)
week2 = Week.objects.create(week=2)
VotingPeriod.objects.create(
week=week1,
start=now + datetime.timedelta(days=7),
end=now + datetime.timedelta(days=10)
)
VotingPeriod.objects.create(
week=week2,
start=now + datetime.timedelta(days=14),
end=now + datetime.timedelta(days=17)
)
self.assertIsNone(services.getCurrentVotingPeriod())
def test_getCurrentVotingPeriodReturnsNoneIfAllVotingPeriodsAreInThePast(self):
now = django.utils.timezone.now()
week1 = Week.objects.create(week=1)
week2 = Week.objects.create(week=2)
VotingPeriod.objects.create(
week=week1,
            start=now - datetime.timedelta(days=17),
            end=now - datetime.timedelta(days=14)
)
VotingPeriod.objects.create(
week=week2,
            start=now - datetime.timedelta(days=10),
            end=now - datetime.timedelta(days=7)
)
self.assertIsNone(services.getCurrentVotingPeriod())
def test_getCurrentVotingPeriodsReturnsTheCurrentVotingPeriodIfVotingPeriodIsNow(self):
now = django.utils.timezone.now()
week1 = Week.objects.create(week=1)
week2 = Week.objects.create(week=2)
VotingPeriod.objects.create(
week=week1,
            start=now - datetime.timedelta(days=10),
            end=now - datetime.timedelta(days=7)
)
votingPeriod2 = VotingPeriod.objects.create(
week=week2,
start=now - datetime.timedelta(days=2),
end=now + datetime.timedelta(days=1),
)
self.assertEqual(votingPeriod2, services.getCurrentVotingPeriod())
|
py | 1a4df0e13633e066219b75cd501d5a03920a7309 | import os
import pwd
class NoSuchUser(Exception):
pass
class User(object):
def __init__(self, uid, gid, username, homedir):
self.uid = uid
self.gid = gid
self.username = username
self.homedir = homedir
def get_pegasus_dir(self):
return os.path.join(self.homedir, ".pegasus")
def get_ensembles_dir(self):
return os.path.join(self.homedir, ".pegasus", "ensembles")
def get_master_db(self):
return os.path.join(self.homedir, ".pegasus", "workflow.db")
def get_master_db_url(self):
return "sqlite:///%s" % self.get_master_db()
def __user_from_pwd(pw):
return User(pw.pw_uid, pw.pw_gid, pw.pw_name, pw.pw_dir)
def get_user_by_uid(uid):
try:
pw = pwd.getpwuid(uid)
return __user_from_pwd(pw)
except KeyError as e:
raise NoSuchUser(uid)
def get_user_by_username(username):
try:
pw = pwd.getpwnam(username)
return __user_from_pwd(pw)
except KeyError:
raise NoSuchUser(username)
except TypeError:
raise NoSuchUser(username)
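# A minimal usage sketch (assumes it runs on a host where the calling user
# exists in the local passwd database, which is true for normal accounts):
#
#   import os
#   me = get_user_by_uid(os.getuid())
#   print(me.username, me.get_master_db_url())
#
# Unknown uids/usernames raise NoSuchUser, so callers handling external input
# should be prepared to catch it.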
|
py | 1a4df29249c103bfc14315f8459e1afca6b45fca | #!/usr/bin/python
#
# sslsniff Captures data on read/recv or write/send functions of OpenSSL and
# GnuTLS
# For Linux, uses BCC, eBPF.
#
# USAGE: sslsniff.py [-h] [-p PID] [-c COMM] [-o] [-g] [-d]
#
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 12-Aug-2016 Adrian Lopez Created this.
# 13-Aug-2016 Mark Drayton Fix SSL_Read
# 17-Aug-2016 Adrian Lopez Capture GnuTLS and add options
#
from __future__ import print_function
import ctypes as ct
from bcc import BPF
import argparse
# arguments
examples = """examples:
./sslsniff # sniff OpenSSL and GnuTLS functions
./sslsniff -p 181 # sniff PID 181 only
./sslsniff -c curl # sniff curl command only
./sslsniff --no-openssl # don't show OpenSSL calls
./sslsniff --no-gnutls # don't show GnuTLS calls
"""
parser = argparse.ArgumentParser(
description="Sniff SSL data",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-p", "--pid", type=int, help="sniff this PID only.")
parser.add_argument("-c", "--comm",
help="sniff only commands matching string.")
parser.add_argument("-o", "--no-openssl", action="store_false", dest="openssl",
help="do not show OpenSSL calls.")
parser.add_argument("-g", "--no-gnutls", action="store_false", dest="gnutls",
help="do not show GnuTLS calls.")
parser.add_argument('-d', '--debug', dest='debug', action='count', default=0,
help='debug mode.')
args = parser.parse_args()
prog = """
#include <linux/ptrace.h>
#include <linux/sched.h> /* For TASK_COMM_LEN */
struct probe_SSL_data_t {
u64 timestamp_ns;
u32 pid;
char comm[TASK_COMM_LEN];
char v0[464];
u32 len;
};
BPF_PERF_OUTPUT(perf_SSL_write);
int probe_SSL_write(struct pt_regs *ctx, void *ssl, void *buf, int num) {
u32 pid = bpf_get_current_pid_tgid();
FILTER
struct probe_SSL_data_t __data = {0};
__data.timestamp_ns = bpf_ktime_get_ns();
__data.pid = pid;
__data.len = num;
bpf_get_current_comm(&__data.comm, sizeof(__data.comm));
if ( buf != 0) {
bpf_probe_read(&__data.v0, sizeof(__data.v0), buf);
}
perf_SSL_write.perf_submit(ctx, &__data, sizeof(__data));
return 0;
}
BPF_PERF_OUTPUT(perf_SSL_read);
BPF_HASH(bufs, u32, u64);
int probe_SSL_read_enter(struct pt_regs *ctx, void *ssl, void *buf, int num) {
u32 pid = bpf_get_current_pid_tgid();
FILTER
bufs.update(&pid, (u64*)&buf);
return 0;
}
int probe_SSL_read_exit(struct pt_regs *ctx, void *ssl, void *buf, int num) {
u32 pid = bpf_get_current_pid_tgid();
FILTER
u64 *bufp = bufs.lookup(&pid);
if (bufp == 0) {
return 0;
}
struct probe_SSL_data_t __data = {0};
__data.timestamp_ns = bpf_ktime_get_ns();
__data.pid = pid;
__data.len = PT_REGS_RC(ctx);
bpf_get_current_comm(&__data.comm, sizeof(__data.comm));
if (bufp != 0) {
bpf_probe_read(&__data.v0, sizeof(__data.v0), (char *)*bufp);
}
bufs.delete(&pid);
perf_SSL_read.perf_submit(ctx, &__data, sizeof(__data));
return 0;
}
"""
if args.pid:
prog = prog.replace('FILTER', 'if (pid != %d) { return 0; }' % args.pid)
else:
prog = prog.replace('FILTER', '')
if args.debug:
print(prog)
b = BPF(text=prog)
# It looks like SSL_read's arguments aren't available in a return probe so you
# need to stash the buffer address in a map on the function entry and read it
# on its exit (Mark Drayton)
#
if args.openssl:
b.attach_uprobe(name="ssl", sym="SSL_write", fn_name="probe_SSL_write",
pid=args.pid or -1)
b.attach_uprobe(name="ssl", sym="SSL_read", fn_name="probe_SSL_read_enter",
pid=args.pid or -1)
b.attach_uretprobe(name="ssl", sym="SSL_read",
fn_name="probe_SSL_read_exit", pid=args.pid or -1)
if args.gnutls:
b.attach_uprobe(name="gnutls", sym="gnutls_record_send",
fn_name="probe_SSL_write", pid=args.pid or -1)
b.attach_uprobe(name="gnutls", sym="gnutls_record_recv",
fn_name="probe_SSL_read_enter", pid=args.pid or -1)
b.attach_uretprobe(name="gnutls", sym="gnutls_record_recv",
fn_name="probe_SSL_read_exit", pid=args.pid or -1)
# define output data structure in Python
TASK_COMM_LEN = 16 # linux/sched.h
MAX_BUF_SIZE = 464 # Limited by the BPF stack
# Max size of the whole struct: 512 bytes
class Data(ct.Structure):
_fields_ = [
("timestamp_ns", ct.c_ulonglong),
("pid", ct.c_uint),
("comm", ct.c_char * TASK_COMM_LEN),
("v0", ct.c_char * MAX_BUF_SIZE),
("len", ct.c_uint)
]
# header
print("%-12s %-18s %-16s %-6s %-6s" % ("FUNC", "TIME(s)", "COMM", "PID",
"LEN"))
# process event
start = 0
def print_event_write(cpu, data, size):
print_event(cpu, data, size, "WRITE/SEND")
def print_event_read(cpu, data, size):
print_event(cpu, data, size, "READ/RECV")
def print_event(cpu, data, size, rw):
global start
event = ct.cast(data, ct.POINTER(Data)).contents
# Filter events by command
    if args.comm:
        # event.comm is raw bytes from the kernel; decode before comparing
        if args.comm != event.comm.decode():
            return
if start == 0:
start = event.timestamp_ns
time_s = (float(event.timestamp_ns - start)) / 1000000000
s_mark = "-" * 5 + " DATA " + "-" * 5
e_mark = "-" * 5 + " END DATA " + "-" * 5
truncated_bytes = event.len - MAX_BUF_SIZE
if truncated_bytes > 0:
e_mark = "-" * 5 + " END DATA (TRUNCATED, " + str(truncated_bytes) + \
" bytes lost) " + "-" * 5
print("%-12s %-18.9f %-16s %-6d %-6d\n%s\n%s\n%s\n\n" % (rw, time_s,
event.comm.decode(),
event.pid,
event.len,
s_mark,
event.v0.decode(),
e_mark))
b["perf_SSL_write"].open_perf_buffer(print_event_write)
b["perf_SSL_read"].open_perf_buffer(print_event_read)
while 1:
b.kprobe_poll()
|
py | 1a4df517bff40e98dc117da6a07fcdc03069bb5c | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import json
import time
import logging
from . import builder_config
from .utils import save_profiled_results, merge_info
from nn_meter.builder.backends import connect_backend
logging = logging.getLogger("nn-Meter")
def convert_models(backend, models, mode = 'predbuild', broken_point_mode = False):
""" convert the model to the needed format by backend, in order to increase efficiency when profiling on device.
@params:
backend (subclass instance of BaseBackend): applied backend instance
models (str or dict): the Dict of models or the path of the json file about models information
mode (str): the mode for running models, including ['ruletest', 'predbuild']
broken_point_mode (boolean): broken_point_mode will skip all models have attributes "converted_model"
"""
if isinstance(models, str):
save_name = os.path.basename(models)
with open(models, 'r') as fp:
models = json.load(fp)
else:
save_name = "converted_results.json"
workspace_path = builder_config.get('WORKSPACE', mode)
model_save_path = os.path.join(workspace_path, 'models')
os.makedirs(model_save_path, exist_ok=True)
info_save_path = os.path.join(workspace_path, "results")
os.makedirs(info_save_path, exist_ok=True)
# convert models
count = 0
for module in models.values():
for id, model in module.items():
if broken_point_mode and 'converted_model' in model:
continue
try:
model_path = model['model']
converted_model = backend.convert_model(model_path, model_save_path, model['shapes'])
model['converted_model'] = converted_model
except Exception as e:
open(os.path.join(info_save_path, "convert_error.log"), 'a').write(f"{id}: {e}\n")
# save information to json file for per 50 models
count += 1
if count % 50 == 0:
with open(os.path.join(info_save_path, save_name), 'w') as fp:
json.dump(models, fp, indent=4)
logging.keyinfo(f"{count} models complete. Still converting... Save the intermediate results to {os.path.join(info_save_path, save_name)}.")
with open(os.path.join(info_save_path, save_name), 'w') as fp:
json.dump(models, fp, indent=4)
logging.keyinfo(f"Complete convert all {count} models. Save the intermediate results to {os.path.join(info_save_path, save_name)}.")
# save information to json file
with open(os.path.join(info_save_path, save_name), 'w') as fp:
json.dump(models, fp, indent=4)
logging.keyinfo(f"Save the converted models information to {os.path.join(info_save_path, save_name)}")
return models
def profile_models(backend, models, mode = 'ruletest', metrics = ["latency"], save_name = None,
have_converted = False, log_frequency = 50, broken_point_mode = False, **kwargs):
""" run models with given backend and return latency of testcase models
@params:
backend (subclass instance of BaseBackend): applied backend instance
models (str or dict): the Dict of models or the path of the json file about models information
mode (str): the mode for running models, including ['ruletest', 'predbuild']
        metrics (list): required metrics to report. Only latency is supported as a metric for now.
save_name (str): the save name to store profiled results. The whole path should be `<workspace>/<mode-folder>/results/<save-name>`
have_converted (boolean): if the model have been converted to the needed format by backend, the model will not be converted
before profiling. The model path of `model['converted_model']` will be profiled on device directly. The conversion of
model could be done by appling `nn_meter.builder.convert_models`
broken_point_mode (boolean): broken_point_mode will check file in `<workspace>/<mode-folder>/results/<save-name>` (if the file exists)
and skip all models already have attributes "latency"
**kwargs: arguments for profiler, such as `taskset` and `close_xnnpack` in TFLite profiler
"""
if isinstance(models, str):
with open(models, 'r') as fp:
models = json.load(fp)
workspace_path = builder_config.get('WORKSPACE', mode)
model_save_path = os.path.join(workspace_path, 'models')
os.makedirs(model_save_path, exist_ok=True)
info_save_path = os.path.join(workspace_path, "results")
os.makedirs(info_save_path, exist_ok=True)
# in broken point model, if the output file `<workspace>/<mode-folder>/results/<save-name>` exists,
# load the existing latency and skip these model in profiling
if broken_point_mode and os.path.isfile(os.path.join(info_save_path, save_name)):
from nn_meter.builder.backend_meta.utils import read_profiled_results
with open(os.path.join(info_save_path, save_name), 'r') as fp:
profiled_models = read_profiled_results(json.load(fp))
for module_key, module in models.items():
if module_key not in profiled_models:
continue
for id, model in module.items():
if id in profiled_models[module_key]:
model.update(profiled_models[module_key][id])
# profile models and get metric results
count = 0
detail = builder_config.get('DETAIL', mode)
save_name = save_name or "profiled_results.json"
logging.info("Profiling ...")
for module in models.values():
for id, model in module.items():
if broken_point_mode and 'latency' in model and model['latency'].avg != 0:
continue
if have_converted: # the models have been converted for the backend
try:
model_path = model['converted_model']
profiled_res = backend.profile(model_path, metrics, model['shapes'], **kwargs)
for metric in metrics:
model[metric] = profiled_res[metric]
time.sleep(0.2)
count += 1
except Exception as e:
open(os.path.join(info_save_path, "profile_error.log"), 'a').write(f"{id}: {e}\n")
else: # the models have not been converted
try:
model_path = model['model']
profiled_res = backend.profile_model_file(model_path, model_save_path, model['shapes'], metrics, **kwargs)
for metric in metrics:
model[metric] = profiled_res[metric]
time.sleep(0.2)
count += 1
except Exception as e:
open(os.path.join(info_save_path, "profile_error.log"), 'a').write(f"{id}: {e}\n")
# save information to json file for per 50 models
if count > 0 and count % log_frequency == 0:
save_profiled_results(models, os.path.join(info_save_path, save_name), detail, metrics)
logging.keyinfo(f"{count} models complete. Still profiling... Save the intermediate results to {os.path.join(info_save_path, save_name)}.")
# save information to json file
save_profiled_results(models, os.path.join(info_save_path, save_name), detail, metrics)
logging.keyinfo(f"All {count} models profiling complete. Save all success profiled results to {os.path.join(info_save_path, save_name)}.")
return models
def sample_and_profile_kernel_data(kernel_type, sample_num, backend, sampling_mode = 'prior', configs = None, mark = '', detail = True,
metrics = ["latency"], **kwargs):
''' sample kernel configs and profile kernel model based on configs
'''
from nn_meter.builder.kernel_predictor_builder import generate_config_sample
# sample configs for kernel and generate models
models = generate_config_sample(kernel_type, sample_num, mark=mark,
sampling_mode=sampling_mode, configs=configs)
# connect to backend, run models and get latency
backend = connect_backend(backend_name=backend)
profiled_results = profile_models(backend, models, mode='predbuild', metrics=metrics, save_name=f"profiled_{kernel_type}.json")
return profiled_results
def build_predictor_for_kernel(kernel_type, backend, init_sample_num = 1000, finegrained_sample_num = 10,
iteration = 5, error_threshold = 0.1, predict_label = "latency", mark = ""):
"""
Build latency predictor for given kernel. This method contains three main steps:
1. sample kernel configs and profile kernel model based on configs;
2. initialize latency predictor of kernel based on the profiled data;
3. adopt adaptive sampler with iteratively doing step 1 for finegrained sampling to improve predictor performance
@params
kernel_type (str): the type of kernel
backend (str): the name of backend instance to profile models
init_sample_num (int, optional): the data size for predictor initialization. Defaults to 1000.
finegrained_sample_num (int, optional): the data size for adaptive sampling. For each data with error higher than
            error_threshold, #finegrained_sample_num data will be generated based on the large error data. Defaults to 10.
iteration (int, optional): the iteration for sampling and training. Initial sampling is regarded as iteration 1,
thus `iteration == 2` means one iteration for adaptive sampling. Defaults to 5.
        error_threshold (float, optional): the threshold of large error. Defaults to 0.1.
predict_label (str): the predicting label to build kernel predictor. Defaults to "latency"
"""
from nn_meter.builder.kernel_predictor_builder import build_predictor_by_data
workspace_path = builder_config.get('WORKSPACE', 'predbuild')
mark = mark if mark == "" else "_" + mark
# init predictor builder with prior data sampler
kernel_data = sample_and_profile_kernel_data(kernel_type, init_sample_num, backend, sampling_mode='prior', mark=f'prior{mark}')
# use current sampled data to build regression model, and locate data with large errors in testset
predictor, acc10, error_configs = build_predictor_by_data(kernel_type, kernel_data, backend, error_threshold=error_threshold, mark=f'prior{mark}',
save_path=os.path.join(workspace_path, "results"), predict_label=predict_label)
logging.keyinfo(f'Iteration 0: acc10 {acc10}, error_configs number: {len(error_configs)}')
for i in range(1, iteration):
# finegrained sampling and profiling for large error data
new_kernel_data = sample_and_profile_kernel_data(kernel_type, finegrained_sample_num, backend,
sampling_mode='finegrained', configs=error_configs, mark=f'finegrained{i}{mark}')
# merge finegrained data with previous data and build new regression model
kernel_data = merge_info(new_info=new_kernel_data, prev_info=kernel_data)
predictor, acc10, error_configs = build_predictor_by_data(kernel_type, kernel_data, backend, error_threshold=error_threshold, mark=f'finegrained{i}{mark}',
save_path=os.path.join(workspace_path, "results"), predict_label=predict_label)
logging.keyinfo(f'Iteration {i}: acc10 {acc10}, error_configs number: {len(error_configs)}')
return predictor, kernel_data
def build_initial_predictor_by_data(kernel_type, backend = None, init_sample_num = 20, error_threshold = 0.1, mark = '', predict_label = "latency"):
return build_predictor_for_kernel(kernel_type, backend, init_sample_num=init_sample_num, iteration=1, error_threshold=error_threshold, predict_label=predict_label, mark=f'{mark}')
def build_adaptive_predictor_by_data(kernel_type, kernel_data, backend = None, finegrained_sample_num = 20, error_threshold = 0.1, mark = '', predict_label = "latency"):
""" Run adaptive sampler in one iteration based
"""
workspace_path = builder_config.get('WORKSPACE', 'predbuild')
save_path = os.path.join(workspace_path, "results")
from nn_meter.builder.kernel_predictor_builder import build_predictor_by_data, collect_kernel_data
_, _, error_configs = build_predictor_by_data(kernel_type, kernel_data, backend = backend, error_threshold=error_threshold, save_path=None, predict_label=predict_label)
new_kernel_data = sample_and_profile_kernel_data(kernel_type, finegrained_sample_num, backend,
sampling_mode='finegrained', configs=error_configs, mark=mark)
# merge finegrained data with previous data and build new regression model
mark = mark if mark == "" else "_" + mark
kernel_data = merge_info(new_info=new_kernel_data, prev_info=collect_kernel_data(kernel_data))
predictor, acc10, error_configs = build_predictor_by_data(kernel_type, kernel_data, backend, error_threshold=error_threshold,
mark=f'finegrained{mark}', save_path=save_path, predict_label=predict_label)
logging.keyinfo(f'{mark}: acc10 {acc10}, error_configs number: {len(error_configs)}')
return predictor, kernel_data
def build_latency_predictor(backend):
"""
Build latency predictor for all kernel in `<workspace-path>/configs/predictorbuild_config.yaml`
@params
backend (str): the name of backend instance to profile models
"""
kernels = builder_config.get("KERNELS", 'predbuild')
for kernel_type in kernels:
init_sample_num = kernels[kernel_type]["INIT_SAMPLE_NUM"]
finegrained_sample_num = kernels[kernel_type]["FINEGRAINED_SAMPLE_NUM"]
iteration = kernels[kernel_type]["ITERATION"]
error_threshold = kernels[kernel_type]["ERROR_THRESHOLD"]
build_predictor_for_kernel(
kernel_type, backend,
init_sample_num = init_sample_num,
finegrained_sample_num = finegrained_sample_num,
iteration = iteration,
error_threshold = error_threshold
)
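# A minimal sketch of how these helpers are typically chained together
# (assumes the builder workspace config has already been initialized and that
# a backend named "tflite_cpu" is registered; the backend name, file path and
# kernel type below are illustrative, not guaranteed to exist):
#
#   backend = connect_backend(backend_name="tflite_cpu")
#   models = convert_models(backend, "results/models.json", mode="predbuild")
#   profile_models(backend, models, mode="predbuild", have_converted=True)
#   predictor, data = build_predictor_for_kernel("conv-bn-relu", "tflite_cpu")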
|
py | 1a4df58050b23efc732bc8ddc186f0479a907e57 | from setuptools import setup, find_packages
def readme():
with open('README.md') as f:
return f.read()
setup(name='hpfspecmatch',
version='0.1.1',
description='Matching HPF Spectra',
long_description=readme(),
url='https://github.com/gummiks/hpfspecmatch/',
author='Gudmundur Stefansson',
author_email='[email protected]',
install_requires=['barycorrpy','emcee','lmfit','hpfspec','crosscorr','pyde','astroquery','glob2'],
packages=['hpfspecmatch'],
license='GPLv3',
classifiers=['Topic :: Scientific/Engineering :: Astronomy'],
keywords='HPF Spectra Astronomy',
dependency_links=['http://github.com/user/repo/tarball/master#egg=package-1.0'],
include_package_data=True,
zip_safe=False
)
|
py | 1a4df5ddcb2550b666a8e5d6b60c78d60e0966e3 | import numpy as np
class KNN:
"""
K-neariest-neighbor classifier using L1 loss
"""
def __init__(self, k=1):
self.k = k
def fit(self, X, y):
self.train_X = X
self.train_y = y
def predict(self, X, num_loops=0):
'''
Uses the KNN model to predict clases for the data samples provided
Arguments:
X, np array (num_samples, num_features) - samples to run
through the model
num_loops, int - which implementation to use
Returns:
predictions, np array of ints (num_samples) - predicted class
for each sample
'''
if num_loops == 0:
dists = self.compute_distances_no_loops(X)
elif num_loops == 1:
dists = self.compute_distances_one_loop(X)
else:
dists = self.compute_distances_two_loops(X)
        if self.train_y.dtype == bool:
return self.predict_labels_binary(dists)
else:
return self.predict_labels_multiclass(dists)
def compute_distances_two_loops(self, X):
'''
Computes L1 distance from every sample of X to every training sample
Uses simplest implementation with 2 Python loops
Arguments:
X, np array (num_test_samples, num_features) - samples to run
Returns:
dists, np array (num_test_samples, num_train_samples) - array
with distances between each test and each train sample
'''
num_train = self.train_X.shape[0]
num_test = X.shape[0]
dists = np.zeros((num_test, num_train), np.float32)
for i_test in range(num_test):
for i_train in range(num_train):
dists[i_test][i_train] = np.sum(np.abs(X[i_test] - self.train_X[i_train]))
return dists
def compute_distances_one_loop(self, X):
'''
Computes L1 distance from every sample of X to every training sample
Vectorizes some of the calculations, so only 1 loop is used
Arguments:
X, np array (num_test_samples, num_features) - samples to run
Returns:
dists, np array (num_test_samples, num_train_samples) - array
with distances between each test and each train sample
'''
num_train = self.train_X.shape[0]
num_test = X.shape[0]
dists = np.zeros((num_test, num_train), np.float32)
for i_test in range(num_test):
dists[i_test] = np.sum(np.abs(X[i_test] - self.train_X), axis=1)
return dists
def compute_distances_no_loops(self, X):
'''
Computes L1 distance from every sample of X to every training sample
Fully vectorizes the calculations using numpy
Arguments:
X, np array (num_test_samples, num_features) - samples to run
Returns:
dists, np array (num_test_samples, num_train_samples) - array
with distances between each test and each train sample
'''
num_train = self.train_X.shape[0]
num_test = X.shape[0]
        # Using float32 to save memory - the default is float64
dists = np.zeros((num_test, num_train), np.float32)
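        # Broadcasting: X[:, None] has shape (num_test, 1, num_features), so the
        # subtraction yields (num_test, num_train, num_features); summing the
        # absolute values over the last axis gives the full L1 distance matrix.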
dists = np.abs(X[:, None] - self.train_X).sum(-1)
return dists
def predict_labels_binary(self, dists):
'''
Returns model predictions for binary classification case
Arguments:
dists, np array (num_test_samples, num_train_samples) - array
with distances between each test and each train sample
Returns:
pred, np array of bool (num_test_samples) - binary predictions
for every test sample
'''
num_test = dists.shape[0]
        pred = np.zeros(num_test, bool)
for i in range(num_test):
pred[i] = self.train_y[np.argsort(dists[i])[:self.k]].sum() > self.k / 2
return pred
def predict_labels_multiclass(self, dists):
'''
Returns model predictions for multi-class classification case
Arguments:
dists, np array (num_test_samples, num_train_samples) - array
with distances between each test and each train sample
Returns:
pred, np array of int (num_test_samples) - predicted class index
for every test sample
'''
        num_test = dists.shape[0]
        pred = np.zeros(num_test, int)
        for i in range(num_test):
            # Pick the most frequent class index among the k nearest samples
            nearest_labels = self.train_y[np.argsort(dists[i])[:self.k]]
            pred[i] = np.bincount(nearest_labels).argmax()
        return pred
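# Minimal usage sketch (synthetic data, purely to illustrate the API):
#
#   import numpy as np
#   X_train = np.random.rand(100, 3 * 32 * 32)
#   y_train = np.random.rand(100) > 0.5        # boolean labels -> binary path
#   X_test = np.random.rand(10, 3 * 32 * 32)
#
#   knn = KNN(k=3)
#   knn.fit(X_train, y_train)
#   predictions = knn.predict(X_test, num_loops=0)   # vectorized L1 distances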
|
py | 1a4df697b38726cb4c905f66b3cc03ed383f88b9 | def BMI_calculator(weight, height):
""" Function to calculate the BMI of an individual given their respective weight (lb) and height (in)"""
# Calculate BMI using lb / inches equation
bmi = ((weight)/(height * height)) * 703
return bmi
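# Example with illustrative numbers: a 150 lb, 65 in person has
# BMI = 150 / 65**2 * 703 ≈ 25.0
#
#   print(round(BMI_calculator(150, 65), 1))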
|
py | 1a4df7913743e00adb3d3be3ffc09e6782387687 |
"""Class :py:class:`CMDBBUtils` utilities for calib manager DB methods
==============================================================================
Usage ::
# Test: python lcls2/psana/psana/graphqt/CMDBUtils.py
# Import
from psana.graphqt.CMDBUtils import dbu
# See test at the EOF
See:
- :class:`CMWMain`
- :class:`CMWConfig`
- `on github <https://github.com/slac-lcls/lcls2>`_.
Created on 2018-04-10 by Mikhail Dubrovin
"""
import logging
logger = logging.getLogger(__name__)
#_name = 'DCMDBUtils'
#from psana.pyalgos.generic.Logger import logger
import psana.pscalib.calib.MDBUtils as dbu
ObjectId = dbu.ObjectId
connect_to_server = dbu.connect_to_server
#database_names = dbu.database_names
database = dbu.database
#collection_names = dbu.collection_names
collection = dbu.collection
timestamp_id = dbu.timestamp_id
doc_add_id_ts = dbu.doc_add_id_ts
db_prefixed_name = dbu.db_prefixed_name
time_and_timestamp= dbu.time_and_timestamp
exportdb = dbu.exportdb
importdb = dbu.importdb
out_fname_prefix = dbu.out_fname_prefix
save_doc_and_data_in_file = dbu.save_doc_and_data_in_file
#insert_data_and_doc = dbu.insert_data_and_doc
#document_info = dbu.document_info
#db_prefixed_name = dbu.db_prefixed_name # ('')
#delete_databases = dbu.delete_databases # (list_db_names)
#delete_collections= dbu.delete_collections # (dic_db_cols)
#collection_info = dbu.collection_info # (client, dbname, colname)
from psana.graphqt.CMConfigParameters import cp
def connect_client(host=None, port=None, user=cp.user, upwd=cp.upwd): # user=dbu.cc.USERNAME
_host = cp.cdb_host.value() if host is None else host
_port = cp.cdb_port.value() if port is None else port
#logger.debug('CMDBBUtils: Connect client to host: %s port: %d user: %s upwd: %s' % (_host, _port, user, upwd))
return dbu.connect_to_server(_host, _port, user, upwd)
# if cp.upwd else dbu.connect_to_server(_host, _port, cp.user)
def database_names(client=None):
"""
"""
if client is None:
client = connect_client()
return dbu.database_names(client)
def collection_names(db):
"""
"""
if isinstance(db, str):
client = connect_client()
db = dbu.database(client, db)
return dbu.collection_names(db)
def delete_databases(list_db_names):
"""Delete databases specified in the list_db_names
"""
client = connect_client()
logger.debug('Delete databases:\n %s' % ('\n '.join(list_db_names)))
dbu.delete_databases(client, list_db_names)
def delete_collections(dic_db_cols):
"""Delete collections specified in the dic_db_cols consisting of pairs {dbname:lstcols}
"""
msg = 'Delete collections:'
client = connect_client()
for dbname, lstcols in dic_db_cols.items():
db = dbu.database(client, dbname)
msg += '\nFrom database: %s delete collections:\n %s' % (dbname, '\n '.join(lstcols))
dbu.delete_collections(db, lstcols)
logger.debug(msg)
def delete_documents(dbname, colname, doc_ids):
"""Delete documents with _id-s in doc_ids from dbname, colname
"""
#logger.debug('Deleting documents:\n %s' % ('\n '.join(doc_ids)))
client = connect_client()
db, fs = dbu.db_and_fs(client, dbname)
col = collection(db, colname)
#msg = 'Deleted documents from db: %s col: %s' % (dbname, colname)
for s in doc_ids:
oid = ObjectId(s)
doc = dbu.find_doc(col, query={'_id':oid})
if doc is None: continue
#msg += '\n %s and its data' % doc.get('_id', 'N/A')
dbu.del_document_data(doc, fs)
dbu.delete_document_from_collection(col, oid)
#logger.debug(msg)
def insert_document_and_data(dbname, colname, doc, data):
client = connect_client()
db, fs = dbu.db_and_fs(client, dbname)
col = collection(db, colname)
id_data, id_doc = dbu.insert_data_and_doc(data, fs, col, **doc)
return id_data, id_doc
def get_data_for_doc(dbname, doc):
client = connect_client()
db, fs = dbu.db_and_fs(client, dbname)
return dbu.get_data_for_doc(fs, doc)
def collection_info(dbname, colname):
"""Delete collections specified in the dic_db_cols consisting of pairs {dbname:lstcols}
"""
client = connect_client()
return dbu.collection_info(client, dbname, colname)
def list_of_documents(dbname, colname):
client = connect_client()
db = database(client, dbname)
#db, fs = dbu.db_and_fs(client, dbname='cdb-cxi12345')
col = collection(db, colname)
docs = col.find().sort('_id', dbu.DESCENDING)
return [d for d in docs]
def document_info(doc, keys=('time_sec','time_stamp','experiment',\
'detector','ctype','run','id_data_ts','data_type','data_dtype', '_id'),\
fmt='%10s %24s %11s %24s %16s %4s %30s %10s %10s %24s'):
"""The same as dbu.document_info, but with different default parameters (added _id).
"""
return dbu.document_info(doc, keys, fmt)
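# Minimal usage sketch (assumes a reachable calibration DB server and valid
# credentials in CMConfigParameters; the db/collection names below are
# hypothetical examples):
#
#   client = connect_client()
#   print(database_names(client))
#   for doc in list_of_documents('cdb-cxi12345', 'cspad_0001'):
#       print(document_info(doc))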
# EOF
|
py | 1a4df870e26b739d125d8729dd9c207a718d6125 | # Copyright 2021 The Private Cardinality Estimation Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for data sets.
Represents real or simulated impression log data for multiple publishers.
"""
from collections import defaultdict
from copy import deepcopy
from os import listdir
from os.path import isfile, join
from pathlib import Path
from random import randint
from typing import Dict
from typing import Iterable
from typing import List
from wfa_planning_evaluation_framework.data_generators.publisher_data import (
PublisherData,
)
from wfa_planning_evaluation_framework.models.reach_point import ReachPoint
class DataSet:
"""Real or simulated impression log data for multiple publishers.
A DataSet represents a real or simulated configuration of impression log
data for a collection of related campaigns across multiple publishers.
It represents the basic unit across which modeling strategies are compared.
It is expected that this class will be sub-classed for each of the different
types of publisher overlap models that will be investigate. Thus, we might
have an IndependentDataSet, a SequentiallyCorrelatedDataSet, etc.
"""
def __init__(self, publisher_data_list: Iterable[PublisherData], name: str = None):
"""Constructor
Args:
publisher_data_list: An iterable list of PublisherDatas,
one for each of the publishers that comprise this DataSet.
name: If specified, a human-readable name that will be associated
to this DataSet. For example, it could be an encoding
of the parameters that were used to create this DataSet,
such as "homog_p=10_rep=3". If no name is given, then a random
digit string is assigned as the name.
"""
self._data = deepcopy(publisher_data_list)
total_audience = set()
for pub in self._data:
total_audience.update([id for id, _ in pub._data])
self._maximum_reach = len(total_audience)
if name:
self._name = name
else:
self._name = "{:012d}".format(randint(0, 1e12))
@property
def publisher_count(self):
"""Number of publishers represented in this DataSet."""
return len(self._data)
@property
def maximum_reach(self):
"""Total number of reachable people across all publishers."""
return self._maximum_reach
@property
def name(self):
"""Name of this DataSet."""
return self._name
def spend_by_impressions(self, impressions: Iterable[int]) -> List[float]:
"""Returns spend vector corresponding to a given impression vector.
Args:
impressions: Iterable of hypothetical impression buys, having
one value per publisher.
Returns:
List of corresponding spends. If I is the vector of impressions
and S is the returned vector of spends, then S[k] is the amount
that would need to be spent with the k-th publisher to obtain
I[k] impressions.
"""
return [
self._data[i].spend_by_impressions(impressions[i])
for i in range(len(self._data))
]
def impressions_by_spend(self, spends: Iterable[float]) -> List[int]:
"""Returns impression vector corresponding to a given spend vector.
Args:
spends: Iterable of hypothetical spend amounts, having
one value per publisher.
Returns:
List of corresponding impression counts. If S is the vector of
spends and I is the returned vector of impression counts, then
I[k] is the number of impressions that would be obtained for
a spend of S[k] with publisher k.
"""
return [
self._data[i].impressions_by_spend(spends[i])
for i in range(len(self._data))
]
def reach_by_impressions(
self, impressions: Iterable[int], max_frequency: int = 10
) -> ReachPoint:
"""Number of people reached for a given impression count.
Args:
impressions: A list of impression counts. The length of the list must
equal the value of publisher_count. Specifies the number of impressions
that each publisher will deliver.
max_frequency: int, The maximum frequency that should be counted. All
frequencies about this amount will be grouped into a single bucket.
Returns:
A ReachPoint object representing the k+ reach for each frequency
in the range 1..max_frequency.
"""
if len(impressions) != self.publisher_count:
raise ValueError(
"Invalid impression vector length. Got {}, expected {}".format(
len(impressions), self.publisher_count
)
)
counts = defaultdict(int)
spends = []
for i, imp in enumerate(impressions):
spends.append(self._data[i].spend_by_impressions(imp))
for id, freq in self._data[i].user_counts_by_impressions(imp).items():
counts[id] += freq
kplus_reaches = self._counts_to_histogram(counts, max_frequency)
return ReachPoint(impressions, kplus_reaches, spends)
def _counts_to_histogram(
self, counts: Dict[int, int], max_frequency: int
) -> List[int]:
"""Constructs k+ reach list from a dictionary of per-id reach counts."""
frequency_counts = [0] * max_frequency
for c in counts.values():
frequency_counts[min(c, max_frequency) - 1] += 1
# At this point, frequency_counts[k] represents the number of people who are
# reach exactly k+1 times, except that frequency_counts[max_frequency-1] contains
# the number of people reached at least max_frequency times. Now, we convert this
# to a list of k+ reach values.
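        # Worked example (illustrative): with max_frequency=3 and exact counts
        # [5, 2, 1] (exactly once, exactly twice, three or more times), the loop
        # below produces the k+ reach list [8, 3, 1].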
for i in range(max_frequency - 2, -1, -1):
frequency_counts[i] += frequency_counts[i + 1]
return frequency_counts
def reach_by_spend(
self, spends: Iterable[float], max_frequency: int = 10
) -> ReachPoint:
"""Number of people reached for a given spend.
Args:
spends: A list of spend amounts. The length of the list must
equal the value of publisher_count. Specifies the amount spent with
each publisher.
max_frequency: int, The maximum frequency that should be counted. All
frequencies about this amount will be grouped into a single bucket.
Returns:
A ReachPoint object representing the k+ reach for each frequency
in the range 1..max_frequency.
"""
if len(spends) != self.publisher_count:
raise ValueError(
"Invalid spends vector length. Got {}, expected {}".format(
len(spends), self.publisher_count
)
)
counts = defaultdict(int)
impressions = []
for i, publisher_spend in enumerate(spends):
user_counts = self._data[i].user_counts_by_spend(publisher_spend)
impressions.append(sum(user_counts.values()))
for id, freq in user_counts.items():
counts[id] += freq
kplus_reaches = self._counts_to_histogram(counts, max_frequency)
return ReachPoint(impressions, kplus_reaches, spends)
def write_data_set(self, parent_dir: str, dataset_dir: str = None) -> None:
"""Writes this DataSet object to disk.
Args:
parent_dir: The directory where the DataSet is to be written.
dataset:dir: The directory name of the DataSet itself. If not
specified, then the name given in the object constructor is
used. If no name was given in the object constructor, then a
random name is used.
"""
if not dataset_dir:
dataset_dir = self._name
fulldir = join(parent_dir, dataset_dir)
Path(fulldir).mkdir(parents=True, exist_ok=True)
for pdf in self._data:
with open(join(fulldir, pdf.name), "w") as file:
pdf.write_publisher_data(file)
file.close()
@classmethod
def read_data_set(cls, dirpath: str) -> "DataSet":
"""Reads a DataSet from disk.
A DataSet is given by a directory containing a collection of files,
each of which represents a PublisherDataSet. The name associated to
the DataSet object is the last component of the dirpath.
Args:
dirpath: Directory containing the PublisherDataSets that comprise
this DataSet.
Returns:
The DataSet object representing the contents of this directory.
"""
pdf_list = []
for f in sorted(listdir(dirpath)):
filepath = join(dirpath, f)
if isfile(filepath):
with open(filepath) as file:
try:
pdf = PublisherData.read_publisher_data(file)
pdf.name = f
pdf_list.append(pdf)
except (ValueError, RuntimeError) as e:
raise RuntimeError(
"In publisher file {}".format(filepath)
) from e
name = dirpath.split("/")[-1]
return cls(pdf_list, name)
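# Minimal usage sketch (assumes PublisherData objects pub1 and pub2 were built
# elsewhere, e.g. with the generators in this package; the directory below is
# hypothetical):
#
#   dataset = DataSet([pub1, pub2], name="example")
#   point = dataset.reach_by_spend([1000.0, 2000.0], max_frequency=5)
#   dataset.write_data_set("/tmp/datasets")
#   restored = DataSet.read_data_set("/tmp/datasets/example")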
|
py | 1a4df8a9039600358d821d9776256f0bc57d76c8 | import torch
torch.cuda.manual_seed(3)
torch.manual_seed(3)
import data_handler, tracking_nn
import sys
from torch.optim import Adam
flag = int(sys.argv[1])
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Working on", device)
batch_size = 32
cnn = tracking_nn.CNN().to(device)
if flag:
cnn.load_state_dict(torch.load("cnn_model.pt", map_location = device))
for param in cnn.parameters():
param.requires_grad = False
rnn = tracking_nn.RNN().to(device)
model = tracking_nn.Net(device, cnn, rnn).to(device)
paths = ["p11/2.a", "p11/3.a", "p16/3.a", "p17/2.a", "p17/3.a", "p1/2.a", "p18/2.a", "p18/3.a"]
data = data_handler.LegDataLoader(batch_size = batch_size)
# Train the nn
epochs = 1000
patience = 0
learning_rate = 0.0001
grid = 7
optimizer = Adam(model.parameters(), lr = learning_rate)
best_acc = float("Inf")
if flag:
save_path = "model.pt"
else:
save_path = "cnn_model.pt"
def eucl_dist(out, labels):
ret = 0
m = 0
for i in range(out.shape[0]):
yh = out[i]
p1_h = yh[0, :, :]
p2_h = yh[3, :, :]
detect_cell1 = p1_h.reshape(-1).argmax(axis = 0)
detect_cell2 = p2_h.reshape(-1).argmax(axis = 0)
x1, y1 = detect_cell1 // grid, detect_cell1 % grid
x2, y2 = detect_cell2 // grid, detect_cell2 % grid
d1 = (torch.sqrt((x1 + out[i, 1, x1, y1] - labels[i, 0, 0]) ** 2 + (y1 + out[i, 2, x1, y1] - labels[i, 0, 1]) ** 2)).item()
d2 = (torch.sqrt((x2 + out[i, 4, x2, y2] - labels[i, 1, 0]) ** 2 + (y2 + out[i, 5, x2, y2] - labels[i, 1, 1]) ** 2)).item()
if d1 > m:
m = d1
if d2 > m:
m = d2
ret += (d1 + d2) / 2
return m, ret / out.shape[0]
print("Started training...")
for epoch in range(epochs):
running_loss = 0
if epoch == 20 or epoch == 50:
learning_rate *= 0.1
optimizer = Adam(model.parameters(), lr = learning_rate)
f, input, label = data.load(0)
model.init_hidden()
c = 0
while(True):
input, label = input.to(device), label.to(device)
optimizer.zero_grad()
output = model.forward(input)
#print("labels", labels[0])
loss = model.loss(output, label)
loss.backward()
optimizer.step()
running_loss += loss.item() / input.shape[0]
c += 1
if f == -1:
break
if f:
model.init_hidden()
f, input, label = data.load(0)
#model.init_hidden()
model.detach_hidden()
print("epoch:{}, running loss: {}".format(epoch, running_loss / c))
running_loss = 0
if epoch >= patience:
with torch.no_grad():
acc = 0
dist = 0
c = 0
f, input, label = data.load(1)
model.init_hidden()
m = 0
while(True):
input, label = input.to(device), label.to(device)
output = model.forward(input)
acc += model.loss(output, label).item() / input.shape[0]
m1, d = eucl_dist(output, label)
dist += d
if m1 > m:
m = m1
c += 1
if f == -1:
break
if f:
model.init_hidden()
f, input, label = data.load(1)
#model.init_hidden()
if acc < best_acc:
best_acc = acc
print("Saving model with acc:", acc / c, ", mean dist:", dist / c / grid * 100, ", max dist:", m / grid * 100) #mean dist in cm
if flag:
torch.save(model, save_path)
else:
torch.save(cnn.state_dict(), save_path)
|
py | 1a4df90aa0fb702c65d19883ae63a8fe8c80fb1c | import tkinter as tk
ventana = tk.Tk()
mensaje = """Hola a todos
¿Cómo están?
Adiós"""
labelMessage = tk.Label(ventana, text=mensaje, justify=tk.CENTER)
labelMessage.pack()
ventana.mainloop() |
py | 1a4df95d223ea5dd0de0ba2053fdb6baae8ff662 | import numpy as np
import multidim
import itertools
import os
import hdbscan
import sys
import time
import pandas as pd
from copy import deepcopy
from matplotlib.patches import Ellipse
from ripser import ripser
from persim import plot_diagrams
from numba import jit, njit, prange
from sklearn import mixture
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.linear_model import RidgeClassifier
from multidim.covertree import CoverTree
from multidim.models import CDER
import matplotlib.pyplot as plt
np.set_printoptions(precision=2)
sys.path.append('../..')
from ATS import *
# -----------------------------------------------------------------------------
# ------------------------------ IMPORT DATA ----------------------------------
# -----------------------------------------------------------------------------
num_dgms = int(sys.argv[1])
N = num_dgms*6
d = 10
colors = ['red', 'yellow', 'magenta', 'green', 'blue', 'black']
os.system('mkdir cder_images')
os.system('rm -r cder_images/*')
score_train = []
score_test = []
for n in range(10):
X = testSetManifolds(numDgms = num_dgms, numPts = 200, permute = True)
F_labels = X.trainingLabel
labels = X.trainingLabel.unique()
X_dgm0 = X.Dgm0.tolist()
# We need to perturbate H_0 to use CDER.
for h0 in X_dgm0:
h0[:,0] = h0[:,0] + np.random.uniform(-0.05, 0.05, len(h0))
        h0[:,1][h0[:,1]==np.inf] = 10 # Change all inf values in H_0 to 10.
X_dgm1 = X.Dgm1.tolist()
i=0
for l in labels:
F_labels[F_labels == l]=i
i += 1
F = F_labels.tolist()
# -----------------------------------------------------------------------------
# ------------------------------ H_0 ------------------------------------------
# -----------------------------------------------------------------------------
X_train, X_test, F_train, F_test = train_test_split(X_dgm0, F, test_size=0.33, random_state=10)
# -----------------------------------------------------------------------------
# ------------------------------ GMM ------------------------------------------
# -----------------------------------------------------------------------------
print('Begin GMM...')
t0 = time.time()
X_train_temp = np.vstack(X_train)
X_train_temp = X_train_temp[:,1]
X_train_temp = X_train_temp.reshape((-1,1))
gmm_f_train=[]
for i in range(len(X_train)):
gmm_f_train.append(F_train[i]*np.ones(len(X_train[i])))
gmm_f_train = np.concatenate(gmm_f_train)
gmm = mixture.BayesianGaussianMixture(n_components=d, covariance_type='full', max_iter=int(10e4)).fit(X_train_temp, gmm_f_train)
ellipses = []
for i in range(len(gmm.means_)):
L, v = np.linalg.eig(gmm.covariances_[i])
temp = {'mean':gmm.means_[i], 'std':np.sqrt(L), 'rotation':v.transpose(), 'radius':max(np.sqrt(L)), 'entropy':gmm.weights_[i]}
ellipses.append(temp)
t1 = time.time()
print('Finish GMM. Time: {}'.format(t1-t0))
# -----------------------------------------------------------------------------
# ------------------------------ GMM features ---------------------------------
# -----------------------------------------------------------------------------
t0 = time.time()
X_train_temp = [dgm[:,1] for dgm in X_train]
X_train_features_0 = get_all_features(X_train_temp, ellipses, f_gaussian)
X_test_temp = [dgm[:,1] for dgm in X_test]
X_test_features_0 = get_all_features(X_test_temp, ellipses, f_gaussian)
t1 = time.time()
print('Features H_0:{}'.format(t1-t0))
# -----------------------------------------------------------------------------
# ------------------------------ H_1 ------------------------------------------
# -----------------------------------------------------------------------------
X_train, X_test, F_train, F_test = train_test_split(X_dgm1, F, test_size=0.33, random_state=10)
# -----------------------------------------------------------------------------
# ------------------------------ CDER -----------------------------------------
# -----------------------------------------------------------------------------
F_train_cder = F_train.copy()
for l in range(6):
for k, j in enumerate(F_train_cder):
if j == l:
F_train_cder[k] = colors[l]
pc_train = multidim.PointCloud.from_multisample_multilabel(X_train, F_train_cder)
ct_train = CoverTree(pc_train)
cder = CDER(parsimonious=True)
cder.fit(ct_train)
cder_result = cder.gaussians
ellipses = []
for c in cder_result:
temp = {key:c[key] for key in ['mean', 'std', 'rotation', 'radius', 'entropy']}
temp['std'] = 3*temp['std']
ellipses.append(temp)
for i in range(len(X_train)):
dgm = np.array(X_train[i])
plt.scatter(dgm[:,0], dgm[:,1], color='grey')
ellipses_plot_cder = []
for i in range(len(ellipses)):
e = ellipses[i]
ellipses_plot_cder.append(Ellipse(xy=e['mean'], width=e['std'][0], height=e['std'][1], angle=np.arccos(e['rotation'][0,0])))
for e in ellipses_plot_cder:
plt.gca().add_artist(e)
e.set_clip_box(plt.gca().bbox)
e.set_alpha(0.5)
e.set_facecolor([1,0,0])
plt.savefig('cder_images/{}_h1_cder_n_{}.png'.format(n, num_dgms))
plt.close()
# -----------------------------------------------------------------------------
# ------------------------------ CDER features --------------------------------
# -----------------------------------------------------------------------------
X_train_features_1 = get_all_features(X_train, ellipses, f_ellipse)
X_test_features_1 = get_all_features(X_test, ellipses, f_ellipse)
# -----------------------------------------------------------------------------
# ------------------------------ Ridge Classification ------------------------
# -----------------------------------------------------------------------------
X_train_features = np.column_stack((X_train_features_0, X_train_features_1))
X_test_features = np.column_stack((X_test_features_0, X_test_features_1))
ridge_model = RidgeClassifier().fit(X_train_features, F_train)
score_train.append(ridge_model.score(X_train_features, F_train))
score_test.append(ridge_model.score(X_test_features, F_test))
# print('train', score_train[-1])
# print('test', score_test[-1])
print(np.mean(score_train), np.std(score_train))
print(np.mean(score_test), np.std(score_test)) |
py | 1a4dfac7fb13435a7f2e7ea0086d1172c7d315f5 | from os.path import abspath, join, dirname
from sys import path
from envs.keys_and_passwords import *
PROJECT_ROOT = abspath(join(dirname(__file__), "../"))
APPS_DIR = abspath(join(dirname(__file__), "../", "apps"))
path.insert(0, PROJECT_ROOT)
path.insert(0, APPS_DIR)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Steven Skoczen', '[email protected]'),
)
DEFAULT_FROM_EMAIL = "[email protected]"
SERVER_EMAIL = DEFAULT_FROM_EMAIL
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'project.sqlite3', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': DB_PASSWORD, # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
TIME_ZONE = 'America/Vancouver'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = False
USE_L10N = True
MEDIA_ROOT = join(PROJECT_ROOT, "media_root")
MEDIA_URL = ''
STATIC_ROOT = join(PROJECT_ROOT, "collected_static")
STATIC_URL = '/static/'
STATICFILES_DIRS = ()
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
AUTH_PROFILE_MODULE = 'my_schools.Person'
FACEBOOK_APP_ID = '400474649994341'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '^7!$isr6jd!o+mgl1qy@+8197dm53uhp2i*vp8k4p#*g#8mg1n'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_facebook.auth_backends.FacebookBackend',
)
ROOT_URLCONF = 'project.urls'
TEMPLATE_DIRS = (
join(abspath(PROJECT_ROOT), "templates"),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# 'django.contrib.admindocs',
"analytical",
"annoying",
"compressor",
"django_extensions",
"django_facebook",
"lettuce.django",
"gunicorn",
"south",
"home",
"schools",
"events",
"my_schools",
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.request",
'django_facebook.context_processors.facebook',
)
STATICFILES_EXCLUDED_APPS = []
COMPRESS_ROOT = STATIC_ROOT
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
GOOGLE_ANALYTICS_PROPERTY_ID = "UA-35602695-1"
GAUGES_SITE_ID = ""
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
py | 1a4dfb313c6abecf60e6166af742576fcb089ce1 | #!/usr/bin/env python3
import argparse
import os
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex_boards.platforms import arty
from ring import *
# CRG ----------------------------------------------------------------------------------------------
class CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.rst = Signal()
self.clock_domains.cd_sys = ClockDomain()
# # #
clk = platform.request("clk100")
rst_n = platform.request("cpu_reset")
self.comb += self.cd_sys.clk.eq(clk)
self.specials += AsyncResetSynchronizer(self.cd_sys, ~rst_n)
platform.add_period_constraint(clk, 1e9/100e6)
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCMini):
def __init__(self, sys_clk_freq=int(100e6), mode=mode.DOUBLE, **kwargs):
platform = arty.Platform(variant="a7-35", toolchain="vivado")
from litex.build.generic_platform import Pins, IOStandard
platform.add_extension([("do", 0, Pins("B7"), IOStandard("LVCMOS33"))])
SoCMini.__init__(self, platform, sys_clk_freq,
ident = "LiteX SoC on Arty A7-35",
ident_version = True)
self.submodules.crg = CRG(platform, sys_clk_freq)
led = RingControl(platform.request("do"), mode, 12, sys_clk_freq)
self.submodules.ledring = led
self.add_csr("ledring")
self.add_uartbone()
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteX SoC on Arty A7-35")
parser.add_argument("--build", action="store_true", help="Build bitstream")
parser.add_argument("--mode-single", action="store_true", help="Build bitstream")
parser.add_argument("--load", action="store_true", help="Load bitstream")
parser.add_argument("--flash", action="store_true", help="Flash Bitstream")
builder_args(parser)
soc_core_args(parser)
args = parser.parse_args()
m = mode.DOUBLE
if args.mode_single:
m = mode.SINGLE
soc = BaseSoC(
sys_clk_freq = 100e6,
mode = m,
**soc_core_argdict(args)
)
builder = Builder(soc, **builder_argdict(args))
builder.build(run=args.build)
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bit"))
exit()
if __name__ == "__main__":
main()
|
py | 1a4dfb4ea21ec66e8915d2ccaf7997d0591c9ea0 | import os, sys
import math
import numpy as np
import cv2
from PIL import Image, ImageDraw, ImageFont
import argparse
def parse_args():
def str2bool(v):
return v.lower() in ("true", "t", "1")
parser = argparse.ArgumentParser()
# params for prediction engine
parser.add_argument("--use_gpu", type=str2bool, default=True)
# parser.add_argument("--ir_optim", type=str2bool, default=True)
# parser.add_argument("--use_tensorrt", type=str2bool, default=False)
# parser.add_argument("--use_fp16", type=str2bool, default=False)
parser.add_argument("--gpu_mem", type=int, default=500)
# params for text detector
parser.add_argument("--image_dir", type=str)
parser.add_argument("--det_algorithm", type=str, default='DB')
parser.add_argument("--det_model_path", type=str)
parser.add_argument("--det_limit_side_len", type=float, default=960)
parser.add_argument("--det_limit_type", type=str, default='max')
    # DB params
parser.add_argument("--det_db_thresh", type=float, default=0.3)
parser.add_argument("--det_db_box_thresh", type=float, default=0.5)
parser.add_argument("--det_db_unclip_ratio", type=float, default=1.6)
parser.add_argument("--max_batch_size", type=int, default=10)
parser.add_argument("--use_dilation", type=bool, default=False)
parser.add_argument("--det_db_score_mode", type=str, default="fast")
    # EAST params
parser.add_argument("--det_east_score_thresh", type=float, default=0.8)
parser.add_argument("--det_east_cover_thresh", type=float, default=0.1)
parser.add_argument("--det_east_nms_thresh", type=float, default=0.2)
    # SAST params
parser.add_argument("--det_sast_score_thresh", type=float, default=0.5)
parser.add_argument("--det_sast_nms_thresh", type=float, default=0.2)
parser.add_argument("--det_sast_polygon", type=bool, default=False)
# params for text recognizer
parser.add_argument("--rec_algorithm", type=str, default='CRNN')
parser.add_argument("--rec_model_path", type=str)
parser.add_argument("--rec_image_shape", type=str, default="3, 32, 320")
parser.add_argument("--rec_char_type", type=str, default='ch')
parser.add_argument("--rec_batch_num", type=int, default=6)
parser.add_argument("--max_text_length", type=int, default=25)
parser.add_argument("--use_space_char", type=str2bool, default=True)
parser.add_argument("--drop_score", type=float, default=0.5)
parser.add_argument("--limited_max_width", type=int, default=1280)
parser.add_argument("--limited_min_width", type=int, default=16)
parser.add_argument(
"--vis_font_path", type=str,
default=os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), 'doc/fonts/simfang.ttf'))
parser.add_argument(
"--rec_char_dict_path",
type=str,
default=os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
'pytorchocr/utils/ppocr_keys_v1.txt'))
# params for text classifier
parser.add_argument("--use_angle_cls", type=str2bool, default=False)
parser.add_argument("--cls_model_path", type=str)
parser.add_argument("--cls_image_shape", type=str, default="3, 48, 192")
parser.add_argument("--label_list", type=list, default=['0', '180'])
parser.add_argument("--cls_batch_num", type=int, default=6)
parser.add_argument("--cls_thresh", type=float, default=0.9)
parser.add_argument("--enable_mkldnn", type=str2bool, default=False)
parser.add_argument("--use_pdserving", type=str2bool, default=False)
# params for e2e
parser.add_argument("--e2e_algorithm", type=str, default='PGNet')
parser.add_argument("--e2e_model_path", type=str)
parser.add_argument("--e2e_limit_side_len", type=float, default=768)
parser.add_argument("--e2e_limit_type", type=str, default='max')
# PGNet parmas
parser.add_argument("--e2e_pgnet_score_thresh", type=float, default=0.5)
parser.add_argument(
"--e2e_char_dict_path", type=str,
default=os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
'pytorchocr/utils/ic15_dict.txt'))
parser.add_argument("--e2e_pgnet_valid_set", type=str, default='totaltext')
parser.add_argument("--e2e_pgnet_polygon", type=bool, default=True)
parser.add_argument("--e2e_pgnet_mode", type=str, default='fast')
# params .yaml
parser.add_argument("--det_yaml_path", type=str, default=None)
parser.add_argument("--rec_yaml_path", type=str, default=None)
parser.add_argument("--cls_yaml_path", type=str, default=None)
parser.add_argument("--e2e_yaml_path", type=str, default=None)
return parser.parse_args()
def get_default_config(args):
return vars(args)
def read_network_config_from_yaml(yaml_path):
if not os.path.exists(yaml_path):
raise FileNotFoundError('{} is not existed.'.format(yaml_path))
import yaml
with open(yaml_path, encoding='utf-8') as f:
res = yaml.safe_load(f)
if res.get('Architecture') is None:
raise ValueError('{} has no Architecture'.format(yaml_path))
return res['Architecture']
def AnalysisConfig(weights_path, yaml_path=None):
if not os.path.exists(os.path.abspath(weights_path)):
raise FileNotFoundError('{} is not found.'.format(weights_path))
if yaml_path is not None:
return read_network_config_from_yaml(yaml_path)
weights_basename = os.path.basename(weights_path)
weights_name = weights_basename.lower()
# supported_weights = ['ch_ptocr_server_v2.0_det_infer.pth',
# 'ch_ptocr_server_v2.0_rec_infer.pth',
# 'ch_ptocr_mobile_v2.0_det_infer.pth',
# 'ch_ptocr_mobile_v2.0_rec_infer.pth',
# 'ch_ptocr_mobile_v2.0_cls_infer.pth',
# ]
# assert weights_name in supported_weights, \
# "supported weights are {} but input weights is {}".format(supported_weights, weights_name)
if weights_name == 'ch_ptocr_server_v2.0_det_infer.pth':
network_config = {'model_type':'det',
'algorithm':'DB',
'Transform':None,
'Backbone':{'name':'ResNet', 'layers':18, 'disable_se':True},
'Neck':{'name':'DBFPN', 'out_channels':256},
'Head':{'name':'DBHead', 'k':50}}
elif weights_name == 'ch_ptocr_server_v2.0_rec_infer.pth':
network_config = {'model_type':'rec',
'algorithm':'CRNN',
'Transform':None,
'Backbone':{'name':'ResNet', 'layers':34},
'Neck':{'name':'SequenceEncoder', 'hidden_size':256, 'encoder_type':'rnn'},
'Head':{'name':'CTCHead', 'fc_decay': 4e-05}}
elif weights_name == 'ch_ptocr_mobile_v2.0_det_infer.pth':
network_config = {'model_type': 'det',
'algorithm': 'DB',
'Transform': None,
'Backbone': {'name': 'MobileNetV3', 'model_name': 'large', 'scale': 0.5, 'disable_se': True},
'Neck': {'name': 'DBFPN', 'out_channels': 96},
'Head': {'name': 'DBHead', 'k': 50}}
elif weights_name == 'ch_ptocr_mobile_v2.0_rec_infer.pth':
network_config = {'model_type':'rec',
'algorithm':'CRNN',
'Transform':None,
'Backbone':{'model_name':'small', 'name':'MobileNetV3', 'scale':0.5, 'small_stride':[1,2,2,2]},
'Neck':{'name':'SequenceEncoder', 'hidden_size':48, 'encoder_type':'rnn'},
'Head':{'name':'CTCHead', 'fc_decay': 4e-05}}
elif weights_name == 'ch_ptocr_mobile_v2.0_cls_infer.pth':
network_config = {'model_type':'cls',
'algorithm':'CLS',
'Transform':None,
'Backbone':{'name':'MobileNetV3', 'model_name':'small', 'scale':0.35},
'Neck':None,
'Head':{'name':'ClsHead', 'class_dim':2}}
elif weights_name == 'det_mv3_db_v2.0_infer.pth':
network_config = {'model_type': 'det',
'algorithm': 'DB',
'Transform': None,
'Backbone': {'name': 'MobileNetV3', 'model_name': 'large'},
'Neck': {'name': 'DBFPN', 'out_channels': 256},
'Head': {'name': 'DBHead', 'k': 50}}
elif weights_name == 'det_r50_vd_db_v2.0_infer.pth':
network_config = {'model_type': 'det',
'algorithm': 'DB',
'Transform': None,
'Backbone': {'name': 'ResNet', 'layers': 50},
'Neck': {'name': 'DBFPN', 'out_channels': 256},
'Head': {'name': 'DBHead', 'k': 50}}
elif weights_name == 'det_mv3_east_v2.0_infer.pth':
network_config = {'model_type': 'det',
'algorithm': 'EAST',
'Transform': None,
'Backbone': {'name': 'MobileNetV3', 'model_name': 'large'},
'Neck': {'name': 'EASTFPN', 'model_name': 'small'},
'Head': {'name': 'EASTHead', 'model_name': 'small'}}
elif weights_name == 'det_r50_vd_east_v2.0_infer.pth':
network_config = {'model_type': 'det',
'algorithm': 'EAST',
'Transform': None,
'Backbone': {'name': 'ResNet', 'layers': 50},
'Neck': {'name': 'EASTFPN', 'model_name': 'large'},
'Head': {'name': 'EASTHead', 'model_name': 'large'}}
elif weights_name == 'det_r50_vd_sast_icdar15_v2.0_infer.pth':
network_config = {'model_type': 'det',
'algorithm': 'SAST',
'Transform': None,
'Backbone': {'name': 'ResNet_SAST', 'layers': 50},
'Neck': {'name': 'SASTFPN', 'with_cab': True},
'Head': {'name': 'SASTHead'}}
elif weights_name == 'det_r50_vd_sast_totaltext_v2.0_infer.pth':
network_config = {'model_type': 'det',
'algorithm': 'SAST',
'Transform': None,
'Backbone': {'name': 'ResNet_SAST', 'layers': 50},
'Neck': {'name': 'SASTFPN', 'with_cab': True},
'Head': {'name': 'SASTHead'}}
elif weights_name == 'en_server_pgneta_infer.pth':
network_config = {'model_type': 'e2e',
'algorithm': 'PGNet',
'Transform': None,
'Backbone': {'name': 'ResNet', 'layers': 50},
'Neck': {'name': 'PGFPN'},
'Head': {'name': 'PGHead'}}
else:
network_config = {'model_type': 'rec',
'algorithm': 'CRNN',
'Transform': None,
'Backbone': {'model_name': 'small', 'name': 'MobileNetV3', 'scale': 0.5,
'small_stride': [1, 2, 2, 2]},
'Neck': {'name': 'SequenceEncoder', 'hidden_size': 48, 'encoder_type': 'rnn'},
'Head': {'name': 'CTCHead', 'fc_decay': 4e-05}}
# raise NotImplementedError
return network_config
def draw_e2e_res(dt_boxes, strs, img_path):
src_im = cv2.imread(img_path)
    for box, text_str in zip(dt_boxes, strs):
box = box.astype(np.int32).reshape((-1, 1, 2))
cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2)
cv2.putText(
src_im,
            text_str,
org=(int(box[0, 0, 0]), int(box[0, 0, 1])),
fontFace=cv2.FONT_HERSHEY_COMPLEX,
fontScale=0.7,
color=(0, 255, 0),
thickness=1)
return src_im
def draw_text_det_res(dt_boxes, img_path):
src_im = cv2.imread(img_path)
for box in dt_boxes:
box = np.array(box).astype(np.int32).reshape(-1, 2)
cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2)
return src_im
def resize_img(img, input_size=600):
"""
resize img and limit the longest side of the image to input_size
"""
img = np.array(img)
im_shape = img.shape
im_size_max = np.max(im_shape[0:2])
im_scale = float(input_size) / float(im_size_max)
img = cv2.resize(img, None, None, fx=im_scale, fy=im_scale)
return img
def draw_ocr_box_txt(image,
boxes,
txts,
scores=None,
drop_score=0.5,
font_path="./doc/simfang.ttf"):
h, w = image.height, image.width
img_left = image.copy()
img_right = Image.new('RGB', (w, h), (255, 255, 255))
import random
random.seed(0)
draw_left = ImageDraw.Draw(img_left)
draw_right = ImageDraw.Draw(img_right)
for idx, (box, txt) in enumerate(zip(boxes, txts)):
if scores is not None and scores[idx] < drop_score:
continue
color = (random.randint(0, 255), random.randint(0, 255),
random.randint(0, 255))
draw_left.polygon(box, fill=color)
draw_right.polygon(
[
box[0][0], box[0][1], box[1][0], box[1][1], box[2][0],
box[2][1], box[3][0], box[3][1]
],
outline=color)
box_height = math.sqrt((box[0][0] - box[3][0])**2 + (box[0][1] - box[3][
1])**2)
box_width = math.sqrt((box[0][0] - box[1][0])**2 + (box[0][1] - box[1][
1])**2)
if box_height > 2 * box_width:
font_size = max(int(box_width * 0.9), 10)
font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
cur_y = box[0][1]
for c in txt:
char_size = font.getsize(c)
draw_right.text(
(box[0][0] + 3, cur_y), c, fill=(0, 0, 0), font=font)
cur_y += char_size[1]
else:
font_size = max(int(box_height * 0.8), 10)
font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
draw_right.text(
[box[0][0], box[0][1]], txt, fill=(0, 0, 0), font=font)
img_left = Image.blend(image, img_left, 0.5)
img_show = Image.new('RGB', (w * 2, h), (255, 255, 255))
img_show.paste(img_left, (0, 0, w, h))
img_show.paste(img_right, (w, 0, w * 2, h))
return np.array(img_show)
def str_count(s):
"""
    Count the display length of a string: each Chinese character counts as one,
    while a single English letter, digit or space counts as half a Chinese
    character.
    args:
        s(string): the input string
    return(int):
        the display length measured in Chinese characters
"""
import string
count_zh = count_pu = 0
s_len = len(s)
en_dg_count = 0
for c in s:
if c in string.ascii_letters or c.isdigit() or c.isspace():
en_dg_count += 1
elif c.isalpha():
count_zh += 1
else:
count_pu += 1
return s_len - math.ceil(en_dg_count / 2)
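# Worked example (hedged, not from the original source): for 'OCR识别',
# en_dg_count is 3 and len(s) is 5, so str_count('OCR识别') returns
# 5 - ceil(3 / 2) = 3, i.e. the three ASCII letters occupy the width of
# roughly one and a half (rounded up to two) Chinese characters.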
def text_visual(texts,
scores,
img_h=400,
img_w=600,
threshold=0.,
font_path="./doc/simfang.ttf"):
"""
create new blank img and draw txt on it
args:
        texts(list): the texts to be drawn
scores(list|None): corresponding score of each txt
img_h(int): the height of blank img
img_w(int): the width of blank img
font_path: the path of font which is used to draw text
    return(array):
        the rendered text image as a numpy array
"""
if scores is not None:
assert len(texts) == len(
scores), "The number of txts and corresponding scores must match"
def create_blank_img():
        blank_img = np.ones(shape=[img_h, img_w], dtype=np.uint8) * 255
blank_img[:, img_w - 1:] = 0
blank_img = Image.fromarray(blank_img).convert("RGB")
draw_txt = ImageDraw.Draw(blank_img)
return blank_img, draw_txt
blank_img, draw_txt = create_blank_img()
font_size = 20
txt_color = (0, 0, 0)
font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
gap = font_size + 5
txt_img_list = []
count, index = 1, 0
for idx, txt in enumerate(texts):
index += 1
if scores[idx] < threshold or math.isnan(scores[idx]):
index -= 1
continue
first_line = True
while str_count(txt) >= img_w // font_size - 4:
tmp = txt
txt = tmp[:img_w // font_size - 4]
if first_line:
new_txt = str(index) + ': ' + txt
first_line = False
else:
new_txt = ' ' + txt
draw_txt.text((0, gap * count), new_txt, txt_color, font=font)
txt = tmp[img_w // font_size - 4:]
if count >= img_h // gap - 1:
txt_img_list.append(np.array(blank_img))
blank_img, draw_txt = create_blank_img()
count = 0
count += 1
if first_line:
new_txt = str(index) + ': ' + txt + ' ' + '%.3f' % (scores[idx])
else:
new_txt = " " + txt + " " + '%.3f' % (scores[idx])
draw_txt.text((0, gap * count), new_txt, txt_color, font=font)
# whether add new blank img or not
if count >= img_h // gap - 1 and idx + 1 < len(texts):
txt_img_list.append(np.array(blank_img))
blank_img, draw_txt = create_blank_img()
count = 0
count += 1
txt_img_list.append(np.array(blank_img))
if len(txt_img_list) == 1:
blank_img = np.array(txt_img_list[0])
else:
blank_img = np.concatenate(txt_img_list, axis=1)
return np.array(blank_img)
def base64_to_cv2(b64str):
import base64
data = base64.b64decode(b64str.encode('utf8'))
    data = np.frombuffer(data, np.uint8)
data = cv2.imdecode(data, cv2.IMREAD_COLOR)
return data
def draw_boxes(image, boxes, scores=None, drop_score=0.5):
if scores is None:
scores = [1] * len(boxes)
for (box, score) in zip(boxes, scores):
if score < drop_score:
continue
box = np.reshape(np.array(box), [-1, 1, 2]).astype(np.int64)
image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2)
return image |
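if __name__ == '__main__':
    # Hedged smoke test (not part of the original inference pipeline): resize a
    # synthetic white image so its longest side becomes 600 pixels, then draw one
    # made-up detection box on it with draw_boxes. All values are illustrative.
    _demo = np.full((300, 900, 3), 255, dtype=np.uint8)
    _resized = resize_img(_demo, input_size=600)
    print('resized shape:', _resized.shape)  # longest side is now 600
    _boxes = [np.array([[10, 10], [200, 10], [200, 60], [10, 60]])]
    _vis = draw_boxes(Image.fromarray(_resized), _boxes, scores=[0.9])
    cv2.imwrite('./draw_boxes_demo.jpg', _vis)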
py | 1a4dfb8241e3ae093a083ff4c9fe07524eeda1a1 | """
Generic setup of the data sources and the model training.
Based on:
https://github.com/fchollet/keras/blob/master/examples/mnist_mlp.py
and also on
https://github.com/fchollet/keras/blob/master/examples/mnist_cnn.py
"""
import logging
# Keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.regularizers import l2
from keras.callbacks import EarlyStopping, Callback
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
# Scipy
from scipy.stats import pearsonr
# Sklearn
from sklearn.model_selection import train_test_split
from GA.utils.utils import clean_data
# Helper: Early stopping.
early_stopper = EarlyStopping(monitor='val_loss', min_delta=0.1, patience=2, verbose=0, mode='auto')
# If the training loss is not dropping, the model is learning nothing after each epoch.
# That usually means there is little to learn beyond a trivial linear-like fit or cutoff value.
def compile_model_mlp(geneparam, input_shape):
"""Compile a sequential model.
Args:
geneparam (dict): the parameters of the network
Returns:
a compiled network.
"""
# Get our network parameters.
nb_layers = geneparam['nb_layers']
nb_neurons = geneparam['nb_neurons']
activation = geneparam['activation']
optimizer = geneparam['optimizer']
dropout = geneparam['dropout']
weight_decay = geneparam['weight_decay']
print("Architecture:%d,%s,%s,%d,%.2f%%,%.2f%%" % (nb_neurons, activation, optimizer,
nb_layers, dropout,weight_decay))
logging.info("Architecture:%d,%s,%s,%d,%.2f%%,%.2f%%" % (nb_neurons, activation, optimizer,
nb_layers, dropout, weight_decay))
model = Sequential()
# Add each layer.
for i in range(nb_layers):
# Need input shape for first layer.
if i == 0:
if weight_decay>0:
model.add(Dense(nb_neurons, activation=activation, input_dim=input_shape,
kernel_regularizer=l2(weight_decay)))
else:
model.add(Dense(nb_neurons, activation=activation, input_dim=input_shape))
else:
if weight_decay > 0:
model.add(Dense(nb_neurons, activation=activation, kernel_regularizer=l2(weight_decay)))
else:
model.add(Dense(nb_neurons, activation=activation))
if dropout > 0:
model.add(Dropout(dropout)) # dropout for each layer
# Output layer.
model.add(Dense(1))
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae'])
return model
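# Hedged usage sketch (illustrative values, not from the original project): build
# an MLP from a geneparam dict with the keys read above for a 5000-feature input.
#   example_geneparam = {'nb_layers': 2, 'nb_neurons': 64, 'activation': 'relu',
#                        'optimizer': 'adam', 'dropout': 0.2, 'weight_decay': 1e-4}
#   model = compile_model_mlp(example_geneparam, input_shape=5000)
#   model.summary()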
def compile_model_cnn(geneparam, nb_classes, input_shape):
"""Compile a sequential model.
Args:
geneparam (dict): the parameters of the genome
Returns:
a compiled network.
"""
# Get our network parameters.
nb_layers = geneparam['nb_layers']
nb_neurons = geneparam['nb_neurons']
activation = geneparam['activation']
optimizer = geneparam['optimizer']
logging.info("Architecture:%d,%s,%s,%d" % (nb_neurons, activation, optimizer, nb_layers))
model = Sequential()
# Add each layer.
for i in range(0, nb_layers):
# Need input shape for first layer.
if i == 0:
model.add(
Conv2D(nb_neurons, kernel_size=(3, 3), activation=activation, padding='same', input_shape=input_shape))
else:
model.add(Conv2D(nb_neurons, kernel_size=(3, 3), activation=activation))
if i < 2: # otherwise we hit zero
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(nb_neurons, activation=activation))
model.add(Dropout(0.5))
model.add(Dense(nb_classes, activation='softmax'))
# BAYESIAN CONVOLUTIONAL NEURAL NETWORKS WITH BERNOULLI APPROXIMATE VARIATIONAL INFERENCE
# need to read this paper
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
return model
class LossHistory(Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_batch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
def get_data(dataset):
markers, pheno = clean_data(dataset.trait, dataset.k)
x_train, x_test, y_train, y_test = train_test_split(markers, pheno, test_size=0.33, random_state=42)
return markers.shape[1], x_train, x_test, y_train, y_test
def train_and_score(geneparam, dataset):
"""Train the model, return test loss.
Args:
geneparam (dict): the parameters of the network
dataset (str): Dataset to use for training/evaluating
"""
logging.info("Getting datasets")
input_shape, x_train, x_test, y_train, y_test = get_data(dataset)
logging.info("Compling Keras model")
model = compile_model_mlp(geneparam, input_shape)
history = LossHistory()
model.fit(x_train, y_train,
epochs=1200,
# using early stopping so no real limit - don't want to waste time on horrible architectures
verbose=1,
validation_data =(x_test, y_test),
# callbacks=[history])
callbacks=[early_stopper])
score = model.evaluate(x_test, y_test, verbose=0)
print('Test mse:', score[0])
print('Test mae:', score[1])
r = pearsonr(model.predict(x_test).ravel(), y_test)[0]
print('Test r:', r)
logging.info("R: %.3f" % r)
K.clear_session()
# we do not care about keeping any of this in memory -
# we just need to know the final scores and the architecture
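    # pearsonr returns NaN when the predictions are constant; NaN != NaN, so the
    # check below maps that case to the worst fitness value.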
if r != r:
r = -1.0
return r |
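# Hedged usage sketch (hypothetical names): `dataset` only needs to expose the
# `trait` and `k` attributes that clean_data() above consumes; the values below
# are illustrative and assume the GA project's data files are available.
#   class DemoDataset:
#       trait = 'some_trait'
#       k = 1000
#   geneparam = {'nb_layers': 2, 'nb_neurons': 64, 'activation': 'relu',
#                'optimizer': 'adam', 'dropout': 0.2, 'weight_decay': 0.0}
#   fitness = train_and_score(geneparam, DemoDataset())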
py | 1a4dfbd4d331546e3f5b0854a18826413e2106b9 | # Copyright 2019 Christo Kirov. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for parsing PTB text files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import sys
import tensorflow as tf
def _read_words(filename):
print('Reading %s...' % (filename), file=sys.stderr)
with tf.io.gfile.GFile(filename, "r") as f:
return f.read().strip().replace("\n", "<eos>").split()
def _build_vocab(filename):
data = _read_words(filename)
data += _read_words(filename + ".out")
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
word_to_id = dict(zip(words, range(len(words))))
id_to_word = dict(zip(range(len(words)), words))
# Save the vocab to a file, ordered according to the index.
with open('labels.txt', 'w', encoding='utf-8') as outfile:
for w in words:
outfile.write(w + "\n")
return word_to_id
def _file_to_word_ids(filename, word_to_id):
print('Converting %s to IDs' % (filename), file=sys.stderr)
data_in = _read_words(filename)
data_out = _read_words(filename + ".out")
ids_in = [word_to_id[word] for word in data_in if word in word_to_id]
ids_out = [word_to_id[word] for word in data_out if word in word_to_id]
print(' ', len(ids_in),ids_in[-1],'|', len(ids_out), ids_out[-1], file=sys.stderr)
assert(len(ids_in) == len(ids_out))
return [(x,y) for x, y in zip(ids_in, ids_out)]
def lm_raw_data(data_path=None):
"""Load LM raw data from data directory "data_path".
Reads LM text files, converts strings to integer ids,
and performs mini-batching of the inputs.
Args:
    data_path: string path to the directory containing the LM text files.
Returns:
tuple (train_data, valid_data, test_data, vocabulary)
where each of the data objects can be passed to PTBIterator.
"""
train_path = os.path.join(data_path, "lm.train.txt")
valid_path = os.path.join(data_path, "lm.valid.txt")
test_path = os.path.join(data_path, "lm.test.txt")
word_to_id = _build_vocab(train_path)
train_data = _file_to_word_ids(train_path, word_to_id)
valid_data = _file_to_word_ids(valid_path, word_to_id)
test_data = _file_to_word_ids(test_path, word_to_id)
vocabulary = len(word_to_id)
return train_data, valid_data, test_data, vocabulary
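# Hedged usage sketch: assumes `data_path` holds lm.train.txt / lm.valid.txt /
# lm.test.txt together with their ".out" target files, as read above.
#   train_data, valid_data, test_data, vocab_size = lm_raw_data('/path/to/data')
#   print('vocabulary size:', vocab_size, 'training pairs:', len(train_data))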
def lm_producer(raw_data, batch_size, num_steps, name=None):
"""Iterate on the raw PTB data.
This chunks up raw_data into batches of examples and returns Tensors that
are drawn from these batches.
Args:
raw_data: one of the raw data outputs from ptb_raw_data.
batch_size: int, the batch size.
num_steps: int, the number of unrolls.
name: the name of this operation (optional).
Returns:
A pair of Tensors, each shaped [batch_size, num_steps]. The second element
of the tuple is the same data time-shifted to the right by one.
Raises:
tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
"""
with tf.name_scope(name, "LMProducer", [raw_data, batch_size, num_steps]):
raw_data_in = [cp[0] for cp in raw_data]
raw_data_out = [cp[1] for cp in raw_data]
raw_data_in = tf.convert_to_tensor(raw_data_in, name="raw_data", dtype=tf.int32)
raw_data_out = tf.convert_to_tensor(raw_data_out, name="raw_data", dtype=tf.int32)
data_len = tf.size(raw_data_in)
batch_len = data_len // batch_size
data_in = tf.reshape(raw_data_in[0 : batch_size * batch_len],
[batch_size, batch_len])
data_out = tf.reshape(raw_data_out[0 : batch_size * batch_len],
[batch_size, batch_len])
epoch_size = (batch_len - 1) // num_steps
assertion = tf.compat.v1.assert_positive(
epoch_size,
message="epoch_size == 0, decrease batch_size or num_steps")
with tf.control_dependencies([assertion]):
epoch_size = tf.identity(epoch_size, name="epoch_size")
i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
x = tf.strided_slice(data_in, [0, i * num_steps],
[batch_size, (i + 1) * num_steps])
x.set_shape([batch_size, num_steps])
y = tf.strided_slice(data_out, [0, i * num_steps + 1],
[batch_size, (i + 1) * num_steps + 1])
y.set_shape([batch_size, num_steps])
return x, y
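# Hedged usage sketch (TF1-style queue runners, matching the input-producer API
# used above; batch size and unroll length are illustrative):
#   x, y = lm_producer(train_data, batch_size=20, num_steps=35)
#   with tf.compat.v1.Session() as sess:
#       coord = tf.train.Coordinator()
#       threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#       batch_x, batch_y = sess.run([x, y])
#       coord.request_stop()
#       coord.join(threads)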
|
py | 1a4dfbe0db2e143c869ef4c5fb01e36789ff46f4 | import tensorflow as tf
class GAINGenerator(object):
"""This is class to impute missing value with proper values from
observed data and missing mask (0/1 flags indicating missing value)
"""
def __init__(self):
pass
def generate(self, x, m, z, drop):
"""Generate candidate values to be imputated.
Parameters
----------
x : tf.Tensor of tf.float32
data frame which is target of missing value imputation.
m : tf.Tensor of tf.bool
mask data indicating missing positions in x.
(if True, observed ; same size as x)
z : tf.Tensor of tf.float32
data frame each cell of which has random numbers
to generate imputed values (same size as x)
Returns
-------
xbar : tf.Tensor of tf.float32
generated data frame which has candidate values
(even in observed cell)
"""
assert x.shape.as_list() == m.shape.as_list() == z.shape.as_list()
assert x.dtype == z.dtype == tf.float32
assert m.dtype == tf.bool
mf = tf.cast(m, dtype=tf.float32, name="mask_float")
out = tf.concat([x, mf, z], axis=1, name="concat")
d = x.shape[1]
out = tf.layers.dense(out, d, activation=tf.tanh, name="dense1")
out = tf.layers.dropout(out, drop)
out = tf.layers.dense(out, int(int(d)/2), activation=tf.tanh, name="dense2")
out = tf.layers.dropout(out, drop)
out = tf.layers.dense(out, d, activation=tf.sigmoid, name="dense3")
xbar = out
return xbar
def impute(self, x, xbar, m):
"""Do missing value imputation. This method uses candidate
values in xbar (which is generated by generate method)
Parameters
----------
x : tf.Tensor of tf.float32
data frame which is target of missing value imputation.
xbar : tf.Tensor of tf.float32
data frame which is result of generate method.
all of missing value of x are imputed by candidate values.
(same size as x)
m : tf.Tensor of tf.bool
mask data indicating missing positions (if True, observed)
Returns
-------
xhat : tf.Tensor of tf.float32
result of missing value imputation of x.
"""
assert x.shape.as_list() == xbar.shape.as_list() == m.shape.as_list()
assert x.dtype == xbar.dtype == tf.float32
assert m.dtype == tf.bool
xhat = tf.where(m, x, xbar)
return xhat
def adversarial_loss(self, mhat, m, b):
"""Calculate adversarial loss. This method compares
actual missing mask from output of discriminator, and
uses hint (b).
Parameters
----------
mhat : tf.Tensor of tf.float32
A prediction result of missing mask of discriminator.
It contains probability whether it is observed.
m : tf.Tensor of tf.bool
actual missing mask (same size as mhat)
b : tf.Tensor of tf.bool
Hint flag data
each row has only one True, which is selected at random.
The other cells are False (same size as mhat)
Returns
-------
loss : tf.Tensor of tf.float32 (no dimension)
adversarial loss calculated
"""
assert mhat.shape.as_list() == m.shape.as_list() == b.shape.as_list()
assert mhat.dtype == tf.float32
assert m.dtype == b.dtype == tf.bool
eps = 1e-7
log_loss = - tf.where(m, tf.zeros_like(m, dtype=tf.float32),
tf.log(mhat + eps))
loss = tf.reduce_sum(tf.where(b, log_loss,
tf.zeros_like(b, dtype=tf.float32)))
return loss
def generate_loss(self, x, xbar, m):
"""Calculate generate loss.
The more x is similar to xbar, the less loss is.
Parameters
----------
x : tf.Tensor of tf.float32
data frame which is target of missing value imputation.
xbar : tf.Tensor of tf.float32
data frame which is result of generate method.
all of missing value of x are imputed by candidate values.
(same size as x)
m : tf.Tensor of tf.bool
            mask data indicating missing positions (if True, observed;
            same size as x)
Returns
-------
loss : tf.Tensor of tf.float32 (no dimension)
generate loss calculated
"""
assert x.shape.as_list() == xbar.shape.as_list() == m.shape.as_list()
assert x.dtype == xbar.dtype == tf.float32
assert m.dtype == tf.bool
mse = tf.square(x - xbar)
loss = tf.reduce_sum(tf.where(m, mse,
tf.zeros_like(m, dtype=tf.float32)))
return loss
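# Hedged usage sketch (graph-mode TF1 style, matching the tf.layers API above;
# shapes, the loss weight and the discriminator stand-in `mhat` are illustrative):
#   n, d = 128, 10
#   gen = GAINGenerator()
#   x = tf.placeholder(tf.float32, [n, d])
#   m = tf.placeholder(tf.bool, [n, d])
#   z = tf.placeholder(tf.float32, [n, d])
#   b = tf.placeholder(tf.bool, [n, d])
#   mhat = tf.placeholder(tf.float32, [n, d])  # would come from a discriminator
#   xbar = gen.generate(x, m, z, drop=0.5)
#   xhat = gen.impute(x, xbar, m)
#   g_loss = gen.adversarial_loss(mhat, m, b) + 10.0 * gen.generate_loss(x, xbar, m)
#   train_op = tf.train.AdamOptimizer(1e-3).minimize(g_loss)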
|
py | 1a4dfcd490d6c5460cb8982f2a3c9fcf5b7c4f82 | import unittest2
import os
import tempfile
import shutil
import pip
from hoplite.client.status_updater import MockStatusUpdater
from hoplite.builtin_plugins.constants import InstallPythonPackageJobConstants as KEYS
from hoplite.builtin_plugins import install_python_package_job
from httmock import urlmatch, response, HTTMock
# Monkey patch pip so it doesn't mess with logging. Otherwise, presence of nose xunit logging handlers will cause an
# error when pip tries to configure logging
def blank_func(blank_arg):
pass
import pip.basecommand
pip.basecommand.__dict__['logging_dictConfig'] = blank_func
@urlmatch(path='/reload$')
def reload_site_packages(url, request):
return response(200)
class TestInstallPythonPackage(unittest2.TestCase):
def test_install_from_local_path(self):
setup_str = "from setuptools import setup, find_packages;setup(name='poopy', version='0.1', packages=find_packages())"
tempdir = tempfile.mkdtemp()
try:
setup_py = open(os.path.join(tempdir, "setup.py"), 'w')
setup_py.write(setup_str)
setup_py.close()
package_path = os.path.join(tempdir, "poopy")
os.mkdir(package_path)
init_file = open(os.path.join(package_path, "__init__.py"), 'w')
init_file.close()
config = {KEYS.LOCAL_PATH: tempdir}
status = MockStatusUpdater()
with HTTMock(reload_site_packages):
install_python_package_job.run(config, status)
self.assertTrue(status.status["succeeded"])
try:
import poopy
except ImportError:
self.fail("Could not import installed package")
finally:
pip.main(['uninstall', '-y', "poopy"])
shutil.rmtree(tempdir)
def test_install_fails_success_false_stdout_info(self):
setup_str = "raise ValueError('I FAILED!')"
tempdir = tempfile.mkdtemp()
try:
setup_py = open(os.path.join(tempdir, "setup.py"), 'w')
setup_py.write(setup_str)
setup_py.close()
config = {KEYS.LOCAL_PATH: tempdir}
status = MockStatusUpdater()
install_python_package_job.run(config, status)
self.assertFalse(status.status["succeeded"])
# Because we monkey patch pip so it doesn't mess up nose xunit logging, the traceback info goes to the
# console rather than to status.status.stdout
#self.assertRegexpMatches(status.status["stdout"], "Traceback")
self.assertIn("Pip returned a non-zero error code", status.status["errors"])
finally:
shutil.rmtree(tempdir)
def test_missing_local_path_returns_errors(self):
config = {}
status = MockStatusUpdater()
install_python_package_job.run(config, status)
self.assertFalse(status.status["succeeded"])
self.assertIn("No local path specified", status.status["errors"]) |
py | 1a4dfdf8e2e8b6a5b29a5e5dde66384752ee397c | import operator
from django.db import InterfaceError
from django.db.backends.base.features import BaseDatabaseFeatures
from django.utils.functional import cached_property
class DatabaseFeatures(BaseDatabaseFeatures):
allows_group_by_selected_pks = True
can_return_columns_from_insert = True
can_return_rows_from_bulk_insert = True
has_real_datatype = True
has_native_uuid_field = True
has_native_duration_field = True
has_native_json_field = True
can_defer_constraint_checks = True
has_select_for_update = True
has_select_for_update_nowait = True
has_select_for_update_of = True
has_select_for_update_skip_locked = True
can_release_savepoints = True
supports_tablespaces = True
supports_transactions = True
can_introspect_autofield = True
can_introspect_ip_address_field = True
can_introspect_materialized_views = True
can_introspect_small_integer_field = True
can_distinct_on_fields = True
can_rollback_ddl = True
supports_combined_alters = True
nulls_order_largest = True
closed_cursor_error_class = InterfaceError
has_case_insensitive_like = False
greatest_least_ignores_nulls = True
can_clone_databases = True
supports_temporal_subtraction = True
supports_slicing_ordering_in_compound = True
create_test_procedure_without_params_sql = """
CREATE FUNCTION test_procedure () RETURNS void AS $$
DECLARE
V_I INTEGER;
BEGIN
V_I := 1;
END;
$$ LANGUAGE plpgsql;"""
create_test_procedure_with_int_param_sql = """
CREATE FUNCTION test_procedure (P_I INTEGER) RETURNS void AS $$
DECLARE
V_I INTEGER;
BEGIN
V_I := P_I;
END;
$$ LANGUAGE plpgsql;"""
requires_casted_case_in_updates = True
supports_over_clause = True
only_supports_unbounded_with_preceding_and_following = True
supports_aggregate_filter_clause = True
supported_explain_formats = {"JSON", "TEXT", "XML", "YAML"}
validates_explain_options = False # A query will error on invalid options.
supports_deferrable_unique_constraints = True
has_json_operators = True
json_key_contains_list_matching_requires_list = True
@cached_property
def is_postgresql_9_6(self):
return self.connection.pg_version >= 90600
@cached_property
def is_postgresql_10(self):
return self.connection.pg_version >= 100000
@cached_property
def is_postgresql_11(self):
return self.connection.pg_version >= 110000
@cached_property
def is_postgresql_12(self):
return self.connection.pg_version >= 120000
has_bloom_index = property(operator.attrgetter("is_postgresql_9_6"))
has_brin_autosummarize = property(operator.attrgetter("is_postgresql_10"))
has_phraseto_tsquery = property(operator.attrgetter("is_postgresql_9_6"))
has_websearch_to_tsquery = property(operator.attrgetter("is_postgresql_11"))
supports_table_partitions = property(operator.attrgetter("is_postgresql_10"))
|